Dataset schema (⌀ marks nullable columns):

| column | type | stats |
|---|---|---|
| hexsha | string | length 40–40 |
| size | int64 | 2 – 1.02M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4–245 |
| max_stars_repo_name | string | length 6–130 |
| max_stars_repo_head_hexsha | string | length 40–40 |
| max_stars_repo_licenses | list | length 1–10 |
| max_stars_count | int64 ⌀ | 1 – 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24–24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24–24 |
| max_issues_repo_path | string | length 4–245 |
| max_issues_repo_name | string | length 6–130 |
| max_issues_repo_head_hexsha | string | length 40–40 |
| max_issues_repo_licenses | list | length 1–10 |
| max_issues_count | int64 ⌀ | 1 – 67k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | length 24–24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | length 24–24 |
| max_forks_repo_path | string | length 4–245 |
| max_forks_repo_name | string | length 6–130 |
| max_forks_repo_head_hexsha | string | length 40–40 |
| max_forks_repo_licenses | list | length 1–10 |
| max_forks_count | int64 ⌀ | 1 – 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | length 24–24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | length 24–24 |
| content | string | length 2 – 1.02M |
| avg_line_length | float64 | 1 – 417k |
| max_line_length | int64 | 1 – 987k |
| alphanum_fraction | float64 | 0 – 1 |
| content_no_comment | string | length 0 – 1.01M |
| is_comment_constant_removed | bool | 1 class |
| is_sharp_comment_removed | bool | 1 class |
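Rows with this schema can be pulled with the `datasets` library. A minimal sketch, assuming a placeholder hub id (the dataset's real path is not given in this dump):

```python
from datasets import load_dataset

# "someuser/python-code-no-comments" is a placeholder, not the real hub id.
# Streaming avoids materializing the whole split locally.
ds = load_dataset("someuser/python-code-no-comments", split="train",
                  streaming=True)
row = next(iter(ds))
print(row["max_stars_repo_path"], row["size"], row["alphanum_fraction"])
```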
hexsha: 1c46aabc654962175d4c60a0f2c647f7ad0ed11f | size: 435 | ext: py | lang: Python
max_stars: leetcode/34.py @ windniw/just-for-fun (head 54e5c2be145f3848811bfd127f6a89545e921570), licenses ["Apache-2.0"], count 1, events 2019-08-28T23:15:25.000Z to 2019-08-28T23:15:25.000Z
max_issues: leetcode/34.py @ windniw/just-for-fun (head 54e5c2be145f3848811bfd127f6a89545e921570), licenses ["Apache-2.0"], count null, events null to null
max_forks: leetcode/34.py @ windniw/just-for-fun (head 54e5c2be145f3848811bfd127f6a89545e921570), licenses ["Apache-2.0"], count null, events null to null
content:
```python
"""
link: https://leetcode-cn.com/problems/find-first-and-last-position-of-element-in-sorted-array
problem: return the interval [first, last] of target in nums, or [-1, -1] if target is absent
solution: binary search
"""
import bisect
from typing import List

class Solution:
def searchRange(self, nums: List[int], target: int) -> List[int]:
a = bisect.bisect_left(nums, target)
b = bisect.bisect_right(nums, target)
if a == b:
return [-1, -1]
else:
return [a, b - 1]
```
avg_line_length: 24.166667 | max_line_length: 94 | alphanum_fraction: 0.593103
content_no_comment:
```python
import bisect
from typing import List

class Solution:
def searchRange(self, nums: List[int], target: int) -> List[int]:
a = bisect.bisect_left(nums, target)
b = bisect.bisect_right(nums, target)
if a == b:
return [-1, -1]
else:
return [a, b - 1]
```
is_comment_constant_removed: true | is_sharp_comment_removed: true
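The row above finds the range with `bisect_left`/`bisect_right`: they return the left and right insertion points of `target`, which coincide exactly when `target` is absent. A standalone check with illustrative values:

```python
import bisect

nums = [5, 7, 7, 8, 8, 10]        # sorted input, as the problem guarantees
a = bisect.bisect_left(nums, 8)   # first index where 8 could be inserted -> 3
b = bisect.bisect_right(nums, 8)  # one past the last 8 -> 5
print([a, b - 1] if a != b else [-1, -1])  # [3, 4]
```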
hexsha: 1c46aad880355143649c5f0dc5f7f6f388eb64a8 | size: 3,783 | ext: py | lang: Python
max_stars: pytest_ethereum/plugins.py @ jacqueswww/pytest-ethereum (head d45b441bd582eb4a17c37debd1dabf061a3e56eb), licenses ["MIT"], count null, events null to null
max_issues: pytest_ethereum/plugins.py @ jacqueswww/pytest-ethereum (head d45b441bd582eb4a17c37debd1dabf061a3e56eb), licenses ["MIT"], count null, events null to null
max_forks: pytest_ethereum/plugins.py @ jacqueswww/pytest-ethereum (head d45b441bd582eb4a17c37debd1dabf061a3e56eb), licenses ["MIT"], count null, events null to null
content:
```python
import json
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple
from eth_utils import to_dict, to_hex, to_tuple
import pytest
from vyper import compiler
from web3 import Web3
from ethpm import Package
from ethpm.tools import builder as b
from ethpm.typing import Manifest
from pytest_ethereum.deployer import Deployer
@pytest.fixture
def w3() -> Web3:
w3 = Web3(Web3.EthereumTesterProvider())
return w3
CONTRACTS_DIR = Path("./contracts")
SOURCES_GLOB = "**/*.vy"
@pytest.fixture
def manifest() -> Manifest:
if not CONTRACTS_DIR.is_dir():
raise FileNotFoundError("no contracts_dir")
all_sources = CONTRACTS_DIR.glob(SOURCES_GLOB)
compiler_output = generate_compiler_output(all_sources)
composed_contract_types = generate_contract_types(compiler_output)
composed_inline_sources = generate_inline_sources(compiler_output)
manifest = b.build(
{},
b.package_name("greeter"),
b.version("1.0.0"),
b.manifest_version("2"),
*composed_inline_sources,
*composed_contract_types,
b.validate(),
)
return manifest
def twig_manifest(path: Path, name: str, version: str) -> Manifest:
all_sources = path.glob(SOURCES_GLOB)
compiler_output = generate_compiler_output(all_sources)
composed_contract_types = generate_contract_types(compiler_output)
composed_inline_sources = generate_inline_sources(compiler_output)
manifest = b.build(
{},
b.package_name(name),
b.version(version),
b.manifest_version("2"),
*composed_inline_sources,
*composed_contract_types,
b.validate(),
)
return manifest
@to_tuple
def generate_inline_sources(compiler_output: Dict[str, Any]) -> Iterable[Manifest]:
for path in compiler_output.keys():
contract_type = path.split("/")[-1].split(".")[0]
yield b.inline_source(contract_type, compiler_output)
@to_tuple
def generate_contract_types(compiler_output: Dict[str, Any]) -> Iterable[Manifest]:
for path in compiler_output.keys():
contract_type = path.split("/")[-1].split(".")[0]
yield b.contract_type(contract_type, compiler_output)
@to_dict
def generate_compiler_output(
all_sources: List[Path]
) -> Iterable[Tuple[str, Dict[str, Any]]]:
for source in all_sources:
contract_file = str(source).split("/")[-1]
contract_type = contract_file.split(".")[0]
        # todo fix to accommodate multiple types in a single contract file
yield str(source), {contract_type: create_raw_asset_data(source.read_text())}
def create_raw_asset_data(source: str) -> Dict[str, Any]:
return {
"abi": compiler.mk_full_signature(source),
"evm": {
"bytecode": {
"object": to_hex(compiler.compile(source)),
"linkReferences": {},
}
},
}
@pytest.fixture
def package(manifest: Manifest, w3: Web3) -> Package:
return Package(manifest, w3)
# todo squash deployers
@pytest.fixture
def vy_deployer(package: Package) -> Deployer:
return Deployer(package)
@pytest.fixture
def twig_deployer(w3: Web3) -> Callable[[Path, str, str], Deployer]:
def _twig_deployer(
path: Path, name: Optional[str] = "twig", version: Optional[str] = "1.0.0"
) -> Deployer:
manifest = twig_manifest(path, name, version)
pkg = Package(manifest, w3)
return Deployer(pkg)
return _twig_deployer
@pytest.fixture
def solc_deployer(w3: Web3) -> Callable[[Path], Deployer]:
def _solc_deployer(path: Path) -> Deployer:
manifest = json.loads(path.read_text())
package = Package(manifest, w3)
return Deployer(package)
return _solc_deployer
```
avg_line_length: 28.877863 | max_line_length: 85 | alphanum_fraction: 0.680941
content_no_comment:
```python
import json
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple
from eth_utils import to_dict, to_hex, to_tuple
import pytest
from vyper import compiler
from web3 import Web3
from ethpm import Package
from ethpm.tools import builder as b
from ethpm.typing import Manifest
from pytest_ethereum.deployer import Deployer
@pytest.fixture
def w3() -> Web3:
w3 = Web3(Web3.EthereumTesterProvider())
return w3
CONTRACTS_DIR = Path("./contracts")
SOURCES_GLOB = "**/*.vy"
@pytest.fixture
def manifest() -> Manifest:
if not CONTRACTS_DIR.is_dir():
raise FileNotFoundError("no contracts_dir")
all_sources = CONTRACTS_DIR.glob(SOURCES_GLOB)
compiler_output = generate_compiler_output(all_sources)
composed_contract_types = generate_contract_types(compiler_output)
composed_inline_sources = generate_inline_sources(compiler_output)
manifest = b.build(
{},
b.package_name("greeter"),
b.version("1.0.0"),
b.manifest_version("2"),
*composed_inline_sources,
*composed_contract_types,
b.validate(),
)
return manifest
def twig_manifest(path: Path, name: str, version: str) -> Manifest:
all_sources = path.glob(SOURCES_GLOB)
compiler_output = generate_compiler_output(all_sources)
composed_contract_types = generate_contract_types(compiler_output)
composed_inline_sources = generate_inline_sources(compiler_output)
manifest = b.build(
{},
b.package_name(name),
b.version(version),
b.manifest_version("2"),
*composed_inline_sources,
*composed_contract_types,
b.validate(),
)
return manifest
@to_tuple
def generate_inline_sources(compiler_output: Dict[str, Any]) -> Iterable[Manifest]:
for path in compiler_output.keys():
contract_type = path.split("/")[-1].split(".")[0]
yield b.inline_source(contract_type, compiler_output)
@to_tuple
def generate_contract_types(compiler_output: Dict[str, Any]) -> Iterable[Manifest]:
for path in compiler_output.keys():
contract_type = path.split("/")[-1].split(".")[0]
yield b.contract_type(contract_type, compiler_output)
@to_dict
def generate_compiler_output(
all_sources: List[Path]
) -> Iterable[Tuple[str, Dict[str, Any]]]:
for source in all_sources:
contract_file = str(source).split("/")[-1]
contract_type = contract_file.split(".")[0]
yield str(source), {contract_type: create_raw_asset_data(source.read_text())}
def create_raw_asset_data(source: str) -> Dict[str, Any]:
return {
"abi": compiler.mk_full_signature(source),
"evm": {
"bytecode": {
"object": to_hex(compiler.compile(source)),
"linkReferences": {},
}
},
}
@pytest.fixture
def package(manifest: Manifest, w3: Web3) -> Package:
return Package(manifest, w3)
@pytest.fixture
def vy_deployer(package: Package) -> Deployer:
return Deployer(package)
@pytest.fixture
def twig_deployer(w3: Web3) -> Callable[[Path, str, str], Deployer]:
def _twig_deployer(
path: Path, name: Optional[str] = "twig", version: Optional[str] = "1.0.0"
) -> Deployer:
manifest = twig_manifest(path, name, version)
pkg = Package(manifest, w3)
return Deployer(pkg)
return _twig_deployer
@pytest.fixture
def solc_deployer(w3: Web3) -> Callable[[Path], Deployer]:
def _solc_deployer(path: Path) -> Deployer:
manifest = json.loads(path.read_text())
package = Package(manifest, w3)
return Deployer(package)
return _solc_deployer
```
is_comment_constant_removed: true | is_sharp_comment_removed: true
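A sketch of how a consuming test suite might use the fixtures in the row above. It relies only on behavior visible in that code: `twig_deployer` is a factory that takes a directory of Vyper sources (plus optional name and version) and returns a `Deployer`. The `contracts` path is illustrative:

```python
from pathlib import Path

from pytest_ethereum.deployer import Deployer

def test_twig_deployer_returns_a_deployer(twig_deployer):
    # Builds a manifest (default name "twig", version "1.0.0") from the
    # .vy files under the given directory and wraps it in a Deployer.
    deployer = twig_deployer(Path("contracts"))
    assert isinstance(deployer, Deployer)
```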
hexsha: 1c46ab3936f9d783c9129bd39881ccdee4abfb5d | size: 25,848 | ext: py | lang: Python
max_stars: tools/efrotools/pybuild.py @ nitingupta910/ballistica (head 7c8c645592ac184e80e409c14c7607f91fcc89df), licenses ["MIT"], count 317, events 2020-04-04T00:33:10.000Z to 2022-03-28T01:07:09.000Z
max_issues: tools/efrotools/pybuild.py @ Alshahriah/ballistica (head 326f6677a0118667e93ce9034849622ebef706fa), licenses ["MIT"], count 315, events 2020-04-04T22:33:10.000Z to 2022-03-31T22:50:02.000Z
max_forks: tools/efrotools/pybuild.py @ Alshahriah/ballistica (head 326f6677a0118667e93ce9034849622ebef706fa), licenses ["MIT"], count 97, events 2020-04-04T01:32:17.000Z to 2022-03-16T19:02:59.000Z
content:
```python
# Released under the MIT License. See LICENSE for details.
#
"""Functionality related to building python for ios, android, etc."""
from __future__ import annotations
import os
from typing import TYPE_CHECKING
from efrotools import PYVER, run, readfile, writefile, replace_one
if TYPE_CHECKING:
from typing import Any
ENABLE_OPENSSL = True
NEWER_PY_TEST = True
PY_VER_EXACT_ANDROID = '3.9.7'
PY_VER_EXACT_APPLE = '3.9.6'
# Filenames we prune from Python lib dirs in source repo to cut down on size.
PRUNE_LIB_NAMES = [
'config-*', 'idlelib', 'lib-dynload', 'lib2to3', 'multiprocessing',
'pydoc_data', 'site-packages', 'ensurepip', 'tkinter', 'wsgiref',
'distutils', 'turtle.py', 'turtledemo', 'test', 'sqlite3/test', 'unittest',
'dbm', 'venv', 'ctypes/test', 'imaplib.py', '_sysconfigdata_*'
]
# Same but for DLLs dir (windows only)
PRUNE_DLL_NAMES = ['*.ico']
def build_apple(arch: str, debug: bool = False) -> None:
"""Run a build for the provided apple arch (mac, ios, or tvos)."""
import platform
import subprocess
from efro.error import CleanError
    # IMPORTANT: seems we currently wind up building against /usr/local gettext
# stuff. Hopefully the maintainer fixes this, but for now I need to
# remind myself to blow it away while building.
# (via brew remove gettext --ignore-dependencies)
if ('MacBook-Fro' in platform.node()
and os.environ.get('SKIP_GETTEXT_WARNING') != '1'):
if (subprocess.run('which gettext', shell=True,
check=False).returncode == 0):
raise CleanError(
'NEED TO TEMP-KILL GETTEXT (or set SKIP_GETTEXT_WARNING=1)')
builddir = 'build/python_apple_' + arch + ('_debug' if debug else '')
run('rm -rf "' + builddir + '"')
run('mkdir -p build')
run('git clone '
'https://github.com/beeware/Python-Apple-support.git "' + builddir +
'"')
os.chdir(builddir)
# TEMP: Check out a particular commit while the branch head is broken.
# We can actually fix this to use the current one, but something
# broke in the underlying build even on old commits so keeping it
# locked for now...
# run('git checkout bf1ed73d0d5ff46862ba69dd5eb2ffaeff6f19b6')
run(f'git checkout {PYVER}')
txt = readfile('Makefile')
# Fix a bug where spaces in PATH cause errors (darn you vmware fusion!)
txt = replace_one(
txt, '&& PATH=$(PROJECT_DIR)/$(PYTHON_DIR-macOS)/dist/bin:$(PATH) .',
'&& PATH="$(PROJECT_DIR)/$(PYTHON_DIR-macOS)/dist/bin:$(PATH)" .')
# Turn doc strings on; looks like it only adds a few hundred k.
txt = txt.replace('--without-doc-strings', '--with-doc-strings')
# Set mac/ios version reqs
# (see issue with utimensat and futimens).
txt = replace_one(txt, 'MACOSX_DEPLOYMENT_TARGET=10.8',
'MACOSX_DEPLOYMENT_TARGET=10.15')
# And equivalent iOS (11+).
txt = replace_one(txt, 'CFLAGS-iOS=-mios-version-min=8.0',
'CFLAGS-iOS=-mios-version-min=13.0')
# Ditto for tvOS.
txt = replace_one(txt, 'CFLAGS-tvOS=-mtvos-version-min=9.0',
'CFLAGS-tvOS=-mtvos-version-min=13.0')
if debug:
# Add debug build flag
# (Currently expect to find 2 instances of this).
dline = '--with-doc-strings --enable-ipv6 --without-ensurepip'
splitlen = len(txt.split(dline))
if splitlen != 3:
raise Exception('unexpected configure lines')
txt = txt.replace(dline, '--with-pydebug ' + dline)
# Debug has a different name.
# (Currently expect to replace 12 instances of this).
dline = ('python$(PYTHON_VER)'
if NEWER_PY_TEST else 'python$(PYTHON_VER)m')
splitlen = len(txt.split(dline))
if splitlen != 13:
raise RuntimeError(f'Unexpected configure line count {splitlen}.')
txt = txt.replace(
dline, 'python$(PYTHON_VER)d'
if NEWER_PY_TEST else 'python$(PYTHON_VER)dm')
# Inject our custom modifications to fire before building.
txt = txt.replace(
' # Configure target Python\n',
' cd $$(PYTHON_DIR-$1) && '
f'../../../../../tools/pcommand python_apple_patch {arch}\n'
' # Configure target Python\n',
)
writefile('Makefile', txt)
# Ok; let 'er rip.
# (we run these in parallel so limit to 1 job a piece;
# otherwise they inherit the -j12 or whatever from the top level)
# (also this build seems to fail with multiple threads)
run(
'make -j1 ' + {
'mac': 'Python-macOS',
# 'mac': 'build/macOS/Python-3.9.6-macOS/Makefile',
'ios': 'Python-iOS',
'tvos': 'Python-tvOS'
}[arch])
print('python build complete! (apple/' + arch + ')')
def apple_patch(arch: str) -> None:
"""Run necessary patches on an apple archive before building."""
# Here's the deal: we want our custom static python libraries to
# be as similar as possible on apple platforms and android, so let's
# blow away all the tweaks that this setup does to Setup.local and
# instead apply our very similar ones directly to Setup, just as we
# do for android.
with open('Modules/Setup.local', 'w', encoding='utf-8') as outfile:
outfile.write('# cleared by efrotools build\n')
_patch_setup_file('apple', arch)
def build_android(rootdir: str, arch: str, debug: bool = False) -> None:
"""Run a build for android with the given architecture.
(can be arm, arm64, x86, or x86_64)
"""
import subprocess
builddir = 'build/python_android_' + arch + ('_debug' if debug else '')
run('rm -rf "' + builddir + '"')
run('mkdir -p build')
run('git clone '
'https://github.com/yan12125/python3-android.git "' + builddir + '"')
os.chdir(builddir)
# These builds require ANDROID_NDK to be set; make sure that's the case.
os.environ['ANDROID_NDK'] = subprocess.check_output(
[f'{rootdir}/tools/pcommand', 'android_sdk_utils',
'get-ndk-path']).decode().strip()
# Disable builds for dependencies we don't use.
ftxt = readfile('Android/build_deps.py')
# ftxt = replace_one(ftxt, ' NCurses,\n',
# '# NCurses,\n',)
ftxt = replace_one(
ftxt,
' '
'BZip2, GDBM, LibFFI, LibUUID, OpenSSL, Readline, SQLite, XZ, ZLib,\n',
' '
'BZip2, LibUUID, OpenSSL, SQLite, XZ, ZLib,\n',
)
# Older ssl seems to choke on newer ndk layouts.
ftxt = replace_one(
ftxt,
"source = 'https://www.openssl.org/source/openssl-1.1.1h.tar.gz'",
"source = 'https://www.openssl.org/source/openssl-1.1.1l.tar.gz'")
writefile('Android/build_deps.py', ftxt)
# Tweak some things in the base build script; grab the right version
# of Python and also inject some code to modify bits of python
# after it is extracted.
ftxt = readfile('build.sh')
ftxt = replace_one(ftxt, 'PYVER=3.9.0', f'PYVER={PY_VER_EXACT_ANDROID}')
ftxt = replace_one(
ftxt, ' popd\n', f' ../../../tools/pcommand'
f' python_android_patch Python-{PY_VER_EXACT_ANDROID}\n popd\n')
writefile('build.sh', ftxt)
# Ok, let 'er rip
# (we often run these builds in parallel so limit to 1 job a piece;
# otherwise they each inherit the -j12 or whatever from the top level).
exargs = ' --with-pydebug' if debug else ''
run(f'ARCH={arch} ANDROID_API=21 ./build.sh{exargs}')
print('python build complete! (android/' + arch + ')')
def android_patch() -> None:
"""Run necessary patches on an android archive before building."""
_patch_setup_file('android', '?')
def _patch_setup_file(platform: str, arch: str) -> None:
# pylint: disable=too-many-locals
# pylint: disable=too-many-statements
fname = 'Modules/Setup'
ftxt = readfile(fname)
if platform == 'android':
prefix = '$(srcdir)/Android/sysroot/usr'
uuid_ex = f' -L{prefix}/lib -luuid'
zlib_ex = f' -I{prefix}/include -L{prefix}/lib -lz'
bz2_ex = f' -I{prefix}/include -L{prefix}/lib -lbz2'
ssl_ex = f' -DUSE_SSL -I{prefix}/include -L{prefix}/lib -lssl -lcrypto'
sqlite_ex = f' -I{prefix}/include -L{prefix}/lib'
hash_ex = ' -DUSE_SSL -lssl -lcrypto'
lzma_ex = ' -llzma'
elif platform == 'apple':
prefix = '$(srcdir)/Android/sysroot/usr'
uuid_ex = ''
zlib_ex = ' -I$(prefix)/include -lz'
bz2_ex = (' -I$(srcdir)/../Support/BZip2/Headers'
' -L$(srcdir)/../Support/BZip2 -lbzip2')
ssl_ex = (' -I$(srcdir)/../Support/OpenSSL/Headers'
' -L$(srcdir)/../Support/OpenSSL -lOpenSSL -DUSE_SSL')
sqlite_ex = ' -I$(srcdir)/Modules/_sqlite'
hash_ex = (' -I$(srcdir)/../Support/OpenSSL/Headers'
' -L$(srcdir)/../Support/OpenSSL -lOpenSSL -DUSE_SSL')
lzma_ex = (' -I$(srcdir)/../Support/XZ/Headers'
' -L$(srcdir)/../Support/XZ/ -lxz')
else:
raise RuntimeError(f'Unknown platform {platform}')
# This list should contain all possible compiled modules to start.
# If any .so files are coming out of builds, their names should be
# added here to stop that.
cmodules = [
'_asyncio', '_bisect', '_blake2', '_codecs_cn', '_codecs_hk',
'_codecs_iso2022', '_codecs_jp', '_codecs_kr', '_codecs_tw',
'_contextvars', '_crypt', '_csv', '_ctypes_test', '_ctypes',
'_curses_panel', '_curses', '_datetime', '_decimal', '_elementtree',
'_heapq', '_json', '_lsprof', '_lzma', '_md5', '_multibytecodec',
'_multiprocessing', '_opcode', '_pickle', '_posixsubprocess', '_queue',
'_random', '_sha1', '_sha3', '_sha256', '_sha512', '_socket',
'_statistics', '_struct', '_testbuffer', '_testcapi',
'_testimportmultiple', '_testinternalcapi', '_testmultiphase', '_uuid',
'_xxsubinterpreters', '_xxtestfuzz', '_zoneinfo', 'array', 'audioop',
'binascii', 'cmath', 'fcntl', 'grp', 'math', 'mmap', 'ossaudiodev',
'parser', 'pyexpat', 'resource', 'select', 'syslog', 'termios',
'unicodedata', 'xxlimited', 'zlib'
]
# Selectively uncomment some existing modules for static compilation.
enables = [
'_asyncio', 'array', 'cmath', 'math', '_contextvars', '_struct',
'_random', '_elementtree', '_pickle', '_datetime', '_zoneinfo',
'_bisect', '_heapq', '_json', '_statistics', 'unicodedata', 'fcntl',
'select', 'mmap', '_csv', '_socket', '_sha3', '_blake2', 'binascii',
'_posixsubprocess'
]
# Note that the _md5 and _sha modules are normally only built if the
# system does not have the OpenSSL libs containing an optimized
# version.
if bool(False):
enables += ['_md5']
for enable in enables:
ftxt = replace_one(ftxt, f'#{enable} ', f'{enable} ')
cmodules.remove(enable)
# Disable ones that were enabled:
disables = ['xxsubtype']
for disable in disables:
ftxt = replace_one(ftxt, f'\n{disable} ', f'\n#{disable} ')
# Additions:
ftxt += '\n# Additions by efrotools:\n'
if bool(True):
ftxt += f'_uuid _uuidmodule.c{uuid_ex}\n'
cmodules.remove('_uuid')
ftxt += f'zlib zlibmodule.c{zlib_ex}\n'
cmodules.remove('zlib')
# Why isn't this getting built as a shared lib by default?
# Do we need it for sure?
ftxt += f'_hashlib _hashopenssl.c{hash_ex}\n'
ftxt += f'_lzma _lzmamodule.c{lzma_ex}\n'
cmodules.remove('_lzma')
ftxt += f'_bz2 _bz2module.c{bz2_ex}\n'
ftxt += f'_ssl _ssl.c{ssl_ex}\n'
ftxt += (f'_sqlite3'
f' _sqlite/cache.c'
f' _sqlite/connection.c'
f' _sqlite/cursor.c'
f' _sqlite/microprotocols.c'
f' _sqlite/module.c'
f' _sqlite/prepare_protocol.c'
f' _sqlite/row.c'
f' _sqlite/statement.c'
f' _sqlite/util.c'
f'{sqlite_ex}'
f' -DMODULE_NAME=\'\\"sqlite3\\"\''
f' -DSQLITE_OMIT_LOAD_EXTENSION'
f' -lsqlite3\n')
# Mac needs this:
if arch == 'mac':
ftxt += ('\n'
'# efrotools: mac urllib needs this:\n'
'_scproxy _scproxy.c '
'-framework SystemConfiguration '
'-framework CoreFoundation\n')
# Explicitly mark the remaining ones as disabled
# (so Python won't try to build them as dynamic libs).
remaining_disabled = ' '.join(cmodules)
ftxt += ('\n# Disabled by efrotools build:\n'
'*disabled*\n'
f'{remaining_disabled}\n')
writefile(fname, ftxt)
# Ok, this is weird.
# When applying the module Setup, python looks for any line containing *=*
    # and interprets the whole thing as a global define?...
    # This breaks things for our static sqlite compile above.
    # The check used to look for [A-Z]*=* which didn't break, so let's just
# change it back to that for now.
# UPDATE: Currently this seems to only be necessary on Android;
# perhaps this broke between 3.9.6 and 3.9.7 or perhaps the apple
# bundle already patches it ¯\_(ツ)_/¯
fname = 'Modules/makesetup'
txt = readfile(fname)
if platform == 'android':
txt = replace_one(txt, ' *=*)'
' DEFS="$line$NL$DEFS"; continue;;',
' [A-Z]*=*) DEFS="$line$NL$DEFS";'
' continue;;')
assert txt.count('[A-Z]*=*') == 1
writefile(fname, txt)
def winprune() -> None:
"""Prune unneeded files from windows python dists."""
for libdir in ('assets/src/windows/Win32/Lib',
'assets/src/windows/x64/Lib'):
assert os.path.isdir(libdir)
run('cd "' + libdir + '" && rm -rf ' + ' '.join(PRUNE_LIB_NAMES))
for dlldir in ('assets/src/windows/Win32/DLLs',
'assets/src/windows/x64/DLLs'):
assert os.path.isdir(dlldir)
run('cd "' + dlldir + '" && rm -rf ' + ' '.join(PRUNE_DLL_NAMES))
print('Win-prune successful.')
def gather() -> None:
"""Gather per-platform python headers, libs, and modules together.
This assumes all embeddable py builds have been run successfully,
and that PROJROOT is the cwd.
"""
# pylint: disable=too-many-locals
do_android = True
# First off, clear out any existing output.
existing_dirs = [
os.path.join('src/external', d) for d in os.listdir('src/external')
if d.startswith('python-') and d != 'python-notes.txt'
]
existing_dirs += [
os.path.join('assets/src', d) for d in os.listdir('assets/src')
if d.startswith('pylib-')
]
if not do_android:
existing_dirs = [d for d in existing_dirs if 'android' not in d]
for existing_dir in existing_dirs:
run('rm -rf "' + existing_dir + '"')
apost2 = f'src/Python-{PY_VER_EXACT_ANDROID}/Android/sysroot'
for buildtype in ['debug', 'release']:
debug = buildtype == 'debug'
bsuffix = '_debug' if buildtype == 'debug' else ''
bsuffix2 = '-debug' if buildtype == 'debug' else ''
libname = 'python' + PYVER + ('d' if debug else '')
bases = {
'mac': f'build/python_apple_mac{bsuffix}/build/macOS',
'ios': f'build/python_apple_ios{bsuffix}/build/iOS',
'tvos': f'build/python_apple_tvos{bsuffix}/build/tvOS',
'android_arm': f'build/python_android_arm{bsuffix}/build',
'android_arm64': f'build/python_android_arm64{bsuffix}/build',
'android_x86': f'build/python_android_x86{bsuffix}/build',
'android_x86_64': f'build/python_android_x86_64{bsuffix}/build'
}
bases2 = {
'android_arm': f'build/python_android_arm{bsuffix}/{apost2}',
'android_arm64': f'build/python_android_arm64{bsuffix}/{apost2}',
'android_x86': f'build/python_android_x86{bsuffix}/{apost2}',
'android_x86_64': f'build/python_android_x86_64{bsuffix}/{apost2}'
}
# Note: only need pylib for the first in each group.
builds: list[dict[str, Any]] = [{
'name':
'macos',
'group':
'apple',
'headers':
bases['mac'] + '/Support/Python/Headers',
'libs': [
bases['mac'] + '/Support/Python/libPython.a',
bases['mac'] + '/Support/OpenSSL/libOpenSSL.a',
bases['mac'] + '/Support/XZ/libxz.a',
bases['mac'] + '/Support/BZip2/libbzip2.a',
],
'pylib':
(bases['mac'] + f'/Python-{PY_VER_EXACT_APPLE}-macOS/lib'),
}, {
'name':
'ios',
'group':
'apple',
'headers':
bases['ios'] + '/Support/Python/Headers',
'libs': [
bases['ios'] + '/Support/Python/libPython.a',
bases['ios'] + '/Support/OpenSSL/libOpenSSL.a',
bases['ios'] + '/Support/XZ/libxz.a',
bases['ios'] + '/Support/BZip2/libbzip2.a',
],
}, {
'name':
'tvos',
'group':
'apple',
'headers':
bases['tvos'] + '/Support/Python/Headers',
'libs': [
bases['tvos'] + '/Support/Python/libPython.a',
bases['tvos'] + '/Support/OpenSSL/libOpenSSL.a',
bases['tvos'] + '/Support/XZ/libxz.a',
bases['tvos'] + '/Support/BZip2/libbzip2.a',
],
}, {
'name': 'android_arm',
'group': 'android',
'headers': bases['android_arm'] + f'/usr/include/{libname}',
'libs': [
bases['android_arm'] + f'/usr/lib/lib{libname}.a',
bases2['android_arm'] + '/usr/lib/libssl.a',
bases2['android_arm'] + '/usr/lib/libcrypto.a',
bases2['android_arm'] + '/usr/lib/liblzma.a',
bases2['android_arm'] + '/usr/lib/libsqlite3.a',
bases2['android_arm'] + '/usr/lib/libbz2.a',
bases2['android_arm'] + '/usr/lib/libuuid.a',
],
'libinst': 'android_armeabi-v7a',
'pylib': (bases['android_arm'] + '/usr/lib/python' + PYVER),
}, {
'name': 'android_arm64',
'group': 'android',
'headers': bases['android_arm64'] + f'/usr/include/{libname}',
'libs': [
bases['android_arm64'] + f'/usr/lib/lib{libname}.a',
bases2['android_arm64'] + '/usr/lib/libssl.a',
bases2['android_arm64'] + '/usr/lib/libcrypto.a',
bases2['android_arm64'] + '/usr/lib/liblzma.a',
bases2['android_arm64'] + '/usr/lib/libsqlite3.a',
bases2['android_arm64'] + '/usr/lib/libbz2.a',
bases2['android_arm64'] + '/usr/lib/libuuid.a',
],
'libinst': 'android_arm64-v8a',
}, {
'name': 'android_x86',
'group': 'android',
'headers': bases['android_x86'] + f'/usr/include/{libname}',
'libs': [
bases['android_x86'] + f'/usr/lib/lib{libname}.a',
bases2['android_x86'] + '/usr/lib/libssl.a',
bases2['android_x86'] + '/usr/lib/libcrypto.a',
bases2['android_x86'] + '/usr/lib/liblzma.a',
bases2['android_x86'] + '/usr/lib/libsqlite3.a',
bases2['android_x86'] + '/usr/lib/libbz2.a',
bases2['android_x86'] + '/usr/lib/libuuid.a',
],
'libinst': 'android_x86',
}, {
'name': 'android_x86_64',
'group': 'android',
'headers': bases['android_x86_64'] + f'/usr/include/{libname}',
'libs': [
bases['android_x86_64'] + f'/usr/lib/lib{libname}.a',
bases2['android_x86_64'] + '/usr/lib/libssl.a',
bases2['android_x86_64'] + '/usr/lib/libcrypto.a',
bases2['android_x86_64'] + '/usr/lib/liblzma.a',
bases2['android_x86_64'] + '/usr/lib/libsqlite3.a',
bases2['android_x86_64'] + '/usr/lib/libbz2.a',
bases2['android_x86_64'] + '/usr/lib/libuuid.a',
],
'libinst': 'android_x86_64',
}]
for build in builds:
grp = build['group']
if not do_android and grp == 'android':
continue
builddir = f'src/external/python-{grp}{bsuffix2}'
header_dst = os.path.join(builddir, 'include')
lib_dst = os.path.join(builddir, 'lib')
assets_src_dst = f'assets/src/pylib-{grp}'
# Do some setup only once per group.
if not os.path.exists(builddir):
run('mkdir -p "' + builddir + '"')
run('mkdir -p "' + lib_dst + '"')
# Only pull modules into game assets on release pass.
if not debug:
# Copy system modules into the src assets
# dir for this group.
run('mkdir -p "' + assets_src_dst + '"')
run('rsync --recursive --include "*.py"'
' --exclude __pycache__ --include "*/" --exclude "*" "'
+ build['pylib'] + '/" "' + assets_src_dst + '"')
# Prune a bunch of modules we don't need to cut
# down on size.
run('cd "' + assets_src_dst + '" && rm -rf ' +
' '.join(PRUNE_LIB_NAMES))
# Some minor filtering to system scripts:
# on iOS/tvOS, addusersitepackages() leads to a crash
# due to _sysconfigdata_dm_ios_darwin module not existing,
# so let's skip that.
fname = f'{assets_src_dst}/site.py'
txt = readfile(fname)
txt = replace_one(
txt,
' known_paths = addusersitepackages(known_paths)',
' # efro tweak: this craps out on ios/tvos.\n'
' # (and we don\'t use it anyway)\n'
' # known_paths = addusersitepackages(known_paths)')
writefile(fname, txt)
# Copy in a base set of headers (everything in a group should
# be using the same headers)
run(f'cp -r "{build["headers"]}" "{header_dst}"')
# Clear whatever pyconfigs came across; we'll build our own
# universal one below.
run('rm ' + header_dst + '/pyconfig*')
# Write a master pyconfig header that reroutes to each
# platform's actual header.
with open(header_dst + '/pyconfig.h', 'w',
encoding='utf-8') as hfile:
hfile.write(
'#if BA_OSTYPE_MACOS\n'
'#include "pyconfig-macos.h"\n\n'
'#elif BA_OSTYPE_IOS\n'
'#include "pyconfig-ios.h"\n\n'
'#elif BA_OSTYPE_TVOS\n'
'#include "pyconfig-tvos.h"\n\n'
'#elif BA_OSTYPE_ANDROID and defined(__arm__)\n'
'#include "pyconfig-android_arm.h"\n\n'
'#elif BA_OSTYPE_ANDROID and defined(__aarch64__)\n'
'#include "pyconfig-android_arm64.h"\n\n'
'#elif BA_OSTYPE_ANDROID and defined(__i386__)\n'
'#include "pyconfig-android_x86.h"\n\n'
'#elif BA_OSTYPE_ANDROID and defined(__x86_64__)\n'
'#include "pyconfig-android_x86_64.h"\n\n'
'#else\n'
'#error unknown platform\n\n'
'#endif\n')
# Now copy each build's config headers in with unique names.
cfgs = [
f for f in os.listdir(build['headers'])
if f.startswith('pyconfig')
]
# Copy config headers to their filtered names.
for cfg in cfgs:
out = cfg.replace('pyconfig', 'pyconfig-' + build['name'])
if cfg == 'pyconfig.h':
# For platform's root pyconfig.h we need to filter
# contents too (those headers can themselves include
# others; ios for instance points to a arm64 and a
# x86_64 variant).
contents = readfile(build['headers'] + '/' + cfg)
contents = contents.replace('pyconfig',
'pyconfig-' + build['name'])
writefile(header_dst + '/' + out, contents)
else:
# other configs we just rename
run('cp "' + build['headers'] + '/' + cfg + '" "' +
header_dst + '/' + out + '"')
# Copy in libs. If the lib gave a specific install name,
# use that; otherwise use name.
targetdir = lib_dst + '/' + build.get('libinst', build['name'])
run('rm -rf "' + targetdir + '"')
run('mkdir -p "' + targetdir + '"')
for lib in build['libs']:
run('cp "' + lib + '" "' + targetdir + '"')
print('Great success!')
```
avg_line_length: 41.757674 | max_line_length: 79 | alphanum_fraction: 0.548205
content_no_comment:
```python
from __future__ import annotations
import os
from typing import TYPE_CHECKING
from efrotools import PYVER, run, readfile, writefile, replace_one
if TYPE_CHECKING:
from typing import Any
ENABLE_OPENSSL = True
NEWER_PY_TEST = True
PY_VER_EXACT_ANDROID = '3.9.7'
PY_VER_EXACT_APPLE = '3.9.6'
PRUNE_LIB_NAMES = [
'config-*', 'idlelib', 'lib-dynload', 'lib2to3', 'multiprocessing',
'pydoc_data', 'site-packages', 'ensurepip', 'tkinter', 'wsgiref',
'distutils', 'turtle.py', 'turtledemo', 'test', 'sqlite3/test', 'unittest',
'dbm', 'venv', 'ctypes/test', 'imaplib.py', '_sysconfigdata_*'
]
PRUNE_DLL_NAMES = ['*.ico']
def build_apple(arch: str, debug: bool = False) -> None:
import platform
import subprocess
from efro.error import CleanError
if ('MacBook-Fro' in platform.node()
and os.environ.get('SKIP_GETTEXT_WARNING') != '1'):
if (subprocess.run('which gettext', shell=True,
check=False).returncode == 0):
raise CleanError(
'NEED TO TEMP-KILL GETTEXT (or set SKIP_GETTEXT_WARNING=1)')
builddir = 'build/python_apple_' + arch + ('_debug' if debug else '')
run('rm -rf "' + builddir + '"')
run('mkdir -p build')
run('git clone '
'https://github.com/beeware/Python-Apple-support.git "' + builddir +
'"')
os.chdir(builddir)
run(f'git checkout {PYVER}')
txt = readfile('Makefile')
txt = replace_one(
txt, '&& PATH=$(PROJECT_DIR)/$(PYTHON_DIR-macOS)/dist/bin:$(PATH) .',
'&& PATH="$(PROJECT_DIR)/$(PYTHON_DIR-macOS)/dist/bin:$(PATH)" .')
txt = txt.replace('--without-doc-strings', '--with-doc-strings')
txt = replace_one(txt, 'MACOSX_DEPLOYMENT_TARGET=10.8',
'MACOSX_DEPLOYMENT_TARGET=10.15')
txt = replace_one(txt, 'CFLAGS-iOS=-mios-version-min=8.0',
'CFLAGS-iOS=-mios-version-min=13.0')
txt = replace_one(txt, 'CFLAGS-tvOS=-mtvos-version-min=9.0',
'CFLAGS-tvOS=-mtvos-version-min=13.0')
if debug:
dline = '--with-doc-strings --enable-ipv6 --without-ensurepip'
splitlen = len(txt.split(dline))
if splitlen != 3:
raise Exception('unexpected configure lines')
txt = txt.replace(dline, '--with-pydebug ' + dline)
dline = ('python$(PYTHON_VER)'
if NEWER_PY_TEST else 'python$(PYTHON_VER)m')
splitlen = len(txt.split(dline))
if splitlen != 13:
raise RuntimeError(f'Unexpected configure line count {splitlen}.')
txt = txt.replace(
dline, 'python$(PYTHON_VER)d'
if NEWER_PY_TEST else 'python$(PYTHON_VER)dm')
txt = txt.replace(
' # Configure target Python\n',
' cd $$(PYTHON_DIR-$1) && '
f'../../../../../tools/pcommand python_apple_patch {arch}\n'
' # Configure target Python\n',
)
writefile('Makefile', txt)
# (we run these in parallel so limit to 1 job a piece;
# otherwise they inherit the -j12 or whatever from the top level)
# (also this build seems to fail with multiple threads)
run(
'make -j1 ' + {
'mac': 'Python-macOS',
# 'mac': 'build/macOS/Python-3.9.6-macOS/Makefile',
'ios': 'Python-iOS',
'tvos': 'Python-tvOS'
}[arch])
print('python build complete! (apple/' + arch + ')')
def apple_patch(arch: str) -> None:
    # Here's the deal: we want our custom static python libraries to
    # be as similar as possible on apple platforms and android, so let's
# blow away all the tweaks that this setup does to Setup.local and
# instead apply our very similar ones directly to Setup, just as we
# do for android.
with open('Modules/Setup.local', 'w', encoding='utf-8') as outfile:
        outfile.write('# cleared by efrotools build\n')
_patch_setup_file('apple', arch)
def build_android(rootdir: str, arch: str, debug: bool = False) -> None:
import subprocess
builddir = 'build/python_android_' + arch + ('_debug' if debug else '')
run('rm -rf "' + builddir + '"')
run('mkdir -p build')
run('git clone '
'https://github.com/yan12125/python3-android.git "' + builddir + '"')
os.chdir(builddir)
# These builds require ANDROID_NDK to be set; make sure that's the case.
os.environ['ANDROID_NDK'] = subprocess.check_output(
[f'{rootdir}/tools/pcommand', 'android_sdk_utils',
'get-ndk-path']).decode().strip()
ftxt = readfile('Android/build_deps.py')
    # ftxt = replace_one(ftxt, ' NCurses,\n',
    #                    '# NCurses,\n',)
ftxt = replace_one(
ftxt,
' '
'BZip2, GDBM, LibFFI, LibUUID, OpenSSL, Readline, SQLite, XZ, ZLib,\n',
' '
'BZip2, LibUUID, OpenSSL, SQLite, XZ, ZLib,\n',
)
# Older ssl seems to choke on newer ndk layouts.
ftxt = replace_one(
ftxt,
"source = 'https://www.openssl.org/source/openssl-1.1.1h.tar.gz'",
"source = 'https://www.openssl.org/source/openssl-1.1.1l.tar.gz'")
writefile('Android/build_deps.py', ftxt)
# Tweak some things in the base build script; grab the right version
# of Python and also inject some code to modify bits of python
# after it is extracted.
ftxt = readfile('build.sh')
ftxt = replace_one(ftxt, 'PYVER=3.9.0', f'PYVER={PY_VER_EXACT_ANDROID}')
ftxt = replace_one(
ftxt, ' popd\n', f' ../../../tools/pcommand'
f' python_android_patch Python-{PY_VER_EXACT_ANDROID}\n popd\n')
writefile('build.sh', ftxt)
# Ok, let 'er rip
exargs = ' --with-pydebug' if debug else ''
run(f'ARCH={arch} ANDROID_API=21 ./build.sh{exargs}')
print('python build complete! (android/' + arch + ')')
def android_patch() -> None:
_patch_setup_file('android', '?')
def _patch_setup_file(platform: str, arch: str) -> None:
fname = 'Modules/Setup'
ftxt = readfile(fname)
if platform == 'android':
prefix = '$(srcdir)/Android/sysroot/usr'
uuid_ex = f' -L{prefix}/lib -luuid'
zlib_ex = f' -I{prefix}/include -L{prefix}/lib -lz'
bz2_ex = f' -I{prefix}/include -L{prefix}/lib -lbz2'
ssl_ex = f' -DUSE_SSL -I{prefix}/include -L{prefix}/lib -lssl -lcrypto'
sqlite_ex = f' -I{prefix}/include -L{prefix}/lib'
hash_ex = ' -DUSE_SSL -lssl -lcrypto'
lzma_ex = ' -llzma'
elif platform == 'apple':
prefix = '$(srcdir)/Android/sysroot/usr'
uuid_ex = ''
zlib_ex = ' -I$(prefix)/include -lz'
bz2_ex = (' -I$(srcdir)/../Support/BZip2/Headers'
' -L$(srcdir)/../Support/BZip2 -lbzip2')
ssl_ex = (' -I$(srcdir)/../Support/OpenSSL/Headers'
' -L$(srcdir)/../Support/OpenSSL -lOpenSSL -DUSE_SSL')
sqlite_ex = ' -I$(srcdir)/Modules/_sqlite'
hash_ex = (' -I$(srcdir)/../Support/OpenSSL/Headers'
' -L$(srcdir)/../Support/OpenSSL -lOpenSSL -DUSE_SSL')
lzma_ex = (' -I$(srcdir)/../Support/XZ/Headers'
' -L$(srcdir)/../Support/XZ/ -lxz')
else:
raise RuntimeError(f'Unknown platform {platform}')
cmodules = [
'_asyncio', '_bisect', '_blake2', '_codecs_cn', '_codecs_hk',
'_codecs_iso2022', '_codecs_jp', '_codecs_kr', '_codecs_tw',
'_contextvars', '_crypt', '_csv', '_ctypes_test', '_ctypes',
'_curses_panel', '_curses', '_datetime', '_decimal', '_elementtree',
'_heapq', '_json', '_lsprof', '_lzma', '_md5', '_multibytecodec',
'_multiprocessing', '_opcode', '_pickle', '_posixsubprocess', '_queue',
'_random', '_sha1', '_sha3', '_sha256', '_sha512', '_socket',
'_statistics', '_struct', '_testbuffer', '_testcapi',
'_testimportmultiple', '_testinternalcapi', '_testmultiphase', '_uuid',
'_xxsubinterpreters', '_xxtestfuzz', '_zoneinfo', 'array', 'audioop',
'binascii', 'cmath', 'fcntl', 'grp', 'math', 'mmap', 'ossaudiodev',
'parser', 'pyexpat', 'resource', 'select', 'syslog', 'termios',
'unicodedata', 'xxlimited', 'zlib'
]
enables = [
'_asyncio', 'array', 'cmath', 'math', '_contextvars', '_struct',
'_random', '_elementtree', '_pickle', '_datetime', '_zoneinfo',
'_bisect', '_heapq', '_json', '_statistics', 'unicodedata', 'fcntl',
'select', 'mmap', '_csv', '_socket', '_sha3', '_blake2', 'binascii',
'_posixsubprocess'
]
if bool(False):
enables += ['_md5']
for enable in enables:
ftxt = replace_one(ftxt, f'#{enable} ', f'{enable} ')
cmodules.remove(enable)
disables = ['xxsubtype']
for disable in disables:
ftxt = replace_one(ftxt, f'\n{disable} ', f'\n#{disable} ')
ftxt += '\n# Additions by efrotools:\n'
if bool(True):
ftxt += f'_uuid _uuidmodule.c{uuid_ex}\n'
cmodules.remove('_uuid')
ftxt += f'zlib zlibmodule.c{zlib_ex}\n'
cmodules.remove('zlib')
        # Why isn't this getting built as a shared lib by default?
        # Do we need it for sure?
ftxt += f'_hashlib _hashopenssl.c{hash_ex}\n'
ftxt += f'_lzma _lzmamodule.c{lzma_ex}\n'
cmodules.remove('_lzma')
ftxt += f'_bz2 _bz2module.c{bz2_ex}\n'
ftxt += f'_ssl _ssl.c{ssl_ex}\n'
ftxt += (f'_sqlite3'
f' _sqlite/cache.c'
f' _sqlite/connection.c'
f' _sqlite/cursor.c'
f' _sqlite/microprotocols.c'
f' _sqlite/module.c'
f' _sqlite/prepare_protocol.c'
f' _sqlite/row.c'
f' _sqlite/statement.c'
f' _sqlite/util.c'
f'{sqlite_ex}'
f' -DMODULE_NAME=\'\\"sqlite3\\"\''
f' -DSQLITE_OMIT_LOAD_EXTENSION'
f' -lsqlite3\n')
# Mac needs this:
if arch == 'mac':
ftxt += ('\n'
                 '# efrotools: mac urllib needs this:\n'
'_scproxy _scproxy.c '
'-framework SystemConfiguration '
'-framework CoreFoundation\n')
# Explicitly mark the remaining ones as disabled
# (so Python won't try to build them as dynamic libs).
remaining_disabled = ' '.join(cmodules)
ftxt += ('\n# Disabled by efrotools build:\n'
'*disabled*\n'
f'{remaining_disabled}\n')
writefile(fname, ftxt)
fname = 'Modules/makesetup'
txt = readfile(fname)
if platform == 'android':
txt = replace_one(txt, ' *=*)'
' DEFS="$line$NL$DEFS"; continue;;',
' [A-Z]*=*) DEFS="$line$NL$DEFS";'
' continue;;')
assert txt.count('[A-Z]*=*') == 1
writefile(fname, txt)
def winprune() -> None:
for libdir in ('assets/src/windows/Win32/Lib',
'assets/src/windows/x64/Lib'):
assert os.path.isdir(libdir)
run('cd "' + libdir + '" && rm -rf ' + ' '.join(PRUNE_LIB_NAMES))
for dlldir in ('assets/src/windows/Win32/DLLs',
'assets/src/windows/x64/DLLs'):
assert os.path.isdir(dlldir)
run('cd "' + dlldir + '" && rm -rf ' + ' '.join(PRUNE_DLL_NAMES))
print('Win-prune successful.')
def gather() -> None:
do_android = True
existing_dirs = [
os.path.join('src/external', d) for d in os.listdir('src/external')
if d.startswith('python-') and d != 'python-notes.txt'
]
existing_dirs += [
os.path.join('assets/src', d) for d in os.listdir('assets/src')
if d.startswith('pylib-')
]
if not do_android:
existing_dirs = [d for d in existing_dirs if 'android' not in d]
for existing_dir in existing_dirs:
run('rm -rf "' + existing_dir + '"')
apost2 = f'src/Python-{PY_VER_EXACT_ANDROID}/Android/sysroot'
for buildtype in ['debug', 'release']:
debug = buildtype == 'debug'
bsuffix = '_debug' if buildtype == 'debug' else ''
bsuffix2 = '-debug' if buildtype == 'debug' else ''
libname = 'python' + PYVER + ('d' if debug else '')
bases = {
'mac': f'build/python_apple_mac{bsuffix}/build/macOS',
'ios': f'build/python_apple_ios{bsuffix}/build/iOS',
'tvos': f'build/python_apple_tvos{bsuffix}/build/tvOS',
'android_arm': f'build/python_android_arm{bsuffix}/build',
'android_arm64': f'build/python_android_arm64{bsuffix}/build',
'android_x86': f'build/python_android_x86{bsuffix}/build',
'android_x86_64': f'build/python_android_x86_64{bsuffix}/build'
}
bases2 = {
'android_arm': f'build/python_android_arm{bsuffix}/{apost2}',
'android_arm64': f'build/python_android_arm64{bsuffix}/{apost2}',
'android_x86': f'build/python_android_x86{bsuffix}/{apost2}',
'android_x86_64': f'build/python_android_x86_64{bsuffix}/{apost2}'
}
builds: list[dict[str, Any]] = [{
'name':
'macos',
'group':
'apple',
'headers':
bases['mac'] + '/Support/Python/Headers',
'libs': [
bases['mac'] + '/Support/Python/libPython.a',
bases['mac'] + '/Support/OpenSSL/libOpenSSL.a',
bases['mac'] + '/Support/XZ/libxz.a',
bases['mac'] + '/Support/BZip2/libbzip2.a',
],
'pylib':
(bases['mac'] + f'/Python-{PY_VER_EXACT_APPLE}-macOS/lib'),
}, {
'name':
'ios',
'group':
'apple',
'headers':
bases['ios'] + '/Support/Python/Headers',
'libs': [
bases['ios'] + '/Support/Python/libPython.a',
bases['ios'] + '/Support/OpenSSL/libOpenSSL.a',
bases['ios'] + '/Support/XZ/libxz.a',
bases['ios'] + '/Support/BZip2/libbzip2.a',
],
}, {
'name':
'tvos',
'group':
'apple',
'headers':
bases['tvos'] + '/Support/Python/Headers',
'libs': [
bases['tvos'] + '/Support/Python/libPython.a',
bases['tvos'] + '/Support/OpenSSL/libOpenSSL.a',
bases['tvos'] + '/Support/XZ/libxz.a',
bases['tvos'] + '/Support/BZip2/libbzip2.a',
],
}, {
'name': 'android_arm',
'group': 'android',
'headers': bases['android_arm'] + f'/usr/include/{libname}',
'libs': [
bases['android_arm'] + f'/usr/lib/lib{libname}.a',
bases2['android_arm'] + '/usr/lib/libssl.a',
bases2['android_arm'] + '/usr/lib/libcrypto.a',
bases2['android_arm'] + '/usr/lib/liblzma.a',
bases2['android_arm'] + '/usr/lib/libsqlite3.a',
bases2['android_arm'] + '/usr/lib/libbz2.a',
bases2['android_arm'] + '/usr/lib/libuuid.a',
],
'libinst': 'android_armeabi-v7a',
'pylib': (bases['android_arm'] + '/usr/lib/python' + PYVER),
}, {
'name': 'android_arm64',
'group': 'android',
'headers': bases['android_arm64'] + f'/usr/include/{libname}',
'libs': [
bases['android_arm64'] + f'/usr/lib/lib{libname}.a',
bases2['android_arm64'] + '/usr/lib/libssl.a',
bases2['android_arm64'] + '/usr/lib/libcrypto.a',
bases2['android_arm64'] + '/usr/lib/liblzma.a',
bases2['android_arm64'] + '/usr/lib/libsqlite3.a',
bases2['android_arm64'] + '/usr/lib/libbz2.a',
bases2['android_arm64'] + '/usr/lib/libuuid.a',
],
'libinst': 'android_arm64-v8a',
}, {
'name': 'android_x86',
'group': 'android',
'headers': bases['android_x86'] + f'/usr/include/{libname}',
'libs': [
bases['android_x86'] + f'/usr/lib/lib{libname}.a',
bases2['android_x86'] + '/usr/lib/libssl.a',
bases2['android_x86'] + '/usr/lib/libcrypto.a',
bases2['android_x86'] + '/usr/lib/liblzma.a',
bases2['android_x86'] + '/usr/lib/libsqlite3.a',
bases2['android_x86'] + '/usr/lib/libbz2.a',
bases2['android_x86'] + '/usr/lib/libuuid.a',
],
'libinst': 'android_x86',
}, {
'name': 'android_x86_64',
'group': 'android',
'headers': bases['android_x86_64'] + f'/usr/include/{libname}',
'libs': [
bases['android_x86_64'] + f'/usr/lib/lib{libname}.a',
bases2['android_x86_64'] + '/usr/lib/libssl.a',
bases2['android_x86_64'] + '/usr/lib/libcrypto.a',
bases2['android_x86_64'] + '/usr/lib/liblzma.a',
bases2['android_x86_64'] + '/usr/lib/libsqlite3.a',
bases2['android_x86_64'] + '/usr/lib/libbz2.a',
bases2['android_x86_64'] + '/usr/lib/libuuid.a',
],
'libinst': 'android_x86_64',
}]
for build in builds:
grp = build['group']
if not do_android and grp == 'android':
continue
builddir = f'src/external/python-{grp}{bsuffix2}'
header_dst = os.path.join(builddir, 'include')
lib_dst = os.path.join(builddir, 'lib')
assets_src_dst = f'assets/src/pylib-{grp}'
if not os.path.exists(builddir):
run('mkdir -p "' + builddir + '"')
run('mkdir -p "' + lib_dst + '"')
if not debug:
run('mkdir -p "' + assets_src_dst + '"')
run('rsync --recursive --include "*.py"'
' --exclude __pycache__ --include "*/" --exclude "*" "'
+ build['pylib'] + '/" "' + assets_src_dst + '"')
                    # Prune a bunch of modules we don't need to cut
                    # down on size.
run('cd "' + assets_src_dst + '" && rm -rf ' +
' '.join(PRUNE_LIB_NAMES))
# Some minor filtering to system scripts:
# on iOS/tvOS, addusersitepackages() leads to a crash
# due to _sysconfigdata_dm_ios_darwin module not existing,
# so let's skip that.
fname = f'{assets_src_dst}/site.py'
txt = readfile(fname)
txt = replace_one(
txt,
' known_paths = addusersitepackages(known_paths)',
' # efro tweak: this craps out on ios/tvos.\n'
' # (and we don\'t use it anyway)\n'
                        ' # known_paths = addusersitepackages(known_paths)')
writefile(fname, txt)
# Copy in a base set of headers (everything in a group should
# be using the same headers)
run(f'cp -r "{build["headers"]}" "{header_dst}"')
                # Clear whatever pyconfigs came across; we'll build our own
                # universal one below.
run('rm ' + header_dst + '/pyconfig*')
with open(header_dst + '/pyconfig.h', 'w',
encoding='utf-8') as hfile:
hfile.write(
                        '#if BA_OSTYPE_MACOS\n'
                        '#include "pyconfig-macos.h"\n\n'
                        '#elif BA_OSTYPE_IOS\n'
                        '#include "pyconfig-ios.h"\n\n'
                        '#elif BA_OSTYPE_TVOS\n'
                        '#include "pyconfig-tvos.h"\n\n'
                        '#elif BA_OSTYPE_ANDROID and defined(__arm__)\n'
                        '#include "pyconfig-android_arm.h"\n\n'
                        '#elif BA_OSTYPE_ANDROID and defined(__aarch64__)\n'
                        '#include "pyconfig-android_arm64.h"\n\n'
                        '#elif BA_OSTYPE_ANDROID and defined(__i386__)\n'
                        '#include "pyconfig-android_x86.h"\n\n'
                        '#elif BA_OSTYPE_ANDROID and defined(__x86_64__)\n'
                        '#include "pyconfig-android_x86_64.h"\n\n'
                        '#else\n'
                        '#error unknown platform\n\n'
                        '#endif\n')
# Now copy each build's config headers in with unique names.
cfgs = [
f for f in os.listdir(build['headers'])
if f.startswith('pyconfig')
]
for cfg in cfgs:
out = cfg.replace('pyconfig', 'pyconfig-' + build['name'])
if cfg == 'pyconfig.h':
                    # For platform's root pyconfig.h we need to filter
                    # contents too (those headers can themselves include
# others; ios for instance points to a arm64 and a
# x86_64 variant).
contents = readfile(build['headers'] + '/' + cfg)
contents = contents.replace('pyconfig',
'pyconfig-' + build['name'])
writefile(header_dst + '/' + out, contents)
else:
# other configs we just rename
run('cp "' + build['headers'] + '/' + cfg + '" "' +
header_dst + '/' + out + '"')
# Copy in libs. If the lib gave a specific install name,
# use that; otherwise use name.
targetdir = lib_dst + '/' + build.get('libinst', build['name'])
run('rm -rf "' + targetdir + '"')
run('mkdir -p "' + targetdir + '"')
for lib in build['libs']:
run('cp "' + lib + '" "' + targetdir + '"')
print('Great success!')
```
is_comment_constant_removed: true | is_sharp_comment_removed: true
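pybuild.py above routes every patch through `efrotools.replace_one`. Its implementation is not shown here; a hypothetical stand-in consistent with the call sites (each patch target must match exactly once, or the build should fail loudly) could be:

```python
def replace_one(text: str, old: str, new: str) -> str:
    """Hypothetical stand-in for efrotools.replace_one; not the real code.

    Contract inferred from the call sites above: `old` must occur exactly
    once, so a missing or duplicated patch target becomes a hard error
    instead of silently producing a corrupt Makefile or Setup file.
    """
    count = text.count(old)
    if count != 1:
        raise ValueError(f'expected 1 occurrence of {old!r}, found {count}')
    return text.replace(old, new)
```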
hexsha: 1c46ad650091fd8eb656b4ce0564489819168982 | size: 2,955 | ext: py | lang: Python
max_stars: conans/test/unittests/util/local_db_test.py @ Wonders11/conan (head 28ec09f6cbf1d7e27ec27393fd7bbc74891e74a8), licenses ["MIT"], count 6,205, events 2015-12-01T13:40:05.000Z to 2022-03-31T07:30:25.000Z
max_issues: conans/test/unittests/util/local_db_test.py @ Wonders11/conan (head 28ec09f6cbf1d7e27ec27393fd7bbc74891e74a8), licenses ["MIT"], count 8,747, events 2015-12-01T16:28:48.000Z to 2022-03-31T23:34:53.000Z
max_forks: conans/test/unittests/util/local_db_test.py @ Mattlk13/conan (head 005fc53485557b0a570bb71670f2ca9c66082165), licenses ["MIT"], count 961, events 2015-12-01T16:56:43.000Z to 2022-03-31T13:50:52.000Z
content:
```python
import os
import unittest
import uuid
import six
import pytest
from conans.client.store.localdb import LocalDB
from conans.test.utils.test_files import temp_folder
class LocalStoreTest(unittest.TestCase):
def test_localdb(self):
tmp_dir = temp_folder()
db_file = os.path.join(tmp_dir, "dbfile")
localdb = LocalDB.create(db_file)
# Test write and read login
user, token, access_token = localdb.get_login("myurl1")
self.assertIsNone(user)
self.assertIsNone(token)
self.assertIsNone(access_token)
localdb.store("pepe", "token", "access_token", "myurl1")
user, token, access_token = localdb.get_login("myurl1")
self.assertEqual("pepe", user)
self.assertEqual("token", token)
self.assertEqual("access_token", access_token)
self.assertEqual("pepe", localdb.get_username("myurl1"))
def test_token_encryption_ascii(self):
tmp_dir = temp_folder()
db_file = os.path.join(tmp_dir, "dbfile")
encryption_key = str(uuid.uuid4())
localdb = LocalDB.create(db_file, encryption_key=encryption_key)
localdb.store("pepe", "token", "access_token", "myurl1")
user, token, access_token = localdb.get_login("myurl1")
self.assertEqual("pepe", user)
self.assertEqual("token", token)
self.assertEqual("access_token", access_token)
def test_token_encryption_none(self):
tmp_dir = temp_folder()
db_file = os.path.join(tmp_dir, "dbfile")
encryption_key = str(uuid.uuid4())
localdb = LocalDB.create(db_file, encryption_key=encryption_key)
localdb.store("pepe", "token", None, "myurl1")
user, token, access_token = localdb.get_login("myurl1")
self.assertEqual("pepe", user)
self.assertEqual("token", token)
self.assertEqual(None, access_token)
@pytest.mark.skipif(six.PY2, reason="Python2 sqlite3 converts to str")
def test_token_encryption_unicode(self):
tmp_dir = temp_folder()
db_file = os.path.join(tmp_dir, "dbfile")
encryption_key = str(uuid.uuid4())
localdb = LocalDB.create(db_file, encryption_key=encryption_key)
token_input = b'espa\xc3\xb1a\xe2\x82\xac$'.decode('utf-8') # Only ASCII files in codebase
localdb.store("pepe", token_input, token_input, "myurl1")
user, token, access_token = localdb.get_login("myurl1")
self.assertEqual("pepe", user)
self.assertEqual(token_input, token)
self.assertEqual(token_input, access_token)
self.assertEqual("pepe", localdb.get_username("myurl1"))
# Without the encryption key we get obfuscated values
other_db = LocalDB.create(db_file)
user, token, access_token = other_db.get_login("myurl1")
self.assertEqual("pepe", user)
self.assertNotEqual(token_input, token)
self.assertNotEqual(token_input, access_token)
```
avg_line_length: 38.376623 | max_line_length: 99 | alphanum_fraction: 0.671743
content_no_comment:
```python
import os
import unittest
import uuid
import six
import pytest
from conans.client.store.localdb import LocalDB
from conans.test.utils.test_files import temp_folder
class LocalStoreTest(unittest.TestCase):
def test_localdb(self):
tmp_dir = temp_folder()
db_file = os.path.join(tmp_dir, "dbfile")
localdb = LocalDB.create(db_file)
user, token, access_token = localdb.get_login("myurl1")
self.assertIsNone(user)
self.assertIsNone(token)
self.assertIsNone(access_token)
localdb.store("pepe", "token", "access_token", "myurl1")
user, token, access_token = localdb.get_login("myurl1")
self.assertEqual("pepe", user)
self.assertEqual("token", token)
self.assertEqual("access_token", access_token)
self.assertEqual("pepe", localdb.get_username("myurl1"))
def test_token_encryption_ascii(self):
tmp_dir = temp_folder()
db_file = os.path.join(tmp_dir, "dbfile")
encryption_key = str(uuid.uuid4())
localdb = LocalDB.create(db_file, encryption_key=encryption_key)
localdb.store("pepe", "token", "access_token", "myurl1")
user, token, access_token = localdb.get_login("myurl1")
self.assertEqual("pepe", user)
self.assertEqual("token", token)
self.assertEqual("access_token", access_token)
def test_token_encryption_none(self):
tmp_dir = temp_folder()
db_file = os.path.join(tmp_dir, "dbfile")
encryption_key = str(uuid.uuid4())
localdb = LocalDB.create(db_file, encryption_key=encryption_key)
localdb.store("pepe", "token", None, "myurl1")
user, token, access_token = localdb.get_login("myurl1")
self.assertEqual("pepe", user)
self.assertEqual("token", token)
self.assertEqual(None, access_token)
@pytest.mark.skipif(six.PY2, reason="Python2 sqlite3 converts to str")
def test_token_encryption_unicode(self):
tmp_dir = temp_folder()
db_file = os.path.join(tmp_dir, "dbfile")
encryption_key = str(uuid.uuid4())
localdb = LocalDB.create(db_file, encryption_key=encryption_key)
token_input = b'espa\xc3\xb1a\xe2\x82\xac$'.decode('utf-8')
localdb.store("pepe", token_input, token_input, "myurl1")
user, token, access_token = localdb.get_login("myurl1")
self.assertEqual("pepe", user)
self.assertEqual(token_input, token)
self.assertEqual(token_input, access_token)
self.assertEqual("pepe", localdb.get_username("myurl1"))
other_db = LocalDB.create(db_file)
user, token, access_token = other_db.get_login("myurl1")
self.assertEqual("pepe", user)
self.assertNotEqual(token_input, token)
self.assertNotEqual(token_input, access_token)
```
is_comment_constant_removed: true | is_sharp_comment_removed: true
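The tests above exercise a small LocalDB surface: `create`, `store`, `get_login`, and `get_username`. A compressed usage sketch of just that surface, with illustrative values:

```python
import os

from conans.client.store.localdb import LocalDB
from conans.test.utils.test_files import temp_folder

db_file = os.path.join(temp_folder(), "dbfile")
# With an encryption_key the stored tokens are obfuscated on disk; a LocalDB
# opened on the same file without the key returns different token values.
localdb = LocalDB.create(db_file, encryption_key="any-secret")
localdb.store("pepe", "token", "access_token", "myurl1")
user, token, access_token = localdb.get_login("myurl1")
assert (user, token, access_token) == ("pepe", "token", "access_token")
assert localdb.get_username("myurl1") == "pepe"
```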
hexsha: 1c46ae0e3f4a04853fd12feddc7987c8067cadb2 | size: 934 | ext: py | lang: Python
max_stars: django_angular_url/templatetags/django_angular_url_tags.py @ rafitorres/django-angular-url (head c9734f54370f4fb0d2d7bfd2248107ba93126aac), licenses ["MIT"], count 1, events 2018-06-17T19:28:24.000Z to 2018-06-17T19:28:24.000Z
max_issues: django_angular_url/templatetags/django_angular_url_tags.py @ rafitorres/django-angular-url (head c9734f54370f4fb0d2d7bfd2248107ba93126aac), licenses ["MIT"], count null, events null to null
max_forks: django_angular_url/templatetags/django_angular_url_tags.py @ rafitorres/django-angular-url (head c9734f54370f4fb0d2d7bfd2248107ba93126aac), licenses ["MIT"], count null, events null to null
content:
```python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
from django.template import Library
from django.core.exceptions import ImproperlyConfigured
from django.utils.safestring import mark_safe
from django_angular_url.core.urlresolvers import get_urls
register = Library()
@register.simple_tag(name='load_djng_urls', takes_context=True)
def djng_urls(context, *namespaces):
def _replace_namespace(n):
if n == 'SELF':
request = context.get('request')
if not request:
raise ImproperlyConfigured(
"'SELF' was used in 'load_djng_urls' for request "
"namespace lookup, but there is no RequestContext.")
return request.resolver_match.namespace
elif n == '':
return None
return n
urls = get_urls([_replace_namespace(x) for x in namespaces])
return mark_safe(json.dumps(urls))
```
avg_line_length: 32.206897 | max_line_length: 72 | alphanum_fraction: 0.671306
content_no_comment:
```python
from __future__ import unicode_literals
import json
from django.template import Library
from django.core.exceptions import ImproperlyConfigured
from django.utils.safestring import mark_safe
from django_angular_url.core.urlresolvers import get_urls
register = Library()
@register.simple_tag(name='load_djng_urls', takes_context=True)
def djng_urls(context, *namespaces):
def _replace_namespace(n):
if n == 'SELF':
request = context.get('request')
if not request:
raise ImproperlyConfigured(
"'SELF' was used in 'load_djng_urls' for request "
"namespace lookup, but there is no RequestContext.")
return request.resolver_match.namespace
elif n == '':
return None
return n
urls = get_urls([_replace_namespace(x) for x in namespaces])
return mark_safe(json.dumps(urls))
```
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 1c46af2a12398dfe071582314575709997860fcd | size: 5,438 | ext: py | lang: Python
max_stars: Dell/benchmarks/rnnt/implementations/DSS8440x8A100-PCIE-80GB/bind_launch.py @ gglin001/training_results_v1.1 (head 58fd4103f0f465bda6eb56a06a74b7bbccbbcf24), licenses ["Apache-2.0"], count null, events null to null
max_issues: Dell/benchmarks/rnnt/implementations/DSS8440x8A100-PCIE-80GB/bind_launch.py @ gglin001/training_results_v1.1 (head 58fd4103f0f465bda6eb56a06a74b7bbccbbcf24), licenses ["Apache-2.0"], count null, events null to null
max_forks: Dell/benchmarks/rnnt/implementations/DSS8440x8A100-PCIE-80GB/bind_launch.py @ gglin001/training_results_v1.1 (head 58fd4103f0f465bda6eb56a06a74b7bbccbbcf24), licenses ["Apache-2.0"], count null, events null to null
content:
```python
import sys
import subprocess
import os
import socket
from argparse import ArgumentParser, REMAINDER
import torch
def parse_args():
"""
Helper function parsing the command line options
@retval ArgumentParser
"""
parser = ArgumentParser(description="PyTorch distributed training launch "
"helper utilty that will spawn up "
"multiple distributed processes")
# Optional arguments for the launch helper
parser.add_argument("--nnodes", type=int, default=1,
help="The number of nodes to use for distributed "
"training")
parser.add_argument("--node_rank", type=int, default=0,
help="The rank of the node for multi-node distributed "
"training")
parser.add_argument("--nproc_per_node", type=int, default=1,
help="The number of processes to launch on each node, "
"for GPU training, this is recommended to be set "
"to the number of GPUs in your system so that "
"each process can be bound to a single GPU.")
parser.add_argument("--master_addr", default="127.0.0.1", type=str,
help="Master node (rank 0)'s address, should be either "
"the IP address or the hostname of node 0, for "
"single node multi-proc training, the "
"--master_addr can simply be 127.0.0.1")
parser.add_argument("--master_port", default=29500, type=int,
help="Master node (rank 0)'s free port that needs to "
"be used for communciation during distributed "
"training")
parser.add_argument('--no_hyperthreads', action='store_true',
help='Flag to disable binding to hyperthreads')
parser.add_argument('--no_membind', action='store_true',
help='Flag to disable memory binding')
# non-optional arguments for binding
parser.add_argument("--nsockets_per_node", type=int, required=True,
help="Number of CPU sockets on a node")
parser.add_argument("--ncores_per_socket", type=int, required=True,
help="Number of CPU cores per socket")
# positional
parser.add_argument("training_script", type=str,
help="The full path to the single GPU training "
"program/script to be launched in parallel, "
"followed by all the arguments for the "
"training script")
# rest from the training program
parser.add_argument('training_script_args', nargs=REMAINDER)
return parser.parse_args()
def main():
args = parse_args()
    # variables for numactl binding
NSOCKETS = args.nsockets_per_node
NGPUS_PER_SOCKET = args.nproc_per_node // args.nsockets_per_node
NCORES_PER_GPU = args.ncores_per_socket // NGPUS_PER_SOCKET
# world size in terms of number of processes
dist_world_size = args.nproc_per_node * args.nnodes
# set PyTorch distributed related environmental variables
current_env = os.environ.copy()
current_env["MASTER_ADDR"] = args.master_addr
current_env["MASTER_PORT"] = str(args.master_port)
current_env["WORLD_SIZE"] = str(dist_world_size)
processes = []
all_cores = torch.arange(0, 96)
even_cores, odd_cores = all_cores[::2].tolist(), all_cores[1::2].tolist()
for local_rank in range(0, args.nproc_per_node):
# each process's rank
dist_rank = args.nproc_per_node * args.node_rank + local_rank
current_env["RANK"] = str(dist_rank)
        # form numactl binding command
#cpu_ranges = [local_rank * NCORES_PER_GPU,
# (local_rank + 1) * NCORES_PER_GPU - 1,
# local_rank * NCORES_PER_GPU + (NCORES_PER_GPU * NGPUS_PER_SOCKET * NSOCKETS),
# (local_rank + 1) * NCORES_PER_GPU + (NCORES_PER_GPU * NGPUS_PER_SOCKET * NSOCKETS) - 1]
numactlargs = []
if args.no_hyperthreads:
raise ValueError("Please enable HT with DSS and continue")
#numactlargs += [ "--physcpubind={}-{}".format(*cpu_ranges[0:2]) ]
else:
if local_rank in [0,1,2,3]:
numactlargs += [ "--physcpubind={}".format(",".join(map(str, even_cores))) ]
elif local_rank in [4,5,6,7]:
numactlargs += [ "--physcpubind={}".format(",".join(map(str, odd_cores))) ]
if not args.no_membind:
memnode = local_rank // NGPUS_PER_SOCKET
numactlargs += [ "--membind={}".format(memnode) ]
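        # Illustrative shape of the final binding for local_rank 0 on the
        # 2-socket / 96-logical-core layout assumed above (script name is
        # hypothetical):
        #   /usr/bin/numactl --physcpubind=0,2,...,94 --membind=0 \
        #       python -u train.py --local_rank=0 <training_script_args>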
# spawn the processes
cmd = [ "/usr/bin/numactl" ] \
+ numactlargs \
+ [ sys.executable,
"-u",
args.training_script,
"--local_rank={}".format(local_rank)
] \
+ args.training_script_args
print(f"##binding cmd: {cmd}")
print(f"##local_rank: {local_rank}")
process = subprocess.Popen(cmd, env=current_env)
processes.append(process)
for process in processes:
process.wait()
if __name__ == "__main__":
main()
| 40.887218
| 109
| 0.578338
|
import sys
import subprocess
import os
import socket
from argparse import ArgumentParser, REMAINDER
import torch
def parse_args():
parser = ArgumentParser(description="PyTorch distributed training launch "
"helper utilty that will spawn up "
"multiple distributed processes")
parser.add_argument("--nnodes", type=int, default=1,
help="The number of nodes to use for distributed "
"training")
parser.add_argument("--node_rank", type=int, default=0,
help="The rank of the node for multi-node distributed "
"training")
parser.add_argument("--nproc_per_node", type=int, default=1,
help="The number of processes to launch on each node, "
"for GPU training, this is recommended to be set "
"to the number of GPUs in your system so that "
"each process can be bound to a single GPU.")
parser.add_argument("--master_addr", default="127.0.0.1", type=str,
help="Master node (rank 0)'s address, should be either "
"the IP address or the hostname of node 0, for "
"single node multi-proc training, the "
"--master_addr can simply be 127.0.0.1")
parser.add_argument("--master_port", default=29500, type=int,
help="Master node (rank 0)'s free port that needs to "
"be used for communciation during distributed "
"training")
parser.add_argument('--no_hyperthreads', action='store_true',
help='Flag to disable binding to hyperthreads')
parser.add_argument('--no_membind', action='store_true',
help='Flag to disable memory binding')
parser.add_argument("--nsockets_per_node", type=int, required=True,
help="Number of CPU sockets on a node")
parser.add_argument("--ncores_per_socket", type=int, required=True,
help="Number of CPU cores per socket")
parser.add_argument("training_script", type=str,
help="The full path to the single GPU training "
"program/script to be launched in parallel, "
"followed by all the arguments for the "
"training script")
parser.add_argument('training_script_args', nargs=REMAINDER)
return parser.parse_args()
def main():
args = parse_args()
NSOCKETS = args.nsockets_per_node
NGPUS_PER_SOCKET = args.nproc_per_node // args.nsockets_per_node
NCORES_PER_GPU = args.ncores_per_socket // NGPUS_PER_SOCKET
dist_world_size = args.nproc_per_node * args.nnodes
current_env = os.environ.copy()
current_env["MASTER_ADDR"] = args.master_addr
current_env["MASTER_PORT"] = str(args.master_port)
current_env["WORLD_SIZE"] = str(dist_world_size)
processes = []
all_cores = torch.arange(0, 96)
even_cores, odd_cores = all_cores[::2].tolist(), all_cores[1::2].tolist()
for local_rank in range(0, args.nproc_per_node):
dist_rank = args.nproc_per_node * args.node_rank + local_rank
current_env["RANK"] = str(dist_rank)
        # form numactl binding command
#cpu_ranges = [local_rank * NCORES_PER_GPU,
# (local_rank + 1) * NCORES_PER_GPU - 1,
# local_rank * NCORES_PER_GPU + (NCORES_PER_GPU * NGPUS_PER_SOCKET * NSOCKETS),
# (local_rank + 1) * NCORES_PER_GPU + (NCORES_PER_GPU * NGPUS_PER_SOCKET * NSOCKETS) - 1]
numactlargs = []
if args.no_hyperthreads:
raise ValueError("Please enable HT with DSS and continue")
#numactlargs += [ "--physcpubind={}-{}".format(*cpu_ranges[0:2]) ]
else:
if local_rank in [0,1,2,3]:
numactlargs += [ "--physcpubind={}".format(",".join(map(str, even_cores))) ]
elif local_rank in [4,5,6,7]:
numactlargs += [ "--physcpubind={}".format(",".join(map(str, odd_cores))) ]
if not args.no_membind:
memnode = local_rank // NGPUS_PER_SOCKET
numactlargs += [ "--membind={}".format(memnode) ]
# spawn the processes
cmd = [ "/usr/bin/numactl" ] \
+ numactlargs \
+ [ sys.executable,
"-u",
args.training_script,
"--local_rank={}".format(local_rank)
] \
+ args.training_script_args
print(f"##binding cmd: {cmd}")
print(f"##local_rank: {local_rank}")
process = subprocess.Popen(cmd, env=current_env)
processes.append(process)
for process in processes:
process.wait()
if __name__ == "__main__":
main()
| true
| true
|
1c46b0e8e1b0e69a358fe2773d36f1292eb76c39
| 141
|
py
|
Python
|
escapement/__init__.py
|
willingc/escapement
|
a02cc5f4367acf6cbc7f0734744b5093b4b02597
|
[
"MIT"
] | null | null | null |
escapement/__init__.py
|
willingc/escapement
|
a02cc5f4367acf6cbc7f0734744b5093b4b02597
|
[
"MIT"
] | null | null | null |
escapement/__init__.py
|
willingc/escapement
|
a02cc5f4367acf6cbc7f0734744b5093b4b02597
|
[
"MIT"
] | null | null | null |
"""Top-level package for Escapement."""
__author__ = """Carol Willing"""
__email__ = "willingc@willingconsulting.com"
__version__ = "0.1.0"
| 23.5
| 44
| 0.716312
|
__author__ = """Carol Willing"""
__email__ = "willingc@willingconsulting.com"
__version__ = "0.1.0"
| true
| true
|
1c46b17b4df598ba18d2b2ad0e6b4ffe03ea914e
| 2,378
|
py
|
Python
|
gemd/demo/measurement_example.py
|
ventura-rivera/gemd-python
|
078eed39de852f830111b77306c2f35146de8ec3
|
[
"Apache-2.0"
] | null | null | null |
gemd/demo/measurement_example.py
|
ventura-rivera/gemd-python
|
078eed39de852f830111b77306c2f35146de8ec3
|
[
"Apache-2.0"
] | null | null | null |
gemd/demo/measurement_example.py
|
ventura-rivera/gemd-python
|
078eed39de852f830111b77306c2f35146de8ec3
|
[
"Apache-2.0"
] | null | null | null |
"""Demonstrate attaching measurements to a material."""
import random
import string
from gemd.entity.attribute.property import Property
from gemd.entity.object import MeasurementRun
from gemd.entity.value.nominal_real import NominalReal
from gemd.entity.value.normal_real import NormalReal
from gemd.enumeration import Origin
# recommended values taken from
# https://www.shimadzu.com/an/industry/petrochemicalchemical/n9j25k00000pyv3w.html
thickness = 4.0 # mm
length = 80.0 # mm
width = 10.0 # mm
span = 64.0 # mm
punch_radius = 5.0 # mm
support_radius = 5.0 # mm
applied_force = 100.0 # N
def __random_my_id():
"""Create random 8-letter id."""
return "".join([random.choice(string.ascii_lowercase) for _ in range(8)])
def make_demo_measurements(num_measurements, extra_tags=frozenset()):
"""Make a measurement object."""
return [
make_flexural_test_measurement(
my_id=__random_my_id(),
deflection=random.random(),
extra_tags=extra_tags
) for _ in range(num_measurements)
]
def make_flexural_test_measurement(my_id, deflection, extra_tags=frozenset()):
"""
    Compute the stress, strain, and modulus.
According to https://en.wikipedia.org/wiki/Three-point_flexural_test
"""
stress = 3 * applied_force * span / (2 * thickness * thickness * width)
strain = 6 * deflection * thickness / (span * span)
modulus = stress / strain
measurement = MeasurementRun(
uids={"my_id": my_id},
tags=["3_pt_bend", "mechanical", "flex"] + list(extra_tags),
properties=[
Property(
name="flexural stress",
value=NormalReal(stress, std=(0.01 * stress), units="MPa"),
origin=Origin.MEASURED
),
Property(
name="flexural strain",
value=NormalReal(strain, std=(0.01 * strain), units=""),
origin=Origin.MEASURED
),
Property(
name="flexural modulus",
value=NormalReal(modulus, std=(0.01 * modulus), units="MPa"),
origin=Origin.MEASURED
),
Property(
name="deflection",
value=NominalReal(deflection, units="mm"),
origin=Origin.MEASURED
)
]
)
return measurement
| 31.706667
| 82
| 0.616905
|
import random
import string
from gemd.entity.attribute.property import Property
from gemd.entity.object import MeasurementRun
from gemd.entity.value.nominal_real import NominalReal
from gemd.entity.value.normal_real import NormalReal
from gemd.enumeration import Origin
thickness = 4.0
length = 80.0
width = 10.0
span = 64.0
punch_radius = 5.0
support_radius = 5.0
applied_force = 100.0
def __random_my_id():
return "".join([random.choice(string.ascii_lowercase) for _ in range(8)])
def make_demo_measurements(num_measurements, extra_tags=frozenset()):
return [
make_flexural_test_measurement(
my_id=__random_my_id(),
deflection=random.random(),
extra_tags=extra_tags
) for _ in range(num_measurements)
]
def make_flexural_test_measurement(my_id, deflection, extra_tags=frozenset()):
stress = 3 * applied_force * span / (2 * thickness * thickness * width)
strain = 6 * deflection * thickness / (span * span)
modulus = stress / strain
measurement = MeasurementRun(
uids={"my_id": my_id},
tags=["3_pt_bend", "mechanical", "flex"] + list(extra_tags),
properties=[
Property(
name="flexural stress",
value=NormalReal(stress, std=(0.01 * stress), units="MPa"),
origin=Origin.MEASURED
),
Property(
name="flexural strain",
value=NormalReal(strain, std=(0.01 * strain), units=""),
origin=Origin.MEASURED
),
Property(
name="flexural modulus",
value=NormalReal(modulus, std=(0.01 * modulus), units="MPa"),
origin=Origin.MEASURED
),
Property(
name="deflection",
value=NominalReal(deflection, units="mm"),
origin=Origin.MEASURED
)
]
)
return measurement
| true
| true
|
1c46b284df73fbe899299978530eccccf17a8af1
| 3,066
|
py
|
Python
|
vqa_image_preprocess.py
|
strieb/VisualQuestionAnswering
|
28f6ae1f2abd839145306a1d4f34ee84271cf3c1
|
[
"MIT"
] | 1
|
2020-04-23T09:15:33.000Z
|
2020-04-23T09:15:33.000Z
|
vqa_image_preprocess.py
|
strieb/VisualQuestionAnswering
|
28f6ae1f2abd839145306a1d4f34ee84271cf3c1
|
[
"MIT"
] | null | null | null |
vqa_image_preprocess.py
|
strieb/VisualQuestionAnswering
|
28f6ae1f2abd839145306a1d4f34ee84271cf3c1
|
[
"MIT"
] | null | null | null |
import json
from collections import Counter
import re
from VQA.PythonHelperTools.vqaTools.vqa import VQA
import random
import numpy as np
from keras.preprocessing.image import load_img, img_to_array, ImageDataGenerator
from matplotlib import pyplot as plt
import os
import VQAModel
from keras.applications.xception import decode_predictions, preprocess_input
# from keras.applications.inception_v3 import decode_predictions, preprocess_input
from PIL import Image, ImageOps
from matplotlib import pyplot as plt
import math
from Environment import DATADIR
versionType = 'v2_' # this should be '' when using VQA v2.0 dataset
taskType = 'OpenEnded' # 'OpenEnded' only for v2.0. 'OpenEnded' or 'MultipleChoice' for v1.0
dataType = 'mscoco' # 'mscoco' only for v1.0. 'mscoco' for real and 'abstract_v002' for abstract for v1.0.
dataSubType = 'train2014'
saveDir = 'preprocessed_xcep_24'
annFile = '%s/Annotations/%s%s_%s_annotations.json' % (DATADIR, versionType, dataType, dataSubType)
quesFile = '%s/Questions/%s%s_%s_%s_questions.json' % (DATADIR, versionType, taskType, dataType, dataSubType)
imgDir = '%s/Images/%s/' % (DATADIR, dataSubType)
i = 0
directory = os.fsencode(imgDir)
# 363, 555
# 427, 619
size1 = 299+64
size2 = 299+64
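# Note: 299 px is the default Xception input edge; the extra 64 px enlarges
# the spatial feature map that createModelXception exposes, which is reshaped
# to (24, 2048) per image below.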
model = VQAModel.createModelXception((size1, size2, 3))
model.summary()
for file in os.listdir(directory):
filename = os.fsdecode(file)
if filename.endswith(".jpg"):
imgPath = os.path.join(imgDir, filename)
id = int(filename[-16:-4])
img = load_img(imgPath)
width, height = img.size
if(width >= height):
img = img.resize((size2, size1), resample=Image.BICUBIC)
img_array = img_to_array(img)
img_array = preprocess_input(img_array)
# img_array = np.tile(img,(32,1,1,1))
img_array = np.expand_dims(img_array, axis=0)
predictions = model.predict(img_array)
pred = predictions[0].reshape(24,2048)
np.save(imgDir+saveDir+"/"+str(id), pred)
if i < 1000 and i%100 == 0:
print(i)
if i % 1000 == 0:
print(i)
i += 1
model = VQAModel.createModelXception((size2, size1, 3))
for file in os.listdir(directory):
filename = os.fsdecode(file)
if filename.endswith(".jpg"):
imgPath = os.path.join(imgDir, filename)
id = int(filename[-16:-4])
img = load_img(imgPath)
width, height = img.size
if(width < height):
img = img.resize((size1, size2), resample=Image.BICUBIC)
img_array = img_to_array(img)
img_array = preprocess_input(img_array)
# img_array = np.tile(img,(32,1,1,1))
img_array = np.expand_dims(img_array, axis=0)
# plt.imshow((img_array[0] + 1)/2)
# plt.show()
predictions = model.predict(img_array)
pred = predictions[0].reshape(24,2048)
np.save(imgDir+saveDir+"/"+str(id), pred)
if i % 1000 == 0:
print(i)
i += 1
| 37.851852
| 109
| 0.643509
|
import json
from collections import Counter
import re
from VQA.PythonHelperTools.vqaTools.vqa import VQA
import random
import numpy as np
from keras.preprocessing.image import load_img, img_to_array, ImageDataGenerator
from matplotlib import pyplot as plt
import os
import VQAModel
from keras.applications.xception import decode_predictions, preprocess_input
from PIL import Image, ImageOps
from matplotlib import pyplot as plt
import math
from Environment import DATADIR
versionType = 'v2_'
taskType = 'OpenEnded'
dataType = 'mscoco'
dataSubType = 'train2014'
saveDir = 'preprocessed_xcep_24'
annFile = '%s/Annotations/%s%s_%s_annotations.json' % (DATADIR, versionType, dataType, dataSubType)
quesFile = '%s/Questions/%s%s_%s_%s_questions.json' % (DATADIR, versionType, taskType, dataType, dataSubType)
imgDir = '%s/Images/%s/' % (DATADIR, dataSubType)
i = 0
directory = os.fsencode(imgDir)
size1 = 299+64
size2 = 299+64
model = VQAModel.createModelXception((size1, size2, 3))
model.summary()
for file in os.listdir(directory):
filename = os.fsdecode(file)
if filename.endswith(".jpg"):
imgPath = os.path.join(imgDir, filename)
id = int(filename[-16:-4])
img = load_img(imgPath)
width, height = img.size
if(width >= height):
img = img.resize((size2, size1), resample=Image.BICUBIC)
img_array = img_to_array(img)
img_array = preprocess_input(img_array)
img_array = np.expand_dims(img_array, axis=0)
predictions = model.predict(img_array)
pred = predictions[0].reshape(24,2048)
np.save(imgDir+saveDir+"/"+str(id), pred)
if i < 1000 and i%100 == 0:
print(i)
if i % 1000 == 0:
print(i)
i += 1
model = VQAModel.createModelXception((size2, size1, 3))
for file in os.listdir(directory):
filename = os.fsdecode(file)
if filename.endswith(".jpg"):
imgPath = os.path.join(imgDir, filename)
id = int(filename[-16:-4])
img = load_img(imgPath)
width, height = img.size
if(width < height):
img = img.resize((size1, size2), resample=Image.BICUBIC)
img_array = img_to_array(img)
img_array = preprocess_input(img_array)
img_array = np.expand_dims(img_array, axis=0)
predictions = model.predict(img_array)
pred = predictions[0].reshape(24,2048)
np.save(imgDir+saveDir+"/"+str(id), pred)
if i % 1000 == 0:
print(i)
i += 1
| true
| true
|
1c46b394ee538fa30ae70b23a0b2eab1f2c3432d
| 554
|
py
|
Python
|
fn_isitPhishing/fn_isitPhishing/lib/isitphishing_util.py
|
rudimeyer/resilient-community-apps
|
7a46841ba41fa7a1c421d4b392b0a3ca9e36bd00
|
[
"MIT"
] | 1
|
2020-08-25T03:43:07.000Z
|
2020-08-25T03:43:07.000Z
|
fn_isitPhishing/fn_isitPhishing/lib/isitphishing_util.py
|
rudimeyer/resilient-community-apps
|
7a46841ba41fa7a1c421d4b392b0a3ca9e36bd00
|
[
"MIT"
] | 1
|
2019-07-08T16:57:48.000Z
|
2019-07-08T16:57:48.000Z
|
fn_isitPhishing/fn_isitPhishing/lib/isitphishing_util.py
|
rudimeyer/resilient-community-apps
|
7a46841ba41fa7a1c421d4b392b0a3ca9e36bd00
|
[
"MIT"
] | null | null | null |
import sys
import base64
def get_license_key(name, license):
# Compute the base64 license key. This key will be provided to you by Vade Secure,
# and has the following format: <CUSTOMER_NAME>:<CUSTOMER_LICENSE>.
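    # Illustrative result with hypothetical credentials:
    #   get_license_key("ACME", "12345") -> base64("ACME:12345")
    #                                     = "QUNNRToxMjM0NQ=="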
url_key = u'{0}:{1}'.format(name, license)
    # It must be Base64-encoded. Handled differently on Python 2 vs 3.
if sys.version_info[0] == 2:
auth_token = base64.b64encode(bytes(url_key).encode("utf-8"))
else:
auth_token = base64.b64encode(bytes(url_key, 'ascii')).decode('ascii')
return auth_token
| 34.625
| 86
| 0.689531
|
import sys
import base64
def get_license_key(name, license):
url_key = u'{0}:{1}'.format(name, license)
if sys.version_info[0] == 2:
auth_token = base64.b64encode(bytes(url_key).encode("utf-8"))
else:
auth_token = base64.b64encode(bytes(url_key, 'ascii')).decode('ascii')
return auth_token
| true
| true
|
1c46b5a4c2eb213dddaa023db5903639152bb058
| 110
|
py
|
Python
|
padaquant/__init__.py
|
felipm13/PadaQuant
|
09c13d60dee2a75488e101391ab09e9845a66cb5
|
[
"MIT"
] | 1
|
2019-06-21T01:13:29.000Z
|
2019-06-21T01:13:29.000Z
|
padaquant/__init__.py
|
felipm13/PadaQuant
|
09c13d60dee2a75488e101391ab09e9845a66cb5
|
[
"MIT"
] | null | null | null |
padaquant/__init__.py
|
felipm13/PadaQuant
|
09c13d60dee2a75488e101391ab09e9845a66cb5
|
[
"MIT"
] | null | null | null |
import sys
from padaquant.asset_manager import asset_manager
from padaquant.blackscholes import blackscholes
| 22
| 49
| 0.881818
|
import sys
from padaquant.asset_manager import asset_manager
from padaquant.blackscholes import blackscholes
| true
| true
|
1c46b5bee90335b45c1737463373c781e1e0b924
| 1,811
|
py
|
Python
|
python/ray/tests/test_scheduling_2.py
|
daobook/ray
|
af9f1ef4dc160e0671206556b387f8017f3c3930
|
[
"Apache-2.0"
] | 33
|
2020-05-27T14:25:24.000Z
|
2022-03-22T06:11:30.000Z
|
python/ray/tests/test_scheduling_2.py
|
daobook/ray
|
af9f1ef4dc160e0671206556b387f8017f3c3930
|
[
"Apache-2.0"
] | 115
|
2021-01-19T04:40:50.000Z
|
2022-03-26T07:09:00.000Z
|
python/ray/tests/test_scheduling_2.py
|
daobook/ray
|
af9f1ef4dc160e0671206556b387f8017f3c3930
|
[
"Apache-2.0"
] | 5
|
2020-08-06T15:53:07.000Z
|
2022-02-09T03:31:31.000Z
|
import numpy as np
import platform
import pytest
import sys
import time
import ray
@pytest.mark.skipif(
platform.system() == "Windows", reason="Failing on Windows. Multi node.")
def test_load_balancing_under_constrained_memory(ray_start_cluster):
# This test ensures that tasks are being assigned to all raylets in a
# roughly equal manner even when the tasks have dependencies.
cluster = ray_start_cluster
num_nodes = 3
num_cpus = 4
object_size = 4e7
num_tasks = 100
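    # With these numbers each worker node gets (4 - 2) * 4e7 = 8e7 bytes of
    # memory/object store, while the extra "custom" node below can hold all
    # (100 + 1) * 4e7 bytes of created objects.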
for _ in range(num_nodes):
cluster.add_node(
num_cpus=num_cpus,
memory=(num_cpus - 2) * object_size,
object_store_memory=(num_cpus - 2) * object_size)
cluster.add_node(
num_cpus=0,
resources={"custom": 1},
memory=(num_tasks + 1) * object_size,
object_store_memory=(num_tasks + 1) * object_size)
ray.init(address=cluster.address)
@ray.remote(num_cpus=0, resources={"custom": 1})
def create_object():
return np.zeros(int(object_size), dtype=np.uint8)
@ray.remote
def f(i, x):
print(i, ray.worker.global_worker.node.unique_id)
time.sleep(0.1)
return ray.worker.global_worker.node.unique_id
deps = [create_object.remote() for _ in range(num_tasks)]
for i, dep in enumerate(deps):
print(i, dep)
# TODO(swang): Actually test load balancing. Load balancing is currently
# flaky on Travis, probably due to the scheduling policy ping-ponging
# waiting tasks.
deps = [create_object.remote() for _ in range(num_tasks)]
tasks = [f.remote(i, dep) for i, dep in enumerate(deps)]
for i, dep in enumerate(deps):
print(i, dep)
ray.get(tasks)
if __name__ == "__main__":
import pytest
sys.exit(pytest.main(["-v", __file__]))
| 30.694915
| 77
| 0.663722
|
import numpy as np
import platform
import pytest
import sys
import time
import ray
@pytest.mark.skipif(
platform.system() == "Windows", reason="Failing on Windows. Multi node.")
def test_load_balancing_under_constrained_memory(ray_start_cluster):
cluster = ray_start_cluster
num_nodes = 3
num_cpus = 4
object_size = 4e7
num_tasks = 100
for _ in range(num_nodes):
cluster.add_node(
num_cpus=num_cpus,
memory=(num_cpus - 2) * object_size,
object_store_memory=(num_cpus - 2) * object_size)
cluster.add_node(
num_cpus=0,
resources={"custom": 1},
memory=(num_tasks + 1) * object_size,
object_store_memory=(num_tasks + 1) * object_size)
ray.init(address=cluster.address)
@ray.remote(num_cpus=0, resources={"custom": 1})
def create_object():
return np.zeros(int(object_size), dtype=np.uint8)
@ray.remote
def f(i, x):
print(i, ray.worker.global_worker.node.unique_id)
time.sleep(0.1)
return ray.worker.global_worker.node.unique_id
deps = [create_object.remote() for _ in range(num_tasks)]
for i, dep in enumerate(deps):
print(i, dep)
deps = [create_object.remote() for _ in range(num_tasks)]
tasks = [f.remote(i, dep) for i, dep in enumerate(deps)]
for i, dep in enumerate(deps):
print(i, dep)
ray.get(tasks)
if __name__ == "__main__":
import pytest
sys.exit(pytest.main(["-v", __file__]))
| true
| true
|
1c46b5cfbdc2bcd213cc2381fa6bb4cc7a0d00c3
| 323
|
py
|
Python
|
tests/naip/test_stac.py
|
lossyrob/stactools
|
68f416de38d91738a62c1b090a9c40cc2e56a9f6
|
[
"Apache-2.0"
] | 1
|
2022-03-28T19:13:53.000Z
|
2022-03-28T19:13:53.000Z
|
tests/naip/test_stac.py
|
lossyrob/stactools
|
68f416de38d91738a62c1b090a9c40cc2e56a9f6
|
[
"Apache-2.0"
] | 3
|
2021-08-12T18:06:50.000Z
|
2022-03-29T14:20:33.000Z
|
tests/test_stac.py
|
stactools-packages/naip
|
1f13cc86664436a10f7942ab06547f7e3d8b8928
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from stactools.naip.stac import create_collection
class StacTest(unittest.TestCase):
def test_create_collection(self):
collection = create_collection(seasons=[2011, 2013, 2015, 2017, 2019])
collection.set_self_href('http://example.com/collection.json')
collection.validate()
| 26.916667
| 78
| 0.739938
|
import unittest
from stactools.naip.stac import create_collection
class StacTest(unittest.TestCase):
def test_create_collection(self):
collection = create_collection(seasons=[2011, 2013, 2015, 2017, 2019])
collection.set_self_href('http://example.com/collection.json')
collection.validate()
| true
| true
|
1c46b5f9d0a2b7779bfbb2eb9b3e116a5cd194b6
| 493
|
py
|
Python
|
Lib/site-packages/plotly/validators/scattercarpet/_uid.py
|
tytanya/my-first-blog
|
2b40adb0816c3546e90ad6ca1e7fb50d924c1536
|
[
"bzip2-1.0.6"
] | 12
|
2020-04-18T18:10:22.000Z
|
2021-12-06T10:11:15.000Z
|
plotly/validators/scattercarpet/_uid.py
|
Vesauza/plotly.py
|
e53e626d59495d440341751f60aeff73ff365c28
|
[
"MIT"
] | 6
|
2021-03-18T22:27:08.000Z
|
2022-03-11T23:40:50.000Z
|
plotly/validators/scattercarpet/_uid.py
|
Vesauza/plotly.py
|
e53e626d59495d440341751f60aeff73ff365c28
|
[
"MIT"
] | 6
|
2020-04-18T23:07:08.000Z
|
2021-11-18T07:53:06.000Z
|
import _plotly_utils.basevalidators
class UidValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self, plotly_name='uid', parent_name='scattercarpet', **kwargs
):
super(UidValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
anim=kwargs.pop('anim', True),
edit_type=kwargs.pop('edit_type', 'plot'),
role=kwargs.pop('role', 'info'),
**kwargs
)
| 29
| 70
| 0.614604
|
import _plotly_utils.basevalidators
class UidValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self, plotly_name='uid', parent_name='scattercarpet', **kwargs
):
super(UidValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
anim=kwargs.pop('anim', True),
edit_type=kwargs.pop('edit_type', 'plot'),
role=kwargs.pop('role', 'info'),
**kwargs
)
| true
| true
|
1c46b67a3322491426a8dcefbb023986ece49b17
| 26,977
|
py
|
Python
|
src/olympia/activity/models.py
|
elyse0/addons-server
|
44fa4946b4b82f7003687b590b8c82c10c418e9e
|
[
"BSD-3-Clause"
] | null | null | null |
src/olympia/activity/models.py
|
elyse0/addons-server
|
44fa4946b4b82f7003687b590b8c82c10c418e9e
|
[
"BSD-3-Clause"
] | 760
|
2021-05-17T07:59:30.000Z
|
2022-03-31T11:14:15.000Z
|
src/olympia/activity/models.py
|
championshuttler/addons-server
|
5d4c1bfbed2fc509ecc1f3f5065955996e057eeb
|
[
"BSD-3-Clause"
] | null | null | null |
import json
import string
import uuid
from collections import defaultdict
from copy import copy
from datetime import datetime
from django.apps import apps
from django.conf import settings
from django.db import models
from django.utils import timezone
from django.utils.functional import cached_property
from django.utils.translation import gettext
import jinja2
import olympia.core.logger
from olympia import amo, constants
from olympia.access.models import Group
from olympia.addons.models import Addon
from olympia.amo.fields import PositiveAutoField
from olympia.amo.models import BaseQuerySet, ManagerBase, ModelBase
from olympia.bandwagon.models import Collection
from olympia.blocklist.models import Block
from olympia.files.models import File
from olympia.ratings.models import Rating
from olympia.reviewers.models import CannedResponse
from olympia.tags.models import Tag
from olympia.users.models import UserProfile
from olympia.users.templatetags.jinja_helpers import user_link
from olympia.versions.models import Version
log = olympia.core.logger.getLogger('z.amo.activity')
# Number of times a token can be used.
MAX_TOKEN_USE_COUNT = 100
class ActivityLogToken(ModelBase):
id = PositiveAutoField(primary_key=True)
version = models.ForeignKey(Version, related_name='token', on_delete=models.CASCADE)
user = models.ForeignKey(
'users.UserProfile',
related_name='activity_log_tokens',
on_delete=models.CASCADE,
)
uuid = models.UUIDField(default=uuid.uuid4, unique=True)
use_count = models.IntegerField(
default=0, help_text='Stores the number of times the token has been used'
)
class Meta:
db_table = 'log_activity_tokens'
constraints = [
models.UniqueConstraint(fields=('version', 'user'), name='version_id'),
]
def is_expired(self):
return self.use_count >= MAX_TOKEN_USE_COUNT
def is_valid(self):
return (
not self.is_expired()
and self.version
== self.version.addon.find_latest_version(
channel=self.version.channel, exclude=()
)
)
def expire(self):
self.update(use_count=MAX_TOKEN_USE_COUNT)
def increment_use(self):
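        # F() turns the increment into a single atomic UPDATE in SQL instead
        # of a racy read-modify-write; the in-memory count is then mirrored.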
self.__class__.objects.filter(pk=self.pk).update(
use_count=models.expressions.F('use_count') + 1
)
self.use_count = self.use_count + 1
class ActivityLogEmails(ModelBase):
"""A log of message ids of incoming emails so we don't duplicate process
them."""
messageid = models.CharField(max_length=255, unique=True)
class Meta:
db_table = 'log_activity_emails'
class AddonLog(ModelBase):
"""
This table is for indexing the activity log by addon.
"""
addon = models.ForeignKey(Addon, on_delete=models.CASCADE)
activity_log = models.ForeignKey('ActivityLog', on_delete=models.CASCADE)
class Meta:
db_table = 'log_activity_addon'
ordering = ('-created',)
def transfer(self, new_addon):
try:
# arguments is a structure:
# ``arguments = [{'addons.addon':12}, {'addons.addon':1}, ... ]``
arguments = json.loads(self.activity_log._arguments)
except Exception:
log.info(
'unserializing data from addon_log failed: %s' % self.activity_log.id
)
return None
new_arguments = []
for item in arguments:
if item.get('addons.addon', 0) == self.addon.id:
new_arguments.append({'addons.addon': new_addon.id})
else:
new_arguments.append(item)
self.activity_log.update(_arguments=json.dumps(new_arguments))
self.update(addon=new_addon)
class CommentLog(ModelBase):
"""
This table is for indexing the activity log by comment.
"""
activity_log = models.ForeignKey('ActivityLog', on_delete=models.CASCADE)
comments = models.TextField()
class Meta:
db_table = 'log_activity_comment'
ordering = ('-created',)
class VersionLog(ModelBase):
"""
This table is for indexing the activity log by version.
"""
activity_log = models.ForeignKey('ActivityLog', on_delete=models.CASCADE)
version = models.ForeignKey(Version, on_delete=models.CASCADE)
class Meta:
db_table = 'log_activity_version'
ordering = ('-created',)
class UserLog(ModelBase):
"""
This table is for indexing the activity log by user.
    Note: This includes activity performed on the user.
"""
activity_log = models.ForeignKey('ActivityLog', on_delete=models.CASCADE)
user = models.ForeignKey(UserProfile, on_delete=models.CASCADE)
class Meta:
db_table = 'log_activity_user'
ordering = ('-created',)
class GroupLog(ModelBase):
"""
This table is for indexing the activity log by access group.
"""
id = PositiveAutoField(primary_key=True)
activity_log = models.ForeignKey('ActivityLog', on_delete=models.CASCADE)
group = models.ForeignKey(Group, on_delete=models.CASCADE)
class Meta:
db_table = 'log_activity_group'
ordering = ('-created',)
class BlockLog(ModelBase):
"""
This table is for indexing the activity log by Blocklist Block.
"""
id = PositiveAutoField(primary_key=True)
activity_log = models.ForeignKey('ActivityLog', on_delete=models.CASCADE)
block = models.ForeignKey(Block, on_delete=models.SET_NULL, null=True)
guid = models.CharField(max_length=255, null=False)
class Meta:
db_table = 'log_activity_block'
ordering = ('-created',)
class IPLog(ModelBase):
"""
This table is for indexing the activity log by IP (only for specific
actions).
"""
activity_log = models.ForeignKey('ActivityLog', on_delete=models.CASCADE)
ip_address = models.CharField(max_length=45)
class Meta:
db_table = 'log_activity_ip'
ordering = ('-created',)
class DraftComment(ModelBase):
"""A model that allows us to draft comments for reviews before we have
an ActivityLog instance ready.
    This is used by the code-manager's commenting API.
"""
id = PositiveAutoField(primary_key=True)
version = models.ForeignKey(Version, on_delete=models.CASCADE)
user = models.ForeignKey(UserProfile, on_delete=models.CASCADE)
filename = models.CharField(max_length=255, null=True, blank=True)
lineno = models.PositiveIntegerField(null=True)
canned_response = models.ForeignKey(
CannedResponse, null=True, default=None, on_delete=models.SET_DEFAULT
)
comment = models.TextField(blank=True)
class Meta:
db_table = 'log_activity_comment_draft'
class ActivityLogQuerySet(BaseQuerySet):
def default_transformer(self, logs):
ActivityLog.arguments_builder(logs)
class ActivityLogManager(ManagerBase):
_queryset_class = ActivityLogQuerySet
def get_queryset(self):
qs = super().get_queryset()
qs = qs.transform(qs.default_transformer).prefetch_related('user')
return qs
def for_addons(self, addons):
if isinstance(addons, Addon):
addons = (addons,)
return self.filter(addonlog__addon__in=addons)
def for_versions(self, versions):
if isinstance(versions, Version):
versions = (versions,)
return self.filter(versionlog__version__in=versions)
def for_groups(self, groups):
if isinstance(groups, Group):
groups = (groups,)
return self.filter(grouplog__group__in=groups)
def for_user(self, user):
return self.filter(userlog__user=user)
def for_block(self, block):
return self.filter(blocklog__block=block)
def for_guidblock(self, guid):
return self.filter(blocklog__guid=guid)
def for_developer(self):
return self.exclude(
action__in=constants.activity.LOG_ADMINS
+ constants.activity.LOG_HIDE_DEVELOPER
)
def admin_events(self):
return self.filter(action__in=constants.activity.LOG_ADMINS)
def moderation_events(self):
return self.filter(action__in=constants.activity.LOG_RATING_MODERATION)
def review_queue(self):
qs = self._by_type()
return qs.filter(action__in=constants.activity.LOG_REVIEW_QUEUE).exclude(
user__id=settings.TASK_USER_ID
)
def review_log(self):
qs = self._by_type()
return qs.filter(
action__in=constants.activity.LOG_REVIEWER_REVIEW_ACTION
).exclude(user__id=settings.TASK_USER_ID)
def total_ratings(self, theme=False):
"""Return the top users, and their # of reviews."""
qs = self._by_type()
action_ids = (
[amo.LOG.THEME_REVIEW.id]
if theme
else constants.activity.LOG_REVIEWER_REVIEW_ACTION
)
return (
qs.values('user', 'user__display_name', 'user__username')
.filter(action__in=action_ids)
.exclude(user__id=settings.TASK_USER_ID)
.annotate(approval_count=models.Count('id'))
.order_by('-approval_count')
)
def monthly_reviews(self, theme=False):
"""Return the top users for the month, and their # of reviews."""
qs = self._by_type()
now = datetime.now()
created_date = datetime(now.year, now.month, 1)
actions = (
[constants.activity.LOG.THEME_REVIEW.id]
if theme
else constants.activity.LOG_REVIEWER_REVIEW_ACTION
)
return (
qs.values('user', 'user__display_name', 'user__username')
.filter(created__gte=created_date, action__in=actions)
.exclude(user__id=settings.TASK_USER_ID)
.annotate(approval_count=models.Count('id'))
.order_by('-approval_count')
)
def user_approve_reviews(self, user):
qs = self._by_type()
return qs.filter(
action__in=constants.activity.LOG_REVIEWER_REVIEW_ACTION, user__id=user.id
)
def current_month_user_approve_reviews(self, user):
now = datetime.now()
ago = datetime(now.year, now.month, 1)
return self.user_approve_reviews(user).filter(created__gte=ago)
def user_position(self, values_qs, user):
try:
return (
next(
i
for (i, d) in enumerate(list(values_qs))
if d.get('user') == user.id
)
+ 1
)
except StopIteration:
return None
def total_ratings_user_position(self, user, theme=False):
return self.user_position(self.total_ratings(theme), user)
def monthly_reviews_user_position(self, user, theme=False):
return self.user_position(self.monthly_reviews(theme), user)
def _by_type(self):
qs = self.get_queryset()
table = 'log_activity_addon'
return qs.extra(
tables=[table], where=['%s.activity_log_id=%s.id' % (table, 'log_activity')]
)
class SafeFormatter(string.Formatter):
"""A replacement for str.format that escapes interpolated values."""
def get_field(self, *args, **kw):
# obj is the value getting interpolated into the string.
obj, used_key = super(SafeFormatter, self).get_field(*args, **kw)
return jinja2.escape(obj), used_key
class ActivityLog(ModelBase):
TYPES = sorted(
[(value.id, key) for key, value in constants.activity.LOG_BY_ID.items()]
)
user = models.ForeignKey('users.UserProfile', null=True, on_delete=models.SET_NULL)
action = models.SmallIntegerField(choices=TYPES)
_arguments = models.TextField(blank=True, db_column='arguments')
_details = models.TextField(blank=True, db_column='details')
objects = ActivityLogManager()
formatter = SafeFormatter()
class Meta:
db_table = 'log_activity'
ordering = ('-created',)
indexes = [
models.Index(fields=('action',), name='log_activity_1bd4707b'),
models.Index(fields=('created',), name='created_idx'),
]
def f(self, *args, **kw):
"""Calls SafeFormatter.format and returns a Markup string."""
# SafeFormatter escapes everything so this is safe.
return jinja2.Markup(self.formatter.format(*args, **kw))
@classmethod
def arguments_builder(cls, activities):
def handle_renames(value):
# Cope with renames of key models (use the original model name like
# it was in the ActivityLog as the key so that we can find it
# later)
return 'ratings.rating' if value == 'reviews.review' else value
# We need to do 2 passes on each log:
# - The first time, gather the references to every instance we need
        # - The second time, we build querysets for all instances of the same
# type, pick data from that queryset.
#
# Because it relies on in_bulk(), this method needs the pks to be of a
# consistent type, which doesn't appear to be guaranteed in our
# existing data. For this reason, it forces a conversion to int. If we
# ever want to store ActivityLog items pointing to models using a non
# integer PK field, we'll need to make this a little smarter.
instances_to_load = defaultdict(list)
instances = {}
for activity in activities:
try:
# `arguments_data` will be a list of dicts like:
# `[{'addons.addon':12}, {'addons.addon':1}, ... ]`
activity.arguments_data = json.loads(activity._arguments)
except Exception as e:
log.info('unserializing data from activity_log failed: %s', activity.id)
log.info(e)
activity.arguments_data = []
for item in activity.arguments_data:
# Each 'item' should have one key and one value only.
name, pk = list(item.items())[0]
if name not in ('str', 'int', 'null') and pk:
# Convert pk to int to have consistent data for when we
# call .in_bulk() later.
name = handle_renames(name)
instances_to_load[name].append(int(pk))
# At this point, instances_to_load is a dict of "names" that
# each have a bunch of pks we want to load.
for name, pks in instances_to_load.items():
(app_label, model_name) = name.split('.')
model = apps.get_model(app_label, model_name)
# Load the instances, avoiding transformers other than translations
# and coping with soft-deleted models and unlisted add-ons.
qs = model.get_unfiltered_manager().all()
if hasattr(qs, 'only_translations'):
qs = qs.only_translations()
instances[name] = qs.in_bulk(pks)
# instances is now a dict of "model names" that each have a dict of
# {pk: instance}. We do our second pass on the logs to build the
# "arguments" property from that data, which is a list of the instances
# that each particular log has, in the correct order.
for activity in activities:
objs = []
# We preloaded that property earlier
for item in activity.arguments_data:
# As above, each 'item' should have one key and one value only.
name, pk = list(item.items())[0]
if name in ('str', 'int', 'null'):
# It's not actually a model reference, just return the
# value directly.
objs.append(pk)
elif pk:
# Fetch the instance from the cache we built.
name = handle_renames(name)
obj = instances[name].get(int(pk))
# Most of the time, we're eventually going to call
# to_string() on each ActivityLog that we're processing
# here. For some of the models, that will result in a call
# to <model>.get_absolute_url(), which in turn can cause an
# extra SQL query because some parent model is needed to
# build the URL.
                    # It's difficult to predict what we'll need as ActivityLog
# is fairly generic, but we know Addon is going to be
# needed in some cases for sure (Version, Rating) so if
# we're dealing with objects that have an `addon_id`
# property, and we have already fetched the corresponding
# Addon instance, set the `addon` property on the object
# to the Addon instance we already have to avoid the extra
# SQL query.
addon_id = getattr(obj, 'addon_id', None)
if addon := instances.get('addons.addon', {}).get(addon_id):
obj.addon = addon
objs.append(obj)
# Override the arguments cached_property with what we got.
activity.arguments = objs
@cached_property
def arguments(self):
        # This is a fallback: in 99% of the cases we should not be using this
# but go through the default transformer instead, which executes
# arguments_builder on the whole list of items in the queryset,
# allowing us to fetch the instances in arguments in an optimized
# manner.
self.arguments_builder([self])
return self.arguments
def set_arguments(self, args=None):
"""
        Takes an object or a tuple of objects, serializes them, and stores the
        result in the db as a JSON string.
"""
if args is None:
args = []
if not isinstance(args, (list, tuple)):
args = (args,)
serialize_me = []
for arg in args:
if isinstance(arg, str):
serialize_me.append({'str': arg})
elif isinstance(arg, int):
serialize_me.append({'int': arg})
elif isinstance(arg, tuple):
# Instead of passing an addon instance you can pass a tuple:
# (Addon, 3) for Addon with pk=3
serialize_me.append(dict(((str(arg[0]._meta), arg[1]),)))
else:
serialize_me.append(dict(((str(arg._meta), arg.pk),)))
self._arguments = json.dumps(serialize_me)
@property
def details(self):
if self._details:
return json.loads(self._details)
@details.setter
def details(self, data):
self._details = json.dumps(data)
@property
def log(self):
return constants.activity.LOG_BY_ID[self.action]
def to_string(self, type_=None):
log_type = constants.activity.LOG_BY_ID[self.action]
if type_ and hasattr(log_type, '%s_format' % type_):
format = getattr(log_type, '%s_format' % type_)
else:
format = log_type.format
# We need to copy arguments so we can remove elements from it
# while we loop over self.arguments.
arguments = copy(self.arguments)
addon = None
rating = None
version = None
collection = None
tag = None
group = None
file_ = None
status = None
for arg in self.arguments:
if isinstance(arg, Addon) and not addon:
if arg.has_listed_versions():
addon = self.f(
'<a href="{0}">{1}</a>', arg.get_absolute_url(), arg.name
)
else:
addon = self.f('{0}', arg.name)
arguments.remove(arg)
if isinstance(arg, Rating) and not rating:
rating = self.f(
'<a href="{0}">{1}</a>', arg.get_absolute_url(), gettext('Review')
)
arguments.remove(arg)
if isinstance(arg, Version) and not version:
text = gettext('Version {0}')
if arg.channel == amo.RELEASE_CHANNEL_LISTED:
version = self.f(
'<a href="{1}">%s</a>' % text,
arg.version,
arg.get_absolute_url(),
)
else:
version = self.f(text, arg.version)
arguments.remove(arg)
if isinstance(arg, Collection) and not collection:
collection = self.f(
'<a href="{0}">{1}</a>', arg.get_absolute_url(), arg.name
)
arguments.remove(arg)
if isinstance(arg, Tag) and not tag:
if arg.can_reverse():
tag = self.f(
'<a href="{0}">{1}</a>', arg.get_absolute_url(), arg.tag_text
)
else:
tag = self.f('{0}', arg.tag_text)
if isinstance(arg, Group) and not group:
group = arg.name
arguments.remove(arg)
if isinstance(arg, File) and not file_:
validation = 'passed'
if self.action in (
amo.LOG.UNLISTED_SIGNED.id,
amo.LOG.UNLISTED_SIGNED_VALIDATION_FAILED.id,
):
validation = 'ignored'
file_ = self.f(
'<a href="{0}">{1}</a> (validation {2})',
arg.get_absolute_url(),
arg.filename,
validation,
)
arguments.remove(arg)
if self.action == amo.LOG.CHANGE_STATUS.id and not isinstance(arg, Addon):
# Unfortunately, this action has been abused in the past and
# the non-addon argument could be a string or an int. If it's
# an int, we want to retrieve the string and translate it.
if isinstance(arg, int) and arg in amo.STATUS_CHOICES_ADDON:
status = gettext(amo.STATUS_CHOICES_ADDON[arg])
else:
# It's not an int or not one of the choices, so assume it's
# a string or an unknown int we want to display as-is.
status = arg
arguments.remove(arg)
user = user_link(self.user)
try:
kw = {
'addon': addon,
'rating': rating,
'version': version,
'collection': collection,
'tag': tag,
'user': user,
'group': group,
'file': file_,
'status': status,
}
return self.f(str(format), *arguments, **kw)
except (AttributeError, KeyError, IndexError):
log.warning('%d contains garbage data' % (self.id or 0))
return 'Something magical happened.'
def __str__(self):
return self.to_string()
def __html__(self):
return self
@property
def author_name(self):
"""Name of the user that triggered the activity.
If it's a reviewer action that will be shown to developers, the
`reviewer_name` property is used if present, otherwise `name` is
used."""
if self.action in constants.activity.LOG_REVIEW_QUEUE_DEVELOPER:
return self.user.reviewer_name or self.user.name
return self.user.name
@classmethod
def create(cls, action, *args, **kw):
"""
e.g. ActivityLog.create(amo.LOG.CREATE_ADDON, addon),
ActivityLog.create(amo.LOG.ADD_FILE_TO_VERSION, file, version)
In case of circular import you can use `olympia.activity.log_create()`
"""
from olympia import core
user = kw.get('user', core.get_user())
if not user:
log.warning('Activity log called with no user: %s' % action.id)
return
# We make sure that we take the timestamp if provided, instead of
# creating a new one, especially useful for log entries created
# in a loop.
al = ActivityLog(
user=user, action=action.id, created=kw.get('created', timezone.now())
)
al.set_arguments(args)
if 'details' in kw:
al.details = kw['details']
al.save()
if 'details' in kw and 'comments' in al.details:
CommentLog.objects.create(
comments=al.details['comments'],
activity_log=al,
created=kw.get('created', timezone.now()),
)
for arg in args:
if isinstance(arg, tuple):
class_ = arg[0]
id_ = arg[1]
else:
class_ = arg.__class__
id_ = arg.id if isinstance(arg, ModelBase) else None
if class_ == Addon:
AddonLog.objects.create(
addon_id=id_,
activity_log=al,
created=kw.get('created', timezone.now()),
)
elif class_ == Version:
VersionLog.objects.create(
version_id=id_,
activity_log=al,
created=kw.get('created', timezone.now()),
)
elif class_ == UserProfile:
UserLog.objects.create(
user_id=id_,
activity_log=al,
created=kw.get('created', timezone.now()),
)
elif class_ == Group:
GroupLog.objects.create(
group_id=id_,
activity_log=al,
created=kw.get('created', timezone.now()),
)
elif class_ == Block:
BlockLog.objects.create(
block_id=id_,
activity_log=al,
guid=arg.guid,
created=kw.get('created', timezone.now()),
)
if getattr(action, 'store_ip', False):
# Index specific actions by their IP address. Note that the caller
# must take care of overriding remote addr if the action is created
# from a task.
IPLog.objects.create(
ip_address=core.get_remote_addr(),
activity_log=al,
created=kw.get('created', timezone.now()),
)
# Index by every user
UserLog.objects.create(
activity_log=al, user=user, created=kw.get('created', timezone.now())
)
return al
| 35.87367
| 88
| 0.586907
|
import json
import string
import uuid
from collections import defaultdict
from copy import copy
from datetime import datetime
from django.apps import apps
from django.conf import settings
from django.db import models
from django.utils import timezone
from django.utils.functional import cached_property
from django.utils.translation import gettext
import jinja2
import olympia.core.logger
from olympia import amo, constants
from olympia.access.models import Group
from olympia.addons.models import Addon
from olympia.amo.fields import PositiveAutoField
from olympia.amo.models import BaseQuerySet, ManagerBase, ModelBase
from olympia.bandwagon.models import Collection
from olympia.blocklist.models import Block
from olympia.files.models import File
from olympia.ratings.models import Rating
from olympia.reviewers.models import CannedResponse
from olympia.tags.models import Tag
from olympia.users.models import UserProfile
from olympia.users.templatetags.jinja_helpers import user_link
from olympia.versions.models import Version
log = olympia.core.logger.getLogger('z.amo.activity')
MAX_TOKEN_USE_COUNT = 100
class ActivityLogToken(ModelBase):
id = PositiveAutoField(primary_key=True)
version = models.ForeignKey(Version, related_name='token', on_delete=models.CASCADE)
user = models.ForeignKey(
'users.UserProfile',
related_name='activity_log_tokens',
on_delete=models.CASCADE,
)
uuid = models.UUIDField(default=uuid.uuid4, unique=True)
use_count = models.IntegerField(
default=0, help_text='Stores the number of times the token has been used'
)
class Meta:
db_table = 'log_activity_tokens'
constraints = [
models.UniqueConstraint(fields=('version', 'user'), name='version_id'),
]
def is_expired(self):
return self.use_count >= MAX_TOKEN_USE_COUNT
def is_valid(self):
return (
not self.is_expired()
and self.version
== self.version.addon.find_latest_version(
channel=self.version.channel, exclude=()
)
)
def expire(self):
self.update(use_count=MAX_TOKEN_USE_COUNT)
def increment_use(self):
self.__class__.objects.filter(pk=self.pk).update(
use_count=models.expressions.F('use_count') + 1
)
self.use_count = self.use_count + 1
class ActivityLogEmails(ModelBase):
messageid = models.CharField(max_length=255, unique=True)
class Meta:
db_table = 'log_activity_emails'
class AddonLog(ModelBase):
addon = models.ForeignKey(Addon, on_delete=models.CASCADE)
activity_log = models.ForeignKey('ActivityLog', on_delete=models.CASCADE)
class Meta:
db_table = 'log_activity_addon'
ordering = ('-created',)
def transfer(self, new_addon):
try:
arguments = json.loads(self.activity_log._arguments)
except Exception:
log.info(
'unserializing data from addon_log failed: %s' % self.activity_log.id
)
return None
new_arguments = []
for item in arguments:
if item.get('addons.addon', 0) == self.addon.id:
new_arguments.append({'addons.addon': new_addon.id})
else:
new_arguments.append(item)
self.activity_log.update(_arguments=json.dumps(new_arguments))
self.update(addon=new_addon)
class CommentLog(ModelBase):
activity_log = models.ForeignKey('ActivityLog', on_delete=models.CASCADE)
comments = models.TextField()
class Meta:
db_table = 'log_activity_comment'
ordering = ('-created',)
class VersionLog(ModelBase):
activity_log = models.ForeignKey('ActivityLog', on_delete=models.CASCADE)
version = models.ForeignKey(Version, on_delete=models.CASCADE)
class Meta:
db_table = 'log_activity_version'
ordering = ('-created',)
class UserLog(ModelBase):
activity_log = models.ForeignKey('ActivityLog', on_delete=models.CASCADE)
user = models.ForeignKey(UserProfile, on_delete=models.CASCADE)
class Meta:
db_table = 'log_activity_user'
ordering = ('-created',)
class GroupLog(ModelBase):
id = PositiveAutoField(primary_key=True)
activity_log = models.ForeignKey('ActivityLog', on_delete=models.CASCADE)
group = models.ForeignKey(Group, on_delete=models.CASCADE)
class Meta:
db_table = 'log_activity_group'
ordering = ('-created',)
class BlockLog(ModelBase):
id = PositiveAutoField(primary_key=True)
activity_log = models.ForeignKey('ActivityLog', on_delete=models.CASCADE)
block = models.ForeignKey(Block, on_delete=models.SET_NULL, null=True)
guid = models.CharField(max_length=255, null=False)
class Meta:
db_table = 'log_activity_block'
ordering = ('-created',)
class IPLog(ModelBase):
activity_log = models.ForeignKey('ActivityLog', on_delete=models.CASCADE)
ip_address = models.CharField(max_length=45)
class Meta:
db_table = 'log_activity_ip'
ordering = ('-created',)
class DraftComment(ModelBase):
id = PositiveAutoField(primary_key=True)
version = models.ForeignKey(Version, on_delete=models.CASCADE)
user = models.ForeignKey(UserProfile, on_delete=models.CASCADE)
filename = models.CharField(max_length=255, null=True, blank=True)
lineno = models.PositiveIntegerField(null=True)
canned_response = models.ForeignKey(
CannedResponse, null=True, default=None, on_delete=models.SET_DEFAULT
)
comment = models.TextField(blank=True)
class Meta:
db_table = 'log_activity_comment_draft'
class ActivityLogQuerySet(BaseQuerySet):
def default_transformer(self, logs):
ActivityLog.arguments_builder(logs)
class ActivityLogManager(ManagerBase):
_queryset_class = ActivityLogQuerySet
def get_queryset(self):
qs = super().get_queryset()
qs = qs.transform(qs.default_transformer).prefetch_related('user')
return qs
def for_addons(self, addons):
if isinstance(addons, Addon):
addons = (addons,)
return self.filter(addonlog__addon__in=addons)
def for_versions(self, versions):
if isinstance(versions, Version):
versions = (versions,)
return self.filter(versionlog__version__in=versions)
def for_groups(self, groups):
if isinstance(groups, Group):
groups = (groups,)
return self.filter(grouplog__group__in=groups)
def for_user(self, user):
return self.filter(userlog__user=user)
def for_block(self, block):
return self.filter(blocklog__block=block)
def for_guidblock(self, guid):
return self.filter(blocklog__guid=guid)
def for_developer(self):
return self.exclude(
action__in=constants.activity.LOG_ADMINS
+ constants.activity.LOG_HIDE_DEVELOPER
)
def admin_events(self):
return self.filter(action__in=constants.activity.LOG_ADMINS)
def moderation_events(self):
return self.filter(action__in=constants.activity.LOG_RATING_MODERATION)
def review_queue(self):
qs = self._by_type()
return qs.filter(action__in=constants.activity.LOG_REVIEW_QUEUE).exclude(
user__id=settings.TASK_USER_ID
)
def review_log(self):
qs = self._by_type()
return qs.filter(
action__in=constants.activity.LOG_REVIEWER_REVIEW_ACTION
).exclude(user__id=settings.TASK_USER_ID)
def total_ratings(self, theme=False):
qs = self._by_type()
action_ids = (
[amo.LOG.THEME_REVIEW.id]
if theme
else constants.activity.LOG_REVIEWER_REVIEW_ACTION
)
return (
qs.values('user', 'user__display_name', 'user__username')
.filter(action__in=action_ids)
.exclude(user__id=settings.TASK_USER_ID)
.annotate(approval_count=models.Count('id'))
.order_by('-approval_count')
)
def monthly_reviews(self, theme=False):
qs = self._by_type()
now = datetime.now()
created_date = datetime(now.year, now.month, 1)
actions = (
[constants.activity.LOG.THEME_REVIEW.id]
if theme
else constants.activity.LOG_REVIEWER_REVIEW_ACTION
)
return (
qs.values('user', 'user__display_name', 'user__username')
.filter(created__gte=created_date, action__in=actions)
.exclude(user__id=settings.TASK_USER_ID)
.annotate(approval_count=models.Count('id'))
.order_by('-approval_count')
)
def user_approve_reviews(self, user):
qs = self._by_type()
return qs.filter(
action__in=constants.activity.LOG_REVIEWER_REVIEW_ACTION, user__id=user.id
)
def current_month_user_approve_reviews(self, user):
now = datetime.now()
ago = datetime(now.year, now.month, 1)
return self.user_approve_reviews(user).filter(created__gte=ago)
def user_position(self, values_qs, user):
try:
return (
next(
i
for (i, d) in enumerate(list(values_qs))
if d.get('user') == user.id
)
+ 1
)
except StopIteration:
return None
def total_ratings_user_position(self, user, theme=False):
return self.user_position(self.total_ratings(theme), user)
def monthly_reviews_user_position(self, user, theme=False):
return self.user_position(self.monthly_reviews(theme), user)
def _by_type(self):
qs = self.get_queryset()
table = 'log_activity_addon'
return qs.extra(
tables=[table], where=['%s.activity_log_id=%s.id' % (table, 'log_activity')]
)
class SafeFormatter(string.Formatter):
def get_field(self, *args, **kw):
obj, used_key = super(SafeFormatter, self).get_field(*args, **kw)
return jinja2.escape(obj), used_key
class ActivityLog(ModelBase):
TYPES = sorted(
[(value.id, key) for key, value in constants.activity.LOG_BY_ID.items()]
)
user = models.ForeignKey('users.UserProfile', null=True, on_delete=models.SET_NULL)
action = models.SmallIntegerField(choices=TYPES)
_arguments = models.TextField(blank=True, db_column='arguments')
_details = models.TextField(blank=True, db_column='details')
objects = ActivityLogManager()
formatter = SafeFormatter()
class Meta:
db_table = 'log_activity'
ordering = ('-created',)
indexes = [
models.Index(fields=('action',), name='log_activity_1bd4707b'),
models.Index(fields=('created',), name='created_idx'),
]
def f(self, *args, **kw):
return jinja2.Markup(self.formatter.format(*args, **kw))
@classmethod
def arguments_builder(cls, activities):
def handle_renames(value):
return 'ratings.rating' if value == 'reviews.review' else value
# existing data. For this reason, it forces a conversion to int. If we
# ever want to store ActivityLog items pointing to models using a non
# integer PK field, we'll need to make this a little smarter.
instances_to_load = defaultdict(list)
instances = {}
for activity in activities:
try:
activity.arguments_data = json.loads(activity._arguments)
except Exception as e:
log.info('unserializing data from activity_log failed: %s', activity.id)
log.info(e)
activity.arguments_data = []
for item in activity.arguments_data:
name, pk = list(item.items())[0]
if name not in ('str', 'int', 'null') and pk:
name = handle_renames(name)
instances_to_load[name].append(int(pk))
for name, pks in instances_to_load.items():
(app_label, model_name) = name.split('.')
model = apps.get_model(app_label, model_name)
qs = model.get_unfiltered_manager().all()
if hasattr(qs, 'only_translations'):
qs = qs.only_translations()
instances[name] = qs.in_bulk(pks)
for activity in activities:
objs = []
for item in activity.arguments_data:
name, pk = list(item.items())[0]
if name in ('str', 'int', 'null'):
# value directly.
objs.append(pk)
elif pk:
# Fetch the instance from the cache we built.
name = handle_renames(name)
obj = instances[name].get(int(pk))
# Most of the time, we're eventually going to call
# here. For some of the models, that will result in a call
# to <model>.get_absolute_url(), which in turn can cause an
# extra SQL query because some parent model is needed to
# build the URL.
                    # It's difficult to predict what we'll need as ActivityLog
# is fairly generic, but we know Addon is going to be
# needed in some cases for sure (Version, Rating) so if
# we're dealing with objects that have an `addon_id`
addon_id = getattr(obj, 'addon_id', None)
if addon := instances.get('addons.addon', {}).get(addon_id):
obj.addon = addon
objs.append(obj)
activity.arguments = objs
@cached_property
def arguments(self):
self.arguments_builder([self])
return self.arguments
def set_arguments(self, args=None):
if args is None:
args = []
if not isinstance(args, (list, tuple)):
args = (args,)
serialize_me = []
for arg in args:
if isinstance(arg, str):
serialize_me.append({'str': arg})
elif isinstance(arg, int):
serialize_me.append({'int': arg})
elif isinstance(arg, tuple):
serialize_me.append(dict(((str(arg[0]._meta), arg[1]),)))
else:
serialize_me.append(dict(((str(arg._meta), arg.pk),)))
self._arguments = json.dumps(serialize_me)
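    # Serialization sketch (hypothetical values): set_arguments(['hello',
    # 42, addon]) stores the JSON '[{"str": "hello"}, {"int": 42},
    # {"addons.addon": 123}]', where the key is str(instance._meta) and the
    # value is the instance's pk.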
@property
def details(self):
if self._details:
return json.loads(self._details)
@details.setter
def details(self, data):
self._details = json.dumps(data)
@property
def log(self):
return constants.activity.LOG_BY_ID[self.action]
def to_string(self, type_=None):
log_type = constants.activity.LOG_BY_ID[self.action]
if type_ and hasattr(log_type, '%s_format' % type_):
format = getattr(log_type, '%s_format' % type_)
else:
format = log_type.format
arguments = copy(self.arguments)
addon = None
rating = None
version = None
collection = None
tag = None
group = None
file_ = None
status = None
for arg in self.arguments:
if isinstance(arg, Addon) and not addon:
if arg.has_listed_versions():
addon = self.f(
'<a href="{0}">{1}</a>', arg.get_absolute_url(), arg.name
)
else:
addon = self.f('{0}', arg.name)
arguments.remove(arg)
if isinstance(arg, Rating) and not rating:
rating = self.f(
'<a href="{0}">{1}</a>', arg.get_absolute_url(), gettext('Review')
)
arguments.remove(arg)
if isinstance(arg, Version) and not version:
text = gettext('Version {0}')
if arg.channel == amo.RELEASE_CHANNEL_LISTED:
version = self.f(
'<a href="{1}">%s</a>' % text,
arg.version,
arg.get_absolute_url(),
)
else:
version = self.f(text, arg.version)
arguments.remove(arg)
if isinstance(arg, Collection) and not collection:
collection = self.f(
'<a href="{0}">{1}</a>', arg.get_absolute_url(), arg.name
)
arguments.remove(arg)
if isinstance(arg, Tag) and not tag:
if arg.can_reverse():
tag = self.f(
'<a href="{0}">{1}</a>', arg.get_absolute_url(), arg.tag_text
)
else:
tag = self.f('{0}', arg.tag_text)
if isinstance(arg, Group) and not group:
group = arg.name
arguments.remove(arg)
if isinstance(arg, File) and not file_:
validation = 'passed'
if self.action in (
amo.LOG.UNLISTED_SIGNED.id,
amo.LOG.UNLISTED_SIGNED_VALIDATION_FAILED.id,
):
validation = 'ignored'
file_ = self.f(
'<a href="{0}">{1}</a> (validation {2})',
arg.get_absolute_url(),
arg.filename,
validation,
)
arguments.remove(arg)
if self.action == amo.LOG.CHANGE_STATUS.id and not isinstance(arg, Addon):
                # The remaining argument is the new status. If it's
                # an int, we want to retrieve the string and translate it.
if isinstance(arg, int) and arg in amo.STATUS_CHOICES_ADDON:
status = gettext(amo.STATUS_CHOICES_ADDON[arg])
else:
# It's not an int or not one of the choices, so assume it's
# a string or an unknown int we want to display as-is.
status = arg
arguments.remove(arg)
user = user_link(self.user)
try:
kw = {
'addon': addon,
'rating': rating,
'version': version,
'collection': collection,
'tag': tag,
'user': user,
'group': group,
'file': file_,
'status': status,
}
return self.f(str(format), *arguments, **kw)
except (AttributeError, KeyError, IndexError):
log.warning('%d contains garbage data' % (self.id or 0))
return 'Something magical happened.'
def __str__(self):
return self.to_string()
def __html__(self):
return self
@property
def author_name(self):
if self.action in constants.activity.LOG_REVIEW_QUEUE_DEVELOPER:
return self.user.reviewer_name or self.user.name
return self.user.name
@classmethod
def create(cls, action, *args, **kw):
from olympia import core
user = kw.get('user', core.get_user())
if not user:
log.warning('Activity log called with no user: %s' % action.id)
return
# We make sure that we take the timestamp if provided, instead of
# creating a new one, especially useful for log entries created
# in a loop.
al = ActivityLog(
user=user, action=action.id, created=kw.get('created', timezone.now())
)
al.set_arguments(args)
if 'details' in kw:
al.details = kw['details']
al.save()
if 'details' in kw and 'comments' in al.details:
CommentLog.objects.create(
comments=al.details['comments'],
activity_log=al,
created=kw.get('created', timezone.now()),
)
for arg in args:
if isinstance(arg, tuple):
class_ = arg[0]
id_ = arg[1]
else:
class_ = arg.__class__
id_ = arg.id if isinstance(arg, ModelBase) else None
if class_ == Addon:
AddonLog.objects.create(
addon_id=id_,
activity_log=al,
created=kw.get('created', timezone.now()),
)
elif class_ == Version:
VersionLog.objects.create(
version_id=id_,
activity_log=al,
created=kw.get('created', timezone.now()),
)
elif class_ == UserProfile:
UserLog.objects.create(
user_id=id_,
activity_log=al,
created=kw.get('created', timezone.now()),
)
elif class_ == Group:
GroupLog.objects.create(
group_id=id_,
activity_log=al,
created=kw.get('created', timezone.now()),
)
elif class_ == Block:
BlockLog.objects.create(
block_id=id_,
activity_log=al,
guid=arg.guid,
created=kw.get('created', timezone.now()),
)
if getattr(action, 'store_ip', False):
# Index specific actions by their IP address. Note that the caller
# must take care of overriding remote addr if the action is created
# from a task.
IPLog.objects.create(
ip_address=core.get_remote_addr(),
activity_log=al,
created=kw.get('created', timezone.now()),
)
# Index by every user
UserLog.objects.create(
activity_log=al, user=user, created=kw.get('created', timezone.now())
)
return al
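# Usage sketch (hypothetical arguments, not a prescribed API): callers
# typically go through ActivityLog.create(), e.g.
#
#     ActivityLog.create(
#         amo.LOG.CHANGE_STATUS, addon, amo.STATUS_APPROVED,
#         user=reviewer, details={'comments': 'looks good'})
#
# which also fans the entry out to the related index tables (AddonLog,
# UserLog, ...) as shown above.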
| true
| true
|
1c46b6c6d53ac094d8f4c8e0d1401edb439f6fc3
| 4,863
|
py
|
Python
|
tests/core/test_virtual_group.py
|
TileDB-Inc/TileDB-CF-Py
|
9aab0fe9ba7346a1846c7458a5d08b123dcf90a8
|
[
"MIT"
] | 12
|
2021-06-07T16:51:32.000Z
|
2022-03-10T12:48:00.000Z
|
tests/core/test_virtual_group.py
|
TileDB-Inc/TileDB-CF-Py
|
9aab0fe9ba7346a1846c7458a5d08b123dcf90a8
|
[
"MIT"
] | 72
|
2021-04-28T21:49:41.000Z
|
2022-02-24T13:58:11.000Z
|
tests/core/test_virtual_group.py
|
TileDB-Inc/TileDB-CF-Py
|
9aab0fe9ba7346a1846c7458a5d08b123dcf90a8
|
[
"MIT"
] | 3
|
2021-08-11T16:33:37.000Z
|
2021-12-01T20:31:12.000Z
|
# Copyright 2021 TileDB Inc.
# Licensed under the MIT License.
import numpy as np
import pytest
import tiledb
from tiledb.cf import GroupSchema, VirtualGroup
_row = tiledb.Dim(name="rows", domain=(1, 4), tile=4, dtype=np.uint64)
_col = tiledb.Dim(name="cols", domain=(1, 4), tile=4, dtype=np.uint64)
_attr_a = tiledb.Attr(name="a", dtype=np.uint64)
_attr_b = tiledb.Attr(name="b", dtype=np.float64)
_attr_c = tiledb.Attr(name="c", dtype=np.dtype("U"))
_array_schema_1 = tiledb.ArraySchema(
domain=tiledb.Domain(_row, _col),
attrs=[_attr_a],
)
_array_schema_2 = tiledb.ArraySchema(
domain=tiledb.Domain(_row),
sparse=True,
attrs=[_attr_b, _attr_c],
)
_array_schema_3 = tiledb.ArraySchema(
domain=tiledb.Domain(_row, _col),
attrs=[_attr_c],
)
class TestCreateVirtualGroup:
_metadata_schema = _array_schema_1
_array_schemas = [
("A1", _array_schema_1),
("A2", _array_schema_2),
]
_group_schema = GroupSchema(_array_schemas, _metadata_schema)
@pytest.fixture(scope="class")
def group_uri(self, tmpdir_factory):
"""Creates a TileDB Group from GroupSchema and returns scenario dict."""
uri = str(tmpdir_factory.mktemp("group1").join("virtual"))
ctx = None
VirtualGroup.create(uri, self._group_schema, ctx=ctx)
return {"__tiledb_group": uri, "A1": f"{uri}_A1", "A2": f"{uri}_A2"}
def test_array_schemas(self, group_uri):
assert (
tiledb.ArraySchema.load(group_uri["__tiledb_group"])
== self._metadata_schema
)
assert tiledb.ArraySchema.load(group_uri["A1"]) == _array_schema_1
assert tiledb.ArraySchema.load(group_uri["A2"]) == _array_schema_2
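# For orientation, a sketch (using the fixture's URI mapping above) of how
# such a group is read back: the same dict is handed straight to
# VirtualGroup.
#
#     with VirtualGroup(group_uri) as group:
#         with group.open_array(array="A1") as array:
#             ...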
class TestMetadataOnlyGroup:
_metadata_schema = tiledb.ArraySchema(
domain=tiledb.Domain(
tiledb.Dim(name="rows", domain=(1, 4), tile=2, dtype=np.uint64)
),
attrs=[tiledb.Attr(name="a", dtype=np.uint64)],
sparse=True,
)
@pytest.fixture(scope="class")
def group_uris(self, tmpdir_factory):
uri = str(tmpdir_factory.mktemp("group1"))
tiledb.Array.create(uri, self._metadata_schema)
return {"__tiledb_group": uri}
def test_has_metadata(self, group_uris):
with VirtualGroup(group_uris) as group:
assert isinstance(group, VirtualGroup)
assert group.has_metadata_array
assert group.meta is not None
def test_no_such_attr_error(self, group_uris):
with VirtualGroup(group_uris) as group:
with pytest.raises(KeyError):
group.open_array(attr="a")
class TestVirtualGroupWithArrays:
_metadata_schema = tiledb.ArraySchema(
domain=tiledb.Domain(
tiledb.Dim(name="rows", domain=(1, 4), tile=2, dtype=np.uint64)
),
attrs=[tiledb.Attr(name="a", dtype=np.uint64)],
sparse=True,
)
_A1_data = np.array(
([1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]), dtype=np.uint64
)
@pytest.fixture(scope="class")
def group_uris(self, tmpdir_factory):
uri = str(tmpdir_factory.mktemp("simple_group"))
tiledb.Array.create(uri + "/metadata", self._metadata_schema)
tiledb.Array.create(uri + "/array1", _array_schema_1)
with tiledb.DenseArray(uri + "/array1", mode="w") as array:
array[:] = self._A1_data
tiledb.Array.create(uri + "/array2", _array_schema_2)
tiledb.Array.create(uri + "/array3", _array_schema_3)
return {
"__tiledb_group": f"{uri}/metadata",
"A1": f"{uri}/array1",
"A2": f"{uri}/array2",
"A3": f"{uri}/array3",
}
def test_open_array_from_group(self, group_uris):
with VirtualGroup(group_uris) as group:
with group.open_array(array="A1") as array:
assert isinstance(array, tiledb.Array)
assert array.mode == "r"
np.testing.assert_equal(array[:, :]["a"], self._A1_data)
def test_open_attr(self, group_uris):
with VirtualGroup(group_uris) as group:
with group.open_array(attr="a") as array:
assert isinstance(array, tiledb.Array)
assert array.mode == "r"
np.testing.assert_equal(array[:, :], self._A1_data)
def test_attr_ambiguous_error(self, group_uris):
with VirtualGroup(group_uris) as group:
with pytest.raises(ValueError):
group.open_array(attr="c")
def test_append_group_warning(tmpdir):
uri = str(tmpdir.mkdir("append_group_test"))
with pytest.warns(Warning):
VirtualGroup.create(
uri + "/test", GroupSchema({"A1": _array_schema_1}), append=True
)
schema = tiledb.ArraySchema.load(uri + "/test_A1")
assert schema == _array_schema_1
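# Note: these tests are plain pytest tests; running e.g.
# `pytest tests/core/test_virtual_group.py` (assuming tiledb and tiledb-cf
# are installed) exercises group creation, metadata access and array opening.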
| 34.006993
| 88
| 0.631298
|
import numpy as np
import pytest
import tiledb
from tiledb.cf import GroupSchema, VirtualGroup
_row = tiledb.Dim(name="rows", domain=(1, 4), tile=4, dtype=np.uint64)
_col = tiledb.Dim(name="cols", domain=(1, 4), tile=4, dtype=np.uint64)
_attr_a = tiledb.Attr(name="a", dtype=np.uint64)
_attr_b = tiledb.Attr(name="b", dtype=np.float64)
_attr_c = tiledb.Attr(name="c", dtype=np.dtype("U"))
_array_schema_1 = tiledb.ArraySchema(
domain=tiledb.Domain(_row, _col),
attrs=[_attr_a],
)
_array_schema_2 = tiledb.ArraySchema(
domain=tiledb.Domain(_row),
sparse=True,
attrs=[_attr_b, _attr_c],
)
_array_schema_3 = tiledb.ArraySchema(
domain=tiledb.Domain(_row, _col),
attrs=[_attr_c],
)
class TestCreateVirtualGroup:
_metadata_schema = _array_schema_1
_array_schemas = [
("A1", _array_schema_1),
("A2", _array_schema_2),
]
_group_schema = GroupSchema(_array_schemas, _metadata_schema)
@pytest.fixture(scope="class")
def group_uri(self, tmpdir_factory):
uri = str(tmpdir_factory.mktemp("group1").join("virtual"))
ctx = None
VirtualGroup.create(uri, self._group_schema, ctx=ctx)
return {"__tiledb_group": uri, "A1": f"{uri}_A1", "A2": f"{uri}_A2"}
def test_array_schemas(self, group_uri):
assert (
tiledb.ArraySchema.load(group_uri["__tiledb_group"])
== self._metadata_schema
)
assert tiledb.ArraySchema.load(group_uri["A1"]) == _array_schema_1
assert tiledb.ArraySchema.load(group_uri["A2"]) == _array_schema_2
class TestMetadataOnlyGroup:
_metadata_schema = tiledb.ArraySchema(
domain=tiledb.Domain(
tiledb.Dim(name="rows", domain=(1, 4), tile=2, dtype=np.uint64)
),
attrs=[tiledb.Attr(name="a", dtype=np.uint64)],
sparse=True,
)
@pytest.fixture(scope="class")
def group_uris(self, tmpdir_factory):
uri = str(tmpdir_factory.mktemp("group1"))
tiledb.Array.create(uri, self._metadata_schema)
return {"__tiledb_group": uri}
def test_has_metadata(self, group_uris):
with VirtualGroup(group_uris) as group:
assert isinstance(group, VirtualGroup)
assert group.has_metadata_array
assert group.meta is not None
def test_no_such_attr_error(self, group_uris):
with VirtualGroup(group_uris) as group:
with pytest.raises(KeyError):
group.open_array(attr="a")
class TestVirtualGroupWithArrays:
_metadata_schema = tiledb.ArraySchema(
domain=tiledb.Domain(
tiledb.Dim(name="rows", domain=(1, 4), tile=2, dtype=np.uint64)
),
attrs=[tiledb.Attr(name="a", dtype=np.uint64)],
sparse=True,
)
_A1_data = np.array(
([1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]), dtype=np.uint64
)
@pytest.fixture(scope="class")
def group_uris(self, tmpdir_factory):
uri = str(tmpdir_factory.mktemp("simple_group"))
tiledb.Array.create(uri + "/metadata", self._metadata_schema)
tiledb.Array.create(uri + "/array1", _array_schema_1)
with tiledb.DenseArray(uri + "/array1", mode="w") as array:
array[:] = self._A1_data
tiledb.Array.create(uri + "/array2", _array_schema_2)
tiledb.Array.create(uri + "/array3", _array_schema_3)
return {
"__tiledb_group": f"{uri}/metadata",
"A1": f"{uri}/array1",
"A2": f"{uri}/array2",
"A3": f"{uri}/array3",
}
def test_open_array_from_group(self, group_uris):
with VirtualGroup(group_uris) as group:
with group.open_array(array="A1") as array:
assert isinstance(array, tiledb.Array)
assert array.mode == "r"
np.testing.assert_equal(array[:, :]["a"], self._A1_data)
def test_open_attr(self, group_uris):
with VirtualGroup(group_uris) as group:
with group.open_array(attr="a") as array:
assert isinstance(array, tiledb.Array)
assert array.mode == "r"
np.testing.assert_equal(array[:, :], self._A1_data)
def test_attr_ambiguous_error(self, group_uris):
with VirtualGroup(group_uris) as group:
with pytest.raises(ValueError):
group.open_array(attr="c")
def test_append_group_warning(tmpdir):
uri = str(tmpdir.mkdir("append_group_test"))
with pytest.warns(Warning):
VirtualGroup.create(
uri + "/test", GroupSchema({"A1": _array_schema_1}), append=True
)
schema = tiledb.ArraySchema.load(uri + "/test_A1")
assert schema == _array_schema_1
| true
| true
|
1c46bab99d58eea58a638a070fe13030e84bce32
| 14,212
|
py
|
Python
|
tensorflow/contrib/timeseries/examples/lstm.py
|
uve/tensorflow
|
e08079463bf43e5963acc41da1f57e95603f8080
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/contrib/timeseries/examples/lstm.py
|
uve/tensorflow
|
e08079463bf43e5963acc41da1f57e95603f8080
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/contrib/timeseries/examples/lstm.py
|
uve/tensorflow
|
e08079463bf43e5963acc41da1f57e95603f8080
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A more advanced example, of building an RNN-based time series model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from os import path
import tempfile
import numpy
import tensorflow as tf
from tensorflow.contrib.timeseries.python.timeseries import estimators as ts_estimators
from tensorflow.contrib.timeseries.python.timeseries import model as ts_model
from tensorflow.contrib.timeseries.python.timeseries import state_management
try:
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("TkAgg") # Need Tk for interactive plots.
from matplotlib import pyplot # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
# Plotting requires matplotlib, but the unit test running this code may
# execute in an environment without it (i.e. matplotlib is not a build
# dependency). We'd still like to test the TensorFlow-dependent parts of this
# example.
HAS_MATPLOTLIB = False
_MODULE_PATH = path.dirname(__file__)
_DATA_FILE = path.join(_MODULE_PATH, "data/multivariate_periods.csv")
class _LSTMModel(ts_model.SequentialTimeSeriesModel):
"""A time series model-building example using an RNNCell."""
def __init__(self, num_units, num_features, exogenous_feature_columns=None,
dtype=tf.float32):
"""Initialize/configure the model object.
Note that we do not start graph building here. Rather, this object is a
configurable factory for TensorFlow graphs which are run by an Estimator.
Args:
num_units: The number of units in the model's LSTMCell.
num_features: The dimensionality of the time series (features per
timestep).
exogenous_feature_columns: A list of `tf.feature_column`s representing
features which are inputs to the model but are not predicted by
it. These must then be present for training, evaluation, and
prediction.
dtype: The floating point data type to use.
"""
super(_LSTMModel, self).__init__(
# Pre-register the metrics we'll be outputting (just a mean here).
train_output_names=["mean"],
predict_output_names=["mean"],
num_features=num_features,
exogenous_feature_columns=exogenous_feature_columns,
dtype=dtype)
self._num_units = num_units
# Filled in by initialize_graph()
self._lstm_cell = None
self._lstm_cell_run = None
self._predict_from_lstm_output = None
def initialize_graph(self, input_statistics=None):
"""Save templates for components, which can then be used repeatedly.
This method is called every time a new graph is created. It's safe to start
adding ops to the current default graph here, but the graph should be
constructed from scratch.
Args:
input_statistics: A math_utils.InputStatistics object.
"""
super(_LSTMModel, self).initialize_graph(input_statistics=input_statistics)
with tf.variable_scope("", use_resource=True):
# Use ResourceVariables to avoid race conditions.
self._lstm_cell = tf.nn.rnn_cell.LSTMCell(num_units=self._num_units)
# Create templates so we don't have to worry about variable reuse.
self._lstm_cell_run = tf.make_template(
name_="lstm_cell",
func_=self._lstm_cell,
create_scope_now_=True)
# Transforms LSTM output into mean predictions.
self._predict_from_lstm_output = tf.make_template(
name_="predict_from_lstm_output",
func_=functools.partial(tf.layers.dense, units=self.num_features),
create_scope_now_=True)
def get_start_state(self):
"""Return initial state for the time series model."""
return (
# Keeps track of the time associated with this state for error checking.
tf.zeros([], dtype=tf.int64),
# The previous observation or prediction.
tf.zeros([self.num_features], dtype=self.dtype),
# The most recently seen exogenous features.
tf.zeros(self._get_exogenous_embedding_shape(), dtype=self.dtype),
# The state of the RNNCell (batch dimension removed since this parent
# class will broadcast).
[tf.squeeze(state_element, axis=0)
for state_element
in self._lstm_cell.zero_state(batch_size=1, dtype=self.dtype)])
def _filtering_step(self, current_times, current_values, state, predictions):
"""Update model state based on observations.
Note that we don't do much here aside from computing a loss. In this case
it's easier to update the RNN state in _prediction_step, since that covers
running the RNN both on observations (from this method) and our own
predictions. This distinction can be important for probabilistic models,
where repeatedly predicting without filtering should lead to low-confidence
predictions.
Args:
current_times: A [batch size] integer Tensor.
current_values: A [batch size, self.num_features] floating point Tensor
with new observations.
state: The model's state tuple.
predictions: The output of the previous `_prediction_step`.
Returns:
A tuple of new state and a predictions dictionary updated to include a
loss (note that we could also return other measures of goodness of fit,
although only "loss" will be optimized).
"""
state_from_time, prediction, exogenous, lstm_state = state
with tf.control_dependencies(
[tf.assert_equal(current_times, state_from_time)]):
# Subtract the mean and divide by the variance of the series. Slightly
# more efficient if done for a whole window (using the normalize_features
# argument to SequentialTimeSeriesModel).
transformed_values = self._scale_data(current_values)
# Use mean squared error across features for the loss.
predictions["loss"] = tf.reduce_mean(
(prediction - transformed_values) ** 2, axis=-1)
# Keep track of the new observation in model state. It won't be run
# through the LSTM until the next _imputation_step.
new_state_tuple = (current_times, transformed_values,
exogenous, lstm_state)
return (new_state_tuple, predictions)
def _prediction_step(self, current_times, state):
"""Advance the RNN state using a previous observation or prediction."""
_, previous_observation_or_prediction, exogenous, lstm_state = state
# Update LSTM state based on the most recent exogenous and endogenous
# features.
inputs = tf.concat([previous_observation_or_prediction, exogenous],
axis=-1)
lstm_output, new_lstm_state = self._lstm_cell_run(
inputs=inputs, state=lstm_state)
next_prediction = self._predict_from_lstm_output(lstm_output)
new_state_tuple = (current_times, next_prediction,
exogenous, new_lstm_state)
return new_state_tuple, {"mean": self._scale_back_data(next_prediction)}
def _imputation_step(self, current_times, state):
"""Advance model state across a gap."""
# Does not do anything special if we're jumping across a gap. More advanced
# models, especially probabilistic ones, would want a special case that
# depends on the gap size.
return state
def _exogenous_input_step(
self, current_times, current_exogenous_regressors, state):
"""Save exogenous regressors in model state for use in _prediction_step."""
state_from_time, prediction, _, lstm_state = state
return (state_from_time, prediction,
current_exogenous_regressors, lstm_state)
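# A minimal construction sketch (illustrative sizes, distinct from the
# values used below): the model object is pure configuration until an
# Estimator asks it to build a graph.
#
#     model = _LSTMModel(num_units=16, num_features=2)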
def train_and_predict(
csv_file_name=_DATA_FILE, training_steps=200, estimator_config=None,
export_directory=None):
"""Train and predict using a custom time series model."""
# Construct an Estimator from our LSTM model.
categorical_column = tf.feature_column.categorical_column_with_hash_bucket(
key="categorical_exogenous_feature", hash_bucket_size=16)
exogenous_feature_columns = [
# Exogenous features are not part of the loss, but can inform
# predictions. In this example the features have no extra information, but
# are included as an API example.
tf.feature_column.numeric_column(
"2d_exogenous_feature", shape=(2,)),
tf.feature_column.embedding_column(
categorical_column=categorical_column, dimension=10)]
estimator = ts_estimators.TimeSeriesRegressor(
model=_LSTMModel(
num_features=5,
num_units=128,
exogenous_feature_columns=exogenous_feature_columns),
optimizer=tf.compat.v1.train.AdamOptimizer(0.001),
config=estimator_config,
# Set state to be saved across windows.
state_manager=state_management.ChainingStateManager())
reader = tf.contrib.timeseries.CSVReader(
csv_file_name,
column_names=((tf.contrib.timeseries.TrainEvalFeatures.TIMES,)
+ (tf.contrib.timeseries.TrainEvalFeatures.VALUES,) * 5
+ ("2d_exogenous_feature",) * 2
+ ("categorical_exogenous_feature",)),
# Data types other than for `times` need to be specified if they aren't
# float32. In this case one of our exogenous features has string dtype.
column_dtypes=((tf.int64,) + (tf.float32,) * 7 + (tf.string,)))
train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
reader, batch_size=4, window_size=32)
estimator.train(input_fn=train_input_fn, steps=training_steps)
evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
# Predict starting after the evaluation
predict_exogenous_features = {
"2d_exogenous_feature": numpy.concatenate(
[numpy.ones([1, 100, 1]), numpy.zeros([1, 100, 1])],
axis=-1),
"categorical_exogenous_feature": numpy.array(
["strkey"] * 100)[None, :, None]}
(predictions,) = tuple(estimator.predict(
input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
evaluation, steps=100,
exogenous_features=predict_exogenous_features)))
times = evaluation["times"][0]
observed = evaluation["observed"][0, :, :]
predicted_mean = numpy.squeeze(numpy.concatenate(
[evaluation["mean"][0], predictions["mean"]], axis=0))
all_times = numpy.concatenate([times, predictions["times"]], axis=0)
# Export the model in SavedModel format. We include a bit of extra boilerplate
# for "cold starting" as if we didn't have any state from the Estimator, which
# is the case when serving from a SavedModel. If Estimator output is
# available, the result of "Estimator.evaluate" can be passed directly to
# `tf.contrib.timeseries.saved_model_utils.predict_continuation` as the
# `continue_from` argument.
with tf.Graph().as_default():
filter_feature_tensors, _ = evaluation_input_fn()
with tf.train.MonitoredSession() as session:
# Fetch the series to "warm up" our state, which will allow us to make
# predictions for its future values. This is just a dictionary of times,
# values, and exogenous features mapping to numpy arrays. The use of an
# input_fn is just a convenience for the example; they can also be
# specified manually.
filter_features = session.run(filter_feature_tensors)
if export_directory is None:
export_directory = tempfile.mkdtemp()
input_receiver_fn = estimator.build_raw_serving_input_receiver_fn()
export_location = estimator.export_saved_model(export_directory,
input_receiver_fn)
# Warm up and predict using the SavedModel
with tf.Graph().as_default():
with tf.compat.v1.Session() as session:
signatures = tf.saved_model.loader.load(
session, [tf.saved_model.tag_constants.SERVING], export_location)
state = tf.contrib.timeseries.saved_model_utils.cold_start_filter(
signatures=signatures, session=session, features=filter_features)
saved_model_output = (
tf.contrib.timeseries.saved_model_utils.predict_continuation(
continue_from=state, signatures=signatures,
session=session, steps=100,
exogenous_features=predict_exogenous_features))
# The exported model gives the same results as the Estimator.predict()
# call above.
numpy.testing.assert_allclose(
predictions["mean"],
numpy.squeeze(saved_model_output["mean"], axis=0))
return times, observed, all_times, predicted_mean
def main(unused_argv):
if not HAS_MATPLOTLIB:
raise ImportError(
"Please install matplotlib to generate a plot from this example.")
(observed_times, observations,
all_times, predictions) = train_and_predict()
pyplot.axvline(99, linestyle="dotted")
observed_lines = pyplot.plot(
observed_times, observations, label="Observed", color="k")
predicted_lines = pyplot.plot(
all_times, predictions, label="Predicted", color="b")
pyplot.legend(handles=[observed_lines[0], predicted_lines[0]],
loc="upper left")
pyplot.show()
if __name__ == "__main__":
tf.compat.v1.app.run(main=main)
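# To run as a script (assuming a TF 1.x environment with tf.contrib and
# matplotlib available): `python lstm.py` trains on the bundled CSV,
# predicts 100 steps ahead and plots observed vs. predicted values.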
| 47.373333
| 88
| 0.700113
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from os import path
import tempfile
import numpy
import tensorflow as tf
from tensorflow.contrib.timeseries.python.timeseries import estimators as ts_estimators
from tensorflow.contrib.timeseries.python.timeseries import model as ts_model
from tensorflow.contrib.timeseries.python.timeseries import state_management
try:
import matplotlib
matplotlib.use("TkAgg")
from matplotlib import pyplot
HAS_MATPLOTLIB = True
except ImportError:
# example.
HAS_MATPLOTLIB = False
_MODULE_PATH = path.dirname(__file__)
_DATA_FILE = path.join(_MODULE_PATH, "data/multivariate_periods.csv")
class _LSTMModel(ts_model.SequentialTimeSeriesModel):
def __init__(self, num_units, num_features, exogenous_feature_columns=None,
dtype=tf.float32):
super(_LSTMModel, self).__init__(
# Pre-register the metrics we'll be outputting (just a mean here).
train_output_names=["mean"],
predict_output_names=["mean"],
num_features=num_features,
exogenous_feature_columns=exogenous_feature_columns,
dtype=dtype)
self._num_units = num_units
self._lstm_cell = None
self._lstm_cell_run = None
self._predict_from_lstm_output = None
def initialize_graph(self, input_statistics=None):
super(_LSTMModel, self).initialize_graph(input_statistics=input_statistics)
with tf.variable_scope("", use_resource=True):
self._lstm_cell = tf.nn.rnn_cell.LSTMCell(num_units=self._num_units)
self._lstm_cell_run = tf.make_template(
name_="lstm_cell",
func_=self._lstm_cell,
create_scope_now_=True)
# Transforms LSTM output into mean predictions.
self._predict_from_lstm_output = tf.make_template(
name_="predict_from_lstm_output",
func_=functools.partial(tf.layers.dense, units=self.num_features),
create_scope_now_=True)
def get_start_state(self):
return (
# Keeps track of the time associated with this state for error checking.
tf.zeros([], dtype=tf.int64),
# The previous observation or prediction.
tf.zeros([self.num_features], dtype=self.dtype),
# The most recently seen exogenous features.
tf.zeros(self._get_exogenous_embedding_shape(), dtype=self.dtype),
# The state of the RNNCell (batch dimension removed since this parent
# class will broadcast).
[tf.squeeze(state_element, axis=0)
for state_element
in self._lstm_cell.zero_state(batch_size=1, dtype=self.dtype)])
def _filtering_step(self, current_times, current_values, state, predictions):
state_from_time, prediction, exogenous, lstm_state = state
with tf.control_dependencies(
[tf.assert_equal(current_times, state_from_time)]):
# Subtract the mean and divide by the variance of the series. Slightly
# more efficient if done for a whole window (using the normalize_features
# argument to SequentialTimeSeriesModel).
transformed_values = self._scale_data(current_values)
# Use mean squared error across features for the loss.
predictions["loss"] = tf.reduce_mean(
(prediction - transformed_values) ** 2, axis=-1)
# Keep track of the new observation in model state. It won't be run
new_state_tuple = (current_times, transformed_values,
exogenous, lstm_state)
return (new_state_tuple, predictions)
def _prediction_step(self, current_times, state):
_, previous_observation_or_prediction, exogenous, lstm_state = state
inputs = tf.concat([previous_observation_or_prediction, exogenous],
axis=-1)
lstm_output, new_lstm_state = self._lstm_cell_run(
inputs=inputs, state=lstm_state)
next_prediction = self._predict_from_lstm_output(lstm_output)
new_state_tuple = (current_times, next_prediction,
exogenous, new_lstm_state)
return new_state_tuple, {"mean": self._scale_back_data(next_prediction)}
def _imputation_step(self, current_times, state):
# models, especially probabilistic ones, would want a special case that
# depends on the gap size.
return state
def _exogenous_input_step(
self, current_times, current_exogenous_regressors, state):
state_from_time, prediction, _, lstm_state = state
return (state_from_time, prediction,
current_exogenous_regressors, lstm_state)
def train_and_predict(
csv_file_name=_DATA_FILE, training_steps=200, estimator_config=None,
export_directory=None):
# Construct an Estimator from our LSTM model.
categorical_column = tf.feature_column.categorical_column_with_hash_bucket(
key="categorical_exogenous_feature", hash_bucket_size=16)
exogenous_feature_columns = [
# Exogenous features are not part of the loss, but can inform
# predictions. In this example the features have no extra information, but
# are included as an API example.
tf.feature_column.numeric_column(
"2d_exogenous_feature", shape=(2,)),
tf.feature_column.embedding_column(
categorical_column=categorical_column, dimension=10)]
estimator = ts_estimators.TimeSeriesRegressor(
model=_LSTMModel(
num_features=5,
num_units=128,
exogenous_feature_columns=exogenous_feature_columns),
optimizer=tf.compat.v1.train.AdamOptimizer(0.001),
config=estimator_config,
# Set state to be saved across windows.
state_manager=state_management.ChainingStateManager())
reader = tf.contrib.timeseries.CSVReader(
csv_file_name,
column_names=((tf.contrib.timeseries.TrainEvalFeatures.TIMES,)
+ (tf.contrib.timeseries.TrainEvalFeatures.VALUES,) * 5
+ ("2d_exogenous_feature",) * 2
+ ("categorical_exogenous_feature",)),
# Data types other than for `times` need to be specified if they aren't
column_dtypes=((tf.int64,) + (tf.float32,) * 7 + (tf.string,)))
train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
reader, batch_size=4, window_size=32)
estimator.train(input_fn=train_input_fn, steps=training_steps)
evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
predict_exogenous_features = {
"2d_exogenous_feature": numpy.concatenate(
[numpy.ones([1, 100, 1]), numpy.zeros([1, 100, 1])],
axis=-1),
"categorical_exogenous_feature": numpy.array(
["strkey"] * 100)[None, :, None]}
(predictions,) = tuple(estimator.predict(
input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
evaluation, steps=100,
exogenous_features=predict_exogenous_features)))
times = evaluation["times"][0]
observed = evaluation["observed"][0, :, :]
predicted_mean = numpy.squeeze(numpy.concatenate(
[evaluation["mean"][0], predictions["mean"]], axis=0))
all_times = numpy.concatenate([times, predictions["times"]], axis=0)
# is the case when serving from a SavedModel. If Estimator output is
# available, the result of "Estimator.evaluate" can be passed directly to
# `tf.contrib.timeseries.saved_model_utils.predict_continuation` as the
# `continue_from` argument.
with tf.Graph().as_default():
filter_feature_tensors, _ = evaluation_input_fn()
with tf.train.MonitoredSession() as session:
# Fetch the series to "warm up" our state, which will allow us to make
# predictions for its future values. This is just a dictionary of times,
# values, and exogenous features mapping to numpy arrays. The use of an
# input_fn is just a convenience for the example; they can also be
# specified manually.
filter_features = session.run(filter_feature_tensors)
if export_directory is None:
export_directory = tempfile.mkdtemp()
input_receiver_fn = estimator.build_raw_serving_input_receiver_fn()
export_location = estimator.export_saved_model(export_directory,
input_receiver_fn)
# Warm up and predict using the SavedModel
with tf.Graph().as_default():
with tf.compat.v1.Session() as session:
signatures = tf.saved_model.loader.load(
session, [tf.saved_model.tag_constants.SERVING], export_location)
state = tf.contrib.timeseries.saved_model_utils.cold_start_filter(
signatures=signatures, session=session, features=filter_features)
saved_model_output = (
tf.contrib.timeseries.saved_model_utils.predict_continuation(
continue_from=state, signatures=signatures,
session=session, steps=100,
exogenous_features=predict_exogenous_features))
# The exported model gives the same results as the Estimator.predict()
# call above.
numpy.testing.assert_allclose(
predictions["mean"],
numpy.squeeze(saved_model_output["mean"], axis=0))
return times, observed, all_times, predicted_mean
def main(unused_argv):
if not HAS_MATPLOTLIB:
raise ImportError(
"Please install matplotlib to generate a plot from this example.")
(observed_times, observations,
all_times, predictions) = train_and_predict()
pyplot.axvline(99, linestyle="dotted")
observed_lines = pyplot.plot(
observed_times, observations, label="Observed", color="k")
predicted_lines = pyplot.plot(
all_times, predictions, label="Predicted", color="b")
pyplot.legend(handles=[observed_lines[0], predicted_lines[0]],
loc="upper left")
pyplot.show()
if __name__ == "__main__":
tf.compat.v1.app.run(main=main)
| true
| true
|
1c46bc0e536a5b58bd77d13f7adfafa098ff3d02
| 2,906
|
py
|
Python
|
initialExp/classifiers/iscx_naive_bayes.py
|
bakkerjarr/NetTrafClassificationExploration
|
66febafcbe4820851784ae72c50a49c28fa91df4
|
[
"Apache-2.0"
] | null | null | null |
initialExp/classifiers/iscx_naive_bayes.py
|
bakkerjarr/NetTrafClassificationExploration
|
66febafcbe4820851784ae72c50a49c28fa91df4
|
[
"Apache-2.0"
] | null | null | null |
initialExp/classifiers/iscx_naive_bayes.py
|
bakkerjarr/NetTrafClassificationExploration
|
66febafcbe4820851784ae72c50a49c28fa91df4
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 Jarrod N. Bakker
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from numpy import float32 as np_float
import numpy.core.multiarray as np_array
from sklearn.naive_bayes import GaussianNB
import iscx_result_calc as rc
__author__ = "Jarrod N. Bakker"
class NaiveBayesCls:
NAME = "Naive_Bayes"
def __init__(self, data, labels, skf):
"""Initialise.
:param data: Data set for the classifier to use.
:param labels: Labels indicating if a flow is normal or attack.
:param skf: StratifiedKFold object representing what data set
elements belong in each fold.
"""
self._data = data
self._labels = labels
self._kfold = skf
self._classifier = GaussianNB()
def classify(self):
"""Classify DDoS flows using Naive Bayes.
The data passed through to the fit() method cannot be a string
type.
:return: Results of the classification.
"""
all_results = [] # Results from all fold trials
fold_num = 1
for train, test in self._kfold:
print("\tTraining Naive Bayes...")
# NOTE: I have switched the training and testing set around.
train_array = np_array.array(map(self._data.__getitem__,
test)).astype(np_float)
train_label_array = np_array.array(map(
self._labels.__getitem__, test)).astype(np_float)
self._classifier.fit(train_array, train_label_array)
print("\tTesting classifier...")
test_array = np_array.array(map(self._data.__getitem__,
train)).astype(np_float)
test_label_array = np_array.array(map(
self._labels.__getitem__, train)).astype(np_float)
test_size = len(train) # Remember the switch of sets!
pred = self._classifier.predict(test_array)
mislabeled = (test_label_array != pred).sum()
tp, tn, fp, fn = rc.calculate_tpn_fpn(test_label_array, pred)
detection_rate = rc.detection_rate(tp, fn)
false_pos_rate = rc.false_positive_rate(tn, fp)
all_results.append([fold_num, tp, tn, fp, fn, detection_rate,
false_pos_rate, mislabeled, test_size])
fold_num += 1
return all_results
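# Usage sketch (hypothetical inputs; assumes Python 2 and an older
# scikit-learn where StratifiedKFold(labels, n_folds) is iterable and
# map() returns a list):
#
#     skf = StratifiedKFold(labels, n_folds=10)
#     results = NaiveBayesCls(data, labels, skf).classify()
#     # each row: [fold, tp, tn, fp, fn, detection_rate,
#     #            false_positive_rate, mislabeled, test_size]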
| 38.746667
| 74
| 0.63627
|
from numpy import float32 as np_float
import numpy.core.multiarray as np_array
from sklearn.naive_bayes import GaussianNB
import iscx_result_calc as rc
__author__ = "Jarrod N. Bakker"
class NaiveBayesCls:
NAME = "Naive_Bayes"
def __init__(self, data, labels, skf):
self._data = data
self._labels = labels
self._kfold = skf
self._classifier = GaussianNB()
def classify(self):
all_results = []
fold_num = 1
for train, test in self._kfold:
print("\tTraining Naive Bayes...")
train_array = np_array.array(map(self._data.__getitem__,
test)).astype(np_float)
train_label_array = np_array.array(map(
self._labels.__getitem__, test)).astype(np_float)
self._classifier.fit(train_array, train_label_array)
print("\tTesting classifier...")
test_array = np_array.array(map(self._data.__getitem__,
train)).astype(np_float)
test_label_array = np_array.array(map(
self._labels.__getitem__, train)).astype(np_float)
test_size = len(train)
pred = self._classifier.predict(test_array)
mislabeled = (test_label_array != pred).sum()
tp, tn, fp, fn = rc.calculate_tpn_fpn(test_label_array, pred)
detection_rate = rc.detection_rate(tp, fn)
false_pos_rate = rc.false_positive_rate(tn, fp)
all_results.append([fold_num, tp, tn, fp, fn, detection_rate,
false_pos_rate, mislabeled, test_size])
fold_num += 1
return all_results
| true
| true
|
1c46bc96f2ea4fe428bfdde14733d08a8f455696
| 84,164
|
py
|
Python
|
core/domain/state_domain.py
|
SamriddhiMishra/oppia
|
9f239ce13c11e60e64ca7c04726a55755231d530
|
[
"Apache-2.0"
] | null | null | null |
core/domain/state_domain.py
|
SamriddhiMishra/oppia
|
9f239ce13c11e60e64ca7c04726a55755231d530
|
[
"Apache-2.0"
] | null | null | null |
core/domain/state_domain.py
|
SamriddhiMishra/oppia
|
9f239ce13c11e60e64ca7c04726a55755231d530
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain object for states and their constituents."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import collections
import copy
import logging
from constants import constants
from core.domain import customization_args_util
from core.domain import html_cleaner
from core.domain import interaction_registry
from core.domain import param_domain
import feconf
import python_utils
import utils
class AnswerGroup(python_utils.OBJECT):
"""Value object for an answer group. Answer groups represent a set of rules
    dictating whether shared feedback should be shown to the user. These
    rules are ORed together. Answer groups may also support a classifier
    that involves soft matching of answers to a set of training data and/or
example answers dictated by the creator.
"""
def to_dict(self):
"""Returns a dict representing this AnswerGroup domain object.
Returns:
dict. A dict, mapping all fields of AnswerGroup instance.
"""
return {
'rule_specs': [rule_spec.to_dict()
for rule_spec in self.rule_specs],
'outcome': self.outcome.to_dict(),
'training_data': self.training_data,
'tagged_skill_misconception_id': self.tagged_skill_misconception_id
}
@classmethod
def from_dict(cls, answer_group_dict):
"""Return a AnswerGroup domain object from a dict.
Args:
answer_group_dict: dict. The dict representation of AnswerGroup
object.
Returns:
AnswerGroup. The corresponding AnswerGroup domain object.
"""
return cls(
Outcome.from_dict(answer_group_dict['outcome']),
[RuleSpec.from_dict(rs) for rs in answer_group_dict['rule_specs']],
answer_group_dict['training_data'],
answer_group_dict['tagged_skill_misconception_id']
)
def __init__(
self, outcome, rule_specs, training_data,
tagged_skill_misconception_id):
"""Initializes a AnswerGroup domain object.
Args:
outcome: Outcome. The outcome corresponding to the answer group.
rule_specs: list(RuleSpec). List of rule specifications.
training_data: list(*). List of answers belonging to training
data of this answer group.
tagged_skill_misconception_id: str or None. The format is
'<skill_id>-<misconception_id>', where skill_id is the skill ID
of the tagged misconception and misconception_id is the id of
the tagged misconception for the answer group. It is not None
only when a state is part of a Question object that
tests a particular skill.
"""
self.rule_specs = [RuleSpec(
rule_spec.rule_type, rule_spec.inputs
) for rule_spec in rule_specs]
self.outcome = outcome
self.training_data = training_data
self.tagged_skill_misconception_id = tagged_skill_misconception_id
def validate(self, interaction, exp_param_specs_dict):
"""Verifies that all rule classes are valid, and that the AnswerGroup
only has one classifier rule.
Args:
interaction: InteractionInstance. The interaction object.
exp_param_specs_dict: dict. A dict of all parameters used in the
exploration. Keys are parameter names and values are ParamSpec
value objects with an object type property (obj_type).
Raises:
ValidationError: One or more attributes of the AnswerGroup are
invalid.
ValidationError: The AnswerGroup contains more than one classifier
rule.
"""
if not isinstance(self.rule_specs, list):
raise utils.ValidationError(
'Expected answer group rules to be a list, received %s'
% self.rule_specs)
if self.tagged_skill_misconception_id is not None:
if not isinstance(
self.tagged_skill_misconception_id,
python_utils.BASESTRING):
raise utils.ValidationError(
'Expected tagged skill misconception id to be a str, '
'received %s' % self.tagged_skill_misconception_id)
if self.tagged_skill_misconception_id.count('-') != 1:
raise utils.ValidationError(
'Expected the format of tagged skill misconception id '
'to be <skill_id>-<misconception_id>, received %s'
% self.tagged_skill_misconception_id)
if len(self.rule_specs) == 0 and len(self.training_data) == 0:
raise utils.ValidationError(
'There must be at least one rule or training data for each'
' answer group.')
for rule_spec in self.rule_specs:
if rule_spec.rule_type not in interaction.rules_dict:
raise utils.ValidationError(
'Unrecognized rule type: %s' % rule_spec.rule_type)
rule_spec.validate(
interaction.get_rule_param_list(rule_spec.rule_type),
exp_param_specs_dict)
self.outcome.validate()
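# For illustration, a round-trippable AnswerGroup dict (hypothetical
# values) matching to_dict()/from_dict() above:
#
#     {'rule_specs': [{'rule_type': 'Equals', 'inputs': {'x': 'cat'}}],
#      'outcome': <outcome dict>,
#      'training_data': [],
#      'tagged_skill_misconception_id': 'skill1-4'}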
class Hint(python_utils.OBJECT):
"""Value object representing a hint."""
def __init__(self, hint_content):
"""Constructs a Hint domain object.
Args:
hint_content: SubtitledHtml. The hint text and ID referring to the
other assets for this content.
"""
self.hint_content = hint_content
def to_dict(self):
"""Returns a dict representing this Hint domain object.
Returns:
dict. A dict mapping the field of Hint instance.
"""
return {
'hint_content': self.hint_content.to_dict(),
}
@classmethod
def from_dict(cls, hint_dict):
"""Return a Hint domain object from a dict.
Args:
hint_dict: dict. The dict representation of Hint object.
Returns:
Hint. The corresponding Hint domain object.
"""
return cls(SubtitledHtml.from_dict(hint_dict['hint_content']))
def validate(self):
"""Validates all properties of Hint."""
self.hint_content.validate()
class Solution(python_utils.OBJECT):
"""Value object representing a solution.
A solution consists of answer_is_exclusive, correct_answer and an
    explanation. When answer_is_exclusive is True, this indicates that it is
the only correct answer; when it is False, this indicates that it is one
possible answer. correct_answer records an answer that enables the learner
to progress to the next card and explanation is an HTML string containing
an explanation for the solution.
"""
def __init__(
self, interaction_id, answer_is_exclusive,
correct_answer, explanation):
"""Constructs a Solution domain object.
Args:
interaction_id: str. The interaction id.
            answer_is_exclusive: bool. True if it is the only correct
                answer; False if it is one of several possible answers.
correct_answer: str. The correct answer; this answer enables the
learner to progress to the next card.
explanation: SubtitledHtml. Contains text and text id to link audio
translations for the solution's explanation.
"""
self.answer_is_exclusive = answer_is_exclusive
self.correct_answer = (
interaction_registry.Registry.get_interaction_by_id(
interaction_id).normalize_answer(correct_answer))
self.explanation = explanation
def to_dict(self):
"""Returns a dict representing this Solution domain object.
Returns:
dict. A dict mapping all fields of Solution instance.
"""
return {
'answer_is_exclusive': self.answer_is_exclusive,
'correct_answer': self.correct_answer,
'explanation': self.explanation.to_dict(),
}
@classmethod
def from_dict(cls, interaction_id, solution_dict):
"""Return a Solution domain object from a dict.
Args:
interaction_id: str. The interaction id.
solution_dict: dict. The dict representation of Solution object.
Returns:
Solution. The corresponding Solution domain object.
"""
return cls(
interaction_id,
solution_dict['answer_is_exclusive'],
interaction_registry.Registry.get_interaction_by_id(
interaction_id).normalize_answer(
solution_dict['correct_answer']),
SubtitledHtml.from_dict(solution_dict['explanation']))
def validate(self, interaction_id):
"""Validates all properties of Solution.
Args:
interaction_id: str. The interaction id.
Raises:
ValidationError: One or more attributes of the Solution are not
valid.
"""
if not isinstance(self.answer_is_exclusive, bool):
raise utils.ValidationError(
'Expected answer_is_exclusive to be bool, received %s' %
self.answer_is_exclusive)
interaction_registry.Registry.get_interaction_by_id(
interaction_id).normalize_answer(self.correct_answer)
self.explanation.validate()
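# For illustration, a Solution dict for a TextInput interaction might look
# like this (hypothetical values):
#
#     {'answer_is_exclusive': True,
#      'correct_answer': 'cat',
#      'explanation': {'content_id': 'solution',
#                      'html': '<p>It purrs.</p>'}}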
class InteractionInstance(python_utils.OBJECT):
"""Value object for an instance of an interaction."""
# The default interaction used for a new state.
_DEFAULT_INTERACTION_ID = None
def to_dict(self):
"""Returns a dict representing this InteractionInstance domain object.
Returns:
dict. A dict mapping all fields of InteractionInstance instance.
"""
return {
'id': self.id,
'customization_args': (
{} if self.id is None else
customization_args_util.get_full_customization_args(
self.customization_args,
interaction_registry.Registry.get_interaction_by_id(
self.id).customization_arg_specs)),
'answer_groups': [group.to_dict() for group in self.answer_groups],
'default_outcome': (
self.default_outcome.to_dict()
if self.default_outcome is not None
else None),
'confirmed_unclassified_answers': (
self.confirmed_unclassified_answers),
'hints': [hint.to_dict() for hint in self.hints],
'solution': self.solution.to_dict() if self.solution else None,
}
@classmethod
def from_dict(cls, interaction_dict):
"""Return a InteractionInstance domain object from a dict.
Args:
interaction_dict: dict. The dict representation of
InteractionInstance object.
Returns:
InteractionInstance. The corresponding InteractionInstance domain
object.
"""
default_outcome_dict = (
Outcome.from_dict(interaction_dict['default_outcome'])
if interaction_dict['default_outcome'] is not None else None)
solution_dict = (
Solution.from_dict(
interaction_dict['id'], interaction_dict['solution'])
if (interaction_dict['solution'] and interaction_dict['id'])
else None)
return cls(
interaction_dict['id'],
interaction_dict['customization_args'],
[AnswerGroup.from_dict(h)
for h in interaction_dict['answer_groups']],
default_outcome_dict,
interaction_dict['confirmed_unclassified_answers'],
[Hint.from_dict(h) for h in interaction_dict['hints']],
solution_dict)
def __init__(
self, interaction_id, customization_args, answer_groups,
default_outcome, confirmed_unclassified_answers, hints, solution):
"""Initializes a InteractionInstance domain object.
Args:
interaction_id: str. The interaction id.
customization_args: dict. The customization dict. The keys are
names of customization_args and the values are dicts with a
single key, 'value', whose corresponding value is the value of
the customization arg.
answer_groups: list(AnswerGroup). List of answer groups of the
interaction instance.
default_outcome: Outcome. The default outcome of the interaction
instance.
confirmed_unclassified_answers: list(AnswerGroup). List of answers
which have been confirmed to be associated with the default
outcome.
hints: list(Hint). List of hints for this interaction.
solution: Solution. A possible solution for the question asked in
this interaction.
"""
self.id = interaction_id
# Customization args for the interaction's view. Parts of these
# args may be Jinja templates that refer to state parameters.
# This is a dict: the keys are names of customization_args and the
# values are dicts with a single key, 'value', whose corresponding
# value is the value of the customization arg.
self.customization_args = customization_args
self.answer_groups = answer_groups
self.default_outcome = default_outcome
self.confirmed_unclassified_answers = confirmed_unclassified_answers
self.hints = hints
self.solution = solution
@property
def is_terminal(self):
"""Determines if this interaction type is terminal. If no ID is set for
this interaction, it is assumed to not be terminal.
Returns:
bool. Whether the interaction is terminal.
"""
return self.id and interaction_registry.Registry.get_interaction_by_id(
self.id).is_terminal
def get_all_outcomes(self):
"""Returns a list of all outcomes of this interaction, taking into
consideration every answer group and the default outcome.
Returns:
list(Outcome). List of all outcomes of this interaction.
"""
outcomes = []
for answer_group in self.answer_groups:
outcomes.append(answer_group.outcome)
if self.default_outcome is not None:
outcomes.append(self.default_outcome)
return outcomes
def validate(self, exp_param_specs_dict):
"""Validates various properties of the InteractionInstance.
Args:
exp_param_specs_dict: dict. A dict of specified parameters used in
the exploration. Keys are parameter names and values are
                ParamSpec value objects with an object type property
                (obj_type). It is used to validate AnswerGroup objects.
Raises:
ValidationError: One or more attributes of the InteractionInstance
are invalid.
"""
if not isinstance(self.id, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected interaction id to be a string, received %s' %
self.id)
try:
interaction = interaction_registry.Registry.get_interaction_by_id(
self.id)
except KeyError:
raise utils.ValidationError('Invalid interaction id: %s' % self.id)
customization_args_util.validate_customization_args_and_values(
'interaction', self.id, self.customization_args,
interaction.customization_arg_specs)
if not isinstance(self.answer_groups, list):
raise utils.ValidationError(
'Expected answer groups to be a list, received %s.'
% self.answer_groups)
if not self.is_terminal and self.default_outcome is None:
raise utils.ValidationError(
'Non-terminal interactions must have a default outcome.')
if self.is_terminal and self.default_outcome is not None:
raise utils.ValidationError(
'Terminal interactions must not have a default outcome.')
if self.is_terminal and self.answer_groups:
raise utils.ValidationError(
'Terminal interactions must not have any answer groups.')
for answer_group in self.answer_groups:
answer_group.validate(interaction, exp_param_specs_dict)
if self.default_outcome is not None:
self.default_outcome.validate()
if not isinstance(self.hints, list):
raise utils.ValidationError(
'Expected hints to be a list, received %s'
% self.hints)
for hint in self.hints:
hint.validate()
if self.solution:
self.solution.validate(self.id)
if self.solution and not self.hints:
raise utils.ValidationError(
'Hint(s) must be specified if solution is specified')
@classmethod
def create_default_interaction(cls, default_dest_state_name):
"""Create a default InteractionInstance domain object:
- customization_args: empty dictionary;
- answer_groups: empty list;
- default_outcome: dest is set to 'default_dest_state_name' and
feedback and param_changes are initialized as empty lists;
- confirmed_unclassified_answers: empty list;
Args:
default_dest_state_name: str. The default destination state.
Returns:
InteractionInstance. The corresponding InteractionInstance domain
object with default values.
"""
default_outcome = Outcome(
default_dest_state_name,
SubtitledHtml.create_default_subtitled_html(
feconf.DEFAULT_OUTCOME_CONTENT_ID), False, {}, None, None)
return cls(
cls._DEFAULT_INTERACTION_ID, {}, [], default_outcome, [], [], {})
def get_all_html_content_strings(self):
"""Get all html content strings in the interaction.
Returns:
list(str): The list of all html content strings in the interaction.
"""
html_list = []
for answer_group in self.answer_groups:
outcome_html = answer_group.outcome.feedback.html
html_list = html_list + [outcome_html]
# Note that ItemSelectionInput replicates the customization arg HTML
# in its answer groups.
if self.id == 'ItemSelectionInput':
for answer_group in self.answer_groups:
for rule_spec in answer_group.rule_specs:
rule_spec_html = rule_spec.inputs['x']
html_list = html_list + rule_spec_html
if self.id == 'DragAndDropSortInput':
for answer_group in self.answer_groups:
for rule_spec in answer_group.rule_specs:
rule_spec_html_list = rule_spec.inputs['x']
for rule_spec_html in rule_spec_html_list:
html_list = html_list + rule_spec_html
if self.default_outcome:
default_outcome_html = self.default_outcome.feedback.html
html_list = html_list + [default_outcome_html]
for hint in self.hints:
hint_html = hint.hint_content.html
html_list = html_list + [hint_html]
if self.solution:
solution_html = self.solution.explanation.html
html_list = html_list + [solution_html]
if self.id in (
'ItemSelectionInput', 'MultipleChoiceInput',
'DragAndDropSortInput'):
customization_args_html_list = (
self.customization_args['choices']['value'])
html_list = html_list + customization_args_html_list
return html_list
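# Illustrative sketch (not part of the original file): a default
# interaction has no id yet, so it only carries a default outcome that
# points at the given destination state. The state name 'Introduction'
# is hypothetical.
def _example_default_interaction():
    interaction = InteractionInstance.create_default_interaction(
        'Introduction')
    # No answer groups, hints or solution are set on a default
    # interaction, so the only outcome is the default one.
    return interaction.get_all_outcomes()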
class Outcome(python_utils.OBJECT):
"""Value object representing an outcome of an interaction. An outcome
consists of a destination state, feedback to show the user, and any
parameter changes.
"""
def to_dict(self):
"""Returns a dict representing this Outcome domain object.
Returns:
dict. A dict, mapping all fields of Outcome instance.
"""
return {
'dest': self.dest,
'feedback': self.feedback.to_dict(),
'labelled_as_correct': self.labelled_as_correct,
'param_changes': [
param_change.to_dict() for param_change in self.param_changes],
'refresher_exploration_id': self.refresher_exploration_id,
'missing_prerequisite_skill_id': self.missing_prerequisite_skill_id
}
@classmethod
def from_dict(cls, outcome_dict):
"""Return a Outcome domain object from a dict.
Args:
outcome_dict: dict. The dict representation of Outcome object.
Returns:
Outcome. The corresponding Outcome domain object.
"""
return cls(
outcome_dict['dest'],
SubtitledHtml.from_dict(outcome_dict['feedback']),
outcome_dict['labelled_as_correct'],
[param_domain.ParamChange(
param_change['name'], param_change['generator_id'],
param_change['customization_args'])
for param_change in outcome_dict['param_changes']],
outcome_dict['refresher_exploration_id'],
outcome_dict['missing_prerequisite_skill_id']
)
def __init__(
self, dest, feedback, labelled_as_correct, param_changes,
refresher_exploration_id, missing_prerequisite_skill_id):
"""Initializes a Outcome domain object.
Args:
dest: str. The name of the destination state.
feedback: SubtitledHtml. Feedback to give to the user if this rule
is triggered.
labelled_as_correct: bool. Whether this outcome has been labelled
by the creator as corresponding to a "correct" answer.
param_changes: list(ParamChange). List of exploration-level
parameter changes to make if this rule is triggered.
refresher_exploration_id: str or None. An optional exploration ID
to redirect the learner to if they seem to lack understanding
of a prerequisite concept. This should only exist if the
destination state for this outcome is a self-loop.
missing_prerequisite_skill_id: str or None. The id of the skill that
this answer group tests. If this is not None, the exploration
player would redirect to this skill when a learner receives this
outcome.
"""
# Id of the destination state.
# TODO(sll): Check that this state actually exists.
self.dest = dest
# Feedback to give the reader if this rule is triggered.
self.feedback = feedback
# Whether this outcome has been labelled by the creator as
# corresponding to a "correct" answer.
self.labelled_as_correct = labelled_as_correct
# Exploration-level parameter changes to make if this rule is
# triggered.
self.param_changes = param_changes or []
# An optional exploration ID to redirect the learner to if they lack
# understanding of a prerequisite concept. This should only exist if
# the destination state for this outcome is a self-loop.
self.refresher_exploration_id = refresher_exploration_id
# An optional skill id whose concept card would be shown to the learner
# when the learner receives this outcome.
self.missing_prerequisite_skill_id = missing_prerequisite_skill_id
def validate(self):
"""Validates various properties of the Outcome.
Raises:
ValidationError: One or more attributes of the Outcome are invalid.
"""
self.feedback.validate()
if not isinstance(self.labelled_as_correct, bool):
raise utils.ValidationError(
'The "labelled_as_correct" field should be a boolean, received '
'%s' % self.labelled_as_correct)
if self.missing_prerequisite_skill_id is not None:
if not isinstance(
self.missing_prerequisite_skill_id,
python_utils.BASESTRING):
raise utils.ValidationError(
'Expected outcome missing_prerequisite_skill_id to be a '
'string, received %s' % self.missing_prerequisite_skill_id)
if not isinstance(self.param_changes, list):
raise utils.ValidationError(
'Expected outcome param_changes to be a list, received %s'
% self.param_changes)
for param_change in self.param_changes:
param_change.validate()
if self.refresher_exploration_id is not None:
if not isinstance(
self.refresher_exploration_id, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected outcome refresher_exploration_id to be a string, '
'received %s' % self.refresher_exploration_id)
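# Illustrative sketch (not part of the original file): a minimal Outcome
# built from its dict form and then validated. The destination state
# name and the feedback content id are hypothetical.
def _example_outcome_round_trip():
    outcome = Outcome.from_dict({
        'dest': 'Introduction',
        'feedback': {'content_id': 'feedback_1', 'html': '<p>Correct!</p>'},
        'labelled_as_correct': True,
        'param_changes': [],
        'refresher_exploration_id': None,
        'missing_prerequisite_skill_id': None,
    })
    outcome.validate()
    return outcome.to_dict()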
class Voiceover(python_utils.OBJECT):
"""Value object representing an voiceover."""
def to_dict(self):
"""Returns a dict representing this Voiceover domain object.
Returns:
dict. A dict, mapping all fields of Voiceover instance.
"""
return {
'filename': self.filename,
'file_size_bytes': self.file_size_bytes,
'needs_update': self.needs_update,
}
@classmethod
def from_dict(cls, voiceover_dict):
"""Return a Voiceover domain object from a dict.
Args:
voiceover_dict: dict. The dict representation of
Voiceover object.
Returns:
Voiceover. The corresponding Voiceover domain object.
"""
return cls(
voiceover_dict['filename'],
voiceover_dict['file_size_bytes'],
voiceover_dict['needs_update'])
def __init__(self, filename, file_size_bytes, needs_update):
"""Initializes a Voiceover domain object.
Args:
filename: str. The corresponding voiceover file path.
file_size_bytes: int. The file size, in bytes. Used to display
potential bandwidth usage to the learner before they download
the file.
needs_update: bool. Whether voiceover is marked for needing review.
"""
# str. The corresponding audio file path, e.g.
# "content-en-2-h7sjp8s.mp3".
self.filename = filename
# int. The file size, in bytes. Used to display potential bandwidth
# usage to the learner before they download the file.
self.file_size_bytes = file_size_bytes
# bool. Whether audio is marked for needing review.
self.needs_update = needs_update
def validate(self):
"""Validates properties of the Voiceover.
Raises:
ValidationError: One or more attributes of the Voiceover are
invalid.
"""
if not isinstance(self.filename, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected audio filename to be a string, received %s' %
self.filename)
dot_index = self.filename.rfind('.')
if dot_index == -1 or dot_index == 0:
raise utils.ValidationError(
'Invalid audio filename: %s' % self.filename)
extension = self.filename[dot_index + 1:]
if extension not in feconf.ACCEPTED_AUDIO_EXTENSIONS:
raise utils.ValidationError(
'Invalid audio filename: it should have one of '
'the following extensions: %s. Received: %s'
% (list(feconf.ACCEPTED_AUDIO_EXTENSIONS.keys()),
self.filename))
if not isinstance(self.file_size_bytes, int):
raise utils.ValidationError(
'Expected file size to be an int, received %s' %
self.file_size_bytes)
if self.file_size_bytes <= 0:
raise utils.ValidationError(
'Invalid file size: %s' % self.file_size_bytes)
if not isinstance(self.needs_update, bool):
raise utils.ValidationError(
'Expected needs_update to be a bool, received %s' %
self.needs_update)
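# Illustrative sketch (not part of the original file): a Voiceover that
# passes validation must use an accepted audio extension (mp3 is assumed
# here; see feconf.ACCEPTED_AUDIO_EXTENSIONS) and a positive file size.
# The filename is hypothetical.
def _example_voiceover():
    voiceover = Voiceover('content-en-2-h7sjp8s.mp3', 53000, False)
    voiceover.validate()
    return voiceover.to_dict()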
class WrittenTranslation(python_utils.OBJECT):
"""Value object representing a written translation for a content."""
def __init__(self, html, needs_update):
"""Initializes a WrittenTranslation domain object.
Args:
html: str. A piece of user submitted HTML. This is cleaned in such
a way as to contain a restricted set of HTML tags.
needs_update: bool. Whether html is marked for needing review.
"""
self.html = html_cleaner.clean(html)
self.needs_update = needs_update
def to_dict(self):
"""Returns a dict representing this WrittenTranslation domain object.
Returns:
dict. A dict, mapping all fields of WrittenTranslation instance.
"""
return {
'html': self.html,
'needs_update': self.needs_update,
}
@classmethod
def from_dict(cls, written_translation_dict):
"""Return a WrittenTranslation domain object from a dict.
Args:
written_translation_dict: dict. The dict representation of
WrittenTranslation object.
Returns:
WrittenTranslation. The corresponding WrittenTranslation domain
object.
"""
return cls(
written_translation_dict['html'],
written_translation_dict['needs_update'])
def validate(self):
"""Validates properties of the WrittenTranslation.
Raises:
ValidationError: One or more attributes of the WrittenTranslation
are invalid.
"""
if not isinstance(self.html, python_utils.BASESTRING):
raise utils.ValidationError(
'Invalid content HTML: %s' % self.html)
if not isinstance(self.needs_update, bool):
raise utils.ValidationError(
'Expected needs_update to be a bool, received %s' %
self.needs_update)
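# Illustrative sketch (not part of the original file): a
# WrittenTranslation simply pairs a piece of cleaned html with a
# needs_update flag.
def _example_written_translation():
    translation = WrittenTranslation('<p>Bonjour</p>', False)
    translation.validate()
    return translation.to_dict()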
class WrittenTranslations(python_utils.OBJECT):
"""Value object representing a content translations which stores
translated contents of all state contents (like hints, feedback etc.) in
different languages linked through their content_id.
"""
def __init__(self, translations_mapping):
"""Initializes a WrittenTranslations domain object."""
self.translations_mapping = translations_mapping
def to_dict(self):
"""Returns a dict representing this WrittenTranslations domain object.
Returns:
dict. A dict, mapping all fields of WrittenTranslations instance.
"""
translations_mapping = {}
for (content_id, language_code_to_written_translation) in (
self.translations_mapping.items()):
translations_mapping[content_id] = {}
for (language_code, written_translation) in (
language_code_to_written_translation.items()):
translations_mapping[content_id][language_code] = (
written_translation.to_dict())
written_translations_dict = {
'translations_mapping': translations_mapping
}
return written_translations_dict
@classmethod
def from_dict(cls, written_translations_dict):
"""Return a WrittenTranslations domain object from a dict.
Args:
written_translations_dict: dict. The dict representation of
WrittenTranslations object.
Returns:
WrittenTranslations. The corresponding WrittenTranslations domain
object.
"""
translations_mapping = {}
for (content_id, language_code_to_written_translation) in (
written_translations_dict['translations_mapping'].items()):
translations_mapping[content_id] = {}
for (language_code, written_translation) in (
language_code_to_written_translation.items()):
translations_mapping[content_id][language_code] = (
WrittenTranslation.from_dict(written_translation))
return cls(translations_mapping)
def get_content_ids_that_are_correctly_translated(self, language_code):
"""Returns a list of content ids in which a correct translation is
available in the given language.
Args:
language_code: str. The abbreviated code of the language.
Return:
list(str). A list of content ids in which the translations are
available in the given language.
"""
correctly_translated_content_ids = []
for content_id, translations in self.translations_mapping.items():
if language_code in translations and not (
translations[language_code].needs_update):
correctly_translated_content_ids.append(content_id)
return correctly_translated_content_ids
def add_translation(self, content_id, language_code, html):
"""Adds a translation for the given content id in a given language.
Args:
content_id: str. The id of the content.
language_code: str. The language code of the translated html.
html: str. The translated html.
"""
written_translation = WrittenTranslation(html, False)
self.translations_mapping[content_id][language_code] = (
written_translation)
def validate(self, expected_content_id_list):
"""Validates properties of the WrittenTranslations.
Args:
expected_content_id_list: A list of content ids which are expected to
be inside the WrittenTranslations.
Raises:
ValidationError: One or more attributes of the WrittenTranslations
are invalid.
"""
if expected_content_id_list is not None:
if not set(self.translations_mapping.keys()) == (
set(expected_content_id_list)):
raise utils.ValidationError(
'Expected state written_translations to match the listed '
'content ids %s, found %s' % (
expected_content_id_list,
list(self.translations_mapping.keys()))
)
for (content_id, language_code_to_written_translation) in (
self.translations_mapping.items()):
if not isinstance(content_id, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected content_id to be a string, received %s'
% content_id)
if not isinstance(language_code_to_written_translation, dict):
raise utils.ValidationError(
'Expected content_id value to be a dict, received %s'
% language_code_to_written_translation)
for (language_code, written_translation) in (
language_code_to_written_translation.items()):
if not isinstance(language_code, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected language_code to be a string, received %s'
% language_code)
# Currently, we assume written translations are used by the
# voice-artist to voiceover the translated text so written
# translations can be in supported audio/voiceover languages.
allowed_language_codes = [language['id'] for language in (
constants.SUPPORTED_AUDIO_LANGUAGES)]
if language_code not in allowed_language_codes:
raise utils.ValidationError(
'Invalid language_code: %s' % language_code)
written_translation.validate()
def get_content_ids_for_text_translation(self):
"""Returns a list of content_id available for text translation.
Returns:
list(str). A list of content id available for text translation.
"""
return list(self.translations_mapping.keys())
def get_translated_content(self, content_id, language_code):
"""Returns the translated content for the given content_id in the given
language.
Args:
content_id: str. The ID of the content.
language_code: str. The language code for the translated content.
Returns:
str. The translated content for a given content id in a language.
Raises:
Exception: Translation doesn't exist in the given language.
Exception: The given content id doesn't exist.
"""
if content_id in self.translations_mapping:
if language_code in self.translations_mapping[content_id]:
return self.translations_mapping[content_id][language_code].html
else:
raise Exception(
'Translation for the given content_id %s does not exist in '
'%s language code' % (content_id, language_code))
else:
raise Exception('Invalid content_id: %s' % content_id)
def add_content_id_for_translation(self, content_id):
"""Adds a content id as a key for the translation into the
content_translation dict.
Args:
content_id: str. The id representing a subtitled html.
Raises:
Exception: The content id isn't a string.
"""
if not isinstance(content_id, python_utils.BASESTRING):
raise Exception(
'Expected content_id to be a string, received %s' % content_id)
if content_id in self.translations_mapping:
raise Exception(
'The content_id %s already exists.' % content_id)
else:
self.translations_mapping[content_id] = {}
def delete_content_id_for_translation(self, content_id):
"""Deletes a content id from the content_translation dict.
Args:
content_id: str. The id representing a subtitled html.
Raises:
Exception: The content id isn't a string.
"""
if not isinstance(content_id, python_utils.BASESTRING):
raise Exception(
'Expected content_id to be a string, received %s' % content_id)
if content_id not in self.translations_mapping:
raise Exception(
'The content_id %s does not exist.' % content_id)
else:
self.translations_mapping.pop(content_id, None)
def get_translation_counts(self):
"""Return a dict representing the number of translation available in a
languages in which there exist at least one translation in the
WrittenTranslation object.
Returns:
dict(str, int). A dict with language code as a key and number of
translation available in that language as the value.
"""
translation_counts = collections.defaultdict(int)
for translations in self.translations_mapping.values():
for language, translation in translations.items():
if not translation.needs_update:
translation_counts[language] += 1
return translation_counts
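# Illustrative sketch (not part of the original file): content ids must
# be registered before translations can be added against them, and the
# counts only include translations that do not need an update. The
# content id and the language code 'fr' are hypothetical; validation
# would additionally require 'fr' to be a supported audio language.
def _example_written_translations():
    translations = WrittenTranslations.from_dict(
        {'translations_mapping': {}})
    translations.add_content_id_for_translation('content')
    translations.add_translation('content', 'fr', '<p>Bonjour</p>')
    return translations.get_translation_counts()  # e.g. {'fr': 1}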
class RecordedVoiceovers(python_utils.OBJECT):
"""Value object representing a recorded voiceovers which stores voiceover of
all state contents (like hints, feedback etc.) in different languages linked
through their content_id.
"""
def __init__(self, voiceovers_mapping):
"""Initializes a RecordedVoiceovers domain object."""
self.voiceovers_mapping = voiceovers_mapping
def to_dict(self):
"""Returns a dict representing this RecordedVoiceovers domain object.
Returns:
dict. A dict, mapping all fields of RecordedVoiceovers instance.
"""
voiceovers_mapping = {}
for (content_id, language_code_to_voiceover) in (
self.voiceovers_mapping.items()):
voiceovers_mapping[content_id] = {}
for (language_code, voiceover) in (
language_code_to_voiceover.items()):
voiceovers_mapping[content_id][language_code] = (
voiceover.to_dict())
recorded_voiceovers_dict = {
'voiceovers_mapping': voiceovers_mapping
}
return recorded_voiceovers_dict
@classmethod
def from_dict(cls, recorded_voiceovers_dict):
"""Return a RecordedVoiceovers domain object from a dict.
Args:
recorded_voiceovers_dict: dict. The dict representation of
RecordedVoiceovers object.
Returns:
RecordedVoiceovers. The corresponding RecordedVoiceovers domain
object.
"""
voiceovers_mapping = {}
for (content_id, language_code_to_voiceover) in (
recorded_voiceovers_dict['voiceovers_mapping'].items()):
voiceovers_mapping[content_id] = {}
for (language_code, voiceover) in (
language_code_to_voiceover.items()):
voiceovers_mapping[content_id][language_code] = (
Voiceover.from_dict(voiceover))
return cls(voiceovers_mapping)
def validate(self, expected_content_id_list):
"""Validates properties of the RecordedVoiceovers.
Args:
expected_content_id_list: A list of content ids which are expected to
be inside the RecordedVoiceovers.
Raises:
ValidationError: One or more attributes of the RecordedVoiceovers
are invalid.
"""
if expected_content_id_list is not None:
if not set(self.voiceovers_mapping.keys()) == (
set(expected_content_id_list)):
raise utils.ValidationError(
'Expected state recorded_voiceovers to match the listed '
'content ids %s, found %s' % (
expected_content_id_list,
list(self.voiceovers_mapping.keys()))
)
for (content_id, language_code_to_voiceover) in (
self.voiceovers_mapping.items()):
if not isinstance(content_id, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected content_id to be a string, received %s'
% content_id)
if not isinstance(language_code_to_voiceover, dict):
raise utils.ValidationError(
'Expected content_id value to be a dict, received %s'
% language_code_to_voiceover)
for (language_code, voiceover) in (
language_code_to_voiceover.items()):
if not isinstance(language_code, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected language_code to be a string, received %s'
% language_code)
allowed_language_codes = [language['id'] for language in (
constants.SUPPORTED_AUDIO_LANGUAGES)]
if language_code not in allowed_language_codes:
raise utils.ValidationError(
'Invalid language_code: %s' % language_code)
voiceover.validate()
def get_content_ids_for_voiceovers(self):
"""Returns a list of content_id available for voiceover.
Returns:
list(str). A list of content id available for voiceover.
"""
return list(self.voiceovers_mapping.keys())
def strip_all_existing_voiceovers(self):
"""Strips all existing voiceovers from the voiceovers_mapping."""
for content_id in self.voiceovers_mapping.keys():
self.voiceovers_mapping[content_id] = {}
def add_content_id_for_voiceover(self, content_id):
"""Adds a content id as a key for the voiceover into the
voiceovers_mapping dict.
Args:
content_id: str. The id representing a subtitled html.
Raises:
Exception: The content id isn't a string.
Exception: The content id already exists in the voiceovers_mapping
dict.
"""
if not isinstance(content_id, python_utils.BASESTRING):
raise Exception(
'Expected content_id to be a string, received %s' % content_id)
if content_id in self.voiceovers_mapping:
raise Exception(
'The content_id %s already exists.' % content_id)
self.voiceovers_mapping[content_id] = {}
def delete_content_id_for_voiceover(self, content_id):
"""Deletes a content id from the voiceovers_mapping dict.
Args:
content_id: str. The id representing a subtitled html.
Raises:
Exception: The content id isn't a string.
Exception: The content id does not exist in the voiceovers_mapping
dict.
"""
if not isinstance(content_id, python_utils.BASESTRING):
raise Exception(
'Expected content_id to be a string, received %s' % content_id)
if content_id not in self.voiceovers_mapping:
raise Exception(
'The content_id %s does not exist.' % content_id)
else:
self.voiceovers_mapping.pop(content_id, None)
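# Illustrative sketch (not part of the original file): the voiceovers
# mapping is keyed by content id first, then by language code, and
# deleting a content id drops every voiceover recorded for it. The
# content id, language code and filename here are hypothetical.
def _example_recorded_voiceovers():
    voiceovers = RecordedVoiceovers.from_dict({'voiceovers_mapping': {}})
    voiceovers.add_content_id_for_voiceover('content')
    voiceovers.voiceovers_mapping['content']['en'] = Voiceover(
        'content-en-2-h7sjp8s.mp3', 53000, False)
    voiceovers.delete_content_id_for_voiceover('content')
    return voiceovers.get_content_ids_for_voiceovers()  # []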
class RuleSpec(python_utils.OBJECT):
"""Value object representing a rule specification."""
def to_dict(self):
"""Returns a dict representing this RuleSpec domain object.
Returns:
dict. A dict, mapping all fields of RuleSpec instance.
"""
return {
'rule_type': self.rule_type,
'inputs': self.inputs,
}
@classmethod
def from_dict(cls, rulespec_dict):
"""Return a RuleSpec domain object from a dict.
Args:
rulespec_dict: dict. The dict representation of RuleSpec object.
Returns:
RuleSpec. The corresponding RuleSpec domain object.
"""
return cls(
rulespec_dict['rule_type'],
rulespec_dict['inputs']
)
def __init__(self, rule_type, inputs):
"""Initializes a RuleSpec domain object.
Args:
rule_type: str. The rule type, e.g. "CodeContains" or "Equals". A
full list of rule types can be found in
extensions/interactions/rule_templates.json.
inputs: dict. The values of the parameters needed in order to fully
specify the rule. The keys for this dict can be deduced from
the relevant description field in
extensions/interactions/rule_templates.json -- they are
enclosed in {{...}} braces.
"""
self.rule_type = rule_type
self.inputs = inputs
def validate(self, rule_params_list, exp_param_specs_dict):
"""Validates a RuleSpec value object. It ensures the inputs dict does
not refer to any non-existent parameters and that it contains values
for all the parameters the rule expects.
Args:
rule_params_list: A list of parameters used by the rule represented
by this RuleSpec instance, to be used to validate the inputs of
this RuleSpec. Each element of the list represents a single
parameter and is a tuple with two elements:
0: The name (string) of the parameter.
1: The typed object instance for that
parameter (e.g. Real).
exp_param_specs_dict: A dict of specified parameters used in this
exploration. Keys are parameter names and values are ParamSpec
value objects with an object type property (obj_type). RuleSpec
inputs may have a parameter value which refers to one of these
exploration parameters.
Raises:
ValidationError: One or more attributes of the RuleSpec are
invalid.
"""
if not isinstance(self.inputs, dict):
raise utils.ValidationError(
'Expected inputs to be a dict, received %s' % self.inputs)
input_key_set = set(self.inputs.keys())
param_names_set = set([rp[0] for rp in rule_params_list])
leftover_input_keys = input_key_set - param_names_set
leftover_param_names = param_names_set - input_key_set
# Check if there are input keys which are not rule parameters.
if leftover_input_keys:
logging.warning(
'RuleSpec \'%s\' has inputs which are not recognized '
'parameter names: %s' % (self.rule_type, leftover_input_keys))
# Check if there are missing parameters.
if leftover_param_names:
raise utils.ValidationError(
'RuleSpec \'%s\' is missing inputs: %s'
% (self.rule_type, leftover_param_names))
rule_params_dict = {rp[0]: rp[1] for rp in rule_params_list}
for (param_name, param_value) in self.inputs.items():
param_obj = rule_params_dict[param_name]
# Validate the parameter type given the value.
if isinstance(
param_value,
python_utils.BASESTRING) and '{{' in param_value:
# Value refers to a parameter spec. Cross-validate the type of
# the parameter spec with the rule parameter.
start_brace_index = param_value.index('{{') + 2
end_brace_index = param_value.index('}}')
param_spec_name = param_value[
start_brace_index:end_brace_index]
if param_spec_name not in exp_param_specs_dict:
raise utils.ValidationError(
'RuleSpec \'%s\' has an input with name \'%s\' which '
'refers to an unknown parameter within the '
'exploration: %s' % (
self.rule_type, param_name, param_spec_name))
# TODO(bhenning): The obj_type of the param_spec
# (exp_param_specs_dict[param_spec_name]) should be validated
# to be the same as param_obj.__name__ to ensure the rule spec
# can accept the type of the parameter.
else:
# Otherwise, a simple parameter value needs to be normalizable
# by the parameter object in order to be valid.
param_obj.normalize(param_value)
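# Illustrative sketch (not part of the original file): a RuleSpec
# round-trips through its dict form. The rule type and input name are
# hypothetical; an input written as '{{some_param}}' is treated as a
# reference to an exploration-level parameter during validation rather
# than as a literal value to be normalized.
def _example_rule_spec():
    rule_spec = RuleSpec.from_dict({
        'rule_type': 'Equals',
        'inputs': {'x': '{{answer_threshold}}'},
    })
    return rule_spec.to_dict()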
class SubtitledHtml(python_utils.OBJECT):
"""Value object representing subtitled HTML."""
def __init__(self, content_id, html):
"""Initializes a SubtitledHtml domain object.
Args:
content_id: str. A unique id referring to the other assets for this
content.
html: str. A piece of user submitted HTML. This is cleaned in such
a way as to contain a restricted set of HTML tags.
"""
self.content_id = content_id
self.html = html_cleaner.clean(html)
self.validate()
def to_dict(self):
"""Returns a dict representing this SubtitledHtml domain object.
Returns:
dict. A dict, mapping all fields of SubtitledHtml instance.
"""
return {
'content_id': self.content_id,
'html': self.html
}
@classmethod
def from_dict(cls, subtitled_html_dict):
"""Return a SubtitledHtml domain object from a dict.
Args:
subtitled_html_dict: dict. The dict representation of SubtitledHtml
object.
Returns:
SubtitledHtml. The corresponding SubtitledHtml domain object.
"""
return cls(
subtitled_html_dict['content_id'], subtitled_html_dict['html'])
def validate(self):
"""Validates properties of the SubtitledHtml.
Raises:
ValidationError: One or more attributes of the SubtitledHtml are
invalid.
"""
if not isinstance(self.content_id, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected content id to be a string, received %s' %
self.content_id)
if not isinstance(self.html, python_utils.BASESTRING):
raise utils.ValidationError(
'Invalid content HTML: %s' % self.html)
@classmethod
def create_default_subtitled_html(cls, content_id):
"""Create a default SubtitledHtml domain object."""
return cls(content_id, '')
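# Illustrative sketch (not part of the original file): constructing a
# SubtitledHtml cleans the html and validates eagerly, so an invalid
# content id fails at construction time. The content id is hypothetical.
def _example_subtitled_html():
    content = SubtitledHtml('content', '<p>Welcome!</p>')
    return SubtitledHtml.from_dict(content.to_dict())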
class State(python_utils.OBJECT):
"""Domain object for a state."""
def __init__(
self, content, param_changes, interaction, recorded_voiceovers,
written_translations, solicit_answer_details,
classifier_model_id=None):
"""Initializes a State domain object.
Args:
content: SubtitledHtml. The contents displayed to the reader in this
state.
param_changes: list(ParamChange). Parameter changes associated with
this state.
interaction: InteractionInstance. The interaction instance
associated with this state.
recorded_voiceovers: RecordedVoiceovers. The recorded voiceovers for
the state contents and translations.
written_translations: WrittenTranslations. The written translations
for the state contents.
solicit_answer_details: bool. Whether the creator wants to ask
for answer details from the learner about why they picked a
particular answer while playing the exploration.
classifier_model_id: str or None. The classifier model ID
associated with this state, if applicable.
"""
# The content displayed to the reader in this state.
self.content = content
# Parameter changes associated with this state.
self.param_changes = [param_domain.ParamChange(
param_change.name, param_change.generator.id,
param_change.customization_args
) for param_change in param_changes]
# The interaction instance associated with this state.
self.interaction = InteractionInstance(
interaction.id, interaction.customization_args,
interaction.answer_groups, interaction.default_outcome,
interaction.confirmed_unclassified_answers,
interaction.hints, interaction.solution)
self.classifier_model_id = classifier_model_id
self.recorded_voiceovers = recorded_voiceovers
self.written_translations = written_translations
self.solicit_answer_details = solicit_answer_details
def validate(self, exp_param_specs_dict, allow_null_interaction):
"""Validates various properties of the State.
Args:
exp_param_specs_dict: dict or None. A dict of specified parameters
used in this exploration. Keys are parameter names and values
are ParamSpec value objects with an object type
property(obj_type). It is None if the state belongs to a
question.
allow_null_interaction: bool. Whether this state's interaction is
allowed to be unspecified.
Raises:
ValidationError: One or more attributes of the State are invalid.
"""
self.content.validate()
if not isinstance(self.param_changes, list):
raise utils.ValidationError(
'Expected state param_changes to be a list, received %s'
% self.param_changes)
for param_change in self.param_changes:
param_change.validate()
if not allow_null_interaction and self.interaction.id is None:
raise utils.ValidationError(
'This state does not have any interaction specified.')
elif self.interaction.id is not None:
self.interaction.validate(exp_param_specs_dict)
content_id_list = []
content_id_list.append(self.content.content_id)
for answer_group in self.interaction.answer_groups:
feedback_content_id = answer_group.outcome.feedback.content_id
if feedback_content_id in content_id_list:
raise utils.ValidationError(
'Found a duplicate content id %s' % feedback_content_id)
content_id_list.append(feedback_content_id)
if self.interaction.default_outcome:
default_outcome_content_id = (
self.interaction.default_outcome.feedback.content_id)
if default_outcome_content_id in content_id_list:
raise utils.ValidationError(
'Found a duplicate content id %s'
% default_outcome_content_id)
content_id_list.append(default_outcome_content_id)
for hint in self.interaction.hints:
hint_content_id = hint.hint_content.content_id
if hint_content_id in content_id_list:
raise utils.ValidationError(
'Found a duplicate content id %s' % hint_content_id)
content_id_list.append(hint_content_id)
if self.interaction.solution:
solution_content_id = (
self.interaction.solution.explanation.content_id)
if solution_content_id in content_id_list:
raise utils.ValidationError(
'Found a duplicate content id %s' % solution_content_id)
content_id_list.append(solution_content_id)
if not isinstance(self.solicit_answer_details, bool):
raise utils.ValidationError(
'Expected solicit_answer_details to be a boolean, '
'received %s' % self.solicit_answer_details)
if self.solicit_answer_details:
if self.interaction.id in (
constants.INTERACTION_IDS_WITHOUT_ANSWER_DETAILS):
raise utils.ValidationError(
'The %s interaction does not support soliciting '
'answer details from learners.' % (self.interaction.id))
self.written_translations.validate(content_id_list)
self.recorded_voiceovers.validate(content_id_list)
def get_content_html(self, content_id):
"""Returns the content belongs to a given content id of the object.
Args:
content_id: The id of the content.
Returns:
str. The html content corresponding to the given content id.
Raises:
ValueError: The given content_id does not exist.
"""
content_id_to_html = self._get_all_translatable_content()
if content_id not in content_id_to_html:
raise ValueError('Content ID %s does not exist' % content_id)
return content_id_to_html[content_id]
def get_training_data(self):
"""Retrieves training data from the State domain object."""
state_training_data_by_answer_group = []
for (answer_group_index, answer_group) in enumerate(
self.interaction.answer_groups):
if answer_group.training_data:
answers = copy.deepcopy(answer_group.training_data)
state_training_data_by_answer_group.append({
'answer_group_index': answer_group_index,
'answers': answers
})
return state_training_data_by_answer_group
def can_undergo_classification(self):
"""Checks whether the answers for this state satisfy the preconditions
for a ML model to be trained.
Returns:
bool: True, if the conditions are satisfied.
"""
training_examples_count = 0
labels_count = 0
training_examples_count += len(
self.interaction.confirmed_unclassified_answers)
for answer_group in self.interaction.answer_groups:
training_examples_count += len(answer_group.training_data)
labels_count += 1
if ((training_examples_count >= feconf.MIN_TOTAL_TRAINING_EXAMPLES) and
(labels_count >= feconf.MIN_ASSIGNED_LABELS)):
return True
return False
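# Worked example (illustrative, with hypothetical threshold values; the
# real ones live in feconf): if MIN_TOTAL_TRAINING_EXAMPLES were 50 and
# MIN_ASSIGNED_LABELS were 2, then a state with 30 confirmed
# unclassified answers plus two answer groups holding 15 and 10 training
# examples would have 55 examples across 2 labels, so it could undergo
# classification.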
@classmethod
def convert_state_dict_to_yaml(cls, state_dict, width):
"""Converts the given state dict to yaml format.
Args:
state_dict: dict. A dict representing a state in an exploration.
width: int. The maximum number of characters in a line for the
returned YAML string.
Returns:
str. The YAML version of the state_dict.
Raises:
Exception: The state_dict does not represent a valid state.
"""
try:
# Check if the state_dict can be converted to a State.
state = cls.from_dict(state_dict)
except Exception:
logging.info(
'Bad state dict: %s' % python_utils.UNICODE(state_dict))
raise Exception('Could not convert state dict to YAML.')
return python_utils.yaml_from_dict(state.to_dict(), width=width)
def get_translation_counts(self):
"""Return a dict representing the number of translations available in a
languages in which there exists at least one translation in the state
object.
Returns:
dict(str, int). A dict with language code as a key and number of
translations available in that language as the value.
"""
return self.written_translations.get_translation_counts()
def get_content_count(self):
"""Returns the number of distinct content fields available in the
object.
Returns:
int. The number of distinct content fields available in the state.
"""
return len(self.written_translations.translations_mapping)
def _update_content_ids_in_assets(self, old_ids_list, new_ids_list):
"""Adds or deletes content ids in assets i.e, other parts of state
object such as recorded_voiceovers and written_translations.
Args:
old_ids_list: list(str). A list of content ids present earlier
within the substructure (like answer groups, hints etc.) of
state.
new_ids_list: list(str). A list of content ids currently present
within the substructure (like answer groups, hints etc.) of
state.
"""
content_ids_to_delete = set(old_ids_list) - set(new_ids_list)
content_ids_to_add = set(new_ids_list) - set(old_ids_list)
content_ids_for_text_translations = (
self.written_translations.get_content_ids_for_text_translation())
content_ids_for_voiceovers = (
self.recorded_voiceovers.get_content_ids_for_voiceovers())
for content_id in content_ids_to_delete:
if content_id not in content_ids_for_voiceovers:
raise Exception(
'The content_id %s does not exist in recorded_voiceovers.'
% content_id)
elif content_id not in content_ids_for_text_translations:
raise Exception(
'The content_id %s does not exist in written_translations.'
% content_id)
else:
self.recorded_voiceovers.delete_content_id_for_voiceover(
content_id)
self.written_translations.delete_content_id_for_translation(
content_id)
for content_id in content_ids_to_add:
if content_id in content_ids_for_voiceovers:
raise Exception(
'The content_id %s already exists in recorded_voiceovers.'
% content_id)
elif content_id in content_ids_for_text_translations:
raise Exception(
'The content_id %s already exists in written_translations.'
% content_id)
else:
self.recorded_voiceovers.add_content_id_for_voiceover(
content_id)
self.written_translations.add_content_id_for_translation(
content_id)
def add_translation(self, content_id, language_code, translation_html):
"""Adds translation to a given content id in a specific language.
Args:
content_id: str. The id of the content.
language_code: str. The language code.
translation_html: str. The translated html content.
"""
translation_html = html_cleaner.clean(translation_html)
self.written_translations.add_translation(
content_id, language_code, translation_html)
def update_content(self, content):
"""Update the content of this state.
Args:
content: SubtitledHtml. Representation of updated content.
"""
# TODO(sll): Must sanitize all content in RTE component attrs.
self.content = content
def update_param_changes(self, param_changes):
"""Update the param_changes dict attribute.
Args:
param_changes: list(ParamChange). The new list of ParamChange domain
objects for this state.
"""
self.param_changes = param_changes
def update_interaction_id(self, interaction_id):
"""Update the interaction id attribute.
Args:
interaction_id: str. The new interaction id to set.
"""
self.interaction.id = interaction_id
# TODO(sll): This should also clear interaction.answer_groups (except
# for the default rule). This is somewhat mitigated because the client
# updates interaction_answer_groups directly after this, but we should
# fix it.
def update_interaction_customization_args(self, customization_args):
"""Update the customization_args of InteractionInstance domain object.
Args:
customization_args: dict. The new customization_args to set.
"""
self.interaction.customization_args = customization_args
def update_interaction_answer_groups(self, answer_groups_list):
"""Update the list of AnswerGroup in IteractioInstancen domain object.
Args:
answer_groups_list: list(dict). List of dicts that represent
AnswerGroup domain object.
"""
if not isinstance(answer_groups_list, list):
raise Exception(
'Expected interaction_answer_groups to be a list, received %s'
% answer_groups_list)
interaction_answer_groups = []
old_content_id_list = [
answer_group.outcome.feedback.content_id for answer_group in (
self.interaction.answer_groups)]
# TODO(yanamal): Do additional calculations here to get the
# parameter changes, if necessary.
for answer_group_dict in answer_groups_list:
rule_specs_list = answer_group_dict['rule_specs']
if not isinstance(rule_specs_list, list):
raise Exception(
'Expected answer group rule specs to be a list, '
'received %s' % rule_specs_list)
answer_group = AnswerGroup(
Outcome.from_dict(answer_group_dict['outcome']), [],
answer_group_dict['training_data'],
answer_group_dict['tagged_skill_misconception_id'])
for rule_dict in rule_specs_list:
rule_spec = RuleSpec.from_dict(rule_dict)
# Normalize and store the rule params.
rule_inputs = rule_spec.inputs
if not isinstance(rule_inputs, dict):
raise Exception(
'Expected rule_inputs to be a dict, received %s'
% rule_inputs)
for param_name, value in rule_inputs.items():
param_type = (
interaction_registry.Registry.get_interaction_by_id(
self.interaction.id
).get_rule_param_type(rule_spec.rule_type, param_name))
if (isinstance(value, python_utils.BASESTRING) and
'{{' in value and '}}' in value):
# TODO(jacobdavis11): Create checks that all parameters
# referred to exist and have the correct types.
normalized_param = value
else:
try:
normalized_param = param_type.normalize(value)
except Exception:
raise Exception(
'%s has the wrong type. It should be a %s.' %
(value, param_type.__name__))
rule_inputs[param_name] = normalized_param
answer_group.rule_specs.append(rule_spec)
interaction_answer_groups.append(answer_group)
self.interaction.answer_groups = interaction_answer_groups
new_content_id_list = [
answer_group.outcome.feedback.content_id for answer_group in (
self.interaction.answer_groups)]
self._update_content_ids_in_assets(
old_content_id_list, new_content_id_list)
def update_interaction_default_outcome(self, default_outcome_dict):
"""Update the default_outcome of InteractionInstance domain object.
Args:
default_outcome_dict: dict. Dict that represents Outcome domain
object.
"""
old_content_id_list = []
new_content_id_list = []
if self.interaction.default_outcome:
old_content_id_list.append(
self.interaction.default_outcome.feedback.content_id)
if default_outcome_dict:
if not isinstance(default_outcome_dict, dict):
raise Exception(
'Expected default_outcome_dict to be a dict, received %s'
% default_outcome_dict)
self.interaction.default_outcome = Outcome.from_dict(
default_outcome_dict)
new_content_id_list.append(
self.interaction.default_outcome.feedback.content_id)
else:
self.interaction.default_outcome = None
self._update_content_ids_in_assets(
old_content_id_list, new_content_id_list)
def update_interaction_confirmed_unclassified_answers(
self, confirmed_unclassified_answers):
"""Update the confirmed_unclassified_answers of IteractionInstance
domain object.
Args:
confirmed_unclassified_answers: list(AnswerGroup). The new list of
answers which have been confirmed to be associated with the
default outcome.
Raises:
Exception: 'confirmed_unclassified_answers' is not a list.
"""
if not isinstance(confirmed_unclassified_answers, list):
raise Exception(
'Expected confirmed_unclassified_answers to be a list,'
' received %s' % confirmed_unclassified_answers)
self.interaction.confirmed_unclassified_answers = (
confirmed_unclassified_answers)
def update_interaction_hints(self, hints_list):
"""Update the list of hints.
Args:
hints_list: list(dict). A list of dict; each dict represents a Hint
object.
Raises:
Exception: 'hints_list' is not a list.
"""
if not isinstance(hints_list, list):
raise Exception(
'Expected hints_list to be a list, received %s'
% hints_list)
old_content_id_list = [
hint.hint_content.content_id for hint in self.interaction.hints]
self.interaction.hints = [
Hint.from_dict(hint_dict)
for hint_dict in hints_list]
new_content_id_list = [
hint.hint_content.content_id for hint in self.interaction.hints]
self._update_content_ids_in_assets(
old_content_id_list, new_content_id_list)
def update_interaction_solution(self, solution_dict):
"""Update the solution of interaction.
Args:
solution_dict: dict or None. The dict representation of
Solution object.
Raises:
Exception: 'solution_dict' is not a dict.
"""
old_content_id_list = []
new_content_id_list = []
if self.interaction.solution:
old_content_id_list.append(
self.interaction.solution.explanation.content_id)
if solution_dict is not None:
if not isinstance(solution_dict, dict):
raise Exception(
'Expected solution to be a dict, received %s'
% solution_dict)
self.interaction.solution = Solution.from_dict(
self.interaction.id, solution_dict)
new_content_id_list.append(
self.interaction.solution.explanation.content_id)
else:
self.interaction.solution = None
self._update_content_ids_in_assets(
old_content_id_list, new_content_id_list)
def update_recorded_voiceovers(self, recorded_voiceovers):
"""Update the recorded_voiceovers of a state.
Args:
recorded_voiceovers: RecordedVoiceovers. The new RecordedVoiceovers
object for the state.
"""
self.recorded_voiceovers = recorded_voiceovers
def update_written_translations(self, written_translations):
"""Update the written_translations of a state.
Args:
written_translations: WrittenTranslations. The new
WrittenTranslations object for the state.
"""
self.written_translations = written_translations
def update_solicit_answer_details(self, solicit_answer_details):
"""Update the solicit_answer_details of a state.
Args:
solicit_answer_details: bool. The new value of
solicit_answer_details for the state.
"""
if not isinstance(solicit_answer_details, bool):
raise Exception(
'Expected solicit_answer_details to be a boolean, received %s'
% solicit_answer_details)
self.solicit_answer_details = solicit_answer_details
def _get_all_translatable_content(self):
"""Returns all content which can be translated into different languages.
Returns:
dict(str, str). A dict with content id as the key and content html
as the value.
"""
content_id_to_html = {}
content_id_to_html[self.content.content_id] = self.content.html
# TODO(#6178): Remove empty html checks once we add a validation
# check that ensures each content in state should be non-empty html.
default_outcome = self.interaction.default_outcome
if default_outcome is not None and default_outcome.feedback.html != '':
content_id_to_html[default_outcome.feedback.content_id] = (
default_outcome.feedback.html)
for answer_group in self.interaction.answer_groups:
if answer_group.outcome.feedback.html != '':
content_id_to_html[answer_group.outcome.feedback.content_id] = (
answer_group.outcome.feedback.html)
for hint in self.interaction.hints:
if hint.hint_content.html != '':
content_id_to_html[hint.hint_content.content_id] = (
hint.hint_content.html)
solution = self.interaction.solution
if solution is not None and solution.explanation.html != '':
content_id_to_html[solution.explanation.content_id] = (
solution.explanation.html)
return content_id_to_html
def get_content_id_mapping_needing_translations(self, language_code):
"""Returns all text html which can be translated in the given language.
Args:
language_code: str. The abbreviated code of the language.
Returns:
dict(str, str). A dict with content id as the key and the content
html as the value.
"""
content_id_to_html = self._get_all_translatable_content()
available_translation_content_ids = (
self.written_translations
.get_content_ids_that_are_correctly_translated(language_code))
for content_id in available_translation_content_ids:
del content_id_to_html[content_id]
# TODO(#7571): Add functionality to return the list of
# translations which needs update.
return content_id_to_html
def to_dict(self):
"""Returns a dict representing this State domain object.
Returns:
dict. A dict mapping all fields of State instance.
"""
return {
'content': self.content.to_dict(),
'param_changes': [param_change.to_dict()
for param_change in self.param_changes],
'interaction': self.interaction.to_dict(),
'classifier_model_id': self.classifier_model_id,
'recorded_voiceovers': self.recorded_voiceovers.to_dict(),
'written_translations': self.written_translations.to_dict(),
'solicit_answer_details': self.solicit_answer_details
}
@classmethod
def from_dict(cls, state_dict):
"""Return a State domain object from a dict.
Args:
state_dict: dict. The dict representation of State object.
Returns:
State. The corresponding State domain object.
"""
return cls(
SubtitledHtml.from_dict(state_dict['content']),
[param_domain.ParamChange.from_dict(param)
for param in state_dict['param_changes']],
InteractionInstance.from_dict(state_dict['interaction']),
RecordedVoiceovers.from_dict(state_dict['recorded_voiceovers']),
WrittenTranslations.from_dict(state_dict['written_translations']),
state_dict['solicit_answer_details'],
state_dict['classifier_model_id'])
@classmethod
def create_default_state(
cls, default_dest_state_name, is_initial_state=False):
"""Return a State domain object with default value.
Args:
default_dest_state_name: str. The default destination state.
is_initial_state: bool. Whether this state represents the initial
state of an exploration.
Returns:
State. The corresponding State domain object.
"""
content_html = (
feconf.DEFAULT_INIT_STATE_CONTENT_STR if is_initial_state else '')
content_id = feconf.DEFAULT_NEW_STATE_CONTENT_ID
return cls(
SubtitledHtml(content_id, content_html),
[],
InteractionInstance.create_default_interaction(
default_dest_state_name),
RecordedVoiceovers.from_dict(copy.deepcopy(
feconf.DEFAULT_RECORDED_VOICEOVERS)),
WrittenTranslations.from_dict(
copy.deepcopy(feconf.DEFAULT_WRITTEN_TRANSLATIONS)),
False)
@classmethod
def convert_html_fields_in_state(cls, state_dict, conversion_fn):
"""Applies a conversion function on all the html strings in a state
to migrate them to a desired state.
Args:
state_dict: dict. The dict representation of State object.
conversion_fn: function. The conversion function to be applied on
the states_dict.
Returns:
dict. The converted state_dict.
"""
state_dict['content']['html'] = (
conversion_fn(state_dict['content']['html']))
if state_dict['interaction']['default_outcome']:
interaction_feedback_html = state_dict[
'interaction']['default_outcome']['feedback']['html']
state_dict['interaction']['default_outcome']['feedback'][
'html'] = conversion_fn(interaction_feedback_html)
for answer_group_index, answer_group in enumerate(
state_dict['interaction']['answer_groups']):
answer_group_html = answer_group['outcome']['feedback']['html']
state_dict['interaction']['answer_groups'][
answer_group_index]['outcome']['feedback']['html'] = (
conversion_fn(answer_group_html))
if state_dict['interaction']['id'] == 'ItemSelectionInput':
for rule_spec_index, rule_spec in enumerate(
answer_group['rule_specs']):
for x_index, x in enumerate(rule_spec['inputs']['x']):
state_dict['interaction']['answer_groups'][
answer_group_index]['rule_specs'][
rule_spec_index]['inputs']['x'][x_index] = (
conversion_fn(x))
for hint_index, hint in enumerate(
state_dict['interaction']['hints']):
hint_html = hint['hint_content']['html']
state_dict['interaction']['hints'][hint_index][
'hint_content']['html'] = conversion_fn(hint_html)
if state_dict['interaction']['solution']:
solution_html = state_dict[
'interaction']['solution']['explanation']['html']
state_dict['interaction']['solution']['explanation']['html'] = (
conversion_fn(solution_html))
if state_dict['interaction']['id'] in (
'ItemSelectionInput', 'MultipleChoiceInput'):
for value_index, value in enumerate(
state_dict['interaction']['customization_args'][
'choices']['value']):
state_dict['interaction']['customization_args'][
'choices']['value'][value_index] = conversion_fn(value)
return state_dict
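# Illustrative sketch (not part of the original file): a freshly created
# state passes validation when a null interaction is allowed, and its
# content count covers the state content plus the default outcome's
# feedback. The state name 'Introduction' is hypothetical.
def _example_default_state():
    state = State.create_default_state(
        'Introduction', is_initial_state=True)
    state.validate({}, allow_null_interaction=True)
    return state.get_content_count()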
raise utils.ValidationError(
'Expected outcome refresher_exploration_id to be a string, '
'received %s' % self.refresher_exploration_id)
class Voiceover(python_utils.OBJECT):
def to_dict(self):
return {
'filename': self.filename,
'file_size_bytes': self.file_size_bytes,
'needs_update': self.needs_update,
}
@classmethod
def from_dict(cls, voiceover_dict):
return cls(
voiceover_dict['filename'],
voiceover_dict['file_size_bytes'],
voiceover_dict['needs_update'])
def __init__(self, filename, file_size_bytes, needs_update):
# str. The corresponding audio file path, e.g.
# "content-en-2-h7sjp8s.mp3".
self.filename = filename
# int. The file size, in bytes. Used to display potential bandwidth
# usage to the learner before they download the file.
self.file_size_bytes = file_size_bytes
# bool. Whether audio is marked for needing review.
self.needs_update = needs_update
def validate(self):
if not isinstance(self.filename, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected audio filename to be a string, received %s' %
self.filename)
dot_index = self.filename.rfind('.')
if dot_index == -1 or dot_index == 0:
raise utils.ValidationError(
'Invalid audio filename: %s' % self.filename)
extension = self.filename[dot_index + 1:]
if extension not in feconf.ACCEPTED_AUDIO_EXTENSIONS:
raise utils.ValidationError(
'Invalid audio filename: it should have one of '
'the following extensions: %s. Received: %s'
% (list(feconf.ACCEPTED_AUDIO_EXTENSIONS.keys()),
self.filename))
if not isinstance(self.file_size_bytes, int):
raise utils.ValidationError(
'Expected file size to be an int, received %s' %
self.file_size_bytes)
if self.file_size_bytes <= 0:
raise utils.ValidationError(
'Invalid file size: %s' % self.file_size_bytes)
if not isinstance(self.needs_update, bool):
raise utils.ValidationError(
'Expected needs_update to be a bool, received %s' %
self.needs_update)
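# Illustrative sketch (not part of the original module): building and
# validating a Voiceover from a plain dict. Assumes 'mp3' is one of the keys
# of feconf.ACCEPTED_AUDIO_EXTENSIONS, as the extension check above requires.
def _example_voiceover_round_trip():
    voiceover = Voiceover.from_dict({
        'filename': 'content-en-2-h7sjp8s.mp3',
        'file_size_bytes': 43467,
        'needs_update': False,
    })
    voiceover.validate()  # Passes: 'mp3' extension, positive size, bool flag.
    return voiceover.to_dict()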
class WrittenTranslation(python_utils.OBJECT):
def __init__(self, html, needs_update):
self.html = html_cleaner.clean(html)
self.needs_update = needs_update
def to_dict(self):
return {
'html': self.html,
'needs_update': self.needs_update,
}
@classmethod
def from_dict(cls, written_translation_dict):
return cls(
written_translation_dict['html'],
written_translation_dict['needs_update'])
def validate(self):
if not isinstance(self.html, python_utils.BASESTRING):
raise utils.ValidationError(
'Invalid content HTML: %s' % self.html)
if not isinstance(self.needs_update, bool):
raise utils.ValidationError(
'Expected needs_update to be a bool, received %s' %
self.needs_update)
class WrittenTranslations(python_utils.OBJECT):
def __init__(self, translations_mapping):
self.translations_mapping = translations_mapping
def to_dict(self):
translations_mapping = {}
for (content_id, language_code_to_written_translation) in (
self.translations_mapping.items()):
translations_mapping[content_id] = {}
for (language_code, written_translation) in (
language_code_to_written_translation.items()):
translations_mapping[content_id][language_code] = (
written_translation.to_dict())
written_translations_dict = {
'translations_mapping': translations_mapping
}
return written_translations_dict
@classmethod
def from_dict(cls, written_translations_dict):
translations_mapping = {}
for (content_id, language_code_to_written_translation) in (
written_translations_dict['translations_mapping'].items()):
translations_mapping[content_id] = {}
for (language_code, written_translation) in (
language_code_to_written_translation.items()):
translations_mapping[content_id][language_code] = (
WrittenTranslation.from_dict(written_translation))
return cls(translations_mapping)
def get_content_ids_that_are_correctly_translated(self, language_code):
correctly_translated_content_ids = []
for content_id, translations in self.translations_mapping.items():
if language_code in translations and not (
translations[language_code].needs_update):
correctly_translated_content_ids.append(content_id)
return correctly_translated_content_ids
def add_translation(self, content_id, language_code, html):
written_translation = WrittenTranslation(html, False)
self.translations_mapping[content_id][language_code] = (
written_translation)
def validate(self, expected_content_id_list):
if expected_content_id_list is not None:
if not set(self.translations_mapping.keys()) == (
set(expected_content_id_list)):
raise utils.ValidationError(
'Expected state written_translations to match the listed '
'content ids %s, found %s' % (
expected_content_id_list,
list(self.translations_mapping.keys()))
)
for (content_id, language_code_to_written_translation) in (
self.translations_mapping.items()):
if not isinstance(content_id, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected content_id to be a string, received %s'
% content_id)
if not isinstance(language_code_to_written_translation, dict):
raise utils.ValidationError(
'Expected content_id value to be a dict, received %s'
% language_code_to_written_translation)
for (language_code, written_translation) in (
language_code_to_written_translation.items()):
if not isinstance(language_code, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected language_code to be a string, received %s'
% language_code)
# Currently, we assume written translations are used by the
# voice-artist to voiceover the translated text so written
# translations can be in supported audio/voiceover languages.
allowed_language_codes = [language['id'] for language in (
constants.SUPPORTED_AUDIO_LANGUAGES)]
if language_code not in allowed_language_codes:
raise utils.ValidationError(
'Invalid language_code: %s' % language_code)
written_translation.validate()
def get_content_ids_for_text_translation(self):
return list(self.translations_mapping.keys())
def get_translated_content(self, content_id, language_code):
if content_id in self.translations_mapping:
if language_code in self.translations_mapping[content_id]:
return self.translations_mapping[content_id][language_code].html
else:
raise Exception(
                    'Translation for the given content_id %s does not exist '
                    'in the %s language code' % (content_id, language_code))
else:
raise Exception('Invalid content_id: %s' % content_id)
def add_content_id_for_translation(self, content_id):
if not isinstance(content_id, python_utils.BASESTRING):
raise Exception(
'Expected content_id to be a string, received %s' % content_id)
if content_id in self.translations_mapping:
raise Exception(
                'The content_id %s already exists.' % content_id)
else:
self.translations_mapping[content_id] = {}
def delete_content_id_for_translation(self, content_id):
if not isinstance(content_id, python_utils.BASESTRING):
raise Exception(
'Expected content_id to be a string, received %s' % content_id)
if content_id not in self.translations_mapping:
raise Exception(
'The content_id %s does not exist.' % content_id)
else:
self.translations_mapping.pop(content_id, None)
def get_translation_counts(self):
translation_counts = collections.defaultdict(int)
for translations in self.translations_mapping.values():
for language, translation in translations.items():
if not translation.needs_update:
translation_counts[language] += 1
return translation_counts
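# Illustrative sketch (not part of the original module): how
# get_translation_counts tallies per-language totals. Translations flagged
# needs_update are skipped, so 'fr' counts once here, not twice. Assumes
# html_cleaner.clean leaves simple <p> markup intact.
def _example_translation_counts():
    translations = WrittenTranslations.from_dict({
        'translations_mapping': {
            'content': {
                'fr': {'html': '<p>Bonjour</p>', 'needs_update': False},
            },
            'feedback_1': {
                'fr': {'html': '<p>Bien</p>', 'needs_update': True},
            },
        }
    })
    return translations.get_translation_counts()  # Counts {'fr': 1}.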
class RecordedVoiceovers(python_utils.OBJECT):
def __init__(self, voiceovers_mapping):
self.voiceovers_mapping = voiceovers_mapping
def to_dict(self):
voiceovers_mapping = {}
for (content_id, language_code_to_voiceover) in (
self.voiceovers_mapping.items()):
voiceovers_mapping[content_id] = {}
for (language_code, voiceover) in (
language_code_to_voiceover.items()):
voiceovers_mapping[content_id][language_code] = (
voiceover.to_dict())
recorded_voiceovers_dict = {
'voiceovers_mapping': voiceovers_mapping
}
return recorded_voiceovers_dict
@classmethod
def from_dict(cls, recorded_voiceovers_dict):
voiceovers_mapping = {}
for (content_id, language_code_to_voiceover) in (
recorded_voiceovers_dict['voiceovers_mapping'].items()):
voiceovers_mapping[content_id] = {}
for (language_code, voiceover) in (
language_code_to_voiceover.items()):
voiceovers_mapping[content_id][language_code] = (
Voiceover.from_dict(voiceover))
return cls(voiceovers_mapping)
def validate(self, expected_content_id_list):
if expected_content_id_list is not None:
if not set(self.voiceovers_mapping.keys()) == (
set(expected_content_id_list)):
raise utils.ValidationError(
'Expected state recorded_voiceovers to match the listed '
'content ids %s, found %s' % (
expected_content_id_list,
list(self.voiceovers_mapping.keys()))
)
for (content_id, language_code_to_voiceover) in (
self.voiceovers_mapping.items()):
if not isinstance(content_id, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected content_id to be a string, received %s'
% content_id)
if not isinstance(language_code_to_voiceover, dict):
raise utils.ValidationError(
'Expected content_id value to be a dict, received %s'
% language_code_to_voiceover)
for (language_code, voiceover) in (
language_code_to_voiceover.items()):
if not isinstance(language_code, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected language_code to be a string, received %s'
% language_code)
allowed_language_codes = [language['id'] for language in (
constants.SUPPORTED_AUDIO_LANGUAGES)]
if language_code not in allowed_language_codes:
raise utils.ValidationError(
'Invalid language_code: %s' % language_code)
voiceover.validate()
def get_content_ids_for_voiceovers(self):
return list(self.voiceovers_mapping.keys())
def strip_all_existing_voiceovers(self):
for content_id in self.voiceovers_mapping.keys():
self.voiceovers_mapping[content_id] = {}
def add_content_id_for_voiceover(self, content_id):
if not isinstance(content_id, python_utils.BASESTRING):
raise Exception(
'Expected content_id to be a string, received %s' % content_id)
if content_id in self.voiceovers_mapping:
raise Exception(
                'The content_id %s already exists.' % content_id)
self.voiceovers_mapping[content_id] = {}
def delete_content_id_for_voiceover(self, content_id):
if not isinstance(content_id, python_utils.BASESTRING):
raise Exception(
'Expected content_id to be a string, received %s' % content_id)
if content_id not in self.voiceovers_mapping:
raise Exception(
'The content_id %s does not exist.' % content_id)
else:
self.voiceovers_mapping.pop(content_id, None)
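# Illustrative sketch (not part of the original module): the content-id
# lifecycle enforced by RecordedVoiceovers. An id must be registered exactly
# once before recordings can be stored under it; stripping keeps the ids but
# drops the recordings.
def _example_voiceover_lifecycle():
    recorded = RecordedVoiceovers({})
    recorded.add_content_id_for_voiceover('content')
    recorded.strip_all_existing_voiceovers()  # 'content' stays, recordings go.
    recorded.delete_content_id_for_voiceover('content')
    return recorded.to_dict()  # {'voiceovers_mapping': {}}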
class RuleSpec(python_utils.OBJECT):
def to_dict(self):
return {
'rule_type': self.rule_type,
'inputs': self.inputs,
}
@classmethod
def from_dict(cls, rulespec_dict):
return cls(
rulespec_dict['rule_type'],
rulespec_dict['inputs']
)
def __init__(self, rule_type, inputs):
self.rule_type = rule_type
self.inputs = inputs
def validate(self, rule_params_list, exp_param_specs_dict):
if not isinstance(self.inputs, dict):
raise utils.ValidationError(
'Expected inputs to be a dict, received %s' % self.inputs)
input_key_set = set(self.inputs.keys())
param_names_set = set([rp[0] for rp in rule_params_list])
leftover_input_keys = input_key_set - param_names_set
leftover_param_names = param_names_set - input_key_set
# Check if there are input keys which are not rule parameters.
if leftover_input_keys:
logging.warning(
'RuleSpec \'%s\' has inputs which are not recognized '
'parameter names: %s' % (self.rule_type, leftover_input_keys))
# Check if there are missing parameters.
if leftover_param_names:
raise utils.ValidationError(
'RuleSpec \'%s\' is missing inputs: %s'
% (self.rule_type, leftover_param_names))
rule_params_dict = {rp[0]: rp[1] for rp in rule_params_list}
for (param_name, param_value) in self.inputs.items():
param_obj = rule_params_dict[param_name]
# Validate the parameter type given the value.
if isinstance(
param_value,
python_utils.BASESTRING) and '{{' in param_value:
# Value refers to a parameter spec. Cross-validate the type of
# the parameter spec with the rule parameter.
start_brace_index = param_value.index('{{') + 2
end_brace_index = param_value.index('}}')
param_spec_name = param_value[
start_brace_index:end_brace_index]
if param_spec_name not in exp_param_specs_dict:
raise utils.ValidationError(
'RuleSpec \'%s\' has an input with name \'%s\' which '
'refers to an unknown parameter within the '
'exploration: %s' % (
self.rule_type, param_name, param_spec_name))
# TODO(bhenning): The obj_type of the param_spec
# (exp_param_specs_dict[param_spec_name]) should be validated
# to be the same as param_obj.__name__ to ensure the rule spec
# can accept the type of the parameter.
else:
# Otherwise, a simple parameter value needs to be normalizable
# by the parameter object in order to be valid.
param_obj.normalize(param_value)
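# Illustrative sketch (not part of the original module): the brace parsing
# RuleSpec.validate uses to pull a parameter-spec name out of a '{{...}}'
# input value before cross-checking it against exp_param_specs_dict.
def _example_extract_param_spec_name(param_value):
    start_brace_index = param_value.index('{{') + 2
    end_brace_index = param_value.index('}}')
    return param_value[start_brace_index:end_brace_index]
# _example_extract_param_spec_name('{{answerThreshold}}') -> 'answerThreshold'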
class SubtitledHtml(python_utils.OBJECT):
def __init__(self, content_id, html):
self.content_id = content_id
self.html = html_cleaner.clean(html)
self.validate()
def to_dict(self):
return {
'content_id': self.content_id,
'html': self.html
}
@classmethod
def from_dict(cls, subtitled_html_dict):
return cls(
subtitled_html_dict['content_id'], subtitled_html_dict['html'])
def validate(self):
if not isinstance(self.content_id, python_utils.BASESTRING):
raise utils.ValidationError(
'Expected content id to be a string, received %s' %
self.content_id)
if not isinstance(self.html, python_utils.BASESTRING):
raise utils.ValidationError(
'Invalid content HTML: %s' % self.html)
@classmethod
def create_default_subtitled_html(cls, content_id):
return cls(content_id, '')
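# Illustrative sketch (not part of the original module): SubtitledHtml cleans
# its markup and validates eagerly on construction, so a non-string
# content_id fails at creation time. Assumes html_cleaner.clean leaves simple
# <p> markup intact.
def _example_subtitled_html():
    content = SubtitledHtml('content', '<p>Hello</p>')
    return content.to_dict()  # {'content_id': 'content', 'html': '<p>Hello</p>'}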
class State(python_utils.OBJECT):
def __init__(
self, content, param_changes, interaction, recorded_voiceovers,
written_translations, solicit_answer_details,
classifier_model_id=None):
# The content displayed to the reader in this state.
self.content = content
# Parameter changes associated with this state.
self.param_changes = [param_domain.ParamChange(
param_change.name, param_change.generator.id,
param_change.customization_args
) for param_change in param_changes]
# The interaction instance associated with this state.
self.interaction = InteractionInstance(
interaction.id, interaction.customization_args,
interaction.answer_groups, interaction.default_outcome,
interaction.confirmed_unclassified_answers,
interaction.hints, interaction.solution)
self.classifier_model_id = classifier_model_id
self.recorded_voiceovers = recorded_voiceovers
self.written_translations = written_translations
self.solicit_answer_details = solicit_answer_details
def validate(self, exp_param_specs_dict, allow_null_interaction):
self.content.validate()
if not isinstance(self.param_changes, list):
raise utils.ValidationError(
'Expected state param_changes to be a list, received %s'
% self.param_changes)
for param_change in self.param_changes:
param_change.validate()
if not allow_null_interaction and self.interaction.id is None:
raise utils.ValidationError(
'This state does not have any interaction specified.')
elif self.interaction.id is not None:
self.interaction.validate(exp_param_specs_dict)
content_id_list = []
content_id_list.append(self.content.content_id)
for answer_group in self.interaction.answer_groups:
feedback_content_id = answer_group.outcome.feedback.content_id
if feedback_content_id in content_id_list:
raise utils.ValidationError(
'Found a duplicate content id %s' % feedback_content_id)
content_id_list.append(feedback_content_id)
if self.interaction.default_outcome:
default_outcome_content_id = (
self.interaction.default_outcome.feedback.content_id)
if default_outcome_content_id in content_id_list:
raise utils.ValidationError(
'Found a duplicate content id %s'
% default_outcome_content_id)
content_id_list.append(default_outcome_content_id)
for hint in self.interaction.hints:
hint_content_id = hint.hint_content.content_id
if hint_content_id in content_id_list:
raise utils.ValidationError(
'Found a duplicate content id %s' % hint_content_id)
content_id_list.append(hint_content_id)
if self.interaction.solution:
solution_content_id = (
self.interaction.solution.explanation.content_id)
if solution_content_id in content_id_list:
raise utils.ValidationError(
'Found a duplicate content id %s' % solution_content_id)
content_id_list.append(solution_content_id)
if not isinstance(self.solicit_answer_details, bool):
raise utils.ValidationError(
'Expected solicit_answer_details to be a boolean, '
'received %s' % self.solicit_answer_details)
if self.solicit_answer_details:
if self.interaction.id in (
constants.INTERACTION_IDS_WITHOUT_ANSWER_DETAILS):
raise utils.ValidationError(
'The %s interaction does not support soliciting '
'answer details from learners.' % (self.interaction.id))
self.written_translations.validate(content_id_list)
self.recorded_voiceovers.validate(content_id_list)
def get_content_html(self, content_id):
content_id_to_html = self._get_all_translatable_content()
if content_id not in content_id_to_html:
raise ValueError('Content ID %s does not exist' % content_id)
return content_id_to_html[content_id]
def get_training_data(self):
state_training_data_by_answer_group = []
for (answer_group_index, answer_group) in enumerate(
self.interaction.answer_groups):
if answer_group.training_data:
answers = copy.deepcopy(answer_group.training_data)
state_training_data_by_answer_group.append({
'answer_group_index': answer_group_index,
'answers': answers
})
return state_training_data_by_answer_group
def can_undergo_classification(self):
training_examples_count = 0
labels_count = 0
training_examples_count += len(
self.interaction.confirmed_unclassified_answers)
for answer_group in self.interaction.answer_groups:
training_examples_count += len(answer_group.training_data)
labels_count += 1
if ((training_examples_count >= feconf.MIN_TOTAL_TRAINING_EXAMPLES) and
(labels_count >= feconf.MIN_ASSIGNED_LABELS)):
return True
return False
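    # Worked example for the check above, using hypothetical threshold values
    # standing in for the real feconf constants: with
    # MIN_TOTAL_TRAINING_EXAMPLES = 50 and MIN_ASSIGNED_LABELS = 2, an
    # interaction holding 30 confirmed unclassified answers plus two answer
    # groups with 12 and 10 training answers has 52 examples across 2 labels,
    # so can_undergo_classification returns True.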
@classmethod
def convert_state_dict_to_yaml(cls, state_dict, width):
try:
# Check if the state_dict can be converted to a State.
state = cls.from_dict(state_dict)
except Exception:
logging.info(
'Bad state dict: %s' % python_utils.UNICODE(state_dict))
raise Exception('Could not convert state dict to YAML.')
return python_utils.yaml_from_dict(state.to_dict(), width=width)
def get_translation_counts(self):
return self.written_translations.get_translation_counts()
def get_content_count(self):
return len(self.written_translations.translations_mapping)
def _update_content_ids_in_assets(self, old_ids_list, new_ids_list):
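        # Reconciles both asset mappings with a new set of content ids:
        # ids present only in old_ids_list are removed from
        # recorded_voiceovers and written_translations, and ids present
        # only in new_ids_list are registered in both.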
content_ids_to_delete = set(old_ids_list) - set(new_ids_list)
content_ids_to_add = set(new_ids_list) - set(old_ids_list)
content_ids_for_text_translations = (
self.written_translations.get_content_ids_for_text_translation())
content_ids_for_voiceovers = (
self.recorded_voiceovers.get_content_ids_for_voiceovers())
for content_id in content_ids_to_delete:
            if content_id not in content_ids_for_voiceovers:
raise Exception(
'The content_id %s does not exist in recorded_voiceovers.'
% content_id)
            elif content_id not in content_ids_for_text_translations:
raise Exception(
'The content_id %s does not exist in written_translations.'
% content_id)
else:
self.recorded_voiceovers.delete_content_id_for_voiceover(
content_id)
self.written_translations.delete_content_id_for_translation(
content_id)
for content_id in content_ids_to_add:
if content_id in content_ids_for_voiceovers:
raise Exception(
'The content_id %s already exists in recorded_voiceovers'
% content_id)
elif content_id in content_ids_for_text_translations:
raise Exception(
'The content_id %s already exists in written_translations.'
% content_id)
else:
self.recorded_voiceovers.add_content_id_for_voiceover(
content_id)
self.written_translations.add_content_id_for_translation(
content_id)
def add_translation(self, content_id, language_code, translation_html):
translation_html = html_cleaner.clean(translation_html)
self.written_translations.add_translation(
content_id, language_code, translation_html)
def update_content(self, content):
# TODO(sll): Must sanitize all content in RTE component attrs.
self.content = content
def update_param_changes(self, param_changes):
self.param_changes = param_changes
def update_interaction_id(self, interaction_id):
self.interaction.id = interaction_id
# TODO(sll): This should also clear interaction.answer_groups (except
# for the default rule). This is somewhat mitigated because the client
# updates interaction_answer_groups directly after this, but we should
# fix it.
def update_interaction_customization_args(self, customization_args):
self.interaction.customization_args = customization_args
def update_interaction_answer_groups(self, answer_groups_list):
if not isinstance(answer_groups_list, list):
raise Exception(
'Expected interaction_answer_groups to be a list, received %s'
% answer_groups_list)
interaction_answer_groups = []
old_content_id_list = [
answer_group.outcome.feedback.content_id for answer_group in (
self.interaction.answer_groups)]
# TODO(yanamal): Do additional calculations here to get the
# parameter changes, if necessary.
for answer_group_dict in answer_groups_list:
rule_specs_list = answer_group_dict['rule_specs']
if not isinstance(rule_specs_list, list):
raise Exception(
'Expected answer group rule specs to be a list, '
'received %s' % rule_specs_list)
answer_group = AnswerGroup(
Outcome.from_dict(answer_group_dict['outcome']), [],
answer_group_dict['training_data'],
answer_group_dict['tagged_skill_misconception_id'])
for rule_dict in rule_specs_list:
rule_spec = RuleSpec.from_dict(rule_dict)
# Normalize and store the rule params.
rule_inputs = rule_spec.inputs
if not isinstance(rule_inputs, dict):
raise Exception(
'Expected rule_inputs to be a dict, received %s'
% rule_inputs)
for param_name, value in rule_inputs.items():
param_type = (
interaction_registry.Registry.get_interaction_by_id(
self.interaction.id
).get_rule_param_type(rule_spec.rule_type, param_name))
if (isinstance(value, python_utils.BASESTRING) and
'{{' in value and '}}' in value):
# TODO(jacobdavis11): Create checks that all parameters
# referred to exist and have the correct types.
normalized_param = value
else:
try:
normalized_param = param_type.normalize(value)
except Exception:
raise Exception(
'%s has the wrong type. It should be a %s.' %
(value, param_type.__name__))
rule_inputs[param_name] = normalized_param
answer_group.rule_specs.append(rule_spec)
interaction_answer_groups.append(answer_group)
self.interaction.answer_groups = interaction_answer_groups
new_content_id_list = [
answer_group.outcome.feedback.content_id for answer_group in (
self.interaction.answer_groups)]
self._update_content_ids_in_assets(
old_content_id_list, new_content_id_list)
def update_interaction_default_outcome(self, default_outcome_dict):
old_content_id_list = []
new_content_id_list = []
if self.interaction.default_outcome:
old_content_id_list.append(
self.interaction.default_outcome.feedback.content_id)
if default_outcome_dict:
if not isinstance(default_outcome_dict, dict):
raise Exception(
'Expected default_outcome_dict to be a dict, received %s'
% default_outcome_dict)
self.interaction.default_outcome = Outcome.from_dict(
default_outcome_dict)
new_content_id_list.append(
self.interaction.default_outcome.feedback.content_id)
else:
self.interaction.default_outcome = None
self._update_content_ids_in_assets(
old_content_id_list, new_content_id_list)
def update_interaction_confirmed_unclassified_answers(
self, confirmed_unclassified_answers):
if not isinstance(confirmed_unclassified_answers, list):
raise Exception(
'Expected confirmed_unclassified_answers to be a list,'
' received %s' % confirmed_unclassified_answers)
self.interaction.confirmed_unclassified_answers = (
confirmed_unclassified_answers)
def update_interaction_hints(self, hints_list):
if not isinstance(hints_list, list):
raise Exception(
'Expected hints_list to be a list, received %s'
% hints_list)
old_content_id_list = [
hint.hint_content.content_id for hint in self.interaction.hints]
self.interaction.hints = [
Hint.from_dict(hint_dict)
for hint_dict in hints_list]
new_content_id_list = [
hint.hint_content.content_id for hint in self.interaction.hints]
self._update_content_ids_in_assets(
old_content_id_list, new_content_id_list)
def update_interaction_solution(self, solution_dict):
old_content_id_list = []
new_content_id_list = []
if self.interaction.solution:
old_content_id_list.append(
self.interaction.solution.explanation.content_id)
if solution_dict is not None:
if not isinstance(solution_dict, dict):
raise Exception(
'Expected solution to be a dict, received %s'
% solution_dict)
self.interaction.solution = Solution.from_dict(
self.interaction.id, solution_dict)
new_content_id_list.append(
self.interaction.solution.explanation.content_id)
else:
self.interaction.solution = None
self._update_content_ids_in_assets(
old_content_id_list, new_content_id_list)
def update_recorded_voiceovers(self, recorded_voiceovers):
self.recorded_voiceovers = recorded_voiceovers
def update_written_translations(self, written_translations):
self.written_translations = written_translations
def update_solicit_answer_details(self, solicit_answer_details):
if not isinstance(solicit_answer_details, bool):
raise Exception(
'Expected solicit_answer_details to be a boolean, received %s'
% solicit_answer_details)
self.solicit_answer_details = solicit_answer_details
def _get_all_translatable_content(self):
content_id_to_html = {}
content_id_to_html[self.content.content_id] = self.content.html
# TODO(#6178): Remove empty html checks once we add a validation
# check that ensures each content in state should be non-empty html.
default_outcome = self.interaction.default_outcome
if default_outcome is not None and default_outcome.feedback.html != '':
content_id_to_html[default_outcome.feedback.content_id] = (
default_outcome.feedback.html)
for answer_group in self.interaction.answer_groups:
if answer_group.outcome.feedback.html != '':
content_id_to_html[answer_group.outcome.feedback.content_id] = (
answer_group.outcome.feedback.html)
for hint in self.interaction.hints:
if hint.hint_content.html != '':
content_id_to_html[hint.hint_content.content_id] = (
hint.hint_content.html)
solution = self.interaction.solution
if solution is not None and solution.explanation.html != '':
content_id_to_html[solution.explanation.content_id] = (
solution.explanation.html)
return content_id_to_html
def get_content_id_mapping_needing_translations(self, language_code):
content_id_to_html = self._get_all_translatable_content()
available_translation_content_ids = (
self.written_translations
.get_content_ids_that_are_correctly_translated(language_code))
for content_id in available_translation_content_ids:
del content_id_to_html[content_id]
# TODO(#7571): Add functionality to return the list of
# translations which needs update.
return content_id_to_html
def to_dict(self):
return {
'content': self.content.to_dict(),
'param_changes': [param_change.to_dict()
for param_change in self.param_changes],
'interaction': self.interaction.to_dict(),
'classifier_model_id': self.classifier_model_id,
'recorded_voiceovers': self.recorded_voiceovers.to_dict(),
'written_translations': self.written_translations.to_dict(),
'solicit_answer_details': self.solicit_answer_details
}
@classmethod
def from_dict(cls, state_dict):
return cls(
SubtitledHtml.from_dict(state_dict['content']),
[param_domain.ParamChange.from_dict(param)
for param in state_dict['param_changes']],
InteractionInstance.from_dict(state_dict['interaction']),
RecordedVoiceovers.from_dict(state_dict['recorded_voiceovers']),
WrittenTranslations.from_dict(state_dict['written_translations']),
state_dict['solicit_answer_details'],
state_dict['classifier_model_id'])
@classmethod
def create_default_state(
cls, default_dest_state_name, is_initial_state=False):
content_html = (
feconf.DEFAULT_INIT_STATE_CONTENT_STR if is_initial_state else '')
content_id = feconf.DEFAULT_NEW_STATE_CONTENT_ID
return cls(
SubtitledHtml(content_id, content_html),
[],
InteractionInstance.create_default_interaction(
default_dest_state_name),
RecordedVoiceovers.from_dict(copy.deepcopy(
feconf.DEFAULT_RECORDED_VOICEOVERS)),
WrittenTranslations.from_dict(
copy.deepcopy(feconf.DEFAULT_WRITTEN_TRANSLATIONS)),
False)
@classmethod
def convert_html_fields_in_state(cls, state_dict, conversion_fn):
state_dict['content']['html'] = (
conversion_fn(state_dict['content']['html']))
if state_dict['interaction']['default_outcome']:
interaction_feedback_html = state_dict[
'interaction']['default_outcome']['feedback']['html']
state_dict['interaction']['default_outcome']['feedback'][
'html'] = conversion_fn(interaction_feedback_html)
for answer_group_index, answer_group in enumerate(
state_dict['interaction']['answer_groups']):
answer_group_html = answer_group['outcome']['feedback']['html']
state_dict['interaction']['answer_groups'][
answer_group_index]['outcome']['feedback']['html'] = (
conversion_fn(answer_group_html))
if state_dict['interaction']['id'] == 'ItemSelectionInput':
for rule_spec_index, rule_spec in enumerate(
answer_group['rule_specs']):
for x_index, x in enumerate(rule_spec['inputs']['x']):
state_dict['interaction']['answer_groups'][
answer_group_index]['rule_specs'][
rule_spec_index]['inputs']['x'][x_index] = (
conversion_fn(x))
for hint_index, hint in enumerate(
state_dict['interaction']['hints']):
hint_html = hint['hint_content']['html']
state_dict['interaction']['hints'][hint_index][
'hint_content']['html'] = conversion_fn(hint_html)
if state_dict['interaction']['solution']:
solution_html = state_dict[
'interaction']['solution']['explanation']['html']
state_dict['interaction']['solution']['explanation']['html'] = (
conversion_fn(solution_html))
if state_dict['interaction']['id'] in (
'ItemSelectionInput', 'MultipleChoiceInput'):
for value_index, value in enumerate(
state_dict['interaction']['customization_args'][
'choices']['value']):
state_dict['interaction']['customization_args'][
'choices']['value'][value_index] = conversion_fn(value)
return state_dict
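# Illustrative sketch (not part of the original module): applying
# convert_html_fields_in_state with a toy conversion function. Any callable
# that maps an HTML string to an HTML string fits; schema migrations would
# pass their upgrade function here.
def _example_convert_html(state_dict):
    return State.convert_html_fields_in_state(
        state_dict, lambda html: html.replace('<b>', '<strong>'))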
| true
| true
|
1c46bcd3d9c7631a1c1fc9bbcad0750ae3adc519
| 159
|
py
|
Python
|
src/dash_init.py
|
JavaScriipt/iHashTag
|
3b6e95fde0e4b7f35e074c0b0733f2b98bc7763a
|
[
"CC0-1.0"
] | null | null | null |
src/dash_init.py
|
JavaScriipt/iHashTag
|
3b6e95fde0e4b7f35e074c0b0733f2b98bc7763a
|
[
"CC0-1.0"
] | null | null | null |
src/dash_init.py
|
JavaScriipt/iHashTag
|
3b6e95fde0e4b7f35e074c0b0733f2b98bc7763a
|
[
"CC0-1.0"
] | null | null | null |
import os
file = open("resultados.txt", "w")
file.write("Timestamp, Muy Positivos, Muy Negativos, Neutros, Negativos, Muy Negativos, Average\n")
file.close()
| 26.5
| 99
| 0.735849
|
import os
file = open("resultados.txt", "w")
file.write("Timestamp, Muy Positivos, Muy Negativos, Neutros, Negativos, Muy Negativos, Average\n")
file.close()
| true
| true
|
1c46bcdb1d10c9fe63a5f971609c2b06295d9890
| 1,926
|
py
|
Python
|
setup.py
|
wj-Mcat/python-wechaty-puppet-official-account
|
92e762b0345c1faab2563d6da302efa4de273425
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
wj-Mcat/python-wechaty-puppet-official-account
|
92e762b0345c1faab2563d6da302efa4de273425
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
wj-Mcat/python-wechaty-puppet-official-account
|
92e762b0345c1faab2563d6da302efa4de273425
|
[
"Apache-2.0"
] | null | null | null |
"""
setup
"""
import os
import semver
import setuptools
def versioning(version: str) -> str:
"""
version to specification
X.Y.Z -> X.Y.devZ
"""
sem_ver = semver.parse(version)
major = sem_ver['major']
minor = sem_ver['minor']
patch = str(sem_ver['patch'])
fin_ver = '%d.%d.%s' % (
major,
minor,
patch,
)
return fin_ver
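# Illustrative usage (not part of the original file): semver.parse raises on
# malformed input, so versioning doubles as a format check.
def _example_versioning():
    assert versioning('1.2.3') == '1.2.3'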
def get_version() -> str:
"""
read version from VERSION file
"""
version = '0.0.0'
with open(
os.path.join(
os.path.dirname(__file__),
'VERSION'
)
) as version_fh:
# Get X.Y.Z
version = version_fh.read().strip()
        # Re-emit X.Y.Z through versioning() to validate its format.
version = versioning(version)
return version
def get_long_description() -> str:
"""get long_description"""
with open('README.md', 'r') as readme_fh:
return readme_fh.read()
def get_install_requires() -> list:
"""get install_requires"""
with open('requirements.txt', 'r') as requirements_fh:
return requirements_fh.read().splitlines()
setuptools.setup(
name='wechaty-puppet-official-account',
version=get_version(),
author='wj-Mcat',
author_email='wjmcater@gmail.com',
description='Wechaty Puppet for WeChat Official Account',
long_description=get_long_description(),
long_description_content_type='text/markdown',
license='Apache-2.0',
url='https://github.com/wechaty/python-wechaty-puppet-official-account',
packages=setuptools.find_packages('src'),
package_dir={'': 'src'},
install_requires=get_install_requires(),
# packages=setuptools.find_packages('wip'),
# package_dir={'': 'wip'},
classifiers=[
'Programming Language :: Python :: 3.7',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
],
)
| 23.204819
| 76
| 0.609034
|
import os
import semver
import setuptools
def versioning(version: str) -> str:
sem_ver = semver.parse(version)
major = sem_ver['major']
minor = sem_ver['minor']
patch = str(sem_ver['patch'])
fin_ver = '%d.%d.%s' % (
major,
minor,
patch,
)
return fin_ver
def get_version() -> str:
version = '0.0.0'
with open(
os.path.join(
os.path.dirname(__file__),
'VERSION'
)
) as version_fh:
version = version_fh.read().strip()
version = versioning(version)
return version
def get_long_description() -> str:
with open('README.md', 'r') as readme_fh:
return readme_fh.read()
def get_install_requires() -> list:
with open('requirements.txt', 'r') as requirements_fh:
return requirements_fh.read().splitlines()
setuptools.setup(
name='wechaty-puppet-official-account',
version=get_version(),
author='wj-Mcat',
author_email='wjmcater@gmail.com',
description='Wechaty Puppet for WeChat Official Account',
long_description=get_long_description(),
long_description_content_type='text/markdown',
license='Apache-2.0',
url='https://github.com/wechaty/python-wechaty-puppet-official-account',
packages=setuptools.find_packages('src'),
package_dir={'': 'src'},
install_requires=get_install_requires(),
classifiers=[
'Programming Language :: Python :: 3.7',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
],
)
| true
| true
|
1c46bf0877dd01db082a6f46e72eeec5ee132dde
| 5,847
|
py
|
Python
|
scripts/inspect_un_data_sets.py
|
arwhyte/SI664-scripts
|
99daaac123ebdbfb0fbca59251f711efb9a7d39f
|
[
"MIT"
] | null | null | null |
scripts/inspect_un_data_sets.py
|
arwhyte/SI664-scripts
|
99daaac123ebdbfb0fbca59251f711efb9a7d39f
|
[
"MIT"
] | null | null | null |
scripts/inspect_un_data_sets.py
|
arwhyte/SI664-scripts
|
99daaac123ebdbfb0fbca59251f711efb9a7d39f
|
[
"MIT"
] | 1
|
2018-12-08T16:43:45.000Z
|
2018-12-08T16:43:45.000Z
|
import logging
import os
import pandas as pd
import sys as sys
def main(argv=None):
"""
Utilize Pandas library to read in both UNSD M49 country and area .csv file
(tab delimited) as well as the UNESCO heritage site .csv file (tab delimited).
Extract regions, sub-regions, intermediate regions, country and areas, and
other column data. Filter out duplicate values and NaN values and sort the
series in alphabetical order. Write out each series to a .csv file for inspection.
"""
if argv is None:
argv = sys.argv
msg = [
'Source file read {0}',
'UNSD M49 regions written to file {0}',
'UNSD M49 sub-regions written to file {0}',
'UNSD M49 intermediate regions written to file {0}',
'UNSD M49 countries and areas written to file {0}',
'UNSD M49 development status written to file {0}',
'UNESCO heritage site countries/areas written to file {0}',
'UNESCO heritage site categories written to file {0}',
'UNESCO heritage site regions written to file {0}',
'UNESCO heritage site transboundary values written to file {0}'
]
# Setting logging format and default level
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG)
# Read in United Nations Statistical Division (UNSD) M49 Standard data set (tabbed separator)
unsd_csv = './input/csv/un_area_country_codes-m49.csv'
unsd_data_frame = read_csv(unsd_csv, '\t')
logging.info(msg[0].format(os.path.abspath(unsd_csv)))
# Write regions to a .csv file.
unsd_region = extract_filtered_series(unsd_data_frame, 'region_name')
unsd_region_csv = './output/unesco/unsd_region.csv'
write_series_to_csv(unsd_region, unsd_region_csv, '\t', False)
logging.info(msg[1].format(os.path.abspath(unsd_region_csv)))
# Write sub-regions to a .csv file.
unsd_sub_region = extract_filtered_series(unsd_data_frame, 'sub_region_name')
unsd_sub_region_csv = './output/unesco/unsd_sub_region.csv'
write_series_to_csv(unsd_sub_region, unsd_sub_region_csv, '\t', False)
logging.info(msg[2].format(os.path.abspath(unsd_sub_region_csv)))
# Write intermediate_regions to a .csv file.
unsd_intermed_region = extract_filtered_series(unsd_data_frame, 'intermediate_region_name')
unsd_intermed_region_csv = './output/unesco/unsd_intermed_region.csv'
write_series_to_csv(unsd_intermed_region, unsd_intermed_region_csv, '\t', False)
logging.info(msg[3].format(os.path.abspath(unsd_intermed_region_csv)))
# Write countries or areas to a .csv file.
unsd_country_area = extract_filtered_series(unsd_data_frame, 'country_area_name')
unsd_country_area_csv = './output/unesco/unsd_country_area.csv'
write_series_to_csv(unsd_country_area, unsd_country_area_csv, '\t', False)
logging.info(msg[4].format(os.path.abspath(unsd_country_area_csv)))
# Write development status to a .csv file.
unsd_dev_status = extract_filtered_series(unsd_data_frame, 'country_area_development_status')
unsd_dev_status_csv = './output/unesco/unsd_dev_status.csv'
write_series_to_csv(unsd_dev_status, unsd_dev_status_csv, '\t', False)
logging.info(msg[5].format(os.path.abspath(unsd_dev_status_csv)))
# Read UNESCO heritage sites data (tabbed separator)
unesco_csv = './input/csv/unesco_heritage_sites.csv'
unesco_data_frame = read_csv(unesco_csv, '\t')
logging.info(msg[0].format(os.path.abspath(unesco_csv)))
# Write UNESCO heritage site countries and areas to a .csv file
unesco_country_area = extract_filtered_series(unesco_data_frame, 'country_area')
unesco_country_area_csv = './output/unesco/unesco_heritage_site_country_area.csv'
write_series_to_csv(unesco_country_area, unesco_country_area_csv, '\t', False)
logging.info(msg[6].format(os.path.abspath(unesco_country_area_csv)))
# Write UNESCO heritage site categories to a .csv file
unesco_site_category = extract_filtered_series(unesco_data_frame, 'category')
unesco_site_category_csv = './output/unesco/unesco_heritage_site_category.csv'
write_series_to_csv(unesco_site_category, unesco_site_category_csv, '\t', False)
logging.info(msg[7].format(os.path.abspath(unesco_site_category_csv)))
# Write UNESCO heritage site regions to a .csv file
unesco_region = extract_filtered_series(unesco_data_frame, 'region')
unesco_region_csv = './output/unesco/unesco_heritage_site_region.csv'
write_series_to_csv(unesco_region, unesco_region_csv, '\t', False)
logging.info(msg[8].format(os.path.abspath(unesco_region_csv)))
# Write UNESCO heritage site transboundary values to a .csv file
unesco_transboundary = extract_filtered_series(unesco_data_frame, 'transboundary')
unesco_transboundary_csv = './output/unesco/unesco_heritage_site_transboundary.csv'
write_series_to_csv(unesco_transboundary, unesco_transboundary_csv, '\t', False)
logging.info(msg[9].format(os.path.abspath(unesco_transboundary_csv)))
def extract_filtered_series(data_frame, column_name):
"""
    Returns a filtered Pandas Series (one-dimensional ndarray) from a targeted column.
    Duplicate values and NaN or blank values are dropped from the result set which is
    returned sorted (ascending).
    :param data_frame: Pandas DataFrame
    :param column_name: column name string
    :return: Pandas Series one-dimensional ndarray
    """
    return data_frame[column_name].drop_duplicates().dropna().sort_values()
def read_csv(path, delimiter=','):
"""
Utilize Pandas to read in *.csv file.
:param path: file path
:param delimiter: field delimiter
:return: Pandas DataFrame
"""
return pd.read_csv(path, sep=delimiter, encoding='utf-8', engine='python')
def write_series_to_csv(series, path, delimiter=',', row_name=True):
"""
Write Pandas DataFrame to a *.csv file.
:param series: Pandas one dimensional ndarray
:param path: file path
:param delimiter: field delimiter
:param row_name: include row name boolean
"""
series.to_csv(path, sep=delimiter, index=row_name)
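# Illustrative sketch (not part of the original script): the
# extract-then-write pipeline above on an in-memory frame, so the behaviour
# can be checked without the UNSD/UNESCO input files. Writes demo_regions.csv
# into the working directory.
def _demo_extract_filtered_series():
    frame = pd.DataFrame({'region_name': ['Africa', 'Africa', None, 'Asia']})
    series = extract_filtered_series(frame, 'region_name')  # ['Africa', 'Asia']
    write_series_to_csv(series, 'demo_regions.csv', '\t', False)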
if __name__ == '__main__':
sys.exit(main())
| 43.962406
| 94
| 0.783308
|
import logging
import os
import pandas as pd
import sys as sys
def main(argv=None):
if argv is None:
argv = sys.argv
msg = [
'Source file read {0}',
'UNSD M49 regions written to file {0}',
'UNSD M49 sub-regions written to file {0}',
'UNSD M49 intermediate regions written to file {0}',
'UNSD M49 countries and areas written to file {0}',
'UNSD M49 development status written to file {0}',
'UNESCO heritage site countries/areas written to file {0}',
'UNESCO heritage site categories written to file {0}',
'UNESCO heritage site regions written to file {0}',
'UNESCO heritage site transboundary values written to file {0}'
]
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG)
unsd_csv = './input/csv/un_area_country_codes-m49.csv'
unsd_data_frame = read_csv(unsd_csv, '\t')
logging.info(msg[0].format(os.path.abspath(unsd_csv)))
unsd_region = extract_filtered_series(unsd_data_frame, 'region_name')
unsd_region_csv = './output/unesco/unsd_region.csv'
write_series_to_csv(unsd_region, unsd_region_csv, '\t', False)
logging.info(msg[1].format(os.path.abspath(unsd_region_csv)))
unsd_sub_region = extract_filtered_series(unsd_data_frame, 'sub_region_name')
unsd_sub_region_csv = './output/unesco/unsd_sub_region.csv'
write_series_to_csv(unsd_sub_region, unsd_sub_region_csv, '\t', False)
logging.info(msg[2].format(os.path.abspath(unsd_sub_region_csv)))
unsd_intermed_region = extract_filtered_series(unsd_data_frame, 'intermediate_region_name')
unsd_intermed_region_csv = './output/unesco/unsd_intermed_region.csv'
write_series_to_csv(unsd_intermed_region, unsd_intermed_region_csv, '\t', False)
logging.info(msg[3].format(os.path.abspath(unsd_intermed_region_csv)))
unsd_country_area = extract_filtered_series(unsd_data_frame, 'country_area_name')
unsd_country_area_csv = './output/unesco/unsd_country_area.csv'
write_series_to_csv(unsd_country_area, unsd_country_area_csv, '\t', False)
logging.info(msg[4].format(os.path.abspath(unsd_country_area_csv)))
unsd_dev_status = extract_filtered_series(unsd_data_frame, 'country_area_development_status')
unsd_dev_status_csv = './output/unesco/unsd_dev_status.csv'
write_series_to_csv(unsd_dev_status, unsd_dev_status_csv, '\t', False)
logging.info(msg[5].format(os.path.abspath(unsd_dev_status_csv)))
unesco_csv = './input/csv/unesco_heritage_sites.csv'
unesco_data_frame = read_csv(unesco_csv, '\t')
logging.info(msg[0].format(os.path.abspath(unesco_csv)))
unesco_country_area = extract_filtered_series(unesco_data_frame, 'country_area')
unesco_country_area_csv = './output/unesco/unesco_heritage_site_country_area.csv'
write_series_to_csv(unesco_country_area, unesco_country_area_csv, '\t', False)
logging.info(msg[6].format(os.path.abspath(unesco_country_area_csv)))
unesco_site_category = extract_filtered_series(unesco_data_frame, 'category')
unesco_site_category_csv = './output/unesco/unesco_heritage_site_category.csv'
write_series_to_csv(unesco_site_category, unesco_site_category_csv, '\t', False)
logging.info(msg[7].format(os.path.abspath(unesco_site_category_csv)))
unesco_region = extract_filtered_series(unesco_data_frame, 'region')
unesco_region_csv = './output/unesco/unesco_heritage_site_region.csv'
write_series_to_csv(unesco_region, unesco_region_csv, '\t', False)
logging.info(msg[8].format(os.path.abspath(unesco_region_csv)))
unesco_transboundary = extract_filtered_series(unesco_data_frame, 'transboundary')
unesco_transboundary_csv = './output/unesco/unesco_heritage_site_transboundary.csv'
write_series_to_csv(unesco_transboundary, unesco_transboundary_csv, '\t', False)
logging.info(msg[9].format(os.path.abspath(unesco_transboundary_csv)))
def extract_filtered_series(data_frame, column_name):
    return data_frame[column_name].drop_duplicates().dropna().sort_values()
def read_csv(path, delimiter=','):
return pd.read_csv(path, sep=delimiter, encoding='utf-8', engine='python')
def write_series_to_csv(series, path, delimiter=',', row_name=True):
series.to_csv(path, sep=delimiter, index=row_name)
if __name__ == '__main__':
sys.exit(main())
| true
| true
|
1c46bf9669398d790db830f2381d8c2ac1675ffc
| 4,642
|
py
|
Python
|
tests/unit/workflows/java_gradle/test_gradle.py
|
verdimrc/aws-lambda-builders
|
67f42dd936fd4f0c517c38acb8b6a170156549ec
|
[
"Apache-2.0"
] | 1
|
2020-07-21T20:16:12.000Z
|
2020-07-21T20:16:12.000Z
|
tests/unit/workflows/java_gradle/test_gradle.py
|
verdimrc/aws-lambda-builders
|
67f42dd936fd4f0c517c38acb8b6a170156549ec
|
[
"Apache-2.0"
] | 1
|
2020-06-26T12:36:39.000Z
|
2020-06-26T12:36:39.000Z
|
tests/unit/workflows/java_gradle/test_gradle.py
|
verdimrc/aws-lambda-builders
|
67f42dd936fd4f0c517c38acb8b6a170156549ec
|
[
"Apache-2.0"
] | 1
|
2020-04-02T19:12:39.000Z
|
2020-04-02T19:12:39.000Z
|
import subprocess
from unittest import TestCase
from mock import patch
from aws_lambda_builders.binary_path import BinaryPath
from aws_lambda_builders.workflows.java_gradle.gradle import (
SubprocessGradle,
GradleExecutionError,
BuildFileNotFoundError,
)
class FakePopen:
def __init__(self, out=b"out", err=b"err", retcode=0):
self.out = out
self.err = err
self.returncode = retcode
def communicate(self):
return self.out, self.err
def wait(self):
pass
class TestSubprocessGradle(TestCase):
@patch("aws_lambda_builders.workflows.java_gradle.utils.OSUtils")
def setUp(self, MockOSUtils):
self.os_utils = MockOSUtils.return_value
self.os_utils.exists.side_effect = lambda d: True
self.popen = FakePopen()
self.os_utils.popen.side_effect = [self.popen]
self.gradle_path = "/path/to/gradle"
self.gradle_binary = BinaryPath(None, None, "gradle", binary_path=self.gradle_path)
self.source_dir = "/foo/bar/baz"
self.manifest_path = "/foo/bar/baz/build.gradle"
self.init_script = "/path/to/init"
def test_no_os_utils_build_init_throws(self):
with self.assertRaises(ValueError) as err_assert:
SubprocessGradle(gradle_binary=self.gradle_binary)
self.assertEquals(err_assert.exception.args[0], "Must provide OSUtils")
def test_no_gradle_exec_init_throws(self):
with self.assertRaises(ValueError) as err_assert:
SubprocessGradle(None)
self.assertEquals(err_assert.exception.args[0], "Must provide Gradle BinaryPath")
def test_no_build_file_throws(self):
self.os_utils.exists.side_effect = lambda d: False
gradle = SubprocessGradle(gradle_binary=self.gradle_binary, os_utils=self.os_utils)
with self.assertRaises(BuildFileNotFoundError) as raised:
gradle.build(self.source_dir, self.manifest_path)
self.assertEquals(
raised.exception.args[0], "Gradle Failed: Gradle build file not found: %s" % self.manifest_path
)
def test_build_no_init_script(self):
gradle = SubprocessGradle(gradle_binary=self.gradle_binary, os_utils=self.os_utils)
gradle.build(self.source_dir, self.manifest_path)
self.os_utils.popen.assert_called_with(
[self.gradle_path, "build", "--build-file", self.manifest_path],
cwd=self.source_dir,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
)
def test_gradlew_path_is_dummy_uses_gradle_binary(self):
gradle = SubprocessGradle(gradle_binary=self.gradle_binary, os_utils=self.os_utils)
gradle.build(self.source_dir, self.manifest_path)
self.os_utils.popen.assert_called_with(
[self.gradle_path, "build", "--build-file", self.manifest_path],
cwd=self.source_dir,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
)
def test_build_with_init_script(self):
gradle = SubprocessGradle(gradle_binary=self.gradle_binary, os_utils=self.os_utils)
gradle.build(self.source_dir, self.manifest_path, init_script_path=self.init_script)
self.os_utils.popen.assert_called_with(
[self.gradle_path, "build", "--build-file", self.manifest_path, "--init-script", self.init_script],
cwd=self.source_dir,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
)
def test_raises_exception_if_retcode_not_0(self):
self.popen = FakePopen(retcode=1, err=b"Some Error Message")
self.os_utils.popen.side_effect = [self.popen]
gradle = SubprocessGradle(gradle_binary=self.gradle_binary, os_utils=self.os_utils)
with self.assertRaises(GradleExecutionError) as err:
gradle.build(self.source_dir, self.manifest_path)
self.assertEquals(err.exception.args[0], "Gradle Failed: Some Error Message")
def test_includes_build_properties_in_command(self):
gradle = SubprocessGradle(gradle_binary=self.gradle_binary, os_utils=self.os_utils)
gradle.build(self.source_dir, self.manifest_path, init_script_path=self.init_script, properties={"foo": "bar"})
self.os_utils.popen.assert_called_with(
[
self.gradle_path,
"build",
"--build-file",
self.manifest_path,
"-Dfoo=bar",
"--init-script",
self.init_script,
],
cwd=self.source_dir,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
)
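# Illustrative sketch (not part of the original tests): the argv shape the
# assertions above expect, with property flags landing between the build
# file and the init script. The sorted() call is an assumption made here for
# determinism, not something the production code is known to do.
def _expected_gradle_argv(gradle_path, manifest, init_script, properties):
    argv = [gradle_path, "build", "--build-file", manifest]
    argv.extend("-D%s=%s" % (name, value)
                for name, value in sorted(properties.items()))
    argv.extend(["--init-script", init_script])
    return argv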
| 40.719298
| 119
| 0.673632
|
import subprocess
from unittest import TestCase
from mock import patch
from aws_lambda_builders.binary_path import BinaryPath
from aws_lambda_builders.workflows.java_gradle.gradle import (
SubprocessGradle,
GradleExecutionError,
BuildFileNotFoundError,
)
class FakePopen:
def __init__(self, out=b"out", err=b"err", retcode=0):
self.out = out
self.err = err
self.returncode = retcode
def communicate(self):
return self.out, self.err
def wait(self):
pass
class TestSubprocessGradle(TestCase):
@patch("aws_lambda_builders.workflows.java_gradle.utils.OSUtils")
def setUp(self, MockOSUtils):
self.os_utils = MockOSUtils.return_value
self.os_utils.exists.side_effect = lambda d: True
self.popen = FakePopen()
self.os_utils.popen.side_effect = [self.popen]
self.gradle_path = "/path/to/gradle"
self.gradle_binary = BinaryPath(None, None, "gradle", binary_path=self.gradle_path)
self.source_dir = "/foo/bar/baz"
self.manifest_path = "/foo/bar/baz/build.gradle"
self.init_script = "/path/to/init"
def test_no_os_utils_build_init_throws(self):
with self.assertRaises(ValueError) as err_assert:
SubprocessGradle(gradle_binary=self.gradle_binary)
self.assertEquals(err_assert.exception.args[0], "Must provide OSUtils")
def test_no_gradle_exec_init_throws(self):
with self.assertRaises(ValueError) as err_assert:
SubprocessGradle(None)
self.assertEquals(err_assert.exception.args[0], "Must provide Gradle BinaryPath")
def test_no_build_file_throws(self):
self.os_utils.exists.side_effect = lambda d: False
gradle = SubprocessGradle(gradle_binary=self.gradle_binary, os_utils=self.os_utils)
with self.assertRaises(BuildFileNotFoundError) as raised:
gradle.build(self.source_dir, self.manifest_path)
self.assertEquals(
raised.exception.args[0], "Gradle Failed: Gradle build file not found: %s" % self.manifest_path
)
def test_build_no_init_script(self):
gradle = SubprocessGradle(gradle_binary=self.gradle_binary, os_utils=self.os_utils)
gradle.build(self.source_dir, self.manifest_path)
self.os_utils.popen.assert_called_with(
[self.gradle_path, "build", "--build-file", self.manifest_path],
cwd=self.source_dir,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
)
def test_gradlew_path_is_dummy_uses_gradle_binary(self):
gradle = SubprocessGradle(gradle_binary=self.gradle_binary, os_utils=self.os_utils)
gradle.build(self.source_dir, self.manifest_path)
self.os_utils.popen.assert_called_with(
[self.gradle_path, "build", "--build-file", self.manifest_path],
cwd=self.source_dir,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
)
def test_build_with_init_script(self):
gradle = SubprocessGradle(gradle_binary=self.gradle_binary, os_utils=self.os_utils)
gradle.build(self.source_dir, self.manifest_path, init_script_path=self.init_script)
self.os_utils.popen.assert_called_with(
[self.gradle_path, "build", "--build-file", self.manifest_path, "--init-script", self.init_script],
cwd=self.source_dir,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
)
def test_raises_exception_if_retcode_not_0(self):
self.popen = FakePopen(retcode=1, err=b"Some Error Message")
self.os_utils.popen.side_effect = [self.popen]
gradle = SubprocessGradle(gradle_binary=self.gradle_binary, os_utils=self.os_utils)
with self.assertRaises(GradleExecutionError) as err:
gradle.build(self.source_dir, self.manifest_path)
self.assertEquals(err.exception.args[0], "Gradle Failed: Some Error Message")
def test_includes_build_properties_in_command(self):
gradle = SubprocessGradle(gradle_binary=self.gradle_binary, os_utils=self.os_utils)
gradle.build(self.source_dir, self.manifest_path, init_script_path=self.init_script, properties={"foo": "bar"})
self.os_utils.popen.assert_called_with(
[
self.gradle_path,
"build",
"--build-file",
self.manifest_path,
"-Dfoo=bar",
"--init-script",
self.init_script,
],
cwd=self.source_dir,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
)
| true
| true
|
1c46c13896c2f68690261b134a22b45479e29be0
| 4,599
|
py
|
Python
|
test/connector/exchange/crypto_com/test_crypto_com_order_book_tracker.py
|
BGTCapital/hummingbot
|
2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242
|
[
"Apache-2.0"
] | 3,027
|
2019-04-04T18:52:17.000Z
|
2022-03-30T09:38:34.000Z
|
test/connector/exchange/crypto_com/test_crypto_com_order_book_tracker.py
|
BGTCapital/hummingbot
|
2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242
|
[
"Apache-2.0"
] | 4,080
|
2019-04-04T19:51:11.000Z
|
2022-03-31T23:45:21.000Z
|
test/connector/exchange/crypto_com/test_crypto_com_order_book_tracker.py
|
BGTCapital/hummingbot
|
2c50f50d67cedccf0ef4d8e3f4c8cdce3dc87242
|
[
"Apache-2.0"
] | 1,342
|
2019-04-04T20:50:53.000Z
|
2022-03-31T15:22:36.000Z
|
#!/usr/bin/env python
from os.path import join, realpath
import sys; sys.path.insert(0, realpath(join(__file__, "../../../../../")))
import math
import time
import asyncio
import logging
import unittest
from typing import Dict, Optional, List
from hummingbot.core.event.event_logger import EventLogger
from hummingbot.core.event.events import OrderBookEvent, OrderBookTradeEvent, TradeType
from hummingbot.connector.exchange.crypto_com.crypto_com_order_book_tracker import CryptoComOrderBookTracker
from hummingbot.connector.exchange.crypto_com.crypto_com_api_order_book_data_source import CryptoComAPIOrderBookDataSource
from hummingbot.core.data_type.order_book import OrderBook
class CryptoComOrderBookTrackerUnitTest(unittest.TestCase):
order_book_tracker: Optional[CryptoComOrderBookTracker] = None
events: List[OrderBookEvent] = [
OrderBookEvent.TradeEvent
]
trading_pairs: List[str] = [
"BTC-USDT",
"ETH-USDT",
]
@classmethod
def setUpClass(cls):
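        # Start one shared tracker for the test class and block until live order books are ready.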
cls.ev_loop: asyncio.BaseEventLoop = asyncio.get_event_loop()
cls.order_book_tracker: CryptoComOrderBookTracker = CryptoComOrderBookTracker(cls.trading_pairs)
cls.order_book_tracker.start()
cls.ev_loop.run_until_complete(cls.wait_til_tracker_ready())
@classmethod
async def wait_til_tracker_ready(cls):
while True:
if len(cls.order_book_tracker.order_books) > 0:
print("Initialized real-time order books.")
return
await asyncio.sleep(1)
async def run_parallel_async(self, *tasks, timeout=None):
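        # Gather the tasks into one future and poll it once per second, raising if the optional timeout elapses.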
future: asyncio.Future = asyncio.ensure_future(asyncio.gather(*tasks))
timer = 0
while not future.done():
if timeout and timer > timeout:
raise Exception("Timeout running parallel async tasks in tests")
timer += 1
now = time.time()
_next_iteration = now // 1.0 + 1 # noqa: F841
await asyncio.sleep(1.0)
return future.result()
def run_parallel(self, *tasks):
return self.ev_loop.run_until_complete(self.run_parallel_async(*tasks))
def setUp(self):
self.event_logger = EventLogger()
for event_tag in self.events:
for trading_pair, order_book in self.order_book_tracker.order_books.items():
order_book.add_listener(event_tag, self.event_logger)
def test_order_book_trade_event_emission(self):
"""
        Tests that the order book tracker retrieves order book trade messages from the exchange and emits order book
        trade events after correctly parsing them
"""
self.run_parallel(self.event_logger.wait_for(OrderBookTradeEvent))
for ob_trade_event in self.event_logger.event_log:
self.assertTrue(type(ob_trade_event) == OrderBookTradeEvent)
self.assertTrue(ob_trade_event.trading_pair in self.trading_pairs)
self.assertTrue(type(ob_trade_event.timestamp) in [float, int])
self.assertTrue(type(ob_trade_event.amount) == float)
self.assertTrue(type(ob_trade_event.price) == float)
self.assertTrue(type(ob_trade_event.type) == TradeType)
# datetime is in seconds
self.assertTrue(math.ceil(math.log10(ob_trade_event.timestamp)) == 10)
self.assertTrue(ob_trade_event.amount > 0)
self.assertTrue(ob_trade_event.price > 0)
def test_tracker_integrity(self):
        # Wait 10 seconds to process some diffs.
self.ev_loop.run_until_complete(asyncio.sleep(10.0))
order_books: Dict[str, OrderBook] = self.order_book_tracker.order_books
eth_usdt: OrderBook = order_books["ETH-USDT"]
self.assertIsNot(eth_usdt.last_diff_uid, 0)
self.assertGreaterEqual(eth_usdt.get_price_for_volume(True, 10).result_price,
eth_usdt.get_price(True))
self.assertLessEqual(eth_usdt.get_price_for_volume(False, 10).result_price,
eth_usdt.get_price(False))
def test_api_get_last_traded_prices(self):
prices = self.ev_loop.run_until_complete(
CryptoComAPIOrderBookDataSource.get_last_traded_prices(["BTC-USDT", "LTC-BTC"]))
for key, value in prices.items():
print(f"{key} last_trade_price: {value}")
self.assertGreater(prices["BTC-USDT"], 1000)
self.assertLess(prices["LTC-BTC"], 1)
def main():
logging.basicConfig(level=logging.INFO)
unittest.main()
if __name__ == "__main__":
main()
| 42.583333
| 122
| 0.688845
|
from os.path import join, realpath
import sys; sys.path.insert(0, realpath(join(__file__, "../../../../../")))
import math
import time
import asyncio
import logging
import unittest
from typing import Dict, Optional, List
from hummingbot.core.event.event_logger import EventLogger
from hummingbot.core.event.events import OrderBookEvent, OrderBookTradeEvent, TradeType
from hummingbot.connector.exchange.crypto_com.crypto_com_order_book_tracker import CryptoComOrderBookTracker
from hummingbot.connector.exchange.crypto_com.crypto_com_api_order_book_data_source import CryptoComAPIOrderBookDataSource
from hummingbot.core.data_type.order_book import OrderBook
class CryptoComOrderBookTrackerUnitTest(unittest.TestCase):
order_book_tracker: Optional[CryptoComOrderBookTracker] = None
events: List[OrderBookEvent] = [
OrderBookEvent.TradeEvent
]
trading_pairs: List[str] = [
"BTC-USDT",
"ETH-USDT",
]
@classmethod
def setUpClass(cls):
cls.ev_loop: asyncio.BaseEventLoop = asyncio.get_event_loop()
cls.order_book_tracker: CryptoComOrderBookTracker = CryptoComOrderBookTracker(cls.trading_pairs)
cls.order_book_tracker.start()
cls.ev_loop.run_until_complete(cls.wait_til_tracker_ready())
@classmethod
async def wait_til_tracker_ready(cls):
while True:
if len(cls.order_book_tracker.order_books) > 0:
print("Initialized real-time order books.")
return
await asyncio.sleep(1)
async def run_parallel_async(self, *tasks, timeout=None):
future: asyncio.Future = asyncio.ensure_future(asyncio.gather(*tasks))
timer = 0
while not future.done():
if timeout and timer > timeout:
raise Exception("Timeout running parallel async tasks in tests")
timer += 1
now = time.time()
_next_iteration = now // 1.0 + 1
await asyncio.sleep(1.0)
return future.result()
def run_parallel(self, *tasks):
return self.ev_loop.run_until_complete(self.run_parallel_async(*tasks))
def setUp(self):
self.event_logger = EventLogger()
for event_tag in self.events:
for trading_pair, order_book in self.order_book_tracker.order_books.items():
order_book.add_listener(event_tag, self.event_logger)
def test_order_book_trade_event_emission(self):
self.run_parallel(self.event_logger.wait_for(OrderBookTradeEvent))
for ob_trade_event in self.event_logger.event_log:
self.assertTrue(type(ob_trade_event) == OrderBookTradeEvent)
self.assertTrue(ob_trade_event.trading_pair in self.trading_pairs)
self.assertTrue(type(ob_trade_event.timestamp) in [float, int])
self.assertTrue(type(ob_trade_event.amount) == float)
self.assertTrue(type(ob_trade_event.price) == float)
self.assertTrue(type(ob_trade_event.type) == TradeType)
self.assertTrue(math.ceil(math.log10(ob_trade_event.timestamp)) == 10)
self.assertTrue(ob_trade_event.amount > 0)
self.assertTrue(ob_trade_event.price > 0)
def test_tracker_integrity(self):
self.ev_loop.run_until_complete(asyncio.sleep(10.0))
order_books: Dict[str, OrderBook] = self.order_book_tracker.order_books
eth_usdt: OrderBook = order_books["ETH-USDT"]
self.assertIsNot(eth_usdt.last_diff_uid, 0)
self.assertGreaterEqual(eth_usdt.get_price_for_volume(True, 10).result_price,
eth_usdt.get_price(True))
self.assertLessEqual(eth_usdt.get_price_for_volume(False, 10).result_price,
eth_usdt.get_price(False))
def test_api_get_last_traded_prices(self):
prices = self.ev_loop.run_until_complete(
CryptoComAPIOrderBookDataSource.get_last_traded_prices(["BTC-USDT", "LTC-BTC"]))
for key, value in prices.items():
print(f"{key} last_trade_price: {value}")
self.assertGreater(prices["BTC-USDT"], 1000)
self.assertLess(prices["LTC-BTC"], 1)
def main():
logging.basicConfig(level=logging.INFO)
unittest.main()
if __name__ == "__main__":
main()
| true
| true
|
1c46c1a3d3a1d1d7895e4b0c6561df3c3c4494fb
| 4,771
|
py
|
Python
|
library/wait_for_pid.py
|
dusennn/clickhouse-ansible
|
e1fb665c2afc095c9a46087bf948b633e7bcd6f6
|
[
"Apache-2.0"
] | 2
|
2021-09-27T10:16:17.000Z
|
2021-09-27T10:18:20.000Z
|
library/wait_for_pid.py
|
dusennn/clickhouse-ansible
|
e1fb665c2afc095c9a46087bf948b633e7bcd6f6
|
[
"Apache-2.0"
] | null | null | null |
library/wait_for_pid.py
|
dusennn/clickhouse-ansible
|
e1fb665c2afc095c9a46087bf948b633e7bcd6f6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import binascii
import datetime
import math
import re
import select
import socket
import sys
import time
import os
from ansible.module_utils._text import to_native
def main():
module = AnsibleModule(
argument_spec = dict(
pid=dict(default=None, type='int'),
pid_file=dict(default=None, type='path'),
timeout=dict(default=300, type='int'),
delay=dict(default=0, type='int'),
thread_name_regex=dict(default=None, type='str'),
thread_num=dict(default=1, type='int'),
state=dict(default='present', choices=['present', 'absent']),
sleep=dict(default=1, type='int')
),
)
params = module.params
pid = params['pid']
pid_file = params['pid_file']
timeout = params['timeout']
delay = params['delay']
thread_name_regex = params['thread_name_regex']
thread_num = params['thread_num']
state = params['state']
sleep = params['sleep']
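    # Pre-compile the optional thread-name regex once so the polling loops below stay cheap.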
if thread_name_regex is not None:
compiled_search_re = re.compile(thread_name_regex, re.MULTILINE)
else:
compiled_search_re = None
if pid and pid_file:
module.fail_json(msg="pid and pid_file parameter can not both be passed to wait_for_pid")
start = datetime.datetime.now()
if delay:
time.sleep(delay)
if not pid and not pid_file:
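        # Nothing to watch, so just sleep out the full timeout.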
time.sleep(timeout)
elif state == 'absent':
### first wait for the stop condition
end = start + datetime.timedelta(seconds=timeout)
while datetime.datetime.now() < end:
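            # /proc/<pid>/comm disappears when the process exits, so an IOError here means the stop condition is met.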
try:
if pid_file:
f = open(pid_file)
pid = f.read().strip()
f.close()
f = open("/proc/%s/comm' %s pid")
f.close()
except IOError:
break
except:
break
# Conditions not yet met, wait and try again
time.sleep(params['sleep'])
else:
elapsed = datetime.datetime.now() - start
if pid_file:
module.fail_json(msg="Timeout when waiting for PID:%s to stop." % (pid_file), elapsed=elapsed.seconds)
elif pid:
module.fail_json(msg="Timeout when waiting for PID:%s to be absent." % (pid), elapsed=elapsed.seconds)
elif state == 'present':
### wait for start condition
end = start + datetime.timedelta(seconds=timeout)
while datetime.datetime.now() < end:
try:
if pid_file:
f = open(pid_file)
pid = f.read().strip()
f.close()
f = open('/proc/%s/comm' % pid)
f.close()
except (OSError, IOError):
e = get_exception()
# If anything except file not present, throw an error
if e.errno != 2:
elapsed = datetime.datetime.now() - start
                    module.fail_json(msg="Failed to stat %s, %s" % (pid_file or pid, e.strerror), elapsed=elapsed.seconds)
# file doesn't exist yet, so continue
else:
# process exists. Are there additional things to check?
if not compiled_search_re:
# nope, succeed!
break
try:
matches = 0
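                    # Count threads under /proc/<pid>/task whose comm matches; thread_num matches means success.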
for thread in os.listdir('/proc/%s/task' % pid):
f = open('/proc/%s/task/%s/comm' % (pid, thread))
try:
if re.search(compiled_search_re, f.read()):
matches += 1
finally:
f.close()
if matches >= thread_num:
# found, success!
break
except (OSError, IOError):
pass
# Conditions not yet met, wait and try again
time.sleep(params['sleep'])
else: # while-else
# Timeout expired
elapsed = datetime.datetime.now() - start
if pid_file:
                module.fail_json(msg="Timeout when waiting for PID file %s to be present." % (pid_file), elapsed=elapsed.seconds)
            elif pid:
                module.fail_json(msg="Timeout when waiting for PID:%s to be present." % (pid), elapsed=elapsed.seconds)
elapsed = datetime.datetime.now() - start
module.exit_json(state=state, pid=pid, thread_name_regex=thread_name_regex, pid_file=pid_file, elapsed=elapsed.seconds)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| 35.080882
| 123
| 0.530916
|
import binascii
import datetime
import math
import re
import select
import socket
import sys
import time
import os
from ansible.module_utils._text import to_native
def main():
module = AnsibleModule(
argument_spec = dict(
pid=dict(default=None, type='int'),
pid_file=dict(default=None, type='path'),
timeout=dict(default=300, type='int'),
delay=dict(default=0, type='int'),
thread_name_regex=dict(default=None, type='str'),
thread_num=dict(default=1, type='int'),
state=dict(default='present', choices=['present', 'absent']),
sleep=dict(default=1, type='int')
),
)
params = module.params
pid = params['pid']
pid_file = params['pid_file']
timeout = params['timeout']
delay = params['delay']
thread_name_regex = params['thread_name_regex']
thread_num = params['thread_num']
state = params['state']
sleep = params['sleep']
if thread_name_regex is not None:
compiled_search_re = re.compile(thread_name_regex, re.MULTILINE)
else:
compiled_search_re = None
if pid and pid_file:
module.fail_json(msg="pid and pid_file parameter can not both be passed to wait_for_pid")
start = datetime.datetime.now()
if delay:
time.sleep(delay)
if not pid and not pid_file:
time.sleep(timeout)
elif state == 'absent':
        end = start + datetime.timedelta(seconds=timeout)
        while datetime.datetime.now() < end:
try:
if pid_file:
f = open(pid_file)
pid = f.read().strip()
f.close()
f = open("/proc/%s/comm' %s pid")
f.close()
except IOError:
break
except:
break
# Conditions not yet met, wait and try again
time.sleep(params['sleep'])
else:
elapsed = datetime.datetime.now() - start
if pid_file:
module.fail_json(msg="Timeout when waiting for PID:%s to stop." % (pid_file), elapsed=elapsed.seconds)
elif pid:
module.fail_json(msg="Timeout when waiting for PID:%s to be absent." % (pid), elapsed=elapsed.seconds)
elif state == 'present':
### wait for start condition
end = start + datetime.timedelta(seconds=timeout)
while datetime.datetime.now() < end:
try:
if pid_file:
f = open(pid_file)
pid = f.read().strip()
f.close()
f = open('/proc/%s/comm' % pid)
f.close()
except (OSError, IOError):
e = get_exception()
# If anything except file not present, throw an error
if e.errno != 2:
elapsed = datetime.datetime.now() - start
                    module.fail_json(msg="Failed to stat %s, %s" % (pid_file or pid, e.strerror), elapsed=elapsed.seconds)
# file doesn't exist yet, so continue
else:
if not compiled_search_re:
break
try:
matches = 0
for thread in os.listdir('/proc/%s/task' % pid):
f = open('/proc/%s/task/%s/comm' % (pid, thread))
try:
if re.search(compiled_search_re, f.read()):
matches += 1
finally:
f.close()
if matches >= thread_num:
break
except (OSError, IOError):
pass
time.sleep(params['sleep'])
else:
elapsed = datetime.datetime.now() - start
if pid_file:
                module.fail_json(msg="Timeout when waiting for PID file %s to be present." % (pid_file), elapsed=elapsed.seconds)
            elif pid:
                module.fail_json(msg="Timeout when waiting for PID:%s to be present." % (pid), elapsed=elapsed.seconds)
elapsed = datetime.datetime.now() - start
module.exit_json(state=state, pid=pid, thread_name_regex=thread_name_regex, pid_file=pid_file, elapsed=elapsed.seconds)
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| true
| true
|
1c46c33547965d1902ac5b6fd51ac5393e78bf60
| 3,694
|
py
|
Python
|
nengo/utils/tests/test_ensemble.py
|
HugoChateauLaurent/nengo
|
749893186ee09aa6c621a40da3ffd3878114db9c
|
[
"BSD-2-Clause"
] | null | null | null |
nengo/utils/tests/test_ensemble.py
|
HugoChateauLaurent/nengo
|
749893186ee09aa6c621a40da3ffd3878114db9c
|
[
"BSD-2-Clause"
] | null | null | null |
nengo/utils/tests/test_ensemble.py
|
HugoChateauLaurent/nengo
|
749893186ee09aa6c621a40da3ffd3878114db9c
|
[
"BSD-2-Clause"
] | null | null | null |
from __future__ import absolute_import
import numpy as np
import mpl_toolkits.mplot3d
import pytest
import nengo
from nengo.dists import Uniform
from nengo.utils.ensemble import response_curves, tuning_curves
def plot_tuning_curves(plt, eval_points, activities):
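    # 1D/2D evaluation points get a plain line plot; 3D points get a surface over the first two components.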
if eval_points.ndim <= 2:
plt.plot(eval_points, activities)
elif eval_points.ndim == 3:
assert mpl_toolkits.mplot3d
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(eval_points.T[0], eval_points.T[1], activities.T[0])
else:
raise NotImplementedError()
def test_tuning_curves_1d(Simulator, plt, seed):
"""For 1D ensembles, should be able to do plt.plot(*tuning_curves(...))."""
model = nengo.Network(seed=seed)
with model:
ens_1d = nengo.Ensemble(10, dimensions=1, neuron_type=nengo.LIF())
with Simulator(model) as sim:
plt.plot(*tuning_curves(ens_1d, sim))
@pytest.mark.parametrize('dimensions', [1, 2])
def test_tuning_curves(Simulator, nl_nodirect, plt, seed, dimensions):
radius = 10
max_rate = 400
model = nengo.Network(seed=seed)
with model:
ens = nengo.Ensemble(
10, dimensions=dimensions, neuron_type=nl_nodirect(),
max_rates=Uniform(200, max_rate), radius=radius)
with Simulator(model) as sim:
eval_points, activities = tuning_curves(ens, sim)
plot_tuning_curves(plt, eval_points, activities)
# Check that eval_points cover up to the radius.
assert np.abs(radius - np.max(np.abs(eval_points))) <= (
2 * radius / dimensions)
assert np.all(activities >= 0)
d = np.sqrt(np.sum(np.asarray(eval_points) ** 2, axis=-1))
assert np.all(activities[d <= radius] <= max_rate)
@pytest.mark.parametrize('dimensions', [1, 2])
def test_tuning_curves_direct_mode(Simulator, plt, seed, dimensions):
model = nengo.Network(seed=seed)
with model:
ens = nengo.Ensemble(10, dimensions, neuron_type=nengo.Direct())
with Simulator(model) as sim:
eval_points, activities = tuning_curves(ens, sim)
plot_tuning_curves(plt, eval_points, activities)
# eval_points is passed through in direct mode neurons
assert np.allclose(eval_points, activities)
def test_response_curves(Simulator, nl_nodirect, plt, seed):
max_rate = 400
model = nengo.Network(seed=seed)
with model:
ens = nengo.Ensemble(
10, dimensions=10, neuron_type=nl_nodirect(), radius=1.5,
max_rates=Uniform(200, max_rate))
with Simulator(model) as sim:
eval_points, activities = response_curves(ens, sim)
plot_tuning_curves(plt, eval_points, activities)
assert eval_points.ndim == 1 and eval_points.size > 0
assert np.all(eval_points >= -1.0) and np.all(eval_points <= 1.0)
assert np.all(activities >= 0.0)
assert np.all(activities <= max_rate)
# Activities along preferred direction must increase monotonically.
assert np.all(np.diff(activities, axis=0) >= 0.0)
@pytest.mark.parametrize('dimensions', [1, 2])
def test_response_curves_direct_mode(Simulator, plt, seed, dimensions):
model = nengo.Network(seed=seed)
with model:
ens = nengo.Ensemble(
10, dimensions=dimensions, neuron_type=nengo.Direct(), radius=1.5)
with Simulator(model) as sim:
eval_points, activities = response_curves(ens, sim)
plot_tuning_curves(plt, eval_points, activities)
assert eval_points.ndim == 1 and eval_points.size > 0
assert np.all(eval_points >= -1.0) and np.all(eval_points <= 1.0)
# eval_points is passed through in direct mode neurons
assert np.allclose(eval_points, activities)
| 33.581818
| 79
| 0.692204
|
from __future__ import absolute_import
import numpy as np
import mpl_toolkits.mplot3d
import pytest
import nengo
from nengo.dists import Uniform
from nengo.utils.ensemble import response_curves, tuning_curves
def plot_tuning_curves(plt, eval_points, activities):
if eval_points.ndim <= 2:
plt.plot(eval_points, activities)
elif eval_points.ndim == 3:
assert mpl_toolkits.mplot3d
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(eval_points.T[0], eval_points.T[1], activities.T[0])
else:
raise NotImplementedError()
def test_tuning_curves_1d(Simulator, plt, seed):
model = nengo.Network(seed=seed)
with model:
ens_1d = nengo.Ensemble(10, dimensions=1, neuron_type=nengo.LIF())
with Simulator(model) as sim:
plt.plot(*tuning_curves(ens_1d, sim))
@pytest.mark.parametrize('dimensions', [1, 2])
def test_tuning_curves(Simulator, nl_nodirect, plt, seed, dimensions):
radius = 10
max_rate = 400
model = nengo.Network(seed=seed)
with model:
ens = nengo.Ensemble(
10, dimensions=dimensions, neuron_type=nl_nodirect(),
max_rates=Uniform(200, max_rate), radius=radius)
with Simulator(model) as sim:
eval_points, activities = tuning_curves(ens, sim)
plot_tuning_curves(plt, eval_points, activities)
assert np.abs(radius - np.max(np.abs(eval_points))) <= (
2 * radius / dimensions)
assert np.all(activities >= 0)
d = np.sqrt(np.sum(np.asarray(eval_points) ** 2, axis=-1))
assert np.all(activities[d <= radius] <= max_rate)
@pytest.mark.parametrize('dimensions', [1, 2])
def test_tuning_curves_direct_mode(Simulator, plt, seed, dimensions):
model = nengo.Network(seed=seed)
with model:
ens = nengo.Ensemble(10, dimensions, neuron_type=nengo.Direct())
with Simulator(model) as sim:
eval_points, activities = tuning_curves(ens, sim)
plot_tuning_curves(plt, eval_points, activities)
assert np.allclose(eval_points, activities)
def test_response_curves(Simulator, nl_nodirect, plt, seed):
max_rate = 400
model = nengo.Network(seed=seed)
with model:
ens = nengo.Ensemble(
10, dimensions=10, neuron_type=nl_nodirect(), radius=1.5,
max_rates=Uniform(200, max_rate))
with Simulator(model) as sim:
eval_points, activities = response_curves(ens, sim)
plot_tuning_curves(plt, eval_points, activities)
assert eval_points.ndim == 1 and eval_points.size > 0
assert np.all(eval_points >= -1.0) and np.all(eval_points <= 1.0)
assert np.all(activities >= 0.0)
assert np.all(activities <= max_rate)
assert np.all(np.diff(activities, axis=0) >= 0.0)
@pytest.mark.parametrize('dimensions', [1, 2])
def test_response_curves_direct_mode(Simulator, plt, seed, dimensions):
model = nengo.Network(seed=seed)
with model:
ens = nengo.Ensemble(
10, dimensions=dimensions, neuron_type=nengo.Direct(), radius=1.5)
with Simulator(model) as sim:
eval_points, activities = response_curves(ens, sim)
plot_tuning_curves(plt, eval_points, activities)
assert eval_points.ndim == 1 and eval_points.size > 0
assert np.all(eval_points >= -1.0) and np.all(eval_points <= 1.0)
assert np.allclose(eval_points, activities)
| true
| true
|
1c46c3bd574a713b7791ae587b09e515b813b794
| 584
|
py
|
Python
|
tock/employees/migrations/0024_auto_20171229_1156.py
|
mikiec84/tock
|
15318a45b2b144360e4d7e15db655467a45c2ab9
|
[
"CC0-1.0"
] | 134
|
2015-02-02T18:42:03.000Z
|
2022-01-20T04:27:06.000Z
|
tock/employees/migrations/0024_auto_20171229_1156.py
|
mikiec84/tock
|
15318a45b2b144360e4d7e15db655467a45c2ab9
|
[
"CC0-1.0"
] | 1,220
|
2015-03-19T01:57:30.000Z
|
2022-03-23T21:52:15.000Z
|
tock/employees/migrations/0024_auto_20171229_1156.py
|
mikiec84/tock
|
15318a45b2b144360e4d7e15db655467a45c2ab9
|
[
"CC0-1.0"
] | 49
|
2015-03-09T15:44:33.000Z
|
2022-01-19T02:02:37.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-12-29 16:56
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('employees', '0023_userdata_organization'),
]
operations = [
migrations.AlterField(
model_name='userdata',
name='organization',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='organizations.Organization'),
),
]
| 26.545455
| 137
| 0.667808
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('employees', '0023_userdata_organization'),
]
operations = [
migrations.AlterField(
model_name='userdata',
name='organization',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='organizations.Organization'),
),
]
| true
| true
|
1c46c3de0b7231d58a348ef921880f2a3b454ce7
| 64,803
|
py
|
Python
|
Base Converter/main.py
|
mrif449/simple-python-projects
|
1d57b861f2d54568ebab955722f782a351a57f21
|
[
"MIT"
] | null | null | null |
Base Converter/main.py
|
mrif449/simple-python-projects
|
1d57b861f2d54568ebab955722f782a351a57f21
|
[
"MIT"
] | null | null | null |
Base Converter/main.py
|
mrif449/simple-python-projects
|
1d57b861f2d54568ebab955722f782a351a57f21
|
[
"MIT"
] | null | null | null |
print("Welcome to Base Converter Calculator!!!")
print("You can select your calculation mode by entering the serial number, or write 'close' stop calculating.")
print()
print("Note: You can also close the whole program by pressing Enter after closing calculation menu or manually.")
#Options:
print("Basic Bases:")
print("Decimal = 10")
print("Binary = 2")
print("Octal = 8")
print("Hexa-Decimal = 16")
print("...............................")
print("Let's Start...")
print("Press Enter to Start...")
inp = input("or Anything to Stop...")
while True:
if inp == "":
#Selecting Calculation Mode:
#command = (input("Select your calculation mode (1-14): "))
i_base = int(input("Enter the input Base: "))
o_base = int(input("Enter the output Base: "))
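        # Conversions to a base use repeated division (collect remainders, then reverse them); conversions from a base sum digit * base**position.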
#Decimal to Binary
if i_base == 10 and o_base == 2:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
string += str(temp%2)
temp = temp // 2
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
#Decimal to Octal
elif i_base == 10 and o_base == 8:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
string += str(temp%8)
temp = temp // 8
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
#Decimal to Hexa-Decimal
elif i_base == 10 and o_base == 16:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%16
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
else:
string += str(temp%16)
temp = temp // 16
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
#Binary to Decimal
elif i_base == 2 and o_base == 10:
number = int(input("Enter the Binary number: "))
string = str(number)
count = 0
sum = 0
for x in string:
count += 1
string_list = []
for b in string:
string_list.append(int(b))
temp_list = []
for y in range(0,count):
temp_list.append(int(y))
temp_list.reverse()
for x in range(0,len(string_list)):
sum += (string_list[x]*(2**temp_list[x]))
print("=============================")
print("Your result is",sum)
print("=============================")
#Binary to Octal
elif i_base == 2 and o_base == 8:
number = int(input("Enter the Binary number: "))
string = str(number)
count = 0
sum = 0
for x in string:
count += 1
string_list = []
for b in string:
string_list.append(int(b))
temp_list = []
for y in range(0,count):
temp_list.append(int(y))
temp_list.reverse()
for x in range(0,len(string_list)):
sum += (string_list[x]*(2**temp_list[x]))
number2 = sum
temp = number2
string = ""
temp_list = []
while temp > 0:
string += str(temp%8)
temp = temp // 8
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
#Binary to Hexa-Decimal
elif i_base == 2 and o_base == 16:
number = int(input("Enter the Binary number: "))
string = str(number)
count = 0
sum = 0
for x in string:
count += 1
string_list = []
for b in string:
string_list.append(int(b))
temp_list = []
for y in range(0,count):
temp_list.append(int(y))
temp_list.reverse()
for x in range(0,len(string_list)):
sum += (string_list[x]*(2**temp_list[x]))
number2 = sum
temp = number2
string = ""
temp_list = []
while temp > 0:
temp2 = temp%16
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
else:
string += str(temp%16)
temp = temp // 16
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
#Octal to Decimal
elif i_base == 8 and o_base == 10:
number = int(input("Enter the Octal number: "))
string = str(number)
count = 0
sum = 0
for x in string:
count += 1
string_list = []
for b in string:
string_list.append(int(b))
temp_list = []
for y in range(0,count):
temp_list.append(int(y))
temp_list.reverse()
for x in range(0,len(string_list)):
sum += (string_list[x]*(8**temp_list[x]))
print("=============================")
print("Your result is",sum)
print("=============================")
#Octal to Binary
elif i_base == 8 and o_base == 2:
number = int(input("Enter the Octal number: "))
string = str(number)
count = 0
sum = 0
for x in string:
count += 1
string_list = []
for b in string:
string_list.append(int(b))
temp_list = []
for y in range(0,count):
temp_list.append(int(y))
temp_list.reverse()
for x in range(0,len(string_list)):
sum += (string_list[x]*(8**temp_list[x]))
number2 = sum
temp = number2
string = ""
temp_list = []
while temp > 0:
string += str(temp%2)
temp = temp // 2
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
#Octal to Hexa-Decimal
elif i_base == 8 and o_base == 16:
number = int(input("Enter the Octal number: "))
string = str(number)
count = 0
sum = 0
for x in string:
count += 1
string_list = []
for b in string:
string_list.append(int(b))
temp_list = []
for y in range(0,count):
temp_list.append(int(y))
temp_list.reverse()
for x in range(0,len(string_list)):
sum += (string_list[x]*(8**temp_list[x]))
number2 = sum
temp = number2
string = ""
temp_list = []
while temp > 0:
temp2 = temp%16
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
else:
string += str(temp%16)
temp = temp // 16
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
#Hexa-Decimal to Decimal
elif i_base == 16 and o_base == 10:
string = input("Enter the Hexa-Decimal Number: ")
count = 0
sum = 0
for x in string:
count += 1
string_list = []
for b in string:
if b.upper() == "A":
string_list.append(10)
elif b.upper() == "B":
string_list.append(11)
elif b.upper() == "C":
string_list.append(12)
elif b.upper() == "D":
string_list.append(13)
elif b.upper() == "E":
string_list.append(14)
elif b.upper() == "F":
string_list.append(15)
else:
string_list.append(int(b))
temp_list = []
for y in range(0,count):
temp_list.append(int(y))
temp_list.reverse()
for x in range(0,len(string_list)):
sum += (string_list[x]*(16**temp_list[x]))
print("=============================")
print("Your result is",sum)
print("=============================")
#Hexa-Decimal to Binary
elif i_base == 16 and o_base == 2:
string = input("Enter the Hexa-Decimal Number: ")
count = 0
sum = 0
for x in string:
count += 1
string_list = []
for b in string:
if b.upper() == "A":
string_list.append(10)
elif b.upper() == "B":
string_list.append(11)
elif b.upper() == "C":
string_list.append(12)
elif b.upper() == "D":
string_list.append(13)
elif b.upper() == "E":
string_list.append(14)
elif b.upper() == "F":
string_list.append(15)
else:
string_list.append(int(b))
temp_list = []
for y in range(0,count):
temp_list.append(int(y))
temp_list.reverse()
for x in range(0,len(string_list)):
sum += (string_list[x]*(16**temp_list[x]))
number2 = sum
temp = number2
string = ""
temp_list = []
while temp > 0:
string += str(temp%2)
temp = temp // 2
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
#Hexa-Decimal to Octal
elif i_base == 16 and o_base == 8:
string = input("Enter the Hexa-Decimal Number: ")
count = 0
sum = 0
for x in string:
count += 1
string_list = []
for b in string:
if b.upper() == "A":
string_list.append(10)
elif b.upper() == "B":
string_list.append(11)
elif b.upper() == "C":
string_list.append(12)
elif b.upper() == "D":
string_list.append(13)
elif b.upper() == "E":
string_list.append(14)
elif b.upper() == "F":
string_list.append(15)
else:
string_list.append(int(b))
temp_list = []
for y in range(0,count):
temp_list.append(int(y))
temp_list.reverse()
for x in range(0,len(string_list)):
sum += (string_list[x]*(16**temp_list[x]))
number2 = sum
temp = number2
string = ""
temp_list = []
while temp > 0:
string += str(temp%8)
temp = temp // 8
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
#Decimal to Other Base:
elif i_base == 10:
if o_base == 3:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
string += str(temp%3)
temp = temp // 3
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 4:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
string += str(temp%4)
temp = temp // 4
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 5:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
string += str(temp%5)
temp = temp // 5
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 6:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
string += str(temp%6)
temp = temp // 6
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 7:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
string += str(temp%7)
temp = temp // 7
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 9:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
string += str(temp%9)
temp = temp // 9
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 11:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%11
if temp2 == 10:
string += "A"
else:
string += str(temp%11)
temp = temp // 11
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 12:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%12
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
else:
string += str(temp%12)
temp = temp // 12
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 13:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%13
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
else:
string += str(temp%13)
temp = temp // 13
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 14:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%14
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
else:
string += str(temp%14)
temp = temp // 14
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 15:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%15
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
else:
string += str(temp%15)
temp = temp // 15
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 17:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%17
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
else:
string += str(temp%17)
temp = temp // 17
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 18:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%18
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
else:
string += str(temp%18)
temp = temp // 18
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 19:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%19
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
else:
string += str(temp%19)
temp = temp // 19
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 20:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%20
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
else:
string += str(temp%20)
temp = temp // 20
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 21:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%21
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
else:
string += str(temp%21)
temp = temp // 21
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 22:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%22
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
else:
string += str(temp%22)
temp = temp // 22
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 23:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%23
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
else:
string += str(temp%23)
temp = temp // 23
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 24:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%24
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
else:
string += str(temp%24)
temp = temp // 24
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 25:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%25
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
elif temp2 == 24:
string += "O"
else:
string += str(temp%25)
temp = temp // 25
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 26:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%26
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
elif temp2 == 24:
string += "O"
elif temp2 == 25:
string += "P"
else:
string += str(temp%26)
temp = temp // 26
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 27:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%27
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
elif temp2 == 24:
string += "O"
elif temp2 == 25:
string += "P"
elif temp2 == 26:
string += "Q"
else:
string += str(temp%27)
temp = temp // 27
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 28:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%28
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
elif temp2 == 24:
string += "O"
elif temp2 == 25:
string += "P"
elif temp2 == 26:
string += "Q"
elif temp2 == 27:
string += "R"
else:
string += str(temp%28)
temp = temp // 28
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 29:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%29
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
elif temp2 == 24:
string += "O"
elif temp2 == 25:
string += "P"
elif temp2 == 26:
string += "Q"
elif temp2 == 27:
string += "R"
elif temp2 == 28:
string += "S"
else:
string += str(temp%29)
temp = temp // 29
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 30:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%30
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
elif temp2 == 24:
string += "O"
elif temp2 == 25:
string += "P"
elif temp2 == 26:
string += "Q"
elif temp2 == 27:
string += "R"
elif temp2 == 28:
string += "S"
elif temp2 == 29:
string += "T"
else:
string += str(temp%30)
temp = temp // 30
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 31:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%31
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
elif temp2 == 24:
string += "O"
elif temp2 == 25:
string += "P"
elif temp2 == 26:
string += "Q"
elif temp2 == 27:
string += "R"
elif temp2 == 28:
string += "S"
elif temp2 == 29:
string += "T"
elif temp2 == 30:
string += "U"
else:
string += str(temp%31)
temp = temp // 31
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 32:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%32
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
elif temp2 == 24:
string += "O"
elif temp2 == 25:
string += "P"
elif temp2 == 26:
string += "Q"
elif temp2 == 27:
string += "R"
elif temp2 == 28:
string += "S"
elif temp2 == 29:
string += "T"
elif temp2 == 30:
string += "U"
elif temp2 == 31:
string += "V"
else:
string += str(temp%32)
temp = temp // 32
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 33:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%33
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
elif temp2 == 24:
string += "O"
elif temp2 == 25:
string += "P"
elif temp2 == 26:
string += "Q"
elif temp2 == 27:
string += "R"
elif temp2 == 28:
string += "S"
elif temp2 == 29:
string += "T"
elif temp2 == 30:
string += "U"
elif temp2 == 31:
string += "V"
elif temp2 == 32:
string += "W"
else:
string += str(temp%33)
temp = temp // 33
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 34:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%34
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
elif temp2 == 24:
string += "O"
elif temp2 == 25:
string += "P"
elif temp2 == 26:
string += "Q"
elif temp2 == 27:
string += "R"
elif temp2 == 28:
string += "S"
elif temp2 == 29:
string += "T"
elif temp2 == 30:
string += "U"
elif temp2 == 31:
string += "V"
elif temp2 == 32:
string += "W"
elif temp2 == 33:
string += "X"
else:
string += str(temp%34)
temp = temp // 34
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 35:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%35
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
elif temp2 == 24:
string += "O"
elif temp2 == 25:
string += "P"
elif temp2 == 26:
string += "Q"
elif temp2 == 27:
string += "R"
elif temp2 == 28:
string += "S"
elif temp2 == 29:
string += "T"
elif temp2 == 30:
string += "U"
elif temp2 == 31:
string += "V"
elif temp2 == 32:
string += "W"
elif temp2 == 33:
string += "X"
elif temp2 == 34:
string += "Y"
else:
string += str(temp%35)
temp = temp // 35
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 36:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%36
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
elif temp2 == 24:
string += "O"
elif temp2 == 25:
string += "P"
elif temp2 == 26:
string += "Q"
elif temp2 == 27:
string += "R"
elif temp2 == 28:
string += "S"
elif temp2 == 29:
string += "T"
elif temp2 == 30:
string += "U"
elif temp2 == 31:
string += "V"
elif temp2 == 32:
string += "W"
elif temp2 == 33:
string += "X"
elif temp2 == 34:
string += "Y"
elif temp2 == 35:
string += "Z"
else:
string += str(temp%36)
temp = temp // 36
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
else:
break
inp = input("Press Enter to close...")
| 38.141848
| 114
| 0.290496
|
print("Welcome to Base Converter Calculator!!!")
print("You can select your calculation mode by entering the serial number, or write 'close' stop calculating.")
print()
print("Note: You can also close the whole program by pressing Enter after closing calculation menu or manually.")
print("Basic Bases:")
print("Decimal = 10")
print("Binary = 2")
print("Octal = 8")
print("Hexa-Decimal = 16")
print("...............................")
print("Let's Start...")
print("Press Enter to Start...")
inp = input("or Anything to Stop...")
while True:
if inp == "":
#Selecting Calculation Mode:
#command = (input("Select your calculation mode (1-14): "))
i_base = int(input("Enter the input Base: "))
o_base = int(input("Enter the output Base: "))
#Decimal to Binary
if i_base == 10 and o_base == 2:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
string += str(temp%2)
temp = temp // 2
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
#Decimal to Octal
elif i_base == 10 and o_base == 8:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
string += str(temp%8)
temp = temp // 8
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
#Decimal to Hexa-Decimal
elif i_base == 10 and o_base == 16:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%16
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
else:
string += str(temp%16)
temp = temp // 16
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
#Binary to Decimal
elif i_base == 2 and o_base == 10:
number = int(input("Enter the Binary number: "))
string = str(number)
count = 0
sum = 0
for x in string:
count += 1
string_list = []
for b in string:
string_list.append(int(b))
temp_list = []
for y in range(0,count):
temp_list.append(int(y))
temp_list.reverse()
for x in range(0,len(string_list)):
sum += (string_list[x]*(2**temp_list[x]))
print("=============================")
print("Your result is",sum)
print("=============================")
#Binary to Octal
elif i_base == 2 and o_base == 8:
number = int(input("Enter the Binary number: "))
string = str(number)
count = 0
sum = 0
for x in string:
count += 1
string_list = []
for b in string:
string_list.append(int(b))
temp_list = []
for y in range(0,count):
temp_list.append(int(y))
temp_list.reverse()
for x in range(0,len(string_list)):
sum += (string_list[x]*(2**temp_list[x]))
number2 = sum
temp = number2
string = ""
temp_list = []
while temp > 0:
string += str(temp%8)
temp = temp // 8
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
#Binary to Hexa-Decimal
elif i_base == 2 and o_base == 16:
number = int(input("Enter the Binary number: "))
string = str(number)
count = 0
sum = 0
for x in string:
count += 1
string_list = []
for b in string:
string_list.append(int(b))
temp_list = []
for y in range(0,count):
temp_list.append(int(y))
temp_list.reverse()
for x in range(0,len(string_list)):
sum += (string_list[x]*(2**temp_list[x]))
number2 = sum
temp = number2
string = ""
temp_list = []
while temp > 0:
temp2 = temp%16
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
else:
string += str(temp%16)
temp = temp // 16
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
#Octal to Decimal
elif i_base == 8 and o_base == 10:
number = int(input("Enter the Octal number: "))
string = str(number)
count = 0
sum = 0
for x in string:
count += 1
string_list = []
for b in string:
string_list.append(int(b))
temp_list = []
for y in range(0,count):
temp_list.append(int(y))
temp_list.reverse()
for x in range(0,len(string_list)):
sum += (string_list[x]*(8**temp_list[x]))
print("=============================")
print("Your result is",sum)
print("=============================")
#Octal to Binary
elif i_base == 8 and o_base == 2:
number = int(input("Enter the Octal number: "))
string = str(number)
count = 0
sum = 0
for x in string:
count += 1
string_list = []
for b in string:
string_list.append(int(b))
temp_list = []
for y in range(0,count):
temp_list.append(int(y))
temp_list.reverse()
for x in range(0,len(string_list)):
sum += (string_list[x]*(8**temp_list[x]))
number2 = sum
temp = number2
string = ""
temp_list = []
while temp > 0:
string += str(temp%2)
temp = temp // 2
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
#Octal to Hexa-Decimal
elif i_base == 8 and o_base == 16:
number = int(input("Enter the Octal number: "))
string = str(number)
count = 0
sum = 0
for x in string:
count += 1
string_list = []
for b in string:
string_list.append(int(b))
temp_list = []
for y in range(0,count):
temp_list.append(int(y))
temp_list.reverse()
for x in range(0,len(string_list)):
sum += (string_list[x]*(8**temp_list[x]))
number2 = sum
temp = number2
string = ""
temp_list = []
while temp > 0:
temp2 = temp%16
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
else:
string += str(temp%16)
temp = temp // 16
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
#Hexa-Decimal to Decimal
elif i_base == 16 and o_base == 10:
string = input("Enter the Hexa-Decimal Number: ")
count = 0
sum = 0
for x in string:
count += 1
string_list = []
for b in string:
if b.upper() == "A":
string_list.append(10)
elif b.upper() == "B":
string_list.append(11)
elif b.upper() == "C":
string_list.append(12)
elif b.upper() == "D":
string_list.append(13)
elif b.upper() == "E":
string_list.append(14)
elif b.upper() == "F":
string_list.append(15)
else:
string_list.append(int(b))
temp_list = []
for y in range(0,count):
temp_list.append(int(y))
temp_list.reverse()
for x in range(0,len(string_list)):
sum += (string_list[x]*(16**temp_list[x]))
print("=============================")
print("Your result is",sum)
print("=============================")
#Hexa-Decimal to Binary
elif i_base == 16 and o_base == 2:
string = input("Enter the Hexa-Decimal Number: ")
count = 0
sum = 0
for x in string:
count += 1
string_list = []
for b in string:
if b.upper() == "A":
string_list.append(10)
elif b.upper() == "B":
string_list.append(11)
elif b.upper() == "C":
string_list.append(12)
elif b.upper() == "D":
string_list.append(13)
elif b.upper() == "E":
string_list.append(14)
elif b.upper() == "F":
string_list.append(15)
else:
string_list.append(int(b))
temp_list = []
for y in range(0,count):
temp_list.append(int(y))
temp_list.reverse()
for x in range(0,len(string_list)):
sum += (string_list[x]*(16**temp_list[x]))
number2 = sum
temp = number2
string = ""
temp_list = []
while temp > 0:
string += str(temp%2)
temp = temp // 2
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
#Hexa-Decimal to Octal
elif i_base == 16 and o_base == 8:
string = input("Enter the Hexa-Decimal Number: ")
count = 0
sum = 0
for x in string:
count += 1
string_list = []
for b in string:
if b.upper() == "A":
string_list.append(10)
elif b.upper() == "B":
string_list.append(11)
elif b.upper() == "C":
string_list.append(12)
elif b.upper() == "D":
string_list.append(13)
elif b.upper() == "E":
string_list.append(14)
elif b.upper() == "F":
string_list.append(15)
else:
string_list.append(int(b))
temp_list = []
for y in range(0,count):
temp_list.append(int(y))
temp_list.reverse()
for x in range(0,len(string_list)):
sum += (string_list[x]*(16**temp_list[x]))
number2 = sum
temp = number2
string = ""
temp_list = []
while temp > 0:
string += str(temp%8)
temp = temp // 8
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
#Decimal to Other Base:
elif i_base == 10:
if o_base == 3:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
string += str(temp%3)
temp = temp // 3
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 4:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
string += str(temp%4)
temp = temp // 4
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 5:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
string += str(temp%5)
temp = temp // 5
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 6:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
string += str(temp%6)
temp = temp // 6
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 7:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
string += str(temp%7)
temp = temp // 7
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 9:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
string += str(temp%9)
temp = temp // 9
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 11:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%11
if temp2 == 10:
string += "A"
else:
string += str(temp%11)
temp = temp // 11
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 12:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%12
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
else:
string += str(temp%12)
temp = temp // 12
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 13:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%13
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
else:
string += str(temp%13)
temp = temp // 13
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 14:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%14
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
else:
string += str(temp%14)
temp = temp // 14
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 15:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%15
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
else:
string += str(temp%15)
temp = temp // 15
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 17:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%17
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
else:
string += str(temp%17)
temp = temp // 17
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 18:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%18
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
else:
string += str(temp%18)
temp = temp // 18
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 19:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%19
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
else:
string += str(temp%19)
temp = temp // 19
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 20:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%20
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
else:
string += str(temp%20)
temp = temp // 20
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 21:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%21
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
else:
string += str(temp%21)
temp = temp // 21
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 22:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%22
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
else:
string += str(temp%22)
temp = temp // 22
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 23:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%23
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
else:
string += str(temp%23)
temp = temp // 23
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 24:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%24
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
else:
string += str(temp%24)
temp = temp // 24
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 25:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%25
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
elif temp2 == 24:
string += "O"
else:
string += str(temp%25)
temp = temp // 25
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 26:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%26
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
elif temp2 == 24:
string += "O"
elif temp2 == 25:
string += "P"
else:
string += str(temp%26)
temp = temp // 26
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 27:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%27
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
elif temp2 == 24:
string += "O"
elif temp2 == 25:
string += "P"
elif temp2 == 26:
string += "Q"
else:
string += str(temp%27)
temp = temp // 27
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 28:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%28
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
elif temp2 == 24:
string += "O"
elif temp2 == 25:
string += "P"
elif temp2 == 26:
string += "Q"
elif temp2 == 27:
string += "R"
else:
string += str(temp%28)
temp = temp // 28
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 29:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%29
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
elif temp2 == 24:
string += "O"
elif temp2 == 25:
string += "P"
elif temp2 == 26:
string += "Q"
elif temp2 == 27:
string += "R"
elif temp2 == 28:
string += "S"
else:
string += str(temp%29)
temp = temp // 29
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 30:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%30
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
elif temp2 == 24:
string += "O"
elif temp2 == 25:
string += "P"
elif temp2 == 26:
string += "Q"
elif temp2 == 27:
string += "R"
elif temp2 == 28:
string += "S"
elif temp2 == 29:
string += "T"
else:
string += str(temp%30)
temp = temp // 30
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 31:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%31
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
elif temp2 == 24:
string += "O"
elif temp2 == 25:
string += "P"
elif temp2 == 26:
string += "Q"
elif temp2 == 27:
string += "R"
elif temp2 == 28:
string += "S"
elif temp2 == 29:
string += "T"
elif temp2 == 30:
string += "U"
else:
string += str(temp%31)
temp = temp // 31
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 32:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%32
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
elif temp2 == 24:
string += "O"
elif temp2 == 25:
string += "P"
elif temp2 == 26:
string += "Q"
elif temp2 == 27:
string += "R"
elif temp2 == 28:
string += "S"
elif temp2 == 29:
string += "T"
elif temp2 == 30:
string += "U"
elif temp2 == 31:
string += "V"
else:
string += str(temp%32)
temp = temp // 32
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 33:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%33
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
elif temp2 == 24:
string += "O"
elif temp2 == 25:
string += "P"
elif temp2 == 26:
string += "Q"
elif temp2 == 27:
string += "R"
elif temp2 == 28:
string += "S"
elif temp2 == 29:
string += "T"
elif temp2 == 30:
string += "U"
elif temp2 == 31:
string += "V"
elif temp2 == 32:
string += "W"
else:
string += str(temp%33)
temp = temp // 33
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 34:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%34
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
elif temp2 == 24:
string += "O"
elif temp2 == 25:
string += "P"
elif temp2 == 26:
string += "Q"
elif temp2 == 27:
string += "R"
elif temp2 == 28:
string += "S"
elif temp2 == 29:
string += "T"
elif temp2 == 30:
string += "U"
elif temp2 == 31:
string += "V"
elif temp2 == 32:
string += "W"
elif temp2 == 33:
string += "X"
else:
string += str(temp%34)
temp = temp // 34
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 35:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%35
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
elif temp2 == 24:
string += "O"
elif temp2 == 25:
string += "P"
elif temp2 == 26:
string += "Q"
elif temp2 == 27:
string += "R"
elif temp2 == 28:
string += "S"
elif temp2 == 29:
string += "T"
elif temp2 == 30:
string += "U"
elif temp2 == 31:
string += "V"
elif temp2 == 32:
string += "W"
elif temp2 == 33:
string += "X"
elif temp2 == 34:
string += "Y"
else:
string += str(temp%35)
temp = temp // 35
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
elif o_base == 36:
number = int(input("Enter the Decimal number: "))
temp = number
string = ""
temp_list = []
while temp > 0:
temp2 = temp%36
if temp2 == 10:
string += "A"
elif temp2 == 11:
string += "B"
elif temp2 == 12:
string += "C"
elif temp2 == 13:
string += "D"
elif temp2 == 14:
string += "E"
elif temp2 == 15:
string += "F"
elif temp2 == 16:
string += "G"
elif temp2 == 17:
string += "H"
elif temp2 == 18:
string += "I"
elif temp2 == 19:
string += "J"
elif temp2 == 20:
string += "K"
elif temp2 == 21:
string += "L"
elif temp2 == 22:
string += "M"
elif temp2 == 23:
string += "N"
elif temp2 == 24:
string += "O"
elif temp2 == 25:
string += "P"
elif temp2 == 26:
string += "Q"
elif temp2 == 27:
string += "R"
elif temp2 == 28:
string += "S"
elif temp2 == 29:
string += "T"
elif temp2 == 30:
string += "U"
elif temp2 == 31:
string += "V"
elif temp2 == 32:
string += "W"
elif temp2 == 33:
string += "X"
elif temp2 == 34:
string += "Y"
elif temp2 == 35:
string += "Z"
else:
string += str(temp%36)
temp = temp // 36
for x in string:
temp_list.append(x)
temp_list.reverse()
result = ""
for y in temp_list:
result += y
print("=============================")
print("Your result is",result)
print("=============================")
else:
break
inp = input("Press Enter to close...")
| true
| true
|
1c46c40e2bfd9e44bd757c0752d89f57ed80ef32
| 9,575
|
py
|
Python
|
env/lib/python3.8/site-packages/plotly/graph_objs/scattergl/marker/colorbar/_tickformatstop.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 11,750
|
2015-10-12T07:03:39.000Z
|
2022-03-31T20:43:15.000Z
|
env/lib/python3.8/site-packages/plotly/graph_objs/scattergl/marker/colorbar/_tickformatstop.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 2,951
|
2015-10-12T00:41:25.000Z
|
2022-03-31T22:19:26.000Z
|
env/lib/python3.8/site-packages/plotly/graph_objs/scattergl/marker/colorbar/_tickformatstop.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 2,623
|
2015-10-15T14:40:27.000Z
|
2022-03-28T16:05:50.000Z
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Tickformatstop(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "scattergl.marker.colorbar"
_path_str = "scattergl.marker.colorbar.tickformatstop"
_valid_props = {"dtickrange", "enabled", "name", "templateitemname", "value"}
# dtickrange
# ----------
@property
def dtickrange(self):
"""
range [*min*, *max*], where "min", "max" - dtick values which
describe some zoom level, it is possible to omit "min" or "max"
value by passing "null"
The 'dtickrange' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'dtickrange[0]' property accepts values of any type
(1) The 'dtickrange[1]' property accepts values of any type
Returns
-------
list
"""
return self["dtickrange"]
@dtickrange.setter
def dtickrange(self, val):
self["dtickrange"] = val
# enabled
# -------
@property
def enabled(self):
"""
Determines whether or not this stop is used. If `false`, this
stop is ignored even within its `dtickrange`.
The 'enabled' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["enabled"]
@enabled.setter
def enabled(self, val):
self["enabled"] = val
# name
# ----
@property
def name(self):
"""
When used in a template, named items are created in the output
figure in addition to any items the figure already has in this
array. You can modify these items in the output figure by
making your own item with `templateitemname` matching this
`name` alongside your modifications (including `visible: false`
or `enabled: false` to hide it). Has no effect outside of a
template.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
# templateitemname
# ----------------
@property
def templateitemname(self):
"""
Used to refer to a named item in this array in the template.
Named items from the template will be created even without a
matching item in the input figure, but you can modify one by
making an item with `templateitemname` matching its `name`,
alongside your modifications (including `visible: false` or
`enabled: false` to hide it). If there is no template or no
matching item, this item will be hidden unless you explicitly
show it with `visible: true`.
The 'templateitemname' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["templateitemname"]
@templateitemname.setter
def templateitemname(self, val):
self["templateitemname"] = val
# value
# -----
@property
def value(self):
"""
string - dtickformat for described zoom level, the same as
"tickformat"
The 'value' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["value"]
@value.setter
def value(self, val):
self["value"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
"""
def __init__(
self,
arg=None,
dtickrange=None,
enabled=None,
name=None,
templateitemname=None,
value=None,
**kwargs
):
"""
Construct a new Tickformatstop object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.scattergl.mark
er.colorbar.Tickformatstop`
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
Returns
-------
Tickformatstop
"""
super(Tickformatstop, self).__init__("tickformatstops")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scattergl.marker.colorbar.Tickformatstop
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattergl.marker.colorbar.Tickformatstop`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("dtickrange", None)
_v = dtickrange if dtickrange is not None else _v
if _v is not None:
self["dtickrange"] = _v
_v = arg.pop("enabled", None)
_v = enabled if enabled is not None else _v
if _v is not None:
self["enabled"] = _v
_v = arg.pop("name", None)
_v = name if name is not None else _v
if _v is not None:
self["name"] = _v
_v = arg.pop("templateitemname", None)
_v = templateitemname if templateitemname is not None else _v
if _v is not None:
self["templateitemname"] = _v
_v = arg.pop("value", None)
_v = value if value is not None else _v
if _v is not None:
self["value"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
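# Added usage sketch (assumption; not part of the generated file): build a
# stop and inspect its JSON form. `to_plotly_json` is inherited from the base
# hierarchy type.
if __name__ == "__main__":
    _stop = Tickformatstop(dtickrange=[None, 1000], value="%H:%M", enabled=True)
    print(_stop.to_plotly_json())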
| 33.714789
| 85
| 0.571488
|
1c46c46c1f4c709d35888c5eb3d047bbc9d4d31c
| 351
|
py
|
Python
|
src/code-challenges/codewars/5KYU/productFib/test_productFib.py
|
maltewirz/code-challenges
|
97777b10963f19bc587ddd984f0526b221c081f8
|
[
"MIT"
] | 1
|
2020-08-30T07:52:20.000Z
|
2020-08-30T07:52:20.000Z
|
src/code-challenges/codewars/5KYU/productFib/test_productFib.py
|
maltewirz/code-challenges
|
97777b10963f19bc587ddd984f0526b221c081f8
|
[
"MIT"
] | 6
|
2020-08-12T07:05:04.000Z
|
2021-08-23T06:10:10.000Z
|
src/code-challenges/codewars/5KYU/productFib/test_productFib.py
|
maltewirz/code-challenges
|
97777b10963f19bc587ddd984f0526b221c081f8
|
[
"MIT"
] | null | null | null |
from productFib import productFib
import unittest
class Test(unittest.TestCase):
def test_1(self):
result = productFib(4895)
self.assertEqual(result, [55, 89, True])
    def test_2(self):
        result = productFib(5895)
        self.assertEqual(result, [89, 144, False])
if __name__ == "__main__":
unittest.main()
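# Added reference sketch (assumption): the implementation under test, kept as
# a comment so it does not shadow the import above. It walks consecutive
# Fibonacci pairs until their product reaches `prod`:
#
#     def productFib(prod):
#         a, b = 0, 1
#         while a * b < prod:
#             a, b = b, a + b
#         return [a, b, a * b == prod]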
| 20.647059
| 52
| 0.641026
|
1c46c4949b4efa2afa8ed0d4db1bfe2610a1a4ad
| 622
|
py
|
Python
|
generateFileList.py
|
mrzhu666/USCL
|
8a4741046ef8f337b1e9439d1575db670a11355c
|
[
"MIT"
] | null | null | null |
generateFileList.py
|
mrzhu666/USCL
|
8a4741046ef8f337b1e9439d1575db670a11355c
|
[
"MIT"
] | null | null | null |
generateFileList.py
|
mrzhu666/USCL
|
8a4741046ef8f337b1e9439d1575db670a11355c
|
[
"MIT"
] | null | null | null |
import os
import pandas as pd
from IgAModel66.setting import config
# Append the collected file names as a new column to the result csv.
files=os.listdir(config['server_path']+'IgAModel/test/M0/')
files.extend(os.listdir(config['server_path']+'IgAModel/test/M1/') )
eval_All=pd.read_csv('result/eval_All_0.73.csv',header=0)
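# Added sanity check (assumption, not in the original): the file count must
# match the row count, otherwise the column assignment below raises.
assert len(files) == len(eval_All), (len(files), len(eval_All))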
eval_All['file']=files
eval_All.to_csv('result/eval_All_0.73_file.csv',index=False)
| 23.037037
| 68
| 0.803859
|
1c46c54cd215d2279abe7d5e268fcf2822b63cd3
| 903
|
py
|
Python
|
ultimatepython/data_structures/dict.py
|
Benczus/ultimate-python
|
2bcc8233af7b21388b587812d3e5124189b8cdec
|
[
"MIT"
] | 1
|
2020-09-07T12:50:18.000Z
|
2020-09-07T12:50:18.000Z
|
ultimatepython/data_structures/dict.py
|
Benczus/ultimate-python
|
2bcc8233af7b21388b587812d3e5124189b8cdec
|
[
"MIT"
] | null | null | null |
ultimatepython/data_structures/dict.py
|
Benczus/ultimate-python
|
2bcc8233af7b21388b587812d3e5124189b8cdec
|
[
"MIT"
] | null | null | null |
def main():
# Let's create a dictionary with student keys and GPA values
student_gpa = {"john": 3.5,
"jane": 4.0,
"bob": 2.8,
"mary": 3.2}
# There are four student records in this dictionary
assert len(student_gpa) == 4
# Each student has a name key and a GPA value
assert len(student_gpa.keys()) == len(student_gpa.values())
# We can get the names in isolation
for student in student_gpa.keys():
assert len(student) > 2
# We can get the GPAs in isolation
for gpa in student_gpa.values():
assert gpa > 2.0
# We can get the GPA for a specific student
assert student_gpa["john"] == 3.5
# We can access the student and GPA simultaneously
for student, gpa in student_gpa.items():
print(f"Student {student} has a {gpa} GPA")
if __name__ == "__main__":
main()
| 28.21875
| 64
| 0.601329
|
1c46c6c3ff147c8e547e3aaf58bce039d6e667a5
| 5,134
|
py
|
Python
|
SCons/Scanner/DirTests.py
|
jcassagnol-public/scons
|
8eaf585a893757e68c9e4a6e25d375021fa5eab7
|
[
"MIT"
] | null | null | null |
SCons/Scanner/DirTests.py
|
jcassagnol-public/scons
|
8eaf585a893757e68c9e4a6e25d375021fa5eab7
|
[
"MIT"
] | null | null | null |
SCons/Scanner/DirTests.py
|
jcassagnol-public/scons
|
8eaf585a893757e68c9e4a6e25d375021fa5eab7
|
[
"MIT"
] | null | null | null |
# MIT License
#
# Copyright The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os.path
import unittest
import TestCmd
import SCons.Node.FS
import SCons.Scanner.Dir
from SCons.SConsign import current_sconsign_filename
#class DummyNode:
# def __init__(self, name, fs):
# self.name = name
# self.abspath = test.workpath(name)
# self.fs = fs
# def __str__(self):
# return self.name
# def Entry(self, name):
# return self.fs.Entry(name)
class DummyEnvironment:
def __init__(self, root):
self.fs = SCons.Node.FS.FS(root)
def Dir(self, name):
return self.fs.Dir(name)
def Entry(self, name):
return self.fs.Entry(name)
def File(self, name):
return self.fs.File(name)
def get_factory(self, factory):
return factory or self.fs.Entry
class DirScannerTestBase(unittest.TestCase):
def setUp(self):
self.test = TestCmd.TestCmd(workdir = '')
self.test.subdir('dir', ['dir', 'sub'])
sconsign = current_sconsign_filename()
self.test.write(['dir', 'f1'], "dir/f1\n")
self.test.write(['dir', 'f2'], "dir/f2\n")
self.test.write(['dir', '{}'.format(sconsign)], "dir/{}\n".format(sconsign))
self.test.write(['dir', '{}.bak'.format(sconsign)], "dir/{}.bak\n".format(sconsign))
self.test.write(['dir', '{}.dat'.format(sconsign)], "dir/{}.dat\n".format(sconsign))
self.test.write(['dir', '{}.db'.format(sconsign)], "dir/{}.db\n".format(sconsign))
self.test.write(['dir', '{}.dblite'.format(sconsign)], "dir/{}.dblite\n".format(sconsign))
self.test.write(['dir', '{}.dir'.format(sconsign)], "dir/{}.dir\n".format(sconsign))
self.test.write(['dir', '{}.pag'.format(sconsign)], "dir/{}.pag\n".format(sconsign))
self.test.write(['dir', 'sub', 'f3'], "dir/sub/f3\n")
self.test.write(['dir', 'sub', 'f4'], "dir/sub/f4\n")
self.test.write(['dir', 'sub', '{}'.format(sconsign)], "dir/{}\n".format(sconsign))
self.test.write(['dir', 'sub', '{}.bak'.format(sconsign)], "dir/{}.bak\n".format(sconsign))
self.test.write(['dir', 'sub', '{}.dat'.format(sconsign)], "dir/{}.dat\n".format(sconsign))
self.test.write(['dir', 'sub', '{}.dblite'.format(sconsign)], "dir/{}.dblite\n".format(sconsign))
self.test.write(['dir', 'sub', '{}.dir'.format(sconsign)], "dir/{}.dir\n".format(sconsign))
self.test.write(['dir', 'sub', '{}.pag'.format(sconsign)], "dir/{}.pag\n".format(sconsign))
class DirScannerTestCase(DirScannerTestBase):
def runTest(self):
env = DummyEnvironment(self.test.workpath())
s = SCons.Scanner.Dir.DirScanner()
expect = [
os.path.join('dir', 'f1'),
os.path.join('dir', 'f2'),
os.path.join('dir', 'sub'),
]
deps = s(env.Dir('dir'), env, ())
sss = list(map(str, deps))
assert sss == expect, "Found {}, expected {}".format(sss, expect)
expect = [
os.path.join('dir', 'sub', 'f3'),
os.path.join('dir', 'sub', 'f4'),
]
deps = s(env.Dir('dir/sub'), env, ())
sss = list(map(str, deps))
assert sss == expect, "Found {}, expected {}".format(sss, expect)
class DirEntryScannerTestCase(DirScannerTestBase):
def runTest(self):
env = DummyEnvironment(self.test.workpath())
s = SCons.Scanner.Dir.DirEntryScanner()
deps = s(env.Dir('dir'), env, ())
sss = list(map(str, deps))
assert sss == [], "Found {}, expected {}".format(sss, [])
deps = s(env.Dir('dir/sub'), env, ())
sss = list(map(str, deps))
assert sss == [], "Found {}, expected {}".format(sss, [])
# Make sure we don't blow up if handed a non-Dir node.
deps = s(env.File('dir/f1'), env, ())
sss = list(map(str, deps))
assert sss == [], "Found {}, expected {}".format(sss, [])
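# Added convenience sketch (assumption, not in the original file): run a single
# case programmatically instead of through unittest.main().
def run_single(case=DirScannerTestCase):
    suite = unittest.TestSuite([case('runTest')])
    unittest.TextTestRunner().run(suite)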
if __name__ == "__main__":
unittest.main()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 39.19084
| 105
| 0.614725
|
import os.path
import unittest
import TestCmd
import SCons.Node.FS
import SCons.Scanner.Dir
from SCons.SConsign import current_sconsign_filename
class DummyEnvironment:
def __init__(self, root):
self.fs = SCons.Node.FS.FS(root)
def Dir(self, name):
return self.fs.Dir(name)
def Entry(self, name):
return self.fs.Entry(name)
def File(self, name):
return self.fs.File(name)
def get_factory(self, factory):
return factory or self.fs.Entry
class DirScannerTestBase(unittest.TestCase):
def setUp(self):
self.test = TestCmd.TestCmd(workdir = '')
self.test.subdir('dir', ['dir', 'sub'])
sconsign = current_sconsign_filename()
self.test.write(['dir', 'f1'], "dir/f1\n")
self.test.write(['dir', 'f2'], "dir/f2\n")
self.test.write(['dir', '{}'.format(sconsign)], "dir/{}\n".format(sconsign))
self.test.write(['dir', '{}.bak'.format(sconsign)], "dir/{}.bak\n".format(sconsign))
self.test.write(['dir', '{}.dat'.format(sconsign)], "dir/{}.dat\n".format(sconsign))
self.test.write(['dir', '{}.db'.format(sconsign)], "dir/{}.db\n".format(sconsign))
self.test.write(['dir', '{}.dblite'.format(sconsign)], "dir/{}.dblite\n".format(sconsign))
self.test.write(['dir', '{}.dir'.format(sconsign)], "dir/{}.dir\n".format(sconsign))
self.test.write(['dir', '{}.pag'.format(sconsign)], "dir/{}.pag\n".format(sconsign))
self.test.write(['dir', 'sub', 'f3'], "dir/sub/f3\n")
self.test.write(['dir', 'sub', 'f4'], "dir/sub/f4\n")
self.test.write(['dir', 'sub', '{}'.format(sconsign)], "dir/{}\n".format(sconsign))
self.test.write(['dir', 'sub', '{}.bak'.format(sconsign)], "dir/{}.bak\n".format(sconsign))
self.test.write(['dir', 'sub', '{}.dat'.format(sconsign)], "dir/{}.dat\n".format(sconsign))
self.test.write(['dir', 'sub', '{}.dblite'.format(sconsign)], "dir/{}.dblite\n".format(sconsign))
self.test.write(['dir', 'sub', '{}.dir'.format(sconsign)], "dir/{}.dir\n".format(sconsign))
self.test.write(['dir', 'sub', '{}.pag'.format(sconsign)], "dir/{}.pag\n".format(sconsign))
class DirScannerTestCase(DirScannerTestBase):
def runTest(self):
env = DummyEnvironment(self.test.workpath())
s = SCons.Scanner.Dir.DirScanner()
expect = [
os.path.join('dir', 'f1'),
os.path.join('dir', 'f2'),
os.path.join('dir', 'sub'),
]
deps = s(env.Dir('dir'), env, ())
sss = list(map(str, deps))
assert sss == expect, "Found {}, expected {}".format(sss, expect)
expect = [
os.path.join('dir', 'sub', 'f3'),
os.path.join('dir', 'sub', 'f4'),
]
deps = s(env.Dir('dir/sub'), env, ())
sss = list(map(str, deps))
assert sss == expect, "Found {}, expected {}".format(sss, expect)
class DirEntryScannerTestCase(DirScannerTestBase):
def runTest(self):
env = DummyEnvironment(self.test.workpath())
s = SCons.Scanner.Dir.DirEntryScanner()
deps = s(env.Dir('dir'), env, ())
sss = list(map(str, deps))
assert sss == [], "Found {}, expected {}".format(sss, [])
deps = s(env.Dir('dir/sub'), env, ())
sss = list(map(str, deps))
assert sss == [], "Found {}, expected {}".format(sss, [])
deps = s(env.File('dir/f1'), env, ())
sss = list(map(str, deps))
assert sss == [], "Found {}, expected {}".format(sss, [])
if __name__ == "__main__":
unittest.main()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| true
| true
|
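The two records above exercise SCons's directory scanners: DirScanner reports a directory's files and subdirectories as dependencies while skipping the .sconsign* control files, and DirEntryScanner deliberately returns nothing. A minimal sketch of driving the scanner outside the test harness, assuming SCons is importable; the demo/ tree and the Env class are illustrative, mirroring the DummyEnvironment in the tests:

import os
import SCons.Node.FS
import SCons.Scanner.Dir

class Env:
    # Minimal environment, mirroring the DummyEnvironment in the tests above.
    def __init__(self, root):
        self.fs = SCons.Node.FS.FS(root)
    def Dir(self, name):
        return self.fs.Dir(name)
    def Entry(self, name):
        return self.fs.Entry(name)
    def File(self, name):
        return self.fs.File(name)
    def get_factory(self, factory):
        return factory or self.fs.Entry

os.makedirs('demo/sub', exist_ok=True)          # throwaway tree for the sketch
with open('demo/f1', 'w') as fh:
    fh.write('demo/f1\n')

env = Env(os.getcwd())
deps = SCons.Scanner.Dir.DirScanner()(env.Dir('demo'), env, ())
print(sorted(str(d) for d in deps))             # e.g. ['demo/f1', 'demo/sub']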
1c46c7815098b0d623f6f7a150989694f53aa6f2
| 877
|
py
|
Python
|
show/gearbox.py
|
sg893052/sonic-utilities
|
fdb79b8d65b8ca22232f4e6b140f593dd01613d5
|
[
"Apache-2.0"
] | 91
|
2016-03-23T14:24:41.000Z
|
2022-03-18T20:25:37.000Z
|
show/gearbox.py
|
sg893052/sonic-utilities
|
fdb79b8d65b8ca22232f4e6b140f593dd01613d5
|
[
"Apache-2.0"
] | 1,495
|
2017-02-15T10:49:10.000Z
|
2022-03-31T18:49:56.000Z
|
show/gearbox.py
|
sg893052/sonic-utilities
|
fdb79b8d65b8ca22232f4e6b140f593dd01613d5
|
[
"Apache-2.0"
] | 466
|
2016-04-25T09:31:23.000Z
|
2022-03-31T06:54:17.000Z
|
import click
import utilities_common.cli as clicommon
@click.group(cls=clicommon.AliasedGroup)
def gearbox():
"""Show gearbox info"""
pass
# 'phys' subcommand ("show gearbox phys")
@gearbox.group(cls=clicommon.AliasedGroup)
def phys():
"""Show external PHY information"""
pass
# 'status' subcommand ("show gearbox phys status")
@phys.command()
@click.pass_context
def status(ctx):
"""Show gearbox phys status"""
clicommon.run_command("gearboxutil phys status")
# 'interfaces' subcommand ("show gearbox interfaces")
@gearbox.group(cls=clicommon.AliasedGroup)
def interfaces():
"""Show gearbox interfaces information"""
pass
# 'status' subcommand ("show gearbox interfaces status")
@interfaces.command()
@click.pass_context
def status(ctx):
"""Show gearbox interfaces status"""
clicommon.run_command("gearboxutil interfaces status")
| 25.057143
| 58
| 0.729761
|
import click
import utilities_common.cli as clicommon
@click.group(cls=clicommon.AliasedGroup)
def gearbox():
pass
@gearbox.group(cls=clicommon.AliasedGroup)
def phys():
pass
@phys.command()
@click.pass_context
def status(ctx):
clicommon.run_command("gearboxutil phys status")
@gearbox.group(cls=clicommon.AliasedGroup)
def interfaces():
pass
@interfaces.command()
@click.pass_context
def status(ctx):
clicommon.run_command("gearboxutil interfaces status")
| true
| true
|
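gearbox.py above builds its CLI from nested click groups, so the leaf commands resolve as `show gearbox phys status` and `show gearbox interfaces status`. A minimal sketch of the same nesting with plain click; run_command is stubbed here, since utilities_common ships with SONiC:

import click

def run_command(cmd):
    # Stand-in for utilities_common.cli.run_command.
    click.echo('would run: ' + cmd)

@click.group()
def gearbox():
    """Show gearbox info"""

@gearbox.group()
def phys():
    """Show external PHY information"""

@phys.command()
def status():
    """Show gearbox phys status"""
    run_command('gearboxutil phys status')

if __name__ == '__main__':
    gearbox()   # e.g. `python demo.py phys status`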
1c46c7dc238b0a632c7b17c278cc27218f17eb00
| 6,095
|
py
|
Python
|
software/metax/WeightDBUtilities.py
|
adellanno/MetaXcan
|
cfc9e369bbf5630e0c9488993cd877f231c5d02e
|
[
"MIT"
] | 83
|
2016-07-19T20:14:52.000Z
|
2022-03-28T17:02:39.000Z
|
software/metax/WeightDBUtilities.py
|
adellanno/MetaXcan
|
cfc9e369bbf5630e0c9488993cd877f231c5d02e
|
[
"MIT"
] | 75
|
2016-02-25T16:43:17.000Z
|
2022-03-30T14:19:03.000Z
|
software/metax/WeightDBUtilities.py
|
adellanno/MetaXcan
|
cfc9e369bbf5630e0c9488993cd877f231c5d02e
|
[
"MIT"
] | 71
|
2016-02-11T17:10:32.000Z
|
2022-03-30T20:15:19.000Z
|
__author__ = 'heroico'
import sqlite3
import os
from collections import OrderedDict
from . import Exceptions
class GeneEntry:
def __init__(self, gene, gene_name, n_snps, R2, pval,qval):
self.gene = gene
self.gene_name = gene_name
self.n_snps = n_snps
self.pred_perf_R2 = R2
self.pred_perf_pval = pval
self.pred_perf_qval = qval
class WeightDBEntry:
def __init__(self, rsid=None, gene=None, weight=None, ref_allele=None, eff_allele=None, pval=None, N=None, cis=None):
"""Warning: many db's have empty 'N', 'cis' and 'pval'"""
self.rsid = rsid
self.gene = gene
self.weight = weight
self.ref_allele = ref_allele
self.eff_allele = eff_allele
self.pval = pval
self.N = N
self.cis = cis
class WDBQF(object):
"Weight DB weight Query Format"
RSID=0
GENE=1
WEIGHT=2
REF_ALLELE=3
EFF_ALLELE=4
class WDBEQF(object):
"Weight DB extra table Query Format"
GENE=0
GENE_NAME=1
N_SNP_IN_MODEL=2
PRED_PERF_R2=3
PRED_PERF_PVAL=4
PRED_PERF_QVAL=5
class WeightDB(object):
def __init__(self, file_name , create_if_absent=False):
self.connection = None
self.cursor = None
self.file_name = file_name
self.create_if_absent = create_if_absent
def __del__(self):
self.closeDB()
def openDBIfNecessary(self):
if not self.connection:
if not self.create_if_absent and not os.path.exists(self.file_name):
raise RuntimeError("Weight file doesn't exist")
self.connection = sqlite3.connect(self.file_name)
self.cursor = self.connection.cursor()
def closeDB(self):
if self.connection:
self.connection.close()
self.connection = None
self.cursor = None
def weightEntriesFromResults(self, results, extra, result_callback=None):
weights = []
for result in results:
weight = WeightDBEntry(result[WDBQF.RSID],
result[WDBQF.GENE],
result[WDBQF.WEIGHT],
result[WDBQF.REF_ALLELE],
result[WDBQF.EFF_ALLELE])
weights.append(weight)
if result_callback:
result_callback(weight, extra)
return weights
def loadFromDB(self, callback=None, gene_key=None):
self.openDBIfNecessary()
extra = self.loadExtraColumnData()
extra = {e.gene:e for e in extra}
if gene_key is None:
results = self.cursor.execute("SELECT rsid, gene, weight, ref_allele, eff_allele FROM weights;")
else:
            results = self.cursor.execute("SELECT rsid, gene, weight, ref_allele, eff_allele FROM weights where gene = ?;", (gene_key,))
weights = self.weightEntriesFromResults(results, extra, callback)
return weights
def loadExtraColumnData(self, gene_key=None):
self.openDBIfNecessary()
try:
if gene_key is None:
results = self.cursor.execute("SELECT gene, genename, `n.snps.in.model`, `pred.perf.R2`, `pred.perf.pval`, `pred.perf.qval` FROM extra;")
else:
results = self.cursor.execute("SELECT gene, genename, `n.snps.in.model`, `pred.perf.R2`, `pred.perf.pval`, `pred.perf.qval` FROM extra WHERE gene = ?;", (gene_key,))
except sqlite3.OperationalError as e:
print(str(e))
raise Exceptions.ReportableException("Could not read input tissue database. Please try updating the tissue model files.")
except Exception as e:
raise e
extra = [GeneEntry(x[WDBEQF.GENE], x[WDBEQF.GENE_NAME], x[WDBEQF.N_SNP_IN_MODEL], x[WDBEQF.PRED_PERF_R2], x[WDBEQF.PRED_PERF_PVAL], x[WDBEQF.PRED_PERF_QVAL]) for x in results]
return extra
def loadGeneNamesFromDB(self):
self.openDBIfNecessary()
names = []
results = self.cursor.execute("SELECT DISTINCT gene FROM weights;")
for result in results:
name = result[0]
names.append(name)
return names
class WeightDBEntryLogic(object):
def __init__(self, db_file_name):
self.weights_by_gene = OrderedDict()#{}
self.genes_for_an_rsid = OrderedDict()#{}
self.gene_data_for_gene = OrderedDict()#{}
self._loadData(db_file_name)
def anEntryWithRSID(self, rsid):
entry = None
if not rsid in self.genes_for_an_rsid:
return entry
genes = self.genes_for_an_rsid[rsid]
gene = genes[0]
weights = self.weights_by_gene[gene]
entry = weights[rsid]
return entry
def _loadData(self, db_file_name):
weights_db = WeightDB(db_file_name)
class ByNameCallback(object):
"""Helper class to group weights by gene name"""
def __init__(self, weights_by_gene, genes_for_an_rsid, gene_data_for_gene):
self.weights_by_gene = weights_by_gene
self.genes_for_an_rsid = genes_for_an_rsid
self.gene_data_for_gene = gene_data_for_gene
def __call__(self, weight, extra):
if weight.gene in self.weights_by_gene:
weights = self.weights_by_gene[weight.gene]
else:
weights = OrderedDict()
self.weights_by_gene[weight.gene] = weights
weights[weight.rsid]= weight
if not weight.rsid in self.genes_for_an_rsid:
self.genes_for_an_rsid[weight.rsid] = []
genes = self.genes_for_an_rsid[weight.rsid]
if not weight.gene in genes:
genes.append(weight.gene)
gene_entry = extra[weight.gene]
self.gene_data_for_gene[weight.gene] = gene_entry
callback = ByNameCallback(self.weights_by_gene, self.genes_for_an_rsid, self.gene_data_for_gene)
weights_db.loadFromDB(callback)
| 35.643275
| 183
| 0.612961
|
__author__ = 'heroico'
import sqlite3
import os
from collections import OrderedDict
from . import Exceptions
class GeneEntry:
def __init__(self, gene, gene_name, n_snps, R2, pval,qval):
self.gene = gene
self.gene_name = gene_name
self.n_snps = n_snps
self.pred_perf_R2 = R2
self.pred_perf_pval = pval
self.pred_perf_qval = qval
class WeightDBEntry:
def __init__(self, rsid=None, gene=None, weight=None, ref_allele=None, eff_allele=None, pval=None, N=None, cis=None):
self.rsid = rsid
self.gene = gene
self.weight = weight
self.ref_allele = ref_allele
self.eff_allele = eff_allele
self.pval = pval
self.N = N
self.cis = cis
class WDBQF(object):
RSID=0
GENE=1
WEIGHT=2
REF_ALLELE=3
EFF_ALLELE=4
class WDBEQF(object):
GENE=0
GENE_NAME=1
N_SNP_IN_MODEL=2
PRED_PERF_R2=3
PRED_PERF_PVAL=4
PRED_PERF_QVAL=5
class WeightDB(object):
def __init__(self, file_name , create_if_absent=False):
self.connection = None
self.cursor = None
self.file_name = file_name
self.create_if_absent = create_if_absent
def __del__(self):
self.closeDB()
def openDBIfNecessary(self):
if not self.connection:
if not self.create_if_absent and not os.path.exists(self.file_name):
raise RuntimeError("Weight file doesn't exist")
self.connection = sqlite3.connect(self.file_name)
self.cursor = self.connection.cursor()
def closeDB(self):
if self.connection:
self.connection.close()
self.connection = None
self.cursor = None
def weightEntriesFromResults(self, results, extra, result_callback=None):
weights = []
for result in results:
weight = WeightDBEntry(result[WDBQF.RSID],
result[WDBQF.GENE],
result[WDBQF.WEIGHT],
result[WDBQF.REF_ALLELE],
result[WDBQF.EFF_ALLELE])
weights.append(weight)
if result_callback:
result_callback(weight, extra)
return weights
def loadFromDB(self, callback=None, gene_key=None):
self.openDBIfNecessary()
extra = self.loadExtraColumnData()
extra = {e.gene:e for e in extra}
if gene_key is None:
results = self.cursor.execute("SELECT rsid, gene, weight, ref_allele, eff_allele FROM weights;")
else:
            results = self.cursor.execute("SELECT rsid, gene, weight, ref_allele, eff_allele FROM weights where gene = ?;", (gene_key,))
weights = self.weightEntriesFromResults(results, extra, callback)
return weights
def loadExtraColumnData(self, gene_key=None):
self.openDBIfNecessary()
try:
if gene_key is None:
results = self.cursor.execute("SELECT gene, genename, `n.snps.in.model`, `pred.perf.R2`, `pred.perf.pval`, `pred.perf.qval` FROM extra;")
else:
results = self.cursor.execute("SELECT gene, genename, `n.snps.in.model`, `pred.perf.R2`, `pred.perf.pval`, `pred.perf.qval` FROM extra WHERE gene = ?;", (gene_key,))
except sqlite3.OperationalError as e:
print(str(e))
raise Exceptions.ReportableException("Could not read input tissue database. Please try updating the tissue model files.")
except Exception as e:
raise e
extra = [GeneEntry(x[WDBEQF.GENE], x[WDBEQF.GENE_NAME], x[WDBEQF.N_SNP_IN_MODEL], x[WDBEQF.PRED_PERF_R2], x[WDBEQF.PRED_PERF_PVAL], x[WDBEQF.PRED_PERF_QVAL]) for x in results]
return extra
def loadGeneNamesFromDB(self):
self.openDBIfNecessary()
names = []
results = self.cursor.execute("SELECT DISTINCT gene FROM weights;")
for result in results:
name = result[0]
names.append(name)
return names
class WeightDBEntryLogic(object):
def __init__(self, db_file_name):
self.weights_by_gene = OrderedDict()#{}
self.genes_for_an_rsid = OrderedDict()#{}
self.gene_data_for_gene = OrderedDict()#{}
self._loadData(db_file_name)
def anEntryWithRSID(self, rsid):
entry = None
if not rsid in self.genes_for_an_rsid:
return entry
genes = self.genes_for_an_rsid[rsid]
gene = genes[0]
weights = self.weights_by_gene[gene]
entry = weights[rsid]
return entry
def _loadData(self, db_file_name):
weights_db = WeightDB(db_file_name)
class ByNameCallback(object):
def __init__(self, weights_by_gene, genes_for_an_rsid, gene_data_for_gene):
self.weights_by_gene = weights_by_gene
self.genes_for_an_rsid = genes_for_an_rsid
self.gene_data_for_gene = gene_data_for_gene
def __call__(self, weight, extra):
if weight.gene in self.weights_by_gene:
weights = self.weights_by_gene[weight.gene]
else:
weights = OrderedDict()
self.weights_by_gene[weight.gene] = weights
weights[weight.rsid]= weight
if not weight.rsid in self.genes_for_an_rsid:
self.genes_for_an_rsid[weight.rsid] = []
genes = self.genes_for_an_rsid[weight.rsid]
if not weight.gene in genes:
genes.append(weight.gene)
gene_entry = extra[weight.gene]
self.gene_data_for_gene[weight.gene] = gene_entry
callback = ByNameCallback(self.weights_by_gene, self.genes_for_an_rsid, self.gene_data_for_gene)
weights_db.loadFromDB(callback)
| true
| true
|
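WeightDBUtilities above opens the SQLite model lazily and, through ByNameCallback, indexes weights by gene and genes by rsid. A minimal consumption sketch, assuming a model database with the weights and extra tables the queries expect; the import path and file name are assumptions:

from metax.WeightDBUtilities import WeightDBEntryLogic   # import path is an assumption

logic = WeightDBEntryLogic('models/some_tissue.db')      # illustrative file name
for gene, weights in logic.weights_by_gene.items():
    info = logic.gene_data_for_gene[gene]
    print(gene, info.gene_name, len(weights))            # weights per gene
entry = logic.anEntryWithRSID('rs12345')                 # None when the rsid is absent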
1c46c9cfeb9efcce9902f255edbe15907ddf263e
| 4,994
|
py
|
Python
|
tests/http/test_fedclient.py
|
SimmyD/synapse
|
26f2f1ca9a8ce6c32e574f0f0e60bb24b773c4e3
|
[
"Apache-2.0"
] | null | null | null |
tests/http/test_fedclient.py
|
SimmyD/synapse
|
26f2f1ca9a8ce6c32e574f0f0e60bb24b773c4e3
|
[
"Apache-2.0"
] | null | null | null |
tests/http/test_fedclient.py
|
SimmyD/synapse
|
26f2f1ca9a8ce6c32e574f0f0e60bb24b773c4e3
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mock import Mock
from twisted.internet.defer import TimeoutError
from twisted.internet.error import ConnectingCancelledError, DNSLookupError
from twisted.web.client import ResponseNeverReceived
from synapse.http.matrixfederationclient import MatrixFederationHttpClient
from tests.unittest import HomeserverTestCase
class FederationClientTests(HomeserverTestCase):
def make_homeserver(self, reactor, clock):
hs = self.setup_test_homeserver(reactor=reactor, clock=clock)
hs.tls_client_options_factory = None
return hs
def prepare(self, reactor, clock, homeserver):
self.cl = MatrixFederationHttpClient(self.hs)
self.reactor.lookups["testserv"] = "1.2.3.4"
def test_dns_error(self):
"""
If the DNS raising returns an error, it will bubble up.
"""
d = self.cl._request("testserv2:8008", "GET", "foo/bar", timeout=10000)
self.pump()
f = self.failureResultOf(d)
self.assertIsInstance(f.value, DNSLookupError)
def test_client_never_connect(self):
"""
If the HTTP request is not connected and is timed out, it'll give a
ConnectingCancelledError.
"""
d = self.cl._request("testserv:8008", "GET", "foo/bar", timeout=10000)
self.pump()
# Nothing happened yet
self.assertFalse(d.called)
# Make sure treq is trying to connect
clients = self.reactor.tcpClients
self.assertEqual(len(clients), 1)
self.assertEqual(clients[0][0], '1.2.3.4')
self.assertEqual(clients[0][1], 8008)
# Deferred is still without a result
self.assertFalse(d.called)
# Push by enough to time it out
self.reactor.advance(10.5)
f = self.failureResultOf(d)
self.assertIsInstance(f.value, ConnectingCancelledError)
def test_client_connect_no_response(self):
"""
If the HTTP request is connected, but gets no response before being
timed out, it'll give a ResponseNeverReceived.
"""
d = self.cl._request("testserv:8008", "GET", "foo/bar", timeout=10000)
self.pump()
# Nothing happened yet
self.assertFalse(d.called)
# Make sure treq is trying to connect
clients = self.reactor.tcpClients
self.assertEqual(len(clients), 1)
self.assertEqual(clients[0][0], '1.2.3.4')
self.assertEqual(clients[0][1], 8008)
conn = Mock()
client = clients[0][2].buildProtocol(None)
client.makeConnection(conn)
# Deferred is still without a result
self.assertFalse(d.called)
# Push by enough to time it out
self.reactor.advance(10.5)
f = self.failureResultOf(d)
self.assertIsInstance(f.value, ResponseNeverReceived)
def test_client_gets_headers(self):
"""
Once the client gets the headers, _request returns successfully.
"""
d = self.cl._request("testserv:8008", "GET", "foo/bar", timeout=10000)
self.pump()
conn = Mock()
clients = self.reactor.tcpClients
client = clients[0][2].buildProtocol(None)
client.makeConnection(conn)
# Deferred does not have a result
self.assertFalse(d.called)
# Send it the HTTP response
client.dataReceived(b"HTTP/1.1 200 OK\r\nServer: Fake\r\n\r\n")
# We should get a successful response
r = self.successResultOf(d)
self.assertEqual(r.code, 200)
def test_client_headers_no_body(self):
"""
If the HTTP request is connected, but gets no response before being
timed out, it'll give a ResponseNeverReceived.
"""
d = self.cl.post_json("testserv:8008", "foo/bar", timeout=10000)
self.pump()
conn = Mock()
clients = self.reactor.tcpClients
client = clients[0][2].buildProtocol(None)
client.makeConnection(conn)
# Deferred does not have a result
self.assertFalse(d.called)
# Send it the HTTP response
client.dataReceived(
(b"HTTP/1.1 200 OK\r\nContent-Type: application/json\r\n"
b"Server: Fake\r\n\r\n")
)
# Push by enough to time it out
self.reactor.advance(10.5)
f = self.failureResultOf(d)
self.assertIsInstance(f.value, TimeoutError)
| 31.607595
| 79
| 0.647777
|
from mock import Mock
from twisted.internet.defer import TimeoutError
from twisted.internet.error import ConnectingCancelledError, DNSLookupError
from twisted.web.client import ResponseNeverReceived
from synapse.http.matrixfederationclient import MatrixFederationHttpClient
from tests.unittest import HomeserverTestCase
class FederationClientTests(HomeserverTestCase):
def make_homeserver(self, reactor, clock):
hs = self.setup_test_homeserver(reactor=reactor, clock=clock)
hs.tls_client_options_factory = None
return hs
def prepare(self, reactor, clock, homeserver):
self.cl = MatrixFederationHttpClient(self.hs)
self.reactor.lookups["testserv"] = "1.2.3.4"
def test_dns_error(self):
d = self.cl._request("testserv2:8008", "GET", "foo/bar", timeout=10000)
self.pump()
f = self.failureResultOf(d)
self.assertIsInstance(f.value, DNSLookupError)
def test_client_never_connect(self):
d = self.cl._request("testserv:8008", "GET", "foo/bar", timeout=10000)
self.pump()
self.assertFalse(d.called)
clients = self.reactor.tcpClients
self.assertEqual(len(clients), 1)
self.assertEqual(clients[0][0], '1.2.3.4')
self.assertEqual(clients[0][1], 8008)
self.assertFalse(d.called)
self.reactor.advance(10.5)
f = self.failureResultOf(d)
self.assertIsInstance(f.value, ConnectingCancelledError)
def test_client_connect_no_response(self):
d = self.cl._request("testserv:8008", "GET", "foo/bar", timeout=10000)
self.pump()
self.assertFalse(d.called)
clients = self.reactor.tcpClients
self.assertEqual(len(clients), 1)
self.assertEqual(clients[0][0], '1.2.3.4')
self.assertEqual(clients[0][1], 8008)
conn = Mock()
client = clients[0][2].buildProtocol(None)
client.makeConnection(conn)
self.assertFalse(d.called)
self.reactor.advance(10.5)
f = self.failureResultOf(d)
self.assertIsInstance(f.value, ResponseNeverReceived)
def test_client_gets_headers(self):
d = self.cl._request("testserv:8008", "GET", "foo/bar", timeout=10000)
self.pump()
conn = Mock()
clients = self.reactor.tcpClients
client = clients[0][2].buildProtocol(None)
client.makeConnection(conn)
self.assertFalse(d.called)
client.dataReceived(b"HTTP/1.1 200 OK\r\nServer: Fake\r\n\r\n")
r = self.successResultOf(d)
self.assertEqual(r.code, 200)
def test_client_headers_no_body(self):
d = self.cl.post_json("testserv:8008", "foo/bar", timeout=10000)
self.pump()
conn = Mock()
clients = self.reactor.tcpClients
client = clients[0][2].buildProtocol(None)
client.makeConnection(conn)
self.assertFalse(d.called)
client.dataReceived(
(b"HTTP/1.1 200 OK\r\nContent-Type: application/json\r\n"
b"Server: Fake\r\n\r\n")
)
self.reactor.advance(10.5)
f = self.failureResultOf(d)
self.assertIsInstance(f.value, TimeoutError)
| true
| true
|
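The tests above depend on a deterministic reactor: advancing a fake clock fires timeouts synchronously, so a Deferred's failure mode can be asserted immediately. A self-contained sketch of that idiom with Twisted's Clock, with no homeserver involved; the 10-second delay is illustrative:

from twisted.internet.defer import Deferred, TimeoutError
from twisted.internet.task import Clock

clock = Clock()
d = Deferred()
clock.callLater(10, d.errback, TimeoutError('no response'))   # pending timeout

clock.advance(10.5)                                           # fire it synchronously

failures = []
d.addErrback(failures.append)
assert isinstance(failures[0].value, TimeoutError)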
1c46c9e13c1fa6ba1e653f1f33dbebace96b8941
| 1,046
|
py
|
Python
|
release/stubs.min/Autodesk/Revit/DB/Structure/__init___parts/RebarShapeConstraintSagittaLength.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 182
|
2017-06-27T02:26:15.000Z
|
2022-03-30T18:53:43.000Z
|
release/stubs.min/Autodesk/Revit/DB/Structure/__init___parts/RebarShapeConstraintSagittaLength.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 28
|
2017-06-27T13:38:23.000Z
|
2022-03-15T11:19:44.000Z
|
release/stubs.min/Autodesk/Revit/DB/Structure/__init___parts/RebarShapeConstraintSagittaLength.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 67
|
2017-06-28T09:43:59.000Z
|
2022-03-20T21:17:10.000Z
|
class RebarShapeConstraintSagittaLength(RebarShapeConstraint,IDisposable):
"""
A constraint that can be applied to a RebarShapeDefinitionByArc
and drives the height of the arc.
RebarShapeConstraintSagittaLength(paramId: ElementId)
"""
def Dispose(self):
""" Dispose(self: RebarShapeConstraint,A_0: bool) """
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: RebarShapeConstraint,disposing: bool) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,paramId):
""" __new__(cls: type,paramId: ElementId) """
pass
| 34.866667
| 215
| 0.716061
|
class RebarShapeConstraintSagittaLength(RebarShapeConstraint,IDisposable):
def Dispose(self):
pass
def ReleaseUnmanagedResources(self,*args):
pass
def __enter__(self,*args):
pass
def __exit__(self,*args):
pass
def __init__(self,*args):
pass
@staticmethod
def __new__(self,paramId):
pass
| true
| true
|
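The stub above shows that RebarShapeConstraintSagittaLength implements .NET's IDisposable through `__enter__`/`__exit__`, so instances can be scoped with `with` from IronPython. A plain-Python stand-in of that disposal pattern; the class below is illustrative, not the Revit API:

class Disposable:
    # Mirrors the IDisposable protocol stubbed above.
    def __init__(self):
        self.disposed = False
    def Dispose(self):
        self.disposed = True
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, exc_back):
        self.Dispose()          # release unmanaged resources on scope exit

with Disposable() as d:
    pass                        # use the object here
assert d.disposed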
1c46ca25cd91c0be4a40c520f78d8264149c79a3
| 5,959
|
py
|
Python
|
tests/unit/streamalert_cli/terraform/test_alert_processor.py
|
Meliairon/streamalert
|
3b774a59d260b2822cd156e837781bd34f3625f7
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/streamalert_cli/terraform/test_alert_processor.py
|
Meliairon/streamalert
|
3b774a59d260b2822cd156e837781bd34f3625f7
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/streamalert_cli/terraform/test_alert_processor.py
|
Meliairon/streamalert
|
3b774a59d260b2822cd156e837781bd34f3625f7
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2017-present Airbnb, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from nose.tools import assert_equal
from streamalert_cli.config import CLIConfig
from streamalert_cli.terraform import alert_processor
class TestAlertProcessor(unittest.TestCase):
"""Test the Terraform generation for the alert processor"""
def setUp(self):
"""Create the CLIConfig and the expected template for these tests."""
self.config = dict(CLIConfig(config_path='tests/unit/conf'))
self.alert_proc_config = self.config['lambda']['alert_processor_config']
def test_generate_all_options(self):
"""CLI - Terraform Generate Alert Processor - All Options"""
result = alert_processor.generate_alert_processor(config=self.config)
expected = {
'module': {
'alert_processor_iam': {
'account_id': '12345678910',
'kms_key_arn': '${aws_kms_key.streamalert_secrets.arn}',
'output_lambda_functions': [
'unit_test_function',
'unit_test_qualified_function'
],
'output_s3_buckets': ['unit.test.bucket.name'],
'output_sns_topics': ['unit_test_topic_name'],
'output_sqs_queues': ['unit_test_queue_name'],
'prefix': 'unit-test',
'region': 'us-west-1',
'role_id': '${module.alert_processor_lambda.role_id}',
'source': './modules/tf_alert_processor_iam',
'sse_kms_key_arn': '${aws_kms_key.server_side_encryption.arn}'
},
'alert_processor_lambda': {
'alarm_actions': [
'arn:aws:sns:us-west-1:12345678910:unit-test_streamalert_monitoring'
],
'description': 'Unit-Test Streamalert Alert Processor',
'environment_variables': {
'ALERTS_TABLE': 'unit-test_streamalert_alerts',
'STREAMALERT_PREFIX': 'unit-test',
'AWS_ACCOUNT_ID': '12345678910',
'ENABLE_METRICS': '0',
'LOGGER_LEVEL': 'info'
},
'tags': {},
'errors_alarm_enabled': True,
'errors_alarm_evaluation_periods': 1,
'errors_alarm_period_secs': 2,
'errors_alarm_threshold': 3,
'filename': 'alert_processor.zip',
'function_name': 'unit-test_streamalert_alert_processor',
'handler': 'streamalert.alert_processor.main.handler',
'log_retention_days': 7,
'memory_size_mb': 128,
'source': './modules/tf_lambda',
'throttles_alarm_enabled': True,
'throttles_alarm_evaluation_periods': 4,
'throttles_alarm_period_secs': 5,
'throttles_alarm_threshold': 6,
'timeout_sec': 60,
'vpc_security_group_ids': ['sg-abc'],
'vpc_subnet_ids': ['subnet-123']
}
}
}
assert_equal(expected, result)
def test_generate_minimal_options(self):
"""CLI - Terraform Generate Alert Processor - Minimal Options"""
# Remove extra Lambda options
for key in ['log_level', 'log_retention_days', 'metric_alarms', 'vpc_config']:
del self.alert_proc_config[key]
# Remove all outputs from the config
self.config['outputs'] = {}
result = alert_processor.generate_alert_processor(config=self.config)
expected = {
'module': {
'alert_processor_iam': {
'account_id': '12345678910',
'kms_key_arn': '${aws_kms_key.streamalert_secrets.arn}',
'output_lambda_functions': [],
'output_s3_buckets': [],
'output_sns_topics': [],
'output_sqs_queues': [],
'prefix': 'unit-test',
'region': 'us-west-1',
'role_id': '${module.alert_processor_lambda.role_id}',
'source': './modules/tf_alert_processor_iam',
'sse_kms_key_arn': '${aws_kms_key.server_side_encryption.arn}'
},
'alert_processor_lambda': {
'description': 'Unit-Test Streamalert Alert Processor',
'environment_variables': {
'ALERTS_TABLE': 'unit-test_streamalert_alerts',
'STREAMALERT_PREFIX': 'unit-test',
'AWS_ACCOUNT_ID': '12345678910',
'ENABLE_METRICS': '0',
'LOGGER_LEVEL': 'info'
},
'tags': {},
'filename': 'alert_processor.zip',
'function_name': 'unit-test_streamalert_alert_processor',
'handler': 'streamalert.alert_processor.main.handler',
'memory_size_mb': 128,
'source': './modules/tf_lambda',
'timeout_sec': 60,
}
}
}
assert_equal(expected, result)
| 44.470149
| 92
| 0.538345
|
import unittest
from nose.tools import assert_equal
from streamalert_cli.config import CLIConfig
from streamalert_cli.terraform import alert_processor
class TestAlertProcessor(unittest.TestCase):
def setUp(self):
self.config = dict(CLIConfig(config_path='tests/unit/conf'))
self.alert_proc_config = self.config['lambda']['alert_processor_config']
def test_generate_all_options(self):
result = alert_processor.generate_alert_processor(config=self.config)
expected = {
'module': {
'alert_processor_iam': {
'account_id': '12345678910',
'kms_key_arn': '${aws_kms_key.streamalert_secrets.arn}',
'output_lambda_functions': [
'unit_test_function',
'unit_test_qualified_function'
],
'output_s3_buckets': ['unit.test.bucket.name'],
'output_sns_topics': ['unit_test_topic_name'],
'output_sqs_queues': ['unit_test_queue_name'],
'prefix': 'unit-test',
'region': 'us-west-1',
'role_id': '${module.alert_processor_lambda.role_id}',
'source': './modules/tf_alert_processor_iam',
'sse_kms_key_arn': '${aws_kms_key.server_side_encryption.arn}'
},
'alert_processor_lambda': {
'alarm_actions': [
'arn:aws:sns:us-west-1:12345678910:unit-test_streamalert_monitoring'
],
'description': 'Unit-Test Streamalert Alert Processor',
'environment_variables': {
'ALERTS_TABLE': 'unit-test_streamalert_alerts',
'STREAMALERT_PREFIX': 'unit-test',
'AWS_ACCOUNT_ID': '12345678910',
'ENABLE_METRICS': '0',
'LOGGER_LEVEL': 'info'
},
'tags': {},
'errors_alarm_enabled': True,
'errors_alarm_evaluation_periods': 1,
'errors_alarm_period_secs': 2,
'errors_alarm_threshold': 3,
'filename': 'alert_processor.zip',
'function_name': 'unit-test_streamalert_alert_processor',
'handler': 'streamalert.alert_processor.main.handler',
'log_retention_days': 7,
'memory_size_mb': 128,
'source': './modules/tf_lambda',
'throttles_alarm_enabled': True,
'throttles_alarm_evaluation_periods': 4,
'throttles_alarm_period_secs': 5,
'throttles_alarm_threshold': 6,
'timeout_sec': 60,
'vpc_security_group_ids': ['sg-abc'],
'vpc_subnet_ids': ['subnet-123']
}
}
}
assert_equal(expected, result)
def test_generate_minimal_options(self):
for key in ['log_level', 'log_retention_days', 'metric_alarms', 'vpc_config']:
del self.alert_proc_config[key]
self.config['outputs'] = {}
result = alert_processor.generate_alert_processor(config=self.config)
expected = {
'module': {
'alert_processor_iam': {
'account_id': '12345678910',
'kms_key_arn': '${aws_kms_key.streamalert_secrets.arn}',
'output_lambda_functions': [],
'output_s3_buckets': [],
'output_sns_topics': [],
'output_sqs_queues': [],
'prefix': 'unit-test',
'region': 'us-west-1',
'role_id': '${module.alert_processor_lambda.role_id}',
'source': './modules/tf_alert_processor_iam',
'sse_kms_key_arn': '${aws_kms_key.server_side_encryption.arn}'
},
'alert_processor_lambda': {
'description': 'Unit-Test Streamalert Alert Processor',
'environment_variables': {
'ALERTS_TABLE': 'unit-test_streamalert_alerts',
'STREAMALERT_PREFIX': 'unit-test',
'AWS_ACCOUNT_ID': '12345678910',
'ENABLE_METRICS': '0',
'LOGGER_LEVEL': 'info'
},
'tags': {},
'filename': 'alert_processor.zip',
'function_name': 'unit-test_streamalert_alert_processor',
'handler': 'streamalert.alert_processor.main.handler',
'memory_size_mb': 128,
'source': './modules/tf_lambda',
'timeout_sec': 60,
}
}
}
assert_equal(expected, result)
| true
| true
|
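Both tests above pin the output of `generate_alert_processor` against a hand-written module dict, so any drift in the generated Terraform shows up as an assertion diff. A minimal sketch of spot-checking single keys in the same style, assuming the unit-test config shipped under tests/unit/conf:

from streamalert_cli.config import CLIConfig
from streamalert_cli.terraform import alert_processor

config = dict(CLIConfig(config_path='tests/unit/conf'))
result = alert_processor.generate_alert_processor(config=config)

lambda_module = result['module']['alert_processor_lambda']
assert lambda_module['handler'] == 'streamalert.alert_processor.main.handler'
assert lambda_module['memory_size_mb'] == 128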
1c46cac18042c8dda9c7d3d35fb6a33fff5a1530
| 4,385
|
py
|
Python
|
praisetheflesh/praisetheflesh/settings.py
|
robertraya/portfoliowebsite
|
2a27b86c8cbb63a40025ecc35bc286d2f9654adf
|
[
"CC0-1.0"
] | null | null | null |
praisetheflesh/praisetheflesh/settings.py
|
robertraya/portfoliowebsite
|
2a27b86c8cbb63a40025ecc35bc286d2f9654adf
|
[
"CC0-1.0"
] | null | null | null |
praisetheflesh/praisetheflesh/settings.py
|
robertraya/portfoliowebsite
|
2a27b86c8cbb63a40025ecc35bc286d2f9654adf
|
[
"CC0-1.0"
] | null | null | null |
"""
Django settings for praisetheflesh project.
Generated by 'django-admin startproject' using Django 3.0.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
import django_heroku
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.getenv('SECRET_KEY', 'Optional default value')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['praisetheflesh.herokuapp.com', '127.0.0.1', 'praisetheflesh.com']
# Application definition
INSTALLED_APPS = [
'praisetheflesh',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'praisetheflesh.praisetheflesh.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'ptf/templates/kyle_gannon'),
os.path.join(BASE_DIR, 'store/templates/store')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'praisetheflesh.praisetheflesh.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_TMP = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
os.makedirs(STATIC_TMP, exist_ok=True)
os.makedirs(STATIC_ROOT, exist_ok=True)
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'ptf/static'),
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
MEDIA_URL = "/media/"
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
#security business
SECURE_SSL_REDIRECT = True
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
SECURE_HSTS_PRELOAD = True
#Activate Django-Heroku
django_heroku.settings(locals())
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': os.getenv('DJANGO_LOG_LEVEL', 'DEBUG'),
},
},
}
| 25.346821
| 91
| 0.697834
|
import os
import django_heroku
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = os.getenv('SECRET_KEY', 'Optional default value')
DEBUG = False
ALLOWED_HOSTS = ['praisetheflesh.herokuapp.com', '127.0.0.1', 'praisetheflesh.com']
# Application definition
INSTALLED_APPS = [
'praisetheflesh',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'praisetheflesh.praisetheflesh.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'ptf/templates/kyle_gannon'),
os.path.join(BASE_DIR, 'store/templates/store')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'praisetheflesh.praisetheflesh.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_TMP = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
os.makedirs(STATIC_TMP, exist_ok=True)
os.makedirs(STATIC_ROOT, exist_ok=True)
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'ptf/static'),
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
MEDIA_URL = "/media/"
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
#security business
SECURE_SSL_REDIRECT = True
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
SECURE_HSTS_PRELOAD = True
#Activate Django-Heroku
django_heroku.settings(locals())
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': os.getenv('DJANGO_LOG_LEVEL', 'DEBUG'),
},
},
}
| true
| true
|
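The settings module above reads SECRET_KEY from the environment and hardens cookies and HSTS. A minimal sketch of loading it and checking those flags, assuming the project root is on PYTHONPATH; the key value below is illustrative and for local use only:

import os
os.environ.setdefault('SECRET_KEY', 'dev-only-key')   # illustrative, never for production
os.environ.setdefault('DJANGO_SETTINGS_MODULE',
                      'praisetheflesh.praisetheflesh.settings')

import django
django.setup()

from django.conf import settings
assert settings.SESSION_COOKIE_SECURE and settings.CSRF_COOKIE_SECURE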
1c46cb1e70f734ca59a3951c76d89d67602e1d81
| 2,141
|
py
|
Python
|
main.py
|
mzas/j2v
|
adf63ddd62a356faf845cf7fcb01dbdc81bf163e
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
mzas/j2v
|
adf63ddd62a356faf845cf7fcb01dbdc81bf163e
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
mzas/j2v
|
adf63ddd62a356faf845cf7fcb01dbdc81bf163e
|
[
"Apache-2.0"
] | null | null | null |
from j2v.generation.processor import MainProcessor
from j2v.utils.config import generator_config
import argparse
import datetime
import time
from j2v.utils.helpers import is_truthy
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--json_files", nargs=argparse.ONE_OR_MORE, type=str, default=[], )
parser.add_argument("--output_view", nargs=argparse.OPTIONAL, type=str,
default=generator_config['OUTPUT_VIEW_ML_OUT_DEFAULT'], )
parser.add_argument("--output_explore", nargs=argparse.OPTIONAL, type=str,
default=generator_config['EXPLORE_LKML_OUT_DEFAULT'], )
parser.add_argument("--column_name", nargs=argparse.OPTIONAL, type=str,
default=generator_config['COLUMN_WITH_JSONS_DEFAULT'], )
parser.add_argument("--sql_table_name", nargs=argparse.OPTIONAL, type=str,
default=generator_config['TABLE_WITH_JSON_COLUMN_DEFAULT'], )
parser.add_argument("--table_alias", nargs=argparse.OPTIONAL, type=str,
default=generator_config['TABLE_ALIAS_DEFAULT'], )
parser.add_argument("--handle_null_values_in_sql", nargs=argparse.OPTIONAL, type=str,
default=generator_config['HANDLE_NULL_VALUES_IN_SQL_DEFAULT'], )
parser.add_argument("--primary_key", nargs=argparse.OPTIONAL, type=str,)
args = parser.parse_args()
p = MainProcessor(column_name=args.column_name, output_explore_file_name=args.output_explore,
output_view_file_name=args.output_view, sql_table_name=args.sql_table_name,
table_alias=args.table_alias, handle_null_values_in_sql=is_truthy(args.handle_null_values_in_sql),
primary_key=args.primary_key)
start_time = time.process_time()
print("{date} Running the generator.\n\n".format(date=datetime.datetime.now()))
p.process_json_files(args.json_files)
end_time = time.process_time()
print("\n\n{date} Finished.".format(date=datetime.datetime.now()))
print("Took {duration:10.1f} ms".format(duration=(end_time - start_time) * 1000))
| 61.171429
| 120
| 0.708547
|
from j2v.generation.processor import MainProcessor
from j2v.utils.config import generator_config
import argparse
import datetime
import time
from j2v.utils.helpers import is_truthy
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--json_files", nargs=argparse.ONE_OR_MORE, type=str, default=[], )
parser.add_argument("--output_view", nargs=argparse.OPTIONAL, type=str,
default=generator_config['OUTPUT_VIEW_ML_OUT_DEFAULT'], )
parser.add_argument("--output_explore", nargs=argparse.OPTIONAL, type=str,
default=generator_config['EXPLORE_LKML_OUT_DEFAULT'], )
parser.add_argument("--column_name", nargs=argparse.OPTIONAL, type=str,
default=generator_config['COLUMN_WITH_JSONS_DEFAULT'], )
parser.add_argument("--sql_table_name", nargs=argparse.OPTIONAL, type=str,
default=generator_config['TABLE_WITH_JSON_COLUMN_DEFAULT'], )
parser.add_argument("--table_alias", nargs=argparse.OPTIONAL, type=str,
default=generator_config['TABLE_ALIAS_DEFAULT'], )
parser.add_argument("--handle_null_values_in_sql", nargs=argparse.OPTIONAL, type=str,
default=generator_config['HANDLE_NULL_VALUES_IN_SQL_DEFAULT'], )
parser.add_argument("--primary_key", nargs=argparse.OPTIONAL, type=str,)
args = parser.parse_args()
p = MainProcessor(column_name=args.column_name, output_explore_file_name=args.output_explore,
output_view_file_name=args.output_view, sql_table_name=args.sql_table_name,
table_alias=args.table_alias, handle_null_values_in_sql=is_truthy(args.handle_null_values_in_sql),
primary_key=args.primary_key)
start_time = time.process_time()
print("{date} Running the generator.\n\n".format(date=datetime.datetime.now()))
p.process_json_files(args.json_files)
end_time = time.process_time()
print("\n\n{date} Finished.".format(date=datetime.datetime.now()))
print("Took {duration:10.1f} ms".format(duration=(end_time - start_time) * 1000))
| true
| true
|
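main.py above is a thin argparse front end over MainProcessor, with every default drawn from generator_config. A minimal sketch of calling the processor directly, bypassing the CLI; all argument values here are illustrative:

from j2v.generation.processor import MainProcessor

p = MainProcessor(column_name='payload',                 # column holding JSON blobs
                  output_explore_file_name='out.explore.lkml',
                  output_view_file_name='out.view.lkml',
                  sql_table_name='events',
                  table_alias='e',
                  handle_null_values_in_sql=True,
                  primary_key='id')
p.process_json_files(['sample.json'])                    # illustrative input file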
1c46cd2745257059eee9d1a34c553a3af73b903b
| 799
|
py
|
Python
|
profiles_api/migrations/0003_profilefeeditem.py
|
AbdElRahman24597/profiles-rest-api
|
4fd19af745b015b234f9382276b1ac75aaca7a26
|
[
"MIT"
] | null | null | null |
profiles_api/migrations/0003_profilefeeditem.py
|
AbdElRahman24597/profiles-rest-api
|
4fd19af745b015b234f9382276b1ac75aaca7a26
|
[
"MIT"
] | null | null | null |
profiles_api/migrations/0003_profilefeeditem.py
|
AbdElRahman24597/profiles-rest-api
|
4fd19af745b015b234f9382276b1ac75aaca7a26
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2 on 2021-04-30 14:11
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('profiles_api', '0002_auto_20210428_1320'),
]
operations = [
migrations.CreateModel(
name='ProfileFeedItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status_text', models.CharField(max_length=255)),
('created_on', models.DateTimeField(auto_now_add=True)),
('user_profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 31.96
| 126
| 0.638298
|
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('profiles_api', '0002_auto_20210428_1320'),
]
operations = [
migrations.CreateModel(
name='ProfileFeedItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status_text', models.CharField(max_length=255)),
('created_on', models.DateTimeField(auto_now_add=True)),
('user_profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| true
| true
|
1c46cdeac23e702966e16a34fc97f70d095e595d
| 3,325
|
py
|
Python
|
continue.py
|
shivam-kotwalia/KittiSeg
|
598ae9f4f797b850001eea1dbb270e128bb78d7d
|
[
"MIT"
] | 11
|
2017-06-06T21:18:24.000Z
|
2019-11-04T14:58:10.000Z
|
continue.py
|
rgalvaomesquita/KittiSeg
|
ac93c2f0f83bf84f2ba0d645f819b2bbeeeaf58d
|
[
"MIT-0",
"MIT"
] | null | null | null |
continue.py
|
rgalvaomesquita/KittiSeg
|
ac93c2f0f83bf84f2ba0d645f819b2bbeeeaf58d
|
[
"MIT-0",
"MIT"
] | 5
|
2017-04-28T09:08:54.000Z
|
2020-04-10T23:58:48.000Z
|
"""
Trains, evaluates and saves the KittiSeg model.
-------------------------------------------------
The MIT License (MIT)
Copyright (c) 2017 Marvin Teichmann
More details: https://github.com/MarvinTeichmann/KittiSeg/blob/master/LICENSE
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import commentjson
import logging
import os
import sys
import collections
def dict_merge(dct, merge_dct):
""" Recursive dict merge. Inspired by :meth:``dict.update()``, instead of
updating only top-level keys, dict_merge recurses down into dicts nested
to an arbitrary depth, updating keys. The ``merge_dct`` is merged into
``dct``.
:param dct: dict onto which the merge is executed
:param merge_dct: dct merged into dct
:return: None
"""
for k, v in merge_dct.iteritems():
if (k in dct and isinstance(dct[k], dict) and
isinstance(merge_dct[k], collections.Mapping)):
dict_merge(dct[k], merge_dct[k])
else:
dct[k] = merge_dct[k]
# configure logging
if 'TV_IS_DEV' in os.environ and os.environ['TV_IS_DEV']:
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
level=logging.INFO,
stream=sys.stdout)
else:
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
level=logging.INFO,
stream=sys.stdout)
# https://github.com/tensorflow/tensorflow/issues/2034#issuecomment-220820070
import numpy as np
flags = tf.app.flags
FLAGS = flags.FLAGS
sys.path.insert(1, 'incl')
import tensorvision.train as train
import tensorvision.utils as utils
flags.DEFINE_string('name', None,
'Append a name Tag to run.')
flags.DEFINE_string('project', None,
'Append a name Tag to run.')
flags.DEFINE_string('logdir', None,
'File storing model parameters.')
flags.DEFINE_string('mod', None,
'Modifier for model parameters.')
if 'TV_SAVE' in os.environ and os.environ['TV_SAVE']:
tf.app.flags.DEFINE_boolean(
'save', True, ('Whether to save the run. In case --nosave (default) '
'output will be saved to the folder TV_DIR_RUNS/debug, '
'hence it will get overwritten by further runs.'))
else:
tf.app.flags.DEFINE_boolean(
'save', True, ('Whether to save the run. In case --nosave (default) '
'output will be saved to the folder TV_DIR_RUNS/debug '
'hence it will get overwritten by further runs.'))
def main(_):
utils.set_gpus_to_use()
try:
import tensorvision.train
import tensorflow_fcn.utils
except ImportError:
logging.error("Could not import the submodules.")
logging.error("Please execute:"
"'git submodule update --init --recursive'")
exit(1)
if tf.app.flags.FLAGS.logdir is None:
logging.error("No logdir is given.")
logging.info("Usage: python train.py --logdir dir")
exit(1)
logging.info("Continuing training...")
train.continue_training(tf.app.flags.FLAGS.logdir)
if __name__ == '__main__':
tf.app.run()
| 29.954955
| 79
| 0.628872
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import commentjson
import logging
import os
import sys
import collections
def dict_merge(dct, merge_dct):
for k, v in merge_dct.iteritems():
if (k in dct and isinstance(dct[k], dict) and
isinstance(merge_dct[k], collections.Mapping)):
dict_merge(dct[k], merge_dct[k])
else:
dct[k] = merge_dct[k]
if 'TV_IS_DEV' in os.environ and os.environ['TV_IS_DEV']:
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
level=logging.INFO,
stream=sys.stdout)
else:
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
level=logging.INFO,
stream=sys.stdout)
import numpy as np
flags = tf.app.flags
FLAGS = flags.FLAGS
sys.path.insert(1, 'incl')
import tensorvision.train as train
import tensorvision.utils as utils
flags.DEFINE_string('name', None,
'Append a name Tag to run.')
flags.DEFINE_string('project', None,
'Append a name Tag to run.')
flags.DEFINE_string('logdir', None,
'File storing model parameters.')
flags.DEFINE_string('mod', None,
'Modifier for model parameters.')
if 'TV_SAVE' in os.environ and os.environ['TV_SAVE']:
tf.app.flags.DEFINE_boolean(
'save', True, ('Whether to save the run. In case --nosave (default) '
'output will be saved to the folder TV_DIR_RUNS/debug, '
'hence it will get overwritten by further runs.'))
else:
tf.app.flags.DEFINE_boolean(
'save', True, ('Whether to save the run. In case --nosave (default) '
'output will be saved to the folder TV_DIR_RUNS/debug '
'hence it will get overwritten by further runs.'))
def main(_):
utils.set_gpus_to_use()
try:
import tensorvision.train
import tensorflow_fcn.utils
except ImportError:
logging.error("Could not import the submodules.")
logging.error("Please execute:"
"'git submodule update --init --recursive'")
exit(1)
if tf.app.flags.FLAGS.logdir is None:
logging.error("No logdir is given.")
logging.info("Usage: python train.py --logdir dir")
exit(1)
logging.info("Continuing training...")
train.continue_training(tf.app.flags.FLAGS.logdir)
if __name__ == '__main__':
tf.app.run()
| true
| true
|
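dict_merge above recurses into nested dicts instead of replacing them wholesale, which is what lets a --mod override touch one leaf of a hyperparameter tree while keeping its siblings. A short illustration; the record's version is Python 2 (iteritems), so the sketch below is a Python 3 port:

import collections.abc

def dict_merge(dct, merge_dct):
    # Python 3 port of the record's dict_merge.
    for k, v in merge_dct.items():
        if (k in dct and isinstance(dct[k], dict)
                and isinstance(v, collections.abc.Mapping)):
            dict_merge(dct[k], v)
        else:
            dct[k] = v

cfg = {'solver': {'lr': 1e-5, 'epochs': 10}, 'arch': 'fcn'}
dict_merge(cfg, {'solver': {'lr': 1e-4}})
assert cfg == {'solver': {'lr': 1e-4, 'epochs': 10}, 'arch': 'fcn'}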
1c46cf40bbac327ea35c8b14b39b6f7814418ca2
| 52,022
|
py
|
Python
|
spikeinterface/sortingcomponents/template_matching.py
|
scratchrealm/spikeinterface
|
17cfcd6f0c30c9933c11e560daf750366e12a151
|
[
"MIT"
] | null | null | null |
spikeinterface/sortingcomponents/template_matching.py
|
scratchrealm/spikeinterface
|
17cfcd6f0c30c9933c11e560daf750366e12a151
|
[
"MIT"
] | null | null | null |
spikeinterface/sortingcomponents/template_matching.py
|
scratchrealm/spikeinterface
|
17cfcd6f0c30c9933c11e560daf750366e12a151
|
[
"MIT"
] | null | null | null |
"""Sorting components: template matching."""
import numpy as np
import scipy.spatial
from tqdm import tqdm
import sklearn, scipy
import scipy
from threadpoolctl import threadpool_limits
try:
import numba
from numba import jit, prange
HAVE_NUMBA = True
except ImportError:
HAVE_NUMBA = False
from spikeinterface.core import WaveformExtractor
from spikeinterface.core.job_tools import ChunkRecordingExecutor
from spikeinterface.toolkit import (get_noise_levels, get_template_channel_sparsity,
get_channel_distances, get_chunk_with_margin, get_template_extremum_channel, get_random_data_chunks)
from spikeinterface.sortingcomponents.peak_detection import detect_peak_locally_exclusive, detect_peaks_by_channel
from sklearn.feature_extraction.image import extract_patches_2d, reconstruct_from_patches_2d
from sklearn.linear_model import orthogonal_mp_gram
potrs, = scipy.linalg.get_lapack_funcs(('potrs',), dtype=np.float32)
nrm2, = scipy.linalg.get_blas_funcs(('nrm2', ), dtype=np.float32)
spike_dtype = [('sample_ind', 'int64'), ('channel_ind', 'int64'), ('cluster_ind', 'int64'),
('amplitude', 'float64'), ('segment_ind', 'int64')]
def find_spikes_from_templates(recording, method='naive', method_kwargs={}, extra_outputs=False,
**job_kwargs):
"""Find spike from a recording from given templates.
Parameters
----------
recording: RecordingExtractor
The recording extractor object
waveform_extractor: WaveformExtractor
The waveform extractor
method: str
Which method to use ('naive' | 'tridesclous' | 'circus')
method_kwargs: dict, optional
Keyword arguments for the chosen method
extra_outputs: bool
        If True then method_kwargs is also returned
job_kwargs: dict
Parameters for ChunkRecordingExecutor
Returns
-------
spikes: ndarray
Spikes found from templates.
method_kwargs:
        Optionally returned, for debugging purposes.
Notes
-----
Templates are represented as WaveformExtractor so statistics can be extracted.
"""
assert method in template_matching_methods
method_class = template_matching_methods[method]
# initialize
method_kwargs = method_class.initialize_and_check_kwargs(recording, method_kwargs)
# add
method_kwargs['margin'] = method_class.get_margin(recording, method_kwargs)
    # serialize kwargs for the workers
method_kwargs_seralized = method_class.serialize_method_kwargs(method_kwargs)
# and run
func = _find_spikes_chunk
init_func = _init_worker_find_spikes
init_args = (recording.to_dict(), method, method_kwargs_seralized)
processor = ChunkRecordingExecutor(recording, func, init_func, init_args,
handle_returns=True, job_name=f'find spikes ({method})', **job_kwargs)
spikes = processor.run()
spikes = np.concatenate(spikes)
if extra_outputs:
return spikes, method_kwargs
else:
return spikes
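# A minimal usage sketch of the function above, assuming a WaveformExtractor
# `we` was computed beforehand for `recording` (the job kwargs are illustrative):
#
#     spikes = find_spikes_from_templates(
#         recording, method='naive',
#         method_kwargs={'waveform_extractor': we},
#         n_jobs=1, chunk_size=30000)
#
# `spikes` is a structured array with the `spike_dtype` fields defined above
# ('sample_ind', 'channel_ind', 'cluster_ind', 'amplitude', 'segment_ind').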
def _init_worker_find_spikes(recording, method, method_kwargs):
"""Initialize worker for finding spikes."""
if isinstance(recording, dict):
from spikeinterface.core import load_extractor
recording = load_extractor(recording)
method_class = template_matching_methods[method]
method_kwargs = method_class.unserialize_in_worker(method_kwargs)
# create a local dict per worker
worker_ctx = {}
worker_ctx['recording'] = recording
worker_ctx['method'] = method
worker_ctx['method_kwargs'] = method_kwargs
worker_ctx['function'] = method_class.main_function
return worker_ctx
def _find_spikes_chunk(segment_index, start_frame, end_frame, worker_ctx):
"""Find spikes from a chunk of data."""
# recover variables of the worker
recording = worker_ctx['recording']
method = worker_ctx['method']
method_kwargs = worker_ctx['method_kwargs']
margin = method_kwargs['margin']
# load trace in memory given some margin
recording_segment = recording._recording_segments[segment_index]
traces, left_margin, right_margin = get_chunk_with_margin(recording_segment,
start_frame, end_frame, None, margin, add_zeros=True)
function = worker_ctx['function']
with threadpool_limits(limits=1):
spikes = function(traces, method_kwargs)
# remove spikes in margin
if margin > 0:
keep = (spikes['sample_ind'] >= margin) & (spikes['sample_ind'] < (traces.shape[0] - margin))
spikes = spikes[keep]
spikes['sample_ind'] += (start_frame - margin)
spikes['segment_ind'] = segment_index
return spikes
# generic class for template engine
class BaseTemplateMatchingEngine:
default_params = {}
@classmethod
def initialize_and_check_kwargs(cls, recording, kwargs):
"""This function runs before loops"""
# need to be implemented in subclass
raise NotImplementedError
@classmethod
def serialize_method_kwargs(cls, kwargs):
"""This function serializes kwargs to distribute them to workers"""
# need to be implemented in subclass
raise NotImplementedError
@classmethod
def unserialize_in_worker(cls, recording, kwargs):
"""This function unserializes kwargs in workers"""
# need to be implemented in subclass
raise NotImplementedError
@classmethod
    def get_margin(cls, recording, kwargs):
        """This function returns the number of samples for the chunk margins"""
        # need to be implemented in subclass
        raise NotImplementedError
    @classmethod
    def main_function(cls, traces, method_kwargs):
        """This function runs the template-matching method on a chunk of traces"""
        # need to be implemented in subclass
        raise NotImplementedError
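# A minimal subclass sketch of the engine contract above (the class name and
# values below are illustrative, not part of the library):
#
#     class ThresholdMatching(BaseTemplateMatchingEngine):
#         default_params = {'detect_threshold': 5}
#
#         @classmethod
#         def initialize_and_check_kwargs(cls, recording, kwargs):
#             d = cls.default_params.copy()
#             d.update(kwargs)
#             return d
#
#         @classmethod
#         def serialize_method_kwargs(cls, kwargs):
#             return dict(kwargs)
#
#         @classmethod
#         def unserialize_in_worker(cls, kwargs):
#             return kwargs
#
#         @classmethod
#         def get_margin(cls, recording, kwargs):
#             return 0
#
#         @classmethod
#         def main_function(cls, traces, method_kwargs):
#             return np.zeros(0, dtype=spike_dtype)   # no spikes found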
##################
# naive matching #
##################
class NaiveMatching(BaseTemplateMatchingEngine):
"""
    This is a naive template matching that does not resolve collisions
    and does not take sparsity into account.
    It just minimizes the distance to templates for detected peaks.
    It is implemented for benchmarking against this low-quality template matching,
    and also as an example of how to deal with method_kwargs, margin, init, func, ...
"""
default_params = {
'waveform_extractor': None,
'peak_sign': 'neg',
'n_shifts': 10,
'detect_threshold': 5,
'noise_levels': None,
'local_radius_um': 100,
'random_chunk_kwargs': {},
}
@classmethod
def initialize_and_check_kwargs(cls, recording, kwargs):
d = cls.default_params.copy()
d.update(kwargs)
assert d['waveform_extractor'] is not None
we = d['waveform_extractor']
if d['noise_levels'] is None:
d['noise_levels'] = get_noise_levels(recording, **d['random_chunk_kwargs'])
d['abs_threholds'] = d['noise_levels'] * d['detect_threshold']
channel_distance = get_channel_distances(recording)
d['neighbours_mask'] = channel_distance < d['local_radius_um']
d['nbefore'] = we.nbefore
d['nafter'] = we.nafter
return d
@classmethod
def get_margin(cls, recording, kwargs):
margin = max(kwargs['nbefore'], kwargs['nafter'])
return margin
@classmethod
def serialize_method_kwargs(cls, kwargs):
kwargs = dict(kwargs)
waveform_extractor = kwargs['waveform_extractor']
kwargs['waveform_extractor'] = str(waveform_extractor.folder)
return kwargs
@classmethod
def unserialize_in_worker(cls, kwargs):
we = kwargs['waveform_extractor']
if isinstance(we, str):
we = WaveformExtractor.load_from_folder(we)
kwargs['waveform_extractor'] = we
templates = we.get_all_templates(mode='average')
kwargs['templates'] = templates
return kwargs
@classmethod
def main_function(cls, traces, method_kwargs):
peak_sign = method_kwargs['peak_sign']
abs_threholds = method_kwargs['abs_threholds']
n_shifts = method_kwargs['n_shifts']
neighbours_mask = method_kwargs['neighbours_mask']
templates = method_kwargs['templates']
nbefore = method_kwargs['nbefore']
nafter = method_kwargs['nafter']
margin = method_kwargs['margin']
if margin > 0:
peak_traces = traces[margin:-margin, :]
else:
peak_traces = traces
peak_sample_ind, peak_chan_ind = detect_peak_locally_exclusive(peak_traces, peak_sign, abs_threholds, n_shifts, neighbours_mask)
peak_sample_ind += margin
spikes = np.zeros(peak_sample_ind.size, dtype=spike_dtype)
spikes['sample_ind'] = peak_sample_ind
spikes['channel_ind'] = peak_chan_ind # TODO need to put the channel from template
# naively take the closest template
for i in range(peak_sample_ind.size):
i0 = peak_sample_ind[i] - nbefore
i1 = peak_sample_ind[i] + nafter
wf = traces[i0:i1, :]
dist = np.sum(np.sum((templates - wf[None, : , :])**2, axis=1), axis=1)
cluster_ind = np.argmin(dist)
spikes['cluster_ind'][i] = cluster_ind
spikes['amplitude'][i] = 0.
return spikes
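# Illustrative usage of the naive engine (a sketch, assuming `recording` and a
# precomputed WaveformExtractor `we` already exist; the job kwargs are examples):
#
#     spikes = find_spikes_from_templates(
#         recording, method='naive',
#         method_kwargs={'waveform_extractor': we, 'detect_threshold': 5},
#         n_jobs=1, chunk_size=30000)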
######################
# tridesclous peeler #
######################
class TridesclousPeeler(BaseTemplateMatchingEngine):
"""
Template-matching ported from Tridesclous sorter.
The idea of this peeler is pretty simple.
1. Find peaks
2. order by best amplitues
3. find nearest template
4. remove it from traces.
5. in the residual find peaks again
This method is quite fast but don't give exelent results to resolve
spike collision when templates have high similarity.
"""
default_params = {
'waveform_extractor': None,
'peak_sign': 'neg',
'peak_shift_ms': 0.2,
'detect_threshold': 5,
'noise_levels': None,
'local_radius_um': 100,
'num_closest' : 5,
'sample_shift': 3,
'ms_before': 0.8,
'ms_after': 1.2,
'num_peeler_loop': 2,
'num_template_try' : 1,
}
@classmethod
def initialize_and_check_kwargs(cls, recording, kwargs):
assert HAVE_NUMBA
d = cls.default_params.copy()
d.update(kwargs)
assert isinstance(d['waveform_extractor'], WaveformExtractor)
we = d['waveform_extractor']
unit_ids = we.sorting.unit_ids
channel_ids = we.recording.channel_ids
sr = we.recording.get_sampling_frequency()
# TODO load as sharedmem
templates = we.get_all_templates(mode='average')
d['templates'] = templates
d['nbefore'] = we.nbefore
d['nafter'] = we.nafter
nbefore_short = int(d['ms_before'] * sr / 1000.)
        nafter_short = int(d['ms_after'] * sr / 1000.)
assert nbefore_short <= we.nbefore
assert nafter_short <= we.nafter
d['nbefore_short'] = nbefore_short
d['nafter_short'] = nafter_short
s0 = (we.nbefore - nbefore_short)
s1 = -(we.nafter - nafter_short)
if s1 == 0:
s1 = None
templates_short = templates[:, slice(s0,s1), :].copy()
d['templates_short'] = templates_short
d['peak_shift'] = int(d['peak_shift_ms'] / 1000 * sr)
if d['noise_levels'] is None:
            print('TridesclousPeeler: noise levels should be computed outside and passed in')
d['noise_levels'] = get_noise_levels(recording)
d['abs_threholds'] = d['noise_levels'] * d['detect_threshold']
channel_distance = get_channel_distances(recording)
d['neighbours_mask'] = channel_distance < d['local_radius_um']
#
#~ template_sparsity_inds = get_template_channel_sparsity(we, method='radius',
#~ peak_sign=d['peak_sign'], outputs='index', radius_um=d['local_radius_um'])
template_sparsity_inds = get_template_channel_sparsity(we, method='threshold',
peak_sign=d['peak_sign'], outputs='index', threshold=d['detect_threshold'])
template_sparsity = np.zeros((unit_ids.size, channel_ids.size), dtype='bool')
for unit_index, unit_id in enumerate(unit_ids):
chan_inds = template_sparsity_inds[unit_id]
template_sparsity[unit_index, chan_inds] = True
d['template_sparsity'] = template_sparsity
extremum_channel = get_template_extremum_channel(we, peak_sign=d['peak_sign'], outputs='index')
# as numpy vector
extremum_channel = np.array([extremum_channel[unit_id] for unit_id in unit_ids], dtype='int64')
d['extremum_channel'] = extremum_channel
channel_locations = we.recording.get_channel_locations()
        # TODO: try it with real unit locations
unit_locations = channel_locations[extremum_channel]
#~ print(unit_locations)
# distance between units
unit_distances = scipy.spatial.distance.cdist(unit_locations, unit_locations, metric='euclidean')
        # search for the closest units and their unitary discriminant vectors
closest_units = []
for unit_ind, unit_id in enumerate(unit_ids):
order = np.argsort(unit_distances[unit_ind, :])
closest_u = np.arange(unit_ids.size)[order].tolist()
closest_u.remove(unit_ind)
closest_u = np.array(closest_u[:d['num_closest']])
            # compute unitary discriminant vectors
chans, = np.nonzero(d['template_sparsity'][unit_ind, :])
template_sparse = templates[unit_ind, :, :][:, chans]
closest_vec = []
            # against the N closest units
for u in closest_u:
vec = (templates[u, :, :][:, chans] - template_sparse)
vec /= np.sum(vec ** 2)
closest_vec.append((u, vec))
# against noise
closest_vec.append((None, - template_sparse / np.sum(template_sparse ** 2)))
closest_units.append(closest_vec)
d['closest_units'] = closest_units
        # distance from each channel to each unit
distances = scipy.spatial.distance.cdist(channel_locations, unit_locations, metric='euclidean')
near_cluster_mask = distances < d['local_radius_um']
# nearby cluster for each channel
possible_clusters_by_channel = []
for channel_ind in range(distances.shape[0]):
cluster_inds, = np.nonzero(near_cluster_mask[channel_ind, :])
possible_clusters_by_channel.append(cluster_inds)
d['possible_clusters_by_channel'] = possible_clusters_by_channel
d['possible_shifts'] = np.arange(-d['sample_shift'], d['sample_shift'] +1, dtype='int64')
return d
@classmethod
def serialize_method_kwargs(cls, kwargs):
kwargs = dict(kwargs)
# remove waveform_extractor
kwargs.pop('waveform_extractor')
return kwargs
@classmethod
def unserialize_in_worker(cls, kwargs):
return kwargs
@classmethod
def get_margin(cls, recording, kwargs):
margin = 2 * (kwargs['nbefore'] + kwargs['nafter'])
return margin
@classmethod
def main_function(cls, traces, d):
traces = traces.copy()
all_spikes = []
level = 0
while True:
spikes = _tdc_find_spikes(traces, d, level=level)
keep = (spikes['cluster_ind'] >= 0)
if not np.any(keep):
break
all_spikes.append(spikes[keep])
level += 1
if level == d['num_peeler_loop']:
break
if len(all_spikes) > 0:
all_spikes = np.concatenate(all_spikes)
order = np.argsort(all_spikes['sample_ind'])
all_spikes = all_spikes[order]
else:
all_spikes = np.zeros(0, dtype=spike_dtype)
return all_spikes
def _tdc_find_spikes(traces, d, level=0):
peak_sign = d['peak_sign']
templates = d['templates']
templates_short = d['templates_short']
margin = d['margin']
possible_clusters_by_channel = d['possible_clusters_by_channel']
peak_traces = traces[margin // 2:-margin // 2, :]
peak_sample_ind, peak_chan_ind = detect_peak_locally_exclusive(peak_traces, peak_sign,
d['abs_threholds'], d['peak_shift'], d['neighbours_mask'])
peak_sample_ind += margin // 2
peak_amplitude = traces[peak_sample_ind, peak_chan_ind]
order = np.argsort(np.abs(peak_amplitude))[::-1]
peak_sample_ind = peak_sample_ind[order]
peak_chan_ind = peak_chan_ind[order]
spikes = np.zeros(peak_sample_ind.size, dtype=spike_dtype)
spikes['sample_ind'] = peak_sample_ind
    spikes['channel_ind'] = peak_chan_ind  # TODO: take the channel from the template instead
possible_shifts = d['possible_shifts']
distances_shift = np.zeros(possible_shifts.size)
for i in range(peak_sample_ind.size):
sample_ind = peak_sample_ind[i]
chan_ind = peak_chan_ind[i]
possible_clusters = possible_clusters_by_channel[chan_ind]
if possible_clusters.size > 0:
#~ s0 = sample_ind - d['nbefore']
#~ s1 = sample_ind + d['nafter']
#~ wf = traces[s0:s1, :]
s0 = sample_ind - d['nbefore_short']
s1 = sample_ind + d['nafter_short']
wf_short = traces[s0:s1, :]
            ## pure numpy with cluster sparsity
            # distances = np.sum(np.sum((templates[possible_clusters, :, :] - wf[None, : , :])**2, axis=1), axis=1)
            ## pure numpy with cluster+channel sparsity
            # union_channels, = np.nonzero(np.any(d['template_sparsity'][possible_clusters, :], axis=0))
            # distances = np.sum(np.sum((templates[possible_clusters][:, :, union_channels] - wf[:, union_channels][None, :, :])**2, axis=1), axis=1)
            ## numba with cluster+channel sparsity
            union_channels = np.any(d['template_sparsity'][possible_clusters, :], axis=0)
            # distances = numba_sparse_dist(wf, templates, union_channels, possible_clusters)
            distances = numba_sparse_dist(wf_short, templates_short, union_channels, possible_clusters)
# DEBUG
#~ ind = np.argmin(distances)
#~ cluster_ind = possible_clusters[ind]
for ind in np.argsort(distances)[:d['num_template_try']]:
cluster_ind = possible_clusters[ind]
chan_sparsity = d['template_sparsity'][cluster_ind, :]
template_sparse = templates[cluster_ind, :, :][:, chan_sparsity]
# find best shift
## pure numpy version
# for s, shift in enumerate(possible_shifts):
# wf_shift = traces[s0 + shift: s1 + shift, chan_sparsity]
# distances_shift[s] = np.sum((template_sparse - wf_shift)**2)
# ind_shift = np.argmin(distances_shift)
# shift = possible_shifts[ind_shift]
## numba version
numba_best_shift(traces, templates[cluster_ind, :, :], sample_ind, d['nbefore'], possible_shifts, distances_shift, chan_sparsity)
ind_shift = np.argmin(distances_shift)
shift = possible_shifts[ind_shift]
sample_ind = sample_ind + shift
s0 = sample_ind - d['nbefore']
s1 = sample_ind + d['nafter']
wf_sparse = traces[s0:s1, chan_sparsity]
# accept or not
centered = wf_sparse - template_sparse
accepted = True
for other_ind, other_vector in d['closest_units'][cluster_ind]:
v = np.sum(centered * other_vector)
                    if np.abs(v) > 0.5:
accepted = False
break
if accepted:
#~ if ind != np.argsort(distances)[0]:
#~ print('not first one', np.argsort(distances), ind)
break
if accepted:
amplitude = 1.
# remove template
template = templates[cluster_ind, :, :]
s0 = sample_ind - d['nbefore']
s1 = sample_ind + d['nafter']
traces[s0:s1, :] -= template * amplitude
else:
cluster_ind = -1
amplitude = 0.
else:
cluster_ind = -1
amplitude = 0.
spikes['cluster_ind'][i] = cluster_ind
        spikes['amplitude'][i] = amplitude
return spikes
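# Note on the acceptance test in _tdc_find_spikes: with
# v_u = (T_u - T_c) / ||T_u - T_c||**2, the projection <wf - T_c, v_u> is 0 for
# a waveform exactly matching template T_c and 1 for one matching the competing
# template T_u, so the 0.5 cutoff assigns the waveform to whichever of the two
# templates is closer along that discriminant axis (and similarly for the extra
# vector built against the noise, i.e. the all-zero "template").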
if HAVE_NUMBA:
@jit(nopython=True)
def numba_sparse_dist(wf, templates, union_channels, possible_clusters):
"""
numba implementation that compute distance from template with sparsity
handle by two separate vectors
"""
total_cluster, width, num_chan = templates.shape
num_cluster = possible_clusters.shape[0]
distances = np.zeros((num_cluster,), dtype=np.float32)
for i in prange(num_cluster):
cluster_ind = possible_clusters[i]
sum_dist = 0.
for chan_ind in range(num_chan):
if union_channels[chan_ind]:
for s in range(width):
v = wf[s, chan_ind]
t = templates[cluster_ind, s, chan_ind]
sum_dist += (v - t) ** 2
distances[i] = sum_dist
return distances
@jit(nopython=True)
def numba_best_shift(traces, template, sample_ind, nbefore, possible_shifts, distances_shift, chan_sparsity):
"""
numba implementation to compute several sample shift before template substraction
"""
width, num_chan = template.shape
n_shift = possible_shifts.size
for i in range(n_shift):
shift = possible_shifts[i]
sum_dist = 0.
for chan_ind in range(num_chan):
if chan_sparsity[chan_ind]:
for s in range(width):
                        v = traces[sample_ind - nbefore + s + shift, chan_ind]
t = template[s, chan_ind]
sum_dist += (v - t) ** 2
distances_shift[i] = sum_dist
return distances_shift
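# For reference, numba_sparse_dist(wf, templates, union_channels, possible_clusters)
# computes, for each candidate cluster c, the channel-masked squared distance
#     sum over (s, chan) with union_channels[chan] of (wf[s, chan] - templates[c, s, chan])**2
# which is equivalent to this (slower) NumPy sketch:
#
#     sub = templates[possible_clusters][:, :, union_channels]
#     distances = np.sum((sub - wf[:, union_channels][None, :, :]) ** 2, axis=(1, 2))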
#################
# Circus peeler #
#################
# if HAVE_NUMBA:
# @jit(nopython=True)
# def fastconvolution(traces, templates, output):
# nb_time, nb_channels = traces.shape
# nb_templates, nb_samples, nb_channels = templates.shape
# center = nb_samples // 2
# for i in range(center, nb_time - center + 1):
# offset_1 = i - center
# for k in range(nb_templates):
# for jj in range(nb_samples):
# offset_2 = offset_1 + jj
# for j in range(nb_channels):
# output[k, offset_1] += (templates[k, jj, j] * traces[offset_2, j])
# return output
class CircusOMPPeeler(BaseTemplateMatchingEngine):
"""
Orthogonal Matching Pursuit inspired from Spyking Circus sorter
https://elifesciences.org/articles/34518
This is an Orthogonal Template Matching algorithm. For speed and
memory optimization, templates are automatically sparsified if the
density of the matrix falls below a given threshold. Signal is
convolved with the templates, and as long as some scalar products
are higher than a given threshold, we use a Cholesky decomposition
to compute the optimal amplitudes needed to reconstruct the signal.
IMPORTANT NOTE: small chunks are more efficient for such Peeler,
consider using 100ms chunk
Parameters
----------
noise_levels: array
The noise levels, for every channels
random_chunk_kwargs: dict
Parameters for computing noise levels, if not provided (sub optimal)
amplitude: tuple
(Minimal, Maximal) amplitudes allowed for every template
omp_min_sps: float
Stopping criteria of the OMP algorithm, in percentage of the norm
sparsify_threshold: float
Templates are sparsified in order to keep only the channels necessary
to explain a given fraction of the total norm
use_sparse_matrix_threshold: float
If density of the templates is below a given threshold, sparse matrix
are used (memory efficient)
progress_bar_steps: bool
In order to display or not steps from the algorithm
-----
"""
    _default_params = {
        'sparsify_threshold': 0.99,
        'amplitudes': [0.5, 1.5],
        'use_sparse_matrix_threshold': 0.25,
        'noise_levels': None,
        'random_chunk_kwargs': {},
        'omp_min_sps': 0.5,
        'progress_bar_steps': False,
    }
@classmethod
def _sparsify_template(cls, template, sparsify_threshold, noise_levels):
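        # Zero out channels whose std is well below the noise level, then keep
        # only the highest-energy channels needed to explain `sparsify_threshold`
        # of the template's total squared norm; all other channels are set to 0
        # and the indices of the kept (active) channels are returned.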
is_silent = template.std(0) < 0.25*noise_levels
template[:, is_silent] = 0
channel_norms = np.linalg.norm(template, axis=0)**2
total_norm = np.linalg.norm(template)**2
idx = np.argsort(channel_norms)[::-1]
explained_norms = np.cumsum(channel_norms[idx]/total_norm)
channel = np.searchsorted(explained_norms, sparsify_threshold)
active_channels = np.sort(idx[:channel])
template[:, idx[channel:]] = 0
return template, active_channels
@classmethod
def _prepare_templates(cls, d):
waveform_extractor = d['waveform_extractor']
nb_samples = d['nb_samples']
nb_channels = d['nb_channels']
nb_templates = d['nb_templates']
use_sparse_matrix_threshold = d['use_sparse_matrix_threshold']
d['norms'] = np.zeros(nb_templates, dtype=np.float32)
all_units = list(d['waveform_extractor'].sorting.unit_ids)
templates = waveform_extractor.get_all_templates(mode='median').copy()
d['sparsities'] = {}
for count, unit_id in enumerate(all_units):
templates[count], active_channels = cls._sparsify_template(templates[count], d['sparsify_threshold'], d['noise_levels'])
d['sparsities'][count] = active_channels
d['norms'][count] = np.linalg.norm(templates[count])
templates[count] /= d['norms'][count]
templates = templates.reshape(nb_templates, -1)
nnz = np.sum(templates != 0)/(nb_templates * nb_samples * nb_channels)
if nnz <= use_sparse_matrix_threshold:
templates = scipy.sparse.csr_matrix(templates)
print(f'Templates are automatically sparsified (sparsity level is {nnz})')
d['is_dense'] = False
else:
d['is_dense'] = True
d['templates'] = templates
return d
@classmethod
def _prepare_overlaps(cls, d):
templates = d['templates']
nb_samples = d['nb_samples']
nb_channels = d['nb_channels']
nb_templates = d['nb_templates']
is_dense = d['is_dense']
if not is_dense:
dense_templates = templates.toarray()
else:
dense_templates = templates
dense_templates = dense_templates.reshape(nb_templates, nb_samples, nb_channels)
size = 2 * nb_samples - 1
all_delays = list(range(nb_samples))
        if d['progress_bar_steps']:
all_delays = tqdm(all_delays, desc='[1] compute overlaps')
overlaps = {}
for delay in all_delays:
source = dense_templates[:, :delay, :].reshape(nb_templates, -1)
target = dense_templates[:, nb_samples-delay:, :].reshape(nb_templates, -1)
if delay > 0:
overlaps[delay] = scipy.sparse.csr_matrix(source.dot(target.T))
else:
overlaps[delay] = scipy.sparse.csr_matrix((nb_templates, nb_templates), dtype=np.float32)
if delay < nb_samples:
overlaps[size - delay-1] = overlaps[delay].T.tocsr()
new_overlaps = []
for i in range(nb_templates):
data = [overlaps[j][i, :].T for j in range(size)]
data = scipy.sparse.hstack(data)
new_overlaps += [data]
d['overlaps'] = new_overlaps
return d
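    # Note: d['overlaps'][i] stacks, for template i, its scalar products with
    # every template at all 2*nb_samples-1 relative lags; main_function uses
    # these rows to subtract a matched template's contribution from the scalar
    # products of temporally overlapping candidate atoms.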
@classmethod
def initialize_and_check_kwargs(cls, recording, kwargs):
d = cls._default_params.copy()
d.update(kwargs)
assert isinstance(d['waveform_extractor'], WaveformExtractor)
for v in ['sparsify_threshold', 'omp_min_sps','use_sparse_matrix_threshold']:
assert (d[v] >= 0) and (d[v] <= 1), f'{v} should be in [0, 1]'
if d['noise_levels'] is None:
            print('CircusOMPPeeler: noise levels should be computed outside and passed in')
d['noise_levels'] = get_noise_levels(recording, **d['random_chunk_kwargs'])
d['nb_channels'] = d['waveform_extractor'].recording.get_num_channels()
d['nb_samples'] = d['waveform_extractor'].nsamples
d['nb_templates'] = len(d['waveform_extractor'].sorting.unit_ids)
d['nbefore'] = d['waveform_extractor'].nbefore
d['nafter'] = d['waveform_extractor'].nafter
d = cls._prepare_templates(d)
d = cls._prepare_overlaps(d)
return d
@classmethod
def serialize_method_kwargs(cls, kwargs):
kwargs = dict(kwargs)
# remove waveform_extractor
kwargs.pop('waveform_extractor')
return kwargs
@classmethod
def unserialize_in_worker(cls, kwargs):
return kwargs
@classmethod
def get_margin(cls, recording, kwargs):
margin = 2 * max(kwargs['nbefore'], kwargs['nafter'])
return margin
@classmethod
def main_function(cls, traces, d):
templates = d['templates']
nb_templates = d['nb_templates']
nb_channels = d['nb_channels']
overlaps = d['overlaps']
margin = d['margin']
norms = d['norms']
nbefore = d['nbefore']
nafter = d['nafter']
omp_tol = np.finfo(np.float32).eps
omp_min_sps = d['omp_min_sps']
nb_samples = d['nafter'] + d['nbefore']
neighbor_window = nb_samples - 1
min_amplitude, max_amplitude = d['amplitudes']
sparsities = d['sparsities']
is_dense = d['is_dense']
stop_criteria = omp_min_sps * norms[:, np.newaxis]
nb_peaks = len(traces) - nb_samples + 1
if is_dense:
kernel_filters = templates.reshape(nb_templates, nb_samples, nb_channels)[:, ::-1, :]
scalar_products = scipy.signal.fftconvolve(kernel_filters, traces[np.newaxis, :, :], axes=(0, 1), mode='valid').sum(2)
else:
scalar_products = np.empty((nb_templates, nb_peaks), dtype=np.float32)
for i in range(nb_templates):
kernel_filter = templates[i].toarray().reshape(nb_samples, nb_channels)
kernel_filter = kernel_filter[::-1, sparsities[i]]
convolution = scipy.signal.fftconvolve(kernel_filter, traces[:, sparsities[i]], axes=0, mode='valid')
if len(convolution) > 0:
scalar_products[i] = convolution.sum(1)
else:
scalar_products[i] = 0
peak_chan_ind = np.zeros(nb_peaks)
nb_spikes = 0
spikes = np.empty(scalar_products.size, dtype=spike_dtype)
idx_lookup = np.arange(scalar_products.size).reshape(nb_templates, -1)
M = np.zeros((nb_peaks, nb_peaks), dtype=np.float32)
all_selections = np.empty((2, scalar_products.size), dtype=np.int32)
res_sps = np.zeros(0, dtype=np.float32)
final_amplitudes = np.zeros(scalar_products.shape, dtype=np.float32)
nb_selection = 0
full_sps = scalar_products.copy()
neighbors = {}
cached_overlaps = {}
is_valid = (scalar_products > stop_criteria)
while np.any(is_valid):
best_amplitude_ind = scalar_products[is_valid].argmax()
best_cluster_ind, peak_index = np.unravel_index(idx_lookup[is_valid][best_amplitude_ind], idx_lookup.shape)
all_selections[:, nb_selection] = [best_cluster_ind, peak_index]
nb_selection += 1
selection = all_selections[:, :nb_selection]
res_sps = full_sps[selection[0], selection[1]]
mb_selection = nb_selection - 1
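            # Incremental Cholesky update: M is the lower-triangular factor of
            # the Gram matrix of the selected (template, time) atoms. Only atoms
            # within +/- neighbor_window samples can overlap, so the new row is
            # filled from the precomputed template overlaps and then completed
            # by the triangular solve below.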
delta_t = selection[1] - peak_index
idx = np.where(np.abs(delta_t) <= neighbor_window)[0]
myline = neighbor_window + delta_t[idx]
            if best_cluster_ind not in cached_overlaps:
cached_overlaps[best_cluster_ind] = overlaps[best_cluster_ind].toarray()
M[mb_selection, idx] = cached_overlaps[best_cluster_ind][selection[0, idx], myline]
if nb_selection >= (M.shape[0] - 1):
Z = np.zeros((2*M.shape[0], 2*M.shape[1]), dtype=np.float32)
Z[:nb_selection, :nb_selection] = M[:nb_selection, :nb_selection]
M = Z
if mb_selection > 0:
scipy.linalg.solve_triangular(M[:mb_selection, :mb_selection], M[mb_selection, :mb_selection], trans=0,
lower=1,
overwrite_b=True,
check_finite=False)
v = nrm2(M[mb_selection, :mb_selection]) ** 2
if 1 - v <= omp_tol: # selected atoms are dependent
break
M[mb_selection, mb_selection] = np.sqrt(1 - v)
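            # Solve the normal equations with the current Cholesky factor
            # (LAPACK potrs) to refit the amplitudes of all selected atoms at once.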
all_amplitudes, _ = potrs(M[:nb_selection, :nb_selection], res_sps,
lower=True, overwrite_b=False)
all_amplitudes /= norms[selection[0]]
diff_amplitudes = (all_amplitudes - final_amplitudes[selection[0], selection[1]])
modified = np.where(np.abs(diff_amplitudes) > omp_tol)[0]
final_amplitudes[selection[0], selection[1]] = all_amplitudes
for i in modified:
tmp_best, tmp_peak = selection[:, i]
diff_amp = diff_amplitudes[i]*norms[tmp_best]
                if tmp_best not in cached_overlaps:
cached_overlaps[tmp_best] = overlaps[tmp_best].toarray()
                if tmp_peak not in neighbors:
idx = [max(0, tmp_peak - neighbor_window), min(nb_peaks, tmp_peak + neighbor_window + 1)]
offset = [neighbor_window + idx[0] - tmp_peak, neighbor_window + idx[1] - tmp_peak]
neighbors[tmp_peak] = {'idx' : idx, 'tdx' : offset}
idx = neighbors[tmp_peak]['idx']
tdx = neighbors[tmp_peak]['tdx']
to_add = diff_amp * cached_overlaps[tmp_best][:, tdx[0]:tdx[1]]
scalar_products[:, idx[0]:idx[1]] -= to_add
scalar_products[best_cluster_ind, peak_index] = -np.inf
is_valid = (scalar_products > stop_criteria)
is_valid = (final_amplitudes > min_amplitude)*(final_amplitudes < max_amplitude)
valid_indices = np.where(is_valid)
nb_spikes = len(valid_indices[0])
spikes['sample_ind'][:nb_spikes] = valid_indices[1] + d['nbefore']
spikes['channel_ind'][:nb_spikes] = 0
spikes['cluster_ind'][:nb_spikes] = valid_indices[0]
spikes['amplitude'][:nb_spikes] = final_amplitudes[valid_indices[0], valid_indices[1]]
spikes = spikes[:nb_spikes]
order = np.argsort(spikes['sample_ind'])
spikes = spikes[order]
return spikes
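# Illustrative usage of the OMP engine (a sketch, assuming `recording` and a
# precomputed WaveformExtractor `we`; per the docstring above, small ~100 ms
# chunks tend to be more efficient for this peeler):
#
#     fs = recording.get_sampling_frequency()
#     spikes = find_spikes_from_templates(
#         recording, method='circus-omp',
#         method_kwargs={'waveform_extractor': we, 'amplitudes': [0.5, 1.5]},
#         chunk_size=int(0.1 * fs))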
class CircusPeeler(BaseTemplateMatchingEngine):
"""
Greedy Template-matching ported from the Spyking Circus sorter
https://elifesciences.org/articles/34518
This is a Greedy Template Matching algorithm. The idea is to detect
all the peaks (negative, positive or both) above a certain threshold
Then, at every peak (plus or minus some jitter) we look if the signal
can be explained with a scaled template.
The amplitudes allowed, for every templates, are automatically adjusted
in an optimal manner, to enhance the Matthew Correlation Coefficient
between all spikes/templates in the waveformextractor. For speed and
memory optimization, templates are automatically sparsified if the
density of the matrix falls below a given threshold
Parameters
----------
peak_sign: str
Sign of the peak (neg, pos, or both)
n_shifts: int
The number of samples before/after to classify a peak (should be low)
jitter: int
The number of samples considered before/after every peak to search for
matches
detect_threshold: int
The detection threshold
noise_levels: array
The noise levels, for every channels
random_chunk_kwargs: dict
Parameters for computing noise levels, if not provided (sub optimal)
max_amplitude: float
Maximal amplitude allowed for every template
min_amplitude: float
Minimal amplitude allowed for every template
sparsify_threshold: float
Templates are sparsified in order to keep only the channels necessary
to explain a given fraction of the total norm
use_sparse_matrix_threshold: float
If density of the templates is below a given threshold, sparse matrix
are used (memory efficient)
progress_bar_steps: bool
In order to display or not steps from the algorithm
-----
"""
    _default_params = {
        'peak_sign': 'neg',
        'n_shifts': 1,
        'jitter': 1,
        'detect_threshold': 5,
        'noise_levels': None,
        'random_chunk_kwargs': {},
        'sparsify_threshold': 0.99,
        'max_amplitude': 1.5,
        'min_amplitude': 0.5,
        'use_sparse_matrix_threshold': 0.25,
        'progress_bar_steps': True,
    }
@classmethod
def _sparsify_template(cls, template, sparsify_threshold, noise_levels):
is_silent = template.std(0) < 0.25*noise_levels
template[:, is_silent] = 0
channel_norms = np.linalg.norm(template, axis=0)**2
total_norm = np.linalg.norm(template)**2
idx = np.argsort(channel_norms)[::-1]
explained_norms = np.cumsum(channel_norms[idx]/total_norm)
channel = np.searchsorted(explained_norms, sparsify_threshold)
active_channels = np.sort(idx[:channel])
template[:, idx[channel:]] = 0
return template, active_channels
@classmethod
def _prepare_templates(cls, d):
waveform_extractor = d['waveform_extractor']
nb_samples = d['nb_samples']
nb_channels = d['nb_channels']
nb_templates = d['nb_templates']
max_amplitude = d['max_amplitude']
min_amplitude = d['min_amplitude']
use_sparse_matrix_threshold = d['use_sparse_matrix_threshold']
d['norms'] = np.zeros(nb_templates, dtype=np.float32)
all_units = list(d['waveform_extractor'].sorting.unit_ids)
templates = waveform_extractor.get_all_templates(mode='median').copy()
d['sparsities'] = {}
for count, unit_id in enumerate(all_units):
templates[count], active_channels = cls._sparsify_template(templates[count], d['sparsify_threshold'], d['noise_levels'])
d['sparsities'][count] = active_channels
d['norms'][count] = np.linalg.norm(templates[count])
templates[count] /= d['norms'][count]
templates = templates.reshape(nb_templates, -1)
nnz = np.sum(templates != 0)/(nb_templates * nb_samples * nb_channels)
if nnz <= use_sparse_matrix_threshold:
templates = scipy.sparse.csr_matrix(templates)
print(f'Templates are automatically sparsified (sparsity level is {nnz})')
d['is_dense'] = False
else:
d['is_dense'] = True
d['templates'] = templates
return d
@classmethod
def _prepare_overlaps(cls, d):
templates = d['templates']
nb_samples = d['nb_samples']
nb_channels = d['nb_channels']
nb_templates = d['nb_templates']
is_dense = d['is_dense']
if not is_dense:
dense_templates = templates.toarray()
else:
dense_templates = templates
dense_templates = dense_templates.reshape(nb_templates, nb_samples, nb_channels)
size = 2 * nb_samples - 1
all_delays = list(range(nb_samples))
        if d['progress_bar_steps']:
all_delays = tqdm(all_delays, desc='[1] compute overlaps')
overlaps = {}
for delay in all_delays:
source = dense_templates[:, :delay, :].reshape(nb_templates, -1)
target = dense_templates[:, nb_samples-delay:, :].reshape(nb_templates, -1)
if delay > 0:
overlaps[delay] = scipy.sparse.csr_matrix(source.dot(target.T))
else:
overlaps[delay] = scipy.sparse.csr_matrix((nb_templates, nb_templates), dtype=np.float32)
if delay < nb_samples:
overlaps[size - delay-1] = overlaps[delay].T.tocsr()
new_overlaps = []
for i in range(nb_templates):
data = [overlaps[j][i, :].T for j in range(size)]
data = scipy.sparse.hstack(data)
new_overlaps += [data]
d['overlaps'] = new_overlaps
return d
@classmethod
def _mcc_error(cls, bounds, good, bad):
fn = np.sum((good < bounds[0]) | (good > bounds[1]))
fp = np.sum((bounds[0] <= bad) & (bad <= bounds[1]))
tp = np.sum((bounds[0] <= good) & (good <= bounds[1]))
tn = np.sum((bad < bounds[0]) | (bad > bounds[1]))
denom = (tp+fp)*(tp+fn)*(tn+fp)*(tn+fn)
if denom > 0:
mcc = 1 - (tp*tn - fp*fn)/np.sqrt(denom)
else:
mcc = 1
return mcc
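    # _mcc_error returns 1 - MCC, where
    #     MCC = (tp*tn - fp*fn) / sqrt((tp+fp)*(tp+fn)*(tn+fp)*(tn+fn))
    # for the classification of "good" (same-unit) vs "bad" (other-unit/noise)
    # amplitudes by the candidate bounds, so 0 means perfect separation.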
@classmethod
def _cost_function_mcc(cls, bounds, good, bad, delta_amplitude, alpha):
        # We want a minimal error, with the largest possible bounds
cost = alpha*cls._mcc_error(bounds, good, bad) + (1 - alpha)*np.abs((1 - (bounds[1] - bounds[0])/delta_amplitude))
return cost
@classmethod
def _optimize_amplitudes(cls, noise_snippets, d):
waveform_extractor = d['waveform_extractor']
templates = d['templates']
nb_templates = d['nb_templates']
max_amplitude = d['max_amplitude']
min_amplitude = d['min_amplitude']
alpha = 0.5
norms = d['norms']
all_units = list(waveform_extractor.sorting.unit_ids)
        if d['progress_bar_steps']:
all_units = tqdm(all_units, desc='[2] compute amplitudes')
d['amplitudes'] = np.zeros((nb_templates, 2), dtype=np.float32)
noise = templates.dot(noise_snippets)/norms[:, np.newaxis]
all_amps = {}
for count, unit_id in enumerate(all_units):
w = waveform_extractor.get_waveforms(unit_id)
snippets = w.reshape(w.shape[0], -1).T
amps = templates.dot(snippets)/norms[:, np.newaxis]
good = amps[count, :].flatten()
sub_amps = amps[np.concatenate((np.arange(count), np.arange(count+1, nb_templates))), :]
bad = sub_amps[sub_amps >= good]
bad = np.concatenate((bad, noise[count]))
cost_kwargs = [good, bad, max_amplitude - min_amplitude, alpha]
cost_bounds = [(min_amplitude, 1), (1, max_amplitude)]
res = scipy.optimize.differential_evolution(cls._cost_function_mcc, bounds=cost_bounds, args=cost_kwargs)
d['amplitudes'][count] = res.x
# import pylab as plt
# plt.hist(good, 100, alpha=0.5)
# plt.hist(bad, 100, alpha=0.5)
# plt.hist(noise[count], 100, alpha=0.5)
# ymin, ymax = plt.ylim()
# plt.plot([res.x[0], res.x[0]], [ymin, ymax], 'k--')
# plt.plot([res.x[1], res.x[1]], [ymin, ymax], 'k--')
# plt.savefig('test_%d.png' %count)
# plt.close()
return d
@classmethod
def initialize_and_check_kwargs(cls, recording, kwargs):
d = cls._default_params.copy()
d.update(kwargs)
assert isinstance(d['waveform_extractor'], WaveformExtractor)
for v in ['sparsify_threshold', 'use_sparse_matrix_threshold']:
assert (d[v] >= 0) and (d[v] <= 1), f'{v} should be in [0, 1]'
d['nb_channels'] = d['waveform_extractor'].recording.get_num_channels()
d['nb_samples'] = d['waveform_extractor'].nsamples
d['nb_templates'] = len(d['waveform_extractor'].sorting.unit_ids)
if d['noise_levels'] is None:
            print('CircusPeeler: noise levels should be computed outside and passed in')
d['noise_levels'] = get_noise_levels(recording, **d['random_chunk_kwargs'])
d['abs_threholds'] = d['noise_levels'] * d['detect_threshold']
d = cls._prepare_templates(d)
d = cls._prepare_overlaps(d)
d['nbefore'] = d['waveform_extractor'].nbefore
d['nafter'] = d['waveform_extractor'].nafter
d['patch_sizes'] = (d['waveform_extractor'].nsamples, d['nb_channels'])
d['sym_patch'] = d['nbefore'] == d['nafter']
#d['jitter'] = int(1e-3*d['jitter'] * recording.get_sampling_frequency())
nb_segments = recording.get_num_segments()
if d['waveform_extractor']._params['max_spikes_per_unit'] is None:
nb_snippets = 1000
else:
nb_snippets = 2*d['waveform_extractor']._params['max_spikes_per_unit']
nb_chunks = nb_snippets // nb_segments
noise_snippets = get_random_data_chunks(recording, num_chunks_per_segment=nb_chunks, chunk_size=d['nb_samples'], seed=42)
noise_snippets = noise_snippets.reshape(nb_chunks, d['nb_samples'], d['nb_channels']).reshape(nb_chunks, -1).T
d = cls._optimize_amplitudes(noise_snippets, d)
return d
@classmethod
def serialize_method_kwargs(cls, kwargs):
kwargs = dict(kwargs)
# remove waveform_extractor
kwargs.pop('waveform_extractor')
return kwargs
@classmethod
def unserialize_in_worker(cls, kwargs):
return kwargs
@classmethod
def get_margin(cls, recording, kwargs):
margin = 2 * max(kwargs['nbefore'], kwargs['nafter'])
return margin
@classmethod
def main_function(cls, traces, d):
peak_sign = d['peak_sign']
abs_threholds = d['abs_threholds']
n_shifts = d['n_shifts']
templates = d['templates']
nb_templates = d['nb_templates']
nb_channels = d['nb_channels']
overlaps = d['overlaps']
margin = d['margin']
norms = d['norms']
jitter = d['jitter']
patch_sizes = d['patch_sizes']
nb_samples = d['nafter'] + d['nbefore']
neighbor_window = nb_samples - 1
amplitudes = d['amplitudes']
sym_patch = d['sym_patch']
sparsities = d['sparsities']
is_dense = d['is_dense']
peak_traces = traces[margin // 2:-margin // 2, :]
peak_sample_ind, peak_chan_ind = detect_peaks_by_channel(peak_traces, peak_sign, abs_threholds, n_shifts)
if jitter > 0:
jittered_peaks = peak_sample_ind[:, np.newaxis] + np.arange(-jitter, jitter)
jittered_channels = peak_chan_ind[:, np.newaxis] + np.zeros(2*jitter)
mask = (jittered_peaks > 0) & (jittered_peaks < len(peak_traces))
jittered_peaks = jittered_peaks[mask]
jittered_channels = jittered_channels[mask]
peak_sample_ind, unique_idx = np.unique(jittered_peaks, return_index=True)
peak_chan_ind = jittered_channels[unique_idx]
else:
peak_sample_ind, unique_idx = np.unique(peak_sample_ind, return_index=True)
peak_chan_ind = peak_chan_ind[unique_idx]
nb_peaks = len(peak_sample_ind)
if sym_patch:
snippets = extract_patches_2d(traces, patch_sizes)[peak_sample_ind]
peak_sample_ind += margin // 2
else:
peak_sample_ind += margin // 2
snippet_window = np.arange(-d['nbefore'], d['nafter'])
snippets = traces[peak_sample_ind[:, np.newaxis] + snippet_window]
if nb_peaks > 0:
snippets = snippets.reshape(nb_peaks, -1)
scalar_products = templates.dot(snippets.T)
else:
scalar_products = np.zeros((nb_templates, 0), dtype=np.float32)
nb_spikes = 0
spikes = np.empty(scalar_products.size, dtype=spike_dtype)
idx_lookup = np.arange(scalar_products.size).reshape(nb_templates, -1)
min_sps = (amplitudes[:, 0] * norms)[:, np.newaxis]
max_sps = (amplitudes[:, 1] * norms)[:, np.newaxis]
is_valid = (scalar_products > min_sps) & (scalar_products < max_sps)
cached_overlaps = {}
while np.any(is_valid):
best_amplitude_ind = scalar_products[is_valid].argmax()
best_cluster_ind, peak_index = np.unravel_index(idx_lookup[is_valid][best_amplitude_ind], idx_lookup.shape)
best_amplitude = scalar_products[best_cluster_ind, peak_index]
best_peak_sample_ind = peak_sample_ind[peak_index]
best_peak_chan_ind = peak_chan_ind[peak_index]
peak_data = peak_sample_ind - peak_sample_ind[peak_index]
is_valid = np.searchsorted(peak_data, [-neighbor_window, neighbor_window + 1])
idx_neighbor = peak_data[is_valid[0]:is_valid[1]] + neighbor_window
            if best_cluster_ind not in cached_overlaps:
cached_overlaps[best_cluster_ind] = overlaps[best_cluster_ind].toarray()
to_add = -best_amplitude * cached_overlaps[best_cluster_ind][:, idx_neighbor]
scalar_products[:, is_valid[0]:is_valid[1]] += to_add
scalar_products[best_cluster_ind, is_valid[0]:is_valid[1]] = -np.inf
spikes['sample_ind'][nb_spikes] = best_peak_sample_ind
spikes['channel_ind'][nb_spikes] = best_peak_chan_ind
spikes['cluster_ind'][nb_spikes] = best_cluster_ind
spikes['amplitude'][nb_spikes] = best_amplitude
nb_spikes += 1
is_valid = (scalar_products > min_sps) & (scalar_products < max_sps)
spikes['amplitude'][:nb_spikes] /= norms[spikes['cluster_ind'][:nb_spikes]]
spikes = spikes[:nb_spikes]
order = np.argsort(spikes['sample_ind'])
spikes = spikes[order]
return spikes
template_matching_methods = {
    'naive': NaiveMatching,
    'tridesclous': TridesclousPeeler,
    'circus': CircusPeeler,
    'circus-omp': CircusOMPPeeler,
}
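# Illustrative end-to-end usage (a sketch; names and job kwargs are assumptions):
#
#     from spikeinterface.core import WaveformExtractor
#     we = WaveformExtractor.load_from_folder('waveforms')
#     spikes = find_spikes_from_templates(
#         recording, method='tridesclous',
#         method_kwargs={'waveform_extractor': we},
#         n_jobs=4, chunk_size=30000, progress_bar=True)
#     # `spikes` is a structured array with fields sample_ind, channel_ind,
#     # cluster_ind, amplitude and segment_ind.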
| 36.532303
| 153
| 0.606647
|
import numpy as np
import scipy.spatial
from tqdm import tqdm
import sklearn, scipy
import scipy
from threadpoolctl import threadpool_limits
try:
import numba
from numba import jit, prange
HAVE_NUMBA = True
except ImportError:
HAVE_NUMBA = False
from spikeinterface.core import WaveformExtractor
from spikeinterface.core.job_tools import ChunkRecordingExecutor
from spikeinterface.toolkit import (get_noise_levels, get_template_channel_sparsity,
get_channel_distances, get_chunk_with_margin, get_template_extremum_channel, get_random_data_chunks)
from spikeinterface.sortingcomponents.peak_detection import detect_peak_locally_exclusive, detect_peaks_by_channel
from sklearn.feature_extraction.image import extract_patches_2d, reconstruct_from_patches_2d
from sklearn.linear_model import orthogonal_mp_gram
potrs, = scipy.linalg.get_lapack_funcs(('potrs',), dtype=np.float32)
nrm2, = scipy.linalg.get_blas_funcs(('nrm2', ), dtype=np.float32)
spike_dtype = [('sample_ind', 'int64'), ('channel_ind', 'int64'), ('cluster_ind', 'int64'),
('amplitude', 'float64'), ('segment_ind', 'int64')]
def find_spikes_from_templates(recording, method='naive', method_kwargs={}, extra_outputs=False,
**job_kwargs):
assert method in template_matching_methods
method_class = template_matching_methods[method]
method_kwargs = method_class.initialize_and_check_kwargs(recording, method_kwargs)
method_kwargs['margin'] = method_class.get_margin(recording, method_kwargs)
method_kwargs_seralized = method_class.serialize_method_kwargs(method_kwargs)
func = _find_spikes_chunk
init_func = _init_worker_find_spikes
init_args = (recording.to_dict(), method, method_kwargs_seralized)
processor = ChunkRecordingExecutor(recording, func, init_func, init_args,
handle_returns=True, job_name=f'find spikes ({method})', **job_kwargs)
spikes = processor.run()
spikes = np.concatenate(spikes)
if extra_outputs:
return spikes, method_kwargs
else:
return spikes
def _init_worker_find_spikes(recording, method, method_kwargs):
if isinstance(recording, dict):
from spikeinterface.core import load_extractor
recording = load_extractor(recording)
method_class = template_matching_methods[method]
method_kwargs = method_class.unserialize_in_worker(method_kwargs)
worker_ctx = {}
worker_ctx['recording'] = recording
worker_ctx['method'] = method
worker_ctx['method_kwargs'] = method_kwargs
worker_ctx['function'] = method_class.main_function
return worker_ctx
def _find_spikes_chunk(segment_index, start_frame, end_frame, worker_ctx):
recording = worker_ctx['recording']
method = worker_ctx['method']
method_kwargs = worker_ctx['method_kwargs']
margin = method_kwargs['margin']
recording_segment = recording._recording_segments[segment_index]
traces, left_margin, right_margin = get_chunk_with_margin(recording_segment,
start_frame, end_frame, None, margin, add_zeros=True)
function = worker_ctx['function']
with threadpool_limits(limits=1):
spikes = function(traces, method_kwargs)
if margin > 0:
keep = (spikes['sample_ind'] >= margin) & (spikes['sample_ind'] < (traces.shape[0] - margin))
spikes = spikes[keep]
spikes['sample_ind'] += (start_frame - margin)
spikes['segment_ind'] = segment_index
return spikes
class BaseTemplateMatchingEngine:
default_params = {}
@classmethod
def initialize_and_check_kwargs(cls, recording, kwargs):
raise NotImplementedError
@classmethod
def serialize_method_kwargs(cls, kwargs):
raise NotImplementedError
@classmethod
def unserialize_in_worker(cls, recording, kwargs):
raise NotImplementedError
@classmethod
def get_margin(cls, recording, kwargs):
raise NotImplementedError
@classmethod
def main_function(cls, traces, method_kwargs):
raise NotImplementedError
@classmethod
def initialize_and_check_kwargs(cls, recording, kwargs):
d = cls.default_params.copy()
d.update(kwargs)
assert d['waveform_extractor'] is not None
we = d['waveform_extractor']
if d['noise_levels'] is None:
d['noise_levels'] = get_noise_levels(recording, **d['random_chunk_kwargs'])
d['abs_threholds'] = d['noise_levels'] * d['detect_threshold']
channel_distance = get_channel_distances(recording)
d['neighbours_mask'] = channel_distance < d['local_radius_um']
d['nbefore'] = we.nbefore
d['nafter'] = we.nafter
return d
@classmethod
def get_margin(cls, recording, kwargs):
margin = max(kwargs['nbefore'], kwargs['nafter'])
return margin
@classmethod
def serialize_method_kwargs(cls, kwargs):
kwargs = dict(kwargs)
waveform_extractor = kwargs['waveform_extractor']
kwargs['waveform_extractor'] = str(waveform_extractor.folder)
return kwargs
@classmethod
def unserialize_in_worker(cls, kwargs):
we = kwargs['waveform_extractor']
if isinstance(we, str):
we = WaveformExtractor.load_from_folder(we)
kwargs['waveform_extractor'] = we
templates = we.get_all_templates(mode='average')
kwargs['templates'] = templates
return kwargs
@classmethod
def main_function(cls, traces, method_kwargs):
peak_sign = method_kwargs['peak_sign']
abs_threholds = method_kwargs['abs_threholds']
n_shifts = method_kwargs['n_shifts']
neighbours_mask = method_kwargs['neighbours_mask']
templates = method_kwargs['templates']
nbefore = method_kwargs['nbefore']
nafter = method_kwargs['nafter']
margin = method_kwargs['margin']
if margin > 0:
peak_traces = traces[margin:-margin, :]
else:
peak_traces = traces
peak_sample_ind, peak_chan_ind = detect_peak_locally_exclusive(peak_traces, peak_sign, abs_threholds, n_shifts, neighbours_mask)
peak_sample_ind += margin
spikes = np.zeros(peak_sample_ind.size, dtype=spike_dtype)
spikes['sample_ind'] = peak_sample_ind
spikes['channel_ind'] = peak_chan_ind
for i in range(peak_sample_ind.size):
i0 = peak_sample_ind[i] - nbefore
i1 = peak_sample_ind[i] + nafter
wf = traces[i0:i1, :]
dist = np.sum(np.sum((templates - wf[None, : , :])**2, axis=1), axis=1)
cluster_ind = np.argmin(dist)
spikes['cluster_ind'][i] = cluster_ind
spikes['amplitude'][i] = 0.
return spikes
d
def initialize_and_check_kwargs(cls, recording, kwargs):
assert HAVE_NUMBA
d = cls.default_params.copy()
d.update(kwargs)
assert isinstance(d['waveform_extractor'], WaveformExtractor)
we = d['waveform_extractor']
unit_ids = we.sorting.unit_ids
channel_ids = we.recording.channel_ids
sr = we.recording.get_sampling_frequency()
templates = we.get_all_templates(mode='average')
d['templates'] = templates
d['nbefore'] = we.nbefore
d['nafter'] = we.nafter
nbefore_short = int(d['ms_before'] * sr / 1000.)
nafter_short = int(d['ms_before'] * sr / 1000.)
assert nbefore_short <= we.nbefore
assert nafter_short <= we.nafter
d['nbefore_short'] = nbefore_short
d['nafter_short'] = nafter_short
s0 = (we.nbefore - nbefore_short)
s1 = -(we.nafter - nafter_short)
if s1 == 0:
s1 = None
templates_short = templates[:, slice(s0,s1), :].copy()
d['templates_short'] = templates_short
d['peak_shift'] = int(d['peak_shift_ms'] / 1000 * sr)
if d['noise_levels'] is None:
print('TridesclousPeeler : noise should be computed outside')
d['noise_levels'] = get_noise_levels(recording)
d['abs_threholds'] = d['noise_levels'] * d['detect_threshold']
channel_distance = get_channel_distances(recording)
d['neighbours_mask'] = channel_distance < d['local_radius_um']
template_sparsity_inds = get_template_channel_sparsity(we, method='threshold',
peak_sign=d['peak_sign'], outputs='index', threshold=d['detect_threshold'])
template_sparsity = np.zeros((unit_ids.size, channel_ids.size), dtype='bool')
for unit_index, unit_id in enumerate(unit_ids):
chan_inds = template_sparsity_inds[unit_id]
template_sparsity[unit_index, chan_inds] = True
d['template_sparsity'] = template_sparsity
extremum_channel = get_template_extremum_channel(we, peak_sign=d['peak_sign'], outputs='index')
extremum_channel = np.array([extremum_channel[unit_id] for unit_id in unit_ids], dtype='int64')
d['extremum_channel'] = extremum_channel
channel_locations = we.recording.get_channel_locations()
unit_locations = channel_locations[extremum_channel]
unit_distances = scipy.spatial.distance.cdist(unit_locations, unit_locations, metric='euclidean')
closest_units = []
for unit_ind, unit_id in enumerate(unit_ids):
order = np.argsort(unit_distances[unit_ind, :])
closest_u = np.arange(unit_ids.size)[order].tolist()
closest_u.remove(unit_ind)
closest_u = np.array(closest_u[:d['num_closest']])
chans, = np.nonzero(d['template_sparsity'][unit_ind, :])
template_sparse = templates[unit_ind, :, :][:, chans]
closest_vec = []
for u in closest_u:
vec = (templates[u, :, :][:, chans] - template_sparse)
vec /= np.sum(vec ** 2)
closest_vec.append((u, vec))
closest_vec.append((None, - template_sparse / np.sum(template_sparse ** 2)))
closest_units.append(closest_vec)
d['closest_units'] = closest_units
distances = scipy.spatial.distance.cdist(channel_locations, unit_locations, metric='euclidean')
near_cluster_mask = distances < d['local_radius_um']
possible_clusters_by_channel = []
for channel_ind in range(distances.shape[0]):
cluster_inds, = np.nonzero(near_cluster_mask[channel_ind, :])
possible_clusters_by_channel.append(cluster_inds)
d['possible_clusters_by_channel'] = possible_clusters_by_channel
d['possible_shifts'] = np.arange(-d['sample_shift'], d['sample_shift'] +1, dtype='int64')
return d
@classmethod
def serialize_method_kwargs(cls, kwargs):
kwargs = dict(kwargs)
kwargs.pop('waveform_extractor')
return kwargs
@classmethod
def unserialize_in_worker(cls, kwargs):
return kwargs
@classmethod
def get_margin(cls, recording, kwargs):
margin = 2 * (kwargs['nbefore'] + kwargs['nafter'])
return margin
@classmethod
def main_function(cls, traces, d):
traces = traces.copy()
all_spikes = []
level = 0
while True:
spikes = _tdc_find_spikes(traces, d, level=level)
keep = (spikes['cluster_ind'] >= 0)
if not np.any(keep):
break
all_spikes.append(spikes[keep])
level += 1
if level == d['num_peeler_loop']:
break
if len(all_spikes) > 0:
all_spikes = np.concatenate(all_spikes)
order = np.argsort(all_spikes['sample_ind'])
all_spikes = all_spikes[order]
else:
all_spikes = np.zeros(0, dtype=spike_dtype)
return all_spikes
def _tdc_find_spikes(traces, d, level=0):
peak_sign = d['peak_sign']
templates = d['templates']
templates_short = d['templates_short']
margin = d['margin']
possible_clusters_by_channel = d['possible_clusters_by_channel']
peak_traces = traces[margin // 2:-margin // 2, :]
peak_sample_ind, peak_chan_ind = detect_peak_locally_exclusive(peak_traces, peak_sign,
d['abs_threholds'], d['peak_shift'], d['neighbours_mask'])
peak_sample_ind += margin // 2
peak_amplitude = traces[peak_sample_ind, peak_chan_ind]
order = np.argsort(np.abs(peak_amplitude))[::-1]
peak_sample_ind = peak_sample_ind[order]
peak_chan_ind = peak_chan_ind[order]
spikes = np.zeros(peak_sample_ind.size, dtype=spike_dtype)
spikes['sample_ind'] = peak_sample_ind
spikes['channel_ind'] = peak_chan_ind
possible_shifts = d['possible_shifts']
distances_shift = np.zeros(possible_shifts.size)
for i in range(peak_sample_ind.size):
sample_ind = peak_sample_ind[i]
chan_ind = peak_chan_ind[i]
possible_clusters = possible_clusters_by_channel[chan_ind]
if possible_clusters.size > 0:
s0 = sample_ind - d['nbefore_short']
s1 = sample_ind + d['nafter_short']
wf_short = traces[s0:s1, :]
.any(d['template_sparsity'][possible_clusters, :], axis=0)
distances = numba_sparse_dist(wf_short, templates_short, union_channels, possible_clusters)
for ind in np.argsort(distances)[:d['num_template_try']]:
cluster_ind = possible_clusters[ind]
chan_sparsity = d['template_sparsity'][cluster_ind, :]
template_sparse = templates[cluster_ind, :, :][:, chan_sparsity]
numba_best_shift(traces, templates[cluster_ind, :, :], sample_ind, d['nbefore'], possible_shifts, distances_shift, chan_sparsity)
ind_shift = np.argmin(distances_shift)
shift = possible_shifts[ind_shift]
sample_ind = sample_ind + shift
s0 = sample_ind - d['nbefore']
s1 = sample_ind + d['nafter']
wf_sparse = traces[s0:s1, chan_sparsity]
centered = wf_sparse - template_sparse
accepted = True
for other_ind, other_vector in d['closest_units'][cluster_ind]:
v = np.sum(centered * other_vector)
if np.abs(v) >0.5:
accepted = False
break
if accepted:
break
if accepted:
amplitude = 1.
template = templates[cluster_ind, :, :]
s0 = sample_ind - d['nbefore']
s1 = sample_ind + d['nafter']
traces[s0:s1, :] -= template * amplitude
else:
cluster_ind = -1
amplitude = 0.
else:
cluster_ind = -1
amplitude = 0.
spikes['cluster_ind'][i] = cluster_ind
spikes['amplitude'][i] =amplitude
return spikes
if HAVE_NUMBA:
@jit(nopython=True)
def numba_sparse_dist(wf, templates, union_channels, possible_clusters):
total_cluster, width, num_chan = templates.shape
num_cluster = possible_clusters.shape[0]
distances = np.zeros((num_cluster,), dtype=np.float32)
for i in prange(num_cluster):
cluster_ind = possible_clusters[i]
sum_dist = 0.
for chan_ind in range(num_chan):
if union_channels[chan_ind]:
for s in range(width):
v = wf[s, chan_ind]
t = templates[cluster_ind, s, chan_ind]
sum_dist += (v - t) ** 2
distances[i] = sum_dist
return distances
@jit(nopython=True)
def numba_best_shift(traces, template, sample_ind, nbefore, possible_shifts, distances_shift, chan_sparsity):
width, num_chan = template.shape
n_shift = possible_shifts.size
for i in range(n_shift):
shift = possible_shifts[i]
sum_dist = 0.
for chan_ind in range(num_chan):
if chan_sparsity[chan_ind]:
for s in range(width):
v = traces[sample_ind - nbefore + s +shift, chan_ind]
t = template[s, chan_ind]
sum_dist += (v - t) ** 2
distances_shift[i] = sum_dist
return distances_shift
': {},
'omp_min_sps' : 0.5,
'progess_bar_steps' : False,
}
@classmethod
def _sparsify_template(cls, template, sparsify_threshold, noise_levels):
is_silent = template.std(0) < 0.25*noise_levels
template[:, is_silent] = 0
channel_norms = np.linalg.norm(template, axis=0)**2
total_norm = np.linalg.norm(template)**2
idx = np.argsort(channel_norms)[::-1]
explained_norms = np.cumsum(channel_norms[idx]/total_norm)
channel = np.searchsorted(explained_norms, sparsify_threshold)
active_channels = np.sort(idx[:channel])
template[:, idx[channel:]] = 0
return template, active_channels
@classmethod
def _prepare_templates(cls, d):
waveform_extractor = d['waveform_extractor']
nb_samples = d['nb_samples']
nb_channels = d['nb_channels']
nb_templates = d['nb_templates']
use_sparse_matrix_threshold = d['use_sparse_matrix_threshold']
d['norms'] = np.zeros(nb_templates, dtype=np.float32)
all_units = list(d['waveform_extractor'].sorting.unit_ids)
templates = waveform_extractor.get_all_templates(mode='median').copy()
d['sparsities'] = {}
for count, unit_id in enumerate(all_units):
templates[count], active_channels = cls._sparsify_template(templates[count], d['sparsify_threshold'], d['noise_levels'])
d['sparsities'][count] = active_channels
d['norms'][count] = np.linalg.norm(templates[count])
templates[count] /= d['norms'][count]
templates = templates.reshape(nb_templates, -1)
nnz = np.sum(templates != 0)/(nb_templates * nb_samples * nb_channels)
if nnz <= use_sparse_matrix_threshold:
templates = scipy.sparse.csr_matrix(templates)
print(f'Templates are automatically sparsified (sparsity level is {nnz})')
d['is_dense'] = False
else:
d['is_dense'] = True
d['templates'] = templates
return d
@classmethod
def _prepare_overlaps(cls, d):
templates = d['templates']
nb_samples = d['nb_samples']
nb_channels = d['nb_channels']
nb_templates = d['nb_templates']
is_dense = d['is_dense']
if not is_dense:
dense_templates = templates.toarray()
else:
dense_templates = templates
dense_templates = dense_templates.reshape(nb_templates, nb_samples, nb_channels)
size = 2 * nb_samples - 1
all_delays = list(range(nb_samples))
if d['progess_bar_steps']:
all_delays = tqdm(all_delays, desc='[1] compute overlaps')
overlaps = {}
for delay in all_delays:
source = dense_templates[:, :delay, :].reshape(nb_templates, -1)
target = dense_templates[:, nb_samples-delay:, :].reshape(nb_templates, -1)
if delay > 0:
overlaps[delay] = scipy.sparse.csr_matrix(source.dot(target.T))
else:
overlaps[delay] = scipy.sparse.csr_matrix((nb_templates, nb_templates), dtype=np.float32)
if delay < nb_samples:
overlaps[size - delay-1] = overlaps[delay].T.tocsr()
new_overlaps = []
for i in range(nb_templates):
data = [overlaps[j][i, :].T for j in range(size)]
data = scipy.sparse.hstack(data)
new_overlaps += [data]
d['overlaps'] = new_overlaps
return d
@classmethod
def initialize_and_check_kwargs(cls, recording, kwargs):
d = cls._default_params.copy()
d.update(kwargs)
assert isinstance(d['waveform_extractor'], WaveformExtractor)
for v in ['sparsify_threshold', 'omp_min_sps','use_sparse_matrix_threshold']:
assert (d[v] >= 0) and (d[v] <= 1), f'{v} should be in [0, 1]'
if d['noise_levels'] is None:
print('CircusOMPPeeler : noise should be computed outside')
d['noise_levels'] = get_noise_levels(recording, **d['random_chunk_kwargs'])
d['nb_channels'] = d['waveform_extractor'].recording.get_num_channels()
d['nb_samples'] = d['waveform_extractor'].nsamples
d['nb_templates'] = len(d['waveform_extractor'].sorting.unit_ids)
d['nbefore'] = d['waveform_extractor'].nbefore
d['nafter'] = d['waveform_extractor'].nafter
d = cls._prepare_templates(d)
d = cls._prepare_overlaps(d)
return d
@classmethod
def serialize_method_kwargs(cls, kwargs):
kwargs = dict(kwargs)
kwargs.pop('waveform_extractor')
return kwargs
@classmethod
def unserialize_in_worker(cls, kwargs):
return kwargs
@classmethod
def get_margin(cls, recording, kwargs):
margin = 2 * max(kwargs['nbefore'], kwargs['nafter'])
return margin
@classmethod
def main_function(cls, traces, d):
templates = d['templates']
nb_templates = d['nb_templates']
nb_channels = d['nb_channels']
overlaps = d['overlaps']
margin = d['margin']
norms = d['norms']
nbefore = d['nbefore']
nafter = d['nafter']
omp_tol = np.finfo(np.float32).eps
omp_min_sps = d['omp_min_sps']
nb_samples = d['nafter'] + d['nbefore']
neighbor_window = nb_samples - 1
min_amplitude, max_amplitude = d['amplitudes']
sparsities = d['sparsities']
is_dense = d['is_dense']
stop_criteria = omp_min_sps * norms[:, np.newaxis]
nb_peaks = len(traces) - nb_samples + 1
if is_dense:
kernel_filters = templates.reshape(nb_templates, nb_samples, nb_channels)[:, ::-1, :]
scalar_products = scipy.signal.fftconvolve(kernel_filters, traces[np.newaxis, :, :], axes=(0, 1), mode='valid').sum(2)
else:
scalar_products = np.empty((nb_templates, nb_peaks), dtype=np.float32)
for i in range(nb_templates):
kernel_filter = templates[i].toarray().reshape(nb_samples, nb_channels)
kernel_filter = kernel_filter[::-1, sparsities[i]]
convolution = scipy.signal.fftconvolve(kernel_filter, traces[:, sparsities[i]], axes=0, mode='valid')
if len(convolution) > 0:
scalar_products[i] = convolution.sum(1)
else:
scalar_products[i] = 0
peak_chan_ind = np.zeros(nb_peaks)
nb_spikes = 0
spikes = np.empty(scalar_products.size, dtype=spike_dtype)
idx_lookup = np.arange(scalar_products.size).reshape(nb_templates, -1)
M = np.zeros((nb_peaks, nb_peaks), dtype=np.float32)
all_selections = np.empty((2, scalar_products.size), dtype=np.int32)
res_sps = np.zeros(0, dtype=np.float32)
final_amplitudes = np.zeros(scalar_products.shape, dtype=np.float32)
nb_selection = 0
full_sps = scalar_products.copy()
neighbors = {}
cached_overlaps = {}
is_valid = (scalar_products > stop_criteria)
while np.any(is_valid):
best_amplitude_ind = scalar_products[is_valid].argmax()
best_cluster_ind, peak_index = np.unravel_index(idx_lookup[is_valid][best_amplitude_ind], idx_lookup.shape)
all_selections[:, nb_selection] = [best_cluster_ind, peak_index]
nb_selection += 1
selection = all_selections[:, :nb_selection]
res_sps = full_sps[selection[0], selection[1]]
mb_selection = nb_selection - 1
delta_t = selection[1] - peak_index
idx = np.where(np.abs(delta_t) <= neighbor_window)[0]
myline = neighbor_window + delta_t[idx]
if best_cluster_ind not in cached_overlaps.keys():
cached_overlaps[best_cluster_ind] = overlaps[best_cluster_ind].toarray()
M[mb_selection, idx] = cached_overlaps[best_cluster_ind][selection[0, idx], myline]
if nb_selection >= (M.shape[0] - 1):
Z = np.zeros((2*M.shape[0], 2*M.shape[1]), dtype=np.float32)
Z[:nb_selection, :nb_selection] = M[:nb_selection, :nb_selection]
M = Z
if mb_selection > 0:
scipy.linalg.solve_triangular(M[:mb_selection, :mb_selection], M[mb_selection, :mb_selection], trans=0,
lower=1,
overwrite_b=True,
check_finite=False)
v = nrm2(M[mb_selection, :mb_selection]) ** 2
if 1 - v <= omp_tol:
break
M[mb_selection, mb_selection] = np.sqrt(1 - v)
all_amplitudes, _ = potrs(M[:nb_selection, :nb_selection], res_sps,
lower=True, overwrite_b=False)
all_amplitudes /= norms[selection[0]]
diff_amplitudes = (all_amplitudes - final_amplitudes[selection[0], selection[1]])
modified = np.where(np.abs(diff_amplitudes) > omp_tol)[0]
final_amplitudes[selection[0], selection[1]] = all_amplitudes
for i in modified:
tmp_best, tmp_peak = selection[:, i]
diff_amp = diff_amplitudes[i]*norms[tmp_best]
if not tmp_best in cached_overlaps.keys():
cached_overlaps[tmp_best] = overlaps[tmp_best].toarray()
if not tmp_peak in neighbors.keys():
idx = [max(0, tmp_peak - neighbor_window), min(nb_peaks, tmp_peak + neighbor_window + 1)]
offset = [neighbor_window + idx[0] - tmp_peak, neighbor_window + idx[1] - tmp_peak]
neighbors[tmp_peak] = {'idx' : idx, 'tdx' : offset}
idx = neighbors[tmp_peak]['idx']
tdx = neighbors[tmp_peak]['tdx']
to_add = diff_amp * cached_overlaps[tmp_best][:, tdx[0]:tdx[1]]
scalar_products[:, idx[0]:idx[1]] -= to_add
scalar_products[best_cluster_ind, peak_index] = -np.inf
is_valid = (scalar_products > stop_criteria)
is_valid = (final_amplitudes > min_amplitude)*(final_amplitudes < max_amplitude)
valid_indices = np.where(is_valid)
nb_spikes = len(valid_indices[0])
spikes['sample_ind'][:nb_spikes] = valid_indices[1] + d['nbefore']
spikes['channel_ind'][:nb_spikes] = 0
spikes['cluster_ind'][:nb_spikes] = valid_indices[0]
spikes['amplitude'][:nb_spikes] = final_amplitudes[valid_indices[0], valid_indices[1]]
spikes = spikes[:nb_spikes]
order = np.argsort(spikes['sample_ind'])
spikes = spikes[order]
return spikes
class CircusPeeler(BaseTemplateMatchingEngine):
_default_params = {
'peak_sign': 'neg',
'n_shifts': 1,
'jitter' : 1,
'detect_threshold': 5,
'noise_levels': None,
'random_chunk_kwargs': {},
'sparsify_threshold': 0.99,
'max_amplitude' : 1.5,
'min_amplitude' : 0.5,
'use_sparse_matrix_threshold' : 0.25,
'progess_bar_steps' : True,
}
@classmethod
def _sparsify_template(cls, template, sparsify_threshold, noise_levels):
is_silent = template.std(0) < 0.25*noise_levels
template[:, is_silent] = 0
channel_norms = np.linalg.norm(template, axis=0)**2
total_norm = np.linalg.norm(template)**2
idx = np.argsort(channel_norms)[::-1]
explained_norms = np.cumsum(channel_norms[idx]/total_norm)
channel = np.searchsorted(explained_norms, sparsify_threshold)
active_channels = np.sort(idx[:channel])
template[:, idx[channel:]] = 0
return template, active_channels
@classmethod
def _prepare_templates(cls, d):
waveform_extractor = d['waveform_extractor']
nb_samples = d['nb_samples']
nb_channels = d['nb_channels']
nb_templates = d['nb_templates']
max_amplitude = d['max_amplitude']
min_amplitude = d['min_amplitude']
use_sparse_matrix_threshold = d['use_sparse_matrix_threshold']
d['norms'] = np.zeros(nb_templates, dtype=np.float32)
all_units = list(d['waveform_extractor'].sorting.unit_ids)
templates = waveform_extractor.get_all_templates(mode='median').copy()
d['sparsities'] = {}
for count, unit_id in enumerate(all_units):
templates[count], active_channels = cls._sparsify_template(templates[count], d['sparsify_threshold'], d['noise_levels'])
d['sparsities'][count] = active_channels
d['norms'][count] = np.linalg.norm(templates[count])
templates[count] /= d['norms'][count]
templates = templates.reshape(nb_templates, -1)
nnz = np.sum(templates != 0)/(nb_templates * nb_samples * nb_channels)
if nnz <= use_sparse_matrix_threshold:
templates = scipy.sparse.csr_matrix(templates)
print(f'Templates are automatically sparsified (sparsity level is {nnz})')
d['is_dense'] = False
else:
d['is_dense'] = True
d['templates'] = templates
return d
@classmethod
def _prepare_overlaps(cls, d):
templates = d['templates']
nb_samples = d['nb_samples']
nb_channels = d['nb_channels']
nb_templates = d['nb_templates']
is_dense = d['is_dense']
if not is_dense:
dense_templates = templates.toarray()
else:
dense_templates = templates
dense_templates = dense_templates.reshape(nb_templates, nb_samples, nb_channels)
size = 2 * nb_samples - 1
all_delays = list(range(nb_samples))
if d['progess_bar_steps']:
all_delays = tqdm(all_delays, desc='[1] compute overlaps')
overlaps = {}
for delay in all_delays:
source = dense_templates[:, :delay, :].reshape(nb_templates, -1)
target = dense_templates[:, nb_samples-delay:, :].reshape(nb_templates, -1)
if delay > 0:
overlaps[delay] = scipy.sparse.csr_matrix(source.dot(target.T))
else:
overlaps[delay] = scipy.sparse.csr_matrix((nb_templates, nb_templates), dtype=np.float32)
if delay < nb_samples:
overlaps[size - delay-1] = overlaps[delay].T.tocsr()
new_overlaps = []
for i in range(nb_templates):
data = [overlaps[j][i, :].T for j in range(size)]
data = scipy.sparse.hstack(data)
new_overlaps += [data]
d['overlaps'] = new_overlaps
return d
@classmethod
def _mcc_error(cls, bounds, good, bad):
fn = np.sum((good < bounds[0]) | (good > bounds[1]))
fp = np.sum((bounds[0] <= bad) & (bad <= bounds[1]))
tp = np.sum((bounds[0] <= good) & (good <= bounds[1]))
tn = np.sum((bad < bounds[0]) | (bad > bounds[1]))
denom = (tp+fp)*(tp+fn)*(tn+fp)*(tn+fn)
if denom > 0:
mcc = 1 - (tp*tn - fp*fn)/np.sqrt(denom)
else:
mcc = 1
return mcc
@classmethod
def _cost_function_mcc(cls, bounds, good, bad, delta_amplitude, alpha):
cost = alpha*cls._mcc_error(bounds, good, bad) + (1 - alpha)*np.abs((1 - (bounds[1] - bounds[0])/delta_amplitude))
return cost
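    # Hedged worked example (added comment, not in the original source): with
    # bounds=(0.5, 1.5), good=[0.9, 1.1] and bad=[0.1, 2.0], _mcc_error gives
    # tp=2, fn=0, fp=0, tn=2, so denom=16 and mcc = 1 - (2*2 - 0*0)/sqrt(16) = 0,
    # i.e. a perfect split; _cost_function_mcc then trades this error off against
    # how much of the allowed amplitude range the bounds cover.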
@classmethod
def _optimize_amplitudes(cls, noise_snippets, d):
waveform_extractor = d['waveform_extractor']
templates = d['templates']
nb_templates = d['nb_templates']
max_amplitude = d['max_amplitude']
min_amplitude = d['min_amplitude']
alpha = 0.5
norms = d['norms']
all_units = list(waveform_extractor.sorting.unit_ids)
if d['progess_bar_steps']:
all_units = tqdm(all_units, desc='[2] compute amplitudes')
d['amplitudes'] = np.zeros((nb_templates, 2), dtype=np.float32)
noise = templates.dot(noise_snippets)/norms[:, np.newaxis]
all_amps = {}
for count, unit_id in enumerate(all_units):
w = waveform_extractor.get_waveforms(unit_id)
snippets = w.reshape(w.shape[0], -1).T
amps = templates.dot(snippets)/norms[:, np.newaxis]
good = amps[count, :].flatten()
sub_amps = amps[np.concatenate((np.arange(count), np.arange(count+1, nb_templates))), :]
bad = sub_amps[sub_amps >= good]
bad = np.concatenate((bad, noise[count]))
cost_kwargs = [good, bad, max_amplitude - min_amplitude, alpha]
cost_bounds = [(min_amplitude, 1), (1, max_amplitude)]
res = scipy.optimize.differential_evolution(cls._cost_function_mcc, bounds=cost_bounds, args=cost_kwargs)
d['amplitudes'][count] = res.x
return d
@classmethod
def initialize_and_check_kwargs(cls, recording, kwargs):
d = cls._default_params.copy()
d.update(kwargs)
assert isinstance(d['waveform_extractor'], WaveformExtractor)
for v in ['sparsify_threshold', 'use_sparse_matrix_threshold']:
assert (d[v] >= 0) and (d[v] <= 1), f'{v} should be in [0, 1]'
d['nb_channels'] = d['waveform_extractor'].recording.get_num_channels()
d['nb_samples'] = d['waveform_extractor'].nsamples
d['nb_templates'] = len(d['waveform_extractor'].sorting.unit_ids)
if d['noise_levels'] is None:
print('CircusPeeler : noise should be computed outside')
d['noise_levels'] = get_noise_levels(recording, **d['random_chunk_kwargs'])
d['abs_threholds'] = d['noise_levels'] * d['detect_threshold']
d = cls._prepare_templates(d)
d = cls._prepare_overlaps(d)
d['nbefore'] = d['waveform_extractor'].nbefore
d['nafter'] = d['waveform_extractor'].nafter
d['patch_sizes'] = (d['waveform_extractor'].nsamples, d['nb_channels'])
d['sym_patch'] = d['nbefore'] == d['nafter']
nb_segments = recording.get_num_segments()
if d['waveform_extractor']._params['max_spikes_per_unit'] is None:
nb_snippets = 1000
else:
nb_snippets = 2*d['waveform_extractor']._params['max_spikes_per_unit']
nb_chunks = nb_snippets // nb_segments
noise_snippets = get_random_data_chunks(recording, num_chunks_per_segment=nb_chunks, chunk_size=d['nb_samples'], seed=42)
noise_snippets = noise_snippets.reshape(nb_chunks, d['nb_samples'], d['nb_channels']).reshape(nb_chunks, -1).T
d = cls._optimize_amplitudes(noise_snippets, d)
return d
@classmethod
def serialize_method_kwargs(cls, kwargs):
kwargs = dict(kwargs)
kwargs.pop('waveform_extractor')
return kwargs
@classmethod
def unserialize_in_worker(cls, kwargs):
return kwargs
@classmethod
def get_margin(cls, recording, kwargs):
margin = 2 * max(kwargs['nbefore'], kwargs['nafter'])
return margin
@classmethod
def main_function(cls, traces, d):
peak_sign = d['peak_sign']
abs_threholds = d['abs_threholds']
n_shifts = d['n_shifts']
templates = d['templates']
nb_templates = d['nb_templates']
nb_channels = d['nb_channels']
overlaps = d['overlaps']
margin = d['margin']
norms = d['norms']
jitter = d['jitter']
patch_sizes = d['patch_sizes']
nb_samples = d['nafter'] + d['nbefore']
neighbor_window = nb_samples - 1
amplitudes = d['amplitudes']
sym_patch = d['sym_patch']
sparsities = d['sparsities']
is_dense = d['is_dense']
peak_traces = traces[margin // 2:-margin // 2, :]
peak_sample_ind, peak_chan_ind = detect_peaks_by_channel(peak_traces, peak_sign, abs_threholds, n_shifts)
if jitter > 0:
jittered_peaks = peak_sample_ind[:, np.newaxis] + np.arange(-jitter, jitter)
jittered_channels = peak_chan_ind[:, np.newaxis] + np.zeros(2*jitter)
mask = (jittered_peaks > 0) & (jittered_peaks < len(peak_traces))
jittered_peaks = jittered_peaks[mask]
jittered_channels = jittered_channels[mask]
peak_sample_ind, unique_idx = np.unique(jittered_peaks, return_index=True)
peak_chan_ind = jittered_channels[unique_idx]
else:
peak_sample_ind, unique_idx = np.unique(peak_sample_ind, return_index=True)
peak_chan_ind = peak_chan_ind[unique_idx]
nb_peaks = len(peak_sample_ind)
if sym_patch:
snippets = extract_patches_2d(traces, patch_sizes)[peak_sample_ind]
peak_sample_ind += margin // 2
else:
peak_sample_ind += margin // 2
snippet_window = np.arange(-d['nbefore'], d['nafter'])
snippets = traces[peak_sample_ind[:, np.newaxis] + snippet_window]
if nb_peaks > 0:
snippets = snippets.reshape(nb_peaks, -1)
scalar_products = templates.dot(snippets.T)
else:
scalar_products = np.zeros((nb_templates, 0), dtype=np.float32)
nb_spikes = 0
spikes = np.empty(scalar_products.size, dtype=spike_dtype)
idx_lookup = np.arange(scalar_products.size).reshape(nb_templates, -1)
min_sps = (amplitudes[:, 0] * norms)[:, np.newaxis]
max_sps = (amplitudes[:, 1] * norms)[:, np.newaxis]
is_valid = (scalar_products > min_sps) & (scalar_products < max_sps)
cached_overlaps = {}
while np.any(is_valid):
best_amplitude_ind = scalar_products[is_valid].argmax()
best_cluster_ind, peak_index = np.unravel_index(idx_lookup[is_valid][best_amplitude_ind], idx_lookup.shape)
best_amplitude = scalar_products[best_cluster_ind, peak_index]
best_peak_sample_ind = peak_sample_ind[peak_index]
best_peak_chan_ind = peak_chan_ind[peak_index]
peak_data = peak_sample_ind - peak_sample_ind[peak_index]
is_valid = np.searchsorted(peak_data, [-neighbor_window, neighbor_window + 1])
idx_neighbor = peak_data[is_valid[0]:is_valid[1]] + neighbor_window
if not best_cluster_ind in cached_overlaps.keys():
cached_overlaps[best_cluster_ind] = overlaps[best_cluster_ind].toarray()
to_add = -best_amplitude * cached_overlaps[best_cluster_ind][:, idx_neighbor]
scalar_products[:, is_valid[0]:is_valid[1]] += to_add
scalar_products[best_cluster_ind, is_valid[0]:is_valid[1]] = -np.inf
spikes['sample_ind'][nb_spikes] = best_peak_sample_ind
spikes['channel_ind'][nb_spikes] = best_peak_chan_ind
spikes['cluster_ind'][nb_spikes] = best_cluster_ind
spikes['amplitude'][nb_spikes] = best_amplitude
nb_spikes += 1
is_valid = (scalar_products > min_sps) & (scalar_products < max_sps)
spikes['amplitude'][:nb_spikes] /= norms[spikes['cluster_ind'][:nb_spikes]]
spikes = spikes[:nb_spikes]
order = np.argsort(spikes['sample_ind'])
spikes = spikes[order]
return spikes
template_matching_methods = {
'naive' : NaiveMatching,
'tridesclous' : TridesclousPeeler,
'circus' : CircusPeeler,
'circus-omp' : CircusOMPPeeler
}
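# Hedged usage sketch (added code, illustration only): resolving an engine class from
# the registry above by name; the real entry point that consumes this dict lives
# elsewhere in the package.
def _example_get_matching_engine(name='circus'):
    try:
        return template_matching_methods[name]
    except KeyError:
        raise ValueError(f'Unknown template matching method {name!r}; '
                         f'choose one of {sorted(template_matching_methods)}')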
| true
| true
|
1c46d1d4784a0d62c8a99280915d87553433d406
| 170
|
py
|
Python
|
recepcao/admin.py
|
alantinoco/recepcao-edificio-comercial
|
dcbfa9fd93f71b2bec15681b947371f8af3e815f
|
[
"MIT"
] | null | null | null |
recepcao/admin.py
|
alantinoco/recepcao-edificio-comercial
|
dcbfa9fd93f71b2bec15681b947371f8af3e815f
|
[
"MIT"
] | null | null | null |
recepcao/admin.py
|
alantinoco/recepcao-edificio-comercial
|
dcbfa9fd93f71b2bec15681b947371f8af3e815f
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import *
admin.site.register(Sala)
admin.site.register(Usuario)
admin.site.register(Visitante)
admin.site.register(Visita)
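# Hedged sketch (added comment, not part of the original file): if a customised change
# list were wanted, a plain register() call above could be swapped for a ModelAdmin.
# Kept commented out because Visita is already registered above, and the field names
# are assumptions for illustration only.
#
# @admin.register(Visita)
# class VisitaAdmin(admin.ModelAdmin):
#     list_display = ('visitante', 'sala', 'data')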
| 21.25
| 32
| 0.811765
|
from django.contrib import admin
from .models import *
admin.site.register(Sala)
admin.site.register(Usuario)
admin.site.register(Visitante)
admin.site.register(Visita)
| true
| true
|
1c46d206debfc3cfd0af0e2eb1216cafaca41f24
| 3,325
|
py
|
Python
|
ucscsdk/mometa/storage/StorageSnapshotCtx.py
|
parag-may4/ucscsdk
|
2ea762fa070330e3a4e2c21b46b157469555405b
|
[
"Apache-2.0"
] | 9
|
2016-12-22T08:39:25.000Z
|
2019-09-10T15:36:19.000Z
|
ucscsdk/mometa/storage/StorageSnapshotCtx.py
|
parag-may4/ucscsdk
|
2ea762fa070330e3a4e2c21b46b157469555405b
|
[
"Apache-2.0"
] | 10
|
2017-01-31T06:59:56.000Z
|
2021-11-09T09:14:37.000Z
|
ucscsdk/mometa/storage/StorageSnapshotCtx.py
|
parag-may4/ucscsdk
|
2ea762fa070330e3a4e2c21b46b157469555405b
|
[
"Apache-2.0"
] | 13
|
2016-11-14T07:42:58.000Z
|
2022-02-10T17:32:05.000Z
|
"""This module contains the general information for StorageSnapshotCtx ManagedObject."""
from ...ucscmo import ManagedObject
from ...ucsccoremeta import UcscVersion, MoPropertyMeta, MoMeta
from ...ucscmeta import VersionMeta
class StorageSnapshotCtxConsts():
LUN_CFG_ACTION_DELETE = "delete"
LUN_CFG_ACTION_OFFLINE = "offline"
LUN_CFG_ACTION_ONLINE = "online"
LUN_CFG_ACTION_RESTORE_SNAPSHOT = "restore-snapshot"
LUN_CFG_ACTION_TRIGGERED = "triggered"
TS_CREATED_ = ""
class StorageSnapshotCtx(ManagedObject):
"""This is StorageSnapshotCtx class."""
consts = StorageSnapshotCtxConsts()
naming_props = set([])
mo_meta = MoMeta("StorageSnapshotCtx", "storageSnapshotCtx", "snap-ctx", VersionMeta.Version141a, "InputOutput", 0x1f, [], ["admin", "ls-compute", "ls-config", "ls-server", "ls-storage"], [u'storageScsiLun'], [], ["Get"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version141a, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version141a, MoPropertyMeta.READ_ONLY, 0x2, 0, 256, None, [], []),
"lun_cfg_action": MoPropertyMeta("lun_cfg_action", "lunCfgAction", "string", VersionMeta.Version141a, MoPropertyMeta.READ_WRITE, 0x4, None, None, None, ["delete", "offline", "online", "restore-snapshot", "triggered"], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version141a, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
"snap_percent": MoPropertyMeta("snap_percent", "snapPercent", "string", VersionMeta.Version141a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"src_lun_dn": MoPropertyMeta("src_lun_dn", "srcLunDn", "string", VersionMeta.Version141a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
"src_lun_name": MoPropertyMeta("src_lun_name", "srcLunName", "string", VersionMeta.Version141a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version141a, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"ts_created": MoPropertyMeta("ts_created", "tsCreated", "string", VersionMeta.Version141a, MoPropertyMeta.READ_ONLY, None, None, None, r"""([0-9]){4}-([0-9]){2}-([0-9]){2}T([0-9]){2}:([0-9]){2}:([0-9]){2}((\.([0-9]){3})){0,1}""", [""], []),
}
prop_map = {
"childAction": "child_action",
"dn": "dn",
"lunCfgAction": "lun_cfg_action",
"rn": "rn",
"snapPercent": "snap_percent",
"srcLunDn": "src_lun_dn",
"srcLunName": "src_lun_name",
"status": "status",
"tsCreated": "ts_created",
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.child_action = None
self.lun_cfg_action = None
self.snap_percent = None
self.src_lun_dn = None
self.src_lun_name = None
self.status = None
self.ts_created = None
ManagedObject.__init__(self, "StorageSnapshotCtx", parent_mo_or_dn, **kwargs)
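# Hedged usage sketch (added code, illustration only, not in the generated module):
# constructing the MO under a parent DN and setting its one writable action property.
# The parent DN string is an assumption for illustration.
def _example_snapshot_ctx():
    mo = StorageSnapshotCtx(parent_mo_or_dn='org-root/ls-test/lun-1')
    mo.lun_cfg_action = StorageSnapshotCtxConsts.LUN_CFG_ACTION_ONLINE
    return mo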
| 54.508197
| 249
| 0.657444
|
from ...ucscmo import ManagedObject
from ...ucsccoremeta import UcscVersion, MoPropertyMeta, MoMeta
from ...ucscmeta import VersionMeta
class StorageSnapshotCtxConsts():
LUN_CFG_ACTION_DELETE = "delete"
LUN_CFG_ACTION_OFFLINE = "offline"
LUN_CFG_ACTION_ONLINE = "online"
LUN_CFG_ACTION_RESTORE_SNAPSHOT = "restore-snapshot"
LUN_CFG_ACTION_TRIGGERED = "triggered"
TS_CREATED_ = ""
class StorageSnapshotCtx(ManagedObject):
consts = StorageSnapshotCtxConsts()
naming_props = set([])
mo_meta = MoMeta("StorageSnapshotCtx", "storageSnapshotCtx", "snap-ctx", VersionMeta.Version141a, "InputOutput", 0x1f, [], ["admin", "ls-compute", "ls-config", "ls-server", "ls-storage"], [u'storageScsiLun'], [], ["Get"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version141a, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version141a, MoPropertyMeta.READ_ONLY, 0x2, 0, 256, None, [], []),
"lun_cfg_action": MoPropertyMeta("lun_cfg_action", "lunCfgAction", "string", VersionMeta.Version141a, MoPropertyMeta.READ_WRITE, 0x4, None, None, None, ["delete", "offline", "online", "restore-snapshot", "triggered"], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version141a, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
"snap_percent": MoPropertyMeta("snap_percent", "snapPercent", "string", VersionMeta.Version141a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"src_lun_dn": MoPropertyMeta("src_lun_dn", "srcLunDn", "string", VersionMeta.Version141a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
"src_lun_name": MoPropertyMeta("src_lun_name", "srcLunName", "string", VersionMeta.Version141a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version141a, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"ts_created": MoPropertyMeta("ts_created", "tsCreated", "string", VersionMeta.Version141a, MoPropertyMeta.READ_ONLY, None, None, None, r"""([0-9]){4}-([0-9]){2}-([0-9]){2}T([0-9]){2}:([0-9]){2}:([0-9]){2}((\.([0-9]){3})){0,1}""", [""], []),
}
prop_map = {
"childAction": "child_action",
"dn": "dn",
"lunCfgAction": "lun_cfg_action",
"rn": "rn",
"snapPercent": "snap_percent",
"srcLunDn": "src_lun_dn",
"srcLunName": "src_lun_name",
"status": "status",
"tsCreated": "ts_created",
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.child_action = None
self.lun_cfg_action = None
self.snap_percent = None
self.src_lun_dn = None
self.src_lun_name = None
self.status = None
self.ts_created = None
ManagedObject.__init__(self, "StorageSnapshotCtx", parent_mo_or_dn, **kwargs)
| true
| true
|
1c46d21702697c85163d3d5adbdd640e38fb9d31
| 417
|
py
|
Python
|
tina/assimp/pfm.py
|
xuhao1/taichi_three
|
25fdf047da4c93df36a047a0be3cc47225d328c9
|
[
"MIT"
] | 152
|
2020-06-17T09:08:59.000Z
|
2022-03-30T13:48:49.000Z
|
tina/assimp/pfm.py
|
xuhao1/taichi_three
|
25fdf047da4c93df36a047a0be3cc47225d328c9
|
[
"MIT"
] | 46
|
2020-06-20T15:15:57.000Z
|
2022-03-24T20:03:18.000Z
|
tina/assimp/pfm.py
|
xuhao1/taichi_three
|
25fdf047da4c93df36a047a0be3cc47225d328c9
|
[
"MIT"
] | 27
|
2020-06-20T14:25:55.000Z
|
2022-03-12T08:11:31.000Z
|
import numpy as np
import sys
def pfmwrite(path, im):
im = im.swapaxes(0, 1)
scale = max(1e-10, -im.min(), im.max())
h, w = im.shape[:2]
with open(path, 'wb') as f:
f.write(b'PF\n' if len(im.shape) >= 3 else b'Pf\n')
f.write(f'{w} {h}\n'.encode())
f.write(f'{scale if sys.byteorder == "big" else -scale}\n'.encode())
f.write((im / scale).astype(np.float32).tobytes())
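def pfmread(path):
    # Hedged counterpart sketch (added code, not in the original file): a minimal PFM
    # reader matching pfmwrite above. It assumes the exact header layout the writer
    # produces (type line, 'w h' line, signed scale line encoding endianness, raw
    # float32 data) and skips malformed-file handling.
    with open(path, 'rb') as f:
        color = f.readline().strip() == b'PF'
        w, h = map(int, f.readline().split())
        scale = float(f.readline().decode())
        data = np.frombuffer(f.read(), dtype='<f4' if scale < 0 else '>f4')
        im = data.reshape((h, w, 3) if color else (h, w)) * abs(scale)
    return im.swapaxes(0, 1)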
| 32.076923
| 76
| 0.553957
|
import numpy as np
import sys
def pfmwrite(path, im):
im = im.swapaxes(0, 1)
scale = max(1e-10, -im.min(), im.max())
h, w = im.shape[:2]
with open(path, 'wb') as f:
f.write(b'PF\n' if len(im.shape) >= 3 else b'Pf\n')
f.write(f'{w} {h}\n'.encode())
f.write(f'{scale if sys.byteorder == "big" else -scale}\n'.encode())
f.write((im / scale).astype(np.float32).tobytes())
| true
| true
|
1c46d21fab526fc9bb640abb06ed75334c27fafe
| 677
|
py
|
Python
|
setup.py
|
tianhuil/checkpoint
|
842d1cff0cbe5926a36f1927fb75b5dcbaf4ec31
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
tianhuil/checkpoint
|
842d1cff0cbe5926a36f1927fb75b5dcbaf4ec31
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
tianhuil/checkpoint
|
842d1cff0cbe5926a36f1927fb75b5dcbaf4ec31
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
from distutils.core import setup
setup(
name='checkpoint',
version='0.1',
description='Setup',
author='Tianhui Michael Li',
author_email='test@example.com',
url='https://github.com/tianhuil/checkpoint/',
packages=['checkpoint'],
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
)
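# Hedged usage note (added comment, not in the original file): with this distutils
# script the package would typically be installed from the repo root with
#   python setup.py install
# or, with modern tooling, `pip install .`.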
| 27.08
| 57
| 0.646972
|
from distutils.core import setup
setup(
name='checkpoint',
version='0.1',
description='Setup',
author='Tianhui Michael Li',
author_email='test@example.com',
url='https://github.com/tianhuil/checkpoint/',
packages=['checkpoint'],
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
)
| true
| true
|
1c46d3b7b10037122d1f0238ad1b6a580936df6a
| 4,834
|
py
|
Python
|
TestTickAlpha-Iota.py
|
nikorasen/Project_S.U.I.T.U.P.
|
4f2873346bd3954d455e2e4e19a84f20c58d1ab2
|
[
"MIT"
] | null | null | null |
TestTickAlpha-Iota.py
|
nikorasen/Project_S.U.I.T.U.P.
|
4f2873346bd3954d455e2e4e19a84f20c58d1ab2
|
[
"MIT"
] | null | null | null |
TestTickAlpha-Iota.py
|
nikorasen/Project_S.U.I.T.U.P.
|
4f2873346bd3954d455e2e4e19a84f20c58d1ab2
|
[
"MIT"
] | null | null | null |
import tkinter as tk
import feedparser
import datetime
import time
from tkinter import messagebox
import re
def Spyder(): #Crawls the links in the sources file, saves them to a txt file
All_Articles=''
try:
with open('Sources.txt', 'r') as Srcs:
for line in Srcs:
Link=line
Link=Link.strip('\n') #removes any newline characters
Feed=feedparser.parse(Link)
for entry in Feed.entries:
try:
Art_Title=entry.title
except AttributeError:
Art_Title='n/a'
try:
Art_Auth=entry.author
except AttributeError:
try:
Art_Auth=entry.media
except AttributeError:
try:
Art_Auth=entry.generator
except AttributeError:
Art_Auth='n/a'
try:
Art_URL=entry.link
except AttributeError:
try:
Art_URL=entry.url
except AttributeError:
Art_URL='n/a'
try:
Post_Time=entry.pubdate
except AttributeError:
try:
Post_Time=entry.pubDate
except AttributeError:
try:
Post_Time=entry.published_parsed
except AttributeError:
Post_Time='n/a'
try:
Desc=entry.summary
except AttributeError:
try:
Desc=entry.description
except AttributeError:
Desc='n/a'
All_Articles+=' Title: '+str(Art_Title)+' Author: '+str(Art_Auth)+' Link: '+str(Art_URL)+' Posted: '+str(Post_Time)+' Summary: '+str(Desc)+'\n'
except EOFError:
pass
try:
with open('Art_Arch.txt', 'a') as Svd: #Saves the All_Articles string to a text file
Svd.write(All_Articles+'\n')
except FileNotFoundError:
with open('Art_Arch.txt', 'x') as Svd:
Svd.write(All_Articles+'\n')
def Strip_XML1(string): #Collapses runs of whitespace, parentheses and hyphens in a passed string into single spaces using a regex
XML_Chr=re.compile(r'[\s()-]+')
return XML_Chr.sub(' ', string)
def Strip_XML2(string): #Removes XML/HTML tags from a passed string using a regex
XML_Chr=re.compile(r'<.*?>')
return XML_Chr.sub(' ', string)
Spyder()
root = tk.Tk()
root.geometry('1900x30')
root.wm_title('S.U.I.T.U.P. Newsdesk BETA Ticker')
svar = tk.StringVar()
labl = tk.Label(root, textvariable=svar, height=25, bg='#003b6f', fg='white')
strArticles=''
def shif():
shif.msg = shif.msg[1:] + shif.msg[0]
svar.set(shif.msg)
root.after(150, shif)
try:
with open('Art_Arch.txt', 'r') as Svd:
strCurr_Disp=''
for line in Svd:
strCurr_Disp=line
strCurr_Disp=strCurr_Disp.strip('\n')
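            # Note (added comment): the str.strip() calls below remove *characters* from
            # both ends of the string, not the literal substrings passed in (e.g.
            # strip('</a>') strips any of '<', '/', 'a', '>'); the Strip_XML2() regex
            # pass afterwards is what actually removes whole tags.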
strCurr_Disp=strCurr_Disp.strip('</a>')
strCurr_Disp=strCurr_Disp.strip('<a')
strCurr_Disp=strCurr_Disp.strip('</p>')
strCurr_Disp=strCurr_Disp.strip('<p>')
strCurr_Disp=strCurr_Disp.strip('</strong>')
strCurr_Disp=strCurr_Disp.strip('<p><img')
strCurr_Disp=strCurr_Disp.strip(' <p><img ')
strCurr_Disp=strCurr_Disp.strip(' <img ')
strCurr_Disp=strCurr_Disp.strip('<img width="300"')
strCurr_Disp=strCurr_Disp.strip('<img')
strCurr_Disp=strCurr_Disp.strip(' <img src=')
strCurr_Disp=strCurr_Disp.strip(' align="right" ')
strCurr_Disp=strCurr_Disp.strip(' hspace="20" ')
strCurr_Disp=strCurr_Disp.strip(' vspace="20" ')
strCurr_Disp=strCurr_Disp.strip('<td')
strCurr_Disp=strCurr_Disp.strip('</td')
strCurr_Disp=strCurr_Disp.strip('</td>')
strCurr_Disp=strCurr_Disp.strip('<br')
strCurr_Disp=strCurr_Disp.strip(' <br />')
strCurr_Disp=strCurr_Disp.strip(' <br ')
strCurr_Disp=strCurr_Disp.strip('MISC')
strCurr_Disp=strCurr_Disp.strip(' MISC ')
strCurr_Disp=strCurr_Disp.strip('</a')
strCurr_Disp=Strip_XML1(strCurr_Disp)
strCurr_Disp=Strip_XML2(strCurr_Disp)
strArticles=strArticles + str(strCurr_Disp)
shif.msg=(strArticles)
except EOFError:
pass
shif()
labl.pack()
root.mainloop()
| 40.621849
| 163
| 0.522342
|
import tkinter as tk
import feedparser
import datetime
import time
from tkinter import messagebox
import re
def Spyder():
All_Articles=''
try:
with open('Sources.txt', 'r') as Srcs:
for line in Srcs:
Link=line
Link=Link.strip('\n')
Feed=feedparser.parse(Link)
for entry in Feed.entries:
try:
Art_Title=entry.title
except AttributeError:
Art_Title='n/a'
try:
Art_Auth=entry.author
except AttributeError:
try:
Art_Auth=entry.media
except AttributeError:
try:
Art_Auth=entry.generator
except AttributeError:
Art_Auth='n/a'
try:
Art_URL=entry.link
except AttributeError:
try:
Art_URL=entry.url
except AttributeError:
Art_URL='n/a'
try:
Post_Time=entry.pubdate
except AttributeError:
try:
Post_Time=entry.pubDate
except AttributeError:
try:
Post_Time=entry.published_parsed
except AttributeError:
Post_Time='n/a'
try:
Desc=entry.summary
except AttributeError:
try:
Desc=entry.description
except AttributeError:
Desc='n/a'
All_Articles+=' Title: '+str(Art_Title)+' Author: '+str(Art_Auth)+' Link: '+str(Art_URL)+' Posted: '+str(Post_Time)+' Summary: '+str(Desc)+'\n'
except EOFError:
pass
try:
with open('Art_Arch.txt', 'a') as Svd:
Svd.write(All_Articles+'\n')
except FileNotFoundError:
with open('Art_Arch.txt', 'x') as Svd:
Svd.write(All_Articles+'\n')
def Strip_XML1(string):
XML_Chr=re.compile(r'[\s()-]+')
return XML_Chr.sub(' ', string)
def Strip_XML2(string):
XML_Chr=re.compile(r'<.*?>')
return XML_Chr.sub(' ', string)
Spyder()
root = tk.Tk()
root.geometry('1900x30')
root.wm_title('S.U.I.T.U.P. Newsdesk BETA Ticker')
svar = tk.StringVar()
labl = tk.Label(root, textvariable=svar, height=25, bg='#003b6f', fg='white')
strArticles=''
def shif():
shif.msg = shif.msg[1:] + shif.msg[0]
svar.set(shif.msg)
root.after(150, shif)
try:
with open('Art_Arch.txt', 'r') as Svd:
strCurr_Disp=''
for line in Svd:
strCurr_Disp=line
strCurr_Disp=strCurr_Disp.strip('\n')
strCurr_Disp=strCurr_Disp.strip('</a>')
strCurr_Disp=strCurr_Disp.strip('<a')
strCurr_Disp=strCurr_Disp.strip('</p>')
strCurr_Disp=strCurr_Disp.strip('<p>')
strCurr_Disp=strCurr_Disp.strip('</strong>')
strCurr_Disp=strCurr_Disp.strip('<p><img')
strCurr_Disp=strCurr_Disp.strip(' <p><img ')
strCurr_Disp=strCurr_Disp.strip(' <img ')
strCurr_Disp=strCurr_Disp.strip('<img width="300"')
strCurr_Disp=strCurr_Disp.strip('<img')
strCurr_Disp=strCurr_Disp.strip(' <img src=')
strCurr_Disp=strCurr_Disp.strip(' align="right" ')
strCurr_Disp=strCurr_Disp.strip(' hspace="20" ')
strCurr_Disp=strCurr_Disp.strip(' vspace="20" ')
strCurr_Disp=strCurr_Disp.strip('<td')
strCurr_Disp=strCurr_Disp.strip('</td')
strCurr_Disp=strCurr_Disp.strip('</td>')
strCurr_Disp=strCurr_Disp.strip('<br')
strCurr_Disp=strCurr_Disp.strip(' <br />')
strCurr_Disp=strCurr_Disp.strip(' <br ')
strCurr_Disp=strCurr_Disp.strip('MISC')
strCurr_Disp=strCurr_Disp.strip(' MISC ')
strCurr_Disp=strCurr_Disp.strip('</a')
strCurr_Disp=Strip_XML1(strCurr_Disp)
strCurr_Disp=Strip_XML2(strCurr_Disp)
strArticles=strArticles + str(strCurr_Disp)
shif.msg=(strArticles)
except EOFError:
pass
shif()
labl.pack()
root.mainloop()
| true
| true
|
1c46d3bf5256bb38fc04428e212b3c747382289c
| 27,516
|
py
|
Python
|
exchangelib/autodiscover/discovery.py
|
denisovkv/exchangelib
|
fcb4cdac9f41e97f849ddab46ebf7cb9b6ca5d7f
|
[
"BSD-2-Clause"
] | null | null | null |
exchangelib/autodiscover/discovery.py
|
denisovkv/exchangelib
|
fcb4cdac9f41e97f849ddab46ebf7cb9b6ca5d7f
|
[
"BSD-2-Clause"
] | null | null | null |
exchangelib/autodiscover/discovery.py
|
denisovkv/exchangelib
|
fcb4cdac9f41e97f849ddab46ebf7cb9b6ca5d7f
|
[
"BSD-2-Clause"
] | null | null | null |
import logging
import time
from urllib.parse import urlparse
import dns.resolver
from ..configuration import Configuration
from ..credentials import OAuth2Credentials
from ..errors import AutoDiscoverFailed, AutoDiscoverCircularRedirect, TransportError, RedirectError, UnauthorizedError
from ..protocol import Protocol, FailFast
from ..transport import get_auth_method_from_response, DEFAULT_HEADERS, NOAUTH, OAUTH2, CREDENTIALS_REQUIRED
from ..util import post_ratelimited, get_domain, get_redirect_url, _back_off_if_needed, _may_retry_on_error, \
is_valid_hostname, DummyResponse, CONNECTION_ERRORS, TLS_ERRORS
from ..version import Version
from .cache import autodiscover_cache
from .properties import Autodiscover
from .protocol import AutodiscoverProtocol
log = logging.getLogger(__name__)
def discover(email, credentials=None, auth_type=None, retry_policy=None):
return Autodiscovery(
email=email, credentials=credentials, auth_type=auth_type, retry_policy=retry_policy
).discover()
class SrvRecord:
"""A container for autodiscover-related SRV records in DNS"""
def __init__(self, priority, weight, port, srv):
self.priority = priority
self.weight = weight
self.port = port
self.srv = srv
def __eq__(self, other):
for k in self.__dict__.keys():
if getattr(self, k) != getattr(other, k):
return False
return True
class Autodiscovery:
"""Autodiscover is a Microsoft protocol for automatically getting the endpoint of the Exchange server and other
connection-related settings holding the email address using only the email address, and username and password of the
user.
For a description of the protocol implemented, see "Autodiscover for Exchange ActiveSync developers":
https://docs.microsoft.com/en-us/previous-versions/office/developer/exchange-server-interoperability-guidance/hh352638%28v%3dexchg.140%29
Descriptions of the steps from the article are provided in their respective methods in this class.
For a description of how to handle autodiscover error messages, see:
https://docs.microsoft.com/en-us/exchange/client-developer/exchange-web-services/handling-autodiscover-error-messages
A tip from the article:
The client can perform steps 1 through 4 in any order or in parallel to expedite the process, but it must wait for
responses to finish at each step before proceeding. Given that many organizations prefer to use the URL in step 2 to
set up the Autodiscover service, the client might try this step first.
Another possibly newer resource which has not yet been attempted is "Outlook 2016 Implementation of Autodiscover":
https://support.microsoft.com/en-us/help/3211279/outlook-2016-implementation-of-autodiscover
WARNING: The autodiscover protocol is very complicated. If you have problems autodiscovering using this
implementation, start by doing an official test at https://testconnectivity.microsoft.com
"""
# When connecting to servers that may not be serving the correct endpoint, we should use a retry policy that does
# not leave us hanging for a long time on each step in the protocol.
INITIAL_RETRY_POLICY = FailFast()
RETRY_WAIT = 10 # Seconds to wait before retry on connection errors
MAX_REDIRECTS = 10 # Maximum number of URL redirects before we give up
def __init__(self, email, credentials=None, auth_type=None, retry_policy=None):
"""
Args:
email: The email address to autodiscover
credentials: Credentials with authorization to make autodiscover lookups for this Account (Default value = None)
auth_type: (Default value = None)
retry_policy: (Default value = None)
"""
self.email = email
self.credentials = credentials
self.auth_type = auth_type # The auth type that the resulting protocol instance should have
self.retry_policy = retry_policy # The retry policy that the resulting protocol instance should have
self._urls_visited = [] # Collects HTTP and Autodiscover redirects
self._redirect_count = 0
self._emails_visited = [] # Collects Autodiscover email redirects
def discover(self):
self._emails_visited.append(self.email.lower())
# Check the autodiscover cache to see if we already know the autodiscover service endpoint for this email
# domain. Use a lock to guard against multiple threads competing to cache information.
log.debug('Waiting for autodiscover_cache lock')
with autodiscover_cache:
log.debug('autodiscover_cache lock acquired')
cache_key = self._cache_key
domain = get_domain(self.email)
if cache_key in autodiscover_cache:
ad_protocol = autodiscover_cache[cache_key]
log.debug('Cache hit for key %s: %s', cache_key, ad_protocol.service_endpoint)
try:
ad_response = self._quick(protocol=ad_protocol)
except AutoDiscoverFailed:
# Autodiscover no longer works with this domain. Clear cache and try again after releasing the lock
log.debug('AD request failure. Removing cache for key %s', cache_key)
del autodiscover_cache[cache_key]
ad_response = self._step_1(hostname=domain)
else:
# This will cache the result
ad_response = self._step_1(hostname=domain)
log.debug('Released autodiscover_cache_lock')
if ad_response.redirect_address:
log.debug('Got a redirect address: %s', ad_response.redirect_address)
if ad_response.redirect_address.lower() in self._emails_visited:
raise AutoDiscoverCircularRedirect('We were redirected to an email address we have already seen')
# Start over, but with the new email address
self.email = ad_response.redirect_address
return self.discover()
# We successfully received a response. Clear the cache of seen emails etc.
self.clear()
return self._build_response(ad_response=ad_response)
def clear(self):
# This resets cached variables
self._urls_visited = []
self._redirect_count = 0
self._emails_visited = []
@property
def _cache_key(self):
# We may be using multiple different credentials and changing our minds on TLS verification. This key
# combination should be safe for caching.
domain = get_domain(self.email)
return domain, self.credentials
def _build_response(self, ad_response):
ews_url = ad_response.protocol.ews_url
if not ews_url:
raise AutoDiscoverFailed("Response is missing an 'ews_url' value")
if not ad_response.autodiscover_smtp_address:
# Autodiscover does not always return an email address. In that case, the requesting email should be used
ad_response.user.autodiscover_smtp_address = self.email
# Get the server version. Not all protocol entries have a server version so we cheat a bit and also look at the
# other ones that point to the same endpoint.
for protocol in ad_response.account.protocols:
if not protocol.ews_url or not protocol.server_version:
continue
if protocol.ews_url.lower() == ews_url.lower():
version = Version(build=protocol.server_version)
break
else:
version = None
# We may not want to use the auth_package hints in the AD response. It could be incorrect and we can just guess.
protocol = Protocol(
config=Configuration(
service_endpoint=ews_url,
credentials=self.credentials,
version=version,
auth_type=self.auth_type,
retry_policy=self.retry_policy,
)
)
return ad_response, protocol
def _quick(self, protocol):
# Reset auth type and retry policy if we requested non-default values
if self.auth_type:
protocol.config.auth_type = self.auth_type
if self.retry_policy:
protocol.config.retry_policy = self.retry_policy
try:
r = self._get_authenticated_response(protocol=protocol)
except TransportError as e:
raise AutoDiscoverFailed('Response error: %s' % e)
if r.status_code == 200:
try:
ad = Autodiscover.from_bytes(bytes_content=r.content)
return self._step_5(ad=ad)
except ValueError as e:
raise AutoDiscoverFailed('Invalid response: %s' % e)
raise AutoDiscoverFailed('Invalid response code: %s' % r.status_code)
def _redirect_url_is_valid(self, url):
"""Three separate responses can be “Redirect responses”:
* An HTTP status code (301, 302) with a new URL
* An HTTP status code of 200, but with a payload XML containing a redirect to a different URL
* An HTTP status code of 200, but with a payload XML containing a different SMTP address as the target address
We only handle the HTTP 302 redirects here. We validate the URL received in the redirect response to ensure that
it does not redirect to non-SSL endpoints or SSL endpoints with invalid certificates, and that the redirect is
not circular. Finally, we should fail after 10 redirects.
Args:
url:
"""
if url.lower() in self._urls_visited:
log.warning('We have already tried this URL: %s', url)
return False
if self._redirect_count >= self.MAX_REDIRECTS:
log.warning('We reached max redirects at URL: %s', url)
return False
# We require TLS endpoints
if not url.startswith('https://'):
log.debug('Invalid scheme for URL: %s', url)
return False
# Quick test that the endpoint responds and that TLS handshake is OK
try:
self._get_unauthenticated_response(url, method='head')
except TransportError as e:
log.debug('Response error on redirect URL %s: %s', url, e)
return False
self._redirect_count += 1
return True
def _get_unauthenticated_response(self, url, method='post'):
"""Get auth type by tasting headers from the server. Do POST requests be default. HEAD is too error prone, and
some servers are set up to redirect to OWA on all requests except POST to the autodiscover endpoint.
Args:
url:
method: (Default value = 'post')
"""
# We are connecting to untrusted servers here, so take necessary precautions.
hostname = urlparse(url).netloc
if not is_valid_hostname(hostname, timeout=AutodiscoverProtocol.TIMEOUT):
# 'requests' is really bad at reporting that a hostname cannot be resolved. Let's check this separately.
# Don't retry on DNS errors. They will most likely be persistent.
raise TransportError('%r has no DNS entry' % hostname)
kwargs = dict(
url=url, headers=DEFAULT_HEADERS.copy(), allow_redirects=False, timeout=AutodiscoverProtocol.TIMEOUT
)
if method == 'post':
kwargs['data'] = Autodiscover.payload(email=self.email)
retry = 0
t_start = time.monotonic()
while True:
_back_off_if_needed(self.INITIAL_RETRY_POLICY.back_off_until)
log.debug('Trying to get response from %s', url)
with AutodiscoverProtocol.raw_session() as s:
try:
r = getattr(s, method)(**kwargs)
r.close() # Release memory
break
except TLS_ERRORS as e:
# Don't retry on TLS errors. They will most likely be persistent.
raise TransportError(str(e))
except CONNECTION_ERRORS as e:
r = DummyResponse(url=url, headers={}, request_headers=kwargs['headers'])
total_wait = time.monotonic() - t_start
if _may_retry_on_error(response=r, retry_policy=self.INITIAL_RETRY_POLICY, wait=total_wait):
log.debug("Connection error on URL %s (retry %s, error: %s). Cool down", url, retry, e)
self.INITIAL_RETRY_POLICY.back_off(self.RETRY_WAIT)
retry += 1
continue
else:
log.debug("Connection error on URL %s: %s", url, e)
raise TransportError(str(e))
try:
auth_type = get_auth_method_from_response(response=r)
except UnauthorizedError:
# Failed to guess the auth type
auth_type = NOAUTH
if r.status_code in (301, 302):
if 'location' in r.headers:
# Make the redirect URL absolute
try:
r.headers['location'] = get_redirect_url(r)
except TransportError:
del r.headers['location']
return auth_type, r
def _get_authenticated_response(self, protocol):
"""Get a response by using the credentials provided. We guess the auth type along the way.
Args:
protocol:
"""
# Redo the request with the correct auth
data = Autodiscover.payload(email=self.email)
# TODO: If Kerberos auth is set, we should set the X-ClientCanHandle='Negotiate' header. See
# https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/pox-autodiscover-request-for-exchange
headers = DEFAULT_HEADERS.copy()
try:
session = protocol.get_session()
r, session = post_ratelimited(protocol=protocol, session=session, url=protocol.service_endpoint,
headers=headers, data=data, allow_redirects=False, stream=False)
protocol.release_session(session)
except UnauthorizedError as e:
# It's entirely possible for the endpoint to ask for login. We should continue if login fails because this
# isn't necessarily the right endpoint to use.
raise TransportError(str(e))
except RedirectError as e:
r = DummyResponse(url=protocol.service_endpoint, headers={'location': e.url}, request_headers=None,
status_code=302)
return r
def _attempt_response(self, url):
"""Returns a (is_valid_response, response) tuple
Args:
url:
"""
self._urls_visited.append(url.lower())
log.debug('Attempting to get a valid response from %s', url)
try:
auth_type, r = self._get_unauthenticated_response(url=url)
if isinstance(self.credentials, OAuth2Credentials):
# This type of credentials *must* use the OAuth auth type
auth_type = OAUTH2
elif self.credentials is None and auth_type in CREDENTIALS_REQUIRED:
raise ValueError('Auth type %r was detected but no credentials were provided' % auth_type)
ad_protocol = AutodiscoverProtocol(
config=Configuration(
service_endpoint=url,
credentials=self.credentials,
auth_type=auth_type,
retry_policy=self.INITIAL_RETRY_POLICY,
)
)
if auth_type != NOAUTH:
r = self._get_authenticated_response(protocol=ad_protocol)
except TransportError as e:
log.debug('Failed to get a response: %s', e)
return False, None
if r.status_code in (301, 302) and 'location' in r.headers:
redirect_url = get_redirect_url(r)
if self._redirect_url_is_valid(url=redirect_url):
# The protocol does not specify this explicitly, but by looking at how testconnectivity.microsoft.com
# works, it seems that we should follow this URL now and try to get a valid response.
return self._attempt_response(url=redirect_url)
if r.status_code == 200:
try:
ad = Autodiscover.from_bytes(bytes_content=r.content)
# We got a valid response. Unless this is a URL redirect response, we cache the result
if ad.response is None or not ad.response.redirect_url:
cache_key = self._cache_key
log.debug('Adding cache entry for key %s: %s', cache_key, ad_protocol.service_endpoint)
autodiscover_cache[cache_key] = ad_protocol
return True, ad
except ValueError as e:
log.debug('Invalid response: %s', e)
return False, None
def _step_1(self, hostname):
"""The client sends an Autodiscover request to https://example.com/autodiscover/autodiscover.xml and then does
one of the following:
* If the Autodiscover attempt succeeds, the client proceeds to step 5.
* If the Autodiscover attempt fails, the client proceeds to step 2.
Args:
hostname:
"""
url = 'https://%s/Autodiscover/Autodiscover.xml' % hostname
log.info('Step 1: Trying autodiscover on %r with email %r', url, self.email)
is_valid_response, ad = self._attempt_response(url=url)
if is_valid_response:
return self._step_5(ad=ad)
else:
return self._step_2(hostname=hostname)
def _step_2(self, hostname):
"""The client sends an Autodiscover request to https://autodiscover.example.com/autodiscover/autodiscover.xml
and then does one of the following:
* If the Autodiscover attempt succeeds, the client proceeds to step 5.
* If the Autodiscover attempt fails, the client proceeds to step 3.
Args:
hostname:
"""
url = 'https://autodiscover.%s/Autodiscover/Autodiscover.xml' % hostname
log.info('Step 2: Trying autodiscover on %r with email %r', url, self.email)
is_valid_response, ad = self._attempt_response(url=url)
if is_valid_response:
return self._step_5(ad=ad)
else:
return self._step_3(hostname=hostname)
def _step_3(self, hostname):
"""The client sends an unauth'ed GET method request to
http://autodiscover.example.com/autodiscover/autodiscover.xml (Note that this is a non-HTTPS endpoint). The
client then does one of the following:
* If the GET request returns a 302 redirect response, it gets the redirection URL from the 'Location' HTTP
header and validates it as described in the "Redirect responses" section. The client then does one of the
following:
* If the redirection URL is valid, the client tries the URL and then does one of the following:
* If the attempt succeeds, the client proceeds to step 5.
* If the attempt fails, the client proceeds to step 4.
* If the redirection URL is not valid, the client proceeds to step 4.
* If the GET request does not return a 302 redirect response, the client proceeds to step 4.
Args:
hostname:
"""
url = 'http://autodiscover.%s/Autodiscover/Autodiscover.xml' % hostname
log.info('Step 3: Trying autodiscover on %r with email %r', url, self.email)
try:
_, r = self._get_unauthenticated_response(url=url, method='get')
except TransportError:
r = DummyResponse(url=url, headers={}, request_headers={})
if r.status_code in (301, 302) and 'location' in r.headers:
redirect_url = get_redirect_url(r)
if self._redirect_url_is_valid(url=redirect_url):
is_valid_response, ad = self._attempt_response(url=redirect_url)
if is_valid_response:
return self._step_5(ad=ad)
else:
return self._step_4(hostname=hostname)
else:
return self._step_4(hostname=hostname)
else:
return self._step_4(hostname=hostname)
def _step_4(self, hostname):
"""The client performs a Domain Name System (DNS) query for an SRV record for _autodiscover._tcp.example.com.
The query might return multiple records. The client selects only records that point to an SSL endpoint and that
have the highest priority and weight. One of the following actions then occurs:
* If no such records are returned, the client proceeds to step 6.
* If records are returned, the application randomly chooses a record in the list and validates the endpoint
that it points to by following the process described in the "Redirect Response" section. The client then
does one of the following:
* If the redirection URL is valid, the client tries the URL and then does one of the following:
* If the attempt succeeds, the client proceeds to step 5.
* If the attempt fails, the client proceeds to step 6.
* If the redirection URL is not valid, the client proceeds to step 6.
Args:
hostname:
"""
dns_hostname = '_autodiscover._tcp.%s' % hostname
log.info('Step 4: Trying autodiscover on %r with email %r', dns_hostname, self.email)
srv_records = _get_srv_records(dns_hostname)
try:
srv_host = _select_srv_host(srv_records)
except ValueError:
srv_host = None
if not srv_host:
return self._step_6()
else:
redirect_url = 'https://%s/Autodiscover/Autodiscover.xml' % srv_host
if self._redirect_url_is_valid(url=redirect_url):
is_valid_response, ad = self._attempt_response(url=redirect_url)
if is_valid_response:
return self._step_5(ad=ad)
else:
return self._step_6()
else:
return self._step_6()
def _step_5(self, ad):
"""When a valid Autodiscover request succeeds, the following sequence occurs:
* If the server responds with an HTTP 302 redirect, the client validates the redirection URL according to
the process defined in the "Redirect responses" and then does one of the following:
* If the redirection URL is valid, the client tries the URL and then does one of the following:
* If the attempt succeeds, the client repeats step 5 from the beginning.
* If the attempt fails, the client proceeds to step 6.
* If the redirection URL is not valid, the client proceeds to step 6.
* If the server responds with a valid Autodiscover response, the client does one of the following:
* If the value of the Action element is "Redirect", the client gets the redirection email address from
the Redirect element and then returns to step 1, using this new email address.
* If the value of the Action element is "Settings", the client has successfully received the requested
configuration settings for the specified user. The client does not need to proceed to step 6.
Args:
ad:
"""
log.info('Step 5: Checking response')
if ad.response is None:
# This is not explicit in the protocol, but let's raise errors here
ad.raise_errors()
ad_response = ad.response
if ad_response.redirect_url:
log.debug('Got a redirect URL: %s', ad_response.redirect_url)
# We are diverging a bit from the protocol here. We will never get an HTTP 302 since earlier steps already
            # followed the redirects where possible. Instead, we handle redirect responses here.
if self._redirect_url_is_valid(url=ad_response.redirect_url):
is_valid_response, ad = self._attempt_response(url=ad_response.redirect_url)
if is_valid_response:
return self._step_5(ad=ad)
else:
return self._step_6()
else:
log.debug('Invalid redirect URL: %s', ad_response.redirect_url)
return self._step_6()
else:
# This could be an email redirect. Let outer layer handle this
return ad_response
def _step_6(self):
"""If the client cannot contact the Autodiscover service, the client should ask the user for the Exchange server
name and use it to construct an Exchange EWS URL. The client should try to use this URL for future requests.
"""
raise AutoDiscoverFailed(
'All steps in the autodiscover protocol failed for email %r. If you think this is an error, consider doing '
'an official test at https://testconnectivity.microsoft.com' % self.email)
def _get_srv_records(hostname):
"""Send a DNS query for SRV entries for the hostname.
An SRV entry that has been formatted for autodiscovery will have the following format:
canonical name = mail.example.com.
service = 8 100 443 webmail.example.com.
The first three numbers in the service line are: priority, weight, port
Args:
hostname:
"""
log.debug('Attempting to get SRV records for %s', hostname)
resolver = dns.resolver.Resolver()
resolver.timeout = AutodiscoverProtocol.TIMEOUT
records = []
try:
answers = resolver.query('%s.' % hostname, 'SRV')
except (dns.resolver.NoNameservers, dns.resolver.NoAnswer, dns.resolver.NXDOMAIN) as e:
log.debug('DNS lookup failure: %s', e)
return records
for rdata in answers:
try:
vals = rdata.to_text().strip().rstrip('.').split(' ')
# Raise ValueError if the first three are not ints, and IndexError if there are less than 4 values
priority, weight, port, srv = int(vals[0]), int(vals[1]), int(vals[2]), vals[3]
record = SrvRecord(priority=priority, weight=weight, port=port, srv=srv)
log.debug('Found SRV record %s ', record)
records.append(record)
except (ValueError, IndexError):
log.debug('Incompatible SRV record for %s (%s)', hostname, rdata.to_text())
return records
def _select_srv_host(srv_records):
"""Select the record with the highest priority, that also supports TLS
Args:
srv_records:
"""
best_record = None
for srv_record in srv_records:
if srv_record.port != 443:
log.debug('Skipping SRV record %r (no TLS)', srv_record)
continue
# Assume port 443 will serve TLS. If not, autodiscover will probably also be broken for others.
if best_record is None or best_record.priority < srv_record.priority:
best_record = srv_record
if not best_record:
raise ValueError('No suitable records')
return best_record.srv
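# Hedged usage sketch (added code, illustration only, not part of the original module):
# driving the top-level discover() helper. The email address and the Credentials
# import are assumptions for illustration.
def _example_autodiscover():
    from ..credentials import Credentials
    ad_response, protocol = discover('user@example.com',
                                     credentials=Credentials('user@example.com', 'secret'))
    return protocol.service_endpoint
# Note on _select_srv_host() above: it keeps only port-443 records and prefers the
# *highest* numeric priority, which follows this code as written (RFC 2782 treats lower
# priority values as preferred).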
| 47.523316
| 141
| 0.641409
|
import logging
import time
from urllib.parse import urlparse
import dns.resolver
from ..configuration import Configuration
from ..credentials import OAuth2Credentials
from ..errors import AutoDiscoverFailed, AutoDiscoverCircularRedirect, TransportError, RedirectError, UnauthorizedError
from ..protocol import Protocol, FailFast
from ..transport import get_auth_method_from_response, DEFAULT_HEADERS, NOAUTH, OAUTH2, CREDENTIALS_REQUIRED
from ..util import post_ratelimited, get_domain, get_redirect_url, _back_off_if_needed, _may_retry_on_error, \
is_valid_hostname, DummyResponse, CONNECTION_ERRORS, TLS_ERRORS
from ..version import Version
from .cache import autodiscover_cache
from .properties import Autodiscover
from .protocol import AutodiscoverProtocol
log = logging.getLogger(__name__)
def discover(email, credentials=None, auth_type=None, retry_policy=None):
return Autodiscovery(
email=email, credentials=credentials, auth_type=auth_type, retry_policy=retry_policy
).discover()
class SrvRecord:
def __init__(self, priority, weight, port, srv):
self.priority = priority
self.weight = weight
self.port = port
self.srv = srv
def __eq__(self, other):
for k in self.__dict__.keys():
if getattr(self, k) != getattr(other, k):
return False
return True
class Autodiscovery:
INITIAL_RETRY_POLICY = FailFast()
RETRY_WAIT = 10
MAX_REDIRECTS = 10
def __init__(self, email, credentials=None, auth_type=None, retry_policy=None):
self.email = email
self.credentials = credentials
self.auth_type = auth_type
self.retry_policy = retry_policy
self._urls_visited = []
self._redirect_count = 0
self._emails_visited = []
def discover(self):
self._emails_visited.append(self.email.lower())
log.debug('Waiting for autodiscover_cache lock')
with autodiscover_cache:
log.debug('autodiscover_cache lock acquired')
cache_key = self._cache_key
domain = get_domain(self.email)
if cache_key in autodiscover_cache:
ad_protocol = autodiscover_cache[cache_key]
log.debug('Cache hit for key %s: %s', cache_key, ad_protocol.service_endpoint)
try:
ad_response = self._quick(protocol=ad_protocol)
except AutoDiscoverFailed:
log.debug('AD request failure. Removing cache for key %s', cache_key)
del autodiscover_cache[cache_key]
ad_response = self._step_1(hostname=domain)
else:
ad_response = self._step_1(hostname=domain)
log.debug('Released autodiscover_cache_lock')
if ad_response.redirect_address:
log.debug('Got a redirect address: %s', ad_response.redirect_address)
if ad_response.redirect_address.lower() in self._emails_visited:
raise AutoDiscoverCircularRedirect('We were redirected to an email address we have already seen')
self.email = ad_response.redirect_address
return self.discover()
self.clear()
return self._build_response(ad_response=ad_response)
def clear(self):
self._urls_visited = []
self._redirect_count = 0
self._emails_visited = []
@property
def _cache_key(self):
domain = get_domain(self.email)
return domain, self.credentials
def _build_response(self, ad_response):
ews_url = ad_response.protocol.ews_url
if not ews_url:
raise AutoDiscoverFailed("Response is missing an 'ews_url' value")
if not ad_response.autodiscover_smtp_address:
ad_response.user.autodiscover_smtp_address = self.email
for protocol in ad_response.account.protocols:
if not protocol.ews_url or not protocol.server_version:
continue
if protocol.ews_url.lower() == ews_url.lower():
version = Version(build=protocol.server_version)
break
else:
version = None
protocol = Protocol(
config=Configuration(
service_endpoint=ews_url,
credentials=self.credentials,
version=version,
auth_type=self.auth_type,
retry_policy=self.retry_policy,
)
)
return ad_response, protocol
def _quick(self, protocol):
if self.auth_type:
protocol.config.auth_type = self.auth_type
if self.retry_policy:
protocol.config.retry_policy = self.retry_policy
try:
r = self._get_authenticated_response(protocol=protocol)
except TransportError as e:
raise AutoDiscoverFailed('Response error: %s' % e)
if r.status_code == 200:
try:
ad = Autodiscover.from_bytes(bytes_content=r.content)
return self._step_5(ad=ad)
except ValueError as e:
raise AutoDiscoverFailed('Invalid response: %s' % e)
raise AutoDiscoverFailed('Invalid response code: %s' % r.status_code)
def _redirect_url_is_valid(self, url):
if url.lower() in self._urls_visited:
log.warning('We have already tried this URL: %s', url)
return False
if self._redirect_count >= self.MAX_REDIRECTS:
log.warning('We reached max redirects at URL: %s', url)
return False
if not url.startswith('https://'):
log.debug('Invalid scheme for URL: %s', url)
return False
try:
self._get_unauthenticated_response(url, method='head')
except TransportError as e:
log.debug('Response error on redirect URL %s: %s', url, e)
return False
self._redirect_count += 1
return True
def _get_unauthenticated_response(self, url, method='post'):
hostname = urlparse(url).netloc
if not is_valid_hostname(hostname, timeout=AutodiscoverProtocol.TIMEOUT):
# Don't retry on DNS errors. They will most likely be persistent.
raise TransportError('%r has no DNS entry' % hostname)
kwargs = dict(
url=url, headers=DEFAULT_HEADERS.copy(), allow_redirects=False, timeout=AutodiscoverProtocol.TIMEOUT
)
if method == 'post':
kwargs['data'] = Autodiscover.payload(email=self.email)
retry = 0
t_start = time.monotonic()
while True:
_back_off_if_needed(self.INITIAL_RETRY_POLICY.back_off_until)
log.debug('Trying to get response from %s', url)
with AutodiscoverProtocol.raw_session() as s:
try:
r = getattr(s, method)(**kwargs)
r.close()
break
except TLS_ERRORS as e:
raise TransportError(str(e))
except CONNECTION_ERRORS as e:
r = DummyResponse(url=url, headers={}, request_headers=kwargs['headers'])
total_wait = time.monotonic() - t_start
if _may_retry_on_error(response=r, retry_policy=self.INITIAL_RETRY_POLICY, wait=total_wait):
log.debug("Connection error on URL %s (retry %s, error: %s). Cool down", url, retry, e)
self.INITIAL_RETRY_POLICY.back_off(self.RETRY_WAIT)
retry += 1
continue
else:
log.debug("Connection error on URL %s: %s", url, e)
raise TransportError(str(e))
try:
auth_type = get_auth_method_from_response(response=r)
except UnauthorizedError:
# Failed to guess the auth type
auth_type = NOAUTH
if r.status_code in (301, 302):
if 'location' in r.headers:
# Make the redirect URL absolute
try:
r.headers['location'] = get_redirect_url(r)
except TransportError:
del r.headers['location']
return auth_type, r
def _get_authenticated_response(self, protocol):
# Redo the request with the correct auth
data = Autodiscover.payload(email=self.email)
# TODO: If Kerberos auth is set, we should set the X-ClientCanHandle='Negotiate' header. See
# https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/pox-autodiscover-request-for-exchange
headers = DEFAULT_HEADERS.copy()
try:
session = protocol.get_session()
r, session = post_ratelimited(protocol=protocol, session=session, url=protocol.service_endpoint,
headers=headers, data=data, allow_redirects=False, stream=False)
protocol.release_session(session)
except UnauthorizedError as e:
# It's entirely possible for the endpoint to ask for login. We should continue if login fails because this
raise TransportError(str(e))
except RedirectError as e:
r = DummyResponse(url=protocol.service_endpoint, headers={'location': e.url}, request_headers=None,
status_code=302)
return r
def _attempt_response(self, url):
self._urls_visited.append(url.lower())
log.debug('Attempting to get a valid response from %s', url)
try:
auth_type, r = self._get_unauthenticated_response(url=url)
if isinstance(self.credentials, OAuth2Credentials):
# This type of credentials *must* use the OAuth auth type
auth_type = OAUTH2
elif self.credentials is None and auth_type in CREDENTIALS_REQUIRED:
raise ValueError('Auth type %r was detected but no credentials were provided' % auth_type)
ad_protocol = AutodiscoverProtocol(
config=Configuration(
service_endpoint=url,
credentials=self.credentials,
auth_type=auth_type,
retry_policy=self.INITIAL_RETRY_POLICY,
)
)
if auth_type != NOAUTH:
r = self._get_authenticated_response(protocol=ad_protocol)
except TransportError as e:
log.debug('Failed to get a response: %s', e)
return False, None
if r.status_code in (301, 302) and 'location' in r.headers:
redirect_url = get_redirect_url(r)
if self._redirect_url_is_valid(url=redirect_url):
# The protocol does not specify this explicitly, but by looking at how testconnectivity.microsoft.com
# works, it seems that we should follow this URL now and try to get a valid response.
return self._attempt_response(url=redirect_url)
if r.status_code == 200:
try:
ad = Autodiscover.from_bytes(bytes_content=r.content)
# We got a valid response. Unless this is a URL redirect response, we cache the result
if ad.response is None or not ad.response.redirect_url:
cache_key = self._cache_key
log.debug('Adding cache entry for key %s: %s', cache_key, ad_protocol.service_endpoint)
autodiscover_cache[cache_key] = ad_protocol
return True, ad
except ValueError as e:
log.debug('Invalid response: %s', e)
return False, None
def _step_1(self, hostname):
url = 'https://%s/Autodiscover/Autodiscover.xml' % hostname
log.info('Step 1: Trying autodiscover on %r with email %r', url, self.email)
is_valid_response, ad = self._attempt_response(url=url)
if is_valid_response:
return self._step_5(ad=ad)
else:
return self._step_2(hostname=hostname)
def _step_2(self, hostname):
url = 'https://autodiscover.%s/Autodiscover/Autodiscover.xml' % hostname
log.info('Step 2: Trying autodiscover on %r with email %r', url, self.email)
is_valid_response, ad = self._attempt_response(url=url)
if is_valid_response:
return self._step_5(ad=ad)
else:
return self._step_3(hostname=hostname)
def _step_3(self, hostname):
url = 'http://autodiscover.%s/Autodiscover/Autodiscover.xml' % hostname
log.info('Step 3: Trying autodiscover on %r with email %r', url, self.email)
try:
_, r = self._get_unauthenticated_response(url=url, method='get')
except TransportError:
r = DummyResponse(url=url, headers={}, request_headers={})
if r.status_code in (301, 302) and 'location' in r.headers:
redirect_url = get_redirect_url(r)
if self._redirect_url_is_valid(url=redirect_url):
is_valid_response, ad = self._attempt_response(url=redirect_url)
if is_valid_response:
return self._step_5(ad=ad)
else:
return self._step_4(hostname=hostname)
else:
return self._step_4(hostname=hostname)
else:
return self._step_4(hostname=hostname)
def _step_4(self, hostname):
dns_hostname = '_autodiscover._tcp.%s' % hostname
log.info('Step 4: Trying autodiscover on %r with email %r', dns_hostname, self.email)
srv_records = _get_srv_records(dns_hostname)
try:
srv_host = _select_srv_host(srv_records)
except ValueError:
srv_host = None
if not srv_host:
return self._step_6()
else:
redirect_url = 'https://%s/Autodiscover/Autodiscover.xml' % srv_host
if self._redirect_url_is_valid(url=redirect_url):
is_valid_response, ad = self._attempt_response(url=redirect_url)
if is_valid_response:
return self._step_5(ad=ad)
else:
return self._step_6()
else:
return self._step_6()
def _step_5(self, ad):
log.info('Step 5: Checking response')
if ad.response is None:
# This is not explicit in the protocol, but let's raise errors here
ad.raise_errors()
ad_response = ad.response
if ad_response.redirect_url:
log.debug('Got a redirect URL: %s', ad_response.redirect_url)
if self._redirect_url_is_valid(url=ad_response.redirect_url):
is_valid_response, ad = self._attempt_response(url=ad_response.redirect_url)
if is_valid_response:
return self._step_5(ad=ad)
else:
return self._step_6()
else:
log.debug('Invalid redirect URL: %s', ad_response.redirect_url)
return self._step_6()
else:
return ad_response
def _step_6(self):
raise AutoDiscoverFailed(
'All steps in the autodiscover protocol failed for email %r. If you think this is an error, consider doing '
'an official test at https://testconnectivity.microsoft.com' % self.email)
def _get_srv_records(hostname):
log.debug('Attempting to get SRV records for %s', hostname)
resolver = dns.resolver.Resolver()
resolver.timeout = AutodiscoverProtocol.TIMEOUT
records = []
try:
answers = resolver.query('%s.' % hostname, 'SRV')
except (dns.resolver.NoNameservers, dns.resolver.NoAnswer, dns.resolver.NXDOMAIN) as e:
log.debug('DNS lookup failure: %s', e)
return records
for rdata in answers:
try:
vals = rdata.to_text().strip().rstrip('.').split(' ')
priority, weight, port, srv = int(vals[0]), int(vals[1]), int(vals[2]), vals[3]
record = SrvRecord(priority=priority, weight=weight, port=port, srv=srv)
log.debug('Found SRV record %s ', record)
records.append(record)
except (ValueError, IndexError):
log.debug('Incompatible SRV record for %s (%s)', hostname, rdata.to_text())
return records
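# A hedged worked example of the parsing above (the record text is hypothetical):
# dnspython's rdata.to_text() yields 'priority weight port target.' strings.
#   vals = '10 20 443 mail.example.com.'.strip().rstrip('.').split(' ')
#   priority, weight, port, srv = int(vals[0]), int(vals[1]), int(vals[2]), vals[3]
#   # -> (10, 20, 443, 'mail.example.com')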
def _select_srv_host(srv_records):
best_record = None
for srv_record in srv_records:
if srv_record.port != 443:
log.debug('Skipping SRV record %r (no TLS)', srv_record)
continue
if best_record is None or best_record.priority < srv_record.priority:
best_record = srv_record
if not best_record:
raise ValueError('No suitable records')
return best_record.srv
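# Note: the selector above keeps the *highest* priority value, whereas RFC 2782
# clients normally prefer the lowest value and use weight only for tie-breaking.
# A minimal sketch of an RFC-style pick, assuming the same SrvRecord fields:
def _select_srv_host_rfc2782(srv_records):
    candidates = [r for r in srv_records if r.port == 443]  # TLS-only, as above
    if not candidates:
        raise ValueError('No suitable records')
    # lowest priority wins; within a priority, prefer the largest weight
    # (a simplified, non-random stand-in for RFC 2782 weighted selection)
    return min(candidates, key=lambda r: (r.priority, -r.weight)).srv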
| true
| true
|
1c46d49bbe5567ce4f5689afc64fec986b8a50d0
| 439
|
py
|
Python
|
projects/golem_integration/tests/browser/find/find_element_not_found.py
|
kangchenwei/keyautotest2
|
f980d46cabfc128b2099af3d33968f236923063f
|
[
"MIT"
] | null | null | null |
projects/golem_integration/tests/browser/find/find_element_not_found.py
|
kangchenwei/keyautotest2
|
f980d46cabfc128b2099af3d33968f236923063f
|
[
"MIT"
] | null | null | null |
projects/golem_integration/tests/browser/find/find_element_not_found.py
|
kangchenwei/keyautotest2
|
f980d46cabfc128b2099af3d33968f236923063f
|
[
"MIT"
] | null | null | null |
from golem import actions
from golem.core.exceptions import ElementNotFound
description = 'Verify the webdriver.find method throws error when element is not found'
def test(data):
actions.navigate(data.env.url+'elements/')
browser = actions.get_browser()
selector = '.invalid-selector-value'
actions.step('Find element by css')
try:
elem = browser.find(css=selector)
except ElementNotFound:
pass
| 27.4375
| 87
| 0.71754
|
from golem import actions
from golem.core.exceptions import ElementNotFound
description = 'Verify the webdriver.find method throws error when element is not found'
def test(data):
actions.navigate(data.env.url+'elements/')
browser = actions.get_browser()
selector = '.invalid-selector-value'
actions.step('Find element by css')
try:
elem = browser.find(css=selector)
except ElementNotFound:
pass
| true
| true
|
1c46d4f59678cd4c42ab336c2ddd37684bf8a54e
| 580
|
py
|
Python
|
tests/spline.py
|
parmes/solfec-2.0
|
3329d3e1e4d58fefaf976c04bab19284aef45bc2
|
[
"MIT"
] | 1
|
2020-06-21T23:52:25.000Z
|
2020-06-21T23:52:25.000Z
|
tests/spline.py
|
parmes/solfec-2.0
|
3329d3e1e4d58fefaf976c04bab19284aef45bc2
|
[
"MIT"
] | 1
|
2020-05-01T14:44:01.000Z
|
2020-05-01T23:50:36.000Z
|
tests/spline.py
|
parmes/solfec-2.0
|
3329d3e1e4d58fefaf976c04bab19284aef45bc2
|
[
"MIT"
] | 2
|
2020-06-21T23:59:21.000Z
|
2021-12-09T09:49:50.000Z
|
# Solfec-2.0 input command test: SPLINE
import sys, os
d0 = os.path.dirname(os.path.realpath(sys.argv[1]))
spl0 = SPLINE (os.path.join(d0,'spline.txt'));
spl1 = SPLINE (os.path.join(d0,'spline.txt'), cache = 10)
lst2 = [0, 10, 1, 11, 2, 12, 3, 13, 4, 14, 5, 15, 6, 16];
spl2 = SPLINE (lst2);
lst3 = [[0, 10], [1, 11], [2, 12], [3, 13], [4, 14], [5, 15], [6, 16]];
spl3 = SPLINE (lst3);
lst4 = [(0, 10), (1, 11), (2, 12), (3, 13), (4, 14), (5, 15), (6, 16)];
spl4 = SPLINE (lst4);
print_SPLINE(spl0)
print_SPLINE(spl1)
print_SPLINE(spl2)
print_SPLINE(spl3)
print_SPLINE(spl4)
| 26.363636
| 71
| 0.593103
|
import sys, os
d0 = os.path.dirname(os.path.realpath(sys.argv[1]))
spl0 = SPLINE (os.path.join(d0,'spline.txt'));
spl1 = SPLINE (os.path.join(d0,'spline.txt'), cache = 10)
lst2 = [0, 10, 1, 11, 2, 12, 3, 13, 4, 14, 5, 15, 6, 16];
spl2 = SPLINE (lst2);
lst3 = [[0, 10], [1, 11], [2, 12], [3, 13], [4, 14], [5, 15], [6, 16]];
spl3 = SPLINE (lst3);
lst4 = [(0, 10), (1, 11), (2, 12), (3, 13), (4, 14), (5, 15), (6, 16)];
spl4 = SPLINE (lst4);
print_SPLINE(spl0)
print_SPLINE(spl1)
print_SPLINE(spl2)
print_SPLINE(spl3)
print_SPLINE(spl4)
| true
| true
|
1c46d5eee6f5de64e17b1f5566525b7d8e6e6eb6
| 1,597
|
py
|
Python
|
application/tictactoe/datastore.py
|
Deephan/tic-tac-toe-for-slack
|
d3aa7e9c2bc52d8afad6d8057ebb60373b100a78
|
[
"Apache-2.0"
] | null | null | null |
application/tictactoe/datastore.py
|
Deephan/tic-tac-toe-for-slack
|
d3aa7e9c2bc52d8afad6d8057ebb60373b100a78
|
[
"Apache-2.0"
] | 4
|
2016-07-05T16:11:31.000Z
|
2016-07-05T16:16:26.000Z
|
application/tictactoe/datastore.py
|
Deephan/tic-tac-toe-for-slack
|
d3aa7e9c2bc52d8afad6d8057ebb60373b100a78
|
[
"Apache-2.0"
] | null | null | null |
'''
datastore.py
Datastore module for the game of Tic-Tac-Toe
Note: this module is currently inactive; work remains to persist the state of the game.
'''
from google.appengine.ext import ndb  # assumed backend: App Engine NDB (import was missing in the original)
class DataStore:
class State(ndb.Model):
""" Stores the current state of the board """
board = ndb.StringProperty()
moves = ndb.IntegerProperty()
date = ndb.DateTimeProperty(auto_now_add=True)
def retrieveState():
query = State.query()
states = query.order(-State.date).fetch(1)
lastState = []
turns = None
# deserialize the most recently stored board; otherwise fall back to an empty 3x3 board
if len(states) > 0:
for state in states:
lastState = deserializeBoard(state.board)
turns = state.moves
else:
lastState = [['#','#','#'],['#','#','#'],['#','#','#']]
turns = 9
return (lastState, turns)
def storeState():
serialized_state = serializeBoard(currentState)
State(board = serialized_state, moves = turns).put()
return
def serializeBoard(board):
state = ""
for row in board:
for col in row:
state += col
return state
def deserializeBoard(state):
ROWS = COLS = 3
board = []
count = 0
while ROWS > 0:
row = []
while COLS > 0:
row.append(str(state[count]))
count += 1
COLS -= 1
board.append(row)
ROWS -= 1
COLS = 3
return board
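# A hedged round-trip sketch of the two helpers above. Assumption: they are
# refactored to module level -- as written they sit inside DataStore without
# self and cannot be called like this.
#   board = [['X', 'O', '#'], ['#', 'X', '#'], ['#', '#', 'O']]
#   state = serializeBoard(board)            # -> 'XO##X###O'
#   assert deserializeBoard(state) == board  # 3x3 structure is restored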
| 27.067797
| 93
| 0.513463
|
class DataStore:
class State(ndb.Model):
board = ndb.StringProperty()
moves = ndb.IntegerProperty()
date = ndb.DateTimeProperty(auto_now_add=True)
def retrieveState():
query = State.query()
states = query.order(-State.date).fetch(1)
lastState = []
turns = None
if len(states) > 0:
for state in states:
lastState = deserializeBoard(state.board)
turns = state.moves
else:
lastState = [['#','#','#'],['#','#','#'],['#','#','#']]
turns = 9
return (lastState, turns)
def storeState():
serialized_state = serializeBoard(currentState)
State(board = serialized_state, moves = turns).put()
return
def serializeBoard(board):
state = ""
for row in board:
for col in row:
state += col
return state
def deserializeBoard(state):
ROWS = COLS = 3
board = []
count = 0
while ROWS > 0:
row = []
while COLS > 0:
row.append(str(state[count]))
count += 1
COLS -= 1
board.append(row)
ROWS -= 1
COLS = 3
return board
| true
| true
|
1c46d65620086f1fc1ed2ef78050ec11a4ddc8ca
| 670
|
py
|
Python
|
pythran/tests/cases/projection_simplex.py
|
davidbrochart/pythran
|
24b6c8650fe99791a4091cbdc2c24686e86aa67c
|
[
"BSD-3-Clause"
] | 1,647
|
2015-01-13T01:45:38.000Z
|
2022-03-28T01:23:41.000Z
|
pythran/tests/cases/projection_simplex.py
|
davidbrochart/pythran
|
24b6c8650fe99791a4091cbdc2c24686e86aa67c
|
[
"BSD-3-Clause"
] | 1,116
|
2015-01-01T09:52:05.000Z
|
2022-03-18T21:06:40.000Z
|
pythran/tests/cases/projection_simplex.py
|
davidbrochart/pythran
|
24b6c8650fe99791a4091cbdc2c24686e86aa67c
|
[
"BSD-3-Clause"
] | 180
|
2015-02-12T02:47:28.000Z
|
2022-03-14T10:28:18.000Z
|
#from https://gist.github.com/mblondel/c99e575a5207c76a99d714e8c6e08e89
#pythran export projection_simplex(float[], int)
#runas import numpy as np; np.random.seed(0); x = np.random.rand(10); projection_simplex(x, 1)
import numpy as np
def projection_simplex(v, z=1):
"""
Old implementation for test and benchmark purposes.
The arguments v and z should be a vector and a scalar, respectively.
"""
n_features = v.shape[0]
u = np.sort(v)[::-1]
cssv = np.cumsum(u) - z
ind = np.arange(n_features) + 1
cond = u - cssv / ind > 0
rho = ind[cond][-1]
theta = cssv[cond][-1] / float(rho)
w = np.maximum(v - theta, 0)
return w
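# A hedged worked example (not part of the pythran export above): projecting
# onto the probability simplex (z=1) yields a nonnegative vector summing to z.
if __name__ == '__main__':
    v = np.array([0.5, 1.2, -0.3])
    w = projection_simplex(v, z=1)  # theta = 0.35 -> array([0.15, 0.85, 0.])
    assert np.all(w >= 0) and abs(w.sum() - 1.0) < 1e-9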
| 33.5
| 94
| 0.653731
|
import numpy as np
def projection_simplex(v, z=1):
n_features = v.shape[0]
u = np.sort(v)[::-1]
cssv = np.cumsum(u) - z
ind = np.arange(n_features) + 1
cond = u - cssv / ind > 0
rho = ind[cond][-1]
theta = cssv[cond][-1] / float(rho)
w = np.maximum(v - theta, 0)
return w
| true
| true
|
1c46d68712cfe5660bca7d1c26bdad8cf4708df8
| 3,921
|
py
|
Python
|
feedler/admin.py
|
pcoder/public-health-ch
|
cebc4849653560c54238b67814074353ff7c01f3
|
[
"MIT"
] | 2
|
2020-10-29T16:27:21.000Z
|
2021-06-07T12:47:46.000Z
|
feedler/admin.py
|
pcoder/public-health-ch
|
cebc4849653560c54238b67814074353ff7c01f3
|
[
"MIT"
] | 11
|
2017-05-09T10:50:28.000Z
|
2021-12-15T17:01:23.000Z
|
feedler/admin.py
|
pcoder/public-health-ch
|
cebc4849653560c54238b67814074353ff7c01f3
|
[
"MIT"
] | 4
|
2017-04-24T13:06:55.000Z
|
2021-06-04T02:18:32.000Z
|
# -*- coding: utf-8 -*-
from django.db import models
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.conf.urls import url
from django.urls import reverse
from django.utils.functional import cached_property
from django.utils.translation import ugettext as _
from django.shortcuts import redirect
from wagtail.admin import messages
from wagtail.contrib.modeladmin.helpers import AdminURLHelper, ButtonHelper
from wagtail.contrib.modeladmin.options import ModelAdmin
from wagtail.contrib.modeladmin.views import IndexView
from wagtail.core.models import Site
from feedler.models import Entry
from feedler.refresh import refresh_streams
from feedler.models.admin import FeedlySettings
class RefreshButtonHelper(ButtonHelper):
"""
This helper constructs a refresh button
"""
button_classnames = ['icon', 'icon-download']
def refresh_button(self, classnames_add=None, classnames_exclude=None):
if classnames_add is None: classnames_add = []
if classnames_exclude is None: classnames_exclude = []
classnames = self.button_classnames + classnames_add
cn = self.finalise_classname(classnames, classnames_exclude)
text = _('Sync {}'.format(self.verbose_name_plural.title()))
return {
'url': self.url_helper.get_action_url('refresh', query_params=self.request.GET),
'label': text, 'classname': cn, 'title': text,
}
class RefreshAdminURLHelper(AdminURLHelper):
"""
This helper constructs the various URLs, overriding the default behaviour
to append the current filters to the action.
"""
non_object_specific_actions = ('create', 'choose_parent', 'index', 'refresh')
def get_action_url(self, action, *args, **kwargs):
query_params = kwargs.pop('query_params', None)
url_name = self.get_action_url_name(action)
if action in self.non_object_specific_actions:
url = reverse(url_name)
else:
url = reverse(url_name, args=args, kwargs=kwargs)
if query_params:
url += '?{params}'.format(params=query_params.urlencode())
return url
def get_action_url_pattern(self, action):
if action in self.non_object_specific_actions:
return self._get_action_url_pattern(action)
return self._get_object_specific_action_url_pattern(action)
class RefreshView(IndexView):
"""
A class-based view that handles the refresh button click
"""
# def export_csv(self):
# data = self.queryset.all()
# response = ...
# return response
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
super().dispatch(request, *args, **kwargs)
site = Site.find_for_request(request)
if not refresh_streams(FeedlySettings.for_site(site)):
messages.error(
request, _('Sorry, could not refresh streams. Please try again in a few minutes, then contact support if the issue persists.'))
return redirect('/admin/feedler/entry/')
class EntryModelAdminMixin(object):
"""
A mixin to add to your model admin; it hooks in the helper classes and the view,
and registers the new URLs.
"""
button_helper_class = RefreshButtonHelper
url_helper_class = RefreshAdminURLHelper
view_class = RefreshView
def get_admin_urls_for_registration(self):
urls = super().get_admin_urls_for_registration()
urls += (
url(
self.url_helper.get_action_url_pattern('refresh'),
self.refresh_view,
name=self.url_helper.get_action_url_name('refresh')
),
)
return urls
def refresh_view(self, request):
kwargs = {'model_admin': self}
view_class = self.view_class
return view_class.as_view(**kwargs)(request)
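# A hedged registration sketch. Assumption: this wiring normally lives in the
# app's wagtail_hooks.py rather than in this module; menu_label is hypothetical.
from wagtail.contrib.modeladmin.options import modeladmin_register

@modeladmin_register
class EntryAdmin(EntryModelAdminMixin, ModelAdmin):
    model = Entry
    menu_label = 'Entries'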
| 38.821782
| 143
| 0.694211
|
from django.db import models
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.conf.urls import url
from django.urls import reverse
from django.utils.functional import cached_property
from django.utils.translation import ugettext as _
from django.shortcuts import redirect
from wagtail.admin import messages
from wagtail.contrib.modeladmin.helpers import AdminURLHelper, ButtonHelper
from wagtail.contrib.modeladmin.options import ModelAdmin
from wagtail.contrib.modeladmin.views import IndexView
from wagtail.core.models import Site
from feedler.models import Entry
from feedler.refresh import refresh_streams
from feedler.models.admin import FeedlySettings
class RefreshButtonHelper(ButtonHelper):
button_classnames = ['icon', 'icon-download']
def refresh_button(self, classnames_add=None, classnames_exclude=None):
if classnames_add is None: classnames_add = []
if classnames_exclude is None: classnames_exclude = []
classnames = self.button_classnames + classnames_add
cn = self.finalise_classname(classnames, classnames_exclude)
text = _('Sync {}'.format(self.verbose_name_plural.title()))
return {
'url': self.url_helper.get_action_url('refresh', query_params=self.request.GET),
'label': text, 'classname': cn, 'title': text,
}
class RefreshAdminURLHelper(AdminURLHelper):
non_object_specific_actions = ('create', 'choose_parent', 'index', 'refresh')
def get_action_url(self, action, *args, **kwargs):
query_params = kwargs.pop('query_params', None)
url_name = self.get_action_url_name(action)
if action in self.non_object_specific_actions:
url = reverse(url_name)
else:
url = reverse(url_name, args=args, kwargs=kwargs)
if query_params:
url += '?{params}'.format(params=query_params.urlencode())
return url
def get_action_url_pattern(self, action):
if action in self.non_object_specific_actions:
return self._get_action_url_pattern(action)
return self._get_object_specific_action_url_pattern(action)
class RefreshView(IndexView):
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
super().dispatch(request, *args, **kwargs)
site = Site.find_for_request(request)
if not refresh_streams(FeedlySettings.for_site(site)):
messages.error(
request, _('Sorry, could not refresh streams. Please try again in a few minutes, then contact support if the issue persists.'))
return redirect('/admin/feedler/entry/')
class EntryModelAdminMixin(object):
button_helper_class = RefreshButtonHelper
url_helper_class = RefreshAdminURLHelper
view_class = RefreshView
def get_admin_urls_for_registration(self):
urls = super().get_admin_urls_for_registration()
urls += (
url(
self.url_helper.get_action_url_pattern('refresh'),
self.refresh_view,
name=self.url_helper.get_action_url_name('refresh')
),
)
return urls
def refresh_view(self, request):
kwargs = {'model_admin': self}
view_class = self.view_class
return view_class.as_view(**kwargs)(request)
| true
| true
|
1c46d82743933279d3da7a04509b37c438837201
| 1,295
|
py
|
Python
|
wazimap/tests/test_geo.py
|
anoited007/country-dashboard
|
577bbcc4992e24c484650895fabbcdf4343e1bdb
|
[
"MIT"
] | 16
|
2017-10-19T03:36:41.000Z
|
2022-03-03T11:46:20.000Z
|
wazimap/tests/test_geo.py
|
ChrisAchinga/wazimap
|
a66a1524030a8b98e7ea0dfb270d1946ca75b3b2
|
[
"MIT"
] | 66
|
2016-02-15T08:59:29.000Z
|
2017-09-21T14:00:43.000Z
|
wazimap/tests/test_geo.py
|
ChrisAchinga/wazimap
|
a66a1524030a8b98e7ea0dfb270d1946ca75b3b2
|
[
"MIT"
] | 18
|
2017-10-06T12:26:37.000Z
|
2021-08-30T01:38:37.000Z
|
from django.test import TestCase
from django.conf import settings
from wazimap.geo import geo_data, GeoData
class GeoTestCase(TestCase):
def test_versioned_geos(self):
# create two geos at different versions
cpt11 = geo_data.geo_model.objects.create(geo_level='municipality', geo_code='cpt', long_name='City of Cape Town', version='2011')
cpt16 = geo_data.geo_model.objects.create(geo_level='municipality', geo_code='cpt', long_name='City of Cape Town', version='2016')
self.assertEquals(cpt16, geo_data.get_geography('cpt', 'municipality'))
self.assertEquals(cpt11, geo_data.get_geography('cpt', 'municipality', '2011'))
self.assertEquals(cpt16, geo_data.get_geography('cpt', 'municipality', '2016'))
def test_geometry(self):
# if the geometry_data is missing the version, we should raise an error
settings.WAZIMAP['geometry_data'] = {'country': 'geo/country.geojson'}
with self.assertRaises(ValueError):
GeoData()
# if the geometry_data level key is empty, GeoData raises an AttributeError
# (from line 188 of geo.py)
settings.WAZIMAP['geometry_data'] = {'': 'geo/country.geojson'}
with self.assertRaises(AttributeError):
GeoData()
| 43.166667
| 138
| 0.695753
|
from django.test import TestCase
from django.conf import settings
from wazimap.geo import geo_data, GeoData
class GeoTestCase(TestCase):
def test_versioned_geos(self):
cpt11 = geo_data.geo_model.objects.create(geo_level='municipality', geo_code='cpt', long_name='City of Cape Town', version='2011')
cpt16 = geo_data.geo_model.objects.create(geo_level='municipality', geo_code='cpt', long_name='City of Cape Town', version='2016')
self.assertEquals(cpt16, geo_data.get_geography('cpt', 'municipality'))
self.assertEquals(cpt11, geo_data.get_geography('cpt', 'municipality', '2011'))
self.assertEquals(cpt16, geo_data.get_geography('cpt', 'municipality', '2016'))
def test_geometry(self):
settings.WAZIMAP['geometry_data'] = {'country': 'geo/country.geojson'}
with self.assertRaises(ValueError):
GeoData()
settings.WAZIMAP['geometry_data'] = {'': 'geo/country.geojson'}
with self.assertRaises(AttributeError):
GeoData()
| true
| true
|
1c46d8fd89313610b00380ac3e01e23cbd64aab7
| 11,884
|
py
|
Python
|
chemdataextractor/cli/pos.py
|
gubschk/CDEWIP
|
fb628593417df5f955eb1fa62176b7cb3c322ebc
|
[
"MIT"
] | null | null | null |
chemdataextractor/cli/pos.py
|
gubschk/CDEWIP
|
fb628593417df5f955eb1fa62176b7cb3c322ebc
|
[
"MIT"
] | null | null | null |
chemdataextractor/cli/pos.py
|
gubschk/CDEWIP
|
fb628593417df5f955eb1fa62176b7cb3c322ebc
|
[
"MIT"
] | 1
|
2021-02-21T02:51:39.000Z
|
2021-02-21T02:51:39.000Z
|
# -*- coding: utf-8 -*-
"""
chemdataextractor.cli.pos
~~~~~~~~~~~~~~~~~~~~~~~~~
Part of speech tagging commands.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import click
from ..doc import Document, Text
from ..nlp.corpus import genia_training, wsj_training, wsj_evaluation, genia_evaluation
from ..nlp.pos import TAGS, ChemApPosTagger, ChemCrfPosTagger
log = logging.getLogger(__name__)
@click.group(name='pos')
@click.pass_context
def pos_cli(ctx):
"""POS tagger commands."""
pass
@pos_cli.command()
@click.option('--output', '-o', help='Output model file.', required=True)
@click.pass_context
def train_all(ctx, output):
"""Train POS tagger on WSJ, GENIA, and both. With and without cluster features."""
click.echo('chemdataextractor.pos.train_all')
click.echo('Output: %s' % output)
ctx.invoke(train, output='%s_wsj_nocluster.pickle' % output, corpus='wsj', clusters=False)
ctx.invoke(train, output='%s_wsj.pickle' % output, corpus='wsj', clusters=True)
ctx.invoke(train, output='%s_genia_nocluster.pickle' % output, corpus='genia', clusters=False)
ctx.invoke(train, output='%s_genia.pickle' % output, corpus='genia', clusters=True)
ctx.invoke(train, output='%s_wsj_genia_nocluster.pickle' % output, corpus='wsj+genia', clusters=False)
ctx.invoke(train, output='%s_wsj_genia.pickle' % output, corpus='wsj+genia', clusters=True)
@pos_cli.command()
@click.argument('model', required=True)
@click.pass_context
def evaluate_all(ctx, model):
"""Evaluate POS taggers on WSJ and GENIA."""
click.echo('chemdataextractor.pos.evaluate_all')
click.echo('Model: %s' % model)
ctx.invoke(evaluate, model='%s_wsj_nocluster.pickle' % model, corpus='wsj', clusters=False)
ctx.invoke(evaluate, model='%s_wsj_nocluster.pickle' % model, corpus='genia', clusters=False)
ctx.invoke(evaluate, model='%s_wsj.pickle' % model, corpus='wsj', clusters=True)
ctx.invoke(evaluate, model='%s_wsj.pickle' % model, corpus='genia', clusters=True)
ctx.invoke(evaluate, model='%s_genia_nocluster.pickle' % model, corpus='wsj', clusters=False)
ctx.invoke(evaluate, model='%s_genia_nocluster.pickle' % model, corpus='genia', clusters=False)
ctx.invoke(evaluate, model='%s_genia.pickle' % model, corpus='wsj', clusters=True)
ctx.invoke(evaluate, model='%s_genia.pickle' % model, corpus='genia', clusters=True)
ctx.invoke(evaluate, model='%s_wsj_genia_nocluster.pickle' % model, corpus='wsj', clusters=False)
ctx.invoke(evaluate, model='%s_wsj_genia_nocluster.pickle' % model, corpus='genia', clusters=False)
ctx.invoke(evaluate, model='%s_wsj_genia.pickle' % model, corpus='wsj', clusters=True)
ctx.invoke(evaluate, model='%s_wsj_genia.pickle' % model, corpus='genia', clusters=True)
@pos_cli.command()
@click.option('--output', '-o', help='Output model file.', required=True)
@click.option('--corpus', type=click.Choice(['wsj', 'genia', 'wsj+genia']), help='Training corpus')
@click.option('--clusters/--no-clusters', help='Whether to use cluster features', default=True)
@click.pass_context
def train(ctx, output, corpus, clusters):
"""Train POS Tagger."""
click.echo('chemdataextractor.pos.train')
click.echo('Output: %s' % output)
click.echo('Corpus: %s' % corpus)
click.echo('Clusters: %s' % clusters)
wsj_sents = []
genia_sents = []
if corpus == 'wsj' or corpus == 'wsj+genia':
wsj_sents = list(wsj_training.tagged_sents())
# For WSJ, remove all tokens with -NONE- tag
for i, wsj_sent in enumerate(wsj_sents):
wsj_sents[i] = [t for t in wsj_sent if not t[1] == '-NONE-']
if corpus == 'genia' or corpus == 'wsj+genia':
genia_sents = list(genia_training.tagged_sents())
# Translate GENIA
for i, genia_sent in enumerate(genia_sents):
for j, (token, tag) in enumerate(genia_sent):
if tag == '(':
genia_sents[i][j] = (token, '-LRB-') # ( to -LRB- (also do for evaluation)
elif tag == ')':
genia_sents[i][j] = (token, '-RRB-') # ) to -RRB- (also do for evaluation)
elif tag == 'CT':
genia_sents[i][j] = (token, 'DT') # Typo?
elif tag == 'XT':
genia_sents[i][j] = (token, 'DT') # Typo?
elif tag == '-':
genia_sents[i][j] = (token, ':') # Single hyphen character for dash
elif tag == 'N':
genia_sents[i][j] = (token, 'NN') # Typo?
elif tag == 'PP':
genia_sents[i][j] = (token, 'PRP') # Typo?
elif tag == '' and token == ')':
genia_sents[i][j] = (token, '-RRB-') # Typo?
elif tag == '' and token == 'IFN-gamma':
genia_sents[i][j] = (token, 'NN') # Typo?
elif '|' in tag:
genia_sents[i][j] = (token, tag.split('|')[0]) # If contains |, choose first part
# Filter any tags not in the allowed tagset (Shouldn't be any left anyway)
genia_sents[i] = [t for t in genia_sent if t[1] in TAGS]
if corpus == 'wsj':
training_corpus = wsj_sents
elif corpus == 'genia':
training_corpus = genia_sents
elif corpus == 'wsj+genia':
training_corpus = wsj_sents + genia_sents
else:
raise click.ClickException('Invalid corpus')
tagger = ChemCrfPosTagger(clusters=clusters)
tagger.train(training_corpus, output)
@pos_cli.command()
@click.argument('model', required=True)
@click.option('--corpus', type=click.Choice(['wsj', 'genia']), help='Evaluation corpus')
@click.option('--clusters/--no-clusters', help='Whether to use cluster features', default=True)
@click.pass_context
def evaluate(ctx, model, corpus, clusters):
"""Evaluate performance of POS Tagger."""
click.echo('chemdataextractor.pos.evaluate')
if corpus == 'wsj':
evaluation = wsj_evaluation
sents = list(evaluation.tagged_sents())
for i, wsj_sent in enumerate(sents):
sents[i] = [t for t in wsj_sent if not t[1] == '-NONE-']
elif corpus == 'genia':
evaluation = genia_evaluation
sents = list(evaluation.tagged_sents())
# Translate GENIA bracket tags
for i, genia_sent in enumerate(sents):
for j, (token, tag) in enumerate(genia_sent):
if tag == '(':
sents[i][j] = (token, '-LRB-')
elif tag == ')':
sents[i][j] = (token, '-RRB-')
else:
raise click.ClickException('Invalid corpus')
tagger = ChemCrfPosTagger(model=model, clusters=clusters)
accuracy = tagger.evaluate(sents)
click.echo('%s on %s: %s' % (model, evaluation, accuracy))
@pos_cli.command()
@click.option('--output', '-o', type=click.File('wb'), help='Output model file.', required=True)
@click.option('--corpus', type=click.Choice(['wsj', 'genia', 'wsj+genia']), help='Training corpus')
@click.option('--clusters/--no-clusters', help='Whether to use cluster features', default=True)
@click.pass_obj
def train_perceptron(ctx, output, corpus, clusters):
"""Train Averaged Perceptron POS Tagger."""
click.echo('chemdataextractor.pos.train')
click.echo('Output: %s' % output)
click.echo('Corpus: %s' % corpus)
click.echo('Clusters: %s' % clusters)
wsj_sents = []
genia_sents = []
if corpus == 'wsj' or corpus == 'wsj+genia':
wsj_sents = list(wsj_training.tagged_sents())
# For WSJ, remove all tokens with -NONE- tag
for i, wsj_sent in enumerate(wsj_sents):
wsj_sents[i] = [t for t in wsj_sent if not t[1] == '-NONE-']
if corpus == 'genia' or corpus == 'wsj+genia':
genia_sents = list(genia_training.tagged_sents())
# Translate GENIA
for i, genia_sent in enumerate(genia_sents):
for j, (token, tag) in enumerate(genia_sent):
if tag == '(':
genia_sents[i][j] = (token, '-LRB-') # ( to -LRB- (also do for evaluation)
elif tag == ')':
genia_sents[i][j] = (token, '-RRB-') # ) to -RRB- (also do for evaluation)
elif tag == 'CT':
genia_sents[i][j] = (token, 'DT') # Typo?
elif tag == 'XT':
genia_sents[i][j] = (token, 'DT') # Typo?
elif tag == '-':
genia_sents[i][j] = (token, ':') # Single hyphen character for dash
elif tag == 'N':
genia_sents[i][j] = (token, 'NN') # Typo?
elif tag == 'PP':
genia_sents[i][j] = (token, 'PRP') # Typo?
elif tag == '' and token == ')':
genia_sents[i][j] = (token, '-RRB-') # Typo?
elif tag == '' and token == 'IFN-gamma':
genia_sents[i][j] = (token, 'NN') # Typo?
elif '|' in tag:
genia_sents[i][j] = (token, tag.split('|')[0]) # If contains |, choose first part
# Filter any tags not in the allowed tagset (Shouldn't be any left anyway)
genia_sents[i] = [t for t in genia_sent if t[1] in TAGS]
if corpus == 'wsj':
training_corpus = wsj_sents
elif corpus == 'genia':
training_corpus = genia_sents
elif corpus == 'wsj+genia':
training_corpus = wsj_sents + genia_sents
else:
raise click.ClickException('Invalid corpus')
tagger = ChemApPosTagger(clusters=clusters)
tagger.train(training_corpus)
tagger.save(output)
@pos_cli.command()
@click.argument('model', required=True)
@click.option('--corpus', type=click.Choice(['wsj', 'genia']), help='Evaluation corpus')
@click.pass_obj
def evaluate_perceptron(ctx, model, corpus):
"""Evaluate performance of Averaged Perceptron POS Tagger."""
click.echo('chemdataextractor.pos.evaluate')
if corpus == 'wsj':
evaluation = wsj_evaluation
sents = list(evaluation.tagged_sents())
for i, wsj_sent in enumerate(sents):
sents[i] = [t for t in wsj_sent if not t[1] == u'-NONE-']
elif corpus == 'genia':
evaluation = genia_evaluation
sents = list(evaluation.tagged_sents())
# Translate GENIA bracket tags
for i, genia_sent in enumerate(sents):
for j, (token, tag) in enumerate(genia_sent):
if tag == u'(':
sents[i][j] = (token, u'-LRB-')
elif tag == u')':
sents[i][j] = (token, u'-RRB-')
else:
raise click.ClickException('Invalid corpus')
tagger = ChemApPosTagger(model=model)
accuracy = tagger.evaluate(sents)
click.echo('%s on %s: %s' % (model, evaluation, accuracy))
@pos_cli.command()
@click.option('--output', '-o', type=click.File('w', encoding='utf8'), help='Output file.', default=click.get_text_stream('stdout'))
@click.argument('input', type=click.File('rb'), default=click.get_binary_stream('stdin'))
@click.pass_obj
def tag(ctx, input, output):
"""Output POS-tagged tokens."""
log.info('chemdataextractor.pos.tag')
log.info('Reading %s' % input.name)
doc = Document.from_file(input)
for element in doc.elements:
if isinstance(element, Text):
for sentence in element.sentences:
output.write(u' '.join(u'/'.join([token, tag]) for token, tag in sentence.pos_tagged_tokens))
output.write(u'\n')
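# Hedged usage sketch. Assumption: the package's ``cde`` console entry point
# exposes this click group; command and option names mirror the decorators above.
#   cde pos train -o pos_crf.pickle --corpus wsj+genia --clusters
#   cde pos evaluate pos_crf_wsj_genia.pickle --corpus genia --clusters
#   cde pos tag paper.html -o tagged.txt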
| 44.676692
| 133
| 0.588186
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import click
from ..doc import Document, Text
from ..nlp.corpus import genia_training, wsj_training, wsj_evaluation, genia_evaluation
from ..nlp.pos import TAGS, ChemApPosTagger, ChemCrfPosTagger
log = logging.getLogger(__name__)
@click.group(name='pos')
@click.pass_context
def pos_cli(ctx):
pass
@pos_cli.command()
@click.option('--output', '-o', help='Output model file.', required=True)
@click.pass_context
def train_all(ctx, output):
click.echo('chemdataextractor.pos.train_all')
click.echo('Output: %s' % output)
ctx.invoke(train, output='%s_wsj_nocluster.pickle' % output, corpus='wsj', clusters=False)
ctx.invoke(train, output='%s_wsj.pickle' % output, corpus='wsj', clusters=True)
ctx.invoke(train, output='%s_genia_nocluster.pickle' % output, corpus='genia', clusters=False)
ctx.invoke(train, output='%s_genia.pickle' % output, corpus='genia', clusters=True)
ctx.invoke(train, output='%s_wsj_genia_nocluster.pickle' % output, corpus='wsj+genia', clusters=False)
ctx.invoke(train, output='%s_wsj_genia.pickle' % output, corpus='wsj+genia', clusters=True)
@pos_cli.command()
@click.argument('model', required=True)
@click.pass_context
def evaluate_all(ctx, model):
click.echo('chemdataextractor.pos.evaluate_all')
click.echo('Model: %s' % model)
ctx.invoke(evaluate, model='%s_wsj_nocluster.pickle' % model, corpus='wsj', clusters=False)
ctx.invoke(evaluate, model='%s_wsj_nocluster.pickle' % model, corpus='genia', clusters=False)
ctx.invoke(evaluate, model='%s_wsj.pickle' % model, corpus='wsj', clusters=True)
ctx.invoke(evaluate, model='%s_wsj.pickle' % model, corpus='genia', clusters=True)
ctx.invoke(evaluate, model='%s_genia_nocluster.pickle' % model, corpus='wsj', clusters=False)
ctx.invoke(evaluate, model='%s_genia_nocluster.pickle' % model, corpus='genia', clusters=False)
ctx.invoke(evaluate, model='%s_genia.pickle' % model, corpus='wsj', clusters=True)
ctx.invoke(evaluate, model='%s_genia.pickle' % model, corpus='genia', clusters=True)
ctx.invoke(evaluate, model='%s_wsj_genia_nocluster.pickle' % model, corpus='wsj', clusters=False)
ctx.invoke(evaluate, model='%s_wsj_genia_nocluster.pickle' % model, corpus='genia', clusters=False)
ctx.invoke(evaluate, model='%s_wsj_genia.pickle' % model, corpus='wsj', clusters=True)
ctx.invoke(evaluate, model='%s_wsj_genia.pickle' % model, corpus='genia', clusters=True)
@pos_cli.command()
@click.option('--output', '-o', help='Output model file.', required=True)
@click.option('--corpus', type=click.Choice(['wsj', 'genia', 'wsj+genia']), help='Training corpus')
@click.option('--clusters/--no-clusters', help='Whether to use cluster features', default=True)
@click.pass_context
def train(ctx, output, corpus, clusters):
click.echo('chemdataextractor.pos.train')
click.echo('Output: %s' % output)
click.echo('Corpus: %s' % corpus)
click.echo('Clusters: %s' % clusters)
wsj_sents = []
genia_sents = []
if corpus == 'wsj' or corpus == 'wsj+genia':
wsj_sents = list(wsj_training.tagged_sents())
for i, wsj_sent in enumerate(wsj_sents):
wsj_sents[i] = [t for t in wsj_sent if not t[1] == '-NONE-']
if corpus == 'genia' or corpus == 'wsj+genia':
genia_sents = list(genia_training.tagged_sents())
for i, genia_sent in enumerate(genia_sents):
for j, (token, tag) in enumerate(genia_sent):
if tag == '(':
genia_sents[i][j] = (token, '-LRB-')
elif tag == ')':
genia_sents[i][j] = (token, '-RRB-')
elif tag == 'CT':
genia_sents[i][j] = (token, 'DT')
elif tag == 'XT':
genia_sents[i][j] = (token, 'DT')
elif tag == '-':
genia_sents[i][j] = (token, ':')
elif tag == 'N':
genia_sents[i][j] = (token, 'NN')
elif tag == 'PP':
genia_sents[i][j] = (token, 'PRP')
elif tag == '' and token == ')':
genia_sents[i][j] = (token, '-RRB-')
elif tag == '' and token == 'IFN-gamma':
genia_sents[i][j] = (token, 'NN')
elif '|' in tag:
genia_sents[i][j] = (token, tag.split('|')[0])
genia_sents[i] = [t for t in genia_sent if t[1] in TAGS]
if corpus == 'wsj':
training_corpus = wsj_sents
elif corpus == 'genia':
training_corpus = genia_sents
elif corpus == 'wsj+genia':
training_corpus = wsj_sents + genia_sents
else:
raise click.ClickException('Invalid corpus')
tagger = ChemCrfPosTagger(clusters=clusters)
tagger.train(training_corpus, output)
@pos_cli.command()
@click.argument('model', required=True)
@click.option('--corpus', type=click.Choice(['wsj', 'genia']), help='Evaluation corpus')
@click.option('--clusters/--no-clusters', help='Whether to use cluster features', default=True)
@click.pass_context
def evaluate(ctx, model, corpus, clusters):
click.echo('chemdataextractor.pos.evaluate')
if corpus == 'wsj':
evaluation = wsj_evaluation
sents = list(evaluation.tagged_sents())
for i, wsj_sent in enumerate(sents):
sents[i] = [t for t in wsj_sent if not t[1] == '-NONE-']
elif corpus == 'genia':
evaluation = genia_evaluation
sents = list(evaluation.tagged_sents())
# Translate GENIA bracket tags
for i, genia_sent in enumerate(sents):
for j, (token, tag) in enumerate(genia_sent):
if tag == '(':
sents[i][j] = (token, '-LRB-')
elif tag == ')':
sents[i][j] = (token, '-RRB-')
else:
raise click.ClickException('Invalid corpus')
tagger = ChemCrfPosTagger(model=model, clusters=clusters)
accuracy = tagger.evaluate(sents)
click.echo('%s on %s: %s' % (model, evaluation, accuracy))
@pos_cli.command()
@click.option('--output', '-o', type=click.File('wb'), help='Output model file.', required=True)
@click.option('--corpus', type=click.Choice(['wsj', 'genia', 'wsj+genia']), help='Training corpus')
@click.option('--clusters/--no-clusters', help='Whether to use cluster features', default=True)
@click.pass_obj
def train_perceptron(ctx, output, corpus, clusters):
click.echo('chemdataextractor.pos.train')
click.echo('Output: %s' % output)
click.echo('Corpus: %s' % corpus)
click.echo('Clusters: %s' % clusters)
wsj_sents = []
genia_sents = []
if corpus == 'wsj' or corpus == 'wsj+genia':
wsj_sents = list(wsj_training.tagged_sents())
# For WSJ, remove all tokens with -NONE- tag
for i, wsj_sent in enumerate(wsj_sents):
wsj_sents[i] = [t for t in wsj_sent if not t[1] == '-NONE-']
if corpus == 'genia' or corpus == 'wsj+genia':
genia_sents = list(genia_training.tagged_sents())
# Translate GENIA
for i, genia_sent in enumerate(genia_sents):
for j, (token, tag) in enumerate(genia_sent):
if tag == '(':
genia_sents[i][j] = (token, '-LRB-') # ( to -LRB- (also do for evaluation)
elif tag == ')':
genia_sents[i][j] = (token, '-RRB-') # ) to -RRB- (also do for evaluation)
elif tag == 'CT':
genia_sents[i][j] = (token, 'DT') # Typo?
elif tag == 'XT':
genia_sents[i][j] = (token, 'DT') # Typo?
elif tag == '-':
genia_sents[i][j] = (token, ':') # Single hyphen character for dash
elif tag == 'N':
genia_sents[i][j] = (token, 'NN') # Typo?
elif tag == 'PP':
genia_sents[i][j] = (token, 'PRP') # Typo?
elif tag == '' and token == ')':
genia_sents[i][j] = (token, '-RRB-') # Typo?
elif tag == '' and token == 'IFN-gamma':
genia_sents[i][j] = (token, 'NN') # Typo?
elif '|' in tag:
genia_sents[i][j] = (token, tag.split('|')[0]) # If contains |, choose first part
# Filter any tags not in the allowed tagset (Shouldn't be any left anyway)
genia_sents[i] = [t for t in genia_sent if t[1] in TAGS]
if corpus == 'wsj':
training_corpus = wsj_sents
elif corpus == 'genia':
training_corpus = genia_sents
elif corpus == 'wsj+genia':
training_corpus = wsj_sents + genia_sents
else:
raise click.ClickException('Invalid corpus')
tagger = ChemApPosTagger(clusters=clusters)
tagger.train(training_corpus)
tagger.save(output)
@pos_cli.command()
@click.argument('model', required=True)
@click.option('--corpus', type=click.Choice(['wsj', 'genia']), help='Evaluation corpus')
@click.pass_obj
def evaluate_perceptron(ctx, model, corpus):
click.echo('chemdataextractor.pos.evaluate')
if corpus == 'wsj':
evaluation = wsj_evaluation
sents = list(evaluation.tagged_sents())
for i, wsj_sent in enumerate(sents):
sents[i] = [t for t in wsj_sent if not t[1] == u'-NONE-']
elif corpus == 'genia':
evaluation = genia_evaluation
sents = list(evaluation.tagged_sents())
for i, genia_sent in enumerate(sents):
for j, (token, tag) in enumerate(genia_sent):
if tag == u'(':
sents[i][j] = (token, u'-LRB-')
elif tag == u')':
sents[i][j] = (token, u'-RRB-')
else:
raise click.ClickException('Invalid corpus')
tagger = ChemApPosTagger(model=model)
accuracy = tagger.evaluate(sents)
click.echo('%s on %s: %s' % (model, evaluation, accuracy))
@pos_cli.command()
@click.option('--output', '-o', type=click.File('w', encoding='utf8'), help='Output file.', default=click.get_text_stream('stdout'))
@click.argument('input', type=click.File('rb'), default=click.get_binary_stream('stdin'))
@click.pass_obj
def tag(ctx, input, output):
log.info('chemdataextractor.pos.tag')
log.info('Reading %s' % input.name)
doc = Document.from_file(input)
for element in doc.elements:
if isinstance(element, Text):
for sentence in element.sentences:
output.write(u' '.join(u'/'.join([token, tag]) for token, tag in sentence.pos_tagged_tokens))
output.write(u'\n')
| true
| true
|
1c46da710b690df0d6804fd81ba494ce167bd99d
| 394
|
py
|
Python
|
serempre_todo/task/api/views.py
|
pygabo/Serempre
|
6b29e337abd8d1b3f71ee889d318a2d473d6c744
|
[
"MIT"
] | null | null | null |
serempre_todo/task/api/views.py
|
pygabo/Serempre
|
6b29e337abd8d1b3f71ee889d318a2d473d6c744
|
[
"MIT"
] | null | null | null |
serempre_todo/task/api/views.py
|
pygabo/Serempre
|
6b29e337abd8d1b3f71ee889d318a2d473d6c744
|
[
"MIT"
] | null | null | null |
# Rest Framework
from rest_framework import viewsets
from rest_framework.permissions import IsAuthenticated
# Serializer
from serempre_todo.task.api.serializers import TaskSerializer
# Model
from serempre_todo.task.models import Task
class TaskViewSet(viewsets.ModelViewSet):
serializer_class = TaskSerializer
permission_classes = [IsAuthenticated]
queryset = Task.objects.all()
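# A hedged wiring sketch. Assumption: this normally lives in the app's urls.py;
# it is shown here only to illustrate how the viewset is exposed.
from rest_framework.routers import DefaultRouter

router = DefaultRouter()
router.register('tasks', TaskViewSet, basename='task')
urlpatterns = router.urls  # yields /tasks/ list and /tasks/<pk>/ detail routes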
| 26.266667
| 61
| 0.819797
|
from rest_framework import viewsets
from rest_framework.permissions import IsAuthenticated
from serempre_todo.task.api.serializers import TaskSerializer
from serempre_todo.task.models import Task
class TaskViewSet(viewsets.ModelViewSet):
serializer_class = TaskSerializer
permission_classes = [IsAuthenticated]
queryset = Task.objects.all()
| true
| true
|
1c46db55722edfbae9a686a7bac404d67cd50321
| 3,930
|
py
|
Python
|
contractor_plugins/Manual/models.py
|
T3kton/contractor_plugins
|
a42c87f4d0713b2a461739f528f92fa572a7fec7
|
[
"MIT"
] | null | null | null |
contractor_plugins/Manual/models.py
|
T3kton/contractor_plugins
|
a42c87f4d0713b2a461739f528f92fa572a7fec7
|
[
"MIT"
] | null | null | null |
contractor_plugins/Manual/models.py
|
T3kton/contractor_plugins
|
a42c87f4d0713b2a461739f528f92fa572a7fec7
|
[
"MIT"
] | 2
|
2017-05-05T03:39:11.000Z
|
2018-05-11T13:06:25.000Z
|
from django.db import models
from django.core.exceptions import ValidationError
from cinp.orm_django import DjangoCInP as CInP
from contractor.Site.models import Site
from contractor.Building.models import Foundation, Complex, FOUNDATION_SUBCLASS_LIST, COMPLEX_SUBCLASS_LIST
from contractor.BluePrint.models import FoundationBluePrint
from contractor_plugins.Manual.module import set_power, power_state, wait_for_poweroff
cinp = CInP( 'Manual', '0.1' )
FOUNDATION_SUBCLASS_LIST.append( 'manualfoundation' )
FOUNDATION_SUBCLASS_LIST.append( 'manualcomplexedfoundation' )
COMPLEX_SUBCLASS_LIST.append( 'manualcomplex' )
@cinp.model( property_list=( 'state', 'type' ) )
class ManualComplex( Complex ):
@property
def subclass( self ):
return self
@property
def type( self ):
return 'Manual'
def newFoundation( self, hostname ):
foundation = ManualComplexedFoundation( site=self.site, blueprint=FoundationBluePrint.objects.get( pk='manual-foundation-base' ), locator=hostname )
foundation.complex_host = self
foundation.full_clean()
foundation.save()
return foundation
@cinp.check_auth()
@staticmethod
def checkAuth( user, method, id_list, action=None ):
return True
def clean( self, *args, **kwargs ):
super().clean( *args, **kwargs )
errors = {}
if errors:
raise ValidationError( errors )
def __str__( self ):
return 'ManualComplex {0}'.format( self.pk )
@cinp.model( property_list=( 'state', 'type', 'class_list' ) )
class ManualFoundation( Foundation ):
@staticmethod
def getTscriptValues( write_mode=False ): # locator is handled separately
result = super( ManualFoundation, ManualFoundation ).getTscriptValues( write_mode )
return result
@staticmethod
def getTscriptFunctions():
result = super( ManualFoundation, ManualFoundation ).getTscriptFunctions()
result[ 'power_on' ] = lambda foundation: ( 'manual', set_power( foundation, 'on' ) )
result[ 'power_off' ] = lambda foundation: ( 'manual', set_power( foundation, 'off' ) )
result[ 'power_state' ] = lambda foundation: ( 'manual', power_state( foundation ) )
result[ 'wait_for_poweroff' ] = lambda foundation: ( 'manual', wait_for_poweroff( foundation ) )
return result
def configAttributes( self ):
result = super().configAttributes()
return result
@property
def subclass( self ):
return self
@property
def type( self ):
return 'Manual'
@property
def class_list( self ):
return [ 'Metal', 'VM', 'Container', 'Switch', 'Manual' ]
@cinp.list_filter( name='site', paramater_type_list=[ { 'type': 'Model', 'model': Site } ] )
@staticmethod
def filter_site( site ):
return ManualFoundation.objects.filter( site=site )
@cinp.check_auth()
@staticmethod
def checkAuth( user, method, id_list, action=None ):
return True
def __str__( self ):
return 'ManualFoundation {0}'.format( self.pk )
@cinp.model( property_list=( 'state', 'type', 'class_list' ) )
class ManualComplexedFoundation( Foundation ):
complex_host = models.ForeignKey( ManualComplex, on_delete=models.PROTECT )
def configAttributes( self ):
result = super().configAttributes()
result.update( { '_complex_host': self.complex_host.name } )
return result
@property
def subclass( self ):
return self
@property
def type( self ):
return 'ManualComplex'
@property
def class_list( self ):
return [ 'ManualComplex' ]
@property
def complex( self ):
return self.complex_host
@cinp.list_filter( name='site', paramater_type_list=[ { 'type': 'Model', 'model': Site } ] )
@staticmethod
def filter_site( site ):
return ManualComplexedFoundation.objects.filter( site=site )
@cinp.check_auth()
@staticmethod
def checkAuth( user, method, id_list, action=None ):
return True
def __str__( self ):
return 'ManualComplexedFoundation {0}'.format( self.pk )
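# Hedged usage sketch: the tscript bindings above map names to callables that
# take a foundation instance and return a ('manual', handler) pair.
#   funcs = ManualFoundation.getTscriptFunctions()
#   module_name, handler = funcs['power_state'](foundation)  # 'foundation' is hypothetical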
| 28.071429
| 152
| 0.708142
|
from django.db import models
from django.core.exceptions import ValidationError
from cinp.orm_django import DjangoCInP as CInP
from contractor.Site.models import Site
from contractor.Building.models import Foundation, Complex, FOUNDATION_SUBCLASS_LIST, COMPLEX_SUBCLASS_LIST
from contractor.BluePrint.models import FoundationBluePrint
from contractor_plugins.Manual.module import set_power, power_state, wait_for_poweroff
cinp = CInP( 'Manual', '0.1' )
FOUNDATION_SUBCLASS_LIST.append( 'manualfoundation' )
FOUNDATION_SUBCLASS_LIST.append( 'manualcomplexedfoundation' )
COMPLEX_SUBCLASS_LIST.append( 'manualcomplex' )
@cinp.model( property_list=( 'state', 'type' ) )
class ManualComplex( Complex ):
@property
def subclass( self ):
return self
@property
def type( self ):
return 'Manual'
def newFoundation( self, hostname ):
foundation = ManualComplexedFoundation( site=self.site, blueprint=FoundationBluePrint.objects.get( pk='manual-foundation-base' ), locator=hostname )
foundation.complex_host = self
foundation.full_clean()
foundation.save()
return foundation
@cinp.check_auth()
@staticmethod
def checkAuth( user, method, id_list, action=None ):
return True
def clean( self, *args, **kwargs ):
super().clean( *args, **kwargs )
errors = {}
if errors:
raise ValidationError( errors )
def __str__( self ):
return 'ManualComplex {0}'.format( self.pk )
@cinp.model( property_list=( 'state', 'type', 'class_list' ) )
class ManualFoundation( Foundation ):
@staticmethod
def getTscriptValues( write_mode=False ):
result = super( ManualFoundation, ManualFoundation ).getTscriptValues( write_mode )
return result
@staticmethod
def getTscriptFunctions():
result = super( ManualFoundation, ManualFoundation ).getTscriptFunctions()
result[ 'power_on' ] = lambda foundation: ( 'manual', set_power( foundation, 'on' ) )
result[ 'power_off' ] = lambda foundation: ( 'manual', set_power( foundation, 'off' ) )
result[ 'power_state' ] = lambda foundation: ( 'manual', power_state( foundation ) )
result[ 'wait_for_poweroff' ] = lambda foundation: ( 'manual', wait_for_poweroff( foundation ) )
return result
def configAttributes( self ):
result = super().configAttributes()
return result
@property
def subclass( self ):
return self
@property
def type( self ):
return 'Manual'
@property
def class_list( self ):
return [ 'Metal', 'VM', 'Container', 'Switch', 'Manual' ]
@cinp.list_filter( name='site', paramater_type_list=[ { 'type': 'Model', 'model': Site } ] )
@staticmethod
def filter_site( site ):
return ManualFoundation.objects.filter( site=site )
@cinp.check_auth()
@staticmethod
def checkAuth( user, method, id_list, action=None ):
return True
def __str__( self ):
return 'ManualFoundation {0}'.format( self.pk )
@cinp.model( property_list=( 'state', 'type', 'class_list' ) )
class ManualComplexedFoundation( Foundation ):
complex_host = models.ForeignKey( ManualComplex, on_delete=models.PROTECT )
def configAttributes( self ):
result = super().configAttributes()
result.update( { '_complex_host': self.complex_host.name } )
return result
@property
def subclass( self ):
return self
@property
def type( self ):
return 'ManualComplex'
@property
def class_list( self ):
return [ 'ManualComplex' ]
@property
def complex( self ):
return self.complex_host
@cinp.list_filter( name='site', paramater_type_list=[ { 'type': 'Model', 'model': Site } ] )
@staticmethod
def filter_site( site ):
return ManualComplexedFoundation.objects.filter( site=site )
@cinp.check_auth()
@staticmethod
def checkAuth( user, method, id_list, action=None ):
return True
def __str__( self ):
return 'ManualComplexedFoundation {0}'.format( self.pk )
| true
| true
|
1c46dbb5413dcfd3678d4b0e6bd04adac93c69db
| 1,330
|
py
|
Python
|
src/shardBackup/copy.py
|
babarnescocke/shardBackup
|
ff62869ffd319b627edf2a2a4f5084ed19713f03
|
[
"BSD-3-Clause"
] | null | null | null |
src/shardBackup/copy.py
|
babarnescocke/shardBackup
|
ff62869ffd319b627edf2a2a4f5084ed19713f03
|
[
"BSD-3-Clause"
] | null | null | null |
src/shardBackup/copy.py
|
babarnescocke/shardBackup
|
ff62869ffd319b627edf2a2a4f5084ed19713f03
|
[
"BSD-3-Clause"
] | null | null | null |
from subprocess import run
from sys import exit
from shutil import copy2
import os  # needed for os.stat and os.chown below
import stat
def rsync(fobject0, fobject1): # takes two file objects and transmits 0 to 1
"""
Copy fobject0 to fobject1 using rsync (dry-run only; see the -n flag below).
>>>rsync('./.gitkeep','/other/')
rsync output....
"""
try:
run(['rsync', #rsync is a major program
'-avzz', #a = archive, v= verbose, zz=compress
'-n', # n = simulate
'--info=progress2', # prints info such as how fast it is downloading
fobject0,
fobject1
])
except:
print(f"Unable to launch rsync copying - despite finding rsync installed")
exit(1)
def copy(fobject0, fobject1): #copies file and then changes perms/owner - https://stackoverflow.com/questions/19787348/copy-file-keep-permissions-and-owner
"""
Copy fobject0 to fobject1, preserving the owner and group of the source.
>>>copy("file0", "file1")
"""
try:
copy2(fobject0, fobject1) # copy file
st = os.stat(fobject0) # make variable of source file attributes
os.chown(fobject1, st[stat.ST_UID], st[stat.ST_GID]) #
except:
print(f"Unable to copy {fobject0} to {fobject1}. I told you it was in alpha.")
exit(1)
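# Hedged usage sketch: both helpers take filesystem paths (the paths below are
# hypothetical). Note that the chown in copy() generally requires running as
# root or owning the target, and rsync() only simulates thanks to -n above.
if __name__ == '__main__':
    copy('./source.txt', './backup/source.txt')
    rsync('./source.txt', './backup/')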
| 35.945946
| 155
| 0.626316
|
from subprocess import run
from sys import exit
from shutil import copy2
import os
import stat
def rsync(fobject0, fobject1): # takes two file objects and transmits 0 to 1
try:
run(['rsync', #rsync is a major program
'-avzz', #a = archive, v= verbose, zz=compress
'-n', # n = simulate
'--info=progress2', # prints info such as how fast it is downloading
fobject0,
fobject1
])
except:
print(f"Unable to launch rsync copying - despite finding rsync installed")
exit(1)
def copy(fobject0, fobject1): #copies file and then changes perms/owner - https://stackoverflow.com/questions/19787348/copy-file-keep-permissions-and-owner
try:
copy2(fobject0, fobject1) # copy file
st = os.stat(fobject0) # make variable of source file attributes
os.chown(fobject1, st[stat.ST_UID], st[stat.ST_GID]) #
except:
print(f"Unable to copy {fobject0} to {fobject1}. I told you it was in alpha.")
exit(1)
| true
| true
|
1c46dc5e623025be88f670a423523abba08c29d5
| 1,368
|
py
|
Python
|
Diabetes_API/app.py
|
18bce1151/proj
|
96c0a299ccaec29a02a9486d192a7215f5a12566
|
[
"Unlicense"
] | 86
|
2020-11-26T17:38:51.000Z
|
2022-03-10T11:35:08.000Z
|
Diabetes_API/app.py
|
18bce1151/proj
|
96c0a299ccaec29a02a9486d192a7215f5a12566
|
[
"Unlicense"
] | null | null | null |
Diabetes_API/app.py
|
18bce1151/proj
|
96c0a299ccaec29a02a9486d192a7215f5a12566
|
[
"Unlicense"
] | 62
|
2020-11-27T05:16:06.000Z
|
2022-03-27T15:23:55.000Z
|
from flask import Flask, render_template, url_for, flash, redirect
import joblib
from flask import request
import numpy as np
app = Flask(__name__, template_folder='templates')
@app.route("/")
@app.route("/Diabetes")
def cancer():
return render_template("diabetes.html")
def ValuePredictor(to_predict_list, size):
to_predict = np.array(to_predict_list).reshape(1,size)
if(size==6):
loaded_model = joblib.load(r'C:\Users\Mahesh Sharma\Desktop\HealthApp\Indivisual_Deployment\Diabetes_API\diabetes_model.pkl')
result = loaded_model.predict(to_predict)
return result[0]
@app.route('/predict', methods = ["POST"])
def predict():
if request.method == "POST":
to_predict_list = request.form.to_dict()
to_predict_list = list(to_predict_list.values())
to_predict_list = list(map(float, to_predict_list))
#diabetes
if(len(to_predict_list)==6):
result = ValuePredictor(to_predict_list,6)
if(int(result)==1):
prediction = "Sorry, you have a high chance of getting the disease. Please consult a doctor immediately"
else:
prediction = "No need to fear. You have no dangerous symptoms of the disease"
return(render_template("result.html", prediction_text=prediction))
if __name__ == "__main__":
app.run(debug=True)
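# Hedged client sketch: POSTing six numeric form fields to /predict. The field
# names are assumptions -- the template that defines them is not shown here.
#   import requests
#   form = {'glucose': 120, 'bp': 70, 'insulin': 80, 'bmi': 28.1, 'dpf': 0.35, 'age': 33}
#   resp = requests.post('http://127.0.0.1:5000/predict', data=form)
#   print(resp.status_code)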
| 35.076923
| 134
| 0.679094
|
from flask import Flask, render_template, url_for, flash, redirect
import joblib
from flask import request
import numpy as np
app = Flask(__name__, template_folder='templates')
@app.route("/")
@app.route("/Diabetes")
def cancer():
return render_template("diabetes.html")
def ValuePredictor(to_predict_list, size):
to_predict = np.array(to_predict_list).reshape(1,size)
if(size==6):
loaded_model = joblib.load(r'C:\Users\Mahesh Sharma\Desktop\HealthApp\Indivisual_Deployment\Diabetes_API\diabetes_model.pkl')
result = loaded_model.predict(to_predict)
return result[0]
@app.route('/predict', methods = ["POST"])
def predict():
if request.method == "POST":
to_predict_list = request.form.to_dict()
to_predict_list = list(to_predict_list.values())
to_predict_list = list(map(float, to_predict_list))
if(len(to_predict_list)==6):
result = ValuePredictor(to_predict_list,6)
if(int(result)==1):
prediction = "Sorry, you have a high chance of getting the disease. Please consult a doctor immediately"
else:
prediction = "No need to fear. You have no dangerous symptoms of the disease"
return(render_template("result.html", prediction_text=prediction))
if __name__ == "__main__":
app.run(debug=True)
| true
| true
|
1c46ded6115ecd16b3a79fe253d63b64f0698442
| 18,126
|
py
|
Python
|
python/cloudtik/tests/test_cloudtik.py
|
jerrychenhf/cloudtik
|
5ceab948c5c8b2e00f644d2fb801311572aaf381
|
[
"Apache-2.0"
] | 2
|
2022-03-28T05:03:57.000Z
|
2022-03-28T09:00:48.000Z
|
python/cloudtik/tests/test_cloudtik.py
|
jerrychenhf/cloudtik
|
5ceab948c5c8b2e00f644d2fb801311572aaf381
|
[
"Apache-2.0"
] | 12
|
2022-03-29T05:07:18.000Z
|
2022-03-31T13:57:57.000Z
|
python/cloudtik/tests/test_cloudtik.py
|
jerrychenhf/cloudtik
|
5ceab948c5c8b2e00f644d2fb801311572aaf381
|
[
"Apache-2.0"
] | 6
|
2022-03-28T05:04:24.000Z
|
2022-03-29T01:22:22.000Z
|
from enum import Enum
import os
import re
from subprocess import CalledProcessError
import tempfile
import threading
import time
import unittest
import yaml
import copy
from jsonschema.exceptions import ValidationError
from typing import Dict, Callable, List, Optional
from cloudtik.core._private.utils import prepare_config, validate_config
from cloudtik.core._private.cluster import cluster_operator
from cloudtik.core._private.cluster.cluster_metrics import ClusterMetrics
from cloudtik.core._private.providers import (
_NODE_PROVIDERS, _DEFAULT_CONFIGS)
from cloudtik.core.tags import CLOUDTIK_TAG_NODE_KIND, CLOUDTIK_TAG_NODE_STATUS, \
CLOUDTIK_TAG_USER_NODE_TYPE, CLOUDTIK_TAG_CLUSTER_NAME
from cloudtik.core.node_provider import NodeProvider
import grpc
import pytest
class DrainNodeOutcome(str, Enum):
"""Potential outcomes of DrainNode calls, each of which is handled
differently by the clusterscaler.
"""
# Return a response indicating all nodes were successfully drained.
Succeeded = "Succeeded"
# Return a response indicating at least one node failed to be drained.
NotAllDrained = "NotAllDrained"
# Return an unimplemented gRPC error, indicating an old GCS.
Unimplemented = "Unimplemented"
# Raise a generic unexpected RPC error.
GenericRpcError = "GenericRpcError"
# Raise a generic unexpected exception.
GenericException = "GenericException"
class MockRpcException(grpc.RpcError):
"""Mock RpcError with a specified status code.
Note: It might be possible to do this already with standard tools
in the `grpc` module, but how to do so wasn't immediately obvious to me.
"""
def __init__(self, status_code: grpc.StatusCode):
self.status_code = status_code
def code(self):
return self.status_code
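# Usage sketch: raising MockRpcException(grpc.StatusCode.UNIMPLEMENTED) from a
# mocked DrainNode call lets a test simulate the DrainNodeOutcome.Unimplemented
# case (an old GCS) described above.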
class CloudTikTestTimeoutException(Exception):
"""Exception used to identify timeouts from test utilities."""
pass
class MockNode:
def __init__(self, node_id, tags, node_config, node_type,
unique_ips=False):
self.node_id = node_id
self.state = "pending"
self.tags = tags
self.external_ip = "1.2.3.4"
self.internal_ip = "172.0.0.{}".format(self.node_id)
if unique_ips:
self.external_ip = f"1.2.3.{self.node_id}"
self.node_config = node_config
self.node_type = node_type
def matches(self, tags):
for k, v in tags.items():
if k not in self.tags or self.tags[k] != v:
return False
return True
class MockProcessRunner:
def __init__(self, fail_cmds=None, cmd_to_callback=None, print_out=False):
self.calls = []
self.cmd_to_callback = cmd_to_callback or {
} # type: Dict[str, Callable]
self.print_out = print_out
self.fail_cmds = fail_cmds or []
self.call_response = {}
self.ready_to_run = threading.Event()
self.ready_to_run.set()
self.lock = threading.RLock()
def check_call(self, cmd, *args, **kwargs):
with self.lock:
self.ready_to_run.wait()
self.calls.append(cmd)
if self.print_out:
print(f">>>Process runner: Executing \n {str(cmd)}")
for token in self.cmd_to_callback:
if token in str(cmd):
# Trigger a callback if token is in cmd.
# Can be used to simulate background events during a node
# update (e.g. node disconnected).
callback = self.cmd_to_callback[token]
callback()
for token in self.fail_cmds:
if token in str(cmd):
raise CalledProcessError(1, token,
"Failing command on purpose")
def check_output(self, cmd):
with self.lock:
self.check_call(cmd)
return_string = "command-output"
key_to_shrink = None
for pattern, response_list in self.call_response.items():
if pattern in str(cmd):
return_string = response_list[0]
key_to_shrink = pattern
break
if key_to_shrink:
self.call_response[key_to_shrink] = self.call_response[
key_to_shrink][1:]
if len(self.call_response[key_to_shrink]) == 0:
del self.call_response[key_to_shrink]
return return_string.encode()
def assert_has_call(self,
ip: str,
pattern: Optional[str] = None,
exact: Optional[List[str]] = None):
"""Checks if the given value was called by this process runner.
NOTE: Either pattern or exact must be specified, not both!
Args:
ip: IP address of the node that the given call was executed on.
pattern: RegEx that matches one specific call.
exact: List of strings that when joined exactly match one call.
"""
with self.lock:
assert bool(pattern) ^ bool(exact), \
"Must specify either a pattern or exact match."
debug_output = ""
if pattern is not None:
for cmd in self.command_history():
if ip in cmd:
debug_output += cmd
debug_output += "\n"
if re.search(pattern, cmd):
return True
else:
raise Exception(
f"Did not find [{pattern}] in [{debug_output}] for "
f"ip={ip}.\n\nFull output: {self.command_history()}")
elif exact is not None:
exact_cmd = " ".join(exact)
for cmd in self.command_history():
if ip in cmd:
debug_output += cmd
debug_output += "\n"
if cmd == exact_cmd:
return True
raise Exception(
f"Did not find [{exact_cmd}] in [{debug_output}] for "
f"ip={ip}.\n\nFull output: {self.command_history()}")
def assert_not_has_call(self, ip: str, pattern: str):
"""Ensure that the given regex pattern was never called.
"""
with self.lock:
out = ""
for cmd in self.command_history():
if ip in cmd:
out += cmd
out += "\n"
if re.search(pattern, out):
raise Exception("Found [{}] in [{}] for {}".format(
pattern, out, ip))
else:
return True
def clear_history(self):
with self.lock:
self.calls = []
def command_history(self):
with self.lock:
return [" ".join(cmd) for cmd in self.calls]
def respond_to_call(self, pattern, response_list):
with self.lock:
self.call_response[pattern] = response_list
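# Usage sketch for the runner above (the command is hypothetical, for
# illustration):
#   runner = MockProcessRunner()
#   runner.check_call(["ssh", "1.2.3.4", "uptime"])
#   runner.assert_has_call("1.2.3.4", pattern="uptime")          # regex form
#   runner.assert_has_call("1.2.3.4", exact=["ssh", "1.2.3.4", "uptime"])
# Passing both `pattern` and `exact` (or neither) trips the assertion, since
# the two modes are mutually exclusive.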
class MockProvider(NodeProvider):
def __init__(self, cache_stopped=False, unique_ips=False):
self.mock_nodes = {}
self.next_id = 0
self.throw = False
self.error_creates = False
self.fail_creates = False
self.ready_to_create = threading.Event()
self.ready_to_create.set()
self.cache_stopped = cache_stopped
self.unique_ips = unique_ips
# Many of these functions are called by node_launcher or updater in
# different threads. This can be treated as a global lock for
# everything.
self.lock = threading.Lock()
super().__init__(None, None)
def non_terminated_nodes(self, tag_filters):
with self.lock:
if self.throw:
raise Exception("oops")
return [
n.node_id for n in self.mock_nodes.values()
if n.matches(tag_filters)
and n.state not in ["stopped", "terminated"]
]
def non_terminated_node_ips(self, tag_filters):
with self.lock:
if self.throw:
raise Exception("oops")
return [
n.internal_ip for n in self.mock_nodes.values()
if n.matches(tag_filters)
and n.state not in ["stopped", "terminated"]
]
def is_running(self, node_id):
with self.lock:
return self.mock_nodes[node_id].state == "running"
def is_terminated(self, node_id):
with self.lock:
return self.mock_nodes[node_id].state in ["stopped", "terminated"]
def node_tags(self, node_id):
# Don't assume that node providers can retrieve tags from
# terminated nodes.
if self.is_terminated(node_id):
raise Exception(f"The node with id {node_id} has been terminated!")
with self.lock:
return self.mock_nodes[node_id].tags
def internal_ip(self, node_id):
with self.lock:
return self.mock_nodes[node_id].internal_ip
def external_ip(self, node_id):
with self.lock:
return self.mock_nodes[node_id].external_ip
def create_node(self, node_config, tags, count, _skip_wait=False):
if self.error_creates:
raise Exception
if not _skip_wait:
self.ready_to_create.wait()
if self.fail_creates:
return
with self.lock:
if self.cache_stopped:
for node in self.mock_nodes.values():
if node.state == "stopped" and count > 0:
count -= 1
node.state = "pending"
node.tags.update(tags)
for _ in range(count):
self.mock_nodes[self.next_id] = MockNode(
self.next_id,
tags.copy(),
node_config,
tags.get(CLOUDTIK_TAG_USER_NODE_TYPE),
unique_ips=self.unique_ips)
self.next_id += 1
def set_node_tags(self, node_id, tags):
with self.lock:
self.mock_nodes[node_id].tags.update(tags)
def terminate_node(self, node_id):
with self.lock:
if self.cache_stopped:
self.mock_nodes[node_id].state = "stopped"
else:
self.mock_nodes[node_id].state = "terminated"
def finish_starting_nodes(self):
with self.lock:
for node in self.mock_nodes.values():
if node.state == "pending":
node.state = "running"
SMALL_CLUSTER = {
"cluster_name": "default",
"min_workers": 2,
"max_workers": 2,
"initial_workers": 0,
"autoscaling_mode": "default",
"target_utilization_fraction": 0.8,
"idle_timeout_minutes": 5,
"provider": {
"type": "mock",
"region": "us-east-1",
"availability_zone": "us-east-1a",
},
"docker": {
"enabled": True,
"image": "example",
"container_name": "mock",
},
"auth": {
"ssh_user": "ubuntu",
"ssh_private_key": os.devnull,
},
"head_node": {
"TestProp": 1,
},
"file_mounts": {},
"cluster_synced_files": [],
"initialization_commands": ["init_cmd"],
"setup_commands": ["setup_cmd"],
"head_setup_commands": ["head_setup_cmd"],
"worker_setup_commands": ["worker_setup_cmd"],
"head_start_commands": ["head_start_cmd"],
"worker_start_commands": ["worker_start_cmd"],
}
MOCK_DEFAULT_CONFIG = {
"cluster_name": "default",
"max_workers": 2,
"idle_timeout_minutes": 5,
"provider": {
"type": "mock",
"region": "us-east-1",
"availability_zone": "us-east-1a",
},
"docker": {
"enabled": True,
"image": "example",
"container_name": "mock",
},
"auth": {
"ssh_user": "ubuntu",
"ssh_private_key": os.devnull,
},
"available_node_types": {
"cloudtik.head.default": {
"resources": {},
"node_config": {
"head_default_prop": 4
}
},
"cloudtik.worker.default": {
"min_workers": 0,
"max_workers": 2,
"resources": {},
"node_config": {
"worker_default_prop": 7
}
}
},
"head_node_type": "cloudtik.head.default",
"head_node": {},
"file_mounts": {},
"cluster_synced_files": [],
"initialization_commands": [],
"setup_commands": [],
"head_setup_commands": [],
"worker_setup_commands": [],
"head_start_commands": [],
"worker_start_commands": [],
}
TYPES_A = {
"empty_node": {
"node_config": {
"FooProperty": 42,
},
"resources": {},
"max_workers": 0,
},
"m4.large": {
"node_config": {},
"resources": {
"CPU": 2
},
"max_workers": 10,
},
"m4.4xlarge": {
"node_config": {},
"resources": {
"CPU": 16
},
"max_workers": 8,
},
"m4.16xlarge": {
"node_config": {},
"resources": {
"CPU": 64
},
"max_workers": 4,
},
"p2.xlarge": {
"node_config": {},
"resources": {
"CPU": 16,
"GPU": 1
},
"max_workers": 10,
},
"p2.8xlarge": {
"node_config": {},
"resources": {
"CPU": 32,
"GPU": 8
},
"max_workers": 4,
},
}
MULTI_WORKER_CLUSTER = dict(
SMALL_CLUSTER, **{
"available_node_types": TYPES_A,
"head_node_type": "empty_node"
})
class ClusterMetricsTest(unittest.TestCase):
def testHeartbeat(self):
cluster_metrics = ClusterMetrics()
cluster_metrics.update("1.1.1.1", b'\xb6\x80\xbdw\xbd\x1c\xee\xf6@\x11', {"CPU": 2}, {"CPU": 1}, {})
cluster_metrics.mark_active("2.2.2.2")
assert "1.1.1.1" in cluster_metrics.last_heartbeat_time_by_ip
assert "2.2.2.2" in cluster_metrics.last_heartbeat_time_by_ip
assert "3.3.3.3" not in cluster_metrics.last_heartbeat_time_by_ip
class CloudTikTest(unittest.TestCase):
def setUp(self):
_NODE_PROVIDERS["mock"] = \
lambda config: self.create_provider
_DEFAULT_CONFIGS["mock"] = _DEFAULT_CONFIGS["aws"]
self.provider = None
self.tmpdir = tempfile.mkdtemp()
def waitFor(self, condition, num_retries=50, fail_msg=None):
for _ in range(num_retries):
if condition():
return
time.sleep(.1)
fail_msg = fail_msg or "Timed out waiting for {}".format(condition)
raise CloudTikTestTimeoutException(fail_msg)
def waitForNodes(self, expected, comparison=None, tag_filters=None):
if tag_filters is None:
tag_filters = {}
MAX_ITER = 50
for i in range(MAX_ITER):
n = len(self.provider.non_terminated_nodes(tag_filters))
if comparison is None:
comparison = self.assertEqual
try:
comparison(n, expected, msg="Unexpected node quantity.")
return
except Exception:
if i == MAX_ITER - 1:
raise
time.sleep(.1)
def create_provider(self, config, cluster_name):
assert self.provider
return self.provider
def write_config(self, config, call_prepare_config=True):
new_config = copy.deepcopy(config)
if call_prepare_config:
new_config = prepare_config(new_config)
path = os.path.join(self.tmpdir, "simple.yaml")
with open(path, "w") as f:
f.write(yaml.dump(new_config))
return path
def testValidateDefaultConfig(self):
config = {"provider": {
"type": "aws",
"region": "us-east-1",
"availability_zone": "us-east-1a",
}}
config = prepare_config(config)
try:
validate_config(config)
except ValidationError:
self.fail("Default config did not pass validation test!")
def testGetRunningHeadNode(self):
config = copy.deepcopy(SMALL_CLUSTER)
self.provider = MockProvider()
# Node 0 is failed.
self.provider.create_node({}, {
CLOUDTIK_TAG_CLUSTER_NAME: "default",
CLOUDTIK_TAG_NODE_KIND: "head",
CLOUDTIK_TAG_NODE_STATUS: "update-failed"
}, 1)
# `_allow_uninitialized_state` should return the head node
# in the `update-failed` state.
allow_failed = cluster_operator._get_running_head_node(
config,
_provider=self.provider,
_allow_uninitialized_state=True)
assert allow_failed == 0
# Node 1 is okay.
self.provider.create_node({}, {
CLOUDTIK_TAG_CLUSTER_NAME: "default",
CLOUDTIK_TAG_NODE_KIND: "head",
CLOUDTIK_TAG_NODE_STATUS: "up-to-date"
}, 1)
node = cluster_operator._get_running_head_node(
config,
_provider=self.provider)
assert node == 1
# `_allow_uninitialized_state` should return the up-to-date head node
# if it is present.
optionally_failed = cluster_operator._get_running_head_node(
config,
_provider=self.provider,
_allow_uninitialized_state=True)
assert optionally_failed == 1
def testDefaultMinMaxWorkers(self):
config = copy.deepcopy(MOCK_DEFAULT_CONFIG)
config = prepare_config(config)
node_types = config["available_node_types"]
head_node_config = node_types["cloudtik.head.default"]
assert head_node_config["min_workers"] == 0
assert head_node_config["max_workers"] == 0
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
| 32.138298
| 108
| 0.5667
|
from enum import Enum
import os
import re
from subprocess import CalledProcessError
import tempfile
import threading
import time
import unittest
import yaml
import copy
from jsonschema.exceptions import ValidationError
from typing import Dict, Callable, List, Optional
from cloudtik.core._private.utils import prepare_config, validate_config
from cloudtik.core._private.cluster import cluster_operator
from cloudtik.core._private.cluster.cluster_metrics import ClusterMetrics
from cloudtik.core._private.providers import (
_NODE_PROVIDERS, _DEFAULT_CONFIGS)
from cloudtik.core.tags import CLOUDTIK_TAG_NODE_KIND, CLOUDTIK_TAG_NODE_STATUS, \
CLOUDTIK_TAG_USER_NODE_TYPE, CLOUDTIK_TAG_CLUSTER_NAME
from cloudtik.core.node_provider import NodeProvider
import grpc
import pytest
class DrainNodeOutcome(str, Enum):
Succeeded = "Succeeded"
NotAllDrained = "NotAllDrained"
Unimplemented = "Unimplemented"
GenericRpcError = "GenericRpcError"
GenericException = "GenericException"
class MockRpcException(grpc.RpcError):
def __init__(self, status_code: grpc.StatusCode):
self.status_code = status_code
def code(self):
return self.status_code
class CloudTikTestTimeoutException(Exception):
pass
class MockNode:
def __init__(self, node_id, tags, node_config, node_type,
unique_ips=False):
self.node_id = node_id
self.state = "pending"
self.tags = tags
self.external_ip = "1.2.3.4"
self.internal_ip = "172.0.0.{}".format(self.node_id)
if unique_ips:
self.external_ip = f"1.2.3.{self.node_id}"
self.node_config = node_config
self.node_type = node_type
def matches(self, tags):
for k, v in tags.items():
if k not in self.tags or self.tags[k] != v:
return False
return True
class MockProcessRunner:
def __init__(self, fail_cmds=None, cmd_to_callback=None, print_out=False):
self.calls = []
self.cmd_to_callback = cmd_to_callback or {
}
self.print_out = print_out
self.fail_cmds = fail_cmds or []
self.call_response = {}
self.ready_to_run = threading.Event()
self.ready_to_run.set()
self.lock = threading.RLock()
def check_call(self, cmd, *args, **kwargs):
with self.lock:
self.ready_to_run.wait()
self.calls.append(cmd)
if self.print_out:
print(f">>>Process runner: Executing \n {str(cmd)}")
for token in self.cmd_to_callback:
if token in str(cmd):
callback = self.cmd_to_callback[token]
callback()
for token in self.fail_cmds:
if token in str(cmd):
raise CalledProcessError(1, token,
"Failing command on purpose")
def check_output(self, cmd):
with self.lock:
self.check_call(cmd)
return_string = "command-output"
key_to_shrink = None
for pattern, response_list in self.call_response.items():
if pattern in str(cmd):
return_string = response_list[0]
key_to_shrink = pattern
break
if key_to_shrink:
self.call_response[key_to_shrink] = self.call_response[
key_to_shrink][1:]
if len(self.call_response[key_to_shrink]) == 0:
del self.call_response[key_to_shrink]
return return_string.encode()
def assert_has_call(self,
ip: str,
pattern: Optional[str] = None,
exact: Optional[List[str]] = None):
with self.lock:
assert bool(pattern) ^ bool(exact), \
"Must specify either a pattern or exact match."
debug_output = ""
if pattern is not None:
for cmd in self.command_history():
if ip in cmd:
debug_output += cmd
debug_output += "\n"
if re.search(pattern, cmd):
return True
else:
raise Exception(
f"Did not find [{pattern}] in [{debug_output}] for "
f"ip={ip}.\n\nFull output: {self.command_history()}")
elif exact is not None:
exact_cmd = " ".join(exact)
for cmd in self.command_history():
if ip in cmd:
debug_output += cmd
debug_output += "\n"
if cmd == exact_cmd:
return True
raise Exception(
f"Did not find [{exact_cmd}] in [{debug_output}] for "
f"ip={ip}.\n\nFull output: {self.command_history()}")
def assert_not_has_call(self, ip: str, pattern: str):
with self.lock:
out = ""
for cmd in self.command_history():
if ip in cmd:
out += cmd
out += "\n"
if re.search(pattern, out):
raise Exception("Found [{}] in [{}] for {}".format(
pattern, out, ip))
else:
return True
def clear_history(self):
with self.lock:
self.calls = []
def command_history(self):
with self.lock:
return [" ".join(cmd) for cmd in self.calls]
def respond_to_call(self, pattern, response_list):
with self.lock:
self.call_response[pattern] = response_list
class MockProvider(NodeProvider):
def __init__(self, cache_stopped=False, unique_ips=False):
self.mock_nodes = {}
self.next_id = 0
self.throw = False
self.error_creates = False
self.fail_creates = False
self.ready_to_create = threading.Event()
self.ready_to_create.set()
self.cache_stopped = cache_stopped
self.unique_ips = unique_ips
self.lock = threading.Lock()
super().__init__(None, None)
def non_terminated_nodes(self, tag_filters):
with self.lock:
if self.throw:
raise Exception("oops")
return [
n.node_id for n in self.mock_nodes.values()
if n.matches(tag_filters)
and n.state not in ["stopped", "terminated"]
]
def non_terminated_node_ips(self, tag_filters):
with self.lock:
if self.throw:
raise Exception("oops")
return [
n.internal_ip for n in self.mock_nodes.values()
if n.matches(tag_filters)
and n.state not in ["stopped", "terminated"]
]
def is_running(self, node_id):
with self.lock:
return self.mock_nodes[node_id].state == "running"
def is_terminated(self, node_id):
with self.lock:
return self.mock_nodes[node_id].state in ["stopped", "terminated"]
def node_tags(self, node_id):
# terminated nodes.
if self.is_terminated(node_id):
raise Exception(f"The node with id {node_id} has been terminated!")
with self.lock:
return self.mock_nodes[node_id].tags
def internal_ip(self, node_id):
with self.lock:
return self.mock_nodes[node_id].internal_ip
def external_ip(self, node_id):
with self.lock:
return self.mock_nodes[node_id].external_ip
def create_node(self, node_config, tags, count, _skip_wait=False):
if self.error_creates:
raise Exception
if not _skip_wait:
self.ready_to_create.wait()
if self.fail_creates:
return
with self.lock:
if self.cache_stopped:
for node in self.mock_nodes.values():
if node.state == "stopped" and count > 0:
count -= 1
node.state = "pending"
node.tags.update(tags)
for _ in range(count):
self.mock_nodes[self.next_id] = MockNode(
self.next_id,
tags.copy(),
node_config,
tags.get(CLOUDTIK_TAG_USER_NODE_TYPE),
unique_ips=self.unique_ips)
self.next_id += 1
def set_node_tags(self, node_id, tags):
with self.lock:
self.mock_nodes[node_id].tags.update(tags)
def terminate_node(self, node_id):
with self.lock:
if self.cache_stopped:
self.mock_nodes[node_id].state = "stopped"
else:
self.mock_nodes[node_id].state = "terminated"
def finish_starting_nodes(self):
with self.lock:
for node in self.mock_nodes.values():
if node.state == "pending":
node.state = "running"
SMALL_CLUSTER = {
"cluster_name": "default",
"min_workers": 2,
"max_workers": 2,
"initial_workers": 0,
"autoscaling_mode": "default",
"target_utilization_fraction": 0.8,
"idle_timeout_minutes": 5,
"provider": {
"type": "mock",
"region": "us-east-1",
"availability_zone": "us-east-1a",
},
"docker": {
"enabled": True,
"image": "example",
"container_name": "mock",
},
"auth": {
"ssh_user": "ubuntu",
"ssh_private_key": os.devnull,
},
"head_node": {
"TestProp": 1,
},
"file_mounts": {},
"cluster_synced_files": [],
"initialization_commands": ["init_cmd"],
"setup_commands": ["setup_cmd"],
"head_setup_commands": ["head_setup_cmd"],
"worker_setup_commands": ["worker_setup_cmd"],
"head_start_commands": ["head_start_cmd"],
"worker_start_commands": ["worker_start_cmd"],
}
MOCK_DEFAULT_CONFIG = {
"cluster_name": "default",
"max_workers": 2,
"idle_timeout_minutes": 5,
"provider": {
"type": "mock",
"region": "us-east-1",
"availability_zone": "us-east-1a",
},
"docker": {
"enabled": True,
"image": "example",
"container_name": "mock",
},
"auth": {
"ssh_user": "ubuntu",
"ssh_private_key": os.devnull,
},
"available_node_types": {
"cloudtik.head.default": {
"resources": {},
"node_config": {
"head_default_prop": 4
}
},
"cloudtik.worker.default": {
"min_workers": 0,
"max_workers": 2,
"resources": {},
"node_config": {
"worker_default_prop": 7
}
}
},
"head_node_type": "cloudtik.head.default",
"head_node": {},
"file_mounts": {},
"cluster_synced_files": [],
"initialization_commands": [],
"setup_commands": [],
"head_setup_commands": [],
"worker_setup_commands": [],
"head_start_commands": [],
"worker_start_commands": [],
}
TYPES_A = {
"empty_node": {
"node_config": {
"FooProperty": 42,
},
"resources": {},
"max_workers": 0,
},
"m4.large": {
"node_config": {},
"resources": {
"CPU": 2
},
"max_workers": 10,
},
"m4.4xlarge": {
"node_config": {},
"resources": {
"CPU": 16
},
"max_workers": 8,
},
"m4.16xlarge": {
"node_config": {},
"resources": {
"CPU": 64
},
"max_workers": 4,
},
"p2.xlarge": {
"node_config": {},
"resources": {
"CPU": 16,
"GPU": 1
},
"max_workers": 10,
},
"p2.8xlarge": {
"node_config": {},
"resources": {
"CPU": 32,
"GPU": 8
},
"max_workers": 4,
},
}
MULTI_WORKER_CLUSTER = dict(
SMALL_CLUSTER, **{
"available_node_types": TYPES_A,
"head_node_type": "empty_node"
})
class ClusterMetricsTest(unittest.TestCase):
def testHeartbeat(self):
cluster_metrics = ClusterMetrics()
cluster_metrics.update("1.1.1.1", b'\xb6\x80\xbdw\xbd\x1c\xee\xf6@\x11', {"CPU": 2}, {"CPU": 1}, {})
cluster_metrics.mark_active("2.2.2.2")
assert "1.1.1.1" in cluster_metrics.last_heartbeat_time_by_ip
assert "2.2.2.2" in cluster_metrics.last_heartbeat_time_by_ip
assert "3.3.3.3" not in cluster_metrics.last_heartbeat_time_by_ip
class CloudTikTest(unittest.TestCase):
def setUp(self):
_NODE_PROVIDERS["mock"] = \
lambda config: self.create_provider
_DEFAULT_CONFIGS["mock"] = _DEFAULT_CONFIGS["aws"]
self.provider = None
self.tmpdir = tempfile.mkdtemp()
def waitFor(self, condition, num_retries=50, fail_msg=None):
for _ in range(num_retries):
if condition():
return
time.sleep(.1)
fail_msg = fail_msg or "Timed out waiting for {}".format(condition)
raise CloudTikTestTimeoutException(fail_msg)
def waitForNodes(self, expected, comparison=None, tag_filters=None):
if tag_filters is None:
tag_filters = {}
MAX_ITER = 50
for i in range(MAX_ITER):
n = len(self.provider.non_terminated_nodes(tag_filters))
if comparison is None:
comparison = self.assertEqual
try:
comparison(n, expected, msg="Unexpected node quantity.")
return
except Exception:
if i == MAX_ITER - 1:
raise
time.sleep(.1)
def create_provider(self, config, cluster_name):
assert self.provider
return self.provider
def write_config(self, config, call_prepare_config=True):
new_config = copy.deepcopy(config)
if call_prepare_config:
new_config = prepare_config(new_config)
path = os.path.join(self.tmpdir, "simple.yaml")
with open(path, "w") as f:
f.write(yaml.dump(new_config))
return path
def testValidateDefaultConfig(self):
config = {"provider": {
"type": "aws",
"region": "us-east-1",
"availability_zone": "us-east-1a",
}}
config = prepare_config(config)
try:
validate_config(config)
except ValidationError:
self.fail("Default config did not pass validation test!")
def testGetRunningHeadNode(self):
config = copy.deepcopy(SMALL_CLUSTER)
self.provider = MockProvider()
# Node 0 is failed.
self.provider.create_node({}, {
CLOUDTIK_TAG_CLUSTER_NAME: "default",
CLOUDTIK_TAG_NODE_KIND: "head",
CLOUDTIK_TAG_NODE_STATUS: "update-failed"
}, 1)
# `_allow_uninitialized_state` should return the head node
# in the `update-failed` state.
allow_failed = cluster_operator._get_running_head_node(
config,
_provider=self.provider,
_allow_uninitialized_state=True)
assert allow_failed == 0
# Node 1 is okay.
self.provider.create_node({}, {
CLOUDTIK_TAG_CLUSTER_NAME: "default",
CLOUDTIK_TAG_NODE_KIND: "head",
CLOUDTIK_TAG_NODE_STATUS: "up-to-date"
}, 1)
node = cluster_operator._get_running_head_node(
config,
_provider=self.provider)
assert node == 1
# `_allow_uninitialized_state` should return the up-to-date head node
# if it is present.
optionally_failed = cluster_operator._get_running_head_node(
config,
_provider=self.provider,
_allow_uninitialized_state=True)
assert optionally_failed == 1
def testDefaultMinMaxWorkers(self):
config = copy.deepcopy(MOCK_DEFAULT_CONFIG)
config = prepare_config(config)
node_types = config["available_node_types"]
head_node_config = node_types["cloudtik.head.default"]
assert head_node_config["min_workers"] == 0
assert head_node_config["max_workers"] == 0
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
| true
| true
|
1c46e01057545892b524898477fb51b8ed2373e5
| 1,140
|
py
|
Python
|
frida_mode/test/png/persistent/get_symbol_addr.py
|
hamzzi/AFLplusplus
|
95f47ac3a4d23b28a573a0614893d7aac5f5d4b4
|
[
"Apache-2.0"
] | 2,104
|
2020-03-19T16:17:10.000Z
|
2022-03-31T16:22:30.000Z
|
frida_mode/test/png/persistent/get_symbol_addr.py
|
hamzzi/AFLplusplus
|
95f47ac3a4d23b28a573a0614893d7aac5f5d4b4
|
[
"Apache-2.0"
] | 788
|
2020-03-19T14:54:09.000Z
|
2022-03-31T17:38:00.000Z
|
frida_mode/test/png/persistent/get_symbol_addr.py
|
hamzzi/AFLplusplus
|
95f47ac3a4d23b28a573a0614893d7aac5f5d4b4
|
[
"Apache-2.0"
] | 518
|
2020-03-21T01:24:55.000Z
|
2022-03-30T21:05:53.000Z
|
#!/usr/bin/python3
import argparse
from elftools.elf.elffile import ELFFile
def process_file(file, symbol, base):
with open(file, 'rb') as f:
elf = ELFFile(f)
symtab = elf.get_section_by_name('.symtab')
mains = symtab.get_symbol_by_name(symbol)
if len(mains) != 1:
print ("Failed to find main")
return 1
main_addr = mains[0]['st_value']
main = base + main_addr
print ("0x%016x" % main)
return 0
def hex_value(x):
return int(x, 16)
def main():
parser = argparse.ArgumentParser(description='Print the runtime address of an ELF symbol.')
parser.add_argument('-f', '--file', dest='file', type=str,
help='elf file name', required=True)
parser.add_argument('-s', '--symbol', dest='symbol', type=str,
help='symbol name', required=True)
parser.add_argument('-b', '--base', dest='base', type=hex_value,
help='elf base address', required=True)
args = parser.parse_args()
return process_file (args.file, args.symbol, args.base)
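# Example invocation (sketch; the binary path and base address are
# placeholders):
#   ./get_symbol_addr.py -f ./target.elf -s main -b 0x555555554000
# which prints base + st_value formatted as 0x%016x.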
if __name__ == "__main__":
ret = main()
exit(ret)
| 30.810811
| 74
| 0.598246
|
import argparse
from elftools.elf.elffile import ELFFile
def process_file(file, symbol, base):
with open(file, 'rb') as f:
elf = ELFFile(f)
symtab = elf.get_section_by_name('.symtab')
mains = symtab.get_symbol_by_name(symbol)
if len(mains) != 1:
print ("Failed to find main")
return 1
main_addr = mains[0]['st_value']
main = base + main_addr
print ("0x%016x" % main)
return 0
def hex_value(x):
return int(x, 16)
def main():
parser = argparse.ArgumentParser(description='Print the runtime address of an ELF symbol.')
parser.add_argument('-f', '--file', dest='file', type=str,
help='elf file name', required=True)
parser.add_argument('-s', '--symbol', dest='symbol', type=str,
help='symbol name', required=True)
parser.add_argument('-b', '--base', dest='base', type=hex_value,
help='elf base address', required=True)
args = parser.parse_args()
return process_file (args.file, args.symbol, args.base)
if __name__ == "__main__":
ret = main()
exit(ret)
| true
| true
|
1c46e1353606f6ac2e8eadd47d685475e3efc0f6
| 946
|
py
|
Python
|
crypto.py
|
Esshahn/cryptoticker
|
6fb32712e380cb2a0605bafcfa64fe7fdf0367b7
|
[
"MIT"
] | null | null | null |
crypto.py
|
Esshahn/cryptoticker
|
6fb32712e380cb2a0605bafcfa64fe7fdf0367b7
|
[
"MIT"
] | null | null | null |
crypto.py
|
Esshahn/cryptoticker
|
6fb32712e380cb2a0605bafcfa64fe7fdf0367b7
|
[
"MIT"
] | null | null | null |
# -------------------------------------------------
# Cryptoticker
# Python Script to get the current prices of crypto currencies
# and send an email with the current prices
# 2021 Ingo Hinterding
# https://github.com/Esshahn/cryptoticker
# -------------------------------------------------
from tracker import *
from downloader import *
# ------------------ downloader ------------------ #
config = load_json("user-data.json")
data = download_latest_crypto_data(config)
save_file("crypto-data.json", json.dumps(data))
# ------------------ tracker ------------------ #
crypto_all = load_json("crypto-data.json")
crypto = crypto_all["data"]
user_all = load_json("user-data.json")
symbols = user_all["symbols"]
portfolio = user_all["portfolio"]
email = load_json('email.json')
full_portfolio = create_portfolio(portfolio, crypto)
body = format_crypto_data(symbols, crypto)
body += format_portfolio(full_portfolio)
send_mail(body, email)
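# Shapes implied by the reads above (a sketch, not a documented schema):
#   user-data.json   : {"symbols": [...], "portfolio": {...}, ...}
#   crypto-data.json : {"data": ...} as written by the downloader step
#   email.json       : whatever credentials/recipients send_mail() consumes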
| 26.277778
| 62
| 0.620507
|
from tracker import *
from downloader import *
config = load_json("user-data.json")
data = download_latest_crypto_data(config)
save_file("crypto-data.json", json.dumps(data))
crypto_all = load_json("crypto-data.json")
crypto = crypto_all["data"]
user_all = load_json("user-data.json")
symbols = user_all["symbols"]
portfolio = user_all["portfolio"]
email = load_json('email.json')
full_portfolio = create_portfolio(portfolio, crypto)
body = format_crypto_data(symbols, crypto)
body += format_portfolio(full_portfolio)
send_mail(body, email)
| true
| true
|
1c46e16e22d0b4bc1b34d28281a937a613893ce7
| 27,393
|
py
|
Python
|
python/mxnet/base.py
|
ChrisQiqiang/mxnet-combination
|
015c02f8fa1b22133202e1c70488c439cd9e726d
|
[
"BSL-1.0",
"Apache-2.0"
] | null | null | null |
python/mxnet/base.py
|
ChrisQiqiang/mxnet-combination
|
015c02f8fa1b22133202e1c70488c439cd9e726d
|
[
"BSL-1.0",
"Apache-2.0"
] | null | null | null |
python/mxnet/base.py
|
ChrisQiqiang/mxnet-combination
|
015c02f8fa1b22133202e1c70488c439cd9e726d
|
[
"BSL-1.0",
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=invalid-name, no-member, trailing-comma-tuple, bad-mcs-classmethod-argument, unnecessary-pass, too-many-lines, wrong-import-position
"""ctypes library of mxnet and helper functions."""
from __future__ import absolute_import
import re
import atexit
import ctypes
import os
import sys
import inspect
import platform
import numpy as _np
from . import libinfo
__all__ = ['MXNetError']
#----------------------------
# library loading
#----------------------------
# pylint: disable=pointless-statement
try:
basestring
long
except NameError:
basestring = str
long = int
# pylint: enable=pointless-statement
integer_types = (int, long, _np.int32, _np.int64)
numeric_types = (float, int, long, _np.generic)
string_types = basestring,
if sys.version_info[0] > 2:
# this function is needed for python3
# to convert ctypes.char_p .value back to python str
py_str = lambda x: x.decode('utf-8')
else:
py_str = lambda x: x
def data_dir_default():
"""
:return: default data directory depending on the platform and environment variables
"""
system = platform.system()
if system == 'Windows':
return os.path.join(os.environ.get('APPDATA'), 'mxnet')
else:
return os.path.join(os.path.expanduser("~"), '.mxnet')
def data_dir():
"""
:return: data directory in the filesystem for storage, for example when downloading models
"""
return os.getenv('MXNET_HOME', data_dir_default())
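# e.g. with MXNET_HOME=/tmp/mxnet exported, data_dir() returns "/tmp/mxnet"
# instead of the platform default computed above.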
class _NullType(object):
"""Placeholder for arguments"""
def __repr__(self):
return '_Null'
_Null = _NullType()
class MXNetError(Exception):
"""Error that will be thrown by all mxnet functions."""
pass
class NotImplementedForSymbol(MXNetError):
"""Error: Not implemented for symbol"""
def __init__(self, function, alias, *args):
super(NotImplementedForSymbol, self).__init__()
self.function = function.__name__
self.alias = alias
self.args = [str(type(a)) for a in args]
def __str__(self):
msg = 'Function {}'.format(self.function)
if self.alias:
msg += ' (namely operator "{}")'.format(self.alias)
if self.args:
msg += ' with arguments ({})'.format(', '.join(self.args))
msg += ' is not implemented for Symbol and only available in NDArray.'
return msg
class NotSupportedForSparseNDArray(MXNetError):
"""Error: Not supported for SparseNDArray"""
def __init__(self, function, alias, *args):
super(NotSupportedForSparseNDArray, self).__init__()
self.function = function.__name__
self.alias = alias
self.args = [str(type(a)) for a in args]
def __str__(self):
msg = 'Function {}'.format(self.function)
if self.alias:
msg += ' (namely operator "{}")'.format(self.alias)
if self.args:
msg += ' with arguments ({})'.format(', '.join(self.args))
msg += ' is not supported for SparseNDArray and only available in NDArray.'
return msg
class MXCallbackList(ctypes.Structure):
"""Structure that holds Callback information. Passed to CustomOpProp."""
_fields_ = [
('num_callbacks', ctypes.c_int),
('callbacks', ctypes.POINTER(ctypes.CFUNCTYPE(ctypes.c_int))),
('contexts', ctypes.POINTER(ctypes.c_void_p))
]
# Please see: https://stackoverflow.com/questions/5189699/how-to-make-a-class-property
class _MXClassPropertyDescriptor(object):
def __init__(self, fget, fset=None):
self.fget = fget
self.fset = fset
def __get__(self, obj, clas=None):
if clas is None:
clas = type(obj)
return self.fget.__get__(obj, clas)()
def __set__(self, obj, value):
if not self.fset:
raise MXNetError("cannot use the setter: %s to set attribute" % obj.__name__)
if inspect.isclass(obj):
type_ = obj
obj = None
else:
type_ = type(obj)
return self.fset.__get__(obj, type_)(value)
def setter(self, func):
if not isinstance(func, (classmethod, staticmethod)):
func = classmethod(func)
self.fset = func
return self
class _MXClassPropertyMetaClass(type):
def __setattr__(cls, key, value):
obj = cls.__dict__.get(key)
if obj and isinstance(obj, _MXClassPropertyDescriptor):
return obj.__set__(cls, value)
return super(_MXClassPropertyMetaClass, cls).__setattr__(key, value)
# with_metaclass function obtained from: https://github.com/benjaminp/six/blob/master/six.py
# pylint: disable=unused-argument
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(type):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
@classmethod
def __prepare__(cls, name, this_bases):
return meta.__prepare__(name, bases)
return type.__new__(metaclass, 'temporary_class', (), {})
# pylint: enable=unused-argument
def classproperty(func):
if not isinstance(func, (classmethod, staticmethod)):
func = classmethod(func)
return _MXClassPropertyDescriptor(func)
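# Usage sketch (the class below is illustrative, not part of this module):
#   class Config(with_metaclass(_MXClassPropertyMetaClass, object)):
#       _flag = False
#       @classproperty
#       def flag(cls):
#           return cls._flag
#   Config.flag   # -> False, resolved through _MXClassPropertyDescriptor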
def _load_lib():
"""Load library by searching possible path."""
lib_path = libinfo.find_lib_path()
lib = ctypes.CDLL(lib_path[0], ctypes.RTLD_LOCAL)
# DMatrix functions
lib.MXGetLastError.restype = ctypes.c_char_p
return lib
# version number
__version__ = libinfo.__version__
# library instance of mxnet
_LIB = _load_lib()
# type definitions
mx_int = ctypes.c_int
mx_uint = ctypes.c_uint
mx_int64 = ctypes.c_int64
mx_float = ctypes.c_float
mx_float_p = ctypes.POINTER(mx_float)
mx_real_t = _np.float32
NDArrayHandle = ctypes.c_void_p
FunctionHandle = ctypes.c_void_p
OpHandle = ctypes.c_void_p
CachedOpHandle = ctypes.c_void_p
SymbolHandle = ctypes.c_void_p
ExecutorHandle = ctypes.c_void_p
DataIterCreatorHandle = ctypes.c_void_p
DataIterHandle = ctypes.c_void_p
KVStoreHandle = ctypes.c_void_p
RecordIOHandle = ctypes.c_void_p
RtcHandle = ctypes.c_void_p
CudaModuleHandle = ctypes.c_void_p
CudaKernelHandle = ctypes.c_void_p
ProfileHandle = ctypes.c_void_p
DLPackHandle = ctypes.c_void_p
#----------------------------
# helper function definition
#----------------------------
def check_call(ret):
"""Check the return value of C API call.
This function will raise an exception when an error occurs.
Wrap every API call with this function.
Parameters
----------
ret : int
return value from API calls.
"""
if ret != 0:
raise MXNetError(py_str(_LIB.MXGetLastError()))
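# Typical wrapping pattern: every raw C API call goes through check_call so a
# non-zero status surfaces as MXNetError, e.g.
#   check_call(_LIB.MXNotifyShutdown())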
if sys.version_info[0] < 3:
def c_str(string):
"""Create ctypes char * from a Python string.
Parameters
----------
string : string type
Python string.
Returns
-------
str : c_char_p
A char pointer that can be passed to C API.
Examples
--------
>>> x = mx.base.c_str("Hello, World")
>>> print x.value
Hello, World
"""
return ctypes.c_char_p(string)
def c_str_array(strings):
"""Create ctypes const char ** from a list of Python strings.
Parameters
----------
strings : list of string
Python strings.
Returns
-------
(ctypes.c_char_p * len(strings))
A const char ** pointer that can be passed to C API.
"""
arr = (ctypes.c_char_p * len(strings))()
arr[:] = strings
return arr
else:
def c_str(string):
"""Create ctypes char * from a Python string.
Parameters
----------
string : string type
Python string.
Returns
-------
str : c_char_p
A char pointer that can be passed to C API.
Examples
--------
>>> x = mx.base.c_str("Hello, World")
>>> print(x.value)
b"Hello, World"
"""
return ctypes.c_char_p(string.encode('utf-8'))
def c_str_array(strings):
"""Create ctypes const char ** from a list of Python strings.
Parameters
----------
strings : list of string
Python strings.
Returns
-------
(ctypes.c_char_p * len(strings))
A const char ** pointer that can be passed to C API.
"""
arr = (ctypes.c_char_p * len(strings))()
arr[:] = [s.encode('utf-8') for s in strings]
return arr
def c_array(ctype, values):
"""Create ctypes array from a Python array.
Parameters
----------
ctype : ctypes data type
Data type of the array we want to convert to, such as mx_float.
values : tuple or list
Data content.
Returns
-------
out : ctypes array
Created ctypes array.
Examples
--------
>>> x = mx.base.c_array(mx.base.mx_float, [1, 2, 3])
>>> print len(x)
3
>>> x[1]
2.0
"""
out = (ctype * len(values))()
out[:] = values
return out
def c_array_buf(ctype, buf):
"""Create ctypes array from a Python buffer.
For primitive types, using the buffer created with array.array is faster
than a c_array call.
Parameters
----------
ctype : ctypes data type
Data type of the array we want to convert to, such as mx_float.
buf : buffer type
Data content.
Returns
-------
out : ctypes array
Created ctypes array.
Examples
--------
>>> x = mx.base.c_array_buf(mx.base.mx_float, array.array('i', [1, 2, 3]))
>>> print len(x)
3
>>> x[1]
2.0
"""
return (ctype * len(buf)).from_buffer(buf)
def c_handle_array(objs):
"""Create ctypes const void ** from a list of MXNet objects with handles.
Parameters
----------
objs : list of NDArray/Symbol.
MXNet objects.
Returns
-------
(ctypes.c_void_p * len(objs))
A void ** pointer that can be passed to C API.
"""
arr = (ctypes.c_void_p * len(objs))()
arr[:] = [o.handle for o in objs]
return arr
def ctypes2buffer(cptr, length):
"""Convert ctypes pointer to buffer type.
Parameters
----------
cptr : ctypes.POINTER(ctypes.c_char)
Pointer to the raw memory region.
length : int
The length of the buffer.
Returns
-------
buffer : bytearray
The raw byte memory buffer.
"""
if not isinstance(cptr, ctypes.POINTER(ctypes.c_char)):
raise TypeError('expected char pointer')
res = bytearray(length)
rptr = (ctypes.c_char * length).from_buffer(res)
if not ctypes.memmove(rptr, cptr, length):
raise RuntimeError('memmove failed')
return res
def ctypes2numpy_shared(cptr, shape):
"""Convert a ctypes pointer to a numpy array.
The resulting NumPy array shares the memory with the pointer.
Parameters
----------
cptr : ctypes.POINTER(mx_float)
pointer to the memory region
shape : tuple
Shape of target `NDArray`.
Returns
-------
out : numpy_array
A numpy array : numpy array.
"""
if not isinstance(cptr, ctypes.POINTER(mx_float)):
raise RuntimeError('expected float pointer')
size = 1
for s in shape:
size *= s
dbuffer = (mx_float * size).from_address(ctypes.addressof(cptr.contents))
return _np.frombuffer(dbuffer, dtype=_np.float32).reshape(shape)
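# e.g. (sketch): for buf = (mx_float * 6)(1, 2, 3, 4, 5, 6) and
#   cptr = ctypes.cast(buf, ctypes.POINTER(mx_float)),
# ctypes2numpy_shared(cptr, (2, 3)) returns a 2x3 float32 array that shares
# (does not copy) the buffer's memory.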
def build_param_doc(arg_names, arg_types, arg_descs, remove_dup=True):
"""Build argument docs in python style.
arg_names : list of str
Argument names.
arg_types : list of str
Argument type information.
arg_descs : list of str
Argument description information.
remove_dup : boolean, optional
Whether remove duplication or not.
Returns
-------
docstr : str
Python docstring of parameter sections.
"""
param_keys = set()
param_str = []
for key, type_info, desc in zip(arg_names, arg_types, arg_descs):
if key in param_keys and remove_dup:
continue
if key == 'num_args':
continue
param_keys.add(key)
ret = '%s : %s' % (key, type_info)
if len(desc) != 0:
ret += '\n ' + desc
param_str.append(ret)
doc_str = ('Parameters\n' +
'----------\n' +
'%s\n')
doc_str = doc_str % ('\n'.join(param_str))
return doc_str
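# e.g. build_param_doc(['data'], ['NDArray'], ['Input data.']) returns
# "Parameters\n----------\ndata : NDArray\n    Input data.\n".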
def _notify_shutdown():
"""Notify MXNet about a shutdown."""
check_call(_LIB.MXNotifyShutdown())
atexit.register(_notify_shutdown)
def add_fileline_to_docstring(module, incursive=True):
"""Append the definition position to each function contained in module.
Examples
--------
# Put the following code at the end of a file
add_fileline_to_docstring(__name__)
"""
def _add_fileline(obj):
"""Add fileinto to a object.
"""
if obj.__doc__ is None or 'From:' in obj.__doc__:
return
fname = inspect.getsourcefile(obj)
if fname is None:
return
try:
line = inspect.getsourcelines(obj)[-1]
except IOError:
return
obj.__doc__ += '\n\nFrom:%s:%d' % (fname, line)
if isinstance(module, str):
module = sys.modules[module]
for _, obj in inspect.getmembers(module):
if inspect.isbuiltin(obj):
continue
if inspect.isfunction(obj):
_add_fileline(obj)
if inspect.ismethod(obj):
_add_fileline(obj.__func__)
if inspect.isclass(obj) and incursive:
add_fileline_to_docstring(obj, False)
def _as_list(obj):
"""A utility function that converts the argument to a list if it is not already.
Parameters
----------
obj : object
Returns
-------
If `obj` is a list or tuple, return it. Otherwise, return `[obj]` as a
single-element list.
"""
if isinstance(obj, (list, tuple)):
return obj
else:
return [obj]
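# e.g. _as_list(1) -> [1], while _as_list([1, 2]) and _as_list((1, 2)) are
# returned unchanged.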
_OP_NAME_PREFIX_LIST = ['_contrib_', '_linalg_', '_sparse_', '_image_', '_random_']
def _get_op_name_prefix(op_name):
"""
Check whether the given op_name starts with any words in `_OP_NAME_PREFIX_LIST`.
If found, return the prefix; else, return an empty string.
"""
for prefix in _OP_NAME_PREFIX_LIST:
if op_name.startswith(prefix):
return prefix
return ""
# pylint: enable=invalid-name
def _init_op_module(root_namespace, module_name, make_op_func):
"""
Registers op functions created by `make_op_func` under
`root_namespace.module_name.[submodule_name]`,
where `submodule_name` is one of `_OP_SUBMODULE_NAME_LIST`.
Parameters
----------
root_namespace : str
Top level module name, `mxnet` in the current cases.
module_name : str
Second level module name, `ndarray` and `symbol` in the current cases.
make_op_func : function
Function for creating op functions for `ndarray` and `symbol` modules.
"""
plist = ctypes.POINTER(ctypes.c_char_p)()
size = ctypes.c_uint()
check_call(_LIB.MXListAllOpNames(ctypes.byref(size),
ctypes.byref(plist)))
op_names = []
for i in range(size.value):
op_name = py_str(plist[i])
if not _is_np_op(op_name):
op_names.append(op_name)
module_op = sys.modules["%s.%s.op" % (root_namespace, module_name)]
module_internal = sys.modules["%s.%s._internal" % (root_namespace, module_name)]
# contrib module in the old format (deprecated)
# kept here for backward compatibility
# use mx.nd.contrib or mx.sym.contrib from now on
contrib_module_name_old = "%s.contrib.%s" % (root_namespace, module_name)
contrib_module_old = sys.modules[contrib_module_name_old]
submodule_dict = {}
for op_name_prefix in _OP_NAME_PREFIX_LIST:
submodule_dict[op_name_prefix] =\
sys.modules["%s.%s.%s" % (root_namespace, module_name, op_name_prefix[1:-1])]
for name in op_names:
hdl = OpHandle()
check_call(_LIB.NNGetOpHandle(c_str(name), ctypes.byref(hdl)))
op_name_prefix = _get_op_name_prefix(name)
module_name_local = module_name
if len(op_name_prefix) > 0:
if op_name_prefix != '_random_' or name.endswith('_like'):
func_name = name[len(op_name_prefix):]
cur_module = submodule_dict[op_name_prefix]
module_name_local = "%s.%s.%s" % (root_namespace, module_name, op_name_prefix[1:-1])
else:
func_name = name
cur_module = module_internal
elif name.startswith('_'):
func_name = name
cur_module = module_internal
else:
func_name = name
cur_module = module_op
function = make_op_func(hdl, name, func_name)
function.__module__ = module_name_local
setattr(cur_module, function.__name__, function)
cur_module.__all__.append(function.__name__)
if op_name_prefix == '_contrib_':
hdl = OpHandle()
check_call(_LIB.NNGetOpHandle(c_str(name), ctypes.byref(hdl)))
func_name = name[len(op_name_prefix):]
function = make_op_func(hdl, name, func_name)
function.__module__ = contrib_module_name_old
setattr(contrib_module_old, function.__name__, function)
contrib_module_old.__all__.append(function.__name__)
def _generate_op_module_signature(root_namespace, module_name, op_code_gen_func):
"""
Generate op functions created by `op_code_gen_func` and write to the source file
of `root_namespace.module_name.[submodule_name]`,
where `submodule_name` is one of `_OP_SUBMODULE_NAME_LIST`.
Parameters
----------
root_namespace : str
Top level module name, `mxnet` in the current cases.
module_name : str
Second level module name, `ndarray` and `symbol` in the current cases.
op_code_gen_func : function
Function for creating op functions for `ndarray` and `symbol` modules.
"""
def get_module_file(module_name):
"""Return the generated module file based on module name."""
path = os.path.dirname(__file__)
module_path = module_name.split('.')
module_path[-1] = 'gen_' + module_path[-1]
file_name = os.path.join(path, '..', *module_path) + '.py'
module_file = open(file_name, 'w', encoding="utf-8")
dependencies = {'symbol': ['from ._internal import SymbolBase',
'from ..base import _Null'],
'ndarray': ['from ._internal import NDArrayBase',
'from ..base import _Null']}
module_file.write('# coding: utf-8' + os.linesep)
module_file.write('# File content is auto-generated. Do not modify.' + os.linesep)
module_file.write('# pylint: skip-file' + os.linesep)
module_file.write(os.linesep.join(dependencies[module_name.split('.')[1]]))
return module_file
def write_all_str(module_file, module_all_list):
"""Write the proper __all__ based on available operators."""
module_file.write(os.linesep)
module_file.write(os.linesep)
all_str = '__all__ = [' + ', '.join(["'%s'"%s for s in module_all_list]) + ']'
module_file.write(all_str)
plist = ctypes.POINTER(ctypes.c_char_p)()
size = ctypes.c_uint()
check_call(_LIB.MXListAllOpNames(ctypes.byref(size),
ctypes.byref(plist)))
op_names = []
for i in range(size.value):
op_name = py_str(plist[i])
if not _is_np_op(op_name):
op_names.append(op_name)
module_op_file = get_module_file("%s.%s.op" % (root_namespace, module_name))
module_op_all = []
module_internal_file = get_module_file("%s.%s._internal"%(root_namespace, module_name))
module_internal_all = []
submodule_dict = {}
for op_name_prefix in _OP_NAME_PREFIX_LIST:
submodule_dict[op_name_prefix] =\
(get_module_file("%s.%s.%s" % (root_namespace, module_name,
op_name_prefix[1:-1])), [])
for name in op_names:
hdl = OpHandle()
check_call(_LIB.NNGetOpHandle(c_str(name), ctypes.byref(hdl)))
op_name_prefix = _get_op_name_prefix(name)
if len(op_name_prefix) > 0:
func_name = name[len(op_name_prefix):]
cur_module_file, cur_module_all = submodule_dict[op_name_prefix]
elif name.startswith('_'):
func_name = name
cur_module_file = module_internal_file
cur_module_all = module_internal_all
else:
func_name = name
cur_module_file = module_op_file
cur_module_all = module_op_all
code, _ = op_code_gen_func(hdl, name, func_name, True)
cur_module_file.write(os.linesep)
cur_module_file.write(code)
cur_module_all.append(func_name)
for (submodule_f, submodule_all) in submodule_dict.values():
write_all_str(submodule_f, submodule_all)
submodule_f.close()
write_all_str(module_op_file, module_op_all)
module_op_file.close()
write_all_str(module_internal_file, module_internal_all)
module_internal_file.close()
ctypes.pythonapi.PyCapsule_New.restype = ctypes.py_object
ctypes.pythonapi.PyCapsule_GetPointer.restype = ctypes.c_void_p
_NP_OP_PREFIX = '_np_'
_NP_OP_SUBMODULE_LIST = ['_random_', '_linalg_']
_NP_EXT_OP_PREFIX = '_npx_'
_NP_EXT_OP_SUBMODULE_LIST = ['_image_']
_NP_INTERNAL_OP_PREFIX = '_npi_'
def _is_np_op(op_name):
return op_name.startswith(_NP_OP_PREFIX) or op_name.startswith(_NP_EXT_OP_PREFIX)\
or op_name.startswith(_NP_INTERNAL_OP_PREFIX)
def _get_op_submodule_name(op_name, op_name_prefix, submodule_name_list):
"""Get the submodule name of a specific op"""
assert op_name.startswith(op_name_prefix)
for submodule_name in submodule_name_list:
if op_name[len(op_name_prefix):].startswith(submodule_name):
return submodule_name
return ""
def _init_np_op_module(root_module_name, np_module_name, mx_module_name, make_op_func):
"""
Register numpy operators in namespaces `mxnet.numpy`, `mxnet.ndarray.numpy`
and `mxnet.symbol.numpy`. They are used in imperative mode, Gluon APIs w/o hybridization,
and Gluon APIs w/ hybridization, respectively. Essentially, operators with the same name
registered in the three namespaces share the same functionality in the C++ backend.
Different namespaces are needed for dispatching operator calls in Gluon's `HybridBlock` by `F`.
Parameters
----------
root_module_name : str
Top level module name, `mxnet` in the current cases.
np_module_name : str
Second level module name, `numpy` or `numpy_extension` in the current case.
make_op_func : function
Function for creating op functions.
"""
from . import _numpy_op_doc as _np_op_doc
if np_module_name == 'numpy':
op_name_prefix = _NP_OP_PREFIX
submodule_name_list = _NP_OP_SUBMODULE_LIST
elif np_module_name == 'numpy_extension':
op_name_prefix = _NP_EXT_OP_PREFIX
submodule_name_list = _NP_EXT_OP_SUBMODULE_LIST
elif np_module_name == 'numpy._internal':
op_name_prefix = _NP_INTERNAL_OP_PREFIX
submodule_name_list = []
else:
raise ValueError('unsupported np module name {}'.format(np_module_name))
plist = ctypes.POINTER(ctypes.c_char_p)()
size = ctypes.c_uint()
check_call(_LIB.MXListAllOpNames(ctypes.byref(size), ctypes.byref(plist)))
op_names = []
for i in range(size.value):
name = py_str(plist[i])
if name.startswith(op_name_prefix):
op_names.append(name)
if mx_module_name is None:
# register np/npx ops for imperative programming
op_module_name = "%s.%s._op" % (root_module_name, np_module_name) # e.g. mxnet.numpy._op
op_submodule_name = "%s.%s" % (root_module_name, np_module_name) # e.g. mxnet.numpy.random
elif mx_module_name in ('ndarray', 'symbol'):
# register numpy internal ops and np/npx ops for use in Gluon
# np internal ops are registered in mxnet.ndarray/symbol.numpy._internal
# np ops are registered in mxnet.ndarray/symbol.numpy._op
# npx ops are registered in mxnet.ndarray/symbol.numpy_extension._op
op_module_name = "%s.%s.%s" % (root_module_name, mx_module_name, np_module_name)
if op_name_prefix != _NP_INTERNAL_OP_PREFIX:
op_module_name += '._op'
# e.g. mxnet.symbol.numpy.random
op_submodule_name = "%s.%s.%s" % (root_module_name, mx_module_name, np_module_name)
else:
raise ValueError('unsupported mxnet module {}'.format(mx_module_name))
op_submodule_name += '.%s'
op_module = sys.modules[op_module_name]
submodule_dict = {}
for submodule_name in submodule_name_list:
submodule_dict[submodule_name] = sys.modules[op_submodule_name % submodule_name[1:-1]]
for name in op_names:
hdl = OpHandle()
check_call(_LIB.NNGetOpHandle(c_str(name), ctypes.byref(hdl)))
submodule_name = _get_op_submodule_name(name, op_name_prefix, submodule_name_list)
if len(submodule_name) > 0:
func_name = name[(len(op_name_prefix) + len(submodule_name)):]
cur_module = submodule_dict[submodule_name]
module_name_local = op_submodule_name % submodule_name[1:-1]
else:
func_name = name[len(op_name_prefix):]
cur_module = op_module
module_name_local =\
op_module_name[:-len('._op')] if op_module_name.endswith('._op') else op_module_name
function = make_op_func(hdl, name, func_name)
function.__module__ = module_name_local
setattr(cur_module, function.__name__, function)
cur_module.__all__.append(function.__name__)
if hasattr(_np_op_doc, name):
function.__doc__ = getattr(_np_op_doc, name).__doc__
else:
function.__doc__ = re.sub('NDArray', 'ndarray', function.__doc__)
| 32.113716
| 150
| 0.639032
|
from __future__ import absolute_import
import re
import atexit
import ctypes
import os
import sys
import inspect
import platform
import numpy as _np
from . import libinfo
__all__ = ['MXNetError']
try:
basestring
long
except NameError:
basestring = str
long = int
integer_types = (int, long, _np.int32, _np.int64)
numeric_types = (float, int, long, _np.generic)
string_types = basestring,
if sys.version_info[0] > 2:
py_str = lambda x: x.decode('utf-8')
else:
py_str = lambda x: x
def data_dir_default():
system = platform.system()
if system == 'Windows':
return os.path.join(os.environ.get('APPDATA'), 'mxnet')
else:
return os.path.join(os.path.expanduser("~"), '.mxnet')
def data_dir():
return os.getenv('MXNET_HOME', data_dir_default())
class _NullType(object):
def __repr__(self):
return '_Null'
_Null = _NullType()
class MXNetError(Exception):
pass
class NotImplementedForSymbol(MXNetError):
def __init__(self, function, alias, *args):
super(NotImplementedForSymbol, self).__init__()
self.function = function.__name__
self.alias = alias
self.args = [str(type(a)) for a in args]
def __str__(self):
msg = 'Function {}'.format(self.function)
if self.alias:
msg += ' (namely operator "{}")'.format(self.alias)
if self.args:
msg += ' with arguments ({})'.format(', '.join(self.args))
msg += ' is not implemented for Symbol and only available in NDArray.'
return msg
class NotSupportedForSparseNDArray(MXNetError):
def __init__(self, function, alias, *args):
super(NotSupportedForSparseNDArray, self).__init__()
self.function = function.__name__
self.alias = alias
self.args = [str(type(a)) for a in args]
def __str__(self):
msg = 'Function {}'.format(self.function)
if self.alias:
msg += ' (namely operator "{}")'.format(self.alias)
if self.args:
msg += ' with arguments ({})'.format(', '.join(self.args))
msg += ' is not supported for SparseNDArray and only available in NDArray.'
return msg
class MXCallbackList(ctypes.Structure):
_fields_ = [
('num_callbacks', ctypes.c_int),
('callbacks', ctypes.POINTER(ctypes.CFUNCTYPE(ctypes.c_int))),
('contexts', ctypes.POINTER(ctypes.c_void_p))
]
class _MXClassPropertyDescriptor(object):
def __init__(self, fget, fset=None):
self.fget = fget
self.fset = fset
def __get__(self, obj, clas=None):
if clas is None:
clas = type(obj)
return self.fget.__get__(obj, clas)()
def __set__(self, obj, value):
if not self.fset:
raise MXNetError("cannot use the setter: %s to set attribute" % obj.__name__)
if inspect.isclass(obj):
type_ = obj
obj = None
else:
type_ = type(obj)
return self.fset.__get__(obj, type_)(value)
def setter(self, func):
if not isinstance(func, (classmethod, staticmethod)):
func = classmethod(func)
self.fset = func
return self
class _MXClassPropertyMetaClass(type):
def __setattr__(cls, key, value):
obj = cls.__dict__.get(key)
if obj and isinstance(obj, _MXClassPropertyDescriptor):
return obj.__set__(cls, value)
return super(_MXClassPropertyMetaClass, cls).__setattr__(key, value)
def with_metaclass(meta, *bases):
class metaclass(type):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
@classmethod
def __prepare__(cls, name, this_bases):
return meta.__prepare__(name, bases)
return type.__new__(metaclass, 'temporary_class', (), {})
def classproperty(func):
if not isinstance(func, (classmethod, staticmethod)):
func = classmethod(func)
return _MXClassPropertyDescriptor(func)
def _load_lib():
lib_path = libinfo.find_lib_path()
lib = ctypes.CDLL(lib_path[0], ctypes.RTLD_LOCAL)
lib.MXGetLastError.restype = ctypes.c_char_p
return lib
__version__ = libinfo.__version__
_LIB = _load_lib()
mx_int = ctypes.c_int
mx_uint = ctypes.c_uint
mx_int64 = ctypes.c_int64
mx_float = ctypes.c_float
mx_float_p = ctypes.POINTER(mx_float)
mx_real_t = _np.float32
NDArrayHandle = ctypes.c_void_p
FunctionHandle = ctypes.c_void_p
OpHandle = ctypes.c_void_p
CachedOpHandle = ctypes.c_void_p
SymbolHandle = ctypes.c_void_p
ExecutorHandle = ctypes.c_void_p
DataIterCreatorHandle = ctypes.c_void_p
DataIterHandle = ctypes.c_void_p
KVStoreHandle = ctypes.c_void_p
RecordIOHandle = ctypes.c_void_p
RtcHandle = ctypes.c_void_p
CudaModuleHandle = ctypes.c_void_p
CudaKernelHandle = ctypes.c_void_p
ProfileHandle = ctypes.c_void_p
DLPackHandle = ctypes.c_void_p
def check_call(ret):
if ret != 0:
raise MXNetError(py_str(_LIB.MXGetLastError()))
if sys.version_info[0] < 3:
def c_str(string):
return ctypes.c_char_p(string)
def c_str_array(strings):
arr = (ctypes.c_char_p * len(strings))()
arr[:] = strings
return arr
else:
def c_str(string):
"""Create ctypes char * from a Python string.
Parameters
----------
string : string type
Python string.
Returns
-------
str : c_char_p
A char pointer that can be passed to C API.
Examples
--------
>>> x = mx.base.c_str("Hello, World")
>>> print(x.value)
b"Hello, World"
"""
return ctypes.c_char_p(string.encode('utf-8'))
def c_str_array(strings):
"""Create ctypes const char ** from a list of Python strings.
Parameters
----------
strings : list of string
Python strings.
Returns
-------
(ctypes.c_char_p * len(strings))
A const char ** pointer that can be passed to C API.
"""
arr = (ctypes.c_char_p * len(strings))()
arr[:] = [s.encode('utf-8') for s in strings]
return arr
def c_array(ctype, values):
out = (ctype * len(values))()
out[:] = values
return out
def c_array_buf(ctype, buf):
return (ctype * len(buf)).from_buffer(buf)
def c_handle_array(objs):
arr = (ctypes.c_void_p * len(objs))()
arr[:] = [o.handle for o in objs]
return arr
def ctypes2buffer(cptr, length):
if not isinstance(cptr, ctypes.POINTER(ctypes.c_char)):
raise TypeError('expected char pointer')
res = bytearray(length)
rptr = (ctypes.c_char * length).from_buffer(res)
if not ctypes.memmove(rptr, cptr, length):
raise RuntimeError('memmove failed')
return res
def ctypes2numpy_shared(cptr, shape):
if not isinstance(cptr, ctypes.POINTER(mx_float)):
raise RuntimeError('expected float pointer')
size = 1
for s in shape:
size *= s
dbuffer = (mx_float * size).from_address(ctypes.addressof(cptr.contents))
return _np.frombuffer(dbuffer, dtype=_np.float32).reshape(shape)
def build_param_doc(arg_names, arg_types, arg_descs, remove_dup=True):
param_keys = set()
param_str = []
for key, type_info, desc in zip(arg_names, arg_types, arg_descs):
if key in param_keys and remove_dup:
continue
if key == 'num_args':
continue
param_keys.add(key)
ret = '%s : %s' % (key, type_info)
if len(desc) != 0:
ret += '\n ' + desc
param_str.append(ret)
doc_str = ('Parameters\n' +
'----------\n' +
'%s\n')
doc_str = doc_str % ('\n'.join(param_str))
return doc_str
def _notify_shutdown():
check_call(_LIB.MXNotifyShutdown())
atexit.register(_notify_shutdown)
def add_fileline_to_docstring(module, incursive=True):
def _add_fileline(obj):
if obj.__doc__ is None or 'From:' in obj.__doc__:
return
fname = inspect.getsourcefile(obj)
if fname is None:
return
try:
line = inspect.getsourcelines(obj)[-1]
except IOError:
return
obj.__doc__ += '\n\nFrom:%s:%d' % (fname, line)
if isinstance(module, str):
module = sys.modules[module]
for _, obj in inspect.getmembers(module):
if inspect.isbuiltin(obj):
continue
if inspect.isfunction(obj):
_add_fileline(obj)
if inspect.ismethod(obj):
_add_fileline(obj.__func__)
if inspect.isclass(obj) and incursive:
add_fileline_to_docstring(obj, False)
def _as_list(obj):
if isinstance(obj, (list, tuple)):
return obj
else:
return [obj]
_OP_NAME_PREFIX_LIST = ['_contrib_', '_linalg_', '_sparse_', '_image_', '_random_']
def _get_op_name_prefix(op_name):
for prefix in _OP_NAME_PREFIX_LIST:
if op_name.startswith(prefix):
return prefix
return ""
def _init_op_module(root_namespace, module_name, make_op_func):
plist = ctypes.POINTER(ctypes.c_char_p)()
size = ctypes.c_uint()
check_call(_LIB.MXListAllOpNames(ctypes.byref(size),
ctypes.byref(plist)))
op_names = []
for i in range(size.value):
op_name = py_str(plist[i])
if not _is_np_op(op_name):
op_names.append(op_name)
module_op = sys.modules["%s.%s.op" % (root_namespace, module_name)]
module_internal = sys.modules["%s.%s._internal" % (root_namespace, module_name)]
contrib_module_name_old = "%s.contrib.%s" % (root_namespace, module_name)
contrib_module_old = sys.modules[contrib_module_name_old]
submodule_dict = {}
for op_name_prefix in _OP_NAME_PREFIX_LIST:
submodule_dict[op_name_prefix] =\
sys.modules["%s.%s.%s" % (root_namespace, module_name, op_name_prefix[1:-1])]
for name in op_names:
hdl = OpHandle()
check_call(_LIB.NNGetOpHandle(c_str(name), ctypes.byref(hdl)))
op_name_prefix = _get_op_name_prefix(name)
module_name_local = module_name
if len(op_name_prefix) > 0:
if op_name_prefix != '_random_' or name.endswith('_like'):
func_name = name[len(op_name_prefix):]
cur_module = submodule_dict[op_name_prefix]
module_name_local = "%s.%s.%s" % (root_namespace, module_name, op_name_prefix[1:-1])
else:
func_name = name
cur_module = module_internal
elif name.startswith('_'):
func_name = name
cur_module = module_internal
else:
func_name = name
cur_module = module_op
function = make_op_func(hdl, name, func_name)
function.__module__ = module_name_local
setattr(cur_module, function.__name__, function)
cur_module.__all__.append(function.__name__)
if op_name_prefix == '_contrib_':
hdl = OpHandle()
check_call(_LIB.NNGetOpHandle(c_str(name), ctypes.byref(hdl)))
func_name = name[len(op_name_prefix):]
function = make_op_func(hdl, name, func_name)
function.__module__ = contrib_module_name_old
setattr(contrib_module_old, function.__name__, function)
contrib_module_old.__all__.append(function.__name__)
def _generate_op_module_signature(root_namespace, module_name, op_code_gen_func):
def get_module_file(module_name):
path = os.path.dirname(__file__)
module_path = module_name.split('.')
module_path[-1] = 'gen_' + module_path[-1]
file_name = os.path.join(path, '..', *module_path) + '.py'
module_file = open(file_name, 'w', encoding="utf-8")
dependencies = {'symbol': ['from ._internal import SymbolBase',
'from ..base import _Null'],
'ndarray': ['from ._internal import NDArrayBase',
'from ..base import _Null']}
        module_file.write('# coding: utf-8' + os.linesep)
module_file.write('# File content is auto-generated. Do not modify.' + os.linesep)
module_file.write('# pylint: skip-file' + os.linesep)
module_file.write(os.linesep.join(dependencies[module_name.split('.')[1]]))
return module_file
def write_all_str(module_file, module_all_list):
module_file.write(os.linesep)
module_file.write(os.linesep)
all_str = '__all__ = [' + ', '.join(["'%s'"%s for s in module_all_list]) + ']'
module_file.write(all_str)
plist = ctypes.POINTER(ctypes.c_char_p)()
size = ctypes.c_uint()
check_call(_LIB.MXListAllOpNames(ctypes.byref(size),
ctypes.byref(plist)))
op_names = []
for i in range(size.value):
op_name = py_str(plist[i])
if not _is_np_op(op_name):
op_names.append(op_name)
module_op_file = get_module_file("%s.%s.op" % (root_namespace, module_name))
module_op_all = []
module_internal_file = get_module_file("%s.%s._internal"%(root_namespace, module_name))
module_internal_all = []
submodule_dict = {}
for op_name_prefix in _OP_NAME_PREFIX_LIST:
submodule_dict[op_name_prefix] =\
(get_module_file("%s.%s.%s" % (root_namespace, module_name,
op_name_prefix[1:-1])), [])
for name in op_names:
hdl = OpHandle()
check_call(_LIB.NNGetOpHandle(c_str(name), ctypes.byref(hdl)))
op_name_prefix = _get_op_name_prefix(name)
if len(op_name_prefix) > 0:
func_name = name[len(op_name_prefix):]
cur_module_file, cur_module_all = submodule_dict[op_name_prefix]
elif name.startswith('_'):
func_name = name
cur_module_file = module_internal_file
cur_module_all = module_internal_all
else:
func_name = name
cur_module_file = module_op_file
cur_module_all = module_op_all
code, _ = op_code_gen_func(hdl, name, func_name, True)
cur_module_file.write(os.linesep)
cur_module_file.write(code)
cur_module_all.append(func_name)
for (submodule_f, submodule_all) in submodule_dict.values():
write_all_str(submodule_f, submodule_all)
submodule_f.close()
write_all_str(module_op_file, module_op_all)
module_op_file.close()
write_all_str(module_internal_file, module_internal_all)
module_internal_file.close()
ctypes.pythonapi.PyCapsule_New.restype = ctypes.py_object
ctypes.pythonapi.PyCapsule_GetPointer.restype = ctypes.c_void_p
_NP_OP_PREFIX = '_np_'
_NP_OP_SUBMODULE_LIST = ['_random_', '_linalg_']
_NP_EXT_OP_PREFIX = '_npx_'
_NP_EXT_OP_SUBMODULE_LIST = ['_image_']
_NP_INTERNAL_OP_PREFIX = '_npi_'
def _is_np_op(op_name):
return op_name.startswith(_NP_OP_PREFIX) or op_name.startswith(_NP_EXT_OP_PREFIX)\
or op_name.startswith(_NP_INTERNAL_OP_PREFIX)
def _get_op_submodule_name(op_name, op_name_prefix, submodule_name_list):
assert op_name.startswith(op_name_prefix)
for submodule_name in submodule_name_list:
if op_name[len(op_name_prefix):].startswith(submodule_name):
return submodule_name
return ""
def _init_np_op_module(root_module_name, np_module_name, mx_module_name, make_op_func):
from . import _numpy_op_doc as _np_op_doc
if np_module_name == 'numpy':
op_name_prefix = _NP_OP_PREFIX
submodule_name_list = _NP_OP_SUBMODULE_LIST
elif np_module_name == 'numpy_extension':
op_name_prefix = _NP_EXT_OP_PREFIX
submodule_name_list = _NP_EXT_OP_SUBMODULE_LIST
elif np_module_name == 'numpy._internal':
op_name_prefix = _NP_INTERNAL_OP_PREFIX
submodule_name_list = []
else:
raise ValueError('unsupported np module name {}'.format(np_module_name))
plist = ctypes.POINTER(ctypes.c_char_p)()
size = ctypes.c_uint()
check_call(_LIB.MXListAllOpNames(ctypes.byref(size), ctypes.byref(plist)))
op_names = []
for i in range(size.value):
name = py_str(plist[i])
if name.startswith(op_name_prefix):
op_names.append(name)
if mx_module_name is None:
op_module_name = "%s.%s._op" % (root_module_name, np_module_name)
op_submodule_name = "%s.%s" % (root_module_name, np_module_name)
elif mx_module_name in ('ndarray', 'symbol'):
op_module_name = "%s.%s.%s" % (root_module_name, mx_module_name, np_module_name)
if op_name_prefix != _NP_INTERNAL_OP_PREFIX:
op_module_name += '._op'
op_submodule_name = "%s.%s.%s" % (root_module_name, mx_module_name, np_module_name)
else:
raise ValueError('unsupported mxnet module {}'.format(mx_module_name))
op_submodule_name += '.%s'
op_module = sys.modules[op_module_name]
submodule_dict = {}
for submodule_name in submodule_name_list:
submodule_dict[submodule_name] = sys.modules[op_submodule_name % submodule_name[1:-1]]
for name in op_names:
hdl = OpHandle()
check_call(_LIB.NNGetOpHandle(c_str(name), ctypes.byref(hdl)))
submodule_name = _get_op_submodule_name(name, op_name_prefix, submodule_name_list)
if len(submodule_name) > 0:
func_name = name[(len(op_name_prefix) + len(submodule_name)):]
cur_module = submodule_dict[submodule_name]
module_name_local = op_submodule_name % submodule_name[1:-1]
else:
func_name = name[len(op_name_prefix):]
cur_module = op_module
module_name_local =\
op_module_name[:-len('._op')] if op_module_name.endswith('._op') else op_module_name
function = make_op_func(hdl, name, func_name)
function.__module__ = module_name_local
setattr(cur_module, function.__name__, function)
cur_module.__all__.append(function.__name__)
if hasattr(_np_op_doc, name):
function.__doc__ = getattr(_np_op_doc, name).__doc__
else:
function.__doc__ = re.sub('NDArray', 'ndarray', function.__doc__)
| true
| true
|
1c46e19fe76854e8b0b97098ce1dda2257aca5d4
| 4,533
|
py
|
Python
|
includes/NopSCAD/scripts/c14n_stl.py
|
codysandahl/3dprinting
|
98d588864e5ba5826c7ed16959aa7b1040a760b3
|
[
"MIT"
] | null | null | null |
includes/NopSCAD/scripts/c14n_stl.py
|
codysandahl/3dprinting
|
98d588864e5ba5826c7ed16959aa7b1040a760b3
|
[
"MIT"
] | null | null | null |
includes/NopSCAD/scripts/c14n_stl.py
|
codysandahl/3dprinting
|
98d588864e5ba5826c7ed16959aa7b1040a760b3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# NopSCADlib Copyright Chris Palmer 2018
# nop.head@gmail.com
# hydraraptor.blogspot.com
#
# This file is part of NopSCADlib.
#
# NopSCADlib is free software: you can redistribute it and/or modify it under the terms of the
# GNU General Public License as published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# NopSCADlib is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with NopSCADlib.
# If not, see <https://www.gnu.org/licenses/>.
#
#
#! OpenSCAD produces randomly ordered STL files. This script re-orders them consistently so that GIT can tell if they have changed or not.
#
# OpenSCAD produces randomly ordered STL files so source control like GIT can't tell if they have changed or not.
# This script orders each triangle to start with its lowest vertex (comparing x, then y, then z)
# It then sorts the triangles so the one with the lowest vertices comes first (comparing the first vertex, then the second, then the third)
# This has no effect on the model but makes the STL consistent, i.e. it produces a canonical form.
#
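# A minimal worked illustration of the ordering rule (hypothetical coordinates):
# a facet listed as (2,0,0), (0,0,0), (1,0,0) rotates to start at its lowest
# vertex, giving (0,0,0), (1,0,0), (2,0,0); rotation preserves the winding
# order, so the outward normal and the geometry are unchanged.
#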
from __future__ import print_function
import sys
def cmz(x):
''' Convert "-0" to "0". '''
return '0' if x == '-0' else x
class Vertex:
def __init__(self, x, y, z):
self.x, self.y, self.z = x, y, z
self.key = (float(x), float(y), float(z))
class Normal:
def __init__(self, dx, dy, dz):
self.dx, self.dy, self.dz = dx, dy, dz
class Facet:
def __init__(self, normal, v1, v2, v3):
self.normal = normal
if v1.key < v2.key:
if v1.key < v3.key:
self.vertices = (v1, v2, v3) #v1 is the smallest
else:
self.vertices = (v3, v1, v2) #v3 is the smallest
else:
if v2.key < v3.key:
self.vertices = (v2, v3, v1) #v2 is the smallest
else:
self.vertices = (v3, v1, v2) #v3 is the smallest
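        # Every branch above is a rotation of (v1, v2, v3), so the facet's
        # winding order (and hence its outward normal) is preserved.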
def key(self):
return (self.vertices[0].x, self.vertices[0].y, self.vertices[0].z,
self.vertices[1].x, self.vertices[1].y, self.vertices[1].z,
self.vertices[2].x, self.vertices[2].y, self.vertices[2].z)
class STL:
def __init__(self, fname):
self.facets = []
with open(fname) as f:
words = [cmz(s.strip()) for s in f.read().split()]
if words[0] == 'solid' and words[1] == 'OpenSCAD_Model':
i = 2
while words[i] == 'facet':
norm = Normal(words[i + 2], words[i + 3], words[i + 4])
v1 = Vertex(words[i + 8], words[i + 9], words[i + 10])
v2 = Vertex(words[i + 12], words[i + 13], words[i + 14])
v3 = Vertex(words[i + 16], words[i + 17], words[i + 18])
i += 21
self.facets.append(Facet(norm, v1, v2, v3))
self.facets.sort(key = Facet.key)
else:
print("Not an OpenSCAD ascii STL file")
sys.exit(1)
def write(self, fname):
mins = [float('inf'), float('inf'), float('inf')]
maxs = [float('-inf'), float('-inf'), float('-inf')]
with open(fname,"wt") as f:
print('solid OpenSCAD_Model', file=f)
for facet in self.facets:
print(' facet normal %s %s %s' % (facet.normal.dx, facet.normal.dy, facet.normal.dz), file=f)
print(' outer loop', file=f)
for vertex in facet.vertices:
print(' vertex %s %s %s' % (vertex.x, vertex.y, vertex.z), file=f)
for i in range(3):
ordinate = vertex.key[i]
if ordinate > maxs[i]: maxs[i] = ordinate
if ordinate < mins[i]: mins[i] = ordinate
print(' endloop', file=f)
print(' endfacet', file=f)
print('endsolid OpenSCAD_Model', file=f)
return mins, maxs
def canonicalise(fname):
stl = STL(fname)
return stl.write(fname)
if __name__ == '__main__':
if len(sys.argv) == 2:
canonicalise(sys.argv[1])
else:
print("\nusage:\n\t c14n_stl file - Canonicalise an STL file created by OpenSCAD.")
sys.exit(1)
| 38.415254
| 138
| 0.578425
|
# This script orders each triangle to start with its lowest vertex (comparing x, then y, then z)
# It then sorts the triangles so the one with the lowest vertices comes first (comparing the first vertex, then the second, then the third)
# This has no effect on the model but makes the STL consistent, i.e. it produces a canonical form.
#
from __future__ import print_function
import sys
def cmz(x):
return '0' if x == '-0' else x
class Vertex:
def __init__(self, x, y, z):
self.x, self.y, self.z = x, y, z
self.key = (float(x), float(y), float(z))
class Normal:
def __init__(self, dx, dy, dz):
self.dx, self.dy, self.dz = dx, dy, dz
class Facet:
def __init__(self, normal, v1, v2, v3):
self.normal = normal
if v1.key < v2.key:
if v1.key < v3.key:
self.vertices = (v1, v2, v3) #v1 is the smallest
else:
self.vertices = (v3, v1, v2) #v3 is the smallest
else:
if v2.key < v3.key:
self.vertices = (v2, v3, v1) #v2 is the smallest
else:
self.vertices = (v3, v1, v2) #v3 is the smallest
def key(self):
return (self.vertices[0].x, self.vertices[0].y, self.vertices[0].z,
self.vertices[1].x, self.vertices[1].y, self.vertices[1].z,
self.vertices[2].x, self.vertices[2].y, self.vertices[2].z)
class STL:
def __init__(self, fname):
self.facets = []
with open(fname) as f:
words = [cmz(s.strip()) for s in f.read().split()]
if words[0] == 'solid' and words[1] == 'OpenSCAD_Model':
i = 2
while words[i] == 'facet':
norm = Normal(words[i + 2], words[i + 3], words[i + 4])
v1 = Vertex(words[i + 8], words[i + 9], words[i + 10])
v2 = Vertex(words[i + 12], words[i + 13], words[i + 14])
v3 = Vertex(words[i + 16], words[i + 17], words[i + 18])
i += 21
self.facets.append(Facet(norm, v1, v2, v3))
self.facets.sort(key = Facet.key)
else:
print("Not an OpenSCAD ascii STL file")
sys.exit(1)
def write(self, fname):
mins = [float('inf'), float('inf'), float('inf')]
maxs = [float('-inf'), float('-inf'), float('-inf')]
with open(fname,"wt") as f:
print('solid OpenSCAD_Model', file=f)
for facet in self.facets:
print(' facet normal %s %s %s' % (facet.normal.dx, facet.normal.dy, facet.normal.dz), file=f)
print(' outer loop', file=f)
for vertex in facet.vertices:
print(' vertex %s %s %s' % (vertex.x, vertex.y, vertex.z), file=f)
for i in range(3):
ordinate = vertex.key[i]
if ordinate > maxs[i]: maxs[i] = ordinate
if ordinate < mins[i]: mins[i] = ordinate
print(' endloop', file=f)
print(' endfacet', file=f)
print('endsolid OpenSCAD_Model', file=f)
return mins, maxs
def canonicalise(fname):
stl = STL(fname)
return stl.write(fname)
if __name__ == '__main__':
if len(sys.argv) == 2:
canonicalise(sys.argv[1])
else:
print("\nusage:\n\t c14n_stl file - Canonicalise an STL file created by OpenSCAD.")
sys.exit(1)
| true
| true
|
1c46e1ea720a2cd127402c538d9c90de250108a5
| 3,717
|
py
|
Python
|
twisted/internet/test/test_time.py
|
hawkowl/twisted
|
c413aac3888dea2202c0dc26f978d7f88b4b837a
|
[
"Unlicense",
"MIT"
] | 9,953
|
2019-04-03T23:41:04.000Z
|
2022-03-31T11:54:44.000Z
|
stackoverflow/venv/lib/python3.6/site-packages/twisted/internet/test/test_time.py
|
W4LKURE/learn_python3_spider
|
98dd354a41598b31302641f9a0ea49d1ecfa0fb1
|
[
"MIT"
] | 44
|
2019-05-27T10:59:29.000Z
|
2022-03-31T14:14:29.000Z
|
stackoverflow/venv/lib/python3.6/site-packages/twisted/internet/test/test_time.py
|
W4LKURE/learn_python3_spider
|
98dd354a41598b31302641f9a0ea49d1ecfa0fb1
|
[
"MIT"
] | 2,803
|
2019-04-06T13:15:33.000Z
|
2022-03-31T07:42:01.000Z
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for implementations of L{IReactorTime}.
"""
__metaclass__ = type
from twisted.python.log import msg
from twisted.python.runtime import platform
from twisted.trial.unittest import SkipTest
from twisted.internet.test.reactormixins import ReactorBuilder
from twisted.internet.interfaces import IReactorTime, IReactorThreads
class TimeTestsBuilder(ReactorBuilder):
"""
Builder for defining tests relating to L{IReactorTime}.
"""
requiredInterfaces = (IReactorTime,)
def test_delayedCallStopsReactor(self):
"""
The reactor can be stopped by a delayed call.
"""
reactor = self.buildReactor()
reactor.callLater(0, reactor.stop)
reactor.run()
def test_distantDelayedCall(self):
"""
Scheduling a delayed call at a point in the extreme future does not
prevent normal reactor operation.
"""
reactor = self.buildReactor()
if IReactorThreads.providedBy(reactor):
def eventSource(reactor, event):
msg(format="Thread-based event-source scheduling %(event)r",
event=event)
reactor.callFromThread(event)
else:
raise SkipTest("Do not know how to synthesize non-time event to "
"stop the test")
# Pick a pretty big delay.
delayedCall = reactor.callLater(2 ** 128 + 1, lambda: None)
def stop():
msg("Stopping the reactor")
reactor.stop()
# Use repeated invocation of the event source to set up the call to stop
# the reactor. This makes it more likely at least one normal iteration
# will take place with the delayed call in place before the slightly
# different reactor shutdown logic alters things.
eventSource(reactor, lambda: eventSource(reactor, stop))
# Run the reactor directly, without a timeout. A timeout would
# interfere with the purpose of this test, which is to have the timeout
# passed to the reactor's doIterate implementation (potentially) be
# very, very large. Hopefully the event source defined above will work
# and cause the reactor to stop.
reactor.run()
# The reactor almost surely stopped before the delayed call
# fired... right?
self.assertTrue(delayedCall.active())
self.assertIn(delayedCall, reactor.getDelayedCalls())
class GlibTimeTestsBuilder(ReactorBuilder):
"""
Builder for defining tests relating to L{IReactorTime} for reactors based
off glib.
"""
requiredInterfaces = (IReactorTime,)
if platform.isWindows():
_reactors = ["twisted.internet.gtk2reactor.PortableGtkReactor"]
else:
_reactors = ["twisted.internet.glib2reactor.Glib2Reactor",
"twisted.internet.gtk2reactor.Gtk2Reactor"]
def test_timeout_add(self):
"""
A
L{reactor.callLater<twisted.internet.interfaces.IReactorTime.callLater>}
call scheduled from a C{gobject.timeout_add}
call is run on time.
"""
import gobject
reactor = self.buildReactor()
result = []
def gschedule():
reactor.callLater(0, callback)
return 0
def callback():
result.append(True)
reactor.stop()
reactor.callWhenRunning(gobject.timeout_add, 10, gschedule)
self.runReactor(reactor, 5)
self.assertEqual(result, [True])
globals().update(TimeTestsBuilder.makeTestCaseClasses())
globals().update(GlibTimeTestsBuilder.makeTestCaseClasses())
| 32.893805
| 80
| 0.651601
|
__metaclass__ = type
from twisted.python.log import msg
from twisted.python.runtime import platform
from twisted.trial.unittest import SkipTest
from twisted.internet.test.reactormixins import ReactorBuilder
from twisted.internet.interfaces import IReactorTime, IReactorThreads
class TimeTestsBuilder(ReactorBuilder):
requiredInterfaces = (IReactorTime,)
def test_delayedCallStopsReactor(self):
reactor = self.buildReactor()
reactor.callLater(0, reactor.stop)
reactor.run()
def test_distantDelayedCall(self):
reactor = self.buildReactor()
if IReactorThreads.providedBy(reactor):
def eventSource(reactor, event):
msg(format="Thread-based event-source scheduling %(event)r",
event=event)
reactor.callFromThread(event)
else:
raise SkipTest("Do not know how to synthesize non-time event to "
"stop the test")
delayedCall = reactor.callLater(2 ** 128 + 1, lambda: None)
def stop():
msg("Stopping the reactor")
reactor.stop()
eventSource(reactor, lambda: eventSource(reactor, stop))
# very, very large. Hopefully the event source defined above will work
# and cause the reactor to stop.
reactor.run()
# The reactor almost surely stopped before the delayed call
# fired... right?
self.assertTrue(delayedCall.active())
self.assertIn(delayedCall, reactor.getDelayedCalls())
class GlibTimeTestsBuilder(ReactorBuilder):
requiredInterfaces = (IReactorTime,)
if platform.isWindows():
_reactors = ["twisted.internet.gtk2reactor.PortableGtkReactor"]
else:
_reactors = ["twisted.internet.glib2reactor.Glib2Reactor",
"twisted.internet.gtk2reactor.Gtk2Reactor"]
def test_timeout_add(self):
import gobject
reactor = self.buildReactor()
result = []
def gschedule():
reactor.callLater(0, callback)
return 0
def callback():
result.append(True)
reactor.stop()
reactor.callWhenRunning(gobject.timeout_add, 10, gschedule)
self.runReactor(reactor, 5)
self.assertEqual(result, [True])
globals().update(TimeTestsBuilder.makeTestCaseClasses())
globals().update(GlibTimeTestsBuilder.makeTestCaseClasses())
| true
| true
|
1c46e2c4714a5d7a0da9a84287a162ed818906e7
| 3,529
|
py
|
Python
|
backend/schedule_worker/utils/generate_graph.py
|
evemorgen/GdzieJestMojTramwajProject
|
65a090ae4222053a2a0a1b145df5196f3658065c
|
[
"MIT"
] | null | null | null |
backend/schedule_worker/utils/generate_graph.py
|
evemorgen/GdzieJestMojTramwajProject
|
65a090ae4222053a2a0a1b145df5196f3658065c
|
[
"MIT"
] | null | null | null |
backend/schedule_worker/utils/generate_graph.py
|
evemorgen/GdzieJestMojTramwajProject
|
65a090ae4222053a2a0a1b145df5196f3658065c
|
[
"MIT"
] | null | null | null |
import os
import logging
import networkx as nx
import matplotlib.pyplot as plt
import json
from geopy.distance import vincenty
from collections import deque
from db import MpkDb as DbApi
from utils import Config
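# czy_skrzyzowanie ("is this a junction?") returns the junction node whose
# 'between' list contains both the given stop and the next stop of the
# variant, or None if the two stops are directly connected.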
def czy_skrzyzowanie(przystanek, skrzyzowania, wariant, punkty):
for skrzyzowanie in skrzyzowania:
if przystanek in punkty[skrzyzowanie]['between'] and wariant[1][wariant[1].index(przystanek) + 1] in punkty[skrzyzowanie]['between']:
return skrzyzowanie
return None
def generate_graph():
config = Config()
dbapi = DbApi()
#test = Przystanki()
linie = [str(linia) for linia in config['lines']]
#logging.info(test.petle)
dokladne_linie = {klucz: [] for klucz in linie}
for linia in linie:
warianty = dbapi.get_variants_for_line(linia)
for wariant in warianty:
przystanki = dbapi.get_stops_for_variant(wariant)
tupla = tuple([wariant, przystanki])
dokladne_linie[linia].append(tupla)
with open(os.environ['TRAM_ROOT'] + '/data/przystanki_0_159.json', 'r') as plik:
punkty = json.load(plik)
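    # ogarniete: node name -> (y, x) position scaled by 1e6 for plotting;
    # nodes are then split into loops (petle), junctions (skrzyzowania) and
    # stops (przystanki) based on their flags.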
ogarniete = {klucz: (float(punkty[klucz]['y']) * (10**6), float(punkty[klucz]['x']) * (10**6)) for klucz in punkty}
petle = {k: v for k, v in ogarniete.items() if punkty[k]['petla'] is True}
skrzyzowania = {k: v for k, v in ogarniete.items() if punkty[k]['skrzyzowanie'] is True}
przystanki = {k: v for k, v in ogarniete.items() if punkty[k]['przystanek'] is True}
G = nx.Graph()
G.add_nodes_from(ogarniete.keys())
for n, p in ogarniete.items():
G.node[n]['pos'] = p
pos = nx.get_node_attributes(G, 'pos')
offset = {}
for k, v in pos.items():
offset[k] = (v[0], v[1] - 500)
plt.figure(3, figsize=(80, 80))
nx.draw_networkx_nodes(G, pos, nodelist=przystanki, node_color='b', node_size=150)
nx.draw_networkx_nodes(G, pos, nodelist=skrzyzowania, node_color='g', node_size=100)
nx.draw_networkx_nodes(G, pos, nodelist=petle, node_color='r', node_size=200)
nx.draw_networkx_labels(G, offset, font_size=12, font_family=('ubuntu', 'arial'))
edges = {}
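    # Build the edge map: for every consecutive pair of stops on each line
    # variant, record which tram lines use that segment, routing the edge
    # through a junction node when one lies between the two stops.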
for linia in linie:
for wariant in dokladne_linie[linia]:
for przystanek in wariant[1][:-1]:
ze_skrzyzowaniem = czy_skrzyzowanie(przystanek, skrzyzowania, wariant, punkty)
if ze_skrzyzowaniem is not None:
kraw1 = tuple([przystanek, ze_skrzyzowaniem])
if kraw1 in edges:
edges[kraw1].append(linia)
else:
edges[kraw1] = [linia]
else:
kraw = tuple([przystanek, wariant[1][wariant[1].index(przystanek) + 1]])
if kraw in edges:
edges[kraw].append(linia)
else:
edges[kraw] = [linia]
for edge, label in edges.items():
first = (punkty[edge[0]]['x'], punkty[edge[0]]['y'])
second = (punkty[edge[1]]['x'], punkty[edge[1]]['y'])
logging.info('%s - %s: %s', edge[0], edge[1], vincenty(first, second).meters)
G.add_edge(edge[0], edge[1], linie=label, kolejka_L=deque(), kolejka_R=deque(), odleglosc=vincenty(first, second).meters)
nx.draw_networkx_edges(G, pos)
# nx.draw_networkx_edge_labels(G, pos)
plt.savefig(os.environ['TRAM_ROOT'] + '/data/graph.png', format='png', dpi=75)
nx.write_yaml(G, os.environ['TRAM_ROOT'] + '/data/graph.yaml')
| 39.651685
| 141
| 0.614338
|
import os
import logging
import networkx as nx
import matplotlib.pyplot as plt
import json
from geopy.distance import vincenty
from collections import deque
from db import MpkDb as DbApi
from utils import Config
def czy_skrzyzowanie(przystanek, skrzyzowania, wariant, punkty):
for skrzyzowanie in skrzyzowania:
if przystanek in punkty[skrzyzowanie]['between'] and wariant[1][wariant[1].index(przystanek) + 1] in punkty[skrzyzowanie]['between']:
return skrzyzowanie
return None
def generate_graph():
config = Config()
dbapi = DbApi()
linie = [str(linia) for linia in config['lines']]
dokladne_linie = {klucz: [] for klucz in linie}
for linia in linie:
warianty = dbapi.get_variants_for_line(linia)
for wariant in warianty:
przystanki = dbapi.get_stops_for_variant(wariant)
tupla = tuple([wariant, przystanki])
dokladne_linie[linia].append(tupla)
with open(os.environ['TRAM_ROOT'] + '/data/przystanki_0_159.json', 'r') as plik:
punkty = json.load(plik)
ogarniete = {klucz: (float(punkty[klucz]['y']) * (10**6), float(punkty[klucz]['x']) * (10**6)) for klucz in punkty}
petle = {k: v for k, v in ogarniete.items() if punkty[k]['petla'] is True}
skrzyzowania = {k: v for k, v in ogarniete.items() if punkty[k]['skrzyzowanie'] is True}
przystanki = {k: v for k, v in ogarniete.items() if punkty[k]['przystanek'] is True}
G = nx.Graph()
G.add_nodes_from(ogarniete.keys())
for n, p in ogarniete.items():
G.node[n]['pos'] = p
pos = nx.get_node_attributes(G, 'pos')
offset = {}
for k, v in pos.items():
offset[k] = (v[0], v[1] - 500)
plt.figure(3, figsize=(80, 80))
nx.draw_networkx_nodes(G, pos, nodelist=przystanki, node_color='b', node_size=150)
nx.draw_networkx_nodes(G, pos, nodelist=skrzyzowania, node_color='g', node_size=100)
nx.draw_networkx_nodes(G, pos, nodelist=petle, node_color='r', node_size=200)
nx.draw_networkx_labels(G, offset, font_size=12, font_family=('ubuntu', 'arial'))
edges = {}
for linia in linie:
for wariant in dokladne_linie[linia]:
for przystanek in wariant[1][:-1]:
ze_skrzyzowaniem = czy_skrzyzowanie(przystanek, skrzyzowania, wariant, punkty)
if ze_skrzyzowaniem is not None:
kraw1 = tuple([przystanek, ze_skrzyzowaniem])
if kraw1 in edges:
edges[kraw1].append(linia)
else:
edges[kraw1] = [linia]
else:
kraw = tuple([przystanek, wariant[1][wariant[1].index(przystanek) + 1]])
if kraw in edges:
edges[kraw].append(linia)
else:
edges[kraw] = [linia]
for edge, label in edges.items():
first = (punkty[edge[0]]['x'], punkty[edge[0]]['y'])
second = (punkty[edge[1]]['x'], punkty[edge[1]]['y'])
logging.info('%s - %s: %s', edge[0], edge[1], vincenty(first, second).meters)
G.add_edge(edge[0], edge[1], linie=label, kolejka_L=deque(), kolejka_R=deque(), odleglosc=vincenty(first, second).meters)
nx.draw_networkx_edges(G, pos)
plt.savefig(os.environ['TRAM_ROOT'] + '/data/graph.png', format='png', dpi=75)
nx.write_yaml(G, os.environ['TRAM_ROOT'] + '/data/graph.yaml')
| true
| true
|
1c46e31c48f99fa5dabe9c956cd41ecd0c86bcaf
| 5,840
|
py
|
Python
|
src/retrieval_core/models/modules/da.py
|
RImbriaco/OML
|
4998cdebc3ac553ccd53b4caacf24d8c3d8fc07b
|
[
"MIT"
] | 2
|
2021-09-08T12:33:05.000Z
|
2021-09-14T09:40:43.000Z
|
src/retrieval_core/models/modules/da.py
|
RImbriaco/OML
|
4998cdebc3ac553ccd53b4caacf24d8c3d8fc07b
|
[
"MIT"
] | null | null | null |
src/retrieval_core/models/modules/da.py
|
RImbriaco/OML
|
4998cdebc3ac553ccd53b4caacf24d8c3d8fc07b
|
[
"MIT"
] | 1
|
2021-09-08T12:35:10.000Z
|
2021-09-08T12:35:10.000Z
|
import torch
from torch import nn
"""
Attention module as implemented in "Dual Attention Network for Scene
Segmentation" https://arxiv.org/abs/1809.02983
"""
class ActivatedBatchNorm(nn.Module):
def __init__(self, num_features, activation='relu', **kwargs):
"""
Pre-activates tensor with activation function before applying batch norm.
See following link for details. Leads to better performance.
https://github.com/ducha-aiki/caffenet-benchmark/blob/master/batchnorm.md
:param num_features: number of incoming feature maps
:param activation: activation type
:param kwargs: key word arguments pertaining to BatchNorm
"""
super().__init__()
activation_map = {
'relu': nn.ReLU,
'leaky_relu': nn.LeakyReLU,
'elu': nn.ELU,
}
if activation not in activation_map:
self.act = None
else:
self.act = activation_map[activation](inplace=True)
self.bn = nn.BatchNorm2d(num_features, **kwargs)
def forward(self, x):
if self.act is not None:
x = self.act(x)
x = self.bn(x)
return x
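# A minimal usage sketch (illustrative only, not part of the original module):
#   >>> abn = ActivatedBatchNorm(64, activation='relu')
#   >>> y = abn(torch.randn(2, 64, 8, 8))  # ReLU applied before BatchNorm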
class Conv1x1(nn.Module):
def __init__(self, in_dim, out_dim):
super(Conv1x1, self).__init__()
self.conv1x1 = nn.Conv2d(
in_channels=in_dim, out_channels=out_dim, kernel_size=1)
def forward(self, x):
return self.conv1x1(x)
class Conv3x3(nn.Module):
def __init__(self, in_dim, out_dim, kernel_size=3, padding=1):
"""
Conv 3x3
:param in_dim: input channels
:param out_dim: output_channels
:param kernel_size:
:param padding:
"""
super().__init__()
self.conv = nn.Conv2d(
in_dim, out_dim, kernel_size=kernel_size, padding=padding)
def forward(self, x):
x = self.conv(x)
return x
class ConvPreAct(nn.Module):
def __init__(self, in_dim, out_dim, kernel_size=3, padding=1):
"""
Conv 3x3 -> activation -> BatchNorm
:param in_dim: input channels
:param out_dim: output_channels
:param kernel_size:
:param padding:
"""
super().__init__()
self.conv = Conv3x3(in_dim, out_dim, kernel_size, padding)
self.act = ActivatedBatchNorm(out_dim)
def forward(self, x):
x = self.conv(x)
x = self.act(x)
return x
# Both PAModule & CAModule are taken from Dual attention network as per,
# https://github.com/junfu1115/DANet/blob/master/encoding/nn/attention.py
# See https://arxiv.org/pdf/1809.02983.pdf
class PAModule(nn.Module):
def __init__(self, in_dim):
"""
        Position attention module.
        Input: feature maps of shape (B x C x H x W).
        The generated attention map spans the spatial dimensions,
        i.e. it has shape B x (H x W) x (H x W).
"""
super(PAModule, self).__init__()
self.in_dim = in_dim
self.query_conv = Conv1x1(self.in_dim, self.in_dim // 8)
self.key_conv = Conv1x1(self.in_dim, self.in_dim // 8)
self.value_conv = Conv1x1(self.in_dim, self.in_dim)
self.gamma = nn.Parameter(torch.zeros(1))
self.softmax = nn.Softmax(dim=-1)
def forward(self, x):
m_batchsize, channels, height, width = x.size()
proj_query = self.query_conv(x).view(
m_batchsize, -1, width*height).permute(0, 2, 1)
proj_key = self.key_conv(x).view(m_batchsize, -1, width*height)
energy = torch.bmm(proj_query, proj_key)
attention = self.softmax(energy)
proj_value = self.value_conv(x).view(m_batchsize, -1, width*height)
out = torch.bmm(proj_value, attention.permute(0, 2, 1))
out = out.view(m_batchsize, channels, height, width)
out = self.gamma*out + x
return out
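# Shape walkthrough for an assumed input of (B, C, H, W) = (2, 64, 7, 7):
#   proj_query: (2, 49, 8)    # C//8 channels, flattened over H*W, permuted
#   proj_key:   (2, 8, 49)
#   energy:     (2, 49, 49)   # one affinity per pair of spatial positions
#   out:        (2, 64, 7, 7) # attention-weighted values, scaled by gamma
#                             # and added back onto the input x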
class CAModule(nn.Module):
"""
    Channel attention module.
    Input: feature maps of shape (B x C x H x W).
    The generated attention map spans the channel dimension,
    i.e. it has shape B x (C x C).
"""
def __init__(self, in_dim):
super(CAModule, self).__init__()
        self.channel_in = in_dim
self.gamma = nn.Parameter(torch.zeros(1))
self.softmax = nn.Softmax(dim=-1)
def forward(self, x):
m_batchsize, C, height, width = x.size()
proj_query = x.view(m_batchsize, C, -1)
proj_key = x.view(m_batchsize, C, -1).permute(0, 2, 1)
energy = torch.bmm(proj_query, proj_key)
energy_new = torch.max(energy, -1, keepdim=True)[0].expand_as(energy) - energy
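        # Subtracting each row's max changes the sign of the affinities up to
        # a constant, so the softmax below equals softmax(-energy); this
        # mirrors the original DANet reference implementation.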
attention = self.softmax(energy_new)
proj_value = x.view(m_batchsize, C, -1)
out = torch.bmm(attention, proj_value)
out = out.view(m_batchsize, C, height, width)
out = self.gamma*out + x
return out
class DAModule(nn.Module):
def __init__(self, in_dim):
"""
Dual attention module from https://arxiv.org/pdf/1809.02983.pdf
Features from CAM and PAM are summed
:param in_dim:input dimensions
"""
super(DAModule, self).__init__()
inter_dim = in_dim // 4
self.conv_pam1 = ConvPreAct(in_dim, inter_dim)
self.pam = PAModule(inter_dim)
self.conv_pam2 = ConvPreAct(inter_dim, inter_dim)
self.conv_cam1 = ConvPreAct(in_dim, inter_dim)
self.cam = CAModule(inter_dim)
self.conv_cam2 = ConvPreAct(inter_dim, inter_dim)
self.conv = ConvPreAct(inter_dim, in_dim)
self.out_dim = in_dim
def forward(self, x):
p = self.conv_pam1(x)
p = self.pam(p)
p = self.conv_pam2(p)
c = self.conv_cam1(x)
c = self.cam(c)
c = self.conv_cam2(c)
feat = p + c
feat = self.conv(feat)
return feat
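# A minimal usage sketch (illustrative shapes):
#   >>> dam = DAModule(in_dim=256)
#   >>> out = dam(torch.randn(1, 256, 16, 16))  # -> (1, 256, 16, 16)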
| 30.899471
| 86
| 0.610445
|
import torch
from torch import nn
class ActivatedBatchNorm(nn.Module):
def __init__(self, num_features, activation='relu', **kwargs):
super().__init__()
activation_map = {
'relu': nn.ReLU,
'leaky_relu': nn.LeakyReLU,
'elu': nn.ELU,
}
if activation not in activation_map:
self.act = None
else:
self.act = activation_map[activation](inplace=True)
self.bn = nn.BatchNorm2d(num_features, **kwargs)
def forward(self, x):
if self.act is not None:
x = self.act(x)
x = self.bn(x)
return x
class Conv1x1(nn.Module):
def __init__(self, in_dim, out_dim):
super(Conv1x1, self).__init__()
self.conv1x1 = nn.Conv2d(
in_channels=in_dim, out_channels=out_dim, kernel_size=1)
def forward(self, x):
return self.conv1x1(x)
class Conv3x3(nn.Module):
def __init__(self, in_dim, out_dim, kernel_size=3, padding=1):
super().__init__()
self.conv = nn.Conv2d(
in_dim, out_dim, kernel_size=kernel_size, padding=padding)
def forward(self, x):
x = self.conv(x)
return x
class ConvPreAct(nn.Module):
def __init__(self, in_dim, out_dim, kernel_size=3, padding=1):
super().__init__()
self.conv = Conv3x3(in_dim, out_dim, kernel_size, padding)
self.act = ActivatedBatchNorm(out_dim)
def forward(self, x):
x = self.conv(x)
x = self.act(x)
return x
class PAModule(nn.Module):
def __init__(self, in_dim):
super(PAModule, self).__init__()
self.in_dim = in_dim
self.query_conv = Conv1x1(self.in_dim, self.in_dim // 8)
self.key_conv = Conv1x1(self.in_dim, self.in_dim // 8)
self.value_conv = Conv1x1(self.in_dim, self.in_dim)
self.gamma = nn.Parameter(torch.zeros(1))
self.softmax = nn.Softmax(dim=-1)
def forward(self, x):
m_batchsize, channels, height, width = x.size()
proj_query = self.query_conv(x).view(
m_batchsize, -1, width*height).permute(0, 2, 1)
proj_key = self.key_conv(x).view(m_batchsize, -1, width*height)
energy = torch.bmm(proj_query, proj_key)
attention = self.softmax(energy)
proj_value = self.value_conv(x).view(m_batchsize, -1, width*height)
out = torch.bmm(proj_value, attention.permute(0, 2, 1))
out = out.view(m_batchsize, channels, height, width)
out = self.gamma*out + x
return out
class CAModule(nn.Module):
def __init__(self, in_dim):
super(CAModule, self).__init__()
        self.channel_in = in_dim
self.gamma = nn.Parameter(torch.zeros(1))
self.softmax = nn.Softmax(dim=-1)
def forward(self, x):
m_batchsize, C, height, width = x.size()
proj_query = x.view(m_batchsize, C, -1)
proj_key = x.view(m_batchsize, C, -1).permute(0, 2, 1)
energy = torch.bmm(proj_query, proj_key)
energy_new = torch.max(energy, -1, keepdim=True)[0].expand_as(energy) - energy
attention = self.softmax(energy_new)
proj_value = x.view(m_batchsize, C, -1)
out = torch.bmm(attention, proj_value)
out = out.view(m_batchsize, C, height, width)
out = self.gamma*out + x
return out
class DAModule(nn.Module):
def __init__(self, in_dim):
super(DAModule, self).__init__()
inter_dim = in_dim // 4
self.conv_pam1 = ConvPreAct(in_dim, inter_dim)
self.pam = PAModule(inter_dim)
self.conv_pam2 = ConvPreAct(inter_dim, inter_dim)
self.conv_cam1 = ConvPreAct(in_dim, inter_dim)
self.cam = CAModule(inter_dim)
self.conv_cam2 = ConvPreAct(inter_dim, inter_dim)
self.conv = ConvPreAct(inter_dim, in_dim)
self.out_dim = in_dim
def forward(self, x):
p = self.conv_pam1(x)
p = self.pam(p)
p = self.conv_pam2(p)
c = self.conv_cam1(x)
c = self.cam(c)
c = self.conv_cam2(c)
feat = p + c
feat = self.conv(feat)
return feat
| true
| true
|
1c46e32070cf0c01bff98632cd40042af2562b9c
| 22,730
|
py
|
Python
|
plugins/sqlfluff-templater-dbt/sqlfluff_templater_dbt/templater.py
|
fdw/sqlfluff
|
e49c974e3fc886a28b358b59442d9471e6f6e89d
|
[
"MIT"
] | null | null | null |
plugins/sqlfluff-templater-dbt/sqlfluff_templater_dbt/templater.py
|
fdw/sqlfluff
|
e49c974e3fc886a28b358b59442d9471e6f6e89d
|
[
"MIT"
] | null | null | null |
plugins/sqlfluff-templater-dbt/sqlfluff_templater_dbt/templater.py
|
fdw/sqlfluff
|
e49c974e3fc886a28b358b59442d9471e6f6e89d
|
[
"MIT"
] | null | null | null |
"""Defines the templaters."""
from collections import deque
from contextlib import contextmanager
import os
import os.path
import logging
from typing import List, Optional, Iterator, Tuple, Any, Dict, Deque
from dataclasses import dataclass
from functools import partial
from dbt.version import get_installed_version
from dbt.config.runtime import RuntimeConfig as DbtRuntimeConfig
from dbt.adapters.factory import register_adapter, get_adapter
from dbt.compilation import Compiler as DbtCompiler
from dbt.exceptions import (
CompilationException as DbtCompilationException,
FailedToConnectException as DbtFailedToConnectException,
)
from dbt import flags
from jinja2 import Environment
from jinja2_simple_tags import StandaloneTag
from sqlfluff.core.cached_property import cached_property
from sqlfluff.core.errors import SQLTemplaterError, SQLTemplaterSkipFile
from sqlfluff.core.templaters.base import (
RawFileSlice,
TemplatedFile,
TemplatedFileSlice,
)
from sqlfluff.core.templaters.slicers.heuristic import slice_template
from sqlfluff.core.templaters.jinja import JinjaTemplater
# Instantiate the templater logger
templater_logger = logging.getLogger("sqlfluff.templater")
DBT_VERSION = get_installed_version()
DBT_VERSION_STRING = DBT_VERSION.to_version_string()
DBT_VERSION_TUPLE = (int(DBT_VERSION.major), int(DBT_VERSION.minor))
if DBT_VERSION_TUPLE >= (1, 0):
from dbt.flags import PROFILES_DIR
else:
from dbt.config.profile import PROFILES_DIR
@dataclass
class DbtConfigArgs:
"""Arguments to load dbt runtime config."""
project_dir: Optional[str] = None
profiles_dir: Optional[str] = None
profile: Optional[str] = None
target: Optional[str] = None
single_threaded: bool = False
class DbtTemplater(JinjaTemplater):
"""A templater using dbt."""
name = "dbt"
sequential_fail_limit = 3
def __init__(self, **kwargs):
self.sqlfluff_config = None
self.formatter = None
self.project_dir = None
self.profiles_dir = None
self.working_dir = os.getcwd()
self._sequential_fails = 0
super().__init__(**kwargs)
def config_pairs(self): # pragma: no cover TODO?
"""Returns info about the given templater for output by the cli."""
return [("templater", self.name), ("dbt", self.dbt_version)]
@property
def dbt_version(self):
"""Gets the dbt version."""
return DBT_VERSION_STRING
@property
def dbt_version_tuple(self):
"""Gets the dbt version as a tuple on (major, minor)."""
return DBT_VERSION_TUPLE
@cached_property
def dbt_config(self):
"""Loads the dbt config."""
if self.dbt_version_tuple >= (1, 0):
flags.set_from_args(
"",
DbtConfigArgs(
project_dir=self.project_dir,
profiles_dir=self.profiles_dir,
profile=self._get_profile(),
target=self._get_target(),
),
)
self.dbt_config = DbtRuntimeConfig.from_args(
DbtConfigArgs(
project_dir=self.project_dir,
profiles_dir=self.profiles_dir,
profile=self._get_profile(),
target=self._get_target(),
)
)
register_adapter(self.dbt_config)
return self.dbt_config
@cached_property
def dbt_compiler(self):
"""Loads the dbt compiler."""
self.dbt_compiler = DbtCompiler(self.dbt_config)
return self.dbt_compiler
@cached_property
def dbt_manifest(self):
"""Loads the dbt manifest."""
# Identity function used for macro hooks
def identity(x):
return x
        # Set dbt not to run tracking. We don't load
        # a full project and so some tracking routines
        # may fail.
from dbt.tracking import do_not_track
do_not_track()
if self.dbt_version_tuple <= (0, 19):
if self.dbt_version_tuple == (0, 17): # pragma: no cover TODO?
# dbt version 0.17.*
from dbt.parser.manifest import (
load_internal_manifest as load_macro_manifest,
)
else:
# dbt version 0.18.* & # 0.19.*
from dbt.parser.manifest import load_macro_manifest
load_macro_manifest = partial(load_macro_manifest, macro_hook=identity)
from dbt.parser.manifest import load_manifest
dbt_macros_manifest = load_macro_manifest(self.dbt_config)
self.dbt_manifest = load_manifest(
self.dbt_config, dbt_macros_manifest, macro_hook=identity
)
else:
# dbt 0.20.* and onward
from dbt.parser.manifest import ManifestLoader
projects = self.dbt_config.load_dependencies()
loader = ManifestLoader(self.dbt_config, projects, macro_hook=identity)
self.dbt_manifest = loader.load()
return self.dbt_manifest
@cached_property
def dbt_selector_method(self):
"""Loads the dbt selector method."""
if self.formatter: # pragma: no cover TODO?
self.formatter.dispatch_compilation_header(
"dbt templater", "Compiling dbt project..."
)
if self.dbt_version_tuple == (0, 17): # pragma: no cover TODO?
from dbt.graph.selector import PathSelector
self.dbt_selector_method = PathSelector(self.dbt_manifest)
else:
from dbt.graph.selector_methods import (
MethodManager as DbtSelectorMethodManager,
MethodName as DbtMethodName,
)
selector_methods_manager = DbtSelectorMethodManager(
self.dbt_manifest, previous_state=None
)
self.dbt_selector_method = selector_methods_manager.get_method(
DbtMethodName.Path, method_arguments=[]
)
if self.formatter: # pragma: no cover TODO?
self.formatter.dispatch_compilation_header(
"dbt templater", "Project Compiled."
)
return self.dbt_selector_method
def _get_profiles_dir(self):
"""Get the dbt profiles directory from the configuration.
The default is `~/.dbt` in 0.17 but we use the
PROFILES_DIR variable from the dbt library to
support a change of default in the future, as well
as to support the same overwriting mechanism as
dbt (currently an environment variable).
"""
dbt_profiles_dir = os.path.abspath(
os.path.expanduser(
self.sqlfluff_config.get_section(
(self.templater_selector, self.name, "profiles_dir")
)
or PROFILES_DIR
)
)
if not os.path.exists(dbt_profiles_dir):
templater_logger.error(
f"dbt_profiles_dir: {dbt_profiles_dir} could not be accessed. Check it exists."
)
return dbt_profiles_dir
def _get_project_dir(self):
"""Get the dbt project directory from the configuration.
Defaults to the working directory.
"""
dbt_project_dir = os.path.abspath(
os.path.expanduser(
self.sqlfluff_config.get_section(
(self.templater_selector, self.name, "project_dir")
)
or os.getcwd()
)
)
if not os.path.exists(dbt_project_dir):
templater_logger.error(
f"dbt_project_dir: {dbt_project_dir} could not be accessed. Check it exists."
)
return dbt_project_dir
def _get_profile(self):
"""Get a dbt profile name from the configuration."""
return self.sqlfluff_config.get_section(
(self.templater_selector, self.name, "profile")
)
def _get_target(self):
"""Get a dbt target name from the configuration."""
return self.sqlfluff_config.get_section(
(self.templater_selector, self.name, "target")
)
def sequence_files(
self, fnames: List[str], config=None, formatter=None
) -> Iterator[str]:
"""Reorder fnames to process dependent files first.
This avoids errors when an ephemeral model is processed before use.
"""
if formatter: # pragma: no cover
formatter.dispatch_compilation_header("dbt templater", "Sorting Nodes...")
# Initialise config if not already done
self.sqlfluff_config = config
if not self.project_dir:
self.project_dir = self._get_project_dir()
if not self.profiles_dir:
self.profiles_dir = self._get_profiles_dir()
# Populate full paths for selected files
full_paths: Dict[str, str] = {}
selected_files = set()
for fname in fnames:
fpath = os.path.join(self.working_dir, fname)
full_paths[fpath] = fname
selected_files.add(fpath)
ephemeral_nodes: Dict[str, Tuple[str, Any]] = {}
# Extract the ephemeral models
for key, node in self.dbt_manifest.nodes.items():
if node.config.materialized == "ephemeral":
                # The key is the manifest node's unique id.
                # The value is a tuple of the full filepath and the keys of
                # the nodes it depends on.
ephemeral_nodes[key] = (
os.path.join(self.project_dir, node.original_file_path),
node.depends_on.nodes,
)
# Yield ephemeral nodes first. We use a Deque for efficient requeing.
# We iterate through the deque, yielding any nodes without dependents,
# or where those dependents have already yielded, first. The original
# mapping is still used to hold the metadata on each key.
already_yielded = set()
ephemeral_buffer: Deque[str] = deque(ephemeral_nodes.keys())
while ephemeral_buffer:
key = ephemeral_buffer.popleft()
fpath, dependents = ephemeral_nodes[key]
# If it's not in our selection, skip it
if fpath not in selected_files:
templater_logger.debug("- Purging unselected ephemeral: %r", fpath)
# If there are dependent nodes in the set, don't process it yet.
elif any(
dependent in ephemeral_buffer for dependent in dependents
): # pragma: no cover
templater_logger.debug(
"- Requeuing ephemeral with dependents: %r", fpath
)
# Requeue it for later
ephemeral_buffer.append(key)
# Otherwise yield it.
else:
templater_logger.debug("- Yielding Ephemeral: %r", fpath)
yield full_paths[fpath]
already_yielded.add(full_paths[fpath])
for fname in fnames:
if fname not in already_yielded:
yield fname
def process(self, *, fname, in_str=None, config=None, formatter=None):
"""Compile a dbt model and return the compiled SQL.
Args:
fname (:obj:`str`): Path to dbt model(s)
in_str (:obj:`str`, optional): This is ignored for dbt
config (:obj:`FluffConfig`, optional): A specific config to use for this
templating operation. Only necessary for some templaters.
formatter (:obj:`CallbackFormatter`): Optional object for output.
"""
# Stash the formatter if provided to use in cached methods.
self.formatter = formatter
self.sqlfluff_config = config
self.project_dir = self._get_project_dir()
self.profiles_dir = self._get_profiles_dir()
fname_absolute_path = os.path.abspath(fname)
try:
os.chdir(self.project_dir)
processed_result = self._unsafe_process(fname_absolute_path, in_str, config)
# Reset the fail counter
self._sequential_fails = 0
return processed_result
except DbtCompilationException as e:
# Increment the counter
self._sequential_fails += 1
if e.node:
return None, [
SQLTemplaterError(
f"dbt compilation error on file '{e.node.original_file_path}', {e.msg}",
# It's fatal if we're over the limit
fatal=self._sequential_fails > self.sequential_fail_limit,
)
]
else:
raise # pragma: no cover
except DbtFailedToConnectException as e:
return None, [
SQLTemplaterError(
"dbt tried to connect to the database and failed: "
"you could use 'execute' https://docs.getdbt.com/reference/dbt-jinja-functions/execute/ "
f"to skip the database calls. Error: {e.msg}",
fatal=True,
)
]
# If a SQLFluff error is raised, just pass it through
except SQLTemplaterError as e: # pragma: no cover
return None, [e]
finally:
os.chdir(self.working_dir)
def _find_node(self, fname, config=None):
if not config: # pragma: no cover
raise ValueError(
"For the dbt templater, the `process()` method requires a config object."
)
if not fname: # pragma: no cover
raise ValueError(
"For the dbt templater, the `process()` method requires a file name"
)
elif fname == "stdin": # pragma: no cover
raise ValueError(
"The dbt templater does not support stdin input, provide a path instead"
)
selected = self.dbt_selector_method.search(
included_nodes=self.dbt_manifest.nodes,
# Selector needs to be a relative path
selector=os.path.relpath(fname, start=os.getcwd()),
)
results = [self.dbt_manifest.expect(uid) for uid in selected]
if not results:
model_name = os.path.splitext(os.path.basename(fname))[0]
if DBT_VERSION_TUPLE >= (1, 0):
disabled_model = None
for key, disabled_model_nodes in self.dbt_manifest.disabled.items():
for disabled_model_node in disabled_model_nodes:
if os.path.abspath(
disabled_model_node.original_file_path
) == os.path.abspath(fname):
disabled_model = disabled_model_node
else:
disabled_model = self.dbt_manifest.find_disabled_by_name(
name=model_name
)
if disabled_model and os.path.abspath(
disabled_model.original_file_path
) == os.path.abspath(fname):
raise SQLTemplaterSkipFile(
f"Skipped file {fname} because the model was disabled"
)
raise RuntimeError(
"File %s was not found in dbt project" % fname
) # pragma: no cover
return results[0]
def _unsafe_process(self, fname, in_str=None, config=None):
original_file_path = os.path.relpath(fname, start=os.getcwd())
# Below, we monkeypatch Environment.from_string() to intercept when dbt
# compiles (i.e. runs Jinja) to expand the "node" corresponding to fname.
# We do this to capture the Jinja context at the time of compilation, i.e.:
# - Jinja Environment object
# - Jinja "globals" dictionary
#
# This info is captured by the "make_template()" function, which in
# turn is used by our parent class' (JinjaTemplater) slice_file()
# function.
old_from_string = Environment.from_string
try:
make_template = None
def from_string(*args, **kwargs):
"""Replaces (via monkeypatch) the jinja2.Environment function."""
nonlocal make_template
# Is it processing the node corresponding to fname?
globals = kwargs.get("globals")
if globals:
model = globals.get("model")
if model:
if model.get("original_file_path") == original_file_path:
# Yes. Capture the important arguments and create
# a make_template() function.
env = args[0]
globals = args[2] if len(args) >= 3 else kwargs["globals"]
def make_template(in_str):
env.add_extension(SnapshotExtension)
return env.from_string(in_str, globals=globals)
                return old_from_string(*args, **kwargs)

            # Apply the monkeypatch so dbt's compilation of this node runs
            # through the interceptor above, then compile inside the patched
            # scope.
            Environment.from_string = from_string
            node = self._find_node(fname, config)
            with self.connection():
                node = self.dbt_compiler.compile_node(
                    node=node,
                    manifest=self.dbt_manifest,
                )
        finally:
            # Undo the monkeypatch.
            Environment.from_string = old_from_string
if hasattr(node, "injected_sql"):
# If injected SQL is present, it contains a better picture
# of what will actually hit the database (e.g. with tests).
# However it's not always present.
compiled_sql = node.injected_sql
else:
compiled_sql = node.compiled_sql
if not compiled_sql: # pragma: no cover
raise SQLTemplaterError(
"dbt templater compilation failed silently, check your configuration "
"by running `dbt compile` directly."
)
with open(fname) as source_dbt_model:
source_dbt_sql = source_dbt_model.read()
n_trailing_newlines = len(source_dbt_sql) - len(source_dbt_sql.rstrip("\n"))
templater_logger.debug(
" Trailing newline count in source dbt model: %r",
n_trailing_newlines,
)
templater_logger.debug(" Raw SQL before compile: %r", source_dbt_sql)
templater_logger.debug(" Node raw SQL: %r", node.raw_sql)
templater_logger.debug(" Node compiled SQL: %r", compiled_sql)
# When using dbt-templater, trailing newlines are ALWAYS REMOVED during
# compiling. Unless fixed (like below), this will cause:
# 1. L009 linting errors when running "sqlfluff lint foo_bar.sql"
# since the linter will use the compiled code with the newlines
# removed.
# 2. "No newline at end of file" warnings in Git/GitHub since
# sqlfluff uses the compiled SQL to write fixes back to the
# source SQL in the dbt model.
# The solution is:
# 1. Check for trailing newlines before compiling by looking at the
# raw SQL in the source dbt file, store the count of trailing newlines.
# 2. Append the count from #1 above to the node.raw_sql and
# compiled_sql objects, both of which have had the trailing
# newlines removed by the dbt-templater.
node.raw_sql = node.raw_sql + "\n" * n_trailing_newlines
compiled_sql = compiled_sql + "\n" * n_trailing_newlines
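        # e.g. a source model ending in "...from foo\n\n" has
        # n_trailing_newlines == 2, and both newlines are re-appended here so
        # lint positions and fix round-trips stay aligned with the source.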
raw_sliced, sliced_file, templated_sql = self.slice_file(
source_dbt_sql,
compiled_sql,
config=config,
make_template=make_template,
)
if make_template and n_trailing_newlines:
# Update templated_sql as we updated the other strings above. Update
# sliced_file to reflect the mapping of the added character(s) back
# to the raw SQL.
templated_sql = templated_sql + "\n" * n_trailing_newlines
sliced_file.append(
TemplatedFileSlice(
slice_type="literal",
source_slice=slice(
len(source_dbt_sql) - n_trailing_newlines, len(source_dbt_sql)
),
templated_slice=slice(
len(templated_sql) - n_trailing_newlines, len(templated_sql)
),
)
)
return (
TemplatedFile(
source_str=source_dbt_sql,
templated_str=templated_sql,
fname=fname,
sliced_file=sliced_file,
raw_sliced=raw_sliced,
),
# No violations returned in this way.
[],
)
def _slice_template(self, in_str: str) -> List[RawFileSlice]:
# DbtTemplater uses the original heuristic-based template slicer.
# TODO: Can it be updated to use TemplateTracer?
return slice_template(in_str, self._get_jinja_env())
@contextmanager
def connection(self):
"""Context manager that manages a dbt connection, if needed."""
# We have to register the connection in dbt >= 1.0.0 ourselves
# In previous versions, we relied on the functionality removed in
# https://github.com/dbt-labs/dbt-core/pull/4062.
if DBT_VERSION_TUPLE >= (1, 0):
adapter = get_adapter(self.dbt_config)
with adapter.connection_named("master"):
adapter.set_relations_cache(self.dbt_manifest)
yield
else:
yield
class SnapshotExtension(StandaloneTag):
"""Dummy "snapshot" tags so raw dbt templates will parse.
Context: dbt snapshots (https://docs.getdbt.com/docs/building-a-dbt-project/snapshots/#example)
use custom Jinja "snapshot" and "endsnapshot" tags. However, dbt does not
actually register those tags with Jinja. Instead, it finds and removes these
tags during a preprocessing step. However, DbtTemplater needs those tags to
actually parse, because JinjaTracer creates and uses Jinja to process
another template similar to the original one.
"""
tags = {"snapshot", "endsnapshot"}
def render(self, format_string=None):
"""Dummy method that renders the tag."""
return ""
| 39.054983
| 109
| 0.593797
|
from collections import deque
from contextlib import contextmanager
import os
import os.path
import logging
from typing import List, Optional, Iterator, Tuple, Any, Dict, Deque
from dataclasses import dataclass
from functools import partial
from dbt.version import get_installed_version
from dbt.config.runtime import RuntimeConfig as DbtRuntimeConfig
from dbt.adapters.factory import register_adapter, get_adapter
from dbt.compilation import Compiler as DbtCompiler
from dbt.exceptions import (
CompilationException as DbtCompilationException,
FailedToConnectException as DbtFailedToConnectException,
)
from dbt import flags
from jinja2 import Environment
from jinja2_simple_tags import StandaloneTag
from sqlfluff.core.cached_property import cached_property
from sqlfluff.core.errors import SQLTemplaterError, SQLTemplaterSkipFile
from sqlfluff.core.templaters.base import (
RawFileSlice,
TemplatedFile,
TemplatedFileSlice,
)
from sqlfluff.core.templaters.slicers.heuristic import slice_template
from sqlfluff.core.templaters.jinja import JinjaTemplater
templater_logger = logging.getLogger("sqlfluff.templater")
DBT_VERSION = get_installed_version()
DBT_VERSION_STRING = DBT_VERSION.to_version_string()
DBT_VERSION_TUPLE = (int(DBT_VERSION.major), int(DBT_VERSION.minor))
if DBT_VERSION_TUPLE >= (1, 0):
from dbt.flags import PROFILES_DIR
else:
from dbt.config.profile import PROFILES_DIR
@dataclass
class DbtConfigArgs:
project_dir: Optional[str] = None
profiles_dir: Optional[str] = None
profile: Optional[str] = None
target: Optional[str] = None
single_threaded: bool = False
class DbtTemplater(JinjaTemplater):
name = "dbt"
sequential_fail_limit = 3
def __init__(self, **kwargs):
self.sqlfluff_config = None
self.formatter = None
self.project_dir = None
self.profiles_dir = None
self.working_dir = os.getcwd()
self._sequential_fails = 0
super().__init__(**kwargs)
def config_pairs(self):
return [("templater", self.name), ("dbt", self.dbt_version)]
@property
def dbt_version(self):
return DBT_VERSION_STRING
@property
def dbt_version_tuple(self):
return DBT_VERSION_TUPLE
@cached_property
def dbt_config(self):
if self.dbt_version_tuple >= (1, 0):
flags.set_from_args(
"",
DbtConfigArgs(
project_dir=self.project_dir,
profiles_dir=self.profiles_dir,
profile=self._get_profile(),
target=self._get_target(),
),
)
self.dbt_config = DbtRuntimeConfig.from_args(
DbtConfigArgs(
project_dir=self.project_dir,
profiles_dir=self.profiles_dir,
profile=self._get_profile(),
target=self._get_target(),
)
)
register_adapter(self.dbt_config)
return self.dbt_config
@cached_property
def dbt_compiler(self):
self.dbt_compiler = DbtCompiler(self.dbt_config)
return self.dbt_compiler
@cached_property
def dbt_manifest(self):
def identity(x):
return x
        # Set dbt not to run tracking. We don't load
        # a full project and so some tracking routines
        # may fail.
from dbt.tracking import do_not_track
do_not_track()
if self.dbt_version_tuple <= (0, 19):
if self.dbt_version_tuple == (0, 17): # pragma: no cover TODO?
# dbt version 0.17.*
from dbt.parser.manifest import (
load_internal_manifest as load_macro_manifest,
)
else:
# dbt version 0.18.* & # 0.19.*
from dbt.parser.manifest import load_macro_manifest
load_macro_manifest = partial(load_macro_manifest, macro_hook=identity)
from dbt.parser.manifest import load_manifest
dbt_macros_manifest = load_macro_manifest(self.dbt_config)
self.dbt_manifest = load_manifest(
self.dbt_config, dbt_macros_manifest, macro_hook=identity
)
else:
# dbt 0.20.* and onward
from dbt.parser.manifest import ManifestLoader
projects = self.dbt_config.load_dependencies()
loader = ManifestLoader(self.dbt_config, projects, macro_hook=identity)
self.dbt_manifest = loader.load()
return self.dbt_manifest
@cached_property
def dbt_selector_method(self):
if self.formatter: # pragma: no cover TODO?
self.formatter.dispatch_compilation_header(
"dbt templater", "Compiling dbt project..."
)
if self.dbt_version_tuple == (0, 17): # pragma: no cover TODO?
from dbt.graph.selector import PathSelector
self.dbt_selector_method = PathSelector(self.dbt_manifest)
else:
from dbt.graph.selector_methods import (
MethodManager as DbtSelectorMethodManager,
MethodName as DbtMethodName,
)
selector_methods_manager = DbtSelectorMethodManager(
self.dbt_manifest, previous_state=None
)
self.dbt_selector_method = selector_methods_manager.get_method(
DbtMethodName.Path, method_arguments=[]
)
if self.formatter: # pragma: no cover TODO?
self.formatter.dispatch_compilation_header(
"dbt templater", "Project Compiled."
)
return self.dbt_selector_method
def _get_profiles_dir(self):
dbt_profiles_dir = os.path.abspath(
os.path.expanduser(
self.sqlfluff_config.get_section(
(self.templater_selector, self.name, "profiles_dir")
)
or PROFILES_DIR
)
)
if not os.path.exists(dbt_profiles_dir):
templater_logger.error(
f"dbt_profiles_dir: {dbt_profiles_dir} could not be accessed. Check it exists."
)
return dbt_profiles_dir
def _get_project_dir(self):
dbt_project_dir = os.path.abspath(
os.path.expanduser(
self.sqlfluff_config.get_section(
(self.templater_selector, self.name, "project_dir")
)
or os.getcwd()
)
)
if not os.path.exists(dbt_project_dir):
templater_logger.error(
f"dbt_project_dir: {dbt_project_dir} could not be accessed. Check it exists."
)
return dbt_project_dir
def _get_profile(self):
return self.sqlfluff_config.get_section(
(self.templater_selector, self.name, "profile")
)
def _get_target(self):
return self.sqlfluff_config.get_section(
(self.templater_selector, self.name, "target")
)
def sequence_files(
self, fnames: List[str], config=None, formatter=None
) -> Iterator[str]:
if formatter: # pragma: no cover
formatter.dispatch_compilation_header("dbt templater", "Sorting Nodes...")
# Initialise config if not already done
self.sqlfluff_config = config
if not self.project_dir:
self.project_dir = self._get_project_dir()
if not self.profiles_dir:
self.profiles_dir = self._get_profiles_dir()
# Populate full paths for selected files
full_paths: Dict[str, str] = {}
selected_files = set()
for fname in fnames:
fpath = os.path.join(self.working_dir, fname)
full_paths[fpath] = fname
selected_files.add(fpath)
ephemeral_nodes: Dict[str, Tuple[str, Any]] = {}
# Extract the ephemeral models
for key, node in self.dbt_manifest.nodes.items():
if node.config.materialized == "ephemeral":
                # The key is the full filepath.
                # The value is a tuple of the filepath and a list of dependent keys.
ephemeral_nodes[key] = (
os.path.join(self.project_dir, node.original_file_path),
node.depends_on.nodes,
)
        # Yield ephemeral nodes first. We use a Deque for efficient requeuing.
# We iterate through the deque, yielding any nodes without dependents,
# or where those dependents have already yielded, first. The original
# mapping is still used to hold the metadata on each key.
already_yielded = set()
ephemeral_buffer: Deque[str] = deque(ephemeral_nodes.keys())
while ephemeral_buffer:
key = ephemeral_buffer.popleft()
fpath, dependents = ephemeral_nodes[key]
# If it's not in our selection, skip it
if fpath not in selected_files:
templater_logger.debug("- Purging unselected ephemeral: %r", fpath)
elif any(
dependent in ephemeral_buffer for dependent in dependents
): # pragma: no cover
templater_logger.debug(
"- Requeuing ephemeral with dependents: %r", fpath
)
# Requeue it for later
ephemeral_buffer.append(key)
# Otherwise yield it.
else:
templater_logger.debug("- Yielding Ephemeral: %r", fpath)
yield full_paths[fpath]
already_yielded.add(full_paths[fpath])
for fname in fnames:
if fname not in already_yielded:
yield fname
def process(self, *, fname, in_str=None, config=None, formatter=None):
# Stash the formatter if provided to use in cached methods.
self.formatter = formatter
self.sqlfluff_config = config
self.project_dir = self._get_project_dir()
self.profiles_dir = self._get_profiles_dir()
fname_absolute_path = os.path.abspath(fname)
try:
os.chdir(self.project_dir)
processed_result = self._unsafe_process(fname_absolute_path, in_str, config)
# Reset the fail counter
self._sequential_fails = 0
return processed_result
except DbtCompilationException as e:
# Increment the counter
self._sequential_fails += 1
if e.node:
return None, [
SQLTemplaterError(
f"dbt compilation error on file '{e.node.original_file_path}', {e.msg}",
# It's fatal if we're over the limit
fatal=self._sequential_fails > self.sequential_fail_limit,
)
]
else:
raise # pragma: no cover
except DbtFailedToConnectException as e:
return None, [
SQLTemplaterError(
"dbt tried to connect to the database and failed: "
"you could use 'execute' https://docs.getdbt.com/reference/dbt-jinja-functions/execute/ "
f"to skip the database calls. Error: {e.msg}",
fatal=True,
)
]
# If a SQLFluff error is raised, just pass it through
except SQLTemplaterError as e: # pragma: no cover
return None, [e]
finally:
os.chdir(self.working_dir)
def _find_node(self, fname, config=None):
if not config: # pragma: no cover
raise ValueError(
"For the dbt templater, the `process()` method requires a config object."
)
if not fname: # pragma: no cover
raise ValueError(
"For the dbt templater, the `process()` method requires a file name"
)
elif fname == "stdin": # pragma: no cover
raise ValueError(
"The dbt templater does not support stdin input, provide a path instead"
)
selected = self.dbt_selector_method.search(
included_nodes=self.dbt_manifest.nodes,
# Selector needs to be a relative path
selector=os.path.relpath(fname, start=os.getcwd()),
)
results = [self.dbt_manifest.expect(uid) for uid in selected]
if not results:
model_name = os.path.splitext(os.path.basename(fname))[0]
if DBT_VERSION_TUPLE >= (1, 0):
disabled_model = None
for key, disabled_model_nodes in self.dbt_manifest.disabled.items():
for disabled_model_node in disabled_model_nodes:
if os.path.abspath(
disabled_model_node.original_file_path
) == os.path.abspath(fname):
disabled_model = disabled_model_node
else:
disabled_model = self.dbt_manifest.find_disabled_by_name(
name=model_name
)
if disabled_model and os.path.abspath(
disabled_model.original_file_path
) == os.path.abspath(fname):
raise SQLTemplaterSkipFile(
f"Skipped file {fname} because the model was disabled"
)
raise RuntimeError(
"File %s was not found in dbt project" % fname
) # pragma: no cover
return results[0]
def _unsafe_process(self, fname, in_str=None, config=None):
original_file_path = os.path.relpath(fname, start=os.getcwd())
# Below, we monkeypatch Environment.from_string() to intercept when dbt
# compiles (i.e. runs Jinja) to expand the "node" corresponding to fname.
# We do this to capture the Jinja context at the time of compilation, i.e.:
# - Jinja Environment object
# - Jinja "globals" dictionary
#
# This info is captured by the "make_template()" function, which in
# turn is used by our parent class' (JinjaTemplater) slice_file()
old_from_string = Environment.from_string
try:
make_template = None
def from_string(*args, **kwargs):
nonlocal make_template
globals = kwargs.get("globals")
if globals:
model = globals.get("model")
if model:
if model.get("original_file_path") == original_file_path:
env = args[0]
globals = args[2] if len(args) >= 3 else kwargs["globals"]
def make_template(in_str):
env.add_extension(SnapshotExtension)
return env.from_string(in_str, globals=globals)
return old_from_string(*args, **kwargs)
finally:
Environment.from_string = from_string
node = self._find_node(fname, config)
with self.connection():
node = self.dbt_compiler.compile_node(
node=node,
manifest=self.dbt_manifest,
)
Environment.from_string = old_from_string
if hasattr(node, "injected_sql"):
compiled_sql = node.injected_sql
else:
compiled_sql = node.compiled_sql
if not compiled_sql: # pragma: no cover
raise SQLTemplaterError(
"dbt templater compilation failed silently, check your configuration "
"by running `dbt compile` directly."
)
with open(fname) as source_dbt_model:
source_dbt_sql = source_dbt_model.read()
n_trailing_newlines = len(source_dbt_sql) - len(source_dbt_sql.rstrip("\n"))
templater_logger.debug(
" Trailing newline count in source dbt model: %r",
n_trailing_newlines,
)
templater_logger.debug(" Raw SQL before compile: %r", source_dbt_sql)
templater_logger.debug(" Node raw SQL: %r", node.raw_sql)
templater_logger.debug(" Node compiled SQL: %r", compiled_sql)
# When using dbt-templater, trailing newlines are ALWAYS REMOVED during
# compiling. Unless fixed (like below), this will cause:
# 1. L009 linting errors when running "sqlfluff lint foo_bar.sql"
# since the linter will use the compiled code with the newlines
# removed.
# 2. "No newline at end of file" warnings in Git/GitHub since
# sqlfluff uses the compiled SQL to write fixes back to the
# source SQL in the dbt model.
# The solution is:
# 1. Check for trailing newlines before compiling by looking at the
# raw SQL in the source dbt file, store the count of trailing newlines.
# 2. Append the count from #1 above to the node.raw_sql and
# compiled_sql objects, both of which have had the trailing
# newlines removed by the dbt-templater.
node.raw_sql = node.raw_sql + "\n" * n_trailing_newlines
compiled_sql = compiled_sql + "\n" * n_trailing_newlines
raw_sliced, sliced_file, templated_sql = self.slice_file(
source_dbt_sql,
compiled_sql,
config=config,
make_template=make_template,
)
if make_template and n_trailing_newlines:
# Update templated_sql as we updated the other strings above. Update
# sliced_file to reflect the mapping of the added character(s) back
# to the raw SQL.
templated_sql = templated_sql + "\n" * n_trailing_newlines
sliced_file.append(
TemplatedFileSlice(
slice_type="literal",
source_slice=slice(
len(source_dbt_sql) - n_trailing_newlines, len(source_dbt_sql)
),
templated_slice=slice(
len(templated_sql) - n_trailing_newlines, len(templated_sql)
),
)
)
return (
TemplatedFile(
source_str=source_dbt_sql,
templated_str=templated_sql,
fname=fname,
sliced_file=sliced_file,
raw_sliced=raw_sliced,
),
# No violations returned in this way.
[],
)
def _slice_template(self, in_str: str) -> List[RawFileSlice]:
# DbtTemplater uses the original heuristic-based template slicer.
# TODO: Can it be updated to use TemplateTracer?
return slice_template(in_str, self._get_jinja_env())
@contextmanager
def connection(self):
# We have to register the connection in dbt >= 1.0.0 ourselves
# In previous versions, we relied on the functionality removed in
# https://github.com/dbt-labs/dbt-core/pull/4062.
if DBT_VERSION_TUPLE >= (1, 0):
adapter = get_adapter(self.dbt_config)
with adapter.connection_named("master"):
adapter.set_relations_cache(self.dbt_manifest)
yield
else:
yield
class SnapshotExtension(StandaloneTag):
tags = {"snapshot", "endsnapshot"}
def render(self, format_string=None):
return ""
| true
| true
|
1c46e354feed5cf4980e4dc9638c9d72ef429a1d
| 7,419
|
py
|
Python
|
east/utils/image_utils.py
|
embracesource-cv-com/keras-east
|
0733a9a99c4446a30c8b8e1d62e102391f7a854a
|
[
"Apache-2.0"
] | 12
|
2019-04-01T01:58:13.000Z
|
2019-12-10T02:54:18.000Z
|
east/utils/image_utils.py
|
embracesource-cv-com/keras-east
|
0733a9a99c4446a30c8b8e1d62e102391f7a854a
|
[
"Apache-2.0"
] | 5
|
2019-04-22T16:00:02.000Z
|
2020-08-12T07:03:05.000Z
|
east/utils/image_utils.py
|
embracesource-cv-com/keras-east
|
0733a9a99c4446a30c8b8e1d62e102391f7a854a
|
[
"Apache-2.0"
] | 1
|
2019-05-24T11:34:44.000Z
|
2019-05-24T11:34:44.000Z
|
# -*- coding: utf-8 -*-
"""
File Name: image
Description : Image processing utilities
Author : mick.yi
date: 2019/2/18
"""
import skimage
from skimage import io, transform
import numpy as np
import matplotlib.pyplot as plt
import random
def load_image(image_path):
"""
    Load an image.
    :param image_path: path to the image file
    :return: [h,w,3] numpy array
"""
image = plt.imread(image_path)
    # Convert grayscale to RGB
    if len(image.shape) == 2:
        image = np.expand_dims(image, axis=2)
        image = np.tile(image, (1, 1, 3))
    elif image.shape[-1] == 1:
        image = skimage.color.gray2rgb(image)  # io.imread raises ValueError: Input image expected to be RGB, RGBA or gray
    # Normalize values into the 0~255 range
    if image.dtype == np.float32:
        image *= 255
        image = image.astype(np.uint8)
    # Drop the alpha channel
return image[..., :3]
def resize_image_and_gt(image, output_size, gt_polygons=None):
"""
    Resize the image to the given output size.
    :param image:
    :param output_size:
    :param gt_polygons:
    :return:
    image: (H,W,3)
    image_meta: metadata array; see compose_image_meta for the layout
    gt_polygons: GT polygon coordinates adjusted for the resize and padding
"""
original_shape = image.shape
    # Resize the image and gather the related metadata
    h, w, window, scale, padding = resize_meta(original_shape[0], original_shape[1], output_size)
    image = resize_image(image, h, w, padding)
    # Assemble the image metadata
    image_meta = compose_image_meta(np.random.randint(10000), original_shape, image.shape,
                                    window, scale)
    # Adjust the GT polygons for the resize and padding
if gt_polygons is not None and gt_polygons.shape[0] > 0:
gt_polygons = adjust_polygons(gt_polygons, padding, scale)
return image, image_meta, gt_polygons
def random_crop_image(image, gt_window):
"""
    Randomly crop the image.
    :param image: [H,W,C]
    :param gt_window: annotated region (y1,x1,y2,x2)
    :return: the cropped image and the crop window
"""
h, w = list(image.shape)[:2]
y1, x1, y2, x2 = gt_window
    # Crop at most 1/10 from each side
crop_ratio = 0.1
wy1 = np.random.randint(min(y1 + 1, h * crop_ratio))
wx1 = np.random.randint(min(x1 + 1, w * crop_ratio))
wy2 = h - np.random.randint(min(h - y2 + 1, h * crop_ratio))
wx2 = w - np.random.randint(min(w - x2 + 1, w * crop_ratio))
return image[wy1:wy2, wx1:wx2], [wy1, wx1, wy2, wx2]
def resize_image(image, h, w, padding):
"""
    Resize the image into a square: the long side is scaled to the target size
    and the short side is padded.
    :param image: numpy array (H,W,3)
    :param h: height after scaling
    :param w: width after scaling
    :param padding: padding to add after scaling
    :return: the resized, padded image
"""
image_dtype = image.dtype
image = transform.resize(image, (h, w), order=1, mode='constant',
cval=0, clip=True, preserve_range=True)
image = np.pad(image, padding, mode='constant', constant_values=0)
return image.astype(image_dtype)
def resize_meta(h, w, max_dim):
"""
    Compute the resize metadata.
    :param h: original image height
    :param w: original image width
    :param max_dim: side length after scaling
:return:
"""
    scale = max_dim / max(h, w)  # scale factor
    # New height and width
    h, w = round(h * scale), round(w * scale)
    # Compute the padding
top_pad = (max_dim - h) // 2
bottom_pad = max_dim - h - top_pad
left_pad = (max_dim - w) // 2
right_pad = max_dim - w - left_pad
padding = [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)]
    # Compute the window
    window = (top_pad, left_pad, h + top_pad, w + left_pad)
return h, w, window, scale, padding
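

def _demo_resize_meta():  # pragma: no cover
    # Worked example (editor's addition): scale a 300x400 image onto a 512x512
    # square. The long side (400) drives the scale; the height is padded.
    h, w, window, scale, padding = resize_meta(300, 400, 512)
    assert scale == 1.28                          # 512 / 400
    assert (h, w) == (384, 512)                   # round(300 * 1.28), round(400 * 1.28)
    assert padding == [(64, 64), (0, 0), (0, 0)]  # pad the height up to 512
    assert window == (64, 0, 448, 512)            # rows 64..448 hold the image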
def compose_image_meta(image_id, original_image_shape, image_shape,
window, scale):
"""
    Pack the image metadata into a numpy array.
    :param image_id:
    :param original_image_shape: original image shape, tuple(H,W,3)
    :param image_shape: resized image shape, tuple(H,W,3)
    :param window: window position of the original image within the resized image (y1,x1,y2,x2)
    :param scale: scale factor
:return:
"""
meta = np.array(
[image_id] + # size=1
list(original_image_shape) + # size=3
list(image_shape) + # size=3
        list(window) +  # size=4 (y1, x1, y2, x2) in image coordinates
[scale] # size=1
)
return meta
def parse_image_meta(meta):
"""
    Parse the image metadata; note the input is the metadata array.
:param meta: [12]
:return:
"""
image_id = meta[0]
original_image_shape = meta[1:4]
image_shape = meta[4:7]
    window = meta[7:11]  # (y1, x1, y2, x2) window of image in pixels
scale = meta[11]
return {
"image_id": image_id.astype(np.int32),
"original_image_shape": original_image_shape.astype(np.int32),
"image_shape": image_shape.astype(np.int32),
"window": window.astype(np.int32),
"scale": scale.astype(np.float32)
}
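

def _demo_image_meta_round_trip():  # pragma: no cover
    # Editor's sketch: parse_image_meta inverts compose_image_meta. The meta
    # vector packs 12 numbers: id(1) + original shape(3) + shape(3) + window(4) + scale(1).
    meta = compose_image_meta(7, (300, 400, 3), (512, 512, 3), (64, 0, 448, 512), 1.28)
    parsed = parse_image_meta(meta)
    assert parsed['image_id'] == 7
    assert tuple(parsed['window']) == (64, 0, 448, 512)
    assert parsed['scale'] == np.float32(1.28)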
def batch_parse_image_meta(meta):
"""
    Parse a batch of image metadata; note the input is the batched metadata array.
:param meta: [batch,12]
:return:
"""
image_id = meta[:, 0]
original_image_shape = meta[:, 1:4]
image_shape = meta[:, 4:7]
    window = meta[:, 7:11]  # (y1, x1, y2, x2) window of image in pixels
scale = meta[:, 11]
return {
"image_id": image_id.astype(np.int32),
"original_image_shape": original_image_shape.astype(np.int32),
"image_shape": image_shape.astype(np.int32),
"window": window.astype(np.int32),
"scale": scale.astype(np.float32)
}
def adjust_box(boxes, padding, scale):
"""
    Adjust box coordinates for the padding and scale factor.
    :param boxes: numpy array; GT boxes [N,(y1,x1,y2,x2)]
    :param padding: [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)]
    :param scale: scale factor
:return:
"""
boxes = boxes * scale
    boxes[:, 0::2] += padding[0][0]  # height padding
    boxes[:, 1::2] += padding[1][0]  # width padding
return boxes
def adjust_polygons(polygons, padding, scale):
"""
    Adjust quadrilateral (polygon) coordinates for the padding and scale factor.
    :param polygons: numpy array; GT polygons [N,4,(x,y)]
    :param padding: [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)]
    :param scale: scale factor
:return:
"""
polygons = polygons * scale
    polygons[:, :, 1] += padding[0][0]  # height padding
    polygons[:, :, 0] += padding[1][0]  # width padding
return polygons
def recover_detect_boxes(boxes, window, scale):
"""
    Map detected boxes back onto the original image, removing the padding and scaling.
    :param boxes: numpy array, [n,(y1,x1,y2,x2)]
    :param window: [(y1,x1,y2,x2)]
    :param scale: scalar
:return:
"""
    # Remove the padding
    boxes[:, 0::2] -= window[0]
    boxes[:, 1::2] -= window[1]
    # Undo the scaling
boxes /= scale
return boxes
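

def _demo_box_round_trip():  # pragma: no cover
    # Editor's sketch (made-up numbers): recover_detect_boxes undoes adjust_box
    # for boxes that stay inside the window.
    boxes = np.array([[20., 10., 40., 30.]])  # [N,(y1,x1,y2,x2)]
    scale, padding = 2.0, [(10, 10), (5, 5), (0, 0)]
    window = (10, 5, 90, 85)                  # (top_pad, left_pad, h + top, w + left)
    moved = adjust_box(boxes.copy(), padding, scale)
    back = recover_detect_boxes(moved, window, scale)
    assert np.allclose(back, boxes)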
def clip_polygons(polygons, window):
"""
    Clip detected quadrilaterals to the image window.
    :param polygons: numpy array, [n,4,(x,y)]
    :param window: [(y1,x1,y2,x2)]
:return:
"""
if len(polygons) == 0:
return polygons
y1, x1, y2, x2 = window
    # Make sure coordinates stay inside the window
polygons[:, :, 1] = np.maximum(y1, np.minimum(y2, polygons[:, :, 1]))
polygons[:, :, 0] = np.maximum(x1, np.minimum(x2, polygons[:, :, 0]))
return polygons
def recover_detect_polygons(polygons, window, scale):
"""
    Map detected quadrilaterals back onto the original image, removing the padding and scaling.
    :param polygons: numpy array, [n,4,(x,y)]
    :param window: [(y1,x1,y2,x2)]
    :param scale: scalar
:return:
"""
if len(polygons) == 0:
return polygons
clip_polygons(polygons, window)
    # Remove the padding
    polygons[:, :, 1] -= window[0]  # height
    polygons[:, :, 0] -= window[1]  # width
    # Undo the scaling
polygons /= scale
return polygons
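

def _demo_polygon_round_trip():  # pragma: no cover
    # Editor's sketch (made-up numbers): adjusting GT polygons into the padded,
    # scaled image and recovering detections are inverse operations as long as
    # the polygon stays inside the window.
    polys = np.array([[[10., 20.], [30., 20.], [30., 40.], [10., 40.]]])  # [N,4,(x,y)]
    scale, padding, window = 2.0, [(10, 10), (5, 5), (0, 0)], (10, 5, 90, 85)
    moved = adjust_polygons(polys, padding, scale)
    back = recover_detect_polygons(moved, window, scale)
    assert np.allclose(back, polys)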
| 28.755814
| 117
| 0.579189
|
import skimage
from skimage import io, transform
import numpy as np
import matplotlib.pyplot as plt
import random
def load_image(image_path):
image = plt.imread(image_path)
if len(image.shape) == 2:
image = np.expand_dims(image, axis=2)
image = np.tile(image, (1, 1, 3))
elif image.shape[-1] == 1:
image = skimage.color.gray2rgb(image)
if image.dtype == np.float32:
image *= 255
image = image.astype(np.uint8)
return image[..., :3]
def resize_image_and_gt(image, output_size, gt_polygons=None):
original_shape = image.shape
h, w, window, scale, padding = resize_meta(original_shape[0], original_shape[1], output_size)
image = resize_image(image, h, w, padding)
image_meta = compose_image_meta(np.random.randint(10000), original_shape, image.shape,
window, scale)
if gt_polygons is not None and gt_polygons.shape[0] > 0:
gt_polygons = adjust_polygons(gt_polygons, padding, scale)
return image, image_meta, gt_polygons
def random_crop_image(image, gt_window):
h, w = list(image.shape)[:2]
y1, x1, y2, x2 = gt_window
crop_ratio = 0.1
wy1 = np.random.randint(min(y1 + 1, h * crop_ratio))
wx1 = np.random.randint(min(x1 + 1, w * crop_ratio))
wy2 = h - np.random.randint(min(h - y2 + 1, h * crop_ratio))
wx2 = w - np.random.randint(min(w - x2 + 1, w * crop_ratio))
return image[wy1:wy2, wx1:wx2], [wy1, wx1, wy2, wx2]
def resize_image(image, h, w, padding):
image_dtype = image.dtype
image = transform.resize(image, (h, w), order=1, mode='constant',
cval=0, clip=True, preserve_range=True)
image = np.pad(image, padding, mode='constant', constant_values=0)
return image.astype(image_dtype)
def resize_meta(h, w, max_dim):
scale = max_dim / max(h, w)
h, w = round(h * scale), round(w * scale)
top_pad = (max_dim - h) // 2
bottom_pad = max_dim - h - top_pad
left_pad = (max_dim - w) // 2
right_pad = max_dim - w - left_pad
padding = [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)]
window = (top_pad, left_pad, h + top_pad, w + left_pad)
return h, w, window, scale, padding
def compose_image_meta(image_id, original_image_shape, image_shape,
window, scale):
meta = np.array(
[image_id] +
list(original_image_shape) +
list(image_shape) +
list(window) +
[scale]
)
return meta
def parse_image_meta(meta):
image_id = meta[0]
original_image_shape = meta[1:4]
image_shape = meta[4:7]
window = meta[7:11]
scale = meta[11]
return {
"image_id": image_id.astype(np.int32),
"original_image_shape": original_image_shape.astype(np.int32),
"image_shape": image_shape.astype(np.int32),
"window": window.astype(np.int32),
"scale": scale.astype(np.float32)
}
def batch_parse_image_meta(meta):
image_id = meta[:, 0]
original_image_shape = meta[:, 1:4]
image_shape = meta[:, 4:7]
window = meta[:, 7:11]
scale = meta[:, 11]
return {
"image_id": image_id.astype(np.int32),
"original_image_shape": original_image_shape.astype(np.int32),
"image_shape": image_shape.astype(np.int32),
"window": window.astype(np.int32),
"scale": scale.astype(np.float32)
}
def adjust_box(boxes, padding, scale):
boxes = boxes * scale
boxes[:, 0::2] += padding[0][0]
boxes[:, 1::2] += padding[1][0]
return boxes
def adjust_polygons(polygons, padding, scale):
polygons = polygons * scale
polygons[:, :, 1] += padding[0][0]
polygons[:, :, 0] += padding[1][0]
return polygons
def recover_detect_boxes(boxes, window, scale):
boxes[:, 0::2] -= window[0]
boxes[:, 1::2] -= window[1]
boxes /= scale
return boxes
def clip_polygons(polygons, window):
if len(polygons) == 0:
return polygons
y1, x1, y2, x2 = window
polygons[:, :, 1] = np.maximum(y1, np.minimum(y2, polygons[:, :, 1]))
polygons[:, :, 0] = np.maximum(x1, np.minimum(x2, polygons[:, :, 0]))
return polygons
def recover_detect_polygons(polygons, window, scale):
if len(polygons) == 0:
return polygons
clip_polygons(polygons, window)
polygons[:, :, 1] -= window[0]
polygons[:, :, 0] -= window[1]
polygons /= scale
return polygons
| true
| true
|
1c46e48e2e3f579a1cdbebb866e2f56a6b6f6241
| 201
|
py
|
Python
|
rpc/client.py
|
yuriscosta/tads-sistemas-distribuidos
|
1bdcd3ff87bb5ecc2a722ef70bb4e7fd7c8540da
|
[
"MIT"
] | 1
|
2017-10-18T03:04:49.000Z
|
2017-10-18T03:04:49.000Z
|
rpc/client.py
|
yuriscosta/tads-sistemas-distribuidos
|
1bdcd3ff87bb5ecc2a722ef70bb4e7fd7c8540da
|
[
"MIT"
] | 1
|
2020-06-05T17:51:11.000Z
|
2020-06-05T17:51:11.000Z
|
rpc/client.py
|
yuriscosta/tads-sistemas-distribuidos
|
1bdcd3ff87bb5ecc2a722ef70bb4e7fd7c8540da
|
[
"MIT"
] | null | null | null |
import xmlrpc.client
s = xmlrpc.client.ServerProxy('http://localhost:8000')
print(s.pow(2,3))
print(s.add(2,3))
print(s.mul(5,2))
# Generating errors
print(s.pow(0,0))
print(s.add(1))
print(s.sub(1, 2))
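# --- Editor's sketch: a hypothetical matching server (not part of the
# original exercise). The client above assumes an XML-RPC server on
# localhost:8000 exposing pow, add and mul; sub is deliberately absent, so
# s.sub(1, 2) raises a Fault on the client side.
def run_demo_server():
    from xmlrpc.server import SimpleXMLRPCServer
    server = SimpleXMLRPCServer(('localhost', 8000))
    server.register_function(pow)
    server.register_function(lambda a, b: a + b, 'add')
    server.register_function(lambda a, b: a * b, 'mul')
    server.serve_forever()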
| 16.75
| 54
| 0.676617
|
import xmlrpc.client
s = xmlrpc.client.ServerProxy('http://localhost:8000')
print(s.pow(2,3))
print(s.add(2,3))
print(s.mul(5,2))
print(s.pow(0,0))
print(s.add(1))
print(s.sub(1, 2))
| true
| true
|
1c46e5e885ba5b8a6ca6466a4c60eccdef77f19e
| 9,122
|
py
|
Python
|
src/rosdep2/platforms/debian.py
|
gavanderhoorn/rosdep
|
641433af01bb217b807af6adda2b9f7a0c55f727
|
[
"BSD-3-Clause"
] | null | null | null |
src/rosdep2/platforms/debian.py
|
gavanderhoorn/rosdep
|
641433af01bb217b807af6adda2b9f7a0c55f727
|
[
"BSD-3-Clause"
] | null | null | null |
src/rosdep2/platforms/debian.py
|
gavanderhoorn/rosdep
|
641433af01bb217b807af6adda2b9f7a0c55f727
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author Tully Foote, Ken Conley
from __future__ import print_function
import subprocess
import sys
from rospkg.os_detect import OS_DEBIAN, OS_LINARO, OS_UBUNTU, OS_ELEMENTARY, OsDetect
from .pip import PIP_INSTALLER
from .gem import GEM_INSTALLER
from .source import SOURCE_INSTALLER
from ..installers import PackageManagerInstaller
from ..shell_utils import read_stdout
# apt package manager key
APT_INSTALLER = 'apt'
def register_installers(context):
context.set_installer(APT_INSTALLER, AptInstaller())
def register_platforms(context):
register_debian(context)
register_linaro(context)
register_ubuntu(context)
register_elementary(context)
def register_debian(context):
context.add_os_installer_key(OS_DEBIAN, APT_INSTALLER)
context.add_os_installer_key(OS_DEBIAN, PIP_INSTALLER)
context.add_os_installer_key(OS_DEBIAN, GEM_INSTALLER)
context.add_os_installer_key(OS_DEBIAN, SOURCE_INSTALLER)
context.set_default_os_installer_key(OS_DEBIAN, lambda self: APT_INSTALLER)
context.set_os_version_type(OS_DEBIAN, OsDetect.get_codename)
def register_linaro(context):
# Linaro is an alias for Ubuntu. If linaro is detected and it's not set as
# an override force ubuntu.
(os_name, os_version) = context.get_os_name_and_version()
if os_name == OS_LINARO and not context.os_override:
print('rosdep detected OS: [%s] aliasing it to: [%s]' %
(OS_LINARO, OS_UBUNTU), file=sys.stderr)
context.set_os_override(OS_UBUNTU, context.os_detect.get_codename())
def register_elementary(context):
# Elementary is an alias for Ubuntu. If elementary is detected and it's
# not set as an override force ubuntu.
(os_name, os_version) = context.get_os_name_and_version()
if os_name == OS_ELEMENTARY and not context.os_override:
print('rosdep detected OS: [%s] aliasing it to: [%s]' %
(OS_ELEMENTARY, OS_UBUNTU), file=sys.stderr)
context.set_os_override(OS_UBUNTU, context.os_detect.get_codename())
def register_ubuntu(context):
context.add_os_installer_key(OS_UBUNTU, APT_INSTALLER)
context.add_os_installer_key(OS_UBUNTU, PIP_INSTALLER)
context.add_os_installer_key(OS_UBUNTU, GEM_INSTALLER)
context.add_os_installer_key(OS_UBUNTU, SOURCE_INSTALLER)
context.set_default_os_installer_key(OS_UBUNTU, lambda self: APT_INSTALLER)
context.set_os_version_type(OS_UBUNTU, OsDetect.get_codename)
def _read_apt_cache_showpkg(packages, exec_fn=None):
"""
    For each listed package, yield whether it is a virtual package and, if so,
    the list of packages providing it. A package that was not found is yielded
    as non-virtual.
:param exec_fn: see `dpkg_detect`; make sure that exec_fn supports a
second, boolean, parameter.
"""
cmd = ['apt-cache', 'showpkg'] + packages
if exec_fn is None:
exec_fn = read_stdout
std_out = exec_fn(cmd).splitlines()
starts = []
notfound = set()
for p in packages:
last_start = starts[-1] if len(starts) > 0 else 0
try:
starts.append(std_out.index('Package: %s' % p, last_start))
except ValueError:
notfound.add(p)
starts.append(None)
for p in packages:
if p in notfound:
yield p, False, None
continue
start = starts.pop(0)
lines = iter(std_out[start:starts[0]])
header = 'Package: %s' % p
# proceed to Package header
while next(lines) != header:
pass
# proceed to versions section
while next(lines) != 'Versions: ':
pass
# virtual packages don't have versions
if next(lines) != '':
yield p, False, None
continue
        # proceed to reverse provides section
while next(lines) != 'Reverse Provides: ':
pass
pr = [line.split(' ', 2)[0] for line in lines]
if pr:
yield p, True, pr
else:
yield p, False, None
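

def _demo_read_apt_cache_showpkg():  # pragma: no cover
    # Editor's sketch (hypothetical package names): exercising the parser above
    # with an injected exec_fn, so no apt-cache binary is required. 'ros-foo'
    # has no versions of its own, so it is reported as a virtual package
    # provided by 'ros-foo-impl'.
    showpkg = '\n'.join([
        'Package: ros-foo',
        'Versions: ',
        '',
        'Reverse Depends: ',
        'Dependencies: ',
        'Provides: ',
        'Reverse Provides: ',
        'ros-foo-impl 1.0',
    ])
    result = list(_read_apt_cache_showpkg(['ros-foo'], exec_fn=lambda cmd: showpkg))
    assert result == [('ros-foo', True, ['ros-foo-impl'])]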
def dpkg_detect(pkgs, exec_fn=None):
"""
Given a list of package, return the list of installed packages.
:param pkgs: list of package names, optionally followed by a fixed version (`foo=3.0`)
:param exec_fn: function to execute Popen and read stdout (for testing)
:return: list elements in *pkgs* that were found installed on the system
"""
ret_list = []
# this is mainly a hack to support version locking for eigen.
# we strip version-locking syntax, e.g. libeigen3-dev=3.0.1-*.
# our query does not do the validation on the version itself.
# This is a map `package name -> package name optionally with version`.
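    # e.g. pkgs == ['libeigen3-dev=3.0.1-*', 'curl'] yields the map
    # {'libeigen3-dev': 'libeigen3-dev=3.0.1-*', 'curl': 'curl'} (editor's example).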
version_lock_map = {}
for p in pkgs:
if '=' in p:
version_lock_map[p.split('=')[0]] = p
else:
version_lock_map[p] = p
cmd = ['dpkg-query', '-W', '-f=\'${Package} ${Status}\n\'']
cmd.extend(version_lock_map.keys())
if exec_fn is None:
exec_fn = read_stdout
std_out, std_err = exec_fn(cmd, True)
std_out = std_out.replace('\'', '')
pkg_list = std_out.split('\n')
for pkg in pkg_list:
pkg_row = pkg.split()
if len(pkg_row) == 4 and (pkg_row[3] == 'installed'):
ret_list.append(pkg_row[0])
installed_packages = [version_lock_map[r] for r in ret_list]
# now for the remaining packages check, whether they are installed as
# virtual packages
remaining = _read_apt_cache_showpkg(list(p for p in pkgs if p not in installed_packages))
virtual = [n for (n, v, pr) in remaining if v and len(dpkg_detect(pr)) > 0]
return installed_packages + virtual
def _iterate_packages(packages, reinstall):
for entry in _read_apt_cache_showpkg(packages):
p, is_virtual, providers = entry
if is_virtual:
installed = []
if reinstall:
installed = dpkg_detect(providers)
if len(installed) > 0:
for i in installed:
yield i
                    continue  # don't output providers
yield providers
else:
yield p
class AptInstaller(PackageManagerInstaller):
"""
An implementation of the Installer for use on debian style
systems.
"""
def __init__(self):
super(AptInstaller, self).__init__(dpkg_detect)
def get_version_strings(self):
output = subprocess.check_output(['apt-get', '--version'])
version = output.splitlines()[0].split(' ')[1]
return ['apt-get {}'.format(version)]
def _get_install_commands_for_package(self, base_cmd, package_or_list):
def pkg_command(p):
return self.elevate_priv(base_cmd + [p])
if isinstance(package_or_list, list):
return [pkg_command(p) for p in package_or_list]
else:
return pkg_command(package_or_list)
def get_install_command(self, resolved, interactive=True, reinstall=False, quiet=False):
packages = self.get_packages_to_install(resolved, reinstall=reinstall)
if not packages:
return []
if not interactive and quiet:
base_cmd = ['apt-get', 'install', '-y', '-qq']
elif quiet:
base_cmd = ['apt-get', 'install', '-qq']
        elif not interactive:
base_cmd = ['apt-get', 'install', '-y']
else:
base_cmd = ['apt-get', 'install']
return [self._get_install_commands_for_package(base_cmd, p) for p in _iterate_packages(packages, reinstall)]
| 36.931174
| 116
| 0.676168
|
from __future__ import print_function
import subprocess
import sys
from rospkg.os_detect import OS_DEBIAN, OS_LINARO, OS_UBUNTU, OS_ELEMENTARY, OsDetect
from .pip import PIP_INSTALLER
from .gem import GEM_INSTALLER
from .source import SOURCE_INSTALLER
from ..installers import PackageManagerInstaller
from ..shell_utils import read_stdout
APT_INSTALLER = 'apt'
def register_installers(context):
context.set_installer(APT_INSTALLER, AptInstaller())
def register_platforms(context):
register_debian(context)
register_linaro(context)
register_ubuntu(context)
register_elementary(context)
def register_debian(context):
context.add_os_installer_key(OS_DEBIAN, APT_INSTALLER)
context.add_os_installer_key(OS_DEBIAN, PIP_INSTALLER)
context.add_os_installer_key(OS_DEBIAN, GEM_INSTALLER)
context.add_os_installer_key(OS_DEBIAN, SOURCE_INSTALLER)
context.set_default_os_installer_key(OS_DEBIAN, lambda self: APT_INSTALLER)
context.set_os_version_type(OS_DEBIAN, OsDetect.get_codename)
def register_linaro(context):
    # Linaro is an alias for Ubuntu. If linaro is detected and it's not set as
    # an override force ubuntu.
(os_name, os_version) = context.get_os_name_and_version()
if os_name == OS_LINARO and not context.os_override:
print('rosdep detected OS: [%s] aliasing it to: [%s]' %
(OS_LINARO, OS_UBUNTU), file=sys.stderr)
context.set_os_override(OS_UBUNTU, context.os_detect.get_codename())
def register_elementary(context):
    # Elementary is an alias for Ubuntu. If elementary is detected and it's
    # not set as an override force ubuntu.
(os_name, os_version) = context.get_os_name_and_version()
if os_name == OS_ELEMENTARY and not context.os_override:
print('rosdep detected OS: [%s] aliasing it to: [%s]' %
(OS_ELEMENTARY, OS_UBUNTU), file=sys.stderr)
context.set_os_override(OS_UBUNTU, context.os_detect.get_codename())
def register_ubuntu(context):
context.add_os_installer_key(OS_UBUNTU, APT_INSTALLER)
context.add_os_installer_key(OS_UBUNTU, PIP_INSTALLER)
context.add_os_installer_key(OS_UBUNTU, GEM_INSTALLER)
context.add_os_installer_key(OS_UBUNTU, SOURCE_INSTALLER)
context.set_default_os_installer_key(OS_UBUNTU, lambda self: APT_INSTALLER)
context.set_os_version_type(OS_UBUNTU, OsDetect.get_codename)
def _read_apt_cache_showpkg(packages, exec_fn=None):
cmd = ['apt-cache', 'showpkg'] + packages
if exec_fn is None:
exec_fn = read_stdout
std_out = exec_fn(cmd).splitlines()
starts = []
notfound = set()
for p in packages:
last_start = starts[-1] if len(starts) > 0 else 0
try:
starts.append(std_out.index('Package: %s' % p, last_start))
except ValueError:
notfound.add(p)
starts.append(None)
for p in packages:
if p in notfound:
yield p, False, None
continue
start = starts.pop(0)
lines = iter(std_out[start:starts[0]])
header = 'Package: %s' % p
while next(lines) != header:
pass
while next(lines) != 'Versions: ':
pass
if next(lines) != '':
yield p, False, None
continue
        # proceed to reverse provides section
while next(lines) != 'Reverse Provides: ':
pass
pr = [line.split(' ', 2)[0] for line in lines]
if pr:
yield p, True, pr
else:
yield p, False, None
def dpkg_detect(pkgs, exec_fn=None):
ret_list = []
# this is mainly a hack to support version locking for eigen.
# we strip version-locking syntax, e.g. libeigen3-dev=3.0.1-*.
# our query does not do the validation on the version itself.
# This is a map `package name -> package name optionally with version`.
version_lock_map = {}
for p in pkgs:
if '=' in p:
version_lock_map[p.split('=')[0]] = p
else:
version_lock_map[p] = p
cmd = ['dpkg-query', '-W', '-f=\'${Package} ${Status}\n\'']
cmd.extend(version_lock_map.keys())
if exec_fn is None:
exec_fn = read_stdout
std_out, std_err = exec_fn(cmd, True)
std_out = std_out.replace('\'', '')
pkg_list = std_out.split('\n')
for pkg in pkg_list:
pkg_row = pkg.split()
if len(pkg_row) == 4 and (pkg_row[3] == 'installed'):
ret_list.append(pkg_row[0])
installed_packages = [version_lock_map[r] for r in ret_list]
remaining = _read_apt_cache_showpkg(list(p for p in pkgs if p not in installed_packages))
virtual = [n for (n, v, pr) in remaining if v and len(dpkg_detect(pr)) > 0]
return installed_packages + virtual
def _iterate_packages(packages, reinstall):
for entry in _read_apt_cache_showpkg(packages):
p, is_virtual, providers = entry
if is_virtual:
installed = []
if reinstall:
installed = dpkg_detect(providers)
if len(installed) > 0:
for i in installed:
yield i
continue
yield providers
else:
yield p
class AptInstaller(PackageManagerInstaller):
def __init__(self):
super(AptInstaller, self).__init__(dpkg_detect)
def get_version_strings(self):
output = subprocess.check_output(['apt-get', '--version'])
version = output.splitlines()[0].split(' ')[1]
return ['apt-get {}'.format(version)]
def _get_install_commands_for_package(self, base_cmd, package_or_list):
def pkg_command(p):
return self.elevate_priv(base_cmd + [p])
if isinstance(package_or_list, list):
return [pkg_command(p) for p in package_or_list]
else:
return pkg_command(package_or_list)
def get_install_command(self, resolved, interactive=True, reinstall=False, quiet=False):
packages = self.get_packages_to_install(resolved, reinstall=reinstall)
if not packages:
return []
if not interactive and quiet:
base_cmd = ['apt-get', 'install', '-y', '-qq']
elif quiet:
base_cmd = ['apt-get', 'install', '-qq']
        elif not interactive:
base_cmd = ['apt-get', 'install', '-y']
else:
base_cmd = ['apt-get', 'install']
return [self._get_install_commands_for_package(base_cmd, p) for p in _iterate_packages(packages, reinstall)]
| true
| true
|
1c46e7371d0f642717b0dbe3ec998d628839b8d6
| 6,710
|
py
|
Python
|
novelle/views/routes.py
|
sahuashi/novelle
|
04295f4060af763a23a299219da73ba46c1ed626
|
[
"MIT"
] | null | null | null |
novelle/views/routes.py
|
sahuashi/novelle
|
04295f4060af763a23a299219da73ba46c1ed626
|
[
"MIT"
] | null | null | null |
novelle/views/routes.py
|
sahuashi/novelle
|
04295f4060af763a23a299219da73ba46c1ed626
|
[
"MIT"
] | null | null | null |
import os
import requests
from flask import Blueprint, render_template, flash, request, redirect, url_for, current_app
from flask_login import login_required, logout_user, login_user, current_user
from sqlalchemy import exc
from novelle.models import db, User, Book
from novelle.forms import Form
router = Blueprint('route', __name__)
# no home page at the moment, redirect from home page to search page
@router.route("/")
def index():
return redirect(url_for('route.search'))
# allow the user to submit a search query
@router.route("/search", methods=["POST", "GET"])
def search():
if request.method == "POST":
q = request.form["query"]
return redirect(url_for('route.retrieve', query=q))
else:
return render_template('search.html')
# retrieve book results from Google Books API
@router.route("/search/<query>")
def retrieve(query):
api_key = os.environ.get('BOOKS_API_KEY')
search_query = query
# build url for api request
search_url = f'https://www.googleapis.com/books/v1/volumes?q={search_query}&projection=full&maxResults=15&key={api_key}'
# send request to api
resp = requests.get(search_url)
# save relevant book info from api response
responses = resp.json()['items']
books = parse_books(responses)
return render_template('results.html', books=books, query=query)
def parse_books(res):
# list to store parsed book information
books = []
# retrieve relevant info from json
for book in res:
book_info = {
'id': book['id'],
'title': book['volumeInfo']['title'] if 'title' in book['volumeInfo'] else 'No title available.',
'subtitle': book['volumeInfo']['subtitle'] if 'subtitle' in book['volumeInfo'] else '',
'desc': book['volumeInfo']['description'] if 'description' in book[
'volumeInfo'] else 'No description available.',
'author': book['volumeInfo']['authors'][0] if 'authors' in book['volumeInfo'] else 'No authors available.',
'date': book['volumeInfo']['publishedDate'] if 'publishedDate' in book[
'volumeInfo'] else 'No published date available.',
'publisher': book['volumeInfo']['publisher'] if 'publisher' in book[
'volumeInfo'] else ' No publisher available.',
'thumbnail': book['volumeInfo']['imageLinks']['thumbnail'] if 'imageLinks' in book[
'volumeInfo'] else 'https://islandpress.org/sites/default/files/default_book_cover_2015.jpg',
'pages': book['volumeInfo']['pageCount'] if 'pageCount' in book[
'volumeInfo'] else 'No page count available.',
'rating': f"{book['volumeInfo']['averageRating']}/5 based on {book['volumeInfo']['ratingsCount']} review(s)"
if 'averageRating' in book['volumeInfo'] else 'No rating available.',
'infoLink': book['volumeInfo']['infoLink'] if 'infoLink' in book['volumeInfo'] else ' '
}
# add current book to list of book results
books.append(book_info)
# add current book to database
try:
book = Book(id=book_info.get('id'),
title=book_info.get('title'),
subtitle=book_info.get('subtitle'),
thumbnail=book_info.get('thumbnail'),
googlebooks=book_info.get('infoLink')
)
db.session.add(book)
db.session.flush()
# if current book info already in db, abort
except exc.SQLAlchemyError:
db.session.rollback()
# else, save updated db
else:
db.session.commit()
return books
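

def _demo_volume_info_fallbacks():  # pragma: no cover
    # Editor's sketch (made-up data): every field parse_books reads from
    # volumeInfo falls back to a placeholder string when the API omits it.
    volume_info = {'title': 'Example Book'}
    title = volume_info['title'] if 'title' in volume_info else 'No title available.'
    desc = volume_info['description'] if 'description' in volume_info else 'No description available.'
    assert (title, desc) == ('Example Book', 'No description available.')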
# allow user to login to view and add to list
@router.route("/login", methods=['GET', 'POST'])
def login():
form = Form()
if form.validate_on_submit():
user = User.query.filter_by(username=form.username.data).first()
if user:
if user.password == form.password.data:
# valid login, redirect to user's reading list
login_user(user)
flash(f'Welcome back, {current_user.username}!')
return redirect(url_for('route.list'))
# invalid login, return to login page to try again
flash('Invalid username/password. Please try again.')
return redirect(url_for('route.login'))
return render_template('login.html', form=form)
# allow user to create account
@router.route("/register", methods=['GET', 'POST'])
def register():
form = Form()
if form.validate_on_submit():
user = User(username=form.username.data, password=form.password.data)
# add new user to database
try:
db.session.add(user)
db.session.flush()
# if user already exists, abort
except exc.SQLAlchemyError:
db.session.rollback()
flash('Username already taken! Please try again.')
return redirect(url_for('route.register'))
# save changes to database and have user login
else:
db.session.commit()
flash('Account created! Please login to continue.')
return redirect(url_for('route.login'))
return render_template('register.html', form=form)
# protected route: allow user to logout
@router.route("/logout")
@login_required
def logout():
flash(f'You were successfully logged out, {current_user.username}!')
logout_user()
return redirect(url_for('route.index'))
# display user's reading list
@router.route("/mylist")
def list():
if current_user.is_authenticated:
return render_template('list.html', user=current_user)
else:
flash('You must login to see your reading list.')
return redirect(url_for('route.login'))
# add book to user's reading list
@router.route("/save", methods=['POST', 'GET'])
def save():
if current_user.is_authenticated:
if request.method == "POST":
book_id = request.form['bookid']
book = Book.query.filter_by(id=book_id).first()
user = current_user
user.list.append(book)
db.session.commit()
return redirect(url_for('route.list'))
else:
flash('You must login to save to your reading list.')
return redirect(url_for('route.login'))
@router.route("/delete", methods=['POST'])
def delete():
book_id = request.form['bookid']
book = Book.query.filter_by(id=book_id).first()
user = current_user
user.list.remove(book)
db.session.commit()
return redirect(url_for('route.list'))
@router.route('/favicon.ico')
def favicon():
return current_app.send_static_file('favicon.ico')
| 37.909605
| 124
| 0.628167
|
import os
import requests
from flask import Blueprint, render_template, flash, request, redirect, url_for, current_app
from flask_login import login_required, logout_user, login_user, current_user
from sqlalchemy import exc
from novelle.models import db, User, Book
from novelle.forms import Form
router = Blueprint('route', __name__)
@router.route("/")
def index():
return redirect(url_for('route.search'))
@router.route("/search", methods=["POST", "GET"])
def search():
if request.method == "POST":
q = request.form["query"]
return redirect(url_for('route.retrieve', query=q))
else:
return render_template('search.html')
@router.route("/search/<query>")
def retrieve(query):
api_key = os.environ.get('BOOKS_API_KEY')
search_query = query
search_url = f'https://www.googleapis.com/books/v1/volumes?q={search_query}&projection=full&maxResults=15&key={api_key}'
resp = requests.get(search_url)
responses = resp.json()['items']
books = parse_books(responses)
return render_template('results.html', books=books, query=query)
def parse_books(res):
books = []
for book in res:
book_info = {
'id': book['id'],
'title': book['volumeInfo']['title'] if 'title' in book['volumeInfo'] else 'No title available.',
'subtitle': book['volumeInfo']['subtitle'] if 'subtitle' in book['volumeInfo'] else '',
'desc': book['volumeInfo']['description'] if 'description' in book[
'volumeInfo'] else 'No description available.',
'author': book['volumeInfo']['authors'][0] if 'authors' in book['volumeInfo'] else 'No authors available.',
'date': book['volumeInfo']['publishedDate'] if 'publishedDate' in book[
'volumeInfo'] else 'No published date available.',
'publisher': book['volumeInfo']['publisher'] if 'publisher' in book[
'volumeInfo'] else ' No publisher available.',
'thumbnail': book['volumeInfo']['imageLinks']['thumbnail'] if 'imageLinks' in book[
'volumeInfo'] else 'https://islandpress.org/sites/default/files/default_book_cover_2015.jpg',
'pages': book['volumeInfo']['pageCount'] if 'pageCount' in book[
'volumeInfo'] else 'No page count available.',
'rating': f"{book['volumeInfo']['averageRating']}/5 based on {book['volumeInfo']['ratingsCount']} review(s)"
if 'averageRating' in book['volumeInfo'] else 'No rating available.',
'infoLink': book['volumeInfo']['infoLink'] if 'infoLink' in book['volumeInfo'] else ' '
}
books.append(book_info)
try:
book = Book(id=book_info.get('id'),
title=book_info.get('title'),
subtitle=book_info.get('subtitle'),
thumbnail=book_info.get('thumbnail'),
googlebooks=book_info.get('infoLink')
)
db.session.add(book)
db.session.flush()
except exc.SQLAlchemyError:
db.session.rollback()
else:
db.session.commit()
return books
@router.route("/login", methods=['GET', 'POST'])
def login():
form = Form()
if form.validate_on_submit():
user = User.query.filter_by(username=form.username.data).first()
if user:
if user.password == form.password.data:
login_user(user)
flash(f'Welcome back, {current_user.username}!')
return redirect(url_for('route.list'))
# invalid login, return to login page to try again
flash('Invalid username/password. Please try again.')
return redirect(url_for('route.login'))
return render_template('login.html', form=form)
# allow user to create account
@router.route("/register", methods=['GET', 'POST'])
def register():
form = Form()
if form.validate_on_submit():
user = User(username=form.username.data, password=form.password.data)
# add new user to database
try:
db.session.add(user)
db.session.flush()
# if user already exists, abort
except exc.SQLAlchemyError:
db.session.rollback()
flash('Username already taken! Please try again.')
return redirect(url_for('route.register'))
# save changes to database and have user login
else:
db.session.commit()
flash('Account created! Please login to continue.')
return redirect(url_for('route.login'))
return render_template('register.html', form=form)
# protected route: allow user to logout
@router.route("/logout")
@login_required
def logout():
flash(f'You were successfully logged out, {current_user.username}!')
logout_user()
return redirect(url_for('route.index'))
# display user's reading list
@router.route("/mylist")
def list():
if current_user.is_authenticated:
return render_template('list.html', user=current_user)
else:
flash('You must login to see your reading list.')
return redirect(url_for('route.login'))
@router.route("/save", methods=['POST', 'GET'])
def save():
if current_user.is_authenticated:
if request.method == "POST":
book_id = request.form['bookid']
book = Book.query.filter_by(id=book_id).first()
user = current_user
user.list.append(book)
db.session.commit()
return redirect(url_for('route.list'))
else:
flash('You must login to save to your reading list.')
return redirect(url_for('route.login'))
@router.route("/delete", methods=['POST'])
def delete():
book_id = request.form['bookid']
book = Book.query.filter_by(id=book_id).first()
user = current_user
user.list.remove(book)
db.session.commit()
return redirect(url_for('route.list'))
@router.route('/favicon.ico')
def favicon():
return current_app.send_static_file('favicon.ico')
| true
| true
|
1c46e82782628298655f1652a3e4cd46980848c8
| 5,161
|
py
|
Python
|
Synaptic-Flow/Utils/metrics.py
|
santosh-b/Alleviate-Robust-Overfitting
|
c369ab2eaf51ba02a15f45db77a8c9292c8dbbf8
|
[
"MIT"
] | null | null | null |
Synaptic-Flow/Utils/metrics.py
|
santosh-b/Alleviate-Robust-Overfitting
|
c369ab2eaf51ba02a15f45db77a8c9292c8dbbf8
|
[
"MIT"
] | null | null | null |
Synaptic-Flow/Utils/metrics.py
|
santosh-b/Alleviate-Robust-Overfitting
|
c369ab2eaf51ba02a15f45db77a8c9292c8dbbf8
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import numpy as np
import pandas as pd
from prune import *
from Layers import layers
def summary(model, scores, flops, prunable):
r"""Summary of compression results for a model.
"""
rows = []
for name, module in model.named_modules():
for pname, param in module.named_parameters(recurse=False):
pruned = prunable(module) and id(param) in scores.keys()
if pruned:
sparsity = getattr(module, pname+'_mask').detach().cpu().numpy().mean()
score = scores[id(param)].detach().cpu().numpy()
else:
sparsity = 1.0
score = np.zeros(1)
shape = param.detach().cpu().numpy().shape
flop = flops[name][pname]
score_mean = score.mean()
score_var = score.var()
score_sum = score.sum()
score_abs_mean = np.abs(score).mean()
score_abs_var = np.abs(score).var()
score_abs_sum = np.abs(score).sum()
rows.append([name, pname, sparsity, np.prod(shape), shape, flop,
score_mean, score_var, score_sum,
score_abs_mean, score_abs_var, score_abs_sum,
pruned])
columns = ['module', 'param', 'sparsity', 'size', 'shape', 'flops', 'score mean', 'score variance',
'score sum', 'score abs mean', 'score abs variance', 'score abs sum', 'prunable']
return pd.DataFrame(rows, columns=columns)
def flop(model, input_shape, device):
total = {}
def count_flops(name):
def hook(module, input, output):
flops = {}
if isinstance(module, layers.Linear) or isinstance(module, nn.Linear):
in_features = module.in_features
out_features = module.out_features
flops['weight'] = in_features * out_features
if module.bias is not None:
flops['bias'] = out_features
if isinstance(module, layers.Conv2d) or isinstance(module, nn.Conv2d):
in_channels = module.in_channels
out_channels = module.out_channels
kernel_size = int(np.prod(module.kernel_size))
output_size = output.size(2) * output.size(3)
flops['weight'] = in_channels * out_channels * kernel_size * output_size
if module.bias is not None:
flops['bias'] = out_channels * output_size
if isinstance(module, layers.BatchNorm1d) or isinstance(module, nn.BatchNorm1d):
if module.affine:
flops['weight'] = module.num_features
flops['bias'] = module.num_features
if isinstance(module, layers.BatchNorm2d) or isinstance(module, nn.BatchNorm2d):
output_size = output.size(2) * output.size(3)
if module.affine:
flops['weight'] = module.num_features * output_size
flops['bias'] = module.num_features * output_size
if isinstance(module, layers.Identity1d):
flops['weight'] = module.num_features
if isinstance(module, layers.Identity2d):
output_size = output.size(2) * output.size(3)
flops['weight'] = module.num_features * output_size
total[name] = flops
return hook
for name, module in model.named_modules():
module.register_forward_hook(count_flops(name))
input = torch.ones([1] + list(input_shape)).to(device)
model(input)
return total
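

def _demo_flop_linear():  # pragma: no cover
    # Editor's sketch: under the counting scheme above, a single 4->2 Linear
    # layer costs 4*2 weight multiply-accumulates plus 2 bias additions.
    model = nn.Sequential(nn.Linear(4, 2))
    counts = flop(model, [4], 'cpu')
    assert counts['0'] == {'weight': 8, 'bias': 2}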
# def conservation(model, scores, batchnorm, residual):
# r"""Summary of conservation results for a model.
# """
# rows = []
# bias_flux = 0.0
# mu = 0.0
# for name, module in reversed(list(model.named_modules())):
# if prunable(module, batchnorm, residual):
# weight_flux = 0.0
# for pname, param in module.named_parameters(recurse=False):
# # Get score
# score = scores[id(param)].detach().cpu().numpy()
# # Adjust batchnorm bias score for mean and variance
# if isinstance(module, (layers.Linear, layers.Conv2d)) and pname == "bias":
# bias = param.detach().cpu().numpy()
# score *= (bias - mu) / bias
# mu = 0.0
# if isinstance(module, (layers.BatchNorm1d, layers.BatchNorm2d)) and pname == "bias":
# mu = module.running_mean.detach().cpu().numpy()
# # Add flux
# if pname == "weight":
# weight_flux += score.sum()
# if pname == "bias":
# bias_flux += score.sum()
# layer_flux = weight_flux
# if not isinstance(module, (layers.Identity1d, layers.Identity2d)):
# layer_flux += bias_flux
# rows.append([name, layer_flux])
# columns = ['module', 'score flux']
# return pd.DataFrame(rows, columns=columns)
| 43.369748
| 104
| 0.550281
|
import torch
import torch.nn as nn
import numpy as np
import pandas as pd
from prune import *
from Layers import layers
def summary(model, scores, flops, prunable):
rows = []
for name, module in model.named_modules():
for pname, param in module.named_parameters(recurse=False):
pruned = prunable(module) and id(param) in scores.keys()
if pruned:
sparsity = getattr(module, pname+'_mask').detach().cpu().numpy().mean()
score = scores[id(param)].detach().cpu().numpy()
else:
sparsity = 1.0
score = np.zeros(1)
shape = param.detach().cpu().numpy().shape
flop = flops[name][pname]
score_mean = score.mean()
score_var = score.var()
score_sum = score.sum()
score_abs_mean = np.abs(score).mean()
score_abs_var = np.abs(score).var()
score_abs_sum = np.abs(score).sum()
rows.append([name, pname, sparsity, np.prod(shape), shape, flop,
score_mean, score_var, score_sum,
score_abs_mean, score_abs_var, score_abs_sum,
pruned])
columns = ['module', 'param', 'sparsity', 'size', 'shape', 'flops', 'score mean', 'score variance',
'score sum', 'score abs mean', 'score abs variance', 'score abs sum', 'prunable']
return pd.DataFrame(rows, columns=columns)
def flop(model, input_shape, device):
total = {}
def count_flops(name):
def hook(module, input, output):
flops = {}
if isinstance(module, layers.Linear) or isinstance(module, nn.Linear):
in_features = module.in_features
out_features = module.out_features
flops['weight'] = in_features * out_features
if module.bias is not None:
flops['bias'] = out_features
if isinstance(module, layers.Conv2d) or isinstance(module, nn.Conv2d):
in_channels = module.in_channels
out_channels = module.out_channels
kernel_size = int(np.prod(module.kernel_size))
output_size = output.size(2) * output.size(3)
flops['weight'] = in_channels * out_channels * kernel_size * output_size
if module.bias is not None:
flops['bias'] = out_channels * output_size
if isinstance(module, layers.BatchNorm1d) or isinstance(module, nn.BatchNorm1d):
if module.affine:
flops['weight'] = module.num_features
flops['bias'] = module.num_features
if isinstance(module, layers.BatchNorm2d) or isinstance(module, nn.BatchNorm2d):
output_size = output.size(2) * output.size(3)
if module.affine:
flops['weight'] = module.num_features * output_size
flops['bias'] = module.num_features * output_size
if isinstance(module, layers.Identity1d):
flops['weight'] = module.num_features
if isinstance(module, layers.Identity2d):
output_size = output.size(2) * output.size(3)
flops['weight'] = module.num_features * output_size
total[name] = flops
return hook
for name, module in model.named_modules():
module.register_forward_hook(count_flops(name))
input = torch.ones([1] + list(input_shape)).to(device)
model(input)
return total
# """
| true
| true
|
1c46e8c37c4ba356c3728913dc60e567bdcb344e
| 9,642
|
py
|
Python
|
server/vcr-server/vcr_server/utils/solrqueue.py
|
brianorwhatever/aries-vcr
|
96bb31a2f96406dfa2832dbd7790c46b60981e13
|
[
"Apache-2.0"
] | 38
|
2019-01-07T02:49:55.000Z
|
2020-01-27T17:26:09.000Z
|
server/vcr-server/vcr_server/utils/solrqueue.py
|
brianorwhatever/aries-vcr
|
96bb31a2f96406dfa2832dbd7790c46b60981e13
|
[
"Apache-2.0"
] | 364
|
2019-01-07T20:22:15.000Z
|
2020-03-10T21:59:23.000Z
|
server/vcr-server/vcr_server/utils/solrqueue.py
|
brianorwhatever/aries-vcr
|
96bb31a2f96406dfa2832dbd7790c46b60981e13
|
[
"Apache-2.0"
] | 34
|
2019-01-04T19:16:04.000Z
|
2020-02-20T19:24:25.000Z
|
import logging
import threading
import os
from queue import Empty, Full, Queue
from haystack.utils import get_identifier
from api.v2.search.index import TxnAwareSearchIndex
LOGGER = logging.getLogger(__name__)
# this will kill the vcr-api process
RTI_ABORT_ON_ERRORS = os.getenv("RTI_ABORT_ON_ERRORS", "TRUE").upper()
ABORT_ON_ERRORS = RTI_ABORT_ON_ERRORS == "TRUE"
# this will re-raise errors, which will kill the indexing thread
RTI_RAISE_ERRORS = os.getenv("RTI_RAISE_ERRORS", "FALSE").upper()
RAISE_ERRORS = RTI_RAISE_ERRORS == "TRUE"
# if both of the above are false, indexing errors will be ignored
# number of seconds to wait when solr queue is empty before retry
RTI_WAIT_TIME = os.getenv("RTI_WAIT_TIME", "5")
WAIT_TIME = int(RTI_WAIT_TIME)
# max number of items to trigger an update to the solr index
RTI_MAX_SOLR_BATCH = os.getenv("RTI_MAX_SOLR_BATCH", "25")
MAX_SOLR_BATCH = int(RTI_MAX_SOLR_BATCH)
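# Editor's sketch (assumed usage, not part of the original module): all of
# the RTI_* knobs above are read once at import time, so they must be set in
# the environment before this module is imported, e.g.:
#
#   export RTI_WAIT_TIME=2          # poll the queue every 2 seconds
#   export RTI_MAX_SOLR_BATCH=100   # flush updates in batches of 100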
class SolrQueue:
is_active = False
def __init__(self):
LOGGER.info("Initializing Solr queue ...")
self._queue = Queue()
self._prev_queue = None
self._stop = threading.Event()
self._thread = None
self._trigger = threading.Event()
def isactive(self):
return (self.is_active or not self._queue.empty())
def qsize(self):
return self._queue.qsize()
def add(self, index_cls, using, instances):
ids = [instance.id for instance in instances]
# Log the wallet_id to make it easy to search for the credentials when troubleshooting
# The record ids are not indexed so they are not searchable.
# wallet_ids = [instance.credential_id for instance in instances]
LOGGER.debug("Adding items to Solr queue for indexing; Class: %s, Using: %s", index_cls, using)
try:
self._queue.put((index_cls, using, ids, 0))
except Full:
LOGGER.error("Can't add items to the Solr queue because it is full")
raise
def delete(self, index_cls, using, instances):
ids = [get_identifier(instance) for instance in instances]
# Log the wallet_id to make it easy to search for the credentials when troubleshooting
# The record ids are not indexed so they are not searchable.
# wallet_ids = [instance.credential_id for instance in instances]
LOGGER.debug("Deleteing items from Solr queue/index; Class: %s, Using: %s", index_cls, using)
try:
self._queue.put((index_cls, using, ids, 1))
except Full:
LOGGER.error("Can't delete items from the Solr queue because it is full")
raise
def setup(self, app=None):
LOGGER.info("Setting up Solr queue ...")
if app is not None:
LOGGER.info("Wiring the Solr queue into the app; %s", app)
app["solrqueue"] = self
app.on_startup.append(self.app_start)
app.on_cleanup.append(self.app_stop)
LOGGER.info("Wiring the Solr queue into the TxnAwareSearchIndex.")
self._prev_queue = TxnAwareSearchIndex._backend_queue
TxnAwareSearchIndex._backend_queue = self
async def app_start(self, _app=None):
self.start()
async def app_stop(self, _app=None):
self.stop()
def __enter__(self):
self.setup()
self.start()
return self
def __exit__(self, type, value, tb):
LOGGER.info("Solr queue is exiting ...")
# if handling exception, don't wait for worker thread
self.stop(not type)
LOGGER.info("Restoring previous TxnAwareSearchIndex settings ...")
TxnAwareSearchIndex._backend_queue = self._prev_queue
def start(self):
LOGGER.info("Starting Solr queue ...")
self._thread = threading.Thread(target=self._run)
self._thread.start()
def stop(self, join=True):
LOGGER.info("Stoping Solr queue ...")
if not self._queue.empty():
LOGGER.error("The Solr queue is not empty, there are about %s items that will not be indexed", self._queue.qsize())
self._stop.set()
self._trigger.set()
if join:
self._thread.join()
def trigger(self):
LOGGER.info("Triggering Solr queue ...")
self._trigger.set()
def _run(self):
LOGGER.info("Running Solr queue ...")
while True:
LOGGER.debug("Waiting [%d] ...", WAIT_TIME)
self._trigger.wait(WAIT_TIME)
self._drain()
if self._stop.is_set():
LOGGER.info("Finished running Solr queue ...")
return
def index_type(self, index_cls, delete, using):
"""String representing the index class type."""
if not index_cls:
return None
return ("delete" if delete == 1 else "update") + "::" + str(index_cls) + "::" + str(using)
def _drain(self):
LOGGER.debug("Indexing Solr queue items ...")
global RAISE_ERRORS
global ABORT_ON_ERRORS
last_ids = {}
try:
self.is_active = True
while True:
try:
index_cls, using, ids, delete = self._queue.get_nowait()
LOGGER.debug("Pop items off the Solr queue for indexing; Class: %s, Using: %s, Delete: %s, Instances: %s", index_cls, using, delete, ids)
except Empty:
LOGGER.debug("Solr queue is empty ...")
index_cls = None
delete = 0
using = None
index_cls_type = self.index_type(index_cls, delete, using)
if index_cls:
LOGGER.debug("Updating list of ids for [%s]..." % index_cls_type)
if not index_cls_type in last_ids:
last_ids[index_cls_type] = {
"index_cls": index_cls,
"delete": delete,
"using": using,
"ids": set(),
}
last_ids[index_cls_type]["ids"].update(ids)
for attr, val in last_ids.items():
if (not index_cls) or MAX_SOLR_BATCH <= len(val["ids"]):
LOGGER.debug("Processing %s items for [%s]", len(val["ids"]), attr)
try:
if val["delete"] == 1:
self.remove(val["index_cls"], val["using"], val["ids"])
else:
self.update(val["index_cls"], val["using"], val["ids"])
last_ids[attr]["ids"] = set()
except Exception:
LOGGER.exception("An unexpected exception was encountered while processing items from the Solr queue.", exc_info=True)
LOGGER.info("Requeueing items for later processing ...")
try:
self._queue.put( (val["index_cls"], val["using"], val["ids"], val["delete"]) )
except Full:
LOGGER.error("Can't requeue items to the Solr queue because it is full; %s", val["ids"])
raise
raise
if not index_cls:
LOGGER.debug("Done indexing items from Solr queue ...")
break
except Exception as e:
LOGGER.error("Error processing real-time index queue: %s", str(e))
if ABORT_ON_ERRORS:
# this will kill the vcr-api process
os.abort()
elif RAISE_ERRORS:
# this will re-raise errors, which will kill the indexing thread
raise
# if both of the above are false, indexing errors will be ignored
finally:
self.is_active = False
def update(self, index_cls, using, ids):
LOGGER.debug("Updating the indexes for Solr queue items ...")
index = index_cls()
backend = index.get_backend(using)
if backend is not None:
LOGGER.info("Updating indexes for %d row(s) from Solr queue: %s", len(ids), ids)
rows = index.index_queryset(using).filter(id__in=ids)
# Turn off silently_fail; throw an exception if there is an error so we can requeue the items being indexed.
backend.silently_fail = False
backend.update(index, rows)
# LOGGER.debug("Index update complete.")
else:
LOGGER.error("Failed to get backend. Unable to update the index for %d row(s) from the Solr queue: %s", len(ids), ids)
raise Exception("Failed to get backend. Unable to update the index for Solr queue")
def remove(self, index_cls, using, ids):
LOGGER.debug("Removing the indexes for Solr queue items ...")
index = index_cls()
backend = index.get_backend(using)
if backend is not None:
LOGGER.info("Removing indexes for %d row(s) in Solr queue: %s", len(ids), ids)
# Turn off silently_fail; throw an exception if there is an error so we can requeue the items being indexed.
backend.silently_fail = False
# backend.remove has no support for a list of IDs
backend.conn.delete(id=ids)
else:
LOGGER.error("Failed to get backend. Unable to remove the indexes for %d row(s) from the solr queue: %s", len(ids), ids)
raise Exception("Failed to get backend. Unable to remove the index for Solr queue")
| 42.663717
| 157
| 0.581
|
import logging
import threading
import os
from queue import Empty, Full, Queue
from haystack.utils import get_identifier
from api.v2.search.index import TxnAwareSearchIndex
LOGGER = logging.getLogger(__name__)
RTI_ABORT_ON_ERRORS = os.getenv("RTI_ABORT_ON_ERRORS", "TRUE").upper()
ABORT_ON_ERRORS = RTI_ABORT_ON_ERRORS == "TRUE"
RTI_RAISE_ERRORS = os.getenv("RTI_RAISE_ERRORS", "FALSE").upper()
RAISE_ERRORS = RTI_RAISE_ERRORS == "TRUE"
RTI_WAIT_TIME = os.getenv("RTI_WAIT_TIME", "5")
WAIT_TIME = int(RTI_WAIT_TIME)
RTI_MAX_SOLR_BATCH = os.getenv("RTI_MAX_SOLR_BATCH", "25")
MAX_SOLR_BATCH = int(RTI_MAX_SOLR_BATCH)
class SolrQueue:
is_active = False
def __init__(self):
LOGGER.info("Initializing Solr queue ...")
self._queue = Queue()
self._prev_queue = None
self._stop = threading.Event()
self._thread = None
self._trigger = threading.Event()
def isactive(self):
return (self.is_active or not self._queue.empty())
def qsize(self):
return self._queue.qsize()
def add(self, index_cls, using, instances):
ids = [instance.id for instance in instances]
LOGGER.debug("Adding items to Solr queue for indexing; Class: %s, Using: %s", index_cls, using)
try:
self._queue.put((index_cls, using, ids, 0))
except Full:
LOGGER.error("Can't add items to the Solr queue because it is full")
raise
def delete(self, index_cls, using, instances):
ids = [get_identifier(instance) for instance in instances]
LOGGER.debug("Deleteing items from Solr queue/index; Class: %s, Using: %s", index_cls, using)
try:
self._queue.put((index_cls, using, ids, 1))
except Full:
LOGGER.error("Can't delete items from the Solr queue because it is full")
raise
def setup(self, app=None):
LOGGER.info("Setting up Solr queue ...")
if app is not None:
LOGGER.info("Wiring the Solr queue into the app; %s", app)
app["solrqueue"] = self
app.on_startup.append(self.app_start)
app.on_cleanup.append(self.app_stop)
LOGGER.info("Wiring the Solr queue into the TxnAwareSearchIndex.")
self._prev_queue = TxnAwareSearchIndex._backend_queue
TxnAwareSearchIndex._backend_queue = self
async def app_start(self, _app=None):
self.start()
async def app_stop(self, _app=None):
self.stop()
def __enter__(self):
self.setup()
self.start()
return self
def __exit__(self, type, value, tb):
LOGGER.info("Solr queue is exiting ...")
self.stop(not type)
LOGGER.info("Restoring previous TxnAwareSearchIndex settings ...")
TxnAwareSearchIndex._backend_queue = self._prev_queue
def start(self):
LOGGER.info("Starting Solr queue ...")
self._thread = threading.Thread(target=self._run)
self._thread.start()
def stop(self, join=True):
LOGGER.info("Stoping Solr queue ...")
if not self._queue.empty():
LOGGER.error("The Solr queue is not empty, there are about %s items that will not be indexed", self._queue.qsize())
self._stop.set()
self._trigger.set()
if join:
self._thread.join()
def trigger(self):
LOGGER.info("Triggering Solr queue ...")
self._trigger.set()
def _run(self):
LOGGER.info("Running Solr queue ...")
while True:
LOGGER.debug("Waiting [%d] ...", WAIT_TIME)
self._trigger.wait(WAIT_TIME)
self._drain()
if self._stop.is_set():
LOGGER.info("Finished running Solr queue ...")
return
def index_type(self, index_cls, delete, using):
if not index_cls:
return None
return ("delete" if delete == 1 else "update") + "::" + str(index_cls) + "::" + str(using)
def _drain(self):
LOGGER.debug("Indexing Solr queue items ...")
global RAISE_ERRORS
global ABORT_ON_ERRORS
last_ids = {}
try:
self.is_active = True
while True:
try:
index_cls, using, ids, delete = self._queue.get_nowait()
LOGGER.debug("Pop items off the Solr queue for indexing; Class: %s, Using: %s, Delete: %s, Instances: %s", index_cls, using, delete, ids)
except Empty:
LOGGER.debug("Solr queue is empty ...")
index_cls = None
delete = 0
using = None
index_cls_type = self.index_type(index_cls, delete, using)
if index_cls:
LOGGER.debug("Updating list of ids for [%s]..." % index_cls_type)
if not index_cls_type in last_ids:
last_ids[index_cls_type] = {
"index_cls": index_cls,
"delete": delete,
"using": using,
"ids": set(),
}
last_ids[index_cls_type]["ids"].update(ids)
for attr, val in last_ids.items():
if (not index_cls) or MAX_SOLR_BATCH <= len(val["ids"]):
LOGGER.debug("Processing %s items for [%s]", len(val["ids"]), attr)
try:
if val["delete"] == 1:
self.remove(val["index_cls"], val["using"], val["ids"])
else:
self.update(val["index_cls"], val["using"], val["ids"])
last_ids[attr]["ids"] = set()
except Exception:
LOGGER.exception("An unexpected exception was encountered while processing items from the Solr queue.", exc_info=True)
LOGGER.info("Requeueing items for later processing ...")
try:
self._queue.put( (val["index_cls"], val["using"], val["ids"], val["delete"]) )
except Full:
LOGGER.error("Can't requeue items to the Solr queue because it is full; %s", val["ids"])
raise
raise
if not index_cls:
LOGGER.debug("Done indexing items from Solr queue ...")
break
except Exception as e:
LOGGER.error("Error processing real-time index queue: %s", str(e))
if ABORT_ON_ERRORS:
os.abort()
elif RAISE_ERRORS:
raise
finally:
self.is_active = False
def update(self, index_cls, using, ids):
LOGGER.debug("Updating the indexes for Solr queue items ...")
index = index_cls()
backend = index.get_backend(using)
if backend is not None:
LOGGER.info("Updating indexes for %d row(s) from Solr queue: %s", len(ids), ids)
rows = index.index_queryset(using).filter(id__in=ids)
backend.silently_fail = False
backend.update(index, rows)
else:
LOGGER.error("Failed to get backend. Unable to update the index for %d row(s) from the Solr queue: %s", len(ids), ids)
raise Exception("Failed to get backend. Unable to update the index for Solr queue")
def remove(self, index_cls, using, ids):
LOGGER.debug("Removing the indexes for Solr queue items ...")
index = index_cls()
backend = index.get_backend(using)
if backend is not None:
LOGGER.info("Removing indexes for %d row(s) in Solr queue: %s", len(ids), ids)
backend.silently_fail = False
backend.conn.delete(id=ids)
else:
LOGGER.error("Failed to get backend. Unable to remove the indexes for %d row(s) from the solr queue: %s", len(ids), ids)
raise Exception("Failed to get backend. Unable to remove the index for Solr queue")
| true
| true
|
1c46e8ebc705732b535b16f3a42154c4df52a3d9
| 82
|
py
|
Python
|
tests/conftest.py
|
mishc9/flake_rba
|
eda1e80436f401871dba61a4c769204c2cbcfc65
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
mishc9/flake_rba
|
eda1e80436f401871dba61a4c769204c2cbcfc65
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
mishc9/flake_rba
|
eda1e80436f401871dba61a4c769204c2cbcfc65
|
[
"MIT"
] | null | null | null |
import pytest
@pytest.fixture
def fixture_template():
return "Hello World!"
| 11.714286
| 25
| 0.731707
|
import pytest
@pytest.fixture
def fixture_template():
return "Hello World!"
| true
| true
|
1c46ea4290b2b9e013c4b3a29287456e61b6ca89
| 1,429
|
py
|
Python
|
tests/plugins/inventory/test_nsot.py
|
omershtivi/nornir
|
0bbded1dcf38245c75aadf74706ea8547b2a0e73
|
[
"Apache-2.0"
] | 1
|
2019-04-10T08:14:59.000Z
|
2019-04-10T08:14:59.000Z
|
tests/plugins/inventory/test_nsot.py
|
omershtivi/nornir
|
0bbded1dcf38245c75aadf74706ea8547b2a0e73
|
[
"Apache-2.0"
] | null | null | null |
tests/plugins/inventory/test_nsot.py
|
omershtivi/nornir
|
0bbded1dcf38245c75aadf74706ea8547b2a0e73
|
[
"Apache-2.0"
] | null | null | null |
import json
import os
from nornir.plugins.inventory import nsot
# We need import below to load fixtures
import pytest # noqa
BASE_PATH = os.path.join(os.path.dirname(__file__), "nsot")
def get_inv(requests_mock, case, **kwargs):
for i in ["interfaces", "sites", "devices"]:
with open("{}/{}/{}.json".format(BASE_PATH, case, i), "r") as f:
requests_mock.get(
"http://localhost:8990/api/{}".format(i),
json=json.load(f),
headers={"Content-type": "application/json"},
)
return nsot.NSOTInventory(**kwargs)
def transform_function(host):
attrs = ["user", "password"]
for a in attrs:
if a in host.data:
host["nornir_{}".format(a)] = host.data[a]
class Test(object):
def test_inventory(self, requests_mock):
inv = get_inv(requests_mock, "1.3.0", transform_function=transform_function)
assert len(inv.hosts) == 4
assert len(inv.filter(site="site1").hosts) == 2
assert len(inv.filter(os="junos").hosts) == 2
assert len(inv.filter(site="site1", os="junos").hosts) == 1
def test_transform_function(self, requests_mock):
inv = get_inv(requests_mock, "1.3.0", transform_function=transform_function)
for host in inv.hosts.values():
assert host["user"] == host["nornir_user"]
assert host["password"] == host["nornir_password"]
| 31.755556
| 84
| 0.615815
|
import json
import os
from nornir.plugins.inventory import nsot
import pytest
BASE_PATH = os.path.join(os.path.dirname(__file__), "nsot")
def get_inv(requests_mock, case, **kwargs):
for i in ["interfaces", "sites", "devices"]:
with open("{}/{}/{}.json".format(BASE_PATH, case, i), "r") as f:
requests_mock.get(
"http://localhost:8990/api/{}".format(i),
json=json.load(f),
headers={"Content-type": "application/json"},
)
return nsot.NSOTInventory(**kwargs)
def transform_function(host):
attrs = ["user", "password"]
for a in attrs:
if a in host.data:
host["nornir_{}".format(a)] = host.data[a]
class Test(object):
def test_inventory(self, requests_mock):
inv = get_inv(requests_mock, "1.3.0", transform_function=transform_function)
assert len(inv.hosts) == 4
assert len(inv.filter(site="site1").hosts) == 2
assert len(inv.filter(os="junos").hosts) == 2
assert len(inv.filter(site="site1", os="junos").hosts) == 1
def test_transform_function(self, requests_mock):
inv = get_inv(requests_mock, "1.3.0", transform_function=transform_function)
for host in inv.hosts.values():
assert host["user"] == host["nornir_user"]
assert host["password"] == host["nornir_password"]
| true
| true
|
1c46eb9b38a94e1016136f4df0089ae4ec1eaff0
| 1,112
|
py
|
Python
|
hexi/service/pipeline/inputManager.py
|
tunstek/hexi
|
ebb00e4e47ac90d96a26179a5786d768d95c4bd5
|
[
"MIT"
] | 14
|
2017-10-07T23:19:09.000Z
|
2021-10-08T12:13:59.000Z
|
hexi/service/pipeline/inputManager.py
|
tunstek/hexi
|
ebb00e4e47ac90d96a26179a5786d768d95c4bd5
|
[
"MIT"
] | 1
|
2018-07-16T17:03:43.000Z
|
2018-07-16T17:03:43.000Z
|
hexi/service/pipeline/inputManager.py
|
tunstek/hexi
|
ebb00e4e47ac90d96a26179a5786d768d95c4bd5
|
[
"MIT"
] | 6
|
2018-05-18T14:25:26.000Z
|
2021-03-28T12:37:21.000Z
|
import asyncio
import time
from hexi.service import event
from hexi.service.pipeline.BaseManager import BaseManager
from hexi.util import deque
from hexi.plugin.InputPlugin import InputPlugin
EMPTY_SIGNAL = [0, 0, 0, 0, 0, 0]
class InputManager(BaseManager):
def __init__(self):
super().__init__('input', 'input', InputPlugin)
self.data_log_queue = deque.WebSocketPipingDeque(maxlen=400)
def init(self):
super().init()
self.last_signal = EMPTY_SIGNAL
asyncio.ensure_future(self.fetch_signal_loop_async())
self.data_log_queue.attach_ws_endpoint(self.bp, '/api/input_log')
event.subscribe(self.on_input_raw_signal, ['hexi.pipeline.input.raw_data'])
async def fetch_signal_loop_async(self):
while True:
signal = self.last_signal
self.last_signal = EMPTY_SIGNAL
self.data_log_queue.append([int(time.time()), signal])
# TODO: test whether currently started
asyncio.ensure_future(event.publish('hexi.pipeline.input.data', signal))
await asyncio.sleep(1 / 20)
async def on_input_raw_signal(self, e):
self.last_signal = e['value']
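# Editor's sketch (assumed usage, not part of the original module): an input
# plugin feeds the manager by publishing on the raw-data topic; only the most
# recent value survives until the 20 Hz loop in fetch_signal_loop_async
# consumes it:
#
#   await event.publish('hexi.pipeline.input.raw_data',
#                       {'value': [0.1, 0.0, 0.0, 0.0, 0.0, 0.0]})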
| 30.054054
| 79
| 0.735612
|
import asyncio
import time
from hexi.service import event
from hexi.service.pipeline.BaseManager import BaseManager
from hexi.util import deque
from hexi.plugin.InputPlugin import InputPlugin
EMPTY_SIGNAL = [0, 0, 0, 0, 0, 0]
class InputManager(BaseManager):
def __init__(self):
super().__init__('input', 'input', InputPlugin)
self.data_log_queue = deque.WebSocketPipingDeque(maxlen=400)
def init(self):
super().init()
self.last_signal = EMPTY_SIGNAL
asyncio.ensure_future(self.fetch_signal_loop_async())
self.data_log_queue.attach_ws_endpoint(self.bp, '/api/input_log')
event.subscribe(self.on_input_raw_signal, ['hexi.pipeline.input.raw_data'])
async def fetch_signal_loop_async(self):
while True:
signal = self.last_signal
self.last_signal = EMPTY_SIGNAL
self.data_log_queue.append([int(time.time()), signal])
asyncio.ensure_future(event.publish('hexi.pipeline.input.data', signal))
await asyncio.sleep(1 / 20)
async def on_input_raw_signal(self, e):
self.last_signal = e['value']
| true
| true
|
1c46ec3f4bcd5dfd904476a655c486582328757a
| 7,446
|
py
|
Python
|
tensorflow_io/python/experimental/numpy_dataset_ops.py
|
lgeiger/io
|
90be860451a705e2fbe8cfdec3c30030112b5c69
|
[
"Apache-2.0"
] | 558
|
2018-11-09T22:45:27.000Z
|
2022-03-24T04:59:36.000Z
|
tensorflow_io/python/experimental/numpy_dataset_ops.py
|
lgeiger/io
|
90be860451a705e2fbe8cfdec3c30030112b5c69
|
[
"Apache-2.0"
] | 1,122
|
2018-12-09T03:30:40.000Z
|
2022-03-31T16:22:15.000Z
|
tensorflow_io/python/experimental/numpy_dataset_ops.py
|
lgeiger/io
|
90be860451a705e2fbe8cfdec3c30030112b5c69
|
[
"Apache-2.0"
] | 319
|
2018-12-09T00:18:47.000Z
|
2022-03-30T21:49:46.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""NumpyIODataset"""
import numpy as np
import tensorflow as tf
from tensorflow_io.python.ops import core_ops
class NumpyIODataset(tf.data.Dataset):
"""NumpyIODataset"""
def __init__(self, a, internal=True):
"""NumpyIODataset."""
with tf.name_scope("NumpyIODataset"):
assert internal
entries = a
def p(entry):
address, _ = entry.__array_interface__["data"]
shape = entry.shape
dtype = tf.as_dtype(entry.dtype)
return address, "", "", shape, dtype
flatten = tf.nest.flatten(entries)
assert all([entry.shape[0] == flatten[0].shape[0] for entry in flatten])
params = [p(entry) for entry in flatten]
def f(start, stop):
return tf.nest.pack_sequence_as(
entries,
[
core_ops.io_numpy_read(
address=address,
filename=filename,
array=array,
shape=shape,
start=start,
stop=stop,
dtype=dtype,
)
for address, filename, array, shape, dtype in params
],
)
step = 1024
total = tf.constant(flatten[0].shape[0], tf.int64)
indices_start = tf.data.Dataset.range(0, total, step)
indices_stop = indices_start.skip(1).concatenate(
tf.data.Dataset.from_tensor_slices([total])
)
dataset = tf.data.Dataset.zip((indices_start, indices_stop))
dataset = dataset.map(f)
dataset = dataset.unbatch()
self._dataset = dataset
self._holder = [np.array(entry, copy=False) for entry in flatten]
super().__init__(
self._dataset._variant_tensor
) # pylint: disable=protected-access
def _inputs(self):
return []
@property
def element_spec(self):
return self._dataset.element_spec
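# Editor's note: both dataset classes in this file share the same chunked-read
# pattern. A minimal sketch of just that pattern (the helper name and default
# step are assumptions, not from the original file):
def _window_dataset(total, step=1024):
    # Yields [start, stop) pairs covering range(total) in chunks of `step`,
    # mirroring the indices_start/indices_stop construction used above.
    total = tf.cast(total, tf.int64)
    starts = tf.data.Dataset.range(0, total, step)
    stops = starts.skip(1).concatenate(tf.data.Dataset.from_tensor_slices([total]))
    return tf.data.Dataset.zip((starts, stops))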
class NumpyFileIODataset(tf.data.Dataset):
"""NumpyFileIODataset"""
def __init__(self, filename, spec=None, internal=True):
"""NumpyFileIODataset."""
with tf.name_scope("NumpyFileIODataset"):
assert internal
if tf.executing_eagerly():
arrays, shapes, dtypes = core_ops.io_numpy_info(filename=filename)
arrays = tf.unstack(arrays)
shapes = tf.unstack(shapes)
dtypes = tf.unstack(dtypes)
dtypes = [tf.as_dtype(dtype.numpy()) for dtype in dtypes]
entries = list(zip(shapes, dtypes, arrays))
entries = [
tf.TensorSpec(shape, dtype, array)
for (shape, dtype, array) in entries
]
indices = None
if all([e.numpy().decode().startswith("arr_") for e in arrays]):
try:
indices = [int(e.numpy()[4:]) for e in arrays]
except ValueError:
pass
if indices is not None:
values = list(indices)
values.sort()
if not all([k == v for k, v in enumerate(values)]):
indices = None
# if the indices are contiguous (0, 1, ..., n-1), construct a tuple; otherwise a dict.
if indices is not None:
entries = dict(zip(indices, entries))
entries = tuple([entries[index] for index in sorted(indices)])
else:
indices = [index.numpy().decode() for index in tf.unstack(arrays)]
entries = dict(zip(indices, entries))
flatten = tf.nest.flatten(entries)
shapes = [entry.shape for entry in flatten]
assert all([shape[0] == shapes[0][0] for shape in shapes])
else:
assert spec is not None
if isinstance(spec, tuple):
entries = tuple(
[
tf.TensorSpec(
None,
(v if isinstance(v, tf.dtypes.DType) else v.dtype),
"arr_{}".format(i),
)
for i, v in enumerate(spec)
]
)
else:
entries = {
k: tf.TensorSpec(
None, (v if isinstance(v, tf.dtypes.DType) else v.dtype), k
)
for k, v in spec.items()
}
flatten = tf.nest.flatten(entries)
def shape_f(entry):
shape, _ = core_ops.io_numpy_spec(
filename=filename, array=entry.name
)
return shape
shapes = [shape_f(entry) for entry in flatten]
def p(entry, shape):
return 0, filename, entry.name, shape, entry.dtype
params = [p(entry, shape) for entry, shape in zip(flatten, shapes)]
def f(start, stop):
return tf.nest.pack_sequence_as(
entries,
[
core_ops.io_numpy_read(
address=address,
filename=filename,
array=array,
shape=shape,
start=start,
stop=stop,
dtype=dtype,
)
for address, filename, array, shape, dtype in params
],
)
step = 1024
total = tf.cast(shapes[0][0], tf.int64)
indices_start = tf.data.Dataset.range(0, total, step)
indices_stop = indices_start.skip(1).concatenate(
tf.data.Dataset.from_tensor_slices([total])
)
dataset = tf.data.Dataset.zip((indices_start, indices_stop))
dataset = dataset.map(f)
dataset = dataset.unbatch()
self._dataset = dataset
super().__init__(
self._dataset._variant_tensor
) # pylint: disable=protected-access
def _inputs(self):
return []
@property
def element_spec(self):
return self._dataset.element_spec
| 36.861386
| 87
| 0.480526
|
import numpy as np
import tensorflow as tf
from tensorflow_io.python.ops import core_ops
class NumpyIODataset(tf.data.Dataset):
def __init__(self, a, internal=True):
with tf.name_scope("NumpyIODataset"):
assert internal
entries = a
def p(entry):
address, _ = entry.__array_interface__["data"]
shape = entry.shape
dtype = tf.as_dtype(entry.dtype)
return address, "", "", shape, dtype
flatten = tf.nest.flatten(entries)
assert all([entry.shape[0] == flatten[0].shape[0] for entry in flatten])
params = [p(entry) for entry in flatten]
def f(start, stop):
return tf.nest.pack_sequence_as(
entries,
[
core_ops.io_numpy_read(
address=address,
filename=filename,
array=array,
shape=shape,
start=start,
stop=stop,
dtype=dtype,
)
for address, filename, array, shape, dtype in params
],
)
step = 1024
total = tf.constant(flatten[0].shape[0], tf.int64)
indices_start = tf.data.Dataset.range(0, total, step)
indices_stop = indices_start.skip(1).concatenate(
tf.data.Dataset.from_tensor_slices([total])
)
dataset = tf.data.Dataset.zip((indices_start, indices_stop))
dataset = dataset.map(f)
dataset = dataset.unbatch()
self._dataset = dataset
self._holder = [np.array(entry, copy=False) for entry in flatten]
super().__init__(
self._dataset._variant_tensor
)
def _inputs(self):
return []
@property
def element_spec(self):
return self._dataset.element_spec
class NumpyFileIODataset(tf.data.Dataset):
def __init__(self, filename, spec=None, internal=True):
with tf.name_scope("NumpyFileIODataset"):
assert internal
if tf.executing_eagerly():
arrays, shapes, dtypes = core_ops.io_numpy_info(filename=filename)
arrays = tf.unstack(arrays)
shapes = tf.unstack(shapes)
dtypes = tf.unstack(dtypes)
dtypes = [tf.as_dtype(dtype.numpy()) for dtype in dtypes]
entries = list(zip(shapes, dtypes, arrays))
entries = [
tf.TensorSpec(shape, dtype, array)
for (shape, dtype, array) in entries
]
indices = None
if all([e.numpy().decode().startswith("arr_") for e in arrays]):
try:
indices = [int(e.numpy()[4:]) for e in arrays]
except ValueError:
pass
if indices is not None:
values = list(indices)
values.sort()
if not all([k == v for k, v in enumerate(values)]):
indices = None
if indices is not None:
entries = dict(zip(indices, entries))
entries = tuple([entries[index] for index in sorted(indices)])
else:
indices = [index.numpy().decode() for index in tf.unstack(arrays)]
entries = dict(zip(indices, entries))
flatten = tf.nest.flatten(entries)
shapes = [entry.shape for entry in flatten]
assert all([shape[0] == shapes[0][0] for shape in shapes])
else:
assert spec is not None
if isinstance(spec, tuple):
entries = tuple(
[
tf.TensorSpec(
None,
(v if isinstance(v, tf.dtypes.DType) else v.dtype),
"arr_{}".format(i),
)
for i, v in enumerate(spec)
]
)
else:
entries = {
k: tf.TensorSpec(
None, (v if isinstance(v, tf.dtypes.DType) else v.dtype), k
)
for k, v in spec.items()
}
flatten = tf.nest.flatten(entries)
def shape_f(entry):
shape, _ = core_ops.io_numpy_spec(
filename=filename, array=entry.name
)
return shape
shapes = [shape_f(entry) for entry in flatten]
def p(entry, shape):
return 0, filename, entry.name, shape, entry.dtype
params = [p(entry, shape) for entry, shape in zip(flatten, shapes)]
def f(start, stop):
return tf.nest.pack_sequence_as(
entries,
[
core_ops.io_numpy_read(
address=address,
filename=filename,
array=array,
shape=shape,
start=start,
stop=stop,
dtype=dtype,
)
for address, filename, array, shape, dtype in params
],
)
step = 1024
total = tf.cast(shapes[0][0], tf.int64)
indices_start = tf.data.Dataset.range(0, total, step)
indices_stop = indices_start.skip(1).concatenate(
tf.data.Dataset.from_tensor_slices([total])
)
dataset = tf.data.Dataset.zip((indices_start, indices_stop))
dataset = dataset.map(f)
dataset = dataset.unbatch()
self._dataset = dataset
super().__init__(
self._dataset._variant_tensor
)
def _inputs(self):
return []
@property
def element_spec(self):
return self._dataset.element_spec
| true
| true
|
1c46ec4630ef2346b753d3b1c8de606804d39144
| 5,523
|
py
|
Python
|
azure-mgmt-network/azure/mgmt/network/v2017_03_01/models/security_rule_py3.py
|
Christina-Kang/azure-sdk-for-python
|
bbf982eb06aab04b8151f69f1d230b7f5fb96ebf
|
[
"MIT"
] | 1
|
2022-03-30T22:39:15.000Z
|
2022-03-30T22:39:15.000Z
|
azure-mgmt-network/azure/mgmt/network/v2017_03_01/models/security_rule_py3.py
|
Christina-Kang/azure-sdk-for-python
|
bbf982eb06aab04b8151f69f1d230b7f5fb96ebf
|
[
"MIT"
] | 54
|
2016-03-25T17:25:01.000Z
|
2018-10-22T17:27:54.000Z
|
azure-mgmt-network/azure/mgmt/network/v2017_03_01/models/security_rule_py3.py
|
Christina-Kang/azure-sdk-for-python
|
bbf982eb06aab04b8151f69f1d230b7f5fb96ebf
|
[
"MIT"
] | 2
|
2017-01-20T18:25:46.000Z
|
2017-05-12T21:31:47.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class SecurityRule(SubResource):
"""Network security rule.
All required parameters must be populated in order to send to Azure.
:param id: Resource ID.
:type id: str
:param description: A description for this rule. Restricted to 140 chars.
:type description: str
:param protocol: Required. Network protocol this rule applies to. Possible
values are 'Tcp', 'Udp', and '*'. Possible values include: 'Tcp', 'Udp',
'*'
:type protocol: str or
~azure.mgmt.network.v2017_03_01.models.SecurityRuleProtocol
:param source_port_range: The source port or range. Integer or range
between 0 and 65535. Asterisk '*' can also be used to match all ports.
:type source_port_range: str
:param destination_port_range: The destination port or range. Integer or
range between 0 and 65535. Asterisk '*' can also be used to match all
ports.
:type destination_port_range: str
:param source_address_prefix: Required. The CIDR or source IP range.
Asterisk '*' can also be used to match all source IPs. Default tags such as
'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used. If
this is an ingress rule, specifies where network traffic originates from.
:type source_address_prefix: str
:param destination_address_prefix: Required. The destination address
prefix. CIDR or source IP range. Asterisk '*' can also be used to match all
source IPs. Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and
'Internet' can also be used.
:type destination_address_prefix: str
:param access: Required. The network traffic is allowed or denied.
Possible values are: 'Allow' and 'Deny'. Possible values include: 'Allow',
'Deny'
:type access: str or
~azure.mgmt.network.v2017_03_01.models.SecurityRuleAccess
:param priority: The priority of the rule. The value can be between 100
and 4096. The priority number must be unique for each rule in the
collection. The lower the priority number, the higher the priority of the
rule.
:type priority: int
:param direction: Required. The direction of the rule. The direction
specifies whether the rule is evaluated on incoming or outgoing traffic.
Possible values are: 'Inbound' and 'Outbound'. Possible values include:
'Inbound', 'Outbound'
:type direction: str or
~azure.mgmt.network.v2017_03_01.models.SecurityRuleDirection
:param provisioning_state: The provisioning state of the security rule
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param name: The name of the resource that is unique within a resource
group. This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
"""
_validation = {
'protocol': {'required': True},
'source_address_prefix': {'required': True},
'destination_address_prefix': {'required': True},
'access': {'required': True},
'direction': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'protocol': {'key': 'properties.protocol', 'type': 'str'},
'source_port_range': {'key': 'properties.sourcePortRange', 'type': 'str'},
'destination_port_range': {'key': 'properties.destinationPortRange', 'type': 'str'},
'source_address_prefix': {'key': 'properties.sourceAddressPrefix', 'type': 'str'},
'destination_address_prefix': {'key': 'properties.destinationAddressPrefix', 'type': 'str'},
'access': {'key': 'properties.access', 'type': 'str'},
'priority': {'key': 'properties.priority', 'type': 'int'},
'direction': {'key': 'properties.direction', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, *, protocol, source_address_prefix: str, destination_address_prefix: str, access, direction, id: str=None, description: str=None, source_port_range: str=None, destination_port_range: str=None, priority: int=None, provisioning_state: str=None, name: str=None, etag: str=None, **kwargs) -> None:
super(SecurityRule, self).__init__(id=id, **kwargs)
self.description = description
self.protocol = protocol
self.source_port_range = source_port_range
self.destination_port_range = destination_port_range
self.source_address_prefix = source_address_prefix
self.destination_address_prefix = destination_address_prefix
self.access = access
self.priority = priority
self.direction = direction
self.provisioning_state = provisioning_state
self.name = name
self.etag = etag
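# Editor's sketch (assumed values, not from the original file): the five keys
# marked required in _validation must all be supplied, e.g.:
#
#   rule = SecurityRule(
#       protocol='Tcp', source_address_prefix='*',
#       destination_address_prefix='10.0.0.0/24',
#       access='Allow', direction='Inbound',
#       priority=100, destination_port_range='443')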
| 49.756757
| 316
| 0.668296
|
from .sub_resource import SubResource
class SecurityRule(SubResource):
_validation = {
'protocol': {'required': True},
'source_address_prefix': {'required': True},
'destination_address_prefix': {'required': True},
'access': {'required': True},
'direction': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'protocol': {'key': 'properties.protocol', 'type': 'str'},
'source_port_range': {'key': 'properties.sourcePortRange', 'type': 'str'},
'destination_port_range': {'key': 'properties.destinationPortRange', 'type': 'str'},
'source_address_prefix': {'key': 'properties.sourceAddressPrefix', 'type': 'str'},
'destination_address_prefix': {'key': 'properties.destinationAddressPrefix', 'type': 'str'},
'access': {'key': 'properties.access', 'type': 'str'},
'priority': {'key': 'properties.priority', 'type': 'int'},
'direction': {'key': 'properties.direction', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, *, protocol, source_address_prefix: str, destination_address_prefix: str, access, direction, id: str=None, description: str=None, source_port_range: str=None, destination_port_range: str=None, priority: int=None, provisioning_state: str=None, name: str=None, etag: str=None, **kwargs) -> None:
super(SecurityRule, self).__init__(id=id, **kwargs)
self.description = description
self.protocol = protocol
self.source_port_range = source_port_range
self.destination_port_range = destination_port_range
self.source_address_prefix = source_address_prefix
self.destination_address_prefix = destination_address_prefix
self.access = access
self.priority = priority
self.direction = direction
self.provisioning_state = provisioning_state
self.name = name
self.etag = etag
| true
| true
|
1c46ed5b7d03f873e983faa920d777e35b56c1ae
| 3,714
|
py
|
Python
|
test_tflite.py
|
kzm4269/keras-yolo3
|
06b2b522213cb901f4a7133b87aab04079e41aff
|
[
"MIT"
] | null | null | null |
test_tflite.py
|
kzm4269/keras-yolo3
|
06b2b522213cb901f4a7133b87aab04079e41aff
|
[
"MIT"
] | null | null | null |
test_tflite.py
|
kzm4269/keras-yolo3
|
06b2b522213cb901f4a7133b87aab04079e41aff
|
[
"MIT"
] | 1
|
2019-09-17T01:28:59.000Z
|
2019-09-17T01:28:59.000Z
|
import argparse
import sys
from pathlib import Path
import numpy as np
import tensorflow as tf
import keras
from PIL import Image
import matplotlib.pyplot as plt
from yolo3.model import yolo_eval
from yolo3.utils import letterbox_image
def predict_keras(model_path):
model = keras.models.load_model(model_path, compile=False)
def predict(image):
assert image.ndim == 3, image.shape
assert image.dtype == np.float32, image.dtype
assert image.ptp() <= 1.0, image.ptp()
return model.predict([image[None]])
return predict
def predict_tflite(model_path):
# Load TFLite model and allocate tensors.
interpreter = tf.lite.Interpreter(model_path=model_path)
interpreter.allocate_tensors()
# Get input and output tensors.
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
def predict(image):
assert image.ndim == 3, image.shape
assert image.dtype == np.float32, image.dtype
assert image.ptp() <= 1.0, image.ptp()
# Test model on random input data.
print('- predict_tflite: interpreter.set_tensor')
interpreter.set_tensor(input_details[0]['index'], image[None])
print('- predict_tflite: interpreter.invoke')
interpreter.invoke()
# The function `get_tensor()` returns a copy of the tensor data.
# Use `tensor()` in order to get a pointer to the tensor.
print('- predict_tflite: interpreter.get_tensor')
return [interpreter.get_tensor(output_detail['index']) for output_detail in output_details]
return predict
def _main():
parser = argparse.ArgumentParser()
parser.add_argument('model', help='model path (.h5 or .tflite)')
parser.add_argument('images', nargs='+', help='image paths')
args = parser.parse_args()
anchors = np.reshape(list(map(int, Path('./model_data/yolo_anchors.txt').read_text().strip().split(','))), (-1, 2))
class_names = Path('./model_data/coco_classes.txt').read_text().strip().splitlines()
predict = {
'h5': predict_keras,
'tflite': predict_tflite,
}[args.model.split('.')[-1]](args.model)
for i, image_path in enumerate(map(Path, args.images)):
print('load:', image_path)
pil_image = Image.open(str(image_path))
input_data = letterbox_image(pil_image, size=(416, 416))
input_data = input_data / np.float32(255.)
image = np.asarray(pil_image)
# image = input_data.copy()
print('predict:', image_path)
output_data = predict(input_data)
print('eval:', image_path)
result = yolo_eval(
[keras.backend.constant(d) for d in output_data],
anchors=anchors,
num_classes=len(class_names),
image_shape=(image.shape[0], image.shape[1]),
score_threshold=0.3,
iou_threshold=0.45,
)
boxes, scores, classes = [keras.backend.eval(t) for t in result]
print('boxes =', boxes)
print('save:', image_path)
from matplotlib.backends.backend_agg import FigureCanvasAgg
fig = FigureCanvasAgg(plt.Figure()).figure
ax = fig.add_subplot(1, 1, 1)
ax.imshow(image)
for top, left, bottom, right in boxes:
assert top <= bottom and left <= right
ax.add_patch(plt.Rectangle(xy=[left, top], width=right - left, height=bottom - top, fill=False, linewidth=3, color='red'))
fig.savefig(f'out_{args.model.split(".")[-1]}_{i:03d}.png')
if __name__ == '__main__':
_main()
| 35.371429
| 134
| 0.631125
|
import argparse
import sys
from pathlib import Path
import numpy as np
import tensorflow as tf
import keras
from PIL import Image
import matplotlib.pyplot as plt
from yolo3.model import yolo_eval
from yolo3.utils import letterbox_image
def predict_keras(model_path):
model = keras.models.load_model(model_path, compile=False)
def predict(image):
assert image.ndim == 3, image.shape
assert image.dtype == np.float32, image.dtype
assert image.ptp() <= 1.0, image.ptp()
return model.predict([image[None]])
return predict
def predict_tflite(model_path):
interpreter = tf.lite.Interpreter(model_path=model_path)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
def predict(image):
assert image.ndim == 3, image.shape
assert image.dtype == np.float32, image.dtype
assert image.ptp() <= 1.0, image.ptp()
print('- predict_tflite: interpreter.set_tensor')
interpreter.set_tensor(input_details[0]['index'], image[None])
print('- predict_tflite: interpreter.invoke')
interpreter.invoke()
print('- predict_tflite: interpreter.get_tensor')
return [interpreter.get_tensor(output_detail['index']) for output_detail in output_details]
return predict
def _main():
parser = argparse.ArgumentParser()
parser.add_argument('model', help='model path (.h5 or .tflite)')
parser.add_argument('images', nargs='+', help='image paths')
args = parser.parse_args()
anchors = np.reshape(list(map(int, Path('./model_data/yolo_anchors.txt').read_text().strip().split(','))), (-1, 2))
class_names = Path('./model_data/coco_classes.txt').read_text().strip().splitlines()
predict = {
'h5': predict_keras,
'tflite': predict_tflite,
}[args.model.split('.')[-1]](args.model)
for i, image_path in enumerate(map(Path, args.images)):
print('load:', image_path)
pil_image = Image.open(str(image_path))
input_data = letterbox_image(pil_image, size=(416, 416))
input_data = input_data / np.float32(255.)
image = np.asarray(pil_image)
print('predict:', image_path)
output_data = predict(input_data)
print('eval:', image_path)
result = yolo_eval(
[keras.backend.constant(d) for d in output_data],
anchors=anchors,
num_classes=len(class_names),
image_shape=(image.shape[0], image.shape[1]),
score_threshold=0.3,
iou_threshold=0.45,
)
boxes, scores, classes = [keras.backend.eval(t) for t in result]
print('boxes =', boxes)
print('save:', image_path)
from matplotlib.backends.backend_agg import FigureCanvasAgg
fig = FigureCanvasAgg(plt.Figure()).figure
ax = fig.add_subplot(1,1,1)
ax.imshow(image)
for top, left, bottom, right in boxes:
assert top <= bottom and left <= right
ax.add_patch(plt.Rectangle(xy=[left, top], width=right - left, height=bottom - top, fill=False, linewidth=3, color='red'))
fig.savefig(f'out_{args.model.split(".")[-1]}_{i:03d}.png')
if __name__ == '__main__':
_main()
| true
| true
|
1c46edebef8140280b53e681b1f63cdbf8683804
| 15,791
|
py
|
Python
|
tests/support/unit.py
|
byteskeptical/salt
|
637fe0b04f38b2274191b005d73b3c6707d7f400
|
[
"Apache-2.0"
] | 5
|
2018-05-01T20:51:14.000Z
|
2021-11-09T05:43:00.000Z
|
tests/support/unit.py
|
byteskeptical/salt
|
637fe0b04f38b2274191b005d73b3c6707d7f400
|
[
"Apache-2.0"
] | 12
|
2015-04-15T22:17:42.000Z
|
2016-03-22T08:46:27.000Z
|
tests/support/unit.py
|
byteskeptical/salt
|
637fe0b04f38b2274191b005d73b3c6707d7f400
|
[
"Apache-2.0"
] | 7
|
2017-09-29T18:49:53.000Z
|
2021-11-09T05:42:49.000Z
|
# -*- coding: utf-8 -*-
'''
:codeauthor: Pedro Algarvio (pedro@algarvio.me)
============================
Unittest Compatibility Layer
============================
Compatibility layer to use :mod:`unittest <python2:unittest>` under Python
2.7 or `unittest2`_ under Python 2.6 without having to worry about which is
in use.
.. attention::
Please refer to Python's :mod:`unittest <python2:unittest>`
documentation as the ultimate source of information, this is just a
compatibility layer.
.. _`unittest2`: https://pypi.python.org/pypi/unittest2
'''
# pylint: disable=unused-import,blacklisted-module,deprecated-method
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import sys
import logging
from unittest import (
TestLoader as _TestLoader,
TextTestRunner as _TextTestRunner,
TestCase as _TestCase,
expectedFailure,
TestSuite as _TestSuite,
skip,
skipIf,
TestResult,
TextTestResult as _TextTestResult
)
from unittest.case import _id, SkipTest
from salt.ext import six
try:
import psutil
HAS_PSUTIL = True
except ImportError:
HAS_PSUTIL = False
log = logging.getLogger(__name__)
# Set SHOW_PROC to True to show
# process details when running in verbose mode
# i.e. [CPU:15.1%|MEM:48.3%|Z:0]
SHOW_PROC = 'NO_SHOW_PROC' not in os.environ
LOREM_IPSUM = '''\
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Quisque eget urna a arcu lacinia sagittis.
Sed scelerisque, lacus eget malesuada vestibulum, justo diam facilisis tortor, in sodales dolor
nibh eu urna. Aliquam iaculis massa risus, sed elementum risus accumsan id. Suspendisse mattis,
metus sed lacinia dictum, leo orci dapibus sapien, at porttitor sapien nulla ac velit.
Duis ac cursus leo, non varius metus. Sed laoreet felis magna, vel tempor diam malesuada nec.
Quisque cursus odio tortor. In consequat augue nisl, eget lacinia odio vestibulum eget.
Donec venenatis elementum arcu at rhoncus. Nunc pharetra erat in lacinia convallis. Ut condimentum
eu mauris sit amet convallis. Morbi vulputate vel odio non laoreet. Nullam in suscipit tellus.
Sed quis posuere urna.'''
class TestSuite(_TestSuite):
def _handleClassSetUp(self, test, result):
previousClass = getattr(result, '_previousTestClass', None)
currentClass = test.__class__
if currentClass == previousClass or getattr(currentClass, 'setUpClass', None) is None:
return super(TestSuite, self)._handleClassSetUp(test, result)
# Store a reference to all class attributes before running the setUpClass method
initial_class_attributes = dir(test.__class__)
ret = super(TestSuite, self)._handleClassSetUp(test, result)
# Store the difference in a variable in order to check later if they were deleted
test.__class__._prerun_class_attributes = [
attr for attr in dir(test.__class__) if attr not in initial_class_attributes]
return ret
def _tearDownPreviousClass(self, test, result):
# Run any tearDownClass code defined
super(TestSuite, self)._tearDownPreviousClass(test, result)
previousClass = getattr(result, '_previousTestClass', None)
currentClass = test.__class__
if currentClass == previousClass:
return
# See if the previous class attributes have been cleaned
if previousClass and getattr(previousClass, 'tearDownClass', None):
prerun_class_attributes = getattr(previousClass, '_prerun_class_attributes', None)
if prerun_class_attributes is not None:
previousClass._prerun_class_attributes = None
del previousClass._prerun_class_attributes
for attr in prerun_class_attributes:
if hasattr(previousClass, attr):
attr_value = getattr(previousClass, attr, None)
if attr_value is None:
continue
if isinstance(attr_value, (bool,) + six.string_types + six.integer_types):
setattr(previousClass, attr, None)
continue
log.warning('Deleting extra class attribute after test run: %s.%s(%s). '
'Please consider using \'del self.%s\' on the test class '
'\'tearDownClass()\' method', previousClass.__name__, attr,
str(getattr(previousClass, attr))[:100], attr)
delattr(previousClass, attr)
class TestLoader(_TestLoader):
# We're just subclassing to make sure that our TestSuite class is the one used
suiteClass = TestSuite
class TestCase(_TestCase):
# pylint: disable=expected-an-indented-block-comment,too-many-leading-hastag-for-block-comment
## Commented out because it may be causing tests to hang
## at the end of the run
#
# _cwd = os.getcwd()
# _chdir_counter = 0
# @classmethod
# def tearDownClass(cls):
# '''
# Overridden method for tearing down all classes in salttesting
#
# This hard-resets the environment between test classes
# '''
# # Compare where we are now compared to where we were when we began this family of tests
# if not cls._cwd == os.getcwd() and cls._chdir_counter > 0:
# os.chdir(cls._cwd)
# print('\nWARNING: A misbehaving test has modified the working directory!\nThe test suite has reset the working directory '
# 'on tearDown() to {0}\n'.format(cls._cwd))
# cls._chdir_counter += 1
# pylint: enable=expected-an-indented-block-comment,too-many-leading-hastag-for-block-comment
def run(self, result=None):
self._prerun_instance_attributes = dir(self)
self.maxDiff = None
outcome = super(TestCase, self).run(result=result)
for attr in dir(self):
if attr == '_prerun_instance_attributes':
continue
if attr in getattr(self.__class__, '_prerun_class_attributes', ()):
continue
if attr not in self._prerun_instance_attributes:
attr_value = getattr(self, attr, None)
if attr_value is None:
continue
if isinstance(attr_value, (bool,) + six.string_types + six.integer_types):
setattr(self, attr, None)
continue
log.warning('Deleting extra class attribute after test run: %s.%s(%s). '
'Please consider using \'del self.%s\' on the test case '
'\'tearDown()\' method', self.__class__.__name__, attr,
getattr(self, attr), attr)
delattr(self, attr)
self._prerun_instance_attributes = None
del self._prerun_instance_attributes
return outcome
def shortDescription(self):
desc = _TestCase.shortDescription(self)
if HAS_PSUTIL and SHOW_PROC:
show_zombie_processes = 'SHOW_PROC_ZOMBIES' in os.environ
proc_info = '[CPU:{0}%|MEM:{1}%'.format(psutil.cpu_percent(),
psutil.virtual_memory().percent)
if show_zombie_processes:
found_zombies = 0
try:
for proc in psutil.process_iter():
if proc.status == psutil.STATUS_ZOMBIE:
found_zombies += 1
except Exception:
pass
proc_info += '|Z:{0}'.format(found_zombies)
proc_info += '] {short_desc}'.format(short_desc=desc if desc else '')
return proc_info
else:
return _TestCase.shortDescription(self)
def assertEquals(self, *args, **kwargs):
raise DeprecationWarning(
'The {0}() function is deprecated. Please start using {1}() '
'instead.'.format('assertEquals', 'assertEqual')
)
# return _TestCase.assertEquals(self, *args, **kwargs)
def assertNotEquals(self, *args, **kwargs):
raise DeprecationWarning(
'The {0}() function is deprecated. Please start using {1}() '
'instead.'.format('assertNotEquals', 'assertNotEqual')
)
# return _TestCase.assertNotEquals(self, *args, **kwargs)
def assert_(self, *args, **kwargs):
# The unittest2 library uses this deprecated method, we can't raise
# the exception.
raise DeprecationWarning(
'The {0}() function is deprecated. Please start using {1}() '
'instead.'.format('assert_', 'assertTrue')
)
# return _TestCase.assert_(self, *args, **kwargs)
def assertAlmostEquals(self, *args, **kwargs):
raise DeprecationWarning(
'The {0}() function is deprecated. Please start using {1}() '
'instead.'.format('assertAlmostEquals', 'assertAlmostEqual')
)
# return _TestCase.assertAlmostEquals(self, *args, **kwargs)
def assertNotAlmostEquals(self, *args, **kwargs):
raise DeprecationWarning(
'The {0}() function is deprecated. Please start using {1}() '
'instead.'.format('assertNotAlmostEquals', 'assertNotAlmostEqual')
)
# return _TestCase.assertNotAlmostEquals(self, *args, **kwargs)
def repack_state_returns(self, state_ret):
'''
Accepts a state return dict and returns it back with the top level key
names rewritten such that the ID declaration is the key instead of the
State's unique tag. For example: 'foo' instead of
'file_|-foo_|-/etc/foo.conf|-managed'
This makes it easier to work with state returns when crafting asserts
after running states.
'''
assert isinstance(state_ret, dict), state_ret
return {x.split('_|-')[1]: y for x, y in six.iteritems(state_ret)}
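# Editor's note (illustrative): given
#   {'file_|-foo_|-/etc/foo.conf_|-managed': {'result': True}}
# repack_state_returns returns {'foo': {'result': True}}, because the ID
# declaration is the second '_|-'-separated field of the state tag.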
def failUnlessEqual(self, *args, **kwargs):
raise DeprecationWarning(
'The {0}() function is deprecated. Please start using {1}() '
'instead.'.format('failUnlessEqual', 'assertEqual')
)
# return _TestCase.failUnlessEqual(self, *args, **kwargs)
def failIfEqual(self, *args, **kwargs):
raise DeprecationWarning(
'The {0}() function is deprecated. Please start using {1}() '
'instead.'.format('failIfEqual', 'assertNotEqual')
)
# return _TestCase.failIfEqual(self, *args, **kwargs)
def failUnless(self, *args, **kwargs):
raise DeprecationWarning(
'The {0}() function is deprecated. Please start using {1}() '
'instead.'.format('failUnless', 'assertTrue')
)
# return _TestCase.failUnless(self, *args, **kwargs)
def failIf(self, *args, **kwargs):
raise DeprecationWarning(
'The {0}() function is deprecated. Please start using {1}() '
'instead.'.format('failIf', 'assertFalse')
)
# return _TestCase.failIf(self, *args, **kwargs)
def failUnlessRaises(self, *args, **kwargs):
raise DeprecationWarning(
'The {0}() function is deprecated. Please start using {1}() '
'instead.'.format('failUnlessRaises', 'assertRaises')
)
# return _TestCase.failUnlessRaises(self, *args, **kwargs)
def failUnlessAlmostEqual(self, *args, **kwargs):
raise DeprecationWarning(
'The {0}() function is deprecated. Please start using {1}() '
'instead.'.format('failUnlessAlmostEqual', 'assertAlmostEqual')
)
# return _TestCase.failUnlessAlmostEqual(self, *args, **kwargs)
def failIfAlmostEqual(self, *args, **kwargs):
raise DeprecationWarning(
'The {0}() function is deprecated. Please start using {1}() '
'instead.'.format('failIfAlmostEqual', 'assertNotAlmostEqual')
)
# return _TestCase.failIfAlmostEqual(self, *args, **kwargs)
@staticmethod
def assert_called_once(mock):
'''
mock.assert_called_once only exists in Python 3.6 and newer
'''
try:
mock.assert_called_once()
except AttributeError:
log.warning('assert_called_once invoked, but not available')
if six.PY2:
def assertRegexpMatches(self, *args, **kwds):
raise DeprecationWarning(
'The {0}() function will be deprecated in python 3. Please start '
'using {1}() instead.'.format(
'assertRegexpMatches',
'assertRegex'
)
)
def assertRegex(self, text, regex, msg=None):
# In python 2, alias to the future python 3 function
return _TestCase.assertRegexpMatches(self, text, regex, msg=msg)
def assertNotRegexpMatches(self, *args, **kwds):
raise DeprecationWarning(
'The {0}() function will be deprecated in python 3. Please start '
'using {1}() instead.'.format(
'assertNotRegexpMatches',
'assertNotRegex'
)
)
def assertNotRegex(self, text, regex, msg=None):
# In python 2, alias to the future python 3 function
return _TestCase.assertNotRegexpMatches(self, text, regex, msg=msg)
def assertRaisesRegexp(self, *args, **kwds):
raise DeprecationWarning(
'The {0}() function will be deprecated in python 3. Please start '
'using {1}() instead.'.format(
'assertRaisesRegexp',
'assertRaisesRegex'
)
)
def assertRaisesRegex(self, exception, regexp, *args, **kwds):
# In python 2, alias to the future python 3 function
return _TestCase.assertRaisesRegexp(self, exception, regexp, *args, **kwds)
else:
def assertRegexpMatches(self, *args, **kwds):
raise DeprecationWarning(
'The {0}() function is deprecated. Please start using {1}() '
'instead.'.format(
'assertRegexpMatches',
'assertRegex'
)
)
def assertNotRegexpMatches(self, *args, **kwds):
raise DeprecationWarning(
'The {0}() function is deprecated. Please start using {1}() '
'instead.'.format(
'assertNotRegexpMatches',
'assertNotRegex'
)
)
def assertRaisesRegexp(self, *args, **kwds):
raise DeprecationWarning(
'The {0}() function is deprecated. Please start using {1}() '
'instead.'.format(
'assertRaisesRegexp',
'assertRaisesRegex'
)
)
class TextTestResult(_TextTestResult):
'''
Custom TestResult class which logs the start and the end of a test
'''
def startTest(self, test):
log.debug('>>>>> START >>>>> %s', test.id())
return super(TextTestResult, self).startTest(test)
def stopTest(self, test):
log.debug('<<<<< END <<<<<<< %s', test.id())
return super(TextTestResult, self).stopTest(test)
class TextTestRunner(_TextTestRunner):
'''
Custom text test runner that logs the start and the end of each test case
'''
resultclass = TextTestResult
__all__ = [
'TestLoader',
'TextTestRunner',
'TestCase',
'expectedFailure',
'TestSuite',
'skipIf',
'TestResult'
]
| 40.283163
| 135
| 0.604142
|
from __future__ import absolute_import, print_function, unicode_literals
import os
import sys
import logging
from unittest import (
TestLoader as _TestLoader,
TextTestRunner as _TextTestRunner,
TestCase as _TestCase,
expectedFailure,
TestSuite as _TestSuite,
skip,
skipIf,
TestResult,
TextTestResult as _TextTestResult
)
from unittest.case import _id, SkipTest
from salt.ext import six
try:
import psutil
HAS_PSUTIL = True
except ImportError:
HAS_PSUTIL = False
log = logging.getLogger(__name__)
SHOW_PROC = 'NO_SHOW_PROC' not in os.environ
LOREM_IPSUM = '''\
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Quisque eget urna a arcu lacinia sagittis.
Sed scelerisque, lacus eget malesuada vestibulum, justo diam facilisis tortor, in sodales dolor
nibh eu urna. Aliquam iaculis massa risus, sed elementum risus accumsan id. Suspendisse mattis,
metus sed lacinia dictum, leo orci dapibus sapien, at porttitor sapien nulla ac velit.
Duis ac cursus leo, non varius metus. Sed laoreet felis magna, vel tempor diam malesuada nec.
Quisque cursus odio tortor. In consequat augue nisl, eget lacinia odio vestibulum eget.
Donec venenatis elementum arcu at rhoncus. Nunc pharetra erat in lacinia convallis. Ut condimentum
eu mauris sit amet convallis. Morbi vulputate vel odio non laoreet. Nullam in suscipit tellus.
Sed quis posuere urna.'''
class TestSuite(_TestSuite):
def _handleClassSetUp(self, test, result):
previousClass = getattr(result, '_previousTestClass', None)
currentClass = test.__class__
if currentClass == previousClass or getattr(currentClass, 'setUpClass', None) is None:
return super(TestSuite, self)._handleClassSetUp(test, result)
initial_class_attributes = dir(test.__class__)
ret = super(TestSuite, self)._handleClassSetUp(test, result)
test.__class__._prerun_class_attributes = [
attr for attr in dir(test.__class__) if attr not in initial_class_attributes]
return ret
def _tearDownPreviousClass(self, test, result):
super(TestSuite, self)._tearDownPreviousClass(test, result)
previousClass = getattr(result, '_previousTestClass', None)
currentClass = test.__class__
if currentClass == previousClass:
return
if previousClass and getattr(previousClass, 'tearDownClass', None):
prerun_class_attributes = getattr(previousClass, '_prerun_class_attributes', None)
if prerun_class_attributes is not None:
previousClass._prerun_class_attributes = None
del previousClass._prerun_class_attributes
for attr in prerun_class_attributes:
if hasattr(previousClass, attr):
attr_value = getattr(previousClass, attr, None)
if attr_value is None:
continue
if isinstance(attr_value, (bool,) + six.string_types + six.integer_types):
setattr(previousClass, attr, None)
continue
log.warning('Deleting extra class attribute after test run: %s.%s(%s). '
'Please consider using \'del self.%s\' on the test class '
'\'tearDownClass()\' method', previousClass.__name__, attr,
str(getattr(previousClass, attr))[:100], attr)
delattr(previousClass, attr)
class TestLoader(_TestLoader):
suiteClass = TestSuite
class TestCase(_TestCase):
# pylint: disable=expected-an-indented-block-comment,too-many-leading-hastag-for-block-comment
## Commented out because it may be causing tests to hang
## at the end of the run
#
# _cwd = os.getcwd()
# _chdir_counter = 0
# @classmethod
# def tearDownClass(cls):
# '''
# Overridden method for tearing down all classes in salttesting
#
# This hard-resets the environment between test classes
# '''
# # Compare where we are now compared to where we were when we began this family of tests
# if not cls._cwd == os.getcwd() and cls._chdir_counter > 0:
# os.chdir(cls._cwd)
# print('\nWARNING: A misbehaving test has modified the working directory!\nThe test suite has reset the working directory '
# 'on tearDown() to {0}\n'.format(cls._cwd))
# cls._chdir_counter += 1
# pylint: enable=expected-an-indented-block-comment,too-many-leading-hastag-for-block-comment
def run(self, result=None):
self._prerun_instance_attributes = dir(self)
self.maxDiff = None
outcome = super(TestCase, self).run(result=result)
for attr in dir(self):
if attr == '_prerun_instance_attributes':
continue
if attr in getattr(self.__class__, '_prerun_class_attributes', ()):
continue
if attr not in self._prerun_instance_attributes:
attr_value = getattr(self, attr, None)
if attr_value is None:
continue
if isinstance(attr_value, (bool,) + six.string_types + six.integer_types):
setattr(self, attr, None)
continue
log.warning('Deleting extra class attribute after test run: %s.%s(%s). '
'Please consider using \'del self.%s\' on the test case '
'\'tearDown()\' method', self.__class__.__name__, attr,
getattr(self, attr), attr)
delattr(self, attr)
self._prerun_instance_attributes = None
del self._prerun_instance_attributes
return outcome
def shortDescription(self):
desc = _TestCase.shortDescription(self)
if HAS_PSUTIL and SHOW_PROC:
show_zombie_processes = 'SHOW_PROC_ZOMBIES' in os.environ
proc_info = '[CPU:{0}%|MEM:{1}%'.format(psutil.cpu_percent(),
psutil.virtual_memory().percent)
if show_zombie_processes:
found_zombies = 0
try:
for proc in psutil.process_iter():
if proc.status() == psutil.STATUS_ZOMBIE:
found_zombies += 1
except Exception:
pass
proc_info += '|Z:{0}'.format(found_zombies)
proc_info += '] {short_desc}'.format(short_desc=desc if desc else '')
return proc_info
else:
return _TestCase.shortDescription(self)
def assertEquals(self, *args, **kwargs):
raise DeprecationWarning(
'The {0}() function is deprecated. Please start using {1}() '
'instead.'.format('assertEquals', 'assertEqual')
)
# return _TestCase.assertEquals(self, *args, **kwargs)
def assertNotEquals(self, *args, **kwargs):
raise DeprecationWarning(
'The {0}() function is deprecated. Please start using {1}() '
'instead.'.format('assertNotEquals', 'assertNotEqual')
)
# return _TestCase.assertNotEquals(self, *args, **kwargs)
def assert_(self, *args, **kwargs):
# The unittest2 library uses this deprecated method, we can't raise
raise DeprecationWarning(
'The {0}() function is deprecated. Please start using {1}() '
'instead.'.format('assert_', 'assertTrue')
)
def assertAlmostEquals(self, *args, **kwargs):
raise DeprecationWarning(
'The {0}() function is deprecated. Please start using {1}() '
'instead.'.format('assertAlmostEquals', 'assertAlmostEqual')
)
def assertNotAlmostEquals(self, *args, **kwargs):
raise DeprecationWarning(
'The {0}() function is deprecated. Please start using {1}() '
'instead.'.format('assertNotAlmostEquals', 'assertNotAlmostEqual')
)
def repack_state_returns(self, state_ret):
assert isinstance(state_ret, dict), state_ret
return {x.split('_|-')[1]: y for x, y in six.iteritems(state_ret)}
def failUnlessEqual(self, *args, **kwargs):
raise DeprecationWarning(
'The {0}() function is deprecated. Please start using {1}() '
'instead.'.format('failUnlessEqual', 'assertEqual')
)
def failIfEqual(self, *args, **kwargs):
raise DeprecationWarning(
'The {0}() function is deprecated. Please start using {1}() '
'instead.'.format('failIfEqual', 'assertNotEqual')
)
def failUnless(self, *args, **kwargs):
raise DeprecationWarning(
'The {0}() function is deprecated. Please start using {1}() '
'instead.'.format('failUnless', 'assertTrue')
)
def failIf(self, *args, **kwargs):
raise DeprecationWarning(
'The {0}() function is deprecated. Please start using {1}() '
'instead.'.format('failIf', 'assertFalse')
)
def failUnlessRaises(self, *args, **kwargs):
raise DeprecationWarning(
'The {0}() function is deprecated. Please start using {1}() '
'instead.'.format('failUnlessRaises', 'assertRaises')
)
def failUnlessAlmostEqual(self, *args, **kwargs):
raise DeprecationWarning(
'The {0}() function is deprecated. Please start using {1}() '
'instead.'.format('failUnlessAlmostEqual', 'assertAlmostEqual')
)
def failIfAlmostEqual(self, *args, **kwargs):
raise DeprecationWarning(
'The {0}() function is deprecated. Please start using {1}() '
'instead.'.format('failIfAlmostEqual', 'assertNotAlmostEqual')
)
@staticmethod
def assert_called_once(mock):
try:
mock.assert_called_once()
except AttributeError:
log.warning('assert_called_once invoked, but not available')
if six.PY2:
def assertRegexpMatches(self, *args, **kwds):
raise DeprecationWarning(
'The {0}() function will be deprecated in python 3. Please start '
'using {1}() instead.'.format(
'assertRegexpMatches',
'assertRegex'
)
)
def assertRegex(self, text, regex, msg=None):
return _TestCase.assertRegexpMatches(self, text, regex, msg=msg)
def assertNotRegexpMatches(self, *args, **kwds):
raise DeprecationWarning(
'The {0}() function will be deprecated in python 3. Please start '
'using {1}() instead.'.format(
'assertNotRegexpMatches',
'assertNotRegex'
)
)
def assertNotRegex(self, text, regex, msg=None):
return _TestCase.assertNotRegexpMatches(self, text, regex, msg=msg)
def assertRaisesRegexp(self, *args, **kwds):
raise DeprecationWarning(
'The {0}() function will be deprecated in python 3. Please start '
'using {1}() instead.'.format(
'assertRaisesRegexp',
'assertRaisesRegex'
)
)
def assertRaisesRegex(self, exception, regexp, *args, **kwds):
return _TestCase.assertRaisesRegexp(self, exception, regexp, *args, **kwds)
else:
def assertRegexpMatches(self, *args, **kwds):
raise DeprecationWarning(
'The {0}() function is deprecated. Please start using {1}() '
'instead.'.format(
'assertRegexpMatches',
'assertRegex'
)
)
def assertNotRegexpMatches(self, *args, **kwds):
raise DeprecationWarning(
'The {0}() function is deprecated. Please start using {1}() '
'instead.'.format(
'assertNotRegexpMatches',
'assertNotRegex'
)
)
def assertRaisesRegexp(self, *args, **kwds):
raise DeprecationWarning(
'The {0}() function is deprecated. Please start using {1}() '
'instead.'.format(
'assertRaisesRegexp',
'assertRaisesRegex'
)
)
class TextTestResult(_TextTestResult):
def startTest(self, test):
log.debug('>>>>> START >>>>> %s', test.id())
return super(TextTestResult, self).startTest(test)
def stopTest(self, test):
log.debug('<<<<< END <<<<<<< %s', test.id())
return super(TextTestResult, self).stopTest(test)
class TextTestRunner(_TextTestRunner):
resultclass = TextTestResult
__all__ = [
'TestLoader',
'TextTestRunner',
'TestCase',
'expectedFailure',
'TestSuite',
'skipIf',
'TestResult'
]
| true
| true
|
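A note on the record above: its TestCase turns every legacy unittest alias into a hard DeprecationWarning raise, so stale call sites fail loudly instead of silently delegating. A minimal, self-contained sketch of that shim pattern (class and test names here are invented for illustration, not taken from salt):

import unittest

class StrictTestCase(unittest.TestCase):
    def assertEquals(self, *args, **kwargs):
        # legacy alias: refuse to delegate, point at the modern name
        raise DeprecationWarning(
            'The assertEquals() function is deprecated. Please start using '
            'assertEqual() instead.'
        )

class Demo(StrictTestCase):
    def test_alias_raises(self):
        # DeprecationWarning subclasses Exception, so assertRaises catches it
        with self.assertRaises(DeprecationWarning):
            self.assertEquals(1, 1)

if __name__ == '__main__':
    unittest.main()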
1c46efa6f932098b01ac8f6ff7f969b913d9d383
| 1,307
|
py
|
Python
|
demo.py
|
foamliu/Image-Matching
|
3213a8a574fa7bcc476d3de1c7370c268bf817a7
|
[
"MIT"
] | 12
|
2019-04-12T06:56:59.000Z
|
2020-05-03T00:47:33.000Z
|
demo.py
|
foamliu/Image-Matching
|
3213a8a574fa7bcc476d3de1c7370c268bf817a7
|
[
"MIT"
] | 1
|
2019-05-15T02:05:46.000Z
|
2019-05-17T17:57:34.000Z
|
demo.py
|
foamliu/Image-Matching
|
3213a8a574fa7bcc476d3de1c7370c268bf817a7
|
[
"MIT"
] | 2
|
2019-05-28T07:03:45.000Z
|
2020-03-20T09:49:15.000Z
|
import math
import cv2 as cv
import numpy as np
import torch
from PIL import Image
from torchvision import transforms
from models import ResNetMatchModel
def get_image(file):
img = cv.imread(file)
img = img[..., ::-1]  # convert OpenCV's BGR channel order to RGB
img = Image.fromarray(img, 'RGB')
img = transformer(img)
img = img.to(device)
return img
def get_feature(model, file):
img = get_image(file)
imgs = img.unsqueeze(dim=0)
with torch.no_grad():
output = model(imgs)
feature = output[0].cpu().numpy()
return feature / np.linalg.norm(feature)
if __name__ == "__main__":
device = torch.device('cpu')
threshold = 21.07971786746929
filename = 'image_matching.pt'
model = ResNetMatchModel()
model.load_state_dict(torch.load(filename))
model = model.to(device)
model.eval()
transformer = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
x0 = get_feature(model, '0.jpg')
x1 = get_feature(model, '6.jpg')
cosine = np.dot(x0, x1)
cosine = np.clip(cosine, -1, 1)
theta = math.acos(cosine)
theta = theta * 180 / math.pi
print(theta)
print(theta <= threshold)
| 22.929825
| 74
| 0.635042
|
import math
import cv2 as cv
import numpy as np
import torch
from PIL import Image
from torchvision import transforms
from models import ResNetMatchModel
def get_image(file):
img = cv.imread(file)
img = img[..., ::-1]
img = Image.fromarray(img, 'RGB')
img = transformer(img)
img = img.to(device)
return img
def get_feature(model, file):
img = get_image(file)
imgs = img.unsqueeze(dim=0)
with torch.no_grad():
output = model(imgs)
feature = output[0].cpu().numpy()
return feature / np.linalg.norm(feature)
if __name__ == "__main__":
device = torch.device('cpu')
threshold = 21.07971786746929
filename = 'image_matching.pt'
model = ResNetMatchModel()
model.load_state_dict(torch.load(filename))
model = model.to(device)
model.eval()
transformer = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
x0 = get_feature(model, '0.jpg')
x1 = get_feature(model, '6.jpg')
cosine = np.dot(x0, x1)
cosine = np.clip(cosine, -1, 1)
theta = math.acos(cosine)
theta = theta * 180 / math.pi
print(theta)
print(theta <= threshold)
| true
| true
|
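The decision rule in the record above converts the cosine similarity of L2-normalized features into an angle in degrees and compares it to a calibrated threshold. A standalone sketch of just that step, reusing the record's threshold but with made-up feature vectors:

import math
import numpy as np

def angular_distance(x0, x1):
    # features are assumed L2-normalized, so the dot product is the cosine
    cosine = np.clip(np.dot(x0, x1), -1, 1)
    return math.degrees(math.acos(cosine))

x0 = np.array([0.6, 0.8])                               # already unit length
x1 = np.array([0.5, 0.9]) / np.linalg.norm([0.5, 0.9])  # normalize first
theta = angular_distance(x0, x1)
print(theta, theta <= 21.07971786746929)  # small angle -> treated as a match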
1c46efb1d180176edecbf36aaf6099e81619e829
| 4,855
|
py
|
Python
|
torchflare/metrics/fbeta_meter.py
|
glenn-jocher/torchflare
|
3c55b5a0761f2e85dd6da95767c6ec03f0f5baad
|
[
"Apache-2.0"
] | 1
|
2021-06-12T12:39:04.000Z
|
2021-06-12T12:39:04.000Z
|
torchflare/metrics/fbeta_meter.py
|
weidao-Shi/torchflare
|
3c55b5a0761f2e85dd6da95767c6ec03f0f5baad
|
[
"Apache-2.0"
] | null | null | null |
torchflare/metrics/fbeta_meter.py
|
weidao-Shi/torchflare
|
3c55b5a0761f2e85dd6da95767c6ec03f0f5baad
|
[
"Apache-2.0"
] | null | null | null |
"""Implements FBeta and F1-score."""
import torch
from torchflare.metrics.meters import MetricMeter, _BaseInputHandler
class FBeta(_BaseInputHandler, MetricMeter):
"""Computes Fbeta Score.
Supports binary, multiclass and multilabel cases.
"""
def __init__(
self,
beta: float,
num_classes: int,
threshold: float = 0.5,
average: str = "macro",
multilabel: bool = False,
):
"""Constructor method for Fbeta score.
Args:
num_classes : The number of classes (for the binary case, use num_classes=1).
threshold: The threshold value for masking; inputs are raw logits.
average : One of "micro" or "macro".
beta : Weight of precision in the harmonic mean.
multilabel: Whether the problem is multilabel or not.
Note:
In case of binary classification, set num_classes = 1
"""
super(FBeta, self).__init__(
num_classes=num_classes,
multilabel=multilabel,
threshold=threshold,
average=average,
)
self.beta = beta
self.eps = 1e-20
self._outputs = None
self._targets = None
self.reset()
def handle(self) -> str:
"""Method to get the class name.
Returns:
The class name
"""
return self.__class__.__name__.lower()
def accumulate(self, outputs: torch.Tensor, targets: torch.Tensor):
"""Method to accumulate the outputs and targets.
Args:
outputs : raw logits from the network.
targets : Ground truth targets
"""
outputs, targets = self.detach_tensor(outputs), self.detach_tensor(targets)
self._outputs.append(outputs)
self._targets.append(targets)
def reset(self):
"""Resets the accumulation lists."""
self._outputs = []
self._targets = []
@property
def value(self) -> torch.Tensor:
"""Computes the FBeta Score.
Returns:
The computed Fbeta score.
"""
outputs = torch.cat(self._outputs)
targets = torch.cat(self._targets)
tp, fp, tn, fn = self.compute_stats(outputs=outputs, targets=targets)
precision = tp / (tp + fp + self.eps)
recall = tp / (tp + fn + self.eps)
numerator = (1 + self.beta ** 2) * precision * recall
denominator = self.beta ** 2 * precision + recall
fbeta = self.reduce(numerator=numerator, denominator=denominator)
return fbeta
class F1Score(_BaseInputHandler, MetricMeter):
"""Computes F1 Score.
Supports binary, multiclass and multilabel cases.
"""
def __init__(
self,
num_classes: int,
threshold: float = 0.5,
multilabel: bool = False,
average: str = "macro",
):
"""Constructor method for F1-score.
Args:
num_classes : The number of classes (for the binary case, use num_classes=1).
threshold: The threshold value for masking; inputs are raw logits.
average : One of "micro" or "macro".
multilabel: Whether the problem is multilabel or not.
"""
super(F1Score, self).__init__(
num_classes=num_classes,
multilabel=multilabel,
threshold=threshold,
average=average,
)
self.eps = 1e-20
self._outputs = None
self._targets = None
self.reset()
def handle(self) -> str:
"""Method to get the class name.
Returns:
The class name
"""
return self.__class__.__name__.lower()
@property
def value(self) -> torch.Tensor:
"""Value of FBeta Score.
Returns:
The computed F1-score
"""
outputs = torch.cat(self._outputs)
targets = torch.cat(self._targets)
tp, fp, tn, fn = self.compute_stats(outputs=outputs, targets=targets)
precision = tp / (tp + fp + self.eps)
recall = tp / (tp + fn + self.eps)
numerator = 2 * precision * recall
denominator = precision + recall
f1 = self.reduce(numerator=numerator, denominator=denominator)
return f1
def accumulate(self, outputs: torch.Tensor, targets: torch.Tensor):
"""Method to accumulate the outputs and targets.
Args:
outputs : raw logits from the network.
targets : Ground truth targets
"""
outputs, targets = self.detach_tensor(outputs), self.detach_tensor(targets)
self._outputs.append(outputs)
self._targets.append(targets)
def reset(self):
"""Resets the accumulation lists."""
self._outputs = []
self._targets = []
__all__ = ["FBeta", "F1Score"]
| 27.429379
| 91
| 0.581462
|
import torch
from torchflare.metrics.meters import MetricMeter, _BaseInputHandler
class FBeta(_BaseInputHandler, MetricMeter):
def __init__(
self,
beta: float,
num_classes: int,
threshold: float = 0.5,
average: str = "macro",
multilabel: bool = False,
):
super(FBeta, self).__init__(
num_classes=num_classes,
multilabel=multilabel,
threshold=threshold,
average=average,
)
self.beta = beta
self.eps = 1e-20
self._outputs = None
self._targets = None
self.reset()
def handle(self) -> str:
return self.__class__.__name__.lower()
def accumulate(self, outputs: torch.Tensor, targets: torch.Tensor):
outputs, targets = self.detach_tensor(outputs), self.detach_tensor(targets)
self._outputs.append(outputs)
self._targets.append(targets)
def reset(self):
self._outputs = []
self._targets = []
@property
def value(self) -> torch.Tensor:
outputs = torch.cat(self._outputs)
targets = torch.cat(self._targets)
tp, fp, tn, fn = self.compute_stats(outputs=outputs, targets=targets)
precision = tp / (tp + fp + self.eps)
recall = tp / (tp + fn + self.eps)
numerator = (1 + self.beta ** 2) * precision * recall
denominator = self.beta ** 2 * precision + recall
fbeta = self.reduce(numerator=numerator, denominator=denominator)
return fbeta
class F1Score(_BaseInputHandler, MetricMeter):
def __init__(
self,
num_classes: int,
threshold: float = 0.5,
multilabel: bool = False,
average: str = "macro",
):
super(F1Score, self).__init__(
num_classes=num_classes,
multilabel=multilabel,
threshold=threshold,
average=average,
)
self.eps = 1e-20
self._outputs = None
self._targets = None
self.reset()
def handle(self) -> str:
return self.__class__.__name__.lower()
@property
def value(self) -> torch.Tensor:
outputs = torch.cat(self._outputs)
targets = torch.cat(self._targets)
tp, fp, tn, fn = self.compute_stats(outputs=outputs, targets=targets)
precision = tp / (tp + fp + self.eps)
recall = tp / (tp + fn + self.eps)
numerator = 2 * precision * recall
denominator = precision + recall
f1 = self.reduce(numerator=numerator, denominator=denominator)
return f1
def accumulate(self, outputs: torch.Tensor, targets: torch.Tensor):
outputs, targets = self.detach_tensor(outputs), self.detach_tensor(targets)
self._outputs.append(outputs)
self._targets.append(targets)
def reset(self):
self._outputs = []
self._targets = []
__all__ = ["FBeta", "F1Score"]
| true
| true
|
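Both meters above reduce to the same precision/recall algebra: fbeta = (1 + beta^2) * P * R / (beta^2 * P + R), with F1 as the beta = 1 special case. A tiny pure-Python check of that formula, independent of torchflare:

def fbeta_score(tp, fp, fn, beta=1.0, eps=1e-20):
    precision = tp / (tp + fp + eps)
    recall = tp / (tp + fn + eps)
    return (1 + beta ** 2) * precision * recall / (beta ** 2 * precision + recall + eps)

# tp=8, fp=2, fn=4 -> precision 0.8, recall 2/3
print(round(fbeta_score(8, 2, 4), 4))          # F1 ~= 0.7273
print(round(fbeta_score(8, 2, 4, beta=2), 4))  # F2 ~= 0.6897 (recall-weighted)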
1c46f06b69ffba498e3069692b46574d299220a8
| 5,492
|
py
|
Python
|
tests/data/embeddings_test.py
|
richarajpal/deep_qa
|
d918335a1bed71b9cfccf1d5743321cee9c61952
|
[
"Apache-2.0"
] | 459
|
2017-02-08T13:40:17.000Z
|
2021-12-12T12:57:48.000Z
|
tests/data/embeddings_test.py
|
richarajpal/deep_qa
|
d918335a1bed71b9cfccf1d5743321cee9c61952
|
[
"Apache-2.0"
] | 176
|
2017-01-26T01:19:41.000Z
|
2018-04-22T19:16:01.000Z
|
tests/data/embeddings_test.py
|
richarajpal/deep_qa
|
d918335a1bed71b9cfccf1d5743321cee9c61952
|
[
"Apache-2.0"
] | 154
|
2017-01-26T01:00:30.000Z
|
2021-02-05T10:44:42.000Z
|
# pylint: disable=no-self-use,invalid-name
import gzip
import numpy
import pytest
from deep_qa.common.checks import ConfigurationError
from deep_qa.data.data_indexer import DataIndexer
from deep_qa.data.embeddings import PretrainedEmbeddings
from deep_qa.models.text_classification import ClassificationModel
from deep_qa.testing.test_case import DeepQaTestCase
class TestPretrainedEmbeddings(DeepQaTestCase):
# pylint: disable=protected-access
def test_get_embedding_layer_uses_correct_embedding_dim(self):
data_indexer = DataIndexer()
embeddings_filename = self.TEST_DIR + "embeddings.gz"
with gzip.open(embeddings_filename, 'wb') as embeddings_file:
embeddings_file.write("word1 1.0 2.3 -1.0\n".encode('utf-8'))
embeddings_file.write("word2 0.1 0.4 -4.0\n".encode('utf-8'))
embedding_layer = PretrainedEmbeddings.get_embedding_layer(embeddings_filename, data_indexer)
assert embedding_layer.output_dim == 3
with gzip.open(embeddings_filename, 'wb') as embeddings_file:
embeddings_file.write("word1 1.0 2.3 -1.0 3.1\n".encode('utf-8'))
embeddings_file.write("word2 0.1 0.4 -4.0 -1.2\n".encode('utf-8'))
embedding_layer = PretrainedEmbeddings.get_embedding_layer(embeddings_filename, data_indexer)
assert embedding_layer.output_dim == 4
def test_get_embedding_layer_crashes_when_embedding_dim_is_one(self):
data_indexer = DataIndexer()
embeddings_filename = self.TEST_DIR + "embeddings.gz"
with gzip.open(embeddings_filename, 'wb') as embeddings_file:
embeddings_file.write("dimensionality 3\n".encode('utf-8'))
embeddings_file.write("word1 1.0 2.3 -1.0\n".encode('utf-8'))
embeddings_file.write("word2 0.1 0.4 -4.0\n".encode('utf-8'))
with pytest.raises(Exception):
PretrainedEmbeddings.get_embedding_layer(embeddings_filename, data_indexer)
def test_get_embedding_layer_skips_inconsistent_lines(self):
data_indexer = DataIndexer()
data_indexer.add_word_to_index("word1")
data_indexer.add_word_to_index("word2")
embeddings_filename = self.TEST_DIR + "embeddings.gz"
with gzip.open(embeddings_filename, 'wb') as embeddings_file:
embeddings_file.write("word1 1.0 2.3 -1.0\n".encode('utf-8'))
embeddings_file.write("word2 0.1 0.4 \n".encode('utf-8'))
embedding_layer = PretrainedEmbeddings.get_embedding_layer(embeddings_filename, data_indexer)
print(embedding_layer.weights)
word_vector = embedding_layer._initial_weights[0][data_indexer.get_word_index("word2")]
assert not numpy.allclose(word_vector[:2], numpy.asarray([0.1, 0.4]))
def test_get_embedding_layer_actually_initializes_word_vectors_correctly(self):
data_indexer = DataIndexer()
data_indexer.add_word_to_index("word")
embeddings_filename = self.TEST_DIR + "embeddings.gz"
with gzip.open(embeddings_filename, 'wb') as embeddings_file:
embeddings_file.write("word 1.0 2.3 -1.0\n".encode('utf-8'))
embedding_layer = PretrainedEmbeddings.get_embedding_layer(embeddings_filename, data_indexer)
word_vector = embedding_layer._initial_weights[0][data_indexer.get_word_index("word")]
assert numpy.allclose(word_vector, numpy.asarray([1.0, 2.3, -1.0]))
def test_get_embedding_layer_initializes_unseen_words_randomly_not_zero(self):
data_indexer = DataIndexer()
data_indexer.add_word_to_index("word2")
embeddings_filename = self.TEST_DIR + "embeddings.gz"
with gzip.open(embeddings_filename, 'wb') as embeddings_file:
embeddings_file.write("word 1.0 2.3 -1.0\n".encode('utf-8'))
embedding_layer = PretrainedEmbeddings.get_embedding_layer(embeddings_filename, data_indexer)
word_vector = embedding_layer._initial_weights[0][data_indexer.get_word_index("word2")]
assert not numpy.allclose(word_vector, numpy.asarray([0.0, 0.0, 0.0]))
def test_embedding_will_not_project_random_embeddings(self):
self.write_pretrained_vector_files()
self.write_true_false_model_files()
with pytest.raises(ConfigurationError):
args = {
"embeddings": {
"words": {
"dimension": 5,
"project": True,
"fine_tune": True,
"dropout": 0.2
}
}
}
model = self.get_model(ClassificationModel, args)
model.train()
def test_projection_dim_not_equal_to_pretrained_dim_with_no_projection_flag_raises_error(self):
self.write_pretrained_vector_files()
self.write_true_false_model_files()
with pytest.raises(ConfigurationError):
args = {
"embeddings": {
"words": {
"dimension": 13,
"pretrained_file": self.PRETRAINED_VECTORS_GZIP,
"project": False,
"fine_tune": False,
"dropout": 0.2
}
}
}
model = self.get_model(ClassificationModel, args)
model.train()
| 51.327103
| 101
| 0.64512
|
import gzip
import numpy
import pytest
from deep_qa.common.checks import ConfigurationError
from deep_qa.data.data_indexer import DataIndexer
from deep_qa.data.embeddings import PretrainedEmbeddings
from deep_qa.models.text_classification import ClassificationModel
from deep_qa.testing.test_case import DeepQaTestCase
class TestPretrainedEmbeddings(DeepQaTestCase):
def test_get_embedding_layer_uses_correct_embedding_dim(self):
data_indexer = DataIndexer()
embeddings_filename = self.TEST_DIR + "embeddings.gz"
with gzip.open(embeddings_filename, 'wb') as embeddings_file:
embeddings_file.write("word1 1.0 2.3 -1.0\n".encode('utf-8'))
embeddings_file.write("word2 0.1 0.4 -4.0\n".encode('utf-8'))
embedding_layer = PretrainedEmbeddings.get_embedding_layer(embeddings_filename, data_indexer)
assert embedding_layer.output_dim == 3
with gzip.open(embeddings_filename, 'wb') as embeddings_file:
embeddings_file.write("word1 1.0 2.3 -1.0 3.1\n".encode('utf-8'))
embeddings_file.write("word2 0.1 0.4 -4.0 -1.2\n".encode('utf-8'))
embedding_layer = PretrainedEmbeddings.get_embedding_layer(embeddings_filename, data_indexer)
assert embedding_layer.output_dim == 4
def test_get_embedding_layer_crashes_when_embedding_dim_is_one(self):
data_indexer = DataIndexer()
embeddings_filename = self.TEST_DIR + "embeddings.gz"
with gzip.open(embeddings_filename, 'wb') as embeddings_file:
embeddings_file.write("dimensionality 3\n".encode('utf-8'))
embeddings_file.write("word1 1.0 2.3 -1.0\n".encode('utf-8'))
embeddings_file.write("word2 0.1 0.4 -4.0\n".encode('utf-8'))
with pytest.raises(Exception):
PretrainedEmbeddings.get_embedding_layer(embeddings_filename, data_indexer)
def test_get_embedding_layer_skips_inconsistent_lines(self):
data_indexer = DataIndexer()
data_indexer.add_word_to_index("word1")
data_indexer.add_word_to_index("word2")
embeddings_filename = self.TEST_DIR + "embeddings.gz"
with gzip.open(embeddings_filename, 'wb') as embeddings_file:
embeddings_file.write("word1 1.0 2.3 -1.0\n".encode('utf-8'))
embeddings_file.write("word2 0.1 0.4 \n".encode('utf-8'))
embedding_layer = PretrainedEmbeddings.get_embedding_layer(embeddings_filename, data_indexer)
print(embedding_layer.weights)
word_vector = embedding_layer._initial_weights[0][data_indexer.get_word_index("word2")]
assert not numpy.allclose(word_vector[:2], numpy.asarray([0.1, 0.4]))
def test_get_embedding_layer_actually_initializes_word_vectors_correctly(self):
data_indexer = DataIndexer()
data_indexer.add_word_to_index("word")
embeddings_filename = self.TEST_DIR + "embeddings.gz"
with gzip.open(embeddings_filename, 'wb') as embeddings_file:
embeddings_file.write("word 1.0 2.3 -1.0\n".encode('utf-8'))
embedding_layer = PretrainedEmbeddings.get_embedding_layer(embeddings_filename, data_indexer)
word_vector = embedding_layer._initial_weights[0][data_indexer.get_word_index("word")]
assert numpy.allclose(word_vector, numpy.asarray([1.0, 2.3, -1.0]))
def test_get_embedding_layer_initializes_unseen_words_randomly_not_zero(self):
data_indexer = DataIndexer()
data_indexer.add_word_to_index("word2")
embeddings_filename = self.TEST_DIR + "embeddings.gz"
with gzip.open(embeddings_filename, 'wb') as embeddings_file:
embeddings_file.write("word 1.0 2.3 -1.0\n".encode('utf-8'))
embedding_layer = PretrainedEmbeddings.get_embedding_layer(embeddings_filename, data_indexer)
word_vector = embedding_layer._initial_weights[0][data_indexer.get_word_index("word2")]
assert not numpy.allclose(word_vector, numpy.asarray([0.0, 0.0, 0.0]))
def test_embedding_will_not_project_random_embeddings(self):
self.write_pretrained_vector_files()
self.write_true_false_model_files()
with pytest.raises(ConfigurationError):
args = {
"embeddings": {
"words": {
"dimension": 5,
"project": True,
"fine_tune": True,
"dropout": 0.2
}
}
}
model = self.get_model(ClassificationModel, args)
model.train()
def test_projection_dim_not_equal_to_pretrained_dim_with_no_projection_flag_raises_error(self):
self.write_pretrained_vector_files()
self.write_true_false_model_files()
with pytest.raises(ConfigurationError):
args = {
"embeddings": {
"words": {
"dimension": 13,
"pretrained_file": self.PRETRAINED_VECTORS_GZIP,
"project": False,
"fine_tune": False,
"dropout": 0.2
}
}
}
model = self.get_model(ClassificationModel, args)
model.train()
| true
| true
|
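The tests above pin down an on-disk format: gzip text with one token per line followed by its vector, a single-number first line being an error, and inconsistent lines being skipped. A minimal reader honoring those conventions (a sketch under those assumptions, not deep_qa's actual loader):

import gzip
import numpy

def read_pretrained_embeddings(path):
    vectors = {}
    dim = None
    with gzip.open(path, 'rt', encoding='utf-8') as handle:
        for line in handle:
            fields = line.split()
            if dim is None:
                dim = len(fields) - 1
                if dim <= 1:
                    raise ValueError('embedding files must have dimension > 1')
            if len(fields) - 1 != dim:
                continue  # skip inconsistent lines, as the tests expect
            vectors[fields[0]] = numpy.asarray([float(x) for x in fields[1:]])
    return vectors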
1c46f19ef96c73dc748b7707cea8dbf4595a8711
| 2,871
|
py
|
Python
|
worker/view.py
|
photonle/bot
|
3689d3bfb177bb4b2efe207311283e63118fa427
|
[
"MIT"
] | 1
|
2020-03-18T14:50:59.000Z
|
2020-03-18T14:50:59.000Z
|
worker/view.py
|
photonle/bot
|
3689d3bfb177bb4b2efe207311283e63118fa427
|
[
"MIT"
] | 3
|
2020-03-17T14:07:43.000Z
|
2021-02-14T13:28:22.000Z
|
worker/view.py
|
photonle/bot
|
3689d3bfb177bb4b2efe207311283e63118fa427
|
[
"MIT"
] | 1
|
2020-05-17T15:19:31.000Z
|
2020-05-17T15:19:31.000Z
|
from shutil import copy
import sqlite3
import sys
sys.stdout = open("report.txt", "w", encoding="utf8")
copy('photon.db', 'photon.read.db')
conn = sqlite3.connect('photon.read.db')
curs = conn.cursor()
curs.execute("SELECT * FROM (SELECT path, COUNT(*) as count FROM files GROUP BY path) WHERE count > 1 ORDER BY count ASC, path ASC")
for reply in curs:
print("\nLua Path '{}' has been seen in {} addons.".format(*reply))
inner = conn.cursor()
inner.execute("SELECT path, owner, name, author, sname FROM files INNER JOIN addons ON files.owner = addons.wsid INNER JOIN authors ON addons.author = authors.sid WHERE path = ? ORDER BY wsid ASC", (reply[0],))
for addon in inner:
print("\tSeen in: '{2}' ({1}) by '{4}' ({3}).".format(*addon))
curs.execute("SELECT * FROM (SELECT cname, COUNT(*) as count FROM cars GROUP BY cname) WHERE count > 1 ORDER BY count ASC, cname ASC")
for reply in curs:
print("\nVehicle ID '{}' has been seen in {} addons.".format(*reply))
inner = conn.cursor()
inner.execute("SELECT cname, owner, name, author, sname FROM cars INNER JOIN addons ON cars.owner = addons.wsid INNER JOIN authors ON addons.author = authors.sid WHERE cname = ? ORDER BY wsid ASC", (reply[0],))
for addon in inner:
print("\tSeen in: '{2}' ({1}) by '{4}' ({3}).".format(*addon))
curs.execute("SELECT * FROM (SELECT cname, COUNT(*) as count FROM components GROUP BY cname) WHERE count > 1 ORDER BY count ASC, cname ASC")
for reply in curs:
print("\nComponent '{}' has been seen in {} addons.".format(*reply))
inner = conn.cursor()
inner.execute("SELECT cname, owner, name, author, sname FROM components INNER JOIN addons ON components.owner = addons.wsid INNER JOIN authors ON addons.author = authors.sid WHERE cname = ? ORDER BY wsid ASC", (reply[0],))
for addon in inner:
print("\tSeen in: '{2}' ({1}) by '{4}' ({3}).".format(*addon))
# curs.execute("SELECT * FROM files INNER JOIN addons ON files.owner = addons.wsid WHERE owner IN (SELECT wsid FROM addons WHERE author = 76561198166686412)")
# for reply in curs:
# inner = conn.cursor()
# inner.execute("SELECT path, owner, name, author, sname FROM files INNER JOIN addons ON files.owner = addons.wsid INNER JOIN authors ON addons.author = authors.sid WHERE path = ? ORDER BY wsid ASC", (reply[0],))
# # res = inner.fetchone()
# # if res is not None:
# # print("Lua Path '{0}'.".format(*reply))
# # print("\tSeen in: '{2}' ({1}) by '{4}' ({3}).".format(*res))
# for reply in curs:
# print("\nLua Path '{}' has been seen in {} addons.".format(*reply))
# inner = conn.cursor()
# inner.execute("SELECT path, owner, name, author, sname FROM files INNER JOIN addons ON files.owner = addons.wsid INNER JOIN authors ON addons.author = authors.sid WHERE path = ? ORDER BY wsid ASC", (reply[0],))
# for addon in inner:
# print("\tSeen in: '{2}' ({1}) by '{4}' ({3}).".format(*addon))
| 58.591837
| 223
| 0.676071
|
from shutil import copy
import sqlite3
import sys
sys.stdout = open("report.txt", "w", encoding="utf8")
copy('photon.db', 'photon.read.db')
conn = sqlite3.connect('photon.read.db')
curs = conn.cursor()
curs.execute("SELECT * FROM (SELECT path, COUNT(*) as count FROM files GROUP BY path) WHERE count > 1 ORDER BY count ASC, path ASC")
for reply in curs:
print("\nLua Path '{}' has been seen in {} addons.".format(*reply))
inner = conn.cursor()
inner.execute("SELECT path, owner, name, author, sname FROM files INNER JOIN addons ON files.owner = addons.wsid INNER JOIN authors ON addons.author = authors.sid WHERE path = ? ORDER BY wsid ASC", (reply[0],))
for addon in inner:
print("\tSeen in: '{2}' ({1}) by '{4}' ({3}).".format(*addon))
curs.execute("SELECT * FROM (SELECT cname, COUNT(*) as count FROM cars GROUP BY cname) WHERE count > 1 ORDER BY count ASC, cname ASC")
for reply in curs:
print("\nVehicle ID '{}' has been seen in {} addons.".format(*reply))
inner = conn.cursor()
inner.execute("SELECT cname, owner, name, author, sname FROM cars INNER JOIN addons ON cars.owner = addons.wsid INNER JOIN authors ON addons.author = authors.sid WHERE cname = ? ORDER BY wsid ASC", (reply[0],))
for addon in inner:
print("\tSeen in: '{2}' ({1}) by '{4}' ({3}).".format(*addon))
curs.execute("SELECT * FROM (SELECT cname, COUNT(*) as count FROM components GROUP BY cname) WHERE count > 1 ORDER BY count ASC, cname ASC")
for reply in curs:
print("\nComponent '{}' has been seen in {} addons.".format(*reply))
inner = conn.cursor()
inner.execute("SELECT cname, owner, name, author, sname FROM components INNER JOIN addons ON components.owner = addons.wsid INNER JOIN authors ON addons.author = authors.sid WHERE cname = ? ORDER BY wsid ASC", (reply[0],))
for addon in inner:
print("\tSeen in: '{2}' ({1}) by '{4}' ({3}).".format(*addon))
| true
| true
|
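The report script above repeats one query shape three times: a GROUP BY/COUNT subquery finds keys that appear in more than one addon, then a detail query runs per duplicate key. The same shape on a throwaway in-memory table (table layout and values invented for the demo):

import sqlite3

conn = sqlite3.connect(':memory:')
conn.executescript("""
    CREATE TABLE files (path TEXT, owner TEXT);
    INSERT INTO files VALUES
        ('lua/a.lua', 'addon1'), ('lua/a.lua', 'addon2'), ('lua/b.lua', 'addon3');
""")
curs = conn.execute(
    "SELECT * FROM (SELECT path, COUNT(*) AS count FROM files GROUP BY path) "
    "WHERE count > 1 ORDER BY count ASC, path ASC")
for path, count in curs:
    print("'{}' seen in {} addons".format(path, count))
    # a fresh cursor per key, mirroring the inner loop in the record above
    for (owner,) in conn.execute(
            "SELECT owner FROM files WHERE path = ? ORDER BY owner", (path,)):
        print("\tseen in:", owner)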
1c46f2088730032fec400cd35792bfc6b4aa4935
| 3,714
|
py
|
Python
|
Lesson 04/walkthrough.py
|
NoelKocheril/Python101
|
b0e923e1ec3e936babbd57a310ec72b13e07ac57
|
[
"WTFPL"
] | null | null | null |
Lesson 04/walkthrough.py
|
NoelKocheril/Python101
|
b0e923e1ec3e936babbd57a310ec72b13e07ac57
|
[
"WTFPL"
] | null | null | null |
Lesson 04/walkthrough.py
|
NoelKocheril/Python101
|
b0e923e1ec3e936babbd57a310ec72b13e07ac57
|
[
"WTFPL"
] | null | null | null |
# Defines a function
# def my_func():
# print("Hello, World!")
# Calls a function
# my_func()
# Repeats a function
# for j in range(10):
# my_func()
# myName = "Noel Kocheril"
# def hello(fname):
# print(f"Hello {fname}")
# hello() # Missing an argument
# hello("Noel")
# hello("Vit")
# hello("Shawn")
# Takes two arguments, and adds them together
# def sum(x, y):
# print(x + y)
# def difference(x, y):
# print(x - y)
# def difference2(x, y, z):
# print(x - y)
# difference(5, 10)
# difference(y=5, x=10)
# difference2(10, 5)
# x = 5
# print("Hello", "Noel", x)
# def printLastChild(*children):
# print(children[-1])
# printLastChild("Noel", "Steve", "Bob")
# Not allowed to have multiple arbitrary arguments
# def printNames(*fname, *lnames):
# print(f"{fname} {lnames}")
# def printLastName(**person):
# print("Hello, Mr. " + person["lname"])
# printLastName(fname="Noel", age=25, lname="Kocheril")
# def greet(name, message="Good Morning!"):
# print(f"Hello, {name}, {message}")
# greet("Noel")
# greet("Vit", "How are you?")
# Not allowed: Non-Default Argument after default argument
# def greet(message="Good Morning!", name):
# print(f"Hello, {name}, {message}")
# def my_func(food):
# for x in food:
# print(x)
# fruits = ["apple", "banana", "orange"]
# my_func(fruits)
# def square(x):
# return x * x
# print(square(10))
# def my_func():
# return 4
# print("This will never run.....")
# def percentageToLetterGrade(percentage):
# if percentage >= 80:
# return "A"
# elif percentage >= 70:
# return "B"
# elif percentage >= 60:
# return "C"
# elif percentage >= 50:
# return "D"
# return "F"
# print(percentageToLetterGrade(50))
# def sumOfNumbers(n):
# if n >= 0:
# result = n + sumOfNumbers(n - 1)
# print(result)
# else:
# result = 0
# return result
# sumOfNumbers(5)
# """
# sumOfNumbers(5)
# -> result = 5 + sumOfNumbers(4)
# -> result = 5 + 4 + sumOfNumbers(3) -> 0
# """
# def TowersOfHanoi()
# Fibonacci Sequence - n = n-1 + n-2
# x_n = x_n-1 + x_n-2
# count = 0
# def FibonacciSequence(n):
# if n == 0:
# result = 0
# elif n == 1:
# result = 1
# else:
# result = FibonacciSequence(n - 1) + FibonacciSequence(n - 2)
# global count
# count += 1
# print(result)
# return result
# FibonacciSequence(10)
# print(count)
# for i in range(100):
# print(f"{i}: {FibonacciSequence(i)}")
# def fib(n):
# if n
# sum = 0
# for i in range(1, 10):
# sum += 2 ** i
# print(sum)
"""
fib(n) -> fib(n-1) + fib(n-2)
depth 1 - 1 fib call
depth 2 - 2 fib calls
depth 3 - 4 fib calls
depth n - ~2^n calls (2^34 = 17,179,869,184)
"""
# Power Function - Using Recursion
# power(base, expo)
# x^n
# def power(base, expo):
# if expo > 0:
# result = power(base, expo - 1) * (base)
# print(result)
# return result
# elif expo == 0:
# return 1
# power(3, 3)
"""
power(3,3)
result = 27
"""
"""
STEP 1
N^6
->
(N^5 * N)
STEP 2
N^5 * N
->
(N^4 * N) * N
(N^3 * N) * N * N
(N^2 * N) * N * N * N
(N^1 * N) * N * N * N * N
(N^0 * N) * N * N * N * N * N
1 * N * N * N * N * N * N
"""
# always comes back to sequences
# Factorial - n! = n * (n - 1)!
def factorial(x):
if x > 0:
result = x * factorial(x - 1)
print(result)
return result
elif x == 0:
return 1
def fact(x):
if x == 0:
return 1
return x * fact(x - 1)
for i in range(5):
fact(i)
"""""
n!
4! = 4*3*2*1
4!
3! = 3*2*1
4!
4 * 3!
4 * 3 * 2!
4 * 3 * 2 * 1!
4 * 3 * 2 * 1 * 0!
4 * 3 * 2 * 1 * 1
"""
| 13.407942
| 70
| 0.522348
|
# sumOfNumbers(5)
# -> result = 5 + sumOfNumbers(4)
# -> result = 5 + 4 + sumOfNumbers(3) -> 0
# """
def factorial(x):
if x > 0:
result = x * factorial(x - 1)
print(result)
return result
elif x == 0:
return 1
def fact(x):
if x == 0:
return 1
return x * fact(x - 1)
for i in range(5):
fact(i)
| true
| true
|
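The lesson's comments put the naive recursive Fibonacci at roughly 2^n calls. Memoization collapses that to one evaluation per n; a short sketch continuing the lesson's style:

from functools import lru_cache

@lru_cache(maxsize=None)
def fib(n):
    # each distinct n is computed once, then served from the cache
    if n < 2:
        return n
    return fib(n - 1) + fib(n - 2)

print(fib(10))   # 55
print(fib(100))  # instant, versus ~2**100 calls for the naive version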
1c46f24731b57cf200f9345b0298f65fdfe81f08
| 1,237
|
py
|
Python
|
ecom/urls.py
|
dongmokevin/ecomv1
|
abb3dc5a5476c379c029b8299e820c1979d5eb14
|
[
"MIT"
] | null | null | null |
ecom/urls.py
|
dongmokevin/ecomv1
|
abb3dc5a5476c379c029b8299e820c1979d5eb14
|
[
"MIT"
] | null | null | null |
ecom/urls.py
|
dongmokevin/ecomv1
|
abb3dc5a5476c379c029b8299e820c1979d5eb14
|
[
"MIT"
] | null | null | null |
"""ecom URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('core.urls')),
path('basket/', include('basket.urls', namespace='basket')),
path('payment/', include('payment.urls', namespace='payment')),
path('orders/', include('orders.urls', namespace='orders')),
path('account/', include('account.urls', namespace='account')),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 35.342857
| 80
| 0.704931
|
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('core.urls')),
path('basket/', include('basket.urls', namespace='basket')),
path('payment/', include('payment.urls', namespace='payment')),
path('orders/', include('orders.urls', namespace='orders')),
path('account/', include('account.urls', namespace='account')),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| true
| true
|
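The namespace arguments to include() above are what make reverse lookups unambiguous across apps. Assuming basket.urls defines a pattern named 'summary' (a hypothetical name, not taken from the record), the lookup would read:

from django.urls import reverse

url = reverse('basket:summary')  # '<namespace>:<url name>'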
1c46f4a9f5384283addc48510e60b1443d6e5e60
| 898
|
py
|
Python
|
python/hsml/utils/tensor.py
|
robzor92/models-api
|
d83ebd775acab4fad94cd4c6a38107635e4b4880
|
[
"Apache-2.0"
] | null | null | null |
python/hsml/utils/tensor.py
|
robzor92/models-api
|
d83ebd775acab4fad94cd4c6a38107635e4b4880
|
[
"Apache-2.0"
] | null | null | null |
python/hsml/utils/tensor.py
|
robzor92/models-api
|
d83ebd775acab4fad94cd4c6a38107635e4b4880
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2021 Logical Clocks AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class Tensor:
"""Metadata object representing a model signature for a model."""
def __init__(self, data_type=None, shape=None):
self.data_type = data_type
self.shape = shape
def to_dict(self):
return {"shape": self.shape, "dataType": self.data_type}
| 32.071429
| 76
| 0.7049
|
class Tensor:
def __init__(self, data_type=None, shape=None):
self.data_type = data_type
self.shape = shape
def to_dict(self):
return {"shape": self.shape, "dataType": self.data_type}
| true
| true
|
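Tensor above is a plain value object whose one job is the camelCase rename in to_dict(). Usage with illustrative values:

t = Tensor(data_type='float32', shape=[None, 224, 224, 3])
print(t.to_dict())  # {'shape': [None, 224, 224, 3], 'dataType': 'float32'}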
1c46f50f3cb0a12eb3ccbd5ce2ef644903c88627
| 12,415
|
py
|
Python
|
homeassistant/components/alarmdecoder/config_flow.py
|
DavidDeSloovere/core
|
909a20b36d4df6724c955c2ae28cb82fe6d50c2e
|
[
"Apache-2.0"
] | 4
|
2020-08-10T20:02:24.000Z
|
2022-01-31T02:14:22.000Z
|
homeassistant/components/alarmdecoder/config_flow.py
|
jagadeeshvenkatesh/core
|
1bd982668449815fee2105478569f8e4b5670add
|
[
"Apache-2.0"
] | 78
|
2020-07-23T07:13:08.000Z
|
2022-03-31T06:02:04.000Z
|
homeassistant/components/alarmdecoder/config_flow.py
|
jagadeeshvenkatesh/core
|
1bd982668449815fee2105478569f8e4b5670add
|
[
"Apache-2.0"
] | 3
|
2022-01-17T20:10:54.000Z
|
2022-01-17T20:17:22.000Z
|
"""Config flow for AlarmDecoder."""
import logging
from adext import AdExt
from alarmdecoder.devices import SerialDevice, SocketDevice
from alarmdecoder.util import NoDeviceError
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components.binary_sensor import DEVICE_CLASSES
from homeassistant.const import CONF_HOST, CONF_PORT, CONF_PROTOCOL
from homeassistant.core import callback
from .const import (
CONF_ALT_NIGHT_MODE,
CONF_AUTO_BYPASS,
CONF_CODE_ARM_REQUIRED,
CONF_DEVICE_BAUD,
CONF_DEVICE_PATH,
CONF_RELAY_ADDR,
CONF_RELAY_CHAN,
CONF_ZONE_LOOP,
CONF_ZONE_NAME,
CONF_ZONE_NUMBER,
CONF_ZONE_RFID,
CONF_ZONE_TYPE,
DEFAULT_ARM_OPTIONS,
DEFAULT_DEVICE_BAUD,
DEFAULT_DEVICE_HOST,
DEFAULT_DEVICE_PATH,
DEFAULT_DEVICE_PORT,
DEFAULT_ZONE_OPTIONS,
DEFAULT_ZONE_TYPE,
DOMAIN,
OPTIONS_ARM,
OPTIONS_ZONES,
PROTOCOL_SERIAL,
PROTOCOL_SOCKET,
)
EDIT_KEY = "edit_selection"
EDIT_ZONES = "Zones"
EDIT_SETTINGS = "Arming Settings"
_LOGGER = logging.getLogger(__name__)
class AlarmDecoderFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a AlarmDecoder config flow."""
VERSION = 1
def __init__(self):
"""Initialize AlarmDecoder ConfigFlow."""
self.protocol = None
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for AlarmDecoder."""
return AlarmDecoderOptionsFlowHandler(config_entry)
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
if user_input is not None:
self.protocol = user_input[CONF_PROTOCOL]
return await self.async_step_protocol()
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{
vol.Required(CONF_PROTOCOL): vol.In(
[PROTOCOL_SOCKET, PROTOCOL_SERIAL]
),
}
),
)
async def async_step_protocol(self, user_input=None):
"""Handle AlarmDecoder protocol setup."""
errors = {}
if user_input is not None:
if _device_already_added(
self._async_current_entries(), user_input, self.protocol
):
return self.async_abort(reason="already_configured")
connection = {}
baud = None
if self.protocol == PROTOCOL_SOCKET:
host = connection[CONF_HOST] = user_input[CONF_HOST]
port = connection[CONF_PORT] = user_input[CONF_PORT]
title = f"{host}:{port}"
device = SocketDevice(interface=(host, port))
if self.protocol == PROTOCOL_SERIAL:
path = connection[CONF_DEVICE_PATH] = user_input[CONF_DEVICE_PATH]
baud = connection[CONF_DEVICE_BAUD] = user_input[CONF_DEVICE_BAUD]
title = path
device = SerialDevice(interface=path)
controller = AdExt(device)
def test_connection():
controller.open(baud)
controller.close()
try:
await self.hass.async_add_executor_job(test_connection)
return self.async_create_entry(
title=title, data={CONF_PROTOCOL: self.protocol, **connection}
)
except NoDeviceError:
errors["base"] = "cannot_connect"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception during AlarmDecoder setup")
errors["base"] = "unknown"
if self.protocol == PROTOCOL_SOCKET:
schema = vol.Schema(
{
vol.Required(CONF_HOST, default=DEFAULT_DEVICE_HOST): str,
vol.Required(CONF_PORT, default=DEFAULT_DEVICE_PORT): int,
}
)
if self.protocol == PROTOCOL_SERIAL:
schema = vol.Schema(
{
vol.Required(CONF_DEVICE_PATH, default=DEFAULT_DEVICE_PATH): str,
vol.Required(CONF_DEVICE_BAUD, default=DEFAULT_DEVICE_BAUD): int,
}
)
return self.async_show_form(
step_id="protocol",
data_schema=schema,
errors=errors,
)
class AlarmDecoderOptionsFlowHandler(config_entries.OptionsFlow):
"""Handle AlarmDecoder options."""
def __init__(self, config_entry: config_entries.ConfigEntry):
"""Initialize AlarmDecoder options flow."""
self.arm_options = config_entry.options.get(OPTIONS_ARM, DEFAULT_ARM_OPTIONS)
self.zone_options = config_entry.options.get(
OPTIONS_ZONES, DEFAULT_ZONE_OPTIONS
)
self.selected_zone = None
async def async_step_init(self, user_input=None):
"""Manage the options."""
if user_input is not None:
if user_input[EDIT_KEY] == EDIT_SETTINGS:
return await self.async_step_arm_settings()
if user_input[EDIT_KEY] == EDIT_ZONES:
return await self.async_step_zone_select()
return self.async_show_form(
step_id="init",
data_schema=vol.Schema(
{
vol.Required(EDIT_KEY, default=EDIT_SETTINGS): vol.In(
[EDIT_SETTINGS, EDIT_ZONES]
)
},
),
)
async def async_step_arm_settings(self, user_input=None):
"""Arming options form."""
if user_input is not None:
return self.async_create_entry(
title="",
data={OPTIONS_ARM: user_input, OPTIONS_ZONES: self.zone_options},
)
return self.async_show_form(
step_id="arm_settings",
data_schema=vol.Schema(
{
vol.Optional(
CONF_ALT_NIGHT_MODE,
default=self.arm_options[CONF_ALT_NIGHT_MODE],
): bool,
vol.Optional(
CONF_AUTO_BYPASS, default=self.arm_options[CONF_AUTO_BYPASS]
): bool,
vol.Optional(
CONF_CODE_ARM_REQUIRED,
default=self.arm_options[CONF_CODE_ARM_REQUIRED],
): bool,
},
),
)
async def async_step_zone_select(self, user_input=None):
"""Zone selection form."""
errors = _validate_zone_input(user_input)
if user_input is not None and not errors:
self.selected_zone = str(
int(user_input[CONF_ZONE_NUMBER])
) # remove leading zeros
return await self.async_step_zone_details()
return self.async_show_form(
step_id="zone_select",
data_schema=vol.Schema({vol.Required(CONF_ZONE_NUMBER): str}),
errors=errors,
)
async def async_step_zone_details(self, user_input=None):
"""Zone details form."""
errors = _validate_zone_input(user_input)
if user_input is not None and not errors:
zone_options = self.zone_options.copy()
zone_id = self.selected_zone
zone_options[zone_id] = _fix_input_types(user_input)
# Delete zone entry if zone_name is omitted
if CONF_ZONE_NAME not in zone_options[zone_id]:
zone_options.pop(zone_id)
return self.async_create_entry(
title="",
data={OPTIONS_ARM: self.arm_options, OPTIONS_ZONES: zone_options},
)
existing_zone_settings = self.zone_options.get(self.selected_zone, {})
return self.async_show_form(
step_id="zone_details",
description_placeholders={CONF_ZONE_NUMBER: self.selected_zone},
data_schema=vol.Schema(
{
vol.Optional(
CONF_ZONE_NAME,
description={
"suggested_value": existing_zone_settings.get(
CONF_ZONE_NAME
)
},
): str,
vol.Optional(
CONF_ZONE_TYPE,
default=existing_zone_settings.get(
CONF_ZONE_TYPE, DEFAULT_ZONE_TYPE
),
): vol.In(DEVICE_CLASSES),
vol.Optional(
CONF_ZONE_RFID,
description={
"suggested_value": existing_zone_settings.get(
CONF_ZONE_RFID
)
},
): str,
vol.Optional(
CONF_ZONE_LOOP,
description={
"suggested_value": existing_zone_settings.get(
CONF_ZONE_LOOP
)
},
): str,
vol.Optional(
CONF_RELAY_ADDR,
description={
"suggested_value": existing_zone_settings.get(
CONF_RELAY_ADDR
)
},
): str,
vol.Optional(
CONF_RELAY_CHAN,
description={
"suggested_value": existing_zone_settings.get(
CONF_RELAY_CHAN
)
},
): str,
}
),
errors=errors,
)
def _validate_zone_input(zone_input):
if not zone_input:
return {}
errors = {}
# CONF_RELAY_ADDR & CONF_RELAY_CHAN are inclusive
if (CONF_RELAY_ADDR in zone_input and CONF_RELAY_CHAN not in zone_input) or (
CONF_RELAY_ADDR not in zone_input and CONF_RELAY_CHAN in zone_input
):
errors["base"] = "relay_inclusive"
# The following keys must be int
for key in [CONF_ZONE_NUMBER, CONF_ZONE_LOOP, CONF_RELAY_ADDR, CONF_RELAY_CHAN]:
if key in zone_input:
try:
int(zone_input[key])
except ValueError:
errors[key] = "int"
# CONF_ZONE_LOOP depends on CONF_ZONE_RFID
if CONF_ZONE_LOOP in zone_input and CONF_ZONE_RFID not in zone_input:
errors[CONF_ZONE_LOOP] = "loop_rfid"
# CONF_ZONE_LOOP must be 1-4
if (
CONF_ZONE_LOOP in zone_input
and zone_input[CONF_ZONE_LOOP].isdigit()
and int(zone_input[CONF_ZONE_LOOP]) not in list(range(1, 5))
):
errors[CONF_ZONE_LOOP] = "loop_range"
return errors
def _fix_input_types(zone_input):
"""Convert necessary keys to int.
Since ConfigFlow inputs of type int cannot default to an empty string, we collect the values below as
strings and then convert them to ints.
"""
for key in [CONF_ZONE_LOOP, CONF_RELAY_ADDR, CONF_RELAY_CHAN]:
if key in zone_input:
zone_input[key] = int(zone_input[key])
return zone_input
def _device_already_added(current_entries, user_input, protocol):
"""Determine if entry has already been added to HA."""
user_host = user_input.get(CONF_HOST)
user_port = user_input.get(CONF_PORT)
user_path = user_input.get(CONF_DEVICE_PATH)
user_baud = user_input.get(CONF_DEVICE_BAUD)
for entry in current_entries:
entry_host = entry.data.get(CONF_HOST)
entry_port = entry.data.get(CONF_PORT)
entry_path = entry.data.get(CONF_DEVICE_PATH)
entry_baud = entry.data.get(CONF_DEVICE_BAUD)
if (
protocol == PROTOCOL_SOCKET
and user_host == entry_host
and user_port == entry_port
):
return True
if (
protocol == PROTOCOL_SERIAL
and user_baud == entry_baud
and user_path == entry_path
):
return True
return False
| 33.920765
| 105
| 0.560129
|
import logging
from adext import AdExt
from alarmdecoder.devices import SerialDevice, SocketDevice
from alarmdecoder.util import NoDeviceError
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components.binary_sensor import DEVICE_CLASSES
from homeassistant.const import CONF_HOST, CONF_PORT, CONF_PROTOCOL
from homeassistant.core import callback
from .const import (
CONF_ALT_NIGHT_MODE,
CONF_AUTO_BYPASS,
CONF_CODE_ARM_REQUIRED,
CONF_DEVICE_BAUD,
CONF_DEVICE_PATH,
CONF_RELAY_ADDR,
CONF_RELAY_CHAN,
CONF_ZONE_LOOP,
CONF_ZONE_NAME,
CONF_ZONE_NUMBER,
CONF_ZONE_RFID,
CONF_ZONE_TYPE,
DEFAULT_ARM_OPTIONS,
DEFAULT_DEVICE_BAUD,
DEFAULT_DEVICE_HOST,
DEFAULT_DEVICE_PATH,
DEFAULT_DEVICE_PORT,
DEFAULT_ZONE_OPTIONS,
DEFAULT_ZONE_TYPE,
DOMAIN,
OPTIONS_ARM,
OPTIONS_ZONES,
PROTOCOL_SERIAL,
PROTOCOL_SOCKET,
)
EDIT_KEY = "edit_selection"
EDIT_ZONES = "Zones"
EDIT_SETTINGS = "Arming Settings"
_LOGGER = logging.getLogger(__name__)
class AlarmDecoderFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
VERSION = 1
def __init__(self):
self.protocol = None
@staticmethod
@callback
def async_get_options_flow(config_entry):
return AlarmDecoderOptionsFlowHandler(config_entry)
async def async_step_user(self, user_input=None):
if user_input is not None:
self.protocol = user_input[CONF_PROTOCOL]
return await self.async_step_protocol()
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{
vol.Required(CONF_PROTOCOL): vol.In(
[PROTOCOL_SOCKET, PROTOCOL_SERIAL]
),
}
),
)
async def async_step_protocol(self, user_input=None):
errors = {}
if user_input is not None:
if _device_already_added(
self._async_current_entries(), user_input, self.protocol
):
return self.async_abort(reason="already_configured")
connection = {}
baud = None
if self.protocol == PROTOCOL_SOCKET:
host = connection[CONF_HOST] = user_input[CONF_HOST]
port = connection[CONF_PORT] = user_input[CONF_PORT]
title = f"{host}:{port}"
device = SocketDevice(interface=(host, port))
if self.protocol == PROTOCOL_SERIAL:
path = connection[CONF_DEVICE_PATH] = user_input[CONF_DEVICE_PATH]
baud = connection[CONF_DEVICE_BAUD] = user_input[CONF_DEVICE_BAUD]
title = path
device = SerialDevice(interface=path)
controller = AdExt(device)
def test_connection():
controller.open(baud)
controller.close()
try:
await self.hass.async_add_executor_job(test_connection)
return self.async_create_entry(
title=title, data={CONF_PROTOCOL: self.protocol, **connection}
)
except NoDeviceError:
errors["base"] = "cannot_connect"
except Exception:
_LOGGER.exception("Unexpected exception during AlarmDecoder setup")
errors["base"] = "unknown"
if self.protocol == PROTOCOL_SOCKET:
schema = vol.Schema(
{
vol.Required(CONF_HOST, default=DEFAULT_DEVICE_HOST): str,
vol.Required(CONF_PORT, default=DEFAULT_DEVICE_PORT): int,
}
)
if self.protocol == PROTOCOL_SERIAL:
schema = vol.Schema(
{
vol.Required(CONF_DEVICE_PATH, default=DEFAULT_DEVICE_PATH): str,
vol.Required(CONF_DEVICE_BAUD, default=DEFAULT_DEVICE_BAUD): int,
}
)
return self.async_show_form(
step_id="protocol",
data_schema=schema,
errors=errors,
)
class AlarmDecoderOptionsFlowHandler(config_entries.OptionsFlow):
def __init__(self, config_entry: config_entries.ConfigEntry):
self.arm_options = config_entry.options.get(OPTIONS_ARM, DEFAULT_ARM_OPTIONS)
self.zone_options = config_entry.options.get(
OPTIONS_ZONES, DEFAULT_ZONE_OPTIONS
)
self.selected_zone = None
async def async_step_init(self, user_input=None):
if user_input is not None:
if user_input[EDIT_KEY] == EDIT_SETTINGS:
return await self.async_step_arm_settings()
if user_input[EDIT_KEY] == EDIT_ZONES:
return await self.async_step_zone_select()
return self.async_show_form(
step_id="init",
data_schema=vol.Schema(
{
vol.Required(EDIT_KEY, default=EDIT_SETTINGS): vol.In(
[EDIT_SETTINGS, EDIT_ZONES]
)
},
),
)
async def async_step_arm_settings(self, user_input=None):
if user_input is not None:
return self.async_create_entry(
title="",
data={OPTIONS_ARM: user_input, OPTIONS_ZONES: self.zone_options},
)
return self.async_show_form(
step_id="arm_settings",
data_schema=vol.Schema(
{
vol.Optional(
CONF_ALT_NIGHT_MODE,
default=self.arm_options[CONF_ALT_NIGHT_MODE],
): bool,
vol.Optional(
CONF_AUTO_BYPASS, default=self.arm_options[CONF_AUTO_BYPASS]
): bool,
vol.Optional(
CONF_CODE_ARM_REQUIRED,
default=self.arm_options[CONF_CODE_ARM_REQUIRED],
): bool,
},
),
)
async def async_step_zone_select(self, user_input=None):
errors = _validate_zone_input(user_input)
if user_input is not None and not errors:
self.selected_zone = str(
int(user_input[CONF_ZONE_NUMBER])
)
return await self.async_step_zone_details()
return self.async_show_form(
step_id="zone_select",
data_schema=vol.Schema({vol.Required(CONF_ZONE_NUMBER): str}),
errors=errors,
)
async def async_step_zone_details(self, user_input=None):
errors = _validate_zone_input(user_input)
if user_input is not None and not errors:
zone_options = self.zone_options.copy()
zone_id = self.selected_zone
zone_options[zone_id] = _fix_input_types(user_input)
if CONF_ZONE_NAME not in zone_options[zone_id]:
zone_options.pop(zone_id)
return self.async_create_entry(
title="",
data={OPTIONS_ARM: self.arm_options, OPTIONS_ZONES: zone_options},
)
existing_zone_settings = self.zone_options.get(self.selected_zone, {})
return self.async_show_form(
step_id="zone_details",
description_placeholders={CONF_ZONE_NUMBER: self.selected_zone},
data_schema=vol.Schema(
{
vol.Optional(
CONF_ZONE_NAME,
description={
"suggested_value": existing_zone_settings.get(
CONF_ZONE_NAME
)
},
): str,
vol.Optional(
CONF_ZONE_TYPE,
default=existing_zone_settings.get(
CONF_ZONE_TYPE, DEFAULT_ZONE_TYPE
),
): vol.In(DEVICE_CLASSES),
vol.Optional(
CONF_ZONE_RFID,
description={
"suggested_value": existing_zone_settings.get(
CONF_ZONE_RFID
)
},
): str,
vol.Optional(
CONF_ZONE_LOOP,
description={
"suggested_value": existing_zone_settings.get(
CONF_ZONE_LOOP
)
},
): str,
vol.Optional(
CONF_RELAY_ADDR,
description={
"suggested_value": existing_zone_settings.get(
CONF_RELAY_ADDR
)
},
): str,
vol.Optional(
CONF_RELAY_CHAN,
description={
"suggested_value": existing_zone_settings.get(
CONF_RELAY_CHAN
)
},
): str,
}
),
errors=errors,
)
def _validate_zone_input(zone_input):
if not zone_input:
return {}
errors = {}
if (CONF_RELAY_ADDR in zone_input and CONF_RELAY_CHAN not in zone_input) or (
CONF_RELAY_ADDR not in zone_input and CONF_RELAY_CHAN in zone_input
):
errors["base"] = "relay_inclusive"
for key in [CONF_ZONE_NUMBER, CONF_ZONE_LOOP, CONF_RELAY_ADDR, CONF_RELAY_CHAN]:
if key in zone_input:
try:
int(zone_input[key])
except ValueError:
errors[key] = "int"
if CONF_ZONE_LOOP in zone_input and CONF_ZONE_RFID not in zone_input:
errors[CONF_ZONE_LOOP] = "loop_rfid"
if (
CONF_ZONE_LOOP in zone_input
and zone_input[CONF_ZONE_LOOP].isdigit()
and int(zone_input[CONF_ZONE_LOOP]) not in list(range(1, 5))
):
errors[CONF_ZONE_LOOP] = "loop_range"
return errors
def _fix_input_types(zone_input):
for key in [CONF_ZONE_LOOP, CONF_RELAY_ADDR, CONF_RELAY_CHAN]:
if key in zone_input:
zone_input[key] = int(zone_input[key])
return zone_input
def _device_already_added(current_entries, user_input, protocol):
user_host = user_input.get(CONF_HOST)
user_port = user_input.get(CONF_PORT)
user_path = user_input.get(CONF_DEVICE_PATH)
user_baud = user_input.get(CONF_DEVICE_BAUD)
for entry in current_entries:
entry_host = entry.data.get(CONF_HOST)
entry_port = entry.data.get(CONF_PORT)
entry_path = entry.data.get(CONF_DEVICE_PATH)
entry_baud = entry.data.get(CONF_DEVICE_BAUD)
if (
protocol == PROTOCOL_SOCKET
and user_host == entry_host
and user_port == entry_port
):
return True
if (
protocol == PROTOCOL_SERIAL
and user_baud == entry_baud
and user_path == entry_path
):
return True
return False
| true
| true
|
1c46f51d76f2d9918be20948b378e49153ec1648
| 7,109
|
py
|
Python
|
svgpathtools/svg_io_sax.py
|
Vrroom/svgpathtools
|
b9621c9c340337cd044ae21c83e2917cd010dc8f
|
[
"MIT"
] | 2
|
2018-05-08T05:31:15.000Z
|
2022-01-27T11:51:04.000Z
|
svgpathtools/svg_io_sax.py
|
taoari/svgpathtools
|
9b1b8e78e10b99d6ca3d4b28e5b6b0d1596b8dc2
|
[
"MIT"
] | null | null | null |
svgpathtools/svg_io_sax.py
|
taoari/svgpathtools
|
9b1b8e78e10b99d6ca3d4b28e5b6b0d1596b8dc2
|
[
"MIT"
] | 3
|
2018-01-15T18:08:06.000Z
|
2018-10-11T09:19:49.000Z
|
"""(Experimental) replacement for import/export functionality SAX
"""
# External dependencies
from __future__ import division, absolute_import, print_function
import os
from xml.etree.ElementTree import iterparse, Element, ElementTree, SubElement
# Internal dependencies
from .parser import parse_path
from .parser import parse_transform
from .svg_to_paths import (path2pathd, ellipse2pathd, line2pathd,
polyline2pathd, polygon2pathd, rect2pathd)
from .misctools import open_in_browser
from .path import *
# To maintain forward/backward compatibility
try:
str = basestring
except NameError:
pass
NAME_SVG = "svg"
ATTR_VERSION = "version"
VALUE_SVG_VERSION = "1.1"
ATTR_XMLNS = "xmlns"
VALUE_XMLNS = "http://www.w3.org/2000/svg"
ATTR_XMLNS_LINK = "xmlns:xlink"
VALUE_XLINK = "http://www.w3.org/1999/xlink"
ATTR_XMLNS_EV = "xmlns:ev"
VALUE_XMLNS_EV = "http://www.w3.org/2001/xml-events"
ATTR_WIDTH = "width"
ATTR_HEIGHT = "height"
ATTR_VIEWBOX = "viewBox"
NAME_PATH = "path"
ATTR_DATA = "d"
ATTR_FILL = "fill"
ATTR_STROKE = "stroke"
ATTR_STROKE_WIDTH = "stroke-width"
ATTR_TRANSFORM = "transform"
VALUE_NONE = "none"
class SaxDocument:
def __init__(self, filename):
"""A container for a SAX SVG light tree objects document.
This class provides functions for extracting SVG data into Path objects.
Args:
filename (str): The filename of the SVG file
"""
self.root_values = {}
self.tree = []
# remember location of original svg file
if filename is not None and os.path.dirname(filename) == '':
self.original_filename = os.path.join(os.getcwd(), filename)
else:
self.original_filename = filename
if filename is not None:
self.sax_parse(filename)
def sax_parse(self, filename):
self.root_values = {}
self.tree = []
stack = []
values = {}
matrix = None
for event, elem in iterparse(filename, events=('start', 'end')):
if event == 'start':
stack.append((values, matrix))
if matrix is not None:
matrix = matrix.copy() # copy of matrix
current_values = values
values = {}
values.update(current_values) # copy of dictionary
attrs = elem.attrib
values.update(attrs)
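                # elem.tag arrives as '{http://www.w3.org/2000/svg}<name>';
                # slicing off the first 28 characters strips the namespace prefix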
name = elem.tag[28:]
if "style" in attrs:
for equate in attrs["style"].split(";"):
equal_item = equate.split(":")
values[equal_item[0]] = equal_item[1]
if "transform" in attrs:
transform_matrix = parse_transform(attrs["transform"])
if matrix is None:
matrix = np.identity(3)
matrix = transform_matrix.dot(matrix)
if "svg" == name:
current_values = values
values = {}
values.update(current_values)
self.root_values = current_values
continue
elif "g" == name:
continue
elif 'path' == name:
values['d'] = path2pathd(values)
elif 'circle' == name:
values["d"] = ellipse2pathd(values)
elif 'ellipse' == name:
values["d"] = ellipse2pathd(values)
elif 'line' == name:
values["d"] = line2pathd(values)
elif 'polyline' == name:
values["d"] = polyline2pathd(values['points'])
elif 'polygon' == name:
values["d"] = polygon2pathd(values['points'])
elif 'rect' == name:
values["d"] = rect2pathd(values)
else:
continue
values["matrix"] = matrix
values["name"] = name
self.tree.append(values)
else:
v = stack.pop()
values = v[0]
matrix = v[1]
def flatten_all_paths(self):
flat = []
for values in self.tree:
pathd = values['d']
matrix = values['matrix']
parsed_path = parse_path(pathd)
if matrix is not None:
transform(parsed_path, matrix)
flat.append(parsed_path)
return flat
def get_pathd_and_matrix(self):
flat = []
for values in self.tree:
pathd = values['d']
matrix = values['matrix']
flat.append((pathd, matrix))
return flat
def generate_dom(self):
root = Element(NAME_SVG)
root.set(ATTR_VERSION, VALUE_SVG_VERSION)
root.set(ATTR_XMLNS, VALUE_XMLNS)
root.set(ATTR_XMLNS_LINK, VALUE_XLINK)
root.set(ATTR_XMLNS_EV, VALUE_XMLNS_EV)
width = self.root_values.get(ATTR_WIDTH, None)
height = self.root_values.get(ATTR_HEIGHT, None)
if width is not None:
root.set(ATTR_WIDTH, width)
if height is not None:
root.set(ATTR_HEIGHT, height)
viewbox = self.root_values.get(ATTR_VIEWBOX, None)
if viewbox is not None:
root.set(ATTR_VIEWBOX, viewbox)
identity = np.identity(3)
for values in self.tree:
pathd = values.get('d', '')
matrix = values.get('matrix', None)
# path_value = parse_path(pathd)
path = SubElement(root, NAME_PATH)
if matrix is not None and not np.all(np.equal(matrix, identity)):
matrix_string = "matrix("
matrix_string += " "
matrix_string += str(matrix[0][0])
matrix_string += " "
matrix_string += str(matrix[1][0])
matrix_string += " "
matrix_string += str(matrix[0][1])
matrix_string += " "
matrix_string += str(matrix[1][1])
matrix_string += " "
matrix_string += str(matrix[0][2])
matrix_string += " "
matrix_string += str(matrix[1][2])
matrix_string += ")"
path.set(ATTR_TRANSFORM, matrix_string)
if ATTR_DATA in values:
path.set(ATTR_DATA, values[ATTR_DATA])
if ATTR_FILL in values:
path.set(ATTR_FILL, values[ATTR_FILL])
if ATTR_STROKE in values:
path.set(ATTR_STROKE, values[ATTR_STROKE])
return ElementTree(root)
def save(self, filename):
with open(filename, 'wb') as output_svg:
dom_tree = self.generate_dom()
dom_tree.write(output_svg)
def display(self, filename=None):
"""Displays/opens the doc using the OS's default application."""
if filename is None:
filename = 'display_temp.svg'
self.save(filename)
open_in_browser(filename)
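# Minimal usage sketch (the filenames are hypothetical): parse an SVG, flatten
# its paths with transforms applied, and write a normalized copy.
#     doc = SaxDocument('drawing.svg')
#     paths = doc.flatten_all_paths()   # list of Path objects
#     doc.save('normalized.svg')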
| 35.723618
| 80
| 0.544943
|
from __future__ import division, absolute_import, print_function
import os
from xml.etree.ElementTree import iterparse, Element, ElementTree, SubElement
from .parser import parse_path
from .parser import parse_transform
from .svg_to_paths import (path2pathd, ellipse2pathd, line2pathd,
polyline2pathd, polygon2pathd, rect2pathd)
from .misctools import open_in_browser
from .path import *
try:
str = basestring
except NameError:
pass
NAME_SVG = "svg"
ATTR_VERSION = "version"
VALUE_SVG_VERSION = "1.1"
ATTR_XMLNS = "xmlns"
VALUE_XMLNS = "http://www.w3.org/2000/svg"
ATTR_XMLNS_LINK = "xmlns:xlink"
VALUE_XLINK = "http://www.w3.org/1999/xlink"
ATTR_XMLNS_EV = "xmlns:ev"
VALUE_XMLNS_EV = "http://www.w3.org/2001/xml-events"
ATTR_WIDTH = "width"
ATTR_HEIGHT = "height"
ATTR_VIEWBOX = "viewBox"
NAME_PATH = "path"
ATTR_DATA = "d"
ATTR_FILL = "fill"
ATTR_STROKE = "stroke"
ATTR_STROKE_WIDTH = "stroke-width"
ATTR_TRANSFORM = "transform"
VALUE_NONE = "none"
class SaxDocument:
def __init__(self, filename):
self.root_values = {}
self.tree = []
if filename is not None and os.path.dirname(filename) == '':
self.original_filename = os.path.join(os.getcwd(), filename)
else:
self.original_filename = filename
if filename is not None:
self.sax_parse(filename)
def sax_parse(self, filename):
self.root_values = {}
self.tree = []
stack = []
values = {}
matrix = None
for event, elem in iterparse(filename, events=('start', 'end')):
if event == 'start':
stack.append((values, matrix))
if matrix is not None:
matrix = matrix.copy()
current_values = values
values = {}
values.update(current_values)
attrs = elem.attrib
values.update(attrs)
name = elem.tag[28:]
if "style" in attrs:
for equate in attrs["style"].split(";"):
equal_item = equate.split(":")
values[equal_item[0]] = equal_item[1]
if "transform" in attrs:
transform_matrix = parse_transform(attrs["transform"])
if matrix is None:
matrix = np.identity(3)
matrix = transform_matrix.dot(matrix)
if "svg" == name:
current_values = values
values = {}
values.update(current_values)
self.root_values = current_values
continue
elif "g" == name:
continue
elif 'path' == name:
values['d'] = path2pathd(values)
elif 'circle' == name:
values["d"] = ellipse2pathd(values)
elif 'ellipse' == name:
values["d"] = ellipse2pathd(values)
elif 'line' == name:
values["d"] = line2pathd(values)
elif 'polyline' == name:
values["d"] = polyline2pathd(values['points'])
elif 'polygon' == name:
values["d"] = polygon2pathd(values['points'])
elif 'rect' == name:
values["d"] = rect2pathd(values)
else:
continue
values["matrix"] = matrix
values["name"] = name
self.tree.append(values)
else:
v = stack.pop()
values = v[0]
matrix = v[1]
def flatten_all_paths(self):
flat = []
for values in self.tree:
pathd = values['d']
matrix = values['matrix']
parsed_path = parse_path(pathd)
if matrix is not None:
transform(parsed_path, matrix)
flat.append(parsed_path)
return flat
def get_pathd_and_matrix(self):
flat = []
for values in self.tree:
pathd = values['d']
matrix = values['matrix']
flat.append((pathd, matrix))
return flat
def generate_dom(self):
root = Element(NAME_SVG)
root.set(ATTR_VERSION, VALUE_SVG_VERSION)
root.set(ATTR_XMLNS, VALUE_XMLNS)
root.set(ATTR_XMLNS_LINK, VALUE_XLINK)
root.set(ATTR_XMLNS_EV, VALUE_XMLNS_EV)
width = self.root_values.get(ATTR_WIDTH, None)
height = self.root_values.get(ATTR_HEIGHT, None)
if width is not None:
root.set(ATTR_WIDTH, width)
if height is not None:
root.set(ATTR_HEIGHT, height)
viewbox = self.root_values.get(ATTR_VIEWBOX, None)
if viewbox is not None:
root.set(ATTR_VIEWBOX, viewbox)
identity = np.identity(3)
for values in self.tree:
pathd = values.get('d', '')
matrix = values.get('matrix', None)
path = SubElement(root, NAME_PATH)
if matrix is not None and not np.all(np.equal(matrix, identity)):
matrix_string = "matrix("
matrix_string += " "
matrix_string += str(matrix[0][0])
matrix_string += " "
matrix_string += str(matrix[1][0])
matrix_string += " "
matrix_string += str(matrix[0][1])
matrix_string += " "
matrix_string += str(matrix[1][1])
matrix_string += " "
matrix_string += str(matrix[0][2])
matrix_string += " "
matrix_string += str(matrix[1][2])
matrix_string += ")"
path.set(ATTR_TRANSFORM, matrix_string)
if ATTR_DATA in values:
path.set(ATTR_DATA, values[ATTR_DATA])
if ATTR_FILL in values:
path.set(ATTR_FILL, values[ATTR_FILL])
if ATTR_STROKE in values:
path.set(ATTR_STROKE, values[ATTR_STROKE])
return ElementTree(root)
def save(self, filename):
with open(filename, 'wb') as output_svg:
dom_tree = self.generate_dom()
dom_tree.write(output_svg)
def display(self, filename=None):
if filename is None:
filename = 'display_temp.svg'
self.save(filename)
open_in_browser(filename)
| true
| true
|
1c46f578bdd65913273fe1b4661b4a5a024c948b
| 301
|
py
|
Python
|
3. Python Advanced (September 2021)/3.2 Python OOP (October 2021)/01. First Steps In OOP/04_car.py
|
kzborisov/SoftUni
|
ccb2b8850adc79bfb2652a45124c3ff11183412e
|
[
"MIT"
] | 1
|
2021-02-07T07:51:12.000Z
|
2021-02-07T07:51:12.000Z
|
3. Python Advanced (September 2021)/3.2 Python OOP (October 2021)/01. First Steps In OOP/04_car.py
|
kzborisov/softuni
|
9c5b45c74fa7d9748e9b3ea65a5ae4e15c142751
|
[
"MIT"
] | null | null | null |
3. Python Advanced (September 2021)/3.2 Python OOP (October 2021)/01. First Steps In OOP/04_car.py
|
kzborisov/softuni
|
9c5b45c74fa7d9748e9b3ea65a5ae4e15c142751
|
[
"MIT"
] | null | null | null |
class Car:
def __init__(self, name, model, engine):
self.name = name
self.model = model
self.engine = engine
def get_info(self):
return f"This is {self.name} {self.model} with engine {self.engine}"
car = Car("Kia", "Rio", "1.3L B3 I4")
print(car.get_info())
| 23.153846
| 76
| 0.598007
|
class Car:
def __init__(self, name, model, engine):
self.name = name
self.model = model
self.engine = engine
def get_info(self):
return f"This is {self.name} {self.model} with engine {self.engine}"
car = Car("Kia", "Rio", "1.3L B3 I4")
print(car.get_info())
| true
| true
|
1c46f59fb85d988d23d303ed82be39df0f9802c3
| 1,990
|
py
|
Python
|
contact_form/tests/views.py
|
nunataksoftware/django-contact-form-updated
|
ad3da22a6c12c78e59fe05bf4e4f9f5a1e654e03
|
[
"BSD-3-Clause"
] | null | null | null |
contact_form/tests/views.py
|
nunataksoftware/django-contact-form-updated
|
ad3da22a6c12c78e59fe05bf4e4f9f5a1e654e03
|
[
"BSD-3-Clause"
] | null | null | null |
contact_form/tests/views.py
|
nunataksoftware/django-contact-form-updated
|
ad3da22a6c12c78e59fe05bf4e4f9f5a1e654e03
|
[
"BSD-3-Clause"
] | null | null | null |
from django.conf import settings
from django.core import mail
from django.core.urlresolvers import reverse
from django.test import TestCase
class ViewTests(TestCase):
urls = 'contact_form.urls'
def test_get(self):
"""
HTTP GET on the form view just shows the form.
"""
contact_url = reverse('contact_form')
response = self.client.get(contact_url)
self.assertEqual(200, response.status_code)
self.assertTemplateUsed(response,
'contact_form/contact_form.html')
def test_send(self):
"""
Valid data through the view results in a successful send.
"""
contact_url = reverse('contact_form')
data = {'name': 'Test',
'email': 'test@example.com',
'body': 'Test message'}
response = self.client.post(contact_url,
data=data)
self.assertRedirects(response,
reverse('contact_form_sent'))
self.assertEqual(1, len(mail.outbox))
message = mail.outbox[0]
self.assertEqual([data['email']],
message.recipients())
self.assertTrue(data['body'] in message.body)
self.assertEqual(settings.DEFAULT_FROM_EMAIL,
message.from_email)
def test_invalid(self):
"""
Invalid data doesn't work.
"""
contact_url = reverse('contact_form')
data = {'name': 'Test',
'body': 'Test message'}
response = self.client.post(contact_url,
data=data)
self.assertEqual(200, response.status_code)
self.assertFormError(response,
'form',
'email',
'This field is required.')
self.assertEqual(0, len(mail.outbox))
| 29.701493
| 65
| 0.522613
|
from django.conf import settings
from django.core import mail
from django.core.urlresolvers import reverse
from django.test import TestCase
class ViewTests(TestCase):
urls = 'contact_form.urls'
def test_get(self):
contact_url = reverse('contact_form')
response = self.client.get(contact_url)
self.assertEqual(200, response.status_code)
self.assertTemplateUsed(response,
'contact_form/contact_form.html')
def test_send(self):
contact_url = reverse('contact_form')
data = {'name': 'Test',
'email': 'test@example.com',
'body': 'Test message'}
response = self.client.post(contact_url,
data=data)
self.assertRedirects(response,
reverse('contact_form_sent'))
self.assertEqual(1, len(mail.outbox))
message = mail.outbox[0]
self.assertEqual([data['email']],
message.recipients())
self.assertTrue(data['body'] in message.body)
self.assertEqual(settings.DEFAULT_FROM_EMAIL,
message.from_email)
def test_invalid(self):
contact_url = reverse('contact_form')
data = {'name': 'Test',
'body': 'Test message'}
response = self.client.post(contact_url,
data=data)
self.assertEqual(200, response.status_code)
self.assertFormError(response,
'form',
'email',
'This field is required.')
self.assertEqual(0, len(mail.outbox))
| true
| true
|
1c46f61d2a6ed620777848b6db1e240c81c79142
| 16,339
|
py
|
Python
|
cadnano/util.py
|
mctrinh/cadnano2.5
|
d8254f24eef5fd77b4fb2b1a9642a8eea2e3c736
|
[
"BSD-3-Clause"
] | 1
|
2022-03-27T14:37:32.000Z
|
2022-03-27T14:37:32.000Z
|
cadnano/util.py
|
mctrinh/cadnano2.5
|
d8254f24eef5fd77b4fb2b1a9642a8eea2e3c736
|
[
"BSD-3-Clause"
] | null | null | null |
cadnano/util.py
|
mctrinh/cadnano2.5
|
d8254f24eef5fd77b4fb2b1a9642a8eea2e3c736
|
[
"BSD-3-Clause"
] | 1
|
2021-01-22T02:29:38.000Z
|
2021-01-22T02:29:38.000Z
|
"""
util.py
"""
import argparse
import inspect
import logging
import logging.handlers
import os
import platform
import string
import sys
from os import path
from traceback import extract_stack
logger = logging.getLogger(__name__)
IS_PY_3 = int(sys.version_info[0] > 2)
def clamp(x, min_x, max_x):
if x < min_x:
return min_x
elif x > max_x:
return max_x
else:
return x
def overlap(x, y, a, b):
"""Finds the overlap of (x, y) and (a, b).
Assumes an overlap exists, i.e. y >= a and b >= x.
"""
c = clamp(x, a, b)
d = clamp(y, a, b)
return c, d
# end def
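# Example: overlap(1, 5, 3, 8) == (3, 5) -- the shared interval of (1, 5) and (3, 8).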
try:
from termcolor import colored
except ImportError:
print("pip3 install termcolor")
def colored(s, color=None, **kwargs):
return s
def trace(n):
"""Returns a stack trace n frames deep"""
s = extract_stack()
frames = []
for f in s[-n-1:-1]:
# f is a stack frame like
# ('/path/script.py', 42, 'funcname', 'current = line - of / code')
frames.append((colored(path.basename(f[0]) + ':%i' % f[1], 'blue') + '(' + colored(f[2], 'green') + ')'))
sep = colored(" > ", 'yellow')
return sep.join(frames)
if IS_PY_3:
complement = str.maketrans('ACGTacgt', 'TGCATGCA')
else:
complement = string.maketrans('ACGTacgt', 'TGCATGCA')
def rcomp(seqStr):
"""Returns the reverse complement of the sequence in seqStr."""
return seqStr.translate(complement)[::-1]
def comp(seqStr):
"""Returns the complement of the sequence in seqStr."""
return seqStr.translate(complement)
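# Example: comp('ACGT') == 'TGCA', and rcomp('ACGT') == 'ACGT' (complement, then reversed).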
if IS_PY_3:
whitetoQ = str.maketrans(' |', '??')
else:
whitetoQ = string.maketrans(' |', '??')
def markwhite(seqStr):
return seqStr.translate(whitetoQ)
def nowhite(seqStr):
"""Gets rid of non-letters in a string."""
    return ''.join([c for c in seqStr if c in string.ascii_letters])
def nearest(a, l): return min(l, key=lambda x: abs(x - a))
def isWindows():
"""Returns True if platform is detected as Windows, otherwise False"""
if platform.system() == 'Windows':
return True
else:
return False
def isMac():
"""Returns True if platform is detected as Darwin, otherwise False"""
try:
return platform.system() == 'Darwin'
except Exception:
return path.exists('/System/Library/CoreServices/Finder.app')
def isLinux():
"""Returns True if platform is detected as Linux, otherwise False"""
if platform.system() == 'Linux':
return True
else:
return False
def methodName():
"""Returns string containing name of the calling method."""
return inspect.stack()[1][3]
def execCommandList(model_object, commands, desc=None, use_undostack=True):
"""
This is a wrapper for performing QUndoCommands, meant to ensure
uniform handling of the undoStack and macro descriptions.
When using the undoStack, commands are pushed onto self.undoStack()
as part of a macro with description desc. Otherwise, command redo
methods are called directly.
"""
if use_undostack:
us = model_object.undoStack()
us.beginMacro(desc)
for c in commands:
us.push(c)
us.endMacro()
else:
for c in commands:
c.redo()
# end def
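# Minimal usage sketch (AddStrandCommand and the part object are hypothetical):
#     cmds = [AddStrandCommand(part, 0, 21)]
#     execCommandList(part, cmds, desc="Add strand", use_undostack=True)
# pushes every command inside a single undo macro named "Add strand".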
def doCmd(model_object, command, use_undostack):
"""Helper for pushing onto the undostack
"""
if use_undostack:
model_object.undoStack().push(command)
else:
command.redo()
# end def
def finalizeCommands(model_object, commands, desc=None):
"""Used to enable interaction with the model but not push
commands to the undostack. In practice:
1. Call a bunch of commands and don't push them to the undostack AKA:
cmd.redo()
    2. call finalizeCommands() to push the cumulative change to the stack
    This assumes that the UndoCommands provided to this function represent
a transition from the initial state to the final state
Note:
UndoCommands need to implement specialUndo (e.g. just call normal undo.)
"""
# 1. undo the command to get back to the initial _state
for c in commands:
c.specialUndo()
# c.undo()
    # 2. push all the "undoable" commands to the undostack
model_object.undoStack().beginMacro(desc)
for c in commands:
model_object.undoStack().push(c)
model_object.undoStack().endMacro()
# end def
def this_path():
return os.path.abspath(os.path.dirname(__file__))
# maps plugin path (extension stripped) -> plugin module
loadedPlugins = {}
def unloadedPlugins():
"""Returns a list of plugin paths that have yet to
be loaded but are in the top level of one of the
search directories specified in pluginDirs"""
internalPlugins = os.path.join(this_path(), 'plugins')
pluginDirs = [internalPlugins]
results = []
for pluginDir in pluginDirs:
if not os.path.isdir(pluginDir):
continue
for dirent in os.listdir(pluginDir):
f = os.path.join(pluginDir, dirent)
isfile = os.path.isfile(f)
hasValidSuffix = dirent.endswith(('.py', '.so'))
if isfile and hasValidSuffix:
results.append(f)
if os.path.isdir(f) and\
os.path.isfile(os.path.join(f, '__init__.py')):
results.append(f)
return list(filter(lambda x: x not in loadedPlugins, results))
def loadPlugin(f):
pass
# path, fname = os.path.split(f)
# name, ext = os.path.splitext(fname)
# pluginKey = os.path.join(path, name)
# try:
# mod = loadedPlugins[pluginKey]
# return mod
# except KeyError:
# pass
# file, filename, data = imp.find_module(name, [path])
# mod = imp.load_module(name, file, filename, data)
# loadedPlugins[pluginKey] = mod
# return mod
def loadAllPlugins():
loadedAPlugin = False
for p in unloadedPlugins():
loadPlugin(p)
loadedAPlugin = True
return loadedAPlugin
def beginSuperMacro(model_object, desc=None):
"""
SuperMacros can be used to nest multiple command lists.
Normally execCommandList macros all the commands in a list.
In some cases, multiple command lists need to be executed separately
because of dependency issues. (e.g. in part.autoStaple, strands
must be completely 1. created and 2. split before 3. xover installation.)
"""
model_object.undoStack().beginMacro(desc)
# end def
def endSuperMacro(model_object):
"""Ends a SuperMacro. Should be called after beginSuperMacro."""
model_object.undoStack().endMacro()
# end def
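# Minimal usage sketch (the command lists are hypothetical), mirroring the
# autoStaple sequence described in the docstring above:
#     beginSuperMacro(part, desc="autoStaple")
#     execCommandList(part, create_cmds, desc="create")
#     execCommandList(part, split_cmds, desc="split")
#     endSuperMacro(part)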
def findChild(self):
"""When called when self is a QGraphicsItem, iterates through self's
childItems(), placing a red rectangle (a sibling of self) around
each item in sequence (press return to move between items). Since
the index of each child item is displayed as it is highlighted,
one can use findChild() to quickly get a reference to one of self's
children. At each step, one can type a command letter before
hitting return. The command will apply to the current child.
Command Letter: Action:
<return> Advance to next child
s<return> Show current child
S<return> Show current child, hide siblings
h<return> Hide current child
r<return> return current child
"""
from PyQt5.QtWidgets import QGraphicsRectItem
from PyQt5.QtGui import QPen
from PyQt5.QtCore import Qt
children = self.childItems()
parent = self.parentItem()
childVisibility = [(child, child.isVisible()) for child in children]
for n in range(len(children)):
child = children[n]
print("Highlighting %s.childItems()[%i] = %s" % (self, n, child))
childBR = child.mapToItem(parent, child.boundingRect())
childBR = childBR.boundingRect() # xform gives us a QPolygonF
debugHighlighter = QGraphicsRectItem(childBR, parent)
debugHighlighter.setPen(QPen(Qt.red))
debugHighlighter.setZValue(9001)
while True:
# wait for return to be pressed while spinning the event loop.
# also process single-character commands.
            command = input()
if command == 's': # Show current child
child.show()
            elif command == 'h': # Hide current child
child.hide()
elif command == 'S': # Show only current child
for c in children:
c.hide()
child.show()
elif command == 'r': # Return current child
for child, wasVisible in childVisibility:
child.setVisible(wasVisible)
return child
else:
break
debugHighlighter.scene().removeItem(debugHighlighter)
for child, wasVisible in childVisibility:
child.setVisible(wasVisible)
# end def
def parse_args(argv=None, gui=None):
"""Uses argparse to process commandline arguments.
Returns:
NameSpace object. This can easily be converted to a regular dict through:
argns.__dict__
This also presents a nice command line help to the user, exposed with --help flag:
python main.py --help
    If gui is set to "qt", then the parser will use parse_known_args. Unlike
    parse_args(), parse_known_args() will not abort by showing the help
    message and exiting if it finds any unrecognized command-line arguments.
Alternatively, you can initialize your app via:
app = QApplication(sys.argv)
parse_args(app.arguments())
QApplication.arguments() returns a list of arguments with all Qt arguments
stripped away. Qt command line args include:
-style=<style> -stylesheet=<stylesheet> -widgetcount -reverse -qmljsdebugger -session=<session>
"""
parser = argparse.ArgumentParser(description="cadnano 2.5")
parser.add_argument("--testing", "-t", action="store_true", help="Enable testing mode/environment.")
parser.add_argument("--profile", "-p", action="store_true", help="Profile app execution.")
parser.add_argument("--print-stats", "-P", action="store_true", help="Print profiling statistics.")
parser.add_argument("--interactive", "-i", action="store_true", help="Enable interactive (console) mode.")
parser.add_argument('--loglevel',
help="Specify logging level. Can be either DEBUG, INFO, WARNING, ERROR or any integer.")
parser.add_argument("--debug-modules", nargs='*', metavar="MODULE-STR",
help="Debug modules whose names start with any of the given strings. For instance, to "
"debug the cadnano file decoder, use --debug-modules cadnano.fileio.decode ."
"To debug all gui modules, use --debug-modules cadnano.gui .")
parser.add_argument("--file", "-f", metavar="designfile.json", help="cadnano design to load upon start up.")
if gui and (gui is True or gui.lower() == "qt"):
# Command line args might include Qt-specific switches and parameters.
argns, unused = parser.parse_known_args(argv)
else:
argns, unused = parser.parse_args(argv), None
return argns, unused
def init_logging(args=None, logdir=None):
"""Set up standard logging system based on parameters in args, e.g. loglevel and testing.
"""
if args is None:
args = {}
if logdir is None:
appname = "cadnano"
try:
import appdirs
logdir = appdirs.user_log_dir(appname)
except ImportError:
if os.environ.get('APPDATA'):
logdir = os.path.join(os.environ['APPDATA'], appname, "Logs")
elif sys.platform == 'darwin':
logdir = os.path.join(os.path.expanduser("~"), "Library", "Logs", appname)
else:
logdir = os.path.join(os.path.expanduser("~"), "."+appname, "logs")
if not os.path.exists(logdir):
os.makedirs(logdir)
logfilepath = os.path.join(logdir, appname+".log")
# We want different output formatting for file vs console logging output.
# File logs should be simple and easy to regex; console logs should be short and nice on the eyes
logfilefmt = "%(asctime)s %(levelname)-6s - %(name)s:%(lineno)s - %(funcName)s() - %(message)s"
logdatefmt = "%Y%m%d-%H:%M:%S"
loguserfmt = "%(asctime)s %(levelname)-5s %(module)30s:%(lineno)-4s%(funcName)16s() %(message)s"
logtimefmt = "%H:%M:%S" # Nice for output to user in console and testing.
# See https://docs.python.org/3/library/logging.html#logrecord-attributes for full list of attributes
# Loglevel (for console messages)
if args.get('loglevel'):
try:
loglevel = int(args['loglevel'])
except (TypeError, ValueError):
loglevel = getattr(logging, args['loglevel'].upper())
else:
loglevel = logging.DEBUG if args.get('testing') else logging.WARNING
if args.get('basic_logging', False):
logging.basicConfig(level=loglevel,
format=loguserfmt,
datefmt=logtimefmt,
filename=logfilepath)
logger.debug("Logging system initialized with loglevel %s", loglevel)
else:
# Set up custom logger:
logging.root.setLevel(logging.DEBUG)
# Add a rotating file handler:
logfilehandler = logging.handlers.RotatingFileHandler(logfilepath, maxBytes=2*2**20, backupCount=2)
logfileformatter = logging.Formatter(fmt=logfilefmt, datefmt=logdatefmt)
logfilehandler.setFormatter(logfileformatter)
logging.root.addHandler(logfilehandler)
print("Logging to file:", logfilepath)
# Add a custom StreamHandler for outputting to the console (default level is 0 = ANY)
logstreamhandler = logging.StreamHandler() # default stream is sys.stderr
logging.root.addHandler(logstreamhandler)
logstreamformatter = logging.Formatter(loguserfmt, logtimefmt)
logstreamhandler.setFormatter(logstreamformatter)
# Set filter for debugging:
if args.get('debug_modules'):
def module_debug_filter(record):
"""
            All Filters attached to a logger or handler are consulted.
            The record is discarded if any of the attached Filters return False.
"""
return any(record.name.startswith(modstr) for modstr in args['debug_modules']) \
or record.levelno >= loglevel
logstreamhandler.addFilter(module_debug_filter)
# Default level is 0, which is appropriate when using module_debug_filter
else:
# only set a min level if we are not using module_debug_filter. (Level is an additional filter.)
logstreamhandler.setLevel(loglevel)
logger.info("Logging system initialized...")
def read_fasta(fp):
name, seq = None, []
for line in fp:
line = line.rstrip()
if line.startswith(">"):
if name:
yield (name, ''.join(seq))
name, seq = line, []
else:
seq.append(line)
if name:
yield (name, ''.join(seq))
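# Minimal usage sketch ('seqs.fasta' is a hypothetical file):
#     with open('seqs.fasta') as fp:
#         for name, seq in read_fasta(fp):
#             print(name, len(seq))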
def qtdb_trace():
"""Make PDB usable by calling pyqtRemoveInputHook.
Otherwise, PDB is useless as the message
> QCoreApplication::exec: The event loop is already running
is spammed to the console.
When done, call qtdb_resume from the PDB prompt to return things back to
normal.
    Note that PDB will drop you into the current frame (this function), and
    hitting 'n' is required to return to the frame where you originally wanted PDB.
This could probably be optimized at some point to manipulate the frame PDB
starts in.
"""
if False:
logger.info('No debug')
return
else:
import pdb
from PyQt5.QtCore import pyqtRemoveInputHook
pyqtRemoveInputHook()
pdb.set_trace()
def qtdb_resume():
"""Resume normal PyQt operations after calling qtdb_trace.
Note that this function assumes that pyqtRemoveInputHook has been called
"""
from PyQt5.QtCore import pyqtRestoreInputHook
pyqtRestoreInputHook()
| 34.253669
| 113
| 0.63988
|
import argparse
import inspect
import logging
import logging.handlers
import os
import platform
import string
import sys
from os import path
from traceback import extract_stack
logger = logging.getLogger(__name__)
IS_PY_3 = int(sys.version_info[0] > 2)
def clamp(x, min_x, max_x):
if x < min_x:
return min_x
elif x > max_x:
return max_x
else:
return x
def overlap(x, y, a, b):
c = clamp(x, a, b)
d = clamp(y, a, b)
return c, d
try:
from termcolor import colored
except ImportError:
print("pip3 install termcolor")
def colored(s, color=None, **kwargs):
return s
def trace(n):
s = extract_stack()
frames = []
for f in s[-n-1:-1]:
frames.append((colored(path.basename(f[0]) + ':%i' % f[1], 'blue') + '(' + colored(f[2], 'green') + ')'))
sep = colored(" > ", 'yellow')
return sep.join(frames)
if IS_PY_3:
complement = str.maketrans('ACGTacgt', 'TGCATGCA')
else:
complement = string.maketrans('ACGTacgt', 'TGCATGCA')
def rcomp(seqStr):
return seqStr.translate(complement)[::-1]
def comp(seqStr):
return seqStr.translate(complement)
if IS_PY_3:
whitetoQ = str.maketrans(' |', '??')
else:
whitetoQ = string.maketrans(' |', '??')
def markwhite(seqStr):
return seqStr.translate(whitetoQ)
def nowhite(seqStr):
    return ''.join([c for c in seqStr if c in string.ascii_letters])
def nearest(a, l): return min(l, key=lambda x: abs(x - a))
def isWindows():
if platform.system() == 'Windows':
return True
else:
return False
def isMac():
try:
return platform.system() == 'Darwin'
except Exception:
return path.exists('/System/Library/CoreServices/Finder.app')
def isLinux():
if platform.system() == 'Linux':
return True
else:
return False
def methodName():
return inspect.stack()[1][3]
def execCommandList(model_object, commands, desc=None, use_undostack=True):
if use_undostack:
us = model_object.undoStack()
us.beginMacro(desc)
for c in commands:
us.push(c)
us.endMacro()
else:
for c in commands:
c.redo()
def doCmd(model_object, command, use_undostack):
if use_undostack:
model_object.undoStack().push(command)
else:
command.redo()
def finalizeCommands(model_object, commands, desc=None):
for c in commands:
c.specialUndo()
model_object.undoStack().beginMacro(desc)
for c in commands:
model_object.undoStack().push(c)
model_object.undoStack().endMacro()
def this_path():
return os.path.abspath(os.path.dirname(__file__))
loadedPlugins = {}
def unloadedPlugins():
internalPlugins = os.path.join(this_path(), 'plugins')
pluginDirs = [internalPlugins]
results = []
for pluginDir in pluginDirs:
if not os.path.isdir(pluginDir):
continue
for dirent in os.listdir(pluginDir):
f = os.path.join(pluginDir, dirent)
isfile = os.path.isfile(f)
hasValidSuffix = dirent.endswith(('.py', '.so'))
if isfile and hasValidSuffix:
results.append(f)
if os.path.isdir(f) and\
os.path.isfile(os.path.join(f, '__init__.py')):
results.append(f)
return list(filter(lambda x: x not in loadedPlugins, results))
def loadPlugin(f):
pass
def loadAllPlugins():
loadedAPlugin = False
for p in unloadedPlugins():
loadPlugin(p)
loadedAPlugin = True
return loadedAPlugin
def beginSuperMacro(model_object, desc=None):
model_object.undoStack().beginMacro(desc)
def endSuperMacro(model_object):
model_object.undoStack().endMacro()
def findChild(self):
from PyQt5.QtWidgets import QGraphicsRectItem
from PyQt5.QtGui import QPen
from PyQt5.QtCore import Qt
children = self.childItems()
parent = self.parentItem()
childVisibility = [(child, child.isVisible()) for child in children]
for n in range(len(children)):
child = children[n]
print("Highlighting %s.childItems()[%i] = %s" % (self, n, child))
childBR = child.mapToItem(parent, child.boundingRect())
childBR = childBR.boundingRect()
debugHighlighter = QGraphicsRectItem(childBR, parent)
debugHighlighter.setPen(QPen(Qt.red))
debugHighlighter.setZValue(9001)
while True:
            command = input()
if command == 's':
child.show()
elif command == 'h':
child.hide()
elif command == 'S':
for c in children:
c.hide()
child.show()
elif command == 'r':
for child, wasVisible in childVisibility:
child.setVisible(wasVisible)
return child
else:
break
debugHighlighter.scene().removeItem(debugHighlighter)
for child, wasVisible in childVisibility:
child.setVisible(wasVisible)
def parse_args(argv=None, gui=None):
parser = argparse.ArgumentParser(description="cadnano 2.5")
parser.add_argument("--testing", "-t", action="store_true", help="Enable testing mode/environment.")
parser.add_argument("--profile", "-p", action="store_true", help="Profile app execution.")
parser.add_argument("--print-stats", "-P", action="store_true", help="Print profiling statistics.")
parser.add_argument("--interactive", "-i", action="store_true", help="Enable interactive (console) mode.")
parser.add_argument('--loglevel',
help="Specify logging level. Can be either DEBUG, INFO, WARNING, ERROR or any integer.")
parser.add_argument("--debug-modules", nargs='*', metavar="MODULE-STR",
help="Debug modules whose names start with any of the given strings. For instance, to "
"debug the cadnano file decoder, use --debug-modules cadnano.fileio.decode ."
"To debug all gui modules, use --debug-modules cadnano.gui .")
parser.add_argument("--file", "-f", metavar="designfile.json", help="cadnano design to load upon start up.")
if gui and (gui is True or gui.lower() == "qt"):
argns, unused = parser.parse_known_args(argv)
else:
argns, unused = parser.parse_args(argv), None
return argns, unused
def init_logging(args=None, logdir=None):
if args is None:
args = {}
if logdir is None:
appname = "cadnano"
try:
import appdirs
logdir = appdirs.user_log_dir(appname)
except ImportError:
if os.environ.get('APPDATA'):
logdir = os.path.join(os.environ['APPDATA'], appname, "Logs")
elif sys.platform == 'darwin':
logdir = os.path.join(os.path.expanduser("~"), "Library", "Logs", appname)
else:
logdir = os.path.join(os.path.expanduser("~"), "."+appname, "logs")
if not os.path.exists(logdir):
os.makedirs(logdir)
logfilepath = os.path.join(logdir, appname+".log")
logfilefmt = "%(asctime)s %(levelname)-6s - %(name)s:%(lineno)s - %(funcName)s() - %(message)s"
logdatefmt = "%Y%m%d-%H:%M:%S"
loguserfmt = "%(asctime)s %(levelname)-5s %(module)30s:%(lineno)-4s%(funcName)16s() %(message)s"
logtimefmt = "%H:%M:%S"
loglevel = int(args['loglevel'])
except (TypeError, ValueError):
loglevel = getattr(logging, args['loglevel'].upper())
else:
loglevel = logging.DEBUG if args.get('testing') else logging.WARNING
if args.get('basic_logging', False):
logging.basicConfig(level=loglevel,
format=loguserfmt,
datefmt=logtimefmt,
filename=logfilepath)
logger.debug("Logging system initialized with loglevel %s", loglevel)
else:
logging.root.setLevel(logging.DEBUG)
logfilehandler = logging.handlers.RotatingFileHandler(logfilepath, maxBytes=2*2**20, backupCount=2)
logfileformatter = logging.Formatter(fmt=logfilefmt, datefmt=logdatefmt)
logfilehandler.setFormatter(logfileformatter)
logging.root.addHandler(logfilehandler)
print("Logging to file:", logfilepath)
logstreamhandler = logging.StreamHandler()
logging.root.addHandler(logstreamhandler)
logstreamformatter = logging.Formatter(loguserfmt, logtimefmt)
logstreamhandler.setFormatter(logstreamformatter)
if args.get('debug_modules'):
def module_debug_filter(record):
"""
            All Filters attached to a logger or handler are consulted.
            The record is discarded if any of the attached Filters return False.
"""
return any(record.name.startswith(modstr) for modstr in args['debug_modules']) \
or record.levelno >= loglevel
logstreamhandler.addFilter(module_debug_filter)
else:
logstreamhandler.setLevel(loglevel)
logger.info("Logging system initialized...")
def read_fasta(fp):
name, seq = None, []
for line in fp:
line = line.rstrip()
if line.startswith(">"):
if name:
yield (name, ''.join(seq))
name, seq = line, []
else:
seq.append(line)
if name:
yield (name, ''.join(seq))
def qtdb_trace():
if False:
logger.info('No debug')
return
else:
import pdb
from PyQt5.QtCore import pyqtRemoveInputHook
pyqtRemoveInputHook()
pdb.set_trace()
def qtdb_resume():
from PyQt5.QtCore import pyqtRestoreInputHook
pyqtRestoreInputHook()
| true
| true
|
1c46fa3e618557a23f27ae9321e98729cbf11428
| 37
|
py
|
Python
|
src/trex/error.py
|
cnk113/TREX
|
add83d8108f3602c5bbe7b37f60ff19f89b2236d
|
[
"MIT"
] | null | null | null |
src/trex/error.py
|
cnk113/TREX
|
add83d8108f3602c5bbe7b37f60ff19f89b2236d
|
[
"MIT"
] | 1
|
2022-03-18T01:56:53.000Z
|
2022-03-24T19:35:58.000Z
|
src/trex/error.py
|
cnk113/TREX
|
add83d8108f3602c5bbe7b37f60ff19f89b2236d
|
[
"MIT"
] | 1
|
2022-03-23T03:07:42.000Z
|
2022-03-23T03:07:42.000Z
|
class TrexError(Exception):
pass
| 12.333333
| 27
| 0.72973
|
class TrexError(Exception):
pass
| true
| true
|
1c46fc394f4f2e4a1722f7dab063575db81ae159
| 2,360
|
py
|
Python
|
rematchrSite/rematchrApp/migrations/0003_auto_20150319_1243.py
|
ctames/rematchr
|
4a22c3e4b1c22b64008e4996bdde9d4657c5294b
|
[
"MIT"
] | null | null | null |
rematchrSite/rematchrApp/migrations/0003_auto_20150319_1243.py
|
ctames/rematchr
|
4a22c3e4b1c22b64008e4996bdde9d4657c5294b
|
[
"MIT"
] | null | null | null |
rematchrSite/rematchrApp/migrations/0003_auto_20150319_1243.py
|
ctames/rematchr
|
4a22c3e4b1c22b64008e4996bdde9d4657c5294b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('rematchrApp', '0002_auto_20150226_1336'),
]
operations = [
migrations.AddField(
model_name='conference',
name='user',
field=models.ForeignKey(null=True, to=settings.AUTH_USER_MODEL, unique=True),
preserve_default=True,
),
migrations.AddField(
model_name='reviewer',
name='doc_texts',
field=models.TextField(default=b'', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='reviewer',
name='doc_urls',
field=models.TextField(default=b'', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='conference',
name='title',
field=models.CharField(max_length=256),
preserve_default=True,
),
migrations.AlterField(
model_name='researcher',
name='doc_texts',
field=models.TextField(default=b'', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='researcher',
name='doc_urls',
field=models.TextField(default=b'', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='researcher',
name='firstname',
field=models.CharField(max_length=256),
preserve_default=True,
),
migrations.AlterField(
model_name='researcher',
name='lastname',
field=models.CharField(max_length=256),
preserve_default=True,
),
migrations.AlterField(
model_name='reviewer',
name='firstname',
field=models.CharField(max_length=256),
preserve_default=True,
),
migrations.AlterField(
model_name='reviewer',
name='lastname',
field=models.CharField(max_length=256),
preserve_default=True,
),
]
| 30.649351
| 89
| 0.563559
|
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('rematchrApp', '0002_auto_20150226_1336'),
]
operations = [
migrations.AddField(
model_name='conference',
name='user',
field=models.ForeignKey(null=True, to=settings.AUTH_USER_MODEL, unique=True),
preserve_default=True,
),
migrations.AddField(
model_name='reviewer',
name='doc_texts',
field=models.TextField(default=b'', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='reviewer',
name='doc_urls',
field=models.TextField(default=b'', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='conference',
name='title',
field=models.CharField(max_length=256),
preserve_default=True,
),
migrations.AlterField(
model_name='researcher',
name='doc_texts',
field=models.TextField(default=b'', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='researcher',
name='doc_urls',
field=models.TextField(default=b'', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='researcher',
name='firstname',
field=models.CharField(max_length=256),
preserve_default=True,
),
migrations.AlterField(
model_name='researcher',
name='lastname',
field=models.CharField(max_length=256),
preserve_default=True,
),
migrations.AlterField(
model_name='reviewer',
name='firstname',
field=models.CharField(max_length=256),
preserve_default=True,
),
migrations.AlterField(
model_name='reviewer',
name='lastname',
field=models.CharField(max_length=256),
preserve_default=True,
),
]
| true
| true
|
1c46fd08a6227a33592d9bcc9675ca8b875b746f
| 13,736
|
py
|
Python
|
src/part2.py
|
shelonsky/Spark-Project-on-Demographic-Analysis-of-Turkey
|
91a6d28e125bdd14b5b44a1ea426c2728b7aa9c3
|
[
"MIT"
] | 1
|
2021-12-30T14:19:18.000Z
|
2021-12-30T14:19:18.000Z
|
src/part2.py
|
shelonsky/Spark-Project-on-Demographic-Analysis-of-Turkey
|
91a6d28e125bdd14b5b44a1ea426c2728b7aa9c3
|
[
"MIT"
] | null | null | null |
src/part2.py
|
shelonsky/Spark-Project-on-Demographic-Analysis-of-Turkey
|
91a6d28e125bdd14b5b44a1ea426c2728b7aa9c3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/5/30 16:06
# @Author : Xiao Lulu
import pyspark.sql.functions as F
from pyspark.sql import Row
from pyspark.sql.functions import *
from pyspark import SparkContext, SparkConf
from pyspark.sql import SparkSession
import re
from pyspark.sql.types import *
from pyspark.sql.window import Window
from pyspark.sql.functions import rank, col
from pyspark.ml import Pipeline
from pyspark.ml.feature import OneHotEncoder, StringIndexer, VectorAssembler
from pyspark.ml.feature import RFormula
from pyspark.ml.classification import LogisticRegression
from pyspark.ml import Pipeline
# import spark.implicits._
sparkconf = SparkConf().setAppName('Mernis')
sparkconf.set('spark.executor.memory', '10g')
sparkconf.set('spark.driver.memory', '10g')
sparkconf.set("spark.sql.debug.maxToStringFields", "100")
spark = (SparkSession
.builder
.appName("Mernis")
.config(conf=sparkconf)
.getOrCreate())
# sc = SparkContext.getOrCreate()
# Load the data
file_path = '/root/myfile/mernis/data_dump.sql'
data = spark.sparkContext.textFile(file_path). \
filter((lambda line: re.findall('^\d{6}', line))). \
map(lambda line: line.split('\t')[:-1])
schema = "uid STRING, national_identifier STRING, first STRING, last STRING, mother_first STRING, " \
"father_first STRING, gender STRING, birth_city STRING, date_of_birth STRING," \
"id_registration_city STRING, id_registration_district STRING, address_city STRING," \
"address_district STRING, address_neighborhood STRING,street_address STRING," \
"door_or_entrance_number STRING"
df = spark.createDataFrame(data, schema)
# total_count = df.count() # total_count = 49611709
def format_date(line):
li = line.split('/')
    if len(li[2]) == 4 and 0 < len(li[0]) <= 2 and 0 < len(li[1]) <= 2:
return li[2] + '-' + li[1].zfill(2) + '-' + li[0].zfill(2)
else:
return 'null'
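# Example: format_date('5/6/1980') returns '1980-06-05'; malformed dates return 'null'.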
format_date_udf = udf(format_date, returnType=StringType())
df.createOrReplaceTempView('citizens')
df_format_date = df.withColumn("date_of_birth", format_date_udf(df["date_of_birth"]))
df_format_date = df_format_date.filter(expr("""date_of_birth != 'null'"""))
df_format_date = df_format_date.withColumn('date_of_birth', to_date('date_of_birth')).\
withColumn('month_of_birth',month('date_of_birth')).\
withColumn('year_of_birth', year('date_of_birth'))
df_format_date.show(3)
### TODO: N6 Compute the population density of the 10 most populous cities; city areas can be looked up via Google, in square kilometers.
def N6():
print('=' * 20, 'problem N6', '=' * 20)
    # The top 10 cities with the most citizens
df_n6 = df_format_date. \
select('address_city'). \
groupBy('address_city'). \
agg(count('*').alias('total')). \
orderBy('total', ascending=False). \
limit(10)
sc = SparkContext.getOrCreate()
area = [('ADANA', 14030), ('ISTANBUL', 5343), ('BURSA', 10891), ('IZMIR', 7340), ('AYDIN', 8007),
('ANKARA', 30715), ('ANTALYA', 1417), ('KOCAELI', 3418), ('KONYA', 38257), ('MERSIN', 15737)]
df_area = spark.createDataFrame(area, ['address_city', 'area'])
df_area = df_n6.join(df_area, 'address_city', 'left_outer').orderBy('area')
df_area.show(10)
    density_df = df_area.withColumn('density', round(df_area['total'] / df_area['area'], 2))
density_df.show(10)
N6()
## TODO: N7 Based on birthplace and current residence, compute the proportions of Turkey's cross-district and cross-city migrant populations relative to the total population.
def N7():
print('=' * 20, 'problem N7', '=' * 20)
total_num = 49611709
df_n7_district = df_format_date. \
select('id_registration_district', 'address_district'). \
filter(col('id_registration_district') != col('address_district'))
propor_district = df_n7_district.count() / total_num
print('Proportion of cross-district floating population:%.3f' % propor_district)
df_n7_city = df_format_date. \
select('id_registration_city', 'address_city'). \
filter(col('id_registration_city') != col('address_city'))
propor_city = df_n7_city.count() / total_num
print('Proportion of cross-city floating population:%.3f' % propor_city)
N7()
# Extract the year and month of birth into new columns, 'year_of_birth' and
# 'month_of_birth', so they can be converted into features. Since the full
# dataset is too large, sample about 4,900 records for training and prediction.
df_h1 = df_format_date.sample(False, 0.00005, seed=2018)
df_h1.show(10)
df_h1 = df_h1.dropna()
print(df_h1.count())
feature_col = ['first', 'last', 'mother_first', 'father_first', 'gender', 'birth_city',
'month_of_birth', 'year_of_birth', 'id_registration_city', 'id_registration_district',
'address_district', 'address_neighborhood', 'street_address', 'address_city'
]
indexOutputCols = [x + '_Index' for x in feature_col]
oheOutputCols = [x + '_OHE' for x in feature_col]
stringIndexer_features = StringIndexer(inputCols=feature_col, outputCols=indexOutputCols,
handleInvalid="skip")
oheEncoder_features = OneHotEncoder(inputCols=indexOutputCols, outputCols=oheOutputCols)
pipeline = Pipeline(stages=[stringIndexer_features, oheEncoder_features])
model = pipeline.fit(df_h1)
res = model.transform(df_h1)
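# res now carries, for every feature column, an integer index column ('*_Index')
# and a sparse one-hot vector column ('*_OHE') alongside the raw values.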
# Split the dataset into training, validation and test sets with proportions 0.7, 0.2 and 0.1.
(trainingData, validData, testData) = res.randomSplit([0.7, 0.2, 0.1], seed=100)
trainingData.persist()
validData.persist()
testData.persist()
#
# TODO: H1. City prediction model: given all of a person's information (except the address city), predict the city where they live. Analyze the model's Top-1 to Top-5 prediction accuracy.
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
# Add a label column; keep the one-hot encoding of address_city
def H1():
print('=' * 20, 'problem H1', '=' * 20)
feature_col = ['first', 'last', 'mother_first', 'father_first', 'gender', 'birth_city',
'month_of_birth', 'year_of_birth', 'id_registration_city', 'id_registration_district',
'address_district', 'address_neighborhood', 'street_address'
]
# All the feature columns
oheOutputCols = [x + '_OHE' for x in feature_col]
# assemble all the feature columns
vecAssembler = VectorAssembler(inputCols=oheOutputCols, outputCol='features')
lr = LogisticRegression(featuresCol='features', labelCol='address_city_Index',
maxIter=100, regParam=0.3, elasticNetParam=0)
lrPipeline = Pipeline(stages=[vecAssembler, lr])
lrModel = lrPipeline.fit(trainingData)
def evaluate_h1(data, model):
print(model)
        # model is a PipelineModel that already includes vecAssembler, so
        # transform the raw data directly (re-assembling would duplicate 'features')
        predictions = model.transform(data)
predictions. \
select('national_identifier', 'probability', 'address_city_Index', 'prediction'). \
orderBy('probability', ascending=False). \
show(n=5, truncate=30)
        evaluator = MulticlassClassificationEvaluator(labelCol='address_city_Index', predictionCol='prediction',
                                                      metricName='accuracy')
        lrAcc = evaluator.evaluate(predictions)
        print('accuracy = ', lrAcc)
evaluate_h1(validData, lrModel)
    # Try different hyperparameter settings
lr.setRegParam(0.001)
lrPipeline = Pipeline(stages=[vecAssembler, lr])
lrModel = lrPipeline.fit(trainingData)
evaluate_h1(validData, lrModel)
lr.setRegParam(0.01)
lrPipeline = Pipeline(stages=[vecAssembler, lr])
lrModel = lrPipeline.fit(trainingData)
evaluate_h1(validData, lrModel)
evaluate_h1(testData, lrModel)
H1()
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
### TODO: H2. Given all the information about one person, predict his/her gender.
def H2():
print('=' * 20, 'problem H2', '=' * 20)
feature_col = ['first', 'last', 'mother_first', 'father_first', 'birth_city', 'year_of_birth', 'month_of_birth',
'id_registration_city', 'id_registration_district', 'address_city',
'address_district', 'address_neighborhood', 'street_address'
]
# All the feature columns
oheOutputCols = [x + '_OHE' for x in feature_col]
vecAssembler = VectorAssembler(inputCols=oheOutputCols, outputCol='features')
lr_h2 = LogisticRegression(featuresCol='features', labelCol='gender_Index',
maxIter=100, regParam=0.01, elasticNetParam=0)
lrPipeline_h2 = Pipeline(stages=[vecAssembler, lr_h2])
lrModel_h2 = lrPipeline_h2.fit(trainingData)
def evaluate_h2(data, model):
predictions = model.transform(data)
predictions. \
select('national_identifier', 'probability', 'gender', 'gender_Index', 'prediction'). \
orderBy('probability', ascending=False). \
show(n=10, truncate=30)
        evaluator = MulticlassClassificationEvaluator(labelCol='gender_Index', predictionCol='prediction',
                                                      metricName='accuracy')
        lrAcc = evaluator.evaluate(predictions)
        print('accuracy = ', lrAcc)
evaluate_h2(validData, lrModel_h2)
lrPipeline_h2 = Pipeline(stages=[vecAssembler, lr_h2])
lrModel_h2 = lrPipeline_h2.fit(trainingData)
evaluate_h2(testData, lrModel_h2)
H2()
# H3. Name prediction model: given all of a person's information (except the name), predict the person's most likely name. Analyze the model's Top-1 to Top-5 prediction accuracy.
def H3():
print('=' * 20, 'problem H3', '=' * 20)
feature_col = ['mother_first', 'father_first', 'birth_city', 'gender', 'year_of_birth', 'month_of_birth',
'id_registration_city', 'id_registration_district', 'address_city',
'address_district', 'address_neighborhood', 'street_address'
]
    # Names of all the feature columns
oheOutputCols = [x + '_OHE' for x in feature_col]
# assemble all the feature columns
vecAssembler = VectorAssembler(inputCols=oheOutputCols, outputCol='features')
vecTrainDF_h3 = vecAssembler.transform(trainingData)
trainingData.show(3)
lr_h3 = LogisticRegression(featuresCol='features', labelCol='first_Index',
maxIter=100, regParam=0.01, elasticNetParam=0)
# lrPipeline_h3 = Pipeline(stages = [vecAssembler,lr_h3])
lrModel_h3 = lr_h3.fit(vecTrainDF_h3)
def evaluate_h3(data):
print(lrModel_h3)
vecData = vecAssembler.transform(data)
predictions = lrModel_h3.transform(vecData)
predictions.select('national_identifier', 'probability', 'first', 'first_Index', 'prediction').orderBy(
'probability', ascending=False).show(n=10, truncate=30)
        evaluator = MulticlassClassificationEvaluator(labelCol='first_Index', predictionCol='prediction',
                                                      metricName='accuracy')
        lrAcc = evaluator.evaluate(predictions)
        print('accuracy = ', lrAcc)
evaluate_h3(validData)
evaluate_h3(testData)
# H3()
# TODO: H4. Population prediction model: count the number of births each year and predict the next year's newborn count.
from pyspark.sql.types import FloatType
from math import log
from pyspark.ml.evaluation import RegressionEvaluator
from pyspark.ml.regression import LinearRegression
from pyspark.ml.feature import VectorAssembler
def H4():
print('=' * 20, 'problem H4', '=' * 20)
df_h4 = df_format_date.withColumn(
'year_of_birth', year('date_of_birth'))
df_population = df_h4.select("year_of_birth").groupBy('year_of_birth').agg(count('*').alias('total'))
df_population = df_population.withColumn('year', df_population['year_of_birth'].cast('int')).drop('year_of_birth')
df_population = df_population.filter(df_population['year'] > 1700)
df_population.orderBy('total').show(10)
def to_index(year):
return year - 1888
to_index_udf = udf(to_index, returnType=IntegerType())
min_year = df_population.select(min('year').alias('year')).collect()[0]
print(min_year)
new_df = df_population.withColumn('index', to_index_udf(df_population['year']))
new_df.show()
(training, test) = new_df.randomSplit([0.8, 0.2], seed=2020)
training.persist()
test.persist()
### linear regression
vecAssembler = VectorAssembler(inputCols=['index'],outputCol='features')
vecTrainDF = vecAssembler.transform(training)
lr_h4 = LinearRegression(featuresCol='features',labelCol='total')
lrModel_h4 = lr_h4.fit(vecTrainDF)
m = lrModel_h4.coefficients[0]
b = lrModel_h4.intercept
print(f"""The formula for the linear regression lines is num = {m:.2f}*index{b:.2f}""")
vecTestDF = vecAssembler.transform(test)
predictions = lrModel_h4.transform(vecTestDF)
predictions.orderBy('prediction', ascending=False).show(5)
regressionEvaluator = RegressionEvaluator(predictionCol='prediction', labelCol='total', metricName='r2')
r2 = regressionEvaluator.evaluate(predictions)
print(f"r2 is {r2}")
### LR with Malthus model
def log_num(num):
if num:
return log(num)
else:
return 0
log_num_udf = udf(log_num, returnType=FloatType())
log_df = new_df.withColumn('logTotal', log_num_udf(new_df['total']))
log_df.show()
vecAssembler = VectorAssembler(inputCols=['index'], outputCol='features')
lr_h4_log = LinearRegression(featuresCol='features', labelCol='logTotal')
training_log = training.withColumn('logTotal', log_num_udf('total'))
vecTrainDF_log = vecAssembler.transform(training_log)
lrModel_h4_log = lr_h4_log.fit(vecTrainDF_log)
m_log = lrModel_h4_log.coefficients[0]
b_log = lrModel_h4_log.intercept
print(f"""The formula for the linear regression lines is log(total) = {m_log:.3f}*index+{b_log:.3f}""")
# test
test_log = test.withColumn('logTotal', log_num_udf('total'))
vecTestDF_log = vecAssembler.transform(test_log)
predictions_log = lrModel_h4_log.transform(vecTestDF_log)
predictions_log.orderBy('prediction', ascending=False).show(10)
regressionEvaluator = RegressionEvaluator(predictionCol='prediction', labelCol='logTotal', metricName='r2')
r2_log = regressionEvaluator.evaluate(predictions_log)
print(f"r2 is {r2_log}")
H4()
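# Hedged sketch (illustrative only): inverting H4's log-linear (Malthus) fit
# to forecast a head count. Assumes m_log/b_log from H4 and the same
# index = year - 1888 encoding used there; the example coefficients are
# placeholders, not fitted values.
from math import exp

def predict_births(year, m_log, b_log):
    index = year - 1888                 # same index encoding as in H4
    return exp(m_log * index + b_log)   # undo log(total) = m_log*index + b_log

# example: predict_births(1992, m_log=0.05, b_log=9.0)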
| 39.585014
| 118
| 0.692268
|
import pyspark.sql.functions as F
from pyspark.sql import Row
from pyspark.sql.functions import *
from pyspark import SparkContext, SparkConf
from pyspark.sql import SparkSession
import re
from pyspark.sql.types import *
from pyspark.sql.window import Window
from pyspark.sql.functions import rank, col
from pyspark.ml import Pipeline
from pyspark.ml.feature import OneHotEncoder, StringIndexer, VectorAssembler
from pyspark.ml.feature import RFormula
from pyspark.ml.classification import LogisticRegression
from pyspark.ml import Pipeline
sparkconf = SparkConf().setAppName('Mernis')
sparkconf.set('spark.executor.memory', '10g')
sparkconf.set('spark.driver.memory', '10g')
sparkconf.set("spark.sql.debug.maxToStringFields", "100")
spark = (SparkSession
.builder
.appName("Mernis")
.config(conf=sparkconf)
.getOrCreate())
file_path = '/root/myfile/mernis/data_dump.sql'
data = spark.sparkContext.textFile(file_path). \
filter(lambda line: re.findall(r'^\d{6}', line)). \
map(lambda line: line.split('\t')[:-1])
schema = "uid STRING, national_identifier STRING, first STRING, last STRING, mother_first STRING, " \
"father_first STRING, gender STRING, birth_city STRING, date_of_birth STRING," \
"id_registration_city STRING, id_registration_district STRING, address_city STRING," \
"address_district STRING, address_neighborhood STRING,street_address STRING," \
"door_or_entrance_number STRING"
df = spark.createDataFrame(data, schema)
def format_date(line):
    li = line.split('/')
    if len(li[2]) == 4 and 0 < len(li[0]) <= 2 and 0 < len(li[1]) <= 2:
        return li[2] + '-' + li[1].zfill(2) + '-' + li[0].zfill(2)
    else:
        return 'null'
format_date_udf = udf(format_date, returnType=StringType())
df.createOrReplaceTempView('citizens')
df_format_date = df.withColumn("date_of_birth", format_date_udf(df["date_of_birth"]))
df_format_date = df_format_date.filter(expr("""date_of_birth != 'null'"""))
df_format_date = df_format_date.withColumn('date_of_birth', to_date('date_of_birth')).\
withColumn('month_of_birth',month('date_of_birth')).\
withColumn('year_of_birth', year('date_of_birth'))
df_format_date.show(3)
def N6():
    df_n6 = df_format_date. \
        select('address_city'). \
groupBy('address_city'). \
agg(count('*').alias('total')). \
orderBy('total', ascending=False). \
limit(10)
sc = SparkContext.getOrCreate()
area = [('ADANA', 14030), ('ISTANBUL', 5343), ('BURSA', 10891), ('IZMIR', 7340), ('AYDIN', 8007),
('ANKARA', 30715), ('ANTALYA', 1417), ('KOCAELI', 3418), ('KONYA', 38257), ('MERSIN', 15737)]
df_area = spark.createDataFrame(area, ['address_city', 'area'])
df_area = df_n6.join(df_area, 'address_city', 'left_outer').orderBy('area')
df_area.show(10)
density_df = df_area.withColumn('desity', round(df_area['total'] / df_area['area'], 2))
density_df.show(10)
N6()
def N7():
    total_num = 49611709
df_n7_district = df_format_date. \
select('id_registration_district', 'address_district'). \
filter(col('id_registration_district') != col('address_district'))
propor_district = df_n7_district.count() / total_num
print('Proportion of cross-district floating population:%.3f' % propor_district)
df_n7_city = df_format_date. \
select('id_registration_city', 'address_city'). \
filter(col('id_registration_city') != col('address_city'))
propor_city = df_n7_city.count() / total_num
print('Proportion of cross-city floating population:%.3f' % propor_city)
N7()
df_h1 = df_format_date.sample(False, 0.00005, seed=2018)
df_h1.show(10)
df_h1 = df_h1.dropna()
print(df_h1.count())
feature_col = ['first', 'last', 'mother_first', 'father_first', 'gender', 'birth_city',
'month_of_birth', 'year_of_birth', 'id_registration_city', 'id_registration_district',
'address_district', 'address_neighborhood', 'street_address', 'address_city'
]
indexOutputCols = [x + '_Index' for x in feature_col]
oheOutputCols = [x + '_OHE' for x in feature_col]
stringIndexer_features = StringIndexer(inputCols=feature_col, outputCols=indexOutputCols,
handleInvalid="skip")
oheEncoder_features = OneHotEncoder(inputCols=indexOutputCols, outputCols=oheOutputCols)
pipeline = Pipeline(stages=[stringIndexer_features, oheEncoder_features])
model = pipeline.fit(df_h1)
res = model.transform(df_h1)
(trainingData, validData, testData) = res.randomSplit([0.7, 0.2, 0.1], seed=100)
trainingData.persist()
validData.persist()
testData.persist()
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
def H1():
print('=' * 20, 'problem H1', '=' * 20)
feature_col = ['first', 'last', 'mother_first', 'father_first', 'gender', 'birth_city',
'month_of_birth', 'year_of_birth', 'id_registration_city', 'id_registration_district',
'address_district', 'address_neighborhood', 'street_address'
]
oheOutputCols = [x + '_OHE' for x in feature_col]
vecAssembler = VectorAssembler(inputCols=oheOutputCols, outputCol='features')
df_h1 = vecAssembler.transform(trainingData)
lr = LogisticRegression(featuresCol='features', labelCol='address_city_Index',
maxIter=100, regParam=0.3, elasticNetParam=0)
lrPipeline = Pipeline(stages=[vecAssembler, lr])
lrModel = lrPipeline.fit(trainingData)
def evaluate_h1(data, model):
print(model)
vecData = vecAssembler.transform(data)
predictions = model.transform(vecData)
predictions. \
select('national_identifier', 'probability', 'address_city_Index', 'prediction'). \
orderBy('probability', ascending=False). \
show(n=5, truncate=30)
evaluator = MulticlassClassificationEvaluator(labelCol='address_city_Index', predictionCol='prediction')
lrAcc = evaluator.evaluate(predictions)
print('test accuracy = ', lrAcc)
evaluate_h1(validData, lrModel)
lr.setRegParam(0.001)
lrPipeline = Pipeline(stages=[vecAssembler, lr])
lrModel = lrPipeline.fit(trainingData)
evaluate_h1(validData, lrModel)
lr.setRegParam(0.01)
lrPipeline = Pipeline(stages=[vecAssembler, lr])
lrModel = lrPipeline.fit(trainingData)
evaluate_h1(validData, lrModel)
evaluate_h1(testData, lrModel)
H1()
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
def H2():
    print('=' * 20, 'problem H2', '=' * 20)
    feature_col = ['first', 'last', 'mother_first', 'father_first', 'birth_city', 'year_of_birth', 'month_of_birth',
'id_registration_city', 'id_registration_district', 'address_city',
'address_district', 'address_neighborhood', 'street_address'
]
oheOutputCols = [x + '_OHE' for x in feature_col]
vecAssembler = VectorAssembler(inputCols=oheOutputCols, outputCol='features')
lr_h2 = LogisticRegression(featuresCol='features', labelCol='gender_Index',
maxIter=100, regParam=0.01, elasticNetParam=0)
lrPipeline_h2 = Pipeline(stages=[vecAssembler, lr_h2])
lrModel_h2 = lrPipeline_h2.fit(trainingData)
def evaluate_h2(data, model):
predictions = model.transform(data)
predictions. \
select('national_identifier', 'probability', 'gender', 'gender_Index', 'prediction'). \
orderBy('probability', ascending=False). \
show(n=10, truncate=30)
evaluator = MulticlassClassificationEvaluator(labelCol='gender_Index', predictionCol='prediction')
lrAcc = evaluator.evaluate(predictions)
print('test accuracy = ', lrAcc)
evaluate_h2(validData, lrModel_h2)
lrPipeline_h2 = Pipeline(stages=[vecAssembler, lr_h2])
lrModel_h2 = lrPipeline_h2.fit(trainingData)
evaluate_h2(testData, lrModel_h2)
H2()
def H3():
print('=' * 20, 'problem H3', '=' * 20)
feature_col = ['mother_first', 'father_first', 'birth_city', 'gender', 'year_of_birth', 'month_of_birth',
'id_registration_city', 'id_registration_district', 'address_city',
'address_district', 'address_neighborhood', 'street_address'
]
oheOutputCols = [x + '_OHE' for x in feature_col]
vecAssembler = VectorAssembler(inputCols=oheOutputCols, outputCol='features')
vecTrainDF_h3 = vecAssembler.transform(trainingData)
trainingData.show(3)
lr_h3 = LogisticRegression(featuresCol='features', labelCol='first_Index',
maxIter=100, regParam=0.01, elasticNetParam=0)
lrModel_h3 = lr_h3.fit(vecTrainDF_h3)
def evaluate_h3(data):
print(lrModel_h3)
vecData = vecAssembler.transform(data)
predictions = lrModel_h3.transform(vecData)
predictions.select('national_identifier', 'probability', 'first', 'first_Index', 'prediction').orderBy(
'probability', ascending=False).show(n=10, truncate=30)
evaluator = MulticlassClassificationEvaluator(labelCol='first_Index', predictionCol='prediction')
lrAcc = evaluator.evaluate(predictions)
print('test accuracy = ', lrAcc)
evaluate_h3(validData)
evaluate_h3(testData)
from pyspark.sql.types import FloatType
from math import log
from pyspark.ml.evaluation import RegressionEvaluator
from pyspark.ml.regression import LinearRegression
from pyspark.ml.feature import VectorAssembler
def H4():
print('=' * 20, 'problem H4', '=' * 20)
df_h4 = df_format_date.withColumn(
'year_of_birth', year('date_of_birth'))
df_population = df_h4.select("year_of_birth").groupBy('year_of_birth').agg(count('*').alias('total'))
df_population = df_population.withColumn('year', df_population['year_of_birth'].cast('int')).drop('year_of_birth')
df_population = df_population.filter(df_population['year'] > 1700)
df_population.orderBy('total').show(10)
def to_index(year):
return year - 1888
to_index_udf = udf(to_index, returnType=IntegerType())
min_year = df_population.select(min('year').alias('year')).collect()[0]
print(min_year)
new_df = df_population.withColumn('index', to_index_udf(df_population['year']))
new_df.show()
(training, test) = new_df.randomSplit([0.8, 0.2], seed=2020)
training.persist()
test.persist()
vecAssembler = VectorAssembler(inputCols=['index'], outputCol='features')
vecTrainDF = vecAssembler.transform(training)
lr_h4 = LinearRegression(featuresCol='features',labelCol='total')
lrModel_h4 = lr_h4.fit(vecTrainDF)
m = lrModel_h4.coefficients[0]
b = lrModel_h4.intercept
print(f"""The formula for the linear regression lines is num = {m:.2f}*index{b:.2f}""")
vecTestDF = vecAssembler.transform(test)
predictions = lrModel_h4.transform(vecTestDF)
predictions.orderBy('prediction', ascending=False).show(5)
regressionEvaluator = RegressionEvaluator(predictionCol='prediction', labelCol='total', metricName='r2')
r2 = regressionEvaluator.evaluate(predictions)
print(f"r2 is {r2}")
def log_num(num):
    if num:
        return log(num)
    else:
        return 0
log_num_udf = udf(log_num, returnType=FloatType())
log_df = new_df.withColumn('logTotal', log_num_udf(new_df['total']))
log_df.show()
vecAssembler = VectorAssembler(inputCols=['index'], outputCol='features')
lr_h4_log = LinearRegression(featuresCol='features', labelCol='logTotal')
training_log = training.withColumn('logTotal', log_num_udf('total'))
vecTrainDF_log = vecAssembler.transform(training_log)
lrModel_h4_log = lr_h4_log.fit(vecTrainDF_log)
m_log = lrModel_h4_log.coefficients[0]
b_log = lrModel_h4_log.intercept
print(f"""The formula for the linear regression lines is log(total) = {m_log:.3f}*index+{b_log:.3f}""")
test_log = test.withColumn('logTotal', log_num_udf('total'))
vecTestDF_log = vecAssembler.transform(test_log)
predictions_log = lrModel_h4_log.transform(vecTestDF_log)
predictions_log.orderBy('prediction', ascending=False).show(10)
regressionEvaluator = RegressionEvaluator(predictionCol='prediction', labelCol='logTotal', metricName='r2')
r2_log = regressionEvaluator.evaluate(predictions_log)
print(f"r2 is {r2_log}")
H4()
| true
| true
|
1c46fdcf707a39d6008c2679f4330a4c105e612a
| 7,835
|
py
|
Python
|
coffee.py
|
capjamesg/hypertext-coffee-pot
|
2cf5987493066063908b467568a7c54c71c2ff66
|
[
"MIT"
] | null | null | null |
coffee.py
|
capjamesg/hypertext-coffee-pot
|
2cf5987493066063908b467568a7c54c71c2ff66
|
[
"MIT"
] | null | null | null |
coffee.py
|
capjamesg/hypertext-coffee-pot
|
2cf5987493066063908b467568a7c54c71c2ff66
|
[
"MIT"
] | null | null | null |
from config import *
import datetime
import logging
import socket
import json
import os
logging.basicConfig(filename="coffeepot.log", level=logging.DEBUG)
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((HOST, PORT))
pouring_milk = None
last_request = None
# rewrite the currently brewing file every time the program starts up
# a coffee pot that has been stopped in the middle of operation should not pick up where it left off (!)
with open("currently_brewing.json", "w+") as f:
f.write("{}")
if not os.path.isfile("past_coffees.json"):
with open("past_coffees.json", "w+") as f:
f.write("")
def ensure_request_is_valid(url, content_type, method, processing_request, connection):
if "://" not in url:
connection.send(b"HTCPCP/1.1 400 Bad Request\n\n")
processing_request = False
if url.split("://")[0].encode().decode("ascii") not in ACCEPTED_COFFEE_SCHEMES:
connection.send(b"HTCPCP/1.1 404 Not Found\r\n\r\n")
processing_request = False
if url.split("://")[1] != "james":
connection.send(b"HTCPCP/1.1 404 Not Found\r\n\r\n")
processing_request = False
if method not in ACCEPTED_METHODS:
connection.send(b"HTCPCP/1.1 501 Not Implemented\r\n\r\n")
processing_request = False
if content_type and content_type[0] != "Content-Type: application/coffee-pot-command":
connection.send(b"HTCPCP/1.1 415 Unsupported Media Type\r\n\r\n")
processing_request = False
return processing_request
def process_additions(headers, processing_request, pouring_milk, connection):
accept_additions = [header for header in headers if header.startswith("Accept-Additions")]
if len(accept_additions) > 0:
additions = accept_additions[0].split(":")[1].strip().split(";")
invalid_addition = False
for item in additions:
print(item.lower().strip())
if ACCEPTED_ADDITIONS.get(item.lower().strip()) is None:
response = "HTCPCP/1.1 406 Not Acceptable\r\n\r\n" + ", ".join(list(ACCEPTED_ADDITIONS.keys())).strip(", ")
connection.send(bytes(response.encode()))
invalid_addition = True
processing_request = False
elif item.lower() in MILKS:
# pour milk in 5 mins, after brew
pouring_milk = (datetime.datetime.now() + datetime.timedelta(minutes=5)).strftime("%a, %d %b %Y %H:%M:%S")
if invalid_addition:
processing_request = False
else:
additions = None
return additions, processing_request, pouring_milk
def create_request_response(method, additions, pouring_milk):
response = ""
if method == "GET" or method == "PROPFIND":
with open("currently_brewing.json", "r") as f:
response = json.load(f)
response = json.dumps(response)
elif method == "BREW" or method == "POST":
# 'message' is the raw request read in the module-level connection loop
response_body = message.split("\n")[-1]
if response_body == "stop":
with open("currently_brewing.json", "w+") as f:
f.write("{}")
elif response_body == "start":
now = datetime.datetime.now().strftime("%a, %d %b %Y %H:%M:%S")
end_time = (datetime.datetime.now() + datetime.timedelta(minutes=5)).strftime("%a, %d %b %Y %H:%M:%S")
if additions == None:
additions = []
if pouring_milk == None:
milk_status = ""
else:
milk_status = pouring_milk
record_to_save = json.dumps(
{
"date": now,
"beverage_type": "Coffee",
"additions": additions,
"brew_time_end": end_time,
"pouring_milk": milk_status
}
)
with open("past_coffees.json", "a+") as coffee_records:
coffee_records.write(record_to_save + "\n")
with open("currently_brewing.json", "w+") as brewing_record:
brewing_record.write(record_to_save)
else:
response = "HTCPCP/1.1 400 Bad Request\r\n\r\n"
elif method == "WHEN":
with open("currently_brewing.json", "r") as f:
response = json.load(f)
pouring_milk = datetime.datetime.strptime(pouring_milk, "%a, %d %b %Y %H:%M:%S")
brew_time_end_object = datetime.datetime.strptime(response.get("brew_time_end"), "%a, %d %b %Y %H:%M:%S")
if pouring_milk >= brew_time_end_object:
response = "Milk has stopped pouring."
else:
response = "Milk is not pouring."
pouring_milk = None
return response
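# Hedged sketch (not part of the original server): a minimal client for manual
# testing. It uses bare "\n" line endings because the server splits headers on
# "\n" without stripping "\r"; "coffee" is assumed to be listed in
# ACCEPTED_COFFEE_SCHEMES and BREW in ACCEPTED_METHODS (both from config.py).
def brew_coffee(host="127.0.0.1", port=PORT):
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect((host, port))
    request = (
        "BREW coffee://james HTCPCP/1.1\n"
        "Content-Type: application/coffee-pot-command\n"
        "\n"
        "start"  # body read by create_request_response()
    )
    client.send(request.encode())
    print(client.recv(1024).decode())  # server response headers + body
    client.close()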
while True:
# start listening for connections
server.listen()
print("Listening for connections on port " + str(PORT))
connection, address = server.accept()
# set timeout so requests cannot hang
connection.settimeout(5)
print("Connected to: ", address)
processing_request = True
while processing_request:
# get message
message = connection.recv(1024).decode()
last_request = message
if len(message.strip().replace("\n", "").replace("\r", "")) == 0:
    # empty read: nothing left to parse, stop handling this connection
    processing_request = False
    continue
logging.info("Received message: " + message)
# get last coffee
with open("currently_brewing.json", "r") as f:
last_coffee = json.load(f)
method = message.split(" ")[0]
if last_coffee and last_coffee["brew_time_end"] and (method == "BREW" or method == "POST"):
# get last_coffee["brew_time_end"] as datetime object
last_brewed = datetime.datetime.strptime(last_coffee["brew_time_end"], "%a, %d %b %Y %H:%M:%S")
if last_brewed + datetime.timedelta(minutes=5) > datetime.datetime.now():
response = "HTCPCP/1.1 406 Not Acceptable\r\n\r\n" + ", ".join(list(ACCEPTED_ADDITIONS.keys())).strip(", ")
connection.send(bytes(response.encode()))
processing_request = False
else:
with open("currently_brewing.json", "w+") as f:
f.write("{}")
url = message.split(" ")[1]
headers = message.split("\n")
content_type = [header for header in headers if header.startswith("Content-Type")]
safe = [header for header in headers if header.startswith("Safe:")]
if safe and safe[0] == "Yes":
message = last_request
method = message.split(" ")[0]
url = message.split(" ")[1]
headers = message.split("\n")
processing_request = ensure_request_is_valid(url, content_type, method, processing_request, connection)
additions, processing_request, pouring_milk = process_additions(headers, processing_request, pouring_milk, connection)
if method in ACCEPTED_METHODS:
current_date = datetime.datetime.now().strftime("%a, %d %b %Y %H:%M:%S")
# response headers
headers_to_send = [
"HTCPCP/1.1 200 OK\r\n",
"Server: CoffeePot\r\n",
"Content-Type: message/coffeepot\r\n",
"Date: " + current_date + "\r\n",
]
response = create_request_response(method, additions, pouring_milk)
final_response = "".join(headers_to_send) + response
logging.info("Sending response: " + final_response)
print(final_response)
connection.send(bytes(final_response.encode("utf-8")))
processing_request = False
# close connection after request has been processed
logging.info("Closing connection")
connection.close()
logging.info("Connection closed")
| 35.292793
| 126
| 0.596171
|
from config import *
import datetime
import logging
import socket
import json
import os
logging.basicConfig(filename="coffeepot.log", level=logging.DEBUG)
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((HOST, PORT))
pouring_milk = None
last_request = None
with open("currently_brewing.json", "w+") as f:
f.write("{}")
if not os.path.isfile("past_coffees.json"):
with open("past_coffees.json", "w+") as f:
f.write("")
def ensure_request_is_valid(url, content_type, method, processing_request, connection):
if "://" not in url:
connection.send(b"HTCPCP/1.1 400 Bad Request\n\n")
processing_request = False
if url.split("://")[0].encode().decode("ascii") not in ACCEPTED_COFFEE_SCHEMES:
connection.send(b"HTCPCP/1.1 404 Not Found\r\n\r\n")
processing_request = False
if url.split("://")[1] != "james":
connection.send(b"HTCPCP/1.1 404 Not Found\r\n\r\n")
processing_request = False
if method not in ACCEPTED_METHODS:
connection.send(b"HTCPCP/1.1 501 Not Implemented\r\n\r\n")
processing_request = False
if content_type and content_type[0] != "Content-Type: application/coffee-pot-command":
connection.send(b"HTCPCP/1.1 415 Unsupported Media Type\r\n\r\n")
processing_request = False
return processing_request
def process_additions(headers, processing_request, pouring_milk, connection):
accept_additions = [header for header in headers if header.startswith("Accept-Additions")]
if len(accept_additions) > 0:
additions = accept_additions[0].split(":")[1].strip().split(";")
invalid_addition = False
for item in additions:
print(item.lower().strip())
if ACCEPTED_ADDITIONS.get(item.lower().strip()) is None:
response = "HTCPCP/1.1 406 Not Acceptable\r\n\r\n" + ", ".join(list(ACCEPTED_ADDITIONS.keys())).strip(", ")
connection.send(bytes(response.encode()))
invalid_addition = True
processing_request = False
elif item.lower() in MILKS:
pouring_milk = (datetime.datetime.now() + datetime.timedelta(minutes=5)).strftime("%a, %d %b %Y %H:%M:%S")
if invalid_addition:
processing_request = False
else:
additions = None
return additions, processing_request, pouring_milk
def create_request_response(method, additions, pouring_milk):
response = ""
if method == "GET" or method == "PROPFIND":
with open("currently_brewing.json", "r") as f:
response = json.load(f)
response = json.dumps(response)
elif method == "BREW" or method == "POST":
response_body = message.split("\n")[-1]
if response_body == "stop":
with open("currently_brewing.json", "w+") as f:
f.write("{}")
elif response_body == "start":
now = datetime.datetime.now().strftime("%a, %d %b %Y %H:%M:%S")
end_time = (datetime.datetime.now() + datetime.timedelta(minutes=5)).strftime("%a, %d %b %Y %H:%M:%S")
if additions == None:
additions = []
if pouring_milk == None:
milk_status = ""
else:
milk_status = pouring_milk
record_to_save = json.dumps(
{
"date": now,
"beverage_type": "Coffee",
"additions": additions,
"brew_time_end": end_time,
"pouring_milk": milk_status
}
)
with open("past_coffees.json", "a+") as coffee_records:
coffee_records.write(record_to_save + "\n")
with open("currently_brewing.json", "w+") as brewing_record:
brewing_record.write(record_to_save)
else:
response = "HTCPCP/1.1 400 Bad Request\r\n\r\n"
elif method == "WHEN":
with open("currently_brewing.json", "r") as f:
response = json.load(f)
pouring_milk = datetime.datetime.strptime(pouring_milk, "%a, %d %b %Y %H:%M:%S")
brew_time_end_object = datetime.datetime.strptime(response.get("brew_time_end"), "%a, %d %b %Y %H:%M:%S")
if pouring_milk >= brew_time_end_object:
response = "Milk has stopped pouring."
else:
response = "Milk is not pouring."
pouring_milk = None
return response
while True:
server.listen()
print("Listening for connections on port " + str(PORT))
connection, address = server.accept()
connection.settimeout(5)
print("Connected to: ", address)
processing_request = True
while processing_request:
message = connection.recv(1024).decode()
last_request = message
if len(message.strip().replace("\n", "").replace("\r", "")) == 0:
    processing_request = False
    continue
logging.info("Received message: " + message)
with open("currently_brewing.json", "r") as f:
last_coffee = json.load(f)
method = message.split(" ")[0]
if last_coffee and last_coffee["brew_time_end"] and (method == "BREW" or method == "POST"):
last_brewed = datetime.datetime.strptime(last_coffee["brew_time_end"], "%a, %d %b %Y %H:%M:%S")
if last_brewed + datetime.timedelta(minutes=5) > datetime.datetime.now():
response = "HTCPCP/1.1 406 Not Acceptable\r\n\r\n" + ", ".join(list(ACCEPTED_ADDITIONS.keys())).strip(", ")
connection.send(bytes(response.encode()))
processing_request = False
else:
with open("currently_brewing.json", "w+") as f:
f.write("{}")
url = message.split(" ")[1]
headers = message.split("\n")
content_type = [header for header in headers if header.startswith("Content-Type")]
safe = [header for header in headers if header.startswith("Safe:")]
if safe and safe[0].split(":", 1)[1].strip() == "Yes":
message = last_request
method = message.split(" ")[0]
url = message.split(" ")[1]
headers = message.split("\n")
processing_request = ensure_request_is_valid(url, content_type, method, processing_request, connection)
additions, processing_request, pouring_milk = process_additions(headers, processing_request, pouring_milk, connection)
if method in ACCEPTED_METHODS:
current_date = datetime.datetime.now().strftime("%a, %d %b %Y %H:%M:%S")
headers_to_send = [
"HTCPCP/1.1 200 OK\r\n",
"Server: CoffeePot\r\n",
"Content-Type: message/coffeepot\r\n",
"Date: " + current_date + "\r\n",
]
response = create_request_response(method, additions, pouring_milk)
final_response = "".join(headers_to_send) + response
logging.info("Sending response: " + final_response)
print(final_response)
connection.send(bytes(final_response.encode("utf-8")))
processing_request = False
logging.info("Closing connection")
connection.close()
logging.info("Connection closed")
| true
| true
|
1c46fde441f196ee5cc51a5ec50072e5a1d3b4aa
| 7,281
|
py
|
Python
|
scripts/fig/util.py
|
ucbrise/snoopy
|
da4c98e3876c10cf52aa51ece3b62c5e8b8e335a
|
[
"Apache-2.0"
] | 9
|
2021-11-10T20:34:00.000Z
|
2022-03-23T02:30:29.000Z
|
scripts/fig/util.py
|
ucbrise/snoopy
|
da4c98e3876c10cf52aa51ece3b62c5e8b8e335a
|
[
"Apache-2.0"
] | null | null | null |
scripts/fig/util.py
|
ucbrise/snoopy
|
da4c98e3876c10cf52aa51ece3b62c5e8b8e335a
|
[
"Apache-2.0"
] | 4
|
2021-09-30T05:12:06.000Z
|
2022-03-18T03:05:21.000Z
|
import json
import math
import random
from collections import defaultdict
from scipy.special import lambertw
def parseData(filename):
results = []
f = open(filename, "r")
for line in f:
elems = line.split()
result = {
"clients": int(elems[0]),
"data_size": int(elems[1]),
"suborams": int(elems[2]),
"iter": int(elems[3]),
"balancers": int(elems[4]),
"mean_latency": float(elems[6]),
"min_latenecy": float(elems[7]),
"max_latency": float(elems[8]),
"var_latency": float(elems[9]),
"std_latency": float(elems[10]),
"50_latency": float(elems[11]),
"75_latency": float(elems[12]),
"90_latency": float(elems[13]),
"95_latency": float(elems[14]),
"99_latency": float(elems[15]),
"throughput": float(elems[16])
}
results.append(result)
return results
def parseDataNew(filename):
results = []
f = open(filename, "r")
for line in f:
elems = line.split()
result = {
"clients": int(elems[0]),
"data_size": int(elems[1]),
"suborams": int(elems[2]),
"balancers": int(elems[3]),
"iter": int(elems[4]),
"mean_latency": float(elems[5]),
"min_latenecy": float(elems[6]),
"max_latency": float(elems[7]),
"var_latency": float(elems[8]),
"std_latency": float(elems[9]),
"50_latency": float(elems[10]),
"75_latency": float(elems[11]),
"90_latency": float(elems[12]),
"95_latency": float(elems[13]),
"99_latency": float(elems[14]),
"throughput": float(elems[15])
}
results.append(result)
return results
def parseDataNew2(filename):
results = []
f = open(filename, "r")
for line in f:
elems = line.split()
result = {
"clients": int(elems[0]),
"data_size": int(elems[1]),
"suborams": int(elems[2]),
"balancers": int(elems[3]),
"epoch_ms": int(elems[4]),
"iter": int(elems[5]),
"mean_latency": float(elems[6]),
"min_latenecy": float(elems[7]),
"max_latency": float(elems[8]),
"var_latency": float(elems[9]),
"std_latency": float(elems[10]),
"50_latency": float(elems[11]),
"75_latency": float(elems[12]),
"90_latency": float(elems[13]),
"95_latency": float(elems[14]),
"99_latency": float(elems[15]),
"throughput": float(elems[16])
}
results.append(result)
return results
def getMaxThroughputForNumBalancers(results, num_balancers):
ret = 0
for result in results:
if result["balancers"] == num_balancers:
ret = max(ret, result["throughput"])
return ret
def getMaxThroughputForNumBalancersWithMaxLatency(results, num_balancers, max_latency, suborams=None):
ret = 0
for result in results:
if result["balancers"] == num_balancers and result["90_latency"] <= max_latency:
if suborams is None or result["suborams"] == suborams:
ret = max(ret, result["throughput"])
return ret
def getMaxThroughputForNumBalancersWithMaxMeanLatency(results, num_balancers, max_latency, suborams=None):
ret = 0
for result in results:
if result["balancers"] == num_balancers and result["50_latency"] <= max_latency:
if suborams is None or result["suborams"] == suborams:
ret = max(ret, result["throughput"])
return ret
def getLatencyForMaxThroughputForNumBalancers(results, num_balancers):
throughput = 0
ret = 0
for result in results:
if result["balancers"] == num_balancers:
if (throughput < result["throughput"]):
throughput = result["throughput"]
ret = result["mean_latency"]
return ret
def getMaxThroughputForEpochMs(results, epoch_ms):
ret = 0
for result in results:
if result["epoch_ms"] == epoch_ms:
ret = max(ret, result["throughput"])
return ret
def getMaxDataForNumSuborams(results, num_suborams, max_latency, latency_type):
ret = 0
for result in results:
if result["suborams"] == num_suborams and result[latency_type] < max_latency:
print(("Acceptable latency for %d suborams: %d") % (result["suborams"], result[latency_type]))
ret = max(ret, result["data_size"])
return ret
def getTupleListOfVals(results, *labels):
ret = []
for result in results:
res = ()
for l in labels:
res += (result[l],)
if res not in ret:
ret.append(res)
return ret
def getListOfVals(results, label):
ret = []
for result in results:
if result[label] not in ret:
ret.append(result[label])
return ret
def getLatencyForSuboramAndDataSize(results, num_suborams, data_size, latency_type):
for result in results:
if result["suborams"] == num_suborams and result["data_size"] == data_size:
return result[latency_type]
def f(N, n_suborams, secparam=128):
mu = N / n_suborams
alpha = math.log(n_suborams * (2 ** secparam))
rhs = alpha / (math.e * mu) - 1 / math.e
branch = 0
epsilon = math.e ** (lambertw(rhs, branch) + 1) - 1
#epsilon = (alpha + math.sqrt(2 * mu * alpha)) / mu # uncomment for looser bound
#print(alpha, rhs, lambertw(rhs, 0), lambertw(rhs, 1))
#print("bound", suborams, secparam, alpha, rhs, lambertw(rhs), epsilon)
return mu * (1 + epsilon)
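# A hedged note on f() above: with N requests hashed into n_suborams bins
# (mean load mu = N / n_suborams), a Chernoff bound gives
#   P[load > mu*(1+eps)] <= exp(-mu*((1+eps)*ln(1+eps) - eps)),
# and setting the right-hand side to 1/(n_suborams * 2^secparam) yields
#   mu*((1+eps)*ln(1+eps) - eps) = alpha.
# Substituting 1+eps = e^(w+1) turns this into w*e^w = alpha/(e*mu) - 1/e,
# which is exactly the lambertw(rhs) expression used above.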
def hash_requests(reqs, n_suborams, run):
offset = run * reqs
secret = b'Sixteen byte key'
buckets = defaultdict(int)
for i in range(offset, offset+reqs):
"""
cobj = CMAC.new(secret, ciphermod=AES)
cobj.update(i.to_bytes(i.bit_length(), 'big'))
h = int(cobj.hexdigest(), 16)
"""
h = int(random.random() * n_suborams)
bucket = h % n_suborams
buckets[bucket] += 1
return max(buckets.values())
def max_requests(n_suborams, target, secparam):
"""
Get maximum request batch size for a given # of suborams that each support target requests.
"""
l = n_suborams
r = 2 ** 32
m = 0
while l <= r:
m = math.floor((l+r)/ 2)
bound = f(m, n_suborams, secparam)
if bound > target:
r = m - 1
elif bound < target:
l = m + 1
else:
return m
return m
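# Hedged usage sketch (illustrative numbers, not from any experiment): find
# the largest batch whose worst-case per-suboram load, per f() above, stays
# under an assumed per-suboram capacity.
def example_capacity_planning():
    n_suborams = 16
    capacity = 70000  # assumed per-suboram requests per epoch
    n = max_requests(n_suborams, capacity, secparam=128)
    print("batch size:", n, "worst-case suboram load:", f(n, n_suborams).real)
    return n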
def parse_args(parser):
parser.add_argument('input', type=str, help='input data')
parser.add_argument('output', type=str, help='output file')
parser.add_argument('-b', '--baseline', help='baseline data')
parser.add_argument('-t', '--title', help='set graph title')
parser.add_argument('-l', '--large', action='store_true',
help='output large graph (default: false)')
args = parser.parse_args()
return args
def parse_baseline(filename):
with open(filename, 'r') as f:
baseline = json.load(f)
return baseline
| 33.708333
| 106
| 0.573136
|
import json
import math
import random
from collections import defaultdict
from scipy.special import lambertw
def parseData(filename):
results = []
f = open(filename, "r")
for line in f:
elems = line.split()
result = {
"clients": int(elems[0]),
"data_size": int(elems[1]),
"suborams": int(elems[2]),
"iter": int(elems[3]),
"balancers": int(elems[4]),
"mean_latency": float(elems[6]),
"min_latenecy": float(elems[7]),
"max_latency": float(elems[8]),
"var_latency": float(elems[9]),
"std_latency": float(elems[10]),
"50_latency": float(elems[11]),
"75_latency": float(elems[12]),
"90_latency": float(elems[13]),
"95_latency": float(elems[14]),
"99_latency": float(elems[15]),
"throughput": float(elems[16])
}
results.append(result)
return results
def parseDataNew(filename):
results = []
f = open(filename, "r")
for line in f:
elems = line.split()
result = {
"clients": int(elems[0]),
"data_size": int(elems[1]),
"suborams": int(elems[2]),
"balancers": int(elems[3]),
"iter": int(elems[4]),
"mean_latency": float(elems[5]),
"min_latenecy": float(elems[6]),
"max_latency": float(elems[7]),
"var_latency": float(elems[8]),
"std_latency": float(elems[9]),
"50_latency": float(elems[10]),
"75_latency": float(elems[11]),
"90_latency": float(elems[12]),
"95_latency": float(elems[13]),
"99_latency": float(elems[14]),
"throughput": float(elems[15])
}
results.append(result)
return results
def parseDataNew2(filename):
results = []
f = open(filename, "r")
for line in f:
elems = line.split()
result = {
"clients": int(elems[0]),
"data_size": int(elems[1]),
"suborams": int(elems[2]),
"balancers": int(elems[3]),
"epoch_ms": int(elems[4]),
"iter": int(elems[5]),
"mean_latency": float(elems[6]),
"min_latenecy": float(elems[7]),
"max_latency": float(elems[8]),
"var_latency": float(elems[9]),
"std_latency": float(elems[10]),
"50_latency": float(elems[11]),
"75_latency": float(elems[12]),
"90_latency": float(elems[13]),
"95_latency": float(elems[14]),
"99_latency": float(elems[15]),
"throughput": float(elems[16])
}
results.append(result)
return results
def getMaxThroughputForNumBalancers(results, num_balancers):
ret = 0
for result in results:
if result["balancers"] == num_balancers:
ret = max(ret, result["throughput"])
return ret
def getMaxThroughputForNumBalancersWithMaxLatency(results, num_balancers, max_latency, suborams=None):
ret = 0
for result in results:
if result["balancers"] == num_balancers and result["90_latency"] <= max_latency:
if suborams is None or result["suborams"] == suborams:
ret = max(ret, result["throughput"])
return ret
def getMaxThroughputForNumBalancersWithMaxMeanLatency(results, num_balancers, max_latency, suborams=None):
ret = 0
for result in results:
if result["balancers"] == num_balancers and result["50_latency"] <= max_latency:
if suborams is None or result["suborams"] == suborams:
ret = max(ret, result["throughput"])
return ret
def getLatencyForMaxThroughputForNumBalancers(results, num_balancers):
throughput = 0
ret = 0
for result in results:
if result["balancers"] == num_balancers:
if (throughput < result["throughput"]):
throughput = result["throughput"]
ret = result["mean_latency"]
return ret
def getMaxThroughputForEpochMs(results, epoch_ms):
ret = 0
for result in results:
if result["epoch_ms"] == epoch_ms:
ret = max(ret, result["throughput"])
return ret
def getMaxDataForNumSuborams(results, num_suborams, max_latency, latency_type):
ret = 0
for result in results:
if result["suborams"] == num_suborams and result[latency_type] < max_latency:
print(("Acceptable latency for %d suborams: %d") % (result["suborams"], result[latency_type]))
ret = max(ret, result["data_size"])
return ret
def getTupleListOfVals(results, *labels):
ret = []
for result in results:
res = ()
for l in labels:
res += (result[l],)
if res not in ret:
ret.append(res)
return ret
def getListOfVals(results, label):
ret = []
for result in results:
if result[label] not in ret:
ret.append(result[label])
return ret
def getLatencyForSuboramAndDataSize(results, num_suborams, data_size, latency_type):
for result in results:
if result["suborams"] == num_suborams and result["data_size"] == data_size:
return result[latency_type]
def f(N, n_suborams, secparam=128):
mu = N / n_suborams
alpha = math.log(n_suborams * (2 ** secparam))
rhs = alpha / (math.e * mu) - 1 / math.e
branch = 0
epsilon = math.e ** (lambertw(rhs, branch) + 1) - 1
1 + epsilon)
def hash_requests(reqs, n_suborams, run):
offset = run * reqs
secret = b'Sixteen byte key'
buckets = defaultdict(int)
for i in range(offset, offset+reqs):
h = int(random.random() * n_suborams)
bucket = h % n_suborams
buckets[bucket] += 1
return max(buckets.values())
def max_requests(n_suborams, target, secparam):
l = n_suborams
r = 2 ** 32
m = 0
while l <= r:
m = math.floor((l+r)/ 2)
bound = f(m, n_suborams, secparam)
if bound > target:
r = m - 1
elif bound < target:
l = m + 1
else:
return m
return m
def parse_args(parser):
parser.add_argument('input', type=str, help='input data')
parser.add_argument('output', type=str, help='output file')
parser.add_argument('-b', '--baseline', help='baseline data')
parser.add_argument('-t', '--title', help='set graph title')
parser.add_argument('-l', '--large', action='store_true',
help='output large graph (default: false)')
args = parser.parse_args()
return args
def parse_baseline(filename):
with open(filename, 'r') as f:
baseline = json.load(f)
return baseline
| true
| true
|
1c4702f1d0d7fa1c75b6ea73d0e090f76d63480b
| 2,601
|
py
|
Python
|
explore/viz/continuous.py
|
idc9/explore
|
ce8aa039de96b1dd9fecc19fa098c222863ac3ce
|
[
"MIT"
] | null | null | null |
explore/viz/continuous.py
|
idc9/explore
|
ce8aa039de96b1dd9fecc19fa098c222863ac3ce
|
[
"MIT"
] | null | null | null |
explore/viz/continuous.py
|
idc9/explore
|
ce8aa039de96b1dd9fecc19fa098c222863ac3ce
|
[
"MIT"
] | 1
|
2021-02-05T20:31:51.000Z
|
2021-02-05T20:31:51.000Z
|
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import StandardScaler
from scipy.stats import pearsonr
from explore.utils import safe_apply
from explore.viz.utils import bold, ABLine2D, fmt_pval
def plot_scatter(x, y, alpha=0.05, standardize=False, label=None):
"""
Parameters
----------
x, y: array-like (ideally pd.Series)
x, y values to plot. If pd.Series, uses 'name' to get x/y labels
alpha: float
Cutoff for correlation coefficient significance.
standardize: bool
Whether or not to standardize (mean center and scale) the variables.
False by default.
label: str or None
Custom legend label; if None, a Pearson correlation label is computed.
"""
xlab, ylab = '', ''
if hasattr(x, 'name'):
xlab = x.name
if hasattr(y, 'name'):
ylab = y.name
# drop missing values
df = pd.concat([pd.Series(x), pd.Series(y)], axis=1).dropna()
# optionally center/scale
if standardize:
df = safe_apply(StandardScaler().fit_transform, df)
xlab += ' (standardized)'
ylab += ' (standardized)'
x = df.iloc[:, 0].values.reshape(-1)
y = df.iloc[:, 1].values.reshape(-1)
# fit linear model
lm = LinearRegression(fit_intercept=True).fit(x.reshape(-1, 1), y)
slope = lm.coef_.item()
intercept = lm.intercept_
# if no label provided, compute correlation
if label is None:
# compute pearson correlation
corr, pval = pearsonr(x, y)
reject = pval < alpha
label = get_cts_label(reject, corr, corr_name='pearson', pval=pval)
# scatter plot
plt.scatter(x, y, color='blue', s=2)
plt.xlabel(xlab)
plt.ylabel(ylab)
# line
ABLine2D(slope, intercept, label=label,
color='blue') # , linewidth=linewidth
plt.legend(loc='upper left')
def get_cts_label(reject, corr, corr_name, pval):
if reject:
# stat_str = bold('pearson \\ corr: {:1.2f} \\ (p={:1.2f})'.format(corr, pval))
# label = bold('{}: {:1.3f} (p={:1.3f})*'.format(corr_name, corr, pval))
# label = bold('{}: {:1.3f} (p={:.1e})*'.format(corr_name, corr, pval))
label = bold('{}: {:1.3f} (p={})*'.format(corr_name, corr,
fmt_pval(pval)))
else:
# stat_str = 'pearson corr: {:1.2f} (p={:1.2f})'.format(corr, pval)
# label = '{}: {:1.3f} (p={:1.3f})'.format(corr_name, corr, pval)
label = '{}: {:1.3f} (p={})'.format(corr_name, corr,
fmt_pval(pval))
return label
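# Hedged usage sketch (not part of the original module): exercise plot_scatter
# on synthetic, correlated data; the names and sizes are illustrative.
if __name__ == '__main__':
    import numpy as np
    rng = np.random.default_rng(0)
    x = pd.Series(rng.normal(size=200), name='x')
    y = pd.Series(2 * x.values + rng.normal(size=200), name='y')
    plot_scatter(x, y, standardize=True)
    plt.show()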
| 30.964286
| 87
| 0.582468
|
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import StandardScaler
from scipy.stats import pearsonr
from explore.utils import safe_apply
from explore.viz.utils import bold, ABLine2D, fmt_pval
def plot_scatter(x, y, alpha=0.05, standardize=False, label=None):
xlab, ylab = '', ''
if hasattr(x, 'name'):
xlab = x.name
if hasattr(y, 'name'):
ylab = y.name
df = pd.concat([pd.Series(x), pd.Series(y)], axis=1).dropna()
if standardize:
df = safe_apply(StandardScaler().fit_transform, df)
xlab += ' (standardized)'
ylab += ' (standardized)'
x = df.iloc[:, 0].values.reshape(-1)
y = df.iloc[:, 1].values.reshape(-1)
lm = LinearRegression(fit_intercept=True).fit(x.reshape(-1, 1), y)
slope = lm.coef_.item()
intercept = lm.intercept_
if label is None:
corr, pval = pearsonr(x, y)
reject = pval < alpha
label = get_cts_label(reject, corr, corr_name='pearson', pval=pval)
plt.scatter(x, y, color='blue', s=2)
plt.xlabel(xlab)
plt.ylabel(ylab)
ABLine2D(slope, intercept, label=label,
color='blue')
plt.legend(loc='upper left')
def get_cts_label(reject, corr, corr_name, pval):
if reject:
label = bold('{}: {:1.3f} (p={})*'.format(corr_name, corr,
fmt_pval(pval)))
else:
label = '{}: {:1.3f} (p={})'.format(corr_name, corr,
fmt_pval(pval))
return label
| true
| true
|