hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
14fff5972a7592d8c461ab673a18eb68705810e3 | 2,884 | py | Python | tournament-battle.py | oyasumi-asap/pokemon-swsh-scripts | 35684a03b652eb6744a681b95a9798389a499e65 | [
"MIT"
] | 1 | 2019-12-31T02:53:04.000Z | 2019-12-31T02:53:04.000Z | tournament-battle.py | oyasumi-asap/pokemon-swsh-scripts | 35684a03b652eb6744a681b95a9798389a499e65 | [
"MIT"
] | null | null | null | tournament-battle.py | oyasumi-asap/pokemon-swsh-scripts | 35684a03b652eb6744a681b95a9798389a499e65 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import argparse
import serial
import time
from time import sleep
import datetime
import random
# Command-line interface: the serial port is required; --fight_time caps how
# many seconds a single battle is allowed to run before we stop mashing A.
parser = argparse.ArgumentParser()
parser.add_argument('port')
parser.add_argument('--fight_time', type=int, default=150)
args = parser.parse_args()
# Short alias for datetime.datetime, used for timestamped log lines.
dt = datetime.datetime
def send(msg, duration=0):
    """Transmit one controller command over the serial link, then release.

    The command is logged with a timestamp, written to the port, held for
    *duration* seconds, and followed by a RELEASE so no input stays stuck.
    """
    print(f'[{dt.now()}] {msg}')
    payload = f'{msg}\r\n'.encode('utf-8')
    ser.write(payload)
    sleep(duration)
    ser.write(b'RELEASE\r\n')
# Serial link to the board that emulates a Switch controller (9600 baud).
ser = serial.Serial(args.port, 9600)
try:
    start_time = time.time()
    # Tournament automation: each lap is one full championship run.
    for lap in range(0, 999):
        send('Button A', 0.2)  # talk to the tournament receptionist
        sleep(1)
        send('Button A', 0.1)
        sleep(0.5)
        send('Button A', 0.1)  # answer "yes"
        sleep(1)
        send('Button A', 0.1)
        sleep(0.5)
        send('Button A', 0.1)  # answer "I'm in"
        sleep(0.5)
        send('Button A', 0.1)
        sleep(0.5)
        send('LY MAX', 0.1)
        sleep(0.1)
        send('LY MAX', 0.1)  # move the cursor to pick Hop as partner
        sleep(0.1)
        # Mash A through the remaining confirmation dialogue.
        for i in range(0, 13):
            send('Button A', 0.1)
            sleep(0.4)
        # Three rounds per tournament: enter, sit through intros, battle.
        for i in range(0, 3):
            send('LY MIN', 3)  # walk forward into the arena
            sleep(5)
            send('Button A', 0.1)  # intro dialogue 1
            sleep(1)
            send('Button A', 0.1)  # intro dialogue 2
            sleep(1)
            send('Button A', 0.1)  # intro dialogue 3
            sleep(15)
            # Battle start
            send('Button A', 0.1)  # "... challenged you to a battle!"
            sleep(22)
            # Use an X Sp. Atk item before attacking.
            send('LY MAX', 0.1)
            sleep(0.1)
            send('LY MAX', 0.1)
            sleep(0.1)
            send('Button A', 0.1)
            sleep(1.5)
            send('LX MAX', 0.1)
            sleep(0.1)
            send('LX MAX', 0.1)
            sleep(0.1)
            send('LY MAX', 0.1)
            sleep(0.1)
            send('LY MAX', 0.1)
            sleep(0.1)
            send('Button A', 0.1)
            sleep(0.2)
            send('Button A', 0.1)
            sleep(12)
            send('LY MIN', 0.1)
            sleep(0.1)
            send('LY MIN', 0.1)
            sleep(0.1)
            # Mash A until the per-battle time budget is exhausted.
            fight_start_time = time.time()
            while True:
                if (time.time() - fight_start_time) > args.fight_time:
                    break
                send('Button A', 0.1)
                sleep(0.1)
                if random.randrange(2):
                    # Occasionally log how much battle time remains.
                    time_left = round(args.fight_time - (time.time() - fight_start_time), 2)
                    print(f'[{dt.now()}] 残り{time_left}秒')
            print('勝利')
        # Championship won.
        print('優勝')
        # Mash A through the Ball Guy / post-victory dialogue.
        for i in range(0, 20):
            send('Button A', 0.1)
            sleep(1)
        send('LY MIN', 1)  # walk back into the reception desk to reset position
        print(f'[{dt.now()}] {round(time.time() - start_time, 2)}秒経過({lap}回目)')
except KeyboardInterrupt:
    # Let go of all inputs before exiting so the controller isn't stuck.
    send('RELEASE')
ser.close()
| 20.167832 | 92 | 0.449723 |
53e7df65480b1ebee186419ce9b473429d086128 | 29,415 | py | Python | lib/python3.9/site-packages/pip/_internal/commands/install.py | ichomchom/Twitter-Media-Downloader | 8287e0c4d034a8ae83422a623ee1049a3b7150a8 | [
"MIT"
] | 1 | 2020-08-07T16:09:57.000Z | 2020-08-07T16:09:57.000Z | lib/python3.9/site-packages/pip/_internal/commands/install.py | ichomchom/Twitter-Media-Downloader | 8287e0c4d034a8ae83422a623ee1049a3b7150a8 | [
"MIT"
] | 2 | 2021-05-12T06:25:57.000Z | 2022-03-01T04:16:03.000Z | env/lib/python3.9/site-packages/pip/_internal/commands/install.py | simotwo/AbileneParadox-ddd | c85961efb37aba43c0d99ed1c36d083507e2b2d3 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
import errno
import logging
import operator
import os
import shutil
import site
import sys
from os import path
from optparse import SUPPRESS_HELP
from pip._vendor import pkg_resources
from pip._vendor.packaging.utils import canonicalize_name
from pip._internal.cache import WheelCache
from pip._internal.cli import cmdoptions
from pip._internal.cli.cmdoptions import make_target_python
from pip._internal.cli.req_command import RequirementCommand, with_cleanup
from pip._internal.cli.status_codes import ERROR, SUCCESS
from pip._internal.exceptions import CommandError, InstallationError
from pip._internal.locations import distutils_scheme
from pip._internal.operations.check import check_install_conflicts
from pip._internal.req import install_given_reqs
from pip._internal.req.req_tracker import get_requirement_tracker
from pip._internal.utils.datetime import today_is_later_than
from pip._internal.utils.deprecation import deprecated
from pip._internal.utils.distutils_args import parse_distutils_args
from pip._internal.utils.filesystem import test_writable_dir
from pip._internal.utils.misc import (
ensure_dir,
get_installed_version,
get_pip_version,
protect_pip_from_modification_on_windows,
write_output,
)
from pip._internal.utils.temp_dir import TempDirectory
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.utils.virtualenv import virtualenv_no_global
from pip._internal.wheel_builder import build, should_build_for_install_command
if MYPY_CHECK_RUNNING:
from optparse import Values
from typing import Iterable, List, Optional
from pip._internal.models.format_control import FormatControl
from pip._internal.operations.check import ConflictDetails
from pip._internal.req.req_install import InstallRequirement
from pip._internal.wheel_builder import BinaryAllowedPredicate
logger = logging.getLogger(__name__)
def get_check_binary_allowed(format_control):
    # type: (FormatControl) -> BinaryAllowedPredicate
    """Build a predicate deciding whether a requirement may use a wheel."""
    def check_binary_allowed(req):
        # type: (InstallRequirement) -> bool
        # PEP 517 projects can only be installed via a wheel build, so
        # binary is always allowed for them.
        if req.use_pep517:
            return True
        name = canonicalize_name(req.name)
        return "binary" in format_control.get_allowed_formats(name)

    return check_binary_allowed
class InstallCommand(RequirementCommand):
    """
    Install packages from:

    - PyPI (and other indexes) using requirement specifiers.
    - VCS project urls.
    - Local project directories.
    - Local or remote source archives.

    pip also supports installing from "requirements files", which provide
    an easy way to specify a whole environment to be installed.
    """

    usage = """
      %prog [options] <requirement specifier> [package-index-options] ...
      %prog [options] -r <requirements file> [package-index-options] ...
      %prog [options] [-e] <vcs project url> ...
      %prog [options] [-e] <local project path> ...
      %prog [options] <archive url/path> ..."""
    def add_options(self):
        # type: () -> None
        """Register all ``pip install`` command-line options on this command."""
        self.cmd_opts.add_option(cmdoptions.requirements())
        self.cmd_opts.add_option(cmdoptions.constraints())
        self.cmd_opts.add_option(cmdoptions.no_deps())
        self.cmd_opts.add_option(cmdoptions.pre())

        self.cmd_opts.add_option(cmdoptions.editable())
        self.cmd_opts.add_option(
            '-t', '--target',
            dest='target_dir',
            metavar='dir',
            default=None,
            help='Install packages into <dir>. '
            'By default this will not replace existing files/folders in '
            '<dir>. Use --upgrade to replace existing packages in <dir> '
            'with new versions.'
        )
        cmdoptions.add_target_python_options(self.cmd_opts)

        self.cmd_opts.add_option(
            '--user',
            dest='use_user_site',
            action='store_true',
            help="Install to the Python user install directory for your "
            "platform. Typically ~/.local/, or %APPDATA%\\Python on "
            "Windows. (See the Python documentation for site.USER_BASE "
            "for full details.)")
        # --no-user is hidden: it exists so config/env defaults can be
        # overridden back to a non-user install on the command line.
        self.cmd_opts.add_option(
            '--no-user',
            dest='use_user_site',
            action='store_false',
            help=SUPPRESS_HELP)
        self.cmd_opts.add_option(
            '--root',
            dest='root_path',
            metavar='dir',
            default=None,
            help="Install everything relative to this alternate root "
            "directory.")
        self.cmd_opts.add_option(
            '--prefix',
            dest='prefix_path',
            metavar='dir',
            default=None,
            help="Installation prefix where lib, bin and other top-level "
            "folders are placed")

        self.cmd_opts.add_option(cmdoptions.build_dir())

        self.cmd_opts.add_option(cmdoptions.src())

        self.cmd_opts.add_option(
            '-U', '--upgrade',
            dest='upgrade',
            action='store_true',
            help='Upgrade all specified packages to the newest available '
            'version. The handling of dependencies depends on the '
            'upgrade-strategy used.'
        )

        self.cmd_opts.add_option(
            '--upgrade-strategy',
            dest='upgrade_strategy',
            default='only-if-needed',
            choices=['only-if-needed', 'eager'],
            help='Determines how dependency upgrading should be handled '
            '[default: %default]. '
            '"eager" - dependencies are upgraded regardless of '
            'whether the currently installed version satisfies the '
            'requirements of the upgraded package(s). '
            '"only-if-needed" - are upgraded only when they do not '
            'satisfy the requirements of the upgraded package(s).'
        )

        self.cmd_opts.add_option(
            '--force-reinstall',
            dest='force_reinstall',
            action='store_true',
            help='Reinstall all packages even if they are already '
            'up-to-date.')

        self.cmd_opts.add_option(
            '-I', '--ignore-installed',
            dest='ignore_installed',
            action='store_true',
            help='Ignore the installed packages, overwriting them. '
            'This can break your system if the existing package '
            'is of a different version or was installed '
            'with a different package manager!'
        )

        self.cmd_opts.add_option(cmdoptions.ignore_requires_python())
        self.cmd_opts.add_option(cmdoptions.no_build_isolation())
        self.cmd_opts.add_option(cmdoptions.use_pep517())
        self.cmd_opts.add_option(cmdoptions.no_use_pep517())

        self.cmd_opts.add_option(cmdoptions.install_options())
        self.cmd_opts.add_option(cmdoptions.global_options())

        self.cmd_opts.add_option(
            "--compile",
            action="store_true",
            dest="compile",
            default=True,
            help="Compile Python source files to bytecode",
        )

        self.cmd_opts.add_option(
            "--no-compile",
            action="store_false",
            dest="compile",
            help="Do not compile Python source files to bytecode",
        )

        self.cmd_opts.add_option(
            "--no-warn-script-location",
            action="store_false",
            dest="warn_script_location",
            default=True,
            help="Do not warn when installing scripts outside PATH",
        )
        self.cmd_opts.add_option(
            "--no-warn-conflicts",
            action="store_false",
            dest="warn_about_conflicts",
            default=True,
            help="Do not warn about broken dependencies",
        )

        self.cmd_opts.add_option(cmdoptions.no_binary())
        self.cmd_opts.add_option(cmdoptions.only_binary())
        self.cmd_opts.add_option(cmdoptions.prefer_binary())
        self.cmd_opts.add_option(cmdoptions.require_hashes())
        self.cmd_opts.add_option(cmdoptions.progress_bar())

        # Index options (e.g. --index-url) go in their own option group so
        # they render separately in --help.
        index_opts = cmdoptions.make_option_group(
            cmdoptions.index_group,
            self.parser,
        )

        self.parser.insert_option_group(0, index_opts)
        self.parser.insert_option_group(0, self.cmd_opts)
    @with_cleanup
    def run(self, options, args):
        # type: (Values, List[str]) -> int
        """Execute the install: validate options, resolve requirements,
        build wheels, install them, and report what was installed.

        Returns SUCCESS, or ERROR on an EnvironmentError during install.
        """
        # --user and --target are mutually exclusive install destinations.
        if options.use_user_site and options.target_dir is not None:
            raise CommandError("Can not combine '--user' and '--target'")

        cmdoptions.check_install_build_global(options)

        def is_venv():
            # real_prefix: virtualenv; base_prefix != prefix: stdlib venv.
            return (hasattr(sys, 'real_prefix') or
                    (hasattr(sys, 'base_prefix') and
                     sys.base_prefix != sys.prefix))

        # Check whether we have root privileges and aren't in venv/virtualenv
        if os.getuid() == 0 and not is_venv() and not options.root_path:
            command = path.basename(sys.argv[0])
            if command == "__main__.py":
                command = path.basename(sys.executable) + " -m pip"
            logger.warning(
                "Running pip install with root privileges is "
                "generally not a good idea. Try `%s install --user` instead."
                % command
            )

        # Only honour --upgrade-strategy when --upgrade was actually given.
        upgrade_strategy = "to-satisfy-only"
        if options.upgrade:
            upgrade_strategy = options.upgrade_strategy

        cmdoptions.check_dist_restriction(options, check_target=True)

        install_options = options.install_options or []

        logger.debug("Using %s", get_pip_version())
        options.use_user_site = decide_user_install(
            options.use_user_site,
            prefix_path=options.prefix_path,
            target_dir=options.target_dir,
            root_path=options.root_path,
            isolated_mode=options.isolated_mode,
        )

        # With --target, installation is staged into a temp dir and moved
        # into place at the end (see _handle_target_dir).
        target_temp_dir = None  # type: Optional[TempDirectory]
        target_temp_dir_path = None  # type: Optional[str]
        if options.target_dir:
            options.ignore_installed = True
            options.target_dir = os.path.abspath(options.target_dir)
            if (os.path.exists(options.target_dir) and not
                    os.path.isdir(options.target_dir)):
                raise CommandError(
                    "Target path exists but is not a directory, will not "
                    "continue."
                )

            # Create a target directory for using with the target option
            target_temp_dir = TempDirectory(kind="target")
            target_temp_dir_path = target_temp_dir.path
            self.enter_context(target_temp_dir)

        global_options = options.global_options or []

        session = self.get_default_session(options)

        target_python = make_target_python(options)
        finder = self._build_package_finder(
            options=options,
            session=session,
            target_python=target_python,
            ignore_requires_python=options.ignore_requires_python,
        )
        build_delete = (not (options.no_clean or options.build_dir))
        wheel_cache = WheelCache(options.cache_dir, options.format_control)

        req_tracker = self.enter_context(get_requirement_tracker())

        directory = TempDirectory(
            options.build_dir,
            delete=build_delete,
            kind="install",
            globally_managed=True,
        )

        try:
            reqs = self.get_requirements(args, options, finder, session)

            # --install-option must not be used to redirect install paths.
            reject_location_related_install_options(
                reqs, options.install_options
            )

            preparer = self.make_requirement_preparer(
                temp_build_dir=directory,
                options=options,
                req_tracker=req_tracker,
                session=session,
                finder=finder,
                use_user_site=options.use_user_site,
            )
            resolver = self.make_resolver(
                preparer=preparer,
                finder=finder,
                options=options,
                wheel_cache=wheel_cache,
                use_user_site=options.use_user_site,
                ignore_installed=options.ignore_installed,
                ignore_requires_python=options.ignore_requires_python,
                force_reinstall=options.force_reinstall,
                upgrade_strategy=upgrade_strategy,
                use_pep517=options.use_pep517,
            )

            self.trace_basic_info(finder)

            requirement_set = resolver.resolve(
                reqs, check_supported_wheels=not options.target_dir
            )

            try:
                pip_req = requirement_set.get_requirement("pip")
            except KeyError:
                modifying_pip = False
            else:
                # If we're not replacing an already installed pip,
                # we're not modifying it.
                modifying_pip = pip_req.satisfied_by is None
            protect_pip_from_modification_on_windows(
                modifying_pip=modifying_pip
            )

            check_binary_allowed = get_check_binary_allowed(
                finder.format_control
            )

            reqs_to_build = [
                r for r in requirement_set.requirements.values()
                if should_build_for_install_command(
                    r, check_binary_allowed
                )
            ]

            _, build_failures = build(
                reqs_to_build,
                wheel_cache=wheel_cache,
                build_options=[],
                global_options=[],
            )

            # If we're using PEP 517, we cannot do a direct install
            # so we fail here.
            pep517_build_failure_names = [
                r.name   # type: ignore
                for r in build_failures if r.use_pep517
            ]  # type: List[str]
            if pep517_build_failure_names:
                raise InstallationError(
                    "Could not build wheels for {} which use"
                    " PEP 517 and cannot be installed directly".format(
                        ", ".join(pep517_build_failure_names)
                    )
                )

            # For now, we just warn about failures building legacy
            # requirements, as we'll fall through to a direct
            # install for those.
            legacy_build_failure_names = [
                r.name  # type: ignore
                for r in build_failures if not r.use_pep517
            ]  # type: List[str]
            if legacy_build_failure_names:
                deprecated(
                    reason=(
                        "Could not build wheels for {} which do not use "
                        "PEP 517. pip will fall back to legacy 'setup.py "
                        "install' for these.".format(
                            ", ".join(legacy_build_failure_names)
                        )
                    ),
                    replacement="to fix the wheel build issue reported above",
                    gone_in="21.0",
                    issue=8368,
                )

            to_install = resolver.get_installation_order(
                requirement_set
            )

            # Check for conflicts in the package set we're installing.
            conflicts = None  # type: Optional[ConflictDetails]
            should_warn_about_conflicts = (
                not options.ignore_dependencies and
                options.warn_about_conflicts
            )
            if should_warn_about_conflicts:
                conflicts = self._determine_conflicts(to_install)

            # Don't warn about script install locations if
            # --target has been specified
            warn_script_location = options.warn_script_location
            if options.target_dir:
                warn_script_location = False

            installed = install_given_reqs(
                to_install,
                install_options,
                global_options,
                root=options.root_path,
                home=target_temp_dir_path,
                prefix=options.prefix_path,
                warn_script_location=warn_script_location,
                use_user_site=options.use_user_site,
                pycompile=options.compile,
            )

            lib_locations = get_lib_location_guesses(
                user=options.use_user_site,
                home=target_temp_dir_path,
                root=options.root_path,
                prefix=options.prefix_path,
                isolated=options.isolated_mode,
            )
            working_set = pkg_resources.WorkingSet(lib_locations)

            # Build the "name-version" summary; version lookup is
            # best-effort only, so failures are deliberately ignored.
            installed.sort(key=operator.attrgetter('name'))
            items = []
            for result in installed:
                item = result.name
                try:
                    installed_version = get_installed_version(
                        result.name, working_set=working_set
                    )
                    if installed_version:
                        item += '-' + installed_version
                except Exception:
                    pass
                items.append(item)

            if conflicts is not None:
                self._warn_about_conflicts(
                    conflicts,
                    new_resolver='2020-resolver' in options.features_enabled,
                )

            installed_desc = ' '.join(items)
            if installed_desc:
                write_output(
                    'Successfully installed %s', installed_desc,
                )
        except EnvironmentError as error:
            show_traceback = (self.verbosity >= 1)

            message = create_env_error_message(
                error, show_traceback, options.use_user_site,
            )
            logger.error(message, exc_info=show_traceback)  # noqa

            return ERROR

        if options.target_dir:
            assert target_temp_dir
            self._handle_target_dir(
                options.target_dir, target_temp_dir, options.upgrade
            )

        return SUCCESS
    def _handle_target_dir(self, target_dir, target_temp_dir, upgrade):
        # type: (str, TempDirectory, bool) -> None
        """Move a staged --target installation from the temp dir into
        *target_dir*, honouring --upgrade for already-existing entries."""
        ensure_dir(target_dir)

        # Checking both purelib and platlib directories for installed
        # packages to be moved to target directory
        lib_dir_list = []

        scheme = distutils_scheme('', home=target_temp_dir.path)
        purelib_dir = scheme['purelib']
        platlib_dir = scheme['platlib']
        data_dir = scheme['data']

        if os.path.exists(purelib_dir):
            lib_dir_list.append(purelib_dir)
        if os.path.exists(platlib_dir) and platlib_dir != purelib_dir:
            lib_dir_list.append(platlib_dir)
        if os.path.exists(data_dir):
            lib_dir_list.append(data_dir)

        for lib_dir in lib_dir_list:
            for item in os.listdir(lib_dir):
                if lib_dir == data_dir:
                    # Skip data entries that are actually parents of one of
                    # the lib dirs (data_dir is always last in the list).
                    ddir = os.path.join(data_dir, item)
                    if any(s.startswith(ddir) for s in lib_dir_list[:-1]):
                        continue
                target_item_dir = os.path.join(target_dir, item)
                if os.path.exists(target_item_dir):
                    if not upgrade:
                        logger.warning(
                            'Target directory %s already exists. Specify '
                            '--upgrade to force replacement.',
                            target_item_dir
                        )
                        continue
                    if os.path.islink(target_item_dir):
                        logger.warning(
                            'Target directory %s already exists and is '
                            'a link. pip will not automatically replace '
                            'links, please remove if replacement is '
                            'desired.',
                            target_item_dir
                        )
                        continue
                    if os.path.isdir(target_item_dir):
                        shutil.rmtree(target_item_dir)
                    else:
                        os.remove(target_item_dir)

                shutil.move(
                    os.path.join(lib_dir, item),
                    target_item_dir
                )
    def _determine_conflicts(self, to_install):
        # type: (List[InstallRequirement]) -> Optional[ConflictDetails]
        """Run the dependency-conflict checker; an internal failure of the
        checker must never abort the install, so it returns None instead."""
        try:
            return check_install_conflicts(to_install)
        except Exception:
            logger.exception(
                "Error while checking for conflicts. Please file an issue on "
                "pip's issue tracker: https://github.com/pypa/pip/issues/new"
            )
            return None
    def _warn_about_conflicts(self, conflict_details, new_resolver):
        # type: (ConflictDetails, bool) -> None
        """Log one critical message describing all missing and conflicting
        dependencies found by the checker; no-op if everything is consistent."""
        package_set, (missing, conflicting) = conflict_details
        if not missing and not conflicting:
            return

        parts = []  # type: List[str]
        if not new_resolver:
            # Legacy-resolver users get a heads-up about the resolver change.
            parts.append(
                "After October 2020 you may experience errors when installing "
                "or updating packages. This is because pip will change the "
                "way that it resolves dependency conflicts.\n"
            )
            parts.append(
                "We recommend you use --use-feature=2020-resolver to test "
                "your packages with the new resolver before it becomes the "
                "default.\n"
            )
        elif not today_is_later_than(year=2020, month=7, day=31):
            # NOTE: trailing newlines here are intentional
            parts.append(
                "Pip will install or upgrade your package(s) and its "
                "dependencies without taking into account other packages you "
                "already have installed. This may cause an uncaught "
                "dependency conflict.\n"
            )
            form_link = "https://forms.gle/cWKMoDs8sUVE29hz9"
            parts.append(
                "If you would like pip to take your other packages into "
                "account, please tell us here: {}\n".format(form_link)
            )

        # NOTE: There is some duplication here, with commands/check.py
        for project_name in missing:
            version = package_set[project_name][0]
            for dependency in missing[project_name]:
                message = (
                    "{name} {version} requires {requirement}, "
                    "which is not installed."
                ).format(
                    name=project_name,
                    version=version,
                    requirement=dependency[1],
                )
                parts.append(message)

        for project_name in conflicting:
            version = package_set[project_name][0]
            for dep_name, dep_version, req in conflicting[project_name]:
                message = (
                    "{name} {version} requires {requirement}, but you'll have "
                    "{dep_name} {dep_version} which is incompatible."
                ).format(
                    name=project_name,
                    version=version,
                    requirement=req,
                    dep_name=dep_name,
                    dep_version=dep_version,
                )
                parts.append(message)

        logger.critical("\n".join(parts))
def get_lib_location_guesses(
        user=False,  # type: bool
        home=None,  # type: Optional[str]
        root=None,  # type: Optional[str]
        isolated=False,  # type: bool
        prefix=None  # type: Optional[str]
):
    # type:(...) -> List[str]
    """Return the purelib and platlib directories for the given scheme options."""
    scheme = distutils_scheme(
        '', user=user, home=home, root=root, isolated=isolated, prefix=prefix,
    )
    return [scheme['purelib'], scheme['platlib']]
def site_packages_writable(root, isolated):
    # type: (Optional[str], bool) -> bool
    """True when every candidate site-packages directory is writable."""
    candidates = set(get_lib_location_guesses(root=root, isolated=isolated))
    return all(test_writable_dir(directory) for directory in candidates)
def decide_user_install(
        use_user_site,  # type: Optional[bool]
        prefix_path=None,  # type: Optional[str]
        target_dir=None,  # type: Optional[str]
        root_path=None,  # type: Optional[str]
        isolated_mode=False,  # type: bool
):
    # type: (...) -> bool
    """Determine whether to do a user install based on the input options.

    If use_user_site is False, no additional checks are done.
    If use_user_site is True, it is checked for compatibility with other
    options.
    If use_user_site is None, the default behaviour depends on the environment,
    which is provided by the other arguments.
    """
    # In some cases (config from tox), use_user_site can be set to an integer
    # rather than a bool, which 'use_user_site is False' wouldn't catch.
    if (use_user_site is not None) and (not use_user_site):
        logger.debug("Non-user install by explicit request")
        return False

    if use_user_site:
        if prefix_path:
            raise CommandError(
                "Can not combine '--user' and '--prefix' as they imply "
                "different installation locations"
            )
        if virtualenv_no_global():
            raise InstallationError(
                "Can not perform a '--user' install. User site-packages "
                "are not visible in this virtualenv."
            )
        logger.debug("User install by explicit request")
        return True

    # If we are here, user installs have not been explicitly requested/avoided
    assert use_user_site is None

    # user install incompatible with --prefix/--target
    if prefix_path or target_dir:
        logger.debug("Non-user install due to --prefix or --target option")
        return False

    # If user installs are not enabled, choose a non-user install
    if not site.ENABLE_USER_SITE:
        logger.debug("Non-user install because user site-packages disabled")
        return False

    # If we have permission for a non-user install, do that,
    # otherwise do a user install.
    if site_packages_writable(root=root_path, isolated=isolated_mode):
        logger.debug("Non-user install because site-packages writeable")
        return False

    logger.info("Defaulting to user installation because normal site-packages "
                "is not writeable")
    return True
def reject_location_related_install_options(requirements, options):
    # type: (List[InstallRequirement], Optional[List[str]]) -> None
    """If any location-changing --install-option arguments were passed for
    requirements or on the command-line, then show a deprecation warning.
    """

    def format_options(option_names):
        # type: (Iterable[str]) -> List[str]
        return ["--{}".format(name.replace("_", "-")) for name in option_names]

    offenders = []

    # Per-requirement install options.
    for requirement in requirements:
        problematic = parse_distutils_args(requirement.install_options)
        if problematic:
            offenders.append(
                "{!r} from {}".format(
                    format_options(problematic.keys()), requirement
                )
            )

    # Command-line-wide install options.
    if options:
        problematic = parse_distutils_args(options)
        if problematic:
            offenders.append(
                "{!r} from command line".format(
                    format_options(problematic.keys())
                )
            )

    if not offenders:
        return

    raise CommandError(
        "Location-changing options found in --install-option: {}."
        " This is unsupported, use pip-level options like --user,"
        " --prefix, --root, and --target instead.".format(
            "; ".join(offenders)
        )
    )
def create_env_error_message(error, show_traceback, using_user_site):
    # type: (EnvironmentError, bool, bool) -> str
    """Format an error message for an EnvironmentError

    It may occur anytime during the execution of the install command.
    """
    parts = ["Could not install packages due to an EnvironmentError"]

    # Include the error text itself only when no traceback will be shown.
    if show_traceback:
        parts.append(".")
    else:
        parts.append(": ")
        parts.append(str(error))
    # Separate the error indication from the helper message (if any).
    parts[-1] += "\n"

    # Suggest useful actions to the user:
    # (1) using user site-packages or (2) verifying the permissions
    if error.errno == errno.EACCES:
        user_hint = "Consider using the `--user` option"
        permission_hint = "Check the permissions"

        if using_user_site:
            parts.append(permission_hint)
        else:
            parts.extend([user_hint, " or ", permission_hint.lower()])
        parts.append(".\n")

    return "".join(parts).strip() + "\n"
| 37.519133 | 79 | 0.587455 |
905fd07e934c1dc2a4d67a005c69a32637e38606 | 987 | py | Python | nuapp/src/parseCacheConfig.py | dasebe/robinhoodcache | c94801d833525b49e1f1e78e6ca2c13f1289f371 | [
"BSD-3-Clause"
] | 76 | 2018-10-08T18:24:04.000Z | 2022-03-22T03:39:24.000Z | nuapp/src/parseCacheConfig.py | utkarsh39/robinhoodcache | be7c035d1005088ac489a24f73925df20ce9eadb | [
"BSD-3-Clause"
] | 3 | 2018-10-26T17:55:07.000Z | 2021-09-22T08:59:23.000Z | nuapp/src/parseCacheConfig.py | utkarsh39/robinhoodcache | be7c035d1005088ac489a24f73925df20ce9eadb | [
"BSD-3-Clause"
] | 8 | 2019-09-17T23:02:13.000Z | 2021-05-26T12:33:16.000Z | #!/usr/bin/python2
import json
import sys
import select
from subprocess import Popen
import subprocess
with open(sys.argv[1]) as df:
data = json.load(df)
logdir=sys.argv[2]
eth0 = sys.argv[3]
ports = set([])
procs = {}
for i in data:
if i["CachePort"] not in ports:
startcmd = ["memcached","-v", "-c", "30000", "-l", eth0, "-u", "root", "-p",str(i["CachePort"]),"-I","8388608","-m",str(i["CacheSize"]),"-R","100"] #,"-t","8"
print startcmd
stdout_log = open("{0}/{1}.stdout".format(logdir, i["CachePort"]), 'w')
stderr_log = open("{0}/{1}.stderr".format(logdir, i["CachePort"]), 'w')
p = Popen(startcmd, stdout=stdout_log, stderr=stderr_log)
ports.add(i["CachePort"])
# procs[p.stderr] = i["CachePort"]
# procs[p.stdout] = i["CachePort"]
#pfds = procs.keys()
#while True:
# rfds, wfds, xfds = select.select(pfds, [], [])
# for rfd in rfds:
# print procs[rfd], rfd.readline().strip()
p.communicate()
| 34.034483 | 167 | 0.587639 |
609348fe29b46fa87eb292f72b62f8a3e1a6985e | 4,096 | py | Python | bench/widetree.py | crs4/PyTables | 02d23d41f714122fd5fd4f7e1063c0b31d1a774b | [
"BSD-3-Clause"
] | 1 | 2020-12-27T13:53:00.000Z | 2020-12-27T13:53:00.000Z | bench/widetree.py | mrgloom/PyTables | c30c6f40cd3d5996ee711d5685328085f3569cfc | [
"BSD-3-Clause"
] | null | null | null | bench/widetree.py | mrgloom/PyTables | c30c6f40cd3d5996ee711d5685328085f3569cfc | [
"BSD-3-Clause"
] | null | null | null | import hotshot, hotshot.stats
import unittest
import os
import tempfile
from tables import *
verbose = 0
class WideTreeTestCase(unittest.TestCase):
    """Checks for maximum number of childs for a Group."""

    def test00_Leafs(self):
        """Checking creation of large number of leafs (1024) per group

        Variable 'maxchilds' controls this check. PyTables support
        up to 4096 childs per group, but this would take too much
        memory (up to 64 MB) for testing purposes (may be we can add a
        test for big platforms). A 1024 childs run takes up to 30 MB.
        A 512 childs test takes around 25 MB.
        """
        import time
        maxchilds = 1000
        if verbose:
            print '\n', '-=' * 30
            print "Running %s.test00_wideTree..." % \
                  self.__class__.__name__
            print "Maximum number of childs tested :", maxchilds
        # Open a new empty HDF5 file
        #file = tempfile.mktemp(".h5")
        file = "test_widetree.h5"
        fileh = open_file(file, mode = "w")
        if verbose:
            print "Children writing progress: ",
        for child in range(maxchilds):
            if verbose:
                print "%3d," % (child),
            a = [1, 1]
            # One group per child, each holding a tiny two-element array.
            fileh.create_group(fileh.root, 'group' + str(child),
                               "child: %d" % child)
            fileh.create_array("/group" + str(child), 'array' + str(child),
                               a, "child: %d" % child)
        if verbose:
            print
        # Close the file
        fileh.close()

        t1 = time.time()
        # Open the previous HDF5 file in read-only mode; how long this
        # takes on a very wide file is the quantity being benchmarked.
        fileh = open_file(file, mode = "r")
        print "\nTime spent opening a file with %d groups + %d arrays: %s s" % \
              (maxchilds, maxchilds, time.time()-t1)
        if verbose:
            print "\nChildren reading progress: ",
        # Close the file
        fileh.close()
        # Then, delete the file
        #os.remove(file)

    def test01_wideTree(self):
        """Checking creation of large number of groups (1024) per group

        Variable 'maxchilds' controls this check. PyTables support
        up to 4096 childs per group, but this would take too much
        memory (up to 64 MB) for testing purposes (may be we can add a
        test for big platforms). A 1024 childs run takes up to 30 MB.
        A 512 childs test takes around 25 MB.
        """
        import time
        maxchilds = 1000
        if verbose:
            print '\n', '-=' * 30
            print "Running %s.test00_wideTree..." % \
                  self.__class__.__name__
            print "Maximum number of childs tested :", maxchilds
        # Open a new empty HDF5 file
        file = tempfile.mktemp(".h5")
        #file = "test_widetree.h5"
        fileh = open_file(file, mode = "w")
        if verbose:
            print "Children writing progress: ",
        for child in range(maxchilds):
            if verbose:
                print "%3d," % (child),
            # Groups only here -- no arrays -- to isolate group overhead.
            fileh.create_group(fileh.root, 'group' + str(child),
                               "child: %d" % child)
        if verbose:
            print
        # Close the file
        fileh.close()

        t1 = time.time()
        # Open the previous HDF5 file in read-only mode
        fileh = open_file(file, mode = "r")
        print "\nTime spent opening a file with %d groups: %s s" % \
              (maxchilds, time.time()-t1)
        # Close the file
        fileh.close()
        # Then, delete the file
        os.remove(file)
#----------------------------------------------------------------------
def suite():
    """Assemble the test suite containing the wide-tree benchmark cases."""
    bench_suite = unittest.TestSuite()
    bench_suite.addTest(unittest.makeSuite(WideTreeTestCase))
    return bench_suite
if __name__ == '__main__':
    # Profile the benchmark run with hotshot and print the 20 most
    # expensive entries, sorted by internal time then call count.
    prof = hotshot.Profile("widetree.prof")
    # NOTE(review): unittest.main(...) is *called* here, so the tests run
    # (and unittest.main normally exits the interpreter) before runcall
    # can profile anything. The intent was probably
    # prof.runcall(unittest.main, defaultTest='suite') -- confirm before
    # trusting the profile output.
    benchtime, stones = prof.runcall(unittest.main(defaultTest='suite'))
    prof.close()
    stats = hotshot.stats.load("widetree.prof")
    stats.strip_dirs()
    stats.sort_stats('time', 'calls')
    stats.print_stats(20)
| 32 | 80 | 0.551514 |
02ab8357387b889506a19a52bfb3f7781a03b30d | 497 | py | Python | tests/src/year2021/test_day13a.py | lancelote/advent_of_code | 06dda6ca034bc1e86addee7798bb9b2a34ff565b | [
"Unlicense"
] | 10 | 2017-12-11T17:54:52.000Z | 2021-12-09T20:16:30.000Z | tests/src/year2021/test_day13a.py | lancelote/advent_of_code | 06dda6ca034bc1e86addee7798bb9b2a34ff565b | [
"Unlicense"
] | 260 | 2015-12-09T11:03:03.000Z | 2021-12-12T14:32:23.000Z | tests/src/year2021/test_day13a.py | lancelote/advent_of_code | 06dda6ca034bc1e86addee7798bb9b2a34ff565b | [
"Unlicense"
] | null | null | null | """2021 - Day 13 Part 1: Transparent Origami."""
from textwrap import dedent
from src.year2021.day13a import solve
def test_solve():
    """Folding the sample sheet once leaves 17 visible dots."""
    puzzle = dedent(
        """
        6,10
        0,14
        9,10
        0,3
        10,4
        4,11
        6,0
        6,12
        4,1
        0,13
        10,12
        3,4
        3,0
        8,4
        1,10
        2,14
        8,10
        9,0

        fold along y=7
        fold along x=5
        """
    ).strip()

    assert solve(puzzle) == 17
| 14.617647 | 48 | 0.408451 |
20f8b2be4fb3f60eaa2045a41099a27aee5edaf8 | 5,440 | py | Python | quadis/cli.py | BottomNotch/Quadis | 66d8d5d302ccd8691a507fcbd35b4d3097411457 | [
"BSD-3-Clause"
] | null | null | null | quadis/cli.py | BottomNotch/Quadis | 66d8d5d302ccd8691a507fcbd35b4d3097411457 | [
"BSD-3-Clause"
] | null | null | null | quadis/cli.py | BottomNotch/Quadis | 66d8d5d302ccd8691a507fcbd35b4d3097411457 | [
"BSD-3-Clause"
] | null | null | null | '''the click based CLI'''
from datetime import datetime
from quadis import main
import click
# pass a dictionary to sub-commands
pass_config = click.make_pass_decorator(dict, ensure=True)
def set_date():
    '''prompts user to set the last used date of the card'''
    month = click.prompt('please enter a month', type=int)
    day = click.prompt('please enter a day of month', type=int)
    year = click.prompt('please enter a year', type=int)
    # Dates are stored in the csv in m/d/Y form.
    return "{0}/{1}/{2}".format(month, day, year)
@click.group()
@click.argument('csv_file')
@click.option('-n', '--card-num',
              help='the card you want to check/add/change')
@pass_config
def cli(config, csv_file, card_num):
    '''CSV_FILE is required, it should be the file used to hold all the
    card information'''
    config['csv_file'] = csv_file
    if card_num is not None:
        config['card_num'] = card_num
    else:
        # No option given on the command line: fall back to a prompt so a
        # card scanner can type the number in.
        config['card_num'] = click.prompt(
            'Please scan card or enter number manually', type=str)
@cli.command('check_card')
@pass_config
def check_card(config):
    '''check a card to make sure it is on the csv file and has not been
    used today'''
    update = click.confirm('update card?')
    card_state = main.check_card(config['csv_file'], config['card_num'],
                                 update_card=update)
    # Map each state code returned by main.check_card to its message.
    messages = {
        0: 'card not found, please add the card using the add_card command',
        1: 'card found and not used today',
        2: 'card used today',
    }
    message = messages.get(card_state)
    if message is not None:
        click.echo(message)
@cli.command()
@click.option('-N', '--name', help='the name of the card holder')
@click.option('--used-today/--not-used-today', default=False,
              help='if the card has been used today')
@pass_config
def add_card(config, name, used_today):
    '''add the specified card to the csv file'''
    card_dict = {'card_num': config['card_num'], 'last_used_date': 'N/A',
                 'name': name}
    if name is None:
        card_dict['name'] = click.prompt('please enter a name', type=str)
    if used_today or click.confirm('used today?'):
        card_dict['last_used_date'] = main.date_today.strftime('%m/%d/%Y')
    else:
        if click.confirm('set date?'):
            card_dict['last_used_date'] = set_date()
    # Prompt for any extra csv columns beyond the three standard ones.
    # NOTE: membership test replaces the old "item is not '...'" chain --
    # identity comparison with string literals is unreliable.
    for item in main.csv_layout:
        if item not in ('last_used_date', 'name', 'card_num'):
            card_dict[item] = click.prompt(
                'to what would you like to set {0} for this card?'
                .format(item), type=str)
    result = main.add_card(config['csv_file'], card_dict)
    card_dict_new = main.card_info(config['csv_file'], config['card_num'])
    # Compare with ==, not "is": identity checks against int literals are a
    # CPython interning detail, not guaranteed semantics.
    if result == 0:
        click.echo('card number {0} successfully added on row {1}'
                   .format(config['card_num'], card_dict_new['row_num']))
    if result == 1:
        click.echo('card number {0} already exists on row {1}'
                   .format(config['card_num'], card_dict_new['row_num']))
    if result == 2:
        click.echo('{0} is not a valid date format'
                   .format(card_dict['last_used_date']))
@cli.command()
@pass_config
def remove_card(config):
    '''remove the specified card'''
    returned_val = main.remove_card(config['csv_file'], config['card_num'])
    # Compare with ==: "is" against int literals relies on CPython interning.
    if returned_val == 0:
        click.echo('card number {0} removed'.format(config['card_num']))
    elif returned_val == 1:
        click.echo('card number {0} not found'.format(config['card_num']))
@cli.command()
@pass_config
def change_card(config):
    '''interactively change the stored details of the specified card'''
    result = None
    card_info = main.card_info(config['csv_file'], config['card_num'])
    # main.card_info() returns 1 when the card is unknown; use != since
    # "is" on int literals is not guaranteed to work.
    if card_info != 1:
        if click.confirm('change number?'):
            card_info['card_num'] = click.prompt(
                'please enter the new card number', type=str)
        if click.confirm('change name?'):
            card_info['name'] = click.prompt('please enter the new name',
                                             type=str)
        if click.confirm('change last used date?'):
            if click.confirm('change to today?'):
                card_info['last_used_date'] = \
                    main.date_today.strftime('%m/%d/%Y')
            else:
                card_info['last_used_date'] = set_date()
        for item in main.csv_layout:
            # Short-circuit so only non-standard columns are asked about.
            # The previous all([...]) form compared strings with "is" and
            # evaluated the confirm prompt for every column.
            if (item not in ('last_used_date', 'name', 'card_num') and
                    click.confirm('change {0} for this card?'.format(item))):
                card_info[item] = click.prompt(
                    'to what would you like to set {0} for this card?'
                    .format(item), type=str)
        result = main.change_card(config['csv_file'], config['card_num'],
                                  card_info)
        # Compare with ==, not "is" (int-literal identity is unreliable).
        if result == 0:
            click.echo('card information changed successfully')
        elif result == 1:
            click.echo('card number {0} not found'
                       .format(config['card_num']))
        elif result == 2:
            click.echo('card number {0} already exists'
                       .format(card_info['card_num']))
        elif result == 3:
            click.echo('{0} is an invalid date'
                       .format(card_info['last_used_date']))
    else:
        click.echo('card does not exist')
| 37.260274 | 93 | 0.604044 |
29a6420fb34b15b91af8ba84f88df50f48df2ec7 | 8,709 | py | Python | bigml/tests/world.py | jaor/python | 35f69d2f3121f1b3dde43495cf145d4992796ad5 | [
"Apache-2.0"
] | 137 | 2015-01-12T06:04:10.000Z | 2022-03-06T21:00:04.000Z | bigml/tests/world.py | jaor/python | 35f69d2f3121f1b3dde43495cf145d4992796ad5 | [
"Apache-2.0"
] | 78 | 2015-01-13T18:28:51.000Z | 2022-03-04T19:18:28.000Z | bigml/tests/world.py | jaor/python | 35f69d2f3121f1b3dde43495cf145d4992796ad5 | [
"Apache-2.0"
] | 144 | 2015-01-16T06:13:33.000Z | 2022-03-29T17:53:16.000Z | # -*- coding: utf-8 -*-
#
# Copyright 2015-2021 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Mimic World lettuce object
"""
import os
import shutil
import time
import pkg_resources
import datetime
import pprint
import json
from bigml.api import BigML
from bigml.api import HTTP_OK, HTTP_NO_CONTENT, HTTP_UNAUTHORIZED
from bigml.constants import IRREGULAR_PLURALS, RENAMED_RESOURCES
from bigml.api_handlers.externalconnectorhandler import get_env_connection_info
from bigml.util import get_exponential_wait
from nose.tools import assert_less
# Maximum number of times a resource delete is retried before giving up.
MAX_RETRIES = 10

# Every resource type the bindings can create; used to build the per-type
# id lists on the World object and to drive cleanup/storage at teardown.
RESOURCE_TYPES = [
    'cluster',
    'fusion',
    'optiml',
    'source',
    'dataset',
    'prediction',
    'evaluation',
    'ensemble',
    'batchprediction',
    'centroid',
    'batchcentroid',
    'anomaly',
    'anomalyscore',
    'batchanomalyscore',
    'project',
    'sample',
    'correlation',
    'statisticaltest',
    'logisticregression',
    'model',
    'deepnet',
    'association',
    'associationset',
    'configuration',
    'topicmodel',
    'topicdistribution',
    'timeseries',
    'forecast',
    'pca',
    'projection',
    'batchprojection',
    'linearregression',
    'script',
    'execution',
    'library',
    'externalconnector'
]

# Plural forms that cannot be produced by simply appending an "s".
irregular_plurals = {}
irregular_plurals.update(IRREGULAR_PLURALS)
irregular_plurals.update({"timeseries": "time_series_set"})
def plural(resource_type):
    """Return the plural form of *resource_type*."""
    default = "%ss" % resource_type
    return irregular_plurals.get(resource_type, default)
def show_doc(self, examples=None):
    """Print the name and docstring of *self*, plus optional example rows."""
    print("%s:\n%s" % (self.__name__, self.__doc__))
    if examples:
        # One "|"-separated row per example, each prefixed with " |".
        rows = ["|".join(str(item) for item in example)
                for example in examples]
        print(" |%s" % "\n |".join(rows))
class World(object):
    """Shared mutable state for the bindings test suite.

    Tracks the ids of every resource created during a run so the teardown
    hooks can delete them (or store them for debugging) afterwards.
    """

    def __init__(self):
        self.USERNAME = None
        self.API_KEY = None
        self.api = None
        self.debug = False
        try:
            # Parse as int first: bool("0") is truthy, and int() is what can
            # actually raise the ValueError guarded below.
            self.debug = bool(int(os.environ.get('BIGML_DEBUG', 0)))
        except ValueError:
            pass
        self.short_debug = False
        try:
            self.short_debug = bool(int(os.environ.get('BIGML_SHORT_DEBUG', 0)))
        except ValueError:
            pass
        self.clear()
        self.dataset_ids = []
        self.fields_properties_dict = {}
        self.counters = {}
        self.test_project_name = "Test: python bindings %s" % \
            datetime.datetime.now()
        self.project_id = None
        self.print_connection_info()
        self.delta = int(os.environ.get('BIGML_DELTA', '1'))
        self.errors = []

    def print_connection_info(self):
        """Build the API connection from environment variables and print it."""
        self.USERNAME = os.environ.get('BIGML_USERNAME')
        self.API_KEY = os.environ.get('BIGML_API_KEY')
        self.EXTERNAL_CONN = get_env_connection_info()
        if self.USERNAME is None or self.API_KEY is None:
            assert False, ("Tests use the BIGML_USERNAME and BIGML_API_KEY"
                           " environment variables to authenticate the"
                           " connection, but they seem to be unset. Please,"
                           "set them before testing.")
        # getattr avoids the NameError the previous code hit whenever
        # self.api already exposed an "organization" attribute.
        self.api = BigML(self.USERNAME, self.API_KEY, debug=self.debug,
                         short_debug=self.short_debug,
                         organization=getattr(self.api, "organization", None),
                         storage=(None if not (self.debug or self.short_debug)
                                  else "./debug_storage"))
        print("----------------------------------------------------------")
        print(self.api.connection_info())
        print(self.external_connection_info())
        print("----------------------------------------------------------")

    def external_connection_info(self):
        """Printable string: The information used to connect to a external
        data source
        """
        info = "External data connection config:\n%s" % \
            pprint.pformat(self.EXTERNAL_CONN, indent=4)
        return info

    def clear(self):
        """Clears the stored resources' ids
        """
        for resource_type in RESOURCE_TYPES:
            setattr(self, plural(resource_type), [])
            setattr(self, RENAMED_RESOURCES.get(resource_type,
                                                resource_type), None)

    def delete_resources(self):
        """Deletes the created objects
        """
        for resource_type in RESOURCE_TYPES:
            object_list = set(getattr(self, plural(resource_type)))
            if object_list:
                print("Deleting %s %s" % (len(object_list),
                                          plural(resource_type)))
                delete_method = self.api.deleters[resource_type]
                for obj_id in object_list:
                    counter = 0
                    result = delete_method(obj_id)
                    # Retry deletes that did not come back as NO_CONTENT.
                    while (result['code'] != HTTP_NO_CONTENT and
                           counter < MAX_RETRIES):
                        print("Delete failed for %s. Retrying" % obj_id)
                        time.sleep(3 * self.delta)
                        counter += 1
                        result = delete_method(obj_id)
                    if counter == MAX_RETRIES:
                        print("Retries to delete the created resources are"
                              " exhausted. Failed to delete.")
        # NOTE: reads the module-level ``world`` singleton, mirroring the
        # original behavior.
        if world.errors:
            print("Failed resources: \n\n")
            for resource in world.errors:
                print(json.dumps(resource["status"], indent=4))

    def store_resources(self):
        """Stores the created objects
        """
        for resource_type in RESOURCE_TYPES:
            object_list = set(getattr(self, plural(resource_type)))
            if object_list:
                print("Storing %s %s" % (len(object_list),
                                         plural(resource_type)))
                store_method = self.api.getters[resource_type]
                for obj_id in object_list:
                    counter = 0
                    result = store_method(obj_id)
                    self.api.ok(result)
# Module-level singleton shared by every test module in the suite.
world = World()
def res_filename(file):
    """Resolve *file* relative to the bigml package root directory."""
    relative = "../%s" % file
    return pkg_resources.resource_filename('bigml', relative)
def setup_module():
    """Operations to be performed before each module
    """
    if world.project_id is None:
        # Lazily create the shared test project on first use.
        response = world.api.create_project(
            {"name": world.test_project_name})
        world.project_id = response['resource']
    world.clear()
def teardown_module():
    """Operations to be performed after each module
    """
    print("Teardown module ---------------------------")
    if world.debug or world.short_debug:
        # Debug runs keep everything around for inspection.
        world.store_resources()
        return
    if os.path.exists('./tmp'):
        shutil.rmtree('./tmp')
    world.delete_resources()
    project_stats = world.api.get_project(
        world.project_id)['object']['stats']
    for resource_type, value in list(project_stats.items()):
        if value['count'] != 0:
            print("WARNING: Increment in %s: %s" % (resource_type, value))
    world.api.delete_project(world.project_id)
    world.project_id = None
def teardown_class():
    """Operations to be performed after each class
    """
    world.dataset_ids = []
    # Drop any cached local models built by the class.
    for attr in ("local_ensemble", "local_model", "local_deepnet"):
        setattr(world, attr, None)
def logged_wait(start, delta, count, res_description):
    """Comparing the elapsed time to the expected delta and waiting for
    the next sleep period.
    """
    wait_time = min(get_exponential_wait(delta / 100.0, count), delta)
    print("Sleeping %s" % wait_time)
    time.sleep(wait_time)
    elapsed = (datetime.datetime.utcnow() - start).seconds
    # Warn once more than half the allowed window has been spent waiting.
    if elapsed > delta / 2.0:
        print("%s seconds waiting for %s" % (elapsed, res_description))
    assert_less(elapsed, delta)
| 32.136531 | 80 | 0.578827 |
0b58494d4886044e6d1c08e0d8f3b37ab0ac4498 | 93 | py | Python | Competitive Programming/ICPC/2018/Sub-Regionals/D (Desvendando Monty Hall)/D.py | NelsonGomesNeto/ProgramC | e743b1b869f58f7f3022d18bac00c5e0b078562e | [
"MIT"
] | 3 | 2018-12-18T13:39:42.000Z | 2021-06-23T18:05:18.000Z | Competitive Programming/ICPC/2018/Sub-Regionals/D (Desvendando Monty Hall)/D.py | NelsonGomesNeto/ProgramC | e743b1b869f58f7f3022d18bac00c5e0b078562e | [
"MIT"
] | 1 | 2018-11-02T21:32:40.000Z | 2018-11-02T22:47:12.000Z | Competitive Programming/ICPC/2018/Sub-Regionals/D (Desvendando Monty Hall)/D.py | NelsonGomesNeto/ProgramC | e743b1b869f58f7f3022d18bac00c5e0b078562e | [
"MIT"
] | 6 | 2018-10-27T14:07:52.000Z | 2019-11-14T13:49:29.000Z | n, ans = int(input()), 0
for i in range(n):
a = int(input())
ans += a != 1
print(ans) | 18.6 | 24 | 0.505376 |
6c985dea217b36c51724c357a88094981d1d015f | 2,466 | py | Python | scape/__init__.py | ska-sa/scape | 0909436b1d5ab0b068106e0479e95a3089e1c840 | [
"BSD-3-Clause"
] | null | null | null | scape/__init__.py | ska-sa/scape | 0909436b1d5ab0b068106e0479e95a3089e1c840 | [
"BSD-3-Clause"
] | 1 | 2021-06-21T23:30:52.000Z | 2021-06-22T12:49:30.000Z | scape/__init__.py | ska-sa/scape | 0909436b1d5ab0b068106e0479e95a3089e1c840 | [
"BSD-3-Clause"
] | null | null | null | """The Single-dish Continuum Analysis PackagE."""
import logging as _logging
# Setup library logger and add a print-like handler used when no logging is configured
class _NoConfigFilter(_logging.Filter):
    """Filter that passes records only while root logging is unconfigured."""

    def filter(self, record):
        # Allow the record through only when no top-level handler exists.
        return 0 if _logging.root.handlers else 1
# Handler that mimics plain print output; its filter silences it as soon as
# the application configures logging itself.
_no_config_handler = _logging.StreamHandler()
_no_config_handler.setFormatter(_logging.Formatter(_logging.BASIC_FORMAT))
_no_config_handler.addFilter(_NoConfigFilter())
# Library-level logger used by the rest of the package.
logger = _logging.getLogger(__name__)
logger.addHandler(_no_config_handler)
logger.setLevel(_logging.DEBUG)
# Most operations are directed through the data set
from .dataset import DataSet # noqa: E402,F401
from .compoundscan import CorrelatorConfig, CompoundScan # noqa: F401
from .scan import Scan # noqa: F401
def _module_found(name, message):
    """Check whether module *name* imports, otherwise log a warning *message*."""
    try:
        __import__(name)
        return True
    except ImportError:
        # Logger.warn is a deprecated alias of Logger.warning.
        logger.warning(message)
        return False
# Check if matplotlib is present, otherwise skip plotting routines.
# The plotting API below is re-exported only when matplotlib imports cleanly.
if _module_found('matplotlib', 'Matplotlib was not found - plotting will be disabled'):
    from .plots_canned import (plot_xyz, extract_xyz_data,  # noqa: F401
                               extract_scan_data,  # noqa: F401
                               plot_spectrum, plot_waterfall,  # noqa: F401
                               plot_spectrogram, plot_fringes,  # noqa: F401
                               plot_compound_scan_in_time,  # noqa: F401
                               plot_compound_scan_on_target,  # noqa: F401
                               plot_data_set_in_mount_space,  # noqa: F401
                               plot_measured_beam_pattern)  # noqa: F401

# Check if astropy.io.fits is present, otherwise skip FITS creation routines
if _module_found('astropy.io.fits',
                 'astropy.io.fits was not found - FITS creation will be disabled'):
    from .plots_basic import save_fits_image  # noqa: F401
# BEGIN VERSION CHECK
# Get package version when locally imported from repo or via -e develop install
try:
    import katversion as _katversion
except ImportError:
    # katversion unavailable: fall back to a timestamped placeholder version.
    import time as _time
    __version__ = "0.0+unknown.{}".format(_time.strftime('%Y%m%d%H%M'))
else:
    __version__ = _katversion.get_version(__path__[0])  # noqa: F821
# END VERSION CHECK
7fd0c4c91dfd910e13aef198fd2a3a760fc68a89 | 2,881 | py | Python | simprod-scripts/resources/scripts/startingmuon.py | hschwane/offline_production | e14a6493782f613b8bbe64217559765d5213dc1e | [
"MIT"
] | 1 | 2020-12-24T22:00:01.000Z | 2020-12-24T22:00:01.000Z | simprod-scripts/resources/scripts/startingmuon.py | hschwane/offline_production | e14a6493782f613b8bbe64217559765d5213dc1e | [
"MIT"
] | null | null | null | simprod-scripts/resources/scripts/startingmuon.py | hschwane/offline_production | e14a6493782f613b8bbe64217559765d5213dc1e | [
"MIT"
] | 3 | 2020-07-17T09:20:29.000Z | 2021-03-30T16:44:18.000Z | #!/usr/bin/env python
"""
IceProd Module for Simple Muon Production
"""
import os,sys
from os.path import expandvars
import logging
import argparse
from I3Tray import I3Units, I3Tray
from icecube import icetray, dataio, dataclasses
from icecube.simprod.util import simprodtray, arguments
from icecube.simprod.util.simprodtray import RunI3Tray
import random
from math import pi
def add_args(parser):
    """
    Args:
        parser (argparse.ArgumentParser): the command-line parser
    """
    # Common options shared by all simprod scripts.
    arguments.add_outputfile(parser)
    arguments.add_seed(parser)
    arguments.add_nevents(parser)
    # Energy range for the injected muon spectrum.
    energy_bounds = (
        ("--FromEnergy", "fromenergy", 1. * I3Units.TeV, 'Minimum energy'),
        ("--ToEnergy", "toenergy", 10. * I3Units.PeV, 'Maximum energy'),
    )
    for flag, dest, default, help_text in energy_bounds:
        parser.add_argument(flag, dest=dest, default=default, type=float,
                            required=False, help=help_text)
def configure_tray(tray, params, stats, logger):
    """
    Configures the I3Tray instance: adds modules, segments, services, etc.

    Args:
        tray (I3Tray): the IceProd tray instance
        params (dict): command-line arguments (and default values)
            referenced as dict entries; see add_args()
        stats (dict): dictionary that collects run-time stats
        logger (logging.Logger): the logger for this script
    """
    tray.AddModule("I3InfiniteSource", Stream=icetray.I3Frame.DAQ)

    random.seed(params['seed'])

    def Generator(frame, FromEnergy=1 * I3Units.TeV, ToEnergy=1 * I3Units.TeV):
        # Build one muon at the detector centre with a random energy and
        # direction, then attach it as the primary of a fresh MC tree.
        # NOTE: random.uniform call order (energy, zenith, azimuth) is kept
        # so the RNG stream matches the original implementation.
        muon = dataclasses.I3Particle()
        muon.energy = random.uniform(FromEnergy, ToEnergy)
        muon.pos = dataclasses.I3Position(0, 0, 0)
        muon.dir = dataclasses.I3Direction(random.uniform(0., pi),
                                           random.uniform(0., 2 * pi))
        muon.length = 500 * I3Units.m
        muon.type = dataclasses.I3Particle.ParticleType.MuMinus
        muon.location_type = dataclasses.I3Particle.LocationType.InIce
        muon.time = 0. * I3Units.ns
        tree = dataclasses.I3MCTree()
        tree.add_primary(muon)
        frame["I3MCTree_preMuonProp"] = tree

    tray.Add(Generator,
             FromEnergy=params['fromenergy'],
             ToEnergy=params['toenergy'],
             Streams=[icetray.I3Frame.DAQ])
def main():
    """
    Injects a muon at the center of the detector.
    """
    # Parse command-line parameters into a plain dict.
    parser = argparse.ArgumentParser(description="StartingMuon script")
    add_args(parser)
    params = vars(parser.parse_args())

    # Run the tray; RunI3Tray writes the summary itself.
    RunI3Tray(params, configure_tray, "StartingMuon",
              outputfile=params['outputfile'],
              executionmaxcount=params['nevents'])
# Script entry point.
if __name__ == "__main__":
    main()
| 30.326316 | 79 | 0.633114 |
19a99687e41e4b176f628ad780d7a397e470913c | 11,942 | py | Python | nova/compute/flavors.py | NeCTAR-RC/nova | 6176fe1ca5ab080cb90fa174e952949788853234 | [
"Apache-2.0"
] | 1 | 2015-02-26T03:23:49.000Z | 2015-02-26T03:23:49.000Z | nova/compute/flavors.py | NeCTAR-RC/nova | 6176fe1ca5ab080cb90fa174e952949788853234 | [
"Apache-2.0"
] | null | null | null | nova/compute/flavors.py | NeCTAR-RC/nova | 6176fe1ca5ab080cb90fa174e952949788853234 | [
"Apache-2.0"
] | 2 | 2015-06-17T13:24:55.000Z | 2015-10-27T05:28:38.000Z | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2011 Ken Pepple
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Built-in instance properties."""
import re
import uuid
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import strutils
import six
from nova.api.validation import parameter_types
from nova.cells import rpcapi as cells_rpcapi
from nova import context
from nova import db
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova import objects
from nova import utils
# Configuration options exposed by this module.
flavor_opts = [
    cfg.StrOpt('default_flavor',
               default='m1.small',
               help='Default flavor to use for the EC2 API only. The Nova API '
                    'does not support a default flavor.'),
]

CONF = cfg.CONF
CONF.register_opts(flavor_opts)

LOG = logging.getLogger(__name__)

# NOTE(luisg): Flavor names can include non-ascii characters so that users can
# create flavor names in locales that use them, however flavor IDs are limited
# to ascii characters.
VALID_ID_REGEX = re.compile("^[\w\.\- ]*$")

# NOTE(dosaboy): This is supposed to represent the maximum value that we can
# place into a SQL single precision float so that we can check whether values
# are oversize. Postgres and MySQL both define this as their max whereas Sqlite
# uses dynamic typing so this would not apply. Different dbs react in different
# ways to oversize values e.g. postgres will raise an exception while mysql
# will round off the value. Nevertheless we may still want to know prior to
# insert whether the value is oversize.
SQL_SP_FLOAT_MAX = 3.40282e+38

# Validate extra specs key names.
VALID_EXTRASPEC_NAME_REGEX = re.compile(r"[\w\.\- :]+$", re.UNICODE)
def _int_or_none(val):
    """Return *val* coerced to int, or None when *val* is None."""
    return int(val) if val is not None else None
# Mapping of flavor fields stored in an instance's system_metadata to the
# converter applied when reading them back (values are stored as strings).
system_metadata_flavor_props = {
    'id': int,
    'name': str,
    'memory_mb': int,
    'vcpus': int,
    'root_gb': int,
    'ephemeral_gb': int,
    'flavorid': str,
    'swap': int,
    'rxtx_factor': float,
    'vcpu_weight': _int_or_none,
}

# Extra-spec key prefixes that are mirrored into system_metadata as well.
system_metadata_flavor_extra_props = [
    'hw:numa_cpus.', 'hw:numa_mem.',
]
def create(name, memory, vcpus, root_gb, ephemeral_gb=0, flavorid=None,
           swap=0, rxtx_factor=1.0, is_public=True):
    """Creates flavors.

    :param name: flavor name; a "cell@name" prefix routes creation to that
        cell when cells are enabled
    :param memory: RAM in MB (positive integer)
    :param vcpus: number of virtual CPUs (positive integer)
    :param root_gb: root disk size in GB (non-negative integer)
    :param ephemeral_gb: ephemeral disk size in GB (non-negative integer)
    :param flavorid: public flavor id; a UUID is generated when omitted
    :param swap: swap size in MB (non-negative integer)
    :param rxtx_factor: positive float network bandwidth factor
    :param is_public: whether the flavor is visible to all projects
    :returns: the created Flavor
    :raises InvalidInput: on any validation failure
    """
    cell = None
    if CONF.cells.enable and '@' in name:
        cell, name = name.split('@')
    if not flavorid:
        flavorid = uuid.uuid4()
    kwargs = {
        'memory_mb': memory,
        'vcpus': vcpus,
        'root_gb': root_gb,
        'ephemeral_gb': ephemeral_gb,
        'swap': swap,
        'rxtx_factor': rxtx_factor,
    }
    if isinstance(name, six.string_types):
        name = name.strip()
    # ensure name do not exceed 255 characters
    utils.check_string_length(name, 'name', min_length=1, max_length=255)

    # ensure name does not contain any special characters
    valid_name = parameter_types.valid_name_regex_obj.search(name)
    if not valid_name:
        msg = _("Flavor names can only contain printable characters "
                "and horizontal spaces.")
        raise exception.InvalidInput(reason=msg)

    # NOTE(vish): Internally, flavorid is stored as a string but it comes
    # in through json as an integer, so we convert it here.
    flavorid = six.text_type(flavorid)

    # ensure leading/trailing whitespaces not present.
    if flavorid.strip() != flavorid:
        msg = _("id cannot contain leading and/or trailing whitespace(s)")
        raise exception.InvalidInput(reason=msg)

    # ensure flavor id does not exceed 255 characters
    utils.check_string_length(flavorid, 'id', min_length=1,
                              max_length=255)

    # ensure flavor id does not contain any special characters
    valid_flavor_id = VALID_ID_REGEX.search(flavorid)
    if not valid_flavor_id:
        msg = _("Flavor id can only contain letters from A-Z (both cases), "
                "periods, dashes, underscores and spaces.")
        raise exception.InvalidInput(reason=msg)

    # NOTE(wangbo): validate attributes of the creating flavor.
    # ram and vcpus should be positive ( > 0) integers.
    # disk, ephemeral and swap should be non-negative ( >= 0) integers.
    flavor_attributes = {
        'memory_mb': ('ram', 1),
        'vcpus': ('vcpus', 1),
        'root_gb': ('disk', 0),
        'ephemeral_gb': ('ephemeral', 0),
        'swap': ('swap', 0)
    }

    for key, value in flavor_attributes.items():
        kwargs[key] = utils.validate_integer(kwargs[key], value[0], value[1],
                                             db.MAX_INT)

    # rxtx_factor should be a positive float
    try:
        kwargs['rxtx_factor'] = float(kwargs['rxtx_factor'])
        if (kwargs['rxtx_factor'] <= 0 or
                kwargs['rxtx_factor'] > SQL_SP_FLOAT_MAX):
            raise ValueError()
    except ValueError:
        msg = (_("'rxtx_factor' argument must be a float between 0 and %g") %
               SQL_SP_FLOAT_MAX)
        raise exception.InvalidInput(reason=msg)

    kwargs['name'] = name
    kwargs['flavorid'] = flavorid
    # ensure is_public attribute is boolean
    try:
        kwargs['is_public'] = strutils.bool_from_string(
            is_public, strict=True)
    except ValueError:
        raise exception.InvalidInput(reason=_("is_public must be a boolean"))

    if cell:
        # Delegate creation to the target cell over RPC.
        flavor = cells_rpcapi.CellsAPI().flavor_create(
            context.get_admin_context(), cell, kwargs)
    else:
        flavor = objects.Flavor(context=context.get_admin_context(), **kwargs)
        flavor.create()
    return flavor
def destroy(name):
    """Mark the named flavor as deleted.

    :raises FlavorNotFoundByName: when *name* is empty or unknown.
    """
    try:
        if not name:
            raise ValueError()
        admin_ctxt = context.get_admin_context()
        objects.Flavor(context=admin_ctxt, name=name).destroy()
    except (ValueError, exception.NotFound):
        LOG.exception(_LE('Instance type %s not found for deletion'), name)
        raise exception.FlavorNotFoundByName(flavor_name=name)
def get_all_flavors_sorted_list(ctxt=None, filters=None, sort_key='flavorid',
                                sort_dir='asc', limit=None, marker=None):
    """Get all non-deleted flavors as a sorted list.
    """
    if ctxt is None:
        # Default to an admin context when none is supplied.
        ctxt = context.get_admin_context()
    return objects.FlavorList.get_all(ctxt,
                                      filters=filters,
                                      sort_key=sort_key,
                                      sort_dir=sort_dir,
                                      limit=limit,
                                      marker=marker)
def get_default_flavor():
    """Get the default flavor."""
    # The default is only honoured by the EC2 API (see flavor_opts).
    return get_flavor_by_name(CONF.default_flavor)
def get_flavor_by_name(name, ctxt=None):
    """Retrieves single flavor by name."""
    if name is None:
        # No name supplied: fall back to the configured default flavor.
        return get_default_flavor()
    lookup_ctxt = ctxt if ctxt is not None else context.get_admin_context()
    return objects.Flavor.get_by_name(lookup_ctxt, name)
# TODO(termie): flavor-specific code should probably be in the API that uses
#               flavors.
def get_flavor_by_flavor_id(flavorid, ctxt=None, read_deleted="yes"):
    """Retrieve flavor by flavorid.

    :raises: FlavorNotFound
    """
    lookup_ctxt = ctxt
    if lookup_ctxt is None:
        lookup_ctxt = context.get_admin_context(read_deleted=read_deleted)
    return objects.Flavor.get_by_flavor_id(lookup_ctxt, flavorid, read_deleted)
def get_flavor_access_by_flavor_id(flavorid, ctxt=None):
    """Retrieve flavor access list by flavor id."""
    if ctxt is None:
        ctxt = context.get_admin_context()
    # The projects granted access are exposed on the Flavor object itself.
    return objects.Flavor.get_by_flavor_id(ctxt, flavorid).projects
# NOTE(danms): This method is deprecated, do not use it!
# Use instance.{old_,new_,}flavor instead, as instances no longer
# have flavor information in system_metadata.
def extract_flavor(instance, prefix=''):
    """Create a Flavor object from instance's system_metadata
    information.
    """
    sys_meta = utils.instance_sys_meta(instance)
    if not sys_meta:
        return None
    flavor = objects.Flavor()
    for key in system_metadata_flavor_props.keys():
        setattr(flavor, key, sys_meta['%sinstance_type_%s' % (prefix, key)])

    # NOTE(danms): Only the NUMA-related extra_specs were mirrored into
    # system_metadata, so only those can be recovered here.
    extra_marker = '%sinstance_type_extra_' % prefix
    extra_specs = {key[len(extra_marker):]: value
                   for key, value in sys_meta.items()
                   if key.startswith(extra_marker)}
    if extra_specs:
        flavor.extra_specs = extra_specs

    return flavor
# NOTE(danms): This method is deprecated, do not use it!
# Use instance.{old_,new_,}flavor instead, as instances no longer
# have flavor information in system_metadata.
def save_flavor_info(metadata, instance_type, prefix=''):
    """Save properties from instance_type into instance's system_metadata,
    in the format of:

      [prefix]instance_type_[key]

    This can be used to update system_metadata in place from a type, as well
    as stash information about another instance_type for later use (such as
    during resize).
    """
    for key in system_metadata_flavor_props.keys():
        metadata['%sinstance_type_%s' % (prefix, key)] = instance_type[key]

    # NOTE(danms): Only the NUMA-related extra_specs are mirrored, to avoid
    # an uglier alternative until flavor info is split out of
    # system_metadata.
    extra_specs = instance_type.get('extra_specs', {})
    numa_prefixes = tuple(system_metadata_flavor_extra_props)
    for key, value in extra_specs.items():
        if key.startswith(numa_prefixes):
            metadata['%sinstance_type_extra_%s' % (prefix, key)] = value

    return metadata
# NOTE(danms): This method is deprecated, do not use it!
# Instances no longer store flavor information in system_metadata
def delete_flavor_info(metadata, *prefixes):
    """Delete flavor instance_type information from instance's system_metadata
    by prefix.
    """
    for prefix in prefixes:
        # Standard flavor fields.
        for key in system_metadata_flavor_props.keys():
            del metadata['%sinstance_type_%s' % (prefix, key)]
        # Mirrored NUMA-related extra_specs entries.
        extra_marker = '%sinstance_type_extra_' % prefix
        for key in [k for k in metadata if k.startswith(extra_marker)]:
            del metadata[key]
    return metadata
def validate_extra_spec_keys(key_names_list):
    """Raise InvalidInput when any extra-spec key has an illegal character."""
    invalid = [key for key in key_names_list
               if not VALID_EXTRASPEC_NAME_REGEX.match(key)]
    if invalid:
        expl = _('Key Names can only contain alphanumeric characters, '
                 'periods, dashes, underscores, colons and spaces.')
        raise exception.InvalidInput(message=expl)
| 34.614493 | 79 | 0.672668 |
87963fbf362d08c769f5a1428eb14b20cabd7944 | 2,345 | py | Python | mandelbrot/mandelbrot.py | gabrielthecruz/mandelbrot-set | 7109d858c9ffc4e0fd6ac1a7fbf5f6b95ea30fe7 | [
"MIT"
] | null | null | null | mandelbrot/mandelbrot.py | gabrielthecruz/mandelbrot-set | 7109d858c9ffc4e0fd6ac1a7fbf5f6b95ea30fe7 | [
"MIT"
] | null | null | null | mandelbrot/mandelbrot.py | gabrielthecruz/mandelbrot-set | 7109d858c9ffc4e0fd6ac1a7fbf5f6b95ea30fe7 | [
"MIT"
] | null | null | null | from itertools import product
from PIL import Image
import numpy as np
import math
class Point:
    """A 2-D point with named axes that also unpacks like a pair."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __iter__(self):
        # Supports ``x, y = point`` and ``tuple(point)``.
        return iter((self.x, self.y))
class Pixel(Point):
    """Screen-space point: (column, row) indices into the image."""
    pass
class Coordinate(Point):
    """Complex-plane point: x is the real part, y the imaginary part."""
    pass
def coordinates(min_x, max_x, min_y, max_y, width, height):
    '''Yields a pixel and a coordinate.'''
    xs = np.linspace(min_x, max_x, num=width)
    ys = np.linspace(min_y, max_y, num=height)
    # Same traversal order as product(enumerate(xs), enumerate(ys)).
    for px, x in enumerate(xs):
        for py, y in enumerate(ys):
            yield Pixel(px, py), Coordinate(x, y)
def bulb_checking(coord):
    '''Performs the bulb checking for optimization.

    Returns True when the point satisfies q*(q + x - 1/4) <= y^2/4,
    i.e. it can be skipped without iterating.
    '''
    x, y = coord
    y_squared = y * y
    q = (x - 0.25) ** 2 + y_squared
    return q * (q + x - 0.25) <= 0.25 * y_squared
def escape_time(coord, max_iter):
    '''
    Executes escape time algorithm on coord.
    Returns the last iteration number, x and y values calculated.
    '''
    # Track x^2, y^2 and (x+y)^2 so 2*x*y can be recovered without an
    # extra multiplication: 2xy = (x+y)^2 - x^2 - y^2.
    x_sq = y_sq = sum_sq = 0
    for step in range(1, max_iter + 1):
        x = x_sq - y_sq + coord.x
        y = sum_sq - x_sq - y_sq + coord.y
        x_sq = x ** 2
        y_sq = y ** 2
        sum_sq = (x + y) ** 2
        if x_sq + y_sq > 4:
            # |z| > 2: the orbit has escaped.
            break
    return step, x, y
def smooth_coloring(iter_n, x, y, max_iters):
    '''Generates an (H, S, V) color from the (smoothed) escape iteration.'''
    if iter_n < max_iters:
        # Smooth the integer iteration count using the orbit's final modulus.
        half_log_modulus = math.log(x ** 2 + y ** 2) / 2
        ln2 = math.log(2)
        iter_n += 1 - math.log(half_log_modulus / ln2) / ln2
    hue = int(255 * iter_n / max_iters)
    # Points that never escaped are painted black (value 0).
    value = 255 if iter_n < max_iters else 0
    return (hue, 255, value)
def mandelbrot(min_x, max_x, min_y, max_y, max_iters, image):
'''Draws the Mandelbrot Set on image.'''
width, height = image.size
for pixel, coord in coordinates(min_x, max_x, min_y, max_y, width, height):
if bulb_checking(coord):
color = (0, 0, 0)
else:
iteration, x, y = escape_time(coord, max_iters)
color = smooth_coloring(iteration, x, y, max_iters)
image.putpixel(tuple(pixel), color)
return image
if __name__ == '__main__':
image = Image.new('HSV', (800, 600))
image = mandelbrot(-2.2, 0.8, -1.2, 1.2, 100, image)
image.convert('RGB').save('../mandelbrot.png', 'PNG')
| 23.928571 | 79 | 0.588486 |
5705da28a7d25a72b44cdb1610c72f387feb453d | 13,164 | py | Python | tests/test_metrics/test_losses.py | Naoki-Wake/mmaction2 | a2032605db82509744a18d993c94a06feb1efd15 | [
"Apache-2.0"
] | 648 | 2021-06-24T19:33:09.000Z | 2022-03-31T06:27:24.000Z | tests/test_metrics/test_losses.py | jayleicn/mmaction2-1 | 0a6fde1abb8403f1f68b568f5b4694c6f828e27e | [
"Apache-2.0"
] | 53 | 2021-07-01T03:07:52.000Z | 2022-03-27T16:15:29.000Z | tests/test_metrics/test_losses.py | jayleicn/mmaction2-1 | 0a6fde1abb8403f1f68b568f5b4694c6f828e27e | [
"Apache-2.0"
] | 117 | 2021-06-25T01:22:32.000Z | 2022-03-31T08:33:55.000Z | import numpy as np
import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv import ConfigDict
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from torch.autograd import Variable
from mmaction.models import (BCELossWithLogits, BinaryLogisticRegressionLoss,
BMNLoss, CrossEntropyLoss, HVULoss, NLLLoss,
OHEMHingeLoss, SSNLoss)
def test_hvu_loss():
pred = torch.tensor([[-1.0525, -0.7085, 0.1819, -0.8011],
[0.1555, -1.5550, 0.5586, 1.9746]])
gt = torch.tensor([[1., 0., 0., 0.], [0., 0., 1., 1.]])
mask = torch.tensor([[1., 1., 0., 0.], [0., 0., 1., 1.]])
category_mask = torch.tensor([[1., 0.], [0., 1.]])
categories = ['action', 'scene']
category_nums = (2, 2)
category_loss_weights = (1, 1)
loss_all_nomask_sum = HVULoss(
categories=categories,
category_nums=category_nums,
category_loss_weights=category_loss_weights,
loss_type='all',
with_mask=False,
reduction='sum')
loss = loss_all_nomask_sum(pred, gt, mask, category_mask)
loss1 = F.binary_cross_entropy_with_logits(pred, gt, reduction='none')
loss1 = torch.sum(loss1, dim=1)
assert torch.eq(loss['loss_cls'], torch.mean(loss1))
loss_all_mask = HVULoss(
categories=categories,
category_nums=category_nums,
category_loss_weights=category_loss_weights,
loss_type='all',
with_mask=True)
loss = loss_all_mask(pred, gt, mask, category_mask)
loss1 = F.binary_cross_entropy_with_logits(pred, gt, reduction='none')
loss1 = torch.sum(loss1 * mask, dim=1) / torch.sum(mask, dim=1)
loss1 = torch.mean(loss1)
assert torch.eq(loss['loss_cls'], loss1)
loss_ind_mask = HVULoss(
categories=categories,
category_nums=category_nums,
category_loss_weights=category_loss_weights,
loss_type='individual',
with_mask=True)
loss = loss_ind_mask(pred, gt, mask, category_mask)
action_loss = F.binary_cross_entropy_with_logits(pred[:1, :2], gt[:1, :2])
scene_loss = F.binary_cross_entropy_with_logits(pred[1:, 2:], gt[1:, 2:])
loss1 = (action_loss + scene_loss) / 2
assert torch.eq(loss['loss_cls'], loss1)
loss_ind_nomask_sum = HVULoss(
categories=categories,
category_nums=category_nums,
category_loss_weights=category_loss_weights,
loss_type='individual',
with_mask=False,
reduction='sum')
loss = loss_ind_nomask_sum(pred, gt, mask, category_mask)
action_loss = F.binary_cross_entropy_with_logits(
pred[:, :2], gt[:, :2], reduction='none')
action_loss = torch.sum(action_loss, dim=1)
action_loss = torch.mean(action_loss)
scene_loss = F.binary_cross_entropy_with_logits(
pred[:, 2:], gt[:, 2:], reduction='none')
scene_loss = torch.sum(scene_loss, dim=1)
scene_loss = torch.mean(scene_loss)
loss1 = (action_loss + scene_loss) / 2
assert torch.eq(loss['loss_cls'], loss1)
def test_cross_entropy_loss():
cls_scores = torch.rand((3, 4))
hard_gt_labels = torch.LongTensor([0, 1, 2]).squeeze()
soft_gt_labels = torch.FloatTensor([[1, 0, 0, 0], [0, 1, 0, 0],
[0, 0, 1, 0]]).squeeze()
# hard label without weight
cross_entropy_loss = CrossEntropyLoss()
output_loss = cross_entropy_loss(cls_scores, hard_gt_labels)
assert torch.equal(output_loss, F.cross_entropy(cls_scores,
hard_gt_labels))
# hard label with class weight
weight = torch.rand(4)
class_weight = weight.numpy().tolist()
cross_entropy_loss = CrossEntropyLoss(class_weight=class_weight)
output_loss = cross_entropy_loss(cls_scores, hard_gt_labels)
assert torch.equal(
output_loss,
F.cross_entropy(cls_scores, hard_gt_labels, weight=weight))
# soft label without class weight
cross_entropy_loss = CrossEntropyLoss()
output_loss = cross_entropy_loss(cls_scores, soft_gt_labels)
assert_almost_equal(
output_loss.numpy(),
F.cross_entropy(cls_scores, hard_gt_labels).numpy(),
decimal=4)
# soft label with class weight
cross_entropy_loss = CrossEntropyLoss(class_weight=class_weight)
output_loss = cross_entropy_loss(cls_scores, soft_gt_labels)
assert_almost_equal(
output_loss.numpy(),
F.cross_entropy(cls_scores, hard_gt_labels, weight=weight).numpy(),
decimal=4)
def test_bce_loss_with_logits():
cls_scores = torch.rand((3, 4))
gt_labels = torch.rand((3, 4))
bce_loss_with_logits = BCELossWithLogits()
output_loss = bce_loss_with_logits(cls_scores, gt_labels)
assert torch.equal(
output_loss, F.binary_cross_entropy_with_logits(cls_scores, gt_labels))
weight = torch.rand(4)
class_weight = weight.numpy().tolist()
bce_loss_with_logits = BCELossWithLogits(class_weight=class_weight)
output_loss = bce_loss_with_logits(cls_scores, gt_labels)
assert torch.equal(
output_loss,
F.binary_cross_entropy_with_logits(
cls_scores, gt_labels, weight=weight))
def test_nll_loss():
cls_scores = torch.randn(3, 3)
gt_labels = torch.tensor([0, 2, 1]).squeeze()
sm = nn.Softmax(dim=0)
nll_loss = NLLLoss()
cls_score_log = torch.log(sm(cls_scores))
output_loss = nll_loss(cls_score_log, gt_labels)
assert torch.equal(output_loss, F.nll_loss(cls_score_log, gt_labels))
def test_binary_logistic_loss():
binary_logistic_regression_loss = BinaryLogisticRegressionLoss()
reg_score = torch.tensor([0., 1.])
label = torch.tensor([0., 1.])
output_loss = binary_logistic_regression_loss(reg_score, label, 0.5)
assert_array_almost_equal(output_loss.numpy(), np.array([0.]), decimal=4)
reg_score = torch.tensor([0.3, 0.9])
label = torch.tensor([0., 1.])
output_loss = binary_logistic_regression_loss(reg_score, label, 0.5)
assert_array_almost_equal(
output_loss.numpy(), np.array([0.231]), decimal=4)
def test_bmn_loss():
bmn_loss = BMNLoss()
# test tem_loss
pred_start = torch.tensor([0.9, 0.1])
pred_end = torch.tensor([0.1, 0.9])
gt_start = torch.tensor([1., 0.])
gt_end = torch.tensor([0., 1.])
output_tem_loss = bmn_loss.tem_loss(pred_start, pred_end, gt_start, gt_end)
binary_logistic_regression_loss = BinaryLogisticRegressionLoss()
assert_loss = (
binary_logistic_regression_loss(pred_start, gt_start) +
binary_logistic_regression_loss(pred_end, gt_end))
assert_array_almost_equal(
output_tem_loss.numpy(), assert_loss.numpy(), decimal=4)
# test pem_reg_loss
seed = 1
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
pred_bm_reg = torch.tensor([[0.1, 0.99], [0.5, 0.4]])
gt_iou_map = torch.tensor([[0, 1.], [0, 1.]])
mask = torch.tensor([[0.1, 0.4], [0.4, 0.1]])
output_pem_reg_loss = bmn_loss.pem_reg_loss(pred_bm_reg, gt_iou_map, mask)
assert_array_almost_equal(
output_pem_reg_loss.numpy(), np.array([0.2140]), decimal=4)
# test pem_cls_loss
pred_bm_cls = torch.tensor([[0.1, 0.99], [0.95, 0.2]])
gt_iou_map = torch.tensor([[0., 1.], [0., 1.]])
mask = torch.tensor([[0.1, 0.4], [0.4, 0.1]])
output_pem_cls_loss = bmn_loss.pem_cls_loss(pred_bm_cls, gt_iou_map, mask)
assert_array_almost_equal(
output_pem_cls_loss.numpy(), np.array([1.6137]), decimal=4)
# test bmn_loss
pred_bm = torch.tensor([[[[0.1, 0.99], [0.5, 0.4]],
[[0.1, 0.99], [0.95, 0.2]]]])
pred_start = torch.tensor([[0.9, 0.1]])
pred_end = torch.tensor([[0.1, 0.9]])
gt_iou_map = torch.tensor([[[0., 2.5], [0., 10.]]])
gt_start = torch.tensor([[1., 0.]])
gt_end = torch.tensor([[0., 1.]])
mask = torch.tensor([[0.1, 0.4], [0.4, 0.1]])
output_loss = bmn_loss(pred_bm, pred_start, pred_end, gt_iou_map, gt_start,
gt_end, mask)
assert_array_almost_equal(
output_loss[0].numpy(),
output_tem_loss + 10 * output_pem_reg_loss + output_pem_cls_loss)
assert_array_almost_equal(output_loss[1].numpy(), output_tem_loss)
assert_array_almost_equal(output_loss[2].numpy(), output_pem_reg_loss)
assert_array_almost_equal(output_loss[3].numpy(), output_pem_cls_loss)
def test_ohem_hinge_loss():
# test normal case
pred = torch.tensor([[
0.5161, 0.5228, 0.7748, 0.0573, 0.1113, 0.8862, 0.1752, 0.9448, 0.0253,
0.1009, 0.4371, 0.2232, 0.0412, 0.3487, 0.3350, 0.9294, 0.7122, 0.3072,
0.2942, 0.7679
]],
requires_grad=True)
gt = torch.tensor([8])
num_video = 1
loss = OHEMHingeLoss.apply(pred, gt, 1, 1.0, num_video)
assert_array_almost_equal(
loss.detach().numpy(), np.array([0.0552]), decimal=4)
loss.backward(Variable(torch.ones([1])))
assert_array_almost_equal(
np.array(pred.grad),
np.array([[
0., 0., 0., 0., 0., 0., 0., -1., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0.
]]),
decimal=4)
# test error case
with pytest.raises(ValueError):
gt = torch.tensor([8, 10])
loss = OHEMHingeLoss.apply(pred, gt, 1, 1.0, num_video)
def test_ssn_loss():
ssn_loss = SSNLoss()
# test activity_loss
activity_score = torch.rand((8, 21))
labels = torch.LongTensor([8] * 8).squeeze()
activity_indexer = torch.tensor([0, 7])
output_activity_loss = ssn_loss.activity_loss(activity_score, labels,
activity_indexer)
assert torch.equal(
output_activity_loss,
F.cross_entropy(activity_score[activity_indexer, :],
labels[activity_indexer]))
# test completeness_loss
completeness_score = torch.rand((8, 20), requires_grad=True)
labels = torch.LongTensor([8] * 8).squeeze()
completeness_indexer = torch.tensor([0, 1, 2, 3, 4, 5, 6])
positive_per_video = 1
incomplete_per_video = 6
output_completeness_loss = ssn_loss.completeness_loss(
completeness_score, labels, completeness_indexer, positive_per_video,
incomplete_per_video)
pred = completeness_score[completeness_indexer, :]
gt = labels[completeness_indexer]
pred_dim = pred.size(1)
pred = pred.view(-1, positive_per_video + incomplete_per_video, pred_dim)
gt = gt.view(-1, positive_per_video + incomplete_per_video)
# yapf:disable
positive_pred = pred[:, :positive_per_video, :].contiguous().view(-1, pred_dim) # noqa:E501
incomplete_pred = pred[:, positive_per_video:, :].contiguous().view(-1, pred_dim) # noqa:E501
# yapf:enable
ohem_ratio = 0.17
positive_loss = OHEMHingeLoss.apply(
positive_pred, gt[:, :positive_per_video].contiguous().view(-1), 1,
1.0, positive_per_video)
incomplete_loss = OHEMHingeLoss.apply(
incomplete_pred, gt[:, positive_per_video:].contiguous().view(-1), -1,
ohem_ratio, incomplete_per_video)
num_positives = positive_pred.size(0)
num_incompletes = int(incomplete_pred.size(0) * ohem_ratio)
assert_loss = ((positive_loss + incomplete_loss) /
float(num_positives + num_incompletes))
assert torch.equal(output_completeness_loss, assert_loss)
# test reg_loss
bbox_pred = torch.rand((8, 20, 2))
labels = torch.LongTensor([8] * 8).squeeze()
bbox_targets = torch.rand((8, 2))
regression_indexer = torch.tensor([0])
output_reg_loss = ssn_loss.classwise_regression_loss(
bbox_pred, labels, bbox_targets, regression_indexer)
pred = bbox_pred[regression_indexer, :, :]
gt = labels[regression_indexer]
reg_target = bbox_targets[regression_indexer, :]
class_idx = gt.data - 1
classwise_pred = pred[:, class_idx, :]
classwise_reg_pred = torch.cat((torch.diag(classwise_pred[:, :, 0]).view(
-1, 1), torch.diag(classwise_pred[:, :, 1]).view(-1, 1)),
dim=1)
assert torch.equal(
output_reg_loss,
F.smooth_l1_loss(classwise_reg_pred.view(-1), reg_target.view(-1)) * 2)
# test ssn_loss
proposal_type = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 2]])
train_cfg = ConfigDict(
dict(
ssn=dict(
sampler=dict(
num_per_video=8,
positive_ratio=1,
background_ratio=1,
incomplete_ratio=6,
add_gt_as_proposals=True),
loss_weight=dict(comp_loss_weight=0.1, reg_loss_weight=0.1))))
output_loss = ssn_loss(activity_score, completeness_score, bbox_pred,
proposal_type, labels, bbox_targets, train_cfg)
assert torch.equal(output_loss['loss_activity'], output_activity_loss)
assert torch.equal(output_loss['loss_completeness'],
output_completeness_loss * 0.1)
assert torch.equal(output_loss['loss_reg'], output_reg_loss * 0.1)
| 39.650602 | 98 | 0.649802 |
1773603e790d073c1934cfbecf8a8afc7a7e0c6f | 11,858 | py | Python | S4/S4 Library/simulation/open_street_director/open_street_director.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
] | 1 | 2021-05-20T19:33:37.000Z | 2021-05-20T19:33:37.000Z | S4/S4 Library/simulation/open_street_director/open_street_director.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
] | null | null | null | S4/S4 Library/simulation/open_street_director/open_street_director.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
] | null | null | null | from build_buy import load_conditional_objects
from conditional_layers.conditional_layer_service import ConditionalLayerRequestSpeedType
from date_and_time import TimeSpan, create_time_span
from default_property_stream_reader import DefaultPropertyStreamReader
from objects.client_object_mixin import ClientObjectMixin
from sims4.localization import TunableLocalizedString
from sims4.tuning.instances import HashedTunedInstanceMetaclass
from sims4.tuning.tunable import HasTunableReference, TunableRange, TunableSimMinute, TunableMapping, TunableVariant, TunableEnumEntry, Tunable, OptionalTunable, TunableReference
from sims4.utils import classproperty
from situations.service_npcs.modify_lot_items_tuning import ModifyAllLotItems
from venues.npc_summoning import ResidentialLotArrivalBehavior, CreateAndAddToSituation, AddToBackgroundSituation
import alarms
import enum
import objects
import services
import sims4.resources
import venues.venue_constants
import world.street
logger = sims4.log.Logger('OpenStreetDirector', default_owner='jjacobson')
class OpenStreetDirectorPriority(enum.Int, export=False):
DEFAULT = ...
CART = ...
FESTIVAL = ...
class OpenStreetDirectorBase(HasTunableReference, metaclass=HashedTunedInstanceMetaclass, manager=services.get_instance_manager(sims4.resources.Types.OPEN_STREET_DIRECTOR)):
INSTANCE_SUBCLASSES_ONLY = True
LAYER_OBJECTS_TO_LOAD = TunableRange(description='\n The number of objects to load at a time when loading a layer.\n Please consult a GPE before changing this value as it will impact\n performance.\n ', tunable_type=int, default=1, minimum=1)
LAYER_OBJECTS_TO_DESTROY = TunableRange(description='\n The number of objects to destroy at a time when destroying a layer.\n Please consult a GPE before changing this value as it will impact\n performance.\n ', tunable_type=int, default=1, minimum=1)
LAYER_OBJECTS_ALARM_TIME = TunableSimMinute(description='\n The frequency that we will create or destroy objects in the festival.\n Please consult a GPE before changing this value as it will impact\n performance.\n ', default=5, minimum=1)
INSTANCE_TUNABLES = {'lot_cleanup': ModifyAllLotItems.TunableFactory(description='\n A list of actions taken on objects on the lot when the open street\n director is being shutdown or cleaned up. Objects on the lot are\n left untouched.\n '), 'startup_actions': ModifyAllLotItems.TunableFactory(description='\n A list of actions that are taken on objects on the open street\n when the open street director is being started up. Objects on\n the lot are left untouched.\n '), 'npc_summoning_behavior': TunableMapping(description='\n Whenever an NPC is summoned to a lot by the player, determine\n which action to take based on the summoning purpose. The purpose\n is a dynamic enum: venues.venue_constants.NPCSummoningPurpose.\n \n The action will generally involve either adding a sim to an existing\n situation or creating a situation then adding them to it.\n \n \\depot\\Sims4Projects\\Docs\\Design\\Open Streets\\Open Street Invite Matrix.xlsx\n \n residential: This is behavior pushed on the NPC if the venue is a residential lot.\n create_situation: Place the NPC in the specified situation/job pair.\n add_to_background_situation: Add the NPC the currently running background \n situation on a venue.\n ', key_type=TunableEnumEntry(venues.venue_constants.NPCSummoningPurpose, venues.venue_constants.NPCSummoningPurpose.DEFAULT), value_type=TunableVariant(locked_args={'disabled': None}, residential=ResidentialLotArrivalBehavior.TunableFactory(), create_situation=CreateAndAddToSituation.TunableFactory(), add_to_background_situation=AddToBackgroundSituation.TunableFactory(), default='disabled')), 'allow_loading_after_time_passes_elsewhere': Tunable(description='\n When Checked this will allow an open street director to be loaded\n even if time has passed on another neighborhood with a different,\n or no, open street director.\n \n When Unchecked, if any time passes in another neighborhood then the\n save data will not be loaded.\n ', tunable_type=bool, default=False), 'whim_set': 
OptionalTunable(description='\n If enabled then this open street director will offer a whim set to\n the Sim when it is running.\n ', tunable=TunableReference(description='\n A whim set that is active when this open street director is\n running.\n ', manager=services.get_instance_manager(sims4.resources.Types.ASPIRATION), class_restrictions=('ObjectivelessWhimSet',)))}
@classproperty
def priority(cls):
raise NotImplementedError
def __init__(self):
self.request = None
self._cleanup_actions = []
self.was_loaded = False
self._loaded_layers = []
self._being_cleaned_up = False
self._ready_for_destruction = False
self._prerolling = False
self.did_preroll = False
@property
def ready_for_destruction(self):
return self._ready_for_destruction
def on_startup(self):
startup_actions = self.startup_actions()
def object_criteria(obj):
if obj.is_on_active_lot():
return False
return True
startup_actions.modify_objects(object_criteria=object_criteria)
def on_shutdown(self):
pass
def _clean_up(self):
pass
def clean_up(self):
if self._ready_for_destruction:
self.request.on_open_director_shutdown()
return
self._being_cleaned_up = True
self._clean_up()
def create_situations_during_zone_spin_up(self):
pass
def self_destruct(self):
if self._ready_for_destruction:
self.request.on_open_director_shutdown()
else:
self.request.request_destruction()
def _should_load_old_data(self, street_director_proto, reader):
if not ((not services.current_zone().time_has_passed_in_world_since_open_street_save() or self.allow_loading_after_time_passes_elsewhere) and street_director_proto.HasField('resource_key')):
return False
previous_resource_key = sims4.resources.get_key_from_protobuff(street_director_proto.resource_key)
return previous_resource_key == self.resource_key
def load(self, street_director_proto):
if street_director_proto.HasField('custom_data'):
reader = DefaultPropertyStreamReader(street_director_proto.custom_data)
else:
reader = None
if self._should_load_old_data(street_director_proto, reader):
self.was_loaded = True
loaded_layers = set()
conditional_layer_manager = services.get_instance_manager(sims4.resources.Types.CONDITIONAL_LAYER)
loaded_layers = set(conditional_layer_manager.get(conditional_layer_guid) for conditional_layer_guid in street_director_proto.loaded_layer_guids)
self._loaded_layers = list(loaded_layers)
self._load_custom_open_street_director(street_director_proto, reader)
else:
self.request.manager.cleanup_old_open_street_director()
def _load_custom_open_street_director(self, street_director_proto, reader):
pass
def save(self, street_director_proto):
street_director_proto.resource_key = sims4.resources.get_protobuff_for_key(self.resource_key)
street_director_proto.loaded_layer_guids.extend(loaded_layer.guid64 for loaded_layer in self._loaded_layers)
writer = sims4.PropertyStreamWriter()
self._save_custom_open_street_director(street_director_proto, writer)
data = writer.close()
if writer.count > 0:
street_director_proto.custom_data = data
def _save_custom_open_street_director(self, street_director_proto, writer):
pass
def has_conditional_layer(self, conditional_layer):
current_zone_id = services.current_zone_id()
street = world.street.get_street_instance_from_zone_id(current_zone_id)
if street is None:
return False
return street.has_conditional_layer(conditional_layer)
def load_layer_immediately(self, conditional_layer):
if conditional_layer not in self._loaded_layers:
self._loaded_layers.append(conditional_layer)
services.conditional_layer_service().load_conditional_layer(conditional_layer, callback=self.on_layer_loaded, speed=ConditionalLayerRequestSpeedType.IMMEDIATELY)
def load_layer_gradually(self, conditional_layer):
if conditional_layer not in self._loaded_layers:
self._loaded_layers.append(conditional_layer)
services.conditional_layer_service().load_conditional_layer(conditional_layer, callback=self.on_layer_loaded, speed=ConditionalLayerRequestSpeedType.GRADUALLY, timer_interval=OpenStreetDirectorBase.LAYER_OBJECTS_ALARM_TIME, timer_object_count=OpenStreetDirectorBase.LAYER_OBJECTS_TO_LOAD)
def on_layer_loaded(self, conditional_layer):
layer_objects = services.conditional_layer_service().get_layer_objects(conditional_layer)
for obj in layer_objects:
if obj.environmentscore_component is not None:
obj.remove_component(objects.components.types.ENVIRONMENT_SCORE_COMPONENT)
def remove_layer_objects(self, conditional_layer):
speed = ConditionalLayerRequestSpeedType.GRADUALLY if services.current_zone().is_zone_running else ConditionalLayerRequestSpeedType.IMMEDIATELY
services.conditional_layer_service().destroy_conditional_layer(conditional_layer, callback=self.on_layer_objects_destroyed, speed=speed, timer_interval=OpenStreetDirectorBase.LAYER_OBJECTS_ALARM_TIME, timer_object_count=OpenStreetDirectorBase.LAYER_OBJECTS_TO_DESTROY)
def on_layer_objects_destroyed(self, conditional_layer):
if conditional_layer in self._loaded_layers:
self._loaded_layers.remove(conditional_layer)
def get_all_layer_created_objects(self):
conditional_object_service = services.conditional_layer_service()
objects = []
for conditional_layer in self._loaded_layers:
objects.extend(conditional_object_service.get_layer_objects(conditional_layer))
return objects
@classmethod
def run_lot_cleanup(cls):
cleanup = cls.lot_cleanup()
def object_criteria(obj):
if obj.in_use:
return False
elif obj.is_on_active_lot():
return False
return True
cleanup.modify_objects(object_criteria=object_criteria)
def summon_npcs(self, npc_infos, purpose, host_sim_info=None):
summon_behavior = self.npc_summoning_behavior.get(purpose)
if summon_behavior is None:
summon_behavior = self.npc_summoning_behavior.get(venues.venue_constants.NPCSummoningPurpose.DEFAULT)
if summon_behavior is None:
return False
summon_behavior(npc_infos, host_sim_info=host_sim_info)
return True
def _preroll(self, preroll_time):
pass
def preroll(self, preroll_time=None):
if self.was_loaded:
return
self.did_preroll = True
self._prerolling = True
try:
self._preroll(preroll_time)
except Exception:
logger.exception('Exception hit while prerolling for {}:', self)
finally:
self._prerolling = False
| 61.440415 | 2,770 | 0.730477 |
2a305c52731534ebe77b071e5f2df4daf6fe87c3 | 1,078 | py | Python | hproxy/utils/log.py | howie6879/hproxy | bb862569ca3ffad4d01d08c0ba5b2d9b2521f5e7 | [
"MIT"
] | 61 | 2018-04-08T10:42:39.000Z | 2022-02-24T08:57:25.000Z | hproxy/utils/log.py | howie6879/hproxy | bb862569ca3ffad4d01d08c0ba5b2d9b2521f5e7 | [
"MIT"
] | 5 | 2018-04-13T01:00:26.000Z | 2021-12-13T19:41:59.000Z | hproxy/utils/log.py | howie6879/hproxy | bb862569ca3ffad4d01d08c0ba5b2d9b2521f5e7 | [
"MIT"
] | 18 | 2018-04-08T10:42:44.000Z | 2021-12-02T05:40:47.000Z | #!/usr/bin/env python
"""
Created by howie.hu at 06/04/2018.
"""
import logging
import colorama
from colorama import Fore, Style
colorama.init(autoreset=True)
class Logger:
"""
Token from https://github.com/gaojiuli/toapi/blob/master/toapi/log.py
"""
def __init__(self, name, level=logging.DEBUG):
logging.basicConfig(format='%(asctime)s %(message)-10s ',
datefmt='%Y/%m/%d %H:%M:%S')
self.logger = logging.getLogger(name)
self.logger.setLevel(level)
def info(self, type, message, color=Fore.CYAN):
self.logger.info(color + '[%-16s] %-2s %s' % (type, 'OK', message) + Style.RESET_ALL)
def error(self, type, message, color=Fore.RED):
self.logger.error(color + '[%-16s] %-4s %s' % (type, 'FAIL', message) + Style.RESET_ALL)
def exception(self, type, message, color=Fore.RED):
self.logger.error(color + '[%-16s] %-5s %s' % (type, 'ERROR', message) + Style.RESET_ALL)
logger = Logger('hproxy')
if __name__ == '__main__':
logger.info(type='222', message='222')
| 26.95 | 97 | 0.617811 |
facca6cca07110023e4394e5f5890dbcfd57738c | 18,987 | py | Python | venv/Lib/site-packages/pygments/lexers/erlang.py | star10919/drf | 77c005794087484d72ffc0d76612a6ac9845821e | [
"BSD-3-Clause"
] | 9 | 2019-05-29T23:50:28.000Z | 2021-01-29T20:51:05.000Z | venv/Lib/site-packages/pygments/lexers/erlang.py | star10919/drf | 77c005794087484d72ffc0d76612a6ac9845821e | [
"BSD-3-Clause"
] | 5 | 2021-02-27T21:31:47.000Z | 2021-04-05T21:49:38.000Z | marlinkpy/venv/Lib/site-packages/pygments/lexers/erlang.py | amilinovic/Test | 4cfdc12a0efeebd636c4982ef90dad65a63b842b | [
"MIT"
] | 3 | 2021-01-31T16:40:52.000Z | 2021-08-29T18:32:34.000Z | # -*- coding: utf-8 -*-
"""
pygments.lexers.erlang
~~~~~~~~~~~~~~~~~~~~~~
Lexers for Erlang.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, bygroups, words, do_insertions, \
include, default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Generic
__all__ = ['ErlangLexer', 'ErlangShellLexer', 'ElixirConsoleLexer',
'ElixirLexer']
line_re = re.compile('.*?\n')
class ErlangLexer(RegexLexer):
"""
For the Erlang functional programming language.
Blame Jeremy Thurgood (http://jerith.za.net/).
.. versionadded:: 0.9
"""
name = 'Erlang'
aliases = ['erlang']
filenames = ['*.erl', '*.hrl', '*.es', '*.escript']
mimetypes = ['text/x-erlang']
keywords = (
'after', 'begin', 'case', 'catch', 'cond', 'end', 'fun', 'if',
'let', 'of', 'query', 'receive', 'try', 'when',
)
builtins = ( # See erlang(3) man page
'abs', 'append_element', 'apply', 'atom_to_list', 'binary_to_list',
'bitstring_to_list', 'binary_to_term', 'bit_size', 'bump_reductions',
'byte_size', 'cancel_timer', 'check_process_code', 'delete_module',
'demonitor', 'disconnect_node', 'display', 'element', 'erase', 'exit',
'float', 'float_to_list', 'fun_info', 'fun_to_list',
'function_exported', 'garbage_collect', 'get', 'get_keys',
'group_leader', 'hash', 'hd', 'integer_to_list', 'iolist_to_binary',
'iolist_size', 'is_atom', 'is_binary', 'is_bitstring', 'is_boolean',
'is_builtin', 'is_float', 'is_function', 'is_integer', 'is_list',
'is_number', 'is_pid', 'is_port', 'is_process_alive', 'is_record',
'is_reference', 'is_tuple', 'length', 'link', 'list_to_atom',
'list_to_binary', 'list_to_bitstring', 'list_to_existing_atom',
'list_to_float', 'list_to_integer', 'list_to_pid', 'list_to_tuple',
'load_module', 'localtime_to_universaltime', 'make_tuple', 'md5',
'md5_final', 'md5_update', 'memory', 'module_loaded', 'monitor',
'monitor_node', 'node', 'nodes', 'open_port', 'phash', 'phash2',
'pid_to_list', 'port_close', 'port_command', 'port_connect',
'port_control', 'port_call', 'port_info', 'port_to_list',
'process_display', 'process_flag', 'process_info', 'purge_module',
'put', 'read_timer', 'ref_to_list', 'register', 'resume_process',
'round', 'send', 'send_after', 'send_nosuspend', 'set_cookie',
'setelement', 'size', 'spawn', 'spawn_link', 'spawn_monitor',
'spawn_opt', 'split_binary', 'start_timer', 'statistics',
'suspend_process', 'system_flag', 'system_info', 'system_monitor',
'system_profile', 'term_to_binary', 'tl', 'trace', 'trace_delivered',
'trace_info', 'trace_pattern', 'trunc', 'tuple_size', 'tuple_to_list',
'universaltime_to_localtime', 'unlink', 'unregister', 'whereis'
)
operators = r'(\+\+?|--?|\*|/|<|>|/=|=:=|=/=|=<|>=|==?|<-|!|\?)'
word_operators = (
'and', 'andalso', 'band', 'bnot', 'bor', 'bsl', 'bsr', 'bxor',
'div', 'not', 'or', 'orelse', 'rem', 'xor'
)
atom_re = r"(?:[a-z]\w*|'[^\n']*[^\\]')"
variable_re = r'(?:[A-Z_]\w*)'
esc_char_re = r'[bdefnrstv\'"\\]'
esc_octal_re = r'[0-7][0-7]?[0-7]?'
esc_hex_re = r'(?:x[0-9a-fA-F]{2}|x\{[0-9a-fA-F]+\})'
esc_ctrl_re = r'\^[a-zA-Z]'
escape_re = r'(?:\\(?:'+esc_char_re+r'|'+esc_octal_re+r'|'+esc_hex_re+r'|'+esc_ctrl_re+r'))'
macro_re = r'(?:'+variable_re+r'|'+atom_re+r')'
base_re = r'(?:[2-9]|[12][0-9]|3[0-6])'
tokens = {
'root': [
(r'\s+', Text),
(r'%.*\n', Comment),
(words(keywords, suffix=r'\b'), Keyword),
(words(builtins, suffix=r'\b'), Name.Builtin),
(words(word_operators, suffix=r'\b'), Operator.Word),
(r'^-', Punctuation, 'directive'),
(operators, Operator),
(r'"', String, 'string'),
(r'<<', Name.Label),
(r'>>', Name.Label),
('(' + atom_re + ')(:)', bygroups(Name.Namespace, Punctuation)),
('(?:^|(?<=:))(' + atom_re + r')(\s*)(\()',
bygroups(Name.Function, Text, Punctuation)),
(r'[+-]?' + base_re + r'#[0-9a-zA-Z]+', Number.Integer),
(r'[+-]?\d+', Number.Integer),
(r'[+-]?\d+.\d+', Number.Float),
(r'[]\[:_@\".{}()|;,]', Punctuation),
(variable_re, Name.Variable),
(atom_re, Name),
(r'\?'+macro_re, Name.Constant),
(r'\$(?:'+escape_re+r'|\\[ %]|[^\\])', String.Char),
(r'#'+atom_re+r'(:?\.'+atom_re+r')?', Name.Label),
# Erlang script shebang
(r'\A#!.+\n', Comment.Hashbang),
# EEP 43: Maps
# http://www.erlang.org/eeps/eep-0043.html
(r'#\{', Punctuation, 'map_key'),
],
'string': [
(escape_re, String.Escape),
(r'"', String, '#pop'),
(r'~[0-9.*]*[~#+BPWXb-ginpswx]', String.Interpol),
(r'[^"\\~]+', String),
(r'~', String),
],
'directive': [
(r'(define)(\s*)(\()('+macro_re+r')',
bygroups(Name.Entity, Text, Punctuation, Name.Constant), '#pop'),
(r'(record)(\s*)(\()('+macro_re+r')',
bygroups(Name.Entity, Text, Punctuation, Name.Label), '#pop'),
(atom_re, Name.Entity, '#pop'),
],
'map_key': [
include('root'),
(r'=>', Punctuation, 'map_val'),
(r':=', Punctuation, 'map_val'),
(r'\}', Punctuation, '#pop'),
],
'map_val': [
include('root'),
(r',', Punctuation, '#pop'),
(r'(?=\})', Punctuation, '#pop'),
],
}
class ErlangShellLexer(Lexer):
"""
Shell sessions in erl (for Erlang code).
.. versionadded:: 1.1
"""
name = 'Erlang erl session'
aliases = ['erl']
filenames = ['*.erl-sh']
mimetypes = ['text/x-erl-shellsession']
_prompt_re = re.compile(r'(?:\([\w@_.]+\))?\d+>(?=\s|\Z)')
def get_tokens_unprocessed(self, text):
erlexer = ErlangLexer(**self.options)
curcode = ''
insertions = []
for match in line_re.finditer(text):
line = match.group()
m = self._prompt_re.match(line)
if m is not None:
end = m.end()
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:end])]))
curcode += line[end:]
else:
if curcode:
yield from do_insertions(insertions,
erlexer.get_tokens_unprocessed(curcode))
curcode = ''
insertions = []
if line.startswith('*'):
yield match.start(), Generic.Traceback, line
else:
yield match.start(), Generic.Output, line
if curcode:
yield from do_insertions(insertions,
erlexer.get_tokens_unprocessed(curcode))
def gen_elixir_string_rules(name, symbol, token):
states = {}
states['string_' + name] = [
(r'[^#%s\\]+' % (symbol,), token),
include('escapes'),
(r'\\.', token),
(r'(%s)' % (symbol,), bygroups(token), "#pop"),
include('interpol')
]
return states
def gen_elixir_sigstr_rules(term, term_class, token, interpol=True):
if interpol:
return [
(r'[^#%s\\]+' % (term_class,), token),
include('escapes'),
(r'\\.', token),
(r'%s[a-zA-Z]*' % (term,), token, '#pop'),
include('interpol')
]
else:
return [
(r'[^%s\\]+' % (term_class,), token),
(r'\\.', token),
(r'%s[a-zA-Z]*' % (term,), token, '#pop'),
]
class ElixirLexer(RegexLexer):
"""
For the `Elixir language <http://elixir-lang.org>`_.
.. versionadded:: 1.5
"""
name = 'Elixir'
aliases = ['elixir', 'ex', 'exs']
filenames = ['*.ex', '*.eex', '*.exs']
mimetypes = ['text/x-elixir']
KEYWORD = ('fn', 'do', 'end', 'after', 'else', 'rescue', 'catch')
KEYWORD_OPERATOR = ('not', 'and', 'or', 'when', 'in')
BUILTIN = (
'case', 'cond', 'for', 'if', 'unless', 'try', 'receive', 'raise',
'quote', 'unquote', 'unquote_splicing', 'throw', 'super',
)
BUILTIN_DECLARATION = (
'def', 'defp', 'defmodule', 'defprotocol', 'defmacro', 'defmacrop',
'defdelegate', 'defexception', 'defstruct', 'defimpl', 'defcallback',
)
BUILTIN_NAMESPACE = ('import', 'require', 'use', 'alias')
CONSTANT = ('nil', 'true', 'false')
PSEUDO_VAR = ('_', '__MODULE__', '__DIR__', '__ENV__', '__CALLER__')
OPERATORS3 = (
'<<<', '>>>', '|||', '&&&', '^^^', '~~~', '===', '!==',
'~>>', '<~>', '|~>', '<|>',
)
OPERATORS2 = (
'==', '!=', '<=', '>=', '&&', '||', '<>', '++', '--', '|>', '=~',
'->', '<-', '|', '.', '=', '~>', '<~',
)
OPERATORS1 = ('<', '>', '+', '-', '*', '/', '!', '^', '&')
PUNCTUATION = (
'\\\\', '<<', '>>', '=>', '(', ')', ':', ';', ',', '[', ']',
)
def get_tokens_unprocessed(self, text):
for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
if token is Name:
if value in self.KEYWORD:
yield index, Keyword, value
elif value in self.KEYWORD_OPERATOR:
yield index, Operator.Word, value
elif value in self.BUILTIN:
yield index, Keyword, value
elif value in self.BUILTIN_DECLARATION:
yield index, Keyword.Declaration, value
elif value in self.BUILTIN_NAMESPACE:
yield index, Keyword.Namespace, value
elif value in self.CONSTANT:
yield index, Name.Constant, value
elif value in self.PSEUDO_VAR:
yield index, Name.Builtin.Pseudo, value
else:
yield index, token, value
else:
yield index, token, value
def gen_elixir_sigil_rules():
# all valid sigil terminators (excluding heredocs)
terminators = [
(r'\{', r'\}', '}', 'cb'),
(r'\[', r'\]', r'\]', 'sb'),
(r'\(', r'\)', ')', 'pa'),
('<', '>', '>', 'ab'),
('/', '/', '/', 'slas'),
(r'\|', r'\|', '|', 'pipe'),
('"', '"', '"', 'quot'),
("'", "'", "'", 'apos'),
]
# heredocs have slightly different rules
triquotes = [(r'"""', 'triquot'), (r"'''", 'triapos')]
token = String.Other
states = {'sigils': []}
for term, name in triquotes:
states['sigils'] += [
(r'(~[a-z])(%s)' % (term,), bygroups(token, String.Heredoc),
(name + '-end', name + '-intp')),
(r'(~[A-Z])(%s)' % (term,), bygroups(token, String.Heredoc),
(name + '-end', name + '-no-intp')),
]
states[name + '-end'] = [
(r'[a-zA-Z]+', token, '#pop'),
default('#pop'),
]
states[name + '-intp'] = [
(r'^\s*' + term, String.Heredoc, '#pop'),
include('heredoc_interpol'),
]
states[name + '-no-intp'] = [
(r'^\s*' + term, String.Heredoc, '#pop'),
include('heredoc_no_interpol'),
]
for lterm, rterm, rterm_class, name in terminators:
states['sigils'] += [
(r'~[a-z]' + lterm, token, name + '-intp'),
(r'~[A-Z]' + lterm, token, name + '-no-intp'),
]
states[name + '-intp'] = \
gen_elixir_sigstr_rules(rterm, rterm_class, token)
states[name + '-no-intp'] = \
gen_elixir_sigstr_rules(rterm, rterm_class, token, interpol=False)
return states
op3_re = "|".join(re.escape(s) for s in OPERATORS3)
op2_re = "|".join(re.escape(s) for s in OPERATORS2)
op1_re = "|".join(re.escape(s) for s in OPERATORS1)
ops_re = r'(?:%s|%s|%s)' % (op3_re, op2_re, op1_re)
punctuation_re = "|".join(re.escape(s) for s in PUNCTUATION)
alnum = r'\w'
name_re = r'(?:\.\.\.|[a-z_]%s*[!?]?)' % alnum
modname_re = r'[A-Z]%(alnum)s*(?:\.[A-Z]%(alnum)s*)*' % {'alnum': alnum}
complex_name_re = r'(?:%s|%s|%s)' % (name_re, modname_re, ops_re)
special_atom_re = r'(?:\.\.\.|<<>>|%\{\}|%|\{\})'
long_hex_char_re = r'(\\x\{)([\da-fA-F]+)(\})'
hex_char_re = r'(\\x[\da-fA-F]{1,2})'
escape_char_re = r'(\\[abdefnrstv])'
tokens = {
'root': [
(r'\s+', Text),
(r'#.*$', Comment.Single),
# Various kinds of characters
(r'(\?)' + long_hex_char_re,
bygroups(String.Char,
String.Escape, Number.Hex, String.Escape)),
(r'(\?)' + hex_char_re,
bygroups(String.Char, String.Escape)),
(r'(\?)' + escape_char_re,
bygroups(String.Char, String.Escape)),
(r'\?\\?.', String.Char),
# '::' has to go before atoms
(r':::', String.Symbol),
(r'::', Operator),
# atoms
(r':' + special_atom_re, String.Symbol),
(r':' + complex_name_re, String.Symbol),
(r':"', String.Symbol, 'string_double_atom'),
(r":'", String.Symbol, 'string_single_atom'),
# [keywords: ...]
(r'(%s|%s)(:)(?=\s|\n)' % (special_atom_re, complex_name_re),
bygroups(String.Symbol, Punctuation)),
# @attributes
(r'@' + name_re, Name.Attribute),
# identifiers
(name_re, Name),
(r'(%%?)(%s)' % (modname_re,), bygroups(Punctuation, Name.Class)),
# operators and punctuation
(op3_re, Operator),
(op2_re, Operator),
(punctuation_re, Punctuation),
(r'&\d', Name.Entity), # anon func arguments
(op1_re, Operator),
# numbers
(r'0b[01]+', Number.Bin),
(r'0o[0-7]+', Number.Oct),
(r'0x[\da-fA-F]+', Number.Hex),
(r'\d(_?\d)*\.\d(_?\d)*([eE][-+]?\d(_?\d)*)?', Number.Float),
(r'\d(_?\d)*', Number.Integer),
# strings and heredocs
(r'"""\s*', String.Heredoc, 'heredoc_double'),
(r"'''\s*$", String.Heredoc, 'heredoc_single'),
(r'"', String.Double, 'string_double'),
(r"'", String.Single, 'string_single'),
include('sigils'),
(r'%\{', Punctuation, 'map_key'),
(r'\{', Punctuation, 'tuple'),
],
'heredoc_double': [
(r'^\s*"""', String.Heredoc, '#pop'),
include('heredoc_interpol'),
],
'heredoc_single': [
(r"^\s*'''", String.Heredoc, '#pop'),
include('heredoc_interpol'),
],
'heredoc_interpol': [
(r'[^#\\\n]+', String.Heredoc),
include('escapes'),
(r'\\.', String.Heredoc),
(r'\n+', String.Heredoc),
include('interpol'),
],
'heredoc_no_interpol': [
(r'[^\\\n]+', String.Heredoc),
(r'\\.', String.Heredoc),
(r'\n+', String.Heredoc),
],
'escapes': [
(long_hex_char_re,
bygroups(String.Escape, Number.Hex, String.Escape)),
(hex_char_re, String.Escape),
(escape_char_re, String.Escape),
],
'interpol': [
(r'#\{', String.Interpol, 'interpol_string'),
],
'interpol_string': [
(r'\}', String.Interpol, "#pop"),
include('root')
],
'map_key': [
include('root'),
(r':', Punctuation, 'map_val'),
(r'=>', Punctuation, 'map_val'),
(r'\}', Punctuation, '#pop'),
],
'map_val': [
include('root'),
(r',', Punctuation, '#pop'),
(r'(?=\})', Punctuation, '#pop'),
],
'tuple': [
include('root'),
(r'\}', Punctuation, '#pop'),
],
}
tokens.update(gen_elixir_string_rules('double', '"', String.Double))
tokens.update(gen_elixir_string_rules('single', "'", String.Single))
tokens.update(gen_elixir_string_rules('double_atom', '"', String.Symbol))
tokens.update(gen_elixir_string_rules('single_atom', "'", String.Symbol))
tokens.update(gen_elixir_sigil_rules())
class ElixirConsoleLexer(Lexer):
"""
For Elixir interactive console (iex) output like:
.. sourcecode:: iex
iex> [head | tail] = [1,2,3]
[1,2,3]
iex> head
1
iex> tail
[2,3]
iex> [head | tail]
[1,2,3]
iex> length [head | tail]
3
.. versionadded:: 1.5
"""
name = 'Elixir iex session'
aliases = ['iex']
mimetypes = ['text/x-elixir-shellsession']
_prompt_re = re.compile(r'(iex|\.{3})((?:\([\w@_.]+\))?\d+|\(\d+\))?> ')
def get_tokens_unprocessed(self, text):
exlexer = ElixirLexer(**self.options)
curcode = ''
in_error = False
insertions = []
for match in line_re.finditer(text):
line = match.group()
if line.startswith('** '):
in_error = True
insertions.append((len(curcode),
[(0, Generic.Error, line[:-1])]))
curcode += line[-1:]
else:
m = self._prompt_re.match(line)
if m is not None:
in_error = False
end = m.end()
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:end])]))
curcode += line[end:]
else:
if curcode:
yield from do_insertions(
insertions, exlexer.get_tokens_unprocessed(curcode))
curcode = ''
insertions = []
token = Generic.Error if in_error else Generic.Output
yield match.start(), token, line
if curcode:
yield from do_insertions(
insertions, exlexer.get_tokens_unprocessed(curcode))
| 35.757062 | 96 | 0.469216 |
56dc2f59c2971136e1977ce03ce6132111755e89 | 8,642 | py | Python | DFP/multi_experiment.py | kaiolae/tf_future_prediction | e06549e02c10cd957f8b0e51f24c9df2702d7ef0 | [
"MIT"
] | null | null | null | DFP/multi_experiment.py | kaiolae/tf_future_prediction | e06549e02c10cd957f8b0e51f24c9df2702d7ef0 | [
"MIT"
] | null | null | null | DFP/multi_experiment.py | kaiolae/tf_future_prediction | e06549e02c10cd957f8b0e51f24c9df2702d7ef0 | [
"MIT"
] | null | null | null | from __future__ import print_function
import numpy as np
from .future_target_maker import FutureTargetMaker
from .multi_doom_simulator import MultiDoomSimulator
from .multi_experience_memory import MultiExperienceMemory
from .future_predictor_agent_basic import FuturePredictorAgentBasic
from .future_predictor_agent_advantage import FuturePredictorAgentAdvantage
from .future_predictor_agent_advantage_nonorm import FuturePredictorAgentAdvantageNoNorm
from . import defaults
import tensorflow as tf
import scipy.misc
from . import util as my_util
import shutil
### Experiment with multi-head experience
class MultiExperiment:
def __init__(self, target_maker_args={},
simulator_args={},
train_experience_args={},
test_policy_experience_args={},
agent_args={},
experiment_args={}):
# set default values
target_maker_args = my_util.merge_two_dicts(defaults.target_maker_args, target_maker_args)
if isinstance(simulator_args, dict):
simulator_args = my_util.merge_two_dicts(defaults.simulator_args, simulator_args)
else:
for n in range(len(simulator_args)):
simulator_args[n] = my_util.merge_two_dicts(defaults.simulator_args, simulator_args[n])
train_experience_args = my_util.merge_two_dicts(defaults.train_experience_args, train_experience_args)
test_policy_experience_args = my_util.merge_two_dicts(defaults.test_policy_experience_args, test_policy_experience_args)
agent_args = my_util.merge_two_dicts(defaults.agent_args, agent_args)
experiment_args = my_util.merge_two_dicts(defaults.experiment_args, experiment_args)
if not (experiment_args['args_file'] is None):
print(' ++ Reading arguments from ', experiment_args['args_file'])
with open(experiment_args['args_file'], 'r') as f:
input_args = my_util.json_load_byteified(f)
for arg_name,arg_val in input_args.items():
print(arg_name, arg_val)
for k,v in arg_val.items():
locals()[arg_name][k] = v
self.target_maker = FutureTargetMaker(target_maker_args)
self.results_file = experiment_args['results_file']
self.net_name = experiment_args['net_name']
self.num_predictions_to_show = experiment_args['num_predictions_to_show']
agent_args['target_dim'] = self.target_maker.target_dim
agent_args['target_names'] = self.target_maker.target_names
if isinstance(simulator_args, list):
# if we are given a bunch of different simulators
self.multi_simulator =MultiDoomSimulator(simulator_args)
else:
# if we have to replicate a single simulator
self.multi_simulator = MultiDoomSimulator([simulator_args] * simulator_args['num_simulators'])
agent_args['discrete_controls'] = self.multi_simulator.discrete_controls
agent_args['continuous_controls'] = self.multi_simulator.continuous_controls
agent_args['objective_indices'], agent_args['objective_coeffs'] = my_util.make_objective_indices_and_coeffs(agent_args['objective_coeffs_temporal'],
agent_args['objective_coeffs_meas'])
train_experience_args['obj_shape'] = (len(agent_args['objective_coeffs']),)
test_policy_experience_args['obj_shape'] = (len(agent_args['objective_coeffs']),)
self.train_experience = MultiExperienceMemory(train_experience_args, multi_simulator = self.multi_simulator, target_maker = self.target_maker)
agent_args['state_imgs_shape'] = self.train_experience.state_imgs_shape
agent_args['obj_shape'] = (len(agent_args['objective_coeffs']),)
agent_args['num_simulators'] = self.multi_simulator.num_simulators
if 'meas_for_net' in experiment_args:
agent_args['meas_for_net'] = []
for ns in range(self.train_experience.history_length):
agent_args['meas_for_net'] += [i + self.multi_simulator.num_meas * ns for i in experiment_args['meas_for_net']] # we want these measurements from all timesteps
agent_args['meas_for_net'] = np.array(agent_args['meas_for_net'])
else:
agent_args['meas_for_net'] = np.arange(self.train_experience.state_meas_shape[0])
if len(experiment_args['meas_for_manual']) > 0:
agent_args['meas_for_manual'] = np.array([i + self.multi_simulator.num_meas*(self.train_experience.history_length-1) for i in experiment_args['meas_for_manual']]) # current timestep is the last in the stack
else:
agent_args['meas_for_manual'] = []
agent_args['state_meas_shape'] = [len(agent_args['meas_for_net'])]
self.agent_type = agent_args['agent_type']
if agent_args['random_objective_coeffs']:
assert('fc_obj_params' in agent_args)
self.test_policy_experience = MultiExperienceMemory(test_policy_experience_args, multi_simulator = self.multi_simulator, target_maker = self.target_maker)
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1) # avoid using all gpu memory
self.sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,log_device_placement=False))
if self.agent_type == 'basic':
self.ag = FuturePredictorAgentBasic(self.sess, agent_args)
elif self.agent_type == 'advantage':
self.ag = FuturePredictorAgentAdvantage(self.sess, agent_args) # inital design: concat meas and img, then 2 branches for adv and val
elif self.agent_type == 'advantage_nonorm':
self.ag = FuturePredictorAgentAdvantageNoNorm(self.sess, agent_args) # no normalizatio in the advantage stream
else:
raise Exception('Unknown agent type', self.agent_type)
self.num_train_iterations = experiment_args['num_train_iterations']
_, self.test_objective_coeffs = my_util.make_objective_indices_and_coeffs(experiment_args['test_objective_coeffs_temporal'],
experiment_args['test_objective_coeffs_meas'])
self.test_random_prob = experiment_args['test_random_prob']
self.test_checkpoint = experiment_args['test_checkpoint']
self.test_policy_num_steps = experiment_args['test_policy_num_steps']
def run(self, mode):
#KOE: The main run-method, that can either train a new agent, or show the performance of a trained agent.
shutil.copy('run_exp.py', 'run_exp.py.' + mode)
#if mode == 'show':
#KOE: Shows performance of trained agent.
if not self.ag.load(self.test_checkpoint):
print('Could not load the checkpoint ', self.test_checkpoint)
return
#Since we run N different simulations at the same time, we want to offset their stored data (with N different "write heads") spaced apart by the length of the planned episode.
self.train_experience.head_offset = self.test_policy_num_steps + 1
self.train_experience.log_prefix = 'logs/log_test'
#KOE: Tests the trained policy represented by stored weights, with given objectives. Results are remembered, and plotted by the method below.
avg_meas, avg_reward = self.ag.test_policy(self.multi_simulator, self.train_experience, self.test_objective_coeffs, self.test_policy_num_steps, random_prob = self.test_random_prob, write_summary=False, write_predictions=True)
# KoeChange: WriteVideo
# Shows/Stores the experience of the tested agent above.
#KOETODO What does num_simulators do?
if "show" in mode:
self.train_experience.show(start_index=0, end_index=self.test_policy_num_steps * self.multi_simulator.num_simulators, display=False, write_imgs=False, write_video=True,
preprocess_targets = self.ag.preprocess_input_targets, show_predictions=self.num_predictions_to_show, net_discrete_actions = self.ag.net_discrete_actions)
return avg_meas, avg_reward
#elif mode == 'train':
# self.test_policy_experience.log_prefix = 'logs/log'
# self.ag.train(self.multi_simulator, self.train_experience, self.num_train_iterations, test_policy_experience=self.test_policy_experience)
#else:
# print('Unknown mode', mode)
| 61.29078 | 233 | 0.693127 |
cbb61b1d370b4dc74d0319b3b605be5dd9078dc9 | 4,158 | py | Python | configs/GA_CONF-ddp.py | jeffhancock/GlobusArchiver | f1e3775dd8eb32eed511eb410b5d91ac428e06f2 | [
"Apache-2.0"
] | 3 | 2019-06-12T15:30:08.000Z | 2022-03-02T20:32:30.000Z | configs/GA_CONF-ddp.py | jeffhancock/GlobusArchiver | f1e3775dd8eb32eed511eb410b5d91ac428e06f2 | [
"Apache-2.0"
] | 38 | 2019-09-06T19:49:25.000Z | 2021-06-23T20:56:40.000Z | configs/GA_CONF-ddp.py | jeffhancock/GlobusArchiver | f1e3775dd8eb32eed511eb410b5d91ac428e06f2 | [
"Apache-2.0"
] | 3 | 2019-07-19T19:16:16.000Z | 2019-12-06T00:22:19.000Z | #!/usr/bin/env python
######################################
# GLOBUS CONFIGURATION
######################################
# Imports used in the configuration file
import os
import socket
import datetime
#####################################
## GENERAL CONFIGURATION
#####################################
############### TEMP DIR ##################
# tempDir is used for:
# - Staging Location for .tar Files
# Default, $TMPDIR if it is defined, otherwise $HOME if defined, otherwise '.'.
tempDir = "/rapdmg2/data/tmp/"
# You may want to keep the tmp area around for debugging
cleanTemp = True
############### EMAIL ##################
# Deliver a report to these email addresses
# Use a list of 3-tuples ("name", "local-part", "domain")
emailAddresses = [("prestop@rap.ucar.edu", "prestop", "rap.ucar.edu")]
# This is the email address that will be used in the "from" field
fromEmail = emailAddresses[0];
#####################################
## AUTHENTICATION
#####################################
# You can define the endpoint directly
# This default value is the NCAR CampaignStore
# the value was obtained by running:
# $ globus endpoint search 'NCAR' --filter-owner-id 'ncar@globusid.org' | grep Campaign | cut -f1 -d'
archiveEndPoint = "6b5ab960-7bbf-11e8-9450-0a6d4e044368"
# The refresh token is what lets you use globus without authenticating every time. We store it in a local file.
# !!IMPORTANT!!!
# You need to protect your Refresh Tokens.
# They are an infinite lifetime credential to act as you.
# Like passwords, they should only be stored in secure locations.
# e.g. placed in a directory where only you have read/write access
globusTokenFile = os.path.join(os.path.expanduser("~"),".globus-ral","refresh-tokens.json")
####################################
## ARCHIVE RUN CONFIGURATION
####################################
######### Archive Date/Time #################
#
# This is used to set the date/time of the Archive.
# The date/time can be substituted into all archive-item strings, by using
# standard strftime formatting.
# This value is added (so use a negaative number to assign a date in the past)
# to now() to find the archive date/time.
archiveDayDelta=-2
# If this is set, it overrides the archiveDayDelta. If you want to use
# archiveDayDelta to set the Archive Date/Time, make sure this is
# set to an empty string. This string must be parseable by one of the
# format strings defined in archiveDateTimeFormats.
archiveDateTimeString=""
# You can add additional strptime
archiveDateTimeFormats=["%Y%m%d","%Y%m%d%H","%Y-%m-%dT%H:%M:%SZ"]
# Set to False to process data but don't actually submit the tasks to Globus
submitTasks = True
# Number of seconds to wait to see if transfer completed
# Report error if it doesn't completed after this time
# Default is 21600 (6 hours)
transferStatusTimeout = 21600
####################################
## ARCHIVE ITEM CONFIGURATION
####################################
# TODO: transfer-args are currently ignored
# doZip is optional, and defaults to False
# transferLabel is optional, and defaults to the item key + "-%Y%m%d"
# tar_filename is optional and defaults to "". TAR is only done if tar_filename is a non-empty string
# transferArgs is a placeholder and not yet implemented.
# use sync_level to specify when files are overwritten:
# "exists" - If the destination file is absent, do the transfer.
# "size" - If destination file size does not match the source, do the transfer.
# "mtime" - If source has a newer modififed time than the destination, do the transfer.
# "checksum" - If source and destination contents differ, as determined by a checksum of their contents, do the transfer.
archiveItems = {
"item-0":
{
"source": "/rapdmg1/data/ddp/%Y%m%d",
"expectedFileSize": 91000000,
"expectedNumFiles": 865,
"tarFileName": "%Y%m%d.ddp.tar",
"dataFormat": "ascii",
"comment": "weather service textual data including things like METARs,",
"warningLevel": .7,
"doZip": True,
"destination": "/gpfs/csfs1/ral/nral0003/LDM/ARCHIVE/%Y/%m%d",
},
}
| 33.804878 | 122 | 0.638769 |
9fc0b409e90eafdfdb084736316bba4cccb012ad | 990 | py | Python | tests/td_cfgtemplate.py | sandeep-gh/justpy-chartjs | 0a37e8dc6f6783b7847a744a7cb24cfd124b2787 | [
"MIT"
] | 4 | 2021-08-09T17:11:55.000Z | 2022-02-24T10:45:17.000Z | tests/td_cfgtemplate.py | sandeep-gh/justpy-chartjs | 0a37e8dc6f6783b7847a744a7cb24cfd124b2787 | [
"MIT"
] | null | null | null | tests/td_cfgtemplate.py | sandeep-gh/justpy-chartjs | 0a37e8dc6f6783b7847a744a7cb24cfd124b2787 | [
"MIT"
] | null | null | null | from dpath.util import set as dset
from dpath.util import get as dget
from addict import Dict
from justpy_chartjs.tags import cfg_template as ct
import json
import jsbeautifier
opts = jsbeautifier.default_options()
opts.indent_size = 2
labels = ["ds1", "ds2", "ds3", "ds4", "ds5"]
datavals = [[{'x': 1, 'y': 3}, {'x': 5, 'y': 5}],
[{'x': 1, 'y': 7}, {'x': 5, 'y': 2}],
[{'x': 1, 'y': 0}, {'x': 5, 'y': 8}],
[{'x': 1, 'y': 13}, {'x': 5, 'y': 2}],
[{'x': 1, 'y': 2}, {'x': 5, 'y': 6}],
[{'x': 1, 'y': 9}, {'x': 5, 'y': 7}],
]
cfgctx = Dict()
cfgctx.plttype = ct.PlotType.Line
cfgctx.xaxis_type = ct.ScaleType.Linear
cfgctx.xaxis_title = "xaxis"
cfgctx.yaxis_title = "yaxis"
cfgctx.plot_title = "testplot"
cfg = ct.build_pltcfg(cfgctx)
newcfg = ct.build_cfg(cfg, labels, datavals)
dset(newcfg, "/options/parsing/xAxisKey",
"awesomekye")
res = jsbeautifier.beautify(json.dumps(newcfg), opts)
print(res)
| 28.285714 | 53 | 0.565657 |
f47dc9e84e76991f2cbe904b05b7a7efe095dead | 1,985 | py | Python | leetcode/reverse_string.py | julianespinel/trainning | 23e07c954e5bf03f1cd117e388eed7da4a3e8f63 | [
"MIT"
] | null | null | null | leetcode/reverse_string.py | julianespinel/trainning | 23e07c954e5bf03f1cd117e388eed7da4a3e8f63 | [
"MIT"
] | null | null | null | leetcode/reverse_string.py | julianespinel/trainning | 23e07c954e5bf03f1cd117e388eed7da4a3e8f63 | [
"MIT"
] | null | null | null | """
# Problem statement
https://leetcode.com/explore/interview/card/top-interview-questions-easy/127/strings/879/
## Algorithm description
Traverse the given input until the middle of it.
Swap the extremes of the input until reach the middle of it.
Example:
input = "abcde"
len(input) = 5
middle = ceil(5 / 3) = 3
ii = initial_index
fi = final_index
i = 0, fi = 4, "abcde"
i = 1, fi = 3, "ebcda"
i = 2, fi = 2, "edcba"
This works for odd and event inputs.
### Cases
I considered the following cases:
1. empty: "" -> ""
1. one: "a" -> "a"
1. String length is odd: "abc" -> "cba"
1. String length is even: "abcd" -> "dcba"
### Examples:
"abcde" -> l: 5 -> int(5 / 2) = 3 ->
i = 0 < 3
ii
0, 5 - 1 - 0 = 4
1, 5 - 1 - 1 = 3
2, 5 - 1 - 2 = 2
"abcd" -> l: 4 -> int(4 / 2) = 2 ->
i = 0 < 2
ii
0, 4 - 1 - 0 = 3
1, 4 - 1 - 1 = 2
## Complexity
### Time
1. Traverse the given input until the middle of it: O(n)
1. Swap elements of the input: O(1)
Total = O(n) + O(1) = O(n)
### Space
Only simple variables were created: O(1)
## To improve
I prefer to avoid mutating data structures, so I would preferred to create
a new array to store the answer and the return it.
I mutated the given input because that was a constraint given in the problem
statement:
> Do not allocate extra space for another array, you must do this by modifying the input array in-place with O(1) extra memory.
"""
import math
class Solution:
def reverseString(self, string: [str]) -> None:
"""
Do not return anything, modify s in-place instead.
"""
length = len(string)
if length <= 1:
return
limit = math.ceil(length / 2)
for index in range(limit):
final_index = length - 1 - index
string[index], string[final_index] = string[final_index], string[index]
if __name__ == "__main__":
solution = Solution()
string = ["H","a","n","n","a","h"]
solution.reverseString(string)
print(string)
| 20.463918 | 127 | 0.617128 |
d335fec9fd0f784ce9de8f34a5f4cce30b2985ac | 53,327 | py | Python | src/sage/geometry/toric_lattice.py | fredstro/sage | c936d2cda81ec7ec3552a3bdb29c994b40d1bb24 | [
"BSL-1.0"
] | null | null | null | src/sage/geometry/toric_lattice.py | fredstro/sage | c936d2cda81ec7ec3552a3bdb29c994b40d1bb24 | [
"BSL-1.0"
] | null | null | null | src/sage/geometry/toric_lattice.py | fredstro/sage | c936d2cda81ec7ec3552a3bdb29c994b40d1bb24 | [
"BSL-1.0"
] | null | null | null | r"""
Toric lattices
This module was designed as a part of the framework for toric varieties
(:mod:`~sage.schemes.toric.variety`,
:mod:`~sage.schemes.toric.fano_variety`).
All toric lattices are isomorphic to `\ZZ^n` for some `n`, but will prevent
you from doing "wrong" operations with objects from different lattices.
AUTHORS:
- Andrey Novoseltsev (2010-05-27): initial version.
- Andrey Novoseltsev (2010-07-30): sublattices and quotients.
EXAMPLES:
The simplest way to create a toric lattice is to specify its dimension only::
sage: N = ToricLattice(3)
sage: N
3-d lattice N
While our lattice ``N`` is called exactly "N" it is a coincidence: all
lattices are called "N" by default::
sage: another_name = ToricLattice(3)
sage: another_name
3-d lattice N
If fact, the above lattice is exactly the same as before as an object in
memory::
sage: N is another_name
True
There are actually four names associated to a toric lattice and they all must
be the same for two lattices to coincide::
sage: N, N.dual(), latex(N), latex(N.dual())
(3-d lattice N, 3-d lattice M, N, M)
Notice that the lattice dual to ``N`` is called "M" which is standard in toric
geometry. This happens only if you allow completely automatic handling of
names::
sage: another_N = ToricLattice(3, "N")
sage: another_N.dual()
3-d lattice N*
sage: N is another_N
False
What can you do with toric lattices? Well, their main purpose is to allow
creation of elements of toric lattices::
sage: n = N([1,2,3])
sage: n
N(1, 2, 3)
sage: M = N.dual()
sage: m = M(1,2,3)
sage: m
M(1, 2, 3)
Dual lattices can act on each other::
sage: n * m
14
sage: m * n
14
You can also add elements of the same lattice or scale them::
sage: 2 * n
N(2, 4, 6)
sage: n * 2
N(2, 4, 6)
sage: n + n
N(2, 4, 6)
However, you cannot "mix wrong lattices" in your expressions::
sage: n + m
Traceback (most recent call last):
...
TypeError: unsupported operand parent(s) for '+':
'3-d lattice N' and '3-d lattice M'
sage: n * n
Traceback (most recent call last):
...
TypeError: elements of the same toric lattice cannot be multiplied!
sage: n == m
False
Note that ``n`` and ``m`` are not equal to each other even though they are
both "just (1,2,3)." Moreover, you cannot easily convert elements between
toric lattices::
sage: M(n)
Traceback (most recent call last):
...
TypeError: N(1, 2, 3) cannot be converted to 3-d lattice M!
If you really need to consider elements of one lattice as elements of another,
you can either use intermediate conversion to "just a vector"::
sage: ZZ3 = ZZ^3
sage: n_in_M = M(ZZ3(n))
sage: n_in_M
M(1, 2, 3)
sage: n == n_in_M
False
sage: n_in_M == m
True
Or you can create a homomorphism from one lattice to any other::
sage: h = N.hom(identity_matrix(3), M)
sage: h(n)
M(1, 2, 3)
.. WARNING::
While integer vectors (elements of `\ZZ^n`) are printed as ``(1,2,3)``,
in the code ``(1,2,3)`` is a :class:`tuple`, which has nothing to do
neither with vectors, nor with toric lattices, so the following is
probably not what you want while working with toric geometry objects::
sage: (1,2,3) + (1,2,3)
(1, 2, 3, 1, 2, 3)
Instead, use syntax like ::
sage: N(1,2,3) + N(1,2,3)
N(2, 4, 6)
"""
# Parts of the "tutorial" above are also in toric_lattice_element.pyx.
#*****************************************************************************
# Copyright (C) 2010 Andrey Novoseltsev <novoselt@gmail.com>
# Copyright (C) 2010 William Stein <wstein@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.geometry.toric_lattice_element import (ToricLatticeElement,
is_ToricLatticeElement)
from sage.geometry.toric_plotter import ToricPlotter
from sage.misc.all import latex
from sage.structure.all import parent
from sage.modules.fg_pid.fgp_element import FGP_Element
from sage.modules.fg_pid.fgp_module import FGP_Module_class
from sage.modules.free_module import (FreeModule_ambient_pid,
FreeModule_generic_pid,
FreeModule_submodule_pid,
FreeModule_submodule_with_basis_pid)
from sage.rings.all import QQ, ZZ
from sage.structure.factory import UniqueFactory
def is_ToricLattice(x):
r"""
Check if ``x`` is a toric lattice.
INPUT:
- ``x`` -- anything.
OUTPUT:
- ``True`` if ``x`` is a toric lattice and ``False`` otherwise.
EXAMPLES::
sage: from sage.geometry.toric_lattice import (
... is_ToricLattice)
sage: is_ToricLattice(1)
False
sage: N = ToricLattice(3)
sage: N
3-d lattice N
sage: is_ToricLattice(N)
True
"""
return isinstance(x, ToricLattice_generic)
def is_ToricLatticeQuotient(x):
r"""
Check if ``x`` is a toric lattice quotient.
INPUT:
- ``x`` -- anything.
OUTPUT:
- ``True`` if ``x`` is a toric lattice quotient and ``False`` otherwise.
EXAMPLES::
sage: from sage.geometry.toric_lattice import (
... is_ToricLatticeQuotient)
sage: is_ToricLatticeQuotient(1)
False
sage: N = ToricLattice(3)
sage: N
3-d lattice N
sage: is_ToricLatticeQuotient(N)
False
sage: Q = N / N.submodule([(1,2,3), (3,2,1)])
sage: Q
Quotient with torsion of 3-d lattice N
by Sublattice <N(1, 2, 3), N(0, 4, 8)>
sage: is_ToricLatticeQuotient(Q)
True
"""
return isinstance(x, ToricLattice_quotient)
class ToricLatticeFactory(UniqueFactory):
    r"""
    Create a lattice for toric geometry objects.
    INPUT:
    - ``rank`` -- nonnegative integer, the only mandatory parameter;
    - ``name`` -- string;
    - ``dual_name`` -- string;
    - ``latex_name`` -- string;
    - ``latex_dual_name`` -- string.
    OUTPUT:
    - lattice.
    A toric lattice is uniquely determined by its rank and associated names.
    There are four such "associated names" whose meaning should be clear from
    the names of the corresponding parameters, but the choice of default
    values is a little bit involved. So here is the full description of the
    "naming algorithm":
    #. If no names were given at all, then this lattice will be called "N" and
       the dual one "M". These are the standard choices in toric geometry.
    #. If ``name`` was given and ``dual_name`` was not, then ``dual_name``
       will be ``name`` followed by "*".
    #. If LaTeX names were not given, they will coincide with the "usual"
       names, but if ``dual_name`` was constructed automatically, the trailing
       star will be typeset as a superscript.
    EXAMPLES:
    Let's start with no names at all and see how automatic names are given::
        sage: L1 = ToricLattice(3)
        sage: L1
        3-d lattice N
        sage: L1.dual()
        3-d lattice M
    If we give the name "N" explicitly, the dual lattice will be called "N*"::
        sage: L2 = ToricLattice(3, "N")
        sage: L2
        3-d lattice N
        sage: L2.dual()
        3-d lattice N*
    However, we can give an explicit name for it too::
        sage: L3 = ToricLattice(3, "N", "M")
        sage: L3
        3-d lattice N
        sage: L3.dual()
        3-d lattice M
    If you want, you may also give explicit LaTeX names::
        sage: L4 = ToricLattice(3, "N", "M", r"\mathbb{N}", r"\mathbb{M}")
        sage: latex(L4)
        \mathbb{N}
        sage: latex(L4.dual())
        \mathbb{M}
    While all four lattices above are called "N", only two of them are equal
    (and are actually the same)::
        sage: L1 == L2
        False
        sage: L1 == L3
        True
        sage: L1 is L3
        True
        sage: L1 == L4
        False
    The reason for this is that ``L2`` and ``L4`` have different names either
    for dual lattices or for LaTeX typesetting.
    """
    def create_key(self, rank, name=None, dual_name=None,
                   latex_name=None, latex_dual_name=None):
        """
        Create a key that uniquely identifies this toric lattice.
        See :class:`ToricLattice <ToricLatticeFactory>` for documentation.
        .. WARNING::
            You probably should not use this function directly.
        TESTS::
            sage: ToricLattice.create_key(3)
            (3, 'N', 'M', 'N', 'M')
            sage: N = ToricLattice(3)
            sage: loads(dumps(N)) is N
            True
            sage: TestSuite(N).run()
        """
        rank = int(rank)
        # Naming a dual lattice without naming the lattice itself is
        # ambiguous, so refuse it up front.
        if name is None and dual_name is not None:
            raise ValueError("you can name the dual lattice only if you "
                             "also name the original one!")
        if name is None:
            # Standard M and N lattices of toric geometry.
            name, dual_name = "N", "M"
        if latex_name is None:
            latex_name = name
        # The default for latex_dual_name depends on whether dual_name was
        # supplied or will be constructed, so it must be resolved before
        # dual_name receives its own default below.
        if latex_dual_name is None:
            latex_dual_name = (latex_name + "^*" if dual_name is None
                               else dual_name)
        if dual_name is None:
            dual_name = name + "*"
        return (rank, name, dual_name, latex_name, latex_dual_name)
    def create_object(self, version, key):
        r"""
        Create the toric lattice described by ``key``.
        See :class:`ToricLattice <ToricLatticeFactory>` for documentation.
        .. WARNING::
            You probably should not use this function directly.
        TESTS::
            sage: key = ToricLattice.create_key(3)
            sage: ToricLattice.create_object(1, key)
            3-d lattice N
        """
        # The key is exactly the argument tuple of the ambient lattice class.
        return ToricLattice_ambient(*key)
# The unique factory instance: ``ToricLattice(rank, ...)`` constructs (or
# fetches from the cache) the toric lattice with the given rank and names.
ToricLattice = ToricLatticeFactory("ToricLattice")
# Possible TODO's:
# - implement a better construction() method, which still will prohibit
#   operations mixing lattices by conversion to ZZ^n
# - maybe __call__ is not the right place to prohibit conversion between
#   lattices (we need it now so that morphisms behave nicely)
class ToricLattice_generic(FreeModule_generic_pid):
    r"""
    Abstract base class for toric lattices.
    """
    Element = ToricLatticeElement
    # It is not recommended to override __call__ in Parent-derived objects
    # since it may interfere with the coercion model. We do it here to allow
    # N(1,2,3) to be interpreted as N([1,2,3]). We also prohibit N(m) where
    # m is an element of another lattice. Otherwise morphisms will care only
    # about dimension of lattices.
    def __call__(self, *args, **kwds):
        r"""
        Construct a new element of ``self``.
        INPUT:
        - anything that can be interpreted as coordinates, except for elements
          of other lattices.
        OUTPUT:
        - :class:`~sage.geometry.toric_lattice_element.ToricLatticeElement`.
        TESTS::
            sage: N = ToricLattice(3)
            sage: N.__call__([1,2,3])
            N(1, 2, 3)
            sage: N([1,2,3]) # indirect test
            N(1, 2, 3)
        The point of overriding this function was to allow writing the above
        command as::
            sage: N(1,2,3)
            N(1, 2, 3)
        We also test that the special treatment of zero still works::
            sage: N(0)
            N(0, 0, 0)
        Quotients of toric lattices can be converted to a new toric
        lattice of the appropriate dimension::
            sage: N3 = ToricLattice(3, 'N3')
            sage: Q = N3 / N3.span([ N3(1,2,3) ])
            sage: Q.an_element()
            N3[0, 0, 1]
            sage: N2 = ToricLattice(2, 'N2')
            sage: N2( Q.an_element() )
            N2(1, 0)
        """
        supercall = super(ToricLattice_generic, self).__call__
        if args == (0, ):
            # Special treatment for N(0) to return (0,...,0)
            return supercall(*args, **kwds)
        if (isinstance(args[0], ToricLattice_quotient_element)
            and args[0].parent().is_torsion_free()):
            # Convert an element of a torsion-free quotient lattice via its
            # coordinate list.
            return supercall(list(args[0]), **kwds)
        try:
            coordinates = [ZZ(_) for _ in args]
        except TypeError:
            # Prohibit conversion of elements of other lattices
            if (is_ToricLatticeElement(args[0])
                and args[0].parent().ambient_module()
                is not self.ambient_module()):
                raise TypeError("%s cannot be converted to %s!"
                                % (args[0], self))
            # "Standard call"
            return supercall(*args, **kwds)
        # Coordinates were given without packing them into a list or a tuple
        return supercall(coordinates, **kwds)
    def _coerce_map_from_(self, other):
        """
        Return a coercion map from ``other`` to ``self``, or None.
        This prevents the construction of coercion maps between
        lattices with different ambient modules, so :meth:`__call__`
        is invoked instead, which prohibits conversion::
            sage: N = ToricLattice(3)
            sage: M = N.dual()
            sage: M(N(1,2,3))
            Traceback (most recent call last):
            ...
            TypeError: N(1, 2, 3) cannot be converted to 3-d lattice M!
        """
        if (is_ToricLattice(other) and
            other.ambient_module() is not self.ambient_module()):
            return None
        # NOTE(review): this delegates to the parent's *conversion* map
        # machinery (``_convert_map_from_``) rather than to
        # ``_coerce_map_from_`` — looks intentional given the doctest above,
        # but worth confirming against the coercion framework docs.
        return super(ToricLattice_generic, self)._convert_map_from_(other)
    def __contains__(self, point):
        r"""
        Check if ``point`` is an element of ``self``.
        INPUT:
        - ``point`` -- anything.
        OUTPUT:
        - ``True`` if ``point`` is an element of ``self``, ``False``
          otherwise.
        TESTS::
            sage: N = ToricLattice(3)
            sage: M = N.dual()
            sage: L = ToricLattice(3, "L")
            sage: 1 in N
            False
            sage: (1,0) in N
            False
            sage: (1,0,0) in N
            True
            sage: N(1,0,0) in N
            True
            sage: M(1,0,0) in N
            False
            sage: L(1,0,0) in N
            False
            sage: (1/2,0,0) in N
            False
            sage: (2/2,0,0) in N
            True
        """
        # EAFP: membership is "can this be converted to an element of self".
        try:
            self(point)
        except TypeError:
            return False
        return True
    # We need to override this function, otherwise e.g. the sum of elements of
    # different lattices of the same dimension will live in ZZ^n.
    def construction(self):
        r"""
        Return the functorial construction of ``self``.
        OUTPUT:
        - ``None``, we do not think of toric lattices as constructed from
          simpler objects since we do not want to perform arithmetic involving
          different lattices.
        TESTS::
            sage: print ToricLattice(3).construction()
            None
        """
        return None
    def direct_sum(self, other):
        r"""
        Return the direct sum with ``other``.
        INPUT:
        - ``other`` -- a toric lattice or more general module.
        OUTPUT:
        The direct sum of ``self`` and ``other`` as `\ZZ`-modules. If
        ``other`` is a :class:`ToricLattice <ToricLatticeFactory>`,
        another toric lattice will be returned.
        EXAMPLES::
            sage: K = ToricLattice(3, 'K')
            sage: L = ToricLattice(3, 'L')
            sage: N = K.direct_sum(L); N
            6-d lattice K+L
            sage: N, N.dual(), latex(N), latex(N.dual())
            (6-d lattice K+L, 6-d lattice K*+L*, K \oplus L, K^* \oplus L^*)
        With default names::
            sage: N = ToricLattice(3).direct_sum(ToricLattice(2))
            sage: N, N.dual(), latex(N), latex(N.dual())
            (5-d lattice N+N, 5-d lattice M+M, N \oplus N, M \oplus M)
        If ``other`` is not a :class:`ToricLattice
        <ToricLatticeFactory>`, fall back to sum of modules::
            sage: ToricLattice(3).direct_sum(ZZ^2)
            Free module of degree 5 and rank 5 over Integer Ring
            Echelon basis matrix:
            [1 0 0 0 0]
            [0 1 0 0 0]
            [0 0 1 0 0]
            [0 0 0 1 0]
            [0 0 0 0 1]
        """
        if not isinstance(other, ToricLattice_generic):
            return super(ToricLattice_generic, self).direct_sum(other)
        def make_name(N1, N2, use_latex=False):
            # Combine the two lattices' names; use a raw string so that
            # ``\oplus`` is not mistaken for an escape sequence.
            if use_latex:
                return latex(N1) + r' \oplus ' + latex(N2)
            else:
                return N1._name + '+' + N2._name
        rank = self.rank() + other.rank()
        name = make_name(self, other, False)
        dual_name = make_name(self.dual(), other.dual(), False)
        latex_name = make_name(self, other, True)
        latex_dual_name = make_name(self.dual(), other.dual(), True)
        return ToricLattice(rank, name, dual_name, latex_name, latex_dual_name)
    def intersection(self, other):
        r"""
        Return the intersection of ``self`` and ``other``.
        INPUT:
        - ``other`` - a toric (sub)lattice.
        OUTPUT:
        - a toric (sub)lattice.
        EXAMPLES::
            sage: N = ToricLattice(3)
            sage: Ns1 = N.submodule([N(2,4,0), N(9,12,0)])
            sage: Ns2 = N.submodule([N(1,4,9), N(9,2,0)])
            sage: Ns1.intersection(Ns2)
            Sublattice <N(54, 12, 0)>
        Note that if one of the intersecting sublattices is a sublattice of
        another, no new lattices will be constructed::
            sage: N.intersection(N) is N
            True
            sage: Ns1.intersection(N) is Ns1
            True
            sage: N.intersection(Ns1) is Ns1
            True
        """
        # Lattice-specific input check
        if not is_ToricLattice(other):
            raise TypeError("%s is not a toric lattice!" % other)
        if self.ambient_module() != other.ambient_module():
            raise ValueError("%s and %s have different ambient lattices!" %
                             (self, other))
        # Construct a generic intersection, but make sure to return a lattice.
        I = super(ToricLattice_generic, self).intersection(other)
        if not is_ToricLattice(I):
            I = self.ambient_module().submodule(I.basis())
        return I
    def quotient(self, sub, check=True,
                 positive_point=None, positive_dual_point=None):
        """
        Return the quotient of ``self`` by the given sublattice ``sub``.
        INPUT:
        - ``sub`` -- sublattice of self;
        - ``check`` -- (default: True) whether or not to check that ``sub`` is
          a valid sublattice.
        If the quotient is one-dimensional and torsion free, the
        following two mutually exclusive keyword arguments are also
        allowed. They decide the sign choice for the (single)
        generator of the quotient lattice:
        - ``positive_point`` -- a lattice point of ``self`` not in the
          sublattice ``sub`` (that is, not zero in the quotient
          lattice). The quotient generator will be in the same
          direction as ``positive_point``.
        - ``positive_dual_point`` -- a dual lattice point. The
          quotient generator will be chosen such that its lift has a
          positive product with ``positive_dual_point``. Note: if
          ``positive_dual_point`` is not zero on the sublattice
          ``sub``, then the notion of positivity will depend on the
          choice of lift!
        EXAMPLES::
            sage: N = ToricLattice(3)
            sage: Ns = N.submodule([N(2,4,0), N(9,12,0)])
            sage: Q = N/Ns
            sage: Q
            Quotient with torsion of 3-d lattice N
            by Sublattice <N(1, 8, 0), N(0, 12, 0)>
        Attempting to quotient one lattice by a sublattice of another
        will result in a ``ValueError``::
            sage: N = ToricLattice(3)
            sage: M = ToricLattice(3, name='M')
            sage: Ms = M.submodule([M(2,4,0), M(9,12,0)])
            sage: N.quotient(Ms)
            Traceback (most recent call last):
            ...
            ValueError: M(1, 8, 0) can not generate a sublattice of
            3-d lattice N
        However, if we forget the sublattice structure, then it is
        possible to quotient by vector spaces or modules constructed
        from any sublattice::
            sage: N = ToricLattice(3)
            sage: M = ToricLattice(3, name='M')
            sage: Ms = M.submodule([M(2,4,0), M(9,12,0)])
            sage: N.quotient(Ms.vector_space())
            Quotient with torsion of 3-d lattice N by Sublattice
            <N(1, 8, 0), N(0, 12, 0)>
            sage: N.quotient(Ms.sparse_module())
            Quotient with torsion of 3-d lattice N by Sublattice
            <N(1, 8, 0), N(0, 12, 0)>
        See :class:`ToricLattice_quotient` for more examples.
        TESTS:
        We check that :trac:`19603` is fixed::
            sage: K = Cone([(1,0,0),(0,1,0)])
            sage: K.lattice()
            3-d lattice N
            sage: K.orthogonal_sublattice()
            Sublattice <M(0, 0, 1)>
            sage: K.lattice().quotient(K.orthogonal_sublattice())
            Traceback (most recent call last):
            ...
            ValueError: M(0, 0, 1) can not generate a sublattice of
            3-d lattice N
        We can quotient by the trivial sublattice::
            sage: N = ToricLattice(3)
            sage: N.quotient(N.zero_submodule())
            3-d lattice, quotient of 3-d lattice N by Sublattice <>
        We can quotient a lattice by itself::
            sage: N = ToricLattice(3)
            sage: N.quotient(N)
            0-d lattice, quotient of 3-d lattice N by Sublattice
            <N(1, 0, 0), N(0, 1, 0), N(0, 0, 1)>
        """
        return ToricLattice_quotient(self, sub, check,
                                     positive_point, positive_dual_point)
    def saturation(self):
        r"""
        Return the saturation of ``self``.
        OUTPUT:
        - a :class:`toric lattice <ToricLatticeFactory>`.
        EXAMPLES::
            sage: N = ToricLattice(3)
            sage: Ns = N.submodule([(1,2,3), (4,5,6)])
            sage: Ns
            Sublattice <N(1, 2, 3), N(0, 3, 6)>
            sage: Ns_sat = Ns.saturation()
            sage: Ns_sat
            Sublattice <N(1, 0, -1), N(0, 1, 2)>
            sage: Ns_sat is Ns_sat.saturation()
            True
        """
        # Wrap the generic saturation back into a toric sublattice if needed.
        S = super(ToricLattice_generic, self).saturation()
        return S if is_ToricLattice(S) else self.ambient_module().submodule(S)
    def span(self, gens, base_ring=ZZ, *args, **kwds):
        r"""
        Return the span of the given generators.
        INPUT:
        - ``gens`` -- list of elements of the ambient vector space of
          ``self``.
        - ``base_ring`` -- (default: `\ZZ`) base ring for the generated module.
        OUTPUT:
        - submodule spanned by ``gens``.
        .. NOTE::
            The output need not be a submodule of ``self``, nor even of the
            ambient space. It must, however, be contained in the ambient
            vector space.
        See also :meth:`span_of_basis`,
        :meth:`~sage.modules.free_module.FreeModule_generic_pid.submodule`,
        and
        :meth:`~sage.modules.free_module.FreeModule_generic_pid.submodule_with_basis`,
        EXAMPLES::
            sage: N = ToricLattice(3)
            sage: Ns = N.submodule([N.gen(0)])
            sage: Ns.span([N.gen(1)])
            Sublattice <N(0, 1, 0)>
            sage: Ns.submodule([N.gen(1)])
            Traceback (most recent call last):
            ...
            ArithmeticError: Argument gens (= [N(0, 1, 0)])
            does not generate a submodule of self.
        """
        A = self.ambient_module()
        if base_ring is ZZ and all(g in A for g in gens):
            return ToricLattice_sublattice(A, gens)
        # Elements of *other* toric lattices cannot generate a sublattice
        # of this one.
        for g in gens:
            if is_ToricLatticeElement(g) and g not in A:
                raise ValueError("%s can not generate a sublattice of %s"
                                 % (g, A))
        # All generators are acceptable: fall back to the generic span.
        # (The original code reached this via a ``for``/``else`` with no
        # ``break``, which always ran the ``else`` branch.)
        return super(ToricLattice_generic, self).span(gens, base_ring,
                                                      *args, **kwds)
    def span_of_basis(self, basis, base_ring=ZZ, *args, **kwds):
        r"""
        Return the submodule with the given ``basis``.
        INPUT:
        - ``basis`` -- list of elements of the ambient vector space of
          ``self``.
        - ``base_ring`` -- (default: `\ZZ`) base ring for the generated module.
        OUTPUT:
        - submodule spanned by ``basis``.
        .. NOTE::
            The output need not be a submodule of ``self``, nor even of the
            ambient space. It must, however, be contained in the ambient
            vector space.
        See also :meth:`span`,
        :meth:`~sage.modules.free_module.FreeModule_generic_pid.submodule`,
        and
        :meth:`~sage.modules.free_module.FreeModule_generic_pid.submodule_with_basis`,
        EXAMPLES::
            sage: N = ToricLattice(3)
            sage: Ns = N.span_of_basis([(1,2,3)])
            sage: Ns.span_of_basis([(2,4,0)])
            Sublattice <N(2, 4, 0)>
            sage: Ns.span_of_basis([(1/5,2/5,0), (1/7,1/7,0)])
            Free module of degree 3 and rank 2 over Integer Ring
            User basis matrix:
            [1/5 2/5 0]
            [1/7 1/7 0]
        Of course the input basis vectors must be linearly independent::
            sage: Ns.span_of_basis([(1,2,0), (2,4,0)])
            Traceback (most recent call last):
            ...
            ValueError: The given basis vectors must be linearly independent.
        """
        A = self.ambient_module()
        if base_ring is ZZ and all(g in A for g in basis):
            return ToricLattice_sublattice_with_basis(A, basis)
        # Same policy as in :meth:`span`: reject foreign lattice elements.
        for g in basis:
            if is_ToricLatticeElement(g) and g not in A:
                raise ValueError("%s can not generate a sublattice of %s"
                                 % (g, A))
        return super(ToricLattice_generic, self).span_of_basis(
            basis, base_ring, *args, **kwds)
class ToricLattice_ambient(ToricLattice_generic, FreeModule_ambient_pid):
    r"""
    Create a toric lattice.
    See :class:`ToricLattice <ToricLatticeFactory>` for documentation.
    .. WARNING::
        There should be only one toric lattice with the given rank and
        associated names. Using this class directly to create toric lattices
        may lead to unexpected results. Please, use :class:`ToricLattice
        <ToricLatticeFactory>` to create toric lattices.
    TESTS::
        sage: N = ToricLattice(3, "N", "M", "N", "M")
        sage: N
        3-d lattice N
        sage: TestSuite(N).run()
    """
    Element = ToricLatticeElement
    def __init__(self, rank, name, dual_name, latex_name, latex_dual_name):
        r"""
        See :class:`ToricLattice <ToricLatticeFactory>` for documentation.
        TESTS::
            sage: ToricLattice(3, "N", "M", "N", "M")
            3-d lattice N
        """
        # A toric lattice is a free ZZ-module plus the four naming strings.
        super(ToricLattice_ambient, self).__init__(ZZ, rank)
        self._name = name
        self._dual_name = dual_name
        self._latex_name = latex_name
        self._latex_dual_name = latex_dual_name
    def __cmp__(self, right):
        r"""
        Compare ``self`` and ``right``.
        INPUT:
        - ``right`` -- anything.
        OUTPUT:
        - 0 if ``right`` is a toric lattice of the same dimension as ``self``
          and their associated names are the same, 1 or -1 otherwise.
        TESTS::
            sage: N3 = ToricLattice(3)
            sage: N4 = ToricLattice(4)
            sage: M3 = N3.dual()
            sage: cmp(N3, N4)
            -1
            sage: cmp(N3, M3)
            1
            sage: abs( cmp(N3, 3) )
            1
            sage: cmp(N3, ToricLattice(3))
            0
        """
        if self is right:
            return 0
        # Short-circuiting ``or`` walks the comparison chain: type first,
        # then rank; the first nonzero result wins.
        c = (cmp(type(self), type(right))
             or cmp(self.rank(), right.rank()))
        if c:
            return c
        # If lattices are the same as ZZ-modules, compare associated names
        return cmp([self._name, self._dual_name,
                    self._latex_name, self._latex_dual_name],
                   [right._name, right._dual_name,
                    right._latex_name, right._latex_dual_name])
    def _latex_(self):
        r"""
        Return a LaTeX representation of ``self``.
        OUTPUT:
        - string.
        TESTS::
            sage: L = ToricLattice(3, "L")
            sage: L.dual()._latex_()
            'L^*'
        """
        return self._latex_name
    def _repr_(self):
        r"""
        Return a string representation of ``self``.
        OUTPUT:
        - string.
        TESTS::
            sage: L = ToricLattice(3, "L")
            sage: L.dual()._repr_()
            '3-d lattice L*'
        """
        return "{}-d lattice {}".format(self.dimension(), self._name)
    def ambient_module(self):
        r"""
        Return the ambient module of ``self``.
        OUTPUT:
        - :class:`toric lattice <ToricLatticeFactory>`.
        .. NOTE::
            For any ambient toric lattice its ambient module is the lattice
            itself.
        EXAMPLES::
            sage: N = ToricLattice(3)
            sage: N.ambient_module()
            3-d lattice N
            sage: N.ambient_module() is N
            True
        """
        return self
    def dual(self):
        r"""
        Return the lattice dual to ``self``.
        OUTPUT:
        - :class:`toric lattice <ToricLatticeFactory>`.
        EXAMPLES::
            sage: N = ToricLattice(3)
            sage: N
            3-d lattice N
            sage: M = N.dual()
            sage: M
            3-d lattice M
            sage: M.dual() is N
            True
        Elements of dual lattices can act on each other::
            sage: n = N(1,2,3)
            sage: m = M(4,5,6)
            sage: n * m
            32
            sage: m * n
            32
        """
        # Cache the dual on first request; names and dual names swap roles.
        try:
            return self._dual
        except AttributeError:
            self._dual = ToricLattice(self.rank(), self._dual_name,
                                      self._name, self._latex_dual_name,
                                      self._latex_name)
            return self._dual
    def plot(self, **options):
        r"""
        Plot ``self``.
        INPUT:
        - any options for toric plots (see :func:`toric_plotter.options
          <sage.geometry.toric_plotter.options>`), none are mandatory.
        OUTPUT:
        - a plot.
        EXAMPLES::
            sage: N = ToricLattice(3)
            sage: N.plot()
            Graphics3d Object
        """
        # Unless the user made an explicit decision, assume that the lattice
        # should be visible no matter what the size of the bounding box is.
        options.setdefault("show_lattice", True)
        tp = ToricPlotter(options, self.degree())
        tp.adjust_options()
        return tp.plot_lattice()
class ToricLattice_sublattice_with_basis(ToricLattice_generic,
                                         FreeModule_submodule_with_basis_pid):
    r"""
    Construct the sublattice of ``ambient`` toric lattice with given ``basis``.
    INPUT (same as for
    :class:`~sage.modules.free_module.FreeModule_submodule_with_basis_pid`):
    - ``ambient`` -- ambient :class:`toric lattice <ToricLatticeFactory>` for
      this sublattice;
    - ``basis`` -- list of linearly independent elements of ``ambient``, these
      elements will be used as the default basis of the constructed
      sublattice;
    - see the base class for other available options.
    OUTPUT:
    - sublattice of a toric lattice with a user-specified basis.
    See also :class:`ToricLattice_sublattice` if you do not want to specify an
    explicit basis.
    EXAMPLES:
    The intended way to get objects of this class is to use
    :meth:`submodule_with_basis` method of toric lattices::
        sage: N = ToricLattice(3)
        sage: sublattice = N.submodule_with_basis([(1,1,0), (3,2,1)])
        sage: sublattice.has_user_basis()
        True
        sage: sublattice.basis()
        [
        N(1, 1, 0),
        N(3, 2, 1)
        ]
    Even if you have provided your own basis, you still can access the
    "standard" one::
        sage: sublattice.echelonized_basis()
        [
        N(1, 0, 1),
        N(0, 1, -1)
        ]
    """
    def _repr_(self):
        r"""
        Return a string representation of ``self``.
        OUTPUT:
        - string.
        TESTS::
            sage: L = ToricLattice(3, "L")
            sage: L.submodule_with_basis([(3,2,1),(1,2,3)])
            Sublattice <L(3, 2, 1), L(1, 2, 3)>
            sage: print L.submodule([(3,2,1),(1,2,3)])._repr_()
            Sublattice <L(1, 2, 3), L(0, 4, 8)>
        """
        # Single join-based format instead of piecewise += concatenation.
        return "Sublattice <%s>" % ", ".join(map(str, self.basis()))
    def _latex_(self):
        r"""
        Return a LaTeX representation of ``self``.
        OUTPUT:
        - string.
        TESTS::
            sage: L = ToricLattice(3, "L")
            sage: L.submodule_with_basis([(3,2,1),(1,2,3)])._latex_()
            '\\left\\langle\\left(3,\\,2,\\,1\\right)_{L},
            \\left(1,\\,2,\\,3\\right)_{L}\\right\\rangle'
            sage: L.submodule([(3,2,1),(1,2,3)])._latex_()
            '\\left\\langle\\left(1,\\,2,\\,3\\right)_{L},
            \\left(0,\\,4,\\,8\\right)_{L}\\right\\rangle'
        """
        return '\\left\\langle%s\\right\\rangle' % ', '.join(
            b._latex_() for b in self.basis())
    def dual(self):
        r"""
        Return the lattice dual to ``self``.
        OUTPUT:
        - a :class:`toric lattice quotient <ToricLattice_quotient>`.
        EXAMPLES::
            sage: N = ToricLattice(3)
            sage: Ns = N.submodule([(1,1,0), (3,2,1)])
            sage: Ns.dual()
            2-d lattice, quotient of 3-d lattice M by Sublattice <M(1, -1, -1)>
        """
        if "_dual" not in self.__dict__:
            # Idiomatic identity test (was ``not self is ...``).
            if self is not self.saturation():
                raise ValueError("only dual lattices of saturated sublattices "
                                 "can be constructed! Got %s." % self)
            # The dual of a saturated sublattice is the quotient of the dual
            # of the ambient lattice by the annihilator of the sublattice.
            self._dual = (self.ambient_module().dual() /
                          self.basis_matrix().transpose().integer_kernel())
            self._dual._dual = self
        return self._dual
    def plot(self, **options):
        r"""
        Plot ``self``.
        INPUT:
        - any options for toric plots (see :func:`toric_plotter.options
          <sage.geometry.toric_plotter.options>`), none are mandatory.
        OUTPUT:
        - a plot.
        EXAMPLES::
            sage: N = ToricLattice(3)
            sage: sublattice = N.submodule_with_basis([(1,1,0), (3,2,1)])
            sage: sublattice.plot()
            Graphics3d Object
        Now we plot both the ambient lattice and its sublattice::
            sage: N.plot() + sublattice.plot(point_color="red")
            Graphics3d Object
        """
        if "show_lattice" not in options:
            # Unless user made an explicit decision, we assume that lattice
            # should be visible no matter what is the size of the bounding box.
            options["show_lattice"] = True
        # Restrict any user-supplied lattice filter to points of this
        # sublattice (or install such a restriction if none was given).
        if "lattice_filter" in options:
            old = options["lattice_filter"]
            options["lattice_filter"] = lambda pt: pt in self and old(pt)
        else:
            options["lattice_filter"] = lambda pt: pt in self
        tp = ToricPlotter(options, self.degree())
        tp.adjust_options()
        return tp.plot_lattice()
class ToricLattice_sublattice(ToricLattice_sublattice_with_basis,
                              FreeModule_submodule_pid):
    r"""
    Construct the sublattice of ``ambient`` toric lattice generated by ``gens``.
    INPUT (same as for
    :class:`~sage.modules.free_module.FreeModule_submodule_pid`):
    - ``ambient`` -- ambient :class:`toric lattice <ToricLatticeFactory>` for
      this sublattice;
    - ``gens`` -- list of elements of ``ambient`` generating the constructed
      sublattice;
    - see the base class for other available options.
    OUTPUT:
    - sublattice of a toric lattice with an automatically chosen basis.
    See also :class:`ToricLattice_sublattice_with_basis` if you want to
    specify an explicit basis.
    EXAMPLES:
    The intended way to get objects of this class is to use
    :meth:`submodule` method of toric lattices::
        sage: N = ToricLattice(3)
        sage: sublattice = N.submodule([(1,1,0), (3,2,1)])
        sage: sublattice.has_user_basis()
        False
        sage: sublattice.basis()
        [
        N(1, 0, 1),
        N(0, 1, -1)
        ]
    For sublattices without user-specified basis, the basis obtained above is
    the same as the "standard" one::
        sage: sublattice.echelonized_basis()
        [
        N(1, 0, 1),
        N(0, 1, -1)
        ]
    """
    # All behavior is inherited: the bases differ only in how the basis is
    # chosen (echelonized automatically here vs. user-supplied).
    pass
class ToricLattice_quotient_element(FGP_Element):
    r"""
    Create an element of a toric lattice quotient.
    .. WARNING::
        You probably should not construct such elements explicitly.
    INPUT:
    - same as for :class:`~sage.modules.fg_pid.fgp_element.FGP_Element`.
    OUTPUT:
    - element of a toric lattice quotient.
    TESTS::
        sage: N = ToricLattice(3)
        sage: sublattice = N.submodule([(1,1,0), (3,2,1)])
        sage: Q = N/sublattice
        sage: e = Q(1,2,3)
        sage: e
        N[1, 2, 3]
        sage: e2 = Q(N(2,3,3))
        sage: e2
        N[2, 3, 3]
        sage: e == e2
        True
        sage: e.vector()
        (4)
        sage: e2.vector()
        (4)
    """
    def _latex_(self):
        r"""
        Return a LaTeX representation of ``self``.
        OUTPUT:
        - string.
        TESTS::
            sage: N = ToricLattice(3)
            sage: Ns = N.submodule([N(2,4,0), N(9,12,0)])
            sage: Q = N/Ns
            sage: print Q.gen(0)._latex_()
            \left[0,\,1,\,0\right]_{N}
        """
        # Typeset a lift of this class and swap the outermost parentheses
        # for square brackets to mark it as a quotient element.
        lifted = latex(self.lift())
        return lifted.replace("(", "[", 1).replace(")", "]", 1)
    def _repr_(self):
        r"""
        Return a string representation of ``self``.
        OUTPUT:
        - string.
        TESTS::
            sage: N = ToricLattice(3)
            sage: Ns = N.submodule([N(2,4,0), N(9,12,0)])
            sage: Q = N/Ns
            sage: print Q.gen(0)._repr_()
            N[0, 1, 0]
        """
        # Same bracket-swapping trick as in ``_latex_``, on the plain string.
        lifted = str(self.lift())
        return lifted.replace("(", "[", 1).replace(")", "]", 1)
    def set_immutable(self):
        r"""
        Make ``self`` immutable.
        OUTPUT:
        - none.
        .. note:: Elements of toric lattice quotients are always immutable, so
            this method does nothing, it is introduced for compatibility
            purposes only.
        EXAMPLES::
            sage: N = ToricLattice(3)
            sage: Ns = N.submodule([N(2,4,0), N(9,12,0)])
            sage: Q = N/Ns
            sage: Q.0.set_immutable()
        """
        pass  # intentionally a no-op, see the note above
class ToricLattice_quotient(FGP_Module_class):
    r"""
    Construct the quotient of a toric lattice ``V`` by its sublattice ``W``.
    INPUT:
    - ``V`` -- ambient toric lattice;
    - ``W`` -- sublattice of ``V``;
    - ``check`` -- (default: ``True``) whether to check correctness of input
      or not.
    If the quotient is one-dimensional and torsion free, the following
    two mutually exclusive keyword arguments are also allowed. They
    decide the sign choice for the (single) generator of the quotient
    lattice:
    - ``positive_point`` -- a lattice point of ``self`` not in the
      sublattice ``sub`` (that is, not zero in the quotient
      lattice). The quotient generator will be in the same direction
      as ``positive_point``.
    - ``positive_dual_point`` -- a dual lattice point. The quotient
      generator will be chosen such that its lift has a positive
      product with ``positive_dual_point``. Note: if
      ``positive_dual_point`` is not zero on the sublattice ``sub``,
      then the notion of positivity will depend on the choice of lift!
    OUTPUT:
    - quotient of ``V`` by ``W``.
    EXAMPLES:
    The intended way to get objects of this class is to use
    :meth:`quotient` method of toric lattices::
        sage: N = ToricLattice(3)
        sage: sublattice = N.submodule([(1,1,0), (3,2,1)])
        sage: Q = N/sublattice
        sage: Q
        1-d lattice, quotient of 3-d lattice N by Sublattice <N(1, 0, 1), N(0, 1, -1)>
        sage: Q.gens()
        (N[0, 0, 1],)
    Here, ``sublattice`` happens to be of codimension one in ``N``. If
    you want to prescribe the sign of the quotient generator, you can
    do either::
        sage: Q = N.quotient(sublattice, positive_point=N(0,0,-1)); Q
        1-d lattice, quotient of 3-d lattice N by Sublattice <N(1, 0, 1), N(0, 1, -1)>
        sage: Q.gens()
        (N[0, 0, -1],)
    or::
        sage: M = N.dual()
        sage: Q = N.quotient(sublattice, positive_dual_point=M(0,0,-1)); Q
        1-d lattice, quotient of 3-d lattice N by Sublattice <N(1, 0, 1), N(0, 1, -1)>
        sage: Q.gens()
        (N[0, 0, -1],)
    TESTS::
        sage: loads(dumps(Q)) == Q
        True
        sage: loads(dumps(Q)).gens() == Q.gens()
        True
    """
    def __init__(self, V, W, check=True, positive_point=None, positive_dual_point=None):
        r"""
        The constructor
        See :class:`ToricLattice_quotient` for an explanation of the arguments.
        EXAMPLES::
            sage: N = ToricLattice(3)
            sage: from sage.geometry.toric_lattice import ToricLattice_quotient
            sage: ToricLattice_quotient(N, N.span([N(1,2,3)]))
            2-d lattice, quotient of 3-d lattice N by Sublattice <N(1, 2, 3)>
        An ``ArithmeticError`` will be raised if ``W`` is not a
        sublattice of ``V``::
            sage: N = ToricLattice(3)
            sage: Ns = N.submodule([N.gen(0)])
            sage: Ns
            Sublattice <N(1, 0, 0)>
            sage: Ns.span([N.gen(1)])
            Sublattice <N(0, 1, 0)>
            sage: Ns.quotient(Ns.span([N.gen(1)]))
            Traceback (most recent call last):
            ...
            ArithmeticError: W must be a sublattice of V
        """
        if check:
            # Normalize W to an honest sublattice of V; any failure of the
            # conversion means W cannot be a sublattice.
            try:
                W = V.submodule(W)
            except (TypeError, ArithmeticError):
                raise ArithmeticError("W must be a sublattice of V")
        super(ToricLattice_quotient, self).__init__(V, W, check)
        if (positive_point, positive_dual_point) == (None, None):
            # No sign preference given: keep the Smith-form generators as-is.
            self._flip_sign_of_generator = False
            return
        # A sign preference was given; default to "no flip" until the scalar
        # product below decides.  (This assignment is redundant with the one
        # above, but harmless — kept for byte-identical behavior.)
        self._flip_sign_of_generator = False
        assert self.is_torsion_free() and self.ngens()==1, \
            'You may only specify a positive direction in the codimension one case.'
        quotient_generator = self.gen(0)
        lattice = self.V().ambient_module()
        # Exactly one of the two positivity arguments may be supplied; the
        # sign of ``scalar_product`` below determines whether the generator
        # needs to be flipped to point in the requested direction.
        if (positive_point is not None) and (positive_dual_point is None):
            assert positive_point in lattice, 'positive_point must be a lattice point.'
            point_quotient = self(positive_point)
            scalar_product = quotient_generator.vector()[0] * point_quotient.vector()[0]
            if scalar_product==0:
                raise ValueError(str(positive_point)+' is zero in the quotient.')
        elif (positive_point is None) and (positive_dual_point is not None):
            assert positive_dual_point in lattice.dual(), 'positive_dual_point must be a dual lattice point.'
            scalar_product = quotient_generator.lift() * positive_dual_point
            if scalar_product==0:
                raise ValueError(str(positive_dual_point)+' is zero on the lift of the quotient generator.')
        else:
            # Both arguments were supplied simultaneously.
            raise ValueError('You may not specify both positive_point and positive_dual_point.')
        self._flip_sign_of_generator = (scalar_product<0)
    def gens(self):
        """
        Return the generators of the quotient.
        OUTPUT:
        A tuple of :class:`ToricLattice_quotient_element` generating
        the quotient.
        EXAMPLES::
            sage: N = ToricLattice(3)
            sage: Q = N.quotient(N.span([N(1,2,3), N(0,2,1)]), positive_point=N(0,-1,0))
            sage: Q.gens()
            (N[0, -1, 0],)
        """
        gens = self.smith_form_gens()
        # Honor the sign choice recorded by __init__ (only possible in the
        # one-generator, torsion-free case).
        if self._flip_sign_of_generator:
            assert len(gens)==1
            return (-gens[0],)
        else:
            return gens
    # Should be overridden in derived classes.
    Element = ToricLattice_quotient_element
    def _element_constructor_(self, *x, **kwds):
        r"""
        Construct an element of ``self``.
        INPUT:
        - element of a compatible toric object (lattice, sublattice, quotient)
          or something that defines such an element (list, generic vector,
          etc.).
        OUTPUT:
        - :class:`toric lattice quotient element
          <ToricLattice_quotient_element>`.
        EXAMPLES::
            sage: N = ToricLattice(3)
            sage: Ns = N.submodule([N(2,4,0), N(9,12,0)])
            sage: Q = N/Ns
            sage: x = Q(1,2,3) # indirect doctest
            sage: x
            N[1, 2, 3]
            sage: type(x)
            <class 'sage.geometry.toric_lattice.ToricLattice_quotient_with_category.element_class'>
            sage: x is Q(x)
            True
            sage: x.parent() is Q
            True
            sage: x == Q(N(1,2,3))
            True
            sage: y = Q(3,6,3)
            sage: y
            N[3, 6, 3]
            sage: x == y
            True
        """
        # A single non-integer argument (or a single zero) is the object to
        # convert itself, not a coordinate; unpack it.
        if len(x) == 1 and (x[0] not in ZZ or x[0] == 0):
            x = x[0]
        if parent(x) is self:
            # Elements of self pass through unchanged.
            return x
        try:
            # Elements of other quotients are converted via a lift.
            x = x.lift()
        except AttributeError:
            pass
        try:
            return self.element_class(self, self._V(x), **kwds)
        except TypeError:
            # Last resort: interpret x as coefficients of the Smith-form
            # generators.
            return self.linear_combination_of_smith_form_gens(x)
    def _latex_(self):
        r"""
        Return a LaTeX representation of ``self``.
        OUTPUT:
        - string.
        TESTS::
            sage: N = ToricLattice(3)
            sage: Ns = N.submodule([N(2,4,0), N(9,12,0)])
            sage: Q = N/Ns
            sage: print Q._latex_()
            N / \left\langle\left(1,\,8,\,0\right)_{N}, \left(0,\,12,\,0\right)_{N}\right\rangle
            sage: Ns = N.submodule([N(1,4,0)])
            sage: Q = N/Ns
            sage: print Q._latex_()
            N / \left\langle\left(1,\,4,\,0\right)_{N}\right\rangle
        """
        return "%s / %s" % (latex(self.V()), latex(self.W()))
    def _repr_(self):
        r"""
        Return a string representation of ``self``.
        OUTPUT:
        - string.
        TESTS::
            sage: N = ToricLattice(3)
            sage: Ns = N.submodule([N(2,4,0), N(9,12,0)])
            sage: Q = N/Ns
            sage: print Q._repr_()
            Quotient with torsion of 3-d lattice N
            by Sublattice <N(1, 8, 0), N(0, 12, 0)>
            sage: Ns = N.submodule([N(1,4,0)])
            sage: Q = N/Ns
            sage: print Q._repr_()
            2-d lattice, quotient of 3-d lattice N
            by Sublattice <N(1, 4, 0)>
        """
        if self.is_torsion_free():
            return "%d-d lattice, quotient of %s by %s" % (self.rank(),
                                                           self.V(), self.W())
        else:
            return "Quotient with torsion of %s by %s" % (self.V(), self.W())
    def _module_constructor(self, V, W, check=True):
        r"""
        Construct new quotient modules.
        INPUT:
        - ``V`` -- ambient toric lattice;
        - ``W`` -- sublattice of ``V``;
        - ``check`` -- (default: ``True``) whether to check
          correctness of input or not.
        TESTS::
            sage: N = ToricLattice(3)
            sage: Ns = N.submodule([N(2,4,0), N(9,12,0)])
            sage: Q = N/Ns; Q
            Quotient with torsion of 3-d lattice N by Sublattice <N(1, 8, 0), N(0, 12, 0)>
            sage: Q._module_constructor(N,Ns)
            Quotient with torsion of 3-d lattice N by Sublattice <N(1, 8, 0), N(0, 12, 0)>
        """
        # Ensures the FGP machinery builds toric quotients, not generic ones.
        return ToricLattice_quotient(V,W,check)
    def base_extend(self, R):
        r"""
        Return the base change of ``self`` to the ring ``R``.
        INPUT:
        - ``R`` -- either `\ZZ` or `\QQ`.
        OUTPUT:
        - ``self`` if `R=\ZZ`, quotient of the base extension of the ambient
          lattice by the base extension of the sublattice if `R=\QQ`.
        EXAMPLES::
            sage: N = ToricLattice(3)
            sage: Ns = N.submodule([N(2,4,0), N(9,12,0)])
            sage: Q = N/Ns
            sage: Q.base_extend(ZZ) is Q
            True
            sage: Q.base_extend(QQ)
            Vector space quotient V/W of dimension 1 over Rational Field where
            V: Vector space of dimension 3 over Rational Field
            W: Vector space of degree 3 and dimension 2 over Rational Field
            Basis matrix:
            [1 0 0]
            [0 1 0]
        """
        if R is ZZ:
            return self
        if R is QQ:
            return self.V().base_extend(R) / self.W().base_extend(R)
        raise NotImplementedError("quotients of toric lattices can only be "
                                  "extended to ZZ or QQ, not %s!" % R)
    def is_torsion_free(self):
        r"""
        Check if ``self`` is torsion-free.
        OUTPUT:
        - ``True`` is ``self`` has no torsion and ``False`` otherwise.
        EXAMPLES::
            sage: N = ToricLattice(3)
            sage: Ns = N.submodule([N(2,4,0), N(9,12,0)])
            sage: Q = N/Ns
            sage: Q.is_torsion_free()
            False
            sage: Ns = N.submodule([N(1,4,0)])
            sage: Q = N/Ns
            sage: Q.is_torsion_free()
            True
        """
        # invariants() lists the nontrivial torsion orders; an empty list
        # (sum 0) means the quotient is free.
        return sum(self.invariants()) == 0
    def dual(self):
        r"""
        Return the lattice dual to ``self``.
        OUTPUT:
        - a :class:`toric lattice quotient <ToricLattice_quotient>`.
        EXAMPLES::
            sage: N = ToricLattice(3)
            sage: Ns = N.submodule([(1, -1, -1)])
            sage: Q = N / Ns
            sage: Q.dual()
            Sublattice <M(1, 0, 1), M(0, 1, -1)>
        """
        if "_dual" not in self.__dict__:
            # The dual of V/W is the annihilator of W inside the dual of V.
            self._dual = self.V().dual().submodule(
                self.W().basis_matrix().transpose().integer_kernel().gens())
            self._dual._dual = self
        return self._dual
    def rank(self):
        r"""
        Return the rank of ``self``.
        OUTPUT:
        Integer. The dimension of the free part of the quotient.
        EXAMPLES::
            sage: N = ToricLattice(3)
            sage: Ns = N.submodule([N(2,4,0), N(9,12,0)])
            sage: Q = N/Ns
            sage: Q.ngens()
            2
            sage: Q.rank()
            1
            sage: Ns = N.submodule([N(1,4,0)])
            sage: Q = N/Ns
            sage: Q.ngens()
            2
            sage: Q.rank()
            2
        """
        return self.V().rank() - self.W().rank()
    # ``dimension`` is an alias for ``rank``.
    dimension = rank
    def coordinate_vector(self, x, reduce=False):
        """
        Return coordinates of x with respect to the optimized
        representation of self.
        INPUT:
        - ``x`` -- element of ``self`` or convertable to ``self``.
        - ``reduce`` -- (default: False); if True, reduce coefficients
          modulo invariants.
        OUTPUT:
        The coordinates as a vector.
        EXAMPLES::
            sage: N = ToricLattice(3)
            sage: Q = N.quotient(N.span([N(1,2,3), N(0,2,1)]), positive_point=N(0,-1,0))
            sage: q = Q.gen(0); q
            N[0, -1, 0]
            sage: q.vector() # indirect test
            (1)
            sage: Q.coordinate_vector(q)
            (1)
        """
        coordinates = super(ToricLattice_quotient, self).coordinate_vector(x,reduce)
        # Coordinates are taken w.r.t. the Smith-form generators; if the
        # (single) generator was sign-flipped in gens(), negate to match.
        if self._flip_sign_of_generator:
            assert len(coordinates)==1, "Sign flipped for a multi-dimensional quotient!"
            return -coordinates
        else:
            return coordinates
| 30.026464 | 109 | 0.545052 |
2c00064119768675ea707da0bdd0be6149e957a1 | 281 | py | Python | idea/admin.py | andreyrobota/AndreyKosinskiy | 6258c4e90de791f721093545ec3cd9a9569155f2 | [
"MIT"
] | null | null | null | idea/admin.py | andreyrobota/AndreyKosinskiy | 6258c4e90de791f721093545ec3cd9a9569155f2 | [
"MIT"
] | null | null | null | idea/admin.py | andreyrobota/AndreyKosinskiy | 6258c4e90de791f721093545ec3cd9a9569155f2 | [
"MIT"
] | null | null | null | from django.contrib import admin
from idea.models import IdeaUser
# Register your models here.
class IdeaAdmin(admin.ModelAdmin):
    """Admin form/list configuration for ``IdeaUser`` objects."""
    # Fields shown on the edit form.
    fields = ['idea_name', 'idea_text', 'idea_rank']
    # Columns shown in the change-list view.
    list_display = ("idea_name", "idea_text", "idea_rank")


admin.site.register(IdeaUser, IdeaAdmin)
| 25.545455 | 56 | 0.758007 |
deb4fcad32d5b76666d88743a107d115e6a5557c | 31,880 | py | Python | testing/path/test_local.py | mathstuf/pytest-py | a1ce2aa461ce83e03fa99a69b0d93a6594458726 | [
"MIT"
] | null | null | null | testing/path/test_local.py | mathstuf/pytest-py | a1ce2aa461ce83e03fa99a69b0d93a6594458726 | [
"MIT"
] | null | null | null | testing/path/test_local.py | mathstuf/pytest-py | a1ce2aa461ce83e03fa99a69b0d93a6594458726 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import with_statement
import time
import py
import pytest
import os, sys
from py.path import local
import common
failsonjython = py.test.mark.xfail("sys.platform.startswith('java')")
failsonjywin32 = py.test.mark.xfail("sys.platform.startswith('java') "
"and getattr(os, '_name', None) == 'nt'")
win32only = py.test.mark.skipif(
"not (sys.platform == 'win32' or getattr(os, '_name', None) == 'nt')")
skiponwin32 = py.test.mark.skipif(
"sys.platform == 'win32' or getattr(os, '_name', None) == 'nt'")
ATIME_RESOLUTION = 0.01
def pytest_funcarg__path1(request):
    """Session-scoped funcarg: a tmpdir populated with the shared test fs."""
    def make():
        root = request.getfuncargvalue("tmpdir")
        common.setuptestfs(root)
        return root

    def finalize(root):
        # Sanity-check that no test destroyed the shared fixture content.
        assert root.join("samplefile").check()

    return request.cached_setup(make, finalize, scope="session")
def pytest_funcarg__fake_fspath_obj(request):
    """Return a minimal object implementing the ``__fspath__`` protocol."""
    class _FSPathOnly(object):
        """Carries a path string, exposed only via ``__fspath__``."""

        def __init__(self, path):
            self._path = path

        def __fspath__(self):
            return self._path

    return _FSPathOnly(os.path.join("this", "is", "a", "fake", "path"))
class TestLocalPath(common.CommonFSTests):
def test_join_normpath(self, tmpdir):
assert tmpdir.join(".") == tmpdir
p = tmpdir.join("../%s" % tmpdir.basename)
assert p == tmpdir
p = tmpdir.join("..//%s/" % tmpdir.basename)
assert p == tmpdir
@skiponwin32
def test_dirpath_abs_no_abs(self, tmpdir):
p = tmpdir.join('foo')
assert p.dirpath('/bar') == tmpdir.join('bar')
assert tmpdir.dirpath('/bar', abs=True) == py.path.local('/bar')
def test_gethash(self, tmpdir):
md5 = py.builtin._tryimport('md5', 'hashlib').md5
lib = py.builtin._tryimport('sha', 'hashlib')
sha = getattr(lib, 'sha1', getattr(lib, 'sha', None))
fn = tmpdir.join("testhashfile")
data = 'hello'.encode('ascii')
fn.write(data, mode="wb")
assert fn.computehash("md5") == md5(data).hexdigest()
assert fn.computehash("sha1") == sha(data).hexdigest()
py.test.raises(ValueError, fn.computehash, "asdasd")
def test_remove_removes_readonly_file(self, tmpdir):
readonly_file = tmpdir.join('readonly').ensure()
readonly_file.chmod(0)
readonly_file.remove()
assert not readonly_file.check(exists=1)
def test_remove_removes_readonly_dir(self, tmpdir):
readonly_dir = tmpdir.join('readonlydir').ensure(dir=1)
readonly_dir.chmod(int("500", 8))
readonly_dir.remove()
assert not readonly_dir.check(exists=1)
def test_remove_removes_dir_and_readonly_file(self, tmpdir):
readonly_dir = tmpdir.join('readonlydir').ensure(dir=1)
readonly_file = readonly_dir.join('readonlyfile').ensure()
readonly_file.chmod(0)
readonly_dir.remove()
assert not readonly_dir.check(exists=1)
def test_remove_routes_ignore_errors(self, tmpdir, monkeypatch):
l = []
monkeypatch.setattr(py.std.shutil, 'rmtree',
lambda *args, **kwargs: l.append(kwargs))
tmpdir.remove()
assert not l[0]['ignore_errors']
for val in (True, False):
l[:] = []
tmpdir.remove(ignore_errors=val)
assert l[0]['ignore_errors'] == val
def test_initialize_curdir(self):
assert str(local()) == py.std.os.getcwd()
@skiponwin32
def test_chdir_gone(self, path1):
p = path1.ensure("dir_to_be_removed", dir=1)
p.chdir()
p.remove()
pytest.raises(py.error.ENOENT, py.path.local)
assert path1.chdir() is None
assert os.getcwd() == str(path1)
def test_as_cwd(self, path1):
dir = path1.ensure("subdir", dir=1)
old = py.path.local()
with dir.as_cwd() as x:
assert x == old
assert py.path.local() == dir
assert os.getcwd() == str(old)
def test_as_cwd_exception(self, path1):
old = py.path.local()
dir = path1.ensure("subdir", dir=1)
with pytest.raises(ValueError):
with dir.as_cwd():
raise ValueError()
assert old == py.path.local()
def test_initialize_reldir(self, path1):
with path1.as_cwd():
p = local('samplefile')
assert p.check()
@pytest.mark.xfail("sys.version_info < (2,6) and sys.platform == 'win32'")
def test_tilde_expansion(self, monkeypatch, tmpdir):
monkeypatch.setenv("HOME", str(tmpdir))
p = py.path.local("~", expanduser=True)
assert p == os.path.expanduser("~")
def test_eq_with_strings(self, path1):
path1 = path1.join('sampledir')
path2 = str(path1)
assert path1 == path2
assert path2 == path1
path3 = path1.join('samplefile')
assert path3 != path2
assert path2 != path3
def test_eq_with_none(self, path1):
assert path1 != None
def test_eq_non_ascii_unicode(self, path1):
path2 = path1.join(u'temp')
path3 = path1.join(u'ação')
path4 = path1.join(u'ディレクトリ')
assert path2 != path3
assert path2 != path4
assert path4 != path3
def test_gt_with_strings(self, path1):
path2 = path1.join('sampledir')
path3 = str(path1.join("ttt"))
assert path3 > path2
assert path2 < path3
assert path2 < "ttt"
assert "ttt" > path2
path4 = path1.join("aaa")
l = [path2, path4,path3]
assert sorted(l) == [path4, path2, path3]
def test_open_and_ensure(self, path1):
p = path1.join("sub1", "sub2", "file")
with p.open("w", ensure=1) as f:
f.write("hello")
assert p.read() == "hello"
def test_write_and_ensure(self, path1):
p = path1.join("sub1", "sub2", "file")
p.write("hello", ensure=1)
assert p.read() == "hello"
@py.test.mark.multi(bin=(False, True))
def test_dump(self, tmpdir, bin):
path = tmpdir.join("dumpfile%s" % int(bin))
try:
d = {'answer' : 42}
path.dump(d, bin=bin)
f = path.open('rb+')
dnew = py.std.pickle.load(f)
assert d == dnew
finally:
f.close()
@failsonjywin32
def test_setmtime(self):
import tempfile
import time
try:
fd, name = tempfile.mkstemp()
py.std.os.close(fd)
except AttributeError:
name = tempfile.mktemp()
open(name, 'w').close()
try:
mtime = int(time.time())-100
path = local(name)
assert path.mtime() != mtime
path.setmtime(mtime)
assert path.mtime() == mtime
path.setmtime()
assert path.mtime() != mtime
finally:
py.std.os.remove(name)
def test_normpath(self, path1):
new1 = path1.join("/otherdir")
new2 = path1.join("otherdir")
assert str(new1) == str(new2)
def test_mkdtemp_creation(self):
d = local.mkdtemp()
try:
assert d.check(dir=1)
finally:
d.remove(rec=1)
def test_tmproot(self):
d = local.mkdtemp()
tmproot = local.get_temproot()
try:
assert d.check(dir=1)
assert d.dirpath() == tmproot
finally:
d.remove(rec=1)
def test_chdir(self, tmpdir):
old = local()
try:
res = tmpdir.chdir()
assert str(res) == str(old)
assert py.std.os.getcwd() == str(tmpdir)
finally:
old.chdir()
def test_ensure_filepath_withdir(self, tmpdir):
newfile = tmpdir.join('test1','test')
newfile.ensure()
assert newfile.check(file=1)
newfile.write("42")
newfile.ensure()
s = newfile.read()
assert s == "42"
def test_ensure_filepath_withoutdir(self, tmpdir):
newfile = tmpdir.join('test1file')
t = newfile.ensure()
assert t == newfile
assert newfile.check(file=1)
def test_ensure_dirpath(self, tmpdir):
newfile = tmpdir.join('test1','testfile')
t = newfile.ensure(dir=1)
assert t == newfile
assert newfile.check(dir=1)
def test_ensure_non_ascii_unicode(self, tmpdir):
newfile = tmpdir.join(u'ação',u'ディレクトリ')
t = newfile.ensure(dir=1)
assert t == newfile
assert newfile.check(dir=1)
def test_init_from_path(self, tmpdir):
l = local()
l2 = local(l)
assert l2 == l
wc = py.path.svnwc('.')
l3 = local(wc)
assert l3 is not wc
assert l3.strpath == wc.strpath
assert not hasattr(l3, 'commit')
@py.test.mark.xfail(run=False, reason="unreliable est for long filenames")
def test_long_filenames(self, tmpdir):
if sys.platform == "win32":
py.test.skip("win32: work around needed for path length limit")
# see http://codespeak.net/pipermail/py-dev/2008q2/000922.html
# testing paths > 260 chars (which is Windows' limitation, but
# depending on how the paths are used), but > 4096 (which is the
# Linux' limitation) - the behaviour of paths with names > 4096 chars
# is undetermined
newfilename = '/test' * 60
l = tmpdir.join(newfilename)
l.ensure(file=True)
l.write('foo')
l2 = tmpdir.join(newfilename)
assert l2.read() == 'foo'
def test_visit_depth_first(self, tmpdir):
p1 = tmpdir.ensure("a","1")
p2 = tmpdir.ensure("b","2")
p3 = tmpdir.ensure("breadth")
l = list(tmpdir.visit(lambda x: x.check(file=1)))
assert len(l) == 3
# check that breadth comes last
assert l[2] == p3
def test_visit_rec_fnmatch(self, tmpdir):
p1 = tmpdir.ensure("a","123")
p2 = tmpdir.ensure(".b","345")
l = list(tmpdir.visit("???", rec="[!.]*"))
assert len(l) == 1
# check that breadth comes last
assert l[0] == p1
def test_fnmatch_file_abspath(self, tmpdir):
b = tmpdir.join("a", "b")
assert b.fnmatch(os.sep.join("ab"))
pattern = os.sep.join([str(tmpdir), "*", "b"])
assert b.fnmatch(pattern)
def test_sysfind(self):
name = sys.platform == "win32" and "cmd" or "test"
x = py.path.local.sysfind(name)
assert x.check(file=1)
assert py.path.local.sysfind('jaksdkasldqwe') is None
assert py.path.local.sysfind(name, paths=[]) is None
x2 = py.path.local.sysfind(name, paths=[x.dirpath()])
assert x2 == x
def test_fspath_protocol_other_class(self, fake_fspath_obj):
# py.path is always absolute
py_path = py.path.local(fake_fspath_obj)
str_path = fake_fspath_obj.__fspath__()
assert py_path.check(endswith=str_path)
assert py_path.join(fake_fspath_obj).strpath == os.path.join(
py_path.strpath, str_path)
class TestExecutionOnWindows:
    pytestmark = win32only

    def test_sysfind_bat_exe_before(self, tmpdir, monkeypatch):
        """sysfind() must pick ``hello.bat`` over an extensionless file."""
        monkeypatch.setenv("PATH", str(tmpdir), prepend=os.pathsep)
        tmpdir.ensure("hello")
        batfile = tmpdir.ensure("hello.bat")
        assert py.path.local.sysfind("hello") == batfile
class TestExecution:
pytestmark = skiponwin32
def test_sysfind_no_permisson_ignored(self, monkeypatch, tmpdir):
noperm = tmpdir.ensure('noperm', dir=True)
monkeypatch.setenv("PATH", noperm, prepend=":")
noperm.chmod(0)
assert py.path.local.sysfind('jaksdkasldqwe') is None
def test_sysfind_absolute(self):
x = py.path.local.sysfind('test')
assert x.check(file=1)
y = py.path.local.sysfind(str(x))
assert y.check(file=1)
assert y == x
def test_sysfind_multiple(self, tmpdir, monkeypatch):
monkeypatch.setenv('PATH',
"%s:%s" % (tmpdir.ensure('a'),
tmpdir.join('b')),
prepend=":")
tmpdir.ensure('b', 'a')
checker = lambda x: x.dirpath().basename == 'b'
x = py.path.local.sysfind('a', checker=checker)
assert x.basename == 'a'
assert x.dirpath().basename == 'b'
checker = lambda x: None
assert py.path.local.sysfind('a', checker=checker) is None
def test_sysexec(self):
x = py.path.local.sysfind('ls')
out = x.sysexec('-a')
for x in py.path.local().listdir():
assert out.find(x.basename) != -1
def test_sysexec_failing(self):
x = py.path.local.sysfind('false')
py.test.raises(py.process.cmdexec.Error, """
x.sysexec('aksjdkasjd')
""")
def test_make_numbered_dir(self, tmpdir):
tmpdir.ensure('base.not_an_int', dir=1)
for i in range(10):
numdir = local.make_numbered_dir(prefix='base.', rootdir=tmpdir,
keep=2, lock_timeout=0)
assert numdir.check()
assert numdir.basename == 'base.%d' %i
if i>=1:
assert numdir.new(ext=str(i-1)).check()
if i>=2:
assert numdir.new(ext=str(i-2)).check()
if i>=3:
assert not numdir.new(ext=str(i-3)).check()
def test_make_numbered_dir_NotImplemented_Error(self, tmpdir, monkeypatch):
def notimpl(x, y):
raise NotImplementedError(42)
monkeypatch.setattr(py.std.os, 'symlink', notimpl)
x = tmpdir.make_numbered_dir(rootdir=tmpdir, lock_timeout=0)
assert x.relto(tmpdir)
assert x.check()
def test_locked_make_numbered_dir(self, tmpdir):
for i in range(10):
numdir = local.make_numbered_dir(prefix='base2.', rootdir=tmpdir,
keep=2)
assert numdir.check()
assert numdir.basename == 'base2.%d' %i
for j in range(i):
assert numdir.new(ext=str(j)).check()
def test_error_preservation(self, path1):
py.test.raises (EnvironmentError, path1.join('qwoeqiwe').mtime)
py.test.raises (EnvironmentError, path1.join('qwoeqiwe').read)
#def test_parentdirmatch(self):
# local.parentdirmatch('std', startmodule=__name__)
#
class TestImport:
def test_pyimport(self, path1):
obj = path1.join('execfile.py').pyimport()
assert obj.x == 42
assert obj.__name__ == 'execfile'
def test_pyimport_renamed_dir_creates_mismatch(self, tmpdir):
p = tmpdir.ensure("a", "test_x123.py")
p.pyimport()
tmpdir.join("a").move(tmpdir.join("b"))
pytest.raises(tmpdir.ImportMismatchError,
lambda: tmpdir.join("b", "test_x123.py").pyimport())
def test_pyimport_messy_name(self, tmpdir):
# http://bitbucket.org/hpk42/py-trunk/issue/129
path = tmpdir.ensure('foo__init__.py')
obj = path.pyimport()
def test_pyimport_dir(self, tmpdir):
p = tmpdir.join("hello_123")
p_init = p.ensure("__init__.py")
m = p.pyimport()
assert m.__name__ == "hello_123"
m = p_init.pyimport()
assert m.__name__ == "hello_123"
def test_pyimport_execfile_different_name(self, path1):
obj = path1.join('execfile.py').pyimport(modname="0x.y.z")
assert obj.x == 42
assert obj.__name__ == '0x.y.z'
def test_pyimport_a(self, path1):
otherdir = path1.join('otherdir')
mod = otherdir.join('a.py').pyimport()
assert mod.result == "got it"
assert mod.__name__ == 'otherdir.a'
def test_pyimport_b(self, path1):
otherdir = path1.join('otherdir')
mod = otherdir.join('b.py').pyimport()
assert mod.stuff == "got it"
assert mod.__name__ == 'otherdir.b'
def test_pyimport_c(self, path1):
otherdir = path1.join('otherdir')
mod = otherdir.join('c.py').pyimport()
assert mod.value == "got it"
def test_pyimport_d(self, path1):
otherdir = path1.join('otherdir')
mod = otherdir.join('d.py').pyimport()
assert mod.value2 == "got it"
def test_pyimport_and_import(self, tmpdir):
tmpdir.ensure('xxxpackage', '__init__.py')
mod1path = tmpdir.ensure('xxxpackage', 'module1.py')
mod1 = mod1path.pyimport()
assert mod1.__name__ == 'xxxpackage.module1'
from xxxpackage import module1
assert module1 is mod1
def test_pyimport_check_filepath_consistency(self, monkeypatch, tmpdir):
name = 'pointsback123'
ModuleType = type(py.std.os)
p = tmpdir.ensure(name + '.py')
for ending in ('.pyc', '$py.class', '.pyo'):
mod = ModuleType(name)
pseudopath = tmpdir.ensure(name+ending)
mod.__file__ = str(pseudopath)
monkeypatch.setitem(sys.modules, name, mod)
newmod = p.pyimport()
assert mod == newmod
monkeypatch.undo()
mod = ModuleType(name)
pseudopath = tmpdir.ensure(name+"123.py")
mod.__file__ = str(pseudopath)
monkeypatch.setitem(sys.modules, name, mod)
excinfo = py.test.raises(pseudopath.ImportMismatchError,
"p.pyimport()")
modname, modfile, orig = excinfo.value.args
assert modname == name
assert modfile == pseudopath
assert orig == p
assert issubclass(pseudopath.ImportMismatchError, ImportError)
def test_issue131_pyimport_on__init__(self, tmpdir):
# __init__.py files may be namespace packages, and thus the
# __file__ of an imported module may not be ourselves
# see issue
p1 = tmpdir.ensure("proja", "__init__.py")
p2 = tmpdir.ensure("sub", "proja", "__init__.py")
m1 = p1.pyimport()
m2 = p2.pyimport()
assert m1 == m2
def test_ensuresyspath_append(self, tmpdir):
root1 = tmpdir.mkdir("root1")
file1 = root1.ensure("x123.py")
assert str(root1) not in sys.path
file1.pyimport(ensuresyspath="append")
assert str(root1) == sys.path[-1]
assert str(root1) not in sys.path[:-1]
def test_pypkgdir(tmpdir):
    """pypkgpath() walks up to the outermost importable package dir."""
    pkg = tmpdir.ensure('pkg1', dir=1)
    for init in ("__init__.py", "subdir/__init__.py"):
        pkg.ensure(init)
    assert pkg.pypkgpath() == pkg
    inner = pkg.join('subdir', '__init__.py')
    assert inner.pypkgpath() == pkg
def test_pypkgdir_unimportable(tmpdir):
    """pypkgpath() stops at directories whose names are not importable."""
    badpkg = tmpdir.ensure('pkg1-1', dir=1)  # '-' is not a valid identifier
    badpkg.ensure("__init__.py")
    subdir = badpkg.ensure("subdir/__init__.py").dirpath()
    assert subdir.pypkgpath() == subdir
    assert subdir.ensure("xyz.py").pypkgpath() == subdir
    assert not badpkg.pypkgpath()
def test_isimportable():
    """isimportable() accepts only identifier-like module names."""
    from py._path.local import isimportable

    assert not isimportable("")
    for good in ("x", "x1", "x_1", "_", "_1"):
        assert isimportable(good)
    for bad in ("x-1", "x:1"):
        assert not isimportable(bad)
def test_homedir_from_HOME(monkeypatch):
    """_gethomedir() honours the HOME environment variable."""
    cwd = os.getcwd()
    monkeypatch.setenv("HOME", cwd)
    assert py.path.local._gethomedir() == py.path.local(cwd)
def test_homedir_not_exists(monkeypatch):
    """Without HOME/HOMEDRIVE no home directory can be determined."""
    for var in ("HOME", "HOMEDRIVE"):
        monkeypatch.delenv(var, raising=False)
    assert py.path.local._gethomedir() is None
def test_samefile(tmpdir):
    """samefile() is reflexive, relative-path aware, case-blind on win32."""
    assert tmpdir.samefile(tmpdir)
    p = tmpdir.ensure("hello")
    assert p.samefile(p)
    with p.dirpath().as_cwd():
        # A bare basename resolves against the cwd.
        assert p.samefile(p.basename)
    if sys.platform == "win32":
        lowered = p.__class__(str(p).lower())
        uppered = p.__class__(str(p).upper())
        assert lowered.samefile(uppered)
def test_listdir_single_arg(tmpdir):
    """listdir() accepts a single fnmatch-style filter string."""
    tmpdir.ensure("hello")
    entries = tmpdir.listdir("hello")
    assert entries[0].basename == "hello"
def test_mkdtemp_rootdir(tmpdir):
    """mkdtemp(rootdir=...) creates its directory under the given root."""
    created = local.mkdtemp(rootdir=tmpdir)
    assert tmpdir.listdir() == [created]
class TestWINLocalPath:
pytestmark = win32only
def test_owner_group_not_implemented(self, path1):
py.test.raises(NotImplementedError, "path1.stat().owner")
py.test.raises(NotImplementedError, "path1.stat().group")
def test_chmod_simple_int(self, path1):
py.builtin.print_("path1 is", path1)
mode = path1.stat().mode
# Ensure that we actually change the mode to something different.
path1.chmod(mode == 0 and 1 or 0)
try:
print(path1.stat().mode)
print(mode)
assert path1.stat().mode != mode
finally:
path1.chmod(mode)
assert path1.stat().mode == mode
def test_path_comparison_lowercase_mixed(self, path1):
t1 = path1.join("a_path")
t2 = path1.join("A_path")
assert t1 == t1
assert t1 == t2
def test_relto_with_mixed_case(self, path1):
t1 = path1.join("a_path", "fiLe")
t2 = path1.join("A_path")
assert t1.relto(t2) == "fiLe"
def test_allow_unix_style_paths(self, path1):
t1 = path1.join('a_path')
assert t1 == str(path1) + '\\a_path'
t1 = path1.join('a_path/')
assert t1 == str(path1) + '\\a_path'
t1 = path1.join('dir/a_path')
assert t1 == str(path1) + '\\dir\\a_path'
def test_sysfind_in_currentdir(self, path1):
cmd = py.path.local.sysfind('cmd')
root = cmd.new(dirname='', basename='') # c:\ in most installations
with root.as_cwd():
x = py.path.local.sysfind(cmd.relto(root))
assert x.check(file=1)
def test_fnmatch_file_abspath_posix_pattern_on_win32(self, tmpdir):
# path-matching patterns might contain a posix path separator '/'
# Test that we can match that pattern on windows.
import posixpath
b = tmpdir.join("a", "b")
assert b.fnmatch(posixpath.sep.join("ab"))
pattern = posixpath.sep.join([str(tmpdir), "*", "b"])
assert b.fnmatch(pattern)
class TestPOSIXLocalPath:
pytestmark = skiponwin32
def test_hardlink(self, tmpdir):
linkpath = tmpdir.join('test')
filepath = tmpdir.join('file')
filepath.write("Hello")
nlink = filepath.stat().nlink
linkpath.mklinkto(filepath)
assert filepath.stat().nlink == nlink + 1
def test_symlink_are_identical(self, tmpdir):
filepath = tmpdir.join('file')
filepath.write("Hello")
linkpath = tmpdir.join('test')
linkpath.mksymlinkto(filepath)
assert linkpath.readlink() == str(filepath)
def test_symlink_isfile(self, tmpdir):
linkpath = tmpdir.join('test')
filepath = tmpdir.join('file')
filepath.write("")
linkpath.mksymlinkto(filepath)
assert linkpath.check(file=1)
assert not linkpath.check(link=0, file=1)
assert linkpath.islink()
def test_symlink_relative(self, tmpdir):
linkpath = tmpdir.join('test')
filepath = tmpdir.join('file')
filepath.write("Hello")
linkpath.mksymlinkto(filepath, absolute=False)
assert linkpath.readlink() == "file"
assert filepath.read() == linkpath.read()
def test_symlink_not_existing(self, tmpdir):
linkpath = tmpdir.join('testnotexisting')
assert not linkpath.check(link=1)
assert linkpath.check(link=0)
def test_relto_with_root(self, path1, tmpdir):
y = path1.join('x').relto(py.path.local('/'))
assert y[0] == str(path1)[1]
def test_visit_recursive_symlink(self, tmpdir):
linkpath = tmpdir.join('test')
linkpath.mksymlinkto(tmpdir)
visitor = tmpdir.visit(None, lambda x: x.check(link=0))
assert list(visitor) == [linkpath]
def test_symlink_isdir(self, tmpdir):
linkpath = tmpdir.join('test')
linkpath.mksymlinkto(tmpdir)
assert linkpath.check(dir=1)
assert not linkpath.check(link=0, dir=1)
def test_symlink_remove(self, tmpdir):
linkpath = tmpdir.join('test')
linkpath.mksymlinkto(linkpath) # point to itself
assert linkpath.check(link=1)
linkpath.remove()
assert not linkpath.check()
def test_realpath_file(self, tmpdir):
linkpath = tmpdir.join('test')
filepath = tmpdir.join('file')
filepath.write("")
linkpath.mksymlinkto(filepath)
realpath = linkpath.realpath()
assert realpath.basename == 'file'
def test_owner(self, path1, tmpdir):
from pwd import getpwuid
from grp import getgrgid
stat = path1.stat()
assert stat.path == path1
uid = stat.uid
gid = stat.gid
owner = getpwuid(uid)[0]
group = getgrgid(gid)[0]
assert uid == stat.uid
assert owner == stat.owner
assert gid == stat.gid
assert group == stat.group
def test_stat_helpers(self, tmpdir, monkeypatch):
path1 = tmpdir.ensure("file")
stat1 = path1.stat()
stat2 = tmpdir.stat()
assert stat1.isfile()
assert stat2.isdir()
assert not stat1.islink()
assert not stat2.islink()
def test_stat_non_raising(self, tmpdir):
path1 = tmpdir.join("file")
pytest.raises(py.error.ENOENT, lambda: path1.stat())
res = path1.stat(raising=False)
assert res is None
def test_atime(self, tmpdir):
import time
path = tmpdir.ensure('samplefile')
now = time.time()
atime1 = path.atime()
# we could wait here but timer resolution is very
# system dependent
path.read()
time.sleep(ATIME_RESOLUTION)
atime2 = path.atime()
time.sleep(ATIME_RESOLUTION)
duration = time.time() - now
assert (atime2-atime1) <= duration
def test_commondir(self, path1):
# XXX This is here in local until we find a way to implement this
# using the subversion command line api.
p1 = path1.join('something')
p2 = path1.join('otherthing')
assert p1.common(p2) == path1
assert p2.common(p1) == path1
def test_commondir_nocommon(self, path1):
# XXX This is here in local until we find a way to implement this
# using the subversion command line api.
p1 = path1.join('something')
p2 = py.path.local(path1.sep+'blabla')
assert p1.common(p2) == '/'
def test_join_to_root(self, path1):
root = path1.parts()[0]
assert len(str(root)) == 1
assert str(root.join('a')) == '//a' # posix allows two slashes
def test_join_root_to_root_with_no_abs(self, path1):
nroot = path1.join('/')
assert str(path1) == str(nroot)
assert path1 == nroot
def test_chmod_simple_int(self, path1):
mode = path1.stat().mode
path1.chmod(int(mode/2))
try:
assert path1.stat().mode != mode
finally:
path1.chmod(mode)
assert path1.stat().mode == mode
def test_chmod_rec_int(self, path1):
# XXX fragile test
recfilter = lambda x: x.check(dotfile=0, link=0)
oldmodes = {}
for x in path1.visit(rec=recfilter):
oldmodes[x] = x.stat().mode
path1.chmod(int("772", 8), rec=recfilter)
try:
for x in path1.visit(rec=recfilter):
assert x.stat().mode & int("777", 8) == int("772", 8)
finally:
for x,y in oldmodes.items():
x.chmod(y)
def test_copy_archiving(self, tmpdir):
unicode_fn = u"something-\342\200\223.txt"
f = tmpdir.ensure("a", unicode_fn)
a = f.dirpath()
oldmode = f.stat().mode
newmode = oldmode ^ 1
f.chmod(newmode)
b = tmpdir.join("b")
a.copy(b, mode=True)
assert b.join(f.basename).stat().mode == newmode
def test_copy_stat_file(self, tmpdir):
src = tmpdir.ensure('src')
dst = tmpdir.join('dst')
# a small delay before the copy
time.sleep(ATIME_RESOLUTION)
src.copy(dst, stat=True)
oldstat = src.stat()
newstat = dst.stat()
assert oldstat.mode == newstat.mode
assert (dst.atime() - src.atime()) < ATIME_RESOLUTION
assert (dst.mtime() - src.mtime()) < ATIME_RESOLUTION
def test_copy_stat_dir(self, tmpdir):
test_files = ['a', 'b', 'c']
src = tmpdir.join('src')
for f in test_files:
src.join(f).write(f, ensure=True)
dst = tmpdir.join('dst')
# a small delay before the copy
time.sleep(ATIME_RESOLUTION)
src.copy(dst, stat=True)
for f in test_files:
oldstat = src.join(f).stat()
newstat = dst.join(f).stat()
assert (newstat.atime - oldstat.atime) < ATIME_RESOLUTION
assert (newstat.mtime - oldstat.mtime) < ATIME_RESOLUTION
assert oldstat.mode == newstat.mode
@failsonjython
def test_chown_identity(self, path1):
owner = path1.stat().owner
group = path1.stat().group
path1.chown(owner, group)
@failsonjython
def test_chown_dangling_link(self, path1):
owner = path1.stat().owner
group = path1.stat().group
x = path1.join('hello')
x.mksymlinkto('qlwkejqwlek')
try:
path1.chown(owner, group, rec=1)
finally:
x.remove(rec=0)
@failsonjython
def test_chown_identity_rec_mayfail(self, path1):
owner = path1.stat().owner
group = path1.stat().group
path1.chown(owner, group)
class TestUnicodePy2Py3:
    """Non-ASCII path handling that must behave on both py2 and py3."""

    def test_join_ensure(self, tmpdir, monkeypatch):
        if sys.version_info >= (3, 0) and "LANG" not in os.environ:
            pytest.skip("cannot run test without locale")
        base = py.path.local(tmpdir.strpath)
        name = "hällo"
        created = base.ensure(name)
        assert base.join(name) == created

    def test_listdir(self, tmpdir):
        if sys.version_info >= (3, 0) and "LANG" not in os.environ:
            pytest.skip("cannot run test without locale")
        base = py.path.local(tmpdir.strpath)
        name = "hällo"
        created = base.ensure(name)
        assert base.listdir(name)[0] == created

    @pytest.mark.xfail(reason="changing read/write might break existing usages")
    def test_read_write(self, tmpdir):
        target = tmpdir.join("hello")
        text = py.builtin._totext("hällo", "utf8")
        target.write(text)
        assert target.read() == text
        encoded = text.encode(sys.getdefaultencoding())
        target.write(encoded)
        assert target.read() == encoded
class TestBinaryAndTextMethods:
    """write_binary/read_binary and write_text/read_text round-trips."""

    def test_read_binwrite(self, tmpdir):
        target = tmpdir.join("hello")
        text = py.builtin._totext("hällo", "utf8")
        raw = text.encode("utf8")
        target.write_binary(raw)
        assert target.read_binary() == raw
        decoded = target.read_text(encoding="utf8")
        assert decoded == text
        assert py.builtin._istext(decoded)

    def test_read_textwrite(self, tmpdir):
        target = tmpdir.join("hello")
        text = py.builtin._totext("hällo", "utf8")
        target.write_text(text, encoding="utf8")
        assert target.read_binary() == text.encode("utf8")
        assert target.read_text(encoding="utf8") == text

    def test_default_encoding(self, tmpdir):
        target = tmpdir.join("hello")
        # Can't use UTF8 as the default encoding (ASCII) doesn't support it
        text = py.builtin._totext("hello", "ascii")
        target.write_text(text, "ascii")
        back = target.read_text("ascii")
        assert back == text
        assert type(back) == type(text)
| 34.539545 | 80 | 0.59473 |
00fb5f06fb30ecac12fa94455856c39c2dcb72ad | 6,862 | py | Python | models/resnetv2.py | XH-B/attention-feature-distillation | aaab9f63da1b27fd25a1b75b8844b3b66cbc9d82 | [
"Apache-2.0"
] | 53 | 2021-02-09T13:19:27.000Z | 2022-03-24T05:55:05.000Z | models/resnetv2.py | XH-B/attention-feature-distillation | aaab9f63da1b27fd25a1b75b8844b3b66cbc9d82 | [
"Apache-2.0"
] | 2 | 2021-04-12T06:44:43.000Z | 2021-09-01T15:43:50.000Z | models/resnetv2.py | XH-B/attention-feature-distillation | aaab9f63da1b27fd25a1b75b8844b3b66cbc9d82 | [
"Apache-2.0"
] | 12 | 2021-02-09T07:41:07.000Z | 2022-02-10T04:30:54.000Z | # Attention-based Feature-level Distillation
# Original Source : https://github.com/HobbitLong/RepDistiller
'''ResNet in PyTorch.
For Pre-activation ResNet, see 'preact_resnet.py'.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
    """Pre-activation basic residual block: ReLU -> two 3x3 convs + skip.

    forward() accepts either a tensor or a ``(tensor, feature_list)`` tuple
    and returns ``(output, feature_list + [output])`` so intermediate
    features can be collected across a whole stage.
    """
    expansion = 1

    def __init__(self, in_planes, planes, stride=1, is_last=False):
        super(BasicBlock, self).__init__()
        self.is_last = is_last
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=False)
        # Projection shortcut only when the shape changes; identity otherwise.
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        x, feats = x if isinstance(x, tuple) else (x, [])
        # Pre-activation: ReLU is applied before both branches.
        pre = self.relu(x)
        out = self.relu(self.bn1(self.conv1(pre)))
        out = self.bn2(self.conv2(out))
        out = out + self.shortcut(pre)
        return out, feats + [out]
class Bottleneck(nn.Module):
    """Pre-activation bottleneck residual block: 1x1 -> 3x3 -> 1x1 convs.

    Same feature-collecting forward() contract as ``BasicBlock``:
    input may be a tensor or ``(tensor, feature_list)``; output is
    ``(output, feature_list + [output])``.
    """
    expansion = 4

    def __init__(self, in_planes, planes, stride=1, is_last=False):
        super(Bottleneck, self).__init__()
        self.is_last = is_last
        width_out = self.expansion * planes
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, width_out, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(width_out)
        self.relu = nn.ReLU(inplace=False)
        # Projection shortcut only when the shape changes; identity otherwise.
        if stride != 1 or in_planes != width_out:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, width_out, kernel_size=1,
                          stride=stride, bias=False),
                nn.BatchNorm2d(width_out),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        x, feats = x if isinstance(x, tuple) else (x, [])
        # Pre-activation: ReLU is applied before both branches.
        pre = self.relu(x)
        out = self.relu(self.bn1(self.conv1(pre)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out = out + self.shortcut(pre)
        return out, feats + [out]
class ResNet(nn.Module):
    """CIFAR-style ResNet whose residual blocks also return their
    intermediate outputs, so ``forward(..., is_feat=True)`` can expose every
    block's (activated) feature map together with the logits.

    The stem is a 3x3 stride-1 conv (no max-pool), suitable for small
    inputs such as 32x32 images.
    """

    def __init__(self, block, num_blocks, num_classes=10, zero_init_residual=False):
        """Args:
            block: BasicBlock or Bottleneck class (must define `expansion`).
            num_blocks: number of residual blocks in each of the 4 stages.
            num_classes: size of the final linear classifier.
            zero_init_residual: zero the last BN of every block so each
                residual branch starts as an identity.
        """
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.linear = nn.Linear(512 * block.expansion, num_classes)
        self.relu = nn.ReLU(inplace=False)
        # Standard He init for convs; unit-scale/zero-shift for norm layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def get_feat_modules(self):
        """Return the feature-producing modules in forward order."""
        feat_m = nn.ModuleList([])
        feat_m.append(self.conv1)
        feat_m.append(self.bn1)
        feat_m.append(self.layer1)
        feat_m.append(self.layer2)
        feat_m.append(self.layer3)
        feat_m.append(self.layer4)
        return feat_m

    def get_bn_before_relu(self):
        """Return the last pre-activation BN of each stage (bn3 for
        Bottleneck stacks, bn2 for BasicBlock stacks)."""
        if isinstance(self.layer1[0], Bottleneck):
            bn1 = self.layer1[-1].bn3
            bn2 = self.layer2[-1].bn3
            bn3 = self.layer3[-1].bn3
            bn4 = self.layer4[-1].bn3
        elif isinstance(self.layer1[0], BasicBlock):
            bn1 = self.layer1[-1].bn2
            bn2 = self.layer2[-1].bn2
            bn3 = self.layer3[-1].bn2
            bn4 = self.layer4[-1].bn2
        else:
            raise NotImplementedError('ResNet unknown block error !!!')
        return [bn1, bn2, bn3, bn4]

    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack `num_blocks` blocks; only the first may be strided.
        The last block of the stage is flagged via `is_last`."""
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for i in range(num_blocks):
            stride = strides[i]
            layers.append(block(self.in_planes, planes, stride, i == num_blocks - 1))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x, is_feat=False):
        """Return logits, or ``(feature_list, logits)`` when `is_feat`.

        Each stage returns a tuple ``(x, [block outputs])``; the collected
        block outputs are re-activated with ReLU for the feature list.
        """
        x = self.bn1(self.conv1(x))
        f0 = x
        x, f1 = self.layer1(x)
        f1_act = [self.relu(f) for f in f1]
        x, f2 = self.layer2(x)
        f2_act = [self.relu(f) for f in f2]
        x, f3 = self.layer3(x)
        f3_act = [self.relu(f) for f in f3]
        x, f4 = self.layer4(x)
        f4_act = [self.relu(f) for f in f4]
        # Final activation before global average pooling (pre-act style).
        out = self.avgpool(self.relu(x))
        out = out.view(out.size(0), -1)
        f5 = out
        out = self.linear(out)
        if is_feat:
            return [self.relu(f0)] + f1_act + f2_act + f3_act + f4_act + [f5], out
        else:
            return out
def resnet18(**kwargs):
    """ResNet-18: BasicBlock with (2, 2, 2, 2) blocks per stage."""
    return ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
def resnet34(**kwargs):
    """ResNet-34: BasicBlock with (3, 4, 6, 3) blocks per stage."""
    return ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
def resnet50(**kwargs):
    """ResNet-50: Bottleneck with (3, 4, 6, 3) blocks per stage."""
    return ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
def resnet101(**kwargs):
    """ResNet-101: Bottleneck with (3, 4, 23, 3) blocks per stage."""
    return ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
def resnet152(**kwargs):
    """ResNet-152: Bottleneck with (3, 8, 36, 3) blocks per stage."""
    return ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
| 34.656566 | 106 | 0.594433 |
22ef0eb1aec13245aab4ee1d767d6c84f622099d | 13,409 | py | Python | trace/google/cloud/gapic/trace/v1/trace_service_client.py | rodrigodias27/google-cloud-python | 7d1161f70744c0dbbe67a3f472ea95667eaafe50 | [
"Apache-2.0"
] | 1 | 2021-01-04T11:40:17.000Z | 2021-01-04T11:40:17.000Z | trace/google/cloud/gapic/trace/v1/trace_service_client.py | rodrigodias27/google-cloud-python | 7d1161f70744c0dbbe67a3f472ea95667eaafe50 | [
"Apache-2.0"
] | null | null | null | trace/google/cloud/gapic/trace/v1/trace_service_client.py | rodrigodias27/google-cloud-python | 7d1161f70744c0dbbe67a3f472ea95667eaafe50 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017, Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# EDITING INSTRUCTIONS
# This file was generated from the file
# https://github.com/google/googleapis/blob/master/google/devtools/cloudtrace/v1/trace.proto,
# and updates to that file get reflected here through a refresh process.
# For the short term, the refresh process will only be runnable by Google engineers.
#
# The only allowed edits are to method and file documentation. A 3-way
# merge preserves those additions if the generated source changes.
"""Accesses the google.devtools.cloudtrace.v1 TraceService API."""
import collections
import json
import os
import pkg_resources
import platform
from google.gax import api_callable
from google.gax import config
from google.gax import path_template
import google.gax
from google.cloud.gapic.trace.v1 import enums
from google.cloud.proto.devtools.cloudtrace.v1 import trace_pb2
from google.protobuf import timestamp_pb2
_PageDesc = google.gax.PageDescriptor
class TraceServiceClient(object):
    """
    This file describes an API for collecting and viewing traces and spans
    within a trace. A Trace is a collection of spans corresponding to a single
    operation or set of operations for an application. A span is an individual
    timed event which forms a node of the trace tree. Spans for a single trace
    may span multiple services.
    """

    SERVICE_ADDRESS = 'cloudtrace.googleapis.com'
    """The default address of the service."""

    DEFAULT_SERVICE_PORT = 443
    """The default port of the service."""

    # Auto-pagination wiring for GAX: `list_traces` pages with
    # `page_token`/`next_page_token` and yields items from the `traces` field.
    _PAGE_DESCRIPTORS = {
        'list_traces': _PageDesc('page_token', 'next_page_token', 'traces')
    }

    # The scopes needed to make gRPC calls to all of the methods defined in
    # this service
    _ALL_SCOPES = ('https://www.googleapis.com/auth/cloud-platform',
                   'https://www.googleapis.com/auth/trace.append',
                   'https://www.googleapis.com/auth/trace.readonly', )

    def __init__(self,
                 service_path=SERVICE_ADDRESS,
                 port=DEFAULT_SERVICE_PORT,
                 channel=None,
                 credentials=None,
                 ssl_credentials=None,
                 scopes=None,
                 client_config=None,
                 app_name=None,
                 app_version='',
                 lib_name=None,
                 lib_version='',
                 metrics_headers=()):
        """Constructor.

        Args:
          service_path (string): The domain name of the API remote host.
          port (int): The port on which to connect to the remote host.
          channel (:class:`grpc.Channel`): A ``Channel`` instance through
            which to make calls.
          credentials (object): The authorization credentials to attach to
            requests. These credentials identify this application to the
            service.
          ssl_credentials (:class:`grpc.ChannelCredentials`): A
            ``ChannelCredentials`` instance for use with an SSL-enabled
            channel.
          scopes (list[string]): A list of OAuth2 scopes to attach to requests.
          client_config (dict):
            A dictionary for call options for each method. See
            :func:`google.gax.construct_settings` for the structure of
            this data. Falls back to the default config if not specified
            or the specified config is missing data points.
          app_name (string): The name of the application calling
            the service. Recommended for analytics purposes.
          app_version (string): The version of the application calling
            the service. Recommended for analytics purposes.
          lib_name (string): The API library software used for calling
            the service. (Unless you are writing an API client itself,
            leave this as default.)
          lib_version (string): The API library software version used
            for calling the service. (Unless you are writing an API client
            itself, leave this as default.)
          metrics_headers (dict): A dictionary of values for tracking
            client library metrics. Ultimately serializes to a string
            (e.g. 'foo/1.2.3 bar/3.14.1'). This argument should be
            considered private.

        Returns:
          A TraceServiceClient object.
        """
        # Unless the calling application specifically requested
        # OAuth scopes, request everything.
        if scopes is None:
            scopes = self._ALL_SCOPES

        # Initialize an empty client config, if none is set.
        if client_config is None:
            client_config = {}

        # Initialize metrics_headers as an ordered dictionary
        # (cuts down on cardinality of the resulting string slightly).
        metrics_headers = collections.OrderedDict(metrics_headers)
        metrics_headers['gl-python'] = platform.python_version()

        # The library may or may not be set, depending on what is
        # calling this client. Newer client libraries set the library name
        # and version.
        if lib_name:
            metrics_headers[lib_name] = lib_version

        # Finally, track the GAPIC package version.
        metrics_headers['gapic'] = pkg_resources.get_distribution(
            'google-cloud-trace', ).version

        # Load the configuration defaults.
        # The JSON config ships inside the package and defines retry/timeout
        # settings per method; user-supplied client_config overrides it.
        default_client_config = json.loads(
            pkg_resources.resource_string(
                __name__, 'trace_service_client_config.json').decode())
        defaults = api_callable.construct_settings(
            'google.devtools.cloudtrace.v1.TraceService',
            default_client_config,
            client_config,
            config.STATUS_CODE_NAMES,
            metrics_headers=metrics_headers,
            page_descriptors=self._PAGE_DESCRIPTORS, )
        # Create the gRPC stub and wrap each RPC as a GAX callable that
        # applies the per-method settings above.
        self.trace_service_stub = config.create_stub(
            trace_pb2.TraceServiceStub,
            channel=channel,
            service_path=service_path,
            service_port=port,
            credentials=credentials,
            scopes=scopes,
            ssl_credentials=ssl_credentials)

        self._patch_traces = api_callable.create_api_call(
            self.trace_service_stub.PatchTraces,
            settings=defaults['patch_traces'])
        self._get_trace = api_callable.create_api_call(
            self.trace_service_stub.GetTrace, settings=defaults['get_trace'])
        self._list_traces = api_callable.create_api_call(
            self.trace_service_stub.ListTraces,
            settings=defaults['list_traces'])

    # Service calls
    def patch_traces(self, project_id, traces, options=None):
        """
        Sends new traces to Stackdriver Trace or updates existing traces. If the ID
        of a trace that you send matches that of an existing trace, any fields
        in the existing trace and its spans are overwritten by the provided values,
        and any new fields provided are merged with the existing trace data. If the
        ID does not match, a new trace is created.

        Example:
          >>> from google.cloud.gapic.trace.v1 import trace_service_client
          >>> from google.cloud.proto.devtools.cloudtrace.v1 import trace_pb2
          >>> client = trace_service_client.TraceServiceClient()
          >>> project_id = ''
          >>> traces = trace_pb2.Traces()
          >>> client.patch_traces(project_id, traces)

        Args:
          project_id (string): ID of the Cloud project where the trace data is stored.
          traces (:class:`google.cloud.proto.devtools.cloudtrace.v1.trace_pb2.Traces`): The body of the message.
          options (:class:`google.gax.CallOptions`): Overrides the default
            settings for this call, e.g, timeout, retries etc.

        Raises:
          :exc:`google.gax.errors.GaxError` if the RPC is aborted.
          :exc:`ValueError` if the parameters are invalid.
        """
        # Create the request object.
        request = trace_pb2.PatchTracesRequest(
            project_id=project_id, traces=traces)
        self._patch_traces(request, options)

    def get_trace(self, project_id, trace_id, options=None):
        """
        Gets a single trace by its ID.

        Example:
          >>> from google.cloud.gapic.trace.v1 import trace_service_client
          >>> client = trace_service_client.TraceServiceClient()
          >>> project_id = ''
          >>> trace_id = ''
          >>> response = client.get_trace(project_id, trace_id)

        Args:
          project_id (string): ID of the Cloud project where the trace data is stored.
          trace_id (string): ID of the trace to return.
          options (:class:`google.gax.CallOptions`): Overrides the default
            settings for this call, e.g, timeout, retries etc.

        Returns:
          A :class:`google.cloud.proto.devtools.cloudtrace.v1.trace_pb2.Trace` instance.

        Raises:
          :exc:`google.gax.errors.GaxError` if the RPC is aborted.
          :exc:`ValueError` if the parameters are invalid.
        """
        # Create the request object.
        request = trace_pb2.GetTraceRequest(
            project_id=project_id, trace_id=trace_id)
        return self._get_trace(request, options)

    def list_traces(self,
                    project_id,
                    view=None,
                    page_size=None,
                    start_time=None,
                    end_time=None,
                    filter_=None,
                    order_by=None,
                    options=None):
        """
        Returns of a list of traces that match the specified filter conditions.

        Example:
          >>> from google.cloud.gapic.trace.v1 import trace_service_client
          >>> from google.gax import CallOptions, INITIAL_PAGE
          >>> client = trace_service_client.TraceServiceClient()
          >>> project_id = ''
          >>>
          >>> # Iterate over all results
          >>> for element in client.list_traces(project_id):
          >>>     # process element
          >>>     pass
          >>>
          >>> # Or iterate over results one page at a time
          >>> for page in client.list_traces(project_id, options=CallOptions(page_token=INITIAL_PAGE)):
          >>>     for element in page:
          >>>         # process element
          >>>         pass

        Args:
          project_id (string): ID of the Cloud project where the trace data is stored.
          view (enum :class:`google.cloud.gapic.trace.v1.enums.ListTracesRequest.ViewType`): Type of data returned for traces in the list. Optional. Default is
            ``MINIMAL``.
          page_size (int): Maximum number of traces to return. If not specified or <= 0, the
            implementation selects a reasonable value. The implementation may
            return fewer traces than the requested page size. Optional.
          start_time (:class:`google.protobuf.timestamp_pb2.Timestamp`): End of the time interval (inclusive) during which the trace data was
            collected from the application.
          end_time (:class:`google.protobuf.timestamp_pb2.Timestamp`): Start of the time interval (inclusive) during which the trace data was
            collected from the application.
          filter_ (string): An optional filter for the request.
          order_by (string): Field used to sort the returned traces. Optional.
            Can be one of the following:

            *   ``trace_id``
            *   ``name`` (``name`` field of root span in the trace)
            *   ``duration`` (difference between ``end_time`` and ``start_time`` fields of
            ::

                the root span)
            *   ``start`` (``start_time`` field of the root span)

            Descending order can be specified by appending ``desc`` to the sort field
            (for example, ``name desc``).

            Only one sort field is permitted.
          options (:class:`google.gax.CallOptions`): Overrides the default
            settings for this call, e.g, timeout, retries etc.

        Returns:
          A :class:`google.gax.PageIterator` instance. By default, this
          is an iterable of :class:`google.cloud.proto.devtools.cloudtrace.v1.trace_pb2.Trace` instances.
          This object can also be configured to iterate over the pages
          of the response through the `CallOptions` parameter.

        Raises:
          :exc:`google.gax.errors.GaxError` if the RPC is aborted.
          :exc:`ValueError` if the parameters are invalid.
        """
        # NOTE(review): the generated descriptions of `start_time` and
        # `end_time` above look swapped relative to their parameter names --
        # confirm against the Cloud Trace v1 API reference before relying on
        # them.
        # Create the request object.
        request = trace_pb2.ListTracesRequest(
            project_id=project_id,
            view=view,
            page_size=page_size,
            start_time=start_time,
            end_time=end_time,
            filter=filter_,
            order_by=order_by)
        return self._list_traces(request, options)
| 43.115756 | 159 | 0.63875 |
1a15ceab92b6ddfa7e8102a5aa88e686706f7bdc | 4,408 | py | Python | tests/test_areas_item.py | jvm986/podio-py | a48924c4aa4c30dfafdcacd228f512b930b31ee9 | [
"MIT"
] | 32 | 2015-02-02T14:45:01.000Z | 2021-09-22T21:01:23.000Z | tests/test_areas_item.py | jvm986/podio-py | a48924c4aa4c30dfafdcacd228f512b930b31ee9 | [
"MIT"
] | 15 | 2015-03-19T16:00:27.000Z | 2022-03-30T22:18:27.000Z | tests/test_areas_item.py | jvm986/podio-py | a48924c4aa4c30dfafdcacd228f512b930b31ee9 | [
"MIT"
] | 45 | 2015-01-06T11:21:54.000Z | 2022-03-27T00:25:48.000Z | #!/usr/bin/env python
"""
Unit tests for pypodio2.areas.Item (via pypodio2.client.Client). Works
by mocking httplib2, and making assertions about how pypodio2 calls
it.
"""
import json
from mock import Mock
from nose.tools import eq_
from tests.utils import check_client_method, get_client_and_http, URL_BASE
def test_find():
    """Item.find issues GET /item/{id}; basic=True appends /basic."""
    item_id = 9271

    client, check_assertions = check_client_method()
    check_assertions(client.Item.find(item_id),
                     'GET', '/item/%s' % item_id)

    client, check_assertions = check_client_method()
    check_assertions(client.Item.find(item_id, basic=True),
                     'GET', '/item/%s/basic' % item_id)
def test_filters():
    """Item.filter POSTs the attribute dict as a JSON body."""
    app_id = 426
    attributes = {'a': 1, 'zzzz': 12345}
    client, check_assertions = check_client_method()
    outcome = client.Item.filter(app_id, attributes)
    check_assertions(outcome,
                     'POST',
                     '/item/app/%s/filter/' % app_id,
                     expected_body=json.dumps(attributes),
                     expected_headers={'content-type': 'application/json'})
def test_filter_by_view():
    """Item.filter_by_view POSTs an empty JSON object to the view URL."""
    app_id, view_id = 421, 123
    client, check_assertions = check_client_method()
    outcome = client.Item.filter_by_view(app_id, view_id)
    check_assertions(outcome,
                     'POST',
                     '/item/app/%d/filter/%d' % (app_id, view_id),
                     expected_body=json.dumps({}),
                     expected_headers={'content-type': 'application/json'})
def test_find_by_external_id():
    """find_all_by_external_id queries the v2 endpoint by external_id."""
    app_id, external_id = 13, 37
    client, check_assertions = check_client_method()
    outcome = client.Item.find_all_by_external_id(app_id, external_id)
    check_assertions(outcome,
                     'GET',
                     '/item/app/%s/v2/?external_id=%s' % (app_id, external_id))
def test_revisions():
    """Item.revisions lists an item's revisions via GET."""
    item_id = 255
    client, check_assertions = check_client_method()
    check_assertions(client.Item.revisions(item_id),
                     'GET',
                     '/item/%s/revision/' % item_id)
def test_revision_difference():
    """revision_difference GETs the diff between two revision ids."""
    item_id, from_id, to_id = 2, 4, 8
    client, check_assertions = check_client_method()
    outcome = client.Item.revision_difference(item_id, from_id, to_id)
    check_assertions(outcome,
                     'GET',
                     '/item/%s/revision/%s/%s' % (item_id, from_id, to_id))
def test_values():
    """Item.values GETs /item/{id}/value."""
    item_id = 9271
    client, check_assertions = check_client_method()
    check_assertions(client.Item.values(item_id),
                     'GET', '/item/%s/value' % item_id)
def test_values_v2():
    """Item.values_v2 GETs the v2 value endpoint."""
    item_id = 9271
    client, check_assertions = check_client_method()
    check_assertions(client.Item.values_v2(item_id),
                     'GET', '/item/%s/value/v2' % item_id)
def test_create():
    """Item.create POSTs the attribute dict as JSON."""
    app_id = 1
    attributes = {'1': 1, '2': 3, '5': '8'}
    client, check_assertions = check_client_method()
    outcome = client.Item.create(app_id, attributes)
    check_assertions(outcome,
                     'POST',
                     '/item/app/%s/' % app_id,
                     json.dumps(attributes),
                     {'content-type': 'application/json'})
def test_update():
    """Item.update PUTs JSON; silent=True adds the query-string flag."""
    app_id = 1
    attributes = {'1': 1, '2': 3, '5': '8'}

    for silent, path in ((False, '/item/%s' % app_id),
                         (True, '/item/%s?silent=true' % app_id)):
        client, check_assertions = check_client_method()
        if silent:
            outcome = client.Item.update(app_id, attributes, silent=True)
        else:
            outcome = client.Item.update(app_id, attributes)
        check_assertions(outcome,
                         'PUT',
                         path,
                         json.dumps(attributes),
                         {'content-type': 'application/json'})
def test_delete():
    """Item.delete issues DELETE straight through the mocked transport."""
    item_id = 1
    client, http = get_client_and_http()
    http.request = Mock(return_value=(None, None))
    outcome = client.Item.delete(item_id)
    eq_(None, outcome)
    http.request.assert_called_once_with("%s/item/%s?" % (URL_BASE, item_id),
                                         'DELETE',
                                         body=None,
                                         headers={})
| 29.192053 | 79 | 0.591198 |
eecd67f2530a2b1cb529443b305ec215432dadb3 | 303 | py | Python | data/multilingual/Latn.TGL/Mono_16/pdf_to_json_test_Latn.TGL_Mono_16.py | antoinecarme/pdf_to_json_tests | d57a024fde862e698d916a1178f285883d7a3b2f | [
"BSD-3-Clause"
] | 1 | 2021-09-19T19:47:35.000Z | 2021-09-19T19:47:35.000Z | data/multilingual/Latn.TGL/Mono_16/pdf_to_json_test_Latn.TGL_Mono_16.py | antoinecarme/pdf_to_json_tests | d57a024fde862e698d916a1178f285883d7a3b2f | [
"BSD-3-Clause"
] | null | null | null | data/multilingual/Latn.TGL/Mono_16/pdf_to_json_test_Latn.TGL_Mono_16.py | antoinecarme/pdf_to_json_tests | d57a024fde862e698d916a1178f285883d7a3b2f | [
"BSD-3-Clause"
] | null | null | null | import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.TGL/Mono_16/udhr_Latn.TGL_Mono_16.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
| 30.3 | 73 | 0.811881 |
3d50836216d661df16f5f9932dbd9b0d11852588 | 88 | py | Python | test_challenge.py | EstebanFicetti/itec-pp1-challenge | cdc6b17a6ef6a550f7044c3e7caf17cda234af93 | [
"MIT"
] | null | null | null | test_challenge.py | EstebanFicetti/itec-pp1-challenge | cdc6b17a6ef6a550f7044c3e7caf17cda234af93 | [
"MIT"
] | null | null | null | test_challenge.py | EstebanFicetti/itec-pp1-challenge | cdc6b17a6ef6a550f7044c3e7caf17cda234af93 | [
"MIT"
] | null | null | null |
from challenge import num_to_str
def test_answer():
    """num_to_str should spell out the number (Spanish word expected)."""
    assert num_to_str(3) == 'tres'
6bcf56a2669f6f116305d70c89c6ef911bf10b31 | 2,816 | py | Python | python/test/function/test_softmax_cross_entropy.py | sdonatti/nnabla | ac4a42e62dd358f16bd79c08a9a9f3d83c0100c9 | [
"Apache-2.0"
] | 1 | 2020-08-03T12:49:19.000Z | 2020-08-03T12:49:19.000Z | python/test/function/test_softmax_cross_entropy.py | langbin2014/nnabla | e94bac5bed65337010e2ac07a5937fb862ab2dd8 | [
"Apache-2.0"
] | 1 | 2020-11-09T07:33:29.000Z | 2020-11-09T07:33:29.000Z | python/test/function/test_softmax_cross_entropy.py | langbin2014/nnabla | e94bac5bed65337010e2ac07a5937fb862ab2dd8 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import numpy as np
import nnabla.functions as F
from nbla_test_utils import list_context
ctxs = list_context('SoftmaxCrossEntropy')
def ref_softmax_cross_entropy(x, l, axis):
    """NumPy reference for softmax cross-entropy along ``axis``.

    Args:
        x: score array; ``axis`` is the class dimension.
        l: integer label array with shape equal to ``x`` except that
           ``l.shape[axis] == 1``.
        axis: class axis.

    Returns:
        Per-sample ``-log(softmax(x)[label])`` with the same shape as ``l``.
    """
    # Numerically stable softmax along the class axis.
    z = x - x.max(axis, keepdims=True)
    p = np.exp(z) / np.exp(z).sum(axis, keepdims=True)
    # Flatten to (n_samples, n_class) with the class axis last.
    # (np.moveaxis replaces the deprecated np.rollaxis used previously;
    # the label axis has size 1, so its position does not affect ravel order.)
    p2 = np.moveaxis(p, axis, -1).reshape(-1, x.shape[axis])
    labels = np.moveaxis(l, axis, -1).ravel()
    picked = p2[np.arange(p2.shape[0]), labels]
    # Clamp to the smallest positive float32 to avoid log(0).
    y = -np.log(np.maximum(picked, np.finfo(np.float32).tiny))
    return y.reshape(l.shape)
@pytest.mark.parametrize("ctx, func_name", ctxs)
@pytest.mark.parametrize("seed", [314])
@pytest.mark.parametrize("axis", [0, 1, 2])
def test_softmax_cross_entropy_forward_backward(seed, axis, ctx, func_name):
    """Check forward/backward against the NumPy reference for each axis."""
    from nbla_test_utils import function_tester
    ishape = [2, 3, 4]
    rng = np.random.RandomState(seed)
    # Labels collapse the class axis to size 1.
    l_shape = list(ishape)
    l_shape[axis] = 1
    n_class = ishape[axis]
    inputs = [
        # Derive the shape from ishape rather than repeating the literals.
        rng.randn(*ishape).astype(np.float32) * 2,
        # `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin `int` is the drop-in replacement.
        rng.randint(0, n_class, size=l_shape).astype(int)]
    function_tester(rng, F.softmax_cross_entropy, ref_softmax_cross_entropy,
                    inputs, func_args=[axis], backward=[True, False],
                    atol_b=2e-3, ctx=ctx, func_name=func_name)
@pytest.mark.parametrize("ctx, func_name", ctxs)
@pytest.mark.parametrize("seed", [314])
@pytest.mark.parametrize("axis", [0, 1, 2])
def test_softmax_cross_entropy_double_backward(seed, axis, ctx, func_name):
    """Check second-order gradients against the NumPy reference."""
    from nbla_test_utils import backward_function_tester
    ishape = [2, 3, 4]
    rng = np.random.RandomState(seed)
    # Labels collapse the class axis to size 1.
    l_shape = list(ishape)
    l_shape[axis] = 1
    n_class = ishape[axis]
    inputs = [
        # Derive the shape from ishape rather than repeating the literals.
        rng.randn(*ishape).astype(np.float32) * 2,
        # `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin `int` is the drop-in replacement.
        rng.randint(0, n_class, size=l_shape).astype(int)]
    backward_function_tester(rng, F.softmax_cross_entropy, ref_softmax_cross_entropy,
                             inputs, func_args=[axis], backward=[True, False],
                             atol_b=1e-3,
                             atol_accum=1e-3,
                             dstep=1e-3,
                             ctx=ctx, func_name=func_name)
| 35.64557 | 85 | 0.652344 |
71f9a843650fe85ba5609f83b5674078efd5a5c9 | 2,390 | py | Python | tests/confidence_checks/task_checklists/task_suite_test.py | MSLars/allennlp | 2cdb8742c8c8c3c38ace4bdfadbdc750a1aa2475 | [
"Apache-2.0"
] | 11,433 | 2017-06-27T03:08:46.000Z | 2022-03-31T18:14:33.000Z | tests/confidence_checks/task_checklists/task_suite_test.py | MSLars/allennlp | 2cdb8742c8c8c3c38ace4bdfadbdc750a1aa2475 | [
"Apache-2.0"
] | 4,006 | 2017-06-26T21:45:43.000Z | 2022-03-31T02:11:10.000Z | tests/confidence_checks/task_checklists/task_suite_test.py | MSLars/allennlp | 2cdb8742c8c8c3c38ace4bdfadbdc750a1aa2475 | [
"Apache-2.0"
] | 2,560 | 2017-06-26T21:16:53.000Z | 2022-03-30T07:55:46.000Z | import pytest
from allennlp.confidence_checks.task_checklists.task_suite import TaskSuite
from allennlp.common.testing import AllenNlpTestCase
from allennlp.common.checks import ConfigurationError
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor
from allennlp.common.testing.checklist_test import FakeTaskSuite # noqa: F401
class TestTaskSuite(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
archive = load_archive(
self.FIXTURES_ROOT / "basic_classifier" / "serialization" / "model.tar.gz"
)
self.predictor = Predictor.from_archive(archive)
def test_load_from_suite_file(self):
suite_file = str(self.FIXTURES_ROOT / "task_suites" / "fake_suite.tar.gz")
task_suite = TaskSuite.constructor(suite_file=suite_file)
assert len(task_suite.suite.tests) == 1
def test_load_by_name(self):
task_suite = TaskSuite.constructor(name="fake-task-suite")
assert task_suite._fake_arg1 is None
assert task_suite._fake_arg2 is None
assert len(task_suite.suite.tests) == 1
with pytest.raises(ConfigurationError):
TaskSuite.constructor(name="suite-that-does-not-exist")
def test_load_with_extra_args(self):
extra_args = {"fake_arg1": "some label"}
task_suite = TaskSuite.constructor(name="fake-task-suite", extra_args=extra_args)
assert task_suite._fake_arg1 == "some label"
def test_prediction_and_confidence_scores_function_needs_implementation(self):
task_suite = TaskSuite.constructor(name="fake-task-suite")
with pytest.raises(NotImplementedError):
task_suite.run(self.predictor)
def test_add_default_tests(self):
# We include "isn't" so that the contractions test is also added.
data = ["This isn't real data"]
task_suite = TaskSuite(add_default_tests=True, data=data)
assert "Typos" in task_suite.suite.tests
assert "2 Typos" in task_suite.suite.tests
assert "Contractions" in task_suite.suite.tests
data = ["This is data with no contractions."]
task_suite = TaskSuite(add_default_tests=True, data=data)
assert "Typos" in task_suite.suite.tests
assert "2 Typos" in task_suite.suite.tests
assert "Contractions" not in task_suite.suite.tests
| 37.936508 | 89 | 0.714644 |
02f833d31b8f9c9c177ba44560b8f741c2ceac46 | 442 | py | Python | src/results/migrations/0005_yahoomatchup_updated_result_timestamp.py | sfernandezf/analytics-yahoofantasy | 6242599b903e4b8a7f9c56892ba26591a441b8fb | [
"Apache-2.0"
] | null | null | null | src/results/migrations/0005_yahoomatchup_updated_result_timestamp.py | sfernandezf/analytics-yahoofantasy | 6242599b903e4b8a7f9c56892ba26591a441b8fb | [
"Apache-2.0"
] | 6 | 2020-03-15T03:32:06.000Z | 2022-01-13T03:46:05.000Z | src/results/migrations/0005_yahoomatchup_updated_result_timestamp.py | sfernandezf/analytics-yahoofantasy | 6242599b903e4b8a7f9c56892ba26591a441b8fb | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.0.3 on 2020-07-27 00:23
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds a nullable DateTime field
    # `updated_result_timestamp` to the YahooMatchup model.
    # NOTE(review): verbose_name 'Created At' looks copied from another
    # field -- confirm before relying on it in forms/admin.

    dependencies = [
        ('results', '0004_yahoomatchupteamresult_so'),
    ]

    operations = [
        migrations.AddField(
            model_name='yahoomatchup',
            name='updated_result_timestamp',
            field=models.DateTimeField(null=True, verbose_name='Created At'),
        ),
    ]
| 23.263158 | 77 | 0.635747 |
c5c8832e62ec2944d8a12ba1e83e5b833a5cac26 | 1,784 | py | Python | seriesplugin/src/ShowLogScreen.py | builder07/enigma2-plugins_3 | 2fc0d26891fba28ebea1550a39f5e8d7973db10c | [
"OLDAP-2.3"
] | 2 | 2020-09-02T18:25:39.000Z | 2020-09-02T18:39:07.000Z | seriesplugin/src/ShowLogScreen.py | builder07/enigma2-plugins_3 | 2fc0d26891fba28ebea1550a39f5e8d7973db10c | [
"OLDAP-2.3"
] | null | null | null | seriesplugin/src/ShowLogScreen.py | builder07/enigma2-plugins_3 | 2fc0d26891fba28ebea1550a39f5e8d7973db10c | [
"OLDAP-2.3"
] | 11 | 2015-02-26T20:59:14.000Z | 2021-09-20T08:23:03.000Z | # -*- coding: utf-8 -*-
import os, sys, traceback
# Config
from Components.config import *
from Components.Sources.StaticText import StaticText
# Screen
from Components.ActionMap import ActionMap
from Components.ScrollLabel import ScrollLabel
from enigma import eSize, ePoint, getDesktop
from Screens.Screen import Screen
from Tools.Directories import fileExists, resolveFilename, SCOPE_PLUGINS
# Plugin internal
from . import _
class ShowLogScreen(Screen):
    """Read-only scrollable viewer for a single log file."""

    def __init__(self, session, logFile):
        """Build the screen; the file is loaded once layout has finished."""
        Screen.__init__(self, session)
        self.skinName = ["TestBox", "Console"]
        self.logFile = logFile
        self["text"] = ScrollLabel("")
        # OK/back close the screen; direction keys scroll page-wise;
        # bouquet keys jump to the last/first page.
        self["actions"] = ActionMap(["WizardActions", "DirectionActions", "ChannelSelectBaseActions"],
        {
            "ok": self.cancel,
            "back": self.cancel,
            "up": self["text"].pageUp,
            "down": self["text"].pageDown,
            "left": self["text"].pageUp,
            "right": self["text"].pageDown,
            "nextBouquet": self["text"].lastPage,
            "prevBouquet": self.firstPage,
        }, -1)
        # Defer reading the file until the widgets exist.
        self.onLayoutFinish.append(self.readLog)

    def cancel(self):
        """Close the screen."""
        self.close()

    def setText(self, text):
        """Replace the displayed text."""
        self["text"].setText(text)

    def close(self):
        Screen.close(self)

    def firstPage(self):
        """Scroll back to the top of the log."""
        self["text"].long_text.move(ePoint(0, 0))
        self["text"].updateScrollbar()

    def readLog(self):
        """Load self.logFile into the label, or show an error placeholder."""
        # Set title and a placeholder while the file is being read.
        self.setTitle(_("Show Log file"))
        self.setText(_("Reading log file...\n") + self.logFile + _("\nCancel?"))
        if not fileExists(self.logFile):
            self.setText(_("No log file found"))
        elif not os.path.getsize(self.logFile) == 0:
            # `with` guarantees the handle is closed even if read() fails
            # (the original left it open on error); also avoids shadowing
            # the builtin name `file`.
            with open(self.logFile, "r") as log:
                text = log.read()
            try:
                self.setText(text)
                self["text"].lastPage()
            except Exception:
                # Best effort: a rendering problem must not crash the UI.
                pass
4ae7a5f725ffbe5d686b01cecea81bee6739b990 | 568 | py | Python | projects/aid_project.py | magicwenli/morpher | 2f8e756d81f3fac59c948789e945a06a4d4adce3 | [
"MIT"
] | null | null | null | projects/aid_project.py | magicwenli/morpher | 2f8e756d81f3fac59c948789e945a06a4d4adce3 | [
"MIT"
] | null | null | null | projects/aid_project.py | magicwenli/morpher | 2f8e756d81f3fac59c948789e945a06a4d4adce3 | [
"MIT"
] | null | null | null | import cv2
import matplotlib.pyplot as plt
from PIL import Image
def cvt(img):
b, g, r = cv2.split(img)
return cv2.merge([r, g, b])
img1 = Image.open("pics/fig00001.png")
img2 = cv2.imread('pics/fig00003.png', 1)
img3 = cv2.imread('pics/fig00004.jpg', 1)
img4 = Image.open("pics/fig00002.png")
plt.figure("fig_1")
plt.subplot(2, 2, 1)
plt.imshow(img1)
plt.axis('off')
plt.subplot(2, 2, 2)
plt.imshow(cvt(img2))
plt.axis('off')
plt.subplot(2, 2, 3)
plt.imshow(cvt(img3))
plt.axis('off')
plt.subplot(2, 2, 4)
plt.imshow(img4)
plt.axis('off')
plt.show()
| 16.705882 | 41 | 0.667254 |
9344e3f6086f39de840c6be7a2b8eabe86bd138a | 96 | py | Python | pywt/data/__init__.py | SalvoCas/pywt | 75b3b7b37102aad27780153b4b0fdaf184b205a4 | [
"MIT"
] | 1,435 | 2015-07-29T18:28:27.000Z | 2022-03-31T10:16:46.000Z | pywt/data/__init__.py | SalvoCas/pywt | 75b3b7b37102aad27780153b4b0fdaf184b205a4 | [
"MIT"
] | 547 | 2015-07-29T18:10:15.000Z | 2022-03-24T18:42:57.000Z | site-packages/pywt/data/__init__.py | Wristlebane/Pyto | 901ac307b68486d8289105c159ca702318bea5b0 | [
"MIT"
] | 421 | 2015-07-30T13:08:25.000Z | 2022-03-24T11:10:07.000Z | from ._readers import ascent, aero, ecg, camera, nino
from ._wavelab_signals import demo_signal
| 32 | 53 | 0.8125 |
d409e301c133548c35a4658083c87815b2ebd8a6 | 3,735 | py | Python | label_studio_ml/examples/bert/_wsgi.py | arronmabrey/label-studio-ml-backend | 9e8728a9aba11b8385e501428b4421c819ef1839 | [
"Apache-2.0"
] | null | null | null | label_studio_ml/examples/bert/_wsgi.py | arronmabrey/label-studio-ml-backend | 9e8728a9aba11b8385e501428b4421c819ef1839 | [
"Apache-2.0"
] | null | null | null | label_studio_ml/examples/bert/_wsgi.py | arronmabrey/label-studio-ml-backend | 9e8728a9aba11b8385e501428b4421c819ef1839 | [
"Apache-2.0"
] | null | null | null | import os
import argparse
import logging
import logging.config
# Configure root logging: records at ERROR and above go to stdout through a
# single console handler with a timestamped, code-location-annotated format.
_LOGGING_CONFIG = {
    "version": 1,
    "formatters": {
        "standard": {
            "format": "[%(asctime)s] [%(levelname)s] [%(name)s::%(funcName)s::%(lineno)d] %(message)s"
        }
    },
    "handlers": {
        "console": {
            "class": "logging.StreamHandler",
            "level": "DEBUG",
            "stream": "ext://sys.stdout",
            "formatter": "standard",
        }
    },
    "root": {
        "level": "ERROR",
        "handlers": ["console"],
        "propagate": True,
    },
}
logging.config.dictConfig(_LOGGING_CONFIG)
from label_studio_ml.api import init_app
from bert_classifier import BertClassifier
_DEFAULT_CONFIG_PATH = os.path.join(os.path.dirname(__file__), 'config.json')


def get_kwargs_from_config(config_path=_DEFAULT_CONFIG_PATH):
    """Read model-initialization kwargs from a JSON config file.

    Returns an empty dict when the file does not exist; raises AssertionError
    when the file holds valid JSON that is not a JSON object.
    """
    if not os.path.exists(config_path):
        return dict()
    with open(config_path) as f:
        # BUG FIX: `json` was referenced here without ever being imported,
        # so any existing config file crashed with NameError.
        config = json.load(f)
    assert isinstance(config, dict)
    return config
if __name__ == "__main__":
    # CLI entry point: parse server options, then launch the Flask dev server.
    parser = argparse.ArgumentParser(description='Label studio')
    parser.add_argument(
        '-p', '--port', dest='port', type=int, default=9090,
        help='Server port')
    parser.add_argument(
        '--host', dest='host', type=str, default='0.0.0.0',
        help='Server host')
    parser.add_argument(
        '--kwargs', '--with', dest='kwargs', metavar='KEY=VAL', nargs='+', type=lambda kv: kv.split('='),
        help='Additional LabelStudioMLBase model initialization kwargs')
    parser.add_argument(
        '-d', '--debug', dest='debug', action='store_true',
        help='Switch debug mode')
    parser.add_argument(
        '--log-level', dest='log_level', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR'], default=None,
        help='Logging level')
    parser.add_argument(
        '--model-dir', dest='model_dir', default=os.path.dirname(__file__),
        help='Directory where models are stored (relative to the project directory)')
    parser.add_argument(
        '--check', dest='check', action='store_true',
        help='Validate model instance before launching server')

    args = parser.parse_args()

    # setup logging level
    if args.log_level:
        logging.root.setLevel(args.log_level)

    def isfloat(value):
        """Return True if `value` parses as a float."""
        try:
            float(value)
            return True
        except ValueError:
            return False

    def parse_kwargs():
        """Coerce KEY=VAL pairs from the CLI into typed kwargs (int/bool/float/str)."""
        param = dict()
        for k, v in args.kwargs:
            if v.isdigit():
                param[k] = int(v)
            elif v == 'True' or v == 'true':
                param[k] = True
            # BUG FIX: the second comparison previously repeated 'False', so a
            # lowercase 'false' was never recognized as a boolean and was kept
            # as the string "false".
            elif v == 'False' or v == 'false':
                param[k] = False
            elif isfloat(v):
                param[k] = float(v)
            else:
                param[k] = v
        return param

    # File-based config first; CLI --kwargs override file values.
    kwargs = get_kwargs_from_config()

    if args.kwargs:
        kwargs.update(parse_kwargs())

    if args.check:
        print('Check "' + BertClassifier.__name__ + '" instance creation..')
        model = BertClassifier(**kwargs)

    app = init_app(
        model_class=BertClassifier,
        model_dir=os.environ.get('MODEL_DIR', args.model_dir),
        redis_queue=os.environ.get('RQ_QUEUE_NAME', 'default'),
        redis_host=os.environ.get('REDIS_HOST', 'localhost'),
        redis_port=os.environ.get('REDIS_PORT', 6379),
        **kwargs
    )

    app.run(host=args.host, port=args.port, debug=args.debug)

else:
    # for uWSGI use
    app = init_app(
        model_class=BertClassifier,
        model_dir=os.environ.get('MODEL_DIR', os.path.dirname(__file__)),
        redis_queue=os.environ.get('RQ_QUEUE_NAME', 'default'),
        redis_host=os.environ.get('REDIS_HOST', 'localhost'),
        redis_port=os.environ.get('REDIS_PORT', 6379)
    )
ae0b37e211a1246b11effadfbca34063842a8787 | 537 | py | Python | homeassistant/components/recorder/const.py | danieledwardgeorgehitchcock/core | 4cd4fbefbf7142ecf0734fcf6365a034b53ec4ff | [
"Apache-2.0"
] | null | null | null | homeassistant/components/recorder/const.py | danieledwardgeorgehitchcock/core | 4cd4fbefbf7142ecf0734fcf6365a034b53ec4ff | [
"Apache-2.0"
] | 14 | 2022-01-26T06:25:32.000Z | 2022-03-31T06:27:51.000Z | homeassistant/components/recorder/const.py | letterarrow/home-assistant | 8220817d479a101f06fa029b221b2faca496260a | [
"Apache-2.0"
] | null | null | null | """Recorder constants."""
DATA_INSTANCE = "recorder_instance"
SQLITE_URL_PREFIX = "sqlite://"
DOMAIN = "recorder"
CONF_DB_INTEGRITY_CHECK = "db_integrity_check"
MAX_QUEUE_BACKLOG = 30000
# The maximum number of rows (events) we purge in one delete statement
# sqlite3 has a limit of 999 until version 3.32.0
# in https://github.com/sqlite/sqlite/commit/efdba1a8b3c6c967e7fae9c1989c40d420ce64cc
# We can increase this back to 1000 once most
# have upgraded their sqlite version
MAX_ROWS_TO_PURGE = 998
DB_WORKER_PREFIX = "DbWorker"
| 26.85 | 85 | 0.785847 |
ecc322251564418c7a23321719718ad770e65213 | 6,393 | py | Python | numpy/typing/tests/data/reveal/ndarray_misc.py | DFEvans/numpy | 75567b5b15940d2c09cc309eafb2950504714551 | [
"BSD-3-Clause"
] | null | null | null | numpy/typing/tests/data/reveal/ndarray_misc.py | DFEvans/numpy | 75567b5b15940d2c09cc309eafb2950504714551 | [
"BSD-3-Clause"
] | null | null | null | numpy/typing/tests/data/reveal/ndarray_misc.py | DFEvans/numpy | 75567b5b15940d2c09cc309eafb2950504714551 | [
"BSD-3-Clause"
] | null | null | null | """
Tests for miscellaneous (non-magic) ``np.ndarray``/``np.generic`` methods.
More extensive tests are performed for the methods'
function-based counterpart in `../from_numeric.py`.
"""
import operator
from typing import Any
import numpy as np
# NOTE: this file is static type-check test data — the `# E:` trailers are the
# expected reveal_type() outputs consumed by the test harness, so neither the
# code nor those trailing comments may be altered.
class SubClass(np.ndarray): ...  # minimal ndarray subclass, used as `out=` target below

# Declarations only (no values assigned): the file is type-checked, never executed.
f8: np.float64
B: SubClass
AR_f8: np.ndarray[Any, np.dtype[np.float64]]
AR_i8: np.ndarray[Any, np.dtype[np.int64]]
AR_U: np.ndarray[Any, np.dtype[np.str_]]

# Reductions / logical tests
reveal_type(f8.all()) # E: numpy.bool_
reveal_type(AR_f8.all()) # E: numpy.bool_
reveal_type(AR_f8.all(axis=0)) # E: Any
reveal_type(AR_f8.all(keepdims=True)) # E: Any
reveal_type(AR_f8.all(out=B)) # E: SubClass
reveal_type(f8.any()) # E: numpy.bool_
reveal_type(AR_f8.any()) # E: numpy.bool_
reveal_type(AR_f8.any(axis=0)) # E: Any
reveal_type(AR_f8.any(keepdims=True)) # E: Any
reveal_type(AR_f8.any(out=B)) # E: SubClass
reveal_type(f8.argmax()) # E: {intp}
reveal_type(AR_f8.argmax()) # E: {intp}
reveal_type(AR_f8.argmax(axis=0)) # E: Any
reveal_type(AR_f8.argmax(out=B)) # E: SubClass
reveal_type(f8.argmin()) # E: {intp}
reveal_type(AR_f8.argmin()) # E: {intp}
reveal_type(AR_f8.argmin(axis=0)) # E: Any
reveal_type(AR_f8.argmin(out=B)) # E: SubClass
reveal_type(f8.argsort()) # E: numpy.ndarray[Any, Any]
reveal_type(AR_f8.argsort()) # E: numpy.ndarray[Any, Any]
# Selection / clipping
reveal_type(f8.astype(np.int64).choose([()])) # E: numpy.ndarray[Any, Any]
reveal_type(AR_f8.choose([0])) # E: numpy.ndarray[Any, Any]
reveal_type(AR_f8.choose([0], out=B)) # E: SubClass
reveal_type(f8.clip(1)) # E: Any
reveal_type(AR_f8.clip(1)) # E: Any
reveal_type(AR_f8.clip(None, 1)) # E: Any
reveal_type(AR_f8.clip(1, out=B)) # E: SubClass
reveal_type(AR_f8.clip(None, 1, out=B)) # E: SubClass
reveal_type(f8.compress([0])) # E: numpy.ndarray[Any, Any]
reveal_type(AR_f8.compress([0])) # E: numpy.ndarray[Any, Any]
reveal_type(AR_f8.compress([0], out=B)) # E: SubClass
reveal_type(f8.conj()) # E: {float64}
reveal_type(AR_f8.conj()) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
reveal_type(B.conj()) # E: SubClass
reveal_type(f8.conjugate()) # E: {float64}
reveal_type(AR_f8.conjugate()) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
reveal_type(B.conjugate()) # E: SubClass
reveal_type(f8.cumprod()) # E: numpy.ndarray[Any, Any]
reveal_type(AR_f8.cumprod()) # E: numpy.ndarray[Any, Any]
reveal_type(AR_f8.cumprod(out=B)) # E: SubClass
reveal_type(f8.cumsum()) # E: numpy.ndarray[Any, Any]
reveal_type(AR_f8.cumsum()) # E: numpy.ndarray[Any, Any]
reveal_type(AR_f8.cumsum(out=B)) # E: SubClass
# Statistics
reveal_type(f8.max()) # E: Any
reveal_type(AR_f8.max()) # E: Any
reveal_type(AR_f8.max(axis=0)) # E: Any
reveal_type(AR_f8.max(keepdims=True)) # E: Any
reveal_type(AR_f8.max(out=B)) # E: SubClass
reveal_type(f8.mean()) # E: Any
reveal_type(AR_f8.mean()) # E: Any
reveal_type(AR_f8.mean(axis=0)) # E: Any
reveal_type(AR_f8.mean(keepdims=True)) # E: Any
reveal_type(AR_f8.mean(out=B)) # E: SubClass
reveal_type(f8.min()) # E: Any
reveal_type(AR_f8.min()) # E: Any
reveal_type(AR_f8.min(axis=0)) # E: Any
reveal_type(AR_f8.min(keepdims=True)) # E: Any
reveal_type(AR_f8.min(out=B)) # E: SubClass
reveal_type(f8.newbyteorder()) # E: {float64}
reveal_type(AR_f8.newbyteorder()) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
reveal_type(B.newbyteorder('|')) # E: SubClass
reveal_type(f8.prod()) # E: Any
reveal_type(AR_f8.prod()) # E: Any
reveal_type(AR_f8.prod(axis=0)) # E: Any
reveal_type(AR_f8.prod(keepdims=True)) # E: Any
reveal_type(AR_f8.prod(out=B)) # E: SubClass
reveal_type(f8.ptp()) # E: Any
reveal_type(AR_f8.ptp()) # E: Any
reveal_type(AR_f8.ptp(axis=0)) # E: Any
reveal_type(AR_f8.ptp(keepdims=True)) # E: Any
reveal_type(AR_f8.ptp(out=B)) # E: SubClass
reveal_type(f8.round()) # E: {float64}
reveal_type(AR_f8.round()) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
reveal_type(AR_f8.round(out=B)) # E: SubClass
reveal_type(f8.repeat(1)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
reveal_type(AR_f8.repeat(1)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
reveal_type(B.repeat(1)) # E: numpy.ndarray[Any, Any]
reveal_type(f8.std()) # E: Any
reveal_type(AR_f8.std()) # E: Any
reveal_type(AR_f8.std(axis=0)) # E: Any
reveal_type(AR_f8.std(keepdims=True)) # E: Any
reveal_type(AR_f8.std(out=B)) # E: SubClass
reveal_type(f8.sum()) # E: Any
reveal_type(AR_f8.sum()) # E: Any
reveal_type(AR_f8.sum(axis=0)) # E: Any
reveal_type(AR_f8.sum(keepdims=True)) # E: Any
reveal_type(AR_f8.sum(out=B)) # E: SubClass
reveal_type(f8.take(0)) # E: {float64}
reveal_type(AR_f8.take(0)) # E: {float64}
reveal_type(AR_f8.take([0])) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
reveal_type(AR_f8.take(0, out=B)) # E: SubClass
reveal_type(AR_f8.take([0], out=B)) # E: SubClass
reveal_type(f8.var()) # E: Any
reveal_type(AR_f8.var()) # E: Any
reveal_type(AR_f8.var(axis=0)) # E: Any
reveal_type(AR_f8.var(keepdims=True)) # E: Any
reveal_type(AR_f8.var(out=B)) # E: SubClass
# ndarray-only methods
reveal_type(AR_f8.argpartition([0])) # E: numpy.ndarray[Any, numpy.dtype[{intp}]]
reveal_type(AR_f8.diagonal()) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
reveal_type(AR_f8.dot(1)) # E: numpy.ndarray[Any, Any]
reveal_type(AR_f8.dot([1])) # E: Any
reveal_type(AR_f8.dot(1, out=B)) # E: SubClass
reveal_type(AR_f8.nonzero()) # E: tuple[numpy.ndarray[Any, numpy.dtype[{intp}]]]
reveal_type(AR_f8.searchsorted(1)) # E: {intp}
reveal_type(AR_f8.searchsorted([1])) # E: numpy.ndarray[Any, numpy.dtype[{intp}]]
reveal_type(AR_f8.trace()) # E: Any
reveal_type(AR_f8.trace(out=B)) # E: SubClass
reveal_type(AR_f8.item()) # E: float
reveal_type(AR_U.item()) # E: str
reveal_type(AR_f8.ravel()) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
reveal_type(AR_U.ravel()) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]]
reveal_type(AR_f8.flatten()) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
reveal_type(AR_U.flatten()) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]]
reveal_type(AR_f8.reshape(1)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
reveal_type(AR_U.reshape(1)) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]]
# Builtin conversions
reveal_type(int(AR_f8)) # E: int
reveal_type(int(AR_U)) # E: int
reveal_type(float(AR_f8)) # E: float
reveal_type(float(AR_U)) # E: float
reveal_type(complex(AR_f8)) # E: complex
reveal_type(operator.index(AR_i8)) # E: int
f8372366460d5b2e92b14c50fb06ae3c9cbba7c5 | 60,326 | py | Python | datamart_isi/entries_old.py | usc-isi-i2/datamart-user-end | b3111d67d3c9a7f69885e44b4645724f9a629c19 | [
"MIT"
] | 1 | 2020-09-19T14:51:14.000Z | 2020-09-19T14:51:14.000Z | datamart_isi/entries_old.py | usc-isi-i2/datamart-user-end | b3111d67d3c9a7f69885e44b4645724f9a629c19 | [
"MIT"
] | null | null | null | datamart_isi/entries_old.py | usc-isi-i2/datamart-user-end | b3111d67d3c9a7f69885e44b4645724f9a629c19 | [
"MIT"
] | 1 | 2020-11-06T22:52:30.000Z | 2020-11-06T22:52:30.000Z | import pandas as pd
# import os
import copy
import random
import frozendict
import collections
import typing
from d3m.container import DataFrame as d3m_DataFrame
from d3m.container import Dataset as d3m_Dataset
import d3m.metadata.base as metadata_base
# from datamart.dataset import Dataset
from datamart.utilities.utils import PRODUCTION_ES_INDEX, SEARCH_URL
from datamart.es_managers.json_query_manager import JSONQueryManager
from datamart.new_query.augment import Augment
# old Augment
# from datamart.augment import Augment
# from datamart.data_loader import DataLoader
from d3m.base import utils as d3m_utils
from datamart.utilities.utils import Utils
# from datamart.joiners.join_result import JoinResult
# from datamart.joiners.joiner_base import JoinerType
# from itertools import chain
from datamart.joiners.rltk_joiner import RLTKJoiner
from SPARQLWrapper import SPARQLWrapper, JSON, POST, URLENCODED
from d3m.metadata.base import DataMetadata, ALL_ELEMENTS
from datamart.joiners.rltk_joiner import RLTKJoiner_new
from wikifier import config
# import requests
import traceback
import logging
import datetime
import enum
# Semantic type marking a column of Wikidata Q-node identifiers.
Q_NODE_SEMANTIC_TYPE = "http://wikidata.org/qnode"
DEFAULT_URL = "https://isi-datamart.edu"
# BUG FIX: the export list was previously spelled `__ALL__`, which Python
# ignores entirely; only the lowercase `__all__` controls `import *`.
__all__ = ('D3MDatamart', 'DatamartSearchResult', 'D3MJoinSpec')
# Forward-reference type variables for annotations used before the classes
# are defined later in this module / package.
DatamartSearchResult = typing.TypeVar('DatamartSearchResult', bound='DatamartSearchResult')
D3MJoinSpec = typing.TypeVar('D3MJoinSpec', bound='D3MJoinSpec')
DatamartQuery = typing.TypeVar('DatamartQuery', bound='DatamartQuery')
# Cap on how many distinct cell values are packed into one search query.
MAX_ENTITIES_LENGTH = 200
CONTAINER_SCHEMA_VERSION = 'https://metadata.datadrivendiscovery.org/schemas/v0/container.json'
P_NODE_IGNORE_LIST = {"P1549"}
SPECIAL_REQUEST_FOR_P_NODE = {"P1813": "FILTER(strlen(str(?P1813)) = 2)"}
AUGMENT_RESOURCE_ID = "augmentData"
class D3MDatamart:
"""
ISI implementation of D3MDatamart
"""
def __init__(self, mode):
    """Create a datamart client.

    :param mode: "test" selects the test SPARQL query endpoint from
        wikifier.config; any other value selects the main endpoint.
    """
    self.url = DEFAULT_URL
    self._logger = logging.getLogger(__name__)
    if mode == "test":
        query_server = config.endpoint_query_test
    else:
        query_server = config.endpoint_query_main
    # Augment wraps the SPARQL endpoint and performs the actual queries.
    self.augmenter = Augment(endpoint=query_server)
def search(self, query: DatamartQuery, timeout=None, limit: int = 20) -> typing.List[DatamartSearchResult]:
    """
    This entry point supports search using a query specification.
    The query spec is rich enough to enable query by example. The caller can select
    values from a column in a dataset and use the query spec to find datasets that can join with the values
    provided. Use of the query spec enables callers to compose their own "smart search" implementations.
    Parameters
    ----------
    query: DatamartQuery
        Query specification.
    timeout: int
        Maximum number of seconds before returning results.
    limit: int
        Maximum number of search results to return.
    Returns
    -------
    typing.List[DatamartSearchResult]
        List of search results, combined from multiple datamarts. In the baseline implementation the list of
        datamarts will be randomized on each call, and the results from multiple datamarts will be interleaved
        accordingly. There will be an attempt to deduplicate results from multiple datamarts.
    """
    # Only the ISI datamart endpoint is supported; any other URL yields no results.
    if not self.url.startswith(SEARCH_URL):
        return []
    # Delegate to the generic search with no supplied data attached.
    query_json = query.to_json()
    return self.search_general(query=query_json, supplied_data=None, timeout=timeout, limit=limit)
def search_general(self, query, supplied_data: d3m_DataFrame=None, timeout=None, limit: int=20) \
        -> typing.List[DatamartSearchResult]:
    """
    The search function used for general elastic search.

    :param query: JSON object describing the query.
    :param supplied_data: the data you are trying to augment.
    :param timeout: allowed time spent on searching (currently unused, kept
        for interface compatibility).
    :param limit: the limitation on the return amount of DatamartSearchResult
    :return: list of search results of DatamartSearchResult; on failure,
        whatever partial results were collected before the error.
    """
    query_results = []
    try:
        params = {"timeout": "2m"}
        if (query and ('required_variables' in query)) or (supplied_data is None):
            # Either the caller pinned the join columns explicitly, or there
            # is no supplied data to inspect: run a single query as-is.
            cur_results = self.augmenter.query_by_sparql(query, supplied_data, size=limit) or []
            for res in cur_results:
                query_results.append(DatamartSearchResult(search_result=res, supplied_data=supplied_data,
                                                          query_json=query, search_type="general"))
        else:
            # No "required_variables" given but data exists: try every
            # queryable column as the join column and merge the de-duplicated
            # results from each attempt.
            query = query or {}
            seen_ids = set()
            for col in supplied_data:
                if not Utils.is_column_able_to_query(supplied_data[col]):
                    continue
                # Never mutate the caller's query object (update 2019.4.9).
                query_copy = query.copy()
                query_copy['required_variables'] = [{
                    "type": "dataframe_columns",
                    "names": [col]
                }]
                cur_results = self.augmenter.query_by_sparql(query_copy, supplied_data, size=limit, params=params)
                if not cur_results:
                    continue
                for res in cur_results:
                    if res['_id'] not in seen_ids:
                        # TODO: how about the score ??
                        seen_ids.add(res['_id'])
                        query_results.append(DatamartSearchResult(search_result=res, supplied_data=supplied_data,
                                                                  query_json=query_copy, search_type="general"))
    except Exception:
        # BUG FIX: this used to be a bare `except:` combined with `return`
        # inside `finally`, which silently swallowed *every* exception,
        # including SystemExit and KeyboardInterrupt. The old message also
        # wrongly said "wiki data" for this general search path.
        self._logger.exception("General datamart search failed")
    # Partial results are still returned after a failure, matching the
    # original behavior.
    return query_results
def search_with_data(self, supplied_data: typing.Union[d3m_Dataset, d3m_DataFrame], query: DatamartQuery = None,
                     timeout=None, limit: int = 20) -> typing.List[DatamartSearchResult]:
    """
    This entry point supports search based on a supplied dataset.
    *) Smart search: the caller provides supplied_data (a D3M dataset), and a query containing
    keywords from the D3M problem specification. The search will analyze the supplied data and look for datasets
    that can augment the supplied data in some way. The keywords are used for further filtering and ranking.
    For example, a datamart may try to identify named entities in the supplied data and search for companion
    datasets that contain these entities.
    *) Columns search: this search is similar to smart search in that it uses both query spec and the
    supplied_data. The difference is that only the data in the columns listed in query_by_example_columns is
    used to identify companion datasets.
    Parameters
    ----------
    query: DatamartQuery
        Query specification
    supplied_data: d3m.container.Dataset or d3m.container.DataFrame
        The data you are trying to augment.
    timeout: int
        Maximum number of seconds before returning results.
    limit: int
        Maximum number of search results to return.
    Returns
    -------
    typing.List[DatamartSearchResult]
        A list of DatamartSearchResult of possible companion datasets for the supplied data.
    """
    if not self.url.startswith(SEARCH_URL):
        return []
    if type(supplied_data) is d3m_Dataset:
        res_id, supplied_dataframe = d3m_utils.get_tabular_resource(dataset=supplied_data, resource_id=None)
    else:
        supplied_dataframe = supplied_data
    search_results = []
    if query is not None:
        query_json = query.to_json()
    else:
        query_json = None
    # first take a search on wikidata
    wikidata_search_results = self.search_wiki_data(query_json, supplied_data, timeout)
    search_results.extend(wikidata_search_results)
    # Remaining quota for the general (elasticsearch) results.
    limit_remained = limit - len(wikidata_search_results)
    if query is None:
        # if not query given, try to find the Text columns from given dataframe and use it to find candidates
        can_query_columns = []
        for each in range(len(supplied_dataframe.columns)):
            if type(supplied_data) is d3m_Dataset:
                selector = (res_id, ALL_ELEMENTS, each)
            else:
                selector = (ALL_ELEMENTS, each)
            each_column_meta = supplied_data.metadata.query(selector)
            if 'http://schema.org/Text' in each_column_meta["semantic_types"]:
                # or "https://metadata.datadrivendiscovery.org/types/CategoricalData" in each_column_meta["semantic_types"]:
                can_query_columns.append(each)
        if len(can_query_columns) == 0:
            self._logger.warning("No columns can be augment!")
            return search_results
        results_no_query = []
        for each_column in can_query_columns:
            column_values = supplied_dataframe.iloc[:, each_column]
            query_column_entities = list(set(column_values.tolist()))
            # Sample down large columns so the query string stays bounded.
            if len(query_column_entities) > MAX_ENTITIES_LENGTH:
                query_column_entities = random.sample(query_column_entities, MAX_ENTITIES_LENGTH)
            for i in range(len(query_column_entities)):
                query_column_entities[i] = str(query_column_entities[i])
            query_column_entities = " ".join(query_column_entities)
            search_query = DatamartQuery(about=query_column_entities)
            query_json = search_query.to_json()
            # TODO: need to improve the query for required variables
            # search_query = {
            #     "required_variables": [
            #         {
            #             "type": "generic_entity",
            #             "named_entities": query_column_entities
            #         }
            #     ]
            # }
            # sort to put best results at top
            temp_results = self.search_general(query=query_json, supplied_data=supplied_dataframe)
            temp_results.sort(key=lambda x: x.score, reverse=True)
            results_no_query.append(temp_results)
        # we will return the results of each searching query one by one
        # for example: [res_q1_1,res_q1_2,res_q1_3], [res_q2_1,res_q2_2,res_q2_3], [res_q3_1,res_q3_2,res_q3_3]
        # will return as: [res_q1_1, res_q2_1, res_q3_1, res_q1_2, res_q2_2, res_q3_3...]
        results_rescheduled = []
        has_remained = True
        while has_remained:
            has_remained = False
            for each in results_no_query:
                if len(each) > 0:
                    has_remained = True
                    results_rescheduled.append(each.pop(0))
        # append together
        search_results.extend(results_rescheduled)
    else:  # for the condition if given query, follow the query
        if limit_remained > 0:
            query_json = query.to_json()
            general_search_results = self.search_general(query=query_json, supplied_data=supplied_dataframe,
                                                         timeout=timeout, limit=limit_remained)
            general_search_results.sort(key=lambda x: x.score, reverse=True)
            search_results.extend(general_search_results)
    # Trim to the overall limit (wikidata results kept first).
    if len(search_results) > limit:
        search_results = search_results[:limit]
    return search_results
def search_with_data_columns(self, supplied_data: container.Dataset, data_constraints: typing.List['TabularVariable'],
query: 'DatamartQuery'=None) -> "DatamartQueryCursor":
"""
Search using a query which can include constraints on supplied data columns (TabularVariable).
This search is similar to the "smart" search provided by `search_with_data()`, but caller must manually specify
constraints using columns from the supplied data; Datamart will not automatically analyze it to determine
relevance or joinability.
Use of the query spec enables callers to compose their own "smart search" implementations.
Datamart implementations should return a DatamartQueryCursor immediately.
Parameters
------_---
query : DatamartQuery
Query specification
supplied_data : container.Dataset
The data you are trying to augment.
data_constraints : list
List of `TabularVariable` constraints referencing the supplied data.
Returns
-------
DatamartQueryCursor
A cursor pointing to search results containing possible companion datasets for the supplied data.
"""
if query is None:
def augment(self, query, supplied_data: d3m_Dataset, timeout: int = None, max_new_columns: int = 1000) -> d3m_Dataset:
    """
    In this entry point, the caller supplies a query and a dataset, and datamart returns an augmented dataset.
    Datamart automatically determines useful data for augmentation and automatically joins the new data to
    produce an augmented Dataset that may contain new columns and rows, and possibly new dataframes.
    Parameters
    ----------
    query: DatamartQuery
        Query specification
    supplied_data: d3m.container.Dataset
        The data you are trying to augment.
    timeout: int
        Maximum number of seconds before returning results.
    max_new_columns: int
        Maximum number of new columns to add to the original dataset.
        NOTE(review): the check below compares the *total* column count of the
        augmented result against this limit, not just the newly added
        columns — confirm whether that is intended.
    Returns
    -------
    d3m.container.Dataset
        The augmented Dataset
    """
    if type(supplied_data) is d3m_Dataset:
        input_type = "ds"
        res_id, _ = d3m_utils.get_tabular_resource(dataset=supplied_data, resource_id=None)
    else:
        input_type = "df"
    search_results = self.search_with_data(query=query, supplied_data=supplied_data, timeout=timeout)
    continue_aug = True
    count = 0
    augment_result = supplied_data
    # continue augmenting until reach the maximum new columns or all search_result has been used
    while continue_aug and count < len(search_results):
        augment_result = search_results[count].augment(supplied_data=augment_result)
        count += 1
        if input_type == "ds":
            current_column_number = augment_result[res_id].shape[1]
        else:
            current_column_number = augment_result.shape[1]
        if current_column_number >= max_new_columns:
            continue_aug = False
    return augment_result
@staticmethod
def search_wiki_data(query, supplied_data: typing.Union[d3m_DataFrame, d3m_Dataset]=None, timeout=None,
                     search_threshold=0.5) -> typing.List[DatamartSearchResult]:
    """
    The search function used for wikidata search.

    :param query: JSON object describing the query.
    :param supplied_data: the data you are trying to augment.
    :param timeout: allowed time spent on searching (currently unused).
    :param search_threshold: minimum fraction of the unique Q nodes that must
        carry a property for that property to be included in the result.
    :return: list of search results of DatamartSearchResult; partial results
        on failure.
    """
    wikidata_results = []
    try:
        q_nodes_columns = []
        if type(supplied_data) is d3m_Dataset:
            res_id, supplied_dataframe = d3m_utils.get_tabular_resource(dataset=supplied_data, resource_id=None)
            selector_base_type = "ds"
        else:
            supplied_dataframe = supplied_data
            selector_base_type = "df"
        # check whether Qnode is given in the inputs, if given, use this to wikidata and search
        required_variables_names = None
        metadata_input = supplied_data.metadata
        if query is not None and 'required_variables' in query:
            required_variables_names = []
            for each in query['required_variables']:
                required_variables_names.extend(each['names'])
        for i in range(supplied_dataframe.shape[1]):
            if selector_base_type == "ds":
                metadata_selector = (res_id, metadata_base.ALL_ELEMENTS, i)
            else:
                metadata_selector = (metadata_base.ALL_ELEMENTS, i)
            if Q_NODE_SEMANTIC_TYPE in metadata_input.query(metadata_selector)["semantic_types"]:
                # if no required variables given, attach any Q nodes found
                if required_variables_names is None:
                    q_nodes_columns.append(i)
                # otherwise this column has to be inside required_variables
                else:
                    if supplied_dataframe.columns[i] in required_variables_names:
                        q_nodes_columns.append(i)
        if len(q_nodes_columns) == 0:
            print("No wikidata Q nodes detected on corresponding required_variables! Will skip wikidata search part")
            return wikidata_results
        else:
            print("Wikidata Q nodes inputs detected! Will search with it.")
            print("Totally " + str(len(q_nodes_columns)) + " Q nodes columns detected!")
        # do a wikidata search for each Q nodes column
        for each_column in q_nodes_columns:
            q_nodes_list = supplied_dataframe.iloc[:, each_column].tolist()
            p_count = collections.defaultdict(int)
            p_nodes_needed = []
            # temporary block
            """
            http_address = 'http://minds03.isi.edu:4444/get_properties'
            headers = {"Content-Type": "application/json"}
            requests_data = str(q_nodes_list)
            requests_data = requests_data.replace("'", '"')
            r = requests.post(http_address, data=requests_data, headers=headers)
            results = r.json()
            for each_p_list in results.values():
                for each_p in each_p_list:
                    p_count[each_p] += 1
            """
            # TODO: temporary change here, may change back in the future
            # Q node format (wd:Q23)(wd: Q42)
            q_node_query_part = ""
            unique_qnodes = set(q_nodes_list)
            for each in unique_qnodes:
                if len(each) > 0:
                    q_node_query_part += "(wd:" + each + ")"
            # List every (item, property) pair whose property holds a
            # Quantity / Time / Monolingualtext value.
            sparql_query = "select distinct ?item ?property where \n{\n VALUES (?item) {" + q_node_query_part \
                + " }\n ?item ?property ?value .\n ?wd_property wikibase:directClaim ?property ." \
                + " values ( ?type ) \n {\n ( wikibase:Quantity )\n" \
                + " ( wikibase:Time )\n ( wikibase:Monolingualtext )\n }" \
                + " ?wd_property wikibase:propertyType ?type .\n}\norder by ?item ?property "
            try:
                sparql = SPARQLWrapper(WIKIDATA_QUERY_SERVER)
                sparql.setQuery(sparql_query)
                sparql.setReturnFormat(JSON)
                sparql.setMethod(POST)
                sparql.setRequestMethod(URLENCODED)
                results = sparql.query().convert()['results']['bindings']
            # NOTE(review): bare except here (and below) also traps
            # KeyboardInterrupt/SystemExit — consider `except Exception`.
            except:
                print("Getting query of wiki data failed!")
                continue
            # Count, per property, how many of the Q nodes carry it.
            for each in results:
                p_count[each['property']['value'].split("/")[-1]] += 1
            for key, val in p_count.items():
                if float(val) / len(unique_qnodes) >= search_threshold:
                    p_nodes_needed.append(key)
            wikidata_search_result = {"p_nodes_needed": p_nodes_needed,
                                      "target_q_node_column_name": supplied_dataframe.columns[each_column]}
            wikidata_results.append(DatamartSearchResult(search_result=wikidata_search_result,
                                                         supplied_data=supplied_data,
                                                         query_json=query,
                                                         search_type="wikidata")
                                    )
        return wikidata_results
    except:
        print("Searching with wiki data failed")
        traceback.print_exc()
    finally:
        # NOTE(review): `return` in `finally` masks any in-flight exception;
        # kept as-is to preserve behavior.
        return wikidata_results
class DatamartMetadata:
    """Metadata attached to a single datamart search result."""

    def __init__(self, title: str, description: str):
        """Store the human-readable title and description of a dataset."""
        self.title = title
        self.description = description

    def get_columns(self) -> typing.List[str]:
        """Names of the columns in the downloadable dataset (not implemented)."""
        pass

    def get_detailed_metadata(self) -> dict:
        """Program-wide standard metadata for this dataset, per the shared
        metadata standard (not implemented)."""
        pass

    def get_datamart_specific_metadata(self) -> dict:
        """Extra metadata computed only by this datamart implementation,
        beyond the program-wide standard (not implemented)."""
        pass
class DatamartSearchResult:
"""
This class represents the search results of a datamart search.
Different datamarts will provide different implementations of this class.
Attributes
----------
join_hints: typing.List[D3MAugmentSpec]
Hints for joining supplied data with datamart data
"""
def __init__(self, search_result, supplied_data, query_json, search_type):
    """Wrap one raw search hit together with the data it was found for.

    :param search_result: raw result dict ("general" ES hits carry "_score" /
        "_source"; "wikidata" results carry p_nodes_needed etc.).
    :param supplied_data: the d3m Dataset or DataFrame being augmented.
    :param query_json: the query JSON that produced this hit.
    :param search_type: "general" or "wikidata".
    """
    self._logger = logging.getLogger(__name__)
    self.search_result = search_result
    # score/metadata only exist for elasticsearch ("general") hits.
    if "_score" in self.search_result:
        self.score = self.search_result["_score"]
    if "_source" in self.search_result:
        self.metadata = self.search_result["_source"]
    self.supplied_data = supplied_data
    if type(supplied_data) is d3m_Dataset:
        self.res_id, self.supplied_dataframe = d3m_utils.get_tabular_resource(dataset=supplied_data, resource_id=None)
        self.selector_base_type = "ds"
    elif type(supplied_data) is d3m_DataFrame:
        self.supplied_dataframe = supplied_data
        self.selector_base_type = "df"
    self.query_json = query_json
    self.search_type = search_type
    self.pairs = None
    self._res_id = None  # only used for input is Dataset
    self.join_pairs = None
def display(self) -> pd.DataFrame:
    """
    function used to see what found inside this search result class in a human vision
    :return: a pandas DataFrame with a single row: title, columns, join columns

    NOTE(review): `result` is only assigned for search_type "wikidata" or
    "general"; any other value raises UnboundLocalError on return — confirm
    whether other search types can reach here.
    """
    if self.search_type == "wikidata":
        # Resolve each needed P node to its human-readable name.
        column_names = []
        for each in self.search_result["p_nodes_needed"]:
            each_name = self._get_node_name(each)
            column_names.append(each_name)
        column_names = ", ".join(column_names)
        required_variable = []
        required_variable.append(self.search_result["target_q_node_column_name"])
        result = pd.DataFrame({"title": "wikidata search result for " \
                               + self.search_result["target_q_node_column_name"], \
                               "columns": column_names, "join columns": required_variable}, index=[0])
    elif self.search_type == "general":
        title = self.search_result['_source']['title']
        column_names = []
        required_variable = []
        for each in self.query_json['required_variables']:
            required_variable.append(each['names'])
        for each in self.search_result['_source']['variables']:
            each_name = each['name']
            column_names.append(each_name)
        column_names = ", ".join(column_names)
        result = pd.DataFrame({"title": title, "columns": column_names, "join columns": required_variable}, index=[0])
    return result
def download(self, supplied_data: typing.Union[d3m_Dataset, d3m_DataFrame], generate_metadata=True, return_format="ds") \
        -> typing.Union[d3m_Dataset, d3m_DataFrame]:
    """
    Download the dataset or DataFrame (depending on the input type) and the
    corresponding metadata information of this search result. Every call to
    download yields a DataFrame with the exact same columns in the same order.

    :param supplied_data: the data being augmented.
    :param generate_metadata: whether to generate metadata for the result.
    :param return_format: "ds" for Dataset output, "df" for DataFrame.
    :raises ValueError: if this result's search_type is unknown.
    """
    if self.search_type == "general":
        return_df = self.download_general(supplied_data, generate_metadata, return_format)
    elif self.search_type == "wikidata":
        return_df = self.download_wikidata(supplied_data, generate_metadata, return_format)
    else:
        # BUG FIX: an unrecognized search_type previously fell through and
        # crashed with an opaque UnboundLocalError on `return_df`.
        raise ValueError("Unknown search_type: {}".format(self.search_type))
    return return_df
    def download_general(self, supplied_data: typing.Union[d3m_Dataset, d3m_DataFrame]=None, generate_metadata=True,
                         return_format="ds", augment_resource_id = AUGMENT_RESOURCE_ID) -> typing.Union[d3m_Dataset, d3m_DataFrame]:
        """
        Specified download function for general datamart Datasets
        :param supplied_data: given supplied data
        :param generate_metadata: whether need to genreate the metadata or not
        :param return_format: "ds" to return a d3m Dataset, "df" to return a d3m DataFrame
        :param augment_resource_id: resource id used for the result when return_format is "ds"
        :return: a dataset or a dataframe depending on the input
        :raises ValueError: if no join pairs could be computed or return_format is invalid
        """
        # Pull the tabular resource out of a Dataset input; DataFrame inputs are used directly.
        if type(supplied_data) is d3m_Dataset:
            self._res_id, self.supplied_dataframe = d3m_utils.get_tabular_resource(dataset=supplied_data, resource_id=None)
        else:
            self.supplied_dataframe = supplied_data
        # Prefer join pairs set via set_join_pairs(); otherwise derive them from search hints.
        if self.join_pairs is None:
            candidate_join_column_pairs = self.get_join_hints()
        else:
            candidate_join_column_pairs = self.join_pairs
        if len(candidate_join_column_pairs) > 1:
            print("[WARN]: multiple joining column pairs found")
        join_pairs_result = []
        candidate_join_column_scores = []
        # start finding pairs
        if supplied_data is None:
            supplied_data = self.supplied_dataframe
        # Work on a deep copy so profiling / implicit-variable columns never mutate the caller's data.
        left_df = copy.deepcopy(self.supplied_dataframe)
        right_metadata = self.search_result['_source']
        right_df = Utils.materialize(metadata=self.metadata)
        left_metadata = Utils.generate_metadata_from_dataframe(data=left_df, original_meta=None)
        # generate the pairs for each join_column_pairs
        for each_pair in candidate_join_column_pairs:
            left_columns = each_pair.left_columns
            right_columns = each_pair.right_columns
            try:
                # Only profile the joining columns, otherwise it will be too slow:
                left_metadata = Utils.calculate_dsbox_features(data=left_df, metadata=left_metadata,
                                                               selected_columns=set(left_columns))
                right_metadata = Utils.calculate_dsbox_features(data=right_df, metadata=right_metadata,
                                                                selected_columns=set(right_columns))
                # update with implicit_variable on the user supplied dataset
                if left_metadata.get('implicit_variables'):
                    Utils.append_columns_for_implicit_variables_and_add_meta(left_metadata, left_df)
                print(" - start getting pairs for", each_pair.to_str_format())
                result, self.pairs = RLTKJoiner.find_pair(left_df=left_df, right_df=right_df,
                                                          left_columns=[left_columns], right_columns=[right_columns],
                                                          left_metadata=left_metadata, right_metadata=right_metadata)
                join_pairs_result.append(result)
                # TODO: figure out some way to compute the joining quality
                candidate_join_column_scores.append(100)
            except:
                # NOTE(review): bare except deliberately keeps trying the remaining
                # candidate pairs, but it also swallows KeyboardInterrupt --
                # consider narrowing to Exception.
                print("failed when getting pairs for", each_pair)
                traceback.print_exc()
        # choose the best joining results
        all_results = []
        for i in range(len(join_pairs_result)):
            each_result = (candidate_join_column_pairs[i], candidate_join_column_scores[i], join_pairs_result[i])
            all_results.append(each_result)
        all_results.sort(key=lambda x: x[1], reverse=True)
        if len(all_results) == 0:
            raise ValueError("[ERROR] Failed to get pairs!")
        if return_format == "ds":
            return_df = d3m_DataFrame(all_results[0][2], generate_metadata=False)
            resources = {augment_resource_id: return_df}
            return_result = d3m_Dataset(resources=resources, generate_metadata=False)
            if generate_metadata:
                # Shape metadata first (dataset/table/row/column dimensions), then per-column metadata.
                metadata_shape_part_dict = self._generate_metadata_shape_part(value=return_result, selector=())
                for each_selector, each_metadata in metadata_shape_part_dict.items():
                    return_result.metadata = return_result.metadata.update(selector=each_selector, metadata=each_metadata)
                return_result.metadata = self._generate_metadata_column_part_for_general(return_result.metadata, return_format, augment_resource_id)
        elif return_format == "df":
            return_result = d3m_DataFrame(all_results[0][2], generate_metadata=False)
            if generate_metadata:
                metadata_shape_part_dict = self._generate_metadata_shape_part(value=return_result, selector=())
                for each_selector, each_metadata in metadata_shape_part_dict.items():
                    return_result.metadata = return_result.metadata.update(selector=each_selector, metadata=each_metadata)
                return_result.metadata = self._generate_metadata_column_part_for_general(return_result.metadata, return_format, augment_resource_id=None)
        else:
            raise ValueError("Invalid return format was given")
        return return_result
    def _generate_metadata_shape_part(self, value, selector) -> dict:
        """
        recursively generate all metadata for shape part, return a dict
        :param value: the container (d3m Dataset or d3m DataFrame) to describe
        :param selector: metadata selector tuple pointing at `value`
        :return: dict mapping selector tuples to the metadata dict for that selector
        """
        generated_metadata: dict = {}
        generated_metadata['schema'] = CONTAINER_SCHEMA_VERSION
        if isinstance(value, d3m_Dataset):  # type: ignore
            generated_metadata['dimension'] = {
                'name': 'resources',
                'semantic_types': ['https://metadata.datadrivendiscovery.org/types/DatasetResource'],
                'length': len(value),
            }
            metadata_dict = collections.OrderedDict([(selector, generated_metadata)])
            # Recurse into every resource contained in the dataset.
            for k, v in value.items():
                metadata_dict.update(self._generate_metadata_shape_part(v, selector + (k,)))
            # It is unlikely that metadata is equal across dataset resources, so we do not try to compact metadata here.
            return metadata_dict
        if isinstance(value, d3m_DataFrame):  # type: ignore
            generated_metadata['semantic_types'] = ['https://metadata.datadrivendiscovery.org/types/Table']
            generated_metadata['dimension'] = {
                'name': 'rows',
                'semantic_types': ['https://metadata.datadrivendiscovery.org/types/TabularRow'],
                'length': value.shape[0],
            }
            metadata_dict = collections.OrderedDict([(selector, generated_metadata)])
            # Reusing the variable for next dimension.
            generated_metadata = {
                'dimension': {
                    'name': 'columns',
                    'semantic_types': ['https://metadata.datadrivendiscovery.org/types/TabularColumn'],
                    'length': value.shape[1],
                },
            }
            selector_all_rows = selector + (ALL_ELEMENTS,)
            metadata_dict[selector_all_rows] = generated_metadata
            return metadata_dict
        # NOTE(review): falls through (returns None) for any other value type.
    def _generate_metadata_column_part_for_general(self, metadata_return, return_format, augment_resource_id) -> DataMetadata:
        """
        Inner function used to generate metadata for general search
        :param metadata_return: the DataMetadata object to update
        :param return_format: "ds" or "df"; selects the shape of the selectors
        :param augment_resource_id: resource id prefix for selectors when return_format is "ds"
        :return: the updated DataMetadata
        """
        # part for adding the whole dataset/ dataframe's metadata
        # part for adding each column's metadata
        for i, each_metadata in enumerate(self.metadata['variables']):
            if return_format == "ds":
                metadata_selector = (augment_resource_id, ALL_ELEMENTS, i)
            elif return_format == "df":
                metadata_selector = (ALL_ELEMENTS, i)
            # Infer the structural type from the pandas dtype text embedded in the description string.
            structural_type = each_metadata["description"].split("dtype: ")[-1]
            if "int" in structural_type:
                structural_type = int
            elif "float" in structural_type:
                structural_type = float
            else:
                structural_type = str
            metadata_each_column = {"name": each_metadata["name"], "structural_type": structural_type,
                                    'semantic_types': ('https://metadata.datadrivendiscovery.org/types/Attribute',)}
            metadata_return = metadata_return.update(metadata=metadata_each_column, selector=metadata_selector)
        # NOTE(review): `i` below is the last loop index, so the joining_pairs column
        # goes right after the variable columns; this raises NameError if
        # self.metadata['variables'] is empty -- confirm that cannot happen.
        if return_format == "ds":
            metadata_selector = (augment_resource_id, ALL_ELEMENTS, i + 1)
        elif return_format == "df":
            metadata_selector = (ALL_ELEMENTS, i + 1)
        metadata_joining_pairs = {"name": "joining_pairs", "structural_type": typing.List[int],
                                  'semantic_types': ("http://schema.org/Integer",)}
        metadata_return = metadata_return.update(metadata=metadata_joining_pairs, selector=metadata_selector)
        return metadata_return
def download_wikidata(self, supplied_data: typing.Union[d3m_Dataset, d3m_DataFrame], generate_metadata=True, return_format="ds",augment_resource_id=AUGMENT_RESOURCE_ID) -> typing.Union[d3m_Dataset, d3m_DataFrame]:
"""
:param supplied_data: input DataFrame
:param generate_metadata: control whether to automatically generate metadata of the return DataFrame or not
:return: return_df: the materialized wikidata d3m_DataFrame,
with corresponding pairing information to original_data at last column
"""
# prepare the query
p_nodes_needed = self.search_result["p_nodes_needed"]
target_q_node_column_name = self.search_result["target_q_node_column_name"]
if type(supplied_data) is d3m_DataFrame:
self.supplied_dataframe = copy.deepcopy(supplied_data)
elif type(supplied_data) is d3m_Dataset:
self._res_id, supplied_dataframe = d3m_utils.get_tabular_resource(dataset=supplied_data,
resource_id=None)
self.supplied_dataframe = copy.deepcopy(supplied_dataframe)
q_node_column_number = self.supplied_dataframe.columns.tolist().index(target_q_node_column_name)
q_nodes_list = set(self.supplied_dataframe.iloc[:, q_node_column_number].tolist())
q_nodes_query = ""
p_nodes_query_part = ""
p_nodes_optional_part = ""
special_request_part = ""
# q_nodes_list = q_nodes_list[:30]
for each in q_nodes_list:
if each != "N/A":
q_nodes_query += "(wd:" + each + ") \n"
for each in p_nodes_needed:
if each not in P_NODE_IGNORE_LIST:
p_nodes_query_part += " ?" + each
p_nodes_optional_part += " OPTIONAL { ?q wdt:" + each + " ?" + each + "}\n"
if each in SPECIAL_REQUEST_FOR_P_NODE:
special_request_part += SPECIAL_REQUEST_FOR_P_NODE[each] + "\n"
sparql_query = "SELECT DISTINCT ?q " + p_nodes_query_part + \
"WHERE \n{\n VALUES (?q) { \n " + q_nodes_query + "}\n" + \
p_nodes_optional_part + special_request_part + "}\n"
import pdb
pdb.set_trace()
return_df = d3m_DataFrame()
try:
sparql = SPARQLWrapper(WIKIDATA_QUERY_SERVER)
sparql.setQuery(sparql_query)
sparql.setReturnFormat(JSON)
sparql.setMethod(POST)
sparql.setRequestMethod(URLENCODED)
results = sparql.query().convert()
except:
print("Getting query of wiki data failed!")
return return_df
semantic_types_dict = {
"q_node": ("http://schema.org/Text", 'https://metadata.datadrivendiscovery.org/types/PrimaryKey')}
for result in results["results"]["bindings"]:
each_result = {}
q_node_name = result.pop("q")["value"].split("/")[-1]
each_result["q_node"] = q_node_name
for p_name, p_val in result.items():
each_result[p_name] = p_val["value"]
# only do this part if generate_metadata is required
if p_name not in semantic_types_dict:
if "datatype" in p_val.keys():
semantic_types_dict[p_name] = (
self._get_semantic_type(p_val["datatype"]),
'https://metadata.datadrivendiscovery.org/types/Attribute')
else:
semantic_types_dict[p_name] = (
"http://schema.org/Text", 'https://metadata.datadrivendiscovery.org/types/Attribute')
return_df = return_df.append(each_result, ignore_index=True)
p_name_dict = {"q_node": "q_node"}
for each in return_df.columns.tolist():
if each.lower().startswith("p") or each.lower().startswith("c"):
p_name_dict[each] = self._get_node_name(each)
# use rltk joiner to find the joining pairs
joiner = RLTKJoiner_new()
joiner.set_join_target_column_names((self.supplied_dataframe.columns[q_node_column_number], "q_node"))
result, self.pairs = joiner.find_pair(left_df=self.supplied_dataframe, right_df=return_df)
# if this condition is true, it means "id" column was added which should not be here
if return_df.shape[1] == len(p_name_dict) + 2 and "id" in return_df.columns:
return_df = return_df.drop(columns=["id"])
metadata_new = DataMetadata()
self.metadata = {}
# add remained attributes metadata
for each_column in range(0, return_df.shape[1] - 1):
current_column_name = p_name_dict[return_df.columns[each_column]]
metadata_selector = (ALL_ELEMENTS, each_column)
# here we do not modify the original data, we just add an extra "expected_semantic_types" to metadata
metadata_each_column = {"name": current_column_name, "structural_type": str,
'semantic_types': semantic_types_dict[return_df.columns[each_column]]}
self.metadata[current_column_name] = metadata_each_column
if generate_metadata:
metadata_new = metadata_new.update(metadata=metadata_each_column, selector=metadata_selector)
# special for joining_pairs column
metadata_selector = (ALL_ELEMENTS, return_df.shape[1])
metadata_joining_pairs = {"name": "joining_pairs", "structural_type": typing.List[int],
'semantic_types': ("http://schema.org/Integer",)}
if generate_metadata:
metadata_new = metadata_new.update(metadata=metadata_joining_pairs, selector=metadata_selector)
# start adding shape metadata for dataset
if return_format == "ds":
return_df = d3m_DataFrame(return_df, generate_metadata=False)
return_df = return_df.rename(columns=p_name_dict)
resources = {augment_resource_id: return_df}
return_result = d3m_Dataset(resources=resources, generate_metadata=False)
if generate_metadata:
return_result.metadata = metadata_new
metadata_shape_part_dict = self._generate_metadata_shape_part(value=return_result, selector=())
for each_selector, each_metadata in metadata_shape_part_dict.items():
return_result.metadata = return_result.metadata.update(selector=each_selector,
metadata=each_metadata)
# update column names to be property names instead of number
elif return_format == "df":
return_result = d3m_DataFrame(return_df, generate_metadata=False)
return_result = return_result.rename(columns=p_name_dict)
if generate_metadata:
return_result.metadata = metadata_new
metadata_shape_part_dict = self._generate_metadata_shape_part(value=return_result, selector=())
for each_selector, each_metadata in metadata_shape_part_dict.items():
return_result.metadata = return_result.metadata.update(selector=each_selector,
metadata=each_metadata)
return return_result
def _get_node_name(self, node_code):
"""
Inner function used to get the properties(P nodes) names with given P node
:param node_code: a str indicate the P node (e.g. "P123")
:return: a str indicate the P node label (e.g. "inception")
"""
sparql_query = "SELECT DISTINCT ?x WHERE \n { \n" + \
"wd:" + node_code + " rdfs:label ?x .\n FILTER(LANG(?x) = 'en') \n} "
try:
sparql = SPARQLWrapper(WIKIDATA_QUERY_SERVER)
sparql.setQuery(sparql_query)
sparql.setReturnFormat(JSON)
sparql.setMethod(POST)
sparql.setRequestMethod(URLENCODED)
results = sparql.query().convert()
return results['results']['bindings'][0]['x']['value']
except:
print("Getting name of node " + node_code + " failed!")
return node_code
def _get_semantic_type(self, datatype: str):
"""
Inner function used to transfer the wikidata semantic type to D3M semantic type
:param datatype: a str indicate the semantic type adapted from wikidata
:return: a str indicate the semantic type for D3M
"""
special_type_dict = {"http://www.w3.org/2001/XMLSchema#dateTime": "http://schema.org/DateTime",
"http://www.w3.org/2001/XMLSchema#decimal": "http://schema.org/Float",
"http://www.opengis.net/ont/geosparql#wktLiteral": "https://metadata.datadrivendiscovery.org/types/Location"}
default_type = "http://schema.org/Text"
if datatype in special_type_dict:
return special_type_dict[datatype]
else:
print("not seen type : ", datatype)
return default_type
def augment(self, supplied_data, generate_metadata=True, augment_resource_id=AUGMENT_RESOURCE_ID):
"""
download and join using the D3mJoinSpec from get_join_hints()
"""
if type(supplied_data) is d3m_DataFrame:
return self._augment(supplied_data=supplied_data, generate_metadata=generate_metadata, return_format="df", augment_resource_id=augment_resource_id)
elif type(supplied_data) is d3m_Dataset:
self._res_id, self.supplied_data = d3m_utils.get_tabular_resource(dataset=supplied_data, resource_id=None, has_hyperparameter=False)
res = self._augment(supplied_data=supplied_data, generate_metadata=generate_metadata, return_format="ds", augment_resource_id=augment_resource_id)
return res
    def _augment(self, supplied_data, generate_metadata=True, return_format="ds", augment_resource_id=AUGMENT_RESOURCE_ID):
        """
        download and join using the D3mJoinSpec from get_join_hints()
        :param supplied_data: d3m Dataset or d3m DataFrame to augment
        :param generate_metadata: whether to generate metadata for the joined result
        :param return_format: "ds" to return a d3m Dataset, "df" to return a d3m DataFrame
        :param augment_resource_id: resource id used for the result when return_format is "ds"
        :return: the joined result as a d3m Dataset or DataFrame
        """
        if type(supplied_data) is d3m_Dataset:
            supplied_data_df = supplied_data[self._res_id]
        else:
            supplied_data_df = supplied_data
        # Materialize the right-hand data; the joining_pairs helper column is not needed for the join itself.
        download_result = self.download(supplied_data=supplied_data_df, generate_metadata=False, return_format="df")
        download_result = download_result.drop(columns=['joining_pairs'])
        df_joined = pd.DataFrame()
        column_names_to_join = None
        # self.pairs (left-row/right-row index pairs) was produced by the download call above.
        for r1, r2 in self.pairs:
            left_res = supplied_data_df.loc[int(r1)]
            right_res = download_result.loc[int(r2)]
            if column_names_to_join is None:
                column_names_to_join = right_res.index.difference(left_res.index)
                matched_rows = right_res.index.intersection(left_res.index)
                columns_new = left_res.index.tolist()
                columns_new.extend(column_names_to_join.tolist())
            new = pd.concat([left_res, right_res[column_names_to_join]])
            df_joined = df_joined.append(new, ignore_index=True)
        # ensure that the original dataframe columns are at the first left part
        df_joined = df_joined[columns_new]
        # if search with wikidata, we can remove duplicate Q node column
        if self.search_type == "wikidata":
            df_joined = df_joined.drop(columns=['q_node'])
        if 'id' in df_joined.columns:
            df_joined = df_joined.drop(columns=['id'])
        if generate_metadata:
            # Move the d3mIndex column (if present) to the front of the result.
            columns_all = list(df_joined.columns)
            if 'd3mIndex' in df_joined.columns:
                oldindex = columns_all.index('d3mIndex')
                columns_all.insert(0, columns_all.pop(oldindex))
            else:
                self._logger.warning("No d3mIndex column found after data-mart augment!!!")
            df_joined = df_joined[columns_all]
        # start adding column metadata for dataset
        if generate_metadata:
            metadata_dict_left = {}
            metadata_dict_right = {}
            if self.search_type == "general":
                for each in self.metadata['variables']:
                    description = each['description']
                    # Infer semantic types from the pandas dtype text embedded in the description string.
                    dtype = description.split("dtype: ")[-1]
                    if "float" in dtype:
                        semantic_types = (
                            "http://schema.org/Float",
                            "https://metadata.datadrivendiscovery.org/types/Attribute"
                        )
                    elif "int" in dtype:
                        semantic_types = (
                            "http://schema.org/Integer",
                            "https://metadata.datadrivendiscovery.org/types/Attribute"
                        )
                    else:
                        semantic_types = (
                            "https://metadata.datadrivendiscovery.org/types/CategoricalData",
                            "https://metadata.datadrivendiscovery.org/types/Attribute"
                        )
                    each_meta = {
                        "name": each['name'],
                        "structural_type": str,
                        "semantic_types": semantic_types,
                        "description": description
                    }
                    metadata_dict_right[each['name']] = frozendict.FrozenOrderedDict(each_meta)
            else:
                metadata_dict_right = self.metadata
            if return_format == "df":
                left_df_column_legth = supplied_data.metadata.query((metadata_base.ALL_ELEMENTS,))['dimension'][
                    'length']
            elif return_format == "ds":
                left_df_column_legth = supplied_data.metadata.query((self._res_id, metadata_base.ALL_ELEMENTS,))['dimension']['length']
            # add the original metadata
            for i in range(left_df_column_legth):
                if return_format == "df":
                    each_selector = (ALL_ELEMENTS, i)
                elif return_format == "ds":
                    each_selector = (self._res_id, ALL_ELEMENTS, i)
                each_column_meta = supplied_data.metadata.query(each_selector)
                metadata_dict_left[each_column_meta['name']] = each_column_meta
            metadata_new = metadata_base.DataMetadata()
            metadata_old = copy.copy(supplied_data.metadata)
            new_column_names_list = list(df_joined.columns)
            # update each column's metadata
            for i, current_column_name in enumerate(new_column_names_list):
                if return_format == "df":
                    each_selector = (metadata_base.ALL_ELEMENTS, i)
                elif return_format == "ds":
                    each_selector = (augment_resource_id, ALL_ELEMENTS, i)
                # Prefer the supplied data's own metadata; fall back to the search-result metadata.
                if current_column_name in metadata_dict_left:
                    new_metadata_i = metadata_dict_left[current_column_name]
                else:
                    new_metadata_i = metadata_dict_right[current_column_name]
                metadata_new = metadata_new.update(each_selector, new_metadata_i)
        # start adding shape metadata for dataset
        if return_format == "ds":
            return_df = d3m_DataFrame(df_joined, generate_metadata=False)
            resources = {augment_resource_id: return_df}
            return_result = d3m_Dataset(resources=resources, generate_metadata=False)
            if generate_metadata:
                return_result.metadata = metadata_new
                metadata_shape_part_dict = self._generate_metadata_shape_part(value=return_result, selector=())
                for each_selector, each_metadata in metadata_shape_part_dict.items():
                    return_result.metadata = return_result.metadata.update(selector=each_selector,
                                                                           metadata=each_metadata)
        elif return_format == "df":
            return_result = d3m_DataFrame(df_joined, generate_metadata=False)
            if generate_metadata:
                return_result.metadata = metadata_new
                metadata_shape_part_dict = self._generate_metadata_shape_part(value=return_result, selector=())
                for each_selector, each_metadata in metadata_shape_part_dict.items():
                    return_result.metadata = return_result.metadata.update(selector=each_selector,
                                                                           metadata=each_metadata)
        return return_result
    def get_score(self) -> float:
        """Return this search result's score attribute."""
        return self.score
    def get_metadata(self) -> dict:
        """Return the metadata dict associated with this search result."""
        return self.metadata
    def set_join_pairs(self, join_pairs: typing.List[D3MJoinSpec]) -> None:
        """
        manually set up the join pairs
        :param join_pairs: user specified D3MJoinSpec
        :return: None; subsequent downloads will use these pairs instead of the
                 automatically computed join hints
        """
        self.join_pairs = join_pairs
    def get_join_hints(self, supplied_data=None) -> typing.List[D3MJoinSpec]:
        """
        Returns hints for joining supplied data with the data that can be downloaded using this search result.
        In the typical scenario, the hints are based on supplied data that was provided when search was called.
        The optional supplied_data argument enables the caller to request recomputation of join hints for specific data.
        :return: a list of join hints. Note that datamart is encouraged to return join hints but not required to do so.
        """
        if not supplied_data:
            supplied_data = self.supplied_dataframe
        if self.search_type == "general":
            # Join hints are derived from the elastic search "inner_hits" on required_variables.
            inner_hits = self.search_result.get('inner_hits', {})
            results = []
            used = set()
            for key_path, outer_hits in inner_hits.items():
                # key_path has the form "<variables group>.<index>.<entity type>".
                vars_type, index, ent_type = key_path.split('.')
                if vars_type != 'required_variables':
                    continue
                left_index = []
                right_index = []
                index = int(index)
                if ent_type == JSONQueryManager.DATAFRAME_COLUMNS:
                    # Left columns may be given directly by index, or by name (resolved here).
                    if self.query_json[vars_type][index].get('index'):
                        left_index = self.query_json[vars_type][index].get('index')
                    elif self.query_json[vars_type][index].get('names'):
                        left_index = [supplied_data.columns.tolist().index(idx)
                                      for idx in self.query_json[vars_type][index].get('names')]
                    inner_hits = outer_hits.get('hits', {})
                    hits_list = inner_hits.get('hits')
                    if hits_list:
                        for hit in hits_list:
                            offset = hit['_nested']['offset']
                            if offset not in used:
                                right_index.append(offset)
                                used.add(offset)
                            break
                if left_index and right_index:
                    each_result = D3MJoinSpec(left_columns=left_index, right_columns=right_index)
                    results.append(each_result)
            return results
        # NOTE(review): non-"general" search types fall through and return None.
@classmethod
def construct(cls, serialization: dict) -> DatamartSearchResult:
"""
Take into the serilized input and reconsctruct a "DatamartSearchResult"
"""
load_result = DatamartSearchResult(search_result=serialization["search_result"],
supplied_data=None,
query_json=serialization["query_json"],
search_type=serialization["search_type"])
return load_result
def serialize(self) -> dict:
output = dict()
output["search_result"] = self.search_result
output["query_json"] = self.query_json
output["search_type"] = self.search_type
return output
class D3MJoinSpec:
    """
    Describes one candidate way of joining a left dataset with a right dataset.

    Each side is a list of column groups (a list of lists of column indices):
    several columns together may form the key or fingerprint used for matching.
    For example, the left side may pair ["city", "state", "country"] columns
    with a single "address" column on the right. The spec only records *which*
    columns pair up; it does not prescribe the matching function (equality,
    fuzzy matching, splitting/merging columns, ...).
    """

    def __init__(self, left_columns: typing.List[typing.List[int]], right_columns: typing.List[typing.List[int]],
                 left_resource_id: str=None, right_resource_id: str=None):
        # Resource ids identify the tabular resources inside a d3m Dataset; they
        # may be omitted when the join spec refers to plain DataFrames.
        self.left_resource_id = left_resource_id
        self.right_resource_id = right_resource_id
        self.left_columns = left_columns
        self.right_columns = right_columns

    def to_str_format(self):
        """Render as "[ (<left id>, <left cols>) , (<right id>, <right cols>) ]"."""
        left_id = self.left_resource_id or ""
        right_id = self.right_resource_id or ""
        return f"[ ({left_id}, {self.left_columns}) , ({right_id}, {self.right_columns}) ]"
class TemporalGranularity(enum.Enum):
    """Granularity levels for temporal values, from coarsest to finest."""
    YEAR = 1
    MONTH = 2
    DAY = 3
    HOUR = 4
    SECOND = 5
class GeospatialGranularity(enum.Enum):
    """Granularity levels for geospatial values, from coarsest to finest."""
    COUNTRY = 1
    STATE = 2
    COUNTY = 3
    CITY = 4
    POSTALCODE = 5
class DatamartVariable:
    """Base class for the variable descriptions used in a DatamartQuery."""
    pass
class DatamartQuery:
    """
    A datamart search query: an optional free-text description plus optional
    lists of required and desired variables.
    """

    def __init__(self, about: str=None, required_variables: typing.List[DatamartVariable]=None,
                 desired_variables: typing.List[DatamartVariable]=None):
        self.about = about
        self.required_variables = required_variables
        self.desired_variables = desired_variables

    def to_json(self):
        """
        Translate this query into the dict format used for elastic search.

        Only fields that were actually set are included; note that
        desired_variables is not part of the serialized form.
        """
        search_query = {}
        if self.about is not None:
            search_query["dataset"] = {"about": self.about}
        if self.required_variables is not None:
            search_query["required_variables"] = self.required_variables
        return search_query
class NamedEntityVariable(DatamartVariable):
    """
    Describes columns containing named entities.
    Parameters
    ----------
    entities: List[str]
        List of strings that should be contained in the matched dataset column.
    """
    def __init__(self, entities: typing.List[str]):
        self.entities = entities
class TemporalVariable(DatamartVariable):
    """
    Describes columns containing temporal information.
    Parameters
    ----------
    start: datetime
        Requested datetime should be equal or older than this datetime
    end: datetime
        Requested datatime should be equal or less than this datetime
    granularity: TemporalGranularity
        Requested datetimes are well matched with the requested granularity
    """
    def __init__(self, start: datetime.datetime, end: datetime.datetime, granularity: TemporalGranularity):
        # Previously the constructor arguments were silently discarded (`pass`);
        # store them so the variable actually carries its description.
        self.start = start
        self.end = end
        self.granularity = granularity
class GeospatialVariable(DatamartVariable):
    """
    Describes columns containing geospatial information.
    Parameters
    ----------
    lat1: float
        The latitude of the first point
    long1: float
        The longitude of the first point
    lat2: float
        The latitude of the second point
    long2: float
        The longitude of the second point
    granularity: GeospatialGranularity
        Requested geosptial values are well matched with the requested granularity
    """
    def __init__(self, lat1: float, long1: float, lat2: float, long2: float, granularity: GeospatialGranularity):
        # Previously the constructor arguments were silently discarded (`pass`);
        # store them so the variable actually carries its description.
        self.lat1 = lat1
        self.long1 = long1
        self.lat2 = lat2
        self.long2 = long2
        self.granularity = granularity
| 48.145251 | 217 | 0.613782 |
87aea42aac9ef372d8bb66bf1c102808381cbbb3 | 103 | py | Python | auto_funcs/symptoms.py | rhysrushton/testauto | 9c32f40640f58703a0d063afbb647855fb680a61 | [
"MIT"
] | null | null | null | auto_funcs/symptoms.py | rhysrushton/testauto | 9c32f40640f58703a0d063afbb647855fb680a61 | [
"MIT"
] | null | null | null | auto_funcs/symptoms.py | rhysrushton/testauto | 9c32f40640f58703a0d063afbb647855fb680a61 | [
"MIT"
] | null | null | null | from random import randrange
def symptoms (range):
number = randrange(1,range)
return number | 14.714286 | 31 | 0.718447 |
41c6915ee87037bd3960ce1cb3fba0a14bf4f686 | 13,131 | py | Python | airflow/utils/db.py | RSEnergyGroup/incubator-airflow | e947c6c034238ede29a6c8f51307458d3e40c1b5 | [
"Apache-2.0"
] | null | null | null | airflow/utils/db.py | RSEnergyGroup/incubator-airflow | e947c6c034238ede29a6c8f51307458d3e40c1b5 | [
"Apache-2.0"
] | 1 | 2018-12-13T10:33:00.000Z | 2018-12-13T10:33:00.000Z | airflow/utils/db.py | RSEnergyGroup/incubator-airflow | e947c6c034238ede29a6c8f51307458d3e40c1b5 | [
"Apache-2.0"
] | 1 | 2019-06-03T23:15:00.000Z | 2019-06-03T23:15:00.000Z | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from functools import wraps
import os
import contextlib
from airflow import settings
from airflow.utils.log.logging_mixin import LoggingMixin
log = LoggingMixin().log
@contextlib.contextmanager
def create_session():
    """
    Contextmanager that will create and teardown a session.

    On normal exit the session is expunged and committed; on any exception it
    is rolled back and the exception re-raised. The session is always closed.
    """
    session = settings.Session()
    try:
        yield session
        # Detach all instances from the session, then commit.
        session.expunge_all()
        session.commit()
    except Exception:
        session.rollback()
        raise
    finally:
        session.close()
def provide_session(func):
    """
    Function decorator that provides a session if it isn't provided.
    If you want to reuse a session or run the function as part of a
    database transaction, you pass it to the function, if not this wrapper
    will create one and close it for you.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        # A session supplied by keyword wins immediately.
        if "session" in kwargs:
            return func(*args, **kwargs)
        # Otherwise check whether the caller passed it positionally.
        func_params = func.__code__.co_varnames
        try:
            session_pos = func_params.index("session")
        except ValueError:
            session_pos = None
        if session_pos is not None and session_pos < len(args):
            return func(*args, **kwargs)
        # No session given: create one for the duration of the call.
        with create_session() as session:
            return func(*args, session=session, **kwargs)
    return wrapper
@provide_session
def merge_conn(conn, session=None):
    """Insert ``conn`` into the connection table unless a connection with the
    same ``conn_id`` already exists; existing rows are left untouched."""
    from airflow import models
    C = models.Connection
    if not session.query(C).filter(C.conn_id == conn.conn_id).first():
        session.add(conn)
        session.commit()
def initdb(rbac=False):
session = settings.Session()
from airflow import models
upgradedb()
merge_conn(
models.Connection(
conn_id='airflow_db', conn_type='mysql',
host='mysql', login='root', password='',
schema='airflow'))
merge_conn(
models.Connection(
conn_id='beeline_default', conn_type='beeline', port="10000",
host='localhost', extra="{\"use_beeline\": true, \"auth\": \"\"}",
schema='default'))
merge_conn(
models.Connection(
conn_id='bigquery_default', conn_type='google_cloud_platform',
schema='default'))
merge_conn(
models.Connection(
conn_id='local_mysql', conn_type='mysql',
host='localhost', login='airflow', password='airflow',
schema='airflow'))
merge_conn(
models.Connection(
conn_id='presto_default', conn_type='presto',
host='localhost',
schema='hive', port=3400))
merge_conn(
models.Connection(
conn_id='google_cloud_default', conn_type='google_cloud_platform',
schema='default',))
merge_conn(
models.Connection(
conn_id='hive_cli_default', conn_type='hive_cli',
schema='default',))
merge_conn(
models.Connection(
conn_id='hiveserver2_default', conn_type='hiveserver2',
host='localhost',
schema='default', port=10000))
merge_conn(
models.Connection(
conn_id='metastore_default', conn_type='hive_metastore',
host='localhost', extra="{\"authMechanism\": \"PLAIN\"}",
port=9083))
merge_conn(
models.Connection(
conn_id='mongo_default', conn_type='mongo',
host='mongo', port=27017))
merge_conn(
models.Connection(
conn_id='mysql_default', conn_type='mysql',
login='root',
schema='airflow',
host='mysql'))
merge_conn(
models.Connection(
conn_id='postgres_default', conn_type='postgres',
login='postgres',
password='airflow',
schema='airflow',
host='postgres'))
merge_conn(
models.Connection(
conn_id='sqlite_default', conn_type='sqlite',
host='/tmp/sqlite_default.db'))
merge_conn(
models.Connection(
conn_id='http_default', conn_type='http',
host='https://www.google.com/'))
merge_conn(
models.Connection(
conn_id='mssql_default', conn_type='mssql',
host='localhost', port=1433))
merge_conn(
models.Connection(
conn_id='vertica_default', conn_type='vertica',
host='localhost', port=5433))
merge_conn(
models.Connection(
conn_id='wasb_default', conn_type='wasb',
extra='{"sas_token": null}'))
merge_conn(
models.Connection(
conn_id='webhdfs_default', conn_type='hdfs',
host='localhost', port=50070))
merge_conn(
models.Connection(
conn_id='ssh_default', conn_type='ssh',
host='localhost'))
merge_conn(
models.Connection(
conn_id='sftp_default', conn_type='sftp',
host='localhost', port=22, login='airflow',
extra='''
{"key_file": "~/.ssh/id_rsa", "no_host_key_check": true}
'''))
merge_conn(
models.Connection(
conn_id='fs_default', conn_type='fs',
extra='{"path": "/"}'))
merge_conn(
models.Connection(
conn_id='aws_default', conn_type='aws',
extra='{"region_name": "us-east-1"}'))
merge_conn(
models.Connection(
conn_id='spark_default', conn_type='spark',
host='yarn', extra='{"queue": "root.default"}'))
merge_conn(
models.Connection(
conn_id='druid_broker_default', conn_type='druid',
host='druid-broker', port=8082, extra='{"endpoint": "druid/v2/sql"}'))
merge_conn(
models.Connection(
conn_id='druid_ingest_default', conn_type='druid',
host='druid-overlord', port=8081, extra='{"endpoint": "druid/indexer/v1/task"}'))
merge_conn(
models.Connection(
conn_id='redis_default', conn_type='redis',
host='redis', port=6379,
extra='{"db": 0}'))
merge_conn(
models.Connection(
conn_id='sqoop_default', conn_type='sqoop',
host='rmdbs', extra=''))
merge_conn(
models.Connection(
conn_id='emr_default', conn_type='emr',
extra='''
{ "Name": "default_job_flow_name",
"LogUri": "s3://my-emr-log-bucket/default_job_flow_location",
"ReleaseLabel": "emr-4.6.0",
"Instances": {
"Ec2KeyName": "mykey",
"Ec2SubnetId": "somesubnet",
"InstanceGroups": [
{
"Name": "Master nodes",
"Market": "ON_DEMAND",
"InstanceRole": "MASTER",
"InstanceType": "r3.2xlarge",
"InstanceCount": 1
},
{
"Name": "Slave nodes",
"Market": "ON_DEMAND",
"InstanceRole": "CORE",
"InstanceType": "r3.2xlarge",
"InstanceCount": 1
}
],
"TerminationProtected": false,
"KeepJobFlowAliveWhenNoSteps": false
},
"Applications":[
{ "Name": "Spark" }
],
"VisibleToAllUsers": true,
"JobFlowRole": "EMR_EC2_DefaultRole",
"ServiceRole": "EMR_DefaultRole",
"Tags": [
{
"Key": "app",
"Value": "analytics"
},
{
"Key": "environment",
"Value": "development"
}
]
}
'''))
merge_conn(
models.Connection(
conn_id='databricks_default', conn_type='databricks',
host='localhost'))
merge_conn(
models.Connection(
conn_id='qubole_default', conn_type='qubole',
host='localhost'))
merge_conn(
models.Connection(
conn_id='segment_default', conn_type='segment',
extra='{"write_key": "my-segment-write-key"}')),
merge_conn(
models.Connection(
conn_id='azure_data_lake_default', conn_type='azure_data_lake',
extra='{"tenant": "<TENANT>", "account_name": "<ACCOUNTNAME>" }'))
merge_conn(
models.Connection(
conn_id='azure_cosmos_default', conn_type='azure_cosmos',
extra='{"database_name": "<DATABASE_NAME>", "collection_name": "<COLLECTION_NAME>" }'))
merge_conn(
models.Connection(
conn_id='cassandra_default', conn_type='cassandra',
host='cassandra', port=9042))
# Known event types
KET = models.KnownEventType
if not session.query(KET).filter(KET.know_event_type == 'Holiday').first():
session.add(KET(know_event_type='Holiday'))
if not session.query(KET).filter(KET.know_event_type == 'Outage').first():
session.add(KET(know_event_type='Outage'))
if not session.query(KET).filter(
KET.know_event_type == 'Natural Disaster').first():
session.add(KET(know_event_type='Natural Disaster'))
if not session.query(KET).filter(
KET.know_event_type == 'Marketing Campaign').first():
session.add(KET(know_event_type='Marketing Campaign'))
session.commit()
dagbag = models.DagBag()
# Save individual DAGs in the ORM
for dag in dagbag.dags.values():
dag.sync_to_db()
# Deactivate the unknown ones
models.DAG.deactivate_unknown_dags(dagbag.dags.keys())
Chart = models.Chart
chart_label = "Airflow task instance by type"
chart = session.query(Chart).filter(Chart.label == chart_label).first()
if not chart:
chart = Chart(
label=chart_label,
conn_id='airflow_db',
chart_type='bar',
x_is_date=False,
sql=(
"SELECT state, COUNT(1) as number "
"FROM task_instance "
"WHERE dag_id LIKE 'example%' "
"GROUP BY state"),
)
session.add(chart)
session.commit()
if rbac:
from flask_appbuilder.security.sqla import models
from flask_appbuilder.models.sqla import Base
Base.metadata.create_all(settings.engine)
def upgradedb():
# alembic adds significant import time, so we import it lazily
from alembic import command
from alembic.config import Config
log.info("Creating tables")
current_dir = os.path.dirname(os.path.abspath(__file__))
package_dir = os.path.normpath(os.path.join(current_dir, '..'))
directory = os.path.join(package_dir, 'migrations')
config = Config(os.path.join(package_dir, 'alembic.ini'))
config.set_main_option('script_location', directory.replace('%', '%%'))
config.set_main_option('sqlalchemy.url', settings.SQL_ALCHEMY_CONN.replace('%', '%%'))
command.upgrade(config, 'heads')
def resetdb(rbac):
"""
Clear out the database
"""
from airflow import models
# alembic adds significant import time, so we import it lazily
from alembic.migration import MigrationContext
log.info("Dropping tables that exist")
models.Base.metadata.drop_all(settings.engine)
mc = MigrationContext.configure(settings.engine)
if mc._version.exists(settings.engine):
mc._version.drop(settings.engine)
if rbac:
# drop rbac security tables
from flask_appbuilder.security.sqla import models
from flask_appbuilder.models.sqla import Base
Base.metadata.drop_all(settings.engine)
initdb(rbac)
| 35.203753 | 99 | 0.572767 |
c6680ab3dcaa1a65aa0653915c388010e5985e41 | 3,787 | py | Python | projects/robots/abb/irb/controllers/inverse_kinematics/inverse_kinematics.py | SherlockSheep/webots | 13bb7f13f15cd1a003b64368b1fd8783732e175e | [
"Apache-2.0"
] | null | null | null | projects/robots/abb/irb/controllers/inverse_kinematics/inverse_kinematics.py | SherlockSheep/webots | 13bb7f13f15cd1a003b64368b1fd8783732e175e | [
"Apache-2.0"
] | null | null | null | projects/robots/abb/irb/controllers/inverse_kinematics/inverse_kinematics.py | SherlockSheep/webots | 13bb7f13f15cd1a003b64368b1fd8783732e175e | [
"Apache-2.0"
] | null | null | null | # Copyright 1996-2020 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Demonstration of inverse kinematics using the "ikpy" Python module."""
import sys
import tempfile
try:
import ikpy
from ikpy.chain import Chain
except ImportError:
sys.exit('The "ikpy" Python module is not installed. '
'To run this sample, please upgrade "pip" and install ikpy with this command: "pip install ikpy"')
import math
from controller import Supervisor
if ikpy.__version__[0] < '3':
sys.exit('The "ikpy" Python module version is too old. '
'Please upgrade "ikpy" Python module to version "3.0" or newer with this command: "pip install --upgrade ikpy"')
# Initialize the Webots Supervisor.
supervisor = Supervisor()
timeStep = int(4 * supervisor.getBasicTimeStep())
# Create the arm chain from the URDF
filename = None
with tempfile.NamedTemporaryFile(suffix='.urdf', delete=False) as file:
filename = file.name
file.write(supervisor.getUrdf().encode('utf-8'))
armChain = Chain.from_urdf_file(filename)
# Initialize the arm motors.
motors = []
for link in armChain.links:
if 'sensor' in link.name:
motor = supervisor.getMotor(link.name.replace('sensor', 'motor'))
motor.setVelocity(1.0)
motors.append(motor)
# Get the arm and target nodes.
target = supervisor.getFromDef('TARGET')
arm = supervisor.getSelf()
# Loop 1: Draw a circle on the paper sheet.
print('Draw a circle on the paper sheet...')
while supervisor.step(timeStep) != -1:
t = supervisor.getTime()
# Use the circle equation relatively to the arm base as an input of the IK algorithm.
x = 0.25 * math.cos(t) + 1.1
y = 0.25 * math.sin(t) - 0.95
z = 0.05
# Call "ikpy" to compute the inverse kinematics of the arm.
ikResults = armChain.inverse_kinematics([x, y, z])
# Actuate the 3 first arm motors with the IK results.
for i in range(3):
motors[i].setPosition(ikResults[i + 1])
# Keep the hand orientation down.
motors[4].setPosition(-ikResults[2] - ikResults[3] + math.pi / 2)
# Keep the hand orientation perpendicular.
motors[5].setPosition(ikResults[1])
# Conditions to start/stop drawing and leave this loop.
if supervisor.getTime() > 2 * math.pi + 1.5:
break
elif supervisor.getTime() > 1.5:
# Note: start to draw at 1.5 second to be sure the arm is well located.
supervisor.getPen('pen').write(True)
# Loop 2: Move the arm hand to the target.
print('Move the yellow and black sphere to move the arm...')
while supervisor.step(timeStep) != -1:
# Get the absolute postion of the target and the arm base.
targetPosition = target.getPosition()
armPosition = arm.getPosition()
# Compute the position of the target relatively to the arm.
# x and y axis are inverted because the arm is not aligned with the Webots global axes.
x = targetPosition[0] - armPosition[0]
y = - (targetPosition[2] - armPosition[2])
z = targetPosition[1] - armPosition[1]
# Call "ikpy" to compute the inverse kinematics of the arm.
ikResults = armChain.inverse_kinematics([x, y, z])
# Actuate the 3 first arm motors with the IK results.
for i in range(len(motors)):
motors[i].setPosition(ikResults[i + 1])
| 36.76699 | 125 | 0.69633 |
416b589288ad3cbf3bffa948213ab497f3933ed4 | 10,477 | py | Python | venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortimanager/plugins/modules/fmgr_system_ha_peer.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 1 | 2020-01-22T13:11:23.000Z | 2020-01-22T13:11:23.000Z | venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortimanager/plugins/modules/fmgr_system_ha_peer.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 12 | 2020-02-21T07:24:52.000Z | 2020-04-14T09:54:32.000Z | venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortimanager/plugins/modules/fmgr_system_ha_peer.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | null | null | null | #!/usr/bin/python
from __future__ import absolute_import, division, print_function
# Copyright 2019-2021 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fmgr_system_ha_peer
short_description: Peer.
description:
- This module is able to configure a FortiManager device.
- Examples include all parameters and values which need to be adjusted to data sources before usage.
version_added: "2.10"
author:
- Link Zheng (@chillancezen)
- Jie Xue (@JieX19)
- Frank Shen (@fshen01)
- Hongbin Lu (@fgtdev-hblu)
notes:
- Running in workspace locking mode is supported in this FortiManager module, the top
level parameters workspace_locking_adom and workspace_locking_timeout help do the work.
- To create or update an object, use state present directive.
- To delete an object, use state absent directive.
- Normally, running one module can fail when a non-zero rc is returned. you can also override
the conditions to fail or succeed with parameters rc_failed and rc_succeeded
options:
enable_log:
description: Enable/Disable logging for task
required: false
type: bool
default: false
proposed_method:
description: The overridden method for the underlying Json RPC request
required: false
type: str
choices:
- update
- set
- add
bypass_validation:
description: only set to True when module schema diffs with FortiManager API structure, module continues to execute without validating parameters
required: false
type: bool
default: false
workspace_locking_adom:
description: the adom to lock for FortiManager running in workspace mode, the value can be global and others including root
required: false
type: str
workspace_locking_timeout:
description: the maximum time in seconds to wait for other user to release the workspace lock
required: false
type: int
default: 300
state:
description: the directive to create, update or delete an object
type: str
required: true
choices:
- present
- absent
rc_succeeded:
description: the rc codes list with which the conditions to succeed will be overriden
type: list
required: false
rc_failed:
description: the rc codes list with which the conditions to fail will be overriden
type: list
required: false
system_ha_peer:
description: the top level parameters set
required: false
type: dict
suboptions:
id:
type: int
default: 0
description: 'Id.'
ip:
type: str
default: '0.0.0.0'
description: 'IP address of peer.'
ip6:
type: str
default: '::'
description: 'IP address (V6) of peer.'
serial-number:
type: str
description: 'Serial number of peer.'
'''
EXAMPLES = '''
- name: gathering fortimanager facts
hosts: fortimanager00
gather_facts: no
connection: httpapi
collections:
- fortinet.fortimanager
vars:
ansible_httpapi_use_ssl: True
ansible_httpapi_validate_certs: False
ansible_httpapi_port: 443
tasks:
- name: retrieve all the HA peers
fmgr_fact:
facts:
selector: 'system_ha_peer'
params:
peer: ''
- hosts: fortimanager00
collections:
- fortinet.fortimanager
connection: httpapi
vars:
ansible_httpapi_use_ssl: True
ansible_httpapi_validate_certs: False
ansible_httpapi_port: 443
tasks:
- name: Peer.
fmgr_system_ha_peer:
bypass_validation: False
state: present
system_ha_peer:
id: 3
ip: '11.11.11.5' # Required, could not be 0.0.0.0
serial-number: FMG-VM0000000003 # need a valid serial number
'''
RETURN = '''
request_url:
description: The full url requested
returned: always
type: str
sample: /sys/login/user
response_code:
description: The status of api request
returned: always
type: int
sample: 0
response_message:
description: The descriptive message of the api response
type: str
returned: always
sample: OK.
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import NAPIManager
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_galaxy_version
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_parameter_bypass
def main():
jrpc_urls = [
'/cli/global/system/ha/peer'
]
perobject_jrpc_urls = [
'/cli/global/system/ha/peer/{peer}'
]
url_params = []
module_primary_key = 'id'
module_arg_spec = {
'enable_log': {
'type': 'bool',
'required': False,
'default': False
},
'forticloud_access_token': {
'type': 'str',
'required': False,
'no_log': True
},
'proposed_method': {
'type': 'str',
'required': False,
'choices': [
'set',
'update',
'add'
]
},
'bypass_validation': {
'type': 'bool',
'required': False,
'default': False
},
'workspace_locking_adom': {
'type': 'str',
'required': False
},
'workspace_locking_timeout': {
'type': 'int',
'required': False,
'default': 300
},
'rc_succeeded': {
'required': False,
'type': 'list'
},
'rc_failed': {
'required': False,
'type': 'list'
},
'state': {
'type': 'str',
'required': True,
'choices': [
'present',
'absent'
]
},
'system_ha_peer': {
'required': False,
'type': 'dict',
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'options': {
'id': {
'required': True,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'int'
},
'ip': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'ip6': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'serial-number': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
}
}
}
}
params_validation_blob = []
check_galaxy_version(module_arg_spec)
module = AnsibleModule(argument_spec=check_parameter_bypass(module_arg_spec, 'system_ha_peer'),
supports_check_mode=False)
fmgr = None
if module._socket_path:
connection = Connection(module._socket_path)
connection.set_option('enable_log', module.params['enable_log'] if 'enable_log' in module.params else False)
connection.set_option('forticloud_access_token',
module.params['forticloud_access_token'] if 'forticloud_access_token' in module.params else None)
fmgr = NAPIManager(jrpc_urls, perobject_jrpc_urls, module_primary_key, url_params, module, connection, top_level_schema_name='data')
fmgr.validate_parameters(params_validation_blob)
fmgr.process_curd(argument_specs=module_arg_spec)
else:
module.fail_json(msg='MUST RUN IN HTTPAPI MODE')
module.exit_json(meta=module.params)
if __name__ == '__main__':
main()
| 31.557229 | 153 | 0.523528 |
2068031265864629f085fe3327956c69c5a00edc | 1,052 | py | Python | examples/ignore_errors.py | eddiechapman/XlsxWriter | c636117ab30e64e4b7b824c9105595c42887c2c9 | [
"BSD-2-Clause-FreeBSD"
] | 2,766 | 2015-01-02T17:36:42.000Z | 2022-03-31T09:23:30.000Z | examples/ignore_errors.py | xiaolanmeng86/XlsxWriter | 6c3ea23a410e8216eab8f5751e5544ffb444b3da | [
"BSD-2-Clause-FreeBSD"
] | 683 | 2015-01-03T09:55:02.000Z | 2022-03-31T07:18:15.000Z | examples/ignore_errors.py | xiaolanmeng86/XlsxWriter | 6c3ea23a410e8216eab8f5751e5544ffb444b3da | [
"BSD-2-Clause-FreeBSD"
] | 636 | 2015-01-05T01:57:08.000Z | 2022-03-25T18:42:41.000Z | ##############################################################################
#
# An example of turning off worksheet cells errors/warnings using the
# XlsxWriter Python module.
#
# Copyright 2013-2021, John McNamara, jmcnamara@cpan.org
#
import xlsxwriter
workbook = xlsxwriter.Workbook('ignore_errors.xlsx')
worksheet = workbook.add_worksheet()
# Write strings that looks like numbers. This will cause an Excel warning.
worksheet.write_string('C2', '123')
worksheet.write_string('C3', '123')
# Write a divide by zero formula. This will also cause an Excel warning.
worksheet.write_formula('C5', '=1/0')
worksheet.write_formula('C6', '=1/0')
# Turn off some of the warnings:
worksheet.ignore_errors({'number_stored_as_text': 'C3', 'eval_error': 'C6'})
# Write some descriptions for the cells and make the column wider for clarity.
worksheet.set_column('B:B', 16, None)
worksheet.write('B2', 'Warning:')
worksheet.write('B3', 'Warning turned off:')
worksheet.write('B5', 'Warning:')
worksheet.write('B6', 'Warning turned off:')
workbook.close()
| 32.875 | 78 | 0.690114 |
f898a951cc50940f1aad54a6afc6914e812191c1 | 877 | py | Python | kubernetes/test/test_v1_host_alias.py | craigtracey/python | 177564c655c0ea3f9cf38e61ca275ef6c6256aab | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v1_host_alias.py | craigtracey/python | 177564c655c0ea3f9cf38e61ca275ef6c6256aab | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v1_host_alias.py | craigtracey/python | 177564c655c0ea3f9cf38e61ca275ef6c6256aab | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.9.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import client
from client.rest import ApiException
from client.models.v1_host_alias import V1HostAlias
class TestV1HostAlias(unittest.TestCase):
""" V1HostAlias unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1HostAlias(self):
"""
Test V1HostAlias
"""
# FIXME: construct object with mandatory attributes with example values
#model = client.models.v1_host_alias.V1HostAlias()
pass
if __name__ == '__main__':
unittest.main()
| 19.488889 | 105 | 0.685291 |
cce9ec3c6d15526ead6ca7b37d4d1bacfee28ca6 | 520 | py | Python | database.py | sdkskdks/assignment4 | a0c9c3e79883504415e17dcfed3f84bfbeca0091 | [
"CC0-1.0"
] | null | null | null | database.py | sdkskdks/assignment4 | a0c9c3e79883504415e17dcfed3f84bfbeca0091 | [
"CC0-1.0"
] | null | null | null | database.py | sdkskdks/assignment4 | a0c9c3e79883504415e17dcfed3f84bfbeca0091 | [
"CC0-1.0"
] | null | null | null | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://postgres:<password>@localhost/<database_name>'
db = SQLAlchemy(app)
class Tableone(db.Model):
__tablename__ = 'tableone'
id = db.Column(db.Integer, primary_key=True)
coin_name = db.Column( db.Unicode)
latest_news = db.Column(db.Unicode)
def __init__(self, coin_name, latest_news):
self.coin_name = coin_name
self.latest_news = latest_news
| 28.888889 | 100 | 0.728846 |
f8b954ca0bb2a2320eccb54ae6d3c4faeddf3d75 | 23,561 | py | Python | iot/api-client/manager/manager.py | marford5/DA320 | d93f8e90c974b24c67f57669d388ae84f705b0ed | [
"Apache-2.0"
] | null | null | null | iot/api-client/manager/manager.py | marford5/DA320 | d93f8e90c974b24c67f57669d388ae84f705b0ed | [
"Apache-2.0"
] | null | null | null | iot/api-client/manager/manager.py | marford5/DA320 | d93f8e90c974b24c67f57669d388ae84f705b0ed | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Example of using the Google Cloud IoT Core device manager to administer
devices.
Usage example:
python manager.py \\
--project_id=my-project-id \\
--pubsub_topic=projects/my-project-id/topics/my-topic-id \\
--ec_public_key_file=../ec_public.pem \\
--rsa_certificate_file=../rsa_cert.pem \\
--service_account_json=$HOME/service_account.json
list
"""
import argparse
import base64
import io
import os
import sys
import time
from google.cloud import pubsub
from google.oauth2 import service_account
from googleapiclient import discovery
from googleapiclient.errors import HttpError
def create_iot_topic(project, topic_name):
"""Creates a PubSub Topic and grants access to Cloud IoT Core."""
pubsub_client = pubsub.PublisherClient()
topic_path = pubsub_client.topic_path(project, topic_name)
topic = pubsub_client.create_topic(topic_path)
policy = pubsub_client.get_iam_policy(topic_path)
policy.bindings.add(
role='roles/pubsub.publisher',
members=['serviceAccount:cloud-iot@system.gserviceaccount.com'])
pubsub_client.set_iam_policy(topic_path, policy)
return topic
def get_client(service_account_json):
"""Returns an authorized API client by discovering the IoT API and creating
a service object using the service account credentials JSON."""
api_scopes = ['https://www.googleapis.com/auth/cloud-platform']
api_version = 'v1'
discovery_api = 'https://cloudiot.googleapis.com/$discovery/rest'
service_name = 'cloudiotcore'
credentials = service_account.Credentials.from_service_account_file(
service_account_json)
scoped_credentials = credentials.with_scopes(api_scopes)
discovery_url = '{}?version={}'.format(
discovery_api, api_version)
return discovery.build(
service_name,
api_version,
discoveryServiceUrl=discovery_url,
credentials=scoped_credentials)
def create_rs256_device(
service_account_json, project_id, cloud_region, registry_id, device_id,
certificate_file):
"""Create a new device with the given id, using RS256 for
authentication."""
registry_name = 'projects/{}/locations/{}/registries/{}'.format(
project_id, cloud_region, registry_id)
client = get_client(service_account_json)
with io.open(certificate_file) as f:
certificate = f.read()
# Note: You can have multiple credentials associated with a device.
device_template = {
'id': device_id,
'credentials': [{
'publicKey': {
'format': 'RSA_X509_PEM',
'key': certificate
}
}]
}
devices = client.projects().locations().registries().devices()
return devices.create(parent=registry_name, body=device_template).execute()
def create_es256_device(
service_account_json, project_id, cloud_region, registry_id,
device_id, public_key_file):
"""Create a new device with the given id, using ES256 for
authentication."""
registry_name = 'projects/{}/locations/{}/registries/{}'.format(
project_id, cloud_region, registry_id)
client = get_client(service_account_json)
with io.open(public_key_file) as f:
public_key = f.read()
# Note: You can have multiple credentials associated with a device.
device_template = {
'id': device_id,
'credentials': [{
'publicKey': {
'format': 'ES256_PEM',
'key': public_key
}
}]
}
devices = client.projects().locations().registries().devices()
return devices.create(parent=registry_name, body=device_template).execute()
def create_unauth_device(
service_account_json, project_id, cloud_region, registry_id,
device_id):
"""Create a new device without authentication."""
registry_name = 'projects/{}/locations/{}/registries/{}'.format(
project_id, cloud_region, registry_id)
client = get_client(service_account_json)
device_template = {
'id': device_id,
}
devices = client.projects().locations().registries().devices()
return devices.create(parent=registry_name, body=device_template).execute()
def delete_device(
service_account_json, project_id, cloud_region, registry_id,
device_id):
"""Delete the device with the given id."""
print('Delete device')
client = get_client(service_account_json)
registry_name = 'projects/{}/locations/{}/registries/{}'.format(
project_id, cloud_region, registry_id)
device_name = '{}/devices/{}'.format(registry_name, device_id)
devices = client.projects().locations().registries().devices()
return devices.delete(name=device_name).execute()
def delete_registry(
service_account_json, project_id, cloud_region, registry_id):
"""Deletes the specified registry."""
print('Delete registry')
client = get_client(service_account_json)
registry_name = 'projects/{}/locations/{}/registries/{}'.format(
project_id, cloud_region, registry_id)
registries = client.projects().locations().registries()
return registries.delete(name=registry_name).execute()
def get_device(
service_account_json, project_id, cloud_region, registry_id,
device_id):
"""Retrieve the device with the given id."""
print('Getting device')
client = get_client(service_account_json)
registry_name = 'projects/{}/locations/{}/registries/{}'.format(
project_id, cloud_region, registry_id)
device_name = '{}/devices/{}'.format(registry_name, device_id)
devices = client.projects().locations().registries().devices()
device = devices.get(name=device_name).execute()
print('Id : {}'.format(device.get('id')))
print('Name : {}'.format(device.get('name')))
print('Credentials:')
if device.get('credentials') is not None:
for credential in device.get('credentials'):
keyinfo = credential.get('publicKey')
print('\tcertificate: \n{}'.format(keyinfo.get('key')))
print('\tformat : {}'.format(keyinfo.get('format')))
print('\texpiration: {}'.format(credential.get('expirationTime')))
print('Config:')
print('\tdata: {}'.format(device.get('config').get('data')))
print('\tversion: {}'.format(device.get('config').get('version')))
print('\tcloudUpdateTime: {}'.format(device.get('config').get(
'cloudUpdateTime')))
return device
def get_state(
service_account_json, project_id, cloud_region, registry_id,
device_id):
"""Retrieve a device's state blobs."""
client = get_client(service_account_json)
registry_name = 'projects/{}/locations/{}/registries/{}'.format(
project_id, cloud_region, registry_id)
device_name = '{}/devices/{}'.format(registry_name, device_id)
devices = client.projects().locations().registries().devices()
state = devices.states().list(name=device_name, numStates=5).execute()
print('State: {}\n'.format(state))
return state
def list_devices(
service_account_json, project_id, cloud_region, registry_id):
"""List all devices in the registry."""
print('Listing devices')
registry_path = 'projects/{}/locations/{}/registries/{}'.format(
project_id, cloud_region, registry_id)
client = get_client(service_account_json)
devices = client.projects().locations().registries().devices(
).list(parent=registry_path).execute().get('devices', [])
for device in devices:
print('Device: {} : {}'.format(
device.get('numId'),
device.get('id')))
return devices
def list_registries(service_account_json, project_id, cloud_region):
"""List all registries in the project."""
print('Listing Registries')
registry_path = 'projects/{}/locations/{}'.format(
project_id, cloud_region)
client = get_client(service_account_json)
registries = client.projects().locations().registries().list(
parent=registry_path).execute().get('deviceRegistries', [])
for registry in registries:
print('id: {}\n\tname: {}'.format(
registry.get('id'),
registry.get('name')))
return registries
def create_registry(
        service_account_json, project_id, cloud_region, pubsub_topic,
        registry_id):
    """ Creates a registry and returns the result. Returns an empty result if
    the registry already exists."""
    api = get_client(service_account_json)
    parent = 'projects/{}/locations/{}'.format(project_id, cloud_region)
    registry_body = {
        'eventNotificationConfigs': [{
            'pubsubTopicName': pubsub_topic
        }],
        'id': registry_id
    }
    request = api.projects().locations().registries().create(
        parent=parent, body=registry_body)
    try:
        created = request.execute()
    except HttpError:
        # Most commonly the registry already exists; callers (open_registry)
        # detect that condition via the "" sentinel returned here.
        print('Error, registry not created')
        return ""
    print('Created registry')
    return created
def get_registry(
        service_account_json, project_id, cloud_region, registry_id):
    """ Retrieves a device registry."""
    api = get_client(service_account_json)
    # Fully qualified registry resource name.
    registry_name = 'projects/{}/locations/{}/registries/{}'.format(
        project_id, cloud_region, registry_id)
    return api.projects().locations().registries().get(
        name=registry_name).execute()
def open_registry(
        service_account_json, project_id, cloud_region, pubsub_topic,
        registry_id):
    """Gets or creates a device registry."""
    print('Creating registry')
    response = create_registry(
        service_account_json, project_id, cloud_region,
        pubsub_topic, registry_id)
    # BUG FIX: create_registry() signals "already exists" by returning the
    # empty string.  The original test was `response is ""`, an identity
    # comparison against a literal that only worked through CPython string
    # interning and emits a SyntaxWarning on Python 3.8+.  Use equality.
    if response == "":
        # Device registry already exists
        print(
            'Registry {} already exists - looking it up instead.'.format(
                registry_id))
        response = get_registry(
            service_account_json, project_id, cloud_region,
            registry_id)
    print('Registry {} opened: '.format(response.get('name')))
    print(response)
def patch_es256_auth(
        service_account_json, project_id, cloud_region, registry_id,
        device_id, public_key_file):
    """Patch the device to add an ES256 public key to the device."""
    print('Patch device with ES256 certificate')
    api = get_client(service_account_json)
    # Read the PEM-encoded public key from disk.
    with io.open(public_key_file) as key_file:
        key_text = key_file.read()
    credentials_patch = {
        'credentials': [{
            'publicKey': {
                'format': 'ES256_PEM',
                'key': key_text
            }
        }]
    }
    device_path = (
        'projects/{}/locations/{}/registries/{}/devices/{}'.format(
            project_id, cloud_region, registry_id, device_id))
    # Only the credentials field is replaced, per the updateMask.
    return api.projects().locations().registries().devices().patch(
        name=device_path, updateMask='credentials',
        body=credentials_patch).execute()
def patch_rsa256_auth(
        service_account_json, project_id, cloud_region, registry_id, device_id,
        public_key_file):
    """Patch the device to add an RSA256 public key to the device."""
    print('Patch device with RSA256 certificate')
    api = get_client(service_account_json)
    # Read the PEM-encoded X.509 certificate from disk.
    with io.open(public_key_file) as key_file:
        key_text = key_file.read()
    credentials_patch = {
        'credentials': [{
            'publicKey': {
                'format': 'RSA_X509_PEM',
                'key': key_text
            }
        }]
    }
    device_path = (
        'projects/{}/locations/{}/registries/{}/devices/{}'.format(
            project_id, cloud_region, registry_id, device_id))
    # Only the credentials field is replaced, per the updateMask.
    return api.projects().locations().registries().devices().patch(
        name=device_path, updateMask='credentials',
        body=credentials_patch).execute()
def set_config(
        service_account_json, project_id, cloud_region, registry_id, device_id,
        version, config):
    """Pushes a configuration string to a device at the given version."""
    print('Set device configuration')
    api = get_client(service_account_json)
    device_path = 'projects/{}/locations/{}/registries/{}/devices/{}'.format(
        project_id, cloud_region, registry_id, device_id)
    # The API expects the config payload URL-safe base64 encoded.
    encoded = base64.urlsafe_b64encode(
        config.encode('utf-8')).decode('ascii')
    request_body = {
        'versionToUpdate': version,
        'binaryData': encoded
    }
    return api.projects().locations().registries().devices(
        ).modifyCloudToDeviceConfig(
            name=device_path, body=request_body).execute()
def get_config_versions(
        service_account_json, project_id, cloud_region, registry_id,
        device_id):
    """Lists versions of a device config in descending order (newest first)."""
    device_path = (
        'projects/{}/locations/{}/registries/{}/devices/{}'.format(
            project_id, cloud_region, registry_id, device_id))
    api = get_client(service_account_json)
    response = api.projects().locations().registries().devices(
        ).configVersions().list(name=device_path).execute()
    configs = response.get('deviceConfigs', [])
    for cfg in configs:
        print('version: {}\n\tcloudUpdateTime: {}\n\t binaryData: {}'.format(
            cfg.get('version'),
            cfg.get('cloudUpdateTime'),
            cfg.get('binaryData')))
    return configs
def get_iam_permissions(
        service_account_json, project_id, cloud_region, registry_id):
    """Retrieves IAM permissions for the given registry."""
    registry_path = 'projects/{}/locations/{}/registries/{}'.format(
        project_id, cloud_region, registry_id)
    # getIamPolicy requires a (possibly empty) request body.
    return get_client(service_account_json).projects().locations(
        ).registries().getIamPolicy(
            resource=registry_path, body={}).execute()
def set_iam_permissions(
        service_account_json, project_id, cloud_region, registry_id, role,
        member):
    """Sets IAM permissions for the given registry to a single role/member."""
    api = get_client(service_account_json)
    registry_path = 'projects/{}/locations/{}/registries/{}'.format(
        project_id, cloud_region, registry_id)
    # The whole policy is replaced with one binding of `member` to `role`.
    policy_body = {
        "policy": {
            "bindings": [{
                "members": [member],
                "role": role
            }]
        }
    }
    return api.projects().locations().registries().setIamPolicy(
        resource=registry_path, body=policy_body).execute()
def parse_command_line_args():
    """Parse command line arguments."""
    # Generated default so repeated runs don't collide on registry names.
    default_registry = 'cloudiot_device_manager_example_registry_{}'.format(
        int(time.time()))
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    # Required arguments
    parser.add_argument(
        '--pubsub_topic',
        required=True,
        help=('Google Cloud Pub/Sub topic. '
              'Format is projects/project_id/topics/topic-id'))
    # Optional arguments
    parser.add_argument(
        '--cloud_region', default='us-central1', help='GCP cloud region')
    parser.add_argument(
        '--config',
        default=None,
        help='Configuration sent to a device.')
    parser.add_argument(
        '--device_id',
        default=None,
        help='Device id.')
    parser.add_argument(
        '--ec_public_key_file',
        default=None,
        help='Path to public ES256 key file.')
    parser.add_argument(
        '--project_id',
        default=os.environ.get("GOOGLE_CLOUD_PROJECT"),
        help='GCP cloud project name.')
    parser.add_argument(
        '--registry_id',
        default=default_registry,
        help='Registry id. If not set, a name will be generated.')
    parser.add_argument(
        '--rsa_certificate_file',
        default=None,
        help='Path to RS256 certificate file.')
    parser.add_argument(
        '--service_account_json',
        default=os.environ.get("GOOGLE_APPLICATION_CREDENTIALS"),
        help='Path to service account json file.')
    parser.add_argument(
        '--version',
        default=None,
        help='Version number for setting device configuration.')
    parser.add_argument(
        '--member',
        default=None,
        help='Member used for IAM commands.')
    parser.add_argument(
        '--role',
        default=None,
        help='Role used for IAM commands.')
    # Command subparser
    command = parser.add_subparsers(dest='command')
    command.add_parser('create-es256', help=create_es256_device.__doc__)
    command.add_parser('create-registry', help=open_registry.__doc__)
    command.add_parser('create-rsa256', help=create_rs256_device.__doc__)
    command.add_parser('create-topic', help=create_iot_topic.__doc__)
    command.add_parser('create-unauth', help=create_unauth_device.__doc__)
    command.add_parser('delete-device', help=delete_device.__doc__)
    command.add_parser('delete-registry', help=delete_registry.__doc__)
    command.add_parser('get', help=get_device.__doc__)
    command.add_parser('get-config-versions', help=get_config_versions.__doc__)
    command.add_parser('get-iam-permissions', help=get_iam_permissions.__doc__)
    command.add_parser('get-registry', help=get_registry.__doc__)
    command.add_parser('get-state', help=get_state.__doc__)
    command.add_parser('list', help=list_devices.__doc__)
    command.add_parser('list-registries', help=list_registries.__doc__)
    command.add_parser('patch-es256', help=patch_es256_auth.__doc__)
    command.add_parser('patch-rs256', help=patch_rsa256_auth.__doc__)
    # BUG FIX: 'set-config' previously reused patch_rsa256_auth.__doc__,
    # showing the wrong help text for this sub-command.
    command.add_parser('set-config', help=set_config.__doc__)
    command.add_parser('set-iam-permissions', help=set_iam_permissions.__doc__)
    return parser.parse_args()
def run_create(args):
    """Handles commands that create devices, registries, or topics."""
    # Credentials, project and region are common to every create command.
    base = (args.service_account_json, args.project_id, args.cloud_region)
    if args.command == 'create-rsa256':
        create_rs256_device(*base, args.registry_id, args.device_id,
                            args.rsa_certificate_file)
    elif args.command == 'create-es256':
        create_es256_device(*base, args.registry_id, args.device_id,
                            args.ec_public_key_file)
    elif args.command == 'create-unauth':
        create_unauth_device(*base, args.registry_id, args.device_id)
    elif args.command == 'create-registry':
        open_registry(*base, args.pubsub_topic, args.registry_id)
    elif args.command == 'create-topic':
        # Topic creation does not need service-account credentials here.
        create_iot_topic(args.project_id, args.pubsub_topic)
def run_get(args):
    """Handles commands that read device or registry information."""
    if args.command == 'get':
        get_device(
            args.service_account_json, args.project_id,
            args.cloud_region, args.registry_id, args.device_id)
    elif args.command == 'get-config-versions':
        # BUG FIX: this branch previously called get_device(), so the
        # 'get-config-versions' command never listed config versions.
        get_config_versions(
            args.service_account_json, args.project_id,
            args.cloud_region, args.registry_id, args.device_id)
    elif args.command == 'get-state':
        get_state(
            args.service_account_json, args.project_id,
            args.cloud_region, args.registry_id, args.device_id)
    elif args.command == 'get-iam-permissions':
        print(get_iam_permissions(
            args.service_account_json, args.project_id,
            args.cloud_region, args.registry_id))
    elif args.command == 'get-registry':
        print(get_registry(
            args.service_account_json, args.project_id,
            args.cloud_region, args.registry_id))
def run_command(args):
    """Calls the program using the specified command."""
    if args.project_id is None:
        print('You must specify a project ID or set the environment variable.')
        return
    # ROBUSTNESS FIX: argparse sub-commands are optional by default, so
    # args.command is None when no command was given; without this guard
    # the startswith() calls below raise AttributeError.
    if args.command is None:
        print('You must specify a command.')
        return
    if args.command.startswith('create'):
        run_create(args)
    elif args.command.startswith('get'):
        run_get(args)
    elif args.command == 'delete-device':
        delete_device(
            args.service_account_json, args.project_id,
            args.cloud_region, args.registry_id, args.device_id)
    elif args.command == 'delete-registry':
        delete_registry(
            args.service_account_json, args.project_id,
            args.cloud_region, args.registry_id)
    elif args.command == 'list':
        list_devices(
            args.service_account_json, args.project_id,
            args.cloud_region, args.registry_id)
    elif args.command == 'list-registries':
        list_registries(
            args.service_account_json, args.project_id,
            args.cloud_region)
    elif args.command == 'patch-es256':
        if args.ec_public_key_file is None:
            sys.exit('Error: specify --ec_public_key_file')
        patch_es256_auth(
            args.service_account_json, args.project_id,
            args.cloud_region, args.registry_id, args.device_id,
            args.ec_public_key_file)
    elif args.command == 'patch-rs256':
        if args.rsa_certificate_file is None:
            sys.exit('Error: specify --rsa_certificate_file')
        patch_rsa256_auth(
            args.service_account_json, args.project_id,
            args.cloud_region, args.registry_id, args.device_id,
            args.rsa_certificate_file)
    elif args.command == 'set-iam-permissions':
        if args.member is None:
            sys.exit('Error: specify --member')
        if args.role is None:
            sys.exit('Error: specify --role')
        set_iam_permissions(
            args.service_account_json, args.project_id,
            args.cloud_region, args.registry_id, args.role, args.member)
    elif args.command == 'set-config':
        if args.config is None:
            sys.exit('Error: specify --config')
        if args.version is None:
            sys.exit('Error: specify --version')
        set_config(
            args.service_account_json, args.project_id,
            args.cloud_region, args.registry_id, args.device_id,
            args.version, args.config)
def main():
    """Entry point: parse command-line arguments and dispatch the command."""
    args = parse_command_line_args()
    run_command(args)


if __name__ == '__main__':
    main()
| 34.85355 | 79 | 0.649336 |
8d6570a892f9bf0c0389683d9f1ca6b7ed36c2b3 | 1,848 | py | Python | client.py | joao-p-marques/drive-throught-p2p | 5ec24127d771e50631b105402042aa876c307ba0 | [
"MIT"
] | null | null | null | client.py | joao-p-marques/drive-throught-p2p | 5ec24127d771e50631b105402042aa876c307ba0 | [
"MIT"
] | null | null | null | client.py | joao-p-marques/drive-throught-p2p | 5ec24127d771e50631b105402042aa876c307ba0 | [
"MIT"
] | null | null | null | # coding: utf-8
import time
import pickle
import socket
import random
import logging
import argparse
# Log everything (DEBUG and up) with a timestamped, column-aligned format.
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s %(name)-15s %(levelname)-8s %(message)s',
                    datefmt='%m-%d %H:%M:%S')
def main(port, ring, timeout):
    """Order food from the drive-through ring and pick it up.

    Returns 0 when the delivered order matches our ticket, -1 otherwise.
    """
    # Create a logger for the client
    logger = logging.getLogger('Client')
    # RESOURCE FIX: the `with` block guarantees the UDP socket is closed
    # even when recvfrom() times out and raises -- the original only
    # closed the socket on the success path.
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
        sock.settimeout(timeout)
        sock.bind(('localhost', port))
        # Simulate a customer arriving after a random (gaussian) delay.
        delta = random.gauss(2, 0.5)
        logger.info('Wait for %f seconds', delta)
        time.sleep(delta)
        # Request some food
        logger.info('Request some food...')
        order = pickle.dumps({'method': 'ORDER', 'args': {'hamburger': 1}})
        sock.sendto(order, ring)
        # Wait for the ticket issued for our order.
        data, _addr = sock.recvfrom(1024)
        reply = pickle.loads(data)
        logger.info('Received ticket %s', reply['args'])
        my_ticket = reply['args']['orderTicket']
        # Present the ticket to pick the order up.
        logger.info('Pickup order %s', reply['args'])
        pickup = pickle.dumps({"method": 'PICKUP', "args": reply['args']})
        sock.sendto(pickup, ring)
        # Wait for the prepared order.
        data, _addr = sock.recvfrom(1024)
        reply = pickle.loads(data)
        logger.info('Got order %s', reply['args'])
    if reply['args']['ticket'] == my_ticket:
        return 0
    else:
        return -1
if __name__ == '__main__':
    # CLI: local bind port, ring entry port, and socket receive timeout.
    parser = argparse.ArgumentParser(description='Pi HTTP server')
    parser.add_argument('-p', dest='port', type=int, help='client port', default=5004)
    parser.add_argument('-r', dest='ring', type=int, help='ring ports ', default=5000)
    parser.add_argument('-t', dest='timeout', type=int, help='socket timeout', default=20)
    args = parser.parse_args()
    main(args.port, ('localhost', args.ring), args.timeout)
| 27.176471 | 90 | 0.617424 |
e151770b97fe0756af454c466a3c3401f9c7a4cf | 998 | py | Python | examples/benchmark/simple-vispy.py | MatthieuDartiailh/vispy | 09d429be361a148b0614a192f56d4070c624072c | [
"BSD-3-Clause"
] | null | null | null | examples/benchmark/simple-vispy.py | MatthieuDartiailh/vispy | 09d429be361a148b0614a192f56d4070c624072c | [
"BSD-3-Clause"
] | null | null | null | examples/benchmark/simple-vispy.py | MatthieuDartiailh/vispy | 09d429be361a148b0614a192f56d4070c624072c | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2014, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
import time
from vispy import app
from vispy.gloo import gl
app.use('qt')
# app.use('glut')
# app.use('pyglet')

# A canvas whose draw handler does no real work: this benchmarks the
# overhead of the event loop / draw dispatch itself.
canvas = app.Canvas(size=(512, 512), title = "Do nothing benchmark (vispy)",
                    close_keys='escape')


@canvas.connect
def on_draw(event):
    """Clear the frame, count it, and periodically report the FPS."""
    # Frame counters live at module scope so they persist between draws.
    global t, t0, frames
    gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
    t = time.time()
    frames = frames + 1
    elapsed = (t - t0)  # seconds
    # Report roughly every 2.5 seconds, then restart the measurement window.
    if elapsed > 2.5:
        print("FPS : %.2f (%d frames in %.2f second)"
              % (frames / elapsed, frames, elapsed))
        t0, frames = t, 0
    # Request another draw immediately so the loop free-runs.
    canvas.update()


t0, frames, t = time.time(), 0, 0
canvas.show()
app.run()
| 27.722222 | 79 | 0.518036 |
cffe710f2f045fc2ddcf3bba85843e26af912a80 | 1,229 | py | Python | magenta/tensor2tensor/t2t_datagen.py | sleep-yearning/magenta | a03a14ef5a691ee9e3d336aa621281028dc5af32 | [
"Apache-2.0"
] | 1 | 2020-02-24T06:12:09.000Z | 2020-02-24T06:12:09.000Z | magenta/tensor2tensor/t2t_datagen.py | sleep-yearning/magenta | a03a14ef5a691ee9e3d336aa621281028dc5af32 | [
"Apache-2.0"
] | null | null | null | magenta/tensor2tensor/t2t_datagen.py | sleep-yearning/magenta | a03a14ef5a691ee9e3d336aa621281028dc5af32 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tensor2Tensor data generator for Magenta problems."""
# Registers all Magenta problems with Tensor2Tensor.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from magenta.tensor2tensor import models # pylint: disable=unused-import
from magenta.tensor2tensor import problems # pylint: disable=unused-import
from tensor2tensor.bin import t2t_datagen
import tensorflow.compat.v1 as tf
def main(argv):
  """Delegates straight to the Tensor2Tensor datagen entry point."""
  t2t_datagen.main(argv)


def console_entry_point():
  """Console-script entry point: enable INFO logging and run main()."""
  tf.logging.set_verbosity(tf.logging.INFO)
  tf.app.run(main)


if __name__ == '__main__':
  console_entry_point()
| 30.725 | 75 | 0.777868 |
77ddecb50c5cbd76164a9f111911a43e37d8f20c | 1,135 | py | Python | 30_day_leetcoding_challenge/2020_05/03-Ransom_Note.py | QuenLo/leecode | ce861103949510dc54fd5cb336bd992c40748de2 | [
"MIT"
] | 6 | 2018-06-13T06:48:42.000Z | 2020-11-25T10:48:13.000Z | 30_day_leetcoding_challenge/2020_05/03-Ransom_Note.py | QuenLo/leecode | ce861103949510dc54fd5cb336bd992c40748de2 | [
"MIT"
] | null | null | null | 30_day_leetcoding_challenge/2020_05/03-Ransom_Note.py | QuenLo/leecode | ce861103949510dc54fd5cb336bd992c40748de2 | [
"MIT"
] | null | null | null | class Solution1:
def canConstruct(self, ransomNote: str, magazine: str) -> bool:
C_r = Counter( ransomNote )
C_m = Counter( magazine )
for r in C_r:
if C_r[r] > C_m[r]:
return False
return True
class Solution2:
    def canConstruct(self, ransomNote: str, magazine: str) -> bool:
        """Count the note once; probe the magazine per distinct letter."""
        need = Counter(ransomNote)
        return all(magazine.count(letter) >= qty
                   for letter, qty in need.items())
class Solution3:
    def canConstruct(self, ransomNote: str, magazine: str) -> bool:
        """Consume letters from a mutable copy of the magazine."""
        pool = list(magazine)
        for letter in ransomNote:
            try:
                # remove() drops one occurrence, i.e. uses up one copy.
                pool.remove(letter)
            except ValueError:
                return False
        return True
class Solution4:
    def canConstruct(self, ransomNote: str, magazine: str) -> bool:
        """Strip one matching letter from the magazine per needed letter."""
        remaining = magazine
        for letter in ransomNote:
            if letter not in remaining:
                return False
            # Drop exactly one occurrence so duplicates are accounted for.
            remaining = remaining.replace(letter, "", 1)
        return True
| 26.395349 | 67 | 0.492511 |
d180caf6766f343a3e3b91684f9bb8883d5a632e | 33,908 | py | Python | sdk/python/pulumi_azure_native/network/v20201101/virtual_hub.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/network/v20201101/virtual_hub.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/network/v20201101/virtual_hub.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._inputs import *
__all__ = ['VirtualHubArgs', 'VirtualHub']
@pulumi.input_type
class VirtualHubArgs:
    # NOTE: this class is emitted by the Pulumi SDK Generator (see the file
    # header warning); keep changes to comments only so a regeneration does
    # not silently discard hand edits.
    def __init__(__self__, *,
                 resource_group_name: pulumi.Input[str],
                 address_prefix: Optional[pulumi.Input[str]] = None,
                 allow_branch_to_branch_traffic: Optional[pulumi.Input[bool]] = None,
                 azure_firewall: Optional[pulumi.Input['SubResourceArgs']] = None,
                 express_route_gateway: Optional[pulumi.Input['SubResourceArgs']] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 p2_s_vpn_gateway: Optional[pulumi.Input['SubResourceArgs']] = None,
                 route_table: Optional[pulumi.Input['VirtualHubRouteTableArgs']] = None,
                 security_partner_provider: Optional[pulumi.Input['SubResourceArgs']] = None,
                 security_provider_name: Optional[pulumi.Input[str]] = None,
                 sku: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 virtual_hub_name: Optional[pulumi.Input[str]] = None,
                 virtual_hub_route_table_v2s: Optional[pulumi.Input[Sequence[pulumi.Input['VirtualHubRouteTableV2Args']]]] = None,
                 virtual_router_asn: Optional[pulumi.Input[float]] = None,
                 virtual_router_ips: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 virtual_wan: Optional[pulumi.Input['SubResourceArgs']] = None,
                 vpn_gateway: Optional[pulumi.Input['SubResourceArgs']] = None):
        """
        The set of arguments for constructing a VirtualHub resource.
        :param pulumi.Input[str] resource_group_name: The resource group name of the VirtualHub.
        :param pulumi.Input[str] address_prefix: Address-prefix for this VirtualHub.
        :param pulumi.Input[bool] allow_branch_to_branch_traffic: Flag to control transit for VirtualRouter hub.
        :param pulumi.Input['SubResourceArgs'] azure_firewall: The azureFirewall associated with this VirtualHub.
        :param pulumi.Input['SubResourceArgs'] express_route_gateway: The expressRouteGateway associated with this VirtualHub.
        :param pulumi.Input[str] id: Resource ID.
        :param pulumi.Input[str] location: Resource location.
        :param pulumi.Input['SubResourceArgs'] p2_s_vpn_gateway: The P2SVpnGateway associated with this VirtualHub.
        :param pulumi.Input['VirtualHubRouteTableArgs'] route_table: The routeTable associated with this virtual hub.
        :param pulumi.Input['SubResourceArgs'] security_partner_provider: The securityPartnerProvider associated with this VirtualHub.
        :param pulumi.Input[str] security_provider_name: The Security Provider name.
        :param pulumi.Input[str] sku: The sku of this VirtualHub.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
        :param pulumi.Input[str] virtual_hub_name: The name of the VirtualHub.
        :param pulumi.Input[Sequence[pulumi.Input['VirtualHubRouteTableV2Args']]] virtual_hub_route_table_v2s: List of all virtual hub route table v2s associated with this VirtualHub.
        :param pulumi.Input[float] virtual_router_asn: VirtualRouter ASN.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] virtual_router_ips: VirtualRouter IPs.
        :param pulumi.Input['SubResourceArgs'] virtual_wan: The VirtualWAN to which the VirtualHub belongs.
        :param pulumi.Input['SubResourceArgs'] vpn_gateway: The VpnGateway associated with this VirtualHub.
        """
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        # Optional arguments are only forwarded when explicitly provided, so
        # unset inputs stay absent from the Pulumi property bag.
        if address_prefix is not None:
            pulumi.set(__self__, "address_prefix", address_prefix)
        if allow_branch_to_branch_traffic is not None:
            pulumi.set(__self__, "allow_branch_to_branch_traffic", allow_branch_to_branch_traffic)
        if azure_firewall is not None:
            pulumi.set(__self__, "azure_firewall", azure_firewall)
        if express_route_gateway is not None:
            pulumi.set(__self__, "express_route_gateway", express_route_gateway)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if p2_s_vpn_gateway is not None:
            pulumi.set(__self__, "p2_s_vpn_gateway", p2_s_vpn_gateway)
        if route_table is not None:
            pulumi.set(__self__, "route_table", route_table)
        if security_partner_provider is not None:
            pulumi.set(__self__, "security_partner_provider", security_partner_provider)
        if security_provider_name is not None:
            pulumi.set(__self__, "security_provider_name", security_provider_name)
        if sku is not None:
            pulumi.set(__self__, "sku", sku)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
        if virtual_hub_name is not None:
            pulumi.set(__self__, "virtual_hub_name", virtual_hub_name)
        if virtual_hub_route_table_v2s is not None:
            pulumi.set(__self__, "virtual_hub_route_table_v2s", virtual_hub_route_table_v2s)
        if virtual_router_asn is not None:
            pulumi.set(__self__, "virtual_router_asn", virtual_router_asn)
        if virtual_router_ips is not None:
            pulumi.set(__self__, "virtual_router_ips", virtual_router_ips)
        if virtual_wan is not None:
            pulumi.set(__self__, "virtual_wan", virtual_wan)
        if vpn_gateway is not None:
            pulumi.set(__self__, "vpn_gateway", vpn_gateway)

    # Each property below maps the snake_case Python name to the camelCase
    # Azure API property name via @pulumi.getter(name=...).
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The resource group name of the VirtualHub.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter(name="addressPrefix")
    def address_prefix(self) -> Optional[pulumi.Input[str]]:
        """
        Address-prefix for this VirtualHub.
        """
        return pulumi.get(self, "address_prefix")

    @address_prefix.setter
    def address_prefix(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "address_prefix", value)

    @property
    @pulumi.getter(name="allowBranchToBranchTraffic")
    def allow_branch_to_branch_traffic(self) -> Optional[pulumi.Input[bool]]:
        """
        Flag to control transit for VirtualRouter hub.
        """
        return pulumi.get(self, "allow_branch_to_branch_traffic")

    @allow_branch_to_branch_traffic.setter
    def allow_branch_to_branch_traffic(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "allow_branch_to_branch_traffic", value)

    @property
    @pulumi.getter(name="azureFirewall")
    def azure_firewall(self) -> Optional[pulumi.Input['SubResourceArgs']]:
        """
        The azureFirewall associated with this VirtualHub.
        """
        return pulumi.get(self, "azure_firewall")

    @azure_firewall.setter
    def azure_firewall(self, value: Optional[pulumi.Input['SubResourceArgs']]):
        pulumi.set(self, "azure_firewall", value)

    @property
    @pulumi.getter(name="expressRouteGateway")
    def express_route_gateway(self) -> Optional[pulumi.Input['SubResourceArgs']]:
        """
        The expressRouteGateway associated with this VirtualHub.
        """
        return pulumi.get(self, "express_route_gateway")

    @express_route_gateway.setter
    def express_route_gateway(self, value: Optional[pulumi.Input['SubResourceArgs']]):
        pulumi.set(self, "express_route_gateway", value)

    @property
    @pulumi.getter
    def id(self) -> Optional[pulumi.Input[str]]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")

    @id.setter
    def id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "id", value)

    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")

    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)

    @property
    @pulumi.getter(name="p2SVpnGateway")
    def p2_s_vpn_gateway(self) -> Optional[pulumi.Input['SubResourceArgs']]:
        """
        The P2SVpnGateway associated with this VirtualHub.
        """
        return pulumi.get(self, "p2_s_vpn_gateway")

    @p2_s_vpn_gateway.setter
    def p2_s_vpn_gateway(self, value: Optional[pulumi.Input['SubResourceArgs']]):
        pulumi.set(self, "p2_s_vpn_gateway", value)

    @property
    @pulumi.getter(name="routeTable")
    def route_table(self) -> Optional[pulumi.Input['VirtualHubRouteTableArgs']]:
        """
        The routeTable associated with this virtual hub.
        """
        return pulumi.get(self, "route_table")

    @route_table.setter
    def route_table(self, value: Optional[pulumi.Input['VirtualHubRouteTableArgs']]):
        pulumi.set(self, "route_table", value)

    @property
    @pulumi.getter(name="securityPartnerProvider")
    def security_partner_provider(self) -> Optional[pulumi.Input['SubResourceArgs']]:
        """
        The securityPartnerProvider associated with this VirtualHub.
        """
        return pulumi.get(self, "security_partner_provider")

    @security_partner_provider.setter
    def security_partner_provider(self, value: Optional[pulumi.Input['SubResourceArgs']]):
        pulumi.set(self, "security_partner_provider", value)

    @property
    @pulumi.getter(name="securityProviderName")
    def security_provider_name(self) -> Optional[pulumi.Input[str]]:
        """
        The Security Provider name.
        """
        return pulumi.get(self, "security_provider_name")

    @security_provider_name.setter
    def security_provider_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "security_provider_name", value)

    @property
    @pulumi.getter
    def sku(self) -> Optional[pulumi.Input[str]]:
        """
        The sku of this VirtualHub.
        """
        return pulumi.get(self, "sku")

    @sku.setter
    def sku(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "sku", value)

    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")

    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)

    @property
    @pulumi.getter(name="virtualHubName")
    def virtual_hub_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the VirtualHub.
        """
        return pulumi.get(self, "virtual_hub_name")

    @virtual_hub_name.setter
    def virtual_hub_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "virtual_hub_name", value)

    @property
    @pulumi.getter(name="virtualHubRouteTableV2s")
    def virtual_hub_route_table_v2s(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['VirtualHubRouteTableV2Args']]]]:
        """
        List of all virtual hub route table v2s associated with this VirtualHub.
        """
        return pulumi.get(self, "virtual_hub_route_table_v2s")

    @virtual_hub_route_table_v2s.setter
    def virtual_hub_route_table_v2s(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['VirtualHubRouteTableV2Args']]]]):
        pulumi.set(self, "virtual_hub_route_table_v2s", value)

    @property
    @pulumi.getter(name="virtualRouterAsn")
    def virtual_router_asn(self) -> Optional[pulumi.Input[float]]:
        """
        VirtualRouter ASN.
        """
        return pulumi.get(self, "virtual_router_asn")

    @virtual_router_asn.setter
    def virtual_router_asn(self, value: Optional[pulumi.Input[float]]):
        pulumi.set(self, "virtual_router_asn", value)

    @property
    @pulumi.getter(name="virtualRouterIps")
    def virtual_router_ips(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        VirtualRouter IPs.
        """
        return pulumi.get(self, "virtual_router_ips")

    @virtual_router_ips.setter
    def virtual_router_ips(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "virtual_router_ips", value)

    @property
    @pulumi.getter(name="virtualWan")
    def virtual_wan(self) -> Optional[pulumi.Input['SubResourceArgs']]:
        """
        The VirtualWAN to which the VirtualHub belongs.
        """
        return pulumi.get(self, "virtual_wan")

    @virtual_wan.setter
    def virtual_wan(self, value: Optional[pulumi.Input['SubResourceArgs']]):
        pulumi.set(self, "virtual_wan", value)

    @property
    @pulumi.getter(name="vpnGateway")
    def vpn_gateway(self) -> Optional[pulumi.Input['SubResourceArgs']]:
        """
        The VpnGateway associated with this VirtualHub.
        """
        return pulumi.get(self, "vpn_gateway")

    @vpn_gateway.setter
    def vpn_gateway(self, value: Optional[pulumi.Input['SubResourceArgs']]):
        pulumi.set(self, "vpn_gateway", value)
class VirtualHub(pulumi.CustomResource):
    """VirtualHub Resource.

    Auto-generated Pulumi resource for the Azure ``network/v20201101`` API.
    It can be constructed either from individual keyword arguments or from a
    single :class:`VirtualHubArgs` bundle; the real ``__init__`` dispatches
    between the two overloads declared below.
    """

    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 address_prefix: Optional[pulumi.Input[str]] = None,
                 allow_branch_to_branch_traffic: Optional[pulumi.Input[bool]] = None,
                 azure_firewall: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
                 express_route_gateway: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 p2_s_vpn_gateway: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 route_table: Optional[pulumi.Input[pulumi.InputType['VirtualHubRouteTableArgs']]] = None,
                 security_partner_provider: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
                 security_provider_name: Optional[pulumi.Input[str]] = None,
                 sku: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 virtual_hub_name: Optional[pulumi.Input[str]] = None,
                 virtual_hub_route_table_v2s: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VirtualHubRouteTableV2Args']]]]] = None,
                 virtual_router_asn: Optional[pulumi.Input[float]] = None,
                 virtual_router_ips: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 virtual_wan: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
                 vpn_gateway: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
                 __props__=None):
        """
        VirtualHub Resource.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] address_prefix: Address-prefix for this VirtualHub.
        :param pulumi.Input[bool] allow_branch_to_branch_traffic: Flag to control transit for VirtualRouter hub.
        :param pulumi.Input[pulumi.InputType['SubResourceArgs']] azure_firewall: The azureFirewall associated with this VirtualHub.
        :param pulumi.Input[pulumi.InputType['SubResourceArgs']] express_route_gateway: The expressRouteGateway associated with this VirtualHub.
        :param pulumi.Input[str] id: Resource ID.
        :param pulumi.Input[str] location: Resource location.
        :param pulumi.Input[pulumi.InputType['SubResourceArgs']] p2_s_vpn_gateway: The P2SVpnGateway associated with this VirtualHub.
        :param pulumi.Input[str] resource_group_name: The resource group name of the VirtualHub.
        :param pulumi.Input[pulumi.InputType['VirtualHubRouteTableArgs']] route_table: The routeTable associated with this virtual hub.
        :param pulumi.Input[pulumi.InputType['SubResourceArgs']] security_partner_provider: The securityPartnerProvider associated with this VirtualHub.
        :param pulumi.Input[str] security_provider_name: The Security Provider name.
        :param pulumi.Input[str] sku: The sku of this VirtualHub.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
        :param pulumi.Input[str] virtual_hub_name: The name of the VirtualHub.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VirtualHubRouteTableV2Args']]]] virtual_hub_route_table_v2s: List of all virtual hub route table v2s associated with this VirtualHub.
        :param pulumi.Input[float] virtual_router_asn: VirtualRouter ASN.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] virtual_router_ips: VirtualRouter IPs.
        :param pulumi.Input[pulumi.InputType['SubResourceArgs']] virtual_wan: The VirtualWAN to which the VirtualHub belongs.
        :param pulumi.Input[pulumi.InputType['SubResourceArgs']] vpn_gateway: The VpnGateway associated with this VirtualHub.
        """
        ...

    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: VirtualHubArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        VirtualHub Resource.

        :param str resource_name: The name of the resource.
        :param VirtualHubArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...

    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Decide which overload the caller used; when an args-bundle was
        # supplied, expand it into keyword arguments for _internal_init.
        resource_args, opts = _utilities.get_resource_args_opts(VirtualHubArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       address_prefix: Optional[pulumi.Input[str]] = None,
                       allow_branch_to_branch_traffic: Optional[pulumi.Input[bool]] = None,
                       azure_firewall: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
                       express_route_gateway: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
                       id: Optional[pulumi.Input[str]] = None,
                       location: Optional[pulumi.Input[str]] = None,
                       p2_s_vpn_gateway: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
                       resource_group_name: Optional[pulumi.Input[str]] = None,
                       route_table: Optional[pulumi.Input[pulumi.InputType['VirtualHubRouteTableArgs']]] = None,
                       security_partner_provider: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
                       security_provider_name: Optional[pulumi.Input[str]] = None,
                       sku: Optional[pulumi.Input[str]] = None,
                       tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                       virtual_hub_name: Optional[pulumi.Input[str]] = None,
                       virtual_hub_route_table_v2s: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VirtualHubRouteTableV2Args']]]]] = None,
                       virtual_router_asn: Optional[pulumi.Input[float]] = None,
                       virtual_router_ips: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                       virtual_wan: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
                       vpn_gateway: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
                       __props__=None):
        # Normalise and validate the resource options first.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # When no opts.id is given we are creating a new resource, so build
        # the property bag from the keyword arguments; __props__ may only be
        # provided by get() alongside an existing resource id.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = VirtualHubArgs.__new__(VirtualHubArgs)

            __props__.__dict__["address_prefix"] = address_prefix
            __props__.__dict__["allow_branch_to_branch_traffic"] = allow_branch_to_branch_traffic
            __props__.__dict__["azure_firewall"] = azure_firewall
            __props__.__dict__["express_route_gateway"] = express_route_gateway
            __props__.__dict__["id"] = id
            __props__.__dict__["location"] = location
            __props__.__dict__["p2_s_vpn_gateway"] = p2_s_vpn_gateway
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            __props__.__dict__["route_table"] = route_table
            __props__.__dict__["security_partner_provider"] = security_partner_provider
            __props__.__dict__["security_provider_name"] = security_provider_name
            __props__.__dict__["sku"] = sku
            __props__.__dict__["tags"] = tags
            __props__.__dict__["virtual_hub_name"] = virtual_hub_name
            __props__.__dict__["virtual_hub_route_table_v2s"] = virtual_hub_route_table_v2s
            __props__.__dict__["virtual_router_asn"] = virtual_router_asn
            __props__.__dict__["virtual_router_ips"] = virtual_router_ips
            __props__.__dict__["virtual_wan"] = virtual_wan
            __props__.__dict__["vpn_gateway"] = vpn_gateway
            # Output-only properties are initialised to None; the engine
            # fills them in after the resource is provisioned.
            __props__.__dict__["bgp_connections"] = None
            __props__.__dict__["etag"] = None
            __props__.__dict__["ip_configurations"] = None
            __props__.__dict__["name"] = None
            __props__.__dict__["provisioning_state"] = None
            __props__.__dict__["routing_state"] = None
            __props__.__dict__["type"] = None
        # Register aliases for every other API version (and the legacy
        # azure-nextgen provider) so existing state migrates cleanly.
        alias_opts = pulumi.ResourceOptions(aliases=[
            pulumi.Alias(type_="azure-nextgen:network/v20201101:VirtualHub"),
            pulumi.Alias(type_="azure-native:network:VirtualHub"),
            pulumi.Alias(type_="azure-nextgen:network:VirtualHub"),
            pulumi.Alias(type_="azure-native:network/v20180401:VirtualHub"),
            pulumi.Alias(type_="azure-nextgen:network/v20180401:VirtualHub"),
            pulumi.Alias(type_="azure-native:network/v20180601:VirtualHub"),
            pulumi.Alias(type_="azure-nextgen:network/v20180601:VirtualHub"),
            pulumi.Alias(type_="azure-native:network/v20180701:VirtualHub"),
            pulumi.Alias(type_="azure-nextgen:network/v20180701:VirtualHub"),
            pulumi.Alias(type_="azure-native:network/v20180801:VirtualHub"),
            pulumi.Alias(type_="azure-nextgen:network/v20180801:VirtualHub"),
            pulumi.Alias(type_="azure-native:network/v20181001:VirtualHub"),
            pulumi.Alias(type_="azure-nextgen:network/v20181001:VirtualHub"),
            pulumi.Alias(type_="azure-native:network/v20181101:VirtualHub"),
            pulumi.Alias(type_="azure-nextgen:network/v20181101:VirtualHub"),
            pulumi.Alias(type_="azure-native:network/v20181201:VirtualHub"),
            pulumi.Alias(type_="azure-nextgen:network/v20181201:VirtualHub"),
            pulumi.Alias(type_="azure-native:network/v20190201:VirtualHub"),
            pulumi.Alias(type_="azure-nextgen:network/v20190201:VirtualHub"),
            pulumi.Alias(type_="azure-native:network/v20190401:VirtualHub"),
            pulumi.Alias(type_="azure-nextgen:network/v20190401:VirtualHub"),
            pulumi.Alias(type_="azure-native:network/v20190601:VirtualHub"),
            pulumi.Alias(type_="azure-nextgen:network/v20190601:VirtualHub"),
            pulumi.Alias(type_="azure-native:network/v20190701:VirtualHub"),
            pulumi.Alias(type_="azure-nextgen:network/v20190701:VirtualHub"),
            pulumi.Alias(type_="azure-native:network/v20190801:VirtualHub"),
            pulumi.Alias(type_="azure-nextgen:network/v20190801:VirtualHub"),
            pulumi.Alias(type_="azure-native:network/v20190901:VirtualHub"),
            pulumi.Alias(type_="azure-nextgen:network/v20190901:VirtualHub"),
            pulumi.Alias(type_="azure-native:network/v20191101:VirtualHub"),
            pulumi.Alias(type_="azure-nextgen:network/v20191101:VirtualHub"),
            pulumi.Alias(type_="azure-native:network/v20191201:VirtualHub"),
            pulumi.Alias(type_="azure-nextgen:network/v20191201:VirtualHub"),
            pulumi.Alias(type_="azure-native:network/v20200301:VirtualHub"),
            pulumi.Alias(type_="azure-nextgen:network/v20200301:VirtualHub"),
            pulumi.Alias(type_="azure-native:network/v20200401:VirtualHub"),
            pulumi.Alias(type_="azure-nextgen:network/v20200401:VirtualHub"),
            pulumi.Alias(type_="azure-native:network/v20200501:VirtualHub"),
            pulumi.Alias(type_="azure-nextgen:network/v20200501:VirtualHub"),
            pulumi.Alias(type_="azure-native:network/v20200601:VirtualHub"),
            pulumi.Alias(type_="azure-nextgen:network/v20200601:VirtualHub"),
            pulumi.Alias(type_="azure-native:network/v20200701:VirtualHub"),
            pulumi.Alias(type_="azure-nextgen:network/v20200701:VirtualHub"),
            pulumi.Alias(type_="azure-native:network/v20200801:VirtualHub"),
            pulumi.Alias(type_="azure-nextgen:network/v20200801:VirtualHub")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(VirtualHub, __self__).__init__(
            'azure-native:network/v20201101:VirtualHub',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'VirtualHub':
        """
        Get an existing VirtualHub resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        # All properties start as None; the engine populates them from the
        # existing resource identified by `id`.
        __props__ = VirtualHubArgs.__new__(VirtualHubArgs)

        __props__.__dict__["address_prefix"] = None
        __props__.__dict__["allow_branch_to_branch_traffic"] = None
        __props__.__dict__["azure_firewall"] = None
        __props__.__dict__["bgp_connections"] = None
        __props__.__dict__["etag"] = None
        __props__.__dict__["express_route_gateway"] = None
        __props__.__dict__["ip_configurations"] = None
        __props__.__dict__["location"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["p2_s_vpn_gateway"] = None
        __props__.__dict__["provisioning_state"] = None
        __props__.__dict__["route_table"] = None
        __props__.__dict__["routing_state"] = None
        __props__.__dict__["security_partner_provider"] = None
        __props__.__dict__["security_provider_name"] = None
        __props__.__dict__["sku"] = None
        __props__.__dict__["tags"] = None
        __props__.__dict__["type"] = None
        __props__.__dict__["virtual_hub_route_table_v2s"] = None
        __props__.__dict__["virtual_router_asn"] = None
        __props__.__dict__["virtual_router_ips"] = None
        __props__.__dict__["virtual_wan"] = None
        __props__.__dict__["vpn_gateway"] = None
        return VirtualHub(resource_name, opts=opts, __props__=__props__)

    # --- Output properties ------------------------------------------------
    # Read-only views over the resolved resource state.

    @property
    @pulumi.getter(name="addressPrefix")
    def address_prefix(self) -> pulumi.Output[Optional[str]]:
        """
        Address-prefix for this VirtualHub.
        """
        return pulumi.get(self, "address_prefix")

    @property
    @pulumi.getter(name="allowBranchToBranchTraffic")
    def allow_branch_to_branch_traffic(self) -> pulumi.Output[Optional[bool]]:
        """
        Flag to control transit for VirtualRouter hub.
        """
        return pulumi.get(self, "allow_branch_to_branch_traffic")

    @property
    @pulumi.getter(name="azureFirewall")
    def azure_firewall(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]:
        """
        The azureFirewall associated with this VirtualHub.
        """
        return pulumi.get(self, "azure_firewall")

    @property
    @pulumi.getter(name="bgpConnections")
    def bgp_connections(self) -> pulumi.Output[Sequence['outputs.SubResourceResponse']]:
        """
        List of references to Bgp Connections.
        """
        return pulumi.get(self, "bgp_connections")

    @property
    @pulumi.getter
    def etag(self) -> pulumi.Output[str]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter(name="expressRouteGateway")
    def express_route_gateway(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]:
        """
        The expressRouteGateway associated with this VirtualHub.
        """
        return pulumi.get(self, "express_route_gateway")

    @property
    @pulumi.getter(name="ipConfigurations")
    def ip_configurations(self) -> pulumi.Output[Sequence['outputs.SubResourceResponse']]:
        """
        List of references to IpConfigurations.
        """
        return pulumi.get(self, "ip_configurations")

    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[str]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Resource name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="p2SVpnGateway")
    def p2_s_vpn_gateway(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]:
        """
        The P2SVpnGateway associated with this VirtualHub.
        """
        return pulumi.get(self, "p2_s_vpn_gateway")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        """
        The provisioning state of the virtual hub resource.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="routeTable")
    def route_table(self) -> pulumi.Output[Optional['outputs.VirtualHubRouteTableResponse']]:
        """
        The routeTable associated with this virtual hub.
        """
        return pulumi.get(self, "route_table")

    @property
    @pulumi.getter(name="routingState")
    def routing_state(self) -> pulumi.Output[str]:
        """
        The routing state.
        """
        return pulumi.get(self, "routing_state")

    @property
    @pulumi.getter(name="securityPartnerProvider")
    def security_partner_provider(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]:
        """
        The securityPartnerProvider associated with this VirtualHub.
        """
        return pulumi.get(self, "security_partner_provider")

    @property
    @pulumi.getter(name="securityProviderName")
    def security_provider_name(self) -> pulumi.Output[Optional[str]]:
        """
        The Security Provider name.
        """
        return pulumi.get(self, "security_provider_name")

    @property
    @pulumi.getter
    def sku(self) -> pulumi.Output[Optional[str]]:
        """
        The sku of this VirtualHub.
        """
        return pulumi.get(self, "sku")

    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Resource type.
        """
        return pulumi.get(self, "type")

    @property
    @pulumi.getter(name="virtualHubRouteTableV2s")
    def virtual_hub_route_table_v2s(self) -> pulumi.Output[Optional[Sequence['outputs.VirtualHubRouteTableV2Response']]]:
        """
        List of all virtual hub route table v2s associated with this VirtualHub.
        """
        return pulumi.get(self, "virtual_hub_route_table_v2s")

    @property
    @pulumi.getter(name="virtualRouterAsn")
    def virtual_router_asn(self) -> pulumi.Output[Optional[float]]:
        """
        VirtualRouter ASN.
        """
        return pulumi.get(self, "virtual_router_asn")

    @property
    @pulumi.getter(name="virtualRouterIps")
    def virtual_router_ips(self) -> pulumi.Output[Optional[Sequence[str]]]:
        """
        VirtualRouter IPs.
        """
        return pulumi.get(self, "virtual_router_ips")

    @property
    @pulumi.getter(name="virtualWan")
    def virtual_wan(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]:
        """
        The VirtualWAN to which the VirtualHub belongs.
        """
        return pulumi.get(self, "virtual_wan")

    @property
    @pulumi.getter(name="vpnGateway")
    def vpn_gateway(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]:
        """
        The VpnGateway associated with this VirtualHub.
        """
        return pulumi.get(self, "vpn_gateway")
| 49 | 2,981 | 0.676625 |
cfdb76dc28d4af80901f1a1dd7159491439ac789 | 1,667 | py | Python | market_maker/market_maker_runner.py | Quant-Network/sample-market-maker | 4c47b60be66b1aead901400ba5fe96abf5e73c1b | [
"Apache-2.0"
] | null | null | null | market_maker/market_maker_runner.py | Quant-Network/sample-market-maker | 4c47b60be66b1aead901400ba5fe96abf5e73c1b | [
"Apache-2.0"
] | null | null | null | market_maker/market_maker_runner.py | Quant-Network/sample-market-maker | 4c47b60be66b1aead901400ba5fe96abf5e73c1b | [
"Apache-2.0"
] | 1 | 2021-04-27T12:02:41.000Z | 2021-04-27T12:02:41.000Z | from __future__ import absolute_import
import sys
import quant_trading
from market_maker.settings import settings
from market_maker.utils import log, constants
from market_maker.quant_position_manager_strategy import QuantPositionManager
from market_maker.quant_position_swinger_strategy import QuantPositionSwinger
#
# Helpers
#
logger = log.setup_custom_logger('root')
def run():
    """Entry point: build the strategy selected by settings.QUANT_ALGO and run it."""
    logger.info('BitMEX Quant-trading.Network Market Maker Version: %s\n' % constants.VERSION)

    # Configure API key authorization: ApiKeyAuth
    api_config = quant_trading.Configuration()
    api_config.api_key['X-API-KEY'] = settings.QUANT_API_KEY

    # Map every supported QUANT_ALGO name to its (API class, strategy class) pair.
    dispatch = {
        "BitcoinFuturesSwinger": (quant_trading.BitcoinFuturesSwingerApi, QuantPositionSwinger),
        "BitcoinFuturesManager": (quant_trading.BitcoinFuturesManagerApi, QuantPositionManager),
        "BitcoinFuturesMarketMaker": (quant_trading.BitcoinFuturesMarketMakerApi, QuantPositionManager),
    }
    if settings.QUANT_ALGO not in dispatch:
        raise NotImplementedError("The settings.QUANT_ALGO is not valid.")
    api_cls, strategy_cls = dispatch[settings.QUANT_ALGO]

    # Create an instance of the API class and wrap it in the chosen strategy.
    om = strategy_cls(api_cls(quant_trading.ApiClient(api_config)))

    # Try/except just keeps ctrl-c from printing an ugly stacktrace
    try:
        om.run_loop()
    except (KeyboardInterrupt, SystemExit):
        sys.exit()
| 39.690476 | 105 | 0.759448 |
012838eb379407f376e1e01a4f232b378edfb8eb | 1,545 | py | Python | src/aks-preview/azext_aks_preview/vendored_sdks/azure_mgmt_preview_aks/v2019_02_01/models/container_service_linux_profile.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 2 | 2021-06-05T17:51:26.000Z | 2021-11-17T11:17:56.000Z | src/aks-preview/azext_aks_preview/vendored_sdks/azure_mgmt_preview_aks/v2019_02_01/models/container_service_linux_profile.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 3 | 2020-05-27T20:16:26.000Z | 2020-07-23T19:46:49.000Z | src/aks-preview/azext_aks_preview/vendored_sdks/azure_mgmt_preview_aks/v2019_02_01/models/container_service_linux_profile.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 5 | 2020-09-08T22:46:48.000Z | 2020-11-08T14:54:35.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ContainerServiceLinuxProfile(Model):
    """Profile for Linux VMs in the container service cluster.

    All required parameters must be populated in order to send to Azure.

    :param admin_username: Required. The administrator username to use for
     Linux VMs.
    :type admin_username: str
    :param ssh: Required. SSH configuration for Linux-based VMs running on
     Azure.
    :type ssh:
     ~azure.mgmt.containerservice.v2019_02_01.models.ContainerServiceSshConfiguration
    """

    # msrest validation rules: both fields are mandatory, and the username
    # must start with a letter and contain only letters, digits, '-' or '_'.
    _validation = {
        'admin_username': {'required': True, 'pattern': r'^[A-Za-z][-A-Za-z0-9_]*$'},
        'ssh': {'required': True},
    }

    # Maps Python attribute names to wire (JSON) keys and msrest type names.
    _attribute_map = {
        'admin_username': {'key': 'adminUsername', 'type': 'str'},
        'ssh': {'key': 'ssh', 'type': 'ContainerServiceSshConfiguration'},
    }

    def __init__(self, **kwargs):
        super(ContainerServiceLinuxProfile, self).__init__(**kwargs)
        # Missing keys default to None; msrest raises on the required fields
        # at serialization time rather than here.
        self.admin_username = kwargs.get('admin_username', None)
        self.ssh = kwargs.get('ssh', None)
| 35.930233 | 85 | 0.620065 |
44787036c585421d76a4561fd2f7792615150b31 | 6,876 | py | Python | RecoEgamma/ElectronIdentification/python/Identification/cutBasedElectronID_PHYS14_PU20bx25_V2_cff.py | pasmuss/cmssw | 566f40c323beef46134485a45ea53349f59ae534 | [
"Apache-2.0"
] | null | null | null | RecoEgamma/ElectronIdentification/python/Identification/cutBasedElectronID_PHYS14_PU20bx25_V2_cff.py | pasmuss/cmssw | 566f40c323beef46134485a45ea53349f59ae534 | [
"Apache-2.0"
] | null | null | null | RecoEgamma/ElectronIdentification/python/Identification/cutBasedElectronID_PHYS14_PU20bx25_V2_cff.py | pasmuss/cmssw | 566f40c323beef46134485a45ea53349f59ae534 | [
"Apache-2.0"
] | null | null | null | from PhysicsTools.SelectorUtils.centralIDRegistry import central_id_registry
# Common functions and classes for ID definition are imported here:
from RecoEgamma.ElectronIdentification.Identification.cutBasedElectronID_tools import *
#
# This is the first second of PHYS14 cuts, optimized on PHYS14 samples.
#
# The ID cuts below are optimized IDs for PHYS14 Scenario PU 20, bx 25ns
# The cut values are taken from the twiki:
# https://twiki.cern.ch/twiki/bin/view/CMS/CutBasedElectronIdentificationRun2
# (where they may not stay, if a newer version of cuts becomes available for these
# conditions)
# See also the presentation explaining these working points (this will not change):
# https://indico.cern.ch/event/370494/contribution/2/material/slides/0.pdf
#
# First, define cut values
#
# Veto working point Barrel and Endcap
idName = "cutBasedElectronID-PHYS14-PU20bx25-V2-standalone-veto"
WP_Veto_EB = EleWorkingPoint_V2(
idName , # idName
0.013625 , # dEtaInCut
0.230374 , # dPhiInCut
0.011586 , # full5x5_sigmaIEtaIEtaCut
0.181130 , # hOverECut
0.094095 , # dxyCut
0.713070 , # dzCut
0.295751 , # absEInverseMinusPInverseCut
0.158721 , # relCombIsolationWithEALowPtCut
0.158721 , # relCombIsolationWithEAHighPtCut
# conversion veto cut needs no parameters, so not mentioned
2 # missingHitsCut
)
WP_Veto_EE = EleWorkingPoint_V2(
idName , # idName
0.011932 , # dEtaInCut
0.255450 , # dPhiInCut
0.031849 , # full5x5_sigmaIEtaIEtaCut
0.223870 , # hOverECut
0.342293 , # dxyCut
0.953461 , # dzCut
0.155501 , # absEInverseMinusPInverseCut
0.177032 , # relCombIsolationWithEALowPtCut
0.177032 , # relCombIsolationWithEAHighPtCut
# conversion veto cut needs no parameters, so not mentioned
3 # missingHitsCut
)
# Loose working point Barrel and Endcap
idName = "cutBasedElectronID-PHYS14-PU20bx25-V2-standalone-loose"
WP_Loose_EB = EleWorkingPoint_V2(
idName , # idName
0.009277 , # dEtaInCut
0.094739 , # dPhiInCut
0.010331 , # full5x5_sigmaIEtaIEtaCut
0.093068 , # hOverECut
0.035904 , # dxyCut
0.075496 , # dzCut
0.189968 , # absEInverseMinusPInverseCut
0.130136 , # relCombIsolationWithEALowPtCut
0.130136 , # relCombIsolationWithEAHighPtCut
# conversion veto cut needs no parameters, so not mentioned
1 # missingHitsCut
)
WP_Loose_EE = EleWorkingPoint_V2(
idName , # idName
0.009833 , # dEtaInCut
0.149934 , # dPhiInCut
0.031838 , # full5x5_sigmaIEtaIEtaCut
0.115754 , # hOverECut
0.099266 , # dxyCut
0.197897 , # dzCut
0.140662 , # absEInverseMinusPInverseCut
0.163368 , # relCombIsolationWithEALowPtCut
0.163368 , # relCombIsolationWithEAHighPtCut
# conversion veto cut needs no parameters, so not mentioned
1 # missingHitsCut
)
# Medium working point Barrel and Endcap
idName = "cutBasedElectronID-PHYS14-PU20bx25-V2-standalone-medium"
WP_Medium_EB = EleWorkingPoint_V2(
idName , # idName
0.008925 , # dEtaInCut
0.035973 , # dPhiInCut
0.009996 , # full5x5_sigmaIEtaIEtaCut
0.050537 , # hOverECut
0.012235 , # dxyCut
0.042020 , # dzCut
0.091942 , # absEInverseMinusPInverseCut
0.107587 , # relCombIsolationWithEALowPtCut
0.107587 , # relCombIsolationWithEAHighPtCut
# conversion veto cut needs no parameters, so not mentioned
1 # missingHitsCut
)
WP_Medium_EE = EleWorkingPoint_V2(
idName , # idName
0.007429 , # dEtaInCut
0.067879 , # dPhiInCut
0.030135 , # full5x5_sigmaIEtaIEtaCut
0.086782 , # hOverECut
0.036719 , # dxyCut
0.138142 , # dzCut
0.100683 , # absEInverseMinusPInverseCut
0.113254 , # relCombIsolationWithEALowPtCut
0.113254 , # relCombIsolationWithEAHighPtCut
# conversion veto cut needs no parameters, so not mentioned
1 # missingHitsCut
)
# Tight working point Barrel and Endcap
idName = "cutBasedElectronID-PHYS14-PU20bx25-V2-standalone-tight"
WP_Tight_EB = EleWorkingPoint_V2(
idName , # idName
0.006046 , # dEtaInCut
0.028092 , # dPhiInCut
0.009947 , # full5x5_sigmaIEtaIEtaCut
0.045772 , # hOverECut
0.008790 , # dxyCut
0.021226 , # dzCut
0.020118 , # absEInverseMinusPInverseCut
0.069537 , # relCombIsolationWithEALowPtCut
0.069537 , # relCombIsolationWithEAHighPtCut
# conversion veto cut needs no parameters, so not mentioned
1 # missingHitsCut
)
WP_Tight_EE = EleWorkingPoint_V2(
idName , # idName
0.007057 , # dEtaInCut
0.030159 , # dPhiInCut
0.028237 , # full5x5_sigmaIEtaIEtaCut
0.067778 , # hOverECut
0.027984 , # dxyCut
0.133431 , # dzCut
0.098919 , # absEInverseMinusPInverseCut
0.078265 , # relCombIsolationWithEALowPtCut
0.078265 , # relCombIsolationWithEAHighPtCut
# conversion veto cut needs no parameters, so not mentioned
1 # missingHitsCut
)
# Second, define what effective areas to use for pile-up correction
isoInputs = IsolationCutInputs_V2(
# phoIsolationEffAreas
"RecoEgamma/ElectronIdentification/data/PHYS14/effAreaElectrons_cone03_pfNeuHadronsAndPhotons.txt"
)
#
# Set up VID configuration for all cuts and working points
#
cutBasedElectronID_PHYS14_PU20bx25_V2_standalone_veto = configureVIDCutBasedEleID_V2(WP_Veto_EB, WP_Veto_EE, isoInputs)
cutBasedElectronID_PHYS14_PU20bx25_V2_standalone_loose = configureVIDCutBasedEleID_V2(WP_Loose_EB, WP_Loose_EE, isoInputs)
cutBasedElectronID_PHYS14_PU20bx25_V2_standalone_medium = configureVIDCutBasedEleID_V2(WP_Medium_EB, WP_Medium_EE, isoInputs)
cutBasedElectronID_PHYS14_PU20bx25_V2_standalone_tight = configureVIDCutBasedEleID_V2(WP_Tight_EB, WP_Tight_EE, isoInputs)
central_id_registry.register(cutBasedElectronID_PHYS14_PU20bx25_V2_standalone_veto.idName,
'1f322690ebc601ee190dd97f60371272')
central_id_registry.register(cutBasedElectronID_PHYS14_PU20bx25_V2_standalone_loose.idName,
'fce6cef3a2c73849c527549af6a0dd86')
central_id_registry.register(cutBasedElectronID_PHYS14_PU20bx25_V2_standalone_medium.idName,
'a8c98f61148e0d989e436a46497df86a')
central_id_registry.register(cutBasedElectronID_PHYS14_PU20bx25_V2_standalone_tight.idName,
'26d5a314fe762b4927150f37a23d3e71')
# for now until we have a database...
cutBasedElectronID_PHYS14_PU20bx25_V2_standalone_veto.isPOGApproved = cms.untracked.bool(True)
cutBasedElectronID_PHYS14_PU20bx25_V2_standalone_loose.isPOGApproved = cms.untracked.bool(True)
cutBasedElectronID_PHYS14_PU20bx25_V2_standalone_medium.isPOGApproved = cms.untracked.bool(True)
cutBasedElectronID_PHYS14_PU20bx25_V2_standalone_tight.isPOGApproved = cms.untracked.bool(True)
| 38.2 | 125 | 0.736329 |
8048a55b32d7fdeed9c75d58e2f6ed55a425814c | 8,042 | py | Python | nPYc/plotting/_plotSpectralVariance.py | phenomecentre/nPYc-Toolbox | 614339d7b1fc7c82e7eaec1bb3710822ca9ca84d | [
"MIT"
] | 14 | 2018-01-23T23:10:40.000Z | 2022-02-03T15:15:52.000Z | nPYc/plotting/_plotSpectralVariance.py | phenomecentre/nPYc-Toolbox | 614339d7b1fc7c82e7eaec1bb3710822ca9ca84d | [
"MIT"
] | 76 | 2018-01-24T17:37:25.000Z | 2022-03-23T14:12:54.000Z | nPYc/plotting/_plotSpectralVariance.py | phenomecentre/nPYc-Toolbox | 614339d7b1fc7c82e7eaec1bb3710822ca9ca84d | [
"MIT"
] | 11 | 2018-01-25T11:35:47.000Z | 2022-03-07T15:04:02.000Z | import numpy
import re
import matplotlib as mpl
import matplotlib.pyplot as plt
import plotly
import plotly.graph_objs as go
from ..enumerations import VariableType
from .. import Dataset, NMRDataset
def plotSpectralVariance(dataset, classes=None, quantiles=(25, 75), average='median', xlim=None, logy=False, title=None, savePath=None, figureFormat='png', dpi=72, figureSize=(11,7)):
    """
    plotSpectralVariance(dataset, classes=None, quantiles=(25, 75), average='median', xlim=None, **kwargs)

    Plot the average spectral profile of dataset, optionally with the bounds of variance calculated from *quantiles* shaded. By specifying a column from *dataset.sampleMetadata* in the *classes* argument, individual averages and ranges will be plotted for each unique label in *dataset.sampleMetadata[classes]*.

    :param Dataset dataset: Data to plot
    :param classes: Plot by distinct classes specified
    :type classes: None or column in dataset.sampleMetadata
    :param quantiles: Plot these quantile bounds
    :type quantiles: None or (min, max)
    :param str average: Method to calculate average spectrum, defaults to 'median', may also be 'mean'
    :param xlim: Tuple of (min, max) values to scale the x-axis to
    :type xlim: None or (float, float)
    :param bool logy: If ``True`` plot intensities on a log10 scale
    :param str title: Text to title each plot with
    :param savePath: If not ``None``, save the figure to this path instead of displaying it
    :type savePath: None or str
    :param str figureFormat: File format passed to :func:`matplotlib.pyplot.savefig` when *savePath* is given
    :param int dpi: Figure resolution in dots per inch
    :param tuple figureSize: Figure dimensions as (width, height) in inches
    """
    # Check we have a nPYc.Dataset
    if not isinstance(dataset, Dataset):
        raise TypeError('dataset must be a nPYc.Dataset subclass')

    # Variance bands only make sense for continuum (spectral) variables
    if dataset.VariableType != VariableType.Continuum:
        raise ValueError('dataset must have spectral variables')

    if quantiles is not None:
        if len(quantiles) != 2:
            raise ValueError('quantiles must be a tuple of (low, high)')

    ##
    # Set up plot
    ##
    fig = plt.figure(figsize=figureSize, dpi=dpi)
    ax = plt.subplot(1, 1, 1)

    ##
    # Call helper to draw into the axis
    ##
    _plotSpectralVarianceHelper(ax, dataset, classes=classes, quantiles=quantiles, average=average, xlim=xlim)

    if title is not None:
        ax.set_title(title)

    if logy:
        ax.set_yscale('symlog')
    else:
        # Force scientific notation on the intensity axis
        ax.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))

    ##
    # Save or draw
    ##
    if savePath:
        plt.savefig(savePath, format=figureFormat, dpi=dpi)
        plt.close()
    else:
        plt.show()
def _plotSpectralVarianceHelper(ax, dataset, classes=None, quantiles=(25, 75), average='median', xlim=None):
    """
    Draw the spectral-variance plot into *ax*.

    :param ax: matplotlib axis to draw into
    :param Dataset dataset: data to plot
    :param classes: optional sampleMetadata column used to split samples into groups
    :param quantiles: (low, high) percentile bounds to shade, or None for no band
    :param str average: 'median' or 'mean'
    :param xlim: optional (min, max) ppm window to restrict the plot to
    :raises ValueError: if *classes* is not a sampleMetadata column, or *average*
        is not 'median'/'mean'
    """
    ##
    # Build one boolean sample mask per class (or a single mask covering everything)
    ##
    classMask = dict()
    if classes is not None:
        if not classes in dataset.sampleMetadata.columns:
            raise ValueError('%s not in dataset.sampleMetadata.' % str(classes))
        uniqueClasses = dataset.sampleMetadata[classes].unique()
        for thisClass in uniqueClasses:
            classMask[thisClass] = (dataset.sampleMetadata[classes].values == thisClass) & dataset.sampleMask
    else:
        classMask['All'] = dataset.sampleMask

    # If xlimits, trim the feature mask to that ppm window
    featureMask = dataset.featureMask
    if xlim:
        featureMask = (dataset.featureMetadata['ppm'].values > xlim[0]) & (dataset.featureMetadata['ppm'].values < xlim[1]) & featureMask

    ##
    # Loop through classes
    ##
    intensityData = dataset.intensityData[:, featureMask]
    for thisClass in classMask.keys():
        # Find average of class
        if average == 'median':
            averageSpectrum = numpy.median(intensityData[classMask[thisClass], :], axis=0)
        elif average == 'mean':
            averageSpectrum = numpy.mean(intensityData[classMask[thisClass], :], axis=0)
        else:
            # BUG FIX: an unknown `average` previously fell through to an opaque
            # NameError on `averageSpectrum` below; fail with a clear message.
            raise ValueError("average must be 'median' or 'mean'")

        # Draw the average
        base_line, = ax.plot(dataset.featureMetadata.loc[featureMask, 'ppm'].values, averageSpectrum, label=thisClass)

        if quantiles is not None:
            # Find quantile range
            quantileRange = numpy.percentile(intensityData[classMask[thisClass], :], quantiles, axis=0)
            # Shade the band in the same colour as the average line
            ax.fill_between(dataset.featureMetadata.loc[featureMask, 'ppm'], quantileRange[0, :], y2=quantileRange[1, :], alpha=0.5, facecolor=base_line.get_color())

    if classes:
        ax.legend()
    ax.set_xlabel('ppm')

    # NMR spectra are conventionally drawn with ppm decreasing to the right
    if isinstance(dataset, NMRDataset):
        ax.invert_xaxis()
def plotSpectralVarianceInteractive(dataset, classes=None, quantiles=(25, 75), average='mean', xlim=None, title=None):
    """
    Plot the average spectral profile of dataset, optionally with the bounds of variance calculated from *quantiles* shaded. By specifying a column from *dataset.sampleMetadata* in the *classes* argument, individual averages and ranges will be plotted for each unique label in *dataset.sampleMetadata[classes]*.

    :param Dataset dataset: Data to plot
    :param classes: Plot by distinct classes specified
    :type classes: None or column in dataset.sampleMetadata
    :param quantiles: Plot these quantile bounds
    :type quantiles: None or (min, max)
    :param str average: Method to calculate average spectrum, defaults to 'mean', may also be 'median'
    :param xlim: Tuple of (min, max) values to scale the x-axis to
    :type xlim: None or (float, float)
    :returns: interactive plotly figure
    :raises TypeError: if *dataset* is not a nPYc Dataset subclass
    :raises ValueError: on invalid *quantiles*, *classes* or *average*
    """
    # Check we have a nPYc.Dataset
    if not isinstance(dataset, Dataset):
        raise TypeError('dataset must be a nPYc.Dataset subclass')
    # Check we have continuous (spectral) data
    if dataset.VariableType != VariableType.Continuum:
        raise ValueError('dataset must have spectral variables')
    if quantiles is not None:
        if not len(quantiles) == 2:
            raise ValueError('quantiles must be a tuple of (low, high)')

    ##
    # If plotting classes, build one boolean sample mask per class
    ##
    classMask = dict()
    if classes is not None:
        if not classes in dataset.sampleMetadata.columns:
            raise ValueError('%s not in dataset.sampleMetadata.' % str(classes))
        uniqueClasses = dataset.sampleMetadata[classes].unique()
        for thisClass in uniqueClasses:
            classMask[thisClass] = (dataset.sampleMetadata[classes].values == thisClass) & dataset.sampleMask
    else:
        classMask['All'] = dataset.sampleMask

    # If xlimits, trim data
    featureMask = dataset.featureMask
    data = list()
    if xlim:
        featureMask = (dataset.featureMetadata['ppm'].values > xlim[0]) & (dataset.featureMetadata['ppm'].values < xlim[1]) & featureMask

    ##
    # Loop through classes: one visible average trace per class, plus an
    # invisible lower/upper quantile pair filled between when requested.
    ##
    colours = plotly.colors.DEFAULT_PLOTLY_COLORS
    colourIndex = 0
    # BUG FIX: raw string -- '\(' and '\W' were invalid escape sequences in a
    # plain string literal (DeprecationWarning, an error in future Pythons).
    colourParser = re.compile(r'rgb\((\d+),\W?(\d+),\W?(\d+)\)')
    for thisClass in classMask.keys():
        localMask = numpy.ix_(numpy.logical_and(dataset.sampleMask, classMask[thisClass]),
                              featureMask)
        # Find average of class
        if average == 'median':
            averageSpectrum = numpy.median(dataset.intensityData[localMask], axis=0)
        elif average == 'mean':
            averageSpectrum = numpy.mean(dataset.intensityData[localMask], axis=0)
        else:
            # BUG FIX: an unknown `average` previously fell through to a
            # NameError on `averageSpectrum`; fail with a clear message.
            raise ValueError("average must be 'median' or 'mean'")

        trace = go.Scattergl(
            x = dataset.featureMetadata.loc[featureMask, 'ppm'],
            y = averageSpectrum,
            line = dict(
                color = colours[colourIndex]
            ),
            text = thisClass,
            mode = 'lines',
            hoverinfo = 'text',
            name = thisClass,
            legendgroup=thisClass
        )
        data.append(trace)

        # BUG FIX: guard the variance band on `quantiles` -- the docstring
        # allows None, but numpy.percentile was previously called
        # unconditionally and crashed on quantiles=None.
        if quantiles is not None:
            quantileRange = numpy.percentile(dataset.intensityData[localMask], quantiles, axis=0)
            # Band fill: same hue as the average line at 20% opacity
            classColour = colourParser.match(colours[colourIndex])
            classColour = 'rgba(%s, %s, %s, 0.2)' % classColour.groups()
            trace = go.Scattergl(
                x = dataset.featureMetadata.loc[featureMask, 'ppm'],
                y = quantileRange[0],
                line = dict(
                    color = 'rgba(0,0,0,0)'
                ),
                text = False,
                mode = 'lines',
                hoverinfo = 'none',
                showlegend = False,
                legendgroup=thisClass
            )
            data.append(trace)
            trace = go.Scattergl(
                x = dataset.featureMetadata.loc[featureMask, 'ppm'],
                y = quantileRange[1],
                line = dict(
                    color = 'rgba(0,0,0,0)'
                ),
                fillcolor = classColour,
                fill = 'tonexty',
                text = False,
                mode = 'lines',
                hoverinfo = 'none',
                showlegend=False,
                legendgroup=thisClass
            )
            data.append(trace)

        # Cycle through the default plotly palette
        colourIndex += 1
        if colourIndex >= len(colours):
            colourIndex = 0

    # BUG FIX: this was `if True:` with an unreachable else branch; mirror the
    # static plot (_plotSpectralVarianceHelper) and only reverse the ppm axis
    # for NMR data.
    if isinstance(dataset, NMRDataset):
        xaxis = 'reversed'
    else:
        xaxis = 'auto'

    layout = go.Layout(
        title=title,
        legend=dict(
            orientation="h"),
        hovermode = "closest",
        yaxis = dict(
            showticklabels=False
        ),
        xaxis=dict(
            autorange=xaxis
        )
    )
    figure = go.Figure(data=data, layout=layout)
    return figure
| 31.661417 | 308 | 0.714375 |
badc58f1b4ff7ee12f0fac34b1cb9a99e6fcca05 | 2,938 | py | Python | visualDet3D/utils/imdb.py | saurav1869/saurav1869-mono3d_road | ae65b705504af0bec33cd86b7cfeef5d8605e159 | [
"Apache-2.0"
] | null | null | null | visualDet3D/utils/imdb.py | saurav1869/saurav1869-mono3d_road | ae65b705504af0bec33cd86b7cfeef5d8605e159 | [
"Apache-2.0"
] | null | null | null | visualDet3D/utils/imdb.py | saurav1869/saurav1869-mono3d_road | ae65b705504af0bec33cd86b7cfeef5d8605e159 | [
"Apache-2.0"
] | null | null | null | import json
import pickle
import os
import threading
class IMDB:
    """On-disk sample database that spills frames to pickle files in fixed-size chunks.

    Samples are appended in memory and written out ``frames_size`` at a time as
    ``samples_<start index>.pkl`` files under ``imdb_path``; :meth:`save` writes a
    ``dataset.json`` index so the database can be reopened later with
    :meth:`load_from_disk`.
    """

    def __init__(self, imdb_path, frames_size=5000, dump_index=0, size=0,
                 save_method=None, load_method=None):
        self.imdb_path = imdb_path
        self._frames = []                # in-memory buffer of not-yet-dumped samples
        self._frames_size = frames_size  # samples per on-disk chunk
        self._dump_index = dump_index    # chunk currently buffered / next to dump
        self._size = size                # total sample count (set by save/load)
        self._lock = threading.Lock()    # serialises chunk swapping in __getitem__
        self._save_method = save_method  # optional custom serialiser(frames, path)
        self._load_method = load_method  # optional custom deserialiser(path) -> frames

    @classmethod
    def load_from_disk(cls, imdb_path, load_method=None):
        """Reopen a database previously written with :meth:`save`."""
        dataset = json.load(open(os.path.join(imdb_path, 'dataset.json')))
        # dump_index of -1 forces the first __getitem__ to pull a chunk from disk
        return cls(imdb_path,
                   size=int(dataset['size']),
                   dump_index=-1,
                   frames_size=int(dataset['frames_size']),
                   load_method=load_method)

    def _are_frames_full(self):
        # True once the buffer holds exactly one chunk's worth of samples.
        return len(self._frames) == self._frames_size

    def _get_filepath(self, start_index):
        # Chunk files are named after the global index of their first sample.
        return os.path.join(self.imdb_path, 'samples_{0:06d}.pkl'.format(start_index))

    def _dump_frames(self):
        """Write the buffered samples to their chunk file and reset the buffer."""
        chunk_start = self._dump_index * self._frames_size
        self._dump_index += 1
        target = self._get_filepath(chunk_start)
        if self._save_method is not None:
            self._save_method(self._frames, target)
        else:
            with open(target, 'wb') as fd:
                pickle.dump(self._frames, fd)
        self._frames = []

    def append(self, frame):
        """Add one sample, spilling the buffer to disk first when it is full."""
        if self._are_frames_full():
            self._dump_frames()
        self._frames.append(frame)

    def save(self):
        """Flush any buffered samples and write the ``dataset.json`` index."""
        self._size = self._dump_index * self._frames_size + len(self._frames)
        self._dump_frames()
        metadata = {
            'path': self.imdb_path,
            'size': self._size,
            'frames_size': self._frames_size
        }
        json.dump(metadata,
                  open(os.path.join(self.imdb_path, 'dataset.json'), 'w'),
                  indent=4)

    def _load_frames(self, index):
        """Ensure the chunk containing *index* is buffered; return its local offset."""
        wanted_chunk = int(index / self._frames_size)
        if wanted_chunk != self._dump_index:
            path = self._get_filepath(wanted_chunk * self._frames_size)
            if self._load_method is not None:
                self._frames = self._load_method(path)
            else:
                self._frames = pickle.load(open(path, 'rb'))
            self._dump_index = wanted_chunk
        return index - (self._frames_size * wanted_chunk)

    def __getitem__(self, index):
        with self._lock:
            offset = self._load_frames(index)
            return self._frames[offset]

    def __len__(self):
        return self._size

    def __del__(self):
        del self.imdb_path
        del self._frames
        del self._frames_size
        del self._dump_index
        del self._size
        del self._lock
| 32.285714 | 86 | 0.600068 |
3f30127053db464d00794f1f5da00d09de240403 | 32 | py | Python | stubbs/defs/colo.py | holy-crust/reclaimer | 0aa693da3866ce7999c68d5f71f31a9c932cdb2c | [
"MIT"
] | null | null | null | stubbs/defs/colo.py | holy-crust/reclaimer | 0aa693da3866ce7999c68d5f71f31a9c932cdb2c | [
"MIT"
] | null | null | null | stubbs/defs/colo.py | holy-crust/reclaimer | 0aa693da3866ce7999c68d5f71f31a9c932cdb2c | [
"MIT"
] | null | null | null | from ...hek.defs.colo import *
| 16 | 31 | 0.65625 |
f7344479dd36c883ecee3ae2e8ca891b5e378565 | 4,706 | py | Python | unlock/util/runtime.py | NeuralProsthesisLab/unlock | 0c4d95abdab288d3e657ca2db867b06f755f26ff | [
"BSD-3-Clause"
] | 6 | 2017-05-05T01:08:55.000Z | 2021-08-03T21:50:07.000Z | unlock/util/runtime.py | NeuralProsthesisLab/unlock | 0c4d95abdab288d3e657ca2db867b06f755f26ff | [
"BSD-3-Clause"
] | 1 | 2015-05-21T01:02:50.000Z | 2015-05-21T16:03:43.000Z | unlock/util/runtime.py | NeuralProsthesisLab/unlock | 0c4d95abdab288d3e657ca2db867b06f755f26ff | [
"BSD-3-Clause"
] | 4 | 2015-05-21T12:38:42.000Z | 2022-03-28T15:47:58.000Z | # Copyright (c) James Percent, Byron Galbraith and Unlock contributors.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Unlock nor the names of its contributors may be used
# to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from unlock.util.observable import *
from unlock.util.saferef import *
from unlock.util.injector import *
from optparse import OptionParser
import json
import logging
import logging.config
import sys
__author__ = 'jpercent'
class RuntimeAssistant(object):
    """Static helpers used to bootstrap an Unlock runtime: dependency-injection
    configuration, JSON config parsing, process priority and error reporting."""

    def __init__(self):
        super(RuntimeAssistant, self).__init__()

    @staticmethod
    def configure(config, fact_instance):
        """Run dependency injection over *config* using *fact_instance* as the factory.

        :returns: the fully configured application instance (asserted non-None)
        """
        assert fact_instance
        dpi = DependencyInjector(fact_instance)
        instance = dpi.configure_application(config)
        assert instance
        return instance

    @staticmethod
    def parse_json_config(conf):
        """Read the file at path *conf* and return its parsed JSON content."""
        with open(conf, 'rt') as file_descriptor:
            json_string = file_descriptor.read()
        config = json.loads(json_string)
        return config

    @staticmethod
    def make_high_priority():
        """Best effort: raise this process's scheduling priority.

        NOTE(review): psutil.HIGH_PRIORITY_CLASS exists on Windows only, and
        set_nice is a legacy psutil API -- failures are reported to stderr but
        never propagated, so this degrades gracefully elsewhere.
        """
        try:
            import psutil
            import os
            p = psutil.Process(os.getpid())
            p.set_nice(psutil.HIGH_PRIORITY_CLASS)
        except Exception as e:
            RuntimeAssistant.print_last_exception()

    @staticmethod
    def print_last_exception():
        """Print the traceback of the exception currently being handled to stderr."""
        # BUG FIX: `traceback` was used without a visible import in this module
        # (it may or may not have been supplied by the wildcard imports above);
        # import it locally so this error reporter cannot itself raise NameError.
        import traceback
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_traceback, file=sys.stderr)
class JsonConfiguredRuntime(object):
    """Base runtime configured from a JSON file named on the command line.

    Parses a ``-c/--conf`` option (defaulting to ``conf.json`` under
    *path_to_default_config*), loads the JSON config with RuntimeAssistant and
    runs it through the dependency injector to build ``self.runtime_instance``.
    """

    def __init__(self, factory, path_to_default_config):
        """Initializes a JsonConfiguredRuntime."""
        # factory: handed to the dependency injector when init() configures the app
        self.factory = factory
        self.conf = None
        self.logger = None
        self.loglevel = logging.INFO
        self.config = None
        self.runtime_instance = None
        self.args = None
        self.options = None
        self.parser = None
        self.usage = "usage: %prog [options]"
        conf_help = 'path to the configuration; if not set conf.json is used'
        try:
            self.parser = OptionParser(version="%prog 1.0", usage=self.usage)
            # NOTE(review): `os` is not imported in this module's visible import
            # block -- presumably provided by one of the wildcard imports; verify.
            self.default_conf = os.path.join(path_to_default_config, 'conf.json')
            self.parser.add_option('-c', '--conf', type=str, dest='conf', default=self.default_conf, metavar='CONF', help=conf_help)
        except Exception as e:
            # Logging is not configured yet at this point, so fall back to print.
            print(str(self.__class__.__name__)+': FATAL failed to parse program arguments')
            RuntimeAssistant.print_last_exception()
            raise e

    def init(self):
        """Parse argv, load the JSON config and build the runtime instance.

        On failure, prints (or logs, if a logger was already set up) a fatal
        message plus usage and the traceback, then re-raises.
        """
        assert self.parser
        try:
            (self.options, self.args) = self.parser.parse_args()
            assert self.options.conf
            self.config = RuntimeAssistant.parse_json_config(self.options.conf)
            self.runtime_instance = RuntimeAssistant.configure(self.config, self.factory)
        except Exception as e:
            if not self.logger:
                print(str(self.__class__.__name__)+': FATAL failed to initialize correctly; did not complete logging setup')
            else:
                self.logger.fatal('failed to initialize correctly')
            if self.parser:
                self.parser.print_help()
            RuntimeAssistant.print_last_exception()
            raise e
        # Only reached on success: swap in a module-scoped logger.
        self.logger = logging.getLogger(__name__)
7d9f7db905544e2962117c53effea52e3b06c292 | 8,354 | py | Python | src/ychaos/cli/verify.py | vanderh0ff/ychaos | 5148c889912b744ee73907e4dd30c9ddb851aeb3 | [
"Apache-2.0"
] | 8 | 2021-07-21T15:37:48.000Z | 2022-03-03T14:43:09.000Z | src/ychaos/cli/verify.py | vanderh0ff/ychaos | 5148c889912b744ee73907e4dd30c9ddb851aeb3 | [
"Apache-2.0"
] | 102 | 2021-07-20T16:08:29.000Z | 2022-03-25T07:28:37.000Z | src/ychaos/cli/verify.py | vanderh0ff/ychaos | 5148c889912b744ee73907e4dd30c9ddb851aeb3 | [
"Apache-2.0"
] | 8 | 2021-07-20T13:37:46.000Z | 2022-02-18T01:44:52.000Z | # Copyright 2021, Yahoo
# Licensed under the terms of the Apache 2.0 license. See the LICENSE file in the project root for terms
from abc import ABC
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import Any, Optional, Union
from pydantic import validate_arguments
from ..core.verification.controller import VerificationController
from ..core.verification.data import VerificationStateData
from ..testplan import SystemState
from ..testplan.verification import VerificationConfig, VerificationType
from . import YChaosCLIHook, YChaosTestplanInputSubCommand
__all__ = ["Verify"]
class Verify(YChaosTestplanInputSubCommand):
    """
    The `verify` subcommand of YChaos is used to verify the state of the system. This
    subcommand requires a valid testplan which can be provided with the -t/--testplan argument.
    The subcommand also requires a valid state at which the system is verified.
    """

    # Subcommand registration metadata consumed by the CLI framework.
    name = "verify"
    help = "The verification subcommand of YChaos"

    def __init__(self, **kwargs):
        """Build the subcommand from parsed argparse values (passed as kwargs)."""
        super(Verify, self).__init__(**kwargs)

        self.test_plan_path: Path = kwargs.pop("testplan")
        # NOTE(review): "state" is popped with a None default but `.upper()` is
        # called unconditionally; the argparse default of "steady" presumably
        # guarantees the key is always present -- confirm before reusing this
        # class outside the CLI.
        self.state: SystemState = SystemState(kwargs.pop("state", None).upper())

        self.dump_yaml: Optional[Path] = kwargs.pop("dump_yaml", None)
        self.dump_json: Optional[Path] = kwargs.pop("dump_json", None)

        self.state_data_path: Optional[Path] = kwargs.pop("state_data", None)

    @classmethod
    def build_parser(cls, parser: ArgumentParser) -> ArgumentParser:
        """Attach the verify-specific CLI arguments to *parser* and return it."""
        parser = super(Verify, cls).build_parser(parser)
        parser.add_argument(
            "-s",
            "--state",
            choices=[x.value.lower() for x in list(SystemState)],
            help="System state to verify",
            default="steady",
            metavar="state",
        )

        # Optional machine-readable reports of the verification run.
        report_argument_group = parser.add_argument_group("verification reports")
        report_argument_group.add_argument(
            "--dump-yaml",
            type=Path,
            help="Store the verification data in YAML format",
            required=False,
            metavar="path",
        )
        report_argument_group.add_argument(
            "--dump-json",
            type=Path,
            help="Store the verification data in JSON format",
            required=False,
            metavar="path",
        )

        parser.add_argument(
            "--state-data",
            type=Path,
            help="The path of the verification data state file (JSON/YAML)",
            required=False,
            metavar="path",
        )

        return parser

    def get_state_data(self):
        """Load the state-data file at ``self.state_data_path``.

        Returns the parsed data, or None (after setting exitcode 1 and printing
        a console message) when the path is a directory or does not exist.
        """
        self.console.log("Getting state data")
        self.console.line()
        try:
            import yaml

            path = Path(self.state_data_path)
            with open(path, "r") as file:
                # safe_load parses both YAML and JSON (JSON is a YAML subset)
                state_data = yaml.safe_load(file)
            return state_data

        except IsADirectoryError as is_directory:
            self.set_exitcode(1)
            self.console.print(
                f":file_folder: The input path ({self.state_data_path}) is not a valid state data file",
                style="indian_red",
            )

        except FileNotFoundError as file_not_found_error:
            self.set_exitcode(1)
            self.console.print(
                ":mag: {file} [italic]not found[/italic]".format(
                    file=str(self.state_data_path)
                ),
                style="indian_red",
            )

        return None

    @validate_arguments(config=dict(arbitrary_types_allowed=True))
    def _generate_verification_report(
        self,
        verification_controller: VerificationController,
        output_format: str,
        report_file_path: Union[Path, str],
    ):
        """Write the controller's verification data to *report_file_path*.

        Creates parent directories as needed; permission problems are reported
        on the console but never raised.
        """
        self.console.log(
            f"Writing {self.state.value.lower()} state verification data to {report_file_path}, format={output_format}"
        )
        report_output_base_path = Path(report_file_path).parent

        try:
            if not report_output_base_path.is_dir():
                report_output_base_path.mkdir(parents=True, exist_ok=True)
        except PermissionError as permission_error:
            self.console.log(
                ":file_folder: [italic]Permission denied to create report directory[/italic]",
                style="indian_red",
            )
            return

        try:
            with open(report_file_path, "w") as fp:
                verification_controller.dump_verification(
                    fp, output_format=output_format
                )
        except PermissionError as permission_error:
            self.console.log(
                ":file_folder: [italic]Permission denied to create report file[/italic]",
                style="indian_red",
            )

    def verify_system_state(self):
        """Run all verification plugins for the configured state.

        Sets exitcode 0 when the system verifies, 1 otherwise, and optionally
        dumps JSON/YAML reports. Returns early (with a non-zero exitcode) if
        the testplan or state-data file fails validation/loading.
        """
        testplan = self.get_validated_test_plan(self.test_plan_path)
        if self._exitcode != 0:
            return

        state_data = list()
        if self.state_data_path:
            state_data = self.get_state_data()
            if self._exitcode != 0:
                return

        # section Hooks
        # Console-logging hooks registered on the controller; each one prints
        # progress for a lifecycle event of the plugin run.

        class VerificationHook(YChaosCLIHook, ABC):
            def __init__(self, app, state: SystemState):
                super(VerificationHook, self).__init__(app)
                self.state = state

        class OnEachPluginStartHook(VerificationHook):
            def __call__(self, index: int, config: VerificationConfig):
                self.console.log(
                    f"Running [i]{self.state.value.lower()}[/i] state verification of type={config.type.value}[{index}]"
                )

        class OnPluginNotFoundHook(VerificationHook):
            def __call__(self, index: int, plugin_type: VerificationType):
                self.console.log(
                    f"The verification plugin type=[i]{plugin_type.value}[/i][{index}] is not available for use."
                )

        class OnEachPluginEndHook(VerificationHook):
            def __call__(
                self,
                index: int,
                config: VerificationConfig,
                verified_state_data: VerificationStateData,
            ):
                self.console.log(
                    (
                        f"Completed [i]{self.state.value.lower()}[/i] state verification of type={config.type.value};"
                        f" verified={verified_state_data.rc==0}"
                    )
                )

        # end section

        verification_controller = VerificationController(
            testplan, self.state, state_data
        )
        verification_controller.register_hook(
            "on_each_plugin_start", OnEachPluginStartHook(self.app, self.state)
        )
        verification_controller.register_hook(
            "on_each_plugin_end", OnEachPluginEndHook(self.app, self.state)
        )
        verification_controller.register_hook(
            "on_plugin_not_found", OnPluginNotFoundHook(self.app, self.state)
        )

        self.console.log(
            f"Starting [i]{self.state.value.lower()}[/i] state verification."
        )
        is_verified = verification_controller.execute()
        # Exitcode 0 iff every plugin verified the system.
        self.set_exitcode(int(not is_verified))

        self.console.line()
        if is_verified:
            self.console.print(
                f"The system is verified to be in {self.state.value.lower()} state",
                style="green",
            )
        else:
            self.console.print(
                f"The system is not verified to be in {self.state.value.lower()} state",
                style="red",
            )

        self.console.line()
        # Optional report dumps, controlled by --dump-json / --dump-yaml.
        if self.dump_json:
            self._generate_verification_report(
                verification_controller,
                report_file_path=self.dump_json,
                output_format="json",
            )
        if self.dump_yaml:
            self._generate_verification_report(
                verification_controller,
                report_file_path=self.dump_yaml,
                output_format="yaml",
            )

    @classmethod
    def main(cls, args: Namespace) -> Any:  # pragma: no cover
        """CLI entry point: run the verification and return the exit code."""
        verification_command = Verify(**vars(args))

        verification_command.verify_system_state()

        return verification_command._exitcode
e678937ffa958feedad60c6818f9966146fc7fd7 | 229 | py | Python | tests/list/list03.py | ktok07b6/polyphony | 657c5c7440520db6b4985970bd50547407693ac4 | [
"MIT"
] | 83 | 2015-11-30T09:59:13.000Z | 2021-08-03T09:12:28.000Z | tests/list/list03.py | jesseclin/polyphony | 657c5c7440520db6b4985970bd50547407693ac4 | [
"MIT"
] | 4 | 2017-02-10T01:43:11.000Z | 2020-07-14T03:52:25.000Z | tests/list/list03.py | jesseclin/polyphony | 657c5c7440520db6b4985970bd50547407693ac4 | [
"MIT"
] | 11 | 2016-11-18T14:39:15.000Z | 2021-02-23T10:05:20.000Z | from polyphony import testbench
def list03(x, y, z):
    """Return the updated value stored at index *x*.

    Starting from the fixed list [1, 2, 3], reads index *y*, adds *z*, writes
    the result to index *x* and returns it (the read happens before the write,
    so the result is always original[y] + z).
    """
    values = [1, 2, 3]
    updated = values[y] + z
    values[x] = updated
    return values[x]
@testbench
def test():
    # Polyphony hardware testbench: list03 returns a[y] + z for a = [1, 2, 3]
    # (the write to a[x] happens after the read), so (0, 1, 2) -> 2 + 2 and
    # (2, 1, 3) -> 2 + 3.
    assert 4 == list03(0, 1 ,2)
    assert 5 == list03(2, 1 ,3)
test()
| 14.3125 | 31 | 0.515284 |
646352aaa461f485591782ef3ba1c496aa41e966 | 330 | py | Python | python/reflection.py | MeshFEM/MeshFEM | 9b3619fa450d83722879bfd0f5a3fe69d927bd63 | [
"MIT"
] | 19 | 2020-10-21T10:05:17.000Z | 2022-03-20T13:41:50.000Z | python/reflection.py | MeshFEM/MeshFEM | 9b3619fa450d83722879bfd0f5a3fe69d927bd63 | [
"MIT"
] | 4 | 2021-01-01T15:58:15.000Z | 2021-09-19T03:31:09.000Z | python/reflection.py | MeshFEM/MeshFEM | 9b3619fa450d83722879bfd0f5a3fe69d927bd63 | [
"MIT"
] | 4 | 2020-10-05T09:01:50.000Z | 2022-01-11T03:02:39.000Z | import inspect
def hasArg(func, argName):
    """Return True if callable *func* accepts a parameter named *argName*.

    Uses :func:`inspect.signature` when available; for callables whose
    signature cannot be introspected (e.g. pybind11-generated functions),
    falls back to searching the docstring for the argument name.
    """
    try:
        return argName in inspect.signature(func).parameters
    # BUG FIX: was a bare `except:` (which also swallowed KeyboardInterrupt /
    # SystemExit); inspect.signature raises ValueError when no signature can be
    # found and TypeError for unsupported objects, so catch exactly those.
    except (TypeError, ValueError):
        # Pybind11 methods/funcs apparently don't support `inspect.signature`,
        # but at least their arg names are guaranteed to appear in the docstring.
        # BUG FIX: guard against a missing docstring -- `argName in None`
        # previously raised TypeError here.
        return func.__doc__ is not None and argName in func.__doc__
| 33 | 86 | 0.690909 |
34067c62dd3381b045eefc821641d5bb97c16723 | 931 | py | Python | mars/dataframe/arithmetic/exp.py | hxri/mars | f7864f00911883b94800b63856f0e57648d3d9b4 | [
"Apache-2.0"
] | 2,413 | 2018-12-06T09:37:11.000Z | 2022-03-30T15:47:39.000Z | mars/dataframe/arithmetic/exp.py | hxri/mars | f7864f00911883b94800b63856f0e57648d3d9b4 | [
"Apache-2.0"
] | 1,335 | 2018-12-07T03:06:18.000Z | 2022-03-31T11:45:57.000Z | mars/dataframe/arithmetic/exp.py | hxri/mars | f7864f00911883b94800b63856f0e57648d3d9b4 | [
"Apache-2.0"
] | 329 | 2018-12-07T03:12:41.000Z | 2022-03-29T21:49:57.000Z | # Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ... import opcodes as OperandDef
from ...utils import classproperty
from .core import DataFrameUnaryUfunc
class DataFrameExp(DataFrameUnaryUfunc):
    """Element-wise natural exponential (``exp``) over a DataFrame."""
    _op_type_ = OperandDef.EXP  # operand opcode for this operation
    _func_name = 'exp'          # name of the underlying ufunc

    @classproperty
    def tensor_op_type(self):
        """Tensor-level operand class this DataFrame op corresponds to."""
        from ...tensor.arithmetic import TensorExp
        return TensorExp
| 33.25 | 74 | 0.75188 |
3115f9c095f6c5419d069eec3e613ceed901cc59 | 2,743 | py | Python | tests/test_utility_indices.py | sirmarcel/cmlk | e099bf3e255b60675e8e1b3ad29db750dbd6faf3 | [
"MIT"
] | 24 | 2018-06-22T17:31:20.000Z | 2022-03-29T14:52:49.000Z | tests/test_utility_indices.py | sirmarcel/cmlk | e099bf3e255b60675e8e1b3ad29db750dbd6faf3 | [
"MIT"
] | 8 | 2019-06-06T14:51:57.000Z | 2021-06-30T19:43:13.000Z | tests/test_utility_indices.py | sirmarcel/cmlk | e099bf3e255b60675e8e1b3ad29db750dbd6faf3 | [
"MIT"
] | 5 | 2018-07-30T18:53:08.000Z | 2022-02-10T09:12:15.000Z | from unittest import TestCase
from cmlkit.utility.indices import *
class TestFourwaySplit(TestCase):
    """fourway_split(n, ...) must partition range(n) into rest/train/test/valid."""

    def setUp(self):
        self.n = 40
        self.k_train = 7
        self.k_test = 5
        self.k_valid = 15
        self.a, self.b, self.c, self.d = fourway_split(
            self.n, self.k_train, self.k_test, self.k_valid
        )

    def test_sizes(self):
        # The remainder split gets whatever is left after the three named splits.
        self.assertEqual(
            [len(self.a), len(self.b), len(self.c), len(self.d)],
            [40 - 5 - 15 - 7, 7, 5, 15],
        )

    def test_union_is_all(self):
        union = np.union1d(self.a, self.b)
        union = np.union1d(union, self.c)
        union = np.union1d(union, self.d)
        # BUG FIX: previously compared `union.all()` with `arange(n).all()` --
        # both are False whenever index 0 is present, so the assertion passed
        # vacuously. Compare the arrays themselves (np.union1d returns a
        # sorted array of unique values, so it should equal arange(n)).
        self.assertTrue(np.array_equal(union, np.arange(self.n)))
class TestTwowaySplit(TestCase):
    """twoway_split(n, k) must partition range(n) into rest/test."""

    def setUp(self):
        self.n = 40
        self.k_test = 10
        self.a, self.b = twoway_split(self.n, self.k_test)

    def test_sizes(self):
        self.assertEqual([len(self.a), len(self.b)], [40 - 10, 10])

    def test_union_is_all(self):
        union = np.union1d(self.a, self.b)
        # BUG FIX: `assertEqual(union.all(), arange(n).all())` compared
        # False == False whenever 0 is present -- vacuous. Compare the arrays.
        self.assertTrue(np.array_equal(union, np.arange(self.n)))
class TestThreewaySplit(TestCase):
    """threeway_split(n, ...) must partition range(n) into rest/test/valid."""

    def setUp(self):
        self.n = 40
        self.k_test = 5
        self.k_valid = 15
        self.a, self.b, self.c = threeway_split(self.n, self.k_test, self.k_valid)

    def test_sizes(self):
        self.assertEqual([len(self.a), len(self.b), len(self.c)], [40 - 5 - 15, 5, 15])

    def test_union_is_all(self):
        union = np.union1d(self.a, self.b)
        union = np.union1d(union, self.c)
        # BUG FIX: `assertEqual(union.all(), arange(n).all())` compared
        # False == False whenever 0 is present -- vacuous. Compare the arrays.
        self.assertTrue(np.array_equal(union, np.arange(self.n)))
class TestGenerateIndices(TestCase):
    """generate_indices must accept an int or an index array, minus exclusions."""

    def test_make_range_if_int(self):
        ind = generate_indices(6, [])
        # BUG FIX: `assertEqual(ind.all(), arange(6).all())` compared
        # False == False (index 0 makes .all() False) -- vacuous. Compare arrays.
        self.assertTrue(np.array_equal(ind, np.arange(6)))

    def test_pass_through_index_array(self):
        ind = generate_indices(np.arange(6), [])
        # BUG FIX: same vacuous `.all()` comparison as above.
        self.assertTrue(np.array_equal(ind, np.arange(6)))

    def test_exclude(self):
        ind = generate_indices(6, [3])
        self.assertFalse(3 in ind)
class TestGenerateDistinctSets(TestCase):
    """generate_distinct_sets(full, k) must split *full* into two disjoint sets."""

    def test_element_numbers_sum_up(self):
        n = 78
        full = np.arange(n)
        k = 64
        a, b = generate_distinct_sets(full, k)
        self.assertEqual(len(a) + len(b), n)

    def test_union_is_all(self):
        n = 78
        full = np.arange(n)
        k = 3
        a, b = generate_distinct_sets(full, k)
        # BUG FIX: previously compared `.all()` results (False == False with 0
        # present) -- vacuous. The sorted unique union must equal the input.
        self.assertTrue(np.array_equal(np.union1d(a, b), full))

    def test_set_disjunct(self):
        n = 78
        full = np.arange(n)
        k = 3
        a, b = generate_distinct_sets(full, k)
        self.assertTrue(np.intersect1d(a, b).size == 0)
cc492e5d3f64a2fffbf38742929ba774f90ae95b | 598 | py | Python | alembic/versions/4713e7ebca9_add_task_status_links.py | Vjrx/airship-drydock | 315fb9864e6d55a66d5266f76c160be55d22c98b | [
"Apache-2.0"
] | 14 | 2018-05-19T11:58:22.000Z | 2019-05-10T12:31:36.000Z | alembic/versions/4713e7ebca9_add_task_status_links.py | Vjrx/airship-drydock | 315fb9864e6d55a66d5266f76c160be55d22c98b | [
"Apache-2.0"
] | 10 | 2019-11-12T17:21:16.000Z | 2021-11-10T18:16:06.000Z | alembic/versions/4713e7ebca9_add_task_status_links.py | Vjrx/airship-drydock | 315fb9864e6d55a66d5266f76c160be55d22c98b | [
"Apache-2.0"
] | 11 | 2018-06-05T16:21:18.000Z | 2019-04-03T11:44:34.000Z | """add task status links
Revision ID: 4713e7ebca9
Revises: 4a5bef3702b
Create Date: 2018-07-05 14:54:18.381988
"""
# revision identifiers, used by Alembic.
revision = '4713e7ebca9'
down_revision = '4a5bef3702b'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from drydock_provisioner.statemgmt.db import tables
def upgrade():
    # Add every task result-link column declared on the Tasks table model.
    for c in tables.Tasks.__add_result_links__:
        op.add_column(tables.Tasks.__tablename__, c)
def downgrade():
    # Drop the columns added by upgrade(), matching them by name.
    for c in tables.Tasks.__add_result_links__:
        op.drop_column(tables.Tasks.__tablename__, c.name)
| 20.62069 | 58 | 0.757525 |
1ad1e7f153cdfca9ebc806f8161206de810ea5b4 | 9,500 | py | Python | test/python/docker/compat/test_containers.py | hmarsanch1/podman | 7a0d94837a15a195f33e82d18a4d8f7d8e65e0f5 | [
"Apache-2.0"
] | 8,575 | 2020-07-07T20:13:34.000Z | 2022-03-31T23:54:14.000Z | test/python/docker/compat/test_containers.py | hmarsanch1/podman | 7a0d94837a15a195f33e82d18a4d8f7d8e65e0f5 | [
"Apache-2.0"
] | 6,911 | 2020-07-07T19:20:47.000Z | 2022-03-31T21:28:02.000Z | test/python/docker/compat/test_containers.py | hmarsanch1/podman | 7a0d94837a15a195f33e82d18a4d8f7d8e65e0f5 | [
"Apache-2.0"
] | 1,066 | 2020-07-08T10:03:02.000Z | 2022-03-31T20:31:01.000Z | import io
import subprocess
import sys
import time
import unittest
from typing import IO, Optional
from docker import DockerClient, errors
from docker.models.containers import Container
from docker.models.images import Image
from test.python.docker import Podman
from test.python.docker.compat import common, constant
import tarfile
class TestContainers(unittest.TestCase):
podman = None # initialized podman configuration for tests
service = None # podman service instance
topContainerId = ""
    def setUp(self):
        """Connect to the podman REST service and start one 'top' container."""
        super().setUp()
        self.client = DockerClient(base_url="tcp://127.0.0.1:8080", timeout=15)
        TestContainers.podman.restore_image_from_cache(self.client)
        TestContainers.topContainerId = common.run_top_container(self.client)
        self.assertIsNotNone(TestContainers.topContainerId)
    def tearDown(self):
        """Remove all containers and images created by the test; close the client."""
        common.remove_all_containers(self.client)
        common.remove_all_images(self.client)
        self.client.close()
        return super().tearDown()
    @classmethod
    def setUpClass(cls):
        """Launch `podman system service` on tcp:127.0.0.1:8080 for the class."""
        super().setUpClass()
        TestContainers.podman = Podman()
        TestContainers.service = TestContainers.podman.open(
            "system", "service", "tcp:127.0.0.1:8080", "--time=0"
        )
        # give the service some time to be ready...
        time.sleep(2)

        # Fail immediately if the service process already exited.
        rc = TestContainers.service.poll()
        if rc is not None:
            raise subprocess.CalledProcessError(rc, "podman system service")
    @classmethod
    def tearDownClass(cls):
        """Stop the podman service, echo its captured output, and clean up."""
        TestContainers.service.terminate()
        stdout, stderr = TestContainers.service.communicate(timeout=0.5)
        if stdout:
            sys.stdout.write("\nContainers Service Stdout:\n" + stdout.decode("utf-8"))
        if stderr:
            sys.stderr.write("\nContainers Service Stderr:\n" + stderr.decode("utf-8"))

        TestContainers.podman.tear_down()
        return super().tearDownClass()
    def test_create_container(self):
        """Creating a detached container brings the total to two (top + new)."""
        # Run a container with detach mode
        self.client.containers.create(image="alpine", detach=True)
        self.assertEqual(len(self.client.containers.list(all=True)), 2)
    def test_create_network(self):
        """Create a bridge network and a container (connect assertions disabled)."""
        # NOTE(review): `net` and `ctnr` are unused while the connect/disconnect
        # assertions below remain commented out.
        net = self.client.networks.create("testNetwork", driver="bridge")
        ctnr = self.client.containers.create(image="alpine", detach=True)

        # TODO fix when ready
        # This test will not work until all connect|disconnect
        # code is fixed.
        # net.connect(ctnr)

        # nets = self.client.networks.list(greedy=True)
        # self.assertGreaterEqual(len(nets), 1)

        # TODO fix endpoint to include containers
        # for n in nets:
        #     if n.id == "testNetwork":
        #         self.assertEqual(ctnr.id, n.containers)
        # self.assertTrue(False, "testNetwork not found")
    def test_start_container(self):
        """Creating a second named container yields a count of two."""
        # Podman docs says it should give a 304 but returns with no response
        # # Start a already started container should return 304
        # response = self.client.api.start(container=TestContainers.topContainerId)
        # self.assertEqual(error.exception.response.status_code, 304)

        # Create a new container and validate the count
        self.client.containers.create(image=constant.ALPINE, name="container2")
        containers = self.client.containers.list(all=True)
        self.assertEqual(len(containers), 2)
    def test_start_container_with_random_port_bind(self):
        """A container created with a host-random port binding must be listed."""
        container = self.client.containers.create(
            image=constant.ALPINE,
            name="containerWithRandomBind",
            ports={"1234/tcp": None},  # None => random host port
        )
        containers = self.client.containers.list(all=True)
        self.assertTrue(container in containers)
    def test_stop_container(self):
        """Stopping the running 'top' container moves it to stopped/exited."""
        top = self.client.containers.get(TestContainers.topContainerId)
        self.assertEqual(top.status, "running")

        # Stop a running container and validate the state
        top.stop()
        top.reload()
        self.assertIn(top.status, ("stopped", "exited"))
    def test_kill_container(self):
        """Killing the running 'top' container moves it to stopped/exited."""
        top = self.client.containers.get(TestContainers.topContainerId)
        self.assertEqual(top.status, "running")

        # Kill a running container and validate the state
        top.kill()
        top.reload()
        self.assertIn(top.status, ("stopped", "exited"))
    def test_restart_container(self):
        """Restarting a stopped container brings it back to 'running'."""
        # Validate the container state
        top = self.client.containers.get(TestContainers.topContainerId)
        top.stop()
        top.reload()
        self.assertIn(top.status, ("stopped", "exited"))

        # restart a running container and validate the state
        top.restart()
        top.reload()
        self.assertEqual(top.status, "running")
def test_remove_container(self):
# Remove container by ID with force
top = self.client.containers.get(TestContainers.topContainerId)
top.remove(force=True)
self.assertEqual(len(self.client.containers.list()), 0)
def test_remove_container_without_force(self):
# Validate current container count
self.assertEqual(len(self.client.containers.list()), 1)
# Remove running container should throw error
top = self.client.containers.get(TestContainers.topContainerId)
with self.assertRaises(errors.APIError) as error:
top.remove()
self.assertEqual(error.exception.response.status_code, 500)
# Remove container by ID without force
top.stop()
top.remove()
self.assertEqual(len(self.client.containers.list()), 0)
def test_pause_container(self):
# Validate the container state
top = self.client.containers.get(TestContainers.topContainerId)
self.assertEqual(top.status, "running")
# Pause a running container and validate the state
top.pause()
top.reload()
self.assertEqual(top.status, "paused")
def test_pause_stopped_container(self):
# Stop the container
top = self.client.containers.get(TestContainers.topContainerId)
top.stop()
# Pause exited container should throw error
with self.assertRaises(errors.APIError) as error:
top.pause()
self.assertEqual(error.exception.response.status_code, 500)
def test_unpause_container(self):
top = self.client.containers.get(TestContainers.topContainerId)
# Validate the container state
top.pause()
top.reload()
self.assertEqual(top.status, "paused")
# Pause a running container and validate the state
top.unpause()
top.reload()
self.assertEqual(top.status, "running")
def test_list_container(self):
# Add container and validate the count
self.client.containers.create(image="alpine", detach=True)
containers = self.client.containers.list(all=True)
self.assertEqual(len(containers), 2)
def test_filters(self):
self.skipTest("TODO Endpoint does not yet support filters")
# List container with filter by id
filters = {"id": TestContainers.topContainerId}
ctnrs = self.client.containers.list(all=True, filters=filters)
self.assertEqual(len(ctnrs), 1)
# List container with filter by name
filters = {"name": "top"}
ctnrs = self.client.containers.list(all=True, filters=filters)
self.assertEqual(len(ctnrs), 1)
    def test_copy_to_container(self):
        """put_archive() must preserve file content and tar-recorded uid/gid."""
        ctr: Optional[Container] = None
        try:
            test_file_content = b"Hello World!"
            ctr = self.client.containers.create(image="alpine", detach=True, command="top")
            ctr.start()
            # Build an in-memory, uncompressed tar with a single file whose
            # ownership (1042:1043) differs from root so the check is meaningful.
            buff: IO[bytes] = io.BytesIO()
            with tarfile.open(fileobj=buff, mode="w:") as tf:
                ti: tarfile.TarInfo = tarfile.TarInfo()
                ti.uid = 1042
                ti.gid = 1043
                ti.name = "a.txt"
                ti.path = "a.txt"
                ti.mode = 0o644
                ti.type = tarfile.REGTYPE
                ti.size = len(test_file_content)
                tf.addfile(ti, fileobj=io.BytesIO(test_file_content))
            # Rewind before handing the buffer to the API.
            buff.seek(0)
            ctr.put_archive("/tmp/", buff)
            # Ownership inside the container must match the tar header.
            ret, out = ctr.exec_run(["stat", "-c", "%u:%g", "/tmp/a.txt"])
            self.assertEqual(ret, 0)
            self.assertEqual(out.rstrip(), b'1042:1043', "UID/GID of copied file")
            # Content must round-trip unchanged.
            ret, out = ctr.exec_run(["cat", "/tmp/a.txt"])
            self.assertEqual(ret, 0)
            self.assertEqual(out.rstrip(), test_file_content, "Content of copied file")
        finally:
            # Always clean up the container, even on assertion failure.
            if ctr is not None:
                ctr.stop()
                ctr.remove()
    def test_mount_preexisting_dir(self):
        """A named volume mounted over a pre-existing image dir must keep its ownership."""
        # Image creates /workspace owned by 1042:1043 before the volume mount.
        dockerfile = (B'FROM quay.io/libpod/alpine:latest\n'
                      B'USER root\n'
                      B'RUN mkdir -p /workspace\n'
                      B'RUN chown 1042:1043 /workspace')
        img: Image
        img, out = self.client.images.build(fileobj=io.BytesIO(dockerfile))
        ctr: Container = self.client.containers.create(image=img.id, detach=True, command="top",
                                                       volumes=["test_mount_preexisting_dir_vol:/workspace"])
        ctr.start()
        # NOTE(review): exec_run's return code `ret` is not asserted here,
        # only the stat output — presumably intentional; verify.
        ret, out = ctr.exec_run(["stat", "-c", "%u:%g", "/workspace"])
        self.assertEqual(out.rstrip(), b'1042:1043', "UID/GID set in dockerfile")
| 37.401575 | 109 | 0.632632 |
a2899a4876e40cd410e431a9d118b099e855c222 | 2,210 | py | Python | model/lookahead.py | 17854212083/MSCANet | 4dd3aa8a85e16ae9eb15c87ab5dd5a7158417cb2 | [
"MIT"
] | null | null | null | model/lookahead.py | 17854212083/MSCANet | 4dd3aa8a85e16ae9eb15c87ab5dd5a7158417cb2 | [
"MIT"
] | null | null | null | model/lookahead.py | 17854212083/MSCANet | 4dd3aa8a85e16ae9eb15c87ab5dd5a7158417cb2 | [
"MIT"
] | null | null | null | #! -*- coding: utf-8 -*-
from keras import backend as K
class Lookahead(object):
    """Wrap a compiled Keras model's train function with Lookahead updates.

    "Fast" (regular optimizer) weights are updated on every train step; a
    shadow set of "slow" weights is pulled toward the fast weights by factor
    ``alpha`` every ``k`` steps, and the fast weights are then reset to the
    slow ones.
    """
    def __init__(self, k=5, alpha=0.5):
        # k: number of fast steps between slow-weight synchronizations.
        self.k = k
        # alpha: interpolation step size for the slow weights.
        self.alpha = alpha
        # count: fast train steps taken so far.
        self.count = 0
    def inject(self, model):
        """Replace ``model.train_function`` with a Lookahead-aware version.

        Must be called after ``model.compile()`` and before training; only
        patches the model when ``train_function`` has not been built yet.
        """
        if not hasattr(model, 'train_function'):
            raise RuntimeError('You must compile your model before using it.')
        model._check_trainable_weights_consistency()
        if model.train_function is None:
            # Same feed list Keras would build for the default train function.
            inputs = (model._feed_inputs +
                      model._feed_targets +
                      model._feed_sample_weights)
            if model._uses_dynamic_learning_phase():
                inputs += [K.learning_phase()]
            fast_params = model._collected_trainable_weights
            with K.name_scope('training'):
                with K.name_scope(model.optimizer.__class__.__name__):
                    training_updates = model.optimizer.get_updates(
                        params=fast_params,
                        loss=model.total_loss)
                # One shadow ("slow") variable per trainable weight,
                # initialized from the current fast value.
                slow_params = [K.variable(p) for p in fast_params]
            fast_updates = (model.updates +
                            training_updates +
                            model.metrics_updates)
            slow_updates, copy_updates = [], []
            for p, q in zip(fast_params, slow_params):
                # slow <- slow + alpha * (fast - slow); then fast <- slow.
                slow_updates.append(K.update(q, q + self.alpha * (p - q)))
                copy_updates.append(K.update(p, q))
            # Gets loss and metrics. Updates weights at each call.
            fast_train_function = K.function(
                inputs,
                [model.total_loss] + model.metrics_tensors,
                updates=fast_updates,
                name='fast_train_function',
                **model._function_kwargs)
            def F(inputs):
                # Run a fast step; every k-th call also run the slow
                # interpolation followed by the copy-back.
                self.count += 1
                R = fast_train_function(inputs)
                if self.count % self.k == 0:
                    K.batch_get_value(slow_updates)
                    K.batch_get_value(copy_updates)
                return R
            model.train_function = F
| 37.457627 | 78 | 0.511312 |
bf699cfd0e8ba65b2e7a29ddedb3fa273019dbf6 | 4,237 | py | Python | sendmail/urllib3/util/connection.py | flows-app/FLOWS_util | 8f310a7e38f3b6fc9d69f96d7fcc77cc7d866cd5 | [
"Apache-2.0"
] | 2,151 | 2020-04-18T07:31:17.000Z | 2022-03-31T08:39:18.000Z | office_hours/dependencies/requests/packages/urllib3/util/connection.py | ikottman/alexa-skills | b7322aaf63ea8172af960a1f5f9fa5dc0bc41cf1 | [
"Unlicense"
] | 4,640 | 2015-07-08T16:19:08.000Z | 2019-12-02T15:01:27.000Z | office_hours/dependencies/requests/packages/urllib3/util/connection.py | ikottman/alexa-skills | b7322aaf63ea8172af960a1f5f9fa5dc0bc41cf1 | [
"Unlicense"
] | 698 | 2015-06-02T19:18:35.000Z | 2022-03-29T16:57:15.000Z | from __future__ import absolute_import
import socket
from .wait import wait_for_read
from .selectors import HAS_SELECT, SelectorError
def is_connection_dropped(conn):  # Platform-specific
    """
    Returns True if the connection is dropped and should be closed.
    :param conn:
        :class:`httplib.HTTPConnection` object.
    Note: For platforms like AppEngine, this will always return ``False`` to
    let the platform handle connection recycling transparently for us.
    """
    sock = getattr(conn, 'sock', False)
    if sock is False:
        # Platform-specific: AppEngine exposes no socket at all.
        return False
    if sock is None:
        # Connection already closed (such as by httplib).
        return True
    if not HAS_SELECT:
        return False
    try:
        readable = wait_for_read(sock, timeout=0.0)
    except SelectorError:
        return True
    return bool(readable)
# This function is copied from socket.py in the Python 2.7 standard
# library test suite. Added to its signature is only `socket_options`.
# One additional modification is that we avoid binding to IPv6 servers
# discovered in DNS if the system doesn't have IPv6 functionality.
def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
                      source_address=None, socket_options=None):
    """Connect to *address* and return the socket object.
    Convenience function.  Connect to *address* (a 2-tuple ``(host,
    port)``) and return the socket object.  Passing the optional
    *timeout* parameter will set the timeout on the socket instance
    before attempting to connect.  If no *timeout* is supplied, the
    global default timeout setting returned by :func:`getdefaulttimeout`
    is used.  If *source_address* is set it must be a tuple of (host, port)
    for the socket to bind as a source address before making the connection.
    An host of '' or port 0 tells the OS to use the default.
    """
    host, port = address
    # Strip brackets from IPv6 literal addresses like "[::1]".
    if host.startswith('['):
        host = host.strip('[]')
    err = None
    # Using the value from allowed_gai_family() in the context of getaddrinfo lets
    # us select whether to work with IPv4 DNS records, IPv6 records, or both.
    # The original create_connection function always returns all records.
    family = allowed_gai_family()
    # Try every resolved address until one connects; remember the last error.
    for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM):
        af, socktype, proto, canonname, sa = res
        sock = None
        try:
            sock = socket.socket(af, socktype, proto)
            # If provided, set socket level options before connecting.
            _set_socket_options(sock, socket_options)
            if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
                sock.settimeout(timeout)
            if source_address:
                sock.bind(source_address)
            sock.connect(sa)
            return sock
        except socket.error as e:
            err = e
            # Close the half-constructed socket before trying the next record.
            if sock is not None:
                sock.close()
                sock = None
    if err is not None:
        raise err
    # getaddrinfo returned no records at all.
    raise socket.error("getaddrinfo returns an empty list")
def _set_socket_options(sock, options):
if options is None:
return
for opt in options:
sock.setsockopt(*opt)
def allowed_gai_family():
    """This function is designed to work in the context of
    getaddrinfo, where family=socket.AF_UNSPEC is the default and
    will perform a DNS search for both IPv6 and IPv4 records."""
    # Only ask for both record families when the system can bind IPv6.
    if HAS_IPV6:
        return socket.AF_UNSPEC
    return socket.AF_INET
def _has_ipv6(host):
""" Returns True if the system can bind an IPv6 address. """
sock = None
has_ipv6 = False
if socket.has_ipv6:
# has_ipv6 returns true if cPython was compiled with IPv6 support.
# It does not tell us if the system has IPv6 support enabled. To
# determine that we must bind to an IPv6 address.
# https://github.com/shazow/urllib3/pull/611
# https://bugs.python.org/issue658327
try:
sock = socket.socket(socket.AF_INET6)
sock.bind((host, 0))
has_ipv6 = True
except Exception:
pass
if sock:
sock.close()
return has_ipv6
# Probe once at import time whether this system can bind an IPv6 loopback
# address; consumed by allowed_gai_family().
HAS_IPV6 = _has_ipv6('::1')
| 32.343511 | 82 | 0.660137 |
37111f4570fe1648f2741131d81b1b85aeab3caf | 117 | py | Python | hypothesis/benchmark/mg1/__init__.py | boyali/hypothesis-sre | f44d25eb281d49663d49d134ee73ad542849714b | [
"BSD-3-Clause"
] | 45 | 2019-02-13T14:16:35.000Z | 2022-02-23T21:30:02.000Z | hypothesis/benchmark/mg1/__init__.py | boyali/hypothesis-sre | f44d25eb281d49663d49d134ee73ad542849714b | [
"BSD-3-Clause"
] | 1 | 2020-01-13T08:29:50.000Z | 2020-01-22T10:28:02.000Z | hypothesis/benchmark/mg1/__init__.py | boyali/hypothesis-sre | f44d25eb281d49663d49d134ee73ad542849714b | [
"BSD-3-Clause"
] | 8 | 2019-04-23T14:25:08.000Z | 2021-07-28T15:05:31.000Z | from .simulator import MG1Simulator
from .util import Prior
from .util import Truth
from .util import log_likelihood
| 23.4 | 35 | 0.82906 |
9d25760ca8a7d41e5a8b30526061ebf6f207a215 | 656 | py | Python | runs/src2-tgt1/par-nobro-iter07000.cfg.py | janpawellek/broeval | 57e31aa6e354d0bba88103b44910483e8d982d00 | [
"MIT"
] | null | null | null | runs/src2-tgt1/par-nobro-iter07000.cfg.py | janpawellek/broeval | 57e31aa6e354d0bba88103b44910483e8d982d00 | [
"MIT"
] | null | null | null | runs/src2-tgt1/par-nobro-iter07000.cfg.py | janpawellek/broeval | 57e31aa6e354d0bba88103b44910483e8d982d00 | [
"MIT"
] | null | null | null |
# Per-run configuration for one broeval benchmark evaluation
# (2 sources -> 1 target, no Bro, parallel, 7000 iterations).
# Write results to this file
OUTFILE = 'runs/src2-tgt1/par-nobro-iter07000.result.csv'
# Source computers for the requests
SOURCE = ['10.0.0.1', '10.0.0.3']
# Should Bro be enabled on the source machines?
SOURCE_BRO = [False, False]
# Target machines for the requests (aka server)
TARGET = ['10.0.0.2']
# Should Bro be enabled on the target machines?
TARGET_BRO = [False]
# Connection mode (par = parallel, seq = sequential)
MODE = 'par'
# Number of evaluation repetitions to run
EPOCHS = 100
# Number of iterations to be run in each evaluation repetition
ITER = 7000
# Size of the file to be downloaded from target (in Bytes * 10^SIZE)
SIZE = 5
| 22.62069 | 68 | 0.717988 |
705fd80b9a8a9464811137eb6a10700c2acc6c76 | 3,028 | py | Python | performance/test_azure_data_lake_storage_gen2_origin.py | Pragatibs/datacollector-tests | aac53b2f0e056009ef0e437c8430651e3cf4d502 | [
"Apache-2.0"
] | 1 | 2021-11-24T16:17:25.000Z | 2021-11-24T16:17:25.000Z | performance/test_azure_data_lake_storage_gen2_origin.py | Pragatibs/datacollector-tests | aac53b2f0e056009ef0e437c8430651e3cf4d502 | [
"Apache-2.0"
] | null | null | null | performance/test_azure_data_lake_storage_gen2_origin.py | Pragatibs/datacollector-tests | aac53b2f0e056009ef0e437c8430651e3cf4d502 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 StreamSets Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from streamsets.testframework.markers import azure, sdc_min_version
from streamsets.testframework.utils import get_random_string
logger = logging.getLogger(__name__)
ADLS_GEN2_ORIGIN = 'com_streamsets_pipeline_stage_origin_datalake_gen2_DataLakeGen2DSource'
@azure('datalake')
@sdc_min_version('3.9.0')
def test_initial_scan(sdc_builder, sdc_executor, azure, keep_data):
    """Benchmark ADLS Gen2 origin's initial scan"""
    # Unique per-run directory so concurrent runs don't collide.
    directory_name = f'stf_perf_{get_random_string()}'
    fs = azure.datalake.file_system
    try:
        # Pipeline: ADLS Gen2 origin -> benchmark destination, reading every
        # file (recursively, timestamp order) under the run directory.
        pipeline_builder = sdc_builder.get_pipeline_builder()
        benchmark_stages = pipeline_builder.add_benchmark_stages()
        azure_data_lake_storage_gen2 = pipeline_builder.add_stage(name=ADLS_GEN2_ORIGIN)
        azure_data_lake_storage_gen2.set_attributes(data_format='TEXT',
                                                    files_directory=f'/{directory_name}',
                                                    file_name_pattern='*',
                                                    read_order='TIMESTAMP',
                                                    process_subdirectories=True)
        azure_data_lake_storage_gen2 >> benchmark_stages.destination
        pipeline = pipeline_builder.build().configure_for_environment(azure)
        # Populate the Azure directory with 100 subdirectories with 10 files each.
        fs.mkdir(directory_name)
        for _ in range(100):
            folder_name = get_random_string(length=10)
            for _ in range(10):
                file_name = "{}.txt".format(get_random_string(length=10))
                file_path = os.path.join(directory_name, folder_name, file_name)
                try:
                    logger.debug("Creating new file: %s ...", file_path)
                    # Each file's content is simply its own path.
                    res1 = fs.touch(file_path)
                    res2 = fs.write(file_path, file_path)
                    if not (res1.response.ok and res2.response.ok):
                        raise RuntimeError(f'Could not create file: {file_path}')
                except Exception as e:
                    # Best-effort population: log and keep going so the
                    # benchmark still runs with the files that did get created.
                    logger.error("Could not create file: %s: %s", file_path, str(e))
        sdc_executor.benchmark_pipeline(pipeline, record_count=1000)
    finally:
        # Clean up remote state unless the caller asked to keep it.
        if not keep_data:
            logger.info('Azure Data Lake directory %s and underlying files will be deleted.', directory_name)
            fs.rmdir(directory_name, recursive=True)
| 43.884058 | 109 | 0.654227 |
5ecf9cf303c9a1d46fc8ddba927a9bcef20b4387 | 35,983 | py | Python | testslide/__init__.py | deathowl/TestSlide | 22958af9d487caa9bbc309405106591a48716ad5 | [
"MIT"
] | 76 | 2018-09-29T17:54:36.000Z | 2020-12-04T19:42:54.000Z | testslide/__init__.py | deathowl/TestSlide | 22958af9d487caa9bbc309405106591a48716ad5 | [
"MIT"
] | 158 | 2018-10-03T14:18:06.000Z | 2021-01-05T13:02:51.000Z | testslide/__init__.py | deathowl/TestSlide | 22958af9d487caa9bbc309405106591a48716ad5 | [
"MIT"
] | 33 | 2021-03-03T19:55:14.000Z | 2022-03-29T21:49:30.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
# Start coverage collection as early as possible in subprocesses when the
# parent asked for it (coverage.py's subprocess-measurement protocol).
if "COVERAGE_PROCESS_START" in os.environ:
    import coverage
    coverage.process_startup()
import asyncio
import asyncio.log
import contextlib
import inspect
import re
import sys
import types
import unittest
import warnings
from contextlib import contextmanager
from functools import partial
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterator,
List,
Optional,
TextIO,
Tuple,
Type,
Union,
)
import testslide.matchers
import testslide.mock_callable
import testslide.mock_constructor
import testslide.patch_attribute
from testslide.strict_mock import StrictMock # noqa
if TYPE_CHECKING:
# hack for Mypy
from testslide.runner import BaseFormatter
if sys.version_info < (3, 6):
    raise RuntimeError("Python >=3.6 required.")
# Compatibility shims: asyncio.run() and asyncio.all_tasks() only exist on
# newer Pythons, so provide equivalents with debug mode always enabled.
if sys.version_info < (3, 7):
    def asyncio_run(coro):
        # Backport of asyncio.run(coro, debug=True) for Python 3.6.
        loop = asyncio.events.new_event_loop()
        try:
            loop.set_debug(True)
            loop.run_until_complete(coro)
        finally:
            try:
                loop.run_until_complete(loop.shutdown_asyncgens())
            finally:
                loop.close()
else:
    asyncio_run = partial(asyncio.run, debug=True)
if sys.version_info < (3, 8):
    get_all_tasks = asyncio.Task.all_tasks
else:
    get_all_tasks = asyncio.all_tasks
def get_active_tasks():
    """Return every known asyncio task that is neither done nor cancelled."""
    active = []
    for task in get_all_tasks():
        if task.done() or task.cancelled():
            continue
        active.append(task)
    return active
class LeftOverActiveTasks(BaseException):
    """Raised when unfinished asynchronous tasks are detected after an example."""
    pass
def _importer(target: str) -> Any:
components = target.split(".")
import_path = components.pop(0)
thing = __import__(import_path)
def dot_lookup(thing: object, comp: str, import_path: str) -> Any:
try:
return getattr(thing, comp)
except AttributeError:
__import__(import_path)
return getattr(thing, comp)
for comp in components:
import_path += ".%s" % comp
thing = dot_lookup(thing, comp, import_path)
return thing
async def _async_ensure_no_leaked_tasks(coro):
    """Await *coro* and fail if it left new asyncio tasks still running."""
    # Snapshot active tasks before/after so pre-existing tasks are tolerated.
    before_example_tasks = get_active_tasks()
    result = await coro
    after_example_tasks = get_active_tasks()
    new_still_running_tasks = set(after_example_tasks) - set(before_example_tasks)
    if new_still_running_tasks:
        tasks_str = "\n".join(str(task) for task in new_still_running_tasks)
        raise LeftOverActiveTasks(
            "Some tasks were started but did not finish yet, are you missing "
            f"an `await` somewhere?\nRunning tasks:\n {tasks_str}"
        )
    return result
class _ContextData:
    """
    To be used as a repository of context specific data, used during each
    example execution.
    """
    def _init_sub_example(self) -> None:
        # Sub-example failures are collected here and re-raised together by
        # an after-hook registered below (async or sync to match the example).
        self._sub_examples_agg_ex = AggregatedExceptions()
        def real_assert_sub_examples(self: "_ContextData") -> None:
            if self._sub_examples_agg_ex.exceptions:
                self._sub_examples_agg_ex.raise_correct_exception()
        if self._example.is_async:
            async def assert_sub_examples(self: "_ContextData") -> None:
                real_assert_sub_examples(self)
        else:
            def assert_sub_examples(self: "_ContextData") -> None:  # type: ignore
                real_assert_sub_examples(self)
        self.after(assert_sub_examples)
    def _init_mocks(self) -> None:
        # Expose the mocking entry points as attributes of the context data.
        self.mock_callable = testslide.mock_callable.mock_callable
        self.mock_async_callable = testslide.mock_callable.mock_async_callable
        self.mock_constructor = testslide.mock_constructor.mock_constructor
        self.patch_attribute = testslide.patch_attribute.patch_attribute
        self._mock_callable_after_functions: List[Callable] = []
        # mock_callable assertions are deferred: they are wrapped (async or
        # sync, matching the example) and run as after-hooks.
        def register_assertion(assertion: Callable) -> None:
            if self._example.is_async:
                async def f(_: _ContextData) -> None:
                    assertion()
            else:
                f = lambda _: assertion()
            self._mock_callable_after_functions.append(f)
        testslide.mock_callable.register_assertion = register_assertion
    def __init__(self, example: "Example", formatter: "BaseFormatter") -> None:
        self._example = example
        self._formatter = formatter
        self._context = example.context
        self._after_functions: List[Callable] = []
        # Backing TestCase instance used to forward assert* calls.
        self._test_case = unittest.TestCase()
        self._init_sub_example()
        self._init_mocks()
    @staticmethod
    def _not_callable(self: "_ContextData") -> None:
        raise BaseException("This function should not be called outside test code.")
    @property
    def _all_methods(self) -> Dict[str, Callable]:
        return self._context.all_context_data_methods
    @property
    def _all_memoizable_attributes(self) -> Dict[str, Callable]:
        return self._context.all_context_data_memoizable_attributes
    def __setattr__(self, name: str, value: Any) -> None:
        # Attributes are write-once: resetting to a different value is refused
        # to keep test state unambiguous (use memoize/memoize_before instead).
        if self.__dict__.get(name) and self.__dict__[name] != value:
            raise AttributeError(
                f"Attribute {repr(name)} can not be reset.\n"
                "Resetting attribute values is not permitted as it can create "
                "confusion and taint test signal.\n"
                "You can use memoize/memoize_before instead, as they allow "
                "attributes from parent contexs to be overridden consistently "
                "by sub-contexts.\n"
                "Details and examples at the documentation: "
                "https://testslide.readthedocs.io/en/main/testslide_dsl/context_attributes_and_functions/index.html"
            )
        else:
            super(_ContextData, self).__setattr__(name, value)
    def __getattr__(self, name: str) -> Any:
        # Lazily materialize context-declared functions and memoized
        # attributes into __dict__ on first access.
        if name in self._all_methods.keys():
            def static(*args: Any, **kwargs: Any) -> Any:
                return self._all_methods[name](self, *args, **kwargs)
            self.__dict__[name] = static
        if name in self._all_memoizable_attributes.keys():
            attribute_code = self._all_memoizable_attributes[name]
            if self._example.is_async and inspect.iscoroutinefunction(attribute_code):
                raise ValueError(
                    f"Function can not be a coroutine function: {repr(attribute_code)}"
                )
            self._formatter.dsl_memoize(self._example, attribute_code)
            self.__dict__[name] = attribute_code(self)
        try:
            return self.__dict__[name]
        except KeyError:
            # Forward assert* methods to unittest.TestCase
            if re.match("^assert", name) and hasattr(self._test_case, name):
                return getattr(self._test_case, name)
            raise AttributeError(
                "Context '{}' has no attribute '{}'".format(self._context, name)
            )
    def after(self, after_code: Callable) -> Callable:
        """
        Use this to decorate a function to be registered to be executed after
        the example code.
        """
        self._after_functions.append(after_code)
        return self._not_callable
    @contextmanager
    def sub_example(self, name: Optional[str] = None) -> Iterator[None]:
        """
        Use this as a context manager many times inside the same
        example. Failures in the code inside the context manager
        will be aggregated, and reported individually at the end.
        """
        with self._sub_examples_agg_ex.catch():
            yield
    def async_run_with_health_checks(self, coro):
        """
        Runs the given coroutine in a new event loop, and ensuring there's no
        task leakage.
        """
        result = self.async_run_with_health_checks_result = asyncio_run(_async_ensure_no_leaked_tasks(coro)) if False else asyncio_run(_async_ensure_no_leaked_tasks(coro))
        return result
class AggregatedExceptions(Exception):
    """
    Aggregate example execution exceptions.

    Collects every exception raised during an example's hooks and body so
    they can be reported together at the end.
    """
    def __init__(self) -> None:
        super(AggregatedExceptions, self).__init__()
        self.exceptions: List[BaseException] = []
    def append_exception(self, exception: BaseException) -> None:
        # Flatten nested aggregations so self.exceptions stays a flat list.
        if isinstance(exception, AggregatedExceptions):
            self.exceptions.extend(exception.exceptions)
        else:
            self.exceptions.append(exception)
    @contextmanager
    def catch(self) -> Iterator[None]:
        """Context manager that records (instead of propagating) any exception."""
        try:
            yield
        except BaseException as exception:
            self.append_exception(exception)
    def __str__(self) -> str:
        return "{} failures.\n".format(len(self.exceptions)) + "\n".join(
            f"{type(e)}: {str(e)}" for e in self.exceptions
        )
    def raise_correct_exception(self) -> None:
        """
        Re-raise the collected failures:
        - nothing collected: no-op;
        - any Skip / unittest.SkipTest: raise Skip;
        - exactly one failure: re-raise it as-is;
        - otherwise: raise this aggregation.
        """
        if not self.exceptions:
            return
        ex_types = {type(ex) for ex in self.exceptions}
        if Skip in ex_types or unittest.SkipTest in ex_types:
            raise Skip()
        elif len(self.exceptions) == 1:
            raise self.exceptions[0]
        else:
            raise self
        # NOTE: an unreachable duplicate of the raise logic used to follow
        # here (every branch above raises or returns); removed as dead code.
class Skip(Exception):
    """
    Raised by an example when it is skipped.
    """
    pass
class UnexpectedSuccess(Exception):
    """
    Raised by an example when it unexpectedly succeeded (expected failure passed).
    """
class SlowCallback(Exception):
    """
    Raised by TestSlide when an asyncio slow callback warning is detected.
    """
class _ExampleRunner:
    """Executes a single Example: around hooks, before hooks, body, after hooks."""
    def __init__(self, example: "Example", formatter: "BaseFormatter") -> None:
        self.example = example
        self.formatter = formatter
        self.trim_path_prefix = self.formatter.trim_path_prefix
    @staticmethod
    async def _fail_if_not_coroutine_function(
        func: Callable, *args: Any, **kwargs: Any
    ) -> None:
        # Async examples require every hook to be a coroutine function.
        if not inspect.iscoroutinefunction(func):
            raise ValueError(f"Function must be a coroutine function: {repr(func)}")
        return await func(*args, **kwargs)
    async def _real_async_run_all_hooks_and_example(
        self,
        context_data: _ContextData,
        around_functions: Optional[List[Callable]] = None,
    ) -> None:
        """
        ***********************************************************************
        ***********************************************************************
        WARNING
        ***********************************************************************
        ***********************************************************************
        This function **MUST** be keep the exact same execution flow of
        _sync_run_all_hooks_and_example()!!!
        """
        if around_functions is None:
            around_functions = list(reversed(self.example.context.all_around_functions))
        if not around_functions:
            # Innermost layer: run before hooks, the example body, then all
            # after hooks, aggregating failures so every after hook runs.
            aggregated_exceptions = AggregatedExceptions()
            with aggregated_exceptions.catch():
                for before_code in self.example.context.all_before_functions:
                    if hasattr(before_code, "_memoize_before_code"):
                        self.formatter.dsl_memoize_before(
                            self.example, before_code._memoize_before_code
                        )
                    else:
                        self.formatter.dsl_before(self.example, before_code)
                    await self._fail_if_not_coroutine_function(
                        before_code, context_data
                    )
                self.formatter.dsl_example(self.example, self.example.code)
                await _async_ensure_no_leaked_tasks(
                    self._fail_if_not_coroutine_function(
                        self.example.code, context_data
                    )
                )
            after_functions: List[Callable] = []
            after_functions.extend(context_data._mock_callable_after_functions)
            after_functions.extend(self.example.context.all_after_functions)
            after_functions.extend(context_data._after_functions)
            # After hooks run in reverse registration order (LIFO).
            for after_code in reversed(after_functions):
                with aggregated_exceptions.catch():
                    self.formatter.dsl_after(self.example, after_code)
                    await self._fail_if_not_coroutine_function(after_code, context_data)
            aggregated_exceptions.raise_correct_exception()
            return
        # Peel one around hook per recursion level; it must call wrapped().
        around_code = around_functions.pop()
        wrapped_called: List[bool] = []
        async def async_wrapped() -> None:
            wrapped_called.append(True)
            await self._real_async_run_all_hooks_and_example(
                context_data, around_functions
            )
        self.formatter.dsl_around(self.example, around_code)
        await self._fail_if_not_coroutine_function(
            around_code, context_data, async_wrapped
        )
        if not wrapped_called:
            raise RuntimeError(
                "Around hook "
                + repr(around_code.__name__)
                + " did not execute example code!"
            )
    @contextlib.contextmanager
    def _raise_if_asyncio_warnings(self, context_data: _ContextData) -> Iterator[None]:
        # Turn "coroutine never awaited" warnings and asyncio slow-callback
        # log messages into test failures (Python >= 3.7 only).
        if sys.version_info < (3, 7):
            yield
            return
        original_showwarning = warnings.showwarning
        caught_failures: List[Union[Exception, str]] = []
        def showwarning(
            message: str,
            category: Type[Warning],
            filename: str,
            lineno: int,
            file: Optional[TextIO] = None,
            line: Optional[str] = None,
        ) -> None:
            failure_warning_messages: Dict[Any, str] = {
                RuntimeWarning: "^coroutine '.+' was never awaited"
            }
            warning_class = type(message)
            pattern = failure_warning_messages.get(warning_class, None)
            if pattern and re.compile(pattern).match(str(message)):
                caught_failures.append(message)
            else:
                original_showwarning(message, category, filename, lineno, file, line)
        warnings.showwarning = showwarning  # type: ignore
        original_logger_warning = asyncio.log.logger.warning
        def logger_warning(msg: str, *args: Any, **kwargs: Any) -> None:
            # asyncio's event loop logs this when a callback blocks too long.
            if re.compile("^Executing .+ took .+ seconds$").match(str(msg)):
                msg = (
                    f"{msg}\n"
                    "During the execution of the async test a slow callback "
                    "that blocked the event loop was detected.\n"
                    "Tip: you can customize the detection threshold with:\n"
                    "  asyncio.get_running_loop().slow_callback_duration = seconds"
                )
                caught_failures.append(SlowCallback(msg % args))
            else:
                original_logger_warning(msg, *args, **kwargs)
        asyncio.log.logger.warning = logger_warning  # type: ignore
        aggregated_exceptions = AggregatedExceptions()
        try:
            with aggregated_exceptions.catch():
                yield
        finally:
            # Always restore the patched hooks, then surface caught failures.
            warnings.showwarning = original_showwarning
            asyncio.log.logger.warning = original_logger_warning  # type: ignore
            for failure in caught_failures:
                with aggregated_exceptions.catch():
                    raise failure  # type: ignore
            aggregated_exceptions.raise_correct_exception()
    def _async_run_all_hooks_and_example(self, context_data: _ContextData) -> None:
        coro = self._real_async_run_all_hooks_and_example(context_data)
        with self._raise_if_asyncio_warnings(context_data):
            asyncio_run(coro)
    @staticmethod
    def _fail_if_coroutine_function(
        func: Callable, *args: Any, **kwargs: Any
    ) -> Optional[Any]:
        # Sync examples must not use coroutine functions anywhere.
        if inspect.iscoroutinefunction(func):
            raise ValueError(f"Function can not be a coroutine function: {repr(func)}")
        return func(*args, **kwargs)
    def _sync_run_all_hooks_and_example(
        self,
        context_data: _ContextData,
        around_functions: Optional[List[Callable]] = None,
    ) -> None:
        """
        ***********************************************************************
        ***********************************************************************
        WARNING
        ***********************************************************************
        ***********************************************************************
        This function **MUST** be keep the exact same execution flow of
        _real_async_run_all_hooks_and_example()!!!
        """
        if around_functions is None:
            around_functions = list(reversed(self.example.context.all_around_functions))
        if not around_functions:
            # Innermost layer: before hooks, example body, then after hooks.
            aggregated_exceptions = AggregatedExceptions()
            with aggregated_exceptions.catch():
                for before_code in self.example.context.all_before_functions:
                    if hasattr(before_code, "_memoize_before_code"):
                        self.formatter.dsl_memoize_before(
                            self.example, before_code._memoize_before_code
                        )
                    else:
                        self.formatter.dsl_before(self.example, before_code)
                    self._fail_if_coroutine_function(before_code, context_data)
                self.formatter.dsl_example(self.example, self.example.code)
                self._fail_if_coroutine_function(self.example.code, context_data)
            after_functions: List[Callable] = []
            after_functions.extend(context_data._mock_callable_after_functions)
            after_functions.extend(self.example.context.all_after_functions)
            after_functions.extend(context_data._after_functions)
            # After hooks run in reverse registration order (LIFO).
            for after_code in reversed(after_functions):
                with aggregated_exceptions.catch():
                    self.formatter.dsl_after(self.example, after_code)
                    self._fail_if_coroutine_function(after_code, context_data)
            aggregated_exceptions.raise_correct_exception()
            return
        # Peel one around hook per recursion level; it must call wrapped().
        around_code = around_functions.pop()
        wrapped_called: List[bool] = []
        def wrapped() -> None:
            wrapped_called.append(True)
            self._sync_run_all_hooks_and_example(context_data, around_functions)
        self.formatter.dsl_around(self.example, around_code)
        self._fail_if_coroutine_function(around_code, context_data, wrapped)
        if not wrapped_called:
            raise RuntimeError(
                "Around hook "
                + repr(around_code.__name__)
                + " did not execute example code!"
            )
    def run(self) -> None:
        """Run the example end to end, always unpatching all mocks afterwards."""
        try:
            if self.example.skip:
                raise Skip()
            context_data = _ContextData(self.example, self.formatter)
            if self.example.is_async:
                self._async_run_all_hooks_and_example(context_data)
            else:
                self._sync_run_all_hooks_and_example(context_data)
        finally:
            sys.stdout.flush()
            sys.stderr.flush()
            # Unpatch everything even when the example failed or was skipped.
            testslide.mock_callable.unpatch_all_callable_mocks()
            testslide.mock_constructor.unpatch_all_constructor_mocks()
            testslide.patch_attribute.unpatch_all_mocked_attributes()
class Example:
    """
    A single runnable example (test) belonging to a context.
    """
    def __init__(
        self,
        name: str,
        code: Callable,
        context: "Context",
        skip: bool = False,
        focus: bool = False,
    ) -> None:
        self.name = name
        self.code = code
        # Async examples are detected once, up front.
        self.is_async = inspect.iscoroutinefunction(code)
        self.context = context
        # Stored through __dict__ so these names don't collide with the
        # read-only properties of the same name below.
        self.__dict__["skip"] = skip
        self.__dict__["focus"] = focus
    @property
    def full_name(self) -> str:
        return f"{self.context.full_name}: {self.name}"
    @property
    def skip(self) -> bool:
        """
        True if the example or its context is marked to be skipped.
        """
        return bool(self.context.skip or self.__dict__["skip"])
    @property
    def focus(self) -> bool:
        """
        True if the example or its context is marked to be focused.
        """
        return bool(self.context.focus or self.__dict__["focus"])
    def __str__(self) -> str:
        return self.name
class _TestSlideTestResult(unittest.TestResult):
    """
    Concrete unittest.TestResult to allow unttest.TestCase integration, by
    aggregating failures at an AggregatedExceptions instance.
    """
    def __init__(self) -> None:
        super(_TestSlideTestResult, self).__init__()
        # All reported errors/failures/skips end up aggregated here.
        self.aggregated_exceptions = AggregatedExceptions()
    def _add_exception(
        self,
        err: Tuple[
            Type[BaseException],
            BaseException,
            Optional[types.TracebackType],
        ],
    ) -> None:
        # Only the exception value is kept; type and traceback are dropped.
        exc_type, exc_value, exc_traceback = err
        self.aggregated_exceptions.append_exception(exc_value)
    def addError(  # type:ignore
        self,
        test: "TestCase",
        err: Tuple[
            Type[BaseException],
            BaseException,
            types.TracebackType,
        ],
    ) -> None:
        """Called when an error has occurred. 'err' is a tuple of values as
        returned by sys.exc_info().
        """
        super(_TestSlideTestResult, self).addError(test, err)  # type: ignore
        self._add_exception(err)
    def addFailure(  # type:ignore
        self,
        test: "TestCase",
        err: Tuple[
            Type[BaseException],
            BaseException,
            types.TracebackType,
        ],
    ) -> None:
        """Called when an error has occurred. 'err' is a tuple of values as
        returned by sys.exc_info()."""
        super(_TestSlideTestResult, self).addFailure(test, err)
        self._add_exception(err)
    def addSkip(self, test: "TestCase", reason: str) -> None:  # type: ignore
        """Called when the test case test is skipped. reason is the reason
        the test gave for skipping."""
        super(_TestSlideTestResult, self).addSkip(test, reason)
        # Represent the skip as TestSlide's own Skip exception.
        self._add_exception((type(Skip), Skip(), None))  # type: ignore
    def addUnexpectedSuccess(self, test: "TestCase") -> None:  # type: ignore
        """Called when the test case test was marked with the expectedFailure()
        decorator, but succeeded."""
        super(_TestSlideTestResult, self).addUnexpectedSuccess(test)
        self._add_exception((type(UnexpectedSuccess), UnexpectedSuccess(), None))  # type: ignore
    def addSubTest(self, test: "TestCase", subtest: "TestCase", err: Tuple[Optional[Type[BaseException]], Optional[BaseException], Optional[types.TracebackType]]) -> None:  # type: ignore
        """Called at the end of a subtest.
        'err' is None if the subtest ended successfully, otherwise it's a
        tuple of values as returned by sys.exc_info().
        """
        super(_TestSlideTestResult, self).addSubTest(test, subtest, err)  # type: ignore
        if err:
            self._add_exception(err)  # type: ignore
class Context:
    """
    Container for example contexts.

    A Context owns its examples, nested child contexts, hook functions
    (before/after/around), attribute and method definitions for the example
    execution scope, and reusable shared contexts.
    """

    _SAME_CONTEXT_NAME_ERROR = "A context with the same name is already defined"
    # List of all top level contexts created
    all_top_level_contexts: List["Context"] = []

    # Constructor
    def __init__(
        self,
        name: str,
        parent_context: Optional["Context"] = None,
        shared: bool = False,
        skip: bool = False,
        focus: bool = False,
    ) -> None:
        """
        Creates a new context.

        Raises RuntimeError when a sibling context (same parent, or top level)
        already uses ``name``.
        """
        # Validate context name
        if parent_context:
            current_level_contexts = parent_context.children_contexts
        else:
            current_level_contexts = self.all_top_level_contexts
        if name in [context.name for context in current_level_contexts]:
            raise RuntimeError(self._SAME_CONTEXT_NAME_ERROR)
        self.name: str = name
        self.parent_context = parent_context
        self.shared = shared
        # Stored through __dict__ so the raw flags do not shadow the ``skip``
        # and ``focus`` properties defined below.
        self.__dict__["skip"] = skip
        self.__dict__["focus"] = focus
        self.children_contexts: List["Context"] = []
        self.examples: List["Example"] = []
        self.before_functions: List[Callable] = []
        self.after_functions: List[Callable] = []
        self.around_functions: List[Callable] = []
        self.context_data_methods: Dict[str, Callable] = {}
        self.context_data_memoizable_attributes: Dict[str, Callable] = {}
        self.shared_contexts: Dict[str, "Context"] = {}
        # Only regular top-level contexts are registered globally; shared and
        # nested contexts are not runnable roots.
        if not self.parent_context and not self.shared:
            self.all_top_level_contexts.append(self)

    # Properties
    @property
    def parent_contexts(self) -> List["Context"]:
        """
        Returns a list of all parent contexts, from bottom to top.
        """
        final_list = []
        parent = self.parent_context
        while parent:
            final_list.append(parent)
            parent = parent.parent_context
        return final_list

    @property
    def depth(self) -> int:
        """
        Number of parent contexts this context has.
        """
        return len(self.parent_contexts)

    def _all_parents_as_dict(original: type) -> Callable[["Context"], Dict[str, Any]]:  # type: ignore # noqa: B902
        """
        Use as a decorator for empty functions named all_attribute_name, to make
        them return a dict with self.parent_context.all_attribute_name and
        self.attribute_name.
        """
        # NOTE: class-body helper (no ``self``); ``original`` is really the
        # decorated function, whose __name__ ("all_xxx") selects both the
        # parent's aggregate property and this context's own "xxx" attribute.
        def get_all(self: "Context") -> Dict[str, Any]:
            final_dict: Dict[str, Any] = {}
            if self.parent_context:
                final_dict.update(getattr(self.parent_context, original.__name__))
            # Own entries override the parent's on key collision.
            final_dict.update(getattr(self, original.__name__.split("all_")[1]))
            return final_dict
        return get_all

    def _all_parents_as_list(original: type) -> Callable[["Context"], List[Any]]:  # type: ignore # noqa: B902
        """
        Use as a decorator for empty functions named all_attribute_name, to make
        them return a list with self.parent_context.all_attribute_name and
        self.attribute_name.
        """
        def get_all(self: "Context") -> List[Any]:
            # Parents first, so hooks accumulate from outermost to innermost.
            final_list: List[str] = []
            if self.parent_context:
                final_list.extend(getattr(self.parent_context, original.__name__))
            final_list.extend(getattr(self, original.__name__.split("all_")[1]))
            return final_list
        return get_all

    @property  # type: ignore
    @_all_parents_as_dict
    def all_context_data_methods(self) -> None:
        """
        Returns a combined dict of all context_data_methods, including from
        parent contexts.
        """
        pass

    @property  # type: ignore
    @_all_parents_as_dict
    def all_context_data_memoizable_attributes(self) -> None:
        """
        Returns a combined dict of all context_data_memoizable_attributes,
        including from parent contexts.
        """
        pass

    @property  # type: ignore
    @_all_parents_as_list
    def all_around_functions(self) -> None:
        """
        Return a list of all around_functions, including from parent contexts.
        """
        pass

    @property  # type: ignore
    @_all_parents_as_list
    def all_before_functions(self) -> None:
        """
        Return a list of all before_functions, including from parent contexts.
        """
        pass

    @property  # type: ignore
    @_all_parents_as_list
    def all_after_functions(self) -> None:
        """
        Return a list of all after_functions, including from parent contexts.
        """
        pass

    @property  # type: ignore
    @_all_parents_as_dict
    def all_shared_contexts(self) -> None:
        """
        Returns a combined dict of all shared_contexts, including from parent
        contexts.
        """
        pass

    @property
    def all_examples(self) -> List[Example]:
        """
        List of all examples in this context and nested contexts.
        """
        final_list = []
        final_list.extend(self.examples)
        for child_context in self.children_contexts:
            final_list.extend(child_context.all_examples)
        return final_list

    @property
    def hierarchy(self) -> List["Context"]:
        """
        Returns a list of all contexts in this hierarchy.
        """
        # Outermost ancestor first, self last.
        return [context for context in list(reversed(self.parent_contexts)) + [self]]

    @property
    def full_name(self) -> str:
        """
        Full context name, including parent contexts.
        """
        return ", ".join(str(context) for context in self.hierarchy)

    @property
    def skip(self) -> bool:
        """
        True if this context or any parent context is tagged to be skipped.
        """
        return any(context.__dict__["skip"] for context in self.hierarchy)

    @property
    def focus(self) -> bool:
        """
        True if this context or any parent context is tagged to be focused.
        """
        return any(context.__dict__["focus"] for context in self.hierarchy)

    def __str__(self) -> str:
        return self.name

    def add_child_context(
        self, name: str, skip: bool = False, focus: bool = False
    ) -> "Context":
        """
        Creates a nested context below self.

        Raises RuntimeError on a duplicate child name.
        """
        if name in [context.name for context in self.children_contexts]:
            raise RuntimeError(self._SAME_CONTEXT_NAME_ERROR)
        child_context = Context(name, parent_context=self, skip=skip, focus=focus)
        self.children_contexts.append(child_context)
        return child_context

    def add_example(
        self, name: str, example_code: Callable, skip: bool = False, focus: bool = False
    ) -> Example:
        """
        Add an example to this context.

        Raises RuntimeError on a duplicate example name; returns the new Example.
        """
        if name in [example.name for example in self.examples]:
            raise RuntimeError(
                f"An example with the same name '{name}' is already defined"
            )
        self.examples.append(
            Example(name, code=example_code, context=self, skip=skip, focus=focus)
        )
        return self.examples[-1]

    def has_attribute(self, name: str) -> bool:
        # True when the name is already taken by a function or memoized
        # attribute in this context's execution scope.
        return any(
            [
                name in self.context_data_methods.keys(),
                name in self.context_data_memoizable_attributes.keys(),
            ]
        )

    def add_function(self, name: str, function_code: Callable) -> None:
        """
        Add given function to example execution scope.

        Raises AttributeError when the name is already in use.
        """
        if self.has_attribute(name):
            raise AttributeError(
                'Attribute "{}" already set for context "{}"'.format(name, self)
            )
        self.context_data_methods[name] = function_code

    def add_memoized_attribute(
        self, name: str, memoizable_code: Callable, before: bool = False
    ) -> None:
        """
        Add given attribute name to execution scope, by lazily memoizing the return
        value of memoizable_code().

        With ``before=True``, also register a before-hook that eagerly
        materializes the attribute ahead of the example body (awaiting the
        code when it is a coroutine function).
        """
        if self.has_attribute(name):
            raise AttributeError(
                'Attribute "{}" already set for context "{}"'.format(name, self)
            )
        self.context_data_memoizable_attributes[name] = memoizable_code
        if before:
            if inspect.iscoroutinefunction(memoizable_code):
                async def async_materialize_attribute(
                    context_data: _ContextData,
                ) -> None:
                    # Look the code up at run time so overrides in nested
                    # contexts win over this definition.
                    code = context_data._context.all_context_data_memoizable_attributes[
                        name
                    ]
                    context_data.__dict__[name] = await code(context_data)
                # Keep a reference to the original code for introspection.
                async_materialize_attribute._memoize_before_code = memoizable_code  # type: ignore
                self.before_functions.append(async_materialize_attribute)
            else:
                def materialize_attribute(context_data: _ContextData) -> None:
                    code = context_data._context.all_context_data_memoizable_attributes[
                        name
                    ]
                    context_data.__dict__[name] = code(context_data)
                materialize_attribute._memoize_before_code = memoizable_code  # type: ignore
                self.before_functions.append(materialize_attribute)

    def add_shared_context(self, name: str, shared_context_code: "Context") -> None:
        """
        Create a shared context.

        Raises RuntimeError on a duplicate shared-context name.
        """
        if name in self.shared_contexts:
            raise RuntimeError("A shared context with the same name is already defined")
        self.shared_contexts[name] = shared_context_code

    def add_test_case(self, test_case: Type["TestCase"], attr_name: str) -> None:
        """
        Add around hooks to context from given unittest.TestCase class. Only
        hooks such as setUp or tearDown will be called, no tests will be
        included.
        """
        def wrap_test_case(self: "Context", example: Callable) -> None:
            def test_test_slide(_: Any) -> None:
                example()

            def exec_body(ns: Dict[str, Callable]) -> None:
                ns.update({"test_test_slide": test_test_slide})

            # Build a child class of given TestCase, with a defined test that
            # will run TestSlide example.
            test_slide_test_case = types.new_class(
                "TestSlideTestCase", bases=(test_case,), exec_body=exec_body
            )
            # This suite will only contain TestSlide's example test.
            test_suite = unittest.TestLoader().loadTestsFromName(
                "test_test_slide", test_slide_test_case  # type: ignore
            )
            # Expose the TestCase instance to the example scope under attr_name.
            setattr(self, attr_name, list(test_suite)[0])
            result = _TestSlideTestResult()
            test_suite(result=result)  # type: ignore
            if not result.wasSuccessful():
                result.aggregated_exceptions.raise_correct_exception()
        self.around_functions.append(wrap_test_case)
def reset() -> None:
    """Forget every top-level context (and its examples/hooks) defined so far."""
    # In-place slice deletion empties the shared list, same as .clear().
    del Context.all_top_level_contexts[:]
class TestCase(unittest.TestCase):
    """
    A subclass of unittest.TestCase that adds TestSlide's features.
    """

    def setUp(self) -> None:
        # Defer mock assertions to cleanup time, after the test body finished.
        testslide.mock_callable.register_assertion = lambda assertion: self.addCleanup(
            assertion
        )
        # Undo every patch TestSlide applied, even when the test failed.
        self.addCleanup(testslide.mock_callable.unpatch_all_callable_mocks)
        self.addCleanup(testslide.mock_constructor.unpatch_all_constructor_mocks)
        self.addCleanup(testslide.patch_attribute.unpatch_all_mocked_attributes)
        super(TestCase, self).setUp()

    # The wrappers below are thin conveniences so tests can call the TestSlide
    # DSL as self.mock_callable(...) etc.
    @staticmethod
    def mock_callable(
        *args: Any, **kwargs: Any
    ) -> testslide.mock_callable._MockCallableDSL:
        return testslide.mock_callable.mock_callable(*args, **kwargs)

    @staticmethod
    def mock_async_callable(
        *args: Any, **kwargs: Any
    ) -> testslide.mock_callable._MockCallableDSL:
        return testslide.mock_callable.mock_async_callable(*args, **kwargs)

    @staticmethod
    def mock_constructor(
        *args: Any, **kwargs: Any
    ) -> testslide.mock_constructor._MockConstructorDSL:
        return testslide.mock_constructor.mock_constructor(*args, **kwargs)

    @staticmethod
    def patch_attribute(*args: Any, **kwargs: Any) -> None:
        return testslide.patch_attribute.patch_attribute(*args, **kwargs)
| 35.002918 | 187 | 0.607592 |
112e26409a1b6ee64ff7438062702c05270606ce | 3,711 | py | Python | context_classification.py | yunzhe99/calam | 09d11fba021ccce93a0baac8d09ae220299ef6c2 | [
"MIT"
] | null | null | null | context_classification.py | yunzhe99/calam | 09d11fba021ccce93a0baac8d09ae220299ef6c2 | [
"MIT"
] | null | null | null | context_classification.py | yunzhe99/calam | 09d11fba021ccce93a0baac8d09ae220299ef6c2 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans, DBSCAN, AgglomerativeClustering
from scipy.cluster.hierarchy import dendrogram
from my_utils.data_io import load_dataset
from my_utils.preprocess import resnet_18_encoder
from my_utils.tools import get_distance_matrix
def get_features(Config):
    """Encode every image under Config.dataset_dir with the ResNet-18 encoder.

    Prints the resulting feature-array shape; the array itself is not returned
    (the original save call is kept commented out below).
    """
    images = load_dataset(Config.dataset_dir)
    features = np.squeeze(np.array(resnet_18_encoder(images)))
    print(features.shape)
    # np.save("img_features.npy", features)
def context_classification_by_kmeans(img_features):
    """Group image features with k-means, aiming at roughly 20 samples per cluster.

    Returns a list of clusters, each a list of the member feature vectors
    (see save_kmeans_array).
    """
    cluster_count = int(len(img_features) / 20)
    print(cluster_count)
    labels = KMeans(n_clusters=cluster_count, random_state=2316).fit_predict(img_features)
    clusters = save_kmeans_array(img_features, labels)
    # NOTE: the original exploratory snippets (a scatter plot of labels with
    # hand-marked scene boundaries at x = 255/398/542/629/909/1072/1194/1481/
    # 1582/1675, and a CSV dump of y_pred) were commented out; consult version
    # control if they are needed again.
    return clusters
def save_kmeans_array(img_features, cluster_result):
    """Bucket feature vectors by their cluster label.

    Returns a list with one sub-list per distinct label; sub-list ``k`` holds,
    in input order, every feature whose label is ``k`` (labels are assumed to
    be the contiguous 0..k-1 ints produced by KMeans).
    """
    bucket_count = len(np.unique(cluster_result))
    buckets = [[] for _ in range(bucket_count)]
    for position, feature in enumerate(img_features):
        buckets[cluster_result[position]].append(feature)
    return buckets
def context_cluster_by_dbscan(kmeans_array):
    """Cluster the k-means groups with DBSCAN over a precomputed distance matrix.

    Shows a heatmap of the pairwise distances as a visual sanity check, then
    prints the number of labels and the labels themselves.
    """
    pairwise = get_distance_matrix(kmeans_array)
    sns.heatmap(data=pairwise, vmin=10, vmax=20, cmap='Blues')
    plt.show()
    model = DBSCAN(eps=12, min_samples=3, metric='precomputed').fit(pairwise)
    print(len(model.labels_))
    print(model.labels_)
def context_cluster_by_hierarchy_cluster(kmeans_array):
    """Agglomeratively cluster the k-means groups and display the dendrogram.

    distance_threshold=0 with n_clusters=None makes sklearn build the full
    merge tree, which plot_dendrogram then renders (truncated to 10 levels).
    """
    pairwise = get_distance_matrix(kmeans_array)
    agg = AgglomerativeClustering(affinity='precomputed',
                                  distance_threshold=0,
                                  n_clusters=None,
                                  linkage='average')
    agg = agg.fit(pairwise)
    plot_dendrogram(agg, truncate_mode='level', p=10)
    plt.show()
def plot_dendrogram(model, **kwargs):
    """Draw a scipy dendrogram from a fitted sklearn AgglomerativeClustering model.

    Builds a scipy-style linkage matrix (children, distance, subtree size) and
    forwards any keyword arguments to scipy's dendrogram().
    """
    n_samples = len(model.labels_)
    counts = np.zeros(model.children_.shape[0])
    for i, merge in enumerate(model.children_):
        # Leaves count as 1; internal nodes contribute their cached subtree size.
        counts[i] = sum(
            1 if child_idx < n_samples else counts[child_idx - n_samples]
            for child_idx in merge
        )
    linkage_matrix = np.column_stack([model.children_, model.distances_,
                                      counts]).astype(float)
    # Plot the corresponding dendrogram
    dendrogram(linkage_matrix, **kwargs)
def classifier():
    """Placeholder for the final context classifier; not implemented yet."""
    pass
| 27.69403 | 89 | 0.665858 |
6b0ea3ce8a87e61d02a3b7bdd9ddcf6b25966b9e | 145 | py | Python | timemachines/skaters/pycrt/pycaretinclusion.py | iklasky/timemachines | 1820fa9453d31d4daaeff75274a935c7455febe3 | [
"MIT"
] | 253 | 2021-01-08T17:33:30.000Z | 2022-03-21T17:32:36.000Z | timemachines/skaters/pycrt/pycaretinclusion.py | iklasky/timemachines | 1820fa9453d31d4daaeff75274a935c7455febe3 | [
"MIT"
] | 65 | 2021-01-20T16:43:35.000Z | 2022-03-30T19:07:22.000Z | timemachines/skaters/pycrt/pycaretinclusion.py | iklasky/timemachines | 1820fa9453d31d4daaeff75274a935c7455febe3 | [
"MIT"
] | 28 | 2021-02-04T14:58:30.000Z | 2022-01-17T04:35:17.000Z | try:
from pycaret.internal.pycaret_experiment import TimeSeriesExperiment
using_pycaret=True
except ImportError:
using_pycaret=False
| 24.166667 | 72 | 0.813793 |
3c542aa512ab1f5c757eaaa51bfdc08e7bc78a26 | 12,927 | py | Python | my/balancesheet_220220.py | ssawwqdf/-project-stock_info_dashboard | f14a462d915d2207db1da12307aefdef4b6921e1 | [
"MIT"
] | 1 | 2022-02-19T20:22:05.000Z | 2022-02-19T20:22:05.000Z | my/balancesheet_220220.py | ssawwqdf/-project-stock_info_dashboard | f14a462d915d2207db1da12307aefdef4b6921e1 | [
"MIT"
] | null | null | null | my/balancesheet_220220.py | ssawwqdf/-project-stock_info_dashboard | f14a462d915d2207db1da12307aefdef4b6921e1 | [
"MIT"
] | null | null | null | # ====================================================
# 패키지
# ====================================================
# -------- data
import pandas as pd
import numpy as np
import sqlalchemy as sa
# -------- str
import re
# -------- date
from datetime import datetime, date
# -------- float
import math
# -------- craw
from bs4 import BeautifulSoup # 클래스라 생성자 만들어야 함
import requests
# -------- flask
from flask import Flask, make_response, jsonify, request, render_template
from flask_cors import CORS, cross_origin
import json
# -------- API
import yfinance
# --------
# ====================================================
# 기본 설정
# ====================================================
# sql 이름과 비밀번호 수정 필요
engine = sa.create_engine('oracle://ai:1111@localhost:1521/XE')
conn = engine.connect()
conn.close() # TODO 사용 후 close 해주기
# ----------------------------------------------------
# ====================================================
# 함 수
# ====================================================
"""
# 변수 설명
# 크롤링 데이터 : nm(회사명), cd(네이버에서 사용하는 회사 코드)
# 상장 기업 리스트 : corp_name(회사명), stock_code(종목코드), industry(업종), main_product(주요제품), listed_date(상장일), settle_mont( 결산월), pres(대표자명), hpage(홈페이지), region(지역)
# 야후 파이낸스 : yh_code
"""
# ==============
# 업종 분류
# ==============
# -------- 동일 업종 기업 출력
# TODO(미완성) 동일 업종 선택
def select_same_industry(corp_name):
    """Return the names of all listed companies in corp_name's industry.

    Marked TODO/unfinished by the author. NOTE(review): the lookup filters on
    the 'nm' column but returns the 'corp_name' column -- confirm the two
    columns agree in com_df.
    """
    indus = com_df[com_df['nm'] == corp_name]['industry'].values[0] # TODO: verify against com_df schema
    # print(com_df.groupby(by='industry')['nm'].nunique().max()) # up to 151 peers per industry -> crawl 151 statements?
    list_com = com_df[com_df['industry'] == indus]['corp_name'].values.tolist()
    return list_com
# -------- 네이버증권 연관기업 코드(hjh)
def relate_code_crawl(co):
    """Scrape the stock codes of related companies from the Naver Finance page.

    Network I/O: fetches https://finance.naver.com/item/main.naver for ``co``.
    Table 4 is the peer-comparison table; its first column header is a label,
    so it is dropped, and the trailing six characters of each remaining header
    are the peer's stock code.
    """
    url = 'https://finance.naver.com/item/main.naver?code=' + str(co)
    tables = pd.read_html(url, encoding='CP949')
    peer_headers = tables[4].columns.tolist()[1:]
    return [header[-6:] for header in peer_headers]
# relate_code_crawl('000660')
# ==============
# 기업 이름 코드 변환
# ==============
# -------- 네이버 재무제표 크롤링 용 gicode로 변환
def nm_to_bs_gicode(corp_name):
    """Map a company name to the FnGuide gicode used for statement crawling."""
    matches = com_df.loc[com_df['nm'] == corp_name, 'cd']
    return matches.values[0]
def stc_code_to_bs_gicode(stock_code):
    """Map a stock code to the FnGuide gicode used for statement crawling."""
    matches = com_df.loc[com_df['stock_code'] == stock_code, 'cd']
    return matches.values[0]
def yh_code_to_bs_gicode(yh_code):
    """Map a Yahoo Finance ticker to the FnGuide gicode for statement crawling.

    Bug fix: the filter previously compared against the undefined name
    ``yhcode`` (missing underscore), so every call raised NameError.
    """
    gi = com_df[com_df['yh_code'] == yh_code]['cd']
    gi = gi.values[0]
    return gi
# -------- 네이버 금융 크롤링 용 gicode로 변환
def nm_to_fn_gicode(corp_name):
    """Map a company name to the stock code used on Naver Finance pages."""
    matches = com_df.loc[com_df['nm'] == corp_name, 'stock_code']
    return matches.values[0]
def yh_code_to_fn_gicode(yh_code):
    """Map a Yahoo Finance ticker to the stock code used on Naver Finance."""
    matches = com_df.loc[com_df['yh_code'] == yh_code, 'stock_code']
    return matches.values[0]
# -------- 코드를 기업이름으로 변환
def stc_code_to_nm(stock_code):
    """Map a stock code back to the company name."""
    matches = com_df.loc[com_df['stock_code'] == stock_code, 'nm']
    return matches.values[0]
def yh_code_to_nm(yh_code):
    """Map a Yahoo Finance ticker back to the company name."""
    matches = com_df.loc[com_df['yh_code'] == yh_code, 'nm']
    return matches.values[0]
# ==============
# 데이터 수집
# ==============
# -------- Balance Sheets API call
# def bs_api(corp_name=None, yh_code=None, stock_code=None):
# print('haha')
# -------- Balance Sheets Crawling(재무제표 크롤링)
# 220220 수정
# 1) 매개변수 stock_code로 축약
# 2) kind로 특정 테이블 지정하는 대신 데이터프레임 리스트 전체 반환
# 3) '~계산에 참여한 계정 펼치기' 제거는 선택사항으로 둠
def bs_craw(stock_code, clear_name=False):  # takes the code produced by the search UI
    """Scrape the six financial-statement tables for a ticker from FnGuide.

    Returned list indices:
        0/1: income statement (annual / quarterly)
        2/3: balance sheet (annual / quarterly)
        4/5: cash-flow statement (annual / quarterly)

    When ``clear_name`` is truthy, the expander caption
    '계산에 참여한 계정 펼치기' is stripped from the account-name column of
    every table (tables are modified in place). Network I/O via pandas.
    """
    gcode = stc_code_to_bs_gicode(stock_code)
    url = f"http://comp.fnguide.com/SVO2/ASP/SVD_Finance.asp?NewMenuID=103&gicode={gcode}"
    table_list = pd.read_html(url, encoding='UTF-8')
    # Idiom fix: was `if clear_name == False:` with a redundant else branch.
    if not clear_name:
        return table_list
    new_table_list = []
    for tbl in table_list:
        # First column holds the account names; strip the expander caption.
        # Assumes the cells are strings, as the original code did.
        for i, account in enumerate(tbl.iloc[:, 0]):
            tbl.iloc[i, 0] = account.replace('계산에 참여한 계정 펼치기', '')
        new_table_list.append(tbl)
    return new_table_list
# ------- 네이버 금융
# 220220 수정
# 1) 매개변수 stock_code로 축약
# 2) kind로 특정 테이블 지정하는 대신 데이터프레임 리스트 전체 반환
def fn_craw(stock_code):
    """Fetch every table from the Naver Finance main page for a ticker.

    Index map of the returned list (needs further massaging downstream):
        0: previous/current day limits and volume     1: broker buy/sell info
        2: foreign/institutional trades               3: key financials by year/quarter
        4: same-industry comparison                   5: market cap / share count / par value
        6: foreign ownership limits                   7: target price
        8: PER/PBR/dividend yield                     9: industry PER and change rate
        10: 10-level order book                       11: popular KOSPI searches
        12: popular KOSDAQ searches
    """
    url = 'https://finance.naver.com/item/main.naver?code=' + str(stock_code)
    return pd.read_html(url, encoding='euc-kr')
# ==============
# 지표 선정
# ==============
# -------- 지표 선정
# 220220 수정
# 1) 매개변수 stock_code로 축약
# 2) 데이터프레임 하나가 아닌 리스트로 받아오기때문에 kind 제거하고 직접 선택해줌
# 3) sli_df_y, sil_df_q 에서 '-' 가공 시 if 조건에 따라 처리하는 대신 lambda와 re.sub 이용
# 4) dict 대신 array로 반환, 기업 이름(nm도 반환)
def idv_radar_data(stock_code):
    """
    Compute the five radar-chart metrics for a single company.

    Metrics (from Naver Finance's key-financials table):
      1. Dividend: payout ratio (inverse of dividend coverage)
      2. Liquidity: quick ratio (quick assets / current liabilities)
      3. Soundness: inverse of the debt ratio (total debt / equity)
      4. Profitability: net income / revenue
      5. Growth: sustainable growth rate (retention ratio * ROE)

    Returns (numpy array of the five metrics, company name), or implicitly
    None when recent disclosures contain NaNs (callers filter the Nones).
    """
    gcode = stock_code
    nm = stc_code_to_nm(stock_code)
    sil_df = fn_craw(gcode)[3] # table 3: key financials (rev. 220220)
    # Newly listed companies may have no figures at all; silently return None
    # when the latest annual (col 3) or quarterly (col 9) block has NaNs.
    if (sil_df.iloc[0:8, 3].isna().sum()) > 0:
        pass
    elif (sil_df.iloc[0:8, 9].isna().sum()) > 0:
        pass
    else:
        # Latest actual disclosures: annual figures for dividends (paid once a
        # year), quarterly figures for everything else. Positional .iloc is
        # used because disclosure dates differ between companies.
        sil_df_y = sil_df['최근 연간 실적'].iloc[:, 2]
        sil_df_q = sil_df['최근 분기 실적'].iloc[:, 4]
        sil_df_y = sil_df_y.fillna(0)
        sil_df_q = sil_df_q.fillna(0)
        # Object dtype means '-' placeholders are present: map them to 0 and
        # cast to float.
        if sil_df_y.dtype == 'O':
            sil_df_y = sil_df_y.apply(lambda x: re.sub('^-$', '0', '{}'.format(x)))
            sil_df_y = sil_df_y.astype('float')
        if sil_df_q.dtype == 'O':
            sil_df_q = sil_df_q.apply(lambda x: re.sub('^-$', '0', '{}'.format(x)))
            sil_df_q = sil_df_q.astype('float')
        # 1. payout ratio (row 15). NOTE(review): all row positions below
        # assume the current Naver table layout -- verify if the page changes.
        bd_tend = sil_df_y[15]
        # 2. quick ratio = quick assets / current liabilities (row 7)
        dj_rate = sil_df_q[7]
        # 3. soundness: inverse of the debt ratio (row 6)
        bch_rate = sil_df_q[6] / 100
        bch_rate = round((1 / bch_rate) * 100, 2)
        # 4. profitability: net income (row 2) / revenue (row 0)
        # TODO: revenue of 0 divides by zero
        dg_bene = sil_df_q[2]
        mch = sil_df_q[0]
        suyk = round((dg_bene / mch) * 100, 2)
        # 5. sustainable growth rate = retention ratio * ROE (row 5)
        roe = sil_df_y[5] / 100
        ubo = (100 - bd_tend) / 100
        grth = round(roe * ubo * 100, 2)
        data_arr = np.array([bd_tend, dj_rate, bch_rate, suyk, grth])
        return data_arr, nm
# -------- 관련 기업 지표 선정(상대적 비율 기준)
# 220220 수정
# 1) 매개변수 stock_code로 축약
# 2) dict 대신 array로 반환, 기업 이름(nm도 반환)
def relate_radar_data(stock_code):
    """Build radar-chart data for a ticker's Naver-listed peer group.

    Fetches the metrics of every related company (idv_radar_data returns None
    for companies with missing disclosures; those are dropped), then expresses
    each metric as a percentage of the peer-group mean.

    Returns (label_list, dict_list) where dict_list holds one
    {company_name: [5 normalized metrics]} dict per company.

    Fixes: removed a dead ``arr_list = []`` assignment that was immediately
    overwritten, and replaced five copy-pasted normalization statements with a
    loop over the metric columns.
    """
    label_list = ['배당성향', '유동성', '건전성', '수익성', '성장성']
    results = [idv_radar_data(stock_code=stcd)
               for stcd in relate_code_crawl(co=stock_code)]
    nm_list = [x[1] for x in results if x is not None]
    metrics = np.array([x[0] for x in results if x is not None])
    # Normalize each column to percent-of-peer-mean.
    for col in range(metrics.shape[1]):
        metrics[:, col] = (metrics[:, col] / metrics[:, col].mean()) * 100
    dict_list = [{nm: metrics[i, :].tolist()} for i, nm in enumerate(nm_list)]
    return label_list, dict_list
# -------- 관련 기업 지표 선정(원본)
# def relate_radar_data(yh_code=None, corp_name=None, stock_code=None):
# label_list=['배당성향', '유동성', '건전성', '수익성', '성장성']
# dict_list = []
#
# # 주식 코드로 변환
# gcode = 0
# if yh_code != None:
# gcode = yh_code_to_fn_gicode(yh_code)
# elif corp_name != None:
# gcode = nm_to_fn_gicode(corp_name)
# elif stock_code != None:
# gcode = stock_code
#
# relate_corp = relate_code_crawl(co=gcode)
#
# dict_list = [idv_radar_data(stock_code=stcd) for stcd in relate_corp]
#
# dict_list = [x for x in dict_list if x is not None]
#
#
# return label_list, dict_list
# ==============
# 시각화
# ==============
# -------- 매출, 당기순이익 추이 그래프
# 220220 수정
# 1) 매개변수 stock_code로 축약
# 2) 크롤링한 데이터는 list로 받아오므로 kind 없애고 직접 인덱스 처리
def mch_dg(stock_code):
    """Return (labels, revenue, net income) series for the revenue/profit chart.

    Reads table 0 of the FnGuide statement page: row 0 is revenue (매출액),
    row 15 net income (당기순이익); columns 1..5 cover four recent quarters
    plus the year-ago quarter.

    Fix: dropped an unused ``nm = stc_code_to_nm(...)`` lookup (dead code) and
    the redundant ``gcode`` alias.
    """
    bs_df = bs_craw(stock_code=stock_code)[0]
    label_list = bs_df.columns[1:6].tolist()  # four quarters + year-ago quarter
    mch_list = bs_df.loc[0, label_list].tolist()  # revenue row
    dg_list = bs_df.loc[15, label_list].tolist()  # net-income row
    return label_list, mch_list, dg_list
# -------- BS TABLE (재무상태표 필요 없다 ^^)
# def bs_table(corp_name=None, yh_code=None, stock_code=None):
# df=bs_craw(corp_name=cor_name, yh_code=yh_code, stock_code=stock_code, kind=1)
# df
# """
# # kind
# : 0 (연간 포괄손익계산서), 1 (분기별 포괄손익계산서)
# 2 (연간 재무상태표), 3 (분기별 재무상태표)
# 4 (연간 현금흐름표), 5 (분기별 현금프름표)
# """
# tot_list = []
# print(box_list)
#
# for box in box_list:
# print(box)
# for item in box:
# title = box.select_one('th > div').text
# print(item)
# list=[]
# price1 = box.select_one("th > div").text
# price2 = box.select_one("").text
#
# list.append(price1)
# list.append(price2)
#
# tot_list.append(list)
# 프레임 만드는 게 주 목적이면 df=pd.DataFrame(data=tot_list) 하고 return df
# df = pd.DataFrame(data=tot_list)
# return tot_list # [[],[],[]]
# ====================================================
# 데이터
# ====================================================
# -------- 병합 파일 불러오기
# Master company table joining KRX listing info with Naver/Yahoo codes.
# NOTE(review): hard-coded absolute Windows path -- make this configurable.
# Code columns are forced to str to preserve leading zeros.
com_df = pd.read_csv('C:\\AI\\pythonProject\\venv\\project\\dashboard\\data\\com_df.csv',
                     dtype={'stock_code': 'str', '표준코드': 'str', '단축코드': 'str'},
                     parse_dates=['listed_date', '상장일'])
# -------- 기업별 산업 코드
# ====================================================
# 함수 호출(test)
# ====================================================
# df=bs_craw(corp_name='삼성전자', kind=0)
# print(df)
# select_same_industry('삼성전자')
# ====================================================
# 라우터
# =====================================================
# app = Flask(__name__, template_folder='production', static_folder='build' ) # template, static 폴더 다르게 지정 가능
#
# @app.route('/')
# def index():
#
# # TODO: 검색에서 기업 코드/이름 할당 받음
# # ------------------테스트용 nm ,stc_code, yh_code TODO 지울 것
# nm='올리패스'
# # stc_code=nm_to_fn_gicode(nm)
# stc_code='005930' # 삼성전자 주식코드
# yh_code='035420.KS' # 네이버 야후코드
# # ------------------
#
# radar_label, radar_dict=relate_radar_data(stock_code=stc_code) # TODO: 검색에서 기업 코드/이름 할당 받음
# bar_label, bar_mch_list, bar_dg_list = mch_dg(stock_code=stc_code) # TODO: 데이터 없으면 0으로 처리하건 해야할듯
#
#
# return render_template("index.html",
# RD_LABEL_LIST=radar_label,
# RD_DATA_DICT=radar_dict,
# BAR_LABEL_LIST=bar_label,
# BAR_DATA_LIST_MCH=bar_mch_list,
# BAR_DATA_LIST_DG=bar_dg_list
# )
#
#
#
#
#
# if __name__ == '__main__': # 이 py 안에서 실행하면 '__main__'
# app.debug=True # TODO(개발 끝나면 반드시 막기)
# app.run(host='0.0.0.0', port=8899)
| 27.680942 | 154 | 0.512571 |
5d6aa1d53a3848a4aa1776764c8a9d675a82f914 | 2,754 | py | Python | examples/point/equivariance.py | mariogeiger/se3cnn | afd027c72e87f2c390e0a2e7c6cfc8deea34b0cf | [
"MIT"
] | 170 | 2018-07-03T17:18:35.000Z | 2022-03-30T15:30:40.000Z | examples/point/equivariance.py | CNNs4QSPR/se3cnn | 513f5f827c4c511bdc96e3c6ea663c8fbce60f57 | [
"MIT"
] | 34 | 2018-10-09T08:56:24.000Z | 2020-01-30T09:46:52.000Z | examples/point/equivariance.py | CNNs4QSPR/se3cnn | 513f5f827c4c511bdc96e3c6ea663c8fbce60f57 | [
"MIT"
] | 40 | 2018-07-03T13:40:14.000Z | 2022-01-28T16:40:48.000Z | from functools import partial
import torch
from se3cnn.non_linearities.gated_block import GatedBlock
from se3cnn.non_linearities.rescaled_act import relu, sigmoid, tanh, absolute
from se3cnn.non_linearities.gated_block_parity import GatedBlockParity
from se3cnn.point.kernel import Kernel
from se3cnn.point.operations import Convolution
from se3cnn.point.radial import ConstantRadialModel
from se3cnn.SO3 import rep, rot
def check_rotation(batch: int = 10, n_atoms: int = 25):
    """Numerically check rotation equivariance of a GatedBlock point convolution.

    Rotating the input (features by the Wigner-D matrix, geometry by the
    rotation matrix) must match rotating the output. Returns a 0-dim bool
    tensor: True when the relative error is below 1e-4.
    """
    # Setup the network.
    K = partial(Kernel, RadialModel=ConstantRadialModel)
    C = partial(Convolution, K)
    Rs_in = [(1, 0), (1, 1)]  # (multiplicity, l) pairs
    Rs_out = [(1, 0), (1, 1), (1, 2)]
    f = GatedBlock(
        Rs_in,
        Rs_out,
        scalar_activation=sigmoid,
        gate_activation=absolute,
        Operation=C
    )
    # Setup the data. The geometry, input features, and output features must all rotate.
    abc = torch.randn(3)  # Rotation seed of euler angles.
    rot_geo = rot(*abc)
    D_in = rep(Rs_in, *abc)
    D_out = rep(Rs_out, *abc)
    # Feature channel count: each (mul, l) contributes mul * (2l + 1) channels.
    c = sum([mul * (2 * l + 1) for mul, l in Rs_in])
    feat = torch.randn(batch, n_atoms, c)  # Transforms with wigner D matrix
    geo = torch.randn(batch, n_atoms, 3)  # Transforms with rotation matrix.
    # Test equivariance: rotate-then-apply must equal apply-then-rotate.
    F = f(feat, geo)
    RF = torch.einsum("ij,zkj->zki", D_out, F)
    FR = f(feat @ D_in.t(), geo @ rot_geo.t())
    return (RF - FR).norm() < 10e-5 * RF.norm()  # 10e-5 == 1e-4 relative tolerance
def check_rotation_parity(batch: int = 10, n_atoms: int = 25):
    """Check equivariance of GatedBlockParity under an improper rotation.

    Like check_rotation, but representations carry a parity component and the
    geometry transforms with -R (odd parity / improper rotation). Returns a
    0-dim bool tensor: True when the relative error is below 1e-4.
    """
    # Setup the network.
    K = partial(Kernel, RadialModel=ConstantRadialModel)
    C = partial(Convolution, K)
    Rs_in = [(1, 0, +1)]  # (multiplicity, l, parity)
    f = GatedBlockParity(
        Operation=C,
        Rs_in=Rs_in,
        Rs_scalars=[(4, 0, +1)],
        act_scalars=[(-1, relu)],
        Rs_gates=[(8, 0, +1)],
        act_gates=[(-1, tanh)],
        Rs_nonscalars=[(4, 1, -1), (4, 2, +1)]
    )
    Rs_out = f.Rs_out
    # Setup the data. The geometry, input features, and output features must all rotate and observe parity.
    abc = torch.randn(3)  # Rotation seed of euler angles.
    rot_geo = -rot(*abc)  # Negative because geometry has odd parity. i.e. improper rotation.
    D_in = rep(Rs_in, *abc, parity=1)
    D_out = rep(Rs_out, *abc, parity=1)
    # Feature channel count: each (mul, l, parity) contributes mul * (2l + 1).
    c = sum([mul * (2 * l + 1) for mul, l, _ in Rs_in])
    feat = torch.randn(batch, n_atoms, c)  # Transforms with wigner D matrix and parity.
    geo = torch.randn(batch, n_atoms, 3)  # Transforms with rotation matrix and parity.
    # Test equivariance: rotate-then-apply must equal apply-then-rotate.
    F = f(feat, geo)
    RF = torch.einsum("ij,zkj->zki", D_out, F)
    FR = f(feat @ D_in.t(), geo @ rot_geo.t())
    return (RF - FR).norm() < 10e-5 * RF.norm()  # 10e-5 == 1e-4 relative tolerance
if __name__ == '__main__':
    # Fix: the checks return booleans that were previously discarded, so a
    # broken equivariance would pass silently. Fail loudly instead.
    if not check_rotation():
        raise AssertionError("rotation equivariance check failed")
    if not check_rotation_parity():
        raise AssertionError("rotation + parity equivariance check failed")
| 34 | 107 | 0.637981 |
3f498badc94c7060ed3768e5e0fa4d30e0b5368a | 265 | py | Python | coverage_wrapper.py | christophermark/Python-Example-Project | 2c37e0342b5d037be50f6ee553485d1dee0e598b | [
"MIT"
] | 7 | 2017-07-08T03:24:40.000Z | 2020-06-21T13:29:05.000Z | coverage_wrapper.py | ccpmark/Python-Example-Project | 2c37e0342b5d037be50f6ee553485d1dee0e598b | [
"MIT"
] | null | null | null | coverage_wrapper.py | ccpmark/Python-Example-Project | 2c37e0342b5d037be50f6ee553485d1dee0e598b | [
"MIT"
] | 7 | 2017-04-17T20:54:54.000Z | 2021-11-06T13:09:32.000Z | # A wrapper function to monitor coverage through all tests
from coverage import coverage
import pytest
cov = coverage(omit='.tox*')
cov.start()
# Tests to run
# Pytest will crawl through the project directory for test files.
pytest.main()
cov.stop()
cov.save()
| 17.666667 | 65 | 0.750943 |
eed943dc6ae81a125d77c5cad44abcb6084cb47e | 403 | py | Python | crusoe_act/act-component/dnsfw-wrapper/dnsfw_wrapper/wsgi.py | CSIRT-MU/CRUSOE | 73e4ac0ced6c3ac46d24ac5c3feb01a1e88bd36b | [
"MIT"
] | 3 | 2021-11-09T09:55:17.000Z | 2022-02-19T02:58:27.000Z | crusoe_act/act-component/dnsfw-wrapper/dnsfw_wrapper/wsgi.py | CSIRT-MU/CRUSOE | 73e4ac0ced6c3ac46d24ac5c3feb01a1e88bd36b | [
"MIT"
] | null | null | null | crusoe_act/act-component/dnsfw-wrapper/dnsfw_wrapper/wsgi.py | CSIRT-MU/CRUSOE | 73e4ac0ced6c3ac46d24ac5c3feb01a1e88bd36b | [
"MIT"
] | null | null | null | """
WSGI config for dnsfw_wrapper project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Default to the project's settings module; an externally exported
# DJANGO_SETTINGS_MODULE still takes precedence (setdefault semantics).
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dnsfw_wrapper.settings')
# Module-level WSGI callable picked up by WSGI servers (gunicorn, uWSGI, ...).
application = get_wsgi_application()
| 23.705882 | 78 | 0.791563 |
a780bdb2082be461f0658b778ab3efe0a8987ef7 | 1,996 | py | Python | Leetcode/427.construct-quad-tree.py | EdwaRen/Competitve-Programming | e8bffeb457936d28c75ecfefb5a1f316c15a9b6c | [
"MIT"
] | 1 | 2021-05-03T21:48:25.000Z | 2021-05-03T21:48:25.000Z | Leetcode/427.construct-quad-tree.py | EdwaRen/Competitve_Programming | e8bffeb457936d28c75ecfefb5a1f316c15a9b6c | [
"MIT"
] | null | null | null | Leetcode/427.construct-quad-tree.py | EdwaRen/Competitve_Programming | e8bffeb457936d28c75ecfefb5a1f316c15a9b6c | [
"MIT"
] | null | null | null | """
# Definition for a QuadTree node.
"""
class Node(object):
    """Quad-tree node.

    ``val`` holds the cell value for leaves ('*' for internal nodes),
    ``isLeaf`` marks leaves, and the four child links are None on leaves.
    """
    def __init__(self, val, isLeaf, topLeft, topRight, bottomLeft, bottomRight):
        self.val = val
        self.isLeaf = isLeaf
        self.topLeft = topLeft
        self.topRight = topRight
        self.bottomLeft = bottomLeft
        self.bottomRight = bottomRight


class Solution(object):
    def construct(self, grid):
        """Build a quad tree from a square grid.

        :type grid: List[List[int]]
        :rtype: Node
        """
        return self.recurse(grid)

    def recurse(self, subgrid):
        # A uniform sub-grid becomes a leaf carrying its single value.
        if self.checkMono(subgrid):
            return Node(subgrid[0][0], True, None, None, None, None)
        else:
            n = len(subgrid)
            # BUG FIX: use floor division so slice bounds are ints.  The
            # original used `n/2`, which is a float under Python 3 and
            # raises "TypeError: slice indices must be integers".
            h = n // 2
            cur = Node('*', False, None, None, None, None)
            cur.topLeft = self.recurse([row[:h] for row in subgrid[:h]])
            cur.topRight = self.recurse([row[h:] for row in subgrid[:h]])
            cur.bottomLeft = self.recurse([row[:h] for row in subgrid[h:]])
            cur.bottomRight = self.recurse([row[h:] for row in subgrid[h:]])
            return cur

    # Check all squares in the grid are valid
    def checkMono(self, subgrid):
        """Return True iff every cell equals subgrid[0][0]."""
        first = subgrid[0][0]
        return all(col == first for row in subgrid for col in row)
# Ad-hoc demo: build the quad tree for an 8x8 example grid and dump it.
z = Solution()
grid = [
    [1, 1, 1, 1, 0, 0, 0, 0],
    [1, 1, 1, 1, 0, 0, 0, 0],
    [1, 1, 1, 1, 1, 1, 1, 1],
    [1, 1, 1, 1, 1, 1, 1, 1],
    [1, 1, 1, 1, 0, 0, 0, 0],
    [1, 1, 1, 1, 0, 0, 0, 0],
    [1, 1, 1, 1, 0, 0, 0, 0],
    [1, 1, 1, 1, 0, 0, 0, 0],
]
res = z.construct(grid)
def printQuad(res):
    # Breadth-first dump of node values.  `depth` is tracked when
    # enqueueing children but is never printed (kept for debugging).
    q = [(res, 0)]
    while q:
        cur, depth = q.pop(0)
        if not cur:
            continue
        print("cur value", cur.val)
        if not cur.isLeaf:
            q.append((cur.topLeft, depth+1))
            q.append((cur.topRight, depth + 1))
            q.append((cur.bottomLeft, depth + 1))
            q.append((cur.bottomRight, depth + 1))
printQuad(res)
83b88f0bea9410f715c3662cbbf4bf6afb9f790d | 1,470 | py | Python | src/main.py | giansalex/repo_info_extractor | fa31940964e31c2d1aa3cdfd302539e02f70f511 | [
"MIT"
] | null | null | null | src/main.py | giansalex/repo_info_extractor | fa31940964e31c2d1aa3cdfd302539e02f70f511 | [
"MIT"
] | null | null | null | src/main.py | giansalex/repo_info_extractor | fa31940964e31c2d1aa3cdfd302539e02f70f511 | [
"MIT"
] | null | null | null | import argparse
import git
import os
from export_result import ExportResult
from analyze_repo import AnalyzeRepo
from ui.questions import Questions
def main():
parser = argparse.ArgumentParser()
parser.add_argument('directory', help='Path to the repository. Example usage: run.sh path/to/directory')
parser.add_argument('--output', default='./repo_data.json', dest='output', help='Path to the JSON file that will contain the result')
parser.add_argument('--skip_obfuscation', default=False, dest='skip_obfuscation', help='If true it won\'t obfuscate the sensitive data such as emails and file names. Mostly for testing purpuse')
args = parser.parse_args()
repo = git.Repo(args.directory)
ar = AnalyzeRepo(repo, args.skip_obfuscation)
q = Questions()
print('Initialization...')
for branch in repo.branches:
ar.create_commits_entity_from_branch(branch.name)
ar.flag_duplicated_commits()
ar.get_commit_stats()
r = ar.create_repo_entity(args.directory)
# Ask the user if we cannot find remote URL
if r.primary_remote_url == '':
answer = q.ask_primary_remote_url(r)
identities = q.ask_user_identity(r)
while len(identities['user_identity']) == 0:
print('Please select at least one.')
identities = q.ask_user_identity(r)
r.local_usernames = identities['user_identity']
er = ExportResult(r)
er.export_to_json(args.output)
if __name__ == "__main__":
main() | 37.692308 | 198 | 0.719048 |
31a33c0dced0f35d0e19d3b47557833b2184e7d4 | 1,821 | py | Python | modules/sr/robot/vision/api.py | WillB97/competition-simulator-CI-testing | e0ef45d0abfa1d167b9844c29bfd43b495aed9af | [
"MIT"
] | 4 | 2020-06-12T18:00:45.000Z | 2021-02-17T14:16:59.000Z | modules/sr/robot/vision/api.py | WillB97/competition-simulator-CI-testing | e0ef45d0abfa1d167b9844c29bfd43b495aed9af | [
"MIT"
] | 158 | 2020-04-29T17:59:31.000Z | 2021-11-13T00:00:21.000Z | modules/sr/robot/vision/api.py | WillB97/competition-simulator-CI-testing | e0ef45d0abfa1d167b9844c29bfd43b495aed9af | [
"MIT"
] | 10 | 2020-06-12T16:19:46.000Z | 2021-07-07T21:06:48.000Z | from typing import List, Tuple, Callable, Iterable, Sequence, TYPE_CHECKING
from sr.robot.coordinates.vectors import Vector
from .image import Rectangle
from .tokens import Token
from .convert import WebotsOrientation, rotation_matrix_from_axis_and_angle
if TYPE_CHECKING:
from controller import CameraRecognitionObject
def build_token_info(
    recognition_object: 'CameraRecognitionObject',
    size: float,
) -> Tuple[Token, Rectangle, 'CameraRecognitionObject']:
    """Build a Token plus its on-image bounding Rectangle for one
    recognised camera object, returning the source object alongside.
    """
    x, y, z = recognition_object.get_position()

    token = Token(
        size=size,
        # Webots Z is inverted with regard to the one we want.
        position=Vector((x, y, -z)),
    )
    # Orientation comes as a Webots axis/angle tuple; convert it to a
    # rotation matrix and apply it to the token.
    token.rotate(rotation_matrix_from_axis_and_angle(
        WebotsOrientation(*recognition_object.get_orientation()),
    ))

    return (
        token,
        Rectangle(
            recognition_object.get_position_on_image(),
            recognition_object.get_size_on_image(),
        ),
        recognition_object,
    )
def tokens_from_objects(
    objects: 'Iterable[CameraRecognitionObject]',
    get_size: 'Callable[[CameraRecognitionObject], float]',
) -> Sequence[Tuple[Token, 'CameraRecognitionObject']]:
    """
    Constructs tokens from the given recognised objects, ignoring any which are
    judged not to be visible to the camera.

    Visibility heuristic: tokens are processed nearest-first; a token whose
    image rectangle overlaps the rectangle of any *visible* nearer token is
    treated as occluded and dropped.
    """
    # Sort nearest-first so closer tokens get priority as occluders.
    tokens_with_info = sorted(
        (build_token_info(o, get_size(o)) for o in objects),
        key=lambda x: x[0].position.magnitude(),
    )

    preceding_rectangles: List[Rectangle] = []
    tokens = []
    for token, image_rectangle, recognition_object in tokens_with_info:
        if not any(x.overlaps(image_rectangle) for x in preceding_rectangles):
            tokens.append((token, recognition_object))
            # Only *visible* tokens' rectangles occlude later (farther) ones.
            preceding_rectangles.append(image_rectangle)
    return tokens
| 29.852459 | 79 | 0.701263 |
9039fdbb1e19473346ab220e832c1259909c11c3 | 2,319 | py | Python | test_scripts/udp_client.py | gftabor/pyForceDAQ | 3eababb41d855b961d228d8366fdd154bb6314ea | [
"MIT"
] | null | null | null | test_scripts/udp_client.py | gftabor/pyForceDAQ | 3eababb41d855b961d228d8366fdd154bb6314ea | [
"MIT"
] | null | null | null | test_scripts/udp_client.py | gftabor/pyForceDAQ | 3eababb41d855b961d228d8366fdd154bb6314ea | [
"MIT"
] | null | null | null | # test client udo connection
from expyriment import control, stimuli, io, misc
from expyriment.misc import Clock
from forceDAQ import UDPConnection
# Key bindings for the interactive loop below:
# t : test connect
# q : quit client
# space : enter
# NOTE: this is a Python 2 script (uses print statements).
control.set_develop_mode(True)
exp = control.initialize()
udp_connection = UDPConnection()
print udp_connection
# Hard-coded peer IP; alternate address kept in the trailing comment.
if not udp_connection.connect_peer("192.168.1.1"): # 41.89.98.24
    print "error connecting to peer"
    exit()
stimuli.TextLine("connected to " + udp_connection.peer_ip).present()
c = Clock()
# #udp.send("maximum: 72")
##udp.send("measurement: 25")
##udp.send("filename: test")
##udp.send("report: 5")
##print "--> ", c.time, "done"
##udp.send("done")
##feedback = udp.poll()
##while feedback is None:
## feedback = udp.poll()
##print "<-- ", c.time, feedback
##
##print "--> ", c.time, "start"
##udp.send("start")
##feedback = udp.poll()
##while feedback is None:
## feedback = udp.poll()
##print "<-- ", c.time, feedback
##c.wait(2000)
##
##print "--> ", c.time, "pause"
##udp.send("pause")
##feedback = udp.poll()
##while feedback is None:
## feedback = udp.poll()
##print "<-- ", c.time, feedback
##c.wait(2000)
##
##print "--> ", c.time, "unpause"
##udp.send("unpause")
##udp.send("unpause") #for data output
##feedback = udp.poll()
##while feedback is None:
## feedback = udp.poll()
##print "<-- ", c.time, feedback
##c.wait(2000)
##
##print "--> ", c.time, "quit"
##udp.send("quit")
##feedback = udp.poll()
##while feedback is None:
## feedback = udp.poll()
##print "<-- ", c.time, feedback
# Main event loop: q quits, space sends typed text, t runs a 20-ping test.
while True:
    key = exp.keyboard.check()
    if key == ord("q"):
        break
    elif key == misc.constants.K_SPACE:
        # Prompt for a line of text and send it to the peer.
        text = io.TextInput().get()
        stimuli.BlankScreen().present()
        print "--> ", c.time, text
        udp_connection.send(text)
    elif key == ord("t"):
        # Ping test: 20 round-trips, 100 ms apart; collect the times.
        # NOTE(review): `ok, time = ...` shadows any `time` module name
        # in this scope - confirm that is intentional.
        times = []
        for cnt in range(20):
            stimuli.TextLine("ping test " + str(cnt)).present()
            c.reset_stopwatch()
            ok, time = udp_connection.ping()
            print c.stopwatch_time
            times.append(time)
            c.wait(100)
        stimuli.BlankScreen().present()
        print times
    # Drain any message the peer pushed to us.
    feedback = udp_connection.poll()
    if feedback is not None:
        print "<-- ", c.time, feedback
udp_connection.unconnect_peer()
| 23.424242 | 68 | 0.605002 |
ce47d1224f6f86d3be6d1b4748e9b43c7dbd3d99 | 13,367 | py | Python | src_py/hat/drivers/mms/connection.py | hat-open/hat-drivers | 64d175b1f724e9b7e7e7fe4852f8de973a659730 | [
"Apache-2.0"
] | 1 | 2022-02-01T13:42:51.000Z | 2022-02-01T13:42:51.000Z | src_py/hat/drivers/mms/connection.py | hat-open/hat-drivers | 64d175b1f724e9b7e7e7fe4852f8de973a659730 | [
"Apache-2.0"
] | null | null | null | src_py/hat/drivers/mms/connection.py | hat-open/hat-drivers | 64d175b1f724e9b7e7e7fe4852f8de973a659730 | [
"Apache-2.0"
] | 3 | 2021-09-10T12:22:02.000Z | 2022-02-18T12:55:44.000Z | """Manufacturing Message Specification"""
from pathlib import Path
import asyncio
import logging
import typing
from hat import aio
from hat import asn1
from hat.drivers import acse
from hat.drivers.mms import common
from hat.drivers.mms import encoder
mlog = logging.getLogger(__name__)
Address = acse.Address
"""Address"""
IdentifiedEntity = acse.IdentifiedEntity
"""Identified entity"""
ConnectionInfo = acse.ConnectionInfo
"""Connection info"""
RequestCb = aio.AsyncCallable[['Connection', common.Request], common.Response]
"""Request callback"""
ConnectionCb = aio.AsyncCallable[['Connection'], None]
"""Connection callback"""
async def connect(request_cb: RequestCb,
                  addr: Address,
                  local_tsel: typing.Optional[int] = None,
                  remote_tsel: typing.Optional[int] = None,
                  local_ssel: typing.Optional[int] = None,
                  remote_ssel: typing.Optional[int] = None,
                  local_psel: typing.Optional[int] = None,
                  remote_psel: typing.Optional[int] = None,
                  local_ap_title: typing.Optional[asn1.ObjectIdentifier] = None,  # NOQA
                  remote_ap_title: typing.Optional[asn1.ObjectIdentifier] = None,  # NOQA
                  local_ae_qualifier: typing.Optional[int] = None,
                  remote_ae_qualifier: typing.Optional[int] = None,
                  user_data: typing.Optional[IdentifiedEntity] = None
                  ) -> 'Connection':
    """Connect to ACSE server

    Performs the MMS initiate handshake on top of a new ACSE connection
    and returns a ready :class:`Connection`.  On any handshake failure
    the underlying ACSE connection is closed and the error re-raised.

    NOTE(review): the ``user_data`` parameter is accepted but never used
    below (the initiate request is always sent instead) - confirm intent.
    """
    # MMS initiate request carried as ACSE user data.
    initiate_req = 'initiate-RequestPDU', {
        'proposedMaxServOutstandingCalling': 5,
        'proposedMaxServOutstandingCalled': 5,
        'initRequestDetail': {
            'proposedVersionNumber': 1,
            'proposedParameterCBB': _parameter_cbb,
            'servicesSupportedCalling': _service_support}}
    req_user_data = _encode(initiate_req)
    acse_conn = await acse.connect(syntax_name_list=[_mms_syntax_name],
                                   app_context_name=_mms_app_context_name,
                                   addr=addr,
                                   local_tsel=local_tsel,
                                   remote_tsel=remote_tsel,
                                   local_ssel=local_ssel,
                                   remote_ssel=remote_ssel,
                                   local_psel=local_psel,
                                   remote_psel=remote_psel,
                                   local_ap_title=local_ap_title,
                                   remote_ap_title=remote_ap_title,
                                   local_ae_qualifier=local_ae_qualifier,
                                   remote_ae_qualifier=remote_ae_qualifier,
                                   user_data=(_mms_syntax_name, req_user_data))
    try:
        # Validate the peer's initiate response before exposing the
        # connection to the caller.
        res_syntax_name, res_user_data = acse_conn.conn_res_user_data
        if not asn1.is_oid_eq(res_syntax_name, _mms_syntax_name):
            raise Exception("invalid syntax name")
        initiate_res = _decode(res_user_data)
        if initiate_res[0] != 'initiate-ResponsePDU':
            raise Exception("invalid initiate response")
        return _create_connection(request_cb, acse_conn)
    except Exception:
        await aio.uncancellable(acse_conn.async_close())
        raise
async def listen(connection_cb: ConnectionCb,
                 request_cb: RequestCb,
                 addr: Address = Address('0.0.0.0', 102)
                 ) -> 'Server':
    """Create MMS listening server

    Args:
        connection_cb: new connection callback
        request_cb: received request callback
        addr: local listening address
    """
    async def on_validate(syntax_names, user_data):
        # ACSE validation hook: check the MMS initiate request and build
        # the initiate response returned as ACSE user data.
        syntax_name, req_user_data = user_data
        if not asn1.is_oid_eq(syntax_name, _mms_syntax_name):
            raise Exception('invalid mms syntax name')
        initiate_req = _decode(req_user_data)
        if initiate_req[0] != 'initiate-RequestPDU':
            raise Exception('invalid initiate request')
        initiate_res = 'initiate-ResponsePDU', {
            'negotiatedMaxServOutstandingCalling': 5,
            'negotiatedMaxServOutstandingCalled': 5,
            'negotiatedDataStructureNestingLevel': 4,  # TODO compatibility
            'initResponseDetail': {
                'negotiatedVersionNumber': 1,
                'negotiatedParameterCBB': _parameter_cbb,
                'servicesSupportedCalled': _service_support}}
        # Echo the caller's local detail back if it provided one.
        if 'localDetailCalling' in initiate_req[1]:
            initiate_res[1]['localDetailCalled'] = \
                initiate_req[1]['localDetailCalling']
        res_user_data = _encode(initiate_res)
        return _mms_syntax_name, res_user_data
    async def on_connection(acse_conn):
        # Wrap each accepted ACSE connection in a Connection and hand it
        # to the user callback; failures close the connection and are
        # logged instead of propagating into the accept loop.
        try:
            try:
                conn = _create_connection(request_cb, acse_conn)
            except Exception:
                await aio.uncancellable(acse_conn.async_close())
                raise
            try:
                await aio.call(connection_cb, conn)
            except BaseException:
                await aio.uncancellable(conn.async_close())
                raise
        except Exception as e:
            mlog.error("error creating new incomming connection: %s",
                       e, exc_info=e)
    async def wait_acse_server_closed():
        # Propagate ACSE server shutdown to this server's async group.
        try:
            await acse_server.wait_closed()
        finally:
            async_group.close()
    async_group = aio.Group()
    acse_server = await acse.listen(on_validate, on_connection, addr)
    async_group.spawn(aio.call_on_cancel, acse_server.async_close)
    async_group.spawn(wait_acse_server_closed)
    srv = Server()
    srv._async_group = async_group
    srv._acse_server = acse_server
    return srv
class Server(aio.Resource):
    """MMS listening server

    For creating new server see :func:`listen`

    Closing server doesn't close active incomming connections
    """
    @property
    def async_group(self) -> aio.Group:
        """Async group"""
        return self._async_group
    @property
    def addresses(self) -> typing.List[Address]:
        """Listening addresses"""
        # Delegates to the underlying ACSE server (set by listen()).
        return self._acse_server.addresses
def _create_connection(request_cb, acse_conn):
    """Wire up a Connection over an established ACSE connection and
    start its read loop.  Shared by connect() and listen().
    """
    conn = Connection()
    conn._request_cb = request_cb
    conn._acse_conn = acse_conn
    conn._last_invoke_id = 0
    conn._unconfirmed_queue = aio.Queue()
    # invoke_id -> Future resolved by _read_loop with the peer's response.
    conn._response_futures = {}
    conn._async_group = aio.Group()
    conn._async_group.spawn(conn._read_loop)
    return conn
class Connection(aio.Resource):
    """MMS connection

    For creating new connection see :func:`connect`
    """
    @property
    def async_group(self) -> aio.Group:
        """Async group"""
        return self._async_group
    @property
    def info(self) -> ConnectionInfo:
        """Connection info"""
        return self._acse_conn.info
    async def receive_unconfirmed(self) -> common.Unconfirmed:
        """Receive unconfirmed message

        Raises:
            ConnectionError: in case connection is not open
        """
        try:
            return await self._unconfirmed_queue.get()
        except aio.QueueClosedError:
            raise ConnectionError('connection is not open')
    def send_unconfirmed(self, unconfirmed: common.Unconfirmed):
        """Send unconfirmed message"""
        pdu = 'unconfirmed-PDU', {
            'service': encoder.encode_unconfirmed(unconfirmed)}
        data = _mms_syntax_name, _encode(pdu)
        self._acse_conn.write(data)
    async def send_confirmed(self,
                             req: common.Request
                             ) -> common.Response:
        """Send confirmed request and wait for response

        Raises:
            ConnectionError: in case connection is not open
        """
        if self._async_group.is_closing:
            raise ConnectionError('connection is not open')
        # NOTE(review): invoke ids grow monotonically and never wrap -
        # confirm long-lived connections cannot exceed the protocol range.
        invoke_id = self._last_invoke_id + 1
        pdu = 'confirmed-RequestPDU', {
            'invokeID': invoke_id,
            'service': encoder.encode_request(req)}
        data = _mms_syntax_name, _encode(pdu)
        self._acse_conn.write(data)
        self._last_invoke_id = invoke_id
        # Resolved by _process_pdu when the matching response arrives.
        self._response_futures[invoke_id] = asyncio.Future()
        try:
            return await self._response_futures[invoke_id]
        finally:
            del self._response_futures[invoke_id]
    async def _read_loop(self):
        # Reads PDUs until _process_pdu signals shutdown (returns False)
        # or the task is cancelled.
        running = True
        try:
            while running:
                syntax_name, entity = await self._acse_conn.read()
                # Ignore data for other (non-MMS) abstract syntaxes.
                if not asn1.is_oid_eq(syntax_name, _mms_syntax_name):
                    continue
                pdu = _decode(entity)
                running = await self._process_pdu(pdu)
        except asyncio.CancelledError:
            # Graceful close: announce conclusion to the peer.
            pdu = 'conclude-RequestPDU', None
            data = _mms_syntax_name, _encode(pdu)
            self._acse_conn.write(data)
            # TODO: wait for response
            raise
        finally:
            # Fail all pending confirmed requests and tear everything down.
            self._async_group.close()
            self._unconfirmed_queue.close()
            for response_future in self._response_futures.values():
                if not response_future.done():
                    response_future.set_exception(
                        ConnectionError('connection is not open'))
            await aio.uncancellable(self._acse_conn.async_close())
    async def _process_pdu(self, pdu):
        # Dispatch one decoded MMSpdu; returns True to keep reading,
        # False to stop the read loop (connection conclusion).
        name, data = pdu
        if name == 'unconfirmed-PDU':
            unconfirmed = encoder.decode_unconfirmed(data['service'])
            await self._unconfirmed_queue.put(unconfirmed)
            return True
        elif name == 'confirmed-RequestPDU':
            # Peer request: run the user callback and send its result back
            # as either a response or an error PDU.
            invoke_id = data['invokeID']
            req = encoder.decode_request(data['service'])
            res = await aio.call(self._request_cb, self, req)
            if isinstance(res, common.ErrorResponse):
                res_pdu = 'confirmed-ErrorPDU', {
                    'invokeID': invoke_id,
                    'serviceError': {
                        'errorClass': (res.error_class.value, res.value)}}
            else:
                res_pdu = 'confirmed-ResponsePDU', {
                    'invokeID': invoke_id,
                    'service': encoder.encode_response(res)}
            res_data = _mms_syntax_name, _encode(res_pdu)
            self._acse_conn.write(res_data)
            return True
        elif name == 'confirmed-ResponsePDU':
            # Response to one of our confirmed requests.
            invoke_id = data['invokeID']
            res = encoder.decode_response(data['service'])
            future = self._response_futures.get(invoke_id)
            if future and not future.done():
                future.set_result(res)
            else:
                mlog.warn(f"dropping confirmed response "
                          f"(invoke_id: {invoke_id})")
            return True
        elif name == 'confirmed-ErrorPDU':
            # Error reply: delivered to the waiting caller as a value
            # (ErrorResponse), not as a raised exception.
            invoke_id = data['invokeID']
            error_class_name, value = data['serviceError']['errorClass']
            error_class = common.ErrorClass(error_class_name)
            res = common.ErrorResponse(error_class, value)
            future = self._response_futures.get(invoke_id)
            if future and not future.done():
                future.set_result(res)
            else:
                mlog.warn(f"dropping confirmed error "
                          f"(invoke_id: {invoke_id})")
            return True
        elif name == 'conclude-RequestPDU':
            # Peer wants to close: acknowledge and stop the read loop.
            res_pdu = 'conclude-ResponsePDU', None
            res_data = _mms_syntax_name, _encode(res_pdu)
            self._acse_conn.write(res_data)
            return False
        # Unknown PDU type: stop reading.
        return False
# Parameter CBB bit flags advertised during initiate.
# NOTE(review): allocated 10 slots but the trailing comment suggests the
# full bitstring may be 18 bits - confirm against the MMS spec.
_parameter_cbb = [False] * 10 # 18
_parameter_cbb[0] = True # str1
_parameter_cbb[1] = True # str2
_parameter_cbb[2] = True # vnam
_parameter_cbb[3] = True # valt
_parameter_cbb[4] = True # vadr
_parameter_cbb[6] = True # tpy
_parameter_cbb[7] = True # vlis
# Supported-services bitstring (indices are MMS service numbers).
# NOTE(review): 85 slots allocated vs. "93" in the trailing comment -
# confirm against the MMS spec.
_service_support = [False] * 85 # 93
_service_support[0] = True # status
_service_support[1] = True # getNameList
_service_support[2] = True # identify
_service_support[4] = True # read
_service_support[5] = True # write
_service_support[6] = True # getVariableAccessAttributes
_service_support[11] = True # defineNamedVariableList
_service_support[12] = True # getNamedVariableListAttributes
_service_support[13] = True # deleteNamedVariableList
_service_support[79] = True # informationReport
# not supported - compatibility flags
_service_support[18] = True # output
_service_support[83] = True # conclude
# Object identifiers for the MMS abstract syntax and application context.
_mms_syntax_name = [('iso', 1),
                    ('standard', 0),
                    ('iso9506', 9506),
                    ('part', 2),
                    ('mms-abstract-syntax-version1', 1)]
_mms_app_context_name = [('iso', 1),
                         ('standard', 0),
                         ('iso9506', 9506),
                         ('part', 2),
                         ('mms-annex-version1', 3)]
# BER encoder built from the bundled ASN.1 repository (asn1_repo.json
# lives next to this module).
_encoder = asn1.Encoder(asn1.Encoding.BER,
                        asn1.Repository.from_json(Path(__file__).parent /
                                                  'asn1_repo.json'))
def _encode(value):
    """Encode a ('pdu-name', data) pair as an ISO 9506 MMSpdu entity."""
    return _encoder.encode_value('ISO-9506-MMS-1', 'MMSpdu', value)
def _decode(entity):
    """Decode an MMSpdu entity back into a ('pdu-name', data) pair."""
    return _encoder.decode_value('ISO-9506-MMS-1', 'MMSpdu', entity)
aeb01c27b0fc2af3b0297459f82c4c4234b07625 | 8,799 | py | Python | spotify/spotify_oauth.py | jose-log/playlistCreator | ebe44f8277efdbd3b2b521f5ea45bfb25a081f73 | [
"MIT"
] | null | null | null | spotify/spotify_oauth.py | jose-log/playlistCreator | ebe44f8277efdbd3b2b521f5ea45bfb25a081f73 | [
"MIT"
] | null | null | null | spotify/spotify_oauth.py | jose-log/playlistCreator | ebe44f8277efdbd3b2b521f5ea45bfb25a081f73 | [
"MIT"
] | null | null | null | #!/bin/env python3
# This script implements direct OAuth authentication protocol based on the
# Spotify web API documentation. The Authentication process uses the
# Authorization Code Flow, which only requires from the user to authorize
# the client application only once using the web browser
#
# The end result is an access token that can later be included in the API
# methods to access the users' data
# Web communication modules
import requests
import urllib.request
import urllib.parse
# Browser and server modules
import webbrowser
from http.server import HTTPServer
from http.server import BaseHTTPRequestHandler
# Utility modules
import json
import base64
import time
import random
# Secrets modules
from spotify_secrets import client_app_id as client_id
from spotify_secrets import client_app_secret as client_secret
# Authorization endpoints (Spotify Accounts service + Web API base URL);
# redirect_uri must match the one registered for the client app.
auth_url = 'https://accounts.spotify.com/authorize'
token_url = 'https://accounts.spotify.com/api/token'
service_url = 'https://api.spotify.com/v1'
redirect_uri = 'http://localhost:9090'
# Where the access/refresh token JSON is cached between runs.
cache_path = './.cache-token'
# OAuth scope strings requested per high-level operation.
scopes = {}
scopes['add_item_to_playlist'] = 'playlist-modify-public playlist-modify-private'
scopes['create_playlist'] = 'playlist-modify-public playlist-modify-private'
scopes['retrieve_playlist'] = 'playlist-read-private'
###########################################################################
# RequestHandler
# Extended class from BaseHTTPRequestHandler used to handle the spotify
# authorization response and get the authorization code.
#
# https://docs.python.org/3/library/http.server.html#http.server.BaseHTTPRequestHandler.handle_one_request
class RequestHandler(BaseHTTPRequestHandler):
    """Handles the OAuth redirect: stores the authorization code (or
    error) on the owning HTTPServer instance and replies with a small
    status page.
    """
    # GET method handler:
    def do_GET(self):
        # Parse the query string of the redirect URL into a dict.
        query_s = urllib.parse.urlparse(self.path).query
        form = dict(urllib.parse.parse_qsl(query_s))
        self.send_response(200)
        self.send_header("Content-Type", "text/html")
        self.end_headers()
        if "code" in form:
            # Success: stash code + state for get_authorization_code().
            # NOTE(review): form["state"] raises KeyError if the redirect
            # carries a code but no state parameter - confirm acceptable.
            self.server.auth_code = form["code"]
            self.server.state = form["state"]
            self.server.error = None
            status = "successful"
        elif "error" in form:
            self.server.error = form["error"]
            self.server.auth_code = None
            status = "failed ({})".format(form["error"])
        else:
            self._write("<html><body><h1>Invalid request</h1></body></html>")
            return
        self._write(""" <html>
            <body>
            <h1>Authentication status: {}</h1>
            This window can be closed.
            <script>
            window.close()
            </script>
            </body>
            </html>""".format(status))
    def _write(self, text):
        # Encode and send a text payload over the response stream.
        return self.wfile.write(text.encode("utf-8"))
###########################################################################
# Request authorization to access data
# The requests module could be used to send a GET request to the authoriza-
# tion server, nevertheless, user authentication is required, thus, the
# web browser must be used to ask the user for login and authorization
def get_authorization_code():
    """Run the interactive half of the Authorization Code Flow.

    Starts a localhost server on port 9090 (must match redirect_uri),
    opens the authorization URL in the browser, blocks for one redirect
    request and returns the authorization code (or exits on error).
    """
    code = None
    redirect_port = 9090
    print('> Creating Local Server in port:', str(redirect_port))
    server = start_local_http_server(redirect_port)
    # Random state string: CSRF protection per the OAuth 2.0 spec.
    state = generate_random_string(20)
    print(' - State string: ' + state)
    url = build_authorize_url(state)
    print('> OAuth Authorization URL:', url)
    try:
        webbrowser.open(url)
        print('Opened {} in your browser'.format(auth_url))
    except webbrowser.Error:
        print('Please navigate here: {}'.format(url))
    print('Handling request')
    server.handle_request() # wait for authorization endpoint response
    if server.auth_code is not None:
        code = server.auth_code
        # NOTE(review): a state mismatch is only logged; the code is
        # still returned, so the CSRF check is not actually enforced -
        # confirm whether it should abort instead.
        if server.state.strip() != state:
            print('ERROR: response state don\'t match')
            print(server.state)
    elif server.error is not None:
        print('Received error from OAuth server: {}'.format(server.error))
        exit()
    else:
        print('Server listening on localhost has not been accessed')
        exit()
    return code
###########################################################################
# Request refresh and access tokens
# This time, no user interaction through the browser is needed, thus, the
# POST request is handled using Requests module
def request_access_token(code):
    """Exchange an authorization code for an access token.

    Caches the full token payload (with computed 'expires_at') to disk
    and returns the access token string; exits the process on HTTP error.
    """
    token = None
    payload = {
        'redirect_uri': redirect_uri,
        'code': code,
        'grant_type': 'authorization_code',
    }
    # Client credentials go in the Basic auth header.
    headers = make_authorization_headers()
    response = requests.post(token_url, data=payload, headers=headers)
    if response.status_code != 200:
        print('ERROR. Token request failed')
        exit()
    token_info = response.json()
    # Compute time value for token expiration date
    token_info['expires_at'] = int(time.time()) + token_info['expires_in']
    save_token_info(token_info)
    token = token_info['access_token']
    return token
###########################################################################
def generate_random_string(length):
    """Return `length` random characters drawn from [A-Za-z0-9].

    Used for the OAuth `state` parameter (CSRF protection), so the
    characters are drawn from the OS CSPRNG via random.SystemRandom
    instead of the default, predictable Mersenne Twister generator.
    """
    universe = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
    rng = random.SystemRandom()
    return ''.join(rng.choice(universe) for _ in range(length))
###########################################################################
def start_local_http_server(port, handler=RequestHandler):
    """Start an HTTP server on 127.0.0.1:`port` to catch the OAuth redirect.

    BUG FIX: `allow_reuse_address` must be set *before* the socket is
    bound, so construct with bind_and_activate=False and bind manually.
    The original set the flag after HTTPServer.__init__() had already
    bound the socket, where it had no effect.
    """
    server = HTTPServer(("127.0.0.1", port), handler, bind_and_activate=False)
    server.allow_reuse_address = True
    server.server_bind()
    server.server_activate()
    # Filled in by RequestHandler.do_GET when the redirect arrives.
    server.auth_code = None
    server.error = None
    return server
###########################################################################
def build_authorize_url(state):
    """Return the user-facing URL that starts the authorization flow."""
    query = urllib.parse.urlencode({
        'response_type': 'code',
        'client_id': client_id,
        'state': state,
        'scope': scopes['retrieve_playlist'],
        'redirect_uri': redirect_uri,
    })
    return "{}?{}".format(auth_url, query)
###########################################################################
def make_authorization_headers():
    """Build the HTTP Basic auth header carrying the client credentials.

    (The API alternatively accepts these parameters in the POST body.)
    """
    credentials = (client_id + ':' + client_secret).encode('ascii')
    encoded = base64.b64encode(credentials).decode('ascii')
    return {'Authorization': 'Basic {}'.format(encoded)}
###########################################################################
def save_token_info(token_info):
    """Persist the token dict as JSON at `cache_path`.

    Best-effort (matches the original behaviour): returns True on
    success, prints a message and returns False on failure.
    """
    try:
        # with-statement guarantees the file handle is closed even if
        # serialization fails (the original leaked it in that case).
        with open(cache_path, 'w') as f:
            json.dump(token_info, f)
        return True
    except (OSError, TypeError):
        # OSError: file system problems; TypeError: non-serializable data.
        print('Couldn\'t write token to cache at: {}'.format(cache_path))
        return False
###########################################################################
def get_cached_token():
    """Load the cached token dict from `cache_path`.

    Returns the parsed dict, or None if the file is missing/unreadable
    or contains invalid JSON (matching the original best-effort
    behaviour, but without a bare `except:` or a leaked file handle).
    """
    token_info = None
    print(' - Extracting cached token')
    try:
        with open(cache_path) as f:
            token_info = json.load(f)
    except (OSError, json.JSONDecodeError):
        print('ERROR! opening {} failed'.format(cache_path))
    return token_info
###########################################################################
def is_token_expired(token_info):
    """Treat the token as expired once fewer than 60s of validity remain."""
    remaining = token_info['expires_at'] - int(time.time())
    return remaining < 60
###########################################################################
def refresh_access_token(refresh_token):
    """Obtain a fresh access token using a refresh token.

    Re-caches the new token payload and returns the access token string;
    exits the process on HTTP error.  Largely mirrors
    request_access_token() but with the refresh_token grant.
    """
    payload = {
        'refresh_token': refresh_token,
        'grant_type': 'refresh_token',
    }
    headers = make_authorization_headers()
    response = requests.post(token_url, data=payload, headers=headers)
    if response.status_code != 200:
        print('ERROR. Token request failed')
        exit()
    token_info = response.json()
    # Compute time value for token expiration date
    token_info['expires_at'] = int(time.time()) + token_info['expires_in']
    # The server may omit the refresh token; keep reusing the old one.
    if 'refresh_token' not in token_info:
        token_info["refresh_token"] = refresh_token
    save_token_info(token_info)
    token = token_info['access_token']
    return token
###########################################################################
def request_valid_token():
    """Return a currently-valid access token from the on-disk cache.

    Refreshes the token automatically if it is (nearly) expired; exits
    the process if no cached token exists.
    """
    token = None
    token_info = get_cached_token()
    if token_info is None:
        print('FAILED. No cached token was found')
        exit()
    if is_token_expired(token_info):
        print(' - Cached token expired.')
        token = refresh_access_token(token_info['refresh_token'])
    else:
        print(' - Cached token VALID')
        token = token_info['access_token']
    return token
###############################################################################
# MAIN PROGRAM
###############################################################################
if __name__ == '__main__':
    print('Hi!. This is an OAuth-based Spotify API\n\r')
    # Ask whether to run the full browser authorization or reuse the cache.
    auth = None
    while auth is None:
        x = input('Do you want to request new Authorization? (Y/n)')
        # NOTE(review): `'' in 'yY '` is True, so a bare Enter counts as
        # "yes" (likely the intended default) - confirm.  Multi-character
        # input only matches if it is a substring, otherwise it is INVALID.
        if x in 'yY ':
            auth = True
            break
        elif x in 'nN':
            auth = False
            break
        elif x in 'qQ':
            exit()
        else:
            print('INVALID!')
    if auth is True:
        # Full flow: browser consent -> code -> token (cached to disk).
        code = get_authorization_code()
        token = request_access_token(code)
    else:
        # Reuse the cached token, refreshing it if expired.
        token = request_valid_token()
    print('FINISHED')
| 28.568182 | 106 | 0.64155 |
cb66824b7c3d0909a18db8b0a2a02e1312728c64 | 546 | py | Python | tests/r/test_snow_deaths2.py | hajime9652/observations | 2c8b1ac31025938cb17762e540f2f592e302d5de | [
"Apache-2.0"
] | 199 | 2017-07-24T01:34:27.000Z | 2022-01-29T00:50:55.000Z | tests/r/test_snow_deaths2.py | hajime9652/observations | 2c8b1ac31025938cb17762e540f2f592e302d5de | [
"Apache-2.0"
] | 46 | 2017-09-05T19:27:20.000Z | 2019-01-07T09:47:26.000Z | tests/r/test_snow_deaths2.py | hajime9652/observations | 2c8b1ac31025938cb17762e540f2f592e302d5de | [
"Apache-2.0"
] | 45 | 2017-07-26T00:10:44.000Z | 2022-03-16T20:44:59.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.snow_deaths2 import snow_deaths2
def test_snow_deaths2():
  """Test module snow_deaths2.py by downloading
   snow_deaths2.csv and testing shape of
   extracted data has 578 rows and 3 columns
  """
  test_path = tempfile.mkdtemp()
  x_train, metadata = snow_deaths2(test_path)
  try:
    assert x_train.shape == (578, 3)
  except Exception:
    # Clean up the downloaded data, then re-raise the original failure.
    # BUG FIX: the original used a bare `except:` with `raise()`, which
    # raises "TypeError: exceptions must derive from BaseException"
    # instead of re-raising the assertion error.
    shutil.rmtree(test_path)
    raise
| 22.75 | 52 | 0.767399 |
037e811036c31fa80d6c1ea3b153270b16fdf001 | 369 | py | Python | configuration/config.py | hemidactylus/measurement-api | 00dc85988d9fd0fb31ba8a3ecd43bd16dfec754f | [
"Apache-2.0"
] | null | null | null | configuration/config.py | hemidactylus/measurement-api | 00dc85988d9fd0fb31ba8a3ecd43bd16dfec754f | [
"Apache-2.0"
] | null | null | null | configuration/config.py | hemidactylus/measurement-api | 00dc85988d9fd0fb31ba8a3ecd43bd16dfec754f | [
"Apache-2.0"
] | null | null | null | import os
# Directory containing this configuration module.
this_dir = os.path.abspath(os.path.dirname(__file__))
# Project root: one level above the configuration/ package.
base_dir = os.path.join(this_dir, '..')
# Astra/Cassandra secure-connect bundle shipped under secrets/.
secure_db_bundle_path = os.path.join(
    base_dir,
    'secrets',
    'secure-connect-measurements.zip',
)
# Credentials file for database access.
secrets_json_path = os.path.join(
    base_dir,
    'secrets',
    'access.json',
)
# Rate limiting defaults: window length (seconds) and how many requests
# an anonymous client may make within one window.
defaultRateLimitWindowSeconds = 3600
defaultAnonymousRateAllowed = 50
dbbcf6161ab2c6d9a820d39c89aa689305768e49 | 11,874 | py | Python | sdk/python/pulumi_azure_native/logz/v20201001/sub_account.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/logz/v20201001/sub_account.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/logz/v20201001/sub_account.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['SubAccountArgs', 'SubAccount']
@pulumi.input_type
class SubAccountArgs:
def __init__(__self__, *,
monitor_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
identity: Optional[pulumi.Input['IdentityPropertiesArgs']] = None,
location: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input['MonitorPropertiesArgs']] = None,
sub_account_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a SubAccount resource.
:param pulumi.Input[str] monitor_name: Monitor resource name
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input['MonitorPropertiesArgs'] properties: Properties specific to the monitor resource.
:param pulumi.Input[str] sub_account_name: Sub Account resource name
"""
pulumi.set(__self__, "monitor_name", monitor_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if identity is not None:
pulumi.set(__self__, "identity", identity)
if location is not None:
pulumi.set(__self__, "location", location)
if properties is not None:
pulumi.set(__self__, "properties", properties)
if sub_account_name is not None:
pulumi.set(__self__, "sub_account_name", sub_account_name)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="monitorName")
def monitor_name(self) -> pulumi.Input[str]:
"""
Monitor resource name
"""
return pulumi.get(self, "monitor_name")
@monitor_name.setter
def monitor_name(self, value: pulumi.Input[str]):
pulumi.set(self, "monitor_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group. The name is case insensitive.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def identity(self) -> Optional[pulumi.Input['IdentityPropertiesArgs']]:
return pulumi.get(self, "identity")
@identity.setter
def identity(self, value: Optional[pulumi.Input['IdentityPropertiesArgs']]):
pulumi.set(self, "identity", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input['MonitorPropertiesArgs']]:
"""
Properties specific to the monitor resource.
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input['MonitorPropertiesArgs']]):
pulumi.set(self, "properties", value)
@property
@pulumi.getter(name="subAccountName")
def sub_account_name(self) -> Optional[pulumi.Input[str]]:
"""
Sub Account resource name
"""
return pulumi.get(self, "sub_account_name")
@sub_account_name.setter
def sub_account_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sub_account_name", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class SubAccount(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
identity: Optional[pulumi.Input[pulumi.InputType['IdentityPropertiesArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
monitor_name: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[pulumi.InputType['MonitorPropertiesArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
sub_account_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
Create a SubAccount resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] monitor_name: Monitor resource name
:param pulumi.Input[pulumi.InputType['MonitorPropertiesArgs']] properties: Properties specific to the monitor resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[str] sub_account_name: Sub Account resource name
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: SubAccountArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Create a SubAccount resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param SubAccountArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(SubAccountArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
identity: Optional[pulumi.Input[pulumi.InputType['IdentityPropertiesArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
monitor_name: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[pulumi.InputType['MonitorPropertiesArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
sub_account_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = SubAccountArgs.__new__(SubAccountArgs)
__props__.__dict__["identity"] = identity
__props__.__dict__["location"] = location
if monitor_name is None and not opts.urn:
raise TypeError("Missing required property 'monitor_name'")
__props__.__dict__["monitor_name"] = monitor_name
__props__.__dict__["properties"] = properties
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["sub_account_name"] = sub_account_name
__props__.__dict__["tags"] = tags
__props__.__dict__["name"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:logz/v20201001:SubAccount"), pulumi.Alias(type_="azure-native:logz:SubAccount"), pulumi.Alias(type_="azure-nextgen:logz:SubAccount"), pulumi.Alias(type_="azure-native:logz/v20201001preview:SubAccount"), pulumi.Alias(type_="azure-nextgen:logz/v20201001preview:SubAccount")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(SubAccount, __self__).__init__(
'azure-native:logz/v20201001:SubAccount',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'SubAccount':
"""
Get an existing SubAccount resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = SubAccountArgs.__new__(SubAccountArgs)
__props__.__dict__["identity"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["properties"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
return SubAccount(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def identity(self) -> pulumi.Output[Optional['outputs.IdentityPropertiesResponse']]:
return pulumi.get(self, "identity")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Name of the monitor resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> pulumi.Output['outputs.MonitorPropertiesResponse']:
"""
Properties specific to the monitor resource.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
The system metadata relating to this resource
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the monitor resource.
"""
return pulumi.get(self, "type")
| 42.407143 | 360 | 0.646454 |
0ad14df8e1901f5ba30f327e51c1909186dc7725 | 4,274 | py | Python | app.py | PriateXYF/HabiCat-Backend | 46f80f2630ac54249dfb635e114d7f92f88da62a | [
"MIT"
] | 1 | 2021-12-23T11:49:58.000Z | 2021-12-23T11:49:58.000Z | app.py | PriateXYF/HabiCat-Backend | 46f80f2630ac54249dfb635e114d7f92f88da62a | [
"MIT"
] | null | null | null | app.py | PriateXYF/HabiCat-Backend | 46f80f2630ac54249dfb635e114d7f92f88da62a | [
"MIT"
] | null | null | null | # coding: utf-8
import sys
from datetime import datetime
import leancloud
from flask import Flask, jsonify, request
from flask import render_template
from flask_sockets import Sockets
from leancloud import LeanCloudError
import common
from views.todos import todos_view
from bbdc.api import BBDC
from forest.api import Forest
from lc.api import LC
from reading.api import Reading
from course.api import Course
from github.api import GitHub
from douban.api import Douban
# Flask app: the SPA's ``templates`` folder doubles as the static root.
app = Flask(__name__, static_url_path='', static_folder='templates', template_folder='templates')
# Enable cross-origin requests (CORS); used for debugging only.
# from flask_cors import CORS
# CORS(app, supports_credentials=True)
sockets = Sockets(app)
# routing
app.register_blueprint(todos_view, url_prefix='/todos')
# @app.route('/bbdc')
# def bbdc():
#     return render_template('index.html')
@app.route('/')
def index():
    # Serve the front-end entry page; client-side code takes over from there.
    return render_template('index.html')
# BBDC (Bu Bei Dan Ci vocabulary app) data endpoint.
@app.route('/api/bbdc', methods=['POST'])
def get_BBDC_data():
    """Return one page of BBDC records (from LeanCloud) as JSON.

    The optional POST body is ``{"page": <int>}``; a missing or malformed
    body defaults to page 0.
    """
    bbdc = BBDC()
    # silent=True returns None on a missing/invalid JSON body instead of
    # raising, replacing the original broad ``except Exception`` control flow.
    payload = request.get_json(silent=True)
    page = payload.get('page', 0) if isinstance(payload, dict) else 0
    datas = bbdc.get_lc_data(page)
    return jsonify(datas)
# Forest data endpoint.
@app.route('/api/forest', methods=['POST'])
def get_forest_data():
    """Return one page of Forest records (from LeanCloud) as JSON.

    The optional POST body is ``{"page": <int>}``; defaults to page 0.
    """
    forest = Forest()
    # silent=True: malformed/missing JSON yields None instead of raising,
    # replacing the original broad ``except Exception``.
    payload = request.get_json(silent=True)
    page = payload.get('page', 0) if isinstance(payload, dict) else 0
    datas = forest.get_lc_data(page)
    return jsonify(datas)
@app.route('/api/github', methods=['POST'])
def get_github_data():
    """Return one page of GitHub records (from LeanCloud) as JSON.

    The optional POST body is ``{"page": <int>}``; defaults to page 0.
    """
    github = GitHub()
    # silent=True: malformed/missing JSON yields None instead of raising,
    # replacing the original broad ``except Exception``.
    payload = request.get_json(silent=True)
    page = payload.get('page', 0) if isinstance(payload, dict) else 0
    datas = github.get_lc_data(page)
    return jsonify(datas)
@app.route('/api/douban', methods=['POST'])
def get_douban_data():
    """Return one page of Douban records (from LeanCloud) as JSON.

    The optional POST body is ``{"page": <int>}``; defaults to page 0.
    """
    douban = Douban()
    # silent=True: malformed/missing JSON yields None instead of raising,
    # replacing the original broad ``except Exception``.
    payload = request.get_json(silent=True)
    page = payload.get('page', 0) if isinstance(payload, dict) else 0
    datas = douban.get_lc_data(page)
    return jsonify(datas)
@app.route('/api/reading', methods=['POST'])
def get_reading_data():
    """Return one page of reading records (from LeanCloud) as JSON.

    The optional POST body is ``{"page": <int>}``; defaults to page 0.
    """
    reading = Reading()
    # silent=True: malformed/missing JSON yields None instead of raising,
    # replacing the original broad ``except Exception``.
    payload = request.get_json(silent=True)
    page = payload.get('page', 0) if isinstance(payload, dict) else 0
    datas = reading.get_lc_data(page)
    return jsonify(datas)
@app.route('/api/course', methods=['POST'])
def get_course_data():
    """Return one page of course records (from LeanCloud) as JSON.

    The optional POST body is ``{"page": <int>}``; defaults to page 0.
    """
    course = Course()
    # silent=True: malformed/missing JSON yields None instead of raising,
    # replacing the original broad ``except Exception``.
    payload = request.get_json(silent=True)
    page = payload.get('page', 0) if isinstance(payload, dict) else 0
    datas = course.get_lc_data(page)
    return jsonify(datas)
# Trigger one BBDC update run.
@app.route('/api/doBBDC', methods=['POST'])
def do_BBDC():
    """Run one BBDC -> Habitica export and report message and count."""
    message, count = BBDC().habitica_daily_export()
    return jsonify({"numbers": count, "msg": message})
# Trigger one Forest update run.
@app.route('/api/doForest', methods=['POST'])
def do_Forest():
    """Run one Forest -> Habitica export and report message and count."""
    message, count = Forest().habitica_daily_export()
    return jsonify({"numbers": count, "msg": message})
# Trigger one GitHub update run.
@app.route('/api/doGitHub', methods=['POST'])
def do_GitHub():
    """Run one GitHub -> Habitica export and report message and count."""
    message, count = GitHub().habitica_daily_export()
    return jsonify({"numbers": count, "msg": message})
# Trigger one Douban update run.
@app.route('/api/doDouban', methods=['POST'])
def do_Douban():
    """Run one Douban -> Habitica export and report message and count."""
    message, count = Douban().habitica_daily_export()
    return jsonify({"numbers": count, "msg": message})
# Record one reading session and export it to Habitica.
@app.route('/api/doReading', methods=['POST'])
def do_Reading():
    """Export one reading record (book name + page) to Habitica.

    Expects a JSON body ``{"bookName": ..., "bookPage": ...}``; on a
    missing/invalid body or missing keys, responds with the original
    parameter-error message.
    """
    reading = Reading()
    # silent=True avoids an exception on a missing/non-JSON body; a non-dict
    # body (e.g. a JSON list) is treated the same as missing parameters.
    payload = request.get_json(silent=True)
    if not isinstance(payload, dict):
        payload = {}
    try:
        bookName = payload['bookName']
        bookPage = payload['bookPage']
    except KeyError:
        # Narrowed from ``except Exception``: only missing keys are expected here.
        return jsonify({
            "msg" : "参数错误"
        })
    datas, numbers = reading.habitica_export({
        "bookName" : bookName,
        "bookPage" : bookPage,
    })
    res = {
        "numbers" : numbers,
        "msg" : datas
    }
    return jsonify(res)
# Record one course-study session and export it to Habitica.
# (The original comment said "Reading" here — copy/paste slip.)
@app.route('/api/doCourse', methods=['POST'])
def do_Course():
    """Export one course record (name, chapter, section) to Habitica.

    Expects a JSON body with ``courseName``, ``courseChapter`` and
    ``courseSection``; responds with the original parameter-error message
    when any is missing.
    """
    course = Course()
    # silent=True avoids an exception on a missing/non-JSON body; a non-dict
    # body is treated the same as missing parameters.
    payload = request.get_json(silent=True)
    if not isinstance(payload, dict):
        payload = {}
    try:
        courseName = payload['courseName']
        courseChapter = payload['courseChapter']
        courseSection = payload['courseSection']
    except KeyError:
        # Narrowed from ``except Exception``: only missing keys are expected here.
        return jsonify({
            "msg" : "参数错误"
        })
    datas, numbers = course.habitica_export({
        "courseName" : courseName,
        "courseChapter" : courseChapter,
        "courseSection" : courseSection,
    })
    res = {
        "numbers" : numbers,
        "msg" : datas
    }
    return jsonify(res)
# Project data endpoint.
@app.route('/api/projects', methods=['POST'])
def get_projects():
    """Return the project list fetched from LeanCloud as JSON."""
    return jsonify(LC().get_lc_projects())
# Match all remaining single-segment paths (SPA fallback to index.html).
@app.route('/<path>')
def all(path):
    # NOTE(review): this function shadows the builtin all() at module level;
    # renaming it would also rename the Flask endpoint, so it is only flagged here.
    return render_template('index.html')
544946da43c6440307b0332ab3533f0a9078a26e | 1,082 | py | Python | desktop/core/ext-py/docutils-0.14/test/test_parsers/test_rst/test_directives/test_class.py | kokosing/hue | 2307f5379a35aae9be871e836432e6f45138b3d9 | [
"Apache-2.0"
] | 5,079 | 2015-01-01T03:39:46.000Z | 2022-03-31T07:38:22.000Z | desktop/core/ext-py/docutils-0.14/test/test_parsers/test_rst/test_directives/test_class.py | zks888/hue | 93a8c370713e70b216c428caa2f75185ef809deb | [
"Apache-2.0"
] | 1,623 | 2015-01-01T08:06:24.000Z | 2022-03-30T19:48:52.000Z | desktop/core/ext-py/docutils-0.14/test/test_parsers/test_rst/test_directives/test_class.py | zks888/hue | 93a8c370713e70b216c428caa2f75185ef809deb | [
"Apache-2.0"
] | 2,033 | 2015-01-04T07:18:02.000Z | 2022-03-28T19:55:47.000Z | #! /usr/bin/env python
# $Id: test_class.py 5174 2007-05-31 00:01:52Z wiemann $
# Author: Lea Wiemann <LeWiemann@gmail.com>
# Copyright: This module has been placed in the public domain.
"""
Tests for the 'class' directive.
"""
from __init__ import DocutilsTestSupport
def suite():
    """Assemble the parser test suite from the module-level ``totest`` table."""
    parser_tests = DocutilsTestSupport.ParserTestSuite()
    parser_tests.generateTests(totest)
    return parser_tests
totest = {}
totest['class'] = [
["""\
.. class:: class1 class2
""",
"""\
<document source="test data">
<pending>
.. internal attributes:
.transform: docutils.transforms.misc.ClassAttribute
.details:
class: ['class1', 'class2']
directive: 'class'
"""],
["""\
.. class:: class1 class2
The classes are applied to this paragraph.
And this one.
""",
"""\
<document source="test data">
<paragraph classes="class1 class2">
The classes are applied to this paragraph.
<paragraph classes="class1 class2">
And this one.
"""],
]
if __name__ == '__main__':
    import unittest
    # defaultTest='suite' makes unittest call this module's suite() factory.
    unittest.main(defaultTest='suite')
| 20.415094 | 64 | 0.626617 |
e8cfab09086af8e082df509a02ec0f04b57840ff | 1,564 | py | Python | oruxmap/layers_switzerland.py | hmaerki/orux_swissmap | 986207271a4e421bf4bcf683fbfcd747d67ff6f7 | [
"Apache-2.0"
] | 1 | 2021-11-16T13:28:35.000Z | 2021-11-16T13:28:35.000Z | oruxmap/layers_switzerland.py | hmaerki/orux_swissmap | 986207271a4e421bf4bcf683fbfcd747d67ff6f7 | [
"Apache-2.0"
] | null | null | null | oruxmap/layers_switzerland.py | hmaerki/orux_swissmap | 986207271a4e421bf4bcf683fbfcd747d67ff6f7 | [
"Apache-2.0"
] | 1 | 2021-11-16T13:28:42.000Z | 2021-11-16T13:28:42.000Z | from dataclasses import dataclass
@dataclass
class LayerParams:
    """Parameters describing one map layer (one map scale)."""

    scale: int
    orux_layer: int
    m_per_pixel: float
    tiff_filename: str = None
    tiff_url: str = None
    pixel_per_tile: int = 400

    @property
    def name(self):
        """Zero-padded four-digit layer name, e.g. scale 50 -> "0050"."""
        return "%04d" % self.scale

    @property
    def m_per_tile(self) -> float:
        """Ground size of one tile in metres."""
        return self.m_per_pixel * self.pixel_per_tile

    def verify_m_per_pixel(self, m_per_pixel: float):
        """Assert that *m_per_pixel* matches this layer's resolution (0.1% tolerance)."""
        assert isinstance(m_per_pixel, float)
        ratio = m_per_pixel / self.m_per_pixel
        assert abs(ratio - 1.0) < 0.001
# Swisstopo map layers, ordered from coarsest (1:1000k) to finest (1:10k).
# Only the two coarsest active scales carry a direct GeoTIFF download URL.
LIST_LAYERS = (
    # LayerParams(
    #     scale=5000,
    #     orux_layer=8,
    # ),
    # LayerParams(
    #     scale=2000,
    #     orux_layer=8,
    #     m_per_pixel=32.0,
    # ),
    LayerParams(
        scale=1000,
        orux_layer=10,
        tiff_url="https://data.geo.admin.ch/ch.swisstopo.pixelkarte-farbe-pk1000.noscale/data.zip",
        tiff_filename="SMR1000_KREL.tif",
        m_per_pixel=50.0,
    ),
    LayerParams(
        scale=500,
        orux_layer=11,
        tiff_url="https://data.geo.admin.ch/ch.swisstopo.pixelkarte-farbe-pk500.noscale/data.zip",
        tiff_filename="SMR500_KREL.tif",
        m_per_pixel=25.0,
    ),
    LayerParams(
        scale=200,
        orux_layer=12,
        m_per_pixel=10.0,
    ),
    LayerParams(scale=100, orux_layer=13, m_per_pixel=5.0),
    LayerParams(scale=50, orux_layer=14, m_per_pixel=2.5),
    LayerParams(scale=25, orux_layer=15, m_per_pixel=1.25),
    # The 1:10k layer uses larger 500 px tiles.
    LayerParams(scale=10, orux_layer=16, m_per_pixel=0.5, pixel_per_tile=500),
)
| 26.066667 | 99 | 0.627877 |
1b443fab66911cccc65914866fb6463f8ec017d4 | 25 | py | Python | clearml_agent/version.py | Honzys/clearml-agent | f58071fc74e2df9bdba8ca91569e0bfd70ad4f46 | [
"Apache-2.0"
] | null | null | null | clearml_agent/version.py | Honzys/clearml-agent | f58071fc74e2df9bdba8ca91569e0bfd70ad4f46 | [
"Apache-2.0"
] | null | null | null | clearml_agent/version.py | Honzys/clearml-agent | f58071fc74e2df9bdba8ca91569e0bfd70ad4f46 | [
"Apache-2.0"
] | null | null | null | __version__ = '1.2.0rc6'
| 12.5 | 24 | 0.68 |
5720a8d461d5e4b3aa0d5f5f8eb6508ce8957bd3 | 911 | py | Python | src/neuralnetwork/lstm_user.py | Dive904/What2Cite | bcf188047790abfbb71e6fc0361028749a391f40 | [
"CC-BY-4.0"
] | null | null | null | src/neuralnetwork/lstm_user.py | Dive904/What2Cite | bcf188047790abfbb71e6fc0361028749a391f40 | [
"CC-BY-4.0"
] | null | null | null | src/neuralnetwork/lstm_user.py | Dive904/What2Cite | bcf188047790abfbb71e6fc0361028749a391f40 | [
"CC-BY-4.0"
] | null | null | null | import pickle
import numpy as np
from keras.preprocessing.sequence import pad_sequences
from tensorflow_core.python.keras.models import load_model
from src.pipelineapplication import utils
from src.neuralnetwork import lstm_utils
lstm_model = "../../output/official/neuralnetwork.h5"
tokenizer_model = "../../output/official/tokenizer.pickle"
text = utils.get_abstracts_to_analyze()
text = list(map(lambda x: lstm_utils.preprocess_text(x["abstract"]), text))
with open(tokenizer_model, 'rb') as handle:
tokenizer = pickle.load(handle)
seq = tokenizer.texts_to_sequences(text)
seq = pad_sequences(seq, padding='post', maxlen=200)
# load model from single file
model = load_model(lstm_model)
# make predictions
yhat = model.predict(seq)
list_couple = []
for y in yhat:
topic = np.argmax(y)
prob = np.round(y[topic], 3)
list_couple.append((topic, prob))
for c in list_couple:
print(c)
| 26.028571 | 75 | 0.755214 |
4e688a07f210cdc9b3c69707f1af2feae3266bc3 | 1,429 | py | Python | saleor/graphql/order/bulk_mutations/draft_orders.py | calinrada/saleor | f4e3ba4cb819e9c07a2e1fa963539d2f15528b40 | [
"BSD-3-Clause"
] | 1 | 2019-06-09T14:56:04.000Z | 2019-06-09T14:56:04.000Z | saleor/graphql/order/bulk_mutations/draft_orders.py | ardeal/saleor | 9af726ba6d3a8cb11da07dc957a3fd2a92d7ef64 | [
"BSD-3-Clause"
] | null | null | null | saleor/graphql/order/bulk_mutations/draft_orders.py | ardeal/saleor | 9af726ba6d3a8cb11da07dc957a3fd2a92d7ef64 | [
"BSD-3-Clause"
] | null | null | null | import graphene
from django.core.exceptions import ValidationError
from ....order import OrderStatus, models
from ...core.mutations import ModelBulkDeleteMutation
class DraftOrderBulkDelete(ModelBulkDeleteMutation):
    # GraphQL mutation deleting many draft orders in one call.
    class Arguments:
        ids = graphene.List(
            graphene.ID,
            required=True,
            description='List of draft order IDs to delete.')
    class Meta:
        description = 'Deletes draft orders.'
        model = models.Order
    @classmethod
    def clean_instance(cls, info, instance):
        # Per-instance guard: only orders still in DRAFT status may be deleted.
        if instance.status != OrderStatus.DRAFT:
            raise ValidationError({'id': 'Cannot delete non-draft orders.'})
    @classmethod
    def user_is_allowed(cls, user, _ids):
        # Requires the order-management permission regardless of which ids are passed.
        return user.has_perm('order.manage_orders')
class DraftOrderLinesBulkDelete(ModelBulkDeleteMutation):
    # GraphQL mutation deleting many order lines in one call.
    class Arguments:
        ids = graphene.List(
            graphene.ID,
            required=True,
            description='List of order lines IDs to delete.')
    class Meta:
        description = 'Deletes order lines.'
        model = models.OrderLine
    @classmethod
    def clean_instance(cls, _info, instance):
        # Per-instance guard: lines may only be deleted from orders still in DRAFT.
        if instance.order.status != OrderStatus.DRAFT:
            raise ValidationError(
                {'id': 'Cannot delete line for non-draft orders.'})
    @classmethod
    def user_is_allowed(cls, user, _ids):
        # Requires the order-management permission regardless of which ids are passed.
        return user.has_perm('order.manage_orders')
2d0274dd30710f47dfd4ef3a9c93587c1611f474 | 1,502 | py | Python | randgen/importers.py | VasilisG/Mock-Dataset-Generator | f468fcbbd9a1c4a2cdbf80507aa7d0ebf59c9258 | [
"MIT"
] | null | null | null | randgen/importers.py | VasilisG/Mock-Dataset-Generator | f468fcbbd9a1c4a2cdbf80507aa7d0ebf59c9258 | [
"MIT"
] | null | null | null | randgen/importers.py | VasilisG/Mock-Dataset-Generator | f468fcbbd9a1c4a2cdbf80507aa7d0ebf59c9258 | [
"MIT"
] | null | null | null | import csv
import os
import randgen.utils as utils
class Importer:
    """Imports tabular data from a CSV file.

    ``fetchBy == 0`` returns the data column-wise as a tuple of tuples;
    ``fetchBy == 1`` returns a dict mapping each header (first row) to a
    tuple of that column's values.
    """

    def __init__(self, filename, fetchBy, delimiter=','):
        self.filename = filename
        self.fetchBy = fetchBy
        self.delimiter = delimiter
        self.data = None

    @property
    def filename(self):
        """Path of the CSV file to import."""
        # BUG FIX: the original property returned ``self.filename``, which
        # re-invoked the property itself and recursed forever (the setter had
        # the same flaw, so even __init__ crashed with RecursionError).
        # Store the value in a private backing attribute instead.
        return self._filename

    @filename.setter
    def filename(self, filename):
        self._filename = filename

    @property
    def delimiter(self):
        """Field delimiter used by the CSV reader."""
        return self._delimiter

    @delimiter.setter
    def delimiter(self, delimiter):
        self._delimiter = delimiter

    def importData(self):
        """Read the CSV file and return its contents.

        Returns None when the file fails ``utils.isValidFile`` validation,
        or when ``fetchBy`` is neither 0 nor 1 (the original raised a
        NameError in that case).
        """
        if not utils.isValidFile(self.filename, '.csv'):
            return None
        data = None
        with open(self.filename, 'r') as csv_file:
            csv_reader = csv.reader(csv_file, delimiter=self.delimiter)
            if self.fetchBy == 0:
                rows = list(csv_reader)
                # Transpose: one tuple per column.
                data = tuple(zip(*rows))
            elif self.fetchBy == 1:
                headers = []
                body = []
                for row_index, row in enumerate(csv_reader):
                    if row_index == 0:
                        headers = row
                    else:
                        body.append(row)
                columns = list(zip(*body))
                data = dict((header, column) for header, column in zip(headers, columns))
        return data
cfc8098af67e2188fdaaa361032f050c1d0de95c | 4,340 | py | Python | tests/storage/test_rollback_worker.py | dsonck92/synapse | 2560b1b6b2f74b5724253396c0e3665fa1f7968c | [
"Apache-2.0"
] | 9,945 | 2015-01-02T07:41:06.000Z | 2022-03-31T23:22:42.000Z | tests/storage/test_rollback_worker.py | t2bot/synapse | 62ca554ef09330cb88d46fca8296a859d0adc143 | [
"Apache-2.0"
] | 9,320 | 2015-01-08T14:09:03.000Z | 2022-03-31T21:11:24.000Z | tests/storage/test_rollback_worker.py | t2bot/synapse | 62ca554ef09330cb88d46fca8296a859d0adc143 | [
"Apache-2.0"
] | 2,299 | 2015-01-31T22:16:29.000Z | 2022-03-31T06:08:26.000Z | # Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from unittest import mock
from synapse.app.generic_worker import GenericWorkerServer
from synapse.storage.database import LoggingDatabaseConnection
from synapse.storage.prepare_database import PrepareDatabaseException, prepare_database
from synapse.storage.schema import SCHEMA_VERSION
from tests.unittest import HomeserverTestCase
def fake_listdir(filepath: str) -> List[str]:
    """
    A fake implementation of os.listdir which we can use to mock out the filesystem.

    Args:
        filepath: The directory to list files for.

    Returns:
        A list of files and folders in the directory.
    """
    # Full-schema directories pretend to hold exactly the current schema;
    # every other directory pretends to hold one outstanding delta file.
    if not filepath.endswith("full_schemas"):
        return ["99_add_unicorn_to_database.sql"]
    return [str(SCHEMA_VERSION)]
class WorkerSchemaTests(HomeserverTestCase):
    # Tests for how *worker* processes react to the database schema version.
    def make_homeserver(self, reactor, clock):
        # Build a homeserver running as a worker (GenericWorkerServer),
        # with no outbound federation HTTP client.
        hs = self.setup_test_homeserver(
            federation_http_client=None, homeserver_to_use=GenericWorkerServer
        )
        return hs
    def default_config(self):
        conf = super().default_config()
        # Mark this as a worker app.
        conf["worker_app"] = "yes"
        return conf
    def test_rolling_back(self):
        """Test that workers can start if the DB is a newer schema version"""
        db_pool = self.hs.get_datastore().db_pool
        db_conn = LoggingDatabaseConnection(
            db_pool._db_pool.connect(),
            db_pool.engine,
            "tests",
        )
        cur = db_conn.cursor()
        # Pretend the database was written by a newer Synapse (i.e. we rolled back).
        cur.execute("UPDATE schema_version SET version = ?", (SCHEMA_VERSION + 1,))
        db_conn.commit()
        # Must not raise: workers tolerate a newer-than-expected schema.
        prepare_database(db_conn, db_pool.engine, self.hs.config)
    def test_not_upgraded_old_schema_version(self):
        """Test that workers don't start if the DB has an older schema version"""
        db_pool = self.hs.get_datastore().db_pool
        db_conn = LoggingDatabaseConnection(
            db_pool._db_pool.connect(),
            db_pool.engine,
            "tests",
        )
        cur = db_conn.cursor()
        # Pretend the database is one schema version behind.
        cur.execute("UPDATE schema_version SET version = ?", (SCHEMA_VERSION - 1,))
        db_conn.commit()
        with self.assertRaises(PrepareDatabaseException):
            prepare_database(db_conn, db_pool.engine, self.hs.config)
    def test_not_upgraded_current_schema_version_with_outstanding_deltas(self):
        """
        Test that workers don't start if the DB is on the current schema version,
        but there are still outstanding delta migrations to run.
        """
        db_pool = self.hs.get_datastore().db_pool
        db_conn = LoggingDatabaseConnection(
            db_pool._db_pool.connect(),
            db_pool.engine,
            "tests",
        )
        # Set the schema version of the database to the current version
        cur = db_conn.cursor()
        cur.execute("UPDATE schema_version SET version = ?", (SCHEMA_VERSION,))
        db_conn.commit()
        # Patch `os.listdir` here to make synapse think that there is a migration
        # file ready to be run.
        # Note that we can't patch this function for the whole method, else Synapse
        # will try to find the file when building the database initially.
        with mock.patch("os.listdir", mock.Mock(side_effect=fake_listdir)):
            with self.assertRaises(PrepareDatabaseException):
                # Synapse should think that there is an outstanding migration file due to
                # patching 'os.listdir' above.
                #
                # We expect Synapse to raise an exception to indicate the master process
                # needs to apply this migration file.
                prepare_database(db_conn, db_pool.engine, self.hs.config)
| 36.166667 | 89 | 0.672811 |
3dc369feaa55491ad0beafb745406fe4fa1845de | 154 | py | Python | intake_omnisci/__init__.py | Quansight/intake-mapd | 6e7bc4b9cef9955f7907c049c3967e80026781a9 | [
"Apache-2.0"
] | 2 | 2019-09-12T00:32:13.000Z | 2020-08-10T18:09:49.000Z | intake_omnisci/__init__.py | Quansight/intake-omnisci | 6e7bc4b9cef9955f7907c049c3967e80026781a9 | [
"Apache-2.0"
] | 8 | 2019-07-25T23:32:13.000Z | 2020-10-23T17:12:49.000Z | intake_omnisci/__init__.py | Quansight/intake-mapd | 6e7bc4b9cef9955f7907c049c3967e80026781a9 | [
"Apache-2.0"
] | 1 | 2021-04-17T15:22:19.000Z | 2021-04-17T15:22:19.000Z | __version__ = '0.0.2'
import intake
from .catalog import OmniSciCatalog
from .source import OmniSciSource
__all__ = ["OmniSciCatalog", "OmniSciSource"]
| 19.25 | 45 | 0.779221 |
aacc79c2630f664a62fc81b6305486a25ba525f0 | 1,554 | py | Python | setup.py | AbsarF/ChatterBot | 61dfdac4714846d4f1a7d843bec70c77c7106630 | [
"BSD-3-Clause"
] | null | null | null | setup.py | AbsarF/ChatterBot | 61dfdac4714846d4f1a7d843bec70c77c7106630 | [
"BSD-3-Clause"
] | null | null | null | setup.py | AbsarF/ChatterBot | 61dfdac4714846d4f1a7d843bec70c77c7106630 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Packaging script for ChatterBot."""
try:
    from setuptools import setup, find_packages
except ImportError:
    # Fall back to distutils when setuptools is unavailable.
    from distutils.core import setup, find_packages

# Read the runtime dependency list; a context manager closes the file
# handle (the original opened it and never closed it).
with open("requirements.txt") as req:
    requirements = req.readlines()

# Dynamically calculate the version based on chatterbot version
version = __import__('chatterbot').__version__

setup(
    name="ChatterBot",
    version=version,
    url="https://github.com/gunthercox/ChatterBot",
    setup_requires=['setuptools-markdown'],
    long_description_markdown_filename='readme.md',
    description="An open-source chat bot program written in Python.",
    author="Gunther Cox",
    author_email="gunthercx@gmail.com",
    packages=find_packages(),
    package_dir={"chatterbot": "chatterbot"},
    include_package_data=True,
    install_requires=requirements,
    license="BSD",
    zip_safe=False,
    platforms=["any"],
    keywords=["ChatterBot", "chatbot", "chat", "bot"],
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: BSD License",
        "Environment :: Console",
        "Environment :: Web Environment",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
    ],
    test_suite="tests",
    tests_require=[]
)
| 31.714286 | 69 | 0.655084 |
926098ed838188bf7c06388be2b450dc661ccf42 | 4,440 | py | Python | python_src/adaptive_formation/tasks.py | tkortz/motion_planning_rt | 08e914642b802f7217a8ad0f6153d41ccdce8c7d | [
"MIT"
] | 111 | 2020-02-18T11:41:23.000Z | 2022-03-27T17:07:17.000Z | python_src/adaptive_formation/tasks.py | tkortz/motion_planning_rt | 08e914642b802f7217a8ad0f6153d41ccdce8c7d | [
"MIT"
] | null | null | null | python_src/adaptive_formation/tasks.py | tkortz/motion_planning_rt | 08e914642b802f7217a8ad0f6153d41ccdce8c7d | [
"MIT"
] | 40 | 2019-12-19T14:14:19.000Z | 2022-02-27T03:29:04.000Z | import contextlib
from matplotlib import animation as anim
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import numpy as np
from math import *
import time
def draw_map(start, goal, obstacles_poses, R_obstacles, f=None, draw_gradients=True, nrows=500, ncols=500):
    """Draw the static scene on the current matplotlib axes.

    :param start: (x, y) start position, drawn as a yellow marker.
    :param goal: (x, y) goal position, drawn as a green marker.
    :param obstacles_poses: iterable of (x, y) circular obstacle centres.
    :param R_obstacles: radius used for every circular obstacle.
    :param f: potential field sampled on an nrows x ncols grid over
        [-2.5, 2.5]^2; only used when ``draw_gradients`` is True.
    :param draw_gradients: if True (and ``f`` given), draw quiver arrows of -grad(f).
    """
    if draw_gradients and f is not None:
        skip = 10  # draw every 10th arrow so the quiver plot stays readable
        [x_m, y_m] = np.meshgrid(np.linspace(-2.5, 2.5, ncols), np.linspace(-2.5, 2.5, nrows))
        [gy, gx] = np.gradient(-f);
        Q = plt.quiver(x_m[::skip, ::skip], y_m[::skip, ::skip], gx[::skip, ::skip], gy[::skip, ::skip])
    else:
        plt.grid()
    plt.plot(start[0], start[1], 'ro', color='yellow', markersize=10);
    plt.plot(goal[0], goal[1], 'ro', color='green', markersize=10);
    plt.xlabel('X')
    plt.ylabel('Y')
    ax = plt.gca()
    for pose in obstacles_poses:
        circle = plt.Circle(pose, R_obstacles, color='red')
        ax.add_artist(circle)
    # Create a Rectangle patch
    # Three fixed wall segments of the environment.
    # NOTE(review): fill='True' is a (truthy) string; fill=True was probably intended.
    rect1 = patches.Rectangle((-2.5,-1.15),2.0,0.2,linewidth=1,color='red',fill='True')
    rect2 = patches.Rectangle((-1.2, 1.4), 0.2,1.0,linewidth=1,color='red',fill='True')
    rect3 = patches.Rectangle(( 0.4, 0.8), 2.0,0.5,linewidth=1,color='red',fill='True')
    ax.add_patch(rect1)
    ax.add_patch(rect2)
    ax.add_patch(rect3)
def draw_robots(current_point1, routes=None, num_robots=None, robots_poses=None, centroid=None, vel1=None):
    """Draw robot positions, their routes and the formation polygon.

    :param current_point1: (x, y) of the leader; used as the velocity arrow origin.
    :param routes: per-robot arrays of waypoints; routes[0] is the leader's.
    :param num_robots: number of robots (length of ``routes``/``robots_poses``).
    :param robots_poses: list of (x, y) robot positions; NOT modified.
    :param centroid: (x, y) centroid of the formation.
    :param vel1: optional (vx, vy) leader velocity to draw as an arrow.
    """
    if vel1 is not None:
        plt.arrow(current_point1[0], current_point1[1], vel1[0], vel1[1], width=0.01, head_width=0.05, head_length=0.1, fc='k')
    plt.plot(routes[0][:,0], routes[0][:,1], 'green', linewidth=2)
    for r in range(1, num_robots):
        plt.plot(routes[r][:,0], routes[r][:,1], '--', color='blue', linewidth=2)
    for pose in robots_poses:
        plt.plot(pose[0], pose[1], 'ro', color='blue')
    # Sort a COPY of the poses by polar angle around the centroid. The
    # original sorted the caller's list in place, silently reordering the
    # caller's data as a side effect.
    pp = sorted(robots_poses, key=lambda p: atan2(p[1]-centroid[1], p[0]-centroid[0]))
    formation = patches.Polygon(pp, color='blue', fill=False, linewidth=2);
    plt.gca().add_patch(formation)
    plt.plot(centroid[0], centroid[1], '*', color='blue')
def get_movie_writer(should_write_movie, title, movie_fps, plot_pause_len):
"""
:param should_write_movie: Indicates whether the animation of SLAM should be written to a movie file.
:param title: The title of the movie with which the movie writer will be initialized.
:param movie_fps: The frame rate of the movie to write.
:param plot_pause_len: The pause durations between the frames when showing the plots.
:return: A movie writer that enables writing MP4 movie with the animation from SLAM.
"""
get_ff_mpeg_writer = anim.writers['ffmpeg']
metadata = dict(title=title, artist='matplotlib', comment='Potential Fields Formation Navigation')
movie_fps = min(movie_fps, float(1. / plot_pause_len))
return get_ff_mpeg_writer(fps=movie_fps, metadata=metadata)
@contextlib.contextmanager
def get_dummy_context_mgr():
"""
:return: A dummy context manager for conditionally writing to a movie file.
"""
yield None
# HUMAN VELOCITY CALCULATION
hum_time_array = np.ones(10)
hum_pose_array = np.array([ np.ones(10), np.ones(10), np.ones(10) ])
def hum_vel(human_pose):
for i in range(len(hum_time_array)-1):
hum_time_array[i] = hum_time_array[i+1]
hum_time_array[-1] = time.time()
for i in range(len(hum_pose_array[0])-1):
hum_pose_array[0][i] = hum_pose_array[0][i+1]
hum_pose_array[1][i] = hum_pose_array[1][i+1]
hum_pose_array[2][i] = hum_pose_array[2][i+1]
hum_pose_array[0][-1] = human_pose[0]
hum_pose_array[1][-1] = human_pose[1]
hum_pose_array[2][-1] = human_pose[2]
vel_x = (hum_pose_array[0][-1]-hum_pose_array[0][0])/(hum_time_array[-1]-hum_time_array[0])
vel_y = (hum_pose_array[1][-1]-hum_pose_array[1][0])/(hum_time_array[-1]-hum_time_array[0])
vel_z = (hum_pose_array[2][-1]-hum_pose_array[2][0])/(hum_time_array[-1]-hum_time_array[0])
hum_vel = np.array( [vel_x, vel_y, vel_z] )
return hum_vel
def euler_from_quaternion(q):
"""
Intrinsic Tait-Bryan rotation of xyz-order.
"""
q = q / np.linalg.norm(q)
qx, qy, qz, qw = q
roll = atan2(2.0*(qy*qz + qw*qx), qw*qw - qx*qx - qy*qy + qz*qz)
pitch = asin(-2.0*(qx*qz - qw*qy))
yaw = atan2(2.0*(qx*qy + qw*qz), qw*qw + qx*qx - qy*qy - qz*qz)
return roll, pitch, yaw | 41.495327 | 144 | 0.661486 |
770ec62529ab993c5d901bee506a329c34e11ce3 | 8,601 | py | Python | src/sentry/models/organizationmember.py | hieast/sentry | 1179b6b13e86f552d18da0578d7b3bc5d002c0d9 | [
"BSD-3-Clause"
] | 1 | 2019-05-28T06:18:03.000Z | 2019-05-28T06:18:03.000Z | src/sentry/models/organizationmember.py | c88888/sentry | 53837d9e8dad2443c3881fcd05f1b261bdd922fd | [
"BSD-3-Clause"
] | 6 | 2018-10-19T10:04:23.000Z | 2019-12-09T20:29:12.000Z | src/sentry/models/organizationmember.py | c88888/sentry | 53837d9e8dad2443c3881fcd05f1b261bdd922fd | [
"BSD-3-Clause"
] | 1 | 2020-07-03T00:52:19.000Z | 2020-07-03T00:52:19.000Z | """
sentry.models.organizationmember
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import six
from bitfield import BitField
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import models, transaction
from django.utils import timezone
from django.utils.encoding import force_bytes
from hashlib import md5
from structlog import get_logger
from uuid import uuid4
from six.moves.urllib.parse import urlencode
from sentry import roles
from sentry.db.models import (
BaseModel, BoundedAutoField, BoundedPositiveIntegerField, FlexibleForeignKey, Model, sane_repr
)
from sentry.utils.http import absolute_uri
class OrganizationMemberTeam(BaseModel):
__core__ = True
id = BoundedAutoField(primary_key=True)
team = FlexibleForeignKey('sentry.Team')
organizationmember = FlexibleForeignKey('sentry.OrganizationMember')
# an inactive membership simply removes the team from the default list
# but still allows them to re-join without request
is_active = models.BooleanField(default=True)
class Meta:
app_label = 'sentry'
db_table = 'sentry_organizationmember_teams'
unique_together = (('team', 'organizationmember'), )
__repr__ = sane_repr('team_id', 'organizationmember_id')
def get_audit_log_data(self):
return {
'team_slug': self.team.slug,
'member_id': self.organizationmember_id,
'email': self.organizationmember.get_email(),
'is_active': self.is_active,
}
class OrganizationMember(Model):
"""
Identifies relationships between teams and users.
Users listed as team members are considered to have access to all projects
and could be thought of as team owners (though their access level may not)
be set to ownership.
"""
__core__ = True
organization = FlexibleForeignKey('sentry.Organization', related_name="member_set")
user = FlexibleForeignKey(
settings.AUTH_USER_MODEL, null=True, blank=True, related_name="sentry_orgmember_set"
)
email = models.EmailField(null=True, blank=True)
role = models.CharField(
choices=roles.get_choices(),
max_length=32,
default=roles.get_default().id,
)
flags = BitField(
flags=(('sso:linked', 'sso:linked'), ('sso:invalid', 'sso:invalid'), ), default=0
)
token = models.CharField(max_length=64, null=True, blank=True, unique=True)
date_added = models.DateTimeField(default=timezone.now)
has_global_access = models.BooleanField(default=True)
teams = models.ManyToManyField(
'sentry.Team', blank=True, through='sentry.OrganizationMemberTeam'
)
# Deprecated -- no longer used
type = BoundedPositiveIntegerField(default=50, blank=True)
class Meta:
app_label = 'sentry'
db_table = 'sentry_organizationmember'
unique_together = (('organization', 'user'), ('organization', 'email'), )
__repr__ = sane_repr(
'organization_id',
'user_id',
'role',
)
@transaction.atomic
def save(self, *args, **kwargs):
assert self.user_id or self.email, \
'Must set user or email'
super(OrganizationMember, self).save(*args, **kwargs)
@property
def is_pending(self):
return self.user_id is None
@property
def legacy_token(self):
checksum = md5()
checksum.update(six.text_type(self.organization_id).encode('utf-8'))
checksum.update(self.get_email().encode('utf-8'))
checksum.update(force_bytes(settings.SECRET_KEY))
return checksum.hexdigest()
def generate_token(self):
return uuid4().hex + uuid4().hex
def get_invite_link(self):
if not self.is_pending:
return None
return absolute_uri(
reverse(
'sentry-accept-invite',
kwargs={
'member_id': self.id,
'token': self.token or self.legacy_token,
}
)
)
def send_invite_email(self):
from sentry.utils.email import MessageBuilder
context = {
'email': self.email,
'organization': self.organization,
'url': self.get_invite_link(),
}
msg = MessageBuilder(
subject='Join %s in using Sentry' % self.organization.name,
template='sentry/emails/member-invite.txt',
html_template='sentry/emails/member-invite.html',
type='organization.invite',
context=context,
)
try:
msg.send_async([self.get_email()])
except Exception as e:
logger = get_logger(name='sentry.mail')
logger.exception(e)
def send_sso_link_email(self, actor, provider):
from sentry.utils.email import MessageBuilder
link_args = {'organization_slug': self.organization.slug}
context = {
'organization': self.organization,
'actor': actor,
'provider': provider,
'url': absolute_uri(reverse('sentry-auth-organization', kwargs=link_args)),
}
msg = MessageBuilder(
subject='Action Required for %s' % (self.organization.name, ),
template='sentry/emails/auth-link-identity.txt',
html_template='sentry/emails/auth-link-identity.html',
type='organization.auth_link',
context=context,
)
msg.send_async([self.get_email()])
def send_sso_unlink_email(self, actor, provider):
from sentry.utils.email import MessageBuilder
from sentry.models import LostPasswordHash
email = self.get_email()
recover_uri = '{path}?{query}'.format(
path=reverse('sentry-account-recover'),
query=urlencode({'email': email}),
)
# Nothing to send if this member isn't associated to a user
if not self.user_id:
return
context = {
'email': email,
'recover_url': absolute_uri(recover_uri),
'has_password': self.user.password,
'organization': self.organization,
'actor': actor,
'provider': provider,
}
if not self.user.password:
password_hash = LostPasswordHash.for_user(self.user)
context['set_password_url'] = password_hash.get_absolute_url(mode='set_password')
msg = MessageBuilder(
subject='Action Required for %s' % (self.organization.name, ),
template='sentry/emails/auth-sso-disabled.txt',
html_template='sentry/emails/auth-sso-disabled.html',
type='organization.auth_sso_disabled',
context=context,
)
msg.send_async([email])
def get_display_name(self):
if self.user_id:
return self.user.get_display_name()
return self.email
def get_label(self):
if self.user_id:
return self.user.get_label()
return self.email or self.id
def get_email(self):
if self.user_id:
return self.user.email
return self.email
def get_avatar_type(self):
if self.user_id:
return self.user.get_avatar_type()
return 'letter_avatar'
def get_audit_log_data(self):
from sentry.models import Team
teams = list(Team.objects.filter(
id__in=OrganizationMemberTeam.objects.filter(
organizationmember=self,
is_active=True,
).values_list('team', flat=True)
).values('id', 'slug')
)
return {
'email':
self.email,
'user':
self.user_id,
'teams': [t['id'] for t in teams],
'teams_slugs': [t['slug'] for t in teams],
'has_global_access':
self.has_global_access,
'role':
self.role,
}
def get_teams(self):
from sentry.models import Team
if roles.get(self.role).is_global:
return self.organization.team_set.all()
return Team.objects.filter(
id__in=OrganizationMemberTeam.objects.filter(
organizationmember=self,
is_active=True,
).values('team')
)
def get_scopes(self):
return roles.get(self.role).scopes
| 31.390511 | 98 | 0.6183 |
1d32356d77a332608761321075dfa8c9d96d79db | 4,269 | py | Python | tests/python/pants_test/engine/legacy/test_filemap_integration.py | anthonyjpratti/pants | d98e53af6ddd877861231bce8343f8204da0a9d1 | [
"Apache-2.0"
] | null | null | null | tests/python/pants_test/engine/legacy/test_filemap_integration.py | anthonyjpratti/pants | d98e53af6ddd877861231bce8343f8204da0a9d1 | [
"Apache-2.0"
] | 1 | 2018-09-04T17:37:34.000Z | 2018-09-04T19:42:58.000Z | tests/python/pants_test/engine/legacy/test_filemap_integration.py | anthonyjpratti/pants | d98e53af6ddd877861231bce8343f8204da0a9d1 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
from pants.base.file_system_project_tree import FileSystemProjectTree
from pants.testutil.pants_run_integration_test import PantsRunIntegrationTest
class FilemapIntegrationTest(PantsRunIntegrationTest):
PATH_PREFIX = 'testprojects/tests/python/pants/file_sets/'
TEST_EXCLUDE_FILES = {
'a.py', 'aa.py', 'aaa.py', 'ab.py', 'aabb.py', 'test_a.py',
'dir1/a.py', 'dir1/aa.py', 'dir1/aaa.py',
'dir1/ab.py', 'dir1/aabb.py', 'dir1/dirdir1/a.py', 'dir1/dirdir1/aa.py', 'dir1/dirdir1/ab.py'
}
def setUp(self):
super().setUp()
project_tree = FileSystemProjectTree(os.path.abspath(self.PATH_PREFIX), ['BUILD', '.*'])
scan_set = set()
def should_ignore(file):
return file.endswith('.pyc') or file.endswith('__init__.py')
for root, dirs, files in project_tree.walk(''):
scan_set.update({os.path.join(root, f) for f in files if not should_ignore(f)})
self.assertEqual(scan_set, self.TEST_EXCLUDE_FILES)
def _mk_target(self, test_name):
return f'{self.PATH_PREFIX}:{test_name}'
def _extract_exclude_output(self, test_name):
stdout_data = self.do_command(
'filemap', self._mk_target(test_name), success=True
).stdout_data
return {
s.split(' ')[0].replace(self.PATH_PREFIX, '')
for s in stdout_data.split('\n')
if s.startswith(self.PATH_PREFIX) and '__init__.py' not in s
}
def test_python_sources(self):
run = self.do_command('filemap',
'testprojects/src/python/sources',
success=True)
self.assertIn('testprojects/src/python/sources/sources.py', run.stdout_data)
def test_exclude_invalid_string(self):
build_path = os.path.join(self.PATH_PREFIX, 'BUILD.invalid')
build_content = '''python_library(name='exclude_strings_disallowed',
sources=rglobs('*.py', exclude='aa.py'))'''
with self.temporary_file_content(build_path, build_content, binary_mode=False):
pants_run = self.do_command('filemap',
self._mk_target('exclude_strings_disallowed'),
success=False)
self.assertRegex(pants_run.stderr_data, r'Excludes of type `.*` are not supported')
def test_exclude_list_of_strings(self):
test_out = self._extract_exclude_output('exclude_list_of_strings')
self.assertEqual(self.TEST_EXCLUDE_FILES - {'aaa.py', 'dir1/aaa.py'},
test_out)
def test_exclude_globs(self):
test_out = self._extract_exclude_output('exclude_globs')
self.assertEqual(self.TEST_EXCLUDE_FILES - {'aabb.py', 'dir1/dirdir1/aa.py'},
test_out)
def test_exclude_strings(self):
test_out = self._extract_exclude_output('exclude_strings')
self.assertEqual(self.TEST_EXCLUDE_FILES - {'aa.py', 'ab.py'},
test_out)
def test_exclude_set(self):
test_out = self._extract_exclude_output('exclude_set')
self.assertEqual(self.TEST_EXCLUDE_FILES - {'aaa.py', 'a.py'},
test_out)
def test_exclude_rglobs(self):
test_out = self._extract_exclude_output('exclude_rglobs')
self.assertEqual(self.TEST_EXCLUDE_FILES - {'ab.py', 'aabb.py', 'dir1/ab.py', 'dir1/aabb.py', 'dir1/dirdir1/ab.py'},
test_out)
def test_exclude_zglobs(self):
test_out = self._extract_exclude_output('exclude_zglobs')
self.assertEqual(self.TEST_EXCLUDE_FILES - {'ab.py', 'aabb.py', 'dir1/ab.py', 'dir1/aabb.py', 'dir1/dirdir1/ab.py'},
test_out)
def test_exclude_composite(self):
test_out = self._extract_exclude_output('exclude_composite')
self.assertEqual(self.TEST_EXCLUDE_FILES -
{'a.py', 'aaa.py', 'dir1/a.py', 'dir1/dirdir1/a.py'},
test_out)
def test_implicit_sources(self):
test_out = self._extract_exclude_output('implicit_sources')
self.assertEqual({'a.py', 'aa.py', 'aaa.py', 'aabb.py', 'ab.py'},
test_out)
test_out = self._extract_exclude_output('test_with_implicit_sources')
self.assertEqual({'test_a.py'}, test_out)
| 40.657143 | 120 | 0.662919 |
5e304d2b6637f0c9af6aae8af1a802463ba1e4f5 | 3,193 | py | Python | src/m6_grid_row_and_column.py | deckerdj/24-Tkinter | 5543aef3cd9a64e28084c6d3e580989cb6eb269d | [
"MIT"
] | null | null | null | src/m6_grid_row_and_column.py | deckerdj/24-Tkinter | 5543aef3cd9a64e28084c6d3e580989cb6eb269d | [
"MIT"
] | null | null | null | src/m6_grid_row_and_column.py | deckerdj/24-Tkinter | 5543aef3cd9a64e28084c6d3e580989cb6eb269d | [
"MIT"
] | null | null | null | """
This project lets you try out Tkinter/Ttk and practice it!
Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher,
Mark Hays, Amanda Stouder, Aaron Wilkin, their colleagues,
and Daniel Decker.
""" # Done: 1. PUT YOUR NAME IN THE ABOVE LINE.
import tkinter
from tkinter import ttk
def main():
""" Constructs a GUI that will be used MUCH later to control EV3. """
# -------------------------------------------------------------------------
# TODO: 2. Follow along with the video to make a remote control GUI
# For every grid() method call you will add a row and a column argument
# -------------------------------------------------------------------------
root = tkinter.Tk()
root.title("MQTT Remote")
main_frame = ttk.Frame(root, padding=20)
main_frame.grid() # only grid call that does NOT need a row and column
left_speed_label = ttk.Label(main_frame, text="Left")
left_speed_label.grid()
left_speed_entry = ttk.Entry(main_frame, width=8)
left_speed_entry.insert(0, "600")
left_speed_entry.grid()
right_speed_label = ttk.Label(main_frame, text="Right")
right_speed_label.grid()
right_speed_entry = ttk.Entry(main_frame, width=8, justify=tkinter.RIGHT)
right_speed_entry.insert(0, "600")
right_speed_entry.grid()
forward_button = ttk.Button(main_frame, text="Forward")
forward_button.grid()
forward_button['command'] = lambda: print("Forward button")
root.bind('<Up>', lambda event: print("Forward key"))
left_button = ttk.Button(main_frame, text="Left")
left_button.grid()
left_button['command'] = lambda: print("Left button")
root.bind('<Left>', lambda event: print("Left key"))
stop_button = ttk.Button(main_frame, text="Stop")
stop_button.grid()
stop_button['command'] = lambda: print("Stop button")
root.bind('<space>', lambda event: print("Stop key"))
right_button = ttk.Button(main_frame, text="Right")
right_button.grid()
right_button['command'] = lambda: print("Right button")
root.bind('<Right>', lambda event: print("Right key"))
back_button = ttk.Button(main_frame, text="Back")
back_button.grid()
back_button['command'] = lambda: print("Back button")
root.bind('<Down>', lambda event: print("Back key"))
up_button = ttk.Button(main_frame, text="Up")
up_button.grid()
up_button['command'] = lambda: print("Up button")
root.bind('<u>', lambda event: print("Up key"))
down_button = ttk.Button(main_frame, text="Down")
down_button.grid()
down_button['command'] = lambda: print("Down button")
root.bind('<j>', lambda event: print("Down key"))
# Buttons for quit and exit
q_button = ttk.Button(main_frame, text="Quit")
q_button.grid()
q_button['command'] = lambda: print("Quit button")
e_button = ttk.Button(main_frame, text="Exit")
e_button.grid()
e_button['command'] = lambda: exit()
root.mainloop()
# -----------------------------------------------------------------------------
# Calls main to start the ball rolling.
# -----------------------------------------------------------------------------
main()
| 35.876404 | 79 | 0.607892 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.