hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f71b1ab7a5127da844d3de34669b1e4ea7ec03cf | 4,069 | py | Python | tests/components/smartthings/test_lock.py | pcaston/core | e74d946cef7a9d4e232ae9e0ba150d18018cfe33 | [
"Apache-2.0"
] | 1 | 2021-07-08T20:09:55.000Z | 2021-07-08T20:09:55.000Z | tests/components/smartthings/test_lock.py | pcaston/core | e74d946cef7a9d4e232ae9e0ba150d18018cfe33 | [
"Apache-2.0"
] | 47 | 2021-02-21T23:43:07.000Z | 2022-03-31T06:07:10.000Z | tests/components/smartthings/test_lock.py | OpenPeerPower/core | f673dfac9f2d0c48fa30af37b0a99df9dd6640ee | [
"Apache-2.0"
] | null | null | null | """
Test for the SmartThings lock platform.
The only mocking required is of the underlying SmartThings API object so
real HTTP calls are not initiated during testing.
"""
from pysmartthings import Attribute, Capability
from pysmartthings.device import Status
from openpeerpower.components.lock import DOMAIN as LOCK_DOMAIN
from openpeerpower.components.smartthings.const import DOMAIN, SIGNAL_SMARTTHINGS_UPDATE
from openpeerpower.config_entries import ConfigEntryState
from openpeerpower.const import STATE_UNAVAILABLE
from openpeerpower.helpers import device_registry as dr, entity_registry as er
from openpeerpower.helpers.dispatcher import async_dispatcher_send
from .conftest import setup_platform
async def test_entity_and_device_attributes(opp, device_factory):
"""Test the attributes of the entity are correct."""
# Arrange
device = device_factory("Lock_1", [Capability.lock], {Attribute.lock: "unlocked"})
entity_registry = er.async_get(opp)
device_registry = dr.async_get(opp)
# Act
await setup_platform(opp, LOCK_DOMAIN, devices=[device])
# Assert
entry = entity_registry.async_get("lock.lock_1")
assert entry
assert entry.unique_id == device.device_id
entry = device_registry.async_get_device({(DOMAIN, device.device_id)})
assert entry
assert entry.name == device.label
assert entry.model == device.device_type_name
assert entry.manufacturer == "Unavailable"
async def test_lock(opp, device_factory):
"""Test the lock locks successfully."""
# Arrange
device = device_factory("Lock_1", [Capability.lock])
device.status.attributes[Attribute.lock] = Status(
"unlocked",
None,
{
"method": "Manual",
"codeId": None,
"codeName": "Code 1",
"lockName": "Front Door",
"usedCode": "Code 2",
},
)
await setup_platform(opp, LOCK_DOMAIN, devices=[device])
# Act
await opp.services.async_call(
LOCK_DOMAIN, "lock", {"entity_id": "lock.lock_1"}, blocking=True
)
# Assert
state = opp.states.get("lock.lock_1")
assert state is not None
assert state.state == "locked"
assert state.attributes["method"] == "Manual"
assert state.attributes["lock_state"] == "locked"
assert state.attributes["code_name"] == "Code 1"
assert state.attributes["used_code"] == "Code 2"
assert state.attributes["lock_name"] == "Front Door"
assert "code_id" not in state.attributes
async def test_unlock(opp, device_factory):
"""Test the lock unlocks successfully."""
# Arrange
device = device_factory("Lock_1", [Capability.lock], {Attribute.lock: "locked"})
await setup_platform(opp, LOCK_DOMAIN, devices=[device])
# Act
await opp.services.async_call(
LOCK_DOMAIN, "unlock", {"entity_id": "lock.lock_1"}, blocking=True
)
# Assert
state = opp.states.get("lock.lock_1")
assert state is not None
assert state.state == "unlocked"
async def test_update_from_signal(opp, device_factory):
"""Test the lock updates when receiving a signal."""
# Arrange
device = device_factory("Lock_1", [Capability.lock], {Attribute.lock: "unlocked"})
await setup_platform(opp, LOCK_DOMAIN, devices=[device])
await device.lock(True)
# Act
async_dispatcher_send(opp, SIGNAL_SMARTTHINGS_UPDATE, [device.device_id])
# Assert
await opp.async_block_till_done()
state = opp.states.get("lock.lock_1")
assert state is not None
assert state.state == "locked"
async def test_unload_config_entry(opp, device_factory):
"""Test the lock is removed when the config entry is unloaded."""
# Arrange
device = device_factory("Lock_1", [Capability.lock], {Attribute.lock: "locked"})
config_entry = await setup_platform(opp, LOCK_DOMAIN, devices=[device])
config_entry.state = ConfigEntryState.LOADED
# Act
await opp.config_entries.async_forward_entry_unload(config_entry, "lock")
# Assert
assert opp.states.get("lock.lock_1").state == STATE_UNAVAILABLE
| 36.330357 | 88 | 0.706562 | from pysmartthings import Attribute, Capability
from pysmartthings.device import Status
from openpeerpower.components.lock import DOMAIN as LOCK_DOMAIN
from openpeerpower.components.smartthings.const import DOMAIN, SIGNAL_SMARTTHINGS_UPDATE
from openpeerpower.config_entries import ConfigEntryState
from openpeerpower.const import STATE_UNAVAILABLE
from openpeerpower.helpers import device_registry as dr, entity_registry as er
from openpeerpower.helpers.dispatcher import async_dispatcher_send
from .conftest import setup_platform
async def test_entity_and_device_attributes(opp, device_factory):
device = device_factory("Lock_1", [Capability.lock], {Attribute.lock: "unlocked"})
entity_registry = er.async_get(opp)
device_registry = dr.async_get(opp)
await setup_platform(opp, LOCK_DOMAIN, devices=[device])
entry = entity_registry.async_get("lock.lock_1")
assert entry
assert entry.unique_id == device.device_id
entry = device_registry.async_get_device({(DOMAIN, device.device_id)})
assert entry
assert entry.name == device.label
assert entry.model == device.device_type_name
assert entry.manufacturer == "Unavailable"
async def test_lock(opp, device_factory):
device = device_factory("Lock_1", [Capability.lock])
device.status.attributes[Attribute.lock] = Status(
"unlocked",
None,
{
"method": "Manual",
"codeId": None,
"codeName": "Code 1",
"lockName": "Front Door",
"usedCode": "Code 2",
},
)
await setup_platform(opp, LOCK_DOMAIN, devices=[device])
await opp.services.async_call(
LOCK_DOMAIN, "lock", {"entity_id": "lock.lock_1"}, blocking=True
)
state = opp.states.get("lock.lock_1")
assert state is not None
assert state.state == "locked"
assert state.attributes["method"] == "Manual"
assert state.attributes["lock_state"] == "locked"
assert state.attributes["code_name"] == "Code 1"
assert state.attributes["used_code"] == "Code 2"
assert state.attributes["lock_name"] == "Front Door"
assert "code_id" not in state.attributes
async def test_unlock(opp, device_factory):
device = device_factory("Lock_1", [Capability.lock], {Attribute.lock: "locked"})
await setup_platform(opp, LOCK_DOMAIN, devices=[device])
await opp.services.async_call(
LOCK_DOMAIN, "unlock", {"entity_id": "lock.lock_1"}, blocking=True
)
state = opp.states.get("lock.lock_1")
assert state is not None
assert state.state == "unlocked"
async def test_update_from_signal(opp, device_factory):
device = device_factory("Lock_1", [Capability.lock], {Attribute.lock: "unlocked"})
await setup_platform(opp, LOCK_DOMAIN, devices=[device])
await device.lock(True)
async_dispatcher_send(opp, SIGNAL_SMARTTHINGS_UPDATE, [device.device_id])
await opp.async_block_till_done()
state = opp.states.get("lock.lock_1")
assert state is not None
assert state.state == "locked"
async def test_unload_config_entry(opp, device_factory):
device = device_factory("Lock_1", [Capability.lock], {Attribute.lock: "locked"})
config_entry = await setup_platform(opp, LOCK_DOMAIN, devices=[device])
config_entry.state = ConfigEntryState.LOADED
await opp.config_entries.async_forward_entry_unload(config_entry, "lock")
assert opp.states.get("lock.lock_1").state == STATE_UNAVAILABLE
| true | true |
f71b1af96cc4062163b2c26f68ca4f8e5e16759c | 21,877 | py | Python | scripts/linters/general_purpose_linter.py | ParitoshKabra/oppia | a8945a5ff28fcbe4eaca1e22d99ed4d3e82f2dca | [
"Apache-2.0"
] | 2 | 2022-02-24T14:06:42.000Z | 2022-02-24T14:11:05.000Z | scripts/linters/general_purpose_linter.py | ParitoshKabra/oppia | a8945a5ff28fcbe4eaca1e22d99ed4d3e82f2dca | [
"Apache-2.0"
] | null | null | null | scripts/linters/general_purpose_linter.py | ParitoshKabra/oppia | a8945a5ff28fcbe4eaca1e22d99ed4d3e82f2dca | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lint checks used by all the linters."""
from __future__ import annotations
import os
import re
from . import js_ts_linter
from . import warranted_angular_security_bypasses
from .. import build
from .. import common
from .. import concurrent_task_utils
EXCLUDED_PATHS = (
'third_party/*', 'build/*', '.git/*', '*.pyc', 'CHANGELOG',
'integrations/*', 'integrations_dev/*', '*.svg', '*.gif', '*.png',
'*.webp', '*.zip', '*.ico', '*.jpg', '*.min.js', 'backend_prod_files/*',
'assets/scripts/*', 'core/domain/proto/*.py', 'core/tests/data/*',
'core/tests/build_sources/*', '*.mp3', '*.mp4', 'node_modules/*',
'typings/*', 'local_compiled_js/*', 'webpack_bundles/*',
'core/tests/services_sources/*', 'core/tests/release_sources/tmp_unzip.zip',
'scripts/linters/test_files/*', 'proto_files/*',
'core/tests/release_sources/tmp_unzip.tar.gz',
'core/templates/combined-tests.spec.ts',
'core/templates/css/oppia-material.css',
'core/templates/google-analytics.initializer.ts',
'extensions/classifiers/proto/*', '*.rtl.css',
'%s/*' % js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH)
GENERATED_FILE_PATHS = (
'core/templates/expressions/parser.js',)
CONFIG_FILE_PATHS = (
'core/tests/.browserstack.env.example',
'core/tests/protractor.conf.js',
'core/tests/karma.conf.ts',
'core/templates/mathjaxConfig.ts',
'assets/constants.ts',
'assets/rich_text_components_definitions.ts',
'webpack.config.ts',
'webpack.dev.config.ts',
'webpack.prod.config.ts')
BAD_STRINGS_CONSTANTS = {
'"DEV_MODE": false': {
'message': 'Please set the DEV_MODE variable in constants.ts '
'to true before committing.',
'excluded_files': ()
},
'"EMULATOR_MODE": false': {
'message': 'Please set the EMULATOR_MODE variable in constants.ts '
'to true before committing.',
'excluded_files': ()
}
}
BAD_PATTERNS = {
'\t': {
'message': 'Please use spaces instead of tabs.',
'excluded_files': (),
'excluded_dirs': (
'assets/i18n/', 'core/tests/build_sources/assets/')},
'\r': {
'message': 'Please make sure all files only have LF endings (no CRLF).',
'excluded_files': (),
'excluded_dirs': ()},
'<<<<<<<': {
'message': 'Please fully resolve existing merge conflicts.',
'excluded_files': (),
'excluded_dirs': ()},
'>>>>>>>': {
'message': 'Please fully resolve existing merge conflicts.',
'excluded_files': (),
'excluded_dirs': ()},
'glyphicon': {
'message': 'Please use equivalent material-icons '
'instead of glyphicons.',
'excluded_files': (),
'excluded_dirs': ()}
}
BAD_PATTERNS_REGEXP = [
{
'regexp': re.compile(r'TODO[^\(]*[^\)][^:]*[^A-Z]+[^\w]*$'),
'message': 'Please link TODO comments to an issue '
'in the format TODO(#issuenum): XXX. ',
'excluded_files': (),
'excluded_dirs': ()
}
]
MANDATORY_PATTERNS_REGEXP = [
{
'regexp': re.compile(
r'Copyright \d{4} The Oppia Authors\. All Rights Reserved\.'),
'message': 'Please ensure this file should contain a proper '
'copyright notice.',
'included_types': ('.py', '.js', '.sh', '.ts'),
'excluded_files': GENERATED_FILE_PATHS + CONFIG_FILE_PATHS + (
'__init__.py', ),
'excluded_dirs': EXCLUDED_PATHS
},
{
'regexp': re.compile('from __future__ import annotations'),
'message': 'Please ensure this file should contain annotations '
'future import.',
'included_types': ('.py'),
'excluded_files': GENERATED_FILE_PATHS + CONFIG_FILE_PATHS + (
'__init__.py',),
'excluded_dirs': EXCLUDED_PATHS
}
]
MANDATORY_PATTERNS_JS_REGEXP = [
{
'regexp': re.compile(r'^\s\*\s@fileoverview\s[a-zA-Z0-9_]+'),
'message': 'Please ensure this file should contain a file '
'overview i.e. a short description of the file.',
'included_types': ('.js', '.ts'),
'excluded_files': GENERATED_FILE_PATHS + CONFIG_FILE_PATHS,
'excluded_dirs': EXCLUDED_PATHS
}
]
BAD_LINE_PATTERNS_HTML_REGEXP = [
{
'regexp': re.compile(r'text\/ng-template'),
'message': 'The directives must be directly referenced.',
'excluded_files': (),
'excluded_dirs': (
'extensions/answer_summarizers/',
'extensions/classifiers/',
'extensions/objects/',
'extensions/value_generators/')
},
{
'regexp': re.compile(r'[ \t]+$'),
'message': 'There should not be any trailing whitespaces.',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'\$parent'),
'message': 'Please do not access parent properties ' +
'using $parent. Use the scope object ' +
'for this purpose.',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'\s+style\s*=\s*'),
'message': 'Please do not use inline styling.',
'excluded_files': (),
'excluded_dirs': ()
}
]
BAD_PATTERNS_PYTHON_REGEXP = [
{
'regexp': re.compile(r'__author__'),
'message': 'Please remove author tags from this file.',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'ndb\.'),
'message': (
'Please use datastore_services instead of ndb, for example:\n'
'\n'
'datastore_services = models.Registry.import_datastore_services()\n'
'\n'
'class SampleModel(datastore_services.Model):\n'
' ...\n'),
'excluded_files': (),
'excluded_dirs': ('core/platform',),
},
{
'regexp': re.compile(r'\Wprint\('),
'message': 'Please do not use print statement.',
'excluded_files': (
'core/tests/test_utils.py',
'core/tests/performance_framework/perf_domain.py',
'core/tests/test_utils_test.py'),
'excluded_dirs': ('scripts/',)
},
{
'regexp': re.compile(r'# pylint:\s*disable=[A-Z][0-9]{4}'),
'message': 'Please remove pylint exclusion if it is unnecessary, or '
'make it human readable with a sentence instead of an id. '
'The id-to-message list can be seen '
'here->http://pylint-messages.wikidot.com/all-codes',
'excluded_files': (),
'excluded_dirs': ()
},
]
BAD_PATTERNS_MAP = {
'.html': BAD_LINE_PATTERNS_HTML_REGEXP,
'.py': BAD_PATTERNS_PYTHON_REGEXP
}
def is_filepath_excluded_for_bad_patterns_check(pattern, filepath):
"""Checks if file is excluded from the bad patterns check.
Args:
pattern: str. The pattern to be checked against.
filepath: str. Path of the file.
Returns:
bool. Whether to exclude the given file from this
particular pattern check.
"""
return (any(
filepath.startswith(bad_pattern)
for bad_pattern in BAD_PATTERNS[pattern]['excluded_dirs'])
or filepath in BAD_PATTERNS[pattern]['excluded_files'])
def check_bad_pattern_in_file(filepath, file_content, pattern):
"""Detects whether the given pattern is present in the file.
Args:
filepath: str. Path of the file.
file_content: str. Contents of the file.
pattern: dict. (regexp(regex pattern) : Object containing details for
the pattern to be checked. Pattern to match:
message: str. Message to show if pattern matches.
excluded_files: tuple(str). Files to be excluded from matching.
excluded_dirs: tuple(str). Directories to be excluded from
matching).
Returns:
tuple(bool, list(str)). A 2-tuple whose first element is a bool
which set to True if there is bad pattern found else False, whose second
element is a list of failed messages.
"""
error_messages = []
failed = False
regexp = pattern['regexp']
if not (any(
filepath.startswith(excluded_dir)
for excluded_dir in pattern['excluded_dirs'])
or any(
filepath.endswith(excluded_file)
for excluded_file in pattern['excluded_files'])):
bad_pattern_count = 0
for line_num, line in enumerate(file_content, 1):
if line.endswith('\n'):
stripped_line = line[:-1]
else:
stripped_line = line
if stripped_line.endswith('disable-bad-pattern-check'):
continue
if regexp.search(stripped_line):
error_message = ('%s --> Line %s: %s' % (
filepath, line_num, pattern['message']))
error_messages.append(error_message)
bad_pattern_count += 1
if bad_pattern_count:
failed = True
return failed, error_messages
return failed, error_messages
def check_file_type_specific_bad_pattern(filepath, content):
"""Check the file content based on the file's extension.
Args:
filepath: str. Path of the file.
content: str. Contents of the file.
Returns:
bool. True if there is bad pattern else false.
total_error_count: int. The number of errors.
"""
error_messages = []
failed = False
_, extension = os.path.splitext(filepath)
pattern = BAD_PATTERNS_MAP.get(extension)
total_error_count = 0
if pattern:
for regexp in pattern:
failed, error_message = check_bad_pattern_in_file(
filepath, content, regexp)
error_messages.extend(error_message)
if failed:
total_error_count += 1
if total_error_count:
failed = True
return failed, total_error_count, error_messages
class GeneralPurposeLinter:
"""Manages all the common linting functions. As an abstract base class, this
is not intended to be used directly.
"""
def __init__(self, files_to_lint, file_cache):
"""Constructs a GeneralPurposeLinter object.
Args:
files_to_lint: list(str). A list of filepaths to lint.
file_cache: object(FileCache). Provides thread-safe access to cached
file content.
"""
# Set path for node.
# The path for node is set explicitly, since otherwise the lint
# tests fail on CircleCI due to the TypeScript files not being
# compilable.
os.environ['PATH'] = '%s/bin:' % common.NODE_PATH + os.environ['PATH']
self.files_to_lint = files_to_lint
self.file_cache = file_cache
@property
def all_filepaths(self):
"""Returns all file paths."""
return self.files_to_lint
def _check_for_mandatory_pattern_in_file(
self, pattern_list, filepath, failed):
"""Checks for a given mandatory pattern in a file.
Args:
pattern_list: list(dict). The list of the mandatory patterns list to
be checked for in the file.
filepath: str. The path to the file to be linted.
failed: bool. Status of failure of the check.
Returns:
bool. The failure status of the check.
"""
# This boolean list keeps track of the regex matches
# found in the file.
pattern_found_list = []
error_messages = []
try:
file_content = self.file_cache.readlines(filepath)
except Exception as e:
raise Exception('%s %s' % (filepath, e)) from e
for index, regexp_to_check in enumerate(
pattern_list):
if (any(filepath.endswith(
allowed_type) for allowed_type in (
regexp_to_check['included_types'])) and (
not any(
filepath.endswith(
pattern) for pattern in (
regexp_to_check['excluded_files'] +
regexp_to_check['excluded_dirs'])))):
pattern_found_list.append(index)
for line in file_content:
if regexp_to_check['regexp'].search(line):
pattern_found_list.pop()
break
if pattern_found_list:
failed = True
for pattern_found in pattern_found_list:
error_message = ('%s --> %s' % (
filepath,
pattern_list[pattern_found]['message']))
error_messages.append(error_message)
return failed, error_messages
def check_mandatory_patterns(self):
"""This function checks that all files contain the mandatory
patterns.
"""
name = 'Mandatory pattern'
error_messages = []
failed = False
sets_of_patterns_to_match = [
MANDATORY_PATTERNS_REGEXP, MANDATORY_PATTERNS_JS_REGEXP]
for filepath in self.all_filepaths:
for pattern_list in sets_of_patterns_to_match:
failed, mandatory_error_messages = (
self._check_for_mandatory_pattern_in_file(
pattern_list, filepath, failed))
error_messages.extend(mandatory_error_messages)
return concurrent_task_utils.TaskResult(
name, failed, error_messages, error_messages)
def check_bad_patterns(self):
"""This function is used for detecting bad patterns."""
name = 'Bad pattern'
total_files_checked = 0
total_error_count = 0
error_messages = []
all_filepaths = [
filepath for filepath in self.all_filepaths if not (
filepath.endswith('general_purpose_linter.py') or (
filepath.endswith('general_purpose_linter_test.py')))]
failed = False
for filepath in all_filepaths:
file_content = self.file_cache.readlines(filepath)
total_files_checked += 1
for pattern, error in BAD_PATTERNS.items():
if is_filepath_excluded_for_bad_patterns_check(
pattern, filepath):
continue
for line_num, line in enumerate(file_content):
if pattern in line:
failed = True
error_message = ('%s --> Line %s: %s' % (
filepath, line_num + 1,
error['message']))
error_messages.append(error_message)
total_error_count += 1
for regexp in BAD_PATTERNS_REGEXP:
bad_pattern_check_failed, bad_pattern_error_messages = (
check_bad_pattern_in_file(
filepath, file_content, regexp))
if bad_pattern_check_failed:
error_messages.extend(bad_pattern_error_messages)
total_error_count += 1
(
file_type_specific_bad_pattern_failed,
temp_count, bad_pattern_error_messages) = (
check_file_type_specific_bad_pattern(
filepath, file_content))
failed = (
failed or file_type_specific_bad_pattern_failed or
bad_pattern_check_failed)
total_error_count += temp_count
error_messages.extend(bad_pattern_error_messages)
if filepath.endswith('constants.ts'):
for pattern, constants in BAD_STRINGS_CONSTANTS.items():
for line in file_content:
if pattern in line:
failed = True
error_message = ('%s --> %s' % (
filepath,
constants['message']))
error_messages.append(error_message)
total_error_count += 1
return concurrent_task_utils.TaskResult(
name, failed, error_messages, error_messages)
def check_newline_at_eof(self):
"""This function is used to detect newline at the end of file."""
name = 'Newline at EOF'
error_messages = []
files_to_lint = self.all_filepaths
failed = False
for filepath in files_to_lint:
file_content = self.file_cache.readlines(filepath)
file_length = len(file_content)
if (
file_length >= 1 and
not re.search(r'[^\n]\n', file_content[-1])):
error_message = (
'%s --> There should be a single newline at the '
'end of file.' % filepath)
error_messages.append(error_message)
failed = True
return concurrent_task_utils.TaskResult(
name, failed, error_messages, error_messages)
def check_disallowed_flags(self):
"""This function is used to disallow flags."""
name = 'Disallow flags'
disallow_flag = (
'eslint-disable-next-line oppia/no-bypass-security-phrase')
error_messages = []
files_to_lint = self.all_filepaths
failed = False
excluded_files = (
warranted_angular_security_bypasses
.EXCLUDED_BYPASS_SECURITY_TRUST_FILES)
allowed_files = ''
for filepath in files_to_lint:
for excluded_file in excluded_files:
if excluded_file in filepath:
allowed_files = filepath
if not filepath.endswith('.ts') or filepath == allowed_files:
continue
file_content = self.file_cache.read(filepath)
if disallow_flag in file_content:
error_message = (
'%s --> Please do not use "no-bypass-security-phrase" flag.'
' It is only expected to be used in files listed in'
' warranted_angular_security_bypasses.py' % filepath)
error_messages.append(error_message)
failed = True
return concurrent_task_utils.TaskResult(
name, failed, error_messages, error_messages)
def check_extra_js_files(self):
"""Checks if the changes made include extra js files in core
or extensions folder which are not specified in
build.JS_FILEPATHS_NOT_TO_BUILD.
Returns:
TaskResult. A TaskResult object representing the result of the lint
check.
"""
name = 'Extra JS files'
error_messages = []
files_to_lint = self.all_filepaths
failed = False
for filepath in files_to_lint:
if filepath.endswith(
('.js')) and filepath.startswith(
('core/templates', 'extensions')) and (
filepath not in build.JS_FILEPATHS_NOT_TO_BUILD
) and not filepath.endswith('protractor.js'):
error_message = (
'%s --> Found extra .js file' % filepath)
error_messages.append(error_message)
failed = True
if failed:
err_msg = (
'If you want the above files to be present as js files, '
'add them to the list JS_FILEPATHS_NOT_TO_BUILD in '
'build.py. Otherwise, rename them to .ts')
error_messages.append(err_msg)
return concurrent_task_utils.TaskResult(
name, failed, error_messages, error_messages)
def perform_all_lint_checks(self):
"""Perform all the lint checks and returns the messages returned by all
the checks.
Returns:
list(TaskResult). A list of TaskResult objects representing the
results of the lint checks.
"""
if not self.all_filepaths:
return [
concurrent_task_utils.TaskResult(
'General purpose lint', False, [],
['There are no files to be checked.'])]
task_results = [
self.check_mandatory_patterns(), self.check_bad_patterns(),
self.check_newline_at_eof(), self.check_extra_js_files(),
self.check_disallowed_flags()]
return task_results
def get_linters(files_to_lint, file_cache):
"""Creates GeneralPurposeLinter object and returns it.
Args:
files_to_lint: list(str). A list of filepaths to lint.
file_cache: object(FileCache). Provides thread-safe access to cached
file content.
Returns:
tuple(GeneralPurposeLinter, None). A 2-tuple of custom and third_party
linter objects.
"""
custom_linter = GeneralPurposeLinter(files_to_lint, file_cache)
return custom_linter, None
| 37.589347 | 80 | 0.581341 |
from __future__ import annotations
import os
import re
from . import js_ts_linter
from . import warranted_angular_security_bypasses
from .. import build
from .. import common
from .. import concurrent_task_utils
EXCLUDED_PATHS = (
'third_party/*', 'build/*', '.git/*', '*.pyc', 'CHANGELOG',
'integrations/*', 'integrations_dev/*', '*.svg', '*.gif', '*.png',
'*.webp', '*.zip', '*.ico', '*.jpg', '*.min.js', 'backend_prod_files/*',
'assets/scripts/*', 'core/domain/proto/*.py', 'core/tests/data/*',
'core/tests/build_sources/*', '*.mp3', '*.mp4', 'node_modules/*',
'typings/*', 'local_compiled_js/*', 'webpack_bundles/*',
'core/tests/services_sources/*', 'core/tests/release_sources/tmp_unzip.zip',
'scripts/linters/test_files/*', 'proto_files/*',
'core/tests/release_sources/tmp_unzip.tar.gz',
'core/templates/combined-tests.spec.ts',
'core/templates/css/oppia-material.css',
'core/templates/google-analytics.initializer.ts',
'extensions/classifiers/proto/*', '*.rtl.css',
'%s/*' % js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH)
GENERATED_FILE_PATHS = (
'core/templates/expressions/parser.js',)
CONFIG_FILE_PATHS = (
'core/tests/.browserstack.env.example',
'core/tests/protractor.conf.js',
'core/tests/karma.conf.ts',
'core/templates/mathjaxConfig.ts',
'assets/constants.ts',
'assets/rich_text_components_definitions.ts',
'webpack.config.ts',
'webpack.dev.config.ts',
'webpack.prod.config.ts')
BAD_STRINGS_CONSTANTS = {
'"DEV_MODE": false': {
'message': 'Please set the DEV_MODE variable in constants.ts '
'to true before committing.',
'excluded_files': ()
},
'"EMULATOR_MODE": false': {
'message': 'Please set the EMULATOR_MODE variable in constants.ts '
'to true before committing.',
'excluded_files': ()
}
}
BAD_PATTERNS = {
'\t': {
'message': 'Please use spaces instead of tabs.',
'excluded_files': (),
'excluded_dirs': (
'assets/i18n/', 'core/tests/build_sources/assets/')},
'\r': {
'message': 'Please make sure all files only have LF endings (no CRLF).',
'excluded_files': (),
'excluded_dirs': ()},
'<<<<<<<': {
'message': 'Please fully resolve existing merge conflicts.',
'excluded_files': (),
'excluded_dirs': ()},
'>>>>>>>': {
'message': 'Please fully resolve existing merge conflicts.',
'excluded_files': (),
'excluded_dirs': ()},
'glyphicon': {
'message': 'Please use equivalent material-icons '
'instead of glyphicons.',
'excluded_files': (),
'excluded_dirs': ()}
}
BAD_PATTERNS_REGEXP = [
{
'regexp': re.compile(r'TODO[^\(]*[^\)][^:]*[^A-Z]+[^\w]*$'),
'message': 'Please link TODO comments to an issue '
'in the format TODO(#issuenum): XXX. ',
'excluded_files': (),
'excluded_dirs': ()
}
]
MANDATORY_PATTERNS_REGEXP = [
{
'regexp': re.compile(
r'Copyright \d{4} The Oppia Authors\. All Rights Reserved\.'),
'message': 'Please ensure this file should contain a proper '
'copyright notice.',
'included_types': ('.py', '.js', '.sh', '.ts'),
'excluded_files': GENERATED_FILE_PATHS + CONFIG_FILE_PATHS + (
'__init__.py', ),
'excluded_dirs': EXCLUDED_PATHS
},
{
'regexp': re.compile('from __future__ import annotations'),
'message': 'Please ensure this file should contain annotations '
'future import.',
'included_types': ('.py'),
'excluded_files': GENERATED_FILE_PATHS + CONFIG_FILE_PATHS + (
'__init__.py',),
'excluded_dirs': EXCLUDED_PATHS
}
]
MANDATORY_PATTERNS_JS_REGEXP = [
{
'regexp': re.compile(r'^\s\*\s@fileoverview\s[a-zA-Z0-9_]+'),
'message': 'Please ensure this file should contain a file '
'overview i.e. a short description of the file.',
'included_types': ('.js', '.ts'),
'excluded_files': GENERATED_FILE_PATHS + CONFIG_FILE_PATHS,
'excluded_dirs': EXCLUDED_PATHS
}
]
BAD_LINE_PATTERNS_HTML_REGEXP = [
{
'regexp': re.compile(r'text\/ng-template'),
'message': 'The directives must be directly referenced.',
'excluded_files': (),
'excluded_dirs': (
'extensions/answer_summarizers/',
'extensions/classifiers/',
'extensions/objects/',
'extensions/value_generators/')
},
{
'regexp': re.compile(r'[ \t]+$'),
'message': 'There should not be any trailing whitespaces.',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'\$parent'),
'message': 'Please do not access parent properties ' +
'using $parent. Use the scope object ' +
'for this purpose.',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'\s+style\s*=\s*'),
'message': 'Please do not use inline styling.',
'excluded_files': (),
'excluded_dirs': ()
}
]
BAD_PATTERNS_PYTHON_REGEXP = [
{
'regexp': re.compile(r'__author__'),
'message': 'Please remove author tags from this file.',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'ndb\.'),
'message': (
'Please use datastore_services instead of ndb, for example:\n'
'\n'
'datastore_services = models.Registry.import_datastore_services()\n'
'\n'
'class SampleModel(datastore_services.Model):\n'
' ...\n'),
'excluded_files': (),
'excluded_dirs': ('core/platform',),
},
{
'regexp': re.compile(r'\Wprint\('),
'message': 'Please do not use print statement.',
'excluded_files': (
'core/tests/test_utils.py',
'core/tests/performance_framework/perf_domain.py',
'core/tests/test_utils_test.py'),
'excluded_dirs': ('scripts/',)
},
{
'regexp': re.compile(r'# pylint:\s*disable=[A-Z][0-9]{4}'),
'message': 'Please remove pylint exclusion if it is unnecessary, or '
'make it human readable with a sentence instead of an id. '
'The id-to-message list can be seen '
'here->http://pylint-messages.wikidot.com/all-codes',
'excluded_files': (),
'excluded_dirs': ()
},
]
BAD_PATTERNS_MAP = {
'.html': BAD_LINE_PATTERNS_HTML_REGEXP,
'.py': BAD_PATTERNS_PYTHON_REGEXP
}
def is_filepath_excluded_for_bad_patterns_check(pattern, filepath):
return (any(
filepath.startswith(bad_pattern)
for bad_pattern in BAD_PATTERNS[pattern]['excluded_dirs'])
or filepath in BAD_PATTERNS[pattern]['excluded_files'])
def check_bad_pattern_in_file(filepath, file_content, pattern):
    """Detects occurrences of one bad-pattern regexp in a single file.

    Args:
        filepath: str. Path of the file, relative to the repository root.
        file_content: list(str). Lines of the file (usually with trailing
            newlines).
        pattern: dict. Has keys 'regexp' (compiled pattern), 'message',
            'excluded_files' and 'excluded_dirs'.

    Returns:
        tuple(bool, list(str)). Whether the check failed, and one error
        message per offending line.
    """
    error_messages = []
    # Files under an excluded directory, or matching an excluded suffix,
    # are exempt from this check entirely.
    is_excluded = (
        any(filepath.startswith(excluded_dir)
            for excluded_dir in pattern['excluded_dirs'])
        or any(filepath.endswith(excluded_file)
               for excluded_file in pattern['excluded_files']))
    if is_excluded:
        return False, error_messages
    regexp = pattern['regexp']
    for line_num, line in enumerate(file_content, 1):
        # Drop a single trailing newline so the regexp sees the raw line.
        stripped_line = line[:-1] if line.endswith('\n') else line
        # Lines carrying this marker deliberately opt out of the check.
        if stripped_line.endswith('disable-bad-pattern-check'):
            continue
        if regexp.search(stripped_line):
            error_messages.append(
                '%s --> Line %s: %s' % (
                    filepath, line_num, pattern['message']))
    return bool(error_messages), error_messages
def check_file_type_specific_bad_pattern(filepath, content):
    """Runs the extension-specific bad-pattern checks for one file.

    Args:
        filepath: str. Path of the file, relative to the repository root.
        content: list(str). Lines of the file.

    Returns:
        tuple(bool, int, list(str)). Whether any check failed, the number
        of checks that failed, and the accumulated error messages.
    """
    error_messages = []
    total_error_count = 0
    _, extension = os.path.splitext(filepath)
    # Patterns are keyed by extension; unknown extensions get no checks.
    for regexp in BAD_PATTERNS_MAP.get(extension) or ():
        check_failed, check_messages = check_bad_pattern_in_file(
            filepath, content, regexp)
        error_messages.extend(check_messages)
        if check_failed:
            total_error_count += 1
    return total_error_count > 0, total_error_count, error_messages
class GeneralPurposeLinter:
    """Runs the custom general-purpose lint checks (mandatory patterns,
    bad patterns, newline at EOF, disallowed flags and stray JS files)
    over a given list of files.
    """
    def __init__(self, files_to_lint, file_cache):
        """Constructs a GeneralPurposeLinter object.

        Args:
            files_to_lint: list(str). Filepaths to lint, relative to the
                repository root.
            file_cache: object. Provides read()/readlines() access to the
                contents of the files being linted.
        """
        # Prefix the node binaries dir so spawned tools can find node.
        os.environ['PATH'] = '%s/bin:' % common.NODE_PATH + os.environ['PATH']
        self.files_to_lint = files_to_lint
        self.file_cache = file_cache
    @property
    def all_filepaths(self):
        # All filepaths this linter instance operates on.
        return self.files_to_lint
    def _check_for_mandatory_pattern_in_file(
            self, pattern_list, filepath, failed):
        """Checks that every applicable mandatory pattern occurs at least
        once in the file.

        Args:
            pattern_list: list(dict). Each dict has keys 'regexp',
                'message', 'included_types', 'excluded_files' and
                'excluded_dirs'.
            filepath: str. Path of the file to check.
            failed: bool. Running failure flag carried across files; it is
                returned unchanged unless a pattern is missing here.

        Returns:
            tuple(bool, list(str)). The (possibly updated) failure flag
            and the accumulated error messages.

        Raises:
            Exception. The file could not be read from the cache.
        """
        # Indices of patterns that apply to this file but were not found.
        pattern_found_list = []
        error_messages = []
        try:
            file_content = self.file_cache.readlines(filepath)
        except Exception as e:
            raise Exception('%s %s' % (filepath, e)) from e
        for index, regexp_to_check in enumerate(
                pattern_list):
            if (any(filepath.endswith(
                    allowed_type) for allowed_type in (
                        regexp_to_check['included_types'])) and (
                            not any(
                                filepath.endswith(
                                    pattern) for pattern in (
                                        regexp_to_check['excluded_files'] +
                                        regexp_to_check['excluded_dirs'])))):
                # Assume the pattern is missing until a line matches it.
                pattern_found_list.append(index)
                for line in file_content:
                    if regexp_to_check['regexp'].search(line):
                        # Found: pop the index just appended above.
                        pattern_found_list.pop()
                        break
        if pattern_found_list:
            failed = True
            for pattern_found in pattern_found_list:
                error_message = ('%s --> %s' % (
                    filepath,
                    pattern_list[pattern_found]['message']))
                error_messages.append(error_message)
        return failed, error_messages
    def check_mandatory_patterns(self):
        """Checks that every file contains its mandatory patterns.

        Returns:
            TaskResult. The result of the mandatory-pattern check.
        """
        name = 'Mandatory pattern'
        error_messages = []
        failed = False
        sets_of_patterns_to_match = [
            MANDATORY_PATTERNS_REGEXP, MANDATORY_PATTERNS_JS_REGEXP]
        for filepath in self.all_filepaths:
            for pattern_list in sets_of_patterns_to_match:
                failed, mandatory_error_messages = (
                    self._check_for_mandatory_pattern_in_file(
                        pattern_list, filepath, failed))
                error_messages.extend(mandatory_error_messages)
        return concurrent_task_utils.TaskResult(
            name, failed, error_messages, error_messages)
    def check_bad_patterns(self):
        """Checks the files for disallowed patterns: plain substrings,
        generic regexps, extension-specific regexps and constants-file
        specific strings.

        Returns:
            TaskResult. The result of the bad-pattern check.
        """
        name = 'Bad pattern'
        total_files_checked = 0
        total_error_count = 0
        error_messages = []
        # This linter's own sources legitimately contain the patterns, so
        # they are excluded from the check.
        all_filepaths = [
            filepath for filepath in self.all_filepaths if not (
                filepath.endswith('general_purpose_linter.py') or (
                    filepath.endswith('general_purpose_linter_test.py')))]
        failed = False
        for filepath in all_filepaths:
            file_content = self.file_cache.readlines(filepath)
            total_files_checked += 1
            # Plain substring checks.
            for pattern, error in BAD_PATTERNS.items():
                if is_filepath_excluded_for_bad_patterns_check(
                        pattern, filepath):
                    continue
                for line_num, line in enumerate(file_content):
                    if pattern in line:
                        failed = True
                        error_message = ('%s --> Line %s: %s' % (
                            filepath, line_num + 1,
                            error['message']))
                        error_messages.append(error_message)
                        total_error_count += 1
            # Generic regexp-based checks.
            for regexp in BAD_PATTERNS_REGEXP:
                bad_pattern_check_failed, bad_pattern_error_messages = (
                    check_bad_pattern_in_file(
                        filepath, file_content, regexp))
                if bad_pattern_check_failed:
                    error_messages.extend(bad_pattern_error_messages)
                    total_error_count += 1
            # NOTE(review): only the LAST regexp's result survives in
            # bad_pattern_check_failed below, so 'failed' may miss earlier
            # regexp failures (they are still reflected in
            # total_error_count/error_messages). Also assumes
            # BAD_PATTERNS_REGEXP is non-empty. Confirm this is intended.
            (
                file_type_specific_bad_pattern_failed,
                temp_count, bad_pattern_error_messages) = (
                    check_file_type_specific_bad_pattern(
                        filepath, file_content))
            failed = (
                failed or file_type_specific_bad_pattern_failed or
                bad_pattern_check_failed)
            total_error_count += temp_count
            error_messages.extend(bad_pattern_error_messages)
            # Constants files get an extra set of disallowed strings.
            if filepath.endswith('constants.ts'):
                for pattern, constants in BAD_STRINGS_CONSTANTS.items():
                    for line in file_content:
                        if pattern in line:
                            failed = True
                            error_message = ('%s --> %s' % (
                                filepath,
                                constants['message']))
                            error_messages.append(error_message)
                            total_error_count += 1
        return concurrent_task_utils.TaskResult(
            name, failed, error_messages, error_messages)
    def check_newline_at_eof(self):
        """Checks that each non-empty file ends with a newline.

        Returns:
            TaskResult. The result of the newline-at-EOF check.
        """
        name = 'Newline at EOF'
        error_messages = []
        files_to_lint = self.all_filepaths
        failed = False
        for filepath in files_to_lint:
            file_content = self.file_cache.readlines(filepath)
            file_length = len(file_content)
            # The last line must be a non-empty line ending in '\n'.
            if (
                    file_length >= 1 and
                    not re.search(r'[^\n]\n', file_content[-1])):
                error_message = (
                    '%s --> There should be a single newline at the '
                    'end of file.' % filepath)
                error_messages.append(error_message)
                failed = True
        return concurrent_task_utils.TaskResult(
            name, failed, error_messages, error_messages)
    def check_disallowed_flags(self):
        """Checks that the security-bypass eslint flag only appears in the
        explicitly whitelisted files.

        Returns:
            TaskResult. The result of the disallowed-flags check.
        """
        name = 'Disallow flags'
        disallow_flag = (
            'eslint-disable-next-line oppia/no-bypass-security-phrase')
        error_messages = []
        files_to_lint = self.all_filepaths
        failed = False
        excluded_files = (
            warranted_angular_security_bypasses
            .EXCLUDED_BYPASS_SECURITY_TRUST_FILES)
        # NOTE(review): allowed_files tracks only the most recent match,
        # but it is compared against the current filepath right after the
        # inner loop, so each file is effectively checked against the
        # whitelist individually.
        allowed_files = ''
        for filepath in files_to_lint:
            for excluded_file in excluded_files:
                if excluded_file in filepath:
                    allowed_files = filepath
            if not filepath.endswith('.ts') or filepath == allowed_files:
                continue
            file_content = self.file_cache.read(filepath)
            if disallow_flag in file_content:
                error_message = (
                    '%s --> Please do not use "no-bypass-security-phrase" flag.'
                    ' It is only expected to be used in files listed in'
                    ' warranted_angular_security_bypasses.py' % filepath)
                error_messages.append(error_message)
                failed = True
        return concurrent_task_utils.TaskResult(
            name, failed, error_messages, error_messages)
    def check_extra_js_files(self):
        """Checks that no unexpected .js files live under core/templates
        or extensions (sources there are expected to be TypeScript).

        Returns:
            TaskResult. The result of the extra-JS-files check.
        """
        name = 'Extra JS files'
        error_messages = []
        files_to_lint = self.all_filepaths
        failed = False
        for filepath in files_to_lint:
            if filepath.endswith(
                    ('.js')) and filepath.startswith(
                        ('core/templates', 'extensions')) and (
                            filepath not in build.JS_FILEPATHS_NOT_TO_BUILD
                    ) and not filepath.endswith('protractor.js'):
                error_message = (
                    '%s --> Found extra .js file' % filepath)
                error_messages.append(error_message)
                failed = True
        if failed:
            err_msg = (
                'If you want the above files to be present as js files, '
                'add them to the list JS_FILEPATHS_NOT_TO_BUILD in '
                'build.py. Otherwise, rename them to .ts')
            error_messages.append(err_msg)
        return concurrent_task_utils.TaskResult(
            name, failed, error_messages, error_messages)
    def perform_all_lint_checks(self):
        """Runs every general-purpose check and collects the results.

        Returns:
            list(TaskResult). One result per check, or a single
            placeholder result when there are no files to lint.
        """
        if not self.all_filepaths:
            return [
                concurrent_task_utils.TaskResult(
                    'General purpose lint', False, [],
                    ['There are no files to be checked.'])]
        task_results = [
            self.check_mandatory_patterns(), self.check_bad_patterns(),
            self.check_newline_at_eof(), self.check_extra_js_files(),
            self.check_disallowed_flags()]
        return task_results
def get_linters(files_to_lint, file_cache):
    """Creates the general-purpose linter for the given files.

    Args:
        files_to_lint: list(str). Filepaths to be linted.
        file_cache: object. Cache providing access to file contents.

    Returns:
        tuple(GeneralPurposeLinter, None). The custom linter, and None in
        place of a third-party linter (these checks have no third-party
        counterpart).
    """
    return GeneralPurposeLinter(files_to_lint, file_cache), None
| true | true |
f71b1b5dacb5a5c7e4853d46d9e70c5797445611 | 1,049 | py | Python | prxgt/domain/attribute.py | praxigento/teq_test_db_schema_attrs | 20ec030dc095c644d22631491e066697203d983d | [
"MIT"
] | null | null | null | prxgt/domain/attribute.py | praxigento/teq_test_db_schema_attrs | 20ec030dc095c644d22631491e066697203d983d | [
"MIT"
] | null | null | null | prxgt/domain/attribute.py | praxigento/teq_test_db_schema_attrs | 20ec030dc095c644d22631491e066697203d983d | [
"MIT"
] | null | null | null | __author__ = 'Alex Gusev <alex@flancer64.com>'
import prxgt.const as const
from prxgt.domain.meta.attribute import Attribute as AttributeBase
class Attribute(AttributeBase):
    """Attribute model that carries a value in addition to the name/type
    metadata inherited from the base attribute.
    """
    def __init__(self, name=None, type_=None, value=None):
        super(Attribute, self).__init__(name, type_)
        # Payload of the attribute; its Python type depends on self.type.
        self._value = value
        return
    @property
    def value(self):
        # Current value of the attribute.
        return self._value
    @value.setter
    def value(self, val):
        self._value = val
    @property
    def meta(self):
        """
        META Attribute (name and type only, without the value).
        :return: AttributeBase built from this attribute's name and type.
        """
        return AttributeBase(self._name, self._type)
    def __repr__(self):
        result = super(Attribute, self).__repr__()
        if (self.value is not None) and (self.type == const.ATTR_TYPE_TXT):
            # Text values are truncated to 4 chars: [name@text='valu...']
            result += "=" + repr(self.value[:4] + "...")
        else:
            # Non-text (or None) values are shown in full: [name@type='value']
            result += "=" + repr(self.value)
        return result
import prxgt.const as const
from prxgt.domain.meta.attribute import Attribute as AttributeBase
class Attribute(AttributeBase):
def __init__(self, name=None, type_=None, value=None):
super(Attribute, self).__init__(name, type_)
self._value = value
return
@property
def value(self):
return self._value
@value.setter
def value(self, val):
self._value = val
@property
def meta(self):
return AttributeBase(self._name, self._type)
def __repr__(self):
result = super(Attribute, self).__repr__()
if (self.value is not None) and (self.type == const.ATTR_TYPE_TXT):
result += "=" + repr(self.value[:4] + "...")
else:
result += "=" + repr(self.value)
return result | true | true |
f71b1c721a28949ee40ddf327761ea6fcd2fe45b | 11,435 | py | Python | config/settings/common.py | devermaslinfy/rejot | 8c07a42a73be8422f16874684be3b46ab70b5c18 | [
"BSD-3-Clause"
] | 1 | 2020-07-23T16:21:44.000Z | 2020-07-23T16:21:44.000Z | config/settings/common.py | devermaslinfy/rejot | 8c07a42a73be8422f16874684be3b46ab70b5c18 | [
"BSD-3-Clause"
] | 7 | 2020-02-12T01:21:01.000Z | 2022-03-11T23:25:57.000Z | config/settings/common.py | devermaslinfy/rejot | 8c07a42a73be8422f16874684be3b46ab70b5c18 | [
"BSD-3-Clause"
] | 1 | 2019-07-27T10:00:01.000Z | 2019-07-27T10:00:01.000Z | # -*- coding: utf-8 -*-
"""
Django settings for roojet project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
import environ
from decimal import Decimal
ROOT_DIR = environ.Path(__file__) - 3  # (/a/b/myfile.py - 3 = /)
APPS_DIR = ROOT_DIR.path('roojet')
# django-environ reader; values below fall back to the .env file.
env = environ.Env()
environ.Env.read_env(ROOT_DIR()+'/.env')
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = (
    # Default Django apps:
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Useful template tags:
    # 'django.contrib.humanize',
    # Admin
    'django.contrib.admin',
)
THIRD_PARTY_APPS = (
    'crispy_forms',  # Form layouts
    'allauth',  # registration
    'allauth.account',  # registration
    'allauth.socialaccount',  # registration
    'loginas',  # loginas
    'bootstrap3',
    'djrill',
    'plans',
    'ordered_model',
    'payments',
    'django_cron',
)
# Apps specific for this project go here.
LOCAL_APPS = (
    'roojet.users',  # custom users app
    # Your stuff: custom apps go here
    'roojet.core',
    'roojet.services',
    'roojet.payment_roojet',
    'roojet.mailerlite',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES = (
    # NOTE(review): an old reminder said SecurityMiddleware must be listed
    # first, but djangosecure does not appear below — confirm whether it
    # should be installed or the reminder dropped.
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
    'sites': 'roojet.contrib.sites.migrations'
}
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool("DJANGO_DEBUG", False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
    str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
    ("Nija",'nijap@techversantinfotech.com'),
    ('Scott','scott@roojet.com'),
)
SERVER_EMAIL = 'dev@swapps.co'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
# Credentials are read from the environment so each deployment can supply
# its own; the defaults below reproduce the previous local-development
# setup exactly. Do NOT hardcode production credentials here.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': env('POSTGRES_DB', default='postgres'),
        'USER': env('POSTGRES_USER', default='postgres'),
        'PASSWORD': env('POSTGRES_PASSWORD', default='123456'),
        'HOST': env('POSTGRES_HOST', default='127.0.0.1'),
        'PORT': env('POSTGRES_PORT', default='5432'),
    }
}
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
    {
        # See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
        'DIRS': [
            str(APPS_DIR.path('templates')),
        ],
        'OPTIONS': {
            # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
            'debug': DEBUG,
            # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
            # https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
            'loaders': [
                'django.template.loaders.filesystem.Loader',
                'django.template.loaders.app_directories.Loader',
            ],
            # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
                # Your stuff: custom template context processors go here
            ],
        },
    },
]
# See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
    str(APPS_DIR.path('static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
    'allauth.account.auth_backends.AuthenticationBackend',
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = 'none'
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'core:dashboard'
LOGIN_URL = 'account_login'
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
# LOGGING CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        },
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
        },
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
# django-plans
PLANS_CURRENCY = 'USD'
PLANS_INVOICE_ISSUER = {
    "issuer_name": "Joe Doe Company",
    "issuer_street": "Django street, 34",
    "issuer_zipcode": "123-3444",
    "issuer_city": "Djangoko",
    "issuer_country": "US",  # Must be a country code with 2 characters
    "issuer_tax_number": "1222233334444555",
}
PLANS_TAX = Decimal('0.0')
PLANS_TAX_COUNTRY = 'US'
SEND_PLANS_EMAILS = False
# Payment settings
PAYMENT_HOST = env.str("PAYMENT_HOST", default='')
# NOTE(review): default='' is falsy but not a bool; confirm env.bool
# accepts it and that False was the intended fallback.
PAYMENT_USES_SSL = env.bool("PAYMENT_USES_SSL", default='')
PAYMENT_MODEL = 'payment_roojet.Payment'
# use sandbox
PAYMENT_VARIANTS = {
    'default': ('payments.stripe.StripeProvider', {
        'secret_key': env('STRIPE_API_KEY', default=''),
        'public_key': env('STRIPE_PUBLISHABLE_KEY', default=''),
        'name': 'roojet',
    })
}
# Location of root django.contrib.admin URL, use {% url 'admin:index' %}
ADMIN_URL = r'^admin/'
# Shopify OAuth integration settings.
SHOPIFY_API_KEY = env('SHOPIFY_API_KEY', default='')
SHOPIFY_SECRET = env('SHOPIFY_SECRET', default='')
SHOPIFY_URL = 'myshopify.com'
SHOPIFY_AUTHORIZE_SUFIX = '/admin/oauth/authorize'
SHOPIFY_SCOPES = ['read_products', 'read_orders', 'write_products']
MOE_URL = env('MOE_URL', default='')
CODESHIP_API_KEY = env("CODESHIP_API_KEY", default='')
MANDRILL_API_URL = env("MANDRILL_API_URL", default='')
MANDRILL_API_KEY = env('MANDRILL_API_KEY', default='')
DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL', default='')
# Minimum number of data points considered sufficient for analysis.
ENOUGH_DATA = env.int('ENOUGH_DATA', default=5)
ACCOUNT_ADAPTER = 'roojet.users.adapter.AccountAdapter'
# Jobs run by django-cron.
CRON_CLASSES = [
    "roojet.core.cron.MyCronJob",
]
| 33.338192 | 98 | 0.61784 |
from __future__ import absolute_import, unicode_literals
import environ
from decimal import Decimal
ROOT_DIR = environ.Path(__file__) - 3
APPS_DIR = ROOT_DIR.path('roojet')
env = environ.Env()
environ.Env.read_env(ROOT_DIR()+'/.env')
DJANGO_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'crispy_forms',
'allauth',
'allauth.account',
'allauth.socialaccount',
'loginas',
'bootstrap3',
'djrill',
'plans',
'ordered_model',
'payments',
'django_cron',
)
LOCAL_APPS = (
'roojet.users',
'roojet.core',
'roojet.services',
'roojet.payment_roojet',
'roojet.mailerlite',
)
= DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
MIGRATION_MODULES = {
'sites': 'roojet.contrib.sites.migrations'
}
= env.bool("DJANGO_DEBUG", False)
(APPS_DIR.path('fixtures')),
)
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
= (
("Nija",'nijap@techversantinfotech.com'),
('Scott','scott@roojet.com'),
)
SERVER_EMAIL = 'dev@swapps.co'
= ADMINS
'PASSWORD': '123456',
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
TIME_ZONE = 'UTC'
= 'en-us'
= 1
= True
= True
= True
= [
{
mplate.backends.django.DjangoTemplates',
': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
ebug': DEBUG,
ders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
sors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
]
E_PACK = 'bootstrap3'
= str(ROOT_DIR('staticfiles'))
= '/static/'
(APPS_DIR.path('static')),
)
= (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
= str(APPS_DIR('media'))
= '/media/'
ROOT_URLCONF = 'config.urls'
= 'config.wsgi.application'
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = 'none'
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'core:dashboard'
LOGIN_URL = 'account_login'
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
GGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
PLANS_CURRENCY = 'USD'
PLANS_INVOICE_ISSUER = {
"issuer_name": "Joe Doe Company",
"issuer_street": "Django street, 34",
"issuer_zipcode": "123-3444",
"issuer_city": "Djangoko",
"issuer_country": "US",
"issuer_tax_number": "1222233334444555",
}
PLANS_TAX = Decimal('0.0')
PLANS_TAX_COUNTRY = 'US'
SEND_PLANS_EMAILS = False
PAYMENT_HOST = env.str("PAYMENT_HOST", default='')
PAYMENT_USES_SSL = env.bool("PAYMENT_USES_SSL", default='')
PAYMENT_MODEL = 'payment_roojet.Payment'
PAYMENT_VARIANTS = {
'default': ('payments.stripe.StripeProvider', {
'secret_key': env('STRIPE_API_KEY', default=''),
'public_key': env('STRIPE_PUBLISHABLE_KEY', default=''),
'name': 'roojet',
})
}
ADMIN_URL = r'^admin/'
SHOPIFY_API_KEY = env('SHOPIFY_API_KEY', default='')
SHOPIFY_SECRET = env('SHOPIFY_SECRET', default='')
SHOPIFY_URL = 'myshopify.com'
SHOPIFY_AUTHORIZE_SUFIX = '/admin/oauth/authorize'
SHOPIFY_SCOPES = ['read_products', 'read_orders', 'write_products']
MOE_URL = env('MOE_URL', default='')
CODESHIP_API_KEY = env("CODESHIP_API_KEY", default='')
MANDRILL_API_URL = env("MANDRILL_API_URL", default='')
MANDRILL_API_KEY = env('MANDRILL_API_KEY', default='')
DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL', default='')
ENOUGH_DATA = env.int('ENOUGH_DATA', default=5)
ACCOUNT_ADAPTER = 'roojet.users.adapter.AccountAdapter'
CRON_CLASSES = [
"roojet.core.cron.MyCronJob",
]
| true | true |
f71b1dc503a1e22e10ad5b8a916812053338361e | 3,578 | py | Python | appStore/tests.py | Rubensrvsc/Store | ab3a3aea6518b76d9d5de9c1b9e90236def8954d | [
"MIT"
] | null | null | null | appStore/tests.py | Rubensrvsc/Store | ab3a3aea6518b76d9d5de9c1b9e90236def8954d | [
"MIT"
] | null | null | null | appStore/tests.py | Rubensrvsc/Store | ab3a3aea6518b76d9d5de9c1b9e90236def8954d | [
"MIT"
] | null | null | null | from django.test import TestCase
from rest_framework.test import APIRequestFactory, APIClient
from .models import *
from rest_framework import status
# Create your tests here.
class ProductCategoryTest(TestCase):
@classmethod
def setUpTestData(cls):
ProductCategory.objects.create(name_product_category="Infantil")
ProductCategory.objects.create(name_product_category="Adulto")
ProductCategory.objects.create(name_product_category="Idoso")
ProductType.objects.create(name_product_type="camisa")
ProductType.objects.create(name_product_type="calça")
Size.objects.create(size="GG")
Size.objects.create(size="G")
size = Size.objects.get(id=1)
category_product = ProductCategory.objects.get(id=1)
type_product = ProductType.objects.get(id=1)
size_two = Size.objects.get(id=2)
category_product_two = ProductCategory.objects.get(id=2)
type_product_two = ProductType.objects.get(id=2)
Product.objects.create(
name_product="camisa 1",
price=100,
color="red",
product_category = category_product,
product_size = size,
product_type = type_product
)
Product.objects.create(
name_product="camisa 2",
price=150,
color="green",
product_category = category_product_two,
product_size = size_two,
product_type = type_product_two
)
Product.objects.create(
name_product="camisa 3",
price=190,
color="yellow",
product_category = category_product_two,
product_size = size_two,
product_type = type_product_two
)
Product.objects.create(
name_product="camisa 4",
price=220,
color="magento",
product_category = category_product,
product_size = size,
product_type = type_product
)
def test_create_product_category(self):
factory = APIClient()
product_category = {"name_product_category": "infantil"}
request = factory.post('/createcategory',product_category)
self.assertEquals(request.status_code,status.HTTP_201_CREATED)
def test_list_product_category(self):
factory = APIClient()
products_categories = ProductCategory.objects.all().count()
request = factory.get("/productcategories")
self.assertEquals(products_categories,3)
def test_create_product(self):
factory = APIClient()
size = Size.objects.get(id=1)
category_product = ProductCategory.objects.get(id=1)
type_product = ProductType.objects.get(id=1)
product = {'name_product':"camisa 1",
'price':100,
'color':"red",
'product_category' : category_product.id,
'size': size.id,
'product_type': type_product.id
}
response = factory.post('/create_product',product)
self.assertEquals(response.status_code,status.HTTP_301_MOVED_PERMANENTLY)
def test_get_product(self):
factory = APIClient()
response = factory.get('/products')
self.assertEquals(response.status_code,status.HTTP_200_OK)
def test_search_price_products(self):
factory = APIClient()
response = factory.get('/searchpriceproduct/80/170')
self.assertEquals(response.status_code,status.HTTP_200_OK)
| 31.663717 | 81 | 0.630799 | from django.test import TestCase
from rest_framework.test import APIRequestFactory, APIClient
from .models import *
from rest_framework import status
class ProductCategoryTest(TestCase):
@classmethod
def setUpTestData(cls):
ProductCategory.objects.create(name_product_category="Infantil")
ProductCategory.objects.create(name_product_category="Adulto")
ProductCategory.objects.create(name_product_category="Idoso")
ProductType.objects.create(name_product_type="camisa")
ProductType.objects.create(name_product_type="calça")
Size.objects.create(size="GG")
Size.objects.create(size="G")
size = Size.objects.get(id=1)
category_product = ProductCategory.objects.get(id=1)
type_product = ProductType.objects.get(id=1)
size_two = Size.objects.get(id=2)
category_product_two = ProductCategory.objects.get(id=2)
type_product_two = ProductType.objects.get(id=2)
Product.objects.create(
name_product="camisa 1",
price=100,
color="red",
product_category = category_product,
product_size = size,
product_type = type_product
)
Product.objects.create(
name_product="camisa 2",
price=150,
color="green",
product_category = category_product_two,
product_size = size_two,
product_type = type_product_two
)
Product.objects.create(
name_product="camisa 3",
price=190,
color="yellow",
product_category = category_product_two,
product_size = size_two,
product_type = type_product_two
)
Product.objects.create(
name_product="camisa 4",
price=220,
color="magento",
product_category = category_product,
product_size = size,
product_type = type_product
)
def test_create_product_category(self):
factory = APIClient()
product_category = {"name_product_category": "infantil"}
request = factory.post('/createcategory',product_category)
self.assertEquals(request.status_code,status.HTTP_201_CREATED)
def test_list_product_category(self):
factory = APIClient()
products_categories = ProductCategory.objects.all().count()
request = factory.get("/productcategories")
self.assertEquals(products_categories,3)
def test_create_product(self):
factory = APIClient()
size = Size.objects.get(id=1)
category_product = ProductCategory.objects.get(id=1)
type_product = ProductType.objects.get(id=1)
product = {'name_product':"camisa 1",
'price':100,
'color':"red",
'product_category' : category_product.id,
'size': size.id,
'product_type': type_product.id
}
response = factory.post('/create_product',product)
self.assertEquals(response.status_code,status.HTTP_301_MOVED_PERMANENTLY)
def test_get_product(self):
factory = APIClient()
response = factory.get('/products')
self.assertEquals(response.status_code,status.HTTP_200_OK)
def test_search_price_products(self):
factory = APIClient()
response = factory.get('/searchpriceproduct/80/170')
self.assertEquals(response.status_code,status.HTTP_200_OK)
| true | true |
f71b1e2fd58d30c28062d37e9f1ae54392548686 | 6,411 | py | Python | src/datadog_api_client/v1/model/synthetics_trigger_ci_tests_response.py | DataDog/datadog-api-client-python | de2fc57dbde9acf4b8c8eef94ac29911227a62a2 | [
"Apache-2.0"
] | 32 | 2021-01-07T15:09:56.000Z | 2022-01-30T05:49:23.000Z | src/datadog_api_client/v1/model/synthetics_trigger_ci_tests_response.py | DataDog/datadog-api-client-python | de2fc57dbde9acf4b8c8eef94ac29911227a62a2 | [
"Apache-2.0"
] | 228 | 2020-09-03T14:03:54.000Z | 2022-03-31T20:16:12.000Z | src/datadog_api_client/v1/model/synthetics_trigger_ci_tests_response.py | DataDog/datadog-api-client-python | de2fc57dbde9acf4b8c8eef94ac29911227a62a2 | [
"Apache-2.0"
] | 12 | 2020-09-15T21:36:03.000Z | 2022-03-31T17:13:17.000Z | # Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from datadog_api_client.v1.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
)
def lazy_import():
    """Resolve the referenced model classes at call time and publish them as
    module globals — presumably deferred to keep the module import graph
    acyclic (OpenAPI-generated models reference each other).
    """
    from datadog_api_client.v1.model.synthetics_trigger_ci_test_location import SyntheticsTriggerCITestLocation
    from datadog_api_client.v1.model.synthetics_trigger_ci_test_run_result import SyntheticsTriggerCITestRunResult

    globals().update(
        SyntheticsTriggerCITestLocation=SyntheticsTriggerCITestLocation,
        SyntheticsTriggerCITestRunResult=SyntheticsTriggerCITestRunResult,
    )
class SyntheticsTriggerCITestsResponse(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # Class-level schema metadata consumed by the ModelNormal machinery.
    allowed_values = {}

    validations = {}

    additional_properties_type = None

    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        # lazy_import() makes the referenced model classes available as
        # module globals on first access (deferred, presumably to avoid
        # circular imports between generated models).
        lazy_import()
        return {
            "batch_id": (str,),  # noqa: E501
            "locations": ([SyntheticsTriggerCITestLocation],),  # noqa: E501
            "results": ([SyntheticsTriggerCITestRunResult],),  # noqa: E501
            "triggered_check_ids": ([str],),  # noqa: E501
        }

    discriminator = None

    # Python attribute name -> JSON key in the API payload.
    attribute_map = {
        "batch_id": "batch_id",  # noqa: E501
        "locations": "locations",  # noqa: E501
        "results": "results",  # noqa: E501
        "triggered_check_ids": "triggered_check_ids",  # noqa: E501
    }

    read_only_vars = {}

    _composed_schemas = {}

    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """SyntheticsTriggerCITestsResponse - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                will be type checked and a TypeError will be
                raised if the wrong type is input.
                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                drill down to the model in received_data
                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                are serialized names, as specified in the OpenAPI document.
                False if the variable names in the input data
                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter.
                If passed, type conversion is attempted
                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                classes that we have traveled through so that
                if we see that class again we will not use its
                discriminator again.
                When traveling through a discriminator, the
                composed schema that is
                is traveled through is added to this set.
                For example if Animal has a discriminator
                petType and we pass in "Dog", and the class Dog
                allOf includes Animal, we move through Animal
                once using the discriminator, and pick Dog.
                Then in Dog, we will make an instance of the
                Animal class but this time we won't travel
                through its discriminator because we passed in
                _visited_composed_classes = (Animal,)
            batch_id (str): The public ID of the batch triggered.. [optional]  # noqa: E501
            locations ([SyntheticsTriggerCITestLocation]): List of Synthetics locations.. [optional]  # noqa: E501
            results ([SyntheticsTriggerCITestRunResult]): Information about the tests runs.. [optional]  # noqa: E501
            triggered_check_ids ([str]): The public IDs of the Synthetics test triggered.. [optional]  # noqa: E501
        """
        super().__init__(kwargs)
        # Generated models are keyword-only; reject positional arguments.
        self._check_pos_args(args)

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):  # noqa: E501
        """Helper creating a new instance from a response."""
        self = super(SyntheticsTriggerCITestsResponse, cls)._from_openapi_data(kwargs)
        self._check_pos_args(args)
        return self
| 43.910959 | 117 | 0.61644 |
from datadog_api_client.v1.model_utils import (
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
)
def lazy_import():
from datadog_api_client.v1.model.synthetics_trigger_ci_test_location import SyntheticsTriggerCITestLocation
from datadog_api_client.v1.model.synthetics_trigger_ci_test_run_result import SyntheticsTriggerCITestRunResult
globals()["SyntheticsTriggerCITestLocation"] = SyntheticsTriggerCITestLocation
globals()["SyntheticsTriggerCITestRunResult"] = SyntheticsTriggerCITestRunResult
class SyntheticsTriggerCITestsResponse(ModelNormal):
allowed_values = {}
validations = {}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
lazy_import()
return {
"batch_id": (str,),
"locations": ([SyntheticsTriggerCITestLocation],),
"results": ([SyntheticsTriggerCITestRunResult],),
"triggered_check_ids": ([str],),
}
discriminator = None
attribute_map = {
"batch_id": "batch_id",
"locations": "locations",
"results": "results",
"triggered_check_ids": "triggered_check_ids",
}
read_only_vars = {}
_composed_schemas = {}
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs):
super().__init__(kwargs)
self._check_pos_args(args)
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs):
self = super(SyntheticsTriggerCITestsResponse, cls)._from_openapi_data(kwargs)
self._check_pos_args(args)
return self
| true | true |
f71b1e406ddf2cb9954d1454378cd13f4b53e7f4 | 1,801 | py | Python | nodes/wind.py | JavierRefuerzo/WeatherFlow | 5ea3669edfd7449797cef3184689d6c540383cde | [
"MIT"
] | 4 | 2018-08-26T02:40:09.000Z | 2020-06-21T22:59:04.000Z | nodes/wind.py | JavierRefuerzo/WeatherFlow | 5ea3669edfd7449797cef3184689d6c540383cde | [
"MIT"
] | 9 | 2018-04-20T15:37:46.000Z | 2020-07-15T20:22:31.000Z | nodes/wind.py | JavierRefuerzo/WeatherFlow | 5ea3669edfd7449797cef3184689d6c540383cde | [
"MIT"
] | 2 | 2018-09-16T23:13:39.000Z | 2021-12-20T16:35:20.000Z | #!/usr/bin/env python3
"""
Polyglot v2 node server for WeatherFlow Weather Station data.
Copyright (c) 2018,2019 Robert Paauwe
"""
import polyinterface
import sys
import time
import datetime
import urllib3
import json
import socket
import math
import threading
LOGGER = polyinterface.LOGGER
class WindNode(polyinterface.Node):
    """Polyglot node for WeatherFlow wind data (speed, direction, gust, lull).

    ``drivers`` is defined at class level per the polyinterface node-template
    convention; note that SetUnits mutates this *shared class attribute*, so
    every instance of this node type sees the same unit selection.
    """
    id = 'wind'
    hint = [1,11,4,0]
    units = 'metric'
    drivers = [
            {'driver': 'ST', 'value': 0, 'uom': 32},  # speed
            {'driver': 'GV0', 'value': 0, 'uom': 76},  # direction
            {'driver': 'GV1', 'value': 0, 'uom': 32},  # gust
            {'driver': 'GV2', 'value': 0, 'uom': 76},  # gust direction
            {'driver': 'GV3', 'value': 0, 'uom': 32}  # lull
            ]

    def SetUnits(self, u):
        """Select the unit of measure for the three speed drivers.

        u: 'kph', 'ms', or 'mph'.  Updates the ISY UOM codes for ST/GV1/GV3
        and picks the node definition id ('wind' vs 'windUS').
        NOTE(review): UOM codes 32/40/48 presumably map to km/h, m/s and mph
        in the ISY profile — confirm against the profile files.
        """
        self.units = u
        if (u == 'kph'):
            self.drivers[0]['uom'] = 32
            self.drivers[2]['uom'] = 32
            self.drivers[4]['uom'] = 32
            self.id = 'wind'
        if (u == 'ms'):
            self.drivers[0]['uom'] = 40
            self.drivers[2]['uom'] = 40
            self.drivers[4]['uom'] = 40
            self.id = 'wind'
        elif (u == 'mph'):
            self.drivers[0]['uom'] = 48
            self.drivers[2]['uom'] = 48
            self.drivers[4]['uom'] = 48
            self.id = 'windUS'

    def setDriver(self, driver, value):
        """Push a driver value, converting speed drivers to mph when needed.

        NOTE(review): the division assumes incoming speed values are km/h and
        only converts for the 'mph' profile; no conversion is applied for
        'ms' — confirm the upstream feed supplies m/s in that case.
        """
        if (driver == 'ST' or driver == 'GV1' or driver == 'GV3'):
            if (self.units == 'mph'):
                value = round(value / 1.609344, 2)
        super(WindNode, self).setDriver(driver, value, report=True, force=True)

    def update(self, ws, wd, wg, wl):
        """Update all wind drivers: speed, direction, gust, gust dir, lull.

        Note that GV2 (gust direction) is deliberately fed the same ``wd``
        value as GV0 — no separate gust-direction input exists here.
        """
        self.setDriver('ST', ws)
        self.setDriver('GV0', wd)
        self.setDriver('GV1', wg)
        self.setDriver('GV2', wd)
        self.setDriver('GV3', wl)
| 29.52459 | 79 | 0.513048 |
import polyinterface
import sys
import time
import datetime
import urllib3
import json
import socket
import math
import threading
LOGGER = polyinterface.LOGGER
class WindNode(polyinterface.Node):
id = 'wind'
hint = [1,11,4,0]
units = 'metric'
drivers = [
{'driver': 'ST', 'value': 0, 'uom': 32},
{'driver': 'GV0', 'value': 0, 'uom': 76},
{'driver': 'GV1', 'value': 0, 'uom': 32},
{'driver': 'GV2', 'value': 0, 'uom': 76},
{'driver': 'GV3', 'value': 0, 'uom': 32}
]
def SetUnits(self, u):
self.units = u
if (u == 'kph'):
self.drivers[0]['uom'] = 32
self.drivers[2]['uom'] = 32
self.drivers[4]['uom'] = 32
self.id = 'wind'
if (u == 'ms'):
self.drivers[0]['uom'] = 40
self.drivers[2]['uom'] = 40
self.drivers[4]['uom'] = 40
self.id = 'wind'
elif (u == 'mph'):
self.drivers[0]['uom'] = 48
self.drivers[2]['uom'] = 48
self.drivers[4]['uom'] = 48
self.id = 'windUS'
def setDriver(self, driver, value):
if (driver == 'ST' or driver == 'GV1' or driver == 'GV3'):
if (self.units == 'mph'):
value = round(value / 1.609344, 2)
super(WindNode, self).setDriver(driver, value, report=True, force=True)
def update(self, ws, wd, wg, wl):
self.setDriver('ST', ws)
self.setDriver('GV0', wd)
self.setDriver('GV1', wg)
self.setDriver('GV2', wd)
self.setDriver('GV3', wl)
| true | true |
f71b1e9aa887b542b88ff076fa75931ec6dc85c7 | 2,532 | py | Python | cohesity_management_sdk/models/smb_active_file_path.py | nick6655/management-sdk-python | 88e792cb83e5c24a22af495b220c145d0c45841d | [
"Apache-2.0"
] | 18 | 2019-09-24T17:35:53.000Z | 2022-03-25T08:08:47.000Z | cohesity_management_sdk/models/smb_active_file_path.py | nick6655/management-sdk-python | 88e792cb83e5c24a22af495b220c145d0c45841d | [
"Apache-2.0"
] | 18 | 2019-03-29T19:32:29.000Z | 2022-01-03T23:16:45.000Z | cohesity_management_sdk/models/smb_active_file_path.py | nick6655/management-sdk-python | 88e792cb83e5c24a22af495b220c145d0c45841d | [
"Apache-2.0"
] | 16 | 2019-02-27T06:54:12.000Z | 2021-11-16T18:10:24.000Z | # -*- coding: utf-8 -*-
# Copyright 2021 Cohesity Inc.
import cohesity_management_sdk.models.smb_active_session
class SmbActiveFilePath(object):
    """Implementation of the 'SmbActiveFilePath' model.

    Specifies a file path in an SMB view that has active sessions and opens.

    Attributes:
        active_sessions (list of SmbActiveSession): Specifies the sessions
            where the file is open.
        file_path (string): Specifies the filepath in the view.
        view_id (long|int): Specifies the id of the View assigned by the
            Cohesity Cluster. Either viewName or viewId must be specified.
        view_name (string): Specifies the name of the View.
    """

    # Mapping from Model property names to API (JSON) property names.
    _names = {
        "active_sessions":'activeSessions',
        "file_path":'filePath',
        "view_id":'viewId',
        "view_name":'viewName'
    }

    def __init__(self,
                 active_sessions=None,
                 file_path=None,
                 view_id=None,
                 view_name=None):
        """Constructor for the SmbActiveFilePath class"""

        # Initialize members of the class
        self.active_sessions = active_sessions
        self.file_path = file_path
        self.view_id = view_id
        self.view_name = view_name

    @classmethod
    def from_dictionary(cls,
                        dictionary):
        """Creates an instance of this model from a dictionary

        Args:
            dictionary (dictionary): A dictionary representation of the object as
            obtained from the deserialization of the server's response. The keys
            MUST match property names in the API description.

        Returns:
            object: An instance of this structure class, or None when the
            input dictionary is None.
        """
        if dictionary is None:
            return None

        # Use an identity check ('is not None', not '!= None') so only a
        # missing/None key skips deserialization; an explicit empty list
        # still yields an empty active_sessions list.
        active_sessions = None
        if dictionary.get('activeSessions') is not None:
            active_sessions = [
                cohesity_management_sdk.models.smb_active_session.SmbActiveSession.from_dictionary(structure)
                for structure in dictionary.get('activeSessions')
            ]
        file_path = dictionary.get('filePath')
        view_id = dictionary.get('viewId')
        view_name = dictionary.get('viewName')

        # Return an object of this model
        return cls(active_sessions,
                   file_path,
                   view_id,
                   view_name)
| 32.461538 | 133 | 0.625197 |
import cohesity_management_sdk.models.smb_active_session
class SmbActiveFilePath(object):
_names = {
"active_sessions":'activeSessions',
"file_path":'filePath',
"view_id":'viewId',
"view_name":'viewName'
}
def __init__(self,
active_sessions=None,
file_path=None,
view_id=None,
view_name=None):
self.active_sessions = active_sessions
self.file_path = file_path
self.view_id = view_id
self.view_name = view_name
@classmethod
def from_dictionary(cls,
dictionary):
if dictionary is None:
return None
active_sessions = None
if dictionary.get('activeSessions') != None:
active_sessions = list()
for structure in dictionary.get('activeSessions'):
active_sessions.append(cohesity_management_sdk.models.smb_active_session.SmbActiveSession.from_dictionary(structure))
file_path = dictionary.get('filePath')
view_id = dictionary.get('viewId')
view_name = dictionary.get('viewName')
return cls(active_sessions,
file_path,
view_id,
view_name)
| true | true |
f71b206d98b3712aa33781c5090859c94fbf8680 | 373 | py | Python | source/Objects/XTRA_Solvers_Class.py | afarahi/XTRA | 6550b216264abaa3ed705835aca0981f2934e069 | [
"MIT"
] | 2 | 2018-11-01T12:38:56.000Z | 2019-10-22T07:02:54.000Z | source/Objects/XTRA_Solvers_Class.py | afarahi/XTRA | 6550b216264abaa3ed705835aca0981f2934e069 | [
"MIT"
] | null | null | null | source/Objects/XTRA_Solvers_Class.py | afarahi/XTRA | 6550b216264abaa3ed705835aca0981f2934e069 | [
"MIT"
class Solvers_Class():
    """Thin facade over the XTRA solver modules (imported lazily inside each
    method so the heavy solver code is only loaded when actually used)."""

    def __init__(self):
        pass

    def ProperDistanceTabulate(self, Input_Param, z_max):
        """Tabulate proper distances up to redshift ``z_max``."""
        from Distance_Solver import Proper_Distance_Tabulate
        Proper_Distance_Tabulate(Input_Param, z_max)

    def LxTxSolver(self, Halos):
        """Run the Lx-Tx solver over the given halo catalogue."""
        from LxTx_Solver import LxTx_Solver
        LxTx_Solver().solve(Halos)
| 23.3125 | 60 | 0.691689 | class Solvers_Class():
def __init__(self):
pass
def ProperDistanceTabulate(self, Input_Param, z_max):
from Distance_Solver import Proper_Distance_Tabulate
Proper_Distance_Tabulate(Input_Param, z_max)
def LxTxSolver(self, Halos):
from LxTx_Solver import LxTx_Solver
solver = LxTx_Solver()
solver.solve(Halos)
| true | true |
f71b20c6a58525d0ad6e5a5b0ad92dbbdf9f5849 | 1,599 | py | Python | user/tests.py | Vr3n/django_react_cart_system | f6d2572b640f711ff9c7020641051e3f92c3dd59 | [
"MIT"
] | null | null | null | user/tests.py | Vr3n/django_react_cart_system | f6d2572b640f711ff9c7020641051e3f92c3dd59 | [
"MIT"
] | 3 | 2021-06-18T15:13:46.000Z | 2021-06-18T18:24:43.000Z | user/tests.py | Vr3n/django_react_cart_system | f6d2572b640f711ff9c7020641051e3f92c3dd59 | [
"MIT"
] | null | null | null | from django.contrib.auth import get_user_model
from django.test import TestCase
# Create your tests here.
class UserManagersTests(TestCase):
    """Exercise the custom user manager's create_user / create_superuser."""

    def test_create_user(self):
        """A regular user is active, non-staff, and requires a valid email."""
        user_model = get_user_model()
        user = user_model.objects.create_user(
            email="normal@user.com", password="testing@123")
        self.assertEqual(user.email, 'normal@user.com')
        self.assertTrue(user.is_active)
        self.assertFalse(user.is_staff)
        self.assertFalse(user.is_superuser)
        # The custom user model may not define `username`; skip if absent.
        try:
            self.assertIsNotNone(user.username)
            self.assertIsNotNone(user.email)
        except AttributeError:
            pass
        with self.assertRaises(TypeError):
            user_model.objects.create_user()
        with self.assertRaises(TypeError):
            user_model.objects.create_user(email='')
        with self.assertRaises(ValueError):
            user_model.objects.create_user(email='', password="testing@123")

    def test_create_superuser(self):
        """A superuser is active, staff and superuser; empty email raises."""
        user_model = get_user_model()
        admin = user_model.objects.create_superuser(
            email="admin@user.com", password="testing@123")
        self.assertEqual(admin.email, 'admin@user.com')
        self.assertTrue(admin.is_active)
        self.assertTrue(admin.is_staff)
        self.assertTrue(admin.is_superuser)
        try:
            self.assertIsNotNone(admin.username)
            self.assertIsNotNone(admin.email)
        except AttributeError:
            pass
        with self.assertRaises(ValueError):
            user_model.objects.create_user(
                email='', password="testing@123", is_superuser=False)
| 34.76087 | 70 | 0.642276 | from django.contrib.auth import get_user_model
from django.test import TestCase
class UserManagersTests(TestCase):
def test_create_user(self):
User = get_user_model()
user = User.objects.create_user(
email="normal@user.com", password="testing@123")
self.assertEqual(user.email, 'normal@user.com')
self.assertTrue(user.is_active)
self.assertFalse(user.is_staff)
self.assertFalse(user.is_superuser)
try:
self.assertIsNotNone(user.username)
self.assertIsNotNone(user.email)
except AttributeError:
pass
with self.assertRaises(TypeError):
User.objects.create_user()
with self.assertRaises(TypeError):
User.objects.create_user(email='')
with self.assertRaises(ValueError):
User.objects.create_user(email='', password="testing@123")
def test_create_superuser(self):
User = get_user_model()
admin = User.objects.create_superuser(
email="admin@user.com", password="testing@123")
self.assertEqual(admin.email, 'admin@user.com')
self.assertTrue(admin.is_active)
self.assertTrue(admin.is_staff)
self.assertTrue(admin.is_superuser)
try:
self.assertIsNotNone(admin.username)
self.assertIsNotNone(admin.email)
except AttributeError:
pass
with self.assertRaises(ValueError):
User.objects.create_user(
email='', password="testing@123", is_superuser=False)
| true | true |
f71b21a5dd538deb30896fbf0e23be55c42a7ec4 | 22 | py | Python | tsu/__init__.py | KiraDank/tsu | 7a2c6508daf6d797e8371acf3b473d0f92cb11c3 | [
"MIT"
] | 251 | 2019-08-18T17:19:19.000Z | 2022-03-31T20:38:20.000Z | tsu/__init__.py | KiraDank/tsu | 7a2c6508daf6d797e8371acf3b473d0f92cb11c3 | [
"MIT"
] | 37 | 2019-09-05T08:03:00.000Z | 2022-01-27T12:49:50.000Z | tsu/__init__.py | KiraDank/tsu | 7a2c6508daf6d797e8371acf3b473d0f92cb11c3 | [
"MIT"
] | 20 | 2019-09-01T15:29:56.000Z | 2022-01-27T03:03:18.000Z | __version__ = '3.1.2'
| 11 | 21 | 0.636364 | __version__ = '3.1.2'
| true | true |
f71b21c32e28c3f04700fe8235f89c2fac3de98b | 3,420 | py | Python | ays_api/app.py | Jumpscale/g8cockpit | 5e9ede183f77fec4adff9cd038567173b68677f0 | [
"Apache-2.0"
] | null | null | null | ays_api/app.py | Jumpscale/g8cockpit | 5e9ede183f77fec4adff9cd038567173b68677f0 | [
"Apache-2.0"
] | 332 | 2016-05-24T10:51:45.000Z | 2021-09-08T12:28:50.000Z | ays_api/app.py | Jumpscale/g8cockpit | 5e9ede183f77fec4adff9cd038567173b68677f0 | [
"Apache-2.0"
] | 1 | 2016-08-02T07:52:49.000Z | 2016-08-02T07:52:49.000Z | from flask import Flask, send_from_directory, make_response, request, send_file, jsonify
import werkzeug.exceptions
from jose import jwt, exceptions
import wtforms_json
from .ays import ays_api
from .oauth import oauth_api
from .webhooks import webhooks_api
from .cockpit import cockpit_api
from JumpScale import j
app = Flask(__name__)
app.config["WTF_CSRF_ENABLED"] = False
wtforms_json.init()
logger = j.logger.get('j.cockpit.api')
def init_blueprints():
    """Register every API blueprint; in production, guard the AYS and
    cockpit blueprints with the JWT middleware first."""
    if app.config.get('production', True):
        print('JWT middleware enable')
        ays_api.before_request(process_jwt_token)
        cockpit_api.before_request(process_jwt_token)
    # Registration order preserved from the original implementation.
    for blueprint in (ays_api, oauth_api, webhooks_api, cockpit_api):
        app.register_blueprint(blueprint)
def process_jwt_token():
    """Reject the request with 401 unless it carries a valid itsyou.online JWT.

    The token is read from the ``jwt`` cookie, falling back to the
    ``Authorization`` header, and must be a ``Bearer`` token whose signature,
    audience and issuer verify.  Returning ``None`` lets Flask continue with
    the request; returning a response aborts it with that response.
    """
    # Cookie takes precedence over the Authorization header.
    authorization = request.cookies.get(
        'jwt',
        request.headers.get(
            'Authorization',
            None
        ))
    if authorization is None:
        response = make_response('Not JWT token')
        response.status_code = 401
        return response
    msg = ""
    ss = authorization.split(' ', 1)  # expected shape: "Bearer <token>"
    if len(ss) != 2:
        msg = "Unauthorized"
    else:
        type, token = ss[0], ss[1]  # NOTE: `type` shadows the builtin
        if type.lower() == 'bearer':
            try:
                # NOTE(review): the signing algorithm is taken from the
                # *unverified* token header; consider pinning the allowed
                # algorithm list instead of trusting the header.
                headers = jwt.get_unverified_header(token)
                payload = jwt.decode(
                    token,
                    app.config['oauth'].get('jwt_key'),
                    algorithms=[headers['alg']],
                    audience=app.config['oauth']['organization'],
                    issuer='itsyouonline')
                # case JWT is for an organization
                if 'globalid' in payload and payload['globalid'] == app.config['oauth'].get('organization'):
                    return
                # case JWT is for a user
                if 'scope' in payload and 'user:memberof:%s' % app.config[
                        'oauth'].get('organization') in payload['scope']:
                    return
                msg = 'Unauthorized'
            except exceptions.ExpiredSignatureError as e:
                msg = 'Your JWT has expired'
            except exceptions.JOSEError as e:
                msg = 'JWT Error: %s' % str(e)
            except Exception as e:
                msg = 'Unexpected error : %s' % str(e)
        else:
            msg = 'Your JWT is invalid'
    # Any fall-through is a rejection: log it and answer 401.
    logger.error(msg)
    response = make_response(msg)
    response.status_code = 401
    return response
@app.route('/apidocs/<path:path>')
def send_js(path):
    """Serve a static asset from the bundled `apidocs` directory."""
    docs_root = j.sal.fs.joinPaths(j.sal.fs.getParent(__file__), 'apidocs')
    return send_from_directory(docs_root, path)
@app.route('/', methods=['GET'])
def home():
    """Serve the landing page shipped next to this module."""
    index_path = j.sal.fs.joinPaths(j.sal.fs.getParent(__file__), 'index.html')
    return send_file(index_path)
# Error handlers mapping exception types to JSON responses.  The repeated
# function name `handle_bad_request` is kept for backward compatibility:
# Flask registers each function at decoration time, so the later definitions
# shadowing the earlier module attribute does not affect dispatch.
@app.errorhandler(j.exceptions.NotFound)
def handle_bad_request(e):
    """JumpScale NotFound -> 404 JSON."""
    return jsonify(error=e.msg), 404


@app.errorhandler(j.exceptions.AYSNotFound)
def handle_bad_request(e):  # noqa: F811
    """AYS NotFound -> 404 JSON."""
    return jsonify(error=e.msg), 404


@app.errorhandler(j.exceptions.Timeout)
def handle_bad_request(e):  # noqa: F811
    """JumpScale Timeout -> 408 JSON."""
    return jsonify(error=e.msg), 408


@app.errorhandler(j.exceptions.BaseJSException)
def handle_bad_request(e):  # noqa: F811
    """Any other JumpScale exception -> 500 JSON."""
    return jsonify(error=e.msg), 500


@app.errorhandler(werkzeug.exceptions.HTTPException)
def handle_bad_request(e):  # noqa: F811
    """Werkzeug HTTP errors -> JSON with the error's own status code.

    Bug fix: HTTPException has no `.msg` attribute, so the original
    `e.msg` raised AttributeError and turned every HTTP error into a 500.
    Use `.description` and `.code` instead.
    """
    return jsonify(error=e.description), e.code
import werkzeug.exceptions
from jose import jwt, exceptions
import wtforms_json
from .ays import ays_api
from .oauth import oauth_api
from .webhooks import webhooks_api
from .cockpit import cockpit_api
from JumpScale import j
app = Flask(__name__)
app.config["WTF_CSRF_ENABLED"] = False
wtforms_json.init()
logger = j.logger.get('j.cockpit.api')
def init_blueprints():
if app.config.get('production',True):
print('JWT middleware enable')
ays_api.before_request(process_jwt_token)
cockpit_api.before_request(process_jwt_token)
app.register_blueprint(ays_api)
app.register_blueprint(oauth_api)
app.register_blueprint(webhooks_api)
app.register_blueprint(cockpit_api)
def process_jwt_token():
authorization = request.cookies.get(
'jwt',
request.headers.get(
'Authorization',
None
))
if authorization is None:
response = make_response('Not JWT token')
response.status_code = 401
return response
msg = ""
ss = authorization.split(' ', 1)
if len(ss) != 2:
msg = "Unauthorized"
else:
type, token = ss[0], ss[1]
if type.lower() == 'bearer':
try:
headers = jwt.get_unverified_header(token)
payload = jwt.decode(
token,
app.config['oauth'].get('jwt_key'),
algorithms=[headers['alg']],
audience=app.config['oauth']['organization'],
issuer='itsyouonline')
if 'globalid' in payload and payload['globalid'] == app.config['oauth'].get('organization'):
return
if 'scope' in payload and 'user:memberof:%s' % app.config[
'oauth'].get('organization') in payload['scope']:
return
msg = 'Unauthorized'
except exceptions.ExpiredSignatureError as e:
msg = 'Your JWT has expired'
except exceptions.JOSEError as e:
msg = 'JWT Error: %s' % str(e)
except Exception as e:
msg = 'Unexpected error : %s' % str(e)
else:
msg = 'Your JWT is invalid'
logger.error(msg)
response = make_response(msg)
response.status_code = 401
return response
@app.route('/apidocs/<path:path>')
def send_js(path):
root = j.sal.fs.joinPaths(j.sal.fs.getParent(__file__), 'apidocs')
return send_from_directory(root, path)
@app.route('/', methods=['GET'])
def home():
path = j.sal.fs.joinPaths(j.sal.fs.getParent(__file__), 'index.html')
return send_file(path)
@app.errorhandler(j.exceptions.NotFound)
def handle_bad_request(e):
return jsonify(error=e.msg), 404
@app.errorhandler(j.exceptions.AYSNotFound)
def handle_bad_request(e):
return jsonify(error=e.msg), 404
@app.errorhandler(j.exceptions.Timeout)
def handle_bad_request(e):
return jsonify(error=e.msg), 408
@app.errorhandler(j.exceptions.BaseJSException)
def handle_bad_request(e):
return jsonify(error=e.msg), 500
@app.errorhandler(werkzeug.exceptions.HTTPException)
def handle_bad_request(e):
return jsonify(error=e.msg), e.code | true | true |
f71b21ee28dbbf48f0569576807de49f2234149c | 3,475 | py | Python | include/Node.py | Yperidis/DAGFLOW | 84ea30eea480e095dc8e24f80ca84a733170a365 | [
"MIT"
] | null | null | null | include/Node.py | Yperidis/DAGFLOW | 84ea30eea480e095dc8e24f80ca84a733170a365 | [
"MIT"
] | null | null | null | include/Node.py | Yperidis/DAGFLOW | 84ea30eea480e095dc8e24f80ca84a733170a365 | [
"MIT"
] | null | null | null | import random
import networkx as nx
class Node:
    """A node of a command-hierarchy tree.

    Each node knows its parent and children, and caches derived views of the
    hierarchy in instance lists: its line of command upwards
    (``cmndlineup``, via CommandLineUp) and its commanded subtree downwards
    (``cmndnodes``, via CommandedNodes).
    """

    def __init__(self, ID=None, message=None, highercmnd=None, lev=None,
                 parent=None, childNo=None, children_list=None,
                 cmndlineup=None, cmndnodes=None, ancestor_list=None,
                 descendant_list=None):
        self.ID = ID  # an integer ascending from 0 to the number of the nodes minus 1
        self.message = message  # some integer number representing an initial message
        self.parent = parent  # parent Node (None for the root)
        # an integer ascending from 1 to the height minus 1 indicating the
        # value of knowing what superiors know
        self.highercmnd = highercmnd
        self.lev = lev  # reflects the depth in a tree structure
        # The childNo/children_list/cmndlineup/cmndnodes/ancestor_list/
        # descendant_list arguments are accepted for interface compatibility
        # but ignored: the containers below always start empty.
        self.children_list = []
        self.cmndlineup = []  # line of command (ancestor IDs); filled by CommandLineUp
        self.cmndnodes = []  # whole commanded subtree; filled by CommandedNodes
        self.ancestor_list = []  # a list of the node's direct ancestors
        self.descendant_list = []  # a list of the node's direct descendants

    def MakeChildren(self, child):
        '''
        Register a child node. Needs the node in question.
        '''
        self.children_list.append(child)

    def HasChildren(self):
        '''
        Boolean determining whether the node in question has a child or not.
        '''
        return len(self.children_list) != 0

    def MakeDescendants(self, descendant):
        '''
        Register a direct descendant node. Needs the node in question.
        '''
        self.descendant_list.append(descendant)

    def children(self):
        return self.children_list

    def parent(self):
        # NOTE: dead code — the instance attribute ``self.parent`` assigned
        # in __init__ shadows this method on every instance; kept only so the
        # class interface is unchanged.
        return self.parent

    def CommandLineUp(self, l):
        '''
        Gather the IDs of the node's ancestors, walking at most ``l`` levels
        up (l=1 sees only the parent; l=n-1 lets a leaf see all ancestors).
        Results are appended to ``self.cmndlineup``, which is returned.
        Note: repeated calls keep appending to the same cached list.
        '''
        temp = self.parent
        for _ in range(l):
            if temp is None:
                break  # reached the root
            self.cmndlineup.append(temp.ID)
            temp = temp.parent
        return self.cmndlineup

    def CommandedNodes(self):
        '''
        Attach to ``self.cmndnodes`` the node's whole subtree in breadth-first
        order (the node in question is not included).

        Bug fix: the original assigned ``self.cmndnodes = self.children_list``,
        *aliasing* the children list, so the breadth-first expansion appended
        every grandchild into ``children_list`` as well.  A copy is taken now.
        '''
        self.cmndnodes = list(self.children_list)  # copy — do NOT alias children_list
        ii = 0  # ATTENTION! assumes the graph is a tree (no cycles)
        while ii < len(self.cmndnodes):
            for childchild in self.cmndnodes[ii].children_list:
                self.cmndnodes.append(childchild)
            ii += 1

    def Efficiency(self, levin, levcur):
        '''
        Compute the exponential drop in efficiency with level of a node
        substitution from another in its subtree; the result is stored in
        ``self.q``.
        levin: Int>=0. The substitute's initial level.
        levcur: Int>=0. The level on which the candidate node is called to substitute.
        '''
        self.q = 1 / 2.**(levin - levcur)
import networkx as nx
class Node:
def __init__(self, ID=None, message=None, highercmnd=None, lev=None,
parent=None, childNo=None, children_list=None,
cmndlineup=None, cmndnodes=None, ancestor_list=None,
descendant_list=None):
self.ID = ID
self.message = message
self.parent = parent
self.highercmnd = highercmnd
self.lev = lev
self.children_list = []
self.cmndlineup = []
self.cmndnodes = []
self.ancestor_list = []
self.descendant_list = [] # a list of the node's direct descendants
def MakeChildren(self, child):
self.children_list.append(child)
def HasChildren(self):
if len(self.children_list) != 0:
return True
else:
return False
def MakeDescendants(self, descendant):
self.descendant_list.append(descendant)
def children(self):
return self.children_list
def parent(self):
return self.parent
def CommandLineUp(self, l):
temp = self.parent
for i in range(l):
if temp is not None:
self.cmndlineup.append(temp.ID)
temp = temp.parent
else:
break
return self.cmndlineup
def CommandedNodes(self):
self.cmndnodes = self.children_list
ncmndnodes = len(self.cmndnodes)
ii = 0
while ii < ncmndnodes:
child = self.cmndnodes[ii]
for childchild in child.children_list:
self.cmndnodes.append(childchild)
ncmndnodes = len(self.cmndnodes)
ii += 1
def Efficiency(self, levin, levcur):
self.q = 1/2.**(levin-levcur) | true | true |
f71b22fcc5f5f5eba2d54b6f00ae8b7fb89c6a76 | 2,334 | py | Python | cltk/utils/frequency.py | Akash-Pardasani/cltk | 2a430e9407452b06f44847202ebce8446007d96b | [
"MIT"
] | null | null | null | cltk/utils/frequency.py | Akash-Pardasani/cltk | 2a430e9407452b06f44847202ebce8446007d96b | [
"MIT"
] | null | null | null | cltk/utils/frequency.py | Akash-Pardasani/cltk | 2a430e9407452b06f44847202ebce8446007d96b | [
"MIT"
] | 1 | 2019-06-16T06:41:47.000Z | 2019-06-16T06:41:47.000Z | """This module's main class reads a text corpus and assembles a list of n
most common words."""
__author__ = 'Kyle P. Johnson <kyle@kyle-p-johnson.com>'
__license__ = 'MIT License. See LICENSE.'
from cltk.corpus.utils.formatter import assemble_tlg_author_filepaths
from cltk.corpus.utils.formatter import assemble_phi5_author_filepaths
from cltk.corpus.utils.formatter import tlg_plaintext_cleanup
from cltk.corpus.utils.formatter import phi5_plaintext_cleanup
from cltk.utils.cltk_logger import logger
from collections import Counter
from nltk.tokenize.punkt import PunktLanguageVars
class Frequency:
    """Methods for making word frequency lists."""

    def __init__(self):
        """Set up the Punkt tokenizer and the punctuation characters that
        are stripped before tokenization."""
        self.punkt = PunktLanguageVars()
        self.punctuation = [',', '.', ';', ':', '"', "'", '?', '-', '!', '*', '[', ']', '{', '}']

    def counter_from_str(self, string):
        """Build a word-frequency ``Counter`` from a single string,
        removing punctuation characters first."""
        cleaned = ''.join(ch for ch in string if ch not in self.punctuation)
        tokens = self.punkt.word_tokenize(cleaned)
        return Counter(tokens)

    def counter_from_corpus(self, corpus):
        """Build a word-frequency ``Counter`` for an entire corpus.

        :param corpus: either 'phi5' or 'tlg'.

        Bug fix: ``_assemble_corpus_string`` yields one string per file, so
        the generator must be joined into a single string before the
        character-level punctuation filter in ``counter_from_str`` can work;
        previously the generator object itself was passed, which silently
        skipped punctuation removal.

        TODO: Make this count iteratively, not all at once.
        """
        assert corpus in ['phi5', 'tlg'], \
            "Corpus '{0}' not available. Choose from 'phi5' or 'tlg'.".format(corpus)
        # Join with a space so the last word of one file cannot merge with
        # the first word of the next.
        all_strings = ' '.join(self._assemble_corpus_string(corpus=corpus))
        return self.counter_from_str(all_strings)

    def _assemble_corpus_string(self, corpus):
        """Yield the cleaned, lower-cased text of every file in the corpus,
        one string per file."""
        if corpus == 'phi5':
            filepaths = assemble_phi5_author_filepaths()
            file_cleaner = phi5_plaintext_cleanup
        elif corpus == 'tlg':
            filepaths = assemble_tlg_author_filepaths()
            file_cleaner = tlg_plaintext_cleanup
        for filepath in filepaths:
            with open(filepath) as file_open:
                file_read = file_open.read().lower()
            yield file_cleaner(file_read)
| 39.559322 | 97 | 0.670094 |
__author__ = 'Kyle P. Johnson <kyle@kyle-p-johnson.com>'
__license__ = 'MIT License. See LICENSE.'
from cltk.corpus.utils.formatter import assemble_tlg_author_filepaths
from cltk.corpus.utils.formatter import assemble_phi5_author_filepaths
from cltk.corpus.utils.formatter import tlg_plaintext_cleanup
from cltk.corpus.utils.formatter import phi5_plaintext_cleanup
from cltk.utils.cltk_logger import logger
from collections import Counter
from nltk.tokenize.punkt import PunktLanguageVars
class Frequency:
    """Compute word-frequency counts for strings or whole CLTK corpora."""

    def __init__(self):
        """Prepare the Punkt tokenizer and the punctuation to strip."""
        self.punkt = PunktLanguageVars()
        self.punctuation = [',', '.', ';', ':', '"', "'", '?', '-', '!', '*', '[', ']', '{', '}']

    def counter_from_str(self, string):
        """Return a ``Counter`` of token frequencies for ``string``,
        with punctuation characters removed before tokenization."""
        depunctuated = ''.join(char for char in string
                               if char not in self.punctuation)
        return Counter(self.punkt.word_tokenize(depunctuated))

    def counter_from_corpus(self, corpus):
        """Return token frequencies over an entire corpus ('phi5'/'tlg')."""
        assert corpus in ['phi5', 'tlg'], \
            "Corpus '{0}' not available. Choose from 'phi5' or 'tlg'.".format(corpus)
        return self.counter_from_str(self._assemble_corpus_string(corpus=corpus))

    def _assemble_corpus_string(self, corpus):
        """Yield the lower-cased, cleaned text of each file in ``corpus``."""
        if corpus == 'phi5':
            filepaths = assemble_phi5_author_filepaths()
            file_cleaner = phi5_plaintext_cleanup
        elif corpus == 'tlg':
            filepaths = assemble_tlg_author_filepaths()
            file_cleaner = tlg_plaintext_cleanup
        for filepath in filepaths:
            with open(filepath) as file_open:
                yield file_cleaner(file_open.read().lower())
| true | true |
f71b22feb4672df4c2b53105a50c79c3c5c48548 | 3,225 | py | Python | Midterm/server.py | eebbk-zhou/Web-Design-1 | 98703942dbb58e15c38a07de551a78398e2a7ab1 | [
"Unlicense"
] | null | null | null | Midterm/server.py | eebbk-zhou/Web-Design-1 | 98703942dbb58e15c38a07de551a78398e2a7ab1 | [
"Unlicense"
] | null | null | null | Midterm/server.py | eebbk-zhou/Web-Design-1 | 98703942dbb58e15c38a07de551a78398e2a7ab1 | [
"Unlicense"
] | null | null | null | from bottle import route, get, post
from bottle import run, debug
from bottle import request, response, redirect, template
from bottle import static_file
import dataset
import json
from bottle import default_app
#http://localhost:8090
@route("/")
def get_midterm():
    """Render the home page with every row of the ``todo`` table."""
    db = dataset.connect('sqlite:///todo_list.db')
    rows = db.get_table('todo').find()
    items = [dict(row) for row in rows]
    return template("Midterm", items=items)
# BUG FIX: both route patterns were missing the closing ``>`` of the
# wildcard, and the handler took no ``filename`` parameter even though
# the routes capture one — bottle passes captured wildcards as keyword
# arguments, so every request raised TypeError.
@route("/static/png/<filename:re:.*\.png>")
@route("/image/<filename:re:.*\.png>")
def get_picture(filename="the_boat.png"):
    """Serve a PNG from the ./static directory.

    :param filename: PNG name captured from the URL; defaults to the
        previously hard-coded image for backward compatibility.
        (bottle's static_file refuses paths that escape ``root``.)
    """
    return static_file(filename=filename, root="static", mimetype="image/png")
@route("/static/<filename:path>")
def get_static(filename):
    """Serve an arbitrary asset from the ./static directory.

    NOTE(review): relies on bottle's ``static_file`` to reject paths that
    escape the root directory — confirm for the bottle version in use.
    """
    return static_file(filename=filename, root="static")
@route("/delete/<id>")
def get_delete(id):
    """Delete the row whose primary key is ``id``, then confirm.

    A database failure answers 409 with the error text; a non-numeric
    ``id`` raises ValueError before the try block, exactly as before.
    """
    id = int(id)
    try:
        db = dataset.connect('sqlite:///todo_list.db')
        table = db.get_table('todo')
        print(f"We need to delete id# {id}...")
        table.delete(id=id)
    except Exception as exc:
        response.status = "409 Bad Request:" + str(exc)
        return
    return template("deleted", id=id)
@get("/insert")
def get_insert():
    """Render the blank "insert a course" form."""
    return template("insert")
@post("/insert")
def post_insert():
    """Insert a new course row from the submitted form, then go home.

    Any failure — including a missing form field, whose ``.strip()``
    raises inside the try block — answers 409 with the error text.
    """
    course_number = request.forms.get('course_number')
    print("course_number=", course_number)
    course_name = request.forms.get('course_name')
    try:
        record = {
            'course_number': course_number.strip(),
            'course_name': course_name.strip(),
            'done': 1,
        }
        db = dataset.connect('sqlite:///todo_list.db')
        db.get_table('todo').insert(record)
    except Exception as exc:
        response.status = "409 Bad Request:" + str(exc)
        return
    return redirect('/')
@get("/edit/<id>")
def get_edit(id):
    """Look up exactly one row by ``id`` and render its edit form.

    Zero or multiple matches answer 404; any other failure answers 409.
    """
    try:
        db = dataset.connect('sqlite:///todo_list.db')
        matches = list(db.get_table('todo').find(id=id))
        if len(matches) != 1:
            response.status = "404 Not Found:" + str(id)
            return
        items = [dict(row) for row in matches]
        print(items)
        print(items[0])
    except Exception as exc:
        print(exc)
        response.status = "409 Bad Request:" + str(exc)
        return
    return template("edit", item=items[0])
@post("/edit")
def post_edit():
    """Apply the submitted form values to the row matching the posted id."""
    id = int(request.forms.get('id'))
    course_number = request.forms.get('course_number')
    course_name = request.forms.get('course_name')
    print("course_number=", course_number)
    try:
        db = dataset.connect('sqlite:///todo_list.db')
        db.get_table('todo').update({
            'id': id,
            'course_number': course_number.strip(),
            'course_name': course_name.strip(),
        }, ['id'])
    except Exception as exc:
        response.status = "409 Bad Request:" + str(exc)
        return
    return redirect('/')
# Run bottle's built-in development server when executed directly;
# expose a WSGI callable named ``application`` when imported by a host.
if __name__ == "__main__":
    debug(True)
    run(host="localhost", port=8090)
else:
    application = default_app()
from bottle import run, debug
from bottle import request, response, redirect, template
from bottle import static_file
import dataset
import json
from bottle import default_app
@route("/")
def get_midterm():
todo_list_db = dataset.connect('sqlite:///todo_list.db')
todo_table = todo_list_db.get_table('todo')
items = todo_table.find()
items = [ dict(x) for x in list(items) ]
return template("Midterm", items=items)
# BUG FIX: both route patterns were missing the closing ``>`` of the
# wildcard, and the handler took no ``filename`` parameter even though
# the routes capture one — bottle passes captured wildcards as keyword
# arguments, so every request raised TypeError.
@route("/static/png/<filename:re:.*\.png>")
@route("/image/<filename:re:.*\.png>")
def get_picture(filename="the_boat.png"):
    """Serve a PNG from the ./static directory (default: the_boat.png)."""
    return static_file(filename=filename, root="static", mimetype="image/png")
@route("/static/<filename:path>")
def get_static(filename):
return static_file(filename=filename, root="static")
@route("/delete/<id>")
def get_delete(id):
id = int(id)
try:
todo_list_db = dataset.connect('sqlite:///todo_list.db')
todo_table = todo_list_db.get_table('todo')
print(f"We need to delete id# {id}...")
todo_table.delete(id=id)
except Exception as e:
response.status="409 Bad Request:"+str(e)
return
return template("deleted", id=id)
@get("/insert")
def get_insert():
return template("insert")
@post("/insert")
def post_insert():
course_number = request.forms.get('course_number')
print("course_number=", course_number)
course_name = request.forms.get('course_name')
try:
todo_list_db = dataset.connect('sqlite:///todo_list.db')
todo_table = todo_list_db.get_table('todo')
todo_table.insert({
'course_number' : course_number.strip(),
'course_name' : course_name.strip(),
'done' : 1
})
except Exception as e:
response.status="409 Bad Request:"+str(e)
return
return redirect('/')
@get("/edit/<id>")
def get_edit(id):
try:
todo_list_db = dataset.connect('sqlite:///todo_list.db')
todo_table = todo_list_db.get_table('todo')
items = list(todo_table.find(id=id))
if len(items) != 1:
response.status="404 Not Found:"+str(id)
return
items = [ dict(x) for x in items ]
print(items)
print(items[0])
except Exception as e:
print(e)
response.status="409 Bad Request:"+str(e)
return
return template("edit", item=items[0])
@post("/edit")
def post_edit():
id = request.forms.get('id')
id = int(id)
course_number = request.forms.get('course_number')
course_name = request.forms.get('course_name')
print("course_number=", course_number)
try:
todo_list_db = dataset.connect('sqlite:///todo_list.db')
todo_table = todo_list_db.get_table('todo')
todo_table.update({
'id' : id,
'course_number' : course_number.strip(),
'course_name' : course_name.strip()
}, ['id'])
except Exception as e:
response.status="409 Bad Request:"+str(e)
return
return redirect('/')
if __name__ == "__main__":
debug(True)
run(host="localhost", port=8090)
else:
application = default_app() | true | true |
f71b23bfc71f48b0e7ac2b5989a80a9a6d09b17b | 4,803 | py | Python | CA/ca.py | PIR3-Internet/server | 181962e392be47a39848f3a88703163a140b3d3a | [
"MIT"
] | null | null | null | CA/ca.py | PIR3-Internet/server | 181962e392be47a39848f3a88703163a140b3d3a | [
"MIT"
] | null | null | null | CA/ca.py | PIR3-Internet/server | 181962e392be47a39848f3a88703163a140b3d3a | [
"MIT"
] | null | null | null | import ssl
import socket
import OpenSSL
import sqlite3
import signal
from functools import wraps
from numpy.core.numeric import count_nonzero
import requests
from multiprocessing import Process, Value
TIMEOUT = Value('i', 5)
cMax = Value('i', 2)
ca_num = Value('i', 0)
class TimeoutException(Exception):
    """Raised by the SIGALRM handler when a guarded call exceeds its deadline."""
    pass
def deadline(timeout, *args):
    """Decorator factory: abort the wrapped call after ``timeout`` seconds.

    Uses SIGALRM (Unix only); when the alarm fires the installed handler
    raises ``TimeoutException`` inside the running call.

    :param timeout: whole seconds before the call is aborted.
    """
    def decorate(f):
        def handler(signum, frame):
            # The alarm fired: abort whatever ``f`` is doing.
            raise TimeoutException()

        # BUG FIX: the original said ``@wraps(timeout, *args)``, wrapping
        # the integer timeout instead of the decorated function, so the
        # wrapper lost f's name and docstring.  Wrap ``f`` itself.
        @wraps(f)
        def new_f(*args, **kwargs):
            signal.signal(signal.SIGALRM, handler)
            signal.alarm(timeout)  # schedule SIGALRM in ``timeout`` seconds
            try:
                return f(*args, **kwargs)
            finally:
                # Always cancel the pending alarm — the original skipped
                # this when ``f`` raised, leaving a stray SIGALRM that
                # could kill unrelated code later.
                signal.alarm(0)
        return new_f
    return decorate
# NOTE(review): ``TIMEOUT.value`` is read ONCE, at import time, so later
# changes (processus bumps TIMEOUT to 60 hoping for a longer deadline)
# never reach this decorator — confirm whether that is intended.
@deadline(TIMEOUT.value)
def get_certificate(host, port=443, timeout=10):
    """Fetch ``host``'s TLS leaf certificate and return it PEM-encoded.

    Verification is disabled (``CERT_NONE``), so the certificate is
    retrieved even when its chain would not validate.

    :param host: hostname to connect to.
    :param port: TLS port, default 443.
    :param timeout: socket timeout in seconds once connected.
    :return: the peer certificate as a PEM string.
    """
    context = ssl.create_default_context()
    # SECLEVEL=1 re-enables weaker ciphers so older servers still answer.
    context.set_ciphers('DEFAULT:@SECLEVEL=1')
    context.check_hostname = False
    context.verify_mode = ssl.CERT_NONE
    conn = socket.create_connection((host, port))
    sock = context.wrap_socket(conn, server_hostname=host)
    sock.settimeout(timeout)
    try:
        # True -> return the DER-encoded binary form of the certificate.
        der_cert = sock.getpeercert(True)
    finally:
        sock.close()
    return ssl.DER_cert_to_PEM_cert(der_cert)
@deadline(60)
def url_direct(user):
    """Follow redirects with a GET and return the final URL's hostname.

    :param user: bare domain name (no scheme).
    :return: hostname component of the URL the GET ultimately landed on.
    """
    user = 'http://' + user
    user = requests.get(user).url.split('/')[2]
    return user
@deadline(60)
def url_with_header(user):
    """Return the redirect target's hostname from a HEAD request.

    :param user: bare domain name (no scheme).
    :raises KeyError: if the response carries no ``location`` header.
    """
    user = 'http://' + user
    user = requests.head(user).headers['location'].split('/')[2]
    return user
def get_url(user, counter, error):
    """Resolve ``user`` through its HTTP redirect after a TLS failure.

    Tries a full GET first (``url_direct``); on any failure other than a
    timeout, falls back to reading the Location header from a HEAD
    request (``url_with_header``).  On total failure, the original
    ``error`` and the domain's TLD are recorded in the ``errors`` table
    via the module-global ``cur``, and ``counter`` is bumped to
    ``cMax.value - 1`` so the caller's retry loop stops after one more
    attempt.

    :return: ``(user, counter)`` — the possibly-redirected hostname and
        the possibly-bumped retry counter.
    """
    try:
        user = url_direct(user)
    except TimeoutException:
        print("    Impossible to get url (TimeoutException) from ", user)
        # user.split('.')[-1] would be simpler: this stores the TLD.
        cur.execute("INSERT INTO errors VALUES (?, ?, ?)", (user, user.split('.')[len(user.split('.'))-1], error))
        counter = cMax.value-1
    except:
        try:
            user = url_with_header(user)
        except TimeoutException:
            print("    Impossible to get url (TimeoutException) from ", user)
            cur.execute("INSERT INTO errors VALUES (?, ?, ?)", (user, user.split('.')[len(user.split('.'))-1], error))
            counter = cMax.value-1
        except:
            print("    Impossible to get url from ", user)
            cur.execute("INSERT INTO errors VALUES (?, ?, ?)", (user, user.split('.')[len(user.split('.'))-1], error))
            counter = cMax.value-1
    return user, counter
def processus(user):
    """Fetch ``user``'s TLS certificate and record its issuing CA.

    Retries up to ``cMax.value`` times; results go into the ``ca`` table
    and failures into ``errors``, both via the module-global ``cur``.
    Runs inside a child process (see the main loop), so crashes here
    cannot take down the parent.
    """
    counter = 0
    ok = False
    while ok == False:
        try:
            certificate = get_certificate(user)
            x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, certificate)
            provider = x509.get_issuer().organizationName
            cur.execute("INSERT INTO ca VALUES (?, ?, ?)", (user, provider, ca_num.value))
            print(user, ": ", provider)
            ok = True
        except TimeoutException as e:
            if (counter == cMax.value-1):
                if (TIMEOUT.value != 60):
                    # Last try timed out: raise the shared deadline and
                    # restart the retry budget from zero.
                    # NOTE(review): get_certificate's @deadline captured
                    # TIMEOUT.value at import time, so this bump may have
                    # no effect on its alarm — confirm.
                    TIMEOUT.value = 60
                    counter -= counter  # i.e. counter = 0
                else:
                    cur.execute("INSERT INTO errors VALUES (?, ?, ?)", (user, user.split('.')[len(user.split('.'))-1], repr(e)))
            else:
                # Maybe the site redirects elsewhere: retry with the
                # hostname its HTTP redirect points at.
                user, counter = get_url(user, counter, repr(e))
                print("    ", repr(e), user)
            ok = False
            counter += 1
        except Exception as e:
            if (counter == cMax.value-1):
                cur.execute("INSERT INTO errors VALUES (?, ?, ?)", (user, user.split('.')[len(user.split('.'))-1], repr(e)))
            else:
                user, counter = get_url(user, counter, repr(e))
                print("    ", repr(e), user)
            ok = False
            counter += 1
        finally:
            # Runs after EVERY attempt: commits partial work and bumps
            # ca_num even on retries, so ca_num counts attempts, not rows.
            con.commit()
            ca_num.value += 1
            if counter == cMax.value:
                ok = True  # retry budget exhausted — give up on this site
# --- one-off setup: open the results database and reset both tables ---
con = sqlite3.connect('ca-providers.db')
cur = con.cursor()
try:
    cur.execute("CREATE TABLE ca (ca_user, ca_provider, ca_num)")
except sqlite3.OperationalError:
    # Table already exists from a previous run: wipe it instead.
    cur.execute("DELETE FROM ca")
try:
    cur.execute("CREATE TABLE errors (user, extension, error)")
except sqlite3.OperationalError:
    cur.execute("DELETE FROM errors")
con.commit()
debut = 0  # NOTE(review): never read — dead variable?
# Process the domain list one site at a time, each in its own subprocess.
# NOTE(review): p.join() makes this fully sequential — the child seems to
# exist for crash isolation, not parallelism.  Confirm intended.
with open("list1m2020.csv", "r") as f:
    for line in f:
        user = line.split()[0]
        p = Process(target=processus, args=(user,))
        p.start()
        p.join()
        if (TIMEOUT.value != 5):
            TIMEOUT.value = 5  # reset the shared deadline after a slow site
con.close()
import socket
import OpenSSL
import sqlite3
import signal
from functools import wraps
from numpy.core.numeric import count_nonzero
import requests
from multiprocessing import Process, Value
TIMEOUT = Value('i', 5)
cMax = Value('i', 2)
ca_num = Value('i', 0)
class TimeoutException(Exception):
pass
def deadline(timeout, *args):
    """Decorator factory: abort the wrapped call after ``timeout`` seconds.

    Uses SIGALRM (Unix only); when the alarm fires the installed handler
    raises ``TimeoutException`` inside the running call.
    """
    def decorate(f):
        def handler(signum, frame):
            raise TimeoutException()

        # BUG FIX: the original said ``@wraps(timeout, *args)``, wrapping
        # the integer timeout instead of the decorated function.
        @wraps(f)
        def new_f(*args, **kwargs):
            signal.signal(signal.SIGALRM, handler)
            signal.alarm(timeout)
            try:
                return f(*args, **kwargs)
            finally:
                # Cancel the alarm even when ``f`` raises, so a stray
                # SIGALRM cannot fire later in unrelated code.
                signal.alarm(0)
        return new_f
    return decorate
@deadline(TIMEOUT.value)
def get_certificate(host, port=443, timeout=10):
context = ssl.create_default_context()
context.set_ciphers('DEFAULT:@SECLEVEL=1')
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
conn = socket.create_connection((host, port))
sock = context.wrap_socket(conn, server_hostname=host)
sock.settimeout(timeout)
try:
der_cert = sock.getpeercert(True)
finally:
sock.close()
return ssl.DER_cert_to_PEM_cert(der_cert)
@deadline(60)
def url_direct(user):
user = 'http://' + user
user = requests.get(user).url.split('/')[2]
return user
@deadline(60)
def url_with_header(user):
user = 'http://' + user
user = requests.head(user).headers['location'].split('/')[2]
return user
def get_url(user, counter, error):
try:
user = url_direct(user)
except TimeoutException:
print(" Impossible to get url (TimeoutException) from ", user)
cur.execute("INSERT INTO errors VALUES (?, ?, ?)", (user, user.split('.')[len(user.split('.'))-1], error))
counter = cMax.value-1
except:
try:
user = url_with_header(user)
except TimeoutException:
print(" Impossible to get url (TimeoutException) from ", user)
cur.execute("INSERT INTO errors VALUES (?, ?, ?)", (user, user.split('.')[len(user.split('.'))-1], error))
counter = cMax.value-1
except:
print(" Impossible to get url from ", user)
cur.execute("INSERT INTO errors VALUES (?, ?, ?)", (user, user.split('.')[len(user.split('.'))-1], error))
counter = cMax.value-1
return user, counter
def processus(user):
counter = 0
ok = False
while ok == False:
try:
certificate = get_certificate(user)
x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, certificate)
provider = x509.get_issuer().organizationName
cur.execute("INSERT INTO ca VALUES (?, ?, ?)", (user, provider, ca_num.value))
print(user, ": ", provider)
ok = True
except TimeoutException as e:
if (counter == cMax.value-1):
if (TIMEOUT.value != 60):
TIMEOUT.value = 60
counter -= counter
else:
cur.execute("INSERT INTO errors VALUES (?, ?, ?)", (user, user.split('.')[len(user.split('.'))-1], repr(e)))
else:
user, counter = get_url(user, counter, repr(e))
print(" ", repr(e), user)
ok = False
counter += 1
except Exception as e:
if (counter == cMax.value-1):
cur.execute("INSERT INTO errors VALUES (?, ?, ?)", (user, user.split('.')[len(user.split('.'))-1], repr(e)))
else:
user, counter = get_url(user, counter, repr(e))
print(" ", repr(e), user)
ok = False
counter += 1
finally:
con.commit()
ca_num.value += 1
if counter == cMax.value:
ok = True
con = sqlite3.connect('ca-providers.db')
cur = con.cursor()
try:
cur.execute("CREATE TABLE ca (ca_user, ca_provider, ca_num)")
except sqlite3.OperationalError:
cur.execute("DELETE FROM ca")
try:
cur.execute("CREATE TABLE errors (user, extension, error)")
except sqlite3.OperationalError:
cur.execute("DELETE FROM errors")
con.commit()
debut = 0
with open("list1m2020.csv", "r") as f:
for line in f:
user = line.split()[0]
p = Process(target=processus, args=(user,))
p.start()
p.join()
if (TIMEOUT.value != 5):
TIMEOUT.value = 5
con.close() | true | true |
f71b2498aa5fdb11b49d58a54f14653f45df1cb1 | 10,087 | py | Python | docs/sphinx/conf.py | sdss/lvmscraper | 5b169487963fd06000c0a593993bb3c2c9418951 | [
"BSD-3-Clause"
] | null | null | null | docs/sphinx/conf.py | sdss/lvmscraper | 5b169487963fd06000c0a593993bb3c2c9418951 | [
"BSD-3-Clause"
] | null | null | null | docs/sphinx/conf.py | sdss/lvmscraper | 5b169487963fd06000c0a593993bb3c2c9418951 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# type: ignore
import os
from pkg_resources import parse_version
# Prefer the installed package's version; fall back to sdsstools' helper
# (which derives a version from the repo metadata) when the package is
# not importable in the docs build environment.
try:
    from cluplus import __version__
except ModuleNotFoundError:
    from sdsstools import get_package_version
    __version__ = get_package_version(__file__, 'sdss-cluplus') or 'dev'
# Are we building on Read the Docs?  RTD sets this env var in its builders.
on_rtd = os.environ.get('READTHEDOCS') == 'True'
# Sphinx template selected in cookiecutter and whether to use releases
sphinx_template = 'sphinx-bootstrap'
use_releases = 'no'
if sphinx_template == 'sphinx-bootstrap':
import sphinx_bootstrap_theme
# Importing matplotlib here with agg to prevent tkinter error in readthedocs
# import matplotlib
# matplotlib.use('agg')
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.autosummary',
'sphinx.ext.todo', 'sphinx.ext.viewcode', 'sphinx.ext.mathjax',
'sphinx.ext.intersphinx']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']
# source_suffix = '.rst'
# source_parsers = {
# '.md': 'recommonmark.parser.CommonMarkParser',
# }
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'cluplus'
copyright = '{0}, {1}'.format('2021', 'Florian Briegel')
author = 'Florian Briegel'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# The short X.Y version.
version = parse_version(__version__).base_version
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'py:obj'
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# Intersphinx mappings
intersphinx_mapping = {'python': ('https://docs.python.org/', None),
'astropy': ('http://docs.astropy.org/en/latest', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None)}
autodoc_mock_imports = ['_tkinter']
autodoc_member_order = 'groupwise'
napoleon_use_rtype = False
napoleon_use_ivar = True
rst_epilog = f"""
.. |numpy_array| replace:: Numpy array
.. |HDUList| replace:: :class:`~astropy.io.fits.HDUList`
.. |cluplus_version| replace:: {__version__}
"""
# -- Options for HTML output ----------------------------------------------
html_css_files = []
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
if sphinx_template == 'sphinx-bootstrap':
html_theme = 'bootstrap'
html_sidebars = {}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
# Navigation bar title. (Default: ``project`` value)
'navbar_title': "SDSS: {0}".format(project),
# Tab name for entire site. (Default: "Site")
'navbar_site_name': "Site",
# A list of tuples containing pages or urls to link to.
# Valid tuples should be in the following forms:
# (name, page) # a link to a page
# (name, "/aa/bb", 1) # a link to an arbitrary relative url
# (name, "http://example.com", True) # arbitrary absolute url
# Note the "1" or "True" value above as the third argument to indicate
# an arbitrary url.
'navbar_links': [
],
# Render the next and previous page links in navbar. (Default: true)
'navbar_sidebarrel': False,
# Render the current pages TOC in the navbar. (Default: true)
'navbar_pagenav': False,
# Tab name for the current pages TOC. (Default: "Page")
'navbar_pagenav_name': "Page",
# Global TOC depth for "site" navbar tab. (Default: 1)
# Switching to -1 shows all levels.
'globaltoc_depth': 2,
# Include hidden TOCs in Site navbar?
#
# Note: If this is "false", you cannot have mixed ``:hidden:`` and
# non-hidden ``toctree`` directives in the same page, or else the build
# will break.
#
# Values: "true" (default) or "false"
'globaltoc_includehidden': "true",
# HTML navbar class (Default: "navbar") to attach to <div> element.
# For black navbar, do "navbar navbar-inverse"
'navbar_class': "navbar",
# Fix navigation bar to top of page?
# Values: "true" (default) or "false"
'navbar_fixed_top': "true",
# Location of link to source.
# Options are "nav" (default), "footer" or anything else to exclude.
'source_link_position': "",
# Bootswatch (http://bootswatch.com/) theme.
#
# Options are nothing (default) or the name of a valid theme
# such as "amelia" or "cosmo".
'bootswatch_theme': "paper",
# Choose Bootstrap version.
# Values: "3" (default) or "2" (in quotes)
'bootstrap_version': "3",
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
html_logo = '_static/sdssv_logo_small.png'
html_css_files += ["custom_bootstrap.css"]
html_sidebars = {'**': ['localtoc.html']}
elif sphinx_template == 'alabaster':
html_theme = 'alabaster'
html_theme_options = {
'logo': 'sdssv_logo.png',
'github_user': 'sdss',
'github_repo': project,
'github_button': True,
'github_type': 'star',
'sidebar_collapse': True,
'page_width': '80%'
}
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
]
}
html_css_files += ["custom.css"]
html_favicon = './_static/favicon_sdssv.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# See https://github.com/rtfd/readthedocs.org/issues/1776 for why we do this
if on_rtd:
html_static_path = []
else:
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = '{0}pdoc'.format('cluplus')
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, '{0}.tex'.format(project), u'{0} Documentation'.format(project),
author, 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'cluplus', u'{0} Documentation'.format(project),
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, project, u'{0} Documentation'.format(project),
author, project, 'One line description of project.',
'Miscellaneous'),
]
if use_releases == 'yes':
extensions += ['sdsstools.releases']
releases_github_path = 'wasndas/cluplus'
releases_document_name = ['CHANGELOG']
releases_unstable_prehistory = True
| 30.847095 | 84 | 0.652027 |
import os
from pkg_resources import parse_version
try:
from cluplus import __version__
except ModuleNotFoundError:
from sdsstools import get_package_version
__version__ = get_package_version(__file__, 'sdss-cluplus') or 'dev'
on_rtd = os.environ.get('READTHEDOCS') == 'True'
sphinx_template = 'sphinx-bootstrap'
use_releases = 'no'
if sphinx_template == 'sphinx-bootstrap':
import sphinx_bootstrap_theme
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.autosummary',
'sphinx.ext.todo', 'sphinx.ext.viewcode', 'sphinx.ext.mathjax',
'sphinx.ext.intersphinx']
templates_path = ['_templates']
source_suffix = ['.rst', '.md']
master_doc = 'index'
project = 'cluplus'
copyright = '{0}, {1}'.format('2021', 'Florian Briegel')
author = 'Florian Briegel'
# |version| and |release|, also used in various other places throughout the
# built documents.
# The short X.Y version.
version = parse_version(__version__).base_version
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'py:obj'
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# Intersphinx mappings
intersphinx_mapping = {'python': ('https://docs.python.org/', None),
'astropy': ('http://docs.astropy.org/en/latest', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None)}
autodoc_mock_imports = ['_tkinter']
autodoc_member_order = 'groupwise'
napoleon_use_rtype = False
napoleon_use_ivar = True
rst_epilog = f"""
.. |numpy_array| replace:: Numpy array
.. |HDUList| replace:: :class:`~astropy.io.fits.HDUList`
.. |cluplus_version| replace:: {__version__}
"""
# -- Options for HTML output ----------------------------------------------
html_css_files = []
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
if sphinx_template == 'sphinx-bootstrap':
html_theme = 'bootstrap'
html_sidebars = {}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
# Navigation bar title. (Default: ``project`` value)
'navbar_title': "SDSS: {0}".format(project),
# Tab name for entire site. (Default: "Site")
'navbar_site_name': "Site",
# A list of tuples containing pages or urls to link to.
# Valid tuples should be in the following forms:
# (name, page) # a link to a page
# (name, "/aa/bb", 1) # a link to an arbitrary relative url
# (name, "http://example.com", True) # arbitrary absolute url
# Note the "1" or "True" value above as the third argument to indicate
# an arbitrary url.
'navbar_links': [
],
# Render the next and previous page links in navbar. (Default: true)
'navbar_sidebarrel': False,
# Render the current pages TOC in the navbar. (Default: true)
'navbar_pagenav': False,
# Tab name for the current pages TOC. (Default: "Page")
'navbar_pagenav_name': "Page",
# Global TOC depth for "site" navbar tab. (Default: 1)
# Switching to -1 shows all levels.
'globaltoc_depth': 2,
# Include hidden TOCs in Site navbar?
#
# Note: If this is "false", you cannot have mixed ``:hidden:`` and
# non-hidden ``toctree`` directives in the same page, or else the build
# will break.
#
# Values: "true" (default) or "false"
'globaltoc_includehidden': "true",
# HTML navbar class (Default: "navbar") to attach to <div> element.
# For black navbar, do "navbar navbar-inverse"
'navbar_class': "navbar",
# Fix navigation bar to top of page?
# Values: "true" (default) or "false"
'navbar_fixed_top': "true",
# Location of link to source.
# Options are "nav" (default), "footer" or anything else to exclude.
'source_link_position': "",
# Bootswatch (http://bootswatch.com/) theme.
#
# Options are nothing (default) or the name of a valid theme
# such as "amelia" or "cosmo".
'bootswatch_theme': "paper",
# Choose Bootstrap version.
# Values: "3" (default) or "2" (in quotes)
'bootstrap_version': "3",
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
html_logo = '_static/sdssv_logo_small.png'
html_css_files += ["custom_bootstrap.css"]
html_sidebars = {'**': ['localtoc.html']}
elif sphinx_template == 'alabaster':
html_theme = 'alabaster'
html_theme_options = {
'logo': 'sdssv_logo.png',
'github_user': 'sdss',
'github_repo': project,
'github_button': True,
'github_type': 'star',
'sidebar_collapse': True,
'page_width': '80%'
}
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
]
}
html_css_files += ["custom.css"]
html_favicon = './_static/favicon_sdssv.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# See https://github.com/rtfd/readthedocs.org/issues/1776 for why we do this
if on_rtd:
html_static_path = []
else:
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = '{0}pdoc'.format('cluplus')
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, '{0}.tex'.format(project), u'{0} Documentation'.format(project),
author, 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'cluplus', u'{0} Documentation'.format(project),
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, project, u'{0} Documentation'.format(project),
author, project, 'One line description of project.',
'Miscellaneous'),
]
if use_releases == 'yes':
extensions += ['sdsstools.releases']
releases_github_path = 'wasndas/cluplus'
releases_document_name = ['CHANGELOG']
releases_unstable_prehistory = True
| true | true |
f71b252375625601941b43c73fbf023daa436c67 | 304 | py | Python | src/pygame_setup.py | sheepy0125/some-platformer-game | 5b623c7ffeb7d1e0ba8bf1d75bc37b1798f31379 | [
"MIT"
] | null | null | null | src/pygame_setup.py | sheepy0125/some-platformer-game | 5b623c7ffeb7d1e0ba8bf1d75bc37b1798f31379 | [
"MIT"
] | null | null | null | src/pygame_setup.py | sheepy0125/some-platformer-game | 5b623c7ffeb7d1e0ba8bf1d75bc37b1798f31379 | [
"MIT"
] | null | null | null | """
Pygame setup for Some Platformer Game
Created by sheepy0125
08/10/2021
"""
import pygame

# Window dimensions in pixels: (width, height).
SCREEN_SIZE = (500, 500)
# Camera scroll anchor — the centre of the window.
SCROLL_OFFSET = (SCREEN_SIZE[0] // 2, SCREEN_SIZE[1] // 2)

# Create and title the main window; shared by the rest of the game modules.
screen = pygame.display.set_mode(SCREEN_SIZE)
pygame.display.set_caption("Some Platformer Game")
# Clock object — presumably used to cap the frame rate in the main loop
# (the loop lives elsewhere; confirm).
clock = pygame.time.Clock()
| 21.714286 | 58 | 0.746711 |
import pygame
SCREEN_SIZE = (500, 500)
SCROLL_OFFSET = (SCREEN_SIZE[0] // 2, SCREEN_SIZE[1] // 2)
screen = pygame.display.set_mode(SCREEN_SIZE)
pygame.display.set_caption("Some Platformer Game")
clock = pygame.time.Clock()
| true | true |
f71b256176da366107fb45bcef939ee05c453375 | 13,883 | py | Python | nexmo/__init__.py | cook-health/messaging | 1a827b97d9af6e56d55c362b29dd79a6cb373f88 | [
"MIT"
] | null | null | null | nexmo/__init__.py | cook-health/messaging | 1a827b97d9af6e56d55c362b29dd79a6cb373f88 | [
"MIT"
] | 2 | 2018-03-14T10:42:37.000Z | 2018-03-14T11:01:31.000Z | nexmo/__init__.py | Seliniux777/Nexmo-nexmo-python | d1d60e8068b1cb23f12507a6ec1cd500285890b5 | [
"MIT"
] | 1 | 2020-10-18T09:41:15.000Z | 2020-10-18T09:41:15.000Z | from platform import python_version
import hashlib
import hmac
import jwt
import os
import requests
import sys
import time
from uuid import uuid4
import warnings
if sys.version_info[0] == 3:
string_types = (str, bytes)
else:
string_types = (unicode, str)
__version__ = '2.0.0'
class Error(Exception):
pass
class ClientError(Error):
pass
class ServerError(Error):
pass
class AuthenticationError(ClientError):
pass
class Client():
    """Client for the Nexmo REST APIs.

    Credentials may be supplied explicitly (``key``/``secret``) or read from
    the ``NEXMO_API_KEY`` / ``NEXMO_API_SECRET`` environment variables.
    Webhook signature verification uses ``signature_secret`` and
    ``signature_method``; the v1 voice/application endpoints authenticate via
    a JWT built from ``application_id`` and ``private_key``.
    """

    def __init__(self, **kwargs):
        self.api_key = kwargs.get('key', None) or os.environ.get('NEXMO_API_KEY', None)

        self.api_secret = kwargs.get('secret', None) or os.environ.get('NEXMO_API_SECRET', None)

        self.signature_secret = kwargs.get('signature_secret', None) or os.environ.get('NEXMO_SIGNATURE_SECRET', None)
        self.signature_method = kwargs.get('signature_method', None) or os.environ.get('NEXMO_SIGNATURE_METHOD', None)

        # Map the configured digest name onto the hashlib constructor used by
        # signature(); any other value leaves signature_method unset so the
        # legacy md5(params + secret) scheme is used instead.
        if self.signature_method == 'md5':
            self.signature_method = hashlib.md5
        elif self.signature_method == 'sha1':
            self.signature_method = hashlib.sha1
        elif self.signature_method == 'sha256':
            self.signature_method = hashlib.sha256
        elif self.signature_method == 'sha512':
            self.signature_method = hashlib.sha512

        self.application_id = kwargs.get('application_id', None)

        self.private_key = kwargs.get('private_key', None)

        # A private key value without a newline is treated as a path to a PEM
        # file; literal PEM contents always contain newlines.
        if isinstance(self.private_key, string_types) and '\n' not in self.private_key:
            with open(self.private_key, 'rb') as key_file:
                self.private_key = key_file.read()

        self.host = 'rest.nexmo.com'

        self.api_host = 'api.nexmo.com'

        user_agent = 'nexmo-python/{0}/{1}'.format(__version__, python_version())

        if 'app_name' in kwargs and 'app_version' in kwargs:
            user_agent += '/{0}/{1}'.format(kwargs['app_name'], kwargs['app_version'])

        self.headers = {'User-Agent': user_agent}

        self.auth_params = {}

    def auth(self, params=None, **kwargs):
        """Set extra claims to embed in the JWT built by __headers()."""
        self.auth_params = params or kwargs

    # -- SMS and account management ----------------------------------------

    def send_message(self, params):
        return self.post(self.host, '/sms/json', params)

    def get_balance(self):
        return self.get(self.host, '/account/get-balance')

    def get_country_pricing(self, country_code):
        return self.get(self.host, '/account/get-pricing/outbound', {'country': country_code})

    def get_prefix_pricing(self, prefix):
        return self.get(self.host, '/account/get-prefix-pricing/outbound', {'prefix': prefix})

    def get_sms_pricing(self, number):
        return self.get(self.host, '/account/get-phone-pricing/outbound/sms', {'phone': number})

    def get_voice_pricing(self, number):
        return self.get(self.host, '/account/get-phone-pricing/outbound/voice', {'phone': number})

    def update_settings(self, params=None, **kwargs):
        return self.post(self.host, '/account/settings', params or kwargs)

    def topup(self, params=None, **kwargs):
        return self.post(self.host, '/account/top-up', params or kwargs)

    # -- Number management --------------------------------------------------

    def get_account_numbers(self, params=None, **kwargs):
        return self.get(self.host, '/account/numbers', params or kwargs)

    def get_available_numbers(self, country_code, params=None, **kwargs):
        return self.get(self.host, '/number/search', dict(params or kwargs, country=country_code))

    def buy_number(self, params=None, **kwargs):
        return self.post(self.host, '/number/buy', params or kwargs)

    def cancel_number(self, params=None, **kwargs):
        return self.post(self.host, '/number/cancel', params or kwargs)

    def update_number(self, params=None, **kwargs):
        return self.post(self.host, '/number/update', params or kwargs)

    # -- Message search -----------------------------------------------------

    def get_message(self, message_id):
        return self.get(self.host, '/search/message', {'id': message_id})

    def get_message_rejections(self, params=None, **kwargs):
        return self.get(self.host, '/search/rejections', params or kwargs)

    def search_messages(self, params=None, **kwargs):
        return self.get(self.host, '/search/messages', params or kwargs)

    # -- USSD and short codes ------------------------------------------------

    def send_ussd_push_message(self, params=None, **kwargs):
        return self.post(self.host, '/ussd/json', params or kwargs)

    def send_ussd_prompt_message(self, params=None, **kwargs):
        return self.post(self.host, '/ussd-prompt/json', params or kwargs)

    def send_2fa_message(self, params=None, **kwargs):
        return self.post(self.host, '/sc/us/2fa/json', params or kwargs)

    def send_event_alert_message(self, params=None, **kwargs):
        return self.post(self.host, '/sc/us/alert/json', params or kwargs)

    def send_marketing_message(self, params=None, **kwargs):
        return self.post(self.host, '/sc/us/marketing/json', params or kwargs)

    def get_event_alert_numbers(self):
        return self.get(self.host, '/sc/us/alert/opt-in/query/json')

    def resubscribe_event_alert_number(self, params=None, **kwargs):
        return self.post(self.host, '/sc/us/alert/opt-in/manage/json', params or kwargs)

    # -- Legacy voice --------------------------------------------------------

    def initiate_call(self, params=None, **kwargs):
        return self.post(self.host, '/call/json', params or kwargs)

    def initiate_tts_call(self, params=None, **kwargs):
        return self.post(self.api_host, '/tts/json', params or kwargs)

    def initiate_tts_prompt_call(self, params=None, **kwargs):
        return self.post(self.api_host, '/tts-prompt/json', params or kwargs)

    # -- Verify API ----------------------------------------------------------

    def start_verification(self, params=None, **kwargs):
        return self.post(self.api_host, '/verify/json', params or kwargs)

    def send_verification_request(self, params=None, **kwargs):
        warnings.warn('nexmo.Client#send_verification_request is deprecated (use #start_verification instead)',
                      DeprecationWarning, stacklevel=2)

        return self.post(self.api_host, '/verify/json', params or kwargs)

    def check_verification(self, request_id, params=None, **kwargs):
        return self.post(self.api_host, '/verify/check/json', dict(params or kwargs, request_id=request_id))

    def check_verification_request(self, params=None, **kwargs):
        warnings.warn('nexmo.Client#check_verification_request is deprecated (use #check_verification instead)',
                      DeprecationWarning, stacklevel=2)

        return self.post(self.api_host, '/verify/check/json', params or kwargs)

    def get_verification(self, request_id):
        return self.get(self.api_host, '/verify/search/json', {'request_id': request_id})

    def get_verification_request(self, request_id):
        warnings.warn('nexmo.Client#get_verification_request is deprecated (use #get_verification instead)',
                      DeprecationWarning, stacklevel=2)

        return self.get(self.api_host, '/verify/search/json', {'request_id': request_id})

    def cancel_verification(self, request_id):
        return self.post(self.api_host, '/verify/control/json', {'request_id': request_id, 'cmd': 'cancel'})

    def trigger_next_verification_event(self, request_id):
        return self.post(self.api_host, '/verify/control/json', {'request_id': request_id, 'cmd': 'trigger_next_event'})

    def control_verification_request(self, params=None, **kwargs):
        warnings.warn('nexmo.Client#control_verification_request is deprecated', DeprecationWarning, stacklevel=2)

        return self.post(self.api_host, '/verify/control/json', params or kwargs)

    # -- Number Insight ------------------------------------------------------

    def get_basic_number_insight(self, params=None, **kwargs):
        return self.get(self.api_host, '/ni/basic/json', params or kwargs)

    def get_standard_number_insight(self, params=None, **kwargs):
        return self.get(self.api_host, '/ni/standard/json', params or kwargs)

    def get_number_insight(self, params=None, **kwargs):
        warnings.warn('nexmo.Client#get_number_insight is deprecated (use #get_standard_number_insight instead)',
                      DeprecationWarning, stacklevel=2)

        return self.get(self.api_host, '/number/lookup/json', params or kwargs)

    def get_advanced_number_insight(self, params=None, **kwargs):
        return self.get(self.api_host, '/ni/advanced/json', params or kwargs)

    def request_number_insight(self, params=None, **kwargs):
        return self.post(self.host, '/ni/json', params or kwargs)

    # -- Applications --------------------------------------------------------

    def get_applications(self, params=None, **kwargs):
        return self.get(self.api_host, '/v1/applications', params or kwargs)

    def get_application(self, application_id):
        return self.get(self.api_host, '/v1/applications/' + application_id)

    def create_application(self, params=None, **kwargs):
        return self.post(self.api_host, '/v1/applications', params or kwargs)

    def update_application(self, application_id, params=None, **kwargs):
        return self.put(self.api_host, '/v1/applications/' + application_id, params or kwargs)

    def delete_application(self, application_id):
        return self.delete(self.api_host, '/v1/applications/' + application_id)

    # -- Voice API v1 (JWT-authenticated) -------------------------------------

    def create_call(self, params=None, **kwargs):
        return self.__post('/v1/calls', params or kwargs)

    def get_calls(self, params=None, **kwargs):
        return self.__get('/v1/calls', params or kwargs)

    def get_call(self, uuid):
        return self.__get('/v1/calls/' + uuid)

    def update_call(self, uuid, params=None, **kwargs):
        return self.__put('/v1/calls/' + uuid, params or kwargs)

    def send_audio(self, uuid, params=None, **kwargs):
        return self.__put('/v1/calls/' + uuid + '/stream', params or kwargs)

    def stop_audio(self, uuid):
        return self.__delete('/v1/calls/' + uuid + '/stream')

    def send_speech(self, uuid, params=None, **kwargs):
        return self.__put('/v1/calls/' + uuid + '/talk', params or kwargs)

    def stop_speech(self, uuid):
        return self.__delete('/v1/calls/' + uuid + '/talk')

    def send_dtmf(self, uuid, params=None, **kwargs):
        return self.__put('/v1/calls/' + uuid + '/dtmf', params or kwargs)

    # -- Webhook signatures ---------------------------------------------------

    def check_signature(self, params):
        """Return True if the ``sig`` entry of ``params`` matches the signature
        computed over the remaining parameters (constant-time comparison)."""
        params = dict(params)

        signature = params.pop('sig', '').lower()

        return hmac.compare_digest(signature, self.signature(params))

    def signature(self, params):
        """Compute the hex signature of ``params``.

        Uses HMAC with the configured digest when ``signature_method`` is set,
        otherwise the legacy md5(concatenated-params + secret) scheme.
        NOTE: mutates ``params`` by inserting a ``timestamp`` key when absent.
        """
        if self.signature_method:
            hasher = hmac.new(self.signature_secret.encode(), digestmod=self.signature_method)
        else:
            hasher = hashlib.md5()

        # Add timestamp if not already present
        if not params.get("timestamp"):
            params["timestamp"] = int(time.time())

        for key in sorted(params):
            value = params[key]

            # '&' and '=' are the pair separators in the signed string, so any
            # occurrence inside a value is neutralised before hashing.
            if isinstance(value, str):
                value = value.replace('&', '_').replace('=', '_')

            hasher.update('&{0}={1}'.format(key, value).encode('utf-8'))

        if self.signature_method is None:
            hasher.update(self.signature_secret.encode())

        return hasher.hexdigest()

    # -- HTTP helpers (API-key authenticated) ---------------------------------

    def get(self, host, request_uri, params=None):
        uri = 'https://' + host + request_uri

        params = dict(params or {}, api_key=self.api_key, api_secret=self.api_secret)

        return self.parse(host, requests.get(uri, params=params, headers=self.headers))

    def post(self, host, request_uri, params):
        uri = 'https://' + host + request_uri

        params = dict(params, api_key=self.api_key, api_secret=self.api_secret)

        return self.parse(host, requests.post(uri, data=params, headers=self.headers))

    def put(self, host, request_uri, params):
        uri = 'https://' + host + request_uri

        params = dict(params, api_key=self.api_key, api_secret=self.api_secret)

        return self.parse(host, requests.put(uri, json=params, headers=self.headers))

    def delete(self, host, request_uri):
        uri = 'https://' + host + request_uri

        params = dict(api_key=self.api_key, api_secret=self.api_secret)

        return self.parse(host, requests.delete(uri, params=params, headers=self.headers))

    def parse(self, host, response):
        """Map an HTTP response to decoded JSON or a client/server error.

        NOTE: status codes outside the handled ranges (e.g. 3xx) fall through
        and yield None, preserving the original behaviour.
        """
        if response.status_code == 401:
            raise AuthenticationError
        elif response.status_code == 204:
            return None
        elif 200 <= response.status_code < 300:
            return response.json()
        elif 400 <= response.status_code < 500:
            message = "{code} response from {host}".format(code=response.status_code, host=host)

            raise ClientError(message)
        elif 500 <= response.status_code < 600:
            message = "{code} response from {host}".format(code=response.status_code, host=host)

            raise ServerError(message)

    # -- HTTP helpers (JWT authenticated) -------------------------------------

    def __get(self, request_uri, params=None):
        uri = 'https://' + self.api_host + request_uri

        return self.parse(self.api_host, requests.get(uri, params=params or {}, headers=self.__headers()))

    def __post(self, request_uri, params):
        uri = 'https://' + self.api_host + request_uri

        return self.parse(self.api_host, requests.post(uri, json=params, headers=self.__headers()))

    def __put(self, request_uri, params):
        uri = 'https://' + self.api_host + request_uri

        return self.parse(self.api_host, requests.put(uri, json=params, headers=self.__headers()))

    def __delete(self, request_uri):
        uri = 'https://' + self.api_host + request_uri

        return self.parse(self.api_host, requests.delete(uri, headers=self.__headers()))

    def __headers(self):
        """Build request headers carrying a short-lived RS256 JWT."""
        iat = int(time.time())

        payload = dict(self.auth_params)
        payload.setdefault('application_id', self.application_id)
        payload.setdefault('iat', iat)
        payload.setdefault('exp', iat + 60)
        payload.setdefault('jti', str(uuid4()))

        token = jwt.encode(payload, self.private_key, algorithm='RS256')
        # PyJWT < 2.0 returns bytes while >= 2.0 returns str; normalise so the
        # Authorization header is well-formed with either version (the former
        # b'Bearer ' + token concatenation raises TypeError under PyJWT 2.x).
        if isinstance(token, bytes):
            token = token.decode('ascii')

        return dict(self.headers, Authorization='Bearer ' + token)
| 38.457064 | 120 | 0.666859 | from platform import python_version
import hashlib
import hmac
import jwt
import os
import requests
import sys
import time
from uuid import uuid4
import warnings
if sys.version_info[0] == 3:
string_types = (str, bytes)
else:
string_types = (unicode, str)
__version__ = '2.0.0'
class Error(Exception):
pass
class ClientError(Error):
pass
class ServerError(Error):
pass
class AuthenticationError(ClientError):
pass
class Client():
def __init__(self, **kwargs):
self.api_key = kwargs.get('key', None) or os.environ.get('NEXMO_API_KEY', None)
self.api_secret = kwargs.get('secret', None) or os.environ.get('NEXMO_API_SECRET', None)
self.signature_secret = kwargs.get('signature_secret', None) or os.environ.get('NEXMO_SIGNATURE_SECRET', None)
self.signature_method = kwargs.get('signature_method', None) or os.environ.get('NEXMO_SIGNATURE_METHOD', None)
if self.signature_method == 'md5':
self.signature_method = hashlib.md5
elif self.signature_method == 'sha1':
self.signature_method = hashlib.sha1
elif self.signature_method == 'sha256':
self.signature_method = hashlib.sha256
elif self.signature_method == 'sha512':
self.signature_method = hashlib.sha512
self.application_id = kwargs.get('application_id', None)
self.private_key = kwargs.get('private_key', None)
if isinstance(self.private_key, string_types) and '\n' not in self.private_key:
with open(self.private_key, 'rb') as key_file:
self.private_key = key_file.read()
self.host = 'rest.nexmo.com'
self.api_host = 'api.nexmo.com'
user_agent = 'nexmo-python/{0}/{1}'.format(__version__, python_version())
if 'app_name' in kwargs and 'app_version' in kwargs:
user_agent += '/{0}/{1}'.format(kwargs['app_name'], kwargs['app_version'])
self.headers = {'User-Agent': user_agent}
self.auth_params = {}
def auth(self, params=None, **kwargs):
self.auth_params = params or kwargs
def send_message(self, params):
return self.post(self.host, '/sms/json', params)
def get_balance(self):
return self.get(self.host, '/account/get-balance')
def get_country_pricing(self, country_code):
return self.get(self.host, '/account/get-pricing/outbound', {'country': country_code})
def get_prefix_pricing(self, prefix):
return self.get(self.host, '/account/get-prefix-pricing/outbound', {'prefix': prefix})
def get_sms_pricing(self, number):
return self.get(self.host, '/account/get-phone-pricing/outbound/sms', {'phone': number})
def get_voice_pricing(self, number):
return self.get(self.host, '/account/get-phone-pricing/outbound/voice', {'phone': number})
def update_settings(self, params=None, **kwargs):
return self.post(self.host, '/account/settings', params or kwargs)
def topup(self, params=None, **kwargs):
return self.post(self.host, '/account/top-up', params or kwargs)
def get_account_numbers(self, params=None, **kwargs):
return self.get(self.host, '/account/numbers', params or kwargs)
def get_available_numbers(self, country_code, params=None, **kwargs):
return self.get(self.host, '/number/search', dict(params or kwargs, country=country_code))
def buy_number(self, params=None, **kwargs):
return self.post(self.host, '/number/buy', params or kwargs)
def cancel_number(self, params=None, **kwargs):
return self.post(self.host, '/number/cancel', params or kwargs)
def update_number(self, params=None, **kwargs):
return self.post(self.host, '/number/update', params or kwargs)
def get_message(self, message_id):
return self.get(self.host, '/search/message', {'id': message_id})
def get_message_rejections(self, params=None, **kwargs):
return self.get(self.host, '/search/rejections', params or kwargs)
def search_messages(self, params=None, **kwargs):
return self.get(self.host, '/search/messages', params or kwargs)
def send_ussd_push_message(self, params=None, **kwargs):
return self.post(self.host, '/ussd/json', params or kwargs)
def send_ussd_prompt_message(self, params=None, **kwargs):
return self.post(self.host, '/ussd-prompt/json', params or kwargs)
def send_2fa_message(self, params=None, **kwargs):
return self.post(self.host, '/sc/us/2fa/json', params or kwargs)
def send_event_alert_message(self, params=None, **kwargs):
return self.post(self.host, '/sc/us/alert/json', params or kwargs)
def send_marketing_message(self, params=None, **kwargs):
return self.post(self.host, '/sc/us/marketing/json', params or kwargs)
def get_event_alert_numbers(self):
return self.get(self.host, '/sc/us/alert/opt-in/query/json')
def resubscribe_event_alert_number(self, params=None, **kwargs):
return self.post(self.host, '/sc/us/alert/opt-in/manage/json', params or kwargs)
def initiate_call(self, params=None, **kwargs):
return self.post(self.host, '/call/json', params or kwargs)
def initiate_tts_call(self, params=None, **kwargs):
return self.post(self.api_host, '/tts/json', params or kwargs)
def initiate_tts_prompt_call(self, params=None, **kwargs):
return self.post(self.api_host, '/tts-prompt/json', params or kwargs)
def start_verification(self, params=None, **kwargs):
return self.post(self.api_host, '/verify/json', params or kwargs)
def send_verification_request(self, params=None, **kwargs):
warnings.warn('nexmo.Client#send_verification_request is deprecated (use #start_verification instead)',
DeprecationWarning, stacklevel=2)
return self.post(self.api_host, '/verify/json', params or kwargs)
def check_verification(self, request_id, params=None, **kwargs):
return self.post(self.api_host, '/verify/check/json', dict(params or kwargs, request_id=request_id))
def check_verification_request(self, params=None, **kwargs):
warnings.warn('nexmo.Client#check_verification_request is deprecated (use #check_verification instead)',
DeprecationWarning, stacklevel=2)
return self.post(self.api_host, '/verify/check/json', params or kwargs)
def get_verification(self, request_id):
return self.get(self.api_host, '/verify/search/json', {'request_id': request_id})
def get_verification_request(self, request_id):
warnings.warn('nexmo.Client#get_verification_request is deprecated (use #get_verification instead)',
DeprecationWarning, stacklevel=2)
return self.get(self.api_host, '/verify/search/json', {'request_id': request_id})
def cancel_verification(self, request_id):
return self.post(self.api_host, '/verify/control/json', {'request_id': request_id, 'cmd': 'cancel'})
def trigger_next_verification_event(self, request_id):
return self.post(self.api_host, '/verify/control/json', {'request_id': request_id, 'cmd': 'trigger_next_event'})
def control_verification_request(self, params=None, **kwargs):
warnings.warn('nexmo.Client#control_verification_request is deprecated', DeprecationWarning, stacklevel=2)
return self.post(self.api_host, '/verify/control/json', params or kwargs)
def get_basic_number_insight(self, params=None, **kwargs):
return self.get(self.api_host, '/ni/basic/json', params or kwargs)
def get_standard_number_insight(self, params=None, **kwargs):
return self.get(self.api_host, '/ni/standard/json', params or kwargs)
def get_number_insight(self, params=None, **kwargs):
warnings.warn('nexmo.Client#get_number_insight is deprecated (use #get_standard_number_insight instead)',
DeprecationWarning, stacklevel=2)
return self.get(self.api_host, '/number/lookup/json', params or kwargs)
def get_advanced_number_insight(self, params=None, **kwargs):
return self.get(self.api_host, '/ni/advanced/json', params or kwargs)
def request_number_insight(self, params=None, **kwargs):
return self.post(self.host, '/ni/json', params or kwargs)
def get_applications(self, params=None, **kwargs):
return self.get(self.api_host, '/v1/applications', params or kwargs)
def get_application(self, application_id):
return self.get(self.api_host, '/v1/applications/' + application_id)
def create_application(self, params=None, **kwargs):
return self.post(self.api_host, '/v1/applications', params or kwargs)
def update_application(self, application_id, params=None, **kwargs):
return self.put(self.api_host, '/v1/applications/' + application_id, params or kwargs)
def delete_application(self, application_id):
return self.delete(self.api_host, '/v1/applications/' + application_id)
def create_call(self, params=None, **kwargs):
return self.__post('/v1/calls', params or kwargs)
def get_calls(self, params=None, **kwargs):
return self.__get('/v1/calls', params or kwargs)
def get_call(self, uuid):
return self.__get('/v1/calls/' + uuid)
def update_call(self, uuid, params=None, **kwargs):
return self.__put('/v1/calls/' + uuid, params or kwargs)
def send_audio(self, uuid, params=None, **kwargs):
return self.__put('/v1/calls/' + uuid + '/stream', params or kwargs)
def stop_audio(self, uuid):
return self.__delete('/v1/calls/' + uuid + '/stream')
def send_speech(self, uuid, params=None, **kwargs):
return self.__put('/v1/calls/' + uuid + '/talk', params or kwargs)
def stop_speech(self, uuid):
return self.__delete('/v1/calls/' + uuid + '/talk')
def send_dtmf(self, uuid, params=None, **kwargs):
return self.__put('/v1/calls/' + uuid + '/dtmf', params or kwargs)
def check_signature(self, params):
params = dict(params)
signature = params.pop('sig', '').lower()
return hmac.compare_digest(signature, self.signature(params))
def signature(self, params):
if self.signature_method:
hasher = hmac.new(self.signature_secret.encode(), digestmod=self.signature_method)
else:
hasher = hashlib.md5()
if not params.get("timestamp"):
params["timestamp"] = int(time.time())
for key in sorted(params):
value = params[key]
if isinstance(value, str):
value = value.replace('&', '_').replace('=', '_')
hasher.update('&{0}={1}'.format(key, value).encode('utf-8'))
if self.signature_method is None:
hasher.update(self.signature_secret.encode())
return hasher.hexdigest()
def get(self, host, request_uri, params=None):
uri = 'https://' + host + request_uri
params = dict(params or {}, api_key=self.api_key, api_secret=self.api_secret)
return self.parse(host, requests.get(uri, params=params, headers=self.headers))
def post(self, host, request_uri, params):
uri = 'https://' + host + request_uri
params = dict(params, api_key=self.api_key, api_secret=self.api_secret)
return self.parse(host, requests.post(uri, data=params, headers=self.headers))
def put(self, host, request_uri, params):
uri = 'https://' + host + request_uri
params = dict(params, api_key=self.api_key, api_secret=self.api_secret)
return self.parse(host, requests.put(uri, json=params, headers=self.headers))
def delete(self, host, request_uri):
uri = 'https://' + host + request_uri
params = dict(api_key=self.api_key, api_secret=self.api_secret)
return self.parse(host, requests.delete(uri, params=params, headers=self.headers))
def parse(self, host, response):
if response.status_code == 401:
raise AuthenticationError
elif response.status_code == 204:
return None
elif 200 <= response.status_code < 300:
return response.json()
elif 400 <= response.status_code < 500:
message = "{code} response from {host}".format(code=response.status_code, host=host)
raise ClientError(message)
elif 500 <= response.status_code < 600:
message = "{code} response from {host}".format(code=response.status_code, host=host)
raise ServerError(message)
def __get(self, request_uri, params=None):
uri = 'https://' + self.api_host + request_uri
return self.parse(self.api_host, requests.get(uri, params=params or {}, headers=self.__headers()))
def __post(self, request_uri, params):
uri = 'https://' + self.api_host + request_uri
return self.parse(self.api_host, requests.post(uri, json=params, headers=self.__headers()))
def __put(self, request_uri, params):
uri = 'https://' + self.api_host + request_uri
return self.parse(self.api_host, requests.put(uri, json=params, headers=self.__headers()))
def __delete(self, request_uri):
uri = 'https://' + self.api_host + request_uri
return self.parse(self.api_host, requests.delete(uri, headers=self.__headers()))
def __headers(self):
iat = int(time.time())
payload = dict(self.auth_params)
payload.setdefault('application_id', self.application_id)
payload.setdefault('iat', iat)
payload.setdefault('exp', iat + 60)
payload.setdefault('jti', str(uuid4()))
token = jwt.encode(payload, self.private_key, algorithm='RS256')
return dict(self.headers, Authorization=b'Bearer ' + token)
| true | true |
f71b26b2f58e18b2342f9b9601e14fbabdb77fb3 | 2,930 | py | Python | src/python/pants/util/process_handler.py | revl/pants | 8ad83e4ca80c095d44efceafd8b41e575da39c65 | [
"Apache-2.0"
] | 1 | 2021-05-05T18:58:28.000Z | 2021-05-05T18:58:28.000Z | src/python/pants/util/process_handler.py | revl/pants | 8ad83e4ca80c095d44efceafd8b41e575da39c65 | [
"Apache-2.0"
] | null | null | null | src/python/pants/util/process_handler.py | revl/pants | 8ad83e4ca80c095d44efceafd8b41e575da39c65 | [
"Apache-2.0"
] | 3 | 2020-06-30T08:28:13.000Z | 2021-07-28T09:35:57.000Z | # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import io
import multiprocessing
import subprocess
import sys
from abc import ABC, abstractmethod
from typing import Optional
class ProcessHandler(ABC):
    """An abstraction of process handling calls using the same interface as subprocess.Popen.

    See SubprocessProcessHandler below for an example.
    """

    @abstractmethod
    def wait(self, timeout: Optional[float] = None) -> int:
        """Wait for the underlying process to terminate.

        :param timeout: The time to wait for the process to terminate in fractional seconds. Wait
                        forever by default.
        :returns: The process exit code if it has terminated.
        :raises: :class:`subprocess.TimeoutExpired`
        """

    @abstractmethod
    def kill(self) -> None:
        """Forcibly stop the underlying process."""

    @abstractmethod
    def terminate(self) -> None:
        """Request that the underlying process terminate."""

    @abstractmethod
    def poll(self) -> Optional[int]:
        """Return the exit code if the process has terminated, else None."""


class SubprocessProcessHandler(ProcessHandler):
    """A `ProcessHandler` that delegates directly to a subprocess.Popen object."""

    def __init__(self, process: subprocess.Popen) -> None:
        self._process = process

    def wait(self, timeout: Optional[float] = None) -> int:
        return self._process.wait(timeout=timeout)

    def kill(self) -> None:
        self._process.kill()

    def terminate(self) -> None:
        self._process.terminate()

    def poll(self) -> Optional[int]:
        # Popen.poll() returns None while the process is still running, so the
        # annotation is Optional[int] (the previous `-> int` was inaccurate).
        return self._process.poll()

    def communicate_teeing_stdout_and_stderr(self, stdin=None):
        """Just like subprocess.communicate, but tees stdout and stderr to both sys.std{out,err} and
        a buffer. Only operates on stdout/stderr if the Popen call sent them to subprocess.PIPE.

        :param stdin: Data to write to the stdin of the subprocess (bytes unless the Popen was
                      created in text mode).
        :return: (stdout, stderr) accumulated contents, or None for streams that were not PIPEd.
        """
        if stdin is not None and self._process.stdin is not None:
            self._process.stdin.write(stdin)
            # Close stdin so the child observes EOF: a child that reads stdin
            # to exhaustion would otherwise deadlock against our wait() below.
            self._process.stdin.close()

        def fork_tee(infile, outfile):
            # Stream was not PIPEd: nothing to tee; its slot in the result is None.
            if infile is None:
                return lambda: None

            queue = multiprocessing.Queue()
            process = multiprocessing.Process(target=_tee, args=(infile, outfile, queue.put))
            process.start()

            def join_and_get_output():
                process.join()
                return queue.get()

            return join_and_get_output

        stdout = fork_tee(self._process.stdout, sys.stdout)
        stderr = fork_tee(self._process.stderr, sys.stderr)
        self._process.wait()
        return stdout(), stderr()
def _tee(infile, outfile, return_function):
accumulator = io.BytesIO()
for line in iter(infile.readline, b""):
accumulator.write(line)
outfile.buffer.write(line)
infile.close()
return_function(accumulator.getvalue())
| 29.897959 | 100 | 0.650853 |
import io
import multiprocessing
import subprocess
import sys
from abc import ABC, abstractmethod
from typing import Optional
class ProcessHandler(ABC):
@abstractmethod
def wait(self, timeout: Optional[float] = None) -> int:
@abstractmethod
def kill(self) -> None:
pass
@abstractmethod
def terminate(self) -> None:
pass
@abstractmethod
def poll(self) -> int:
pass
class SubprocessProcessHandler(ProcessHandler):
def __init__(self, process: subprocess.Popen) -> None:
self._process = process
def wait(self, timeout: Optional[float] = None) -> int:
return self._process.wait(timeout=timeout)
def kill(self) -> None:
self._process.kill()
def terminate(self) -> None:
self._process.terminate()
def poll(self) -> int:
return self._process.poll()
def communicate_teeing_stdout_and_stderr(self, stdin=None):
if stdin is not None and self._process.stdin is not None:
self._process.stdin.write(stdin)
def fork_tee(infile, outfile):
if infile is None:
return lambda: None
queue = multiprocessing.Queue()
process = multiprocessing.Process(target=_tee, args=(infile, outfile, queue.put))
process.start()
def join_and_get_output():
process.join()
return queue.get()
return join_and_get_output
stdout = fork_tee(self._process.stdout, sys.stdout)
stderr = fork_tee(self._process.stderr, sys.stderr)
self._process.wait()
return stdout(), stderr()
def _tee(infile, outfile, return_function):
accumulator = io.BytesIO()
for line in iter(infile.readline, b""):
accumulator.write(line)
outfile.buffer.write(line)
infile.close()
return_function(accumulator.getvalue())
| true | true |
f71b2735ba192a6cc12f568c018d29bbdfde9f83 | 3,035 | py | Python | code/evaluate_DSTC2.py | avinashsai/GCN-SeA | 26968d8a71269581f7400293064314b5a18b5748 | [
"Apache-2.0"
] | 12 | 2019-09-06T03:48:36.000Z | 2021-04-12T08:36:45.000Z | code/evaluate_DSTC2.py | avinashsai/GCN-SeA | 26968d8a71269581f7400293064314b5a18b5748 | [
"Apache-2.0"
] | null | null | null | code/evaluate_DSTC2.py | avinashsai/GCN-SeA | 26968d8a71269581f7400293064314b5a18b5748 | [
"Apache-2.0"
] | 9 | 2019-06-30T07:23:01.000Z | 2020-10-16T10:05:37.000Z | from metrics import bleu, rouge
import argparse
def get_args():
    '''
    Parse command-line options:
    preds_path: The directory in which labels and predictions files are dumped after inference
    kb_path: Path to the knowledge-base triples file
    config_id: The config id mentioned in the labels and predictions filenames
    '''
    parser = argparse.ArgumentParser()
    for flag in ("--preds_path", "--kb_path", "--config_id"):
        parser.add_argument(flag)
    return parser.parse_args()
def read_results(path, num):
    """Load the reference and predicted responses dumped at inference time.

    Reads ``<path>/labels<num>.txt`` and ``<path>/predictions<num>.txt`` and
    returns (predictions, labels) as lists of newline-terminated strings.
    """
    labels_file = "{0}/labels{1}.txt".format(path, num)
    preds_file = "{0}/predictions{1}.txt".format(path, num)
    with open(labels_file, "r") as handle:
        references = handle.readlines()
    with open(preds_file, "r") as handle:
        predictions = handle.readlines()
    return predictions, references
def exact_match(p, l):
    """Print and return per-response accuracy: the fraction of predictions
    that equal the reference response exactly (string equality).

    :param p: list of predicted responses
    :param l: list of ground-truth responses, aligned with ``p``
    :return: accuracy in [0, 1]; 0.0 for empty input (the original raised
             ZeroDivisionError on an empty label list)
    """
    if not l:
        acc = 0.0
    else:
        # zip avoids the IndexError the original p[i1] lookup hit when the
        # prediction list was shorter than the label list.
        acc = sum(1 for pred, gold in zip(p, l) if pred == gold) / len(l)
    print("Per-Resp Acc: ", acc)
    return acc
def moses_bl_rouge(p, l):
    """Print corpus BLEU (moses multi-bleu) and ROUGE-1/2/L F/P/R scores for
    predictions *p* against references *l*."""
    bl = bleu.moses_multi_bleu(p, l)
    x = rouge.rouge(p, l)
    metric_pairs = (
        ('ROUGE1-F', 'rouge_1/f_score'), ('ROUGE1-P', 'rouge_1/p_score'),
        ('ROUGE1-R', 'rouge_1/r_score'), ('ROUGE2-F', 'rouge_2/f_score'),
        ('ROUGE2-P', 'rouge_2/p_score'), ('ROUGE2-R', 'rouge_2/r_score'),
        ('ROUGEL-F', 'rouge_l/f_score'), ('ROUGEL-P', 'rouge_l/p_score'),
        ('ROUGEL-R', 'rouge_l/r_score'),
    )
    lines = ['BLEU: %f' % bl]
    lines.extend('%s: %f' % (name, x[key]) for name, key in metric_pairs)
    print('\n'.join(lines))
def micro_compute_prf(gold, pred, global_entity_list):
    """Per-response entity counts for micro-averaged precision/recall/F1.

    :param gold: entities expected in the reference response (may repeat)
    :param pred: tokens of the predicted response
    :param global_entity_list: all entities known from the KB
    :return: (TP, FP, FN, count) where count is 1 when the reference contains
             at least one entity and 0 otherwise (responses with no gold
             entities are excluded from the average).
    """
    if not gold:
        return 0, 0, 0, 0
    tp = fp = fn = 0
    for gold_ent in gold:
        if gold_ent in pred:
            tp += 1
        else:
            fn += 1
    for pred_tok in set(pred):
        if pred_tok in global_entity_list and pred_tok not in gold:
            fp += 1
    return tp, fp, fn, 1
def ent_f1(preds, labels, kb_path):
    """Print and return the micro-averaged entity F1 over all responses.

    The entity vocabulary is collected from fields 1 and 3 of each
    space-separated line of the KB file at ``kb_path``; the gold entities
    of a response are its reference tokens found in that vocabulary.

    Args:
        preds: list of predicted response strings; must cover every index
            of ``labels``.
        labels: list of ground-truth response strings.
        kb_path: path to the knowledge-base triples file.

    Returns:
        float: micro F1 (also printed, matching the original output).

    Raises:
        ZeroDivisionError: when no true positives exist at all
            (preserved from the original behaviour).
    """
    with open(kb_path, 'r') as fp:
        kb = fp.readlines()
    # A set gives O(1) membership tests; the original scanned a sorted
    # list linearly.  (The original's unused ``les``/``mic_pred``
    # accumulators are dropped.)
    entities = set()
    for line in kb:
        fields = line.split(' ')
        entities.add(fields[1].strip())
        entities.add(fields[3].strip())
    all_TP = all_FP = all_FN = 0
    for i in range(len(labels)):
        # Gold entities: reference tokens that are known entities.
        gold = [tok for tok in labels[i].strip().split() if tok in entities]
        pred_tokens = preds[i].strip().split()
        tp, fp, fn, _ = micro_compute_prf(gold, pred_tokens, entities)
        all_TP += tp
        all_FP += fp
        all_FN += fn
    mic_prec = all_TP / float(all_TP + all_FP)
    mic_rec = all_TP / float(all_TP + all_FN)
    mic_f1 = 2 * mic_prec * mic_rec / float(mic_prec + mic_rec)
    print("Entity-F1:", mic_f1)
    return mic_f1
if __name__=='__main__':
    # Entry point: score one dumped inference run.  Loads the
    # labels/predictions files for the given config id and reports
    # per-response accuracy, BLEU/ROUGE and entity F1.
    args = get_args()
    result_path = args.preds_path
    kb_path = args.kb_path
    config_id = args.config_id
    print(config_id,"\n")
    preds,labels = read_results(result_path,config_id)
    exact_match(preds,labels)
    moses_bl_rouge(preds,labels)
    ent_f1(preds,labels,kb_path)
| 28.632075 | 235 | 0.559143 | from metrics import bleu, rouge
import argparse
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--preds_path")
parser.add_argument("--kb_path")
parser.add_argument("--config_id")
args = parser.parse_args()
return args
def read_results(path,num):
with open(path+"/labels"+str(num)+".txt","r") as fp:
l=fp.readlines()
with open(path+"/predictions"+str(num)+".txt","r") as fp:
p=fp.readlines()
return p,l
def exact_match(p,l):
c=0
for i1,i in enumerate(l):
if p[i1]==l[i1]:
c+=1
print("Per-Resp Acc: ",c/len(l))
def moses_bl_rouge(p,l):
bl = bleu.moses_multi_bleu(p,l)
x = rouge.rouge(p,l)
print('BLEU: %f\nROUGE1-F: %f\nROUGE1-P: %f\nROUGE1-R: %f\nROUGE2-F: %f\nROUGE2-P: %f\nROUGE2-R: %f\nROUGEL-F: %f\nROUGEL-P: %f\nROUGEL-R: %f'%(bl,x['rouge_1/f_score'],x['rouge_1/p_score'],x['rouge_1/r_score'],x['rouge_2/f_score'],
x['rouge_2/p_score'],x['rouge_2/r_score'],x['rouge_l/f_score'],x['rouge_l/p_score'],x['rouge_l/r_score']))
def micro_compute_prf(gold, pred, global_entity_list):
TP, FP, FN = 0, 0, 0
if len(gold)!= 0:
count = 1
for g in gold:
if g in pred:
TP += 1
else:
FN += 1
for p in set(pred):
if p in global_entity_list:
if p not in gold:
FP += 1
else:
count = 0
return TP,FP,FN,count
def ent_f1(preds,labels,kb_path):
with open(kb_path,'r') as fp:
kb=fp.readlines()
ent=[]
for i in kb:
triples = i.split(' ')
ent.append(triples[1].strip())
ent.append(triples[3].strip())
ent = set(ent)
ent_list = sorted(ent)
mic_pred=0
les=[]
all_TP=0
all_FP=0
all_FN=0
for i in range(len(labels)):
l = labels[i].strip().split()
le=[]
for j in l:
if j in ent_list:
le.append(j)
les.append(le)
p = preds[i].strip().split()
tp,fp,fn,c = micro_compute_prf(le,p,ent_list)
all_TP+=tp
all_FP+=fp
all_FN+=fn
mic_pred+=c
mic_prec = all_TP/float(all_TP+all_FP)
mic_rec = all_TP/float(all_TP + all_FN)
mic_f1=2 * mic_prec * mic_rec / float(mic_prec + mic_rec)
print("Entity-F1:",mic_f1)
if __name__=='__main__':
args = get_args()
result_path = args.preds_path
kb_path = args.kb_path
config_id = args.config_id
print(config_id,"\n")
preds,labels = read_results(result_path,config_id)
exact_match(preds,labels)
moses_bl_rouge(preds,labels)
ent_f1(preds,labels,kb_path)
| true | true |
f71b27f6c3b0d480c4f0a1707e1a22e41cb11bbd | 7,941 | py | Python | venv/lib/python3.8/site-packages/django/middleware/cache.py | Joshua-Barawa/My-Photos | adcaea48149c6b31e9559b045709d538d0b749bc | [
"PostgreSQL",
"Unlicense"
] | 1 | 2022-03-16T14:43:26.000Z | 2022-03-16T14:43:26.000Z | venv/lib/python3.8/site-packages/django/middleware/cache.py | Joshua-Barawa/My-Photos | adcaea48149c6b31e9559b045709d538d0b749bc | [
"PostgreSQL",
"Unlicense"
] | null | null | null | venv/lib/python3.8/site-packages/django/middleware/cache.py | Joshua-Barawa/My-Photos | adcaea48149c6b31e9559b045709d538d0b749bc | [
"PostgreSQL",
"Unlicense"
] | 4 | 2022-03-12T10:17:00.000Z | 2022-03-26T08:40:43.000Z | """
Cache middleware. If enabled, each Django-powered page will be cached based on
URL. The canonical way to enable cache middleware is to set
``UpdateCacheMiddleware`` as your first piece of middleware, and
``FetchFromCacheMiddleware`` as the last::
MIDDLEWARE = [
'django.middleware.cache.UpdateCacheMiddleware',
...
'django.middleware.cache.FetchFromCacheMiddleware'
]
This is counter-intuitive, but correct: ``UpdateCacheMiddleware`` needs to run
last during the response phase, which processes middleware bottom-up;
``FetchFromCacheMiddleware`` needs to run last during the request phase, which
processes middleware top-down.
The single-class ``CacheMiddleware`` can be used for some simple sites.
However, if any other piece of middleware needs to affect the cache key, you'll
need to use the two-part ``UpdateCacheMiddleware`` and
``FetchFromCacheMiddleware``. This'll most often happen when you're using
Django's ``LocaleMiddleware``.
More details about how the caching works:
* Only GET or HEAD-requests with status code 200 are cached.
* The number of seconds each page is stored for is set by the "max-age" section
of the response's "Cache-Control" header, falling back to the
CACHE_MIDDLEWARE_SECONDS setting if the section was not found.
* This middleware expects that a HEAD request is answered with the same response
headers exactly like the corresponding GET request.
* When a hit occurs, a shallow copy of the original response object is returned
from process_request.
* Pages will be cached based on the contents of the request headers listed in
the response's "Vary" header.
* This middleware also sets ETag, Last-Modified, Expires and Cache-Control
headers on the response object.
"""
from django.conf import settings
from django.core.cache import DEFAULT_CACHE_ALIAS, caches
from django.utils.cache import (
get_cache_key,
get_max_age,
has_vary_header,
learn_cache_key,
patch_response_headers,
)
from django.utils.deprecation import MiddlewareMixin
class UpdateCacheMiddleware(MiddlewareMixin):
    """
    Response-phase cache middleware that updates the cache if the response is
    cacheable.
    Must be used as part of the two-part update/fetch cache middleware.
    UpdateCacheMiddleware must be the first piece of middleware in MIDDLEWARE
    so that it'll get called last during the response phase.
    """
    def __init__(self, get_response):
        super().__init__(get_response)
        # Defaults come from settings; CacheMiddleware may override them.
        self.cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
        self.page_timeout = None
        self.key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
        self.cache_alias = settings.CACHE_MIDDLEWARE_ALIAS
        self.cache = caches[self.cache_alias]
    def _should_update_cache(self, request, response):
        """Return True when the request phase flagged this URL for caching."""
        return hasattr(request, "_cache_update_cache") and request._cache_update_cache
    def process_response(self, request, response):
        """Set the cache, if needed."""
        if not self._should_update_cache(request, response):
            # We don't need to update the cache, just return.
            return response
        if response.streaming or response.status_code not in (200, 304):
            return response
        # Don't cache responses that set a user-specific (and maybe
        # security-sensitive) cookie in response to a cookie-less request.
        if (
            not request.COOKIES
            and response.cookies
            and has_vary_header(response, "Cookie")
        ):
            return response
        # Don't cache a response marked 'Cache-Control: private'.
        if "private" in response.get("Cache-Control", ()):
            return response
        # Page timeout takes precedence over the "max-age" and the default
        # cache timeout.
        timeout = self.page_timeout
        if timeout is None:
            # The timeout from the "max-age" section of the "Cache-Control"
            # header takes precedence over the default cache timeout.
            timeout = get_max_age(response)
            if timeout is None:
                timeout = self.cache_timeout
            elif timeout == 0:
                # max-age was set to 0, don't cache.
                return response
        patch_response_headers(response, timeout)
        if timeout and response.status_code == 200:
            cache_key = learn_cache_key(
                request, response, timeout, self.key_prefix, cache=self.cache
            )
            if hasattr(response, "render") and callable(response.render):
                # Template responses are cached only once fully rendered.
                response.add_post_render_callback(
                    lambda r: self.cache.set(cache_key, r, timeout)
                )
            else:
                self.cache.set(cache_key, response, timeout)
        return response
class FetchFromCacheMiddleware(MiddlewareMixin):
    """
    Request-phase cache middleware that fetches a page from the cache.
    Must be used as part of the two-part update/fetch cache middleware.
    FetchFromCacheMiddleware must be the last piece of middleware in
    MIDDLEWARE so that it'll get called last during the request phase.
    """

    def __init__(self, get_response):
        super().__init__(get_response)
        alias = settings.CACHE_MIDDLEWARE_ALIAS
        self.key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
        self.cache_alias = alias
        self.cache = caches[alias]

    def process_request(self, request):
        """
        Check whether the page is already cached and return the cached
        version if available.
        """
        if request.method not in ("GET", "HEAD"):
            # Never serve cached pages for potentially mutating methods.
            request._cache_update_cache = False
            return None

        cache_key = get_cache_key(request, self.key_prefix, "GET", cache=self.cache)
        if cache_key is None:
            # Nothing learned about this URL yet: let the view run and
            # have UpdateCacheMiddleware populate the cache afterwards.
            request._cache_update_cache = True
            return None

        cached = self.cache.get(cache_key)
        if cached is None and request.method == "HEAD":
            # Fall back to a response cached specifically for HEAD.
            cache_key = get_cache_key(
                request, self.key_prefix, "HEAD", cache=self.cache
            )
            cached = self.cache.get(cache_key)

        if cached is None:
            request._cache_update_cache = True
            return None

        # Cache hit: serve it and skip the update phase.
        request._cache_update_cache = False
        return cached
class CacheMiddleware(UpdateCacheMiddleware, FetchFromCacheMiddleware):
    """
    Cache middleware that provides basic behavior for many simple sites.
    Also used as the hook point for the cache decorator, which is generated
    using the decorator-from-middleware utility.
    """

    def __init__(self, get_response, cache_timeout=None, page_timeout=None, **kwargs):
        super().__init__(get_response)
        # A kwarg that is present but None means "fall back to the system
        # default"; an absent kwarg means "keep the middleware default
        # already set by super().__init__".
        if "key_prefix" in kwargs:
            prefix = kwargs["key_prefix"]
            self.key_prefix = "" if prefix is None else prefix
        if "cache_alias" in kwargs:
            alias = kwargs["cache_alias"]
            if alias is None:
                alias = DEFAULT_CACHE_ALIAS
            self.cache_alias = alias
            self.cache = caches[alias]
        if cache_timeout is not None:
            self.cache_timeout = cache_timeout
        self.page_timeout = page_timeout
| 38.362319 | 86 | 0.673467 |
from django.conf import settings
from django.core.cache import DEFAULT_CACHE_ALIAS, caches
from django.utils.cache import (
get_cache_key,
get_max_age,
has_vary_header,
learn_cache_key,
patch_response_headers,
)
from django.utils.deprecation import MiddlewareMixin
class UpdateCacheMiddleware(MiddlewareMixin):
def __init__(self, get_response):
super().__init__(get_response)
self.cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
self.page_timeout = None
self.key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
self.cache_alias = settings.CACHE_MIDDLEWARE_ALIAS
self.cache = caches[self.cache_alias]
def _should_update_cache(self, request, response):
return hasattr(request, "_cache_update_cache") and request._cache_update_cache
def process_response(self, request, response):
if not self._should_update_cache(request, response):
return response
if response.streaming or response.status_code not in (200, 304):
return response
# Don't cache responses that set a user-specific (and maybe security
if (
not request.COOKIES
and response.cookies
and has_vary_header(response, "Cookie")
):
return response
if "private" in response.get("Cache-Control", ()):
return response
# Page timeout takes precedence over the "max-age" and the default
# cache timeout.
timeout = self.page_timeout
if timeout is None:
# The timeout from the "max-age" section of the "Cache-Control"
# header takes precedence over the default cache timeout.
timeout = get_max_age(response)
if timeout is None:
timeout = self.cache_timeout
elif timeout == 0:
# max-age was set to 0, don't cache.
return response
patch_response_headers(response, timeout)
if timeout and response.status_code == 200:
cache_key = learn_cache_key(
request, response, timeout, self.key_prefix, cache=self.cache
)
if hasattr(response, "render") and callable(response.render):
response.add_post_render_callback(
lambda r: self.cache.set(cache_key, r, timeout)
)
else:
self.cache.set(cache_key, response, timeout)
return response
class FetchFromCacheMiddleware(MiddlewareMixin):
def __init__(self, get_response):
super().__init__(get_response)
self.key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
self.cache_alias = settings.CACHE_MIDDLEWARE_ALIAS
self.cache = caches[self.cache_alias]
def process_request(self, request):
if request.method not in ("GET", "HEAD"):
request._cache_update_cache = False
return None
# try and get the cached GET response
cache_key = get_cache_key(request, self.key_prefix, "GET", cache=self.cache)
if cache_key is None:
request._cache_update_cache = True
return None # No cache information available, need to rebuild.
response = self.cache.get(cache_key)
# if it wasn't found and we are looking for a HEAD, try looking just for that
if response is None and request.method == "HEAD":
cache_key = get_cache_key(
request, self.key_prefix, "HEAD", cache=self.cache
)
response = self.cache.get(cache_key)
if response is None:
request._cache_update_cache = True
return None
request._cache_update_cache = False
return response
class CacheMiddleware(UpdateCacheMiddleware, FetchFromCacheMiddleware):
def __init__(self, get_response, cache_timeout=None, page_timeout=None, **kwargs):
super().__init__(get_response)
try:
key_prefix = kwargs["key_prefix"]
if key_prefix is None:
key_prefix = ""
self.key_prefix = key_prefix
except KeyError:
pass
try:
cache_alias = kwargs["cache_alias"]
if cache_alias is None:
cache_alias = DEFAULT_CACHE_ALIAS
self.cache_alias = cache_alias
self.cache = caches[self.cache_alias]
except KeyError:
pass
if cache_timeout is not None:
self.cache_timeout = cache_timeout
self.page_timeout = page_timeout
| true | true |
f71b286c1207f4d6dae4a96c65379f266e26d4b1 | 7,686 | py | Python | newrelic/hooks/framework_grpc.py | odidev/newrelic-python-agent | e6c4ddc158ab694dd7ff6bd75e54077d736674f1 | [
"Apache-2.0"
] | null | null | null | newrelic/hooks/framework_grpc.py | odidev/newrelic-python-agent | e6c4ddc158ab694dd7ff6bd75e54077d736674f1 | [
"Apache-2.0"
] | null | null | null | newrelic/hooks/framework_grpc.py | odidev/newrelic-python-agent | e6c4ddc158ab694dd7ff6bd75e54077d736674f1 | [
"Apache-2.0"
] | null | null | null | # Copyright 2010 New Relic, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import time
from newrelic.api.external_trace import ExternalTrace
from newrelic.api.web_transaction import WebTransactionWrapper
from newrelic.api.transaction import current_transaction
from newrelic.api.time_trace import record_exception
from newrelic.common.object_wrapper import wrap_function_wrapper
from newrelic.common.object_names import callable_name
def _get_uri_method(instance, *args, **kwargs):
    """Build the external-trace URI and method name for a gRPC callable.

    ``instance`` is a grpc multi-callable carrying the raw channel target
    and the fully-qualified method path, both as bytes.

    Returns:
        (uri, method) where uri is 'grpc://<target>/<method>'.
    """
    target = instance._channel.target().decode('utf-8')
    method = instance._method.decode('utf-8').lstrip('/')
    return 'grpc://{}/{}'.format(target, method), method
def _prepare_request(
        transaction, guid, request,
        timeout=None, metadata=None, *args, **kwargs):
    """Inject distributed-tracing headers into the outgoing call metadata.

    Returns:
        (args, kwargs) to forward to the wrapped gRPC call, with request,
        timeout and the augmented metadata re-packed positionally ahead of
        any remaining positional arguments.
    """
    outgoing = list(metadata) if metadata else []
    dt_payload = transaction._create_distributed_trace_data_with_guid(guid)
    outgoing += transaction._generate_distributed_trace_headers(dt_payload)
    return (request, timeout, outgoing) + args, kwargs
def _prepare_request_stream(
        transaction, guid, request_iterator, *args, **kwargs):
    # Streaming-request variant: the request iterator simply takes the
    # place of the single request message; header injection is shared.
    return _prepare_request(
        transaction, guid, request_iterator, *args, **kwargs)
def wrap_call(module, object_path, prepare):
    """Wrap a synchronous client multi-callable with an ExternalTrace.

    ``prepare`` injects distributed-trace headers into the call's
    metadata; the wrapped call then runs inside the open trace.
    """
    def _call_wrapper(wrapped, instance, args, kwargs):
        transaction = current_transaction()
        if transaction is None:
            # No active transaction: pass the call straight through.
            return wrapped(*args, **kwargs)
        uri, method = _get_uri_method(instance)
        with ExternalTrace('gRPC', uri, method):
            args, kwargs = prepare(transaction, None, *args, **kwargs)
            return wrapped(*args, **kwargs)
    wrap_function_wrapper(module, object_path, _call_wrapper)
def wrap_future(module, object_path, prepare):
    """Wrap a future/stream-returning client multi-callable.

    No trace is opened here; the trace parameters, a fresh guid and the
    start time are stashed as ``_nr_*`` attributes on the returned future
    so that wrap_result/wrap_next can record a backdated trace when the
    response (or an error) actually arrives.
    """
    def _future_wrapper(wrapped, instance, args, kwargs):
        transaction = current_transaction()
        if transaction is None:
            return wrapped(*args, **kwargs)
        guid = '%016x' % random.getrandbits(64)
        uri, method = _get_uri_method(instance)
        args, kwargs = prepare(transaction, guid, *args, **kwargs)
        future = wrapped(*args, **kwargs)
        future._nr_guid = guid
        future._nr_args = ('gRPC', uri, method)
        future._nr_start_time = time.time()
        # In non-streaming responses, result is typically called instead of
        # using the iterator. In streaming calls, the iterator is typically
        # used.
        return future
    wrap_function_wrapper(module, object_path, _future_wrapper)
def wrap_next(_wrapped, _instance, _args, _kwargs):
    """Wrap the response iterator's _next (streaming responses).

    Successful items flow through untouched; the stashed ExternalTrace is
    recorded only when iteration raises — presumably including normal
    stream termination surfacing as an exception (TODO confirm against
    grpc's rendezvous implementation).
    """
    _nr_args = getattr(_instance, '_nr_args', None)
    if not _nr_args:
        # Trace already recorded, or the call was never instrumented.
        return _wrapped(*_args, **_kwargs)
    try:
        return _wrapped(*_args, **_kwargs)
    except Exception:
        # Consume the stashed parameters so the trace is recorded once.
        delattr(_instance, '_nr_args')
        _nr_start_time = getattr(_instance, '_nr_start_time', 0.0)
        _nr_guid = getattr(_instance, '_nr_guid', None)
        with ExternalTrace(*_nr_args) as t:
            # Backdate the trace to when the call was issued.
            t.start_time = _nr_start_time or t.start_time
            t.guid = _nr_guid or t.guid
        raise
def wrap_result(_wrapped, _instance, _args, _kwargs):
    """Wrap result()/cancel() on the response object: record the stashed
    ExternalTrace around the outcome, whether it succeeds or raises."""
    _nr_args = getattr(_instance, '_nr_args', None)
    if not _nr_args:
        return _wrapped(*_args, **_kwargs)
    # Consume the stashed parameters so the trace is recorded only once.
    delattr(_instance, '_nr_args')
    _nr_start_time = getattr(_instance, '_nr_start_time', 0.0)
    _nr_guid = getattr(_instance, '_nr_guid', None)
    try:
        result = _wrapped(*_args, **_kwargs)
    except Exception:
        with ExternalTrace(*_nr_args) as t:
            # Backdate the trace to when the call was issued.
            t.start_time = _nr_start_time or t.start_time
            t.guid = _nr_guid or t.guid
        raise
    else:
        with ExternalTrace(*_nr_args) as t:
            t.start_time = _nr_start_time or t.start_time
            t.guid = _nr_guid or t.guid
        return result
def _bind_transaction_args(rpc_event, state, behavior, *args, **kwargs):
    """Pick the rpc_event and behavior out of the server-loop call args."""
    # The remaining parameters are accepted only for signature
    # compatibility with the wrapped server internals.
    del state, args, kwargs
    return rpc_event, behavior
def grpc_web_transaction(wrapped, instance, args, kwargs):
    """Run a server-side RPC handler inside a New Relic web transaction.

    Pulls the rpc_event/behavior pair out of the server-loop arguments,
    names the transaction after the handler, and attaches the call
    details (host, port, request path) plus invocation metadata as the
    request headers.
    """
    rpc_event, behavior = _bind_transaction_args(*args, **kwargs)
    behavior_name = callable_name(behavior)
    # Attribute names differ across grpc versions; try both spellings.
    call_details = (
        getattr(rpc_event, 'call_details', None) or
        getattr(rpc_event, 'request_call_details', None))
    metadata = (
        getattr(rpc_event, 'invocation_metadata', None) or
        getattr(rpc_event, 'request_metadata', None))
    # Bug fix: request_path was previously assigned only inside the
    # ``if call_details`` branch, raising UnboundLocalError whenever the
    # event carried no call details.
    host = port = request_path = None
    if call_details:
        try:
            # call_details.host is bytes (e.g. b'localhost:50051').
            host, port = call_details.host.split(b':', 1)
        except Exception:
            pass
        request_path = call_details.method
    return WebTransactionWrapper(
        wrapped,
        name=behavior_name,
        request_path=request_path,
        host=host,
        port=port,
        headers=metadata)(*args, **kwargs)
def _trailing_metadata(state, *args, **kwargs):
    """Extract the trailing metadata (response headers) from the RPC
    state; extra arguments are accepted for signature compatibility."""
    metadata = state.trailing_metadata
    return metadata
def _nr_wrap_status_code(wrapped, instance, args, kwargs):
    """Report the RPC's final status code and trailing metadata to the
    current transaction as the response status/headers."""
    status_code = wrapped(*args, **kwargs)
    response_headers = _trailing_metadata(*args, **kwargs)
    transaction = current_transaction()
    if transaction:
        transaction.process_response(status_code, response_headers)
    return status_code
def _nr_wrap_abort(wrapped, instance, args, kwargs):
    """Record the active exception before the server abort handling runs."""
    record_exception()
    return wrapped(*args, **kwargs)
def instrument_grpc__channel(module):
    """Instrument the grpc._channel client multi-callables.

    Synchronous callables are traced inline (wrap_call); future/stream
    callables stash trace state on the returned object (wrap_future), and
    the rendezvous result/iterator methods record the trace on completion.
    """
    client_hooks = (
        (wrap_call, '_UnaryUnaryMultiCallable.__call__', _prepare_request),
        (wrap_call, '_UnaryUnaryMultiCallable.with_call', _prepare_request),
        (wrap_future, '_UnaryUnaryMultiCallable.future', _prepare_request),
        (wrap_future, '_UnaryStreamMultiCallable.__call__', _prepare_request),
        (wrap_call, '_StreamUnaryMultiCallable.__call__', _prepare_request_stream),
        (wrap_call, '_StreamUnaryMultiCallable.with_call', _prepare_request_stream),
        (wrap_future, '_StreamUnaryMultiCallable.future', _prepare_request_stream),
        (wrap_future, '_StreamStreamMultiCallable.__call__', _prepare_request_stream),
    )
    for wrapper, object_path, prepare in client_hooks:
        wrapper(module, object_path, prepare)
    # Newer grpc releases renamed the response object; hook whichever
    # rendezvous class this grpc version provides.
    if hasattr(module, '_MultiThreadedRendezvous'):
        rendezvous_hooks = (
            ('_MultiThreadedRendezvous.result', wrap_result),
            ('_MultiThreadedRendezvous._next', wrap_next),
        )
    else:
        rendezvous_hooks = (
            ('_Rendezvous.result', wrap_result),
            ('_Rendezvous._next', wrap_next),
            ('_Rendezvous.cancel', wrap_result),
        )
    for object_path, handler in rendezvous_hooks:
        wrap_function_wrapper(module, object_path, handler)
def instrument_grpc_server(module):
    """Instrument grpc server internals: run handlers inside web
    transactions and report status codes and aborts."""
    server_hooks = (
        ('_unary_response_in_pool', grpc_web_transaction),
        ('_stream_response_in_pool', grpc_web_transaction),
        ('_completion_code', _nr_wrap_status_code),
        ('_abortion_code', _nr_wrap_status_code),
        ('_abort', _nr_wrap_abort),
    )
    for object_path, handler in server_hooks:
        wrap_function_wrapper(module, object_path, handler)
| 33.710526 | 76 | 0.683971 |
import random
import time
from newrelic.api.external_trace import ExternalTrace
from newrelic.api.web_transaction import WebTransactionWrapper
from newrelic.api.transaction import current_transaction
from newrelic.api.time_trace import record_exception
from newrelic.common.object_wrapper import wrap_function_wrapper
from newrelic.common.object_names import callable_name
def _get_uri_method(instance, *args, **kwargs):
target = instance._channel.target().decode('utf-8')
method = instance._method.decode('utf-8').lstrip('/')
uri = 'grpc://%s/%s' % (target, method)
return (uri, method)
def _prepare_request(
transaction, guid, request,
timeout=None, metadata=None, *args, **kwargs):
metadata = metadata and list(metadata) or []
dt_metadata = transaction._create_distributed_trace_data_with_guid(guid)
metadata.extend(
transaction._generate_distributed_trace_headers(dt_metadata)
)
args = (request, timeout, metadata) + args
return args, kwargs
def _prepare_request_stream(
transaction, guid, request_iterator, *args, **kwargs):
return _prepare_request(
transaction, guid, request_iterator, *args, **kwargs)
def wrap_call(module, object_path, prepare):
def _call_wrapper(wrapped, instance, args, kwargs):
transaction = current_transaction()
if transaction is None:
return wrapped(*args, **kwargs)
uri, method = _get_uri_method(instance)
with ExternalTrace('gRPC', uri, method):
args, kwargs = prepare(transaction, None, *args, **kwargs)
return wrapped(*args, **kwargs)
wrap_function_wrapper(module, object_path, _call_wrapper)
def wrap_future(module, object_path, prepare):
def _future_wrapper(wrapped, instance, args, kwargs):
transaction = current_transaction()
if transaction is None:
return wrapped(*args, **kwargs)
guid = '%016x' % random.getrandbits(64)
uri, method = _get_uri_method(instance)
args, kwargs = prepare(transaction, guid, *args, **kwargs)
future = wrapped(*args, **kwargs)
future._nr_guid = guid
future._nr_args = ('gRPC', uri, method)
future._nr_start_time = time.time()
return future
wrap_function_wrapper(module, object_path, _future_wrapper)
def wrap_next(_wrapped, _instance, _args, _kwargs):
_nr_args = getattr(_instance, '_nr_args', None)
if not _nr_args:
return _wrapped(*_args, **_kwargs)
try:
return _wrapped(*_args, **_kwargs)
except Exception:
delattr(_instance, '_nr_args')
_nr_start_time = getattr(_instance, '_nr_start_time', 0.0)
_nr_guid = getattr(_instance, '_nr_guid', None)
with ExternalTrace(*_nr_args) as t:
t.start_time = _nr_start_time or t.start_time
t.guid = _nr_guid or t.guid
raise
def wrap_result(_wrapped, _instance, _args, _kwargs):
_nr_args = getattr(_instance, '_nr_args', None)
if not _nr_args:
return _wrapped(*_args, **_kwargs)
delattr(_instance, '_nr_args')
_nr_start_time = getattr(_instance, '_nr_start_time', 0.0)
_nr_guid = getattr(_instance, '_nr_guid', None)
try:
result = _wrapped(*_args, **_kwargs)
except Exception:
with ExternalTrace(*_nr_args) as t:
t.start_time = _nr_start_time or t.start_time
t.guid = _nr_guid or t.guid
raise
else:
with ExternalTrace(*_nr_args) as t:
t.start_time = _nr_start_time or t.start_time
t.guid = _nr_guid or t.guid
return result
def _bind_transaction_args(rpc_event, state, behavior, *args, **kwargs):
return rpc_event, behavior
def grpc_web_transaction(wrapped, instance, args, kwargs):
rpc_event, behavior = _bind_transaction_args(*args, **kwargs)
behavior_name = callable_name(behavior)
call_details = (
getattr(rpc_event, 'call_details', None) or
getattr(rpc_event, 'request_call_details', None))
metadata = (
getattr(rpc_event, 'invocation_metadata', None) or
getattr(rpc_event, 'request_metadata', None))
host = port = None
if call_details:
try:
host, port = call_details.host.split(b':', 1)
except Exception:
pass
request_path = call_details.method
return WebTransactionWrapper(
wrapped,
name=behavior_name,
request_path=request_path,
host=host,
port=port,
headers=metadata)(*args, **kwargs)
def _trailing_metadata(state, *args, **kwargs):
return state.trailing_metadata
def _nr_wrap_status_code(wrapped, instance, args, kwargs):
status_code = wrapped(*args, **kwargs)
response_headers = _trailing_metadata(*args, **kwargs)
transaction = current_transaction()
if transaction:
transaction.process_response(status_code, response_headers)
return status_code
def _nr_wrap_abort(wrapped, instance, args, kwargs):
record_exception()
return wrapped(*args, **kwargs)
def instrument_grpc__channel(module):
wrap_call(module, '_UnaryUnaryMultiCallable.__call__',
_prepare_request)
wrap_call(module, '_UnaryUnaryMultiCallable.with_call',
_prepare_request)
wrap_future(module, '_UnaryUnaryMultiCallable.future',
_prepare_request)
wrap_future(module, '_UnaryStreamMultiCallable.__call__',
_prepare_request)
wrap_call(module, '_StreamUnaryMultiCallable.__call__',
_prepare_request_stream)
wrap_call(module, '_StreamUnaryMultiCallable.with_call',
_prepare_request_stream)
wrap_future(module, '_StreamUnaryMultiCallable.future',
_prepare_request_stream)
wrap_future(module, '_StreamStreamMultiCallable.__call__',
_prepare_request_stream)
if hasattr(module, '_MultiThreadedRendezvous'):
wrap_function_wrapper(module, '_MultiThreadedRendezvous.result',
wrap_result)
wrap_function_wrapper(module, '_MultiThreadedRendezvous._next',
wrap_next)
else:
wrap_function_wrapper(module, '_Rendezvous.result',
wrap_result)
wrap_function_wrapper(module, '_Rendezvous._next',
wrap_next)
wrap_function_wrapper(module, '_Rendezvous.cancel',
wrap_result)
def instrument_grpc_server(module):
wrap_function_wrapper(module, '_unary_response_in_pool',
grpc_web_transaction)
wrap_function_wrapper(module, '_stream_response_in_pool',
grpc_web_transaction)
wrap_function_wrapper(module, '_completion_code',
_nr_wrap_status_code)
wrap_function_wrapper(module, '_abortion_code',
_nr_wrap_status_code)
wrap_function_wrapper(module, '_abort',
_nr_wrap_abort)
| true | true |
f71b28ebefb77cb9a3e1c49a1442eb967f6d40ea | 2,038 | py | Python | pytorch_lightning/plugins/training_type/sharded_spawn.py | peblair/pytorch-lightning | e676ff96b16224331297dbd0e5ecd5cf364965b8 | [
"Apache-2.0"
] | 1 | 2021-02-12T04:15:31.000Z | 2021-02-12T04:15:31.000Z | pytorch_lightning/plugins/training_type/sharded_spawn.py | peblair/pytorch-lightning | e676ff96b16224331297dbd0e5ecd5cf364965b8 | [
"Apache-2.0"
] | null | null | null | pytorch_lightning/plugins/training_type/sharded_spawn.py | peblair/pytorch-lightning | e676ff96b16224331297dbd0e5ecd5cf364965b8 | [
"Apache-2.0"
] | null | null | null | from typing import Optional
from pytorch_lightning.core.optimizer import is_lightning_optimizer
from pytorch_lightning.plugins.training_type.ddp_spawn import DDPSpawnPlugin
from pytorch_lightning.utilities import _FAIRSCALE_AVAILABLE, rank_zero_only
if _FAIRSCALE_AVAILABLE:
from fairscale.optim import OSS
from pytorch_lightning.overrides.fairscale import LightningShardedDataParallel
class DDPSpawnShardedPlugin(DDPSpawnPlugin):
    """DDP-spawn training plugin that shards optimizer state using
    fairscale's OSS (ZeRO) optimizer wrapper."""
    def configure_ddp(self):
        """Swap the optimizers for OSS and wrap the model for sharded DDP."""
        self._wrap_optimizers()
        self._model = LightningShardedDataParallel(
            self.model, sharded_optimizer=self.lightning_module.trainer.optimizers
        )
    def _reinit_optimizers_with_oss(self):
        """Re-create each trainer optimizer as a fairscale OSS optimizer
        in place (by index), preserving its param groups and defaults."""
        optimizers = self.lightning_module.trainer.optimizers
        for x, optimizer in enumerate(optimizers):
            if is_lightning_optimizer(optimizer):
                # Unwrap LightningOptimizer to reach the raw optimizer.
                optimizer = optimizer._optimizer
            if not isinstance(optimizer, OSS):
                optim_class = type(optimizer)
                zero_optimizer = OSS(params=optimizer.param_groups, optim=optim_class, **optimizer.defaults)
                optimizers[x] = zero_optimizer
                # Drop the local reference to the replaced optimizer.
                del optimizer
        trainer = self.lightning_module.trainer
        trainer.optimizers = trainer.convert_to_lightning_optimizers(optimizers)
    def _wrap_optimizers(self):
        """Wrap optimizers with OSS, except when only testing."""
        trainer = self.model.trainer
        if trainer.testing:
            return
        self._reinit_optimizers_with_oss()
    def optimizer_state(self, optimizer: 'OSS') -> Optional[dict]:
        """Return the full optimizer state dict.

        OSS shards state across ranks, so it is consolidated first; the
        read itself is rank-zero-only (other ranks presumably get None
        from the @rank_zero_only decorator).
        """
        if is_lightning_optimizer(optimizer):
            optimizer = optimizer._optimizer
        if isinstance(optimizer, OSS):
            optimizer.consolidate_state_dict()
        return self._optim_state_dict(optimizer)
    @rank_zero_only
    def _optim_state_dict(self, optimizer):
        """
        Retrieves state dict only on rank 0, which contains the entire optimizer state after calling
        :meth:`consolidate_state_dict`.
        """
        return optimizer.state_dict()
| 37.054545 | 108 | 0.707066 | from typing import Optional
from pytorch_lightning.core.optimizer import is_lightning_optimizer
from pytorch_lightning.plugins.training_type.ddp_spawn import DDPSpawnPlugin
from pytorch_lightning.utilities import _FAIRSCALE_AVAILABLE, rank_zero_only
if _FAIRSCALE_AVAILABLE:
from fairscale.optim import OSS
from pytorch_lightning.overrides.fairscale import LightningShardedDataParallel
class DDPSpawnShardedPlugin(DDPSpawnPlugin):
def configure_ddp(self):
self._wrap_optimizers()
self._model = LightningShardedDataParallel(
self.model, sharded_optimizer=self.lightning_module.trainer.optimizers
)
def _reinit_optimizers_with_oss(self):
optimizers = self.lightning_module.trainer.optimizers
for x, optimizer in enumerate(optimizers):
if is_lightning_optimizer(optimizer):
optimizer = optimizer._optimizer
if not isinstance(optimizer, OSS):
optim_class = type(optimizer)
zero_optimizer = OSS(params=optimizer.param_groups, optim=optim_class, **optimizer.defaults)
optimizers[x] = zero_optimizer
del optimizer
trainer = self.lightning_module.trainer
trainer.optimizers = trainer.convert_to_lightning_optimizers(optimizers)
def _wrap_optimizers(self):
trainer = self.model.trainer
if trainer.testing:
return
self._reinit_optimizers_with_oss()
def optimizer_state(self, optimizer: 'OSS') -> Optional[dict]:
if is_lightning_optimizer(optimizer):
optimizer = optimizer._optimizer
if isinstance(optimizer, OSS):
optimizer.consolidate_state_dict()
return self._optim_state_dict(optimizer)
@rank_zero_only
def _optim_state_dict(self, optimizer):
return optimizer.state_dict()
| true | true |
f71b296b1a35ac64d40aa0c9ca07717a2e1e1b1b | 17,693 | py | Python | models/pages.py | tobiassernhede/multi_user_blog | c657c5dacdab7b04cf226f75a085e8ac5a1d54a2 | [
"MIT"
] | null | null | null | models/pages.py | tobiassernhede/multi_user_blog | c657c5dacdab7b04cf226f75a085e8ac5a1d54a2 | [
"MIT"
] | null | null | null | models/pages.py | tobiassernhede/multi_user_blog | c657c5dacdab7b04cf226f75a085e8ac5a1d54a2 | [
"MIT"
] | null | null | null | # Split up the pages functionality in separate file to make the code
# easier to read
import os
import re
import webapp2
import jinja2
import json
from google.appengine.ext import ndb
from google.appengine.api import images
# Importing local .py files
from models.users import User, users_key, make_secure_val, check_secure_val
from models.posts import Post, blog_key
from models.comments import Comment, comment_key
from models.likes import Likes
# Jinja2 setup: templates live in ../templates relative to this file.
# autoescape=True means template variables are HTML-escaped by default.
template_dir = os.path.join(os.path.dirname(__file__), '..', 'templates')
jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir),
                               autoescape=True)
class Handler(webapp2.RequestHandler):
    """Base request handler: Jinja2 rendering plus signed-cookie sessions.

    Every concrete handler inherits from this; `self.user` is resolved on
    each request from the signed `user_id` cookie (None when logged out).
    """
    def write(self, *a, **kw):
        # Write raw output straight to the HTTP response body.
        self.response.out.write(*a, **kw)
    def render_str(self, template, **params):
        # Render a template to a string; the logged-in user (or None) is
        # always injected as 'user' so templates can show login state.
        params['user'] = self.user
        t = jinja_env.get_template(template)
        return t.render(params)
    def render(self, template, **kw):
        # Render a template and write it as the response.
        self.write(self.render_str(template, **kw))
    def set_secure_cookie(self, name, val):
        # Store val in a cookie signed via make_secure_val (users.py).
        cookie_val = make_secure_val(val)
        self.response.headers.add_header(
            'Set-Cookie',
            '%s=%s; Path=/' % (name, cookie_val))
    def read_secure_cookie(self, name):
        # Return the cookie's value only if its signature verifies.
        cookie_val = self.request.cookies.get(name)
        return cookie_val and check_secure_val(cookie_val)
    def login(self, user):
        # Record the user's datastore id in the signed session cookie.
        self.set_secure_cookie('user_id', str(user.key.id()))
    def logout(self):
        # Blank the session cookie.
        self.response.headers.add_header('Set-Cookie', 'user_id=; Path=/')
    def initialize(self, *a, **kw):
        # Runs on every request: resolve the session cookie to self.user.
        webapp2.RequestHandler.initialize(self, *a, **kw)
        uid = self.read_secure_cookie('user_id')
        self.user = uid and User.by_id(int(uid))
class MainPage(Handler):
    """Front page: lists every blog post, newest first."""

    def get(self):
        # All posts, ordered by creation date descending.
        recent_posts = Post.query().order(-Post.created).fetch()
        self.render('index.html', posts=recent_posts)
#### User Pages ####
# Form-field validators shared by the registration and edit-profile pages.
USER_RE = re.compile(r"^[a-zA-Z0-9_-]{3,20}$")
PASS_RE = re.compile(r"^.{3,20}$")
EMAIL_RE = re.compile(r'^[\S]+@[\S]+\.[\S]+$')


def valid_username(username):
    """Truthy when *username* is 3-20 chars of letters, digits, '_' or '-'."""
    if not username:
        return username
    return USER_RE.match(username)


def valid_password(password):
    """Truthy when *password* is between 3 and 20 characters long."""
    if not password:
        return password
    return PASS_RE.match(password)


def valid_email(email):
    """Truthy when *email* is empty (the field is optional) or looks like x@y.z."""
    if not email:
        return True
    return EMAIL_RE.match(email)
class RegisterPage(Handler):
    """Signup form: validates the submitted fields and creates a User."""

    def get(self):
        # Already-authenticated visitors go straight to their profile.
        if self.user:
            self.redirect("/profile/" + self.user.name)
        else:
            self.render("register.html")

    def post(self):
        have_error = False
        self.username = self.request.get('username')
        self.password = self.request.get('password')
        self.verify = self.request.get('verify')
        self.email = self.request.get('email')
        self.description = self.request.get('description')
        self.profile_img = self.request.get('profile_img')
        self.error_msg = []  # collects every validation error for the template
        self.params = dict(username=self.username,
                           email=self.email, description=self.description)
        if not valid_username(self.username):
            self.error_msg.extend(["That's not a valid username."])
            have_error = True
        if not valid_password(self.password):
            self.error_msg.extend(["That wasn't a valid password."])
            have_error = True
        elif self.password != self.verify:
            self.error_msg.extend(["Your passwords didn't match."])
            have_error = True
        if not valid_email(self.email):
            # BUG FIX: was `self.serror_msg`, which raised AttributeError
            # whenever an invalid email was submitted.
            self.error_msg.extend(["That's not a valid email."])
            have_error = True
        if have_error:
            # Re-render the form with the collected error messages.
            self.params['error_msg'] = self.error_msg
            self.render('register.html', **self.params)
        else:
            self.done()

    def done(self):
        """Create the account unless the username is already taken."""
        u = User.by_name(self.username)
        if u:
            self.error_msg.extend(["That user already exists."])
            self.params['error_msg'] = self.error_msg
            self.render('register.html', **self.params)
        else:
            u = User.register(
                self.username, self.password, self.email,
                self.description, self.profile_img)
            u.put()
            self.login(u)
            self.redirect('/profile/' + u.name)
class ProfilePage(Handler):
    """Public profile page: a user's details plus the posts they authored."""

    def get(self, user_profile):  # username captured from the URL
        profile_owner = User.by_name(user_profile)
        if not profile_owner:
            # Unknown username -> 404.
            self.response.set_status(404)
            self.render("404.html")
            return
        # Posts authored by this user, newest first.
        user_posts = (Post.query()
                      .filter(Post.user_id == profile_owner.key.id())
                      .order(-Post.created)
                      .fetch())
        self.render(
            'profile.html', user_profile=profile_owner, posts=user_posts)
class EditProfilePage(Handler):
    """Form letting the logged-in user edit their own profile."""

    def get(self):
        # Only your own profile can be edited, so the only check needed is
        # that somebody is logged in.
        if self.user:
            user = User.by_id(int(self.user.key.id()))
            self.render("edit-profile.html", user=user)
        else:
            self.redirect("/login")

    def post(self):
        if not self.user:
            self.redirect("/login")
            return
        have_error = False
        self.username = self.request.get('username')
        self.password = self.request.get('password')
        self.verify = self.request.get('verify')
        self.email = self.request.get('email')
        self.description = self.request.get('description')
        self.profile_img = self.request.get('profile_img')
        self.delete_profile_img = self.request.get('delete_profile_img')
        self.user_id = self.user.key.id()
        self.error_msg = []
        self.params = dict(username=self.username,
                           email=self.email, description=self.description)
        if not valid_username(self.username):
            self.error_msg.extend(["That's not a valid username."])
            have_error = True
        # Password is optional on edit: validate only when one was entered.
        if self.password:
            if not valid_password(self.password):
                self.error_msg.extend(["That wasn't a valid password."])
                have_error = True
            elif self.password != self.verify:
                self.error_msg.extend(["Your passwords didn't match."])
                have_error = True
        if not valid_email(self.email):
            # BUG FIX: was `self.serror_msg`, which raised AttributeError
            # whenever an invalid email was submitted.
            self.error_msg.extend(["That's not a valid email."])
            have_error = True
        if have_error:
            # NOTE(review): this re-renders register.html rather than
            # edit-profile.html — looks like a copy/paste leftover; confirm
            # against the templates before changing.
            self.params['error_msg'] = self.error_msg
            self.render('register.html', **self.params)
        else:
            self.done()

    def done(self):
        """Apply the update unless the new username belongs to someone else."""
        u = User.by_name(self.username)
        if u and not self.username == self.user.name:
            self.error_msg.extend(["That user already exists."])
            self.params['error_msg'] = self.error_msg
            self.render('register.html', **self.params)
        else:
            user_update = User.update(self.username, self.password, self.email,
                                      self.description, self.profile_img,
                                      self.delete_profile_img, self.user_id)
            self.redirect('/profile/' + self.user.name)
class LoginPage(Handler):
    """Login form; on success sets the session cookie and goes to /blog."""

    def get(self):
        self.render('login.html')

    def post(self):
        name = self.request.get('username')
        pw = self.request.get('password')
        account = User.login(name, pw)
        if not account:
            # Single generic message — no hint about which field was wrong.
            self.render('login.html', error='Invalid login')
            return
        self.login(account)
        self.redirect('/blog')
class Logout(Handler):
    """Clears the session cookie and returns to the front page."""

    def get(self):
        self.logout()  # Handler.logout() blanks the user_id cookie
        self.redirect('/')
#### Blog Pages ####
class BlogPage(Handler):
    """Bare /blog has no listing of its own; send visitors to the front page."""

    def get(self):
        self.redirect('/')
class PostPage(Handler):
    """Single-post view with its comments and the comment form."""

    def get(self, post_id):  # numeric post id captured from the URL
        key = ndb.Key('Post', int(post_id), parent=blog_key())
        post = key.get()
        # Set when CommentPost redirects back here after a rejected comment.
        comment_error = self.request.get('comment_error')
        if not post:
            # Authors can delete their posts, so stale links 404.
            self.response.set_status(404)
            self.render("404.html")
            return
        # Package each comment as a dict for simpler template rendering.
        comment_output = []
        for c in Comment.by_post_id(int(post_id)):
            author_name = User.username_by_id(int(c.user_id)) or "Deleted User"
            comment_output.append(dict(content=c.content,
                                       created=c.created,
                                       user_name=author_name,
                                       comment_id=c.key.id()))
        author = User.by_id(post.user_id)
        self.render(
            "post.html", post=post, author=author,
            comment_output=comment_output, comment_error=comment_error)
class CreatePostPage(Handler):
    """Form for authoring a new blog post (login required)."""

    def get(self):
        if self.user:
            self.render('create-post.html')
        else:
            self.redirect('/login')

    def post(self):
        if not self.user:
            self.redirect('/login')
            # BUG FIX: without this return the handler fell through and
            # crashed on `self.user.key.id()` for anonymous submissions.
            return
        subject = self.request.get('subject')
        content = self.request.get('content')
        user_id = self.user.key.id()
        featured_img = self.request.get('featured_img')
        # Validation/error handling lives inside Post.create (posts.py).
        post = Post.create(subject, content, featured_img, user_id)
        if post.has_error:
            # Show the form again with the error messages.
            params = dict(
                subject=subject, content=content, error_msg=post.error_msg)
            self.render('create-post.html', **params)
        else:
            self.redirect('/blog/%s' % str(post.p.key.id()))
class EditPost(Handler):
    """Lets a post's author change its subject, content and featured image."""

    def get(self, post_id):
        if not self.user:
            self.redirect('/login')
            return
        post = Post.by_id(int(post_id))
        if post.user_id != self.user.key.id():
            # Only the author may edit; everyone else sees the post itself.
            self.redirect('/blog/' + post_id)
            return
        self.render("edit-post.html", post=post)

    def post(self, post_id):
        if not self.user:
            self.redirect('/login')
            return
        subject = self.request.get('subject')
        content = self.request.get('content')
        # The edit form additionally allows removing the current image.
        delete_featured_img = self.request.get('delete_featured_img')
        featured_img = self.request.get('featured_img')
        # Validation/error handling lives inside Post.update (posts.py).
        post = Post.update(
            int(post_id), subject, content, featured_img, delete_featured_img)
        if not post.has_error:
            # Redirect to the updated post.
            self.redirect('/blog/%s' % str(post.p.key.id()))
        else:
            # Show the form again with the error messages.
            self.render('edit-post.html',
                        subject=subject, content=content,
                        error_msg=post.error_msg)
class DeletePost(Handler):
    """Deletes a post, provided the requester is its author."""

    def get(self, post_id):
        if not self.user:  # must be logged in
            self.redirect('/login')
            return
        post = Post.by_id(int(post_id))
        if post.user_id != self.user.key.id():
            # Not the author: bounce back to the post unchanged.
            self.redirect('/blog/' + post_id)
            return
        post.key.delete()
        self.redirect('/profile/' + self.user.name)
class DeleteComment(Handler):
    """Deletes a comment, provided the requester is its author."""

    def get(self, comment_id, post_id):
        if not self.user:  # must be logged in
            self.redirect('/login')
            return
        # BUG FIX: removed a leftover debug `self.write(comment_id)` that
        # dumped the raw comment id into the response before redirecting.
        comment = Comment.get_by_id(int(comment_id), parent=comment_key())
        # Only the comment's author may delete it; in every case we return
        # to the post's comment list.
        if comment and comment.user_id == self.user.key.id():
            comment.key.delete()
        self.redirect('/blog/' + post_id + '#comments-list')
class CommentPost(Handler):
    """Handles comment-form submissions for a post (login required)."""

    def post(self, post_id):
        if not self.user:
            # BUG FIX: was `self.rediret(...)`, which raised AttributeError
            # for anonymous submissions instead of redirecting to login.
            self.redirect('/login')
            return
        content = self.request.get('comment')
        user_id = self.user.key.id()
        comment = Comment.create(content, post_id, user_id)
        if comment.has_error:
            # PostPage shows the error when comment_error is set.
            self.redirect(
                "/blog/" + post_id + "?comment_error=true#commentform")
        else:
            self.redirect('/blog/%s#%s' % (str(post_id), "comments-list"))
class LikePost(Handler):
    """AJAX endpoint: records a like for a post and replies with JSON.

    Response body: {'logged_in': bool, 'response': ..., 'error': {...}}.
    """
    def get(self, post_id):
        key = ndb.Key('Post', int(post_id), parent=blog_key())
        post = key.get()
        error = dict()   # filled with has_error/error_msg on failure
        response = None  # filled from Likes.add_like on success
        logged_in = False
        if post:
            author_id = post.user_id
            if self.user:
                logged_in = True
                if author_id == self.user.key.id():
                    # Authors cannot like their own posts.
                    error['has_error'] = True
                    error['error_msg'] = "Can't like your own post"
                else:
                    add_like = Likes.add_like(int(post_id), self.user.key.id())
                    response = add_like.response
        else:
            error['has_error'] = True
            error['error_msg'] = "No post found"
        # Plain JSON body consumed by the front-end like button.
        self.write(
            json.dumps(({'logged_in': logged_in,
                         'response': response, 'error': error})))
class MissingPage(Handler):
    """Catch-all route: renders the 404 template for unknown URLs."""

    def get(self):
        self.response.set_status(404)
        self.render("404.html")
class RouteProfile(Handler):
    """/profile without a username has no page; go to the front page."""

    def get(self):
        self.redirect('/')
class Image(Handler):
    """Serves uploaded images.

    Two image types exist: a post's featured image (`featured_img`) and a
    user's profile image (`profile_img`). The query string selects which
    entity to load, e.g. /img?id=<entity-id>&type=featured_img.
    """

    def get(self):
        img_id = self.request.get('id')
        img_type = self.request.get('type')
        # BUG FIX: img_key was unbound (NameError) when img_type matched
        # neither expected value; initialize it so the check below is safe.
        img_key = None
        if img_id.isdigit():
            if img_type == 'featured_img':
                img_key = ndb.Key('Post', int(img_id), parent=blog_key())
            elif img_type == "profile_img":
                img_key = ndb.Key('User', int(img_id), parent=users_key())
            if img_key:
                img = img_key.get()
                if img_type == "featured_img":
                    if img.featured_img:
                        self.response.headers['Content-Type'] = 'image/png'
                        self.response.out.write(img.featured_img)
                        return
                elif img_type == "profile_img":
                    if img.profile_img:
                        self.response.headers['Content-Type'] = 'image/png'
                        self.response.out.write(img.profile_img)
                        return
        # Bad id, unknown type or missing image -> 404.
        self.response.set_status(404)
        self.render("404.html")
# URL routing table: maps each URL pattern to its request handler class.
# Regex capture groups become positional arguments to the handlers' get/post.
appLoader = webapp2.WSGIApplication([('/', MainPage),
                                     ('/register', RegisterPage),
                                     ('/login', LoginPage),
                                     ('/logout', Logout),
                                     ('/profile', RouteProfile),
                                     ('/profile/(\w+)', ProfilePage),
                                     ('/edit-profile', EditProfilePage),
                                     ('/create-post', CreatePostPage),
                                     ('/blog/([0-9]+)/edit', EditPost),
                                     ('/blog/([0-9]+)/delete', DeletePost),
                                     ('/comment/([0-9]+)', CommentPost),
                                     ('/blog/([0-9]+)/like', LikePost),
                                     ('/comment/([0-9]+)/([0-9]+)/delete',
                                      DeleteComment),
                                     ('/blog', BlogPage),
                                     ('/blog/([0-9]+)', PostPage),
                                     ('/img', Image),
                                     ('/.*', MissingPage)
                                     ],
                                    debug=True)
| 33.50947 | 92 | 0.562652 |
import os
import re
import webapp2
import jinja2
import json
from google.appengine.ext import ndb
from google.appengine.api import images
from models.users import User, users_key, make_secure_val, check_secure_val
from models.posts import Post, blog_key
from models.comments import Comment, comment_key
from models.likes import Likes
template_dir = os.path.join(os.path.dirname(__file__), '..', 'templates')
jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir),
autoescape=True)
class Handler(webapp2.RequestHandler):
def write(self, *a, **kw):
self.response.out.write(*a, **kw)
def render_str(self, template, **params):
params['user'] = self.user
t = jinja_env.get_template(template)
return t.render(params)
def render(self, template, **kw):
self.write(self.render_str(template, **kw))
def set_secure_cookie(self, name, val):
cookie_val = make_secure_val(val)
self.response.headers.add_header(
'Set-Cookie',
'%s=%s; Path=/' % (name, cookie_val))
def read_secure_cookie(self, name):
cookie_val = self.request.cookies.get(name)
return cookie_val and check_secure_val(cookie_val)
def login(self, user):
self.set_secure_cookie('user_id', str(user.key.id()))
def logout(self):
self.response.headers.add_header('Set-Cookie', 'user_id=; Path=/')
def initialize(self, *a, **kw):
webapp2.RequestHandler.initialize(self, *a, **kw)
uid = self.read_secure_cookie('user_id')
self.user = uid and User.by_id(int(uid))
class MainPage(Handler):
def get(self):
posts = Post.query()
posts = posts.order(-Post.created)
posts = posts.fetch()
self.render('index.html', posts=posts)
rname(username):
return username and USER_RE.match(username)
PASS_RE = re.compile(r"^.{3,20}$")
def valid_password(password):
return password and PASS_RE.match(password)
EMAIL_RE = re.compile(r'^[\S]+@[\S]+\.[\S]+$')
def valid_email(email):
return not email or EMAIL_RE.match(email)
class RegisterPage(Handler):
def get(self):
if self.user:
self.redirect("/profile/" + self.user.name)
else:
self.render("register.html")
def post(self):
have_error = False
self.username = self.request.get('username')
self.password = self.request.get('password')
self.verify = self.request.get('verify')
self.email = self.request.get('email')
self.description = self.request.get('description')
self.profile_img = self.request.get('profile_img')
self.error_msg = []
self.params = dict(username=self.username,
email=self.email, description=self.description)
if not valid_username(self.username):
self.error_msg.extend(["That's not a valid username."])
have_error = True
if not valid_password(self.password):
self.error_msg.extend(["That wasn't a valid password."])
have_error = True
elif self.password != self.verify:
self.error_msg.extend(["Your passwords didn't match."])
have_error = True
if not valid_email(self.email):
self.serror_msg.extend(["That's not a valid email."])
have_error = True
if have_error:
self.params['error_msg'] = self.error_msg
self.render('register.html', **self.params)
else:
self.done()
def done(self):
u = User.by_name(self.username)
if u:
self.error_msg.extend(["That user already exists."])
self.params['error_msg'] = self.error_msg
self.render('register.html', **self.params)
else:
u = User.register(
self.username, self.password, self.email,
self.description, self.profile_img)
u.put()
self.login(u)
self.redirect('/profile/' + u.name)
class ProfilePage(Handler):
def get(self, user_profile): # fetching the username from the uri
# get the profile page of the user by name
current_user_profile = User.by_name(user_profile)
if not current_user_profile:
self.response.set_status(404)
self.render("404.html")
return
# run a query of all the posts this user has made
posts = Post.query()
posts = posts.filter(Post.user_id == current_user_profile.key.id())
posts = posts.order(-Post.created)
posts = posts.fetch()
self.render(
'profile.html', user_profile=current_user_profile, posts=posts)
class EditProfilePage(Handler):
def get(self):
# Can only edit your own profile and you must be logged in so just
# checks if user is logged in
if self.user:
user = User.by_id(int(self.user.key.id()))
self.render("edit-profile.html", user=user)
else:
self.redirect("/login")
def post(self):
if not self.user:
self.redirect("/login")
return
have_error = False
self.username = self.request.get('username')
self.password = self.request.get('password')
self.verify = self.request.get('verify')
self.email = self.request.get('email')
self.description = self.request.get('description')
self.profile_img = self.request.get('profile_img')
self.delete_profile_img = self.request.get('delete_profile_img')
self.user_id = self.user.key.id()
self.error_msg = []
self.params = dict(username=self.username,
email=self.email, description=self.description)
if not valid_username(self.username):
self.error_msg.extend(["That's not a valid username."])
have_error = True
if self.password:
if not valid_password(self.password):
self.error_msg.extend(["That wasn't a valid password."])
have_error = True
elif self.password != self.verify:
self.error_msg.extend(["Your passwords didn't match."])
have_error = True
if not valid_email(self.email):
self.serror_msg.extend(["That's not a valid email."])
have_error = True
if have_error:
self.params['error_msg'] = self.error_msg
self.render('register.html', **self.params)
else:
self.done()
def done(self):
# make sure the user doesn't already exist
u = User.by_name(self.username)
if u and not self.username == self.user.name:
self.error_msg.extend(["That user already exists."])
self.params['error_msg'] = self.error_msg
self.render('register.html', **self.params)
else:
user_update = User.update(self.username, self.password, self.email,
self.description, self.profile_img,
self.delete_profile_img, self.user_id)
self.redirect('/profile/' + self.user.name)
class LoginPage(Handler):
def get(self):
self.render('login.html')
def post(self):
username = self.request.get('username')
password = self.request.get('password')
u = User.login(username, password)
if u:
self.login(u)
self.redirect('/blog')
else:
msg = 'Invalid login'
self.render('login.html', error=msg)
class Logout(Handler):
def get(self):
self.logout()
self.redirect('/')
self.redirect('/')
class PostPage(Handler):
def get(self, post_id):
key = ndb.Key('Post', int(post_id), parent=blog_key())
post = key.get()
comment_error = self.request.get('comment_error')
if not post:
self.response.set_status(404)
self.render("404.html")
return
comments = Comment.by_post_id(int(post_id))
comment_output = []
for comment in comments:
user_name = User.username_by_id(int(comment.user_id))
if not user_name:
user_name = "Deleted User"
comment = dict(content=comment.content, created=comment.created,
user_name=user_name, comment_id=comment.key.id())
comment_output.append(comment)
author = User.by_id(post.user_id)
self.render(
"post.html", post=post, author=author,
comment_output=comment_output, comment_error=comment_error)
class CreatePostPage(Handler):
def get(self):
if self.user:
self.render('create-post.html')
else:
self.redirect('/login')
def post(self):
if not self.user:
self.redirect('/login')
subject = self.request.get('subject')
content = self.request.get('content')
user_id = self.user.key.id()
featured_img = self.request.get('featured_img')
post = Post.create(subject, content, featured_img, user_id)
if post.has_error:
params = dict(
subject=subject, content=content, error_msg=post.error_msg)
self.render('create-post.html', **params)
else:
self.redirect('/blog/%s' % str(post.p.key.id()))
class EditPost(Handler):
def get(self, post_id):
if not self.user:
self.redirect('/login')
return
post = Post.by_id(int(post_id))
if post.user_id == self.user.key.id():
self.render("edit-post.html", post=post)
else:
self.redirect('/blog/' + post_id)
def post(self, post_id):
if not self.user:
self.redirect('/login')
return
subject = self.request.get('subject')
content = self.request.get('content')
delete_featured_img = self.request.get('delete_featured_img')
featured_img = self.request.get('featured_img')
post = Post.update(
int(post_id), subject, content, featured_img, delete_featured_img)
if post.has_error:
params = dict(
subject=subject, content=content, error_msg=post.error_msg)
self.render('edit-post.html', **params)
else:
self.redirect('/blog/%s' % str(post.p.key.id()))
class DeletePost(Handler):
def get(self, post_id):
if not self.user:
self.redirect('/login')
return
post = Post.by_id(int(post_id))
if post.user_id == self.user.key.id():
post.key.delete()
self.redirect('/profile/' + self.user.name)
else:
self.redirect('/blog/' + post_id)
class DeleteComment(Handler):
def get(self, comment_id, post_id):
if not self.user:
self.redirect('/login')
return
self.write(comment_id)
comment = Comment.get_by_id(int(comment_id), parent=comment_key())
if not comment:
self.redirect('/blog/' + post_id + '#comments-list')
return
if comment.user_id == self.user.key.id():
comment.key.delete()
self.redirect('/blog/' + post_id + '#comments-list')
else:
self.redirect('/blog/' + post_id + '#comments-list')
class CommentPost(Handler):
def post(self, post_id):
if not self.user:
self.rediret('/login')
return
content = self.request.get('comment')
user_id = self.user.key.id()
comment = Comment.create(content, post_id, user_id)
if comment.has_error:
self.redirect(
"/blog/" + post_id + "?comment_error=true#commentform")
else:
self.redirect('/blog/%s#%s' % (str(post_id), "comments-list"))
class LikePost(Handler):
def get(self, post_id):
key = ndb.Key('Post', int(post_id), parent=blog_key())
post = key.get()
error = dict()
response = None
logged_in = False
if post:
author_id = post.user_id
if self.user:
logged_in = True
if author_id == self.user.key.id():
error['has_error'] = True
error['error_msg'] = "Can't like your own post"
else:
add_like = Likes.add_like(int(post_id), self.user.key.id())
response = add_like.response
else:
error['has_error'] = True
error['error_msg'] = "No post found"
self.write(
json.dumps(({'logged_in': logged_in,
'response': response, 'error': error})))
class MissingPage(Handler):
def get(self):
# If a user tries to write a url that doesn't exist fallback is a 404
self.response.set_status(404)
self.render("404.html")
return
class RouteProfile(Handler):
def get(self):
self.redirect('/')
class Image(Handler):
def get(self):
img_id = self.request.get('id')
img_type = self.request.get('type')
if img_id.isdigit():
if img_type == 'featured_img':
img_key = ndb.Key('Post', int(img_id), parent=blog_key())
elif img_type == "profile_img":
img_key = ndb.Key('User', int(img_id), parent=users_key())
if img_key:
img = img_key.get()
if img_type == "featured_img":
if img.featured_img:
self.response.headers['Content-Type'] = 'image/png'
self.response.out.write(img.featured_img)
return
elif img_type == "profile_img":
if img.profile_img:
self.response.headers['Content-Type'] = 'image/png'
self.response.out.write(img.profile_img)
return
self.response.set_status(404)
self.render("404.html")
appLoader = webapp2.WSGIApplication([('/', MainPage),
('/register', RegisterPage),
('/login', LoginPage),
('/logout', Logout),
('/profile', RouteProfile),
('/profile/(\w+)', ProfilePage),
('/edit-profile', EditProfilePage),
('/create-post', CreatePostPage),
('/blog/([0-9]+)/edit', EditPost),
('/blog/([0-9]+)/delete', DeletePost),
('/comment/([0-9]+)', CommentPost),
('/blog/([0-9]+)/like', LikePost),
('/comment/([0-9]+)/([0-9]+)/delete',
DeleteComment),
('/blog', BlogPage),
('/blog/([0-9]+)', PostPage),
('/img', Image),
('/.*', MissingPage)
],
debug=True)
| true | true |
f71b2aad75e30594e61025ad33be2a2c17932235 | 2,792 | py | Python | reinvent_models/link_invent/networks/encoder_decoder.py | GT4SD/-reinvent_models | e1cf00d1b24fe5f39354e34829adc25460da84e2 | [
"MIT"
] | null | null | null | reinvent_models/link_invent/networks/encoder_decoder.py | GT4SD/-reinvent_models | e1cf00d1b24fe5f39354e34829adc25460da84e2 | [
"MIT"
] | 1 | 2022-03-07T12:18:00.000Z | 2022-03-07T12:18:00.000Z | reinvent_models/link_invent/networks/encoder_decoder.py | GT4SD/reinvent_models | e1cf00d1b24fe5f39354e34829adc25460da84e2 | [
"MIT"
] | null | null | null | """
Implementation of a network using an Encoder-Decoder architecture.
"""
import torch.nn as tnn
from torch import Tensor
from reinvent_models.link_invent.networks.decoder import Decoder
from reinvent_models.link_invent.networks.encoder import Encoder
class EncoderDecoder(tnn.Module):
    """Sequence-to-sequence network pairing an Encoder with a Decoder.

    The encoder consumes the input sequences; its padded outputs and hidden
    states condition the decoder, which produces the output logits.
    """

    def __init__(self, encoder_params: dict, decoder_params: dict):
        super().__init__()
        self._encoder = Encoder(**encoder_params)
        self._decoder = Decoder(**decoder_params)

    def forward(self, encoder_seqs: Tensor, encoder_seq_lengths: Tensor, decoder_seqs: Tensor,
                decoder_seq_lengths: Tensor):
        """Full forward pass: encode the inputs, then decode against them.

        :param encoder_seqs: Encoder input sequences (batch, seq_e, dim).
        :param encoder_seq_lengths: Length of each encoder sequence.
        :param decoder_seqs: Decoder input sequences (batch, seq_d, dim).
        :param decoder_seq_lengths: Length of each decoder sequence.
        :return: Output logits as a tensor (batch, seq_d, dim).
        """
        padded_enc, hidden = self.forward_encoder(encoder_seqs, encoder_seq_lengths)
        logits, _, _ = self.forward_decoder(
            decoder_seqs, decoder_seq_lengths, padded_enc, hidden)
        return logits

    def forward_encoder(self, padded_seqs: Tensor, seq_lengths: Tensor):
        """Run only the encoder; returns (encoded_seqs, hidden_states)."""
        return self._encoder(padded_seqs, seq_lengths)

    def forward_decoder(self, padded_seqs: Tensor, seq_lengths: Tensor, encoder_padded_seqs: Tensor,
                        hidden_states: Tensor):
        """Run only the decoder; returns logits plus per-step hidden state."""
        return self._decoder(padded_seqs, seq_lengths, encoder_padded_seqs, hidden_states)

    def get_params(self):
        """Return the constructor params of both sub-networks as a dict."""
        return dict(
            encoder_params=self._encoder.get_params(),
            decoder_params=self._decoder.get_params(),
        )
| 42.30303 | 115 | 0.666189 |
import torch.nn as tnn
from torch import Tensor
from reinvent_models.link_invent.networks.decoder import Decoder
from reinvent_models.link_invent.networks.encoder import Encoder
class EncoderDecoder(tnn.Module):
def __init__(self, encoder_params: dict, decoder_params: dict):
super(EncoderDecoder, self).__init__()
self._encoder = Encoder(**encoder_params)
self._decoder = Decoder(**decoder_params)
def forward(self, encoder_seqs: Tensor, encoder_seq_lengths: Tensor, decoder_seqs: Tensor,
decoder_seq_lengths: Tensor):
encoder_padded_seqs, hidden_states = self.forward_encoder(encoder_seqs, encoder_seq_lengths)
logits, _, _ = self.forward_decoder(decoder_seqs, decoder_seq_lengths, encoder_padded_seqs, hidden_states)
return logits
def forward_encoder(self, padded_seqs: Tensor, seq_lengths: Tensor):
return self._encoder(padded_seqs, seq_lengths)
def forward_decoder(self, padded_seqs: Tensor, seq_lengths: Tensor, encoder_padded_seqs: Tensor,
hidden_states: Tensor):
return self._decoder(padded_seqs, seq_lengths, encoder_padded_seqs, hidden_states)
def get_params(self):
return {
"encoder_params": self._encoder.get_params(),
"decoder_params": self._decoder.get_params()
}
| true | true |
f71b2accb33c9e4fb30a401746d3041c3b953b26 | 11,528 | py | Python | second/mayank_scripts/infer_ros_melodic_pretained_same_frame.py | mayanks888/second.pytorch | 02d37885a543ee46516648dcab7db8f5d677a179 | [
"MIT"
] | null | null | null | second/mayank_scripts/infer_ros_melodic_pretained_same_frame.py | mayanks888/second.pytorch | 02d37885a543ee46516648dcab7db8f5d677a179 | [
"MIT"
] | null | null | null | second/mayank_scripts/infer_ros_melodic_pretained_same_frame.py | mayanks888/second.pytorch | 02d37885a543ee46516648dcab7db8f5d677a179 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# ROS node libs
import time
import numpy as np
import rospy
import torch
# from geometry_msgs.msg import Quaternion, Pose, Point, Vector3
from pyquaternion import Quaternion
from google.protobuf import text_format
from sensor_msgs.msg import PointCloud2
from std_msgs.msg import Header, ColorRGBA
# from cv_bridge import CvBridge, CvBridgeError
from visualization_msgs.msg import Marker, MarkerArray
from second.protos import pipeline_pb2
# from second.utils import simplevis
from second.pytorch.train import build_network
from second.utils import config_tool
from std_msgs.msg import Int16, Float32MultiArray
from jsk_recognition_msgs.msg import BoundingBox, BoundingBoxArray
# import ros_numpy
# GPU settings: Select GPUs to use. Coment it to let the system decide
# os.environ["CUDA_VISIBLE_DEVICES"]="0"
class ros_tensorflow_obj():
    def __init__(self):
        """Build the PointPillars network and wire up ROS pub/sub.

        Loads the pipeline config and pretrained checkpoint from hard-coded
        absolute paths, precomputes the anchor grid, then registers the
        point-cloud subscriber and the detection publishers.
        """
        # ## Initial msg
        rospy.loginfo(' ## Starting ROS interface ##')
        # ## Load a (frozen) Tensorflow model into memory.
        print("ready to process----------------------------------------------------------")
        ####################################################################################333
        # config_path = "../configs/nuscenes/all.pp.largea.config"
        # config_path = "/home/mayank_sati/codebase/python/lidar/second.pytorch/second/configs/pointpillars/car/xyres_28.config"
        # NOTE(review): machine-specific absolute path; parameterize for reuse.
        config_path = "/home/mayank_sati/codebase/python/lidar/second.pytorch/second/configs/pointpillars/car/xyres_24.config"
        config = pipeline_pb2.TrainEvalPipelineConfig()
        with open(config_path, "r") as f:
            proto_str = f.read()
            text_format.Merge(proto_str, config)
        input_cfg = config.eval_input_reader
        model_cfg = config.model.second
        # config_tool.change_detection_range(model_cfg, [-50, -50, 50, 50])
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # ckpt_path = "../checkpoint/voxelnet-140670.tckpt"
        # NOTE(review): machine-specific absolute path; parameterize for reuse.
        ckpt_path="/home/mayank_sati/Downloads/pretrained_models_v1.5/pp_model_for_nuscenes_pretrain/voxelnet-296960.tckpt"
        net = build_network(model_cfg).to(device).eval()
        net.load_state_dict(torch.load(ckpt_path))
        target_assigner = net.target_assigner
        self.voxel_generator = net.voxel_generator
        class_names = target_assigner.classes
        # Anchor grid derived from the voxel grid and network downsampling.
        grid_size = self.voxel_generator.grid_size
        feature_map_size = grid_size[:2] // config_tool.get_downsample_factor(model_cfg)
        feature_map_size = [*feature_map_size, 1][::-1]
        anchors = target_assigner.generate_anchors(feature_map_size)["anchors"]
        anchors = torch.tensor(anchors, dtype=torch.float32, device=device)
        anchors = anchors.view(1, -1, 7)
        # @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
        # Second anchor pass with a fixed feature-map size; note this
        # overrides the anchors computed above.
        feature_map_size = [1, 50, 50]
        ret = target_assigner.generate_anchors(feature_map_size)
        class_names = target_assigner.classes
        anchors_dict = target_assigner.generate_anchors_dict(feature_map_size)
        anchors_list = []
        for k, v in anchors_dict.items():
            anchors_list.append(v["anchors"])
        # anchors = ret["anchors"]
        anchors = np.concatenate(anchors_list, axis=0)
        anchors = anchors.reshape([-1, target_assigner.box_ndim])
        assert np.allclose(anchors, ret["anchors"].reshape(-1, target_assigner.box_ndim))
        matched_thresholds = ret["matched_thresholds"]
        unmatched_thresholds = ret["unmatched_thresholds"]
        # anchors_bv = box_np_ops.rbbox2d_to_near_bbox(anchors[:, [0, 1, 3, 4, 6]])
        # NOTE(review): placeholder value standing in for the commented-out
        # bird's-eye-view anchors above — confirm nothing reads it.
        anchors_bv = 2
        anchor_cache = {
            "anchors": anchors,
            "anchors_bv": anchors_bv,
            "matched_thresholds": matched_thresholds,
            "unmatched_thresholds": unmatched_thresholds,
            "anchors_dict": anchors_dict,
        }
        # Keep the anchors and network on self for use in lidar_callback.
        anchors = torch.tensor(anchors, dtype=torch.float32, device=device)
        self.anchors = anchors.view(1, -1, 7)
        self.net = net
        self.device = device
        ##########################################################################################
        # self.marker_publisher = rospy.Publisher('visualization_marker', MarkerArray, queue_size=5)
        self.pcl_publisher = rospy.Publisher('result_pcl', PointCloud2, queue_size=1)
        ############
        # [print(n.name) for n in tf.get_default_graph().as_graph_def().node]
        # ROS environment setup
        # ## Define subscribers
        self.subscribers_def()
        # ## Define publishers
        self.publishers_def()
        self.now = rospy.Time.now()
    # Define subscribers
    def subscribers_def(self):
        """Subscribe to the input lidar topic.

        Registers :meth:`lidar_callback` on the ``/lidar_top``
        ``PointCloud2`` topic. ``buff_size`` is raised to ``2 ** 24``
        (16 MiB) so a full point cloud message fits in one receive
        buffer. The commented lines are alternative input topics tried
        during development (KITTI, Apollo, Velodyne, Livox rigs).
        """
        # subs_topic = '/kitti/velo/pointcloud'
        #subs_topic = '/apollo/sensor/velodyne64/compensator/PointCloud2'
        # subs_topic = '/velodyne64_points'
        # subs_topic = '/apollo/sensor/velodyne64/PointCloud2'
        # subs_topic = '/points_raw'
        # subs_topic = '/livox/lidar'
        # subs_topic = '/apollo/sensor/velodyne32C/compensator/PointCloud2'
        subs_topic = '/lidar_top'
        self._sub = rospy.Subscriber(subs_topic, PointCloud2, self.lidar_callback, queue_size=10, buff_size=2 ** 24)
        # mydata = rospy.Subscriber( subs_topic , PointCloud2, self.lidar_callback, queue_size=1, buff_size=2**24)
        # print(mydata)
        # self._sub = rospy.Subscriber( subs_topic , Image, self.lidar_callback, queue_size=1, buff_size=100)
    # Define publishers
    def publishers_def(self):
        """Create the output publishers.

        * ``pc_bbox_topic`` (Float32MultiArray): flat per-detection array
          filled by :meth:`lidar_callback`.
        * ``Detections`` (BoundingBoxArray): jsk bounding boxes for
          visualization (e.g. in RViz).
        """
        self._pub = rospy.Publisher('pc_bbox_topic', Float32MultiArray, queue_size=1)
        self.pub_arr_bbox = rospy.Publisher("Detections", BoundingBoxArray, queue_size=1)
# Camera image callback
def lidar_callback(self, point_cl_msg):
arr_bbox = BoundingBoxArray()
############################################################################3
# lidar = np.fromstring(point_cl_msg.data, dtype=np.float32)
# points = lidar.reshape(-1, 4)
# print('gotit"')
# pc = ros_numpy.numpify(point_cl_msg)
# points = np.zeros((pc.shape[0], 4))
# points[:, 0] = pc['x']
# points[:, 1] = pc['y']
# points[:, 2] = pc['z']
# points[:, 3] = pc['intensity']
# points[:, 3] /= 255
#########################################################333
lidar = np.fromstring(point_cl_msg.data, dtype=np.float32)
points = lidar.reshape(-1, 4)
points[:, 3] /= 255
#######################################################################
res = self.voxel_generator.generate(points, max_voxels=30000)
voxels = res["voxels"]
coords = res["coordinates"]
num_points = res["num_points_per_voxel"]
num_voxels = np.array([voxels.shape[0]], dtype=np.int64)
# print("voxel_generator_time",(time.time() - t)*1000)
###############################################################
# print(voxels.shape)
# add batch idx to coords
coords = np.pad(coords, ((0, 0), (1, 0)), mode='constant', constant_values=0)
voxels = torch.tensor(voxels, dtype=torch.float32, device=self.device)
coords = torch.tensor(coords, dtype=torch.int32, device=self.device)
num_points = torch.tensor(num_points, dtype=torch.int32, device=self.device)
# print("conversion time",(time.time() - t)*1000)
example = {"anchors": self.anchors, "voxels": voxels, "num_points": num_points, "coordinates": coords, }
t2 = time.time()
pred = self.net(example)[0]
# print(pred)
# print("prediction",(time.time() - t2)*1000)
# print("total_time",(time.time() - t)*1000)
boxes_lidar = pred["box3d_lidar"].detach().cpu().numpy()
scores_lidar = pred["scores"].detach().cpu().numpy()
labels_lidar = pred["label_preds"].detach().cpu().numpy()
##############################3333
threshold = 0.2
keep = np.where((scores_lidar >= threshold))[0]
scores_lidar = scores_lidar[keep]
print(scores_lidar)
boxes_lidar = boxes_lidar[keep]
labels_lidar = labels_lidar[keep]
# sco
# print(scores_lidar)
################################################################################
# self.show_text_in_rviz_mullti_cube(boxes_lidar,point_cl_msg)
# self.show_text_in_rviz_mullti_sphere(boxes_lidar,point_cl_msg)
##################################################################################
# apollo integration
# numboxes = np.squeeze(scores_lidar)
numboxes = len(scores_lidar)
tl_bbox = Float32MultiArray()
iLen = boxes_lidar.shape[0]
lidar_bbox = Float32MultiArray()
print('Processing no of object:', iLen)
if (numboxes) >= 1:
tmp = -np.ones(10 * (numboxes) + 1)
for i in range(0, int(numboxes)):
try:
score = float((scores_lidar)[i])
if (boxes_lidar.shape[0]) == 1:
bboxes = [float(v) for v in (boxes_lidar)[i]]
else:
bboxes = [float(v) for v in np.squeeze(boxes_lidar)[i]]
tmp[0] = numboxes
tmp[10 * i + 1] = score
tmp[10 * i + 2] = bboxes[0]
tmp[10 * i + 3] = bboxes[1]
tmp[10 * i + 4] = bboxes[2]
tmp[10 * i + 5] = bboxes[3]
tmp[10 * i + 6] = bboxes[4]
tmp[10 * i + 7] = bboxes[5]
tmp[10 * i + 8] = bboxes[6]
tmp[10 * i + 9] = 0
tmp[10 * i + 10] = 0
bbox = BoundingBox()
# bbox.header.frame_id = point_cl_msg.header.frame_id
# bbox.header.frame_id = 'livox_frame'
bbox.header.frame_id = 'lidar_top'
q = Quaternion(axis=(0, 0, 1), radians=-1.0 * float(boxes_lidar[i][6]))
bbox.pose.orientation.x = q.x
bbox.pose.orientation.y = q.y
bbox.pose.orientation.z = q.z
bbox.pose.orientation.w = q.w
bbox.pose.position.x = float(boxes_lidar[i][0])
bbox.pose.position.y = float(boxes_lidar[i][1])
bbox.pose.position.z = float(boxes_lidar[i][2])
bbox.dimensions.x = float(boxes_lidar[i][3])
bbox.dimensions.y = float(boxes_lidar[i][4])
bbox.dimensions.z = float(boxes_lidar[i][5])
arr_bbox.boxes.append(bbox)
except:
print("I am here")
# here data for publishing
tl_bbox.data = tmp
self._pub.publish(tl_bbox)
arr_bbox.header.frame_id = point_cl_msg.header.frame_id
self.pub_arr_bbox.publish(arr_bbox)
point_cl_msg.header.frame_id = point_cl_msg.header.frame_id
self.pcl_publisher.publish(point_cl_msg)
arr_bbox.boxes.clear()
    def spin(self):
        """Block until the node is shut down (delegates to rospy.spin)."""
        rospy.spin()
def main():
    """Initialise the ROS node and run the detector until shutdown."""
    rospy.init_node('LIDAR_NODE', anonymous=True)
    # Constructing the node object loads the network and registers all
    # subscribers/publishers (see ros_tensorflow_obj.__init__).
    tf_ob = ros_tensorflow_obj()
    # tf_ob.subscribers_def
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print("Shutting down")
if __name__ == '__main__':
    main()
| 45.207843 | 128 | 0.567661 |
import time
import numpy as np
import rospy
import torch
from pyquaternion import Quaternion
from google.protobuf import text_format
from sensor_msgs.msg import PointCloud2
from std_msgs.msg import Header, ColorRGBA
from visualization_msgs.msg import Marker, MarkerArray
from second.protos import pipeline_pb2
from second.pytorch.train import build_network
from second.utils import config_tool
from std_msgs.msg import Int16, Float32MultiArray
from jsk_recognition_msgs.msg import BoundingBox, BoundingBoxArray
class ros_tensorflow_obj():
def __init__(self):
# Starting ROS interface ##')
| true | true |
f71b2b5262d128663739d9f88003925845a959b1 | 27,166 | py | Python | colour/corresponding/datasets/breneman1987.py | MaxSchambach/colour | 3f3685d616fda4be58cec20bc1e16194805d7e2d | [
"BSD-3-Clause"
] | null | null | null | colour/corresponding/datasets/breneman1987.py | MaxSchambach/colour | 3f3685d616fda4be58cec20bc1e16194805d7e2d | [
"BSD-3-Clause"
] | null | null | null | colour/corresponding/datasets/breneman1987.py | MaxSchambach/colour | 3f3685d616fda4be58cec20bc1e16194805d7e2d | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Breneman Corresponding Chromaticities Dataset
=============================================
Defines *Breneman (1987)* results for corresponding chromaticities experiments.
See Also
--------
`Corresponding Chromaticities Prediction Jupyter Notebook
<http://nbviewer.jupyter.org/github/colour-science/colour-notebooks/\
blob/master/notebooks/corresponding/prediction.ipynb>`_
References
----------
- :cite:`Breneman1987b` : Breneman, E. J. (1987). Corresponding
chromaticities for different states of adaptation to complex visual fields.
Journal of the Optical Society of America A, 4(6), 1115.
doi:10.1364/JOSAA.4.001115
"""
from __future__ import division, unicode_literals
import numpy as np
from collections import namedtuple
from colour.utilities.documentation import DocstringDict
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2019 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-developers@colour-science.org'
__status__ = 'Production'
__all__ = [
'BrenemanExperimentResult', 'PrimariesChromaticityCoordinates',
'BRENEMAN_EXPERIMENT_1_RESULTS', 'BRENEMAN_EXPERIMENT_2_RESULTS',
'BRENEMAN_EXPERIMENT_3_RESULTS', 'BRENEMAN_EXPERIMENT_4_RESULTS',
'BRENEMAN_EXPERIMENT_5_RESULTS', 'BRENEMAN_EXPERIMENT_6_RESULTS',
'BRENEMAN_EXPERIMENT_7_RESULTS', 'BRENEMAN_EXPERIMENT_10_RESULTS',
'BRENEMAN_EXPERIMENT_8_RESULTS', 'BRENEMAN_EXPERIMENT_9_RESULTS',
'BRENEMAN_EXPERIMENT_11_RESULTS', 'BRENEMAN_EXPERIMENT_12_RESULTS',
'BRENEMAN_EXPERIMENTS_PRIMARIES_CHROMATICITIES', 'BRENEMAN_EXPERIMENTS'
]
class BrenemanExperimentResult(
        namedtuple('BrenemanExperimentResult',
                   ('name', 'uv_t', 'uv_m', 's_uv', 'd_uv_i', 'd_uv_g'))):
    """
    Single colour result from a *Breneman (1987)* matching experiment.

    Parameters
    ----------
    name : unicode
        Test colour name.
    uv_t : numeric
        Chromaticity coordinates :math:`uv_t^p` of test colour.
    uv_m : array_like, (2,)
        Chromaticity coordinates :math:`uv_m^p` of matching colour.
    s_uv : array_like, (2,), optional
        Interobserver variation (:math:`x10^3`) :math:`\\sigma_uv^p`.
    d_uv_i : array_like, (2,), optional
        Deviation of individual linear transformation (:math:`x10^3`)
        :math:`\\delta_uv_i^p`.
    d_uv_g : array_like, (2,), optional
        Deviation of individual linear transformation (:math:`x10^3`)
        :math:`\\delta_uv_g^p`.
    """

    def __new__(cls, name, uv_t, uv_m, s_uv=None, d_uv_i=None, d_uv_g=None):
        """
        Returns a new instance of the
        :class:`colour.corresponding.datasets.corresponding_chromaticities.\\
BrenemanExperimentResult` class.

        Every numeric field is coerced with :func:`numpy.array`; omitted
        optional fields become 0-d object arrays wrapping ``None``.
        """
        coerced = [
            np.array(field) for field in (uv_t, uv_m, s_uv, d_uv_i, d_uv_g)
        ]
        return super(BrenemanExperimentResult, cls).__new__(
            cls, name, *coerced)
class PrimariesChromaticityCoordinates(
        namedtuple(
            'PrimariesChromaticityCoordinates',
            ('experiment', 'illuminants', 'Y', 'P_uvp', 'D_uvp', 'T_uvp'))):
    """
    Chromaticity coordinates of the primaries used in a *Breneman (1987)*
    experiment.

    Parameters
    ----------
    experiment : integer
        Experiment number.
    illuminants : array_like, (2,)
        Illuminant names of the experiment.
    Y : numeric
        White luminance :math:`Y` in :math:`cd/m^2`.
    P_uvp : numeric
        Chromaticity coordinates :math:`uv^p` of primary :math:`P`.
    D_uvp : numeric
        Chromaticity coordinates :math:`uv^p` of primary :math:`D`.
    T_uvp : numeric
        Chromaticity coordinates :math:`uv^p` of primary :math:`T`.
    """

    def __new__(cls,
                experiment,
                illuminants,
                Y,
                P_uvp=None,
                D_uvp=None,
                T_uvp=None):
        """
        Returns a new instance of the
        :class:`colour.corresponding.datasets.corresponding_chromaticities.\\
PrimariesChromaticityCoordinates` class.

        All fields except ``experiment`` are coerced with
        :func:`numpy.array`; omitted primaries become 0-d object arrays
        wrapping ``None``.
        """
        coerced = [
            np.array(field)
            for field in (illuminants, Y, P_uvp, D_uvp, T_uvp)
        ]
        return super(PrimariesChromaticityCoordinates, cls).__new__(
            cls, experiment, *coerced)
# yapf: disable
BRENEMAN_EXPERIMENT_1_RESULTS = (
BrenemanExperimentResult(
'Illuminant',
(0.259, 0.526), (0.200, 0.475)),
BrenemanExperimentResult(
'Gray',
(0.259, 0.524), (0.199, 0.487), (4, 4), (2, 3), (0, 0)),
BrenemanExperimentResult(
'Red',
(0.459, 0.522), (0.420, 0.509), (19, 4), (-10, -7), (-19, -3)),
BrenemanExperimentResult(
'Skin',
(0.307, 0.526), (0.249, 0.497), (7, 4), (-1, 1), (-6, -1)),
BrenemanExperimentResult(
'Orange',
(0.360, 0.544), (0.302, 0.548), (12, 1), (1, -2), (-7, -6)),
BrenemanExperimentResult(
'Brown',
(0.350, 0.541), (0.290, 0.537), (11, 4), (3, 0), (-5, -3)),
BrenemanExperimentResult(
'Yellow',
(0.318, 0.550), (0.257, 0.554), (8, 2), (0, 2), (-5, -5)),
BrenemanExperimentResult(
'Foliage',
(0.258, 0.542), (0.192, 0.529), (4, 6), (3, 2), (3, -6)),
BrenemanExperimentResult(
'Green',
(0.193, 0.542), (0.129, 0.521), (7, 5), (3, 2), (9, -7)),
BrenemanExperimentResult(
'Blue-green',
(0.180, 0.516), (0.133, 0.469), (4, 6), (-3, -2), (2, -5)),
BrenemanExperimentResult(
'Blue',
(0.186, 0.445), (0.158, 0.340), (13, 33), (2, 7), (1, 13)),
BrenemanExperimentResult(
'Sky',
(0.226, 0.491), (0.178, 0.426), (3, 14), (1, -3), (0, -1)),
BrenemanExperimentResult(
'Purple',
(0.278, 0.456), (0.231, 0.365), (4, 25), (0, 2), (-5, 7)))
# yapf: enable
"""
*Breneman (1987)* experiment 1 results.
BRENEMAN_EXPERIMENT_1_RESULTS : tuple
Notes
-----
- Illuminants : *A*, *D65*
- White Luminance : 1500 :math:`cd/m^2`
- Observers Count : 7
"""
# yapf: disable
BRENEMAN_EXPERIMENT_2_RESULTS = (
BrenemanExperimentResult(
'Illuminant',
(0.222, 0.521), (0.204, 0.479)),
BrenemanExperimentResult(
'Gray',
(0.227, 0.517), (0.207, 0.486), (2, 5), (-1, 0), (0, 0)),
BrenemanExperimentResult(
'Red',
(0.464, 0.520), (0.449, 0.511), (22, 3), (-8, -8), (-7, -2)),
BrenemanExperimentResult(
'Skin',
(0.286, 0.526), (0.263, 0.505), (7, 2), (0, -1), (0, -1)),
BrenemanExperimentResult(
'Orange',
(0.348, 0.546), (0.322, 0.545), (13, 3), (3, -1), (3, -2)),
BrenemanExperimentResult(
'Brown',
(0.340, 0.543), (0.316, 0.537), (11, 3), (1, 1), (0, 0)),
BrenemanExperimentResult(
'Yellow',
(0.288, 0.554), (0.265, 0.553), (5, 2), (-2, 2), (-1, -2)),
BrenemanExperimentResult(
'Foliage',
(0.244, 0.547), (0.221, 0.538), (4, 3), (-2, 1), (0, -3)),
BrenemanExperimentResult(
'Green',
(0.156, 0.548), (0.135, 0.532), (4, 3), (-1, 3), (3, -4)),
BrenemanExperimentResult(
'Blue-green',
(0.159, 0.511), (0.145, 0.472), (9, 7), (-1, 2), (2, 1)),
BrenemanExperimentResult(
'Blue',
(0.160, 0.406), (0.163, 0.331), (23, 31), (2, -3), (-1, 3)),
BrenemanExperimentResult(
'Sky',
(0.190, 0.481), (0.176, 0.431), (5, 24), (2, -2), (2, 0)),
BrenemanExperimentResult(
'Purple',
(0.258, 0.431), (0.244, 0.349), (4, 19), (-3, 13), (-4, 19)))
# yapf: enable
"""
*Breneman (1987)* experiment 2 results.
BRENEMAN_EXPERIMENT_2_RESULTS : tuple
Notes
-----
- Illuminants : *Projector*, *D55*
- White Luminance : 1500 :math:`cd/m^2`
- Observers Count : 7
"""
# yapf: disable
BRENEMAN_EXPERIMENT_3_RESULTS = (
BrenemanExperimentResult(
'Illuminant',
(0.223, 0.521), (0.206, 0.478)),
BrenemanExperimentResult(
'Gray',
(0.228, 0.517), (0.211, 0.494), (1, 3), (0, 2), (0, 0)),
BrenemanExperimentResult(
'Red',
(0.462, 0.519), (0.448, 0.505), (11, 4), (-3, 6), (-4, 6)),
BrenemanExperimentResult(
'Skin',
(0.285, 0.524), (0.267, 0.507), (6, 3), (-1, 1), (-2, 1)),
BrenemanExperimentResult(
'Orange',
(0.346, 0.546), (0.325, 0.541), (11, 3), (1, -2), (2, 3)),
BrenemanExperimentResult(
'Brown',
(0.338, 0.543), (0.321, 0.532), (9, 6), (-3, 2), (-3, 7)),
BrenemanExperimentResult(
'Yellow',
(0.287, 0.554), (0.267, 0.548), (4, 5), (1, -2), (0, 5)),
BrenemanExperimentResult(
'Foliage',
(0.244, 0.547), (0.226, 0.531), (3, 6), (-1, 3), (-2, 8)),
BrenemanExperimentResult(
'Green',
(0.157, 0.548), (0.141, 0.528), (9, 6), (2, 2), (0, 6)),
BrenemanExperimentResult(
'Blue-green',
(0.160, 0.510), (0.151, 0.486), (8, 5), (-2, -1), (-2, -5)),
BrenemanExperimentResult(
'Blue',
(0.162, 0.407), (0.158, 0.375), (6, 7), (1, -6), (4, -23)),
BrenemanExperimentResult(
'Sky',
(0.191, 0.482), (0.179, 0.452), (4, 5), (0, 1), (1, -7)),
BrenemanExperimentResult(
'Purple',
(0.258, 0.432), (0.238, 0.396), (4, 8), (5, 3), (4, -11)))
# yapf: enable
"""
*Breneman (1987)* experiment 3 results.
BRENEMAN_EXPERIMENT_3_RESULTS : tuple
Notes
-----
- Illuminants : *Projector*, *D55*
- White Luminance : 75 :math:`cd/m^2`
- Observers Count : 7
"""
# yapf: disable
BRENEMAN_EXPERIMENT_4_RESULTS = (
BrenemanExperimentResult(
'Illuminant',
(0.258, 0.523), (0.199, 0.467)),
BrenemanExperimentResult(
'Gray',
(0.257, 0.524), (0.205, 0.495), (2, 2), (0, 4), (0, 0)),
BrenemanExperimentResult(
'Red',
(0.460, 0.521), (0.416, 0.501), (11, 6), (-6, 4), (-6, 9)),
BrenemanExperimentResult(
'Skin',
(0.308, 0.526), (0.253, 0.503), (7, 3), (-1, 1), (-1, 0)),
BrenemanExperimentResult(
'Orange',
(0.360, 0.544), (0.303, 0.541), (14, 5), (1, -4), (1, 2)),
BrenemanExperimentResult(
'Brown',
(0.350, 0.541), (0.296, 0.527), (11, 7), (-2, 4), (-3, 9)),
BrenemanExperimentResult(
'Yellow',
(0.317, 0.550), (0.260, 0.547), (9, 5), (1, -3), (0, 3)),
BrenemanExperimentResult(
'Foliage',
(0.258, 0.543), (0.203, 0.520), (4, 6), (0, 8), (0, 9)),
BrenemanExperimentResult(
'Green',
(0.193, 0.543), (0.142, 0.516), (6, 9), (3, 8), (2, 6)),
BrenemanExperimentResult(
'Blue-green',
(0.180, 0.516), (0.140, 0.484), (9, 5), (-2, -1), (-1, -9)),
BrenemanExperimentResult(
'Blue',
(0.185, 0.445), (0.151, 0.394), (8, 10), (2, -8), (8, -24)),
BrenemanExperimentResult(
'Sky',
(0.225, 0.490), (0.180, 0.448), (4, 8), (1, -1), (3, -11)),
BrenemanExperimentResult(
'Purple',
(0.278, 0.455), (0.229, 0.388), (6, 14), (1, 12), (3, 0)))
# yapf: enable
"""
*Breneman (1987)* experiment 4 results.
BRENEMAN_EXPERIMENT_4_RESULTS : tuple
Notes
-----
- Illuminants : *A*, *D65*
- White Luminance : 75 :math:`cd/m^2`
- Observers Count : 7
"""
# yapf: disable
BRENEMAN_EXPERIMENT_5_RESULTS = (
BrenemanExperimentResult(
'Gray',
(0.028, 0.480), (0.212, 0.491), (2, 2)),
BrenemanExperimentResult(
'Red',
(0.449, 0.512), (0.408, 0.514), (11, 5)),
BrenemanExperimentResult(
'Skin',
(0.269, 0.505), (0.262, 0.511), (4, 2)),
BrenemanExperimentResult(
'Orange',
(0.331, 0.548), (0.303, 0.545), (4, 3)),
BrenemanExperimentResult(
'Brown',
(0.322, 0.541), (0.303, 0.538), (4, 4)),
BrenemanExperimentResult(
'Yellow',
(0.268, 0.555), (0.264, 0.550), (3, 2)),
BrenemanExperimentResult(
'Foliage',
(0.224, 0.538), (0.227, 0.535), (3, 3)),
BrenemanExperimentResult(
'Green',
(0.134, 0.531), (0.159, 0.530), (9, 3)),
BrenemanExperimentResult(
'Blue-green',
(0.145, 0.474), (0.165, 0.490), (8, 3)),
BrenemanExperimentResult(
'Blue',
(0.163, 0.329), (0.173, 0.378), (7, 12)),
BrenemanExperimentResult(
'Sky',
(0.179, 0.438), (0.189, 0.462), (5, 4)),
BrenemanExperimentResult(
'Purple',
(0.245, 0.364), (0.239, 0.401), (4, 16)))
# yapf: enable
"""
*Breneman (1987)* experiment 5 results.
BRENEMAN_EXPERIMENT_5_RESULTS : tuple
Notes
-----
- Effective White Levels : 130 and 2120 :math:`cd/m^2`
- Observers Count : 7
"""
# yapf: disable
BRENEMAN_EXPERIMENT_6_RESULTS = (
BrenemanExperimentResult(
'Illuminant',
(0.257, 0.525), (0.201, 0.482)),
BrenemanExperimentResult(
'Gray',
(0.267, 0.521), (0.207, 0.485), (5, 3), (-1, 0), (0, 0)),
BrenemanExperimentResult(
'Red',
(0.457, 0.521), (0.398, 0.516), (9, 4), (-2, -5), (1, -9)),
BrenemanExperimentResult(
'Skin',
(0.316, 0.526), (0.253, 0.503), (5, 3), (-3, -2), (-1, -3)),
BrenemanExperimentResult(
'Orange',
(0.358, 0.545), (0.287, 0.550), (7, 3), (3, 0), (7, -6)),
BrenemanExperimentResult(
'Brown',
(0.350, 0.541), (0.282, 0.540), (6, 3), (-1, 0), (2, -5)),
BrenemanExperimentResult(
'Yellow',
(0.318, 0.551), (0.249, 0.556), (7, 2), (-1, 1), (2, -5)),
BrenemanExperimentResult(
'Foliage',
(0.256, 0.547), (0.188, 0.537), (5, 4), (3, 1), (4, -2)),
BrenemanExperimentResult(
'Green',
(0.193, 0.542), (0.133, 0.520), (13, 3), (5, -2), (5, -4)),
BrenemanExperimentResult(
'Blue-green',
(0.180, 0.516), (0.137, 0.466), (12, 10), (0, 0), (-2, 2)),
BrenemanExperimentResult(
'Blue',
(0.186, 0.445), (0.156, 0.353), (12, 45), (6, 1), (2, 6)),
BrenemanExperimentResult(
'Sky',
(0.225, 0.492), (0.178, 0.428), (6, 14), (1, -1), (-1, 3)),
BrenemanExperimentResult(
'Purple',
(0.276, 0.456), (0.227, 0.369), (6, 27), (-2, 4), (-3, 9)))
# yapf: enable
"""
*Breneman (1987)* experiment 6 results.
BRENEMAN_EXPERIMENT_6_RESULTS : tuple
Notes
-----
- Illuminants : *A*, *D55*
- White Luminance : 11100 :math:`cd/m^2`
- Observers Count : 8
"""
# yapf: disable
BRENEMAN_EXPERIMENT_7_RESULTS = (
BrenemanExperimentResult(
'Gray',
(0.208, 0.481), (0.211, 0.486), (2, 3)),
BrenemanExperimentResult(
'Red',
(0.448, 0.512), (0.409, 0.516), (9, 2)),
BrenemanExperimentResult(
'Skin',
(0.269, 0.505), (0.256, 0.506), (4, 3)),
BrenemanExperimentResult(
'Orange',
(0.331, 0.549), (0.305, 0.547), (5, 4)),
BrenemanExperimentResult(
'Brown',
(0.322, 0.541), (0.301, 0.539), (5, 2)),
BrenemanExperimentResult(
'Yellow',
(0.268, 0.555), (0.257, 0.552), (3, 4)),
BrenemanExperimentResult(
'Foliage',
(0.225, 0.538), (0.222, 0.536), (3, 2)),
BrenemanExperimentResult(
'Green',
(0.135, 0.531), (0.153, 0.529), (8, 2)),
BrenemanExperimentResult(
'Blue-green',
(0.145, 0.475), (0.160, 0.484), (3, 5)),
BrenemanExperimentResult(
'Blue',
(0.163, 0.331), (0.171, 0.379), (4, 11)),
BrenemanExperimentResult(
'Sky',
(0.179, 0.438), (0.187, 0.452), (4, 7)),
BrenemanExperimentResult(
'Purple',
(0.245, 0.365), (0.240, 0.398), (4, 10)))
# yapf: enable
"""
*Breneman (1987)* experiment 7 results.
BRENEMAN_EXPERIMENT_7_RESULTS : tuple
Notes
-----
- Effective White Levels : 850 and 11100 :math:`cd/m^2`
- Observers Count : 8
"""
# yapf: disable
BRENEMAN_EXPERIMENT_8_RESULTS = (
BrenemanExperimentResult(
'Illuminant',
(0.258, 0.524), (0.195, 0.469)),
BrenemanExperimentResult(
'Gray',
(0.257, 0.525), (0.200, 0.494), (2, 3), (1, 2), (0, 0)),
BrenemanExperimentResult(
'Red',
(0.458, 0.522), (0.410, 0.508), (12, 4), (-3, 5), (-7, 2)),
BrenemanExperimentResult(
'Skin',
(0.308, 0.526), (0.249, 0.502), (6, 2), (-1, 1), (-3, -1)),
BrenemanExperimentResult(
'Orange',
(0.359, 0.545), (0.299, 0.545), (12, 4), (0, -2), (-3, 0)),
BrenemanExperimentResult(
'Brown',
(0.349, 0.540), (0.289, 0.532), (10, 4), (0, 1), (-2, 2)),
BrenemanExperimentResult(
'Yellow',
(0.317, 0.550), (0.256, 0.549), (9, 5), (0, -3), (-3, 1)),
BrenemanExperimentResult(
'Foliage',
(0.260, 0.545), (0.198, 0.529), (5, 5), (3, 1), (0, 3)),
BrenemanExperimentResult(
'Green',
(0.193, 0.543), (0.137, 0.520), (9, 5), (3, 0), (2, 1)),
BrenemanExperimentResult(
'Blue-green',
(0.182, 0.516), (0.139, 0.477), (9, 4), (-3, 0), (-2, -4)),
BrenemanExperimentResult(
'Blue',
(0.184, 0.444), (0.150, 0.387), (5, 11), (3, -10), (6, -22)),
BrenemanExperimentResult(
'Sky',
(0.224, 0.489), (0.177, 0.439), (5, 6), (1, 1), (1, -7)),
BrenemanExperimentResult(
'Purple',
(0.277, 0.454), (0.226, 0.389), (4, 10), (1, 4), (1, -8)))
# yapf: enable
"""
*Breneman (1987)* experiment 8 results.
BRENEMAN_EXPERIMENT_8_RESULTS : tuple
Notes
-----
- Illuminants : *A*, *D65*
- White Luminance : 350 :math:`cd/m^2`
- Observers Count : 8
"""
# yapf: disable
BRENEMAN_EXPERIMENT_9_RESULTS = (
BrenemanExperimentResult(
'Illuminant',
(0.254, 0.525), (0.195, 0.465)),
BrenemanExperimentResult(
'Gray',
(0.256, 0.524), (0.207, 0.496), (4, 6), (3, 2), (0, 0)),
BrenemanExperimentResult(
'Red',
(0.459, 0.521), (0.415, 0.489), (20, 14), (2, 12), (-2, 21)),
BrenemanExperimentResult(
'Skin',
(0.307, 0.525), (0.261, 0.500), (7, 7), (0, 1), (-5, 2)),
BrenemanExperimentResult(
'Orange',
(0.359, 0.545), (0.313, 0.532), (7, 5), (-2, -3), (-6, 13)),
BrenemanExperimentResult(
'Brown',
(0.349, 0.540), (0.302, 0.510), (11, 15), (0, 12), (-5, 24)),
BrenemanExperimentResult(
'Yellow',
(0.317, 0.550), (0.268, 0.538), (7, 10), (1, -4), (-4, 12)),
BrenemanExperimentResult(
'Foliage',
(0.259, 0.544), (0.212, 0.510), (10, 11), (0, 14), (-4, 22)),
BrenemanExperimentResult(
'Green',
(0.193, 0.542), (0.150, 0.506), (6, 10), (-1, 13), (-2, 15)),
BrenemanExperimentResult(
'Blue-green',
(0.181, 0.517), (0.144, 0.487), (9, 6), (-3, 0), (-1, -9)),
BrenemanExperimentResult(
'Blue',
(0.184, 0.444), (0.155, 0.407), (4, 11), (-2, -6), (6, -36)),
BrenemanExperimentResult(
'Sky',
(0.225, 0.490), (0.183, 0.458), (5, 8), (1, -3), (2, -19)),
BrenemanExperimentResult(
'Purple',
(0.276, 0.454), (0.233, 0.404), (7, 12), (2, 9), (0, -16)),
BrenemanExperimentResult(
'(Gray)h',
(0.256, 0.525), (0.208, 0.498)),
BrenemanExperimentResult(
'(Red)h',
(0.456, 0.521), (0.416, 0.501), (15, 7), None, (-6, -9)),
BrenemanExperimentResult(
'(Brown)h',
(0.349, 0.539), (0.306, 0.526), (11, 8), None, (-8, 7)),
BrenemanExperimentResult(
'(Foliage)h',
(0.260, 0.545), (0.213, 0.528), (7, 9), None, (-4, 5)),
BrenemanExperimentResult(
'(Green)h',
(0.193, 0.543), (0.149, 0.525), (10, 8), None, (-1, -1)),
BrenemanExperimentResult(
'(Blue)h',
(0.184, 0.444), (0.156, 0.419), (7, 8), None, (4, -45)),
BrenemanExperimentResult(
'(Purple)h',
(0.277, 0.456), (0.236, 0.422), (6, 11), None, (-2, -29)))
# yapf: enable
"""
*Breneman (1987)* experiment 9 results.
BRENEMAN_EXPERIMENT_9_RESULTS : tuple
Notes
-----
- Illuminants : *A*, *D65*
- White Luminance : 15 :math:`cd/m^2`
- Observers Count : 8
- The colors indicated by (.)h are the darker colors presented at the higher
  luminance level of the lighter colors.
"""
# yapf: disable
BRENEMAN_EXPERIMENT_10_RESULTS = (
BrenemanExperimentResult(
'Gray',
(0.208, 0.482), (0.213, 0.494), (3, 3)),
BrenemanExperimentResult(
'Red',
(0.447, 0.512), (0.411, 0.506), (15, 7)),
BrenemanExperimentResult(
'Skin',
(0.269, 0.505), (0.269, 0.511), (4, 3)),
BrenemanExperimentResult(
'Orange',
(0.331, 0.549), (0.315, 0.536), (7, 8)),
BrenemanExperimentResult(
'Brown',
(0.323, 0.542), (0.310, 0.526), (6, 8)),
BrenemanExperimentResult(
'Yellow',
(0.268, 0.556), (0.268, 0.541), (3, 6)),
BrenemanExperimentResult(
'Foliage',
(0.226, 0.538), (0.230, 0.525), (4, 8)),
BrenemanExperimentResult(
'Green',
(0.135, 0.531), (0.158, 0.524), (6, 3)),
BrenemanExperimentResult(
'Blue-green',
(0.145, 0.476), (0.161, 0.491), (4, 4)),
BrenemanExperimentResult(
'Blue',
(0.163, 0.330), (0.171, 0.377), (6, 19)),
BrenemanExperimentResult(
'Sky',
(0.179, 0.439), (0.187, 0.465), (5, 5)),
BrenemanExperimentResult(
'Purple',
(0.245, 0.366), (0.240, 0.402), (3, 12)))
# yapf: enable
"""
*Breneman (1987)* experiment 10 results.
BRENEMAN_EXPERIMENT_10_RESULTS : tuple
Notes
-----
- Effective White Levels : 15 and 270 :math:`cd/m^2`
- Observers Count : 7
"""
# yapf: disable
BRENEMAN_EXPERIMENT_11_RESULTS = (
BrenemanExperimentResult(
'Illuminant',
(0.208, 0.482), (0.174, 0.520)),
BrenemanExperimentResult(
'Gray',
(0.209, 0.483), (0.176, 0.513), (3, 4), (2, 2), (0, 0)),
BrenemanExperimentResult(
'Red',
(0.450, 0.512), (0.419, 0.524), (10, 2), (3, 2), (8, -1)),
BrenemanExperimentResult(
'Skin',
(0.268, 0.506), (0.240, 0.528), (6, 2), (-4, 0), (-3, 0)),
BrenemanExperimentResult(
'Orange',
(0.331, 0.547), (0.293, 0.553), (6, 2), (3, -1), (5, 1)),
BrenemanExperimentResult(
'Brown',
(0.323, 0.542), (0.290, 0.552), (5, 2), (-1, -3), (0, -1)),
BrenemanExperimentResult(
'Yellow',
(0.266, 0.549), (0.236, 0.557), (4, 2), (-3, -2), (-4, 2)),
BrenemanExperimentResult(
'Foliage',
(0.227, 0.538), (0.194, 0.552), (4, 2), (2, -3), (-1, 1)),
BrenemanExperimentResult(
'Green',
(0.146, 0.534), (0.118, 0.551), (8, 3), (4, -2), (-6, 3)),
BrenemanExperimentResult(
'Blue-green',
(0.160, 0.475), (0.130, 0.513), (9, 4), (1, -1), (-4, -3)),
BrenemanExperimentResult(
'Blue',
(0.177, 0.340), (0.133, 0.427), (6, 14), (4, -17), (11, -29)),
BrenemanExperimentResult(
'Sky',
(0.179, 0.438), (0.146, 0.482), (6, 10), (1, 4), (0, -1)),
BrenemanExperimentResult(
'Purple',
(0.245, 0.366), (0.216, 0.419), (4, 13), (-3, 8), (4, -2)))
# yapf: enable
"""
*Breneman (1987)* experiment 11 results.
BRENEMAN_EXPERIMENT_11_RESULTS : tuple
Notes
-----
- Illuminants : *green*, *D65*
- White Luminance : 1560 :math:`cd/m^2`
- Observers Count : 7
"""
# yapf: disable
BRENEMAN_EXPERIMENT_12_RESULTS = (
BrenemanExperimentResult(
'Illuminant',
(0.205, 0.482), (0.174, 0.519)),
BrenemanExperimentResult(
'Gray',
(0.208, 0.482), (0.181, 0.507), (4, 3), (0, 1), (0, 0)),
BrenemanExperimentResult(
'Red',
(0.451, 0.512), (0.422, 0.526), (20, 3), (0, -5), (10, -5)),
BrenemanExperimentResult(
'Skin',
(0.268, 0.506), (0.244, 0.525), (5, 2), (-6, 0), (-2, -1)),
BrenemanExperimentResult(
'Orange',
(0.331, 0.548), (0.292, 0.553), (10, 2), (5, 2), (11, 1)),
BrenemanExperimentResult(
'Brown',
(0.324, 0.542), (0.286, 0.554), (8, 1), (5, -3), (10, -4)),
BrenemanExperimentResult(
'Yellow',
(0.266, 0.548), (0.238, 0.558), (6, 2), (-3, -1), (-1, -2)),
BrenemanExperimentResult(
'Foliage',
(0.227, 0.538), (0.196, 0.555), (6, 3), (3, -4), (2, -5)),
BrenemanExperimentResult(
'Green',
(0.145, 0.534), (0.124, 0.551), (8, 6), (1, -1), (-8, -1)),
BrenemanExperimentResult(
'Blue-green',
(0.160, 0.474), (0.135, 0.505), (5, 2), (1, -1), (-4, -3)),
BrenemanExperimentResult(
'Blue',
(0.178, 0.339), (0.149, 0.392), (4, 20), (-1, -5), (3, -7)),
BrenemanExperimentResult(
'Sky',
(0.179, 0.440), (0.150, 0.473), (4, 8), (3, 2), (2, 0)),
BrenemanExperimentResult(
'Purple',
(0.246, 0.366), (0.222, 0.404), (5, 15), (-4, 2), (4, 2)))
# yapf: enable
"""
*Breneman (1987)* experiment 12 results.
BRENEMAN_EXPERIMENT_12_RESULTS : tuple
Notes
-----
- Illuminants : *D55*, *green*
- White Luminance : 75 :math:`cd/m^2`
- Observers Count : 7
"""
# yapf: disable
# Primaries data are present for experiments 1-4, 6, 8, 9, 11 and 12 only;
# experiments 5, 7 and 10 (the two-effective-white-level experiments) have
# no entry here.
BRENEMAN_EXPERIMENTS_PRIMARIES_CHROMATICITIES = DocstringDict({
    1: PrimariesChromaticityCoordinates(
        1, ('A', 'D65'), 1500,
        (0.671, 0.519), (-0.586, 0.627), (0.253, 0.016)),
    2: PrimariesChromaticityCoordinates(
        2, ('Projector', 'D55'), 1500,
        (0.675, 0.523), (-0.466, 0.617), (0.255, 0.018)),
    3: PrimariesChromaticityCoordinates(
        3, ('Projector', 'D55'), 75,
        (0.664, 0.510), (-0.256, 0.729), (0.244, 0.003)),
    4: PrimariesChromaticityCoordinates(
        4, ('A', 'D65'), 75,
        (0.674, 0.524), (-0.172, 0.628), (0.218, -0.026)),
    6: PrimariesChromaticityCoordinates(
        6, ('A', 'D55'), 11100,
        (0.659, 0.506), (-0.141, 0.615), (0.249, 0.009)),
    8: PrimariesChromaticityCoordinates(
        8, ('A', 'D65'), 350,
        (0.659, 0.505), (-0.246, 0.672), (0.235, -0.006)),
    9: PrimariesChromaticityCoordinates(
        9, ('A', 'D65'), 15,
        (0.693, 0.546), (-0.446, 0.773), (0.221, -0.023)),
    11: PrimariesChromaticityCoordinates(
        11, ('D55', 'green'), 1560,
        (0.680, 0.529), (0.018, 0.576), (0.307, 0.080)),
    12: PrimariesChromaticityCoordinates(
        12, ('D55', 'green'), 75,
        (0.661, 0.505), (0.039, 0.598), (0.345, 0.127))})
# yapf: enable
BRENEMAN_EXPERIMENTS_PRIMARIES_CHROMATICITIES.__doc__ = """
*Breneman (1987)* experiments primaries chromaticities.
References
----------
:cite:`Breneman1987b`
BRENEMAN_EXPERIMENTS_PRIMARIES_CHROMATICITIES : dict
"""
# Mapping from experiment number to its tuple of
# :class:`BrenemanExperimentResult` instances defined above.
BRENEMAN_EXPERIMENTS = DocstringDict({
    1: BRENEMAN_EXPERIMENT_1_RESULTS,
    2: BRENEMAN_EXPERIMENT_2_RESULTS,
    3: BRENEMAN_EXPERIMENT_3_RESULTS,
    4: BRENEMAN_EXPERIMENT_4_RESULTS,
    5: BRENEMAN_EXPERIMENT_5_RESULTS,
    6: BRENEMAN_EXPERIMENT_6_RESULTS,
    7: BRENEMAN_EXPERIMENT_7_RESULTS,
    8: BRENEMAN_EXPERIMENT_8_RESULTS,
    9: BRENEMAN_EXPERIMENT_9_RESULTS,
    10: BRENEMAN_EXPERIMENT_10_RESULTS,
    11: BRENEMAN_EXPERIMENT_11_RESULTS,
    12: BRENEMAN_EXPERIMENT_12_RESULTS
})
BRENEMAN_EXPERIMENTS.__doc__ = """
*Breneman (1987)* experiments.
References
----------
:cite:`Breneman1987b`
BRENEMAN_EXPERIMENTS : dict
"""
| 32.035377 | 79 | 0.536406 |
from __future__ import division, unicode_literals
import numpy as np
from collections import namedtuple
from colour.utilities.documentation import DocstringDict
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2019 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-developers@colour-science.org'
__status__ = 'Production'
__all__ = [
'BrenemanExperimentResult', 'PrimariesChromaticityCoordinates',
'BRENEMAN_EXPERIMENT_1_RESULTS', 'BRENEMAN_EXPERIMENT_2_RESULTS',
'BRENEMAN_EXPERIMENT_3_RESULTS', 'BRENEMAN_EXPERIMENT_4_RESULTS',
'BRENEMAN_EXPERIMENT_5_RESULTS', 'BRENEMAN_EXPERIMENT_6_RESULTS',
'BRENEMAN_EXPERIMENT_7_RESULTS', 'BRENEMAN_EXPERIMENT_10_RESULTS',
'BRENEMAN_EXPERIMENT_8_RESULTS', 'BRENEMAN_EXPERIMENT_9_RESULTS',
'BRENEMAN_EXPERIMENT_11_RESULTS', 'BRENEMAN_EXPERIMENT_12_RESULTS',
'BRENEMAN_EXPERIMENTS_PRIMARIES_CHROMATICITIES', 'BRENEMAN_EXPERIMENTS'
]
class BrenemanExperimentResult(
namedtuple('BrenemanExperimentResult',
('name', 'uv_t', 'uv_m', 's_uv', 'd_uv_i', 'd_uv_g'))):
def __new__(cls, name, uv_t, uv_m, s_uv=None, d_uv_i=None, d_uv_g=None):
return super(BrenemanExperimentResult, cls).__new__(
cls, name, np.array(uv_t), np.array(uv_m), np.array(s_uv),
np.array(d_uv_i), np.array(d_uv_g))
class PrimariesChromaticityCoordinates(
namedtuple(
'PrimariesChromaticityCoordinates',
('experiment', 'illuminants', 'Y', 'P_uvp', 'D_uvp', 'T_uvp'))):
def __new__(cls,
experiment,
illuminants,
Y,
P_uvp=None,
D_uvp=None,
T_uvp=None):
return super(PrimariesChromaticityCoordinates, cls).__new__(
cls, experiment, np.array(illuminants), np.array(Y),
np.array(P_uvp), np.array(D_uvp), np.array(T_uvp))
BRENEMAN_EXPERIMENT_1_RESULTS = (
BrenemanExperimentResult(
'Illuminant',
(0.259, 0.526), (0.200, 0.475)),
BrenemanExperimentResult(
'Gray',
(0.259, 0.524), (0.199, 0.487), (4, 4), (2, 3), (0, 0)),
BrenemanExperimentResult(
'Red',
(0.459, 0.522), (0.420, 0.509), (19, 4), (-10, -7), (-19, -3)),
BrenemanExperimentResult(
'Skin',
(0.307, 0.526), (0.249, 0.497), (7, 4), (-1, 1), (-6, -1)),
BrenemanExperimentResult(
'Orange',
(0.360, 0.544), (0.302, 0.548), (12, 1), (1, -2), (-7, -6)),
BrenemanExperimentResult(
'Brown',
(0.350, 0.541), (0.290, 0.537), (11, 4), (3, 0), (-5, -3)),
BrenemanExperimentResult(
'Yellow',
(0.318, 0.550), (0.257, 0.554), (8, 2), (0, 2), (-5, -5)),
BrenemanExperimentResult(
'Foliage',
(0.258, 0.542), (0.192, 0.529), (4, 6), (3, 2), (3, -6)),
BrenemanExperimentResult(
'Green',
(0.193, 0.542), (0.129, 0.521), (7, 5), (3, 2), (9, -7)),
BrenemanExperimentResult(
'Blue-green',
(0.180, 0.516), (0.133, 0.469), (4, 6), (-3, -2), (2, -5)),
BrenemanExperimentResult(
'Blue',
(0.186, 0.445), (0.158, 0.340), (13, 33), (2, 7), (1, 13)),
BrenemanExperimentResult(
'Sky',
(0.226, 0.491), (0.178, 0.426), (3, 14), (1, -3), (0, -1)),
BrenemanExperimentResult(
'Purple',
(0.278, 0.456), (0.231, 0.365), (4, 25), (0, 2), (-5, 7)))
BRENEMAN_EXPERIMENT_2_RESULTS = (
BrenemanExperimentResult(
'Illuminant',
(0.222, 0.521), (0.204, 0.479)),
BrenemanExperimentResult(
'Gray',
(0.227, 0.517), (0.207, 0.486), (2, 5), (-1, 0), (0, 0)),
BrenemanExperimentResult(
'Red',
(0.464, 0.520), (0.449, 0.511), (22, 3), (-8, -8), (-7, -2)),
BrenemanExperimentResult(
'Skin',
(0.286, 0.526), (0.263, 0.505), (7, 2), (0, -1), (0, -1)),
BrenemanExperimentResult(
'Orange',
(0.348, 0.546), (0.322, 0.545), (13, 3), (3, -1), (3, -2)),
BrenemanExperimentResult(
'Brown',
(0.340, 0.543), (0.316, 0.537), (11, 3), (1, 1), (0, 0)),
BrenemanExperimentResult(
'Yellow',
(0.288, 0.554), (0.265, 0.553), (5, 2), (-2, 2), (-1, -2)),
BrenemanExperimentResult(
'Foliage',
(0.244, 0.547), (0.221, 0.538), (4, 3), (-2, 1), (0, -3)),
BrenemanExperimentResult(
'Green',
(0.156, 0.548), (0.135, 0.532), (4, 3), (-1, 3), (3, -4)),
BrenemanExperimentResult(
'Blue-green',
(0.159, 0.511), (0.145, 0.472), (9, 7), (-1, 2), (2, 1)),
BrenemanExperimentResult(
'Blue',
(0.160, 0.406), (0.163, 0.331), (23, 31), (2, -3), (-1, 3)),
BrenemanExperimentResult(
'Sky',
(0.190, 0.481), (0.176, 0.431), (5, 24), (2, -2), (2, 0)),
BrenemanExperimentResult(
'Purple',
(0.258, 0.431), (0.244, 0.349), (4, 19), (-3, 13), (-4, 19)))
BRENEMAN_EXPERIMENT_3_RESULTS = (
BrenemanExperimentResult(
'Illuminant',
(0.223, 0.521), (0.206, 0.478)),
BrenemanExperimentResult(
'Gray',
(0.228, 0.517), (0.211, 0.494), (1, 3), (0, 2), (0, 0)),
BrenemanExperimentResult(
'Red',
(0.462, 0.519), (0.448, 0.505), (11, 4), (-3, 6), (-4, 6)),
BrenemanExperimentResult(
'Skin',
(0.285, 0.524), (0.267, 0.507), (6, 3), (-1, 1), (-2, 1)),
BrenemanExperimentResult(
'Orange',
(0.346, 0.546), (0.325, 0.541), (11, 3), (1, -2), (2, 3)),
BrenemanExperimentResult(
'Brown',
(0.338, 0.543), (0.321, 0.532), (9, 6), (-3, 2), (-3, 7)),
BrenemanExperimentResult(
'Yellow',
(0.287, 0.554), (0.267, 0.548), (4, 5), (1, -2), (0, 5)),
BrenemanExperimentResult(
'Foliage',
(0.244, 0.547), (0.226, 0.531), (3, 6), (-1, 3), (-2, 8)),
BrenemanExperimentResult(
'Green',
(0.157, 0.548), (0.141, 0.528), (9, 6), (2, 2), (0, 6)),
BrenemanExperimentResult(
'Blue-green',
(0.160, 0.510), (0.151, 0.486), (8, 5), (-2, -1), (-2, -5)),
BrenemanExperimentResult(
'Blue',
(0.162, 0.407), (0.158, 0.375), (6, 7), (1, -6), (4, -23)),
BrenemanExperimentResult(
'Sky',
(0.191, 0.482), (0.179, 0.452), (4, 5), (0, 1), (1, -7)),
BrenemanExperimentResult(
'Purple',
(0.258, 0.432), (0.238, 0.396), (4, 8), (5, 3), (4, -11)))
BRENEMAN_EXPERIMENT_4_RESULTS = (
BrenemanExperimentResult(
'Illuminant',
(0.258, 0.523), (0.199, 0.467)),
BrenemanExperimentResult(
'Gray',
(0.257, 0.524), (0.205, 0.495), (2, 2), (0, 4), (0, 0)),
BrenemanExperimentResult(
'Red',
(0.460, 0.521), (0.416, 0.501), (11, 6), (-6, 4), (-6, 9)),
BrenemanExperimentResult(
'Skin',
(0.308, 0.526), (0.253, 0.503), (7, 3), (-1, 1), (-1, 0)),
BrenemanExperimentResult(
'Orange',
(0.360, 0.544), (0.303, 0.541), (14, 5), (1, -4), (1, 2)),
BrenemanExperimentResult(
'Brown',
(0.350, 0.541), (0.296, 0.527), (11, 7), (-2, 4), (-3, 9)),
BrenemanExperimentResult(
'Yellow',
(0.317, 0.550), (0.260, 0.547), (9, 5), (1, -3), (0, 3)),
BrenemanExperimentResult(
'Foliage',
(0.258, 0.543), (0.203, 0.520), (4, 6), (0, 8), (0, 9)),
BrenemanExperimentResult(
'Green',
(0.193, 0.543), (0.142, 0.516), (6, 9), (3, 8), (2, 6)),
BrenemanExperimentResult(
'Blue-green',
(0.180, 0.516), (0.140, 0.484), (9, 5), (-2, -1), (-1, -9)),
BrenemanExperimentResult(
'Blue',
(0.185, 0.445), (0.151, 0.394), (8, 10), (2, -8), (8, -24)),
BrenemanExperimentResult(
'Sky',
(0.225, 0.490), (0.180, 0.448), (4, 8), (1, -1), (3, -11)),
BrenemanExperimentResult(
'Purple',
(0.278, 0.455), (0.229, 0.388), (6, 14), (1, 12), (3, 0)))
BRENEMAN_EXPERIMENT_5_RESULTS = (
BrenemanExperimentResult(
'Gray',
(0.028, 0.480), (0.212, 0.491), (2, 2)),
BrenemanExperimentResult(
'Red',
(0.449, 0.512), (0.408, 0.514), (11, 5)),
BrenemanExperimentResult(
'Skin',
(0.269, 0.505), (0.262, 0.511), (4, 2)),
BrenemanExperimentResult(
'Orange',
(0.331, 0.548), (0.303, 0.545), (4, 3)),
BrenemanExperimentResult(
'Brown',
(0.322, 0.541), (0.303, 0.538), (4, 4)),
BrenemanExperimentResult(
'Yellow',
(0.268, 0.555), (0.264, 0.550), (3, 2)),
BrenemanExperimentResult(
'Foliage',
(0.224, 0.538), (0.227, 0.535), (3, 3)),
BrenemanExperimentResult(
'Green',
(0.134, 0.531), (0.159, 0.530), (9, 3)),
BrenemanExperimentResult(
'Blue-green',
(0.145, 0.474), (0.165, 0.490), (8, 3)),
BrenemanExperimentResult(
'Blue',
(0.163, 0.329), (0.173, 0.378), (7, 12)),
BrenemanExperimentResult(
'Sky',
(0.179, 0.438), (0.189, 0.462), (5, 4)),
BrenemanExperimentResult(
'Purple',
(0.245, 0.364), (0.239, 0.401), (4, 16)))
BRENEMAN_EXPERIMENT_6_RESULTS = (
BrenemanExperimentResult(
'Illuminant',
(0.257, 0.525), (0.201, 0.482)),
BrenemanExperimentResult(
'Gray',
(0.267, 0.521), (0.207, 0.485), (5, 3), (-1, 0), (0, 0)),
BrenemanExperimentResult(
'Red',
(0.457, 0.521), (0.398, 0.516), (9, 4), (-2, -5), (1, -9)),
BrenemanExperimentResult(
'Skin',
(0.316, 0.526), (0.253, 0.503), (5, 3), (-3, -2), (-1, -3)),
BrenemanExperimentResult(
'Orange',
(0.358, 0.545), (0.287, 0.550), (7, 3), (3, 0), (7, -6)),
BrenemanExperimentResult(
'Brown',
(0.350, 0.541), (0.282, 0.540), (6, 3), (-1, 0), (2, -5)),
BrenemanExperimentResult(
'Yellow',
(0.318, 0.551), (0.249, 0.556), (7, 2), (-1, 1), (2, -5)),
BrenemanExperimentResult(
'Foliage',
(0.256, 0.547), (0.188, 0.537), (5, 4), (3, 1), (4, -2)),
BrenemanExperimentResult(
'Green',
(0.193, 0.542), (0.133, 0.520), (13, 3), (5, -2), (5, -4)),
BrenemanExperimentResult(
'Blue-green',
(0.180, 0.516), (0.137, 0.466), (12, 10), (0, 0), (-2, 2)),
BrenemanExperimentResult(
'Blue',
(0.186, 0.445), (0.156, 0.353), (12, 45), (6, 1), (2, 6)),
BrenemanExperimentResult(
'Sky',
(0.225, 0.492), (0.178, 0.428), (6, 14), (1, -1), (-1, 3)),
BrenemanExperimentResult(
'Purple',
(0.276, 0.456), (0.227, 0.369), (6, 27), (-2, 4), (-3, 9)))
BRENEMAN_EXPERIMENT_7_RESULTS = (
BrenemanExperimentResult(
'Gray',
(0.208, 0.481), (0.211, 0.486), (2, 3)),
BrenemanExperimentResult(
'Red',
(0.448, 0.512), (0.409, 0.516), (9, 2)),
BrenemanExperimentResult(
'Skin',
(0.269, 0.505), (0.256, 0.506), (4, 3)),
BrenemanExperimentResult(
'Orange',
(0.331, 0.549), (0.305, 0.547), (5, 4)),
BrenemanExperimentResult(
'Brown',
(0.322, 0.541), (0.301, 0.539), (5, 2)),
BrenemanExperimentResult(
'Yellow',
(0.268, 0.555), (0.257, 0.552), (3, 4)),
BrenemanExperimentResult(
'Foliage',
(0.225, 0.538), (0.222, 0.536), (3, 2)),
BrenemanExperimentResult(
'Green',
(0.135, 0.531), (0.153, 0.529), (8, 2)),
BrenemanExperimentResult(
'Blue-green',
(0.145, 0.475), (0.160, 0.484), (3, 5)),
BrenemanExperimentResult(
'Blue',
(0.163, 0.331), (0.171, 0.379), (4, 11)),
BrenemanExperimentResult(
'Sky',
(0.179, 0.438), (0.187, 0.452), (4, 7)),
BrenemanExperimentResult(
'Purple',
(0.245, 0.365), (0.240, 0.398), (4, 10)))
BRENEMAN_EXPERIMENT_8_RESULTS = (
BrenemanExperimentResult(
'Illuminant',
(0.258, 0.524), (0.195, 0.469)),
BrenemanExperimentResult(
'Gray',
(0.257, 0.525), (0.200, 0.494), (2, 3), (1, 2), (0, 0)),
BrenemanExperimentResult(
'Red',
(0.458, 0.522), (0.410, 0.508), (12, 4), (-3, 5), (-7, 2)),
BrenemanExperimentResult(
'Skin',
(0.308, 0.526), (0.249, 0.502), (6, 2), (-1, 1), (-3, -1)),
BrenemanExperimentResult(
'Orange',
(0.359, 0.545), (0.299, 0.545), (12, 4), (0, -2), (-3, 0)),
BrenemanExperimentResult(
'Brown',
(0.349, 0.540), (0.289, 0.532), (10, 4), (0, 1), (-2, 2)),
BrenemanExperimentResult(
'Yellow',
(0.317, 0.550), (0.256, 0.549), (9, 5), (0, -3), (-3, 1)),
BrenemanExperimentResult(
'Foliage',
(0.260, 0.545), (0.198, 0.529), (5, 5), (3, 1), (0, 3)),
BrenemanExperimentResult(
'Green',
(0.193, 0.543), (0.137, 0.520), (9, 5), (3, 0), (2, 1)),
BrenemanExperimentResult(
'Blue-green',
(0.182, 0.516), (0.139, 0.477), (9, 4), (-3, 0), (-2, -4)),
BrenemanExperimentResult(
'Blue',
(0.184, 0.444), (0.150, 0.387), (5, 11), (3, -10), (6, -22)),
BrenemanExperimentResult(
'Sky',
(0.224, 0.489), (0.177, 0.439), (5, 6), (1, 1), (1, -7)),
BrenemanExperimentResult(
'Purple',
(0.277, 0.454), (0.226, 0.389), (4, 10), (1, 4), (1, -8)))
BRENEMAN_EXPERIMENT_9_RESULTS = (
BrenemanExperimentResult(
'Illuminant',
(0.254, 0.525), (0.195, 0.465)),
BrenemanExperimentResult(
'Gray',
(0.256, 0.524), (0.207, 0.496), (4, 6), (3, 2), (0, 0)),
BrenemanExperimentResult(
'Red',
(0.459, 0.521), (0.415, 0.489), (20, 14), (2, 12), (-2, 21)),
BrenemanExperimentResult(
'Skin',
(0.307, 0.525), (0.261, 0.500), (7, 7), (0, 1), (-5, 2)),
BrenemanExperimentResult(
'Orange',
(0.359, 0.545), (0.313, 0.532), (7, 5), (-2, -3), (-6, 13)),
BrenemanExperimentResult(
'Brown',
(0.349, 0.540), (0.302, 0.510), (11, 15), (0, 12), (-5, 24)),
BrenemanExperimentResult(
'Yellow',
(0.317, 0.550), (0.268, 0.538), (7, 10), (1, -4), (-4, 12)),
BrenemanExperimentResult(
'Foliage',
(0.259, 0.544), (0.212, 0.510), (10, 11), (0, 14), (-4, 22)),
BrenemanExperimentResult(
'Green',
(0.193, 0.542), (0.150, 0.506), (6, 10), (-1, 13), (-2, 15)),
BrenemanExperimentResult(
'Blue-green',
(0.181, 0.517), (0.144, 0.487), (9, 6), (-3, 0), (-1, -9)),
BrenemanExperimentResult(
'Blue',
(0.184, 0.444), (0.155, 0.407), (4, 11), (-2, -6), (6, -36)),
BrenemanExperimentResult(
'Sky',
(0.225, 0.490), (0.183, 0.458), (5, 8), (1, -3), (2, -19)),
BrenemanExperimentResult(
'Purple',
(0.276, 0.454), (0.233, 0.404), (7, 12), (2, 9), (0, -16)),
BrenemanExperimentResult(
'(Gray)h',
(0.256, 0.525), (0.208, 0.498)),
BrenemanExperimentResult(
'(Red)h',
(0.456, 0.521), (0.416, 0.501), (15, 7), None, (-6, -9)),
BrenemanExperimentResult(
'(Brown)h',
(0.349, 0.539), (0.306, 0.526), (11, 8), None, (-8, 7)),
BrenemanExperimentResult(
'(Foliage)h',
(0.260, 0.545), (0.213, 0.528), (7, 9), None, (-4, 5)),
BrenemanExperimentResult(
'(Green)h',
(0.193, 0.543), (0.149, 0.525), (10, 8), None, (-1, -1)),
BrenemanExperimentResult(
'(Blue)h',
(0.184, 0.444), (0.156, 0.419), (7, 8), None, (4, -45)),
BrenemanExperimentResult(
'(Purple)h',
(0.277, 0.456), (0.236, 0.422), (6, 11), None, (-2, -29)))
BRENEMAN_EXPERIMENT_10_RESULTS = (
BrenemanExperimentResult(
'Gray',
(0.208, 0.482), (0.213, 0.494), (3, 3)),
BrenemanExperimentResult(
'Red',
(0.447, 0.512), (0.411, 0.506), (15, 7)),
BrenemanExperimentResult(
'Skin',
(0.269, 0.505), (0.269, 0.511), (4, 3)),
BrenemanExperimentResult(
'Orange',
(0.331, 0.549), (0.315, 0.536), (7, 8)),
BrenemanExperimentResult(
'Brown',
(0.323, 0.542), (0.310, 0.526), (6, 8)),
BrenemanExperimentResult(
'Yellow',
(0.268, 0.556), (0.268, 0.541), (3, 6)),
BrenemanExperimentResult(
'Foliage',
(0.226, 0.538), (0.230, 0.525), (4, 8)),
BrenemanExperimentResult(
'Green',
(0.135, 0.531), (0.158, 0.524), (6, 3)),
BrenemanExperimentResult(
'Blue-green',
(0.145, 0.476), (0.161, 0.491), (4, 4)),
BrenemanExperimentResult(
'Blue',
(0.163, 0.330), (0.171, 0.377), (6, 19)),
BrenemanExperimentResult(
'Sky',
(0.179, 0.439), (0.187, 0.465), (5, 5)),
BrenemanExperimentResult(
'Purple',
(0.245, 0.366), (0.240, 0.402), (3, 12)))
BRENEMAN_EXPERIMENT_11_RESULTS = (
BrenemanExperimentResult(
'Illuminant',
(0.208, 0.482), (0.174, 0.520)),
BrenemanExperimentResult(
'Gray',
(0.209, 0.483), (0.176, 0.513), (3, 4), (2, 2), (0, 0)),
BrenemanExperimentResult(
'Red',
(0.450, 0.512), (0.419, 0.524), (10, 2), (3, 2), (8, -1)),
BrenemanExperimentResult(
'Skin',
(0.268, 0.506), (0.240, 0.528), (6, 2), (-4, 0), (-3, 0)),
BrenemanExperimentResult(
'Orange',
(0.331, 0.547), (0.293, 0.553), (6, 2), (3, -1), (5, 1)),
BrenemanExperimentResult(
'Brown',
(0.323, 0.542), (0.290, 0.552), (5, 2), (-1, -3), (0, -1)),
BrenemanExperimentResult(
'Yellow',
(0.266, 0.549), (0.236, 0.557), (4, 2), (-3, -2), (-4, 2)),
BrenemanExperimentResult(
'Foliage',
(0.227, 0.538), (0.194, 0.552), (4, 2), (2, -3), (-1, 1)),
BrenemanExperimentResult(
'Green',
(0.146, 0.534), (0.118, 0.551), (8, 3), (4, -2), (-6, 3)),
BrenemanExperimentResult(
'Blue-green',
(0.160, 0.475), (0.130, 0.513), (9, 4), (1, -1), (-4, -3)),
BrenemanExperimentResult(
'Blue',
(0.177, 0.340), (0.133, 0.427), (6, 14), (4, -17), (11, -29)),
BrenemanExperimentResult(
'Sky',
(0.179, 0.438), (0.146, 0.482), (6, 10), (1, 4), (0, -1)),
BrenemanExperimentResult(
'Purple',
(0.245, 0.366), (0.216, 0.419), (4, 13), (-3, 8), (4, -2)))
BRENEMAN_EXPERIMENT_12_RESULTS = (
BrenemanExperimentResult(
'Illuminant',
(0.205, 0.482), (0.174, 0.519)),
BrenemanExperimentResult(
'Gray',
(0.208, 0.482), (0.181, 0.507), (4, 3), (0, 1), (0, 0)),
BrenemanExperimentResult(
'Red',
(0.451, 0.512), (0.422, 0.526), (20, 3), (0, -5), (10, -5)),
BrenemanExperimentResult(
'Skin',
(0.268, 0.506), (0.244, 0.525), (5, 2), (-6, 0), (-2, -1)),
BrenemanExperimentResult(
'Orange',
(0.331, 0.548), (0.292, 0.553), (10, 2), (5, 2), (11, 1)),
BrenemanExperimentResult(
'Brown',
(0.324, 0.542), (0.286, 0.554), (8, 1), (5, -3), (10, -4)),
BrenemanExperimentResult(
'Yellow',
(0.266, 0.548), (0.238, 0.558), (6, 2), (-3, -1), (-1, -2)),
BrenemanExperimentResult(
'Foliage',
(0.227, 0.538), (0.196, 0.555), (6, 3), (3, -4), (2, -5)),
BrenemanExperimentResult(
'Green',
(0.145, 0.534), (0.124, 0.551), (8, 6), (1, -1), (-8, -1)),
BrenemanExperimentResult(
'Blue-green',
(0.160, 0.474), (0.135, 0.505), (5, 2), (1, -1), (-4, -3)),
BrenemanExperimentResult(
'Blue',
(0.178, 0.339), (0.149, 0.392), (4, 20), (-1, -5), (3, -7)),
BrenemanExperimentResult(
'Sky',
(0.179, 0.440), (0.150, 0.473), (4, 8), (3, 2), (2, 0)),
BrenemanExperimentResult(
'Purple',
(0.246, 0.366), (0.222, 0.404), (5, 15), (-4, 2), (4, 2)))
BRENEMAN_EXPERIMENTS_PRIMARIES_CHROMATICITIES = DocstringDict({
1: PrimariesChromaticityCoordinates(
1, ('A', 'D65'), 1500,
(0.671, 0.519), (-0.586, 0.627), (0.253, 0.016)),
2: PrimariesChromaticityCoordinates(
2, ('Projector', 'D55'), 1500,
(0.675, 0.523), (-0.466, 0.617), (0.255, 0.018)),
3: PrimariesChromaticityCoordinates(
3, ('Projector', 'D55'), 75,
(0.664, 0.510), (-0.256, 0.729), (0.244, 0.003)),
4: PrimariesChromaticityCoordinates(
4, ('A', 'D65'), 75,
(0.674, 0.524), (-0.172, 0.628), (0.218, -0.026)),
6: PrimariesChromaticityCoordinates(
6, ('A', 'D55'), 11100,
(0.659, 0.506), (-0.141, 0.615), (0.249, 0.009)),
8: PrimariesChromaticityCoordinates(
8, ('A', 'D65'), 350,
(0.659, 0.505), (-0.246, 0.672), (0.235, -0.006)),
9: PrimariesChromaticityCoordinates(
9, ('A', 'D65'), 15,
(0.693, 0.546), (-0.446, 0.773), (0.221, -0.023)),
11: PrimariesChromaticityCoordinates(
11, ('D55', 'green'), 1560,
(0.680, 0.529), (0.018, 0.576), (0.307, 0.080)),
12: PrimariesChromaticityCoordinates(
12, ('D55', 'green'), 75,
(0.661, 0.505), (0.039, 0.598), (0.345, 0.127))})
BRENEMAN_EXPERIMENTS_PRIMARIES_CHROMATICITIES.__doc__ = """
*Breneman (1987)* experiments primaries chromaticities.
References
----------
:cite:`Breneman1987b`
BRENEMAN_EXPERIMENTS_PRIMARIES_CHROMATICITIES : dict
"""
BRENEMAN_EXPERIMENTS = DocstringDict({
1: BRENEMAN_EXPERIMENT_1_RESULTS,
2: BRENEMAN_EXPERIMENT_2_RESULTS,
3: BRENEMAN_EXPERIMENT_3_RESULTS,
4: BRENEMAN_EXPERIMENT_4_RESULTS,
5: BRENEMAN_EXPERIMENT_5_RESULTS,
6: BRENEMAN_EXPERIMENT_6_RESULTS,
7: BRENEMAN_EXPERIMENT_7_RESULTS,
8: BRENEMAN_EXPERIMENT_8_RESULTS,
9: BRENEMAN_EXPERIMENT_9_RESULTS,
10: BRENEMAN_EXPERIMENT_10_RESULTS,
11: BRENEMAN_EXPERIMENT_11_RESULTS,
12: BRENEMAN_EXPERIMENT_12_RESULTS
})
BRENEMAN_EXPERIMENTS.__doc__ = """
*Breneman (1987)* experiments.
References
----------
:cite:`Breneman1987b`
BRENEMAN_EXPERIMENTS : dict
"""
| true | true |
f71b2b58505f1a73cc43c49801a8cae13c3f8a26 | 43 | py | Python | src/Application/PythonScriptModule/proto/state_2.py | antont/tundra | 5c9b0a3957071f08ab425dff701cdbb34f9e1868 | [
"Apache-2.0"
] | 1 | 2018-04-02T15:38:10.000Z | 2018-04-02T15:38:10.000Z | src/Application/PythonScriptModule/proto/state_2.py | antont/tundra | 5c9b0a3957071f08ab425dff701cdbb34f9e1868 | [
"Apache-2.0"
] | null | null | null | src/Application/PythonScriptModule/proto/state_2.py | antont/tundra | 5c9b0a3957071f08ab425dff701cdbb34f9e1868 | [
"Apache-2.0"
] | 1 | 2021-09-04T12:37:34.000Z | 2021-09-04T12:37:34.000Z | import state
def change():
state.x = 2 | 10.75 | 15 | 0.627907 | import state
def change():
state.x = 2 | true | true |
f71b2c8b5ead15b27ff28d5dc1c80528e9c46c18 | 2,755 | py | Python | profiler/torchmodules/torchlogger/activation_gradient_logger.py | NestLakerJasonLIN/pipedream | f50827f2e28cbdbd82a4ea686c0498272b1460d6 | [
"MIT"
] | 273 | 2019-08-31T14:12:11.000Z | 2022-03-05T13:34:25.000Z | profiler/torchmodules/torchlogger/activation_gradient_logger.py | albertsh10/pipedream | cad624f79a71f44ba79099f0c38321347b13e5c2 | [
"MIT"
] | 67 | 2019-09-19T15:36:59.000Z | 2022-01-13T09:11:54.000Z | profiler/torchmodules/torchlogger/activation_gradient_logger.py | albertsh10/pipedream | cad624f79a71f44ba79099f0c38321347b13e5c2 | [
"MIT"
] | 100 | 2019-09-16T20:59:14.000Z | 2022-03-23T12:56:56.000Z | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import pickle
import torch
class ActivationAndGradientLogger:
def __init__(self, directory):
self.directory = directory
try:
os.mkdir(self.directory)
except:
pass
self.iteration = 0
self.forward_counter = 0
self.backward_counter = 0
def reset_counters(self):
self.forward_counter = 0
self.backward_counter = 0
def hook_modules(self, module, iteration):
self.iteration = iteration
sub_directory = os.path.join(self.directory, str(iteration))
try:
os.mkdir(sub_directory)
except:
pass
self.hook_modules_helper(module, sub_directory)
def hook_modules_helper(self, module, sub_directory):
sub_modules = module.__dict__['_modules']
for name, sub_module in sub_modules.items():
if sub_module is None or isinstance(sub_module, torch.nn.Module) is False:
break
sub_sub_modules = sub_module.__dict__['_modules']
if len(sub_sub_modules) > 0:
# Recursively visit this module's descendants.
self.hook_modules_helper(sub_module, sub_directory)
else:
def forward_hook(*args):
activation = args[2]
filename = os.path.join(sub_directory, 'activations.%d.pkl' % self.forward_counter)
with open(filename, 'wb') as f:
torch.save(activation, f)
self.forward_counter += 1
def backward_hook(*args):
gradient = args[2]
filename = os.path.join(sub_directory, 'gradients.%d.pkl' % self.backward_counter)
with open(filename, 'wb') as f:
torch.save(gradient, f)
self.backward_counter += 1
sub_module.register_forward_hook(forward_hook)
sub_module.register_backward_hook(backward_hook)
def unhook_modules(self, module):
self.unhook_modules_helper(module)
self.reset_counters()
def unhook_modules_helper(self, module):
sub_modules = module.__dict__['_modules']
for name, sub_module in sub_modules.items():
if sub_module is None or isinstance(sub_module, torch.nn.Module) is False:
break
sub_sub_modules = sub_module.__dict__['_modules']
if len(sub_sub_modules) > 0:
# Recursively visit this module's descendants.
self.unhook_modules_helper(sub_module)
else:
sub_module.reset_hooks()
| 34.873418 | 103 | 0.591652 |
import os
import pickle
import torch
class ActivationAndGradientLogger:
def __init__(self, directory):
self.directory = directory
try:
os.mkdir(self.directory)
except:
pass
self.iteration = 0
self.forward_counter = 0
self.backward_counter = 0
def reset_counters(self):
self.forward_counter = 0
self.backward_counter = 0
def hook_modules(self, module, iteration):
self.iteration = iteration
sub_directory = os.path.join(self.directory, str(iteration))
try:
os.mkdir(sub_directory)
except:
pass
self.hook_modules_helper(module, sub_directory)
def hook_modules_helper(self, module, sub_directory):
sub_modules = module.__dict__['_modules']
for name, sub_module in sub_modules.items():
if sub_module is None or isinstance(sub_module, torch.nn.Module) is False:
break
sub_sub_modules = sub_module.__dict__['_modules']
if len(sub_sub_modules) > 0:
self.hook_modules_helper(sub_module, sub_directory)
else:
def forward_hook(*args):
activation = args[2]
filename = os.path.join(sub_directory, 'activations.%d.pkl' % self.forward_counter)
with open(filename, 'wb') as f:
torch.save(activation, f)
self.forward_counter += 1
def backward_hook(*args):
gradient = args[2]
filename = os.path.join(sub_directory, 'gradients.%d.pkl' % self.backward_counter)
with open(filename, 'wb') as f:
torch.save(gradient, f)
self.backward_counter += 1
sub_module.register_forward_hook(forward_hook)
sub_module.register_backward_hook(backward_hook)
def unhook_modules(self, module):
self.unhook_modules_helper(module)
self.reset_counters()
def unhook_modules_helper(self, module):
sub_modules = module.__dict__['_modules']
for name, sub_module in sub_modules.items():
if sub_module is None or isinstance(sub_module, torch.nn.Module) is False:
break
sub_sub_modules = sub_module.__dict__['_modules']
if len(sub_sub_modules) > 0:
# Recursively visit this module's descendants.
self.unhook_modules_helper(sub_module)
else:
sub_module.reset_hooks()
| true | true |
f71b2c90cb12b8290d45257d9a8169e55982187d | 4,207 | py | Python | twine/commands/check.py | chadwhawkins/twine | bd1d8b0f3ffdae9b91672d075d58cf635aa0e0f6 | [
"Apache-2.0"
] | null | null | null | twine/commands/check.py | chadwhawkins/twine | bd1d8b0f3ffdae9b91672d075d58cf635aa0e0f6 | [
"Apache-2.0"
] | null | null | null | twine/commands/check.py | chadwhawkins/twine | bd1d8b0f3ffdae9b91672d075d58cf635aa0e0f6 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Dustin Ingram
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import argparse
import cgi
import re
import sys
try:
from StringIO import StringIO
except ImportError:
from _io import StringIO
import readme_renderer.markdown
import readme_renderer.rst
import readme_renderer.txt
from twine.commands import _find_dists
from twine.package import PackageFile
_RENDERERS = {
None: readme_renderer.rst, # Default if description_content_type is None
"text/plain": readme_renderer.txt,
"text/x-rst": readme_renderer.rst,
"text/markdown": readme_renderer.markdown,
}
# Regular expression used to capture and reformat doctuils warnings into
# something that a human can understand. This is loosely borrowed from
# Sphinx: https://github.com/sphinx-doc/sphinx/blob
# /c35eb6fade7a3b4a6de4183d1dd4196f04a5edaf/sphinx/util/docutils.py#L199
_REPORT_RE = re.compile(
r"^<string>:(?P<line>(?:\d+)?): "
r"\((?P<level>DEBUG|INFO|WARNING|ERROR|SEVERE)/(\d+)?\) "
r"(?P<message>.*)",
re.DOTALL | re.MULTILINE,
)
class _WarningStream(object):
def __init__(self):
self.output = StringIO()
def write(self, text):
matched = _REPORT_RE.search(text)
if not matched:
self.output.write(text)
return
self.output.write(
"line {line}: {level_text}: {message}\n".format(
level_text=matched.group("level").capitalize(),
line=matched.group("line"),
message=matched.group("message").rstrip("\r\n"),
)
)
def __str__(self):
return self.output.getvalue()
def check(dists, output_stream=sys.stdout):
uploads = [i for i in _find_dists(dists) if not i.endswith(".asc")]
stream = _WarningStream()
failure = False
for filename in uploads:
output_stream.write("Checking distribution %s: " % filename)
package = PackageFile.from_filename(filename, comment=None)
metadata = package.metadata_dictionary()
description = metadata["description"]
description_content_type = metadata["description_content_type"]
if description_content_type is None:
output_stream.write(
'warning: `long_description_content_type` missing. '
'defaulting to `text/x-rst`.\n'
)
description_content_type = 'text/x-rst'
content_type, params = cgi.parse_header(description_content_type)
renderer = _RENDERERS.get(content_type, _RENDERERS[None])
if description in {None, 'UNKNOWN\n\n\n'}:
output_stream.write('warning: `long_description` missing.\n')
output_stream.write("Passed\n")
else:
if renderer.render(description, stream=stream, **params) is None:
failure = True
output_stream.write("Failed\n")
output_stream.write(
"The project's long_description has invalid markup which "
"will not be rendered on PyPI. The following syntax "
"errors were detected:\n%s" % stream
)
else:
output_stream.write("Passed\n")
return failure
def main(args):
parser = argparse.ArgumentParser(prog="twine check")
parser.add_argument(
"dists",
nargs="+",
metavar="dist",
help="The distribution files to check, usually dist/*",
)
args = parser.parse_args(args)
# Call the check function with the arguments from the command line
return check(args.dists)
| 32.114504 | 78 | 0.659853 |
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import argparse
import cgi
import re
import sys
try:
from StringIO import StringIO
except ImportError:
from _io import StringIO
import readme_renderer.markdown
import readme_renderer.rst
import readme_renderer.txt
from twine.commands import _find_dists
from twine.package import PackageFile
_RENDERERS = {
None: readme_renderer.rst,
"text/plain": readme_renderer.txt,
"text/x-rst": readme_renderer.rst,
"text/markdown": readme_renderer.markdown,
}
ORT_RE = re.compile(
r"^<string>:(?P<line>(?:\d+)?): "
r"\((?P<level>DEBUG|INFO|WARNING|ERROR|SEVERE)/(\d+)?\) "
r"(?P<message>.*)",
re.DOTALL | re.MULTILINE,
)
class _WarningStream(object):
def __init__(self):
self.output = StringIO()
def write(self, text):
matched = _REPORT_RE.search(text)
if not matched:
self.output.write(text)
return
self.output.write(
"line {line}: {level_text}: {message}\n".format(
level_text=matched.group("level").capitalize(),
line=matched.group("line"),
message=matched.group("message").rstrip("\r\n"),
)
)
def __str__(self):
return self.output.getvalue()
def check(dists, output_stream=sys.stdout):
uploads = [i for i in _find_dists(dists) if not i.endswith(".asc")]
stream = _WarningStream()
failure = False
for filename in uploads:
output_stream.write("Checking distribution %s: " % filename)
package = PackageFile.from_filename(filename, comment=None)
metadata = package.metadata_dictionary()
description = metadata["description"]
description_content_type = metadata["description_content_type"]
if description_content_type is None:
output_stream.write(
'warning: `long_description_content_type` missing. '
'defaulting to `text/x-rst`.\n'
)
description_content_type = 'text/x-rst'
content_type, params = cgi.parse_header(description_content_type)
renderer = _RENDERERS.get(content_type, _RENDERERS[None])
if description in {None, 'UNKNOWN\n\n\n'}:
output_stream.write('warning: `long_description` missing.\n')
output_stream.write("Passed\n")
else:
if renderer.render(description, stream=stream, **params) is None:
failure = True
output_stream.write("Failed\n")
output_stream.write(
"The project's long_description has invalid markup which "
"will not be rendered on PyPI. The following syntax "
"errors were detected:\n%s" % stream
)
else:
output_stream.write("Passed\n")
return failure
def main(args):
parser = argparse.ArgumentParser(prog="twine check")
parser.add_argument(
"dists",
nargs="+",
metavar="dist",
help="The distribution files to check, usually dist/*",
)
args = parser.parse_args(args)
# Call the check function with the arguments from the command line
return check(args.dists)
| true | true |
f71b2d261e9d06c923fdc731f3a69eb747347726 | 11,627 | py | Python | bioactive_lab.py | PriyamvadaKumar/AWS_BioActive_Classification | b6a4413618586712ca4dc196f2dfaa3ceca804fb | [
"MIT"
] | 1 | 2021-06-04T02:46:37.000Z | 2021-06-04T02:46:37.000Z | bioactive_lab.py | PriyamvadaKumar/AWS_BioActive_classification | b6a4413618586712ca4dc196f2dfaa3ceca804fb | [
"MIT"
] | null | null | null | bioactive_lab.py | PriyamvadaKumar/AWS_BioActive_classification | b6a4413618586712ca4dc196f2dfaa3ceca804fb | [
"MIT"
] | null | null | null | import os, sys
dirpath = os.getcwd()
sys.path.insert(0, dirpath + '/goal_tether_functions')
sys.path.insert(0, dirpath + '/predictive_modelers')
sys.path.insert(0, dirpath + '/predictive_modelers/assessment_resources')
sys.path.insert(0, dirpath + '/active_learners')
sys.path.insert(0, dirpath + '/data_acquisition')
sys.path.insert(0, dirpath + '/diagnostics')
from createCampaign_battleship import main as createCampaign
# from createImageCampaign_Bria import main as createCampaign
from runCampaign2 import main as runCampaign
from database import *
import outputManager
import time
import boto3
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.cluster import KMeans
# Part 1 Plotting Function
def plot_simulation_accuracy(acc, title, mul_accuracy=False):
    """Plot accuracy-vs-iteration curves for a campaign simulation.

    Parameters
    ----------
    acc : sequence
        When ``mul_accuracy`` is True, a sequence of three accuracy
        series in the order (full space, forward modeling, prediction
        only); otherwise a single accuracy series.
    title : str
        Title displayed above the axes.
    mul_accuracy : bool, optional
        Whether ``acc`` holds three labeled series or just one.
    """
    fig, ax = plt.subplots()
    ax.set_ylabel("Accuracy (%)")
    ax.set_xlabel("Iterations")
    ax.set_title(title)
    if mul_accuracy:
        ax.plot(np.arange(len(acc[0])), acc[0], label="Full Space")
        ax.plot(np.arange(len(acc[1])), acc[1], label="Forward Modeling")
        ax.plot(np.arange(len(acc[2])), acc[2], label="Prediction Only")
        # Only this branch creates labeled artists; calling ax.legend()
        # in the unlabeled branch triggers a "no artists with labels"
        # warning and draws nothing.
        ax.legend()
    else:
        ax.plot(np.arange(len(acc)), acc)
    plt.show()
def average_arrays(mat):
array = []
for i in range(25):
avg = 0
for m in range(len(mat)):
if len(mat[m]) < i:
continue
avg += mat[m][i]
avg = avg/len(mat)
array.append(avg)
return array
# --- Script body: fetch data, cluster it into a ground-truth grid, then run
# --- a single BioActive campaign and plot the three accuracy curves.
wd =os.getcwd()
print("Current Working Directory: ", wd)
print()
# download the dataset from S3 on first run
# NOTE(review): `path` is not imported here explicitly — presumably re-exported
# by `from database import *` (likely os.path); confirm.
if path.exists("data/data.csv") is False:
    print("Retrieving Data from S3")
    # read data from S3
    s3 = boto3.resource('s3')
    s3.Bucket('whatyouknowaboutmybucket').download_file('data.csv', wd + '/data/data.csv')
# NOTE(review): this second check prints the same message but only sleeps —
# it never retries the download; if the first download failed, read_csv below
# will still fail after the 5 s pause.
if path.exists("data/data.csv") is False:
    print("Retrieving Data from S3")
    time.sleep(5)
data = pd.read_csv("data/data.csv").dropna().to_numpy()
# columns 4+ are features; column 2 holds the class label strings
features = data[:, 4:]
labels = data[:, 2]
l = LabelEncoder()
labels = l.fit_transform(labels)
print(l.classes_)
# cluster the first 1500 rows into 5 groups; score() on the remainder is the
# negative inertia (always <= 0)
s = KMeans(n_clusters=5)
# s.decision_function(features[:1000])
s.fit_transform(features[:1500])
print(s.score(features[1500:]))
d = np.zeros((20,20))
# create groundTruth
# map each row's first two columns (1-based grid coordinates) to a cluster id
# NOTE(review): the bounds check subtracts 1 from data[i][0] but not from
# data[i][1], while the assignment subtracts 1 from both — confirm intended.
for i in range(len(data)):
    if data[i][0] - 1 >= len(d) or data[i][1] >= len(d[0]):
        continue
    d[data[i][0]-1][data[i][1]-1] = s.predict(features[i].reshape(1,-1))
print(d)
np.savetxt('data_acquisition/project.txt', d)
print(labels)
# exit()
'''
campaign = createCampaign()
runCampaign(campaign)
acc = [np.array(campaign.accuracy_full), np.array(campaign.accuracy_forwardModeling),
       np.array(campaign.accuracy_onlyPredictions)]
plot_simulation_accuracy(acc, "Model Accuracies for a Single Simulation", mul_accuracy=True)
'''
# Part 2 of Assignment - 2 independent variables (0-20) and 1 dependent variable (0-10) for 20 simulations
# run one campaign against the ground-truth grid written above
acc = []
for i in range(1):
    campaign = createCampaign()
    campaign.randoseed = 2
    # campaign.ESS.iVars = [('int', 0, 9), ('int', 0, 9)]
    # campaign.ESS.dVars = [('int', 0, 2)]
    campaign.groundtruthData = 'data_acquisition/project.txt'
    campaign.simsFlag = True
    runCampaign(campaign)
    acc = [campaign.accuracy_full, campaign.accuracy_forwardModeling, campaign.accuracy_onlyPredictions]
    # acc = average_arrays(acc)
plot_simulation_accuracy(acc, "Three Accuracies for the Experimental Space", mul_accuracy=True)
# Part 3 of Assignment -
# acc1, acc2, acc3, acc4 = [], [], [], []
# for i in range(5):
# campaign = createCampaign()
# campaign.ESS.high_homogeneity = True
# campaign.ESS.h_num = 2
# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]
# campaign.ESS.dVars = [('int', 0, 2)]
# campaign.ESS.dimarr = [20,20]
# runCampaign(campaign)
# acc = campaign.accuracy_onlyPredictions
# acc1.append(acc)
#
# for i in range(5):
# campaign = createCampaign()
# campaign.ESS.low_homogeneity = True
# campaign.ESS.h_num = 2
# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]
# campaign.ESS.dVars = [('int', 0, 2)]
# runCampaign(campaign)
# acc = campaign.accuracy_onlyPredictions
# acc2.append(acc)
#
# for i in range(5):
# campaign = createCampaign()
# campaign.ESS.high_homogeneity = True
# campaign.ESS.h_num = 10
# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]
# campaign.ESS.dVars = [('int', 0, 9)]
# campaign.ESS.dimarr = [20,20]
# runCampaign(campaign)
# acc = campaign.accuracy_onlyPredictions
# acc3.append(acc)
#
# for i in range(5):
# campaign = createCampaign()
# campaign.ESS.low_homogeneity = True
# campaign.ESS.h_num = 10
# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]
# campaign.ESS.dVars = [('int', 0, 9)]
# campaign.ESS.dimarr = [20,20]
# runCampaign(campaign)
# acc = campaign.accuracy_onlyPredictions
# acc4.append(acc)
#
# acc1, acc2, acc3, acc4 = average_arrays(acc1), average_arrays(acc2), average_arrays(acc3), average_arrays(acc4)
#
# plt.plot([i+1 for i in range(len(acc1))], acc1, label="H-2", color="blue")
# plt.plot([i+1 for i in range(len(acc2))], acc2, label="L-2", color="green")
# plt.plot([i+1 for i in range(len(acc3))], acc3, label="H-10", color="red")
# plt.plot([i+1 for i in range(len(acc4))], acc4, label="L-10", color="black")
# plt.ylabel("Accuracy (%)")
# plt.xlabel("Iterations")
# plt.title("Different Homogeneity within Experimental Spaces")
# plt.legend()
# plt.show()
# Part 4 of Assignment -
# acc1, acc2, acc3, acc4 = [], [], [], []
# for i in range(1):
# campaign = createCampaign()
# campaign.ESS.low_homogeneity = True
# campaign.ESS.error = True
# campaign.ESS.error = 0
# campaign.randoseed= 45
# campaign.ESS.h_num = 10
# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]
# campaign.ESS.dVars = [('int', 0, 9)]
# campaign.ESS.dimarr = [20, 20]
# runCampaign(campaign)
# print(campaign.groundTruth)
# acc = campaign.accuracy_onlyPredictions
# acc1.append(acc)
#
#
# for i in range(1):
# campaign = createCampaign()
# campaign.ESS.low_homogeneity = True
# campaign.ESS.error = True
# campaign.randoseed = 1
# campaign.ESS.error = 0.1
# campaign.ESS.h_num = 10
# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]
# campaign.ESS.dVars = [('int', 0, 9)]
# campaign.ESS.dimarr = [20, 20]
# runCampaign(campaign)
# print(campaign.groundTruth)
# acc = campaign.accuracy_onlyPredictions
# acc2.append(acc)
#
# for i in range(1):
# campaign = createCampaign()
# campaign.ESS.low_homogeneity = True
# campaign.ESS.error = True
# campaign.ESS.error = 0.5
# campaign.randoseed = 2
# campaign.ESS.h_num = 10
# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]
# campaign.ESS.dVars = [('int', 0, 9)]
# campaign.ESS.dimarr = [20, 20]
# runCampaign(campaign)
# print(campaign.groundTruth)
# acc = campaign.accuracy_onlyPredictions
# acc3.append(acc)
#
# for i in range(1):
# campaign = createCampaign()
# campaign.ESS.low_homogeneity = True
# campaign.ESS.error = True
# campaign.ESS.error = 1.0
# campaign.randoseed=3
# campaign.ESS.h_num = 10
# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]
# campaign.ESS.dVars = [('int', 0, 9)]
# campaign.ESS.dimarr = [20, 20]
# runCampaign(campaign)
# print(campaign.groundTruth)
# acc = campaign.accuracy_onlyPredictions
# acc4.append(acc)
#
# acc1, acc2, acc3, acc4 = average_arrays(acc1), average_arrays(acc2), average_arrays(acc3), average_arrays(acc4)
#
# plt.plot([i+1 for i in range(len(acc1))], acc1, label="0.0", color="blue")
# plt.plot([i+1 for i in range(len(acc2))], acc2, label="0.1", color="green")
# plt.plot([i+1 for i in range(len(acc3))], acc3, label="0.5", color="red")
# plt.plot([i+1 for i in range(len(acc4))], acc4, label="1.0", color="black")
# plt.ylabel("Accuracy (%)")
# plt.xlabel("Iterations")
# plt.title("Different Error Rates within Experimental Spaces")
# plt.legend()
# plt.show()
# for i in range(1):
# campaign = createCampaign()
# campaign.ESS.low_homogeneity = True
# campaign.ESS.error = True
# campaign.ESS.error = 0
# campaign.randoseed = 53
# campaign.ESS.h_num = 10
# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]
# campaign.ESS.dVars = [('int', 0, 9)]
# campaign.ESS.dimarr = [20, 20]
# runCampaign(campaign)
# print(campaign.groundTruth)
# acc1 = campaign.accuracy_onlyPredictions
#
#
# for i in range(1):
# campaign = createCampaign()
# campaign.ESS.low_homogeneity = True
# campaign.ESS.error = True
# campaign.ESS.error = 0
# campaign.randoseed = 39
# campaign.ESS.h_num = 10
# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]
# campaign.ESS.dVars = [('int', 0, 9)]
# campaign.ESS.dimarr = [20, 20]
# runCampaign(campaign)
# print(campaign.groundTruth)
# acc2 = campaign.accuracy_onlyPredictions
#
#
# for i in range(1):
# campaign = createCampaign()
# campaign.ESS.low_homogeneity = True
# campaign.ESS.error = True
# campaign.ESS.error = 0.1
# campaign.randoseed = 32
# campaign.ESS.h_num = 10
# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]
# campaign.ESS.dVars = [('int', 0, 9)]
# campaign.ESS.dimarr = [20, 20]
# runCampaign(campaign)
# print(campaign.groundTruth)
# acc3 = campaign.accuracy_onlyPredictions
#
# for i in range(1):
# campaign = createCampaign()
# campaign.ESS.low_homogeneity = True
# campaign.ESS.error = True
# campaign.ESS.error = 0.1
# campaign.randoseed = 17
# campaign.ESS.h_num = 10
# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]
# campaign.ESS.dVars = [('int', 0, 9)]
# campaign.ESS.dimarr = [20, 20]
# runCampaign(campaign)
# print(campaign.groundTruth)
# acc4 = campaign.accuracy_onlyPredictions
#
# for i in range(1):
# campaign = createCampaign()
# campaign.ESS.low_homogeneity = True
# campaign.ESS.error = True
# campaign.ESS.error = 0.5
# campaign.randoseed = 3
# campaign.ESS.h_num = 10
# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]
# campaign.ESS.dVars = [('int', 0, 9)]
# campaign.ESS.dimarr = [20, 20]
# runCampaign(campaign)
# print(campaign.groundTruth)
# acc5 = campaign.accuracy_onlyPredictions
#
# for i in range(1):
# campaign = createCampaign()
# campaign.ESS.low_homogeneity = True
# campaign.ESS.error = True
# campaign.ESS.error = 0.5
# campaign.randoseed = 15
# campaign.ESS.h_num = 10
# campaign.ESS.iVars = [('int', 0, 19), ('int', 0, 19)]
# campaign.ESS.dVars = [('int', 0, 9)]
# campaign.ESS.dimarr = [20, 20]
# runCampaign(campaign)
# print(campaign.groundTruth)
# acc6 = campaign.accuracy_onlyPredictions
#
#
# plt.plot([i+1 for i in range(len(acc1))], acc1, label="0.0 - B", color="blue")
# plt.plot([i+1 for i in range(len(acc2))], acc2, label="0.0 - N", color="green")
# plt.plot([i+1 for i in range(len(acc3))], acc3, label="0.1 - B", color="red")
# plt.plot([i+1 for i in range(len(acc4))], acc4, label="0.1 - N", color="black")
# plt.plot([i+1 for i in range(len(acc5))], acc5, label="0.5 - B", color="yellow")
# plt.plot([i+1 for i in range(len(acc6))], acc6, label="0.5 - N", color="cyan")
# plt.ylabel("Accuracy (%)")
# plt.xlabel("Iterations")
# plt.title("Different Categorical Models within Experimental Spaces")
# plt.legend()
# plt.show()
| 32.387187 | 113 | 0.637224 | import os, sys
dirpath = os.getcwd()
sys.path.insert(0, dirpath + '/goal_tether_functions')
sys.path.insert(0, dirpath + '/predictive_modelers')
sys.path.insert(0, dirpath + '/predictive_modelers/assessment_resources')
sys.path.insert(0, dirpath + '/active_learners')
sys.path.insert(0, dirpath + '/data_acquisition')
sys.path.insert(0, dirpath + '/diagnostics')
from createCampaign_battleship import main as createCampaign
from runCampaign2 import main as runCampaign
from database import *
import outputManager
import time
import boto3
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.cluster import KMeans
def plot_simulation_accuracy(acc, title, mul_accuracy=False):
fig, ax = plt.subplots()
ax.set_ylabel("Accuracy (%)")
ax.set_xlabel("Iterations")
ax.set_title(title)
if mul_accuracy:
ax.plot(np.arange(len(acc[0])), acc[0], label="Full Space")
ax.plot(np.arange(len(acc[1])), acc[1], label="Forward Modeling")
ax.plot(np.arange(len(acc[2])), acc[2], label="Prediction Only")
else:
ax.plot(np.arange(len(acc)), acc)
ax.legend()
plt.show()
def average_arrays(mat):
array = []
for i in range(25):
avg = 0
for m in range(len(mat)):
if len(mat[m]) < i:
continue
avg += mat[m][i]
avg = avg/len(mat)
array.append(avg)
return array
wd =os.getcwd()
print("Current Working Directory: ", wd)
print()
if path.exists("data/data.csv") is False:
print("Retrieving Data from S3")
s3 = boto3.resource('s3')
s3.Bucket('whatyouknowaboutmybucket').download_file('data.csv', wd + '/data/data.csv')
if path.exists("data/data.csv") is False:
print("Retrieving Data from S3")
time.sleep(5)
data = pd.read_csv("data/data.csv").dropna().to_numpy()
features = data[:, 4:]
labels = data[:, 2]
l = LabelEncoder()
labels = l.fit_transform(labels)
print(l.classes_)
s = KMeans(n_clusters=5)
s.fit_transform(features[:1500])
print(s.score(features[1500:]))
d = np.zeros((20,20))
for i in range(len(data)):
if data[i][0] - 1 >= len(d) or data[i][1] >= len(d[0]):
continue
d[data[i][0]-1][data[i][1]-1] = s.predict(features[i].reshape(1,-1))
print(d)
np.savetxt('data_acquisition/project.txt', d)
print(labels)
acc = []
for i in range(1):
campaign = createCampaign()
campaign.randoseed = 2
campaign.groundtruthData = 'data_acquisition/project.txt'
campaign.simsFlag = True
runCampaign(campaign)
acc = [campaign.accuracy_full, campaign.accuracy_forwardModeling, campaign.accuracy_onlyPredictions]
plot_simulation_accuracy(acc, "Three Accuracies for the Experimental Space", mul_accuracy=True)
| true | true |
f71b2dd96ed4ebc42bacf10ebdd08e56bd022192 | 1,747 | py | Python | s3-bucket-cleaner/clean_s3_bucket.py | artsy/opstools | 889b08c6b741dfeac0c32b0a4d96d0f9f3bbc0e7 | [
"MIT"
] | null | null | null | s3-bucket-cleaner/clean_s3_bucket.py | artsy/opstools | 889b08c6b741dfeac0c32b0a4d96d0f9f3bbc0e7 | [
"MIT"
] | null | null | null | s3-bucket-cleaner/clean_s3_bucket.py | artsy/opstools | 889b08c6b741dfeac0c32b0a4d96d0f9f3bbc0e7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Sourced from https://gist.github.com/seventhskye/0cc7b2804252975d36dca047ab7729e9 with some modifications
import os
import boto3
def _delete_batch(client, bucket, items):
    """Best-effort delete of one batch of object versions / delete markers.

    `items` is the raw 'Versions' or 'DeleteMarkers' list from
    list_object_versions; an empty list is a no-op.
    """
    objects = [{'VersionId': it['VersionId'], 'Key': it['Key']}
               for it in items]
    if not objects:
        # delete_objects rejects an empty Objects list
        return
    try:
        response = client.delete_objects(Bucket=bucket,
                                         Delete={'Objects': objects})
        for item in response.get('Deleted', []):
            print("Deleted %s" % item['Key'])
    except Exception as exc:
        # keep draining the bucket even if one batch fails, but report it —
        # the original used bare `except: pass`, which also silently ate
        # KeyboardInterrupt/SystemExit and every delete error
        print("Failed to delete a batch: %s" % exc)


def main():
    """Permanently delete every object version under S3_BUCKET/S3_PREFIX."""
    client = boto3.client('s3')
    Bucket = os.environ.get('S3_BUCKET')
    Prefix = os.environ.get('S3_PREFIX', '')  # blank deletes the whole bucket
    IsTruncated = True
    MaxKeys = 1000
    KeyMarker = None

    if Bucket is None:
        print("Environment variable S3_BUCKET must be set!")
        return

    # page through all versions, deleting as we go
    while IsTruncated:
        kwargs = dict(Bucket=Bucket, MaxKeys=MaxKeys, Prefix=Prefix)
        if KeyMarker:
            kwargs['KeyMarker'] = KeyMarker
        version_list = client.list_object_versions(**kwargs)

        # either key may be absent on a page; .get() replaces the bare
        # excepts that previously masked the KeyError
        _delete_batch(client, Bucket, version_list.get('Versions', []))
        _delete_batch(client, Bucket, version_list.get('DeleteMarkers', []))

        IsTruncated = version_list['IsTruncated']
        KeyMarker = version_list.get('NextKeyMarker', KeyMarker)


if __name__ == '__main__':
    main()
| 28.639344 | 107 | 0.627934 |
import os
import boto3
def main():
client = boto3.client('s3')
Bucket = os.environ.get('S3_BUCKET')
Prefix = os.environ.get('S3_PREFIX', '')
IsTruncated = True
MaxKeys = 1000
KeyMarker = None
if Bucket is None:
print("Environment variable S3_BUCKET must be set!")
return
while IsTruncated == True:
if not KeyMarker:
version_list = client.list_object_versions(
Bucket=Bucket,
MaxKeys=MaxKeys,
Prefix=Prefix)
else:
version_list = client.list_object_versions(
Bucket=Bucket,
MaxKeys=MaxKeys,
Prefix=Prefix,
KeyMarker=KeyMarker)
try:
objects = []
versions = version_list['Versions']
for v in versions:
objects.append({'VersionId':v['VersionId'],'Key': v['Key']})
response = client.delete_objects(Bucket=Bucket,Delete={'Objects':objects})
for item in response['Deleted']:
print("Deleted %s" % item['Key'])
except:
pass
try:
objects = []
delete_markers = version_list['DeleteMarkers']
for d in delete_markers:
objects.append({'VersionId':d['VersionId'],'Key': d['Key']})
response = client.delete_objects(Bucket=Bucket,Delete={'Objects':objects})
for item in response['Deleted']:
print("Deleted %s" % item['Key'])
except:
pass
IsTruncated = version_list['IsTruncated']
if 'NextKeyMarker' in version_list:
KeyMarker = version_list['NextKeyMarker']
if __name__ == '__main__':
main()
| true | true |
f71b2e2da61209cbca5bf4e3278259b97ab94ddd | 135 | py | Python | scripts/migrate_piwik/settings/local-dist.py | DanielSBrown/osf.io | 98dda2ac237377197acacce78274bc0a4ce8f303 | [
"Apache-2.0"
] | null | null | null | scripts/migrate_piwik/settings/local-dist.py | DanielSBrown/osf.io | 98dda2ac237377197acacce78274bc0a4ce8f303 | [
"Apache-2.0"
] | null | null | null | scripts/migrate_piwik/settings/local-dist.py | DanielSBrown/osf.io | 98dda2ac237377197acacce78274bc0a4ce8f303 | [
"Apache-2.0"
] | null | null | null | PIWIK_DB_HOST = 'localhost'
PIWIK_DB_PORT = 3336
PIWIK_DB_USER = 'root'
PIWIK_DB_PASSWORD = 'changeme'
PIWIK_DB_NAME = 'piwik_staging'
| 22.5 | 31 | 0.792593 | PIWIK_DB_HOST = 'localhost'
PIWIK_DB_PORT = 3336
PIWIK_DB_USER = 'root'
PIWIK_DB_PASSWORD = 'changeme'
PIWIK_DB_NAME = 'piwik_staging'
| true | true |
f71b2e305d3d87fbaf19ce272d3f1ec378c3ef49 | 1,017 | py | Python | tenants/monkey/db/migrations/recorder.py | bugkiwi/django-tenants-mysql | bc008bde01f5dbbd0e85bcacfa48db2ee8347e50 | [
"MIT"
] | 2 | 2017-08-01T10:29:00.000Z | 2022-03-05T12:51:43.000Z | tenants/monkey/db/migrations/recorder.py | bugkiwi/django-tenants-mysql | bc008bde01f5dbbd0e85bcacfa48db2ee8347e50 | [
"MIT"
] | 1 | 2018-08-07T13:40:38.000Z | 2018-08-07T13:40:38.000Z | tenants/monkey/db/migrations/recorder.py | bugkiwi/django-tenants-mysql | bc008bde01f5dbbd0e85bcacfa48db2ee8347e50 | [
"MIT"
] | 2 | 2019-11-27T09:34:44.000Z | 2022-03-05T12:59:12.000Z | #!/usr/bin/env python
#coding:utf-8
__author__ = 'gkiwi'
from django.db.utils import DatabaseError
from django.db.migrations.exceptions import MigrationSchemaMissing
__all__ = ['MigrationRecorder']
class MigrationRecorder(object):
    """Monkey-patch for Django's migration recorder.

    Overrides ensure_schema so the migrations table name is resolved through
    a schema-aware helper (multi-tenant setups where each tenant lives in its
    own schema). NOTE(review): `self.Migration` and `self.connection` are not
    defined here — presumably this method is patched onto Django's own
    MigrationRecorder, which provides them; confirm how it is applied.
    """
    def ensure_schema(self):
        """
        Ensures the table exists and has the correct schema.
        """
        # If the table's there, that's fine - we've never changed its schema
        # in the codebase.
        # gkiwi #TOPATCH -- start of the patched section: resolve the
        # schema-qualified table name via the (project-specific) connection
        # helper instead of using the raw db_table
        from django.db import connection
        db_table = connection.get_schemaed_db_table(self.Migration._meta.db_table)
        # end
        if db_table in self.connection.introspection.table_names(self.connection.cursor()):
            return
        # Make the table
        try:
            with self.connection.schema_editor() as editor:
                editor.create_model(self.Migration)
        except DatabaseError as exc:
            raise MigrationSchemaMissing("Unable to create the django_migrations table (%s)" % exc)
| 31.78125 | 99 | 0.665683 |
__author__ = 'gkiwi'
from django.db.utils import DatabaseError
from django.db.migrations.exceptions import MigrationSchemaMissing
__all__ = ['MigrationRecorder']
class MigrationRecorder(object):
def ensure_schema(self):
# in the codebase.
# gkiwi #TOPATCH
from django.db import connection
db_table = connection.get_schemaed_db_table(self.Migration._meta.db_table)
# end
if db_table in self.connection.introspection.table_names(self.connection.cursor()):
return
# Make the table
try:
with self.connection.schema_editor() as editor:
editor.create_model(self.Migration)
except DatabaseError as exc:
raise MigrationSchemaMissing("Unable to create the django_migrations table (%s)" % exc)
| true | true |
f71b2e92c0252192242ac618201c720c01142e52 | 46,863 | py | Python | pyAudioAnalysis/audioSegmentation.py | polewczakp/pyAudioAnalysis | 7dc2d8e18da1ca2f2485a402bb7399b43bbb2b24 | [
"Apache-2.0"
] | null | null | null | pyAudioAnalysis/audioSegmentation.py | polewczakp/pyAudioAnalysis | 7dc2d8e18da1ca2f2485a402bb7399b43bbb2b24 | [
"Apache-2.0"
] | null | null | null | pyAudioAnalysis/audioSegmentation.py | polewczakp/pyAudioAnalysis | 7dc2d8e18da1ca2f2485a402bb7399b43bbb2b24 | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
import os
import csv
import glob
import scipy
import sklearn
import numpy as np
import hmmlearn.hmm
import sklearn.cluster
import pickle as cpickle
import matplotlib.pyplot as plt
from scipy.spatial import distance
import sklearn.discriminant_analysis
from pyAudioAnalysis import audioBasicIO
from pyAudioAnalysis import audioTrainTest as at
from pyAudioAnalysis import MidTermFeatures as mtf
from pyAudioAnalysis import ShortTermFeatures as stf
""" General utility functions """
def smooth_moving_avg(signal, window=11):
    """Smooth a 1-D signal with a centered moving average of size `window`.

    The signal is extended at both ends with point-reflected samples so the
    output has the same length as the input.

    ARGUMENTS:
        - signal: 1-D np array to smooth
        - window: averaging window length; values < 3 return the signal
                  unchanged
    RETURNS:
        - smoothed signal (same length as the input)
    RAISES:
        - ValueError: if `signal` is not 1-D, or is shorter than `window`
    """
    window = int(window)
    if signal.ndim != 1:
        # fix: the original raised ValueError("") with an empty message
        raise ValueError("smooth_moving_avg only accepts 1-D arrays.")
    if signal.size < window:
        raise ValueError("Input vector needs to be bigger than window size.")
    if window < 3:
        return signal
    # mirror `window` samples at the front and window-1 at the back
    s = np.r_[2 * signal[0] - signal[window - 1::-1],
              signal, 2 * signal[-1] - signal[-1:-window:-1]]
    w = np.ones(window, 'd')
    y = np.convolve(w / w.sum(), s, mode='same')
    # trim the padded ends back to the original length
    return y[window:-window + 1]
def self_similarity_matrix(feature_vectors):
    """
    This function computes the self-similarity matrix for a sequence
    of feature vectors.
    ARGUMENTS:
     - feature_vectors:    a np matrix (nDims x nVectors) whose i-th column
                           corresponds to the i-th feature vector
    RETURNS:
     - sim_matrix:         the self-similarity matrix (nVectors x nVectors),
                           where entry (i, j) = 1 - cosine distance between
                           the normalized vectors i and j
    """
    # normalize via the project helper (presumably z-score per dimension —
    # confirm in audioTrainTest.normalize_features); mean/std are unused here
    norm_feature_vectors, mean, std = at.normalize_features([feature_vectors.T])
    norm_feature_vectors = norm_feature_vectors[0].T
    # pairwise cosine distances, squareformed, flipped into similarities
    sim_matrix = 1.0 - distance.squareform(
        distance.pdist(norm_feature_vectors.T, 'cosine'))
    return sim_matrix
def labels_to_segments(labels, window):
    """Merge a per-window label sequence into contiguous labelled segments.

    ARGUMENTS:
        - labels:   a sequence of class labels (one per fixed-size window)
        - window:   window duration (in seconds)
    RETURNS:
        - segments: (num_segs x 2) np array; segments[i, 0] / segments[i, 1]
                    are the start / end time (seconds) of segment i
        - classes:  classes[i] is the label of the i-th segment

    Note: as in the legacy implementation, the very last window's label is
    not emitted as a segment of its own.
    """
    if len(labels) == 0:
        # fix: empty input used to raise IndexError; return an empty result
        return np.zeros((0, 2)), []
    if len(labels) == 1:
        # fix: this case used to return a flat python list [0, window], which
        # breaks callers indexing segments[i, 0] / segments[-1, 1]; return the
        # same (1 x 2) array shape as the general case
        return np.array([[0, window]]), labels

    boundaries = []
    classes = []
    seg_label = labels[0]
    last = len(labels) - 1
    for index in range(1, len(labels)):
        # close the current segment on a label change, or at the final window
        if labels[index] != seg_label or index == last:
            boundaries.append(index * window)
            classes.append(seg_label)
            seg_label = labels[index]

    # each segment starts where the previous one ended (first starts at 0)
    segments = np.zeros((len(boundaries), 2))
    segments[:, 1] = boundaries
    segments[1:, 0] = boundaries[:-1]
    return segments, classes
def segments_to_labels(start_times, end_times, labels, window):
    """Convert annotated segments into a fixed-step sequence of class flags.

    Every `window`-second slot is sampled at its mid-point and assigned the
    label of the segment whose (start, end] interval contains it; mid-points
    falling in no interval inherit the last segment's label.

    ARGUMENTS:
        - start_times:  segment start points (in seconds)
        - end_times:    segment end points (in seconds)
        - labels:       one label per segment
        - window:       fixed window size (in seconds)
    RETURNS:
        - flags:        np array of class indices
        - class_names:  list of class names (strings)
    """
    class_names = list(set(labels))
    flags = []
    timestamp = window / 2.0
    while timestamp < end_times[-1]:
        # locate the segment containing this mid-point (falls through to the
        # last segment when none matches)
        hit = 0
        for hit, seg_start in enumerate(start_times):
            if seg_start < timestamp <= end_times[hit]:
                break
        flags.append(class_names.index(labels[hit]))
        timestamp += window
    return np.array(flags), class_names
def compute_metrics(confusion_matrix, class_names):
    """Per-class recall, precision and F1 from a confusion matrix.

    Rows of `confusion_matrix` are ground-truth classes and columns are
    predictions. Returns three lists (recall, precision, f1), one entry per
    class; all three are returned empty (after printing an error) when
    `class_names` does not match the matrix dimension.
    """
    recall, precision, f1 = [], [], []
    n_classes = confusion_matrix.shape[0]
    if n_classes != len(class_names):
        print("Error in computePreRec! Confusion matrix and class_names "
              "list must be of the same size!")
    else:
        for i in range(n_classes):
            # precision over the predicted column, recall over the true row
            prec = confusion_matrix[i, i] / np.sum(confusion_matrix[:, i])
            rec = confusion_matrix[i, i] / np.sum(confusion_matrix[i, :])
            precision.append(prec)
            recall.append(rec)
            f1.append(2 * prec * rec / (prec + rec))
    return recall, precision, f1
def read_segmentation_gt(gt_file):
    """Read a segmentation ground-truth file.

    The file is tab-separated with one row per segment:
    <segment start>\t<segment end>\t<class label>
    Rows that do not have exactly three fields are skipped.

    ARGUMENTS:
        - gt_file:  path of the annotation file
    RETURNS:
        - np array of segment start positions (seconds)
        - np array of segment end positions (seconds)
        - list of the corresponding class labels (strings)
    """
    starts, ends, labs = [], [], []
    with open(gt_file, 'rt') as fh:
        for row in csv.reader(fh, delimiter='\t'):
            if len(row) != 3:
                continue
            starts.append(float(row[0]))
            ends.append(float(row[1]))
            labs.append(row[2])
    return np.array(starts), np.array(ends), labs
def plot_segmentation_results(flags_ind, flags_ind_gt, class_names, mt_step,
                              evaluate_only=False):
    """
    This function plots statistics on the classification-segmentation results
    produced either by the fix-sized supervised method or the HMM method.
    It also computes the overall accuracy achieved by the respective method
    if ground-truth is available.

    ARGUMENTS:
        - flags_ind:     predicted class indices, one per mid-term window
        - flags_ind_gt:  ground-truth class indices (may be empty)
        - class_names:   list of class names indexed by the flags
        - mt_step:       mid-term window step (seconds)
        - evaluate_only: if True, skip all printing/plotting and just
                         return the accuracy
    RETURNS:
        - accuracy in [0, 1], or -1 when no ground truth is available
    """
    flags = [class_names[int(f)] for f in flags_ind]
    segments, classes = labels_to_segments(flags, mt_step)
    # frame-level accuracy over the overlapping prefix of the two sequences
    min_len = min(flags_ind.shape[0], flags_ind_gt.shape[0])
    if min_len > 0:
        accuracy = np.sum(flags_ind[0:min_len] ==
                          flags_ind_gt[0:min_len]) / float(min_len)
    else:
        accuracy = -1
    if not evaluate_only:
        duration = segments[-1, 1]
        # per-class total duration, percentage of total, and mean segment length
        s_percentages = np.zeros((len(class_names), ))
        percentages = np.zeros((len(class_names), ))
        av_durations = np.zeros((len(class_names), ))
        for i_seg in range(segments.shape[0]):
            s_percentages[class_names.index(classes[i_seg])] += \
                (segments[i_seg, 1]-segments[i_seg, 0])
        for i in range(s_percentages.shape[0]):
            percentages[i] = 100.0 * s_percentages[i] / duration
            class_sum = sum(1 for c in classes if c == class_names[i])
            if class_sum > 0:
                av_durations[i] = s_percentages[i] / class_sum
            else:
                av_durations[i] = 0.0
        for i in range(percentages.shape[0]):
            print(class_names[i], percentages[i], av_durations[i])
        font = {'size': 10}
        plt.rc('font', **font)
        # top panel: predicted (solid) vs ground-truth (dashed red) label paths
        fig = plt.figure()
        ax1 = fig.add_subplot(211)
        ax1.set_yticks(np.array(range(len(class_names))))
        ax1.axis((0, duration, -1, len(class_names)))
        ax1.set_yticklabels(class_names)
        ax1.plot(np.array(range(len(flags_ind))) * mt_step +
                 mt_step / 2.0, flags_ind)
        if flags_ind_gt.shape[0] > 0:
            # +0.05 offsets the GT curve slightly so both remain visible
            ax1.plot(np.array(range(len(flags_ind_gt))) * mt_step +
                     mt_step / 2.0, flags_ind_gt + 0.05, '--r')
        plt.xlabel("time (seconds)")
        if accuracy >= 0:
            plt.title('Accuracy = {0:.1f}%'.format(100.0 * accuracy))
        # bottom-left panel: percentage of total duration per class
        ax2 = fig.add_subplot(223)
        plt.title("Classes percentage durations")
        ax2.axis((0, len(class_names) + 1, 0, 100))
        ax2.set_xticks(np.array(range(len(class_names) + 1)))
        ax2.set_xticklabels([" "] + class_names)
        print(np.array(range(len(class_names))), percentages)
        ax2.bar(np.array(range(len(class_names))) + 0.5, percentages)
        # bottom-right panel: average segment duration per class
        ax3 = fig.add_subplot(224)
        plt.title("Segment average duration per class")
        ax3.axis((0, len(class_names)+1, 0, av_durations.max()))
        ax3.set_xticks(np.array(range(len(class_names) + 1)))
        ax3.set_xticklabels([" "] + class_names)
        ax3.bar(np.array(range(len(class_names))) + 0.5, av_durations)
        fig.tight_layout()
        plt.show()
    return accuracy
def evaluate_speaker_diarization(labels, labels_gt):
    """Purity-based evaluation of a speaker diarization result.

    Compares predicted cluster labels against ground-truth speaker labels
    (truncated to their common length) and returns the size-weighted average
    cluster purity and speaker purity.
    """
    common = min(labels.shape[0], labels_gt.shape[0])
    labels = labels[0:common]
    labels_gt = labels_gt[0:common]

    cluster_ids = np.unique(labels)
    speaker_ids = np.unique(labels_gt)
    cluster_pos = {c: i for i, c in enumerate(cluster_ids)}
    speaker_pos = {s: j for j, s in enumerate(speaker_ids)}

    # contingency[i, j]: #windows assigned to cluster i whose true speaker is j
    contingency = np.zeros((cluster_ids.shape[0], speaker_ids.shape[0]))
    for pred, truth in zip(labels, labels_gt):
        contingency[cluster_pos[pred], speaker_pos[truth]] += 1.0

    total = np.sum(contingency)
    cluster_sizes = np.sum(contingency, axis=1)
    speaker_sizes = np.sum(contingency, axis=0)

    # purity of each cluster / each speaker, then size-weighted averages
    purity_per_cluster = contingency.max(axis=1) / cluster_sizes
    purity_per_speaker = contingency.max(axis=0) / speaker_sizes
    purity_cluster_m = np.sum(purity_per_cluster * cluster_sizes) / total
    purity_speaker_m = np.sum(purity_per_speaker * speaker_sizes) / total
    return purity_cluster_m, purity_speaker_m
def train_hmm_compute_statistics(features, labels):
    """Estimate Gaussian-HMM parameters from a labelled feature sequence.

    ARGUMENTS:
        - features:  feature matrix (num_dims x num_windows)
        - labels:    class index per window (num_windows x 1)
    RETURNS:
        - class_priors:          prior probability per class (n_classes,)
        - transmutation_matrix:  row-normalized class transition matrix
                                 (n_classes x n_classes)
        - means:                 per-class feature means (n_classes x num_dims)
        - cov:                   per-class per-dimension standard deviations
                                 (n_classes x num_dims); only the diagonal is
                                 kept, matching a "diag" covariance model
    """
    unique_labels = np.unique(labels)
    n_comps = unique_labels.shape[0]
    n_feats = features.shape[0]
    if features.shape[1] < labels.shape[0]:
        print("trainHMM warning: number of short-term feature vectors "
              "must be greater or equal to the labels length!")
        labels = labels[0:features.shape[1]]

    # priors: relative frequency of each class
    class_priors = np.array([np.count_nonzero(labels == lbl)
                             for lbl in unique_labels], dtype=np.float64)
    class_priors = class_priors / class_priors.sum()

    # transition counts between consecutive labels, row-normalized
    transmutation_matrix = np.zeros((n_comps, n_comps))
    for prev_lbl, next_lbl in zip(labels[:-1], labels[1:]):
        transmutation_matrix[int(prev_lbl), int(next_lbl)] += 1
    transmutation_matrix /= transmutation_matrix.sum(axis=1, keepdims=True)

    # per-class mean and std of every feature dimension
    means = np.zeros((n_comps, n_feats))
    cov = np.zeros((n_comps, n_feats))
    for idx, lbl in enumerate(unique_labels):
        class_cols = features[:, np.nonzero(labels == lbl)[0]]
        means[idx, :] = class_cols.mean(axis=1)
        cov[idx, :] = np.std(class_cols, axis=1)
    return class_priors, transmutation_matrix, means, cov
def train_hmm_from_file(wav_file, gt_file, hmm_model_name, mid_window, mid_step):
    """
    This function trains a HMM model for segmentation-classification
    using a single annotated audio file
    ARGUMENTS:
     - wav_file:        the path of the audio filename
     - gt_file:         the path of the ground truth filename
                        (a csv file of the form <segment start in seconds>,
                        <segment end in seconds>,<segment label> in each row
     - hmm_model_name:  the name of the HMM model to be stored
     - mid_window:      mid-term window size (seconds)
     - mid_step:        mid-term window step (seconds)
    RETURNS:
     - hmm:             an object to the resulting HMM
     - class_names:     a list of class_names

    After training, hmm, class_names, along with the mid_window and mid_step
    values are stored in the hmm_model_name file
    """
    # ground-truth segments -> one class flag per mid-term step
    seg_start, seg_end, seg_labs = read_segmentation_gt(gt_file)
    flags, class_names = segments_to_labels(seg_start, seg_end, seg_labs, mid_step)
    sampling_rate, signal = audioBasicIO.read_audio_file(wav_file)
    # mid-term features with a fixed 50 ms short-term window/step
    features, _, _ = \
        mtf.mid_feature_extraction(signal, sampling_rate,
                                   mid_window * sampling_rate,
                                   mid_step * sampling_rate,
                                   round(sampling_rate * 0.050),
                                   round(sampling_rate * 0.050))
    # estimate priors / transitions / per-class gaussians from the labels
    class_priors, transumation_matrix, means, cov = \
        train_hmm_compute_statistics(features, flags)
    # build a diagonal-covariance GaussianHMM and set its parameters directly
    # (no EM fitting is performed)
    hmm = hmmlearn.hmm.GaussianHMM(class_priors.shape[0], "diag")
    hmm.covars_ = cov
    hmm.means_ = means
    hmm.startprob_ = class_priors
    hmm.transmat_ = transumation_matrix
    save_hmm(hmm_model_name, hmm, class_names, mid_window, mid_step)
    return hmm, class_names
def train_hmm_from_directory(folder_path, hmm_model_name, mid_window, mid_step):
    """
    This function trains a HMM model for segmentation-classification using
    a directory where WAV files and .segments (ground-truth files) are stored
    ARGUMENTS:
     - folder_path:     the path of the data directory
     - hmm_model_name:  the name of the HMM model to be stored
     - mid_window:      mid-term window size (seconds)
     - mid_step:        mid-term window step (seconds)
    RETURNS:
     - hmm:             an object to the resulting HMM
     - class_names_all: a list of class_names

    After training, hmm, class_names_all, along with the mid_window
    and mid_step values are stored in the hmm_model_name file
    """
    flags_all = np.array([])
    class_names_all = []
    for i, f in enumerate(glob.glob(folder_path + os.sep + '*.wav')):
        # for each WAV file
        wav_file = f
        gt_file = f.replace('.wav', '.segments')
        if os.path.isfile(gt_file):
            seg_start, seg_end, seg_labs = read_segmentation_gt(gt_file)
            flags, class_names = \
                segments_to_labels(seg_start, seg_end, seg_labs, mid_step)
            for c in class_names:
                # update class names:
                if c not in class_names_all:
                    class_names_all.append(c)
            sampling_rate, signal = audioBasicIO.read_audio_file(wav_file)
            # mid-term features with a fixed 50 ms short-term window/step
            feature_vector, _, _ = \
                mtf.mid_feature_extraction(signal, sampling_rate,
                                           mid_window * sampling_rate,
                                           mid_step * sampling_rate,
                                           round(sampling_rate * 0.050),
                                           round(sampling_rate * 0.050))
            # truncate features and labels to their common length
            flag_len = len(flags)
            feat_cols = feature_vector.shape[1]
            min_sm = min(feat_cols, flag_len)
            feature_vector = feature_vector[:, 0:min_sm]
            flags = flags[0:min_sm]
            flags_new = []
            # append features and labels
            # NOTE(review): `class_names_all.index(class_names_all[flags[j]])`
            # looks wrong — it indexes the *global* class list with the
            # per-file flag, instead of mapping through the per-file
            # `class_names`; confirm against upstream pyAudioAnalysis.
            for j, fl in enumerate(flags):
                flags_new.append(class_names_all.index(class_names_all[flags[j]]))
            flags_all = np.append(flags_all, np.array(flags_new))
            if i == 0:
                f_all = feature_vector
            else:
                f_all = np.concatenate((f_all, feature_vector), axis=1)
    # compute HMM statistics
    class_priors, transmutation_matrix, means, cov = \
        train_hmm_compute_statistics(f_all, flags_all)
    # train the HMM (parameters are set directly; no EM fitting)
    hmm = hmmlearn.hmm.GaussianHMM(class_priors.shape[0], "diag")
    hmm.covars_ = cov
    hmm.means_ = means
    hmm.startprob_ = class_priors
    hmm.transmat_ = transmutation_matrix
    save_hmm(hmm_model_name, hmm, class_names_all, mid_window, mid_step)
    return hmm, class_names_all
def save_hmm(hmm_model_name, model, classes, mid_window, mid_step):
    """Serialize an HMM plus its metadata to *hmm_model_name*.

    The objects are pickled sequentially in a fixed order (model, class
    names, mid-term window, mid-term step) so they can be restored with
    the matching sequence of ``cpickle.load`` calls in hmm_segmentation.
    """
    payload = (model, classes, mid_window, mid_step)
    with open(hmm_model_name, "wb") as out_file:
        for item in payload:
            cpickle.dump(item, out_file, protocol=cpickle.HIGHEST_PROTOCOL)
def hmm_segmentation(audio_file, hmm_model_name, plot_results=False,
                     gt_file=""):
    """Segment *audio_file* with a pre-trained HMM stored by save_hmm().

    Returns (labels, class_names, accuracy, cm); accuracy and the
    confusion matrix are only meaningful when *gt_file* exists.
    """
    sampling_rate, signal = audioBasicIO.read_audio_file(audio_file)
    # Restore the model and its metadata in the order save_hmm() wrote them.
    with open(hmm_model_name, "rb") as model_file:
        hmm = cpickle.load(model_file)
        class_names = cpickle.load(model_file)
        mid_window = cpickle.load(model_file)
        mid_step = cpickle.load(model_file)
    # Mid-term feature matrix (one column per mid-term window).
    short_window = round(sampling_rate * 0.050)
    features, _, _ = mtf.mid_feature_extraction(
        signal, sampling_rate,
        mid_window * sampling_rate, mid_step * sampling_rate,
        short_window, short_window)
    # Decode the feature sequence with the HMM.
    labels = hmm.predict(features.T)
    # Evaluate against ground truth when available (and optionally plot).
    labels_gt, class_names_gt, accuracy, cm = \
        load_ground_truth(gt_file, labels, class_names, mid_step, plot_results)
    return labels, class_names, accuracy, cm
def load_ground_truth_segments(gt_file, mt_step):
    """Read a .segments ground-truth file and resample it to fixed windows.

    ARGUMENTS:
        - gt_file: path of the tab-separated ground-truth file
        - mt_step: mid-term window step (seconds)
    RETURNS:
        - labels:      np.array of per-window class indices
        - class_names: list of class names (labels index into this list)

    Fix: the original re-mapped every label through
    ``class_names.index(class_names[label])`` behind the tautological test
    ``class_names[label] in class_names`` (always True, so the ``-1``
    branch was dead code).  Class names are unique, making that mapping
    the identity, so it has been removed.
    """
    seg_start, seg_end, seg_labels = read_segmentation_gt(gt_file)
    labels, class_names = segments_to_labels(seg_start, seg_end, seg_labels,
                                             mt_step)
    return np.array(labels), class_names
def calculate_confusion_matrix(predictions, ground_truth, classes):
    """Build a confusion matrix: rows = ground-truth class, cols = predicted.

    Only the overlapping prefix of the two label sequences is counted.
    """
    n_classes = len(classes)
    cm = np.zeros((n_classes, n_classes))
    n_common = min(predictions.shape[0], ground_truth.shape[0])
    for gt_label, pred_label in zip(ground_truth[:n_common],
                                    predictions[:n_common]):
        cm[int(gt_label), int(pred_label)] += 1
    return cm
def mid_term_file_classification(input_file, model_name, model_type,
                                 plot_results=False, gt_file=""):
    """
    This function performs mid-term classification of an audio stream.
    Towards this end, supervised knowledge is used,
    i.e. a pre-trained classifier.
    ARGUMENTS:
        - input_file:    path of the input WAV file
        - model_name:    name of the classification model
        - model_type:    svm or knn depending on the classifier type
        - plot_results:  True if results are to be plotted using
                         matplotlib along with a set of statistics
        - gt_file:       (optional) path of a .segments ground-truth file
                         used for evaluation
    RETURNS:
        - labels:        np.array of class indices, one per mid-term window
                         (empty list on early exit)
        - class_names:   list of class names the labels index into
        - accuracy:      accuracy against the ground truth (0.0 when no
                         ground truth is given)
        - cm:            confusion matrix against the ground truth (empty
                         array when no ground truth is given)
    """
    # defaults returned on any early exit (missing model / unreadable file)
    labels = []
    accuracy = 0.0
    class_names = []
    cm = np.array([])
    if not os.path.isfile(model_name):
        print("mtFileClassificationError: input model_type not found!")
        return labels, class_names, accuracy, cm
    # Load classifier:
    if model_type == "knn":
        classifier, mean, std, class_names, mt_win, mid_step, st_win, \
            st_step, compute_beat = at.load_model_knn(model_name)
    else:
        classifier, mean, std, class_names, mt_win, mid_step, st_win, \
            st_step, compute_beat = at.load_model(model_name)
    if compute_beat:
        # beat features are defined on whole recordings, not on fix-sized
        # segments, so such a model cannot be used here
        print("Model " + model_name + " contains long-term music features "
                                      "(beat etc) and cannot be used in "
                                      "segmentation")
        return labels, class_names, accuracy, cm
    # load input file
    sampling_rate, signal = audioBasicIO.read_audio_file(input_file)
    # could not read file
    if sampling_rate == 0:
        return labels, class_names, accuracy, cm
    # convert stereo (if) to mono
    signal = audioBasicIO.stereo_to_mono(signal)
    # mid-term feature extraction:
    mt_feats, _, _ = \
        mtf.mid_feature_extraction(signal, sampling_rate,
                                   mt_win * sampling_rate,
                                   mid_step * sampling_rate,
                                   round(sampling_rate * st_win),
                                   round(sampling_rate * st_step))
    posterior_matrix = []
    # for each feature vector (i.e. for each fix-sized segment):
    for col_index in range(mt_feats.shape[1]):
        # normalize current feature vector with the model's mean/std
        feature_vector = (mt_feats[:, col_index] - mean) / std
        # classify vector:
        label_predicted, posterior = \
            at.classifier_wrapper(classifier, model_type, feature_vector)
        labels.append(label_predicted)
        # keep the winning-class probability of each window
        posterior_matrix.append(np.max(posterior))
    labels = np.array(labels)
    # convert fix-sized flags to segments and classes
    segs, classes = labels_to_segments(labels, mid_step)
    # snap the last segment end to the true file duration
    segs[-1] = len(signal) / float(sampling_rate)
    # Load ground-truth (evaluation is a no-op when gt_file is missing):
    labels_gt, class_names_gt, accuracy, cm = \
        load_ground_truth(gt_file, labels, class_names, mid_step, plot_results)
    return labels, class_names, accuracy, cm
def load_ground_truth(gt_file, labels, class_names, mid_step, plot_results):
    """Compare predicted labels with a ground-truth segmentation file.

    ARGUMENTS:
        - gt_file:      path of the .segments ground-truth file (when the
                        file does not exist, nothing is evaluated)
        - labels:       predicted per-window class indices
        - class_names:  class names the predictions index into
        - mid_step:     mid-term window step (seconds)
        - plot_results: True to also plot the comparison
    RETURNS:
        - labels_gt:    ground-truth per-window labels (empty if no file)
        - class_names:  the *input* class names, passed through unchanged
        - accuracy:     fraction of agreeing windows (0 if no file)
        - cm:           confusion matrix over ground-truth classes
    """
    accuracy = 0
    cm = np.array([])
    labels_gt = np.array([])
    if os.path.isfile(gt_file):
        # load ground truth and class names
        labels_gt, class_names_gt = load_ground_truth_segments(gt_file,
                                                               mid_step)
        # map predicted labels to ground truth class names
        # Note: if a predicted label does not belong to the ground truth
        # classes --> -1
        labels_new = []
        for il, l in enumerate(labels):
            if class_names[int(l)] in class_names_gt:
                labels_new.append(class_names_gt.index(class_names[int(l)]))
            else:
                labels_new.append(-1)
        labels_new = np.array(labels_new)
        cm = calculate_confusion_matrix(labels_new, labels_gt, class_names_gt)
        # NOTE(review): labels_new indexes into class_names_gt, yet
        # class_names (the predictor's list) is passed for plotting here and
        # returned below -- looks inconsistent when the two lists differ in
        # order or content; confirm before relying on the plotted names.
        accuracy = plot_segmentation_results(labels_new, labels_gt,
                                             class_names, mid_step, not plot_results)
        if accuracy >= 0:
            print("Overall Accuracy: {0:.2f}".format(accuracy))
    return labels_gt, class_names, accuracy, cm
def evaluate_segmentation_classification_dir(dir_name, model_name, method_name):
    """Evaluate fix-sized segmentation-classification over a directory.

    Every WAV in *dir_name* is segmented either with a supervised
    classifier (svm/knn/...) or an HMM, depending on *method_name*, and
    compared against its .segments ground-truth file; per-file and
    aggregate accuracy / precision / recall / f1 statistics are printed.
    """
    accuracies = []
    class_names = []
    cm_total = np.array([])
    for index, wav_file in enumerate(glob.glob(dir_name + os.sep + '*.wav')):
        print(wav_file)
        gt_file = wav_file.replace('.wav', '.segments')
        # choose the segmentation back-end from the method name
        if method_name.lower() in ["svm", "svm_rbf", "knn", "randomforest",
                                   "gradientboosting", "extratrees"]:
            flags_ind, class_names, accuracy, cm_temp = \
                mid_term_file_classification(wav_file, model_name, method_name,
                                             False, gt_file)
        else:
            flags_ind, class_names, accuracy, cm_temp = \
                hmm_segmentation(wav_file, model_name, False, gt_file)
        if accuracy > 0:
            # accumulate the confusion matrix over all evaluated files
            # NOTE(review): 'not index' keys on the glob position, not on the
            # number of accepted files -- if the first file is skipped
            # (accuracy 0), cm_total stays empty and the '+' below would
            # fail on shape mismatch; confirm inputs before relying on it.
            if not index:
                cm_total = np.copy(cm_temp)
            else:
                cm_total = cm_total + cm_temp
            accuracies.append(accuracy)
            print(cm_temp, class_names)
            print(cm_total)
    if len(cm_total.shape) > 1:
        # normalize the aggregate matrix and print summary statistics
        cm_total = cm_total / np.sum(cm_total)
        rec, pre, f1 = compute_metrics(cm_total, class_names)
        print(" - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ")
        print("Average Accuracy: {0:.1f}".
              format(100.0*np.array(accuracies).mean()))
        print("Average recall: {0:.1f}".format(100.0*np.array(rec).mean()))
        print("Average precision: {0:.1f}".format(100.0*np.array(pre).mean()))
        print("Average f1: {0:.1f}".format(100.0*np.array(f1).mean()))
        print("Median Accuracy: {0:.1f}".
              format(100.0*np.median(np.array(accuracies))))
        print("Min Accuracy: {0:.1f}".format(100.0*np.array(accuracies).min()))
        print("Max Accuracy: {0:.1f}".format(100.0*np.array(accuracies).max()))
    else:
        print("Confusion matrix was empty, accuracy for every file was 0")
def silence_removal(signal, sampling_rate, st_win, st_step, smooth_window=0.5,
                    weight=0.5, plot=False):
    """
    Event Detection (silence removal)
    ARGUMENTS:
         - signal:          the input audio signal
         - sampling_rate:   sampling freq
         - st_win, st_step: window size and step in seconds
         - smooth_window:   (optional) smooth window (in seconds)
         - weight:          (optional) weight factor (0 < weight < 1)
                            the higher, the more strict
         - plot:            (optional) True if results are to be plotted
    RETURNS:
         - seg_limits:      list of segment limits in seconds (e.g.
                            [[0.1, 0.9], [1.4, 3.0]] means that the
                            resulting non-silent segments are
                            (0.1 - 0.9) seconds and (1.4 - 3.0) seconds)
    """
    # clamp weight into the open interval (0, 1)
    if weight >= 1:
        weight = 0.99
    if weight <= 0:
        weight = 0.01
    # Step 1: feature extraction
    signal = audioBasicIO.stereo_to_mono(signal)
    st_feats, _ = stf.feature_extraction(signal, sampling_rate,
                                         st_win * sampling_rate,
                                         st_step * sampling_rate)
    # Step 2: train binary svm classifier of low vs high energy frames
    # keep only the energy short-term sequence (2nd feature)
    st_energy = st_feats[1, :]
    en = np.sort(st_energy)
    # number of 10% of the total short-term windows
    st_windows_fraction = int(len(en) / 10)
    # compute "lower" 10% energy threshold
    low_threshold = np.mean(en[0:st_windows_fraction]) + 1e-15
    # compute "higher" 10% energy threshold
    high_threshold = np.mean(en[-st_windows_fraction:-1]) + 1e-15
    # get all features that correspond to low energy
    low_energy = st_feats[:, np.where(st_energy <= low_threshold)[0]]
    # get all features that correspond to high energy
    high_energy = st_feats[:, np.where(st_energy >= high_threshold)[0]]
    # form the binary classification task and ...
    features = [low_energy.T, high_energy.T]
    # normalize and train the respective svm probabilistic model
    # (ONSET vs SILENCE)
    features_norm, mean, std = at.normalize_features(features)
    svm = at.train_svm(features_norm, 1.0)
    # Step 3: compute onset probability based on the trained svm
    prob_on_set = []
    for index in range(st_feats.shape[1]):
        # for each frame
        cur_fv = (st_feats[:, index] - mean) / std
        # get svm probability (that it belongs to the ONSET class)
        prob_on_set.append(svm.predict_proba(cur_fv.reshape(1, -1))[0][1])
    prob_on_set = np.array(prob_on_set)
    # smooth probability:
    prob_on_set = smooth_moving_avg(prob_on_set, smooth_window / st_step)
    # Step 4A: detect onset frame indices:
    prog_on_set_sort = np.sort(prob_on_set)
    # find probability Threshold as a weighted average
    # of top 10% and lower 10% of the values
    nt = int(prog_on_set_sort.shape[0] / 10)
    threshold = (np.mean((1 - weight) * prog_on_set_sort[0:nt]) +
                 weight * np.mean(prog_on_set_sort[-nt::]))
    max_indices = np.where(prob_on_set > threshold)[0]
    # get the indices of the frames that satisfy the thresholding
    index = 0
    seg_limits = []
    time_clusters = []
    # Step 4B: group frame indices to onset segments
    # (consecutive indices at most 2 frames apart form one segment)
    while index < len(max_indices):
        # for each of the detected onset indices
        cur_cluster = [max_indices[index]]
        if index == len(max_indices)-1:
            break
        while max_indices[index+1] - cur_cluster[-1] <= 2:
            cur_cluster.append(max_indices[index+1])
            index += 1
            if index == len(max_indices)-1:
                break
        index += 1
        time_clusters.append(cur_cluster)
        seg_limits.append([cur_cluster[0] * st_step,
                           cur_cluster[-1] * st_step])
    # Step 5: Post process: remove very small segments:
    min_duration = 0.2
    seg_limits_2 = []
    for s_lim in seg_limits:
        if s_lim[1] - s_lim[0] > min_duration:
            seg_limits_2.append(s_lim)
    seg_limits = seg_limits_2
    if plot:
        # waveform with segment boundaries, plus the onset probability curve
        time_x = np.arange(0, signal.shape[0] / float(sampling_rate), 1.0 /
                           sampling_rate)
        plt.subplot(2, 1, 1)
        plt.plot(time_x, signal)
        for s_lim in seg_limits:
            plt.axvline(x=s_lim[0], color='red')
            plt.axvline(x=s_lim[1], color='red')
        plt.subplot(2, 1, 2)
        plt.plot(np.arange(0, prob_on_set.shape[0] * st_step, st_step),
                 prob_on_set)
        plt.title('Signal')
        for s_lim in seg_limits:
            plt.axvline(x=s_lim[0], color='red')
            plt.axvline(x=s_lim[1], color='red')
        plt.title('svm Probability')
        plt.show()
    return seg_limits
def speaker_diarization(filename, n_speakers, mid_window=2.0, mid_step=0.2,
                        short_window=0.05, lda_dim=35, plot_res=False):
    """
    Speaker diarization: assign each mid-term window of a recording to a
    speaker cluster.
    ARGUMENTS:
        - filename:          the name of the WAV file to be analyzed
        - n_speakers         the number of speakers (clusters) in
                             the recording (<=0 for unknown)
        - mid_window (opt)   mid-term window size
        - mid_step (opt)     mid-term window step
        - short_window (opt) short-term window size
        - lda_dim (opt)      LDA dimension (0 for no LDA)
        - plot_res (opt)     0 for not plotting the results 1 for plotting
    RETURNS:
        - cls:               np.array of per-window cluster (speaker) ids
    """
    sampling_rate, signal = audioBasicIO.read_audio_file(filename)
    signal = audioBasicIO.stereo_to_mono(signal)
    duration = len(signal) / sampling_rate
    # pre-trained knn models (10-speaker and male/female) used to augment
    # the features with speaker-related posteriors
    base_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                            "data/models")
    classifier_all, mean_all, std_all, class_names_all, _, _, _, _, _ = \
        at.load_model_knn(os.path.join(base_dir, "knn_speaker_10"))
    classifier_fm, mean_fm, std_fm, class_names_fm, _, _, _, _, _ = \
        at.load_model_knn(os.path.join(base_dir, "knn_speaker_male_female"))
    mid_feats, st_feats, _ = \
        mtf.mid_feature_extraction(signal, sampling_rate,
                                   mid_window * sampling_rate,
                                   mid_step * sampling_rate,
                                   round(sampling_rate * short_window),
                                   round(sampling_rate * short_window * 0.5))
    # append the two knn posterior vectors (plus a small bias) to each
    # mid-term feature vector
    mid_term_features = np.zeros((mid_feats.shape[0] + len(class_names_all) +
                                  len(class_names_fm), mid_feats.shape[1]))
    for index in range(mid_feats.shape[1]):
        feature_norm_all = (mid_feats[:, index] - mean_all) / std_all
        feature_norm_fm = (mid_feats[:, index] - mean_fm) / std_fm
        _, p1 = at.classifier_wrapper(classifier_all, "knn", feature_norm_all)
        _, p2 = at.classifier_wrapper(classifier_fm, "knn", feature_norm_fm)
        start = mid_feats.shape[0]
        end = mid_feats.shape[0] + len(class_names_all)
        mid_term_features[0:mid_feats.shape[0], index] = mid_feats[:, index]
        mid_term_features[start:end, index] = p1 + 1e-4
        mid_term_features[end::, index] = p2 + 1e-4
    mid_feats = mid_term_features  # TODO
    # hand-picked feature subset
    # NOTE(review): presumably these indices select MFCC-related rows of
    # the mid-term feature matrix -- confirm against the MidTermFeatures
    # ordering before changing them
    feature_selected = [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 41,
                        42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53]
    mid_feats = mid_feats[feature_selected, :]
    mid_feats_norm, mean, std = at.normalize_features([mid_feats.T])
    mid_feats_norm = mid_feats_norm[0].T
    n_wins = mid_feats.shape[1]
    # remove outliers (windows whose total distance to all other windows
    # exceeds 1.2x the average):
    dist_all = np.sum(distance.squareform(distance.pdist(mid_feats_norm.T)),
                      axis=0)
    m_dist_all = np.mean(dist_all)
    i_non_outliers = np.nonzero(dist_all < 1.2 * m_dist_all)[0]
    # TODO: Combine energy threshold for outlier removal:
    # EnergyMin = np.min(mt_feats[1,:])
    # EnergyMean = np.mean(mt_feats[1,:])
    # Thres = (1.5*EnergyMin + 0.5*EnergyMean) / 2.0
    # i_non_outliers = np.nonzero(mt_feats[1,:] > Thres)[0]
    # print i_non_outliers
    mt_feats_norm_or = mid_feats_norm
    mid_feats_norm = mid_feats_norm[:, i_non_outliers]
    # LDA dimensionality reduction:
    if lda_dim > 0:
        # extract mid-term features with minimum step:
        window_ratio = int(round(mid_window / short_window))
        step_ratio = int(round(short_window / short_window))
        mt_feats_to_red = []
        num_of_features = len(st_feats)
        num_of_stats = 2
        for index in range(num_of_stats * num_of_features):
            mt_feats_to_red.append([])
        # for each of the short-term features:
        for index in range(num_of_features):
            cur_pos = 0
            feat_len = len(st_feats[index])
            while cur_pos < feat_len:
                n1 = cur_pos
                n2 = cur_pos + window_ratio
                if n2 > feat_len:
                    n2 = feat_len
                short_features = st_feats[index][n1:n2]
                mt_feats_to_red[index].append(np.mean(short_features))
                mt_feats_to_red[index + num_of_features].\
                    append(np.std(short_features))
                cur_pos += step_ratio
        mt_feats_to_red = np.array(mt_feats_to_red)
        # augment with the same knn posteriors as above
        mt_feats_to_red_2 = np.zeros((mt_feats_to_red.shape[0] +
                                      len(class_names_all) +
                                      len(class_names_fm),
                                      mt_feats_to_red.shape[1]))
        limit = mt_feats_to_red.shape[0] + len(class_names_all)
        for index in range(mt_feats_to_red.shape[1]):
            feature_norm_all = (mt_feats_to_red[:, index] - mean_all) / std_all
            feature_norm_fm = (mt_feats_to_red[:, index] - mean_fm) / std_fm
            _, p1 = at.classifier_wrapper(classifier_all, "knn",
                                          feature_norm_all)
            _, p2 = at.classifier_wrapper(classifier_fm, "knn", feature_norm_fm)
            mt_feats_to_red_2[0:mt_feats_to_red.shape[0], index] = \
                mt_feats_to_red[:, index]
            mt_feats_to_red_2[mt_feats_to_red.shape[0]:limit, index] = p1 + 1e-4
            mt_feats_to_red_2[limit::, index] = p2 + 1e-4
        mt_feats_to_red = mt_feats_to_red_2
        mt_feats_to_red = mt_feats_to_red[feature_selected, :]
        mt_feats_to_red, mean, std = at.normalize_features([mt_feats_to_red.T])
        mt_feats_to_red = mt_feats_to_red[0].T
        # pseudo-labels (one per lda_step second) used to fit the LDA
        labels = np.zeros((mt_feats_to_red.shape[1], ))
        lda_step = 1.0
        lda_step_ratio = lda_step / short_window
        for index in range(labels.shape[0]):
            labels[index] = int(index * short_window / lda_step_ratio)
        clf = sklearn.discriminant_analysis.\
            LinearDiscriminantAnalysis(n_components=lda_dim)
        clf.fit(mt_feats_to_red.T, labels)
        mid_feats_norm = (clf.transform(mid_feats_norm.T)).T
    # candidate numbers of clusters: fixed, or 2..9 when unknown
    if n_speakers <= 0:
        s_range = range(2, 10)
    else:
        s_range = [n_speakers]
    cluster_labels = []
    sil_all = []
    cluster_centers = []
    for speakers in s_range:
        k_means = sklearn.cluster.KMeans(n_clusters=speakers)
        k_means.fit(mid_feats_norm.T)
        cls = k_means.labels_
        means = k_means.cluster_centers_
        cluster_labels.append(cls)
        cluster_centers.append(means)
        sil_1, sil_2 = [], []
        for c in range(speakers):
            # for each speaker (i.e. for each extracted cluster)
            clust_per_cent = np.nonzero(cls == c)[0].shape[0] / float(len(cls))
            if clust_per_cent < 0.020:
                sil_1.append(0.0)
                sil_2.append(0.0)
            else:
                # get subset of feature vectors
                mt_feats_norm_temp = mid_feats_norm[:, cls == c]
                # compute average distance between samples
                # that belong to the cluster (a values)
                dist = distance.pdist(mt_feats_norm_temp.T)
                sil_1.append(np.mean(dist)*clust_per_cent)
                sil_temp = []
                for c2 in range(speakers):
                    # compute distances from samples of other clusters
                    if c2 != c:
                        clust_per_cent_2 = np.nonzero(cls == c2)[0].shape[0] /\
                                           float(len(cls))
                        mid_features_temp = mid_feats_norm[:, cls == c2]
                        dist = distance.cdist(mt_feats_norm_temp.T,
                                              mid_features_temp.T)
                        sil_temp.append(np.mean(dist)*(clust_per_cent
                                                       + clust_per_cent_2)/2.0)
                sil_temp = np.array(sil_temp)
                # ... and keep the minimum value (i.e.
                # the distance from the "nearest" cluster)
                sil_2.append(min(sil_temp))
        sil_1 = np.array(sil_1)
        sil_2 = np.array(sil_2)
        sil = []
        for c in range(speakers):
            # for each cluster (speaker) compute silhouette
            sil.append((sil_2[c] - sil_1[c]) / (max(sil_2[c], sil_1[c]) + 1e-5))
        # keep the AVERAGE SILLOUETTE
        sil_all.append(np.mean(sil))
    imax = int(np.argmax(sil_all))
    # optimal number of clusters
    num_speakers = s_range[imax]
    # generate the final set of cluster labels
    # (important: need to retrieve the outlier windows:
    # this is achieved by giving them the value of their
    # nearest non-outlier window)
    cls = np.zeros((n_wins,))
    for index in range(n_wins):
        j = np.argmin(np.abs(index-i_non_outliers))
        cls[index] = cluster_labels[imax][j]
    # Post-process method 1: hmm smoothing
    for index in range(1):
        # hmm training
        start_prob, transmat, means, cov = \
            train_hmm_compute_statistics(mt_feats_norm_or, cls)
        hmm = hmmlearn.hmm.GaussianHMM(start_prob.shape[0], "diag")
        hmm.startprob_ = start_prob
        hmm.transmat_ = transmat
        hmm.means_ = means
        hmm.covars_ = cov
        cls = hmm.predict(mt_feats_norm_or.T)
    # Post-process method 2: median filtering:
    cls = scipy.signal.medfilt(cls, 13)
    cls = scipy.signal.medfilt(cls, 11)
    class_names = ["speaker{0:d}".format(c) for c in range(num_speakers)]
    # load ground-truth if available
    gt_file = filename.replace('.wav', '.segments')
    # if groundtruth exists
    if os.path.isfile(gt_file):
        seg_start, seg_end, seg_labs = read_segmentation_gt(gt_file)
        flags_gt, class_names_gt = segments_to_labels(seg_start, seg_end,
                                                      seg_labs, mid_step)
    if plot_res:
        fig = plt.figure()
        if n_speakers > 0:
            ax1 = fig.add_subplot(111)
        else:
            ax1 = fig.add_subplot(211)
        ax1.set_yticks(np.array(range(len(class_names))))
        ax1.axis((0, duration, -1, len(class_names)))
        ax1.set_yticklabels(class_names)
        ax1.plot(np.array(range(len(cls))) * mid_step + mid_step / 2.0, cls)
    if os.path.isfile(gt_file):
        if plot_res:
            ax1.plot(np.array(range(len(flags_gt))) *
                     mid_step + mid_step / 2.0, flags_gt, 'r')
        # print cluster purity / speaker purity for evaluation
        purity_cluster_m, purity_speaker_m = \
            evaluate_speaker_diarization(cls, flags_gt)
        print("{0:.1f}\t{1:.1f}".format(100 * purity_cluster_m,
                                        100 * purity_speaker_m))
        if plot_res:
            plt.title("Cluster purity: {0:.1f}% - "
                      "Speaker purity: {1:.1f}%".format(100 * purity_cluster_m,
                                                        100 * purity_speaker_m))
    if plot_res:
        plt.xlabel("time (seconds)")
        if n_speakers <= 0:
            plt.subplot(212)
            plt.plot(s_range, sil_all)
            plt.xlabel("number of clusters")
            plt.ylabel("average clustering's sillouette")
        plt.show()
    return cls
def speaker_diarization_evaluation(folder_name, lda_dimensions):
    """Run speaker diarization over every WAV in a folder, once per LDA dim.

    Cluster purity and speaker purity are printed by speaker_diarization()
    for each file that has a matching .segments ground-truth file.
    ARGUMENTS:
        - folder_name:     full path of the folder holding the WAV and
                           .segments (ground-truth) files
        - lda_dimensions:  list of LDA dimensions to evaluate (0 = no LDA)
    """
    wav_files = sorted(glob.glob(os.path.join(folder_name, '*.wav')))
    # Number of distinct speakers per file, read from the ground truth
    # (-1 when no ground-truth file exists -> "unknown" for diarization).
    num_speakers = []
    for wav_file in wav_files:
        gt_file = wav_file.replace('.wav', '.segments')
        if os.path.isfile(gt_file):
            _, _, seg_labs = read_segmentation_gt(gt_file)
            num_speakers.append(len(set(seg_labs)))
        else:
            num_speakers.append(-1)
    for dim in lda_dimensions:
        print("LDA = {0:d}".format(dim))
        for speaker_count, wav_file in zip(num_speakers, wav_files):
            speaker_diarization(wav_file, speaker_count, 2.0, 0.2, 0.05, dim,
                                plot_res=False)
def music_thumbnailing(signal, sampling_rate, short_window=1.0, short_step=0.5,
                       thumb_size=10.0, limit_1=0, limit_2=1):
    """
    This function detects instances of the most representative part of a
    music recording, also called "music thumbnails".
    A technique similar to the one proposed in [1], however a wider set of
    audio features is used instead of chroma features.
    In particular the following steps are followed:
     - Extract short-term audio features. Typical short-term window size: 1
       second
     - Compute the self-similarity matrix, i.e. all pairwise similarities
       between feature vectors
     - Apply a diagonal mask as a moving average filter on the values of the
       self-similarity matrix.
       The size of the mask is equal to the desirable thumbnail length.
     - Find the position of the maximum value of the new (filtered)
       self-similarity matrix. The audio segments that correspond to the
       diagonal around that position are the selected thumbnails
    ARGUMENTS:
     - signal:            input signal
     - sampling_rate:     sampling frequency
     - short_window:      window size (in seconds)
     - short_step:        window step (in seconds)
     - thumb_size:        desired thumbnail size (in seconds)
     - limit_1, limit_2:  (optional) fractions of the recording within
                          which the thumbnails are searched
    RETURNS:
     - A1:                beginning of 1st thumbnail (in seconds)
     - A2:                ending of 1st thumbnail (in seconds)
     - B1:                beginning of 2nd thumbnail (in seconds)
     - B2:                ending of 2nd thumbnail (in seconds)
     - sim_matrix:        the (filtered, masked) self-similarity matrix
    USAGE EXAMPLE:
       import audioFeatureExtraction as aF
       [fs, x] = basicIO.readAudioFile(input_file)
       [A1, A2, B1, B2] = musicThumbnailing(x, fs)
    [1] Bartsch, M. A., & Wakefield, G. H. (2005). Audio thumbnailing
    of popular music using chroma-based representations.
    Multimedia, IEEE Transactions on, 7(1), 96-104.
    """
    signal = audioBasicIO.stereo_to_mono(signal)
    # feature extraction:
    st_feats, _ = stf.feature_extraction(signal, sampling_rate,
                                         sampling_rate * short_window,
                                         sampling_rate * short_step)
    # self-similarity matrix
    sim_matrix = self_similarity_matrix(st_feats)
    # moving filter (diagonal mask of the desired thumbnail length):
    m_filter = int(round(thumb_size / short_step))
    diagonal = np.eye(m_filter, m_filter)
    sim_matrix = scipy.signal.convolve2d(sim_matrix, diagonal, 'valid')
    # post-processing (remove main diagonal elements)
    min_sm = np.min(sim_matrix)
    for i in range(sim_matrix.shape[0]):
        for j in range(sim_matrix.shape[1]):
            if abs(i-j) < 5.0 / short_step or i > j:
                sim_matrix[i, j] = min_sm
    # restrict the search to the [limit_1, limit_2] fraction of the
    # recording, then find max position:
    sim_matrix[0:int(limit_1 * sim_matrix.shape[0]), :] = min_sm
    sim_matrix[:, 0:int(limit_1 * sim_matrix.shape[0])] = min_sm
    sim_matrix[int(limit_2 * sim_matrix.shape[0])::, :] = min_sm
    sim_matrix[:, int(limit_2 * sim_matrix.shape[0])::] = min_sm
    rows, cols = np.unravel_index(sim_matrix.argmax(), sim_matrix.shape)
    i1 = rows
    i2 = rows
    j1 = cols
    j2 = cols
    # expand the (i, j) position diagonally until the thumbnail length is
    # reached, growing towards the side with the higher similarity
    while i2-i1 < m_filter:
        if i1 <= 0 or j1 <= 0 or i2 >= sim_matrix.shape[0]-2 or \
                j2 >= sim_matrix.shape[1]-2:
            break
        if sim_matrix[i1-1, j1-1] > sim_matrix[i2 + 1, j2 + 1]:
            i1 -= 1
            j1 -= 1
        else:
            i2 += 1
            j2 += 1
    return short_step * i1, short_step * i2, short_step * j1, short_step * j2, \
        sim_matrix
| 39.714407 | 83 | 0.598959 | from __future__ import print_function
import os
import csv
import glob
import scipy
import sklearn
import numpy as np
import hmmlearn.hmm
import sklearn.cluster
import pickle as cpickle
import matplotlib.pyplot as plt
from scipy.spatial import distance
import sklearn.discriminant_analysis
from pyAudioAnalysis import audioBasicIO
from pyAudioAnalysis import audioTrainTest as at
from pyAudioAnalysis import MidTermFeatures as mtf
from pyAudioAnalysis import ShortTermFeatures as stf
def smooth_moving_avg(signal, window=11):
    """Smooth a 1-D signal with a moving-average filter of length *window*.

    The signal is point-reflected at both ends before the convolution so
    the output has the same length as the input and the edges are not
    biased towards zero.

    ARGUMENTS:
        - signal: 1-D numpy array
        - window: filter length in samples (coerced to int; windows
                  shorter than 3 return the signal unchanged)
    RETURNS:
        - the smoothed signal (same length as the input)
    RAISES:
        - ValueError if *signal* is not 1-D or is shorter than *window*
    """
    window = int(window)
    if signal.ndim != 1:
        # Fix: the original raised ValueError("") with an empty message.
        raise ValueError("smooth_moving_avg only accepts 1-dimension arrays.")
    if signal.size < window:
        raise ValueError("Input vector needs to be bigger than window size.")
    if window < 3:
        return signal
    # point-reflect the signal at both ends to suppress edge artifacts
    s = np.r_[2 * signal[0] - signal[window - 1::-1],
              signal, 2 * signal[-1] - signal[-1:-window:-1]]
    # normalized box kernel -> plain moving average
    w = np.ones(window, 'd')
    y = np.convolve(w / w.sum(), s, mode='same')
    # trim the mirrored padding back off
    return y[window:-window + 1]
def self_similarity_matrix(feature_vectors):
    """Cosine self-similarity matrix of a (n_features x n_windows) matrix.

    Features are normalized first; entry (i, j) is 1 - cosine distance
    between windows i and j, so the main diagonal is 1.
    """
    normalized, _, _ = at.normalize_features([feature_vectors.T])
    features_norm = normalized[0].T
    pairwise = distance.pdist(features_norm.T, 'cosine')
    return 1.0 - distance.squareform(pairwise)
def labels_to_segments(labels, window):
    """Merge consecutive fix-sized labels into (start, end) segments.

    ARGUMENTS:
        - labels: sequence of per-window class flags
        - window: window duration in seconds
    RETURNS:
        - segments: (n_segs x 2) array of [start, end] times in seconds
                    (for a single label, the flat list [0, window])
        - classes:  class flag of each segment
    """
    if len(labels) == 1:
        return [0, window], labels

    boundaries = []   # end time of each detected segment
    classes = []      # class flag of each detected segment
    pos = 0
    active = labels[pos]
    while pos < len(labels) - 1:
        seg_class = active
        # advance until the label changes or the last window is reached
        while True:
            pos += 1
            reached_end = pos == len(labels) - 1
            if (labels[pos] != active) | reached_end:
                active = labels[pos]
                boundaries.append(pos * window)
                classes.append(seg_class)
                break
    # each segment starts where the previous one ended (the first at 0)
    segments = np.zeros((len(boundaries), 2))
    for k, end_time in enumerate(boundaries):
        if k > 0:
            segments[k, 0] = boundaries[k - 1]
        segments[k, 1] = end_time
    return segments, classes
def segments_to_labels(start_times, end_times, labels, window):
    """Convert (start, end, label) segments to fix-sized per-window flags.

    ARGUMENTS:
        - start_times: segment start times (seconds)
        - end_times:   segment end times (seconds)
        - labels:      segment labels (one per segment)
        - window:      window duration in seconds
    RETURNS:
        - flags:       np.array of class indices, one per window
        - class_names: sorted list of unique labels (flags index into it)

    Fix: class names are now sorted so the index assignment is
    deterministic across runs (the previous ``list(set(...))`` ordering
    depends on string hash randomization).
    """
    flags = []
    class_names = sorted(set(labels))
    index = window / 2.0
    while index < end_times[-1]:
        for i in range(len(start_times)):
            if start_times[i] < index <= end_times[i]:
                break
        # NOTE(review): when no segment contains `index`, the last value of
        # `i` leaks from the loop and the window inherits that segment's
        # label -- preserved from the original implementation.
        flags.append(class_names.index(labels[i]))
        index += window
    return np.array(flags), class_names
def compute_metrics(confusion_matrix, class_names):
    """Per-class recall, precision and f1 from a confusion matrix.

    Rows of *confusion_matrix* are ground-truth classes, columns are
    predictions.  Returns three lists (recall, precision, f1) ordered
    like *class_names*; all three stay empty (after printing an error)
    when the matrix size and the class list disagree.
    """
    recall, precision, f1 = [], [], []
    n_classes = confusion_matrix.shape[0]
    if len(class_names) != n_classes:
        print("Error in computePreRec! Confusion matrix and class_names "
              "list must be of the same size!")
    else:
        for i, _ in enumerate(class_names):
            true_positives = confusion_matrix[i, i]
            pre = true_positives / np.sum(confusion_matrix[:, i])
            rec = true_positives / np.sum(confusion_matrix[i, :])
            precision.append(pre)
            recall.append(rec)
            f1.append(2 * pre * rec / (pre + rec))
    return recall, precision, f1
def read_segmentation_gt(gt_file):
    """Read a tab-separated ground-truth segmentation file.

    Each valid row has three fields: start time, end time, label.
    Rows with a different number of fields are silently ignored.

    RETURNS:
        - start_times: np.array of segment start times (seconds)
        - end_times:   np.array of segment end times (seconds)
        - labels:      list of segment label strings
    """
    start_times, end_times, labels = [], [], []
    with open(gt_file, 'rt') as f_handle:
        for row in csv.reader(f_handle, delimiter='\t'):
            if len(row) != 3:
                continue
            seg_start, seg_end, seg_label = row
            start_times.append(float(seg_start))
            end_times.append(float(seg_end))
            labels.append(seg_label)
    return np.array(start_times), np.array(end_times), labels
def plot_segmentation_results(flags_ind, flags_ind_gt, class_names, mt_step,
                              evaluate_only=False):
    """Evaluate (and optionally visualize) a fix-sized segmentation.

    ARGUMENTS:
        - flags_ind:     np.array of predicted per-window class indices
        - flags_ind_gt:  np.array of ground-truth per-window indices
                         (may be empty)
        - class_names:   list of class names the indices refer to
        - mt_step:       mid-term window step in seconds
        - evaluate_only: when True, skip all printing/plotting and only
                         compute the accuracy
    RETURNS:
        - accuracy: fraction of windows where prediction == ground truth,
                    or -1 when the two sequences do not overlap
    """
    # map indices to names so segments carry their class name
    flags = [class_names[int(f)] for f in flags_ind]
    segments, classes = labels_to_segments(flags, mt_step)
    min_len = min(flags_ind.shape[0], flags_ind_gt.shape[0])
    if min_len > 0:
        accuracy = np.sum(flags_ind[0:min_len] ==
                          flags_ind_gt[0:min_len]) / float(min_len)
    else:
        accuracy = -1
    if not evaluate_only:
        duration = segments[-1, 1]
        # per-class total duration, percentage and average segment duration
        s_percentages = np.zeros((len(class_names), ))
        percentages = np.zeros((len(class_names), ))
        av_durations = np.zeros((len(class_names), ))
        for i_seg in range(segments.shape[0]):
            s_percentages[class_names.index(classes[i_seg])] += \
                (segments[i_seg, 1]-segments[i_seg, 0])
        for i in range(s_percentages.shape[0]):
            percentages[i] = 100.0 * s_percentages[i] / duration
            class_sum = sum(1 for c in classes if c == class_names[i])
            if class_sum > 0:
                av_durations[i] = s_percentages[i] / class_sum
            else:
                av_durations[i] = 0.0
        for i in range(percentages.shape[0]):
            print(class_names[i], percentages[i], av_durations[i])
        font = {'size': 10}
        plt.rc('font', **font)
        # top plot: predicted (and ground-truth, dashed red) sequences
        fig = plt.figure()
        ax1 = fig.add_subplot(211)
        ax1.set_yticks(np.array(range(len(class_names))))
        ax1.axis((0, duration, -1, len(class_names)))
        ax1.set_yticklabels(class_names)
        ax1.plot(np.array(range(len(flags_ind))) * mt_step +
                 mt_step / 2.0, flags_ind)
        if flags_ind_gt.shape[0] > 0:
            ax1.plot(np.array(range(len(flags_ind_gt))) * mt_step +
                     mt_step / 2.0, flags_ind_gt + 0.05, '--r')
        plt.xlabel("time (seconds)")
        if accuracy >= 0:
            plt.title('Accuracy = {0:.1f}%'.format(100.0 * accuracy))
        # bottom-left: class percentage durations
        ax2 = fig.add_subplot(223)
        plt.title("Classes percentage durations")
        ax2.axis((0, len(class_names) + 1, 0, 100))
        ax2.set_xticks(np.array(range(len(class_names) + 1)))
        ax2.set_xticklabels([" "] + class_names)
        print(np.array(range(len(class_names))), percentages)
        ax2.bar(np.array(range(len(class_names))) + 0.5, percentages)
        # bottom-right: average segment duration per class
        ax3 = fig.add_subplot(224)
        plt.title("Segment average duration per class")
        ax3.axis((0, len(class_names)+1, 0, av_durations.max()))
        ax3.set_xticks(np.array(range(len(class_names) + 1)))
        ax3.set_xticklabels([" "] + class_names)
        ax3.bar(np.array(range(len(class_names))) + 0.5, av_durations)
        fig.tight_layout()
        plt.show()
    return accuracy
def evaluate_speaker_diarization(labels, labels_gt):
    """Cluster purity and speaker purity of a diarization result.

    Both sequences are truncated to their common length.  A contingency
    matrix (predicted clusters x ground-truth speakers) is built and the
    two purities are computed as window-count-weighted averages of its
    row and column maxima.

    RETURNS:
        - purity_cluster_m: how "pure" each predicted cluster is
        - purity_speaker_m: how "pure" each ground-truth speaker is
    """
    common = min(labels.shape[0], labels_gt.shape[0])
    pred = labels[:common]
    truth = labels_gt[:common]
    pred_ids = np.unique(pred)
    truth_ids = np.unique(truth)
    # contingency[i, j]: windows in predicted cluster i AND true speaker j
    contingency = np.zeros((pred_ids.shape[0], truth_ids.shape[0]))
    for p, t in zip(pred, truth):
        contingency[int(np.nonzero(pred_ids == p)[0]),
                    int(np.nonzero(truth_ids == t)[0])] += 1.0
    per_cluster = np.sum(contingency, axis=1)   # windows per cluster
    per_speaker = np.sum(contingency, axis=0)   # windows per speaker
    total = np.sum(contingency)
    cluster_purity = np.max(contingency, axis=1) / per_cluster
    speaker_purity = np.max(contingency, axis=0) / per_speaker
    purity_cluster_m = np.sum(cluster_purity * per_cluster) / total
    purity_speaker_m = np.sum(speaker_purity * per_speaker) / total
    return purity_cluster_m, purity_speaker_m
def train_hmm_compute_statistics(features, labels):
    """Estimate Gaussian-HMM parameters from a labelled feature sequence.

    ARGUMENTS:
        - features: (n_features x n_windows) feature matrix
        - labels:   per-window class flags (the transition-matrix indexing
                    assumes they take the values 0..n_classes-1)
    RETURNS:
        - class_priors:         prior probability of each class
        - transmutation_matrix: row-normalized transition counts
        - means:                per-class feature means
        - cov:                  per-class feature standard deviations
                                (diagonal covariance model)
    """
    unique_labels = np.unique(labels)
    n_comps = len(unique_labels)
    n_feats = features.shape[0]
    if features.shape[1] < labels.shape[0]:
        print("trainHMM warning: number of short-term feature vectors "
              "must be greater or equal to the labels length!")
        labels = labels[0:features.shape[1]]
    # priors: relative frequency of each class
    counts = np.zeros((n_comps,))
    for k, u_label in enumerate(unique_labels):
        counts[k] = np.count_nonzero(labels == u_label)
    class_priors = counts / counts.sum()
    # transitions: count consecutive label pairs, then row-normalize
    transmutation_matrix = np.zeros((n_comps, n_comps))
    for k in range(labels.shape[0] - 1):
        transmutation_matrix[int(labels[k]), int(labels[k + 1])] += 1
    for k in range(n_comps):
        transmutation_matrix[k, :] /= transmutation_matrix[k, :].sum()
    # per-class mean and (diagonal) std of the feature vectors
    means = np.zeros((n_comps, n_feats))
    cov = np.zeros((n_comps, n_feats))
    for k, u_label in enumerate(unique_labels):
        member_cols = np.nonzero(labels == u_label)[0]
        means[k, :] = features[:, member_cols].mean(axis=1)
        cov[k, :] = np.std(features[:, member_cols], axis=1)
    return class_priors, transmutation_matrix, means, cov
def train_hmm_from_file(wav_file, gt_file, hmm_model_name, mid_window, mid_step):
    """Train a Gaussian HMM segmenter from one WAV file and its ground truth.

    The annotation in *gt_file* is resampled to fix-sized windows,
    mid-term features are extracted from *wav_file*, HMM parameters are
    estimated from the labelled features, and the resulting model is
    stored (via save_hmm) in *hmm_model_name*.

    RETURNS:
        - hmm:         the trained hmmlearn GaussianHMM
        - class_names: list of class names (state ids index into it)
    """
    seg_start, seg_end, seg_labs = read_segmentation_gt(gt_file)
    flags, class_names = segments_to_labels(seg_start, seg_end, seg_labs,
                                            mid_step)
    sampling_rate, signal = audioBasicIO.read_audio_file(wav_file)
    short_window = round(sampling_rate * 0.050)
    features, _, _ = mtf.mid_feature_extraction(
        signal, sampling_rate, mid_window * sampling_rate,
        mid_step * sampling_rate, short_window, short_window)
    class_priors, transition_matrix, means, cov = \
        train_hmm_compute_statistics(features, flags)
    hmm = hmmlearn.hmm.GaussianHMM(class_priors.shape[0], "diag")
    hmm.covars_ = cov
    hmm.means_ = means
    hmm.startprob_ = class_priors
    hmm.transmat_ = transition_matrix
    save_hmm(hmm_model_name, hmm, class_names, mid_window, mid_step)
    return hmm, class_names
def train_hmm_from_directory(folder_path, hmm_model_name, mid_window, mid_step):
    """Train a supervised GaussianHMM from all annotated WAVs in a folder.

    Every <file>.wav that has a matching <file>.segments annotation
    contributes its mid-term features and per-window labels; the HMM is
    trained on the concatenation of all files and pickled to disk.

    :param folder_path: directory containing the .wav / .segments pairs
    :param hmm_model_name: path where the pickled model is stored
    :param mid_window: mid-term window size, in seconds
    :param mid_step: mid-term window step, in seconds
    :returns: (hmm, class_names_all) - trained model and global class names
    """
    flags_all = np.array([])
    class_names_all = []
    # NOTE(review): if the folder contains no annotated WAV files, f_all is
    # never bound and the call below raises NameError - confirm callers
    # always pass a non-empty training folder.
    for i, f in enumerate(glob.glob(folder_path + os.sep + '*.wav')):
        wav_file = f
        gt_file = f.replace('.wav', '.segments')
        if os.path.isfile(gt_file):
            seg_start, seg_end, seg_labs = read_segmentation_gt(gt_file)
            flags, class_names = \
                segments_to_labels(seg_start, seg_end, seg_labs, mid_step)
            # Grow the global class list with any class unseen so far.
            for c in class_names:
                if c not in class_names_all:
                    class_names_all.append(c)
            sampling_rate, signal = audioBasicIO.read_audio_file(wav_file)
            feature_vector, _, _ = \
                mtf.mid_feature_extraction(signal, sampling_rate,
                                           mid_window * sampling_rate,
                                           mid_step * sampling_rate,
                                           round(sampling_rate * 0.050),
                                           round(sampling_rate * 0.050))
            # Keep only the overlapping part of features and labels.
            flag_len = len(flags)
            feat_cols = feature_vector.shape[1]
            min_sm = min(feat_cols, flag_len)
            feature_vector = feature_vector[:, 0:min_sm]
            flags = flags[0:min_sm]
            flags_new = []
            for j, fl in enumerate(flags):
                # Bugfix: remap each per-file label index to the global
                # class list. The previous code looked a name from
                # class_names_all up in class_names_all (a no-op), so files
                # whose classes appeared in a different order were trained
                # with wrong labels.
                flags_new.append(class_names_all.index(class_names[flags[j]]))
            flags_all = np.append(flags_all, np.array(flags_new))
            if i == 0:
                f_all = feature_vector
            else:
                f_all = np.concatenate((f_all, feature_vector), axis=1)
    # Supervised HMM parameter estimation on the concatenated data.
    class_priors, transmutation_matrix, means, cov = \
        train_hmm_compute_statistics(f_all, flags_all)
    hmm = hmmlearn.hmm.GaussianHMM(class_priors.shape[0], "diag")
    hmm.covars_ = cov
    hmm.means_ = means
    hmm.startprob_ = class_priors
    hmm.transmat_ = transmutation_matrix
    save_hmm(hmm_model_name, hmm, class_names_all, mid_window, mid_step)
    return hmm, class_names_all
def save_hmm(hmm_model_name, model, classes, mid_window, mid_step):
    """Serialize a trained HMM together with its metadata.

    The model, its class names and the mid-term window/step (in seconds)
    are pickled sequentially into a single file, in that order; they must
    be read back in the same order (see hmm_segmentation).
    """
    with open(hmm_model_name, "wb") as f_handle:
        for obj in (model, classes, mid_window, mid_step):
            cpickle.dump(obj, f_handle, protocol=cpickle.HIGHEST_PROTOCOL)
def hmm_segmentation(audio_file, hmm_model_name, plot_results=False,
                     gt_file=""):
    """Segment an audio file using a previously trained HMM.

    :param audio_file: path of the WAV file to segment
    :param hmm_model_name: path of the pickled model written by save_hmm
    :param plot_results: forwarded to load_ground_truth for visualisation
    :param gt_file: optional annotation file used to compute accuracy
    :returns: (labels, class_names, accuracy, cm) - per-window predicted
        labels, model class names, accuracy vs ground truth (0 if no
        gt_file) and the confusion matrix
    """
    sampling_rate, signal = audioBasicIO.read_audio_file(audio_file)
    # Load model + metadata in the exact order written by save_hmm.
    with open(hmm_model_name, "rb") as f_handle:
        hmm = cpickle.load(f_handle)
        class_names = cpickle.load(f_handle)
        mid_window = cpickle.load(f_handle)
        mid_step = cpickle.load(f_handle)
    # Same fixed 50 msec short-term window/step as used during training.
    features, _, _ = \
        mtf.mid_feature_extraction(signal, sampling_rate,
                                   mid_window * sampling_rate,
                                   mid_step * sampling_rate,
                                   round(sampling_rate * 0.050),
                                   round(sampling_rate * 0.050))
    # Viterbi decoding: one state label per mid-term window.
    labels = hmm.predict(features.T)
    labels_gt, class_names_gt, accuracy, cm = \
        load_ground_truth(gt_file, labels, class_names, mid_step, plot_results)
    return labels, class_names, accuracy, cm
def load_ground_truth_segments(gt_file, mt_step):
    """Load an annotation file and convert it to per-window class labels.

    :param gt_file: path of the segment annotation file
    :param mt_step: mid-term step, in seconds, used to discretize segments
    :returns: (labels, class_names) - numpy array of per-window class
        indices and the list of class names found in the file
    """
    seg_start, seg_end, seg_labels = read_segmentation_gt(gt_file)
    labels, class_names = segments_to_labels(seg_start, seg_end, seg_labels,
                                             mt_step)
    labels_temp = []
    for index, label in enumerate(labels):
        # NOTE(review): this membership test is a tautology (a name taken
        # FROM class_names is looked up IN class_names), so the -1 branch
        # is unreachable; presumably the check was meant to validate labels
        # against an external class list - confirm intent before changing.
        if class_names[labels[index]] in class_names:
            labels_temp.append(class_names.index(class_names[
                labels[index]]))
        else:
            labels_temp.append(-1)
    labels = np.array(labels_temp)
    return labels, class_names
def calculate_confusion_matrix(predictions, ground_truth, classes):
    """Build a confusion matrix from predicted and reference labels.

    Rows correspond to ground-truth classes and columns to predicted
    classes; entry (i, j) counts samples of true class i predicted as j.
    Only the overlapping prefix of the two label arrays is counted.

    :param predictions: 1-D array of predicted class indices
    :param ground_truth: 1-D array of reference class indices
    :param classes: list of class names (only its length is used)
    :returns: (len(classes), len(classes)) numpy array of counts
    """
    n_classes = len(classes)
    cm = np.zeros((n_classes, n_classes))
    n_common = min(predictions.shape[0], ground_truth.shape[0])
    for gt, pred in zip(ground_truth[:n_common], predictions[:n_common]):
        cm[int(gt), int(pred)] += 1
    return cm
def mid_term_file_classification(input_file, model_name, model_type,
                                 plot_results=False, gt_file=""):
    """Classify each mid-term window of a file with a pre-trained model.

    :param input_file: path of the WAV file to segment/classify
    :param model_name: path of the stored classifier model
    :param model_type: classifier type ("knn", "svm", ...) as understood
        by at.classifier_wrapper
    :param plot_results: forwarded to load_ground_truth for visualisation
    :param gt_file: optional annotation file used to compute accuracy
    :returns: (labels, class_names, accuracy, cm); empty labels/cm and
        zero accuracy if the model or audio file cannot be loaded
    """
    labels = []
    accuracy = 0.0
    class_names = []
    cm = np.array([])
    if not os.path.isfile(model_name):
        print("mtFileClassificationError: input model_type not found!")
        return labels, class_names, accuracy, cm
    # Load the classifier and the feature-extraction parameters it was
    # trained with (windows/steps come from the stored model).
    if model_type == "knn":
        classifier, mean, std, class_names, mt_win, mid_step, st_win, \
            st_step, compute_beat = at.load_model_knn(model_name)
    else:
        classifier, mean, std, class_names, mt_win, mid_step, st_win, \
            st_step, compute_beat = at.load_model(model_name)
    # Beat features are file-level, so beat-trained models cannot be
    # applied per-window.
    if compute_beat:
        print("Model " + model_name + " contains long-term music features "
                                      "(beat etc) and cannot be used in "
                                      "segmentation")
        return labels, class_names, accuracy, cm
    sampling_rate, signal = audioBasicIO.read_audio_file(input_file)
    if sampling_rate == 0:
        # audio file could not be read
        return labels, class_names, accuracy, cm
    signal = audioBasicIO.stereo_to_mono(signal)
    mt_feats, _, _ = \
        mtf.mid_feature_extraction(signal, sampling_rate,
                                   mt_win * sampling_rate,
                                   mid_step * sampling_rate,
                                   round(sampling_rate * st_win),
                                   round(sampling_rate * st_step))
    # Classify every mid-term window (z-normalised with the model stats).
    posterior_matrix = []
    for col_index in range(mt_feats.shape[1]):
        feature_vector = (mt_feats[:, col_index] - mean) / std
        label_predicted, posterior = \
            at.classifier_wrapper(classifier, model_type, feature_vector)
        labels.append(label_predicted)
        posterior_matrix.append(np.max(posterior))
    labels = np.array(labels)
    # Merge consecutive identical labels into segments; force the last
    # segment to end exactly at the file duration.
    # NOTE(review): segs[-1] assumes at least one window was classified;
    # an empty feature matrix would raise IndexError - confirm upstream
    # guarantees a non-empty signal.
    segs, classes = labels_to_segments(labels, mid_step)
    segs[-1] = len(signal) / float(sampling_rate)
    labels_gt, class_names_gt, accuracy, cm = \
        load_ground_truth(gt_file, labels, class_names, mid_step, plot_results)
    return labels, class_names, accuracy, cm
def load_ground_truth(gt_file, labels, class_names, mid_step, plot_results):
    """Compare predicted labels against an annotation file (if it exists).

    :param gt_file: annotation file path; if missing, zero accuracy and
        empty arrays are returned
    :param labels: per-window predicted label indices (into class_names)
    :param class_names: class names of the predictor
    :param mid_step: mid-term step in seconds (to discretize the gt)
    :param plot_results: if True, show the segmentation plot
    :returns: (labels_gt, class_names, accuracy, cm)

    NOTE(review): the returned class list is the predictor's class_names,
    while labels_gt and cm are indexed by class_names_gt from the
    annotation file - confirm callers expect this asymmetry.
    """
    accuracy = 0
    cm = np.array([])
    labels_gt = np.array([])
    if os.path.isfile(gt_file):
        labels_gt, class_names_gt = load_ground_truth_segments(gt_file,
                                                               mid_step)
        # Remap predicted labels onto the ground-truth class indices;
        # classes unknown to the annotation become -1.
        labels_new = []
        for il, l in enumerate(labels):
            if class_names[int(l)] in class_names_gt:
                labels_new.append(class_names_gt.index(class_names[int(l)]))
            else:
                labels_new.append(-1)
        labels_new = np.array(labels_new)
        cm = calculate_confusion_matrix(labels_new, labels_gt, class_names_gt)
        accuracy = plot_segmentation_results(labels_new, labels_gt,
                                             class_names, mid_step, not plot_results)
        if accuracy >= 0:
            print("Overall Accuracy: {0:.2f}".format(accuracy))
    return labels_gt, class_names, accuracy, cm
def evaluate_segmentation_classification_dir(dir_name, model_name, method_name):
    """Evaluate a segmentation-classification model on a folder of files.

    Every WAV in dir_name with a matching .segments annotation is
    segmented (fix-sized classifier for the known method names, HMM
    otherwise) and aggregate accuracy / precision / recall / f1 statistics
    are printed.

    :param dir_name: directory with the .wav / .segments pairs
    :param model_name: path of the stored model
    :param method_name: classifier type; anything outside the listed
        classifier names is treated as an HMM model
    """
    accuracies = []
    class_names = []
    cm_total = np.array([])
    for index, wav_file in enumerate(glob.glob(dir_name + os.sep + '*.wav')):
        print(wav_file)
        gt_file = wav_file.replace('.wav', '.segments')
        # Dispatch: fixed-window classifier vs HMM segmentation.
        if method_name.lower() in ["svm", "svm_rbf", "knn", "randomforest",
                                   "gradientboosting", "extratrees"]:
            flags_ind, class_names, accuracy, cm_temp = \
                mid_term_file_classification(wav_file, model_name, method_name,
                                             False, gt_file)
        else:
            flags_ind, class_names, accuracy, cm_temp = \
                hmm_segmentation(wav_file, model_name, False, gt_file)
        if accuracy > 0:
            # Accumulate the confusion matrix across files.
            if not index:
                cm_total = np.copy(cm_temp)
            else:
                cm_total = cm_total + cm_temp
            accuracies.append(accuracy)
            print(cm_temp, class_names)
            print(cm_total)
    # cm_total stays 1-D (the initial empty array) if no file scored > 0.
    if len(cm_total.shape) > 1:
        cm_total = cm_total / np.sum(cm_total)
        rec, pre, f1 = compute_metrics(cm_total, class_names)
        print(" - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ")
        print("Average Accuracy: {0:.1f}".
              format(100.0*np.array(accuracies).mean()))
        print("Average recall: {0:.1f}".format(100.0*np.array(rec).mean()))
        print("Average precision: {0:.1f}".format(100.0*np.array(pre).mean()))
        print("Average f1: {0:.1f}".format(100.0*np.array(f1).mean()))
        print("Median Accuracy: {0:.1f}".
              format(100.0*np.median(np.array(accuracies))))
        print("Min Accuracy: {0:.1f}".format(100.0*np.array(accuracies).min()))
        print("Max Accuracy: {0:.1f}".format(100.0*np.array(accuracies).max()))
    else:
        print("Confusion matrix was empty, accuracy for every file was 0")
def silence_removal(signal, sampling_rate, st_win, st_step, smooth_window=0.5,
                    weight=0.5, plot=False):
    """Detect non-silent segments of a signal via a semi-supervised SVM.

    An SVM is trained on the 10% lowest-energy frames (class "silence")
    vs the 10% highest-energy frames (class "activity"); its smoothed
    per-frame probability is then thresholded to obtain active segments.

    :param signal: audio samples (mono or stereo; converted to mono)
    :param sampling_rate: sampling frequency in Hz
    :param st_win: short-term window size, in seconds
    :param st_step: short-term window step, in seconds
    :param smooth_window: probability smoothing window, in seconds
    :param weight: threshold factor in (0, 1); higher values yield a
        stricter (higher) probability threshold
    :param plot: if True, plot the signal and the SVM probabilities
    :returns: list of [start_sec, end_sec] non-silent segment limits
    """
    # Clamp the weight into the open interval (0, 1).
    if weight >= 1:
        weight = 0.99
    if weight <= 0:
        weight = 0.01
    signal = audioBasicIO.stereo_to_mono(signal)
    st_feats, _ = stf.feature_extraction(signal, sampling_rate,
                                         st_win * sampling_rate,
                                         st_step * sampling_rate)
    # Row 1 of the short-term feature matrix is the frame energy.
    st_energy = st_feats[1, :]
    en = np.sort(st_energy)
    # Use the lowest/highest 10% energy frames as the two training sets.
    st_windows_fraction = int(len(en) / 10)
    low_threshold = np.mean(en[0:st_windows_fraction]) + 1e-15
    high_threshold = np.mean(en[-st_windows_fraction:-1]) + 1e-15
    low_energy = st_feats[:, np.where(st_energy <= low_threshold)[0]]
    high_energy = st_feats[:, np.where(st_energy >= high_threshold)[0]]
    features = [low_energy.T, high_energy.T]
    features_norm, mean, std = at.normalize_features(features)
    svm = at.train_svm(features_norm, 1.0)
    # Probability of the "activity" class for every frame.
    prob_on_set = []
    for index in range(st_feats.shape[1]):
        cur_fv = (st_feats[:, index] - mean) / std
        prob_on_set.append(svm.predict_proba(cur_fv.reshape(1, -1))[0][1])
    prob_on_set = np.array(prob_on_set)
    prob_on_set = smooth_moving_avg(prob_on_set, smooth_window / st_step)
    # Threshold: weighted mix of the mean low and mean high probabilities.
    prog_on_set_sort = np.sort(prob_on_set)
    nt = int(prog_on_set_sort.shape[0] / 10)
    threshold = (np.mean((1 - weight) * prog_on_set_sort[0:nt]) +
                 weight * np.mean(prog_on_set_sort[-nt::]))
    max_indices = np.where(prob_on_set > threshold)[0]
    # Group supra-threshold frames that are at most 2 steps apart into
    # contiguous segments.
    index = 0
    seg_limits = []
    time_clusters = []
    while index < len(max_indices):
        cur_cluster = [max_indices[index]]
        if index == len(max_indices)-1:
            break
        while max_indices[index+1] - cur_cluster[-1] <= 2:
            cur_cluster.append(max_indices[index+1])
            index += 1
            if index == len(max_indices)-1:
                break
        index += 1
        time_clusters.append(cur_cluster)
        seg_limits.append([cur_cluster[0] * st_step,
                           cur_cluster[-1] * st_step])
    # Discard segments shorter than min_duration seconds.
    min_duration = 0.2
    seg_limits_2 = []
    for s_lim in seg_limits:
        if s_lim[1] - s_lim[0] > min_duration:
            seg_limits_2.append(s_lim)
    seg_limits = seg_limits_2
    if plot:
        time_x = np.arange(0, signal.shape[0] / float(sampling_rate), 1.0 /
                           sampling_rate)
        plt.subplot(2, 1, 1)
        plt.plot(time_x, signal)
        for s_lim in seg_limits:
            plt.axvline(x=s_lim[0], color='red')
            plt.axvline(x=s_lim[1], color='red')
        plt.subplot(2, 1, 2)
        plt.plot(np.arange(0, prob_on_set.shape[0] * st_step, st_step),
                 prob_on_set)
        plt.title('Signal')
        for s_lim in seg_limits:
            plt.axvline(x=s_lim[0], color='red')
            plt.axvline(x=s_lim[1], color='red')
        plt.title('svm Probability')
        plt.show()
    return seg_limits
def speaker_diarization(filename, n_speakers, mid_window=2.0, mid_step=0.2,
                        short_window=0.05, lda_dim=35, plot_res=False):
    """Speaker diarization: assign each mid-term window to a speaker.

    :param filename: path of the WAV file to analyse
    :param n_speakers: number of speakers; <= 0 lets the method pick the
        best value in 2..9 using a silhouette-style criterion
    :param mid_window: mid-term window size, in seconds
    :param mid_step: mid-term window step, in seconds
    :param short_window: short-term window size, in seconds
    :param lda_dim: LDA output dimension; 0 disables the LDA step
    :param plot_res: if True, plot the result (and the ground truth if a
        .segments file exists next to the input)
    :returns: per-mid-term-window cluster (speaker) labels
    """
    sampling_rate, signal = audioBasicIO.read_audio_file(filename)
    signal = audioBasicIO.stereo_to_mono(signal)
    duration = len(signal) / sampling_rate
    # Pre-trained kNN models shipped with the package (data/models).
    base_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                            "data/models")
    classifier_all, mean_all, std_all, class_names_all, _, _, _, _, _ = \
        at.load_model_knn(os.path.join(base_dir, "knn_speaker_10"))
    classifier_fm, mean_fm, std_fm, class_names_fm, _, _, _, _, _ = \
        at.load_model_knn(os.path.join(base_dir, "knn_speaker_male_female"))
    mid_feats, st_feats, _ = \
        mtf.mid_feature_extraction(signal, sampling_rate,
                                   mid_window * sampling_rate,
                                   mid_step * sampling_rate,
                                   round(sampling_rate * short_window),
                                   round(sampling_rate * short_window * 0.5))
    # Augment each mid-term feature vector with the posteriors of the two
    # pre-trained kNN models (a small 1e-4 offset avoids exact zeros).
    mid_term_features = np.zeros((mid_feats.shape[0] + len(class_names_all) +
                                  len(class_names_fm), mid_feats.shape[1]))
    for index in range(mid_feats.shape[1]):
        feature_norm_all = (mid_feats[:, index] - mean_all) / std_all
        feature_norm_fm = (mid_feats[:, index] - mean_fm) / std_fm
        _, p1 = at.classifier_wrapper(classifier_all, "knn", feature_norm_all)
        _, p2 = at.classifier_wrapper(classifier_fm, "knn", feature_norm_fm)
        start = mid_feats.shape[0]
        end = mid_feats.shape[0] + len(class_names_all)
        mid_term_features[0:mid_feats.shape[0], index] = mid_feats[:, index]
        mid_term_features[start:end, index] = p1 + 1e-4
        mid_term_features[end::, index] = p2 + 1e-4
    mid_feats = mid_term_features
    # Keep only a hand-picked subset of the feature dimensions.
    feature_selected = [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 41,
                        42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53]
    mid_feats = mid_feats[feature_selected, :]
    mid_feats_norm, mean, std = at.normalize_features([mid_feats.T])
    mid_feats_norm = mid_feats_norm[0].T
    n_wins = mid_feats.shape[1]
    # Remove outlier windows: those whose summed distance to all other
    # windows exceeds 1.2x the average.
    dist_all = np.sum(distance.squareform(distance.pdist(mid_feats_norm.T)),
                      axis=0)
    m_dist_all = np.mean(dist_all)
    i_non_outliers = np.nonzero(dist_all < 1.2 * m_dist_all)[0]
    mt_feats_norm_or = mid_feats_norm
    mid_feats_norm = mid_feats_norm[:, i_non_outliers]
    # Optional LDA-based dimensionality reduction, trained on 1-second
    # pseudo-labels computed from short-term statistics.
    if lda_dim > 0:
        window_ratio = int(round(mid_window / short_window))
        step_ratio = int(round(short_window / short_window))
        mt_feats_to_red = []
        num_of_features = len(st_feats)
        num_of_stats = 2
        for index in range(num_of_stats * num_of_features):
            mt_feats_to_red.append([])
        # Mean and std of each short-term feature per mid-term window.
        for index in range(num_of_features):
            cur_pos = 0
            feat_len = len(st_feats[index])
            while cur_pos < feat_len:
                n1 = cur_pos
                n2 = cur_pos + window_ratio
                if n2 > feat_len:
                    n2 = feat_len
                short_features = st_feats[index][n1:n2]
                mt_feats_to_red[index].append(np.mean(short_features))
                mt_feats_to_red[index + num_of_features].\
                    append(np.std(short_features))
                cur_pos += step_ratio
        mt_feats_to_red = np.array(mt_feats_to_red)
        # Same kNN-posterior augmentation as above.
        mt_feats_to_red_2 = np.zeros((mt_feats_to_red.shape[0] +
                                      len(class_names_all) +
                                      len(class_names_fm),
                                      mt_feats_to_red.shape[1]))
        limit = mt_feats_to_red.shape[0] + len(class_names_all)
        for index in range(mt_feats_to_red.shape[1]):
            feature_norm_all = (mt_feats_to_red[:, index] - mean_all) / std_all
            feature_norm_fm = (mt_feats_to_red[:, index] - mean_fm) / std_fm
            _, p1 = at.classifier_wrapper(classifier_all, "knn",
                                          feature_norm_all)
            _, p2 = at.classifier_wrapper(classifier_fm, "knn", feature_norm_fm)
            mt_feats_to_red_2[0:mt_feats_to_red.shape[0], index] = \
                mt_feats_to_red[:, index]
            mt_feats_to_red_2[mt_feats_to_red.shape[0]:limit, index] = p1 + 1e-4
            mt_feats_to_red_2[limit::, index] = p2 + 1e-4
        mt_feats_to_red = mt_feats_to_red_2
        mt_feats_to_red = mt_feats_to_red[feature_selected, :]
        mt_feats_to_red, mean, std = at.normalize_features([mt_feats_to_red.T])
        mt_feats_to_red = mt_feats_to_red[0].T
        labels = np.zeros((mt_feats_to_red.shape[1], ))
        lda_step = 1.0
        lda_step_ratio = lda_step / short_window
        for index in range(labels.shape[0]):
            labels[index] = int(index * short_window / lda_step_ratio)
        clf = sklearn.discriminant_analysis.\
            LinearDiscriminantAnalysis(n_components=lda_dim)
        clf.fit(mt_feats_to_red.T, labels)
        mid_feats_norm = (clf.transform(mid_feats_norm.T)).T
    # Candidate speaker counts: fixed, or swept over 2..9.
    if n_speakers <= 0:
        s_range = range(2, 10)
    else:
        s_range = [n_speakers]
    cluster_labels = []
    sil_all = []
    cluster_centers = []
    # K-means clustering for each candidate count, scored by a
    # silhouette-style intra/inter-cluster distance measure.
    for speakers in s_range:
        k_means = sklearn.cluster.KMeans(n_clusters=speakers)
        k_means.fit(mid_feats_norm.T)
        cls = k_means.labels_
        means = k_means.cluster_centers_
        cluster_labels.append(cls)
        cluster_centers.append(means)
        sil_1, sil_2 = [], []
        for c in range(speakers):
            # Clusters covering < 2% of the windows get a zero score.
            clust_per_cent = np.nonzero(cls == c)[0].shape[0] / float(len(cls))
            if clust_per_cent < 0.020:
                sil_1.append(0.0)
                sil_2.append(0.0)
            else:
                mt_feats_norm_temp = mid_feats_norm[:, cls == c]
                dist = distance.pdist(mt_feats_norm_temp.T)
                sil_1.append(np.mean(dist)*clust_per_cent)
                sil_temp = []
                for c2 in range(speakers):
                    if c2 != c:
                        clust_per_cent_2 = np.nonzero(cls == c2)[0].shape[0] /\
                                           float(len(cls))
                        mid_features_temp = mid_feats_norm[:, cls == c2]
                        dist = distance.cdist(mt_feats_norm_temp.T,
                                              mid_features_temp.T)
                        sil_temp.append(np.mean(dist)*(clust_per_cent
                                                       + clust_per_cent_2)/2.0)
                sil_temp = np.array(sil_temp)
                sil_2.append(min(sil_temp))
        sil_1 = np.array(sil_1)
        sil_2 = np.array(sil_2)
        sil = []
        for c in range(speakers):
            sil.append((sil_2[c] - sil_1[c]) / (max(sil_2[c], sil_1[c]) + 1e-5))
        sil_all.append(np.mean(sil))
    # Pick the candidate count with the best average silhouette score.
    imax = int(np.argmax(sil_all))
    num_speakers = s_range[imax]
    # Map cluster labels back to ALL windows: each outlier window takes
    # the label of its nearest non-outlier window.
    cls = np.zeros((n_wins,))
    for index in range(n_wins):
        j = np.argmin(np.abs(index-i_non_outliers))
        cls[index] = cluster_labels[imax][j]
    # Single HMM resegmentation pass to temporally smooth the labels
    # (loop of one iteration kept for easy repetition).
    for index in range(1):
        start_prob, transmat, means, cov = \
            train_hmm_compute_statistics(mt_feats_norm_or, cls)
        hmm = hmmlearn.hmm.GaussianHMM(start_prob.shape[0], "diag")
        hmm.startprob_ = start_prob
        hmm.transmat_ = transmat
        hmm.means_ = means
        hmm.covars_ = cov
        cls = hmm.predict(mt_feats_norm_or.T)
    # Median filtering for additional temporal smoothing.
    cls = scipy.signal.medfilt(cls, 13)
    cls = scipy.signal.medfilt(cls, 11)
    class_names = ["speaker{0:d}".format(c) for c in range(num_speakers)]
    # Load ground-truth speaker segments if an annotation file exists.
    gt_file = filename.replace('.wav', '.segments')
    if os.path.isfile(gt_file):
        seg_start, seg_end, seg_labs = read_segmentation_gt(gt_file)
        flags_gt, class_names_gt = segments_to_labels(seg_start, seg_end,
                                                      seg_labs, mid_step)
    if plot_res:
        fig = plt.figure()
        if n_speakers > 0:
            ax1 = fig.add_subplot(111)
        else:
            ax1 = fig.add_subplot(211)
        ax1.set_yticks(np.array(range(len(class_names))))
        ax1.axis((0, duration, -1, len(class_names)))
        ax1.set_yticklabels(class_names)
        ax1.plot(np.array(range(len(cls))) * mid_step + mid_step / 2.0, cls)
    if os.path.isfile(gt_file):
        if plot_res:
            ax1.plot(np.array(range(len(flags_gt))) *
                     mid_step + mid_step / 2.0, flags_gt, 'r')
        # Cluster/speaker purity evaluation against the ground truth.
        purity_cluster_m, purity_speaker_m = \
            evaluate_speaker_diarization(cls, flags_gt)
        print("{0:.1f}\t{1:.1f}".format(100 * purity_cluster_m,
                                        100 * purity_speaker_m))
        if plot_res:
            plt.title("Cluster purity: {0:.1f}% - "
                      "Speaker purity: {1:.1f}%".format(100 * purity_cluster_m,
                                                        100 * purity_speaker_m))
    if plot_res:
        plt.xlabel("time (seconds)")
        if n_speakers <= 0:
            plt.subplot(212)
            plt.plot(s_range, sil_all)
            plt.xlabel("number of clusters")
            plt.ylabel("average clustering's sillouette")
        plt.show()
    return cls
def speaker_diarization_evaluation(folder_name, lda_dimensions):
    """Run speaker diarization over a folder for several LDA dimensions.

    For each WAV file with a .segments annotation, the true speaker count
    is taken from the annotation; files without an annotation are run with
    -1 (auto-estimated count). Per-file purity scores are printed by
    speaker_diarization itself.

    :param folder_name: directory containing the WAV (+ .segments) files
    :param lda_dimensions: list of LDA dimensions to evaluate (0 = no LDA)
    """
    types = ('*.wav', )
    wav_files = []
    for files in types:
        wav_files.extend(glob.glob(os.path.join(folder_name, files)))
    wav_files = sorted(wav_files)
    # get number of unique speakers per file (from ground-truth)
    num_speakers = []
    for wav_file in wav_files:
        gt_file = wav_file.replace('.wav', '.segments')
        if os.path.isfile(gt_file):
            _, _, seg_labs = read_segmentation_gt(gt_file)
            num_speakers.append(len(list(set(seg_labs))))
        else:
            num_speakers.append(-1)
    # Evaluate every requested LDA dimension on every file.
    for dim in lda_dimensions:
        print("LDA = {0:d}".format(dim))
        for i, wav_file in enumerate(wav_files):
            speaker_diarization(wav_file, num_speakers[i], 2.0, 0.2, 0.05, dim,
                                plot_res=False)
def music_thumbnailing(signal, sampling_rate, short_window=1.0, short_step=0.5,
                       thumb_size=10.0, limit_1=0, limit_2=1):
    """Detect the most representative (repeating) part of a music track.

    The self-similarity matrix of the short-term features is filtered with
    a diagonal mask of the thumbnail length; its maximum identifies two
    maximally similar regions, which are then extended diagonally.

    :param signal: audio samples (converted to mono)
    :param sampling_rate: sampling frequency in Hz
    :param short_window: short-term window size, in seconds
    :param short_step: short-term window step, in seconds
    :param thumb_size: desired thumbnail duration, in seconds
    :param limit_1: relative position (0..1) below which matches are masked
    :param limit_2: relative position (0..1) above which matches are masked
    :returns: (start1, end1, start2, end2, sim_matrix) - the two thumbnail
        intervals in seconds plus the processed similarity matrix
    """
    signal = audioBasicIO.stereo_to_mono(signal)
    # feature extraction:
    st_feats, _ = stf.feature_extraction(signal, sampling_rate,
                                         sampling_rate * short_window,
                                         sampling_rate * short_step)
    # self-similarity matrix
    sim_matrix = self_similarity_matrix(st_feats)
    # moving filter: diagonal averaging over the thumbnail length
    m_filter = int(round(thumb_size / short_step))
    diagonal = np.eye(m_filter, m_filter)
    sim_matrix = scipy.signal.convolve2d(sim_matrix, diagonal, 'valid')
    # post-processing (remove main diagonal elements)
    min_sm = np.min(sim_matrix)
    for i in range(sim_matrix.shape[0]):
        for j in range(sim_matrix.shape[1]):
            # suppress trivial near-diagonal matches and the lower triangle
            if abs(i-j) < 5.0 / short_step or i > j:
                sim_matrix[i, j] = min_sm
    # find max position: mask everything outside [limit_1, limit_2]
    sim_matrix[0:int(limit_1 * sim_matrix.shape[0]), :] = min_sm
    sim_matrix[:, 0:int(limit_1 * sim_matrix.shape[0])] = min_sm
    sim_matrix[int(limit_2 * sim_matrix.shape[0])::, :] = min_sm
    sim_matrix[:, int(limit_2 * sim_matrix.shape[0])::] = min_sm
    rows, cols = np.unravel_index(sim_matrix.argmax(), sim_matrix.shape)
    i1 = rows
    i2 = rows
    j1 = cols
    j2 = cols
    # Expand the match diagonally (towards the more similar side) until
    # the thumbnail length is reached or a matrix border is hit.
    while i2-i1 < m_filter:
        if i1 <= 0 or j1 <= 0 or i2 >= sim_matrix.shape[0]-2 or \
                j2 >= sim_matrix.shape[1]-2:
            break
        if sim_matrix[i1-1, j1-1] > sim_matrix[i2 + 1, j2 + 1]:
            i1 -= 1
            j1 -= 1
        else:
            i2 += 1
            j2 += 1
    return short_step * i1, short_step * i2, short_step * j1, short_step * j2, \
           sim_matrix
| true | true |
f71b2ed9253b60e916abe7efa50cc6715f2d213c | 2,044 | py | Python | test/crawler/testICrawler.py | AutoDash/AutoDash | 3924795a04159f80ea3b65b2172747babd15f35f | [
"Apache-2.0"
] | 3 | 2020-02-12T01:24:46.000Z | 2020-02-13T00:50:46.000Z | test/crawler/testICrawler.py | AutoDash/AutoDash | 3924795a04159f80ea3b65b2172747babd15f35f | [
"Apache-2.0"
] | 32 | 2020-02-20T10:20:56.000Z | 2022-02-10T01:42:46.000Z | test/crawler/testICrawler.py | AutoDash/AutoDash | 3924795a04159f80ea3b65b2172747babd15f35f | [
"Apache-2.0"
] | 1 | 2020-02-22T02:47:19.000Z | 2020-02-22T02:47:19.000Z | #!/usr/bin/env python3
import unittest
from src.crawler.iCrawler import iCrawler, UndefinedDatabaseException
from src.data.MetaDataItem import MetaDataItem
from test.mock.MockDataAccessor import MockDataAccessor
class MockCrawler(iCrawler):
    """Minimal iCrawler implementation that always yields one fixed item."""
    def __init__(self):
        super().__init__()
    def next_downloadable(self):
        # Return a fixed MetaDataItem so tests can predict what is crawled.
        return MetaDataItem(
            title="title",
            url="fake url 1",
            download_src="youtube")
class TestICrawler(unittest.TestCase):
    """Unit tests for the iCrawler base-class behaviour."""
    def setUp(self):
        self.crawler = MockCrawler()
        self.database = MockDataAccessor()
    def test_compiles(self):
        # Smoke test: the fixtures in setUp can be constructed at all.
        self.assertEqual(True, True)
    def test_no_database(self):
        # check_new_url must fail loudly when no database has been set.
        metadata = self.crawler.next_downloadable()
        try:
            self.crawler.check_new_url(metadata.url)
            self.assertTrue(False)
        except UndefinedDatabaseException:
            # Expected error
            pass
    def test_check_new_url(self):
        # A URL is "new" until its metadata has been published.
        self.crawler.set_database(self.database)
        metadata = self.crawler.next_downloadable()
        self.assertTrue(self.crawler.check_new_url(metadata.url))
        self.database.publish_new_metadata(metadata)
        self.assertFalse(self.crawler.check_new_url(metadata.url))
    def test_run(self):
        # run() must produce the item that next_downloadable() yields.
        self.crawler.set_database(self.database)
        metadata = self.crawler.run({})
        self.database.publish_new_metadata(metadata)
        id_list = self.database.fetch_video_id_list()
        self.assertTrue(len(id_list) == 1)
        metadata = self.database.fetch_metadata(id_list[0])
        # Get exact copy of the metadata item that was published
        copy_metadata = self.crawler.next_downloadable()
        # Creation times can differ between the two items, so align them
        copy_metadata.date_created = metadata.date_created
        copy_metadata.id = metadata.id  # ids differ too; align before comparing
        self.assertEqual(metadata.to_json(), copy_metadata.to_json())
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 29.623188 | 86 | 0.675147 |
import unittest
from src.crawler.iCrawler import iCrawler, UndefinedDatabaseException
from src.data.MetaDataItem import MetaDataItem
from test.mock.MockDataAccessor import MockDataAccessor
class MockCrawler(iCrawler):
    """Minimal iCrawler implementation that always yields one fixed item."""
    def __init__(self):
        super().__init__()
    def next_downloadable(self):
        # Return a fixed MetaDataItem so tests can predict what is crawled.
        return MetaDataItem(
            title="title",
            url="fake url 1",
            download_src="youtube")
class TestICrawler(unittest.TestCase):
    """Unit tests for the iCrawler base-class behaviour."""
    def setUp(self):
        self.crawler = MockCrawler()
        self.database = MockDataAccessor()
    def test_compiles(self):
        # Smoke test: the fixtures in setUp can be constructed at all.
        self.assertEqual(True, True)
    def test_no_database(self):
        # check_new_url must fail loudly when no database has been set.
        metadata = self.crawler.next_downloadable()
        try:
            self.crawler.check_new_url(metadata.url)
            self.assertTrue(False)
        except UndefinedDatabaseException:
            # Expected error: no database configured yet.
            pass
    def test_check_new_url(self):
        # A URL is "new" until its metadata has been published.
        self.crawler.set_database(self.database)
        metadata = self.crawler.next_downloadable()
        self.assertTrue(self.crawler.check_new_url(metadata.url))
        self.database.publish_new_metadata(metadata)
        self.assertFalse(self.crawler.check_new_url(metadata.url))
    def test_run(self):
        # run() must produce the item that next_downloadable() yields.
        self.crawler.set_database(self.database)
        metadata = self.crawler.run({})
        self.database.publish_new_metadata(metadata)
        id_list = self.database.fetch_video_id_list()
        self.assertTrue(len(id_list) == 1)
        metadata = self.database.fetch_metadata(id_list[0])
        # Fetch an identical copy of the item that was published.
        copy_metadata = self.crawler.next_downloadable()
        # Creation times and ids can differ; align them before comparing.
        copy_metadata.date_created = metadata.date_created
        copy_metadata.id = metadata.id
        self.assertEqual(metadata.to_json(), copy_metadata.to_json())
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| true | true |
f71b2f0c6d371df241e52fd406f5828a49387ea4 | 4,395 | py | Python | prepare.py | binmahone/Raven | 40b7e24f14a72af978341c311250f15795be1eb0 | [
"Apache-2.0"
] | 1 | 2021-12-23T02:45:06.000Z | 2021-12-23T02:45:06.000Z | prepare.py | Mukvin/Raven | 40b7e24f14a72af978341c311250f15795be1eb0 | [
"Apache-2.0"
] | null | null | null | prepare.py | Mukvin/Raven | 40b7e24f14a72af978341c311250f15795be1eb0 | [
"Apache-2.0"
] | 2 | 2021-09-16T10:18:01.000Z | 2021-09-17T08:40:47.000Z | import time
import boto3
from lib.Logger import Logger
from lib.popen import subprocess_popen
def prepare():
    """Provision an AWS EMR cluster and wait until it is ready.

    Workflow: run ./cloud/cluster.sh to request the cluster, poll EC2
    until the expected master/slave instances exist (writing their details
    and ready-made ssh/tunnel commands to ./cloud/instances), then poll
    EMR until the cluster reaches the WAITING state.

    NOTE(review): relies on the module-level ``logger`` created in the
    __main__ block, so it can only run via this script's entry point.
    """
    # 0. Initialize boto3 clients
    emr = boto3.client('emr')
    ec2 = boto3.client('ec2')
    # 1. Create an EMR cluster on AWS
    logger.info("Creating the EMR cluster...")
    with open("./cloud/cluster.sh", 'r') as f:
        cmd = f.read()
    res = subprocess_popen(cmd)
    # The cluster id ("j-...") is parsed out of the command's output.
    cid = res[1][res[1].find("j-"):len(res[1])-2]
    logger.info("Cluster created! Cluster ID is " + cid + ".")
    # 2. Check if all EC2 instances are ready
    logger.info("Creating EC2 instances for the cluster...")
    found_flag = False
    while found_flag is False:
        time.sleep(15)
        masters = []
        slaves = []
        masters_to_find = 1
        slaves_to_find = 2
        reservations = ec2.describe_instances()
        for reservation in reservations['Reservations']:
            for instance in reservation['Instances']:
                # Keep only instances tagged as part of this cluster.
                is_instance = False
                try:
                    for tag in instance['Tags']:
                        if tag['Key'] == 'aws:elasticmapreduce:job-flow-id':
                            if tag['Value'] == cid:
                                is_instance = True
                    if is_instance:
                        for tag in instance['Tags']:
                            if tag['Key'] == 'aws:elasticmapreduce:instance-group-role':
                                if tag['Value'] == 'MASTER':
                                    masters.append(instance)
                                else:
                                    slaves.append(instance)
                except KeyError:
                    # Instances without tags do not belong to the cluster.
                    pass
        if len(masters) == masters_to_find and len(slaves) == slaves_to_find:
            # Dump instance details plus ready-made ssh/tunnel commands.
            with open("./cloud/instances", 'w') as f:
                for instance in masters:
                    print(str(instance['ImageId'] + ', ' + instance['InstanceId'] + ', '
                              + instance['InstanceType'] + ', ' + instance['KeyName'] + ', '
                              + instance['PublicDnsName'] + ', ' + instance['PrivateDnsName']), file=f)
                for instance in slaves:
                    print(str(instance['ImageId'] + ', ' + instance['InstanceId'] + ', '
                              + instance['InstanceType'] + ', ' + instance['KeyName'] + ', '
                              + instance['PublicDnsName'] + ', ' + instance['PrivateDnsName']), file=f)
                print("Commands:", file=f)
                for instance in masters:
                    print("ssh -i \"./cloud/" + str(instance['KeyName']) + ".pem\" -o StrictHostKeyCHecking=no hadoop@"
                          + str(instance['PublicDnsName']), file=f)
                for instance in slaves:
                    print("ssh -i \"./cloud/" + str(instance['KeyName']) + ".pem\" -o StrictHostKeyCHecking=no hadoop@"
                          + str(instance['PublicDnsName']), file=f)
                print("Tunnels:", file=f)
                for instance in masters:
                    print("ssh -i \"./cloud/" + str(instance['KeyName']) + ".pem\" -N hadoop@" +
                          str(instance['PublicDnsName']) + " -L PORT:localhost:PORT", file=f)
            found_flag = True
        else:
            logger.info("MASTERs to create: " + str(masters_to_find - len(masters)) + ", "
                        + "SLAVEs to create: " + str(slaves_to_find - len(slaves)) + ".")
    # 3. Wait for the cluster to reach the WAITING (ready) state.
    logger.info("All instances are created! Starting cluster...")
    logger.info("It may take up to 10 minutes to start a cluster.")
    started_flag = False
    while started_flag is False:
        time.sleep(55)
        clusters = emr.list_clusters()
        for cluster in clusters['Clusters']:
            if cluster['Id'] == cid:
                if cluster['Status']['State'] == 'WAITING':
                    started_flag = True
                else:
                    logger.info("Cluster starting, please wait...")
                break
    logger.info("Cluster started!")
    # Bugfix: the details file written above is "./cloud/instances"; the
    # old message pointed users at a non-existent "instances.csv".
    logger.info("Please connect to servers in Shell consoles. IPs to be connected is in ./cloud/instances.")
    logger.info("Remember to edit the configuration of your engine regarding internal network (if needed).")
    return
if __name__ == '__main__':
logger = Logger('./log/benchmark.log', 'preparer')
prepare() | 46.755319 | 119 | 0.513311 | import time
import boto3
from lib.Logger import Logger
from lib.popen import subprocess_popen
def prepare():
    """Provision an AWS EMR cluster and wait until it is ready.

    Workflow: run ./cloud/cluster.sh to request the cluster, poll EC2
    until the expected master/slave instances exist (writing their details
    and ready-made ssh/tunnel commands to ./cloud/instances), then poll
    EMR until the cluster reaches the WAITING state.

    NOTE(review): relies on the module-level ``logger`` created in the
    __main__ block; also note the final log message mentions
    "instances.csv" while the file written is "./cloud/instances".
    """
    # Initialize boto3 clients.
    emr = boto3.client('emr')
    ec2 = boto3.client('ec2')
    # Create the EMR cluster via the shell script.
    logger.info("Creating the EMR cluster...")
    with open("./cloud/cluster.sh", 'r') as f:
        cmd = f.read()
    res = subprocess_popen(cmd)
    # The cluster id ("j-...") is parsed out of the command's output.
    cid = res[1][res[1].find("j-"):len(res[1])-2]
    logger.info("Cluster created! Cluster ID is " + cid + ".")
    # Poll EC2 until all expected instances of this cluster exist.
    logger.info("Creating EC2 instances for the cluster...")
    found_flag = False
    while found_flag is False:
        time.sleep(15)
        masters = []
        slaves = []
        masters_to_find = 1
        slaves_to_find = 2
        reservations = ec2.describe_instances()
        for reservation in reservations['Reservations']:
            for instance in reservation['Instances']:
                # Keep only instances tagged as part of this cluster.
                is_instance = False
                try:
                    for tag in instance['Tags']:
                        if tag['Key'] == 'aws:elasticmapreduce:job-flow-id':
                            if tag['Value'] == cid:
                                is_instance = True
                    if is_instance:
                        for tag in instance['Tags']:
                            if tag['Key'] == 'aws:elasticmapreduce:instance-group-role':
                                if tag['Value'] == 'MASTER':
                                    masters.append(instance)
                                else:
                                    slaves.append(instance)
                except KeyError:
                    # Instances without tags do not belong to the cluster.
                    pass
        if len(masters) == masters_to_find and len(slaves) == slaves_to_find:
            # Dump instance details plus ready-made ssh/tunnel commands.
            with open("./cloud/instances", 'w') as f:
                for instance in masters:
                    print(str(instance['ImageId'] + ', ' + instance['InstanceId'] + ', '
                              + instance['InstanceType'] + ', ' + instance['KeyName'] + ', '
                              + instance['PublicDnsName'] + ', ' + instance['PrivateDnsName']), file=f)
                for instance in slaves:
                    print(str(instance['ImageId'] + ', ' + instance['InstanceId'] + ', '
                              + instance['InstanceType'] + ', ' + instance['KeyName'] + ', '
                              + instance['PublicDnsName'] + ', ' + instance['PrivateDnsName']), file=f)
                print("Commands:", file=f)
                for instance in masters:
                    print("ssh -i \"./cloud/" + str(instance['KeyName']) + ".pem\" -o StrictHostKeyCHecking=no hadoop@"
                          + str(instance['PublicDnsName']), file=f)
                for instance in slaves:
                    print("ssh -i \"./cloud/" + str(instance['KeyName']) + ".pem\" -o StrictHostKeyCHecking=no hadoop@"
                          + str(instance['PublicDnsName']), file=f)
                print("Tunnels:", file=f)
                for instance in masters:
                    print("ssh -i \"./cloud/" + str(instance['KeyName']) + ".pem\" -N hadoop@" +
                          str(instance['PublicDnsName']) + " -L PORT:localhost:PORT", file=f)
            found_flag = True
        else:
            logger.info("MASTERs to create: " + str(masters_to_find - len(masters)) + ", "
                        + "SLAVEs to create: " + str(slaves_to_find - len(slaves)) + ".")
    # Wait for the cluster to reach the WAITING (ready) state.
    logger.info("All instances are created! Starting cluster...")
    logger.info("It may take up to 10 minutes to start a cluster.")
    started_flag = False
    while started_flag is False:
        time.sleep(55)
        clusters = emr.list_clusters()
        for cluster in clusters['Clusters']:
            if cluster['Id'] == cid:
                if cluster['Status']['State'] == 'WAITING':
                    started_flag = True
                else:
                    logger.info("Cluster starting, please wait...")
                break
    logger.info("Cluster started!")
    logger.info("Please connect to servers in Shell consoles. IPs to be connected is in ./cloud/instances.csv.")
    logger.info("Remember to edit the configuration of your engine regarding internal network (if needed).")
    return
if __name__ == '__main__':
logger = Logger('./log/benchmark.log', 'preparer')
prepare() | true | true |
f71b2fb9a2d9df9315262d217475ffce3958a2f8 | 9,376 | py | Python | src/snowflake/connector/auth_okta.py | groodt/snowflake-connector-python | 26d0a36cb9a65a728e745f077bd11ab536d386f8 | [
"Apache-2.0"
] | 3 | 2021-03-05T22:01:00.000Z | 2021-04-02T17:48:33.000Z | src/snowflake/connector/auth_okta.py | groodt/snowflake-connector-python | 26d0a36cb9a65a728e745f077bd11ab536d386f8 | [
"Apache-2.0"
] | 26 | 2021-06-01T09:43:42.000Z | 2022-03-16T15:11:52.000Z | src/snowflake/connector/auth_okta.py | groodt/snowflake-connector-python | 26d0a36cb9a65a728e745f077bd11ab536d386f8 | [
"Apache-2.0"
] | 1 | 2021-03-05T22:08:46.000Z | 2021-03-05T22:08:46.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2020 Snowflake Computing Inc. All right reserved.
#
import json
import logging
from .auth import Auth
from .auth_by_plugin import AuthByPlugin
from .compat import unescape, urlencode, urlsplit
from .constants import HTTP_HEADER_ACCEPT, HTTP_HEADER_CONTENT_TYPE, HTTP_HEADER_SERVICE_NAME, HTTP_HEADER_USER_AGENT
from .errorcode import ER_IDP_CONNECTION_ERROR, ER_INCORRECT_DESTINATION
from .errors import DatabaseError, Error
from .network import CONTENT_TYPE_APPLICATION_JSON, PYTHON_CONNECTOR_USER_AGENT
from .sqlstate import SQLSTATE_CONNECTION_WAS_NOT_ESTABLISHED
logger = logging.getLogger(__name__)
def _is_prefix_equal(url1, url2):
"""Checks if URL prefixes are identical.
The scheme, hostname and port number are compared. If the port number is not specified and the scheme is https,
the port number is assumed to be 443.
"""
parsed_url1 = urlsplit(url1)
parsed_url2 = urlsplit(url2)
port1 = parsed_url1.port
if not port1 and parsed_url1.scheme == 'https':
port1 = '443'
port2 = parsed_url1.port
if not port2 and parsed_url2.scheme == 'https':
port2 = '443'
return parsed_url1.hostname == parsed_url2.hostname and \
port1 == port2 and \
parsed_url1.scheme == parsed_url2.scheme
def _get_post_back_url_from_html(html):
    """Extracts the post-back URL from an IdP response page.

    The HTML is not guaranteed to be well formed, so a DOM parser
    (minidom) cannot be used. Instead, the first <form> element is assumed
    to be the post-back form and the URL is pulled out of its action
    attribute (HTML-unescaped).
    """
    logger.debug(html)
    form_pos = html.find('<form')
    action_start = html.find('action="', form_pos) + len('action="')
    action_end = html.find('"', action_start)
    return unescape(html[action_start:action_end])
class AuthByOkta(AuthByPlugin):
"""Authenticate user by OKTA."""
    def __init__(self, rest, application):
        """Initializes the OKTA authenticator.

        Args:
            rest: REST client used to talk to the Snowflake backend.
            application: client application name, sent with IdP requests.
        """
        self._rest = rest
        # Raw SAML assertion; populated by authenticate() and injected
        # into the login request by update_body().
        self._saml_response = None
        self._application = application
@property
def assertion_content(self):
return self._saml_response
def update_body(self, body):
body['data']['RAW_SAML_RESPONSE'] = self._saml_response
def authenticate(
self, authenticator, service_name, account, user, password):
"""SAML Authentication.
Steps are:
1. query GS to obtain IDP token and SSO url
2. IMPORTANT Client side validation:
validate both token url and sso url contains same prefix
(protocol + host + port) as the given authenticator url.
Explanation:
This provides a way for the user to 'authenticate' the IDP it is
sending his/her credentials to. Without such a check, the user could
be coerced to provide credentials to an IDP impersonator.
3. query IDP token url to authenticate and retrieve access token
4. given access token, query IDP URL snowflake app to get SAML response
5. IMPORTANT Client side validation:
validate the post back url come back with the SAML response
contains the same prefix as the Snowflake's server url, which is the
intended destination url to Snowflake.
Explanation:
This emulates the behavior of IDP initiated login flow in the user
browser where the IDP instructs the browser to POST the SAML
assertion to the specific SP endpoint. This is critical in
preventing a SAML assertion issued to one SP from being sent to
another SP.
"""
logger.debug('authenticating by SAML')
headers, sso_url, token_url = self._step1(
authenticator, service_name, account, user)
self._step2(authenticator, sso_url, token_url)
one_time_token = self._step3(headers, token_url, user, password)
response_html = self._step4(one_time_token, sso_url)
self._step5(response_html)
def _step1(self, authenticator, service_name, account, user):
logger.debug('step 1: query GS to obtain IDP token and SSO url')
headers = {
HTTP_HEADER_CONTENT_TYPE: CONTENT_TYPE_APPLICATION_JSON,
HTTP_HEADER_ACCEPT: CONTENT_TYPE_APPLICATION_JSON,
HTTP_HEADER_USER_AGENT: PYTHON_CONNECTOR_USER_AGENT,
}
if service_name:
headers[HTTP_HEADER_SERVICE_NAME] = service_name
url = "/session/authenticator-request"
body = Auth.base_auth_data(
user, account,
self._rest._connection.application,
self._rest._connection._internal_application_name,
self._rest._connection._internal_application_version,
self._rest._connection._ocsp_mode(),
self._rest._connection._login_timeout,
self._rest._connection._network_timeout,
)
body["data"]["AUTHENTICATOR"] = authenticator
logger.debug(
'account=%s, authenticator=%s',
account, authenticator,
)
ret = self._rest._post_request(
url, headers, json.dumps(body),
timeout=self._rest._connection.login_timeout,
socket_timeout=self._rest._connection.login_timeout)
if not ret['success']:
self.handle_failure(ret)
data = ret['data']
token_url = data['tokenUrl']
sso_url = data['ssoUrl']
return headers, sso_url, token_url
def _step2(self, authenticator, sso_url, token_url):
logger.debug('step 2: validate Token and SSO URL has the same prefix '
'as authenticator')
if not _is_prefix_equal(authenticator, token_url) or \
not _is_prefix_equal(authenticator, sso_url):
Error.errorhandler_wrapper(
self._rest._connection, None, DatabaseError,
{
'msg': ("The specified authenticator is not supported: "
"{authenticator}, token_url: {token_url}, "
"sso_url: {sso_url}".format(
authenticator=authenticator,
token_url=token_url,
sso_url=sso_url,
)),
'errno': ER_IDP_CONNECTION_ERROR,
'sqlstate': SQLSTATE_CONNECTION_WAS_NOT_ESTABLISHED
}
)
def _step3(self, headers, token_url, user, password):
logger.debug('step 3: query IDP token url to authenticate and '
'retrieve access token')
data = {
'username': user,
'password': password,
}
ret = self._rest.fetch(
'post', token_url, headers,
data=json.dumps(data),
timeout=self._rest._connection.login_timeout,
socket_timeout=self._rest._connection.login_timeout,
catch_okta_unauthorized_error=True)
one_time_token = ret.get('cookieToken')
if not one_time_token:
Error.errorhandler_wrapper(
self._rest._connection, None, DatabaseError,
{
'msg': ("The authentication failed for {user} "
"by {token_url}.".format(
token_url=token_url,
user=user,
)),
'errno': ER_IDP_CONNECTION_ERROR,
'sqlstate': SQLSTATE_CONNECTION_WAS_NOT_ESTABLISHED
}
)
return one_time_token
def _step4(self, one_time_token, sso_url):
logger.debug('step 4: query IDP URL snowflake app to get SAML '
'response')
url_parameters = {
'RelayState': "/some/deep/link",
'onetimetoken': one_time_token,
}
sso_url = sso_url + '?' + urlencode(url_parameters)
headers = {
HTTP_HEADER_ACCEPT: '*/*',
}
response_html = self._rest.fetch(
'get', sso_url, headers,
timeout=self._rest._connection.login_timeout,
socket_timeout=self._rest._connection.login_timeout,
is_raw_text=True)
return response_html
def _step5(self, response_html):
logger.debug('step 5: validate post_back_url matches Snowflake URL')
post_back_url = _get_post_back_url_from_html(response_html)
full_url = '{protocol}://{host}:{port}'.format(
protocol=self._rest._protocol,
host=self._rest._host,
port=self._rest._port,
)
if not _is_prefix_equal(post_back_url, full_url):
Error.errorhandler_wrapper(
self._rest._connection, None, DatabaseError,
{
'msg': ("The specified authenticator and destination "
"URL in the SAML assertion do not match: "
"expected: {url}, "
"post back: {post_back_url}".format(
url=full_url,
post_back_url=post_back_url,
)),
'errno': ER_INCORRECT_DESTINATION,
'sqlstate': SQLSTATE_CONNECTION_WAS_NOT_ESTABLISHED
}
)
self._saml_response = response_html
| 39.561181 | 117 | 0.611455 |
import json
import logging
from .auth import Auth
from .auth_by_plugin import AuthByPlugin
from .compat import unescape, urlencode, urlsplit
from .constants import HTTP_HEADER_ACCEPT, HTTP_HEADER_CONTENT_TYPE, HTTP_HEADER_SERVICE_NAME, HTTP_HEADER_USER_AGENT
from .errorcode import ER_IDP_CONNECTION_ERROR, ER_INCORRECT_DESTINATION
from .errors import DatabaseError, Error
from .network import CONTENT_TYPE_APPLICATION_JSON, PYTHON_CONNECTOR_USER_AGENT
from .sqlstate import SQLSTATE_CONNECTION_WAS_NOT_ESTABLISHED
logger = logging.getLogger(__name__)
def _is_prefix_equal(url1, url2):
parsed_url1 = urlsplit(url1)
parsed_url2 = urlsplit(url2)
port1 = parsed_url1.port
if not port1 and parsed_url1.scheme == 'https':
port1 = '443'
port2 = parsed_url1.port
if not port2 and parsed_url2.scheme == 'https':
port2 = '443'
return parsed_url1.hostname == parsed_url2.hostname and \
port1 == port2 and \
parsed_url1.scheme == parsed_url2.scheme
def _get_post_back_url_from_html(html):
logger.debug(html)
idx = html.find('<form')
start_idx = html.find('action="', idx)
end_idx = html.find('"', start_idx + 8)
return unescape(html[start_idx + 8:end_idx])
class AuthByOkta(AuthByPlugin):
def __init__(self, rest, application):
self._rest = rest
self._saml_response = None
self._application = application
@property
def assertion_content(self):
return self._saml_response
def update_body(self, body):
body['data']['RAW_SAML_RESPONSE'] = self._saml_response
def authenticate(
self, authenticator, service_name, account, user, password):
logger.debug('authenticating by SAML')
headers, sso_url, token_url = self._step1(
authenticator, service_name, account, user)
self._step2(authenticator, sso_url, token_url)
one_time_token = self._step3(headers, token_url, user, password)
response_html = self._step4(one_time_token, sso_url)
self._step5(response_html)
def _step1(self, authenticator, service_name, account, user):
logger.debug('step 1: query GS to obtain IDP token and SSO url')
headers = {
HTTP_HEADER_CONTENT_TYPE: CONTENT_TYPE_APPLICATION_JSON,
HTTP_HEADER_ACCEPT: CONTENT_TYPE_APPLICATION_JSON,
HTTP_HEADER_USER_AGENT: PYTHON_CONNECTOR_USER_AGENT,
}
if service_name:
headers[HTTP_HEADER_SERVICE_NAME] = service_name
url = "/session/authenticator-request"
body = Auth.base_auth_data(
user, account,
self._rest._connection.application,
self._rest._connection._internal_application_name,
self._rest._connection._internal_application_version,
self._rest._connection._ocsp_mode(),
self._rest._connection._login_timeout,
self._rest._connection._network_timeout,
)
body["data"]["AUTHENTICATOR"] = authenticator
logger.debug(
'account=%s, authenticator=%s',
account, authenticator,
)
ret = self._rest._post_request(
url, headers, json.dumps(body),
timeout=self._rest._connection.login_timeout,
socket_timeout=self._rest._connection.login_timeout)
if not ret['success']:
self.handle_failure(ret)
data = ret['data']
token_url = data['tokenUrl']
sso_url = data['ssoUrl']
return headers, sso_url, token_url
def _step2(self, authenticator, sso_url, token_url):
logger.debug('step 2: validate Token and SSO URL has the same prefix '
'as authenticator')
if not _is_prefix_equal(authenticator, token_url) or \
not _is_prefix_equal(authenticator, sso_url):
Error.errorhandler_wrapper(
self._rest._connection, None, DatabaseError,
{
'msg': ("The specified authenticator is not supported: "
"{authenticator}, token_url: {token_url}, "
"sso_url: {sso_url}".format(
authenticator=authenticator,
token_url=token_url,
sso_url=sso_url,
)),
'errno': ER_IDP_CONNECTION_ERROR,
'sqlstate': SQLSTATE_CONNECTION_WAS_NOT_ESTABLISHED
}
)
def _step3(self, headers, token_url, user, password):
logger.debug('step 3: query IDP token url to authenticate and '
'retrieve access token')
data = {
'username': user,
'password': password,
}
ret = self._rest.fetch(
'post', token_url, headers,
data=json.dumps(data),
timeout=self._rest._connection.login_timeout,
socket_timeout=self._rest._connection.login_timeout,
catch_okta_unauthorized_error=True)
one_time_token = ret.get('cookieToken')
if not one_time_token:
Error.errorhandler_wrapper(
self._rest._connection, None, DatabaseError,
{
'msg': ("The authentication failed for {user} "
"by {token_url}.".format(
token_url=token_url,
user=user,
)),
'errno': ER_IDP_CONNECTION_ERROR,
'sqlstate': SQLSTATE_CONNECTION_WAS_NOT_ESTABLISHED
}
)
return one_time_token
def _step4(self, one_time_token, sso_url):
logger.debug('step 4: query IDP URL snowflake app to get SAML '
'response')
url_parameters = {
'RelayState': "/some/deep/link",
'onetimetoken': one_time_token,
}
sso_url = sso_url + '?' + urlencode(url_parameters)
headers = {
HTTP_HEADER_ACCEPT: '*/*',
}
response_html = self._rest.fetch(
'get', sso_url, headers,
timeout=self._rest._connection.login_timeout,
socket_timeout=self._rest._connection.login_timeout,
is_raw_text=True)
return response_html
def _step5(self, response_html):
logger.debug('step 5: validate post_back_url matches Snowflake URL')
post_back_url = _get_post_back_url_from_html(response_html)
full_url = '{protocol}://{host}:{port}'.format(
protocol=self._rest._protocol,
host=self._rest._host,
port=self._rest._port,
)
if not _is_prefix_equal(post_back_url, full_url):
Error.errorhandler_wrapper(
self._rest._connection, None, DatabaseError,
{
'msg': ("The specified authenticator and destination "
"URL in the SAML assertion do not match: "
"expected: {url}, "
"post back: {post_back_url}".format(
url=full_url,
post_back_url=post_back_url,
)),
'errno': ER_INCORRECT_DESTINATION,
'sqlstate': SQLSTATE_CONNECTION_WAS_NOT_ESTABLISHED
}
)
self._saml_response = response_html
| true | true |
f71b2fcfe4e1bf00db7f8c9aa8bc9eac22fc3f1c | 2,248 | py | Python | mobile/db/prompts.py | TRIP-Lab/itinerum-mobile-api | a3b31b411d9d46434c54be1d21415024dec86ae7 | [
"MIT"
] | 4 | 2019-02-04T15:34:49.000Z | 2021-04-30T17:33:05.000Z | mobile/db/prompts.py | TRIP-Lab/itinerum-mobile-api | a3b31b411d9d46434c54be1d21415024dec86ae7 | [
"MIT"
] | 1 | 2021-03-19T22:26:11.000Z | 2021-03-19T22:26:11.000Z | mobile/db/prompts.py | TRIP-Lab/itinerum-mobile-api | a3b31b411d9d46434c54be1d21415024dec86ae7 | [
"MIT"
] | 5 | 2018-03-13T20:24:55.000Z | 2021-01-18T14:44:35.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Kyle Fitzsimmons, 2017
from datetime import datetime
import pytz
from models import db, PromptResponse
class MobilePromptsActions:
    """Database helpers for reading and upserting mobile prompt responses."""

    def get(self, prompts_uuids):
        """Return a query of PromptResponse rows whose prompt_uuid is in ``prompts_uuids``."""
        prompts_filters = PromptResponse.prompt_uuid.in_(prompts_uuids)
        return db.session.query(PromptResponse).filter(prompts_filters)

    # formats a prompts query to a lookup dictionary
    # [p0, p1, p2] --> {p0_uuid: {p0_prompt_num: p0}, ...}
    def create_lookup(self, prompts):
        """Index prompt rows by (prompt_uuid, prompt_num) for O(1) lookup."""
        prompts_lookup = {}
        for p in prompts:
            prompts_lookup.setdefault(p.prompt_uuid, {})[p.prompt_num] = p
        return prompts_lookup

    def upsert(self, user, prompts):
        """Insert new prompt responses or update existing ones in bulk.

        ``prompts`` is a list of dicts with keys: uuid, prompt_num, answer,
        displayed_at (or legacy 'timestamp'), recorded_at, latitude, longitude.
        Returns the list of saved PromptResponse objects.
        """
        prompts_uuids = {p['uuid'] for p in prompts}
        existing_prompts = self.get(prompts_uuids)
        existing_lookup = self.create_lookup(existing_prompts)

        responses = []
        for prompt in prompts:
            # gracefully handle change of 'timestamp' -> 'displayed_at'
            if 'timestamp' in prompt:
                prompt['displayed_at'] = prompt.pop('timestamp')

            uuid = prompt['uuid']
            prompt_num = int(prompt['prompt_num'])
            # BUG FIX: ``existing_lookup[uuid][prompt_num]`` raised KeyError
            # when the uuid already existed with a *different* prompt_num;
            # fall through to insert in that case instead.
            response = existing_lookup.get(uuid, {}).get(prompt_num)
            if response is not None:
                # update in place and stamp the edit time (UTC)
                response.response = prompt['answer']
                response.recorded_at = prompt['recorded_at']
                response.latitude = prompt['latitude']
                response.longitude = prompt['longitude']
                response.edited_at = datetime.now(pytz.utc)
            else:
                response = PromptResponse(
                    survey_id=user.survey_id,
                    mobile_id=user.id,
                    prompt_uuid=uuid,
                    prompt_num=prompt_num,
                    response=prompt['answer'],
                    displayed_at=prompt['displayed_at'],
                    recorded_at=prompt['recorded_at'],
                    latitude=prompt['latitude'],
                    longitude=prompt['longitude'])
            responses.append(response)
        db.session.bulk_save_objects(responses)
        db.session.commit()
        return responses
| 36.852459 | 71 | 0.589858 |
from datetime import datetime
import pytz
from models import db, PromptResponse
class MobilePromptsActions:
def get(self, prompts_uuids):
prompts_filters = PromptResponse.prompt_uuid.in_(prompts_uuids)
return db.session.query(PromptResponse).filter(prompts_filters)
def create_lookup(self, prompts):
prompts_lookup = {}
for p in prompts:
prompts_lookup.setdefault(p.prompt_uuid, {})
prompts_lookup[p.prompt_uuid][p.prompt_num] = p
return prompts_lookup
def upsert(self, user, prompts):
prompts_uuids = {p['uuid'] for p in prompts}
existing_prompts = self.get(prompts_uuids)
existing_lookup = self.create_lookup(existing_prompts)
responses = []
for prompt in prompts:
if 'timestamp' in prompt:
prompt['displayed_at'] = prompt.pop('timestamp')
uuid = prompt['uuid']
prompt_num = int(prompt['prompt_num'])
if uuid in existing_lookup:
response = existing_lookup[uuid][prompt_num]
response.response = prompt['answer']
response.recorded_at = prompt['recorded_at']
response.latitude = prompt['latitude']
response.longitude = prompt['longitude']
response.edited_at = datetime.now(pytz.utc)
else:
response = PromptResponse(
survey_id=user.survey_id,
mobile_id=user.id,
prompt_uuid=uuid,
prompt_num=prompt_num,
response=prompt['answer'],
displayed_at=prompt['displayed_at'],
recorded_at=prompt['recorded_at'],
latitude=prompt['latitude'],
longitude=prompt['longitude'])
responses.append(response)
db.session.bulk_save_objects(responses)
db.session.commit()
return responses
| true | true |
f71b2fd793a7e41ee094ac47d7408dbb3c13f221 | 804 | py | Python | garf_data.py | doesnotsitproperly/garfbot | 04e2a8409bd171ca29b6cdb5e864fe2fa1c13b6d | [
"Unlicense"
] | null | null | null | garf_data.py | doesnotsitproperly/garfbot | 04e2a8409bd171ca29b6cdb5e864fe2fa1c13b6d | [
"Unlicense"
] | null | null | null | garf_data.py | doesnotsitproperly/garfbot | 04e2a8409bd171ca29b6cdb5e864fe2fa1c13b6d | [
"Unlicense"
] | null | null | null | import json, os
class GarfData:
    """Bot configuration persisted as ``garf_data.json`` in the working directory."""

    # Path resolved against the CWD at class-definition time.
    file = os.path.join(os.getcwd(), "garf_data.json")

    token: str
    path_to_ffmpeg: str
    jokes: list
    trigger_words: list

    def __init__(self):
        # json.load over the file object (was json.loads(f.read()));
        # explicit UTF-8 so behavior does not depend on the platform locale.
        with open(self.file, "r", encoding="utf-8") as f:
            json_dict = json.load(f)
        self.token = json_dict["token"]
        self.path_to_ffmpeg = json_dict["path_to_ffmpeg"]
        self.jokes = json_dict["jokes"]
        self.trigger_words = json_dict["trigger_words"]

    def overwrite(self):
        """Serialize the current attributes back to ``self.file``."""
        json_dict = {
            "token": self.token,
            "path_to_ffmpeg": self.path_to_ffmpeg,
            "jokes": self.jokes,
            "trigger_words": self.trigger_words,
        }
        with open(self.file, "w", encoding="utf-8") as f:
            f.write(json.dumps(json_dict, indent=4) + os.linesep)
| 27.724138 | 67 | 0.584577 | import json, os
class GarfData:
file = os.path.join(os.getcwd(), "garf_data.json")
token: str
path_to_ffmpeg: str
jokes: list
trigger_words: list
def __init__(self):
with open(self.file, "r") as f:
json_dict = json.loads(f.read())
self.token = json_dict["token"]
self.path_to_ffmpeg = json_dict["path_to_ffmpeg"]
self.jokes = json_dict["jokes"]
self.trigger_words = json_dict["trigger_words"]
def overwrite(self):
json_dict = {
"token": self.token,
"path_to_ffmpeg": self.path_to_ffmpeg,
"jokes": self.jokes,
"trigger_words": self.trigger_words
}
with open(self.file, "w") as f:
f.write(json.dumps(json_dict, indent = 4) + os.linesep)
| true | true |
f71b2fde98fb40c654242c9096ab88064334974c | 33,682 | py | Python | models/adaptation_model_stage1.py | BwCai/DCAA-UDA | 359c2122060aebfbe4384c918768c261fe2dc9c7 | [
"Apache-2.0"
] | 2 | 2022-01-28T10:35:53.000Z | 2022-03-09T14:38:59.000Z | models/adaptation_model_stage1.py | BwCai/DCAA-UDA | 359c2122060aebfbe4384c918768c261fe2dc9c7 | [
"Apache-2.0"
] | 1 | 2022-03-07T10:48:11.000Z | 2022-03-07T10:48:11.000Z | models/adaptation_model_stage1.py | BwCai/DCAA-UDA | 359c2122060aebfbe4384c918768c261fe2dc9c7 | [
"Apache-2.0"
] | null | null | null | from models.base_model import BaseModel
import torch.nn as nn
import torch.nn.functional as F
import os, sys
import torch
import numpy as np
import itertools
from torch.autograd import Variable
from optimizers import get_optimizer
from schedulers import get_scheduler
from models.sync_batchnorm import SynchronizedBatchNorm2d, DataParallelWithCallback
from models.deeplab_multimodal import DeepLab
from models.decoder import Decoder
from models.aspp import ASPP
from models.discriminator import FCDiscriminator, FCDiscriminator_low, FCDiscriminator_out, FCDiscriminator_class
from loss import get_loss_function
from .utils import freeze_bn, GradReverse, normalisation_pooling
from metrics import runningScore
import pdb
def multimodal_merger(multi_modal_data, is_upsample=False, up_size=None):
    """Fuse per-modal network outputs into a single prediction.

    Args:
        multi_modal_data (dict): holds ``feat_cls`` (list of per-modal class
            feature tensors) and ``output`` (list of per-modal logit tensors).
        is_upsample (bool): when True each logit tensor is bilinearly resized
            to ``up_size`` (replacing the entries of the ``output`` list in
            place) before fusion.
        up_size (tuple): target spatial size for the optional upsampling.

    Returns:
        dict: the original ``feat_cls`` list, the channel-concatenated
        ``feat_cls_cat``, the element-wise sum ``output_comb``, and the
        (possibly resized) ``output`` list.
    """
    feat_cls = multi_modal_data['feat_cls']
    output = multi_modal_data['output']

    # Stack every modal's class features along the channel dimension.
    feat_cls_cat = torch.cat(feat_cls, 1)

    # Sum the per-modal predictions, optionally resizing each one first.
    output_comb = 0
    for idx in range(len(output)):
        if is_upsample:
            output[idx] = F.interpolate(
                output[idx], size=up_size, mode='bilinear', align_corners=True)
        output_comb = output_comb + output[idx]

    return {
        'feat_cls': feat_cls,
        'feat_cls_cat': feat_cls_cat,
        'output_comb': output_comb,
        'output': output,
    }
class CustomMetricsMultimodalMerger():
    """Merge per-modal pseudo-label statistics held by the Deeplab model.

    Given a list of modal indices, produces a single class-threshold vector,
    a single clustering threshold, or a flattened set of per-class objective
    (cluster-center) vectors by averaging / concatenating across the selected
    modal branches of ``model``.
    """

    def __init__(self, modal_num, category_num, model):
        self.modal_num = modal_num
        self.category_num = category_num
        self._model = model

    def initialize_model(self, model):
        # BUG FIX: the method was declared without ``self``
        # (``def initialize_model(model):``) and the body never assigned,
        # so calling it raised and never bound the model.
        self._model = model

    def merge_class_threshold(self, modal_ids=[]):
        """Average the selected modals' class thresholds: modal_num x 19 --> 19."""
        assert self._model is not None, "[ERROR] Deeplab Model not initialize before using!"
        _class_threshold_group = self._model.class_threshold_group[modal_ids]
        return torch.mean(_class_threshold_group, dim=0)

    def merge_clu_threshold(self, clu_threshold, modal_ids=[]):
        """Average the selected rows of an externally supplied threshold tensor."""
        _clu_threshold_group = clu_threshold[modal_ids]
        return torch.mean(_clu_threshold_group, dim=0)

    def merge_objective_vectors(self, modal_ids=[]):
        """Concatenate the selected modals' cluster centers per class.

        modal_num x 19 x 256 --> 19 x modal_num x 256 --> 19 x (modal_num * 256)
        """
        assert self._model is not None, "[ERROR] Deeplab Model not initialize before using!"
        _modal_num, _cate_num, _feat_dim = self._model.objective_vectors_group.size()
        _objective_vectors = self._model.objective_vectors_group[modal_ids]
        # BUG FIX: indexing a 3-D group with a list of ids yields a 3-D
        # tensor; the original ``assert dim() == 4`` always failed here.
        assert _objective_vectors.dim() == 3, "objective_vector dimension != 3"
        _objective_vectors = _objective_vectors.permute(1, 0, 2).contiguous()
        return _objective_vectors.view(_cate_num, -1)
class CustomMetrics():
    """Recall-oriented bookkeeping for threshold- and cluster-based pseudo labels.

    Maintains two runningScore trackers plus per-class recall accumulators
    (columns: [matched, ground-truth total, predicted total]) for the
    probability-threshold labels and the cluster-distance labels.
    """

    def __init__(self, numbers=19, modal_num=3, model=None):
        self.class_numbers = numbers
        self.classes_recall_thr = np.zeros([19, 3])
        self.classes_recall_thr_num = np.zeros([19])
        self.classes_recall_clu = np.zeros([19, 3])
        self.classes_recall_clu_num = np.zeros([19])
        self.running_metrics_val_threshold = runningScore(self.class_numbers)
        self.running_metrics_val_clusters = runningScore(self.class_numbers)
        # Per-(modal, class) margin required between best and second-best
        # cluster distance before a pixel is trusted.
        self.clu_threshold = torch.full((modal_num + 1, 19), 2.5).cuda()
        self.multimodal_merger = CustomMetricsMultimodalMerger(
            modal_num=modal_num + 1, category_num=numbers, model=model
        )

    def update(self, feat_cls, outputs, labels, modal_ids=[0,]):
        '''calculate accuracy. caring about recall but not IoU'''
        batch, width, height = labels.shape
        labels = labels.reshape([batch, 1, width, height]).float()
        labels = F.interpolate(labels, size=feat_cls.size()[2:], mode='nearest')

        # ---- threshold-based pseudo labels ----
        outputs_threshold = outputs.clone()
        outputs_threshold = F.softmax(outputs_threshold, dim=1)
        self.running_metrics_val_threshold.update(labels, outputs_threshold.argmax(1))
        # BUG FIX: the keyword argument referenced the undefined name
        # ``modal_idx``; the parameter is ``modal_ids``.
        _class_threshold_set = self.multimodal_merger.merge_class_threshold(modal_ids=modal_ids)
        for i in range(19):
            outputs_threshold[:, i, :, :] = torch.where(outputs_threshold[:, i, :, :] > _class_threshold_set[i], torch.Tensor([1]).cuda(), torch.Tensor([0]).cuda())
        # Append a 0.2 "reject" channel so pixels passing no class threshold
        # argmax to index 19, which is mapped to the ignore label.
        _batch, _channel, _w, _h = outputs_threshold.shape
        _tmp = torch.full([_batch, 1, _w, _h], 0.2,).cuda()
        _tmp = torch.cat((outputs_threshold, _tmp), 1)
        threshold_arg = _tmp.argmax(1, keepdim=True)
        threshold_arg[threshold_arg == 19] = 250  # ignore index

        truth, pred_all, truth_all = self.calc_recall(labels.cpu().int().numpy(), threshold_arg.cpu().int().numpy())
        self.classes_recall_thr[:, 0] += truth
        self.classes_recall_thr[:, 2] += pred_all
        self.classes_recall_thr[:, 1] += truth_all

        # ---- cluster-distance pseudo labels ----
        outputs_cluster = outputs.clone()
        # BUG FIX: same undefined ``modal_idx`` -> ``modal_ids``.
        _objective_vectors_set = self.multimodal_merger.merge_objective_vectors(modal_ids=modal_ids)
        for i in range(19):
            # NOTE(review): the (128, 256) expansion hard-codes the feature
            # map size -- confirm it matches feat_cls spatial dims upstream.
            outputs_cluster[:, i, :, :] = torch.norm( _objective_vectors_set[i].reshape(-1,1,1).expand(-1,128,256) - feat_cls, 2, dim=1,)
        outputs_cluster_min, outputs_cluster_arg = outputs_cluster.min(dim=1, keepdim=True)
        # scatter_ is in-place: masks out the best class so the next min is
        # the second-best distance.
        outputs_cluster_second = outputs_cluster.scatter_(1, outputs_cluster_arg, 100)
        if torch.unique(outputs_cluster_second.argmax(1) - outputs_cluster_arg.squeeze()).squeeze().item() != 0:
            raise NotImplementedError('wrong when computing L2 norm!!')
        outputs_cluster_secondmin, outputs_cluster_secondarg = outputs_cluster_second.min(dim=1, keepdim=True)
        self.running_metrics_val_clusters.update(labels, outputs_cluster_arg)

        # (leftover pdb.set_trace() removed -- it halted every update call)
        _clu_thresholds = self.multimodal_merger.merge_clu_threshold(self.clu_threshold, modal_ids=modal_ids)

        # Reject pixels whose best/second-best margin is below threshold.
        outputs_cluster_arg[(outputs_cluster_secondmin - outputs_cluster_min) < _clu_thresholds] = 250
        truth, pred_all, truth_all = self.calc_recall(labels.cpu().int().numpy(), outputs_cluster_arg.cpu().int().numpy())
        self.classes_recall_clu[:, 0] += truth
        self.classes_recall_clu[:, 2] += pred_all
        self.classes_recall_clu[:, 1] += truth_all
        return threshold_arg, outputs_cluster_arg

    def calc_recall(self, gt, argmax):
        """Per-class counts: (correct predictions, total predictions, total ground truth)."""
        truth = np.zeros([self.class_numbers])
        pred_all = np.zeros([self.class_numbers])
        truth_all = np.zeros([self.class_numbers])
        for i in range(self.class_numbers):
            truth[i] = (gt == i)[argmax == i].sum()
            pred_all[i] = (argmax == i).sum()
            truth_all[i] = (gt == i).sum()
        return truth, pred_all, truth_all

    def calc_mean_Clu_recall(self, ):
        # mean over classes of matched / ground-truth-total
        return np.mean(self.classes_recall_clu[:, 0] / self.classes_recall_clu[:, 1])

    def calc_mean_Thr_recall(self, ):
        return np.mean(self.classes_recall_thr[:, 0] / self.classes_recall_thr[:, 1])

    def reset(self, ):
        """Clear both runningScore trackers and the recall accumulators."""
        self.running_metrics_val_clusters.reset()
        self.running_metrics_val_threshold.reset()
        self.classes_recall_clu = np.zeros([19, 3])
        self.classes_recall_thr = np.zeros([19, 3])
class CustomModel():
    def __init__(self, cfg, writer, logger, use_pseudo_label=False, modal_num=3):
        """Build the stage-1 adaptation system: multimodal DeepLab generator,
        one discriminator per modal branch (+1 for the fused branch), and
        their optimizers/schedulers.

        Args:
            cfg: parsed training configuration (dict-like).
            writer: tensorboard writer.
            logger: logging handle.
            use_pseudo_label: also build a frozen PredNet for pseudo labels.
            modal_num: number of modal branches (the fused branch adds one).
        """
        self.cfg = cfg
        self.writer = writer
        self.class_numbers = 19
        self.logger = logger
        cfg_model = cfg['model']
        self.cfg_model = cfg_model
        self.best_iou = -100
        self.iter = 0
        self.nets = []
        self.split_gpu = 0
        self.default_gpu = cfg['model']['default_gpu']
        self.PredNet_Dir = None
        self.valid_classes = cfg['training']['valid_classes']
        self.G_train = True
        self.cls_feature_weight = cfg['training']['cls_feature_weight']
        self.use_pseudo_label = use_pseudo_label
        self.modal_num = modal_num

        # cluster vectors & cuda initialization
        # (modal_num + 1) rows: one per modal branch plus the fused branch.
        self.objective_vectors_group = torch.zeros(self.modal_num + 1, 19, 256).cuda()
        self.objective_vectors_num_group = torch.zeros(self.modal_num + 1, 19).cuda()
        self.objective_vectors_dis_group = torch.zeros(self.modal_num + 1, 19, 19).cuda()
        self.class_threshold_group = torch.full([self.modal_num + 1, 19], 0.95).cuda()

        #self.metrics = CustomMetrics(self.class_numbers)
        self.metrics = CustomMetrics(self.class_numbers, modal_num=self.modal_num, model=self)

        # Select the batch-norm flavor for the backbone.
        bn = cfg_model['bn']
        if bn == 'sync_bn':
            BatchNorm = SynchronizedBatchNorm2d
        elif bn == 'bn':
            BatchNorm = nn.BatchNorm2d
        elif bn == 'gn':
            BatchNorm = nn.GroupNorm
        else:
            raise NotImplementedError('batch norm choice {} is not implemented'.format(bn))
        if use_pseudo_label:
            # Frozen prediction network used only to generate pseudo labels.
            self.PredNet = DeepLab(
                num_classes=19,
                backbone=cfg_model['basenet']['version'],
                output_stride=16,
                bn=cfg_model['bn'],
                freeze_bn=True,
                modal_num=self.modal_num
            ).cuda()
            self.load_PredNet(cfg, writer, logger, dir=None, net=self.PredNet)
            self.PredNet_DP = self.init_device(self.PredNet, gpu_id=self.default_gpu, whether_DP=True)
            self.PredNet.eval()
            self.PredNet_num = 0

        # Trainable segmentation network (generator).
        self.BaseNet = DeepLab(
            num_classes=19,
            backbone=cfg_model['basenet']['version'],
            output_stride=16,
            bn=cfg_model['bn'],
            freeze_bn=True,
            modal_num=self.modal_num
        )

        logger.info('the backbone is {}'.format(cfg_model['basenet']['version']))

        self.BaseNet_DP = self.init_device(self.BaseNet, gpu_id=self.default_gpu, whether_DP=True)
        self.nets.extend([self.BaseNet])
        self.nets_DP = [self.BaseNet_DP]

        # Discriminator
        # One discriminator per modal prediction plus one for the fused output.
        self.SOURCE_LABEL = 0
        self.TARGET_LABEL = 1
        self.DNets = []
        self.DNets_DP = []
        for _ in range(self.modal_num+1):
            _net_d = FCDiscriminator(inplanes=19)
            self.DNets.append(_net_d)
            _net_d_DP = self.init_device(_net_d, gpu_id=self.default_gpu, whether_DP=True)
            self.DNets_DP.append(_net_d_DP)

        self.nets.extend(self.DNets)
        self.nets_DP.extend(self.DNets_DP)

        # Optimizers: SGD for the generator, Adam for the discriminators.
        self.optimizers = []
        self.schedulers = []
        optimizer_cls = torch.optim.SGD
        optimizer_params = {k:v for k, v in cfg['training']['optimizer'].items()
                            if k != 'name'}

        optimizer_cls_D = torch.optim.Adam
        optimizer_params_D = {k:v for k, v in cfg['training']['optimizer_D'].items()
                              if k != 'name'}

        if self.use_pseudo_label:
            self.BaseOpti = optimizer_cls(self.BaseNet.parameters(), **optimizer_params)
        else:
            # optim_parameters splits backbone/head into per-group lrs.
            self.BaseOpti = optimizer_cls(self.BaseNet.optim_parameters(cfg['training']['optimizer']['lr']), **optimizer_params)
        self.optimizers.extend([self.BaseOpti])

        self.DiscOptis = []
        for _d_net in self.DNets:
            self.DiscOptis.append(
                optimizer_cls_D(_d_net.parameters(), **optimizer_params_D)
            )
        self.optimizers.extend(self.DiscOptis)

        self.schedulers = []
        if self.use_pseudo_label:
            self.BaseSchedule = get_scheduler(self.BaseOpti, cfg['training']['lr_schedule'])
            self.schedulers.extend([self.BaseSchedule])
        else:
            """BaseSchedule detail see FUNC: scheduler_step()"""
            # Manual poly decay (lr_poly) is used instead of this scheduler.
            self.learning_rate = cfg['training']['optimizer']['lr']
            self.gamma = cfg['training']['lr_schedule']['gamma']
            self.num_steps = cfg['training']['lr_schedule']['max_iter']
            self._BaseSchedule_nouse = get_scheduler(self.BaseOpti, cfg['training']['lr_schedule'])
            self.schedulers.extend([self._BaseSchedule_nouse])

        self.DiscSchedules = []
        for _disc_opt in self.DiscOptis:
            self.DiscSchedules.append(
                get_scheduler(_disc_opt, cfg['training']['lr_schedule'])
            )
        self.schedulers.extend(self.DiscSchedules)

        # Weight init + optional checkpoint resume.
        self.setup(cfg, writer, logger)

        self.adv_source_label = 0
        self.adv_target_label = 1
        self.bceloss = nn.BCEWithLogitsLoss(reduce=False)
        self.loss_fn = get_loss_function(cfg)
        self.mseloss = nn.MSELoss()
        self.l1loss = nn.L1Loss()
        self.smoothloss = nn.SmoothL1Loss()
        self.triplet_loss = nn.TripletMarginLoss()
def create_PredNet(self,):
ss = DeepLab(
num_classes=19,
backbone=self.cfg_model['basenet']['version'],
output_stride=16,
bn=self.cfg_model['bn'],
freeze_bn=True,
modal_num=self.modal_num,
).cuda()
ss.eval()
return ss
def setup(self, cfg, writer, logger):
'''
set optimizer and load pretrained model
'''
for net in self.nets:
# name = net.__class__.__name__
self.init_weights(cfg['model']['init'], logger, net)
print("Initializition completed")
if hasattr(net, '_load_pretrained_model') and cfg['model']['pretrained']:
print("loading pretrained model for {}".format(net.__class__.__name__))
net._load_pretrained_model()
'''load pretrained model
'''
if cfg['training']['resume_flag']:
self.load_nets(cfg, writer, logger)
pass
def lr_poly(self):
return self.learning_rate * ((1 - float(self.iter) / self.num_steps) ** (self.gamma))
def adjust_basenet_learning_rate(self):
lr = self.lr_poly()
self.BaseOpti.param_groups[0]['lr'] = lr
if len(self.BaseOpti.param_groups) > 1:
self.BaseOpti.param_groups[1]['lr'] = lr * 10
    def forward(self, input):
        # Forward through the (DataParallel-wrapped) base network; the
        # attention mask is discarded here.  Returns (backbone feature,
        # low-level feature, per-modal class features, per-modal logits).
        feat, feat_low, att_mask, feat_cls, output = self.BaseNet_DP(input)
        return feat, feat_low, feat_cls, output
    def forward_Up(self, input):
        # Forward pass plus bilinear upsampling of the fused prediction
        # (last element of ``outputs``) back to the input resolution.
        feat, feat_low, feat_cls, outputs = self.forward(input)
        output = F.interpolate(outputs[-1], size=input.size()[2:], mode='bilinear', align_corners=True)
        return feat, feat_low, feat_cls, output
    def PredNet_Forward(self, input):
        # Inference-only pass through the frozen pseudo-label network.
        # NOTE(review): ``return _, _, ...`` re-returns the last value bound
        # to ``_`` (feat_low) in both slots -- callers appear to ignore the
        # first two entries; confirm before relying on them.
        with torch.no_grad():
            _, _, att_mask, feat_cls, output_result = self.PredNet_DP(input)
        return _, _, feat_cls, output_result
    def calculate_mean_vector(self, feat_cls, outputs, labels, ):
        # Compute per-image, per-class mean feature vectors ("objective
        # vectors").  A class contributes only where the prediction argmax
        # agrees with the label, and only if it covers at least 10 pixels.
        outputs_softmax = F.softmax(outputs, dim=1)
        outputs_argmax = outputs_softmax.argmax(dim=1, keepdim=True)
        # process_label is defined outside this view -- presumably a one-hot
        # expansion over the class axis; TODO confirm.
        outputs_argmax = self.process_label(outputs_argmax.float())
        labels_expanded = self.process_label(labels)
        # Mask of pixels where prediction and label agree, per class channel.
        outputs_pred = labels_expanded * outputs_argmax
        # Fraction of the feature map each class occupies (avg-pool to 1x1).
        scale_factor = F.adaptive_avg_pool2d(outputs_pred, 1)
        vectors = []
        ids = []
        for n in range(feat_cls.size()[0]):
            for t in range(self.class_numbers):
                if scale_factor[n][t].item()==0:
                    continue
                # Skip classes with too few agreeing pixels to be reliable.
                if (outputs_pred[n][t] > 0).sum() < 10:
                    continue
                s = feat_cls[n] * outputs_pred[n][t]
                # Area-proportional rescaling before pooling (the factor 2 is
                # empirical here -- no grounding in this view).
                scale = torch.sum(outputs_pred[n][t]) / labels.shape[2] / labels.shape[3] * 2
                s = normalisation_pooling()(s, scale)
                # Masked mean: pooled sum divided by the class's area fraction.
                s = F.adaptive_avg_pool2d(s, 1) / scale_factor[n][t]
                vectors.append(s)
                ids.append(t)
        return vectors, ids
    def step(self, source_x, source_label, source_modal_ids, target_x, target_label, target_modal_ids, use_pseudo_loss=False):
        """Run one adversarial-training iteration.

        Computes the supervised segmentation loss on source data plus an
        adversarial loss that pushes target-domain predictions to look
        source-like (generator phase), then trains one discriminator per
        modal branch to separate source from target outputs.

        Returns (loss_GTA, loss_adv, loss_D_comb): segmentation loss,
        generator adversarial loss, combined discriminator loss.
        NOTE(review): target_label and use_pseudo_loss are unused here.
        """
        assert len(source_modal_ids) == source_x.size(0), "modal_ids' batchsize != source_x's batchsize"
        _, _, source_feat_cls, source_output = self.forward(input=source_x)
        """source_output: [B x 19 x W x H, ...]
            select modal-branch output in each batchsize
            Specific-modal output
        """
        # pick, for each sample, the logits of the modal branch it belongs to
        source_output_modal_k = torch.stack(
            [
                source_output[_modal_i][_batch_i]
                for _batch_i, _modal_i in enumerate(source_modal_ids)
            ],
            dim=0,
        )
        # attention output & specific-modal output
        source_output_comb = torch.cat([source_output_modal_k, source_output[-1]], dim=0)
        # labels duplicated to match the doubled (modal-specific + fused) batch
        source_label_comb = torch.cat([source_label, source_label.clone()], dim=0)
        source_outputUp = F.interpolate(source_output_comb, size=source_x.size()[-2:], mode='bilinear', align_corners=True)
        loss_GTA = self.loss_fn(input=source_outputUp, target=source_label_comb)
        #self.PredNet.eval()
        # adversarial loss
        # -----------------------------
        """Generator (segmentation)"""
        # -----------------------------
        # On Source Domain
        loss_adv = torch.Tensor([0]).cuda()
        _batch_size = 0
        _, _, _, target_output = self.forward(target_x)
        target_modal_ids_tensor = torch.Tensor(target_modal_ids).cuda()
        for t_out, _d_net_DP, _d_net, modal_idx in zip(target_output, self.DNets_DP, self.DNets, range(len(target_output))):
            # set grad false
            self.set_requires_grad(self.logger, _d_net, requires_grad = False)
            # true/false discriminator
            t_D_out = _d_net_DP(F.softmax(t_out))
            #source_modal_ids
            # per-sample BCE against the "source" (1.0) label, averaged over CHW
            loss_temp = torch.mean(self.bceloss(
                t_D_out,
                torch.FloatTensor(t_D_out.data.size()).fill_(1.0).cuda()
            ), [1,2,3])
            if modal_idx >= self.modal_num:
                # fused/attention branch: every sample contributes
                loss_adv += torch.mean(loss_temp)
            elif torch.mean(torch.as_tensor((modal_idx==target_modal_ids_tensor), dtype=torch.float32)) == 0:
                # no sample of this modality in the batch
                loss_adv += 0.0
            else:
                # average only over samples that belong to this modality
                loss_adv += torch.mean(torch.masked_select(loss_temp, target_modal_ids_tensor==modal_idx))
            _batch_size += t_out.size(0)
        #loss_adv /= _batch_size
        loss_adv *= self.cfg['training']['loss_adv_lambda']
        loss_G = torch.Tensor([0]).cuda()
        loss_G = loss_G + loss_GTA + loss_adv
        self.BaseOpti.zero_grad()
        if loss_G.item() != 0:
            loss_G.backward()
        self.BaseOpti.step()
        # -----------------------------
        """Discriminator """
        # -----------------------------
        _batch_size = 0
        loss_D_comb = torch.Tensor([0]).cuda()
        source_modal_ids_tensor = torch.Tensor(source_modal_ids).cuda()
        for s_out, t_out, _d_net_DP, _d_net, _disc_opt, modal_idx in zip(source_output, target_output, self.DNets_DP, self.DNets, self.DiscOptis, range(len(source_output))):
            self.set_requires_grad(self.logger, _d_net, requires_grad = True)
            _batch_size = 0
            loss_D = torch.Tensor([0]).cuda()
            # source domain
            s_D_out = _d_net_DP(F.softmax(s_out.detach()))
            loss_temp_s = torch.mean(self.bceloss(
                s_D_out,
                torch.FloatTensor(s_D_out.data.size()).fill_(1.0).cuda()
            ), [1,2,3])
            if modal_idx >= self.modal_num:
                loss_D += torch.mean(loss_temp_s)
            elif torch.mean(torch.as_tensor((modal_idx==source_modal_ids_tensor), dtype=torch.float32)) == 0:
                loss_D += 0.0
            else:
                loss_D += torch.mean(torch.masked_select(loss_temp_s, source_modal_ids_tensor==modal_idx))
            # target domain
            _batch_size += (s_out.size(0) + t_out.size(0))
            t_D_out = _d_net_DP(F.softmax(t_out.detach()))
            loss_temp_t = torch.mean(self.bceloss(
                t_D_out,
                torch.FloatTensor(t_D_out.data.size()).fill_(0.0).cuda()
            ), [1,2,3])
            if modal_idx >= self.modal_num:
                loss_D += torch.mean(loss_temp_t)
            elif torch.mean(torch.as_tensor((modal_idx==target_modal_ids_tensor), dtype=torch.float32)) == 0:
                loss_D += 0.0
            else:
                loss_D += torch.mean(torch.masked_select(loss_temp_t, target_modal_ids_tensor==modal_idx))
            loss_D *= self.cfg['training']['loss_adv_lambda']*0.5
            loss_D_comb += loss_D
            _disc_opt.zero_grad()
            # NOTE(review): backward() is invoked on the *accumulated*
            # loss_D_comb inside the loop; on the second iteration this
            # re-traverses the already-backpropagated graph of the previous
            # discriminators — confirm this is intended (vs. loss_D.backward()).
            if loss_D_comb.item() != 0:
                loss_D_comb.backward()
            _disc_opt.step()
        return loss_GTA, loss_adv, loss_D_comb
def process_label(self, label):
batch, channel, w, h = label.size()
pred1 = torch.zeros(batch, 20, w, h).cuda()
id = torch.where(label < 19, label, torch.Tensor([19]).cuda())
pred1 = pred1.scatter_(1, id.long(), 1)
return pred1
def class_vectors_alignment(self, ids, vectors, modal_ids=[0,]):
#loss = torch.Tensor([0]).cuda(self.default_gpu)
loss = torch.Tensor([0]).cuda()
"""construct category objective vectors"""
# objective_vectors_group 2 x 19 x 256 --> 19 x 512
_objective_vectors_set = self.metrics.multimodal_merger.merge_objective_vectors(modal_ids=modal_idx)
for i in range(len(ids)):
if ids[i] not in self.valid_classes:
continue
new_loss = self.smoothloss(vectors[i].squeeze().cuda(), _objective_vectors[ids[i]])
while (new_loss.item() > 5):
new_loss = new_loss / 10
loss = loss + new_loss
loss = loss / len(ids) * 10
return loss
def freeze_bn_apply(self):
for net in self.nets:
net.apply(freeze_bn)
for net in self.nets_DP:
net.apply(freeze_bn)
def scheduler_step(self):
if self.use_pseudo_label:
for scheduler in self.schedulers:
scheduler.step()
else:
"""skipped _BaseScheduler_nouse"""
for scheduler in self.schedulers[1:]:
scheduler.step()
# baseNet scheduler
self.adjust_basenet_learning_rate()
def optimizer_zerograd(self):
for optimizer in self.optimizers:
optimizer.zero_grad()
def optimizer_step(self):
for opt in self.optimizers:
opt.step()
def init_device(self, net, gpu_id=None, whether_DP=False):
gpu_id = gpu_id or self.default_gpu
device = torch.device("cuda:{}".format(gpu_id) if torch.cuda.is_available() else 'cpu')
net = net.to(device)
# if torch.cuda.is_available():
if whether_DP:
net = DataParallelWithCallback(net, device_ids=range(torch.cuda.device_count()))
return net
    def eval(self, net=None, logger=None):
        """Make specific models eval mode during test time.

        With net=None every registered network (raw and DataParallel-wrapped)
        is switched to eval mode; otherwise only *net* is.
        """
        if net == None:
            for net in self.nets:
                net.eval()
            for net in self.nets_DP:
                net.eval()
            if logger!=None:
                logger.info("Successfully set the model eval mode")
        else:
            net.eval()
            if logger!=None:
                # NOTE(review): *logger* is invoked directly here (as a
                # callable) while the branch above uses logger.info — confirm
                # whether a plain logging.Logger is ever passed on this path.
                logger("Successfully set {} eval mode".format(net.__class__.__name__))
        return
def train(self, net=None, logger=None):
if net==None:
for net in self.nets:
net.train()
for net in self.nets_DP:
net.train()
else:
net.train()
return
def set_requires_grad(self, logger, net, requires_grad = False):
"""Set requires_grad=Fasle for all the networks to avoid unnecessary computations
Parameters:
net (BaseModel) -- the network which will be operated on
requires_grad (bool) -- whether the networks require gradients or not
"""
for parameter in net.parameters():
parameter.requires_grad = requires_grad
    def set_requires_grad_layer(self, logger, net, layer_type='batchnorm', requires_grad=False):
        ''' set specific type of layers whether needing grad

        NOTE(review): the *net* argument is ignored — the loop below rebinds
        ``net`` while walking ``self.nets``, so the toggle is applied to every
        registered network, and only to each matching layer's ``weight``
        (never its ``bias``). Confirm this is intended before relying on it.
        '''
        # print('Warning: all the BatchNorm params are fixed!')
        # logger.info('Warning: all the BatchNorm params are fixed!')
        for net in self.nets:
            for _i in net.modules():
                # case-insensitive substring match on the class name, e.g.
                # 'batchnorm' hits BatchNorm2d and SynchronizedBatchNorm2d
                if _i.__class__.__name__.lower().find(layer_type.lower()) != -1:
                    _i.weight.requires_grad = requires_grad
        return
    def init_weights(self, cfg, logger, net, init_type='normal', init_gain=0.02):
        """Initialize network weights.

        Parameters:
            cfg (dict)        -- init config; may override init_type / init_gain
            net (network)     -- network to be initialized
            init_type (str)   -- initialization method: normal | xavier | kaiming | orthogonal
            init_gain (float) -- scaling factor for normal, xavier and orthogonal.

        We use 'normal' in the original pix2pix and CycleGAN paper. But xavier
        and kaiming might work better for some applications. Feel free to try
        yourself.
        """
        init_type = cfg.get('init_type', init_type)
        init_gain = cfg.get('init_gain', init_gain)
        def init_func(m): # define the initialization function
            classname = m.__class__.__name__
            if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
                if init_type == 'normal':
                    nn.init.normal_(m.weight.data, 0.0, init_gain)
                elif init_type == 'xavier':
                    nn.init.xavier_normal_(m.weight.data, gain=init_gain)
                elif init_type == 'kaiming':
                    nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
                elif init_type == 'orthogonal':
                    nn.init.orthogonal_(m.weight.data, gain=init_gain)
                else:
                    raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
                if hasattr(m, 'bias') and m.bias is not None:
                    nn.init.constant_(m.bias.data, 0.0)
            elif isinstance(m, SynchronizedBatchNorm2d) or classname.find('BatchNorm2d') != -1 \
                or isinstance(m, nn.GroupNorm):
                # norm layers: affine weight to 1, bias to 0
                m.weight.data.fill_(1)
                m.bias.data.zero_()  # BatchNorm Layer's weight is not a matrix; only normal distribution applies.
        print('initialize {} with {}'.format(init_type, net.__class__.__name__))
        logger.info('initialize {} with {}'.format(init_type, net.__class__.__name__))
        net.apply(init_func)  # apply the initialization function <init_func>
        pass
def adaptive_load_nets(self, net, model_weight):
model_dict = net.state_dict()
pretrained_dict = {k : v for k, v in model_weight.items() if k in model_dict}
# print("[INFO] Pretrained dict:", pretrained_dict.keys())
model_dict.update(pretrained_dict)
net.load_state_dict(model_dict)
    def load_nets(self, cfg, writer, logger): # load pretrained weights on the net
        """Resume every registered network (and optionally its optimizer and
        scheduler state) from the checkpoint at cfg['training']['resume'].

        Checkpoints keyed by class name may store a list when several nets
        share a class (e.g. multiple discriminators); net_state_no tracks the
        per-class position. Raises Exception when the file does not exist.
        """
        if os.path.isfile(cfg['training']['resume']):
            logger.info(
                "Loading model and optimizer from checkpoint '{}'".format(cfg['training']['resume'])
            )
            checkpoint = torch.load(cfg['training']['resume'])
            _k = -1
            # per-class occurrence counter, used to index list-style entries
            net_state_no = {}
            for net in self.nets:
                name = net.__class__.__name__
                if name not in net_state_no:
                    net_state_no[name] = 0
                else:
                    net_state_no[name] += 1
                _k += 1
                if checkpoint.get(name) == None:
                    continue
                # discriminators are only restored when gan_resume is set
                if name.find('FCDiscriminator') != -1 and cfg['training']['gan_resume'] == False:
                    continue
                if isinstance(checkpoint[name], list):
                    self.adaptive_load_nets(net, checkpoint[name][net_state_no[name]]["model_state"])
                else:
                    print("*****************************************")
                    print("[WARNING] Using depreciated load version! Model {}".format(name))
                    print("*****************************************")
                    self.adaptive_load_nets(net, checkpoint[name]["model_state"])
                if cfg['training']['optimizer_resume']:
                    if isinstance(checkpoint[name], list):
                        self.adaptive_load_nets(self.optimizers[_k], checkpoint[name][net_state_no[name]]["optimizer_state"])
                        self.adaptive_load_nets(self.schedulers[_k], checkpoint[name][net_state_no[name]]["scheduler_state"])
                    else:
                        self.adaptive_load_nets(self.optimizers[_k], checkpoint[name]["optimizer_state"])
                        self.adaptive_load_nets(self.schedulers[_k], checkpoint[name]["scheduler_state"])
            self.iter = checkpoint["iter"]
            #self.best_iou = checkpoint['best_iou']
            logger.info(
                "Loaded checkpoint '{}' (iter {})".format(
                    cfg['training']['resume'], checkpoint["iter"]
                )
            )
        else:
            raise Exception("No checkpoint found at '{}'".format(cfg['training']['resume']))
    def load_PredNet(self, cfg, writer, logger, dir=None, net=None): # load pretrained weights on the net
        """Load the pseudo-label prediction network from *dir* (defaults to
        cfg['training']['Pred_resume']) and return the checkpoint's best_iou.

        Returns early (None) when the checkpoint has no entry for the net's
        class, or when it is a discriminator and gan_resume is disabled.
        Raises Exception when the checkpoint file does not exist.
        """
        dir = dir or cfg['training']['Pred_resume']
        best_iou = 0
        if os.path.isfile(dir):
            logger.info(
                "Loading model and optimizer from checkpoint '{}'".format(dir)
            )
            checkpoint = torch.load(dir)
            name = net.__class__.__name__
            if checkpoint.get(name) == None:
                return
            if name.find('FCDiscriminator') != -1 and cfg['training']['gan_resume'] == False:
                return
            # list-style checkpoints store one entry per net of this class;
            # only the first is used for the PredNet
            if isinstance(checkpoint[name], list):
                self.adaptive_load_nets(net, checkpoint[name][0]["model_state"])
            else:
                self.adaptive_load_nets(net, checkpoint[name]["model_state"])
            iter = checkpoint["iter"]
            best_iou = checkpoint['best_iou']
            logger.info(
                "Loaded checkpoint '{}' (iter {}) (best iou {}) for PredNet".format(
                    dir, checkpoint["iter"], best_iou
                )
            )
        else:
            raise Exception("No checkpoint found at '{}'".format(dir))
        if hasattr(net, 'best_iou'):
            # intentionally not restored onto the net; best_iou is returned instead
            #net.best_iou = best_iou
            pass
        return best_iou
    def set_optimizer(self, optimizer): #set optimizer to all nets
        # NOTE(review): intentionally a no-op stub — optimizers are built in
        # __init__; the method is kept for interface compatibility.
        pass
def reset_objective_SingleVector(self,):
self.objective_vectors_group = torch.zeros(self.modal_num + 1, 19, 256).cuda()
self.objective_vectors_num_group = torch.zeros(self.modal_num + 1, 19).cuda()
self.objective_vectors_dis_group = torch.zeros(self.modal_num + 1, 19, 19).cuda()
def update_objective_SingleVector(self, vectors, vectors_num, name='moving_average'):
#vector = vector.squeeze().detach()
if torch.sum(vectors) == 0:
return
if name == 'moving_average':
self.objective_vectors_group = self.objective_vectors_group * 0.9999 + 0.0001 * vectors
self.objective_vectors_num_group += vectors_num
self.objective_vectors_num_group = min(self.objective_vectors_num_group, 3000)
elif name == 'mean':
self.objective_vectors_group = self.objective_vectors_group * self.objective_vectors_num_group + vectors
self.objective_vectors_num_group += vectors_num
self.objective_vectors_group = self.objective_vectors_group / self.objective_vectors_num_group
self.objective_vectors_num_group = min(self.objective_vectors_num_group, 3000)
else:
raise NotImplementedError('no such updating way of objective vectors {}'.format(name))
def grad_reverse(x):
    """Gradient-reversal wrapper around the GradReverse autograd Function.

    NOTE(review): instantiating and calling an autograd Function
    (``GradReverse()(x)``) is the legacy pre-0.4 PyTorch API; modern versions
    expect ``GradReverse.apply(x)`` — confirm against the GradReverse
    definition in .utils before upgrading torch.
    """
    return GradReverse()(x)
| 43.293059 | 173 | 0.598064 | from models.base_model import BaseModel
import torch.nn as nn
import torch.nn.functional as F
import os, sys
import torch
import numpy as np
import itertools
from torch.autograd import Variable
from optimizers import get_optimizer
from schedulers import get_scheduler
from models.sync_batchnorm import SynchronizedBatchNorm2d, DataParallelWithCallback
from models.deeplab_multimodal import DeepLab
from models.decoder import Decoder
from models.aspp import ASPP
from models.discriminator import FCDiscriminator, FCDiscriminator_low, FCDiscriminator_out, FCDiscriminator_class
from loss import get_loss_function
from .utils import freeze_bn, GradReverse, normalisation_pooling
from metrics import runningScore
import pdb
def multimodal_merger(multi_modal_data, is_upsample=False, up_size=None):
    """Fuse per-modal network outputs into a single prediction.

    Expects multi_modal_data with 'feat_cls' (list of feature tensors,
    concatenated on the channel axis) and 'output' (list of logit tensors,
    summed element-wise; optionally upsampled in place first).
    Returns a dict with 'feat_cls', 'feat_cls_cat', 'output_comb', 'output'.
    """
    per_modal_feats = multi_modal_data['feat_cls']
    fused_feats = torch.cat(per_modal_feats, 1)
    logits_list = multi_modal_data['output']
    summed_logits = 0
    for idx in range(len(logits_list)):
        if is_upsample:
            # in-place list update mirrors the original side effect
            logits_list[idx] = F.interpolate(logits_list[idx], size=up_size, mode='bilinear', align_corners=True)
        summed_logits = summed_logits + logits_list[idx]
    return {
        'feat_cls': per_modal_feats,
        'feat_cls_cat': fused_feats,
        'output_comb': summed_logits,
        'output': logits_list,
    }
class CustomMetricsMultimodalMerger():
    """Merges per-modal statistics (confidence thresholds, clustering
    thresholds, prototype vectors) of a multi-branch model into single
    per-class tensors.

    Attributes:
        modal_num    -- number of modal branches (including the fused branch)
        category_num -- number of semantic classes
        _model       -- owning model exposing the *_group tensors
    """
    def __init__(self, modal_num, category_num, model):
        self.modal_num = modal_num
        self.category_num = category_num
        self._model = model

    def initialize_model(self, model):
        # BUGFIX: original signature was ``initialize_model(model)`` (missing
        # ``self``), which made the method uncallable on an instance.
        self._model = model

    def merge_class_threshold(self, modal_ids=[]):
        """Average the pseudo-label confidence thresholds of the selected
        modal branches; returns a (category_num,) tensor."""
        assert self._model is not None, "[ERROR] Deeplab Model not initialize before using!"
        _class_threshold_group = self._model.class_threshold_group[modal_ids]
        return torch.mean(_class_threshold_group, dim=0)

    def merge_clu_threshold(self, clu_threshold, modal_ids=[]):
        """Average the clustering thresholds of the selected modal branches."""
        _clu_threshold_group = clu_threshold[modal_ids]
        return torch.mean(_clu_threshold_group, dim=0)

    def merge_objective_vectors(self, modal_ids=[]):
        """Concatenate the prototype vectors of the selected branches:
        (len(modal_ids), cate, feat) -> (cate, len(modal_ids) * feat)."""
        assert self._model is not None, "[ERROR] Deeplab Model not initialize before using!"
        _modal_num, _cate_num, _feat_dim = self._model.objective_vectors_group.size()
        _objective_vectors = self._model.objective_vectors_group[modal_ids]
        # BUGFIX: list-indexing a (modal, cate, feat) tensor yields 3 dims, so
        # the original ``dim() == 4`` assertion could never hold before the
        # 3-axis permute below.
        assert _objective_vectors.dim() == 3, "objective_vector dimension != 3"
        _objective_vectors = _objective_vectors.permute(1, 0, 2).contiguous()
        return _objective_vectors.view(_cate_num, -1)
class CustomMetrics():
    """Pseudo-label quality bookkeeping for threshold-based and cluster-based
    label selection: running mIoU scorers plus per-class recall counters
    (column 0 = matched pixels, 1 = ground-truth total, 2 = predicted total).
    """
    def __init__(self, numbers=19, modal_num=3, model=None):
        self.class_numbers = numbers
        self.classes_recall_thr = np.zeros([19, 3])
        self.classes_recall_thr_num = np.zeros([19])
        self.classes_recall_clu = np.zeros([19, 3])
        self.classes_recall_clu_num = np.zeros([19])
        self.running_metrics_val_threshold = runningScore(self.class_numbers)
        self.running_metrics_val_clusters = runningScore(self.class_numbers)
        self.clu_threshold = torch.full((modal_num + 1, 19), 2.5).cuda()
        self.multimodal_merger = CustomMetricsMultimodalMerger(
            modal_num=modal_num + 1, category_num=numbers, model=model
        )
    def update(self, feat_cls, outputs, labels, modal_ids=[0,]):
        """Score one batch and return (threshold_arg, cluster_arg): pixel-wise
        pseudo-labels from softmax thresholding and from prototype-distance
        clustering, with rejected pixels marked 250."""
        batch, width, height = labels.shape
        labels = labels.reshape([batch, 1, width, height]).float()
        labels = F.interpolate(labels, size=feat_cls.size()[2:], mode='nearest')
        outputs_threshold = outputs.clone()
        outputs_threshold = F.softmax(outputs_threshold, dim=1)
        self.running_metrics_val_threshold.update(labels, outputs_threshold.argmax(1))
        # BUGFIX: was ``modal_ids=modal_idx`` — ``modal_idx`` is undefined here.
        _class_threshold_set = self.multimodal_merger.merge_class_threshold(modal_ids=modal_ids)
        # binarize each class channel against its merged confidence threshold
        for i in range(19):
            outputs_threshold[:, i, :, :] = torch.where(outputs_threshold[:, i, :, :] > _class_threshold_set[i], torch.Tensor([1]).cuda(), torch.Tensor([0]).cuda())
        _batch, _channel, _w, _h = outputs_threshold.shape
        # extra 0.2 channel: pixels passing no class threshold fall into it
        _tmp = torch.full([_batch, 1, _w, _h], 0.2,).cuda()
        _tmp = torch.cat((outputs_threshold, _tmp), 1)
        threshold_arg = _tmp.argmax(1, keepdim=True)
        threshold_arg[threshold_arg == 19] = 250
        truth, pred_all, truth_all = self.calc_recall(labels.cpu().int().numpy(), threshold_arg.cpu().int().numpy())
        self.classes_recall_thr[:, 0] += truth
        self.classes_recall_thr[:, 2] += pred_all
        self.classes_recall_thr[:, 1] += truth_all
        outputs_cluster = outputs.clone()
        # BUGFIX: was ``modal_ids=modal_idx`` — ``modal_idx`` is undefined here.
        _objective_vectors_set = self.multimodal_merger.merge_objective_vectors(modal_ids=modal_ids)
        # per-class L2 distance between features and the merged prototypes
        for i in range(19):
            outputs_cluster[:, i, :, :] = torch.norm( _objective_vectors_set[i].reshape(-1,1,1).expand(-1,128,256) - feat_cls, 2, dim=1,)
        outputs_cluster_min, outputs_cluster_arg = outputs_cluster.min(dim=1, keepdim=True)
        # mask the best class out so the second-best distance can be found
        outputs_cluster_second = outputs_cluster.scatter_(1, outputs_cluster_arg, 100)
        if torch.unique(outputs_cluster_second.argmax(1) - outputs_cluster_arg.squeeze()).squeeze().item() != 0:
            raise NotImplementedError('wrong when computing L2 norm!!')
        outputs_cluster_secondmin, outputs_cluster_secondarg = outputs_cluster_second.min(dim=1, keepdim=True)
        self.running_metrics_val_clusters.update(labels, outputs_cluster_arg)
        # NOTE: removed a leftover ``pdb.set_trace()`` debug breakpoint and an
        # unused ``tmp_arg`` clone that existed only for debugging.
        _clu_thresholds = self.multimodal_merger.merge_clu_threshold(self.clu_threshold, modal_ids=modal_ids)
        # reject pixels whose best/second-best margin is below the threshold
        outputs_cluster_arg[(outputs_cluster_secondmin - outputs_cluster_min) < _clu_thresholds] = 250
        truth, pred_all, truth_all = self.calc_recall(labels.cpu().int().numpy(), outputs_cluster_arg.cpu().int().numpy())
        self.classes_recall_clu[:, 0] += truth
        self.classes_recall_clu[:, 2] += pred_all
        self.classes_recall_clu[:, 1] += truth_all
        return threshold_arg, outputs_cluster_arg
    def calc_recall(self, gt, argmax):
        """Per-class counts: (matched, predicted, ground-truth) pixels."""
        truth = np.zeros([self.class_numbers])
        pred_all = np.zeros([self.class_numbers])
        truth_all = np.zeros([self.class_numbers])
        for i in range(self.class_numbers):
            truth[i] = (gt == i)[argmax == i].sum()
            pred_all[i] = (argmax == i).sum()
            truth_all[i] = (gt == i).sum()
        return truth, pred_all, truth_all
    def calc_mean_Clu_recall(self, ):
        """Mean per-class recall of the cluster-based pseudo-labels."""
        return np.mean(self.classes_recall_clu[:, 0] / self.classes_recall_clu[:, 1])
    def calc_mean_Thr_recall(self, ):
        """Mean per-class recall of the threshold-based pseudo-labels."""
        return np.mean(self.classes_recall_thr[:, 0] / self.classes_recall_thr[:, 1])
    def reset(self, ):
        """Clear the running scorers and the recall counters."""
        self.running_metrics_val_clusters.reset()
        self.running_metrics_val_threshold.reset()
        self.classes_recall_clu = np.zeros([19, 3])
        self.classes_recall_thr = np.zeros([19, 3])
class CustomModel():
def __init__(self, cfg, writer, logger, use_pseudo_label=False, modal_num=3):
self.cfg = cfg
self.writer = writer
self.class_numbers = 19
self.logger = logger
cfg_model = cfg['model']
self.cfg_model = cfg_model
self.best_iou = -100
self.iter = 0
self.nets = []
self.split_gpu = 0
self.default_gpu = cfg['model']['default_gpu']
self.PredNet_Dir = None
self.valid_classes = cfg['training']['valid_classes']
self.G_train = True
self.cls_feature_weight = cfg['training']['cls_feature_weight']
self.use_pseudo_label = use_pseudo_label
self.modal_num = modal_num
self.objective_vectors_group = torch.zeros(self.modal_num + 1, 19, 256).cuda()
self.objective_vectors_num_group = torch.zeros(self.modal_num + 1, 19).cuda()
self.objective_vectors_dis_group = torch.zeros(self.modal_num + 1, 19, 19).cuda()
self.class_threshold_group = torch.full([self.modal_num + 1, 19], 0.95).cuda()
self.metrics = CustomMetrics(self.class_numbers, modal_num=self.modal_num, model=self)
bn = cfg_model['bn']
if bn == 'sync_bn':
BatchNorm = SynchronizedBatchNorm2d
elif bn == 'bn':
BatchNorm = nn.BatchNorm2d
elif bn == 'gn':
BatchNorm = nn.GroupNorm
else:
raise NotImplementedError('batch norm choice {} is not implemented'.format(bn))
if use_pseudo_label:
self.PredNet = DeepLab(
num_classes=19,
backbone=cfg_model['basenet']['version'],
output_stride=16,
bn=cfg_model['bn'],
freeze_bn=True,
modal_num=self.modal_num
).cuda()
self.load_PredNet(cfg, writer, logger, dir=None, net=self.PredNet)
self.PredNet_DP = self.init_device(self.PredNet, gpu_id=self.default_gpu, whether_DP=True)
self.PredNet.eval()
self.PredNet_num = 0
self.BaseNet = DeepLab(
num_classes=19,
backbone=cfg_model['basenet']['version'],
output_stride=16,
bn=cfg_model['bn'],
freeze_bn=True,
modal_num=self.modal_num
)
logger.info('the backbone is {}'.format(cfg_model['basenet']['version']))
self.BaseNet_DP = self.init_device(self.BaseNet, gpu_id=self.default_gpu, whether_DP=True)
self.nets.extend([self.BaseNet])
self.nets_DP = [self.BaseNet_DP]
self.SOURCE_LABEL = 0
self.TARGET_LABEL = 1
self.DNets = []
self.DNets_DP = []
for _ in range(self.modal_num+1):
_net_d = FCDiscriminator(inplanes=19)
self.DNets.append(_net_d)
_net_d_DP = self.init_device(_net_d, gpu_id=self.default_gpu, whether_DP=True)
self.DNets_DP.append(_net_d_DP)
self.nets.extend(self.DNets)
self.nets_DP.extend(self.DNets_DP)
self.optimizers = []
self.schedulers = []
optimizer_cls = torch.optim.SGD
optimizer_params = {k:v for k, v in cfg['training']['optimizer'].items()
if k != 'name'}
optimizer_cls_D = torch.optim.Adam
optimizer_params_D = {k:v for k, v in cfg['training']['optimizer_D'].items()
if k != 'name'}
if self.use_pseudo_label:
self.BaseOpti = optimizer_cls(self.BaseNet.parameters(), **optimizer_params)
else:
self.BaseOpti = optimizer_cls(self.BaseNet.optim_parameters(cfg['training']['optimizer']['lr']), **optimizer_params)
self.optimizers.extend([self.BaseOpti])
self.DiscOptis = []
for _d_net in self.DNets:
self.DiscOptis.append(
optimizer_cls_D(_d_net.parameters(), **optimizer_params_D)
)
self.optimizers.extend(self.DiscOptis)
self.schedulers = []
if self.use_pseudo_label:
self.BaseSchedule = get_scheduler(self.BaseOpti, cfg['training']['lr_schedule'])
self.schedulers.extend([self.BaseSchedule])
else:
"""BaseSchedule detail see FUNC: scheduler_step()"""
self.learning_rate = cfg['training']['optimizer']['lr']
self.gamma = cfg['training']['lr_schedule']['gamma']
self.num_steps = cfg['training']['lr_schedule']['max_iter']
self._BaseSchedule_nouse = get_scheduler(self.BaseOpti, cfg['training']['lr_schedule'])
self.schedulers.extend([self._BaseSchedule_nouse])
self.DiscSchedules = []
for _disc_opt in self.DiscOptis:
self.DiscSchedules.append(
get_scheduler(_disc_opt, cfg['training']['lr_schedule'])
)
self.schedulers.extend(self.DiscSchedules)
self.setup(cfg, writer, logger)
self.adv_source_label = 0
self.adv_target_label = 1
self.bceloss = nn.BCEWithLogitsLoss(reduce=False)
self.loss_fn = get_loss_function(cfg)
self.mseloss = nn.MSELoss()
self.l1loss = nn.L1Loss()
self.smoothloss = nn.SmoothL1Loss()
self.triplet_loss = nn.TripletMarginLoss()
def create_PredNet(self,):
ss = DeepLab(
num_classes=19,
backbone=self.cfg_model['basenet']['version'],
output_stride=16,
bn=self.cfg_model['bn'],
freeze_bn=True,
modal_num=self.modal_num,
).cuda()
ss.eval()
return ss
def setup(self, cfg, writer, logger):
for net in self.nets:
self.init_weights(cfg['model']['init'], logger, net)
print("Initializition completed")
if hasattr(net, '_load_pretrained_model') and cfg['model']['pretrained']:
print("loading pretrained model for {}".format(net.__class__.__name__))
net._load_pretrained_model()
if cfg['training']['resume_flag']:
self.load_nets(cfg, writer, logger)
pass
def lr_poly(self):
return self.learning_rate * ((1 - float(self.iter) / self.num_steps) ** (self.gamma))
def adjust_basenet_learning_rate(self):
lr = self.lr_poly()
self.BaseOpti.param_groups[0]['lr'] = lr
if len(self.BaseOpti.param_groups) > 1:
self.BaseOpti.param_groups[1]['lr'] = lr * 10
def forward(self, input):
feat, feat_low, att_mask, feat_cls, output = self.BaseNet_DP(input)
return feat, feat_low, feat_cls, output
def forward_Up(self, input):
feat, feat_low, feat_cls, outputs = self.forward(input)
output = F.interpolate(outputs[-1], size=input.size()[2:], mode='bilinear', align_corners=True)
return feat, feat_low, feat_cls, output
def PredNet_Forward(self, input):
with torch.no_grad():
_, _, att_mask, feat_cls, output_result = self.PredNet_DP(input)
return _, _, feat_cls, output_result
def calculate_mean_vector(self, feat_cls, outputs, labels, ):
outputs_softmax = F.softmax(outputs, dim=1)
outputs_argmax = outputs_softmax.argmax(dim=1, keepdim=True)
outputs_argmax = self.process_label(outputs_argmax.float())
labels_expanded = self.process_label(labels)
outputs_pred = labels_expanded * outputs_argmax
scale_factor = F.adaptive_avg_pool2d(outputs_pred, 1)
vectors = []
ids = []
for n in range(feat_cls.size()[0]):
for t in range(self.class_numbers):
if scale_factor[n][t].item()==0:
continue
if (outputs_pred[n][t] > 0).sum() < 10:
continue
s = feat_cls[n] * outputs_pred[n][t]
scale = torch.sum(outputs_pred[n][t]) / labels.shape[2] / labels.shape[3] * 2
s = normalisation_pooling()(s, scale)
s = F.adaptive_avg_pool2d(s, 1) / scale_factor[n][t]
vectors.append(s)
ids.append(t)
return vectors, ids
def step(self, source_x, source_label, source_modal_ids, target_x, target_label, target_modal_ids, use_pseudo_loss=False):
assert len(source_modal_ids) == source_x.size(0), "modal_ids' batchsize != source_x's batchsize"
_, _, source_feat_cls, source_output = self.forward(input=source_x)
source_output_modal_k = torch.stack(
[
source_output[_modal_i][_batch_i]
for _batch_i, _modal_i in enumerate(source_modal_ids)
],
dim=0,
)
source_output_comb = torch.cat([source_output_modal_k, source_output[-1]], dim=0)
source_label_comb = torch.cat([source_label, source_label.clone()], dim=0)
source_outputUp = F.interpolate(source_output_comb, size=source_x.size()[-2:], mode='bilinear', align_corners=True)
loss_GTA = self.loss_fn(input=source_outputUp, target=source_label_comb)
loss_adv = torch.Tensor([0]).cuda()
_batch_size = 0
_, _, _, target_output = self.forward(target_x)
target_modal_ids_tensor = torch.Tensor(target_modal_ids).cuda()
for t_out, _d_net_DP, _d_net, modal_idx in zip(target_output, self.DNets_DP, self.DNets, range(len(target_output))):
self.set_requires_grad(self.logger, _d_net, requires_grad = False)
t_D_out = _d_net_DP(F.softmax(t_out))
loss_temp = torch.mean(self.bceloss(
t_D_out,
torch.FloatTensor(t_D_out.data.size()).fill_(1.0).cuda()
), [1,2,3])
if modal_idx >= self.modal_num:
loss_adv += torch.mean(loss_temp)
elif torch.mean(torch.as_tensor((modal_idx==target_modal_ids_tensor), dtype=torch.float32)) == 0:
loss_adv += 0.0
else:
loss_adv += torch.mean(torch.masked_select(loss_temp, target_modal_ids_tensor==modal_idx))
_batch_size += t_out.size(0)
loss_adv *= self.cfg['training']['loss_adv_lambda']
loss_G = torch.Tensor([0]).cuda()
loss_G = loss_G + loss_GTA + loss_adv
self.BaseOpti.zero_grad()
if loss_G.item() != 0:
loss_G.backward()
self.BaseOpti.step()
_batch_size = 0
loss_D_comb = torch.Tensor([0]).cuda()
source_modal_ids_tensor = torch.Tensor(source_modal_ids).cuda()
for s_out, t_out, _d_net_DP, _d_net, _disc_opt, modal_idx in zip(source_output, target_output, self.DNets_DP, self.DNets, self.DiscOptis, range(len(source_output))):
self.set_requires_grad(self.logger, _d_net, requires_grad = True)
_batch_size = 0
loss_D = torch.Tensor([0]).cuda()
s_D_out = _d_net_DP(F.softmax(s_out.detach()))
loss_temp_s = torch.mean(self.bceloss(
s_D_out,
torch.FloatTensor(s_D_out.data.size()).fill_(1.0).cuda()
), [1,2,3])
if modal_idx >= self.modal_num:
loss_D += torch.mean(loss_temp_s)
elif torch.mean(torch.as_tensor((modal_idx==source_modal_ids_tensor), dtype=torch.float32)) == 0:
loss_D += 0.0
else:
loss_D += torch.mean(torch.masked_select(loss_temp_s, source_modal_ids_tensor==modal_idx))
_batch_size += (s_out.size(0) + t_out.size(0))
t_D_out = _d_net_DP(F.softmax(t_out.detach()))
loss_temp_t = torch.mean(self.bceloss(
t_D_out,
torch.FloatTensor(t_D_out.data.size()).fill_(0.0).cuda()
), [1,2,3])
if modal_idx >= self.modal_num:
loss_D += torch.mean(loss_temp_t)
elif torch.mean(torch.as_tensor((modal_idx==target_modal_ids_tensor), dtype=torch.float32)) == 0:
loss_D += 0.0
else:
loss_D += torch.mean(torch.masked_select(loss_temp_t, target_modal_ids_tensor==modal_idx))
loss_D *= self.cfg['training']['loss_adv_lambda']*0.5
loss_D_comb += loss_D
_disc_opt.zero_grad()
if loss_D_comb.item() != 0:
loss_D_comb.backward()
_disc_opt.step()
return loss_GTA, loss_adv, loss_D_comb
def process_label(self, label):
batch, channel, w, h = label.size()
pred1 = torch.zeros(batch, 20, w, h).cuda()
id = torch.where(label < 19, label, torch.Tensor([19]).cuda())
pred1 = pred1.scatter_(1, id.long(), 1)
return pred1
def class_vectors_alignment(self, ids, vectors, modal_ids=[0,]):
loss = torch.Tensor([0]).cuda()
_objective_vectors_set = self.metrics.multimodal_merger.merge_objective_vectors(modal_ids=modal_idx)
for i in range(len(ids)):
if ids[i] not in self.valid_classes:
continue
new_loss = self.smoothloss(vectors[i].squeeze().cuda(), _objective_vectors[ids[i]])
while (new_loss.item() > 5):
new_loss = new_loss / 10
loss = loss + new_loss
loss = loss / len(ids) * 10
return loss
def freeze_bn_apply(self):
for net in self.nets:
net.apply(freeze_bn)
for net in self.nets_DP:
net.apply(freeze_bn)
def scheduler_step(self):
if self.use_pseudo_label:
for scheduler in self.schedulers:
scheduler.step()
else:
"""skipped _BaseScheduler_nouse"""
for scheduler in self.schedulers[1:]:
scheduler.step()
self.adjust_basenet_learning_rate()
def optimizer_zerograd(self):
for optimizer in self.optimizers:
optimizer.zero_grad()
def optimizer_step(self):
for opt in self.optimizers:
opt.step()
def init_device(self, net, gpu_id=None, whether_DP=False):
gpu_id = gpu_id or self.default_gpu
device = torch.device("cuda:{}".format(gpu_id) if torch.cuda.is_available() else 'cpu')
net = net.to(device)
if whether_DP:
net = DataParallelWithCallback(net, device_ids=range(torch.cuda.device_count()))
return net
def eval(self, net=None, logger=None):
if net == None:
for net in self.nets:
net.eval()
for net in self.nets_DP:
net.eval()
if logger!=None:
logger.info("Successfully set the model eval mode")
else:
net.eval()
if logger!=None:
logger("Successfully set {} eval mode".format(net.__class__.__name__))
return
def train(self, net=None, logger=None):
if net==None:
for net in self.nets:
net.train()
for net in self.nets_DP:
net.train()
else:
net.train()
return
def set_requires_grad(self, logger, net, requires_grad = False):
for parameter in net.parameters():
parameter.requires_grad = requires_grad
def set_requires_grad_layer(self, logger, net, layer_type='batchnorm', requires_grad=False):
for net in self.nets:
for _i in net.modules():
if _i.__class__.__name__.lower().find(layer_type.lower()) != -1:
_i.weight.requires_grad = requires_grad
return
def init_weights(self, cfg, logger, net, init_type='normal', init_gain=0.02):
init_type = cfg.get('init_type', init_type)
init_gain = cfg.get('init_gain', init_gain)
def init_func(m):
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if init_type == 'normal':
nn.init.normal_(m.weight.data, 0.0, init_gain)
elif init_type == 'xavier':
nn.init.xavier_normal_(m.weight.data, gain=init_gain)
elif init_type == 'kaiming':
nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
nn.init.orthogonal_(m.weight.data, gain=init_gain)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
nn.init.constant_(m.bias.data, 0.0)
elif isinstance(m, SynchronizedBatchNorm2d) or classname.find('BatchNorm2d') != -1 \
or isinstance(m, nn.GroupNorm):
m.weight.data.fill_(1)
m.bias.data.zero_()
print('initialize {} with {}'.format(init_type, net.__class__.__name__))
logger.info('initialize {} with {}'.format(init_type, net.__class__.__name__))
net.apply(init_func) # apply the initialization function <init_func>
pass
def adaptive_load_nets(self, net, model_weight):
model_dict = net.state_dict()
pretrained_dict = {k : v for k, v in model_weight.items() if k in model_dict}
# print("[INFO] Pretrained dict:", pretrained_dict.keys())
model_dict.update(pretrained_dict)
net.load_state_dict(model_dict)
def load_nets(self, cfg, writer, logger): # load pretrained weights on the net
    """Resume all networks (and optionally optimizers/schedulers) from the
    checkpoint at cfg['training']['resume'].

    The checkpoint maps each network class name to either a list of state
    dicts (one per instance of that class, new format) or a single state
    dict (deprecated format). Raises Exception when the file is missing.
    """
    if os.path.isfile(cfg['training']['resume']):
        logger.info(
            "Loading model and optimizer from checkpoint '{}'".format(cfg['training']['resume'])
        )
        checkpoint = torch.load(cfg['training']['resume'])
        # _k indexes self.optimizers/self.schedulers in lockstep with self.nets;
        # net_state_no counts instances per class name to index the per-class list.
        _k = -1
        net_state_no = {}
        for net in self.nets:
            name = net.__class__.__name__
            if name not in net_state_no:
                net_state_no[name] = 0
            else:
                net_state_no[name] += 1
            # Incremented before the skip checks so optimizer indices stay aligned.
            _k += 1
            if checkpoint.get(name) == None:
                continue
            # GAN discriminators are only restored when gan_resume is enabled.
            if name.find('FCDiscriminator') != -1 and cfg['training']['gan_resume'] == False:
                continue
            if isinstance(checkpoint[name], list):
                self.adaptive_load_nets(net, checkpoint[name][net_state_no[name]]["model_state"])
            else:
                # Old single-entry checkpoint layout.
                print("*****************************************")
                print("[WARNING] Using depreciated load version! Model {}".format(name))
                print("*****************************************")
                self.adaptive_load_nets(net, checkpoint[name]["model_state"])
            if cfg['training']['optimizer_resume']:
                if isinstance(checkpoint[name], list):
                    self.adaptive_load_nets(self.optimizers[_k], checkpoint[name][net_state_no[name]]["optimizer_state"])
                    self.adaptive_load_nets(self.schedulers[_k], checkpoint[name][net_state_no[name]]["scheduler_state"])
                else:
                    self.adaptive_load_nets(self.optimizers[_k], checkpoint[name]["optimizer_state"])
                    self.adaptive_load_nets(self.schedulers[_k], checkpoint[name]["scheduler_state"])
        self.iter = checkpoint["iter"]
        #self.best_iou = checkpoint['best_iou']
        logger.info(
            "Loaded checkpoint '{}' (iter {})".format(
                cfg['training']['resume'], checkpoint["iter"]
            )
        )
    else:
        raise Exception("No checkpoint found at '{}'".format(cfg['training']['resume']))
def load_PredNet(self, cfg, writer, logger, dir=None, net=None): # load pretrained weights on the net
    """Restore `net` from a PredNet checkpoint and return its recorded best IoU.

    `dir` (note: shadows the builtin; kept for backward compatibility)
    defaults to cfg['training']['Pred_resume']. Raises Exception when the
    checkpoint file does not exist.

    NOTE(review): the early `return` branches return None while the normal
    path returns a number -- confirm callers tolerate both.
    """
    dir = dir or cfg['training']['Pred_resume']
    best_iou = 0
    if os.path.isfile(dir):
        logger.info(
            "Loading model and optimizer from checkpoint '{}'".format(dir)
        )
        checkpoint = torch.load(dir)
        name = net.__class__.__name__
        if checkpoint.get(name) == None:
            return
        if name.find('FCDiscriminator') != -1 and cfg['training']['gan_resume'] == False:
            return
        # List layout is the current format; the first entry is used here.
        if isinstance(checkpoint[name], list):
            self.adaptive_load_nets(net, checkpoint[name][0]["model_state"])
        else:
            self.adaptive_load_nets(net, checkpoint[name]["model_state"])
        iter = checkpoint["iter"]
        best_iou = checkpoint['best_iou']
        logger.info(
            "Loaded checkpoint '{}' (iter {}) (best iou {}) for PredNet".format(
                dir, checkpoint["iter"], best_iou
            )
        )
    else:
        raise Exception("No checkpoint found at '{}'".format(dir))
    # Deliberately not written back to the net (see commented line below).
    if hasattr(net, 'best_iou'):
        #net.best_iou = best_iou
        pass
    return best_iou
def set_optimizer(self, optimizer): #set optimizer to all nets
    """No-op placeholder; optimizers appear to be configured elsewhere -- confirm before use."""
    pass
def reset_objective_SingleVector(self,):
    """Zero the per-modality class prototype statistics on the GPU.

    Shapes: prototypes (modal_num+1, 19, 256), per-class counts
    (modal_num+1, 19), pairwise class distances (modal_num+1, 19, 19).
    The 19 presumably is the number of segmentation classes -- confirm
    against the dataset configuration.
    """
    groups = self.modal_num + 1
    self.objective_vectors_group = torch.zeros(groups, 19, 256).cuda()
    self.objective_vectors_num_group = torch.zeros(groups, 19).cuda()
    self.objective_vectors_dis_group = torch.zeros(groups, 19, 19).cuda()
def update_objective_SingleVector(self, vectors, vectors_num, name='moving_average'):
    """Fold a new batch of class-prototype sums into the running prototypes.

    vectors: tensor shaped like self.objective_vectors_group (per-modality,
    per-class feature sums); vectors_num: matching per-class sample counts.
    name selects the update rule: 'moving_average' (EMA with factor 1e-4)
    or 'mean' (exact running mean). Raises NotImplementedError otherwise.
    """
    # Nothing accumulated this step -> leave the statistics untouched.
    if torch.sum(vectors) == 0:
        return
    if name == 'moving_average':
        self.objective_vectors_group = self.objective_vectors_group * 0.9999 + 0.0001 * vectors
        self.objective_vectors_num_group += vectors_num
        # Bug fix: builtin min() cannot compare a multi-element tensor with
        # an int ("Boolean value of Tensor is ambiguous"); clamp instead.
        self.objective_vectors_num_group = torch.clamp(self.objective_vectors_num_group, max=3000)
    elif name == 'mean':
        # Running mean: un-normalise with the old counts, add the new sums,
        # then renormalise with the updated counts. unsqueeze(-1) broadcasts
        # the (modal, class) counts over the feature dimension -- the plain
        # product/division cannot broadcast (bug fix).
        old_counts = self.objective_vectors_num_group.unsqueeze(-1)
        self.objective_vectors_group = self.objective_vectors_group * old_counts + vectors
        self.objective_vectors_num_group += vectors_num
        self.objective_vectors_group = self.objective_vectors_group / self.objective_vectors_num_group.unsqueeze(-1)
        self.objective_vectors_num_group = torch.clamp(self.objective_vectors_num_group, max=3000)
    else:
        raise NotImplementedError('no such updating way of objective vectors {}'.format(name))
def grad_reverse(x):
    """Apply the gradient-reversal layer to `x`.

    NOTE(review): instantiating and calling GradReverse() relies on the
    legacy torch.autograd.Function calling convention; recent PyTorch
    requires GradReverse.apply(x) -- confirm against the GradReverse
    definition elsewhere in this file.
    """
    return GradReverse()(x)
| true | true |
f71b30ea10a42f00072f8fb902d4bdeca0fdac2c | 4,663 | py | Python | pose_sync_pytorch/generate_basic.py | lilly9117/Cross-Cutting | d534e8b5d4bf071883b7cb5f1832bba74b9a52d0 | [
"Apache-2.0"
] | null | null | null | pose_sync_pytorch/generate_basic.py | lilly9117/Cross-Cutting | d534e8b5d4bf071883b7cb5f1832bba74b9a52d0 | [
"Apache-2.0"
] | null | null | null | pose_sync_pytorch/generate_basic.py | lilly9117/Cross-Cutting | d534e8b5d4bf071883b7cb5f1832bba74b9a52d0 | [
"Apache-2.0"
] | null | null | null | import os
from moviepy.editor import VideoFileClip, concatenate_videoclips
import random
import numpy as np
import time
# from video_facial_landmarks_minmax import calculate_distance
from video_pose_landmarks import calculate_pose_distance
# Quick-test switches: when TEST is on, only the first TEST_TIME seconds are rendered.
TEST = True
TEST_TIME = 20
# Sentinel "worst" distance used to initialise the best-match search.
INIT_NUM = float("Inf")
# Length (seconds) of each comparison window when looking for a switch point.
WINDOW_TIME = 10
PADDED_TIME = 4 # keep a few seconds after a cut: a close-up face would otherwise keep re-matching itself
# Legacy face-landmark distance (superseded by calculate_pose_distance):
# def distance(reference_clip, clip):
#     min_diff, min_idx, additional_info = calculate_distance(reference_clip, clip)
#     return min_diff, min_idx
def pose_distance(reference_clip, clip):
    """Return (best distance, frame offset) of the closest pose match between the clips."""
    best_diff, best_idx, _extra = calculate_pose_distance(reference_clip, clip)
    return best_diff, best_idx
def crosscut(videos_path="./video", option="random"):
    """Cut between the videos in `videos_path` to build one stage-mix clip.

    option == "random" switches videos at random window boundaries; any
    other value switches at the minimum found by pose_distance(). Writes
    crosscut_fiesta.mp4 and returns the final moviepy clip.
    """
    min_time = 1000.0
    min_idx = 0
    audioclip = None
    extracted_clips_array = []
    video_num = len(os.listdir(videos_path))
    start_times = [0] * video_num # VIDEO ALIGNMENT -> SLICE START TIME
    # VIDEO ALIGNMENT -> SLICE START TIME
    for i in range(len(os.listdir(videos_path))):
        video_path = os.path.join(videos_path, sorted(os.listdir(videos_path))[i])
        clip = VideoFileClip(video_path)
        clip = clip.subclip(start_times[i], clip.duration) # trim the whole video so every clip shares the aligned start
        print(video_path, clip.fps, clip.duration)
        if min_time > clip.duration: # track the shortest clip; its audio becomes the soundtrack
            audioclip = clip.audio
            min_time = clip.duration
            min_idx = i
        print(video_path, clip.fps, clip.duration)
        extracted_clips_array.append(clip)
    print(len(extracted_clips_array))
    if TEST: # test mode: render only a short prefix for a quick check
        min_time = TEST_TIME
        audioclip = audioclip.set_duration(TEST_TIME)
    # GENERATE STAGEMIX
    # CONCAT SUBCLIP 0~ MIN DURATION CLIP TIME
    con_clips = []
    t = 3 # INIT: first 3 seconds come from clip 0
    current_idx = 0 # INIT
    con_clips.append(extracted_clips_array[current_idx].subclip(0, min(t, int(min_time))))
    while t < min_time:
        # 10 sec.
        cur_t = t
        next_t = min(t+WINDOW_TIME, min_time) # the last window may be shorter than WINDOW_TIME
        # RANDOM BASED METHOD
        if option=="random" or min(min_time,t + PADDED_TIME)==min_time:
            random_video_idx = random.randint(0, len(extracted_clips_array)-1)
            clip = extracted_clips_array[random_video_idx].subclip(cur_t, next_t)
            t = next_t
            con_clips.append(clip)
        else:
            # window of the currently playing video
            reference_clip = extracted_clips_array[current_idx].subclip(cur_t, next_t)
            d = INIT_NUM
            cur_clip = None
            # fallback index used when every distance stays at inf
            min_idx = (current_idx+1)%len(extracted_clips_array)
            for video_idx in range(len(extracted_clips_array)):
                if video_idx == current_idx:
                    continue
                # examine the same window of the candidate video
                clip = extracted_clips_array[video_idx].subclip(cur_t, next_t)
                # the already-checked prefix should really be skipped
                # (the first 3 seconds can never be cut on the first pass)
                # !! this may also miss the best matching frame
                # CALCULATE DISTANCE
                cur_d, plus_frame = pose_distance(reference_clip, clip)
                print(current_idx, video_idx, cur_d, cur_t + plus_frame)
                if d > cur_d:
                    d = cur_d
                    min_idx = video_idx
                    next_t = cur_t + plus_frame # moment at which to switch
                    cur_clip = reference_clip.subclip(0, plus_frame)
                # next_clip = clip.subclip(0, plus_frame) # candidate trimmed to the switch point
            # current clip: either cut at the best switch point or keep the full window
            if cur_clip: # a switch point was found
                clip = cur_clip # current clip cut at the best switch point
            else:
                clip = reference_clip # full current window
            t = next_t
            con_clips.append(clip)
        # next clip: always append PADDED_TIME seconds of the chosen video
        # so we do not immediately re-match (e.g. a close-up face)
        current_idx = min_idx # video that continues right after the cut
        print("idx : {}".format(current_idx))
        pad_clip = extracted_clips_array[current_idx].subclip(t, min(min_time,t+PADDED_TIME)) # must not run past min_time
        t = min(min_time,t + PADDED_TIME) # account for the padded seconds
        con_clips.append(pad_clip)
    final_clip = concatenate_videoclips(con_clips)
    if audioclip !=None:
        print("Not None")
        final_clip.audio = audioclip
    final_clip.write_videofile("crosscut_fiesta.mp4")
    return final_clip
# Build the stage-mix once at import time and report how long it took.
start_time = time.time()
crosscut(videos_path="./video", option="norandom")
end_time = time.time()
print(end_time - start_time)
| 36.716535 | 118 | 0.619558 | import os
from moviepy.editor import VideoFileClip, concatenate_videoclips
import random
import numpy as np
import time
from video_pose_landmarks import calculate_pose_distance
# Quick-test switches: when TEST is on, only the first TEST_TIME seconds are rendered.
TEST = True
TEST_TIME = 20
# Sentinel "worst" distance used to initialise the best-match search.
INIT_NUM = float("Inf")
# Length (seconds) of each comparison window when looking for a switch point.
WINDOW_TIME = 10
# Seconds to keep after each cut before searching for the next one.
PADDED_TIME = 4
def pose_distance(reference_clip, clip):
    """Return (best distance, frame offset) of the closest pose match between the clips."""
    best_diff, best_idx, _extra = calculate_pose_distance(reference_clip, clip)
    return best_diff, best_idx
def crosscut(videos_path="./video", option="random"):
    """Cut between the videos in `videos_path` to build one stage-mix clip.

    option == "random" switches videos at random window boundaries; any
    other value switches at the minimum found by pose_distance(). Writes
    crosscut_fiesta.mp4 and returns the final moviepy clip.
    """
    min_time = 1000.0
    min_idx = 0
    audioclip = None
    extracted_clips_array = []
    video_num = len(os.listdir(videos_path))
    # One aligned start offset per video.
    start_times = [0] * video_num
    for i in range(len(os.listdir(videos_path))):
        video_path = os.path.join(videos_path, sorted(os.listdir(videos_path))[i])
        clip = VideoFileClip(video_path)
        # Trim so every clip shares the aligned start point.
        clip = clip.subclip(start_times[i], clip.duration)
        print(video_path, clip.fps, clip.duration)
        # Track the shortest clip; its audio becomes the soundtrack.
        if min_time > clip.duration:
            audioclip = clip.audio
            min_time = clip.duration
            min_idx = i
        print(video_path, clip.fps, clip.duration)
        extracted_clips_array.append(clip)
    print(len(extracted_clips_array))
    # Test mode: render only a short prefix for a quick check.
    if TEST:
        min_time = TEST_TIME
        audioclip = audioclip.set_duration(TEST_TIME)
    con_clips = []
    # First 3 seconds always come from clip 0.
    t = 3
    current_idx = 0
    con_clips.append(extracted_clips_array[current_idx].subclip(0, min(t, int(min_time))))
    while t < min_time:
        cur_t = t
        # The last window may be shorter than WINDOW_TIME.
        next_t = min(t+WINDOW_TIME, min_time)
        if option=="random" or min(min_time,t + PADDED_TIME)==min_time:
            # Random cut: pick any video for this window.
            random_video_idx = random.randint(0, len(extracted_clips_array)-1)
            clip = extracted_clips_array[random_video_idx].subclip(cur_t, next_t)
            t = next_t
            con_clips.append(clip)
        else:
            # Pose-based cut: compare this window of the current video
            # against every other video and cut at the closest pose match.
            reference_clip = extracted_clips_array[current_idx].subclip(cur_t, next_t)
            d = INIT_NUM
            cur_clip = None
            # Fallback target when every distance stays at inf.
            min_idx = (current_idx+1)%len(extracted_clips_array)
            for video_idx in range(len(extracted_clips_array)):
                if video_idx == current_idx:
                    continue
                clip = extracted_clips_array[video_idx].subclip(cur_t, next_t)
                cur_d, plus_frame = pose_distance(reference_clip, clip)
                print(current_idx, video_idx, cur_d, cur_t + plus_frame)
                if d > cur_d:
                    d = cur_d
                    min_idx = video_idx
                    # Moment at which to switch videos.
                    next_t = cur_t + plus_frame
                    cur_clip = reference_clip.subclip(0, plus_frame)
            if cur_clip:
                # Cut the current clip at the best switch point.
                clip = cur_clip
            else:
                # No match found: keep the full current window.
                clip = reference_clip
            t = next_t
            con_clips.append(clip)
        # Always append PADDED_TIME seconds of the chosen video so the next
        # search does not immediately re-match the same moment.
        current_idx = min_idx
        print("idx : {}".format(current_idx))
        pad_clip = extracted_clips_array[current_idx].subclip(t, min(min_time,t+PADDED_TIME))
        t = min(min_time,t + PADDED_TIME)
        con_clips.append(pad_clip)
    final_clip = concatenate_videoclips(con_clips)
    if audioclip !=None:
        print("Not None")
        final_clip.audio = audioclip
    final_clip.write_videofile("crosscut_fiesta.mp4")
    return final_clip
# Build the stage-mix once at import time and report how long it took.
start_time = time.time()
crosscut(videos_path="./video", option="norandom")
end_time = time.time()
print(end_time - start_time)
| true | true |
f71b30f5180889ca3dd9df9574f878409863dfe6 | 1,003 | py | Python | Tests/test_TimeAverager.py | chipgarner/yourair | 22415389256cfa283e817970d6c79c187cbded4c | [
"MIT"
] | null | null | null | Tests/test_TimeAverager.py | chipgarner/yourair | 22415389256cfa283e817970d6c79c187cbded4c | [
"MIT"
] | null | null | null | Tests/test_TimeAverager.py | chipgarner/yourair | 22415389256cfa283e817970d6c79c187cbded4c | [
"MIT"
] | null | null | null | import Averager as Avg
class Average:
    """Test spy that records the last (average, timespan) pair it receives."""

    def __init__(self):
        self.answer, self.delta_time = 0, 0

    def average(self, avg, delta_t):
        """Callback handed to TimeAverager; stores its arguments for assertions."""
        self.answer = avg
        self.delta_time = delta_t
        print("Average: " + str(avg) + " Timespan: " + str(delta_t))
def test_averager():
    # With a 1-sample window the callback fires on the very first update.
    avg = Average()
    averager = Avg.TimeAverager(1, avg.average)
    averager.update_average(12.345)
    assert avg.answer == 12.345
    assert avg.delta_time > 0
def test_averager_averages():
    # With a 10-sample window the callback must stay silent for the first
    # nine updates and report the mean of 1..10 (5.5) on the tenth.
    avg = Average()
    averager = Avg.TimeAverager(10, avg.average)
    averager.update_average(1)
    assert avg.answer == 0
    assert avg.delta_time == 0
    for i in range(2, 10):
        averager.update_average(i)
    assert avg.answer == 0
    assert avg.delta_time == 0
    averager.update_average(10)
    assert avg.answer == 5.5
    assert avg.delta_time > 0
def test_averager_function_none():
    # A None callback must not crash the averager when the window fills.
    averager = Avg.TimeAverager(1, None)
    averager.update_average(12.345)
| 20.469388 | 68 | 0.639083 | import Averager as Avg
class Average:
    """Test spy that records the last (average, timespan) pair it receives."""

    def __init__(self):
        self.answer, self.delta_time = 0, 0

    def average(self, avg, delta_t):
        """Callback handed to TimeAverager; stores its arguments for assertions."""
        self.answer = avg
        self.delta_time = delta_t
        print("Average: " + str(avg) + " Timespan: " + str(delta_t))
def test_averager():
    # With a 1-sample window the callback fires on the very first update.
    avg = Average()
    averager = Avg.TimeAverager(1, avg.average)
    averager.update_average(12.345)
    assert avg.answer == 12.345
    assert avg.delta_time > 0
def test_averager_averages():
    # With a 10-sample window the callback must stay silent for the first
    # nine updates and report the mean of 1..10 (5.5) on the tenth.
    avg = Average()
    averager = Avg.TimeAverager(10, avg.average)
    averager.update_average(1)
    assert avg.answer == 0
    assert avg.delta_time == 0
    for i in range(2, 10):
        averager.update_average(i)
    assert avg.answer == 0
    assert avg.delta_time == 0
    averager.update_average(10)
    assert avg.answer == 5.5
    assert avg.delta_time > 0
def test_averager_function_none():
    # A None callback must not crash the averager when the window fills.
    averager = Avg.TimeAverager(1, None)
    averager.update_average(12.345)
| true | true |
f71b315d6312d73a4f7581bd22785f23c8cb7785 | 5,935 | py | Python | sprokit/tests/bindings/python/sprokit/pipeline/test-scheduler_registry.py | dstoup/kwiver | a3a36317b446baf0feb6274235ab1ac6b4329ead | [
"BSD-3-Clause"
] | 1 | 2017-07-31T07:07:32.000Z | 2017-07-31T07:07:32.000Z | sprokit/tests/bindings/python/sprokit/pipeline/test-scheduler_registry.py | dstoup/kwiver | a3a36317b446baf0feb6274235ab1ac6b4329ead | [
"BSD-3-Clause"
] | 3 | 2021-03-19T15:39:43.000Z | 2021-09-08T02:47:15.000Z | sprokit/tests/bindings/python/sprokit/pipeline/test-scheduler_registry.py | acidburn0zzz/kwiver | 6e4205f1c46df04759c57c040f01cc804b27e00d | [
"BSD-3-Clause"
] | null | null | null | #!@PYTHON_EXECUTABLE@
#ckwg +28
# Copyright 2011-2013 by Kitware, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither name of Kitware, Inc. nor the names of any contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def test_import():
    """Verify the sprokit scheduler_factory bindings can be imported."""
    try:
        from sprokit.pipeline import config
        import sprokit.pipeline.scheduler_factory
    except Exception:
        # `except Exception` still reports any import-time failure but,
        # unlike the previous bare `except:`, no longer swallows
        # KeyboardInterrupt/SystemExit.
        test_error("Failed to import the scheduler_factory module")
def test_create():
    """Smoke-test that the factory's plain data types can be constructed."""
    from sprokit.pipeline import config
    from sprokit.pipeline import scheduler_factory
    scheduler_factory.SchedulerType()
    ## scheduler_factory.SchedulerTypes()
    scheduler_factory.SchedulerDescription()
    scheduler_factory.SchedulerModule()
def test_api_calls():
    """Smoke-test the module-level factory API with a built-in scheduler type."""
    from sprokit.pipeline import config
    from sprokit.pipeline import modules
    from sprokit.pipeline import pipeline
    from sprokit.pipeline import scheduler_factory
    modules.load_known_modules()
    sched_type = 'thread_per_process'
    c = config.empty_config()
    p = pipeline.Pipeline()
    # Creation with and without an explicit config.
    scheduler_factory.create_scheduler(sched_type, p)
    scheduler_factory.create_scheduler(sched_type, p, c)
    # Introspection entry points; only checks they do not raise.
    scheduler_factory.types()
    scheduler_factory.description(sched_type)
    scheduler_factory.default_type
def example_scheduler(check_init):
    """Build a PythonScheduler subclass that records which overrides ran.

    check_init seeds the ran_* flags: pass True when the test will not
    drive the scheduler (so __del__ stays quiet), False when every
    lifecycle override is expected to run before destruction.
    """
    from sprokit.pipeline import scheduler
    class PythonExample(scheduler.PythonScheduler):
        def __init__(self, pipe, conf):
            scheduler.PythonScheduler.__init__(self, pipe, conf)
            # Each flag flips to True when the matching override is invoked.
            self.ran_start = check_init
            self.ran_wait = check_init
            self.ran_stop = check_init
            self.ran_pause = check_init
            self.ran_resume = check_init
        def _start(self):
            self.ran_start = True
        def _wait(self):
            self.ran_wait = True
        def _stop(self):
            self.ran_stop = True
        def _pause(self):
            self.ran_pause = True
        def _resume(self):
            self.ran_resume = True
        def __del__(self):
            # NOTE(review): verification happens in the finalizer, so a
            # missed override is only reported when the instance is GC'd.
            if not self.ran_start:
                test_error("start override was not called")
            if not self.ran_wait:
                test_error("wait override was not called")
            if not self.ran_stop:
                test_error("stop override was not called")
            if not self.ran_pause:
                test_error("pause override was not called")
            if not self.ran_resume:
                test_error("resume override was not called")
    return PythonExample
def test_register():
    """Register a Python scheduler type and verify it can be instantiated."""
    from sprokit.pipeline import config
    from sprokit.pipeline import modules
    from sprokit.pipeline import pipeline
    from sprokit.pipeline import scheduler_factory
    modules.load_known_modules()
    sched_type = 'python_example'
    sched_desc = 'simple description'
    scheduler_factory.add_scheduler(sched_type, sched_desc, example_scheduler(True))
    # The description supplied at registration must round-trip.
    if not sched_desc == scheduler_factory.description(sched_type):
        test_error("Description was not preserved when registering")
    p = pipeline.Pipeline()
    try:
        s = scheduler_factory.create_scheduler(sched_type, p)
        if s is None:
            raise Exception()
    except:
        test_error("Could not create newly registered scheduler type")
def test_wrapper_api():
    """Exercise every lifecycle call on a Python-implemented scheduler."""
    from sprokit.pipeline import config
    from sprokit.pipeline import modules
    from sprokit.pipeline import pipeline
    from sprokit.pipeline import process_factory
    from sprokit.pipeline import scheduler_factory
    sched_type = 'python_example'
    sched_desc = 'simple description'
    modules.load_known_modules()
    # check_init=False: example_scheduler's __del__ asserts every override ran.
    scheduler_factory.add_scheduler(sched_type, sched_desc, example_scheduler(False))
    p = pipeline.Pipeline()
    # A pipeline needs at least one process before setup_pipeline().
    proc_type = 'orphan'
    proc_name = 'orphan'
    proc = process_factory.create_process(proc_type, proc_name)
    p.add_process(proc)
    def check_scheduler(s):
        # Drive the full lifecycle: start/pause/resume/stop, then start/wait.
        if s is None:
            test_error("Got a 'None' scheduler")
            return
        s.start()
        s.pause()
        s.resume()
        s.stop()
        s.start()
        s.wait()
        del s
    p.reset()
    p.setup_pipeline()
    s = scheduler_factory.create_scheduler(sched_type, p)
    check_scheduler(s)
if __name__ == '__main__':
    import os
    import sys
    # argv: <test name> <working directory> <sprokit python module path>
    # NOTE(review): test_error is only brought in by the star-import below,
    # so a wrong argument count would raise NameError here -- confirm the
    # harness always passes three arguments.
    if not len(sys.argv) == 4:
        test_error("Expected three arguments")
        sys.exit(1)
    testname = sys.argv[1]
    os.chdir(sys.argv[2])
    sys.path.append(sys.argv[3])
    from sprokit.test.test import *
    run_test(testname, find_tests(locals()))
| 29.824121 | 85 | 0.694356 |
def test_import():
    """Verify the sprokit scheduler_factory bindings can be imported."""
    try:
        from sprokit.pipeline import config
        import sprokit.pipeline.scheduler_factory
    except Exception:
        # `except Exception` still reports any import-time failure but,
        # unlike the previous bare `except:`, no longer swallows
        # KeyboardInterrupt/SystemExit.
        test_error("Failed to import the scheduler_factory module")
def test_create():
    """Smoke-test that the factory's plain data types can be constructed."""
    from sprokit.pipeline import config
    from sprokit.pipeline import scheduler_factory
    scheduler_factory.SchedulerType()
    # Restored from the intact copy of this test earlier in the file: the
    # line had been corrupted to a dangling `ription()`.
    scheduler_factory.SchedulerDescription()
    scheduler_factory.SchedulerModule()
def test_api_calls():
    """Smoke-test the module-level factory API with a built-in scheduler type."""
    from sprokit.pipeline import config
    from sprokit.pipeline import modules
    from sprokit.pipeline import pipeline
    from sprokit.pipeline import scheduler_factory
    modules.load_known_modules()
    sched_type = 'thread_per_process'
    c = config.empty_config()
    p = pipeline.Pipeline()
    # Creation with and without an explicit config.
    scheduler_factory.create_scheduler(sched_type, p)
    scheduler_factory.create_scheduler(sched_type, p, c)
    # Introspection entry points; only checks they do not raise.
    scheduler_factory.types()
    scheduler_factory.description(sched_type)
    scheduler_factory.default_type
def example_scheduler(check_init):
    """Build a PythonScheduler subclass that records which overrides ran.

    check_init seeds the ran_* flags: pass True when the test will not
    drive the scheduler (so __del__ stays quiet), False when every
    lifecycle override is expected to run before destruction.
    """
    from sprokit.pipeline import scheduler
    class PythonExample(scheduler.PythonScheduler):
        def __init__(self, pipe, conf):
            scheduler.PythonScheduler.__init__(self, pipe, conf)
            # Each flag flips to True when the matching override is invoked.
            self.ran_start = check_init
            self.ran_wait = check_init
            self.ran_stop = check_init
            self.ran_pause = check_init
            self.ran_resume = check_init
        def _start(self):
            self.ran_start = True
        def _wait(self):
            self.ran_wait = True
        def _stop(self):
            self.ran_stop = True
        def _pause(self):
            self.ran_pause = True
        def _resume(self):
            self.ran_resume = True
        def __del__(self):
            # NOTE(review): verification happens in the finalizer, so a
            # missed override is only reported when the instance is GC'd.
            if not self.ran_start:
                test_error("start override was not called")
            if not self.ran_wait:
                test_error("wait override was not called")
            if not self.ran_stop:
                test_error("stop override was not called")
            if not self.ran_pause:
                test_error("pause override was not called")
            if not self.ran_resume:
                test_error("resume override was not called")
    return PythonExample
def test_register():
    """Register a Python scheduler type and verify it can be instantiated."""
    from sprokit.pipeline import config
    from sprokit.pipeline import modules
    from sprokit.pipeline import pipeline
    from sprokit.pipeline import scheduler_factory
    modules.load_known_modules()
    sched_type = 'python_example'
    sched_desc = 'simple description'
    scheduler_factory.add_scheduler(sched_type, sched_desc, example_scheduler(True))
    # The description supplied at registration must round-trip.
    if not sched_desc == scheduler_factory.description(sched_type):
        test_error("Description was not preserved when registering")
    p = pipeline.Pipeline()
    try:
        s = scheduler_factory.create_scheduler(sched_type, p)
        if s is None:
            raise Exception()
    except:
        test_error("Could not create newly registered scheduler type")
def test_wrapper_api():
    """Exercise every lifecycle call on a Python-implemented scheduler."""
    from sprokit.pipeline import config
    from sprokit.pipeline import modules
    from sprokit.pipeline import pipeline
    from sprokit.pipeline import process_factory
    from sprokit.pipeline import scheduler_factory
    sched_type = 'python_example'
    sched_desc = 'simple description'
    modules.load_known_modules()
    # check_init=False: example_scheduler's __del__ asserts every override ran.
    scheduler_factory.add_scheduler(sched_type, sched_desc, example_scheduler(False))
    p = pipeline.Pipeline()
    # A pipeline needs at least one process before setup_pipeline().
    proc_type = 'orphan'
    proc_name = 'orphan'
    proc = process_factory.create_process(proc_type, proc_name)
    p.add_process(proc)
    def check_scheduler(s):
        # Drive the full lifecycle: start/pause/resume/stop, then start/wait.
        if s is None:
            test_error("Got a 'None' scheduler")
            return
        s.start()
        s.pause()
        s.resume()
        s.stop()
        s.start()
        s.wait()
        del s
    p.reset()
    p.setup_pipeline()
    s = scheduler_factory.create_scheduler(sched_type, p)
    check_scheduler(s)
if __name__ == '__main__':
    import os
    import sys
    # argv: <test name> <working directory> <sprokit python module path>
    # NOTE(review): test_error is only brought in by the star-import below,
    # so a wrong argument count would raise NameError here -- confirm the
    # harness always passes three arguments.
    if not len(sys.argv) == 4:
        test_error("Expected three arguments")
        sys.exit(1)
    testname = sys.argv[1]
    os.chdir(sys.argv[2])
    sys.path.append(sys.argv[3])
    from sprokit.test.test import *
    run_test(testname, find_tests(locals()))
| true | true |
f71b32a53e1eb2f384ead41803a3f5892542c5b5 | 6,308 | py | Python | dbms/tests/queries/0_stateless/helpers/uexpect.py | sunadm/ClickHouse | 55903fbe23ef6dff8fc7ec25ae68e04919bc9b7f | [
"Apache-2.0"
] | 8 | 2019-06-04T02:50:13.000Z | 2022-02-10T06:46:51.000Z | dbms/tests/queries/0_stateless/helpers/uexpect.py | sunadm/ClickHouse | 55903fbe23ef6dff8fc7ec25ae68e04919bc9b7f | [
"Apache-2.0"
] | 16 | 2021-06-07T21:32:30.000Z | 2022-03-31T21:08:29.000Z | dbms/tests/queries/0_stateless/helpers/uexpect.py | sunadm/ClickHouse | 55903fbe23ef6dff8fc7ec25ae68e04919bc9b7f | [
"Apache-2.0"
] | 3 | 2020-02-24T12:57:54.000Z | 2021-10-04T13:29:00.000Z | # Copyright (c) 2019 Vitaliy Zakaznikov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pty
import time
import sys
import re
from threading import Thread, Event
from subprocess import Popen
from Queue import Queue, Empty
class TimeoutError(Exception):
    """Raised when a read produces no data within the allowed time."""

    def __init__(self, timeout):
        self.timeout = timeout

    def __str__(self):
        return 'Timeout {0:.3f}s'.format(float(self.timeout))
class ExpectTimeoutError(Exception):
    """Raised when expect() times out; carries the pattern, the timeout and
    whatever buffered output was accumulated so far."""

    def __init__(self, pattern, timeout, buffer):
        self.pattern, self.timeout, self.buffer = pattern, timeout, buffer

    def __str__(self):
        pieces = ['Timeout %.3fs ' % float(self.timeout)]
        if self.pattern:
            pieces.append('for %s ' % repr(self.pattern.pattern))
        if self.buffer:
            pieces.append('buffer %s ' % repr(self.buffer[:]))
            # Also show the buffer as hex character codes to expose
            # non-printable output.
            hex_codes = ','.join(['%x' % ord(c) for c in self.buffer[:]])
            pieces.append('or \'%s\'' % hex_codes)
        return ''.join(pieces)
class IO(object):
    """pexpect-style wrapper around a PTY-attached child process.

    Output is pumped into `queue` by a background reader thread (see
    spawn()/reader()), which is what makes timed reads possible.
    """
    class EOF(object):
        pass
    class Timeout(object):
        pass
    EOF = EOF
    TIMEOUT = Timeout
    class Logger(object):
        """Decorates a log stream so every written line gets a prefix."""
        def __init__(self, logger, prefix=''):
            self._logger = logger
            self._prefix = prefix
        def write(self, data):
            self._logger.write(('\n' + data).replace('\n','\n' + self._prefix))
        def flush(self):
            self._logger.flush()
    def __init__(self, process, master, queue, reader):
        # process: Popen child; master: PTY master fd; queue: filled by the
        # reader thread; reader: {'thread': Thread, 'kill_event': Event}.
        self.process = process
        self.master = master
        self.queue = queue
        self.buffer = None
        self.before = None
        self.after = None
        self.match = None
        self.pattern = None
        self.reader = reader
        self._timeout = None
        self._logger = None
        self._eol = ''
    def __enter__(self):
        return self
    def __exit__(self, type, value, traceback):
        self.close()
    def logger(self, logger=None, prefix=''):
        # Combined getter/setter: passing a logger installs it.
        if logger:
            self._logger = self.Logger(logger, prefix=prefix)
        return self._logger
    def timeout(self, timeout=None):
        # Combined getter/setter for the default expect/read timeout.
        if timeout:
            self._timeout = timeout
        return self._timeout
    def eol(self, eol=None):
        # Combined getter/setter for the end-of-line appended by send().
        if eol:
            self._eol = eol
        return self._eol
    def close(self, force=True):
        # Stop the reader thread, terminate the child's process group and
        # the child itself, then release the PTY master fd.
        self.reader['kill_event'].set()
        os.system('pkill -TERM -P %d' % self.process.pid)
        if force:
            self.process.kill()
        else:
            self.process.terminate()
        os.close(self.master)
        if self._logger:
            self._logger.write('\n')
            self._logger.flush()
    def send(self, data, eol=None):
        # Write data followed by the configured end-of-line marker.
        if eol is None:
            eol = self._eol
        return self.write(data + eol)
    def write(self, data):
        return os.write(self.master, data)
    def expect(self, pattern, timeout=None, escape=False):
        """Consume queued output until `pattern` matches or `timeout` expires.

        On success sets self.match/before/after, keeps only the unmatched
        tail in self.buffer and returns the match object. On timeout the
        buffer is cleared and ExpectTimeoutError is raised.
        """
        self.match = None
        self.before = None
        self.after = None
        if escape:
            pattern = re.escape(pattern)
        pattern = re.compile(pattern)
        if timeout is None:
            timeout = self._timeout
        timeleft = timeout
        while True:
            start_time = time.time()
            if self.buffer is not None:
                self.match = pattern.search(self.buffer, 0)
                if self.match is not None:
                    # Split the buffer around the match and keep the tail.
                    self.after = self.buffer[self.match.start():self.match.end()]
                    self.before = self.buffer[:self.match.start()]
                    self.buffer = self.buffer[self.match.end():]
                    break
            if timeleft < 0:
                break
            try:
                data = self.read(timeout=timeleft, raise_exception=True)
            except TimeoutError:
                # Flush what we have to the logger before reporting failure.
                if self._logger:
                    self._logger.write((self.buffer or '') + '\n')
                    self._logger.flush()
                exception = ExpectTimeoutError(pattern, timeout, self.buffer)
                self.buffer = None
                raise exception
            timeleft -= (time.time() - start_time)
            if data:
                self.buffer = (self.buffer + data) if self.buffer else data
        if self._logger:
            self._logger.write((self.before or '') + (self.after or ''))
            self._logger.flush()
        if self.match is None:
            exception = ExpectTimeoutError(pattern, timeout, self.buffer)
            self.buffer = None
            raise exception
        return self.match
    def read(self, timeout=0, raise_exception=False):
        """Drain the reader queue for up to `timeout` seconds.

        Returns whatever arrived (possibly ''); raises TimeoutError instead
        of returning empty when raise_exception is True.
        """
        data = ''
        timeleft = timeout
        try:
            while timeleft >= 0 :
                start_time = time.time()
                data += self.queue.get(timeout=timeleft)
                if data:
                    break
                timeleft -= (time.time() - start_time)
        except Empty:
            if data:
                return data
            if raise_exception:
                raise TimeoutError(timeout)
            pass
        if not data and raise_exception:
            raise TimeoutError(timeout)
        return data
def spawn(command):
    """Start `command` attached to a fresh PTY and return an IO wrapper.

    A daemon reader thread pumps the PTY master into a queue so reads can
    be bounded by timeouts.
    """
    master, slave = pty.openpty()
    process = Popen(command, preexec_fn=os.setsid, stdout=slave, stdin=slave, stderr=slave, bufsize=1)
    # The child holds its own copy of the slave end; close ours.
    os.close(slave)
    queue = Queue()
    reader_kill_event = Event()
    thread = Thread(target=reader, args=(process, master, queue, reader_kill_event))
    thread.daemon = True
    thread.start()
    return IO(process, master, queue, reader={'thread':thread, 'kill_event':reader_kill_event})
def reader(process, out, queue, kill_event):
    """Reader-thread loop: pump bytes from fd `out` into `queue`.

    The bare except is deliberate: once kill_event is set (e.g. the PTY fd
    was closed during IO.close()) a read error ends the loop quietly;
    otherwise the error is re-raised.
    """
    while True:
        try:
            data = os.read(out, 65536)
            queue.put(data)
        except:
            if kill_event.is_set():
                break
            raise
| 30.47343 | 102 | 0.566265 |
import os
import pty
import time
import sys
import re
from threading import Thread, Event
from subprocess import Popen
from Queue import Queue, Empty
class TimeoutError(Exception):
    """Raised when a read produces no data within the allowed time."""

    def __init__(self, timeout):
        self.timeout = timeout

    def __str__(self):
        return 'Timeout {0:.3f}s'.format(float(self.timeout))
class ExpectTimeoutError(Exception):
    """Raised when expect() times out; carries the pattern, the timeout and
    whatever buffered output was accumulated so far."""

    def __init__(self, pattern, timeout, buffer):
        self.pattern, self.timeout, self.buffer = pattern, timeout, buffer

    def __str__(self):
        pieces = ['Timeout %.3fs ' % float(self.timeout)]
        if self.pattern:
            pieces.append('for %s ' % repr(self.pattern.pattern))
        if self.buffer:
            pieces.append('buffer %s ' % repr(self.buffer[:]))
            # Also show the buffer as hex character codes to expose
            # non-printable output.
            hex_codes = ','.join(['%x' % ord(c) for c in self.buffer[:]])
            pieces.append('or \'%s\'' % hex_codes)
        return ''.join(pieces)
class IO(object):
    """pexpect-style wrapper around a PTY-attached child process.

    Output is pumped into `queue` by a background reader thread (see
    spawn()/reader()), which is what makes timed reads possible.
    """
    class EOF(object):
        pass
    class Timeout(object):
        pass
    EOF = EOF
    TIMEOUT = Timeout
    class Logger(object):
        """Decorates a log stream so every written line gets a prefix."""
        def __init__(self, logger, prefix=''):
            self._logger = logger
            self._prefix = prefix
        def write(self, data):
            self._logger.write(('\n' + data).replace('\n','\n' + self._prefix))
        def flush(self):
            self._logger.flush()
    def __init__(self, process, master, queue, reader):
        # process: Popen child; master: PTY master fd; queue: filled by the
        # reader thread; reader: {'thread': Thread, 'kill_event': Event}.
        self.process = process
        self.master = master
        self.queue = queue
        self.buffer = None
        self.before = None
        self.after = None
        self.match = None
        self.pattern = None
        self.reader = reader
        self._timeout = None
        self._logger = None
        self._eol = ''
    def __enter__(self):
        return self
    def __exit__(self, type, value, traceback):
        self.close()
    def logger(self, logger=None, prefix=''):
        # Combined getter/setter: passing a logger installs it.
        if logger:
            self._logger = self.Logger(logger, prefix=prefix)
        return self._logger
    def timeout(self, timeout=None):
        # Combined getter/setter for the default expect/read timeout.
        if timeout:
            self._timeout = timeout
        return self._timeout
    def eol(self, eol=None):
        # Combined getter/setter for the end-of-line appended by send().
        if eol:
            self._eol = eol
        return self._eol
    def close(self, force=True):
        # Stop the reader thread, terminate the child's process group and
        # the child itself, then release the PTY master fd.
        self.reader['kill_event'].set()
        os.system('pkill -TERM -P %d' % self.process.pid)
        if force:
            self.process.kill()
        else:
            self.process.terminate()
        os.close(self.master)
        if self._logger:
            self._logger.write('\n')
            self._logger.flush()
    def send(self, data, eol=None):
        # Write data followed by the configured end-of-line marker.
        if eol is None:
            eol = self._eol
        return self.write(data + eol)
    def write(self, data):
        return os.write(self.master, data)
    def expect(self, pattern, timeout=None, escape=False):
        """Consume queued output until `pattern` matches or `timeout` expires.

        On success sets self.match/before/after, keeps only the unmatched
        tail in self.buffer and returns the match object. On timeout the
        buffer is cleared and ExpectTimeoutError is raised.
        """
        self.match = None
        self.before = None
        self.after = None
        if escape:
            pattern = re.escape(pattern)
        pattern = re.compile(pattern)
        if timeout is None:
            timeout = self._timeout
        timeleft = timeout
        while True:
            start_time = time.time()
            if self.buffer is not None:
                self.match = pattern.search(self.buffer, 0)
                if self.match is not None:
                    # Split the buffer around the match and keep the tail.
                    self.after = self.buffer[self.match.start():self.match.end()]
                    self.before = self.buffer[:self.match.start()]
                    self.buffer = self.buffer[self.match.end():]
                    break
            if timeleft < 0:
                break
            try:
                data = self.read(timeout=timeleft, raise_exception=True)
            except TimeoutError:
                # Flush what we have to the logger before reporting failure.
                if self._logger:
                    self._logger.write((self.buffer or '') + '\n')
                    self._logger.flush()
                exception = ExpectTimeoutError(pattern, timeout, self.buffer)
                self.buffer = None
                raise exception
            timeleft -= (time.time() - start_time)
            if data:
                self.buffer = (self.buffer + data) if self.buffer else data
        if self._logger:
            self._logger.write((self.before or '') + (self.after or ''))
            self._logger.flush()
        if self.match is None:
            exception = ExpectTimeoutError(pattern, timeout, self.buffer)
            self.buffer = None
            raise exception
        return self.match
    def read(self, timeout=0, raise_exception=False):
        """Drain the reader queue for up to `timeout` seconds.

        Returns whatever arrived (possibly ''); raises TimeoutError instead
        of returning empty when raise_exception is True.
        """
        data = ''
        timeleft = timeout
        try:
            while timeleft >= 0 :
                start_time = time.time()
                data += self.queue.get(timeout=timeleft)
                if data:
                    break
                timeleft -= (time.time() - start_time)
        except Empty:
            if data:
                return data
            if raise_exception:
                raise TimeoutError(timeout)
            pass
        if not data and raise_exception:
            raise TimeoutError(timeout)
        return data
def spawn(command):
    """Start `command` attached to a fresh PTY and return an IO wrapper.

    A daemon reader thread pumps the PTY master into a queue so reads can
    be bounded by timeouts.
    """
    master, slave = pty.openpty()
    process = Popen(command, preexec_fn=os.setsid, stdout=slave, stdin=slave, stderr=slave, bufsize=1)
    # The child holds its own copy of the slave end; close ours.
    os.close(slave)
    queue = Queue()
    reader_kill_event = Event()
    thread = Thread(target=reader, args=(process, master, queue, reader_kill_event))
    thread.daemon = True
    thread.start()
    return IO(process, master, queue, reader={'thread':thread, 'kill_event':reader_kill_event})
def reader(process, out, queue, kill_event):
    """Reader-thread loop: pump bytes from fd `out` into `queue`.

    The bare except is deliberate: once kill_event is set (e.g. the PTY fd
    was closed during IO.close()) a read error ends the loop quietly;
    otherwise the error is re-raised.
    """
    while True:
        try:
            data = os.read(out, 65536)
            queue.put(data)
        except:
            if kill_event.is_set():
                break
            raise
| true | true |
f71b32db7390644ae31d4b98d84b70883c847091 | 1,777 | py | Python | generate/uk.py | mczub/because-moe | d57164399832e32f505a081d7196e0f3828a6e35 | [
"Unlicense",
"MIT"
] | 78 | 2015-09-09T00:48:19.000Z | 2022-02-25T14:18:46.000Z | generate/uk.py | mczub/because-moe | d57164399832e32f505a081d7196e0f3828a6e35 | [
"Unlicense",
"MIT"
] | 15 | 2015-09-09T03:56:29.000Z | 2020-01-03T07:18:50.000Z | generate/uk.py | mczub/because-moe | d57164399832e32f505a081d7196e0f3828a6e35 | [
"Unlicense",
"MIT"
] | 22 | 2015-09-09T03:05:37.000Z | 2021-07-24T07:35:59.000Z | import sys
# Build the UK availability blob: query every streaming source, merge
# alternate titles, write uk.json locally, then upload it to Azure storage.
sys.path.append("site-packages")
import json
import string
from unidecode import unidecode
from urllib import parse
from azure.storage.blob import BlockBlobService
from datetime import datetime
import animesources
indexedShows = {}
shows = []
with open('title-map.json') as titlemap_file:
    titlemap = json.load(titlemap_file)
with open('multi-season.json') as multiseason_file:
    multiseason = json.load(multiseason_file)
with open('azure.json') as azure_file:
    azure_storage = json.load(azure_file)
azure_blob = BlockBlobService(account_name=azure_storage['account'], account_key=azure_storage['key'])
with open('proxies.json') as proxies_file:
    proxy_data = json.load(proxies_file)
proxy = proxy_data['uk']
sources = [
    animesources.Crunchyroll(titlemap, multiseason, 'uk', proxy),
    animesources.Funimation(titlemap, multiseason, 'gb', proxy),
    animesources.Netflix(titlemap, multiseason, 'uk', proxy),
    animesources.HiDive(titlemap, multiseason, 'uk', proxy),
    animesources.AmazonPrime(titlemap, multiseason, 'uk', proxy)
]
for source in sources:
    source.UpdateShowList(indexedShows)
    print(source.GetName() + ': ' + str(len(indexedShows)))
# Bug fix: dict.values() returns a non-subscriptable view in Python 3, but
# the alternate-title loop below indexes into it; materialise a list.
shows = list(indexedShows.values())
with open('alternates.json') as alternates_file:
    alternates = json.load(alternates_file)
for alternate in alternates:
    # Bug fix: the old default sentinel was False, which is indistinguishable
    # from a match at index 0; use None and an explicit identity test.
    match_index = next((i for i, x in enumerate(shows) if animesources.compare(x['name'], alternate)), None)
    if match_index is not None:
        shows[match_index]['alt'] = alternates[alternate]
shows = sorted(shows, key = lambda show: show['name'].lower())
blob = {"lastUpdated": datetime.utcnow().isoformat(), "shows": shows}
# Write via a context manager so the file is closed even if dumping fails.
with open('uk.json', 'w') as out_file:
    json.dump(blob, out_file)
azure_blob.create_blob_from_path(
    'assets',
    'uk.json',
    'uk.json'
)
print('done')
sys.path.append("site-packages")
import json
import string
from unidecode import unidecode
from urllib import parse
from azure.storage.blob import BlockBlobService
from datetime import datetime
import animesources
indexedShows = {}
shows = []
with open('title-map.json') as titlemap_file:
titlemap = json.load(titlemap_file)
with open('multi-season.json') as multiseason_file:
multiseason = json.load(multiseason_file)
with open('azure.json') as azure_file:
azure_storage = json.load(azure_file)
azure_blob = BlockBlobService(account_name=azure_storage['account'], account_key=azure_storage['key'])
with open('proxies.json') as proxies_file:
proxy_data = json.load(proxies_file)
proxy = proxy_data['uk']
sources = [
animesources.Crunchyroll(titlemap, multiseason, 'uk', proxy),
animesources.Funimation(titlemap, multiseason, 'gb', proxy),
animesources.Netflix(titlemap, multiseason, 'uk', proxy),
animesources.HiDive(titlemap, multiseason, 'uk', proxy),
animesources.AmazonPrime(titlemap, multiseason, 'uk', proxy)
]
for source in sources:
source.UpdateShowList(indexedShows)
print(source.GetName() + ': ' + str(len(indexedShows)))
shows = indexedShows.values()
with open('alternates.json') as alternates_file:
alternates = json.load(alternates_file)
for alternate in alternates:
match_index = next((i for i, x in enumerate(shows) if animesources.compare(x['name'], alternate)), False)
if (match_index):
shows[match_index]['alt'] = alternates[alternate]
shows = sorted(shows, key = lambda show: show['name'].lower())
blob = {"lastUpdated": datetime.utcnow().isoformat(), "shows": shows}
out_file = open('uk.json', 'w')
json.dump(blob, out_file)
out_file.close()
azure_blob.create_blob_from_path(
'assets',
'uk.json',
'uk.json'
)
print('done') | true | true |
f71b33566c8e0a884e4d1704ac06c8583ef46398 | 7,939 | py | Python | tests/test_storage.py | gregdan3/limits | f2c693b9009afe27c9ecbb94492455ad470127f1 | [
"MIT"
] | null | null | null | tests/test_storage.py | gregdan3/limits | f2c693b9009afe27c9ecbb94492455ad470127f1 | [
"MIT"
] | null | null | null | tests/test_storage.py | gregdan3/limits | f2c693b9009afe27c9ecbb94492455ad470127f1 | [
"MIT"
] | null | null | null | import time
import pytest
from limits.errors import ConfigurationError
from limits.storage import (
MemcachedStorage,
MemoryStorage,
MongoDBStorage,
RedisClusterStorage,
RedisSentinelStorage,
RedisStorage,
Storage,
storage_from_string,
)
from limits.strategies import MovingWindowRateLimiter
class TestBaseStorage:
    """Exercise the URI-based factory and the pluggable-storage contract.

    Each parametrized case pairs a storage URI with the pytest fixture that
    provisions the matching backend; the fixture argument is unused in the
    test body and is requested only for its setup side effects.
    """
    @pytest.mark.parametrize(
        "uri, args, expected_instance, fixture",
        [
            ("memory://", {}, MemoryStorage, None),
            pytest.param(
                "redis://localhost:7379",
                {},
                RedisStorage,
                pytest.lazy_fixture("redis_basic"),
                marks=pytest.mark.redis,
            ),
            pytest.param(
                "redis+unix:///tmp/limits.redis.sock",
                {},
                RedisStorage,
                pytest.lazy_fixture("redis_uds"),
                marks=pytest.mark.redis,
            ),
            pytest.param(
                "redis+unix://:password/tmp/limits.redis.sock",
                {},
                RedisStorage,
                pytest.lazy_fixture("redis_uds"),
                marks=pytest.mark.redis,
            ),
            pytest.param(
                "memcached://localhost:22122",
                {},
                MemcachedStorage,
                pytest.lazy_fixture("memcached"),
                marks=pytest.mark.memcached,
            ),
            pytest.param(
                "memcached://localhost:22122,localhost:22123",
                {},
                MemcachedStorage,
                pytest.lazy_fixture("memcached_cluster"),
                marks=pytest.mark.memcached,
            ),
            pytest.param(
                "memcached:///tmp/limits.memcached.sock",
                {},
                MemcachedStorage,
                pytest.lazy_fixture("memcached_uds"),
                marks=pytest.mark.memcached,
            ),
            pytest.param(
                "redis+sentinel://localhost:26379",
                {"service_name": "localhost-redis-sentinel"},
                RedisSentinelStorage,
                pytest.lazy_fixture("redis_sentinel"),
                marks=pytest.mark.redis_sentinel,
            ),
            pytest.param(
                "redis+sentinel://localhost:26379/localhost-redis-sentinel",
                {},
                RedisSentinelStorage,
                pytest.lazy_fixture("redis_sentinel"),
                marks=pytest.mark.redis_sentinel,
            ),
            pytest.param(
                "redis+sentinel://:sekret@localhost:26379/localhost-redis-sentinel",
                {},
                RedisSentinelStorage,
                pytest.lazy_fixture("redis_sentinel_auth"),
                marks=pytest.mark.redis_sentinel,
            ),
            pytest.param(
                "redis+cluster://localhost:7001/",
                {},
                RedisClusterStorage,
                pytest.lazy_fixture("redis_cluster"),
                marks=pytest.mark.redis_cluster,
            ),
            pytest.param(
                "mongodb://localhost:37017/",
                {},
                MongoDBStorage,
                pytest.lazy_fixture("mongodb"),
                marks=pytest.mark.mongodb,
            ),
        ],
    )
    def test_storage_string(self, uri, args, expected_instance, fixture):
        """storage_from_string resolves each URI scheme to the right class."""
        assert isinstance(storage_from_string(uri, **args), expected_instance)
    @pytest.mark.parametrize(
        "uri, args", [("blah://", {}), ("redis+sentinel://localhost:26379", {})]
    )
    def test_invalid_storage_string(self, uri, args):
        """Unknown schemes and incomplete sentinel URIs raise ConfigurationError.

        Note: the sentinel case is invalid because no service_name is given
        either in the URI path or in ``args``.
        """
        with pytest.raises(ConfigurationError):
            storage_from_string(uri, **args)
    @pytest.mark.parametrize(
        "uri, args, fixture",
        [
            ("memory://", {}, None),
            pytest.param(
                "redis://localhost:7379",
                {},
                pytest.lazy_fixture("redis_basic"),
                marks=pytest.mark.redis,
            ),
            pytest.param(
                "redis+unix:///tmp/limits.redis.sock",
                {},
                pytest.lazy_fixture("redis_uds"),
                marks=pytest.mark.redis,
            ),
            pytest.param(
                "redis+unix://:password/tmp/limits.redis.sock",
                {},
                pytest.lazy_fixture("redis_uds"),
                marks=pytest.mark.redis,
            ),
            pytest.param(
                "memcached://localhost:22122",
                {},
                pytest.lazy_fixture("memcached"),
                marks=pytest.mark.memcached,
            ),
            pytest.param(
                "memcached://localhost:22122,localhost:22123",
                {},
                pytest.lazy_fixture("memcached_cluster"),
                marks=pytest.mark.memcached,
            ),
            pytest.param(
                "memcached:///tmp/limits.memcached.sock",
                {},
                pytest.lazy_fixture("memcached_uds"),
                marks=pytest.mark.memcached,
            ),
            pytest.param(
                "redis+sentinel://localhost:26379",
                {"service_name": "localhost-redis-sentinel"},
                pytest.lazy_fixture("redis_sentinel"),
                marks=pytest.mark.redis_sentinel,
            ),
            pytest.param(
                "redis+sentinel://localhost:26379/localhost-redis-sentinel",
                {},
                pytest.lazy_fixture("redis_sentinel"),
                marks=pytest.mark.redis_sentinel,
            ),
            # NOTE(review): this auth case targets port 36379 while the
            # resolution test above uses 26379 — presumably a separate
            # password-protected sentinel; confirm against the fixtures.
            pytest.param(
                "redis+sentinel://:sekret@localhost:36379/localhost-redis-sentinel",
                {},
                pytest.lazy_fixture("redis_sentinel_auth"),
                marks=pytest.mark.redis_sentinel,
            ),
            pytest.param(
                "redis+cluster://localhost:7001/",
                {},
                pytest.lazy_fixture("redis_cluster"),
                marks=pytest.mark.redis_cluster,
            ),
            pytest.param(
                "mongodb://localhost:37017/",
                {},
                pytest.lazy_fixture("mongodb"),
                marks=pytest.mark.mongodb,
            ),
        ],
    )
    def test_storage_check(self, uri, args, fixture):
        """Every provisioned backend reports healthy via check()."""
        assert storage_from_string(uri, **args).check()
    def test_pluggable_storage_no_moving_window(self):
        """A custom Storage without moving-window hooks is registered by its
        scheme, but using it with MovingWindowRateLimiter must fail."""
        # Minimal Storage implementation: fixed-window hooks only.
        class MyStorage(Storage):
            STORAGE_SCHEME = ["mystorage"]
            def incr(self, key, expiry, elastic_expiry=False):
                return
            def get(self, key):
                return 0
            def get_expiry(self, key):
                return time.time()
            def reset(self):
                return
            def check(self):
                return
            def clear(self):
                return
        storage = storage_from_string("mystorage://")
        assert isinstance(storage, MyStorage)
        with pytest.raises(NotImplementedError):
            MovingWindowRateLimiter(storage)
    def test_pluggable_storage_moving_window(self):
        """A custom Storage that also implements acquire_entry and
        get_moving_window is accepted by MovingWindowRateLimiter."""
        class MyStorage(Storage):
            STORAGE_SCHEME = ["mystorage"]
            def incr(self, key, expiry, elastic_expiry=False):
                return
            def get(self, key):
                return 0
            def get_expiry(self, key):
                return time.time()
            def reset(self):
                return
            def check(self):
                return
            def clear(self):
                return
            # Moving-window hooks: their presence is what the limiter checks.
            def acquire_entry(self, *a, **k):
                return True
            def get_moving_window(self, *a, **k):
                return (time.time(), 1)
        storage = storage_from_string("mystorage://")
        assert isinstance(storage, MyStorage)
        MovingWindowRateLimiter(storage)
| 32.272358 | 84 | 0.493513 | import time
import pytest
from limits.errors import ConfigurationError
from limits.storage import (
MemcachedStorage,
MemoryStorage,
MongoDBStorage,
RedisClusterStorage,
RedisSentinelStorage,
RedisStorage,
Storage,
storage_from_string,
)
from limits.strategies import MovingWindowRateLimiter
class TestBaseStorage:
@pytest.mark.parametrize(
"uri, args, expected_instance, fixture",
[
("memory://", {}, MemoryStorage, None),
pytest.param(
"redis://localhost:7379",
{},
RedisStorage,
pytest.lazy_fixture("redis_basic"),
marks=pytest.mark.redis,
),
pytest.param(
"redis+unix:///tmp/limits.redis.sock",
{},
RedisStorage,
pytest.lazy_fixture("redis_uds"),
marks=pytest.mark.redis,
),
pytest.param(
"redis+unix://:password/tmp/limits.redis.sock",
{},
RedisStorage,
pytest.lazy_fixture("redis_uds"),
marks=pytest.mark.redis,
),
pytest.param(
"memcached://localhost:22122",
{},
MemcachedStorage,
pytest.lazy_fixture("memcached"),
marks=pytest.mark.memcached,
),
pytest.param(
"memcached://localhost:22122,localhost:22123",
{},
MemcachedStorage,
pytest.lazy_fixture("memcached_cluster"),
marks=pytest.mark.memcached,
),
pytest.param(
"memcached:///tmp/limits.memcached.sock",
{},
MemcachedStorage,
pytest.lazy_fixture("memcached_uds"),
marks=pytest.mark.memcached,
),
pytest.param(
"redis+sentinel://localhost:26379",
{"service_name": "localhost-redis-sentinel"},
RedisSentinelStorage,
pytest.lazy_fixture("redis_sentinel"),
marks=pytest.mark.redis_sentinel,
),
pytest.param(
"redis+sentinel://localhost:26379/localhost-redis-sentinel",
{},
RedisSentinelStorage,
pytest.lazy_fixture("redis_sentinel"),
marks=pytest.mark.redis_sentinel,
),
pytest.param(
"redis+sentinel://:sekret@localhost:26379/localhost-redis-sentinel",
{},
RedisSentinelStorage,
pytest.lazy_fixture("redis_sentinel_auth"),
marks=pytest.mark.redis_sentinel,
),
pytest.param(
"redis+cluster://localhost:7001/",
{},
RedisClusterStorage,
pytest.lazy_fixture("redis_cluster"),
marks=pytest.mark.redis_cluster,
),
pytest.param(
"mongodb://localhost:37017/",
{},
MongoDBStorage,
pytest.lazy_fixture("mongodb"),
marks=pytest.mark.mongodb,
),
],
)
def test_storage_string(self, uri, args, expected_instance, fixture):
assert isinstance(storage_from_string(uri, **args), expected_instance)
@pytest.mark.parametrize(
"uri, args", [("blah://", {}), ("redis+sentinel://localhost:26379", {})]
)
def test_invalid_storage_string(self, uri, args):
with pytest.raises(ConfigurationError):
storage_from_string(uri, **args)
@pytest.mark.parametrize(
"uri, args, fixture",
[
("memory://", {}, None),
pytest.param(
"redis://localhost:7379",
{},
pytest.lazy_fixture("redis_basic"),
marks=pytest.mark.redis,
),
pytest.param(
"redis+unix:///tmp/limits.redis.sock",
{},
pytest.lazy_fixture("redis_uds"),
marks=pytest.mark.redis,
),
pytest.param(
"redis+unix://:password/tmp/limits.redis.sock",
{},
pytest.lazy_fixture("redis_uds"),
marks=pytest.mark.redis,
),
pytest.param(
"memcached://localhost:22122",
{},
pytest.lazy_fixture("memcached"),
marks=pytest.mark.memcached,
),
pytest.param(
"memcached://localhost:22122,localhost:22123",
{},
pytest.lazy_fixture("memcached_cluster"),
marks=pytest.mark.memcached,
),
pytest.param(
"memcached:///tmp/limits.memcached.sock",
{},
pytest.lazy_fixture("memcached_uds"),
marks=pytest.mark.memcached,
),
pytest.param(
"redis+sentinel://localhost:26379",
{"service_name": "localhost-redis-sentinel"},
pytest.lazy_fixture("redis_sentinel"),
marks=pytest.mark.redis_sentinel,
),
pytest.param(
"redis+sentinel://localhost:26379/localhost-redis-sentinel",
{},
pytest.lazy_fixture("redis_sentinel"),
marks=pytest.mark.redis_sentinel,
),
pytest.param(
"redis+sentinel://:sekret@localhost:36379/localhost-redis-sentinel",
{},
pytest.lazy_fixture("redis_sentinel_auth"),
marks=pytest.mark.redis_sentinel,
),
pytest.param(
"redis+cluster://localhost:7001/",
{},
pytest.lazy_fixture("redis_cluster"),
marks=pytest.mark.redis_cluster,
),
pytest.param(
"mongodb://localhost:37017/",
{},
pytest.lazy_fixture("mongodb"),
marks=pytest.mark.mongodb,
),
],
)
def test_storage_check(self, uri, args, fixture):
assert storage_from_string(uri, **args).check()
def test_pluggable_storage_no_moving_window(self):
class MyStorage(Storage):
STORAGE_SCHEME = ["mystorage"]
def incr(self, key, expiry, elastic_expiry=False):
return
def get(self, key):
return 0
def get_expiry(self, key):
return time.time()
def reset(self):
return
def check(self):
return
def clear(self):
return
storage = storage_from_string("mystorage://")
assert isinstance(storage, MyStorage)
with pytest.raises(NotImplementedError):
MovingWindowRateLimiter(storage)
def test_pluggable_storage_moving_window(self):
class MyStorage(Storage):
STORAGE_SCHEME = ["mystorage"]
def incr(self, key, expiry, elastic_expiry=False):
return
def get(self, key):
return 0
def get_expiry(self, key):
return time.time()
def reset(self):
return
def check(self):
return
def clear(self):
return
def acquire_entry(self, *a, **k):
return True
def get_moving_window(self, *a, **k):
return (time.time(), 1)
storage = storage_from_string("mystorage://")
assert isinstance(storage, MyStorage)
MovingWindowRateLimiter(storage)
| true | true |
f71b336c74bf785c71596fc3f4e1c0603495a240 | 37,268 | py | Python | desktop/libs/indexer/src/indexer/indexers/sql_tests.py | taklwu/hue | db661408f8fd206557b3d98670cf5edc4d52f869 | [
"Apache-2.0"
] | 1 | 2020-06-22T10:20:52.000Z | 2020-06-22T10:20:52.000Z | desktop/libs/indexer/src/indexer/indexers/sql_tests.py | taklwu/hue | db661408f8fd206557b3d98670cf5edc4d52f869 | [
"Apache-2.0"
] | null | null | null | desktop/libs/indexer/src/indexer/indexers/sql_tests.py | taklwu/hue | db661408f8fd206557b3d98670cf5edc4d52f869 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from builtins import object
import json
from mock import patch, Mock, MagicMock
from nose.tools import assert_equal, assert_true
from desktop.lib.django_test_util import make_logged_in_client
from useradmin.models import User
from indexer.indexers.sql import SQLIndexer
class MockRequest(object):
  """Lightweight stand-in for a Django request used by the importer tests.

  Exposes the two attributes SQLIndexer reads: ``fs`` (a filesystem facade,
  defaulting to MockFs) and ``user``. When no user is supplied, a
  non-superuser ``test_importer`` account is created through a logged-in
  test client, which is kept on ``c`` for tests that need it.

  Fix over the previous version: ``c`` is now assigned on both branches, so
  reading it never raises AttributeError when a user was passed in.
  """

  def __init__(self, fs=None, user=None):
    self.fs = fs if fs is not None else MockFs()
    if user is None:
      # Create (or reuse) the importer test account via a logged-in client.
      self.c = make_logged_in_client(username='test_importer', is_superuser=False)
      self.user = User.objects.get(username='test_importer')
    else:
      self.c = None
      self.user = user
class MockFs(object):
  """Minimal fake filesystem facade for the importer tests.

  Every query is answered from the ``path`` dict supplied at construction;
  when omitted, a canned single-file layout rooted at ``/A`` is used.
  """

  def __init__(self, path=None):
    default_layout = {'isDir': False, 'split': ('/A', 'a'), 'listdir': ['/A'], 'parent_path': '/A'}
    self.path = default_layout if path is None else path

  def isdir(self, path):
    # Whether the canned path behaves like a directory.
    return self.path['isDir']

  def split(self, path):
    return self.path['split']

  def listdir(self, path):
    return self.path['listdir']

  def parent_path(self, path):
    return self.path['parent_path']

  def stats(self, path):
    # World-readable/writable, mimicking a permissive HDFS entry.
    return {"mode": 0o0777}
def test_generate_create_text_table_with_data_partition():
  """SQLIndexer builds a partitioned Hive text-table DDL plus a LOAD DATA
  statement from an importer source/destination spec.

  The two dicts below are captured importer wizard payloads: ``source``
  describes the sampled CSV file, ``destination`` the target table (text
  format, one static partition column ``new_field_1`` with value 'AAA').
  """
  # Captured wizard payload for the sampled CSV source file.
  source = {u'sourceType': 'hive', u'sampleCols': [{u'operations': [], u'comment': u'', u'name': u'customers.id', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False, u'unique': False, u'type': u'bigint', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'customers.name', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'customers.email_preferences', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'customers.addresses', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'customers.orders', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}], u'name': u'', u'inputFormat': u'file', u'format': {u'status': 0, u'fieldSeparator': u',', u'hasHeader': True, u'quoteChar': u'"', u'recordSeparator': u'\\n', u'type': u'csv'}, u'defaultName': u'default.customer_stats', u'show': True, u'tableName': u'', u'sample': [], u'apiHelperType': u'hive', u'inputFormatsAll': [{u'name': u'File', u'value': u'file'}, {u'name': u'Manually', u'value': u'manual'}, {u'name': u'SQL Query', u'value': u'query'}, {u'name': u'Table', u'value': u'table'}], u'query': u'', u'databaseName': u'default', u'table': u'', u'inputFormats': [{u'name': u'File', u'value': u'file'}, {u'name': u'Manually', u'value': u'manual'}, {u'name': u'SQL Query', u'value': u'query'}, {u'name': u'Table', u'value': u'table'}], u'path': u'/user/romain/customer_stats.csv', u'draggedQuery': u'', u'inputFormatsManual': [{u'name': u'Manually', u'value': u'manual'}], u'isObjectStore': False}
  # Captured wizard payload for the partitioned text-table destination.
  destination = {u'isTransactional': False, u'isInsertOnly': False, u'sourceType': 'hive', u'KUDU_DEFAULT_PARTITION_COLUMN': {u'int_val': 16, u'name': u'HASH', u'columns': [], u'range_partitions': [{u'include_upper_val': u'<=', u'upper_val': 1, u'name': u'VALUES', u'include_lower_val': u'<=', u'lower_val': 0, u'values': [{u'value': u''}]}]}, u'isTargetChecking': False, u'tableName': u'customer_stats', u'outputFormatsList': [{u'name': u'Table', u'value': u'table'}, {u'name': u'Solr index', u'value': u'index'}, {u'name': u'File', u'value': u'file'}, {u'name': u'Database', u'value': u'database'}], u'customRegexp': u'', u'isTargetExisting': False, u'partitionColumns': [{u'operations': [], u'comment': u'', u'name': u'new_field_1', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': True, u'length': 100, u'partitionValue': u'AAA', u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}], u'useCustomDelimiters': False, u'apiHelperType': u'hive', u'kuduPartitionColumns': [], u'outputFormats': [{u'name': u'Table', u'value': u'table'}, {u'name': u'Solr index', u'value': u'index'}], u'customMapDelimiter': u'\\003', u'showProperties': False, u'useDefaultLocation': True, u'description': u'', u'primaryKeyObjects': [], u'customFieldDelimiter': u',', u'existingTargetUrl': u'', u'importData': True, u'databaseName': u'default', u'KUDU_DEFAULT_RANGE_PARTITION_COLUMN': {u'include_upper_val': u'<=', u'upper_val': 1, u'name': u'VALUES', u'include_lower_val': u'<=', u'lower_val': 0, u'values': [{u'value': u''}]}, u'primaryKeys': [], u'outputFormat': u'table', u'nonDefaultLocation': u'/user/romain/customer_stats.csv', u'name': u'default.customer_stats', u'tableFormat': u'text', 'ouputFormat': u'table', u'bulkColumnNames': u'customers.id,customers.name,customers.email_preferences,customers.addresses,customers.orders', u'columns': [{u'operations': [], u'comment': u'', u'name': u'customers.id', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False, u'unique': False, u'type': u'bigint', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'customers.name', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'customers.email_preferences', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'customers.addresses', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'customers.orders', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}], u'hasHeader': True, u'tableFormats': [{u'name': u'Text', u'value': u'text'}, {u'name': u'Parquet', u'value': u'parquet'}, {u'name': u'Kudu', u'value': u'kudu'}, {u'name': u'Csv', u'value': u'csv'}, {u'name': u'Avro', u'value': u'avro'}, {u'name': u'Json', u'value': u'json'}, {u'name': u'Regexp', u'value': u'regexp'}, {u'name': u'ORC', u'value': u'orc'}], u'customCollectionDelimiter': u'\\002'}
  request = MockRequest(fs=MockFs())
  # Render the full statement batch the indexer would submit.
  sql = SQLIndexer(user=request.user, fs=request.fs).create_table_from_a_file(source, destination).get_str()
  assert_true('''USE default;''' in sql, sql)
  # Text table with the static partition column and header-skip property.
  assert_true('''CREATE TABLE `default`.`customer_stats`
(
  `customers.id` bigint ,
  `customers.name` string ,
  `customers.email_preferences` string ,
  `customers.addresses` string ,
  `customers.orders` string ) PARTITIONED BY (
  `new_field_1` string )
ROW FORMAT DELIMITED
  FIELDS TERMINATED BY ','
  COLLECTION ITEMS TERMINATED BY '\\002'
  MAP KEYS TERMINATED BY '\\003'
  STORED AS TextFile TBLPROPERTIES("transactional" = "false", "skip.header.line.count" = "1")
;''' in sql, sql)
  # Data is loaded straight into the static 'AAA' partition.
  assert_true('''LOAD DATA INPATH '/user/romain/customer_stats.csv' INTO TABLE `default`.`customer_stats` PARTITION (new_field_1='AAA');''' in sql, sql)
def test_generate_create_kudu_table_with_data():
  """CSV file -> Kudu table: a text temp table is created, then CTAS into Kudu.

  The destination declares `tableFormat: kudu`, primary key `id`, a HASH
  partition on `id` with 16 buckets, and marks the `cool` column `keep: False`
  — so `cool` must be absent from the final SELECT column list.
  """
  # Input fixture: a headered CSV at /user/admin/index_data.csv, Impala dialect.
  source = {u'sourceType': 'impala', u'apiHelperType': 'hive', u'sampleCols': [], u'name': u'', u'inputFormat': u'file', u'format': {u'quoteChar': u'"', u'recordSeparator': u'\\n', u'type': u'csv', u'hasHeader': True, u'fieldSeparator': u','}, u'show': True, u'tableName': u'', u'sample': [], u'defaultName': u'index_data', u'query': u'', u'databaseName': u'default', u'table': u'', u'inputFormats': [{u'name': u'File', u'value': u'file'}, {u'name': u'Manually', u'value': u'manual'}], u'path': u'/user/admin/index_data.csv', u'draggedQuery': u'', u'isObjectStore': False}
  # Output fixture: Kudu table `default.index_data` with PK `id`; note
  # `funny` is decimal(precision=10, scale=4) and `cool` has keep=False.
  destination = {u'isTransactional': False, u'isInsertOnly': False, u'sourceType': 'impala', u'KUDU_DEFAULT_PARTITION_COLUMN': {u'int_val': 16, u'name': u'HASH', u'columns': [], u'range_partitions': [{u'include_upper_val': u'<=', u'upper_val': 1, u'name': u'VALUES', u'include_lower_val': u'<=', u'lower_val': 0, u'values': [{u'value': u''}]}]}, u'tableName': u'index_data', u'outputFormatsList': [{u'name': u'Table', u'value': u'table'}, {u'name': u'Solr+index', u'value': u'index'}, {u'name': u'File', u'value': u'file'}, {u'name': u'Database', u'value': u'database'}], u'customRegexp': u'', u'isTargetExisting': False, u'partitionColumns': [], u'useCustomDelimiters': True, u'kuduPartitionColumns': [{u'int_val': 16, u'name': u'HASH', u'columns': [u'id'], u'range_partitions': [{u'include_upper_val': u'<=', u'upper_val': 1, u'name': u'VALUES', u'include_lower_val': u'<=', u'lower_val': 0, u'values': [{u'value': u''}]}]}], u'outputFormats': [{u'name': u'Table', u'value': u'table'}, {u'name': u'Solr+index', u'value': u'index'}], u'customMapDelimiter': None, u'showProperties': False, u'useDefaultLocation': True, u'description': u'Big Data', u'primaryKeyObjects': [{u'operations': [], u'comment': u'', u'name': u'id', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}], u'customFieldDelimiter': u',', u'existingTargetUrl': u'', u'importData': True, u'databaseName': u'default', u'KUDU_DEFAULT_RANGE_PARTITION_COLUMN': {u'include_upper_val': u'<=', u'upper_val': 1, u'name': u'VALUES', u'include_lower_val': u'<=', u'lower_val': 0, u'values': [{u'value': u''}]}, u'primaryKeys': [u'id'], u'outputFormat': u'table', u'nonDefaultLocation': u'/user/admin/index_data.csv', u'name': u'index_data', u'tableFormat': u'kudu', u'bulkColumnNames': u'business_id,cool,date,funny,id,stars,text,type,useful,user_id,name,full_address,latitude,longitude,neighborhoods,open,review_count,state', u'columns': [{u'operations': [], u'comment': u'', u'name': u'business_id', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'cool', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'bigint', u'showProperties': False, u'keep': False}, {u'operations': [], u'comment': u'', u'name': u'date', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'funny', u'level': 0, u'scale':4, u'precision':10, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'decimal', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'id', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'stars', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'bigint', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'text', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'type', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'useful', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'bigint', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'user_id', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'name', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'full_address', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'latitude', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'double', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'longitude', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'double', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'neighborhoods', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'open', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'review_count', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'bigint', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'state', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}], u'hasHeader': True, u'tableFormats': [{u'name': u'Text', u'value': u'text'}, {u'name': u'Parquet', u'value': u'parquet'}, {u'name': u'Json', u'value': u'json'}, {u'name': u'Kudu', u'value': u'kudu'}, {u'name': u'Avro', u'value': u'avro'}, {u'name': u'Regexp', u'value': u'regexp'}, {u'name': u'RCFile', u'value': u'rcfile'}, {u'name': u'ORC', u'value': u'orc'}, {u'name': u'SequenceFile', u'value': u'sequencefile'}], u'customCollectionDelimiter': None}
  request = MockRequest(fs=MockFs())
  sql = SQLIndexer(user=request.user, fs=request.fs).create_table_from_a_file(source, destination).get_str()
  # Temp table is dropped first, then re-created as EXTERNAL text over the CSV.
  assert_true('''DROP TABLE IF EXISTS `default`.`hue__tmp_index_data`;''' in sql, sql)
  assert_true('''CREATE EXTERNAL TABLE `default`.`hue__tmp_index_data`
(
`business_id` string ,
`cool` bigint ,
`date` string ,
`funny` decimal(10, 4) ,
`id` string ,
`stars` bigint ,
`text` string ,
`type` string ,
`useful` bigint ,
`user_id` string ,
`name` string ,
`full_address` string ,
`latitude` double ,
`longitude` double ,
`neighborhoods` string ,
`open` string ,
`review_count` bigint ,
`state` string ) COMMENT "Big Data"
ROW FORMAT DELIMITED
FIELDS TERMINATED BY ','
STORED AS TextFile LOCATION '/A'
TBLPROPERTIES("transactional" = "false", "skip.header.line.count" = "1")''' in sql, sql)
  # Final CTAS into Kudu: PK first in the SELECT list, `cool` (keep=False) omitted.
  assert_true('''CREATE TABLE `default`.`index_data` COMMENT "Big Data"
PRIMARY KEY (id)
PARTITION BY HASH PARTITIONS 16
STORED AS kudu
TBLPROPERTIES(
'kudu.num_tablet_replicas' = '1'
)
AS SELECT `id`, `business_id`, `date`, `funny`, `stars`, `text`, `type`, `useful`, `user_id`, `name`, `full_address`, `latitude`, `longitude`, `neighborhoods`, `open`, `review_count`, `state`
FROM `default`.`hue__tmp_index_data`;''' in sql, sql)
def test_generate_create_parquet_table():
  """CSV file -> Parquet table: external text temp table, CTAS to Parquet, drop temp.

  Fixtures are JSON blobs (as posted by the importer UI); `tableFormat` is
  `parquet` and the destination is non-transactional, so the final CTAS
  carries no TBLPROPERTIES.
  """
  # Input fixture: headered CSV at /user/hue/data/query-hive-360.csv, Hive dialect.
  source = json.loads('''{"sourceType": "hive", "name":"","sample":[["Bank Of America","3000000.0","US","Miami","37.6801986694","-121.92150116"],["Citi Bank","2800000.0","US","Richmond","37.5242004395","-77.4932022095"],["Deutsche Bank","2600000.0","US","Corpus Christi","40.7807998657","-73.9772033691"],["Thomson Reuters","2400000.0","US","Albany","35.7976989746","-78.6252975464"],["OpenX","2200000.0","US","Des Moines","40.5411987305","-119.586898804"]],"sampleCols":[{"operations":[],"comment":"","nested":[],"name":"acct_client","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_amount","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_country_cd","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lat","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lon","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0}],"inputFormat":"file","inputFormatsAll":[{"value":"file","name":"File"},{"value":"manual","name":"Manually"},{"value":"query","name":"SQL Query"},{"value":"table","name":"Table"}],"inputFormatsManual":[{"value":"manual","name":"Manually"}],"inputFormats":[{"value":"file","name":"File"},{"value":"manual","name":"Manually"},{"value":"query","name":"SQL Query"},{"value":"table","name":"Table"}],"path":"/user/hue/data/query-hive-360.csv","isObjectStore":false,"table":"","tableName":"","databaseName":"default","apiHelperType":"hive","query":"","draggedQuery":"","format":{"type":"csv","fieldSeparator":",","recordSeparator":"\\n","quoteChar":"\\"","hasHeader":true,"status":0},"show":true,"defaultName":"default.query-hive-360"}''')
  # Output fixture: `default.parquet_table`, tableFormat=parquet, importData=true.
  destination = json.loads('''{"isTransactional": false, "isInsertOnly": false, "sourceType": "hive", "name":"default.parquet_table","apiHelperType":"hive","description":"","outputFormat":"table","outputFormatsList":[{"name":"Table","value":"table"},{"name":"Solr index","value":"index"},{"name":"File","value":"file"},{"name":"Database","value":"database"}],"outputFormats":[{"name":"Table","value":"table"},{"name":"Solr index","value":"index"}],"columns":[{"operations":[],"comment":"","nested":[],"name":"acct_client","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_amount","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_country_cd","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lat","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lon","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0}],"bulkColumnNames":"acct_client,tran_amount,tran_country_cd,vrfcn_city,vrfcn_city_lat,vrfcn_city_lon","showProperties":false,"isTargetExisting":false,"isTargetChecking":false,"existingTargetUrl":"","tableName":"parquet_table","databaseName":"default","tableFormat":"parquet","KUDU_DEFAULT_RANGE_PARTITION_COLUMN":{"values":[{"value":""}],"name":"VALUES","lower_val":0,"include_lower_val":"<=","upper_val":1,"include_upper_val":"<="},"KUDU_DEFAULT_PARTITION_COLUMN":{"columns":[],"range_partitions":[{"values":[{"value":""}],"name":"VALUES","lower_val":0,"include_lower_val":"<=","upper_val":1,"include_upper_val":"<="}],"name":"HASH","int_val":16},"tableFormats":[{"value":"text","name":"Text"},{"value":"parquet","name":"Parquet"},{"value":"kudu","name":"Kudu"},{"value":"csv","name":"Csv"},{"value":"avro","name":"Avro"},{"value":"json","name":"Json"},{"value":"regexp","name":"Regexp"},{"value":"orc","name":"ORC"}],"partitionColumns":[],"kuduPartitionColumns":[],"primaryKeys":[],"primaryKeyObjects":[],"importData":true,"useDefaultLocation":true,"nonDefaultLocation":"/user/hue/data/query-hive-360.csv","hasHeader":true,"useCustomDelimiters":false,"customFieldDelimiter":",","customCollectionDelimiter":"\\\\002","customMapDelimiter":"\\\\003","customRegexp":""}''')
  # MockFs answers with the CSV's parent dir so the external LOCATION is '/user/hue/data'.
  path = {'isDir': False, 'split': ('/user/hue/data', 'query-hive-360.csv'), 'listdir': ['/user/hue/data']}
  request = MockRequest(fs=MockFs(path=path))
  sql = SQLIndexer(user=request.user, fs=request.fs).create_table_from_a_file(source, destination).get_str()
  assert_true('''USE default;''' in sql, sql)
  assert_true('''CREATE EXTERNAL TABLE `default`.`hue__tmp_parquet_table`
(
`acct_client` string ,
`tran_amount` double ,
`tran_country_cd` string ,
`vrfcn_city` string ,
`vrfcn_city_lat` double ,
`vrfcn_city_lon` double ) ROW FORMAT DELIMITED
FIELDS TERMINATED BY ','
COLLECTION ITEMS TERMINATED BY '\\002'
MAP KEYS TERMINATED BY '\\003'
STORED AS TextFile LOCATION '/user/hue/data'
TBLPROPERTIES("transactional" = "false", "skip.header.line.count" = "1")
;''' in sql, sql)
  # CTAS into Parquet, then the temp table is dropped.
  assert_true('''CREATE TABLE `default`.`parquet_table`
STORED AS parquet
AS SELECT *
FROM `default`.`hue__tmp_parquet_table`;
''' in sql, sql)
  assert_true('''DROP TABLE IF EXISTS `default`.`hue__tmp_parquet_table`;
''' in sql, sql)
def test_generate_create_orc_table_transactional():
  """CSV file -> transactional ORC table.

  Same source fixture as the parquet test, but the destination sets
  `isTransactional`/`isInsertOnly` to true and `tableFormat` to `orc`: the
  temp external table stays "transactional"="false", while the final CTAS
  must add TBLPROPERTIES("transactional"="true",
  "transactional_properties"="insert_only").
  """
  # Input fixture: headered CSV at /user/hue/data/query-hive-360.csv, Hive dialect.
  source = json.loads('''{"sourceType": "hive", "name":"","sample":[["Bank Of America","3000000.0","US","Miami","37.6801986694","-121.92150116"],["Citi Bank","2800000.0","US","Richmond","37.5242004395","-77.4932022095"],["Deutsche Bank","2600000.0","US","Corpus Christi","40.7807998657","-73.9772033691"],["Thomson Reuters","2400000.0","US","Albany","35.7976989746","-78.6252975464"],["OpenX","2200000.0","US","Des Moines","40.5411987305","-119.586898804"]],"sampleCols":[{"operations":[],"comment":"","nested":[],"name":"acct_client","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_amount","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_country_cd","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lat","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lon","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0}],"inputFormat":"file","inputFormatsAll":[{"value":"file","name":"File"},{"value":"manual","name":"Manually"},{"value":"query","name":"SQL Query"},{"value":"table","name":"Table"}],"inputFormatsManual":[{"value":"manual","name":"Manually"}],"inputFormats":[{"value":"file","name":"File"},{"value":"manual","name":"Manually"},{"value":"query","name":"SQL Query"},{"value":"table","name":"Table"}],"path":"/user/hue/data/query-hive-360.csv","isObjectStore":false,"table":"","tableName":"","databaseName":"default","apiHelperType":"hive","query":"","draggedQuery":"","format":{"type":"csv","fieldSeparator":",","recordSeparator":"\\n","quoteChar":"\\"","hasHeader":true,"status":0},"show":true,"defaultName":"default.query-hive-360"}''')
  # Output fixture: insert-only transactional ORC table `default.parquet_table`.
  destination = json.loads('''{"isTransactional": true, "isInsertOnly": true, "sourceType": "hive", "name":"default.parquet_table","apiHelperType":"hive","description":"","outputFormat":"table","outputFormatsList":[{"name":"Table","value":"table"},{"name":"Solr index","value":"index"},{"name":"File","value":"file"},{"name":"Database","value":"database"}],"outputFormats":[{"name":"Table","value":"table"},{"name":"Solr index","value":"index"}],"columns":[{"operations":[],"comment":"","nested":[],"name":"acct_client","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_amount","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_country_cd","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lat","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lon","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0}],"bulkColumnNames":"acct_client,tran_amount,tran_country_cd,vrfcn_city,vrfcn_city_lat,vrfcn_city_lon","showProperties":false,"isTargetExisting":false,"isTargetChecking":false,"existingTargetUrl":"","tableName":"parquet_table","databaseName":"default","tableFormat":"orc","KUDU_DEFAULT_RANGE_PARTITION_COLUMN":{"values":[{"value":""}],"name":"VALUES","lower_val":0,"include_lower_val":"<=","upper_val":1,"include_upper_val":"<="},"KUDU_DEFAULT_PARTITION_COLUMN":{"columns":[],"range_partitions":[{"values":[{"value":""}],"name":"VALUES","lower_val":0,"include_lower_val":"<=","upper_val":1,"include_upper_val":"<="}],"name":"HASH","int_val":16},"tableFormats":[{"value":"text","name":"Text"},{"value":"parquet","name":"Parquet"},{"value":"kudu","name":"Kudu"},{"value":"csv","name":"Csv"},{"value":"avro","name":"Avro"},{"value":"json","name":"Json"},{"value":"regexp","name":"Regexp"},{"value":"orc","name":"ORC"}],"partitionColumns":[],"kuduPartitionColumns":[],"primaryKeys":[],"primaryKeyObjects":[],"importData":true,"useDefaultLocation":true,"nonDefaultLocation":"/user/hue/data/query-hive-360.csv","hasHeader":true,"useCustomDelimiters":false,"customFieldDelimiter":",","customCollectionDelimiter":"\\\\002","customMapDelimiter":"\\\\003","customRegexp":""}''')
  # MockFs answers with the CSV's parent dir so the external LOCATION is '/user/hue/data'.
  path = {'isDir': False, 'split': ('/user/hue/data', 'query-hive-360.csv'), 'listdir': ['/user/hue/data']}
  request = MockRequest(fs=MockFs(path=path))
  sql = SQLIndexer(user=request.user, fs=request.fs).create_table_from_a_file(source, destination).get_str()
  assert_true('''USE default;''' in sql, sql)
  assert_true('''CREATE EXTERNAL TABLE `default`.`hue__tmp_parquet_table`
(
`acct_client` string ,
`tran_amount` double ,
`tran_country_cd` string ,
`vrfcn_city` string ,
`vrfcn_city_lat` double ,
`vrfcn_city_lon` double ) ROW FORMAT DELIMITED
FIELDS TERMINATED BY ','
COLLECTION ITEMS TERMINATED BY '\\002'
MAP KEYS TERMINATED BY '\\003'
STORED AS TextFile LOCATION '/user/hue/data'
TBLPROPERTIES("transactional" = "false", "skip.header.line.count" = "1")
;''' in sql, sql)
  # The final table is ORC with insert-only ACID properties; temp table dropped after.
  assert_true('''CREATE TABLE `default`.`parquet_table`
STORED AS orc
TBLPROPERTIES("transactional"="true", "transactional_properties"="insert_only")
AS SELECT *
FROM `default`.`hue__tmp_parquet_table`;
''' in sql, sql)
  assert_true('''DROP TABLE IF EXISTS `default`.`hue__tmp_parquet_table`;
''' in sql, sql)
def test_generate_create_empty_kudu_table():
  """Manual (no file) input -> empty Kudu table created directly, no temp table.

  `inputFormat` is "manual" and `importData` is false, so no LOAD/CTAS is
  expected — only a single CREATE TABLE with the PRIMARY KEY clause inline.
  NOTE(review): the destination's `name` ("default.manual_empty_kudu"), not
  its `tableName` ("manual_kudu_table"), supplies the DDL identifier — the
  assertion below confirms that.
  """
  source = json.loads('''{"sourceType": "impala", "apiHelperType": "impala", "path": "", "inputFormat": "manual"}''')
  # Output fixture: Kudu table with primary key acct_client, importData=false.
  destination = json.loads('''{"isTransactional": false, "isInsertOnly": false, "sourceType": "impala", "name":"default.manual_empty_kudu","apiHelperType":"impala","description":"","outputFormat":"table","columns":[{"operations":[],"comment":"","nested":[],"name":"acct_client","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_amount","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_country_cd","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lat","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lon","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0}],"bulkColumnNames":"acct_client,tran_amount,tran_country_cd,vrfcn_city,vrfcn_city_lat,vrfcn_city_lon","showProperties":false,"isTargetExisting":false,"isTargetChecking":false,"existingTargetUrl":"","tableName":"manual_kudu_table","databaseName":"default","tableFormat":"kudu","KUDU_DEFAULT_RANGE_PARTITION_COLUMN":{"values":[{"value":""}],"name":"VALUES","lower_val":0,"include_lower_val":"<=","upper_val":1,"include_upper_val":"<="},"KUDU_DEFAULT_PARTITION_COLUMN":{"columns":[],"range_partitions":[{"values":[{"value":""}],"name":"VALUES","lower_val":0,"include_lower_val":"<=","upper_val":1,"include_upper_val":"<="}],"name":"HASH","int_val":16},"tableFormats":[{"value":"text","name":"Text"},{"value":"parquet","name":"Parquet"},{"value":"kudu","name":"Kudu"},{"value":"csv","name":"Csv"},{"value":"avro","name":"Avro"},{"value":"json","name":"Json"},{"value":"regexp","name":"Regexp"},{"value":"orc","name":"ORC"}],"partitionColumns":[],"kuduPartitionColumns":[],"primaryKeys": ["acct_client"],"primaryKeyObjects":[],"importData":false,"useDefaultLocation":true,"nonDefaultLocation":"/user/hue/data/query-hive-360.csv","hasHeader":false,"useCustomDelimiters":false,"customFieldDelimiter":",","customCollectionDelimiter":"\\\\002","customMapDelimiter":"\\\\003","customRegexp":""}''')
  path = {'isDir': False, 'split': ('/user/hue/data', 'query-hive-360.csv'), 'listdir': ['/user/hue/data']}
  request = MockRequest(fs=MockFs(path=path))
  sql = SQLIndexer(user=request.user, fs=request.fs).create_table_from_a_file(source, destination).get_str()
  # Single direct CREATE TABLE: columns plus inline PRIMARY KEY, stored as Kudu.
  assert_true('''CREATE TABLE `default`.`manual_empty_kudu`
(
`acct_client` string ,
`tran_amount` double ,
`tran_country_cd` string ,
`vrfcn_city` string ,
`vrfcn_city_lat` double ,
`vrfcn_city_lon` double , PRIMARY KEY (acct_client)
) STORED AS kudu TBLPROPERTIES("transactional" = "false")
;''' in sql, sql)
| 165.635556 | 7,415 | 0.674305 |
from builtins import object
import json
from mock import patch, Mock, MagicMock
from nose.tools import assert_equal, assert_true
from desktop.lib.django_test_util import make_logged_in_client
from useradmin.models import User
from indexer.indexers.sql import SQLIndexer
class MockRequest(object):
  """Lightweight stand-in for a request object: exposes `.fs` and `.user`.

  When no user is supplied, a non-superuser test account is created through
  a logged-in Django test client and fetched from the user model.
  """

  def __init__(self, fs=None, user=None):
    # Fall back to the canned filesystem double when none is given.
    if fs is None:
      fs = MockFs()
    self.fs = fs
    if user is not None:
      self.user = user
    else:
      # Create/login the test account and resolve its User record.
      self.c = make_logged_in_client(username='test_importer', is_superuser=False)
      self.user = User.objects.get(username='test_importer')
class MockFs(object):
  """Fake filesystem: every query is answered from one canned `path` record.

  The record's keys mirror the method names (`isDir`, `split`, `listdir`,
  `parent_path`); the path argument passed to each method is ignored.
  """

  def __init__(self, path=None):
    # Build a fresh default record per instance (safe for per-test mutation).
    if path is None:
      path = {'isDir': False, 'split': ('/A', 'a'), 'listdir': ['/A'], 'parent_path': '/A'}
    self.path = path

  def isdir(self, path):
    return self.path['isDir']

  def split(self, path):
    return self.path['split']

  def listdir(self, path):
    return self.path['listdir']

  def parent_path(self, path):
    return self.path['parent_path']

  def stats(self, path):
    # World-accessible mode bits, sufficient for any permission check.
    return {"mode": 0o0777}
def test_generate_create_text_table_with_data_partition():
  """CSV import with a static partition column (Hive).

  Feeds the importer a CSV `source` payload plus a `destination` payload
  declaring one partition column (`new_field_1` = 'AAA'); asserts the
  generated SQL contains a partitioned TEXT-format CREATE TABLE (with
  header-skip TBLPROPERTIES) and a LOAD DATA statement targeting that
  partition.
  """
  # Importer wizard 'source' payload: file input, CSV format with header.
  source = {u'sourceType': 'hive', u'sampleCols': [{u'operations': [], u'comment': u'', u'name': u'customers.id', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False, u'unique': False, u'type': u'bigint', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'customers.name', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'customers.email_preferences', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'customers.addresses', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'customers.orders', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}], u'name': u'', u'inputFormat': u'file', u'format': {u'status': 0, u'fieldSeparator': u',', u'hasHeader': True, u'quoteChar': u'"', u'recordSeparator': u'\\n', u'type': u'csv'}, u'defaultName': u'default.customer_stats', u'show': True, u'tableName': u'', u'sample': [], u'apiHelperType': u'hive', u'inputFormatsAll': [{u'name': u'File', u'value': u'file'}, {u'name': u'Manually', u'value': u'manual'}, {u'name': u'SQL 
Query', u'value': u'query'}, {u'name': u'Table', u'value': u'table'}], u'query': u'', u'databaseName': u'default', u'table': u'', u'inputFormats': [{u'name': u'File', u'value': u'file'}, {u'name': u'Manually', u'value': u'manual'}, {u'name': u'SQL Query', u'value': u'query'}, {u'name': u'Table', u'value': u'table'}], u'path': u'/user/romain/customer_stats.csv', u'draggedQuery': u'', u'inputFormatsManual': [{u'name': u'Manually', u'value': u'manual'}], u'isObjectStore': False}
  # 'destination' payload: text table with one partition column
  # (new_field_1, static value 'AAA') and importData enabled.
  destination = {u'isTransactional': False, u'isInsertOnly': False, u'sourceType': 'hive', u'KUDU_DEFAULT_PARTITION_COLUMN': {u'int_val': 16, u'name': u'HASH', u'columns': [], u'range_partitions': [{u'include_upper_val': u'<=', u'upper_val': 1, u'name': u'VALUES', u'include_lower_val': u'<=', u'lower_val': 0, u'values': [{u'value': u''}]}]}, u'isTargetChecking': False, u'tableName': u'customer_stats', u'outputFormatsList': [{u'name': u'Table', u'value': u'table'}, {u'name': u'Solr index', u'value': u'index'}, {u'name': u'File', u'value': u'file'}, {u'name': u'Database', u'value': u'database'}], u'customRegexp': u'', u'isTargetExisting': False, u'partitionColumns': [{u'operations': [], u'comment': u'', u'name': u'new_field_1', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': True, u'length': 100, u'partitionValue': u'AAA', u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}], u'useCustomDelimiters': False, u'apiHelperType': u'hive', u'kuduPartitionColumns': [], u'outputFormats': [{u'name': u'Table', u'value': u'table'}, {u'name': u'Solr index', u'value': u'index'}], u'customMapDelimiter': u'\\003', u'showProperties': False, u'useDefaultLocation': True, u'description': u'', u'primaryKeyObjects': [], u'customFieldDelimiter': u',', u'existingTargetUrl': u'', u'importData': True, u'databaseName': u'default', u'KUDU_DEFAULT_RANGE_PARTITION_COLUMN': {u'include_upper_val': u'<=', u'upper_val': 1, u'name': u'VALUES', u'include_lower_val': u'<=', u'lower_val': 0, u'values': [{u'value': u''}]}, u'primaryKeys': [], u'outputFormat': u'table', u'nonDefaultLocation': u'/user/romain/customer_stats.csv', u'name': u'default.customer_stats', u'tableFormat': u'text', 'ouputFormat': u'table', u'bulkColumnNames': u'customers.id,customers.name,customers.email_preferences,customers.addresses,customers.orders', u'columns': [{u'operations': [], u'comment': u'', u'name': u'customers.id', u'level': 0, 
u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False, u'unique': False, u'type': u'bigint', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'customers.name', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'customers.email_preferences', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'customers.addresses', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'customers.orders', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'partitionValue': u'', u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}], u'hasHeader': True, u'tableFormats': [{u'name': u'Text', u'value': u'text'}, {u'name': u'Parquet', u'value': u'parquet'}, {u'name': u'Kudu', u'value': u'kudu'}, {u'name': u'Csv', u'value': u'csv'}, {u'name': u'Avro', u'value': u'avro'}, {u'name': u'Json', u'value': u'json'}, {u'name': u'Regexp', u'value': u'regexp'}, {u'name': u'ORC', u'value': u'orc'}], u'customCollectionDelimiter': u'\\002'}
  request = MockRequest(fs=MockFs())
  # Generate the full SQL statement string from the two payloads.
  sql = SQLIndexer(user=request.user, fs=request.fs).create_table_from_a_file(source, destination).get_str()
  assert_true('''USE default;''' in sql, sql)
  assert_true('''CREATE TABLE `default`.`customer_stats`
(
  `customers.id` bigint ,
  `customers.name` string ,
  `customers.email_preferences` string ,
  `customers.addresses` string ,
  `customers.orders` string ) PARTITIONED BY (
  `new_field_1` string )
ROW FORMAT DELIMITED
  FIELDS TERMINATED BY ','
  COLLECTION ITEMS TERMINATED BY '\\002'
  MAP KEYS TERMINATED BY '\\003'
  STORED AS TextFile TBLPROPERTIES("transactional" = "false", "skip.header.line.count" = "1")
;''' in sql, sql)
  # The data load must target the static partition value declared above.
  assert_true('''LOAD DATA INPATH '/user/romain/customer_stats.csv' INTO TABLE `default`.`customer_stats` PARTITION (new_field_1='AAA');''' in sql, sql)
def test_generate_create_kudu_table_with_data():
  """CSV import into a Kudu table (Impala).

  Asserts the generated SQL drops/creates a temporary external TEXT table
  over the CSV, then issues a CTAS into a hash-partitioned (16 partitions)
  Kudu table keyed on `id`, selecting only the kept columns (`cool` is
  marked keep=False and must be absent from the SELECT list).
  """
  # 'source' payload: CSV file with header, Impala dialect.
  source = {u'sourceType': 'impala', u'apiHelperType': 'hive', u'sampleCols': [], u'name': u'', u'inputFormat': u'file', u'format': {u'quoteChar': u'"', u'recordSeparator': u'\\n', u'type': u'csv', u'hasHeader': True, u'fieldSeparator': u','}, u'show': True, u'tableName': u'', u'sample': [], u'defaultName': u'index_data', u'query': u'', u'databaseName': u'default', u'table': u'', u'inputFormats': [{u'name': u'File', u'value': u'file'}, {u'name': u'Manually', u'value': u'manual'}], u'path': u'/user/admin/index_data.csv', u'draggedQuery': u'', u'isObjectStore': False}
  # 'destination' payload: Kudu format, primary key `id`, HASH(id) x 16,
  # with column `cool` excluded via keep=False.
  destination = {u'isTransactional': False, u'isInsertOnly': False, u'sourceType': 'impala', u'KUDU_DEFAULT_PARTITION_COLUMN': {u'int_val': 16, u'name': u'HASH', u'columns': [], u'range_partitions': [{u'include_upper_val': u'<=', u'upper_val': 1, u'name': u'VALUES', u'include_lower_val': u'<=', u'lower_val': 0, u'values': [{u'value': u''}]}]}, u'tableName': u'index_data', u'outputFormatsList': [{u'name': u'Table', u'value': u'table'}, {u'name': u'Solr+index', u'value': u'index'}, {u'name': u'File', u'value': u'file'}, {u'name': u'Database', u'value': u'database'}], u'customRegexp': u'', u'isTargetExisting': False, u'partitionColumns': [], u'useCustomDelimiters': True, u'kuduPartitionColumns': [{u'int_val': 16, u'name': u'HASH', u'columns': [u'id'], u'range_partitions': [{u'include_upper_val': u'<=', u'upper_val': 1, u'name': u'VALUES', u'include_lower_val': u'<=', u'lower_val': 0, u'values': [{u'value': u''}]}]}], u'outputFormats': [{u'name': u'Table', u'value': u'table'}, {u'name': u'Solr+index', u'value': u'index'}], u'customMapDelimiter': None, u'showProperties': False, u'useDefaultLocation': True, u'description': u'Big Data', u'primaryKeyObjects': [{u'operations': [], u'comment': u'', u'name': u'id', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}], u'customFieldDelimiter': u',', u'existingTargetUrl': u'', u'importData': True, u'databaseName': u'default', u'KUDU_DEFAULT_RANGE_PARTITION_COLUMN': {u'include_upper_val': u'<=', u'upper_val': 1, u'name': u'VALUES', u'include_lower_val': u'<=', u'lower_val': 0, u'values': [{u'value': u''}]}, u'primaryKeys': [u'id'], u'outputFormat': u'table', u'nonDefaultLocation': u'/user/admin/index_data.csv', u'name': u'index_data', u'tableFormat': u'kudu', u'bulkColumnNames': 
u'business_id,cool,date,funny,id,stars,text,type,useful,user_id,name,full_address,latitude,longitude,neighborhoods,open,review_count,state', u'columns': [{u'operations': [], u'comment': u'', u'name': u'business_id', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'cool', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'bigint', u'showProperties': False, u'keep': False}, {u'operations': [], u'comment': u'', u'name': u'date', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'funny', u'level': 0, u'scale':4, u'precision':10, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'decimal', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'id', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'stars', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'bigint', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'text', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': 
False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'type', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'useful', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'bigint', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'user_id', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'name', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'full_address', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'latitude', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'double', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'longitude', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'double', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': 
u'', u'name': u'neighborhoods', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'open', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'review_count', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'bigint', u'showProperties': False, u'keep': True}, {u'operations': [], u'comment': u'', u'name': u'state', u'level': 0, u'keyType': u'string', u'required': False, u'nested': [], u'isPartition': False, u'length': 100, u'multiValued': False, u'unique': False, u'type': u'string', u'showProperties': False, u'keep': True}], u'hasHeader': True, u'tableFormats': [{u'name': u'Text', u'value': u'text'}, {u'name': u'Parquet', u'value': u'parquet'}, {u'name': u'Json', u'value': u'json'}, {u'name': u'Kudu', u'value': u'kudu'}, {u'name': u'Avro', u'value': u'avro'}, {u'name': u'Regexp', u'value': u'regexp'}, {u'name': u'RCFile', u'value': u'rcfile'}, {u'name': u'ORC', u'value': u'orc'}, {u'name': u'SequenceFile', u'value': u'sequencefile'}], u'customCollectionDelimiter': None}
  request = MockRequest(fs=MockFs())
  # Generate the full SQL statement string from the two payloads.
  sql = SQLIndexer(user=request.user, fs=request.fs).create_table_from_a_file(source, destination).get_str()
  assert_true('''DROP TABLE IF EXISTS `default`.`hue__tmp_index_data`;''' in sql, sql)
  assert_true('''CREATE EXTERNAL TABLE `default`.`hue__tmp_index_data`
(
  `business_id` string ,
  `cool` bigint ,
  `date` string ,
  `funny` decimal(10, 4) ,
  `id` string ,
  `stars` bigint ,
  `text` string ,
  `type` string ,
  `useful` bigint ,
  `user_id` string ,
  `name` string ,
  `full_address` string ,
  `latitude` double ,
  `longitude` double ,
  `neighborhoods` string ,
  `open` string ,
  `review_count` bigint ,
  `state` string ) COMMENT "Big Data"
ROW FORMAT DELIMITED
  FIELDS TERMINATED BY ','
  STORED AS TextFile LOCATION '/A'
TBLPROPERTIES("transactional" = "false", "skip.header.line.count" = "1")''' in sql, sql)
  # CTAS into Kudu: primary key first in the SELECT list, `cool` dropped.
  assert_true('''CREATE TABLE `default`.`index_data` COMMENT "Big Data"
        PRIMARY KEY (id)
        PARTITION BY HASH PARTITIONS 16
        STORED AS kudu
        TBLPROPERTIES(
        'kudu.num_tablet_replicas' = '1'
        )
        AS SELECT `id`, `business_id`, `date`, `funny`, `stars`, `text`, `type`, `useful`, `user_id`, `name`, `full_address`, `latitude`, `longitude`, `neighborhoods`, `open`, `review_count`, `state`
        FROM `default`.`hue__tmp_index_data`;''' in sql, sql)
def test_generate_create_parquet_table():
  """CSV import into a Parquet table (Hive).

  Asserts the generated SQL creates a temporary external TEXT table over
  the CSV directory, runs a CTAS into a Parquet table, and finally drops
  the temporary table.
  """
  # 'source' payload (as posted by the importer UI), parsed from JSON.
  source = json.loads('''{"sourceType": "hive", "name":"","sample":[["Bank Of America","3000000.0","US","Miami","37.6801986694","-121.92150116"],["Citi Bank","2800000.0","US","Richmond","37.5242004395","-77.4932022095"],["Deutsche Bank","2600000.0","US","Corpus Christi","40.7807998657","-73.9772033691"],["Thomson Reuters","2400000.0","US","Albany","35.7976989746","-78.6252975464"],["OpenX","2200000.0","US","Des Moines","40.5411987305","-119.586898804"]],"sampleCols":[{"operations":[],"comment":"","nested":[],"name":"acct_client","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_amount","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_country_cd","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lat","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lon","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":f
alse,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0}],"inputFormat":"file","inputFormatsAll":[{"value":"file","name":"File"},{"value":"manual","name":"Manually"},{"value":"query","name":"SQL Query"},{"value":"table","name":"Table"}],"inputFormatsManual":[{"value":"manual","name":"Manually"}],"inputFormats":[{"value":"file","name":"File"},{"value":"manual","name":"Manually"},{"value":"query","name":"SQL Query"},{"value":"table","name":"Table"}],"path":"/user/hue/data/query-hive-360.csv","isObjectStore":false,"table":"","tableName":"","databaseName":"default","apiHelperType":"hive","query":"","draggedQuery":"","format":{"type":"csv","fieldSeparator":",","recordSeparator":"\\n","quoteChar":"\\"","hasHeader":true,"status":0},"show":true,"defaultName":"default.query-hive-360"}''')
  # 'destination' payload: target format parquet, non-transactional.
  destination = json.loads('''{"isTransactional": false, "isInsertOnly": false, "sourceType": "hive", "name":"default.parquet_table","apiHelperType":"hive","description":"","outputFormat":"table","outputFormatsList":[{"name":"Table","value":"table"},{"name":"Solr index","value":"index"},{"name":"File","value":"file"},{"name":"Database","value":"database"}],"outputFormats":[{"name":"Table","value":"table"},{"name":"Solr index","value":"index"}],"columns":[{"operations":[],"comment":"","nested":[],"name":"acct_client","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_amount","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_country_cd","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lat","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lon","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length"
:100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0}],"bulkColumnNames":"acct_client,tran_amount,tran_country_cd,vrfcn_city,vrfcn_city_lat,vrfcn_city_lon","showProperties":false,"isTargetExisting":false,"isTargetChecking":false,"existingTargetUrl":"","tableName":"parquet_table","databaseName":"default","tableFormat":"parquet","KUDU_DEFAULT_RANGE_PARTITION_COLUMN":{"values":[{"value":""}],"name":"VALUES","lower_val":0,"include_lower_val":"<=","upper_val":1,"include_upper_val":"<="},"KUDU_DEFAULT_PARTITION_COLUMN":{"columns":[],"range_partitions":[{"values":[{"value":""}],"name":"VALUES","lower_val":0,"include_lower_val":"<=","upper_val":1,"include_upper_val":"<="}],"name":"HASH","int_val":16},"tableFormats":[{"value":"text","name":"Text"},{"value":"parquet","name":"Parquet"},{"value":"kudu","name":"Kudu"},{"value":"csv","name":"Csv"},{"value":"avro","name":"Avro"},{"value":"json","name":"Json"},{"value":"regexp","name":"Regexp"},{"value":"orc","name":"ORC"}],"partitionColumns":[],"kuduPartitionColumns":[],"primaryKeys":[],"primaryKeyObjects":[],"importData":true,"useDefaultLocation":true,"nonDefaultLocation":"/user/hue/data/query-hive-360.csv","hasHeader":true,"useCustomDelimiters":false,"customFieldDelimiter":",","customCollectionDelimiter":"\\\\002","customMapDelimiter":"\\\\003","customRegexp":""}''')
  # Mock FS describing the CSV as a file under /user/hue/data.
  path = {'isDir': False, 'split': ('/user/hue/data', 'query-hive-360.csv'), 'listdir': ['/user/hue/data']}
  request = MockRequest(fs=MockFs(path=path))
  # Generate the full SQL statement string from the two payloads.
  sql = SQLIndexer(user=request.user, fs=request.fs).create_table_from_a_file(source, destination).get_str()
  assert_true('''USE default;''' in sql, sql)
  assert_true('''CREATE EXTERNAL TABLE `default`.`hue__tmp_parquet_table`
(
  `acct_client` string ,
  `tran_amount` double ,
  `tran_country_cd` string ,
  `vrfcn_city` string ,
  `vrfcn_city_lat` double ,
  `vrfcn_city_lon` double ) ROW FORMAT DELIMITED
  FIELDS TERMINATED BY ','
  COLLECTION ITEMS TERMINATED BY '\\002'
  MAP KEYS TERMINATED BY '\\003'
  STORED AS TextFile LOCATION '/user/hue/data'
TBLPROPERTIES("transactional" = "false", "skip.header.line.count" = "1")
;''' in sql, sql)
  assert_true('''CREATE TABLE `default`.`parquet_table`
        STORED AS parquet
        AS SELECT *
        FROM `default`.`hue__tmp_parquet_table`;
''' in sql, sql)
  # Temp staging table must be cleaned up afterwards.
  assert_true('''DROP TABLE IF EXISTS `default`.`hue__tmp_parquet_table`;
''' in sql, sql)
def test_generate_create_orc_table_transactional():
  """CSV import into an insert-only transactional ORC table (Hive).

  Same staging flow as the Parquet test, but the destination declares
  isTransactional/isInsertOnly, so the final CTAS must carry the
  transactional TBLPROPERTIES while the temp staging table stays
  non-transactional.
  """
  # 'source' payload (as posted by the importer UI), parsed from JSON.
  source = json.loads('''{"sourceType": "hive", "name":"","sample":[["Bank Of America","3000000.0","US","Miami","37.6801986694","-121.92150116"],["Citi Bank","2800000.0","US","Richmond","37.5242004395","-77.4932022095"],["Deutsche Bank","2600000.0","US","Corpus Christi","40.7807998657","-73.9772033691"],["Thomson Reuters","2400000.0","US","Albany","35.7976989746","-78.6252975464"],["OpenX","2200000.0","US","Des Moines","40.5411987305","-119.586898804"]],"sampleCols":[{"operations":[],"comment":"","nested":[],"name":"acct_client","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_amount","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_country_cd","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lat","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lon","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":f
alse,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0}],"inputFormat":"file","inputFormatsAll":[{"value":"file","name":"File"},{"value":"manual","name":"Manually"},{"value":"query","name":"SQL Query"},{"value":"table","name":"Table"}],"inputFormatsManual":[{"value":"manual","name":"Manually"}],"inputFormats":[{"value":"file","name":"File"},{"value":"manual","name":"Manually"},{"value":"query","name":"SQL Query"},{"value":"table","name":"Table"}],"path":"/user/hue/data/query-hive-360.csv","isObjectStore":false,"table":"","tableName":"","databaseName":"default","apiHelperType":"hive","query":"","draggedQuery":"","format":{"type":"csv","fieldSeparator":",","recordSeparator":"\\n","quoteChar":"\\"","hasHeader":true,"status":0},"show":true,"defaultName":"default.query-hive-360"}''')
  # 'destination' payload: ORC format with isTransactional + isInsertOnly.
  destination = json.loads('''{"isTransactional": true, "isInsertOnly": true, "sourceType": "hive", "name":"default.parquet_table","apiHelperType":"hive","description":"","outputFormat":"table","outputFormatsList":[{"name":"Table","value":"table"},{"name":"Solr index","value":"index"},{"name":"File","value":"file"},{"name":"Database","value":"database"}],"outputFormats":[{"name":"Table","value":"table"},{"name":"Solr index","value":"index"}],"columns":[{"operations":[],"comment":"","nested":[],"name":"acct_client","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_amount","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_country_cd","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lat","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lon","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":1
00,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0}],"bulkColumnNames":"acct_client,tran_amount,tran_country_cd,vrfcn_city,vrfcn_city_lat,vrfcn_city_lon","showProperties":false,"isTargetExisting":false,"isTargetChecking":false,"existingTargetUrl":"","tableName":"parquet_table","databaseName":"default","tableFormat":"orc","KUDU_DEFAULT_RANGE_PARTITION_COLUMN":{"values":[{"value":""}],"name":"VALUES","lower_val":0,"include_lower_val":"<=","upper_val":1,"include_upper_val":"<="},"KUDU_DEFAULT_PARTITION_COLUMN":{"columns":[],"range_partitions":[{"values":[{"value":""}],"name":"VALUES","lower_val":0,"include_lower_val":"<=","upper_val":1,"include_upper_val":"<="}],"name":"HASH","int_val":16},"tableFormats":[{"value":"text","name":"Text"},{"value":"parquet","name":"Parquet"},{"value":"kudu","name":"Kudu"},{"value":"csv","name":"Csv"},{"value":"avro","name":"Avro"},{"value":"json","name":"Json"},{"value":"regexp","name":"Regexp"},{"value":"orc","name":"ORC"}],"partitionColumns":[],"kuduPartitionColumns":[],"primaryKeys":[],"primaryKeyObjects":[],"importData":true,"useDefaultLocation":true,"nonDefaultLocation":"/user/hue/data/query-hive-360.csv","hasHeader":true,"useCustomDelimiters":false,"customFieldDelimiter":",","customCollectionDelimiter":"\\\\002","customMapDelimiter":"\\\\003","customRegexp":""}''')
  # Mock FS describing the CSV as a file under /user/hue/data.
  path = {'isDir': False, 'split': ('/user/hue/data', 'query-hive-360.csv'), 'listdir': ['/user/hue/data']}
  request = MockRequest(fs=MockFs(path=path))
  # Generate the full SQL statement string from the two payloads.
  sql = SQLIndexer(user=request.user, fs=request.fs).create_table_from_a_file(source, destination).get_str()
  assert_true('''USE default;''' in sql, sql)
  # Staging table is plain text and explicitly non-transactional.
  assert_true('''CREATE EXTERNAL TABLE `default`.`hue__tmp_parquet_table`
(
  `acct_client` string ,
  `tran_amount` double ,
  `tran_country_cd` string ,
  `vrfcn_city` string ,
  `vrfcn_city_lat` double ,
  `vrfcn_city_lon` double ) ROW FORMAT DELIMITED
  FIELDS TERMINATED BY ','
  COLLECTION ITEMS TERMINATED BY '\\002'
  MAP KEYS TERMINATED BY '\\003'
  STORED AS TextFile LOCATION '/user/hue/data'
TBLPROPERTIES("transactional" = "false", "skip.header.line.count" = "1")
;''' in sql, sql)
  # Final table is ORC with insert-only transactional properties.
  assert_true('''CREATE TABLE `default`.`parquet_table`
        STORED AS orc
        TBLPROPERTIES("transactional"="true", "transactional_properties"="insert_only")
        AS SELECT *
        FROM `default`.`hue__tmp_parquet_table`;
''' in sql, sql)
  assert_true('''DROP TABLE IF EXISTS `default`.`hue__tmp_parquet_table`;
''' in sql, sql)
def test_generate_create_empty_kudu_table():
  """Manual (no input file) creation of an empty Kudu table.

  With ``inputFormat`` "manual" and ``importData`` false, SQLIndexer should
  emit a single ``CREATE TABLE ... STORED AS kudu`` statement carrying the
  configured PRIMARY KEY — with no temporary staging table and no
  ``INSERT ... SELECT`` step.
  """
  # Source describes a manual definition: there is no input path to load from.
  source = json.loads('''{"sourceType": "impala", "apiHelperType": "impala", "path": "", "inputFormat": "manual"}''')
  # Destination: six typed columns, Kudu table format, "acct_client" as the
  # primary key, importData=false (empty table).
  destination = json.loads('''{"isTransactional": false, "isInsertOnly": false, "sourceType": "impala", "name":"default.manual_empty_kudu","apiHelperType":"impala","description":"","outputFormat":"table","columns":[{"operations":[],"comment":"","nested":[],"name":"acct_client","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_amount","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"tran_country_cd","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"string","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lat","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0},{"operations":[],"comment":"","nested":[],"name":"vrfcn_city_lon","level":0,"keyType":"string","required":false,"precision":10,"keep":true,"isPartition":false,"length":100,"partitionValue":"","multiValued":false,"unique":false,"type":"double","showProperties":false,"scale":0}],"bulkColumnNames":"acct_client,tran_amount,tran_country_cd,vrfcn_city,vrfcn_city_lat,vrfcn_city_lon","showProperties":false,"isTargetExisting":false,"isTargetChecking":false,"existingTargetUrl":"","tableName":"manual_kudu_table","databaseName":"default","tableFormat":"kudu","KUDU_DEFAULT_RANGE_PARTITION_COLUMN":{"values":[{"value":""}],"name":"VALUES","lower_val":0,"include_lower_val":"<=","upper_val":1,"include_upper_val":"<="},"KUDU_DEFAULT_PARTITION_COLUMN":{"columns":[],"range_partitions":[{"values":[{"value":""}],"name":"VALUES","lower_val":0,"include_lower_val":"<=","upper_val":1,"include_upper_val":"<="}],"name":"HASH","int_val":16},"tableFormats":[{"value":"text","name":"Text"},{"value":"parquet","name":"Parquet"},{"value":"kudu","name":"Kudu"},{"value":"csv","name":"Csv"},{"value":"avro","name":"Avro"},{"value":"json","name":"Json"},{"value":"regexp","name":"Regexp"},{"value":"orc","name":"ORC"}],"partitionColumns":[],"kuduPartitionColumns":[],"primaryKeys": ["acct_client"],"primaryKeyObjects":[],"importData":false,"useDefaultLocation":true,"nonDefaultLocation":"/user/hue/data/query-hive-360.csv","hasHeader":false,"useCustomDelimiters":false,"customFieldDelimiter":",","customCollectionDelimiter":"\\\\002","customMapDelimiter":"\\\\003","customRegexp":""}''')
  # Mock filesystem entry; presumably unused for a manual definition — TODO confirm.
  path = {'isDir': False, 'split': ('/user/hue/data', 'query-hive-360.csv'), 'listdir': ['/user/hue/data']}
  request = MockRequest(fs=MockFs(path=path))
  sql = SQLIndexer(user=request.user, fs=request.fs).create_table_from_a_file(source, destination).get_str()
  # The generated DDL must target the configured table name and embed the key.
  assert_true('''CREATE TABLE `default`.`manual_empty_kudu`
(
  `acct_client` string ,
  `tran_amount` double ,
  `tran_country_cd` string ,
  `vrfcn_city` string ,
  `vrfcn_city_lat` double ,
  `vrfcn_city_lon` double , PRIMARY KEY (acct_client)
) STORED AS kudu TBLPROPERTIES("transactional" = "false")
;''' in sql, sql)
| true | true |
f71b3428812e3f4af3ba5ec76b4fec00628e68ec | 1,009 | py | Python | web/tex_cnn_rest.py | wbj0110/cnn-text-classification-tf-chinese | 42e47d34c300e9d571231e43c189ee292b595559 | [
"Apache-2.0"
] | null | null | null | web/tex_cnn_rest.py | wbj0110/cnn-text-classification-tf-chinese | 42e47d34c300e9d571231e43c189ee292b595559 | [
"Apache-2.0"
] | null | null | null | web/tex_cnn_rest.py | wbj0110/cnn-text-classification-tf-chinese | 42e47d34c300e9d571231e43c189ee292b595559 | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/env python
from tornado import httpserver
from tornado import gen
from tornado.ioloop import IOLoop
import tornado.web
import json
import single_eval as sev
class IndexHandler(tornado.web.RequestHandler):
    """Landing endpoint: answers GET with a static service greeting."""

    def get(self):
        greeting = "Hello,This is TextCNN"
        self.write(greeting)
class ClassifyHandler(tornado.web.RequestHandler):
    """Classification endpoint backed by the model in ``single_eval``."""

    def get(self):
        """Classify the text given in query parameter ``q`` (default 'Hello')."""
        query_text = self.get_argument('q', 'Hello')
        prediction = sev.classify(query_text)
        self.write(f"this is Classfication for text,get method and result:{prediction}")

    def post(self):
        """POST is acknowledged but performs no classification."""
        self.write("this is classfication for text ,post method")
class Application(tornado.web.Application):
    """Tornado application wiring the URL patterns to their handlers."""

    def __init__(self):
        routes = [
            (r"/?", IndexHandler),
            (r"/classify/?", ClassifyHandler),
        ]
        super().__init__(handlers=routes)
def main(port=80):
    """Build the application and serve it until the process is stopped.

    Args:
        port (int): TCP port to listen on. Defaults to 80 (the original
            hard-coded value; ports below 1024 typically require elevated
            privileges).
    """
    app = Application()
    app.listen(port)
    IOLoop.instance().start()


if __name__ == '__main__':
    main()
| 25.871795 | 100 | 0.667988 |
from tornado import httpserver
from tornado import gen
from tornado.ioloop import IOLoop
import tornado.web
import json
import single_eval as sev
class IndexHandler(tornado.web.RequestHandler):
def get(self):
self.write("Hello,This is TextCNN")
class ClassifyHandler(tornado.web.RequestHandler):
def get(self):
data = self.get_argument('q', 'Hello')
predict_result = sev.classify(data)
self.write("this is Classfication for text,get method and result:{}".format(predict_result))
def post(self):
self.write("this is classfication for text ,post method")
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r"/?",IndexHandler),
(r"/classify/?",ClassifyHandler)
]
tornado.web.Application.__init__(self,handlers=handlers)
def main():
app = Application()
app.listen(80)
IOLoop.instance().start()
if __name__ == '__main__':
main()
| true | true |
f71b345986d63817e8ebf1e91022534a55821bb0 | 1,010 | py | Python | setup.py | jiad-dev/flask-req-parser | f46a6c002381d7b74e9c80c6b3ae536dcda908f9 | [
"BSD-2-Clause"
] | 2 | 2020-10-17T04:46:08.000Z | 2020-10-17T04:46:10.000Z | setup.py | jiad-dev/flask-req-parser | f46a6c002381d7b74e9c80c6b3ae536dcda908f9 | [
"BSD-2-Clause"
] | null | null | null | setup.py | jiad-dev/flask-req-parser | f46a6c002381d7b74e9c80c6b3ae536dcda908f9 | [
"BSD-2-Clause"
] | 2 | 2020-10-07T03:33:19.000Z | 2020-10-07T04:07:16.000Z | from setuptools import setup
# Package metadata for flask_req_parser (commented-out find_packages/py_modules
# alternatives removed — the explicit packages list below is the one in use).
setup(
    name='flask_req_parser',
    version='0.1.4',
    url='https://github.com/Rhyanz46/flask-req-parser',
    license='BSD',
    author='Arian Saputra',
    author_email='rianariansaputra@gmail.com',
    description='Simple Request parser for flask',
    long_description=__doc__,
    packages=['flask_req_parser'],
    zip_safe=False,
    include_package_data=True,
    platforms='any',
    install_requires=[
        'Flask'
    ],
    classifiers=[
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ]
)
setup(
name='flask_req_parser',
version='0.1.4',
url='https://github.com/Rhyanz46/flask-req-parser',
license='BSD',
author='Arian Saputra',
author_email='rianariansaputra@gmail.com',
description='Simple Request parser for flask',
long_description=__doc__,
packages=['flask_req_parser'],
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=[
'Flask'
],
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
) | true | true |
f71b36d9600e32803a5a8daae7b744b0672e4fd7 | 24,960 | py | Python | src/data/database.py | aFoxPod/torrent-bot | 29ffaea5edab81bfa3aa0d944a96a766d06770e5 | [
"MIT"
] | null | null | null | src/data/database.py | aFoxPod/torrent-bot | 29ffaea5edab81bfa3aa0d944a96a766d06770e5 | [
"MIT"
] | null | null | null | src/data/database.py | aFoxPod/torrent-bot | 29ffaea5edab81bfa3aa0d944a96a766d06770e5 | [
"MIT"
] | null | null | null | import sqlite3
class TorrentState:
    """Enumeration of the lifecycle states a tracked torrent can be in."""

    SEARCHING = "SEARCHING"      # still being searched
    DOWNLOADING = "DOWNLOADING"  # currently being downloaded
    SEEDING = "SEEDING"          # currently uploading
    COMPLETED = "COMPLETED"      # removed from seeding
    DELETING = "DELETING"        # marked for deletion
    PAUSED = "PAUSED"            # download stopped

    @staticmethod
    def get_states() -> list:
        """Return every known state, in lifecycle order."""
        return [
            TorrentState.SEARCHING, TorrentState.DOWNLOADING,
            TorrentState.SEEDING, TorrentState.COMPLETED,
            TorrentState.DELETING, TorrentState.PAUSED,
        ]
class TBDatabase:
    """
    SQLite persistence layer for the torrent bot.

    Tracks movies, tv shows, seasons and episodes together with their
    download state (see ``TorrentState``) and exposes CRUD helpers plus two
    read-only views joining shows/seasons and seasons/episodes.

    Attributes
    ----------
    db_file_path : str
        the database file path (sqlite)
    """

    # Allow-lists of updatable columns, one per table.  They guard the
    # dynamically built SET clauses in _update(): only known column names are
    # ever interpolated into SQL text (values are always bound parameters).
    MOVIE_COLUMNS = ["name", "max_size_mb", "resolution_profile", "state", "hash", "imdbid", "cover_url"]
    TV_SHOW_COLUMNS = ["name", "max_episode_size_mb", "resolution_profile", "state", "imdbid", "cover_url"]
    SEASON_COLUMNS = ["season_number", "season_number_episodes", "state", "hash"]
    # "name" included (it was missing before) so an episode title can be
    # corrected after insertion.
    EPISODE_COLUMNS = ["season_id", "name", "episode_number", "air_date", "state", "hash"]

    def __init__(self, db_file_path: str) -> None:
        self.db_file_path = db_file_path
        self.connection = sqlite3.connect(self.db_file_path)
        # sqlite disables foreign keys by default; enable them so the
        # ON DELETE CASCADE relations declared in create_schema() fire.
        self.connection.execute("PRAGMA foreign_keys = ON")
        self.connection.row_factory = dict_factory
        self.states = TorrentState()

    # ------------------------------------------------------------------
    # internal helpers
    # ------------------------------------------------------------------
    def _fetch_all(self, sql: str, params: tuple = ()) -> list:
        """Execute *sql* with bound *params* and return all rows (as dicts)."""
        cur = self.connection.cursor()
        cur.execute(sql, params)
        return cur.fetchall()

    def _fetch_one(self, sql: str, params: tuple = ()):
        """Execute *sql* with bound *params*; return the first row or None."""
        cur = self.connection.cursor()
        cur.execute(sql, params)
        return cur.fetchone()

    def _execute(self, sql: str, params: tuple = ()):
        """Execute a mutating statement, commit, and return the cursor."""
        cur = self.connection.cursor()
        cur.execute(sql, params)
        self.connection.commit()
        return cur

    def _require_valid_state(self, state: str) -> None:
        """Raise if *state* is not one of the known TorrentState values."""
        if state not in self.states.get_states():
            raise Exception(f"Non allowed state={state}!")

    def _update(self, table: str, allowed_columns: list, row_id: int, kwargs: dict) -> None:
        """Update row *row_id* of *table* with the given column/value pairs.

        Args:
            table (str): target table name (internal constant, never user input)
            allowed_columns (list): columns that may legally be updated
            row_id (int): primary key of the row to change
            kwargs (dict): column -> new value mapping

        Raises:
            Exception: if kwargs is empty or names a column outside
                *allowed_columns*
        """
        if not kwargs:
            raise Exception("At least one argument must be specified")
        assignments = []
        values = []
        for key, value in kwargs.items():
            if key not in allowed_columns:
                raise Exception(
                    f"The key argument must be one of the following: {allowed_columns}"
                )
            assignments.append(f"{key}=?")
            values.append(value)
        values.append(row_id)
        # Column names come from the allow-list above; values are bound.
        self._execute(
            f"UPDATE {table} SET {', '.join(assignments)} WHERE id=?",
            tuple(values),
        )

    # ------------------------------------------------------------------
    # schema
    # ------------------------------------------------------------------
    def create_schema(self) -> None:
        """Create all tables and views if they do not exist yet."""
        cur = self.connection.cursor()
        cur.execute(f"""CREATE TABLE IF NOT EXISTS movies (
            "id" INTEGER PRIMARY KEY AUTOINCREMENT,
            "name" TEXT UNIQUE NOT NULL,
            "max_size_mb" INTEGER NOT NULL,
            "resolution_profile" TEXT NOT NULL,
            "state" TEXT NOT NULL DEFAULT '{self.states.SEARCHING}',
            "imdbid" INTEGER UNIQUE NOT NULL,
            "cover_url" TEXT,
            "hash" TEXT)
        """)
        cur.execute(f"""CREATE TABLE IF NOT EXISTS tv_shows (
            "id" INTEGER PRIMARY KEY AUTOINCREMENT,
            "name" TEXT UNIQUE NOT NULL,
            "max_episode_size_mb" INTEGER NOT NULL,
            "resolution_profile" TEXT NOT NULL,
            "imdbid" INTEGER UNIQUE NOT NULL,
            "state" TEXT NOT NULL DEFAULT '{self.states.SEARCHING}',
            "cover_url" TEXT)
        """)
        cur.execute(f"""CREATE TABLE IF NOT EXISTS tv_show_seasons (
            "id" INTEGER PRIMARY KEY AUTOINCREMENT,
            "show_id" INTEGER,
            "season_number" INTEGER NOT NULL,
            "season_number_episodes" INTEGER NOT NULL,
            "state" TEXT NOT NULL DEFAULT '{self.states.SEARCHING}',
            "hash" TEXT,
            FOREIGN KEY(show_id) REFERENCES tv_shows(id) ON DELETE CASCADE,
            UNIQUE(show_id, season_number))
        """)
        cur.execute(f"""CREATE TABLE IF NOT EXISTS tv_show_season_episodes (
            "id" INTEGER PRIMARY KEY AUTOINCREMENT,
            "season_id" INTEGER,
            "name" TEXT NOT NULL,
            "episode_number" INTEGER NOT NULL,
            "air_date" TEXT NOT NULL,
            "state" TEXT NOT NULL DEFAULT '{self.states.SEARCHING}',
            "hash" TEXT,
            FOREIGN KEY(season_id) REFERENCES tv_show_seasons(id) ON DELETE CASCADE,
            UNIQUE(season_id, episode_number))
        """)
        # Read-only join of shows and their seasons.  (A duplicated
        # "show_name" alias in the original SELECT list was removed; it was
        # collapsed by the dict row factory anyway.)
        cur.execute("""CREATE VIEW IF NOT EXISTS tv_shows_with_seasons_view
            AS
            SELECT
                tv_shows.id as show_id,
                tv_shows.name as show_name,
                tv_shows.state as show_state,
                tv_shows.resolution_profile as resolution_profile,
                tv_shows.max_episode_size_mb as max_episode_size_mb,
                tv_shows.imdbid as show_imdbid,
                tv_show_seasons.id as season_id,
                tv_show_seasons.season_number as season_number,
                tv_show_seasons.season_number_episodes as season_number_episodes,
                tv_show_seasons.state as season_state,
                tv_show_seasons.hash as season_hash
            FROM tv_shows
            INNER JOIN tv_show_seasons on tv_shows.id = tv_show_seasons.show_id;
        """)
        # Read-only join of seasons and their episodes.
        cur.execute("""CREATE VIEW IF NOT EXISTS tv_show_seasons_with_episodes_view
            AS
            SELECT
                tv_show_seasons.id as season_id,
                tv_show_seasons.hash as season_hash,
                tv_show_seasons.state as season_state,
                tv_show_seasons.season_number as season_number,
                tv_show_season_episodes.id as episode_id,
                tv_show_season_episodes.name as episode_name,
                tv_show_season_episodes.air_date as episode_air_date,
                tv_show_season_episodes.episode_number as episode_number,
                tv_show_season_episodes.state as episode_state,
                tv_show_season_episodes.hash as episode_hash
            FROM tv_show_seasons
            INNER JOIN tv_show_season_episodes on tv_show_seasons.id = tv_show_season_episodes.season_id;
        """)
        self.connection.commit()

    # ------------------------------------------------------------------
    # bulk getters
    # ------------------------------------------------------------------
    def get_all_movies(self) -> list:
        """Return every movie row."""
        return self._fetch_all("SELECT * FROM movies")

    def get_all_tv_shows(self) -> list:
        """Return every tv show row."""
        return self._fetch_all("SELECT * FROM tv_shows;")

    def get_all_seasons(self) -> list:
        """Return every tv show season row."""
        return self._fetch_all("SELECT * FROM tv_show_seasons;")

    def get_all_episodes(self) -> list:
        """Return every episode row."""
        return self._fetch_all("SELECT * FROM tv_show_season_episodes;")

    def get_all_tv_shows_with_seasons(self) -> list:
        """Return every (tv show, season) joined row."""
        return self._fetch_all("SELECT * FROM tv_shows_with_seasons_view;")

    def get_all_tv_shows_season_episodes(self) -> list:
        """Return every (season, episode) joined row."""
        # BUG FIX: previously queried "tv_shows_season_with_episodes_view",
        # a view that create_schema() never creates, so this method always
        # raised sqlite3.OperationalError.
        return self._fetch_all("SELECT * FROM tv_show_seasons_with_episodes_view;")

    # ------------------------------------------------------------------
    # state-filtered getters
    # ------------------------------------------------------------------
    def get_movies_by_state(self, state: str) -> list:
        """Return all movies in *state* (must be a valid TorrentState)."""
        self._require_valid_state(state)
        return self._fetch_all("SELECT * FROM movies WHERE state=?", (state,))

    def get_tv_shows_by_state(self, state: str) -> list:
        """Return all tv shows in *state* (must be a valid TorrentState)."""
        self._require_valid_state(state)
        return self._fetch_all("SELECT * FROM tv_shows WHERE state=?", (state,))

    def get_tv_show_with_seasons_by_state(self, state: str) -> list:
        """Return all (show, season) rows whose season is in *state*."""
        self._require_valid_state(state)
        return self._fetch_all(
            "SELECT * FROM tv_shows_with_seasons_view WHERE season_state=?", (state,))

    def get_tv_show_seasons_with_episodes_by_state(self, state: str) -> list:
        """Return all (season, episode) rows whose season is in *state*."""
        self._require_valid_state(state)
        return self._fetch_all(
            "SELECT * FROM tv_show_seasons_with_episodes_view WHERE season_state=?", (state,))

    # ------------------------------------------------------------------
    # single-row getters
    # ------------------------------------------------------------------
    def get_movie(self, id: int) -> dict:
        """Return the movie with the given id, or None."""
        return self._fetch_one("SELECT * FROM movies WHERE id=?", (id,))

    def get_tv_show(self, id: int) -> dict:
        """Return the tv show with the given id, or None."""
        return self._fetch_one("SELECT * FROM tv_shows WHERE id=?", (id,))

    def get_tv_show_season(self, id: int) -> dict:
        """Return the tv show season with the given id, or None."""
        return self._fetch_one("SELECT * FROM tv_show_seasons WHERE id=?", (id,))

    def get_tv_show_with_seasons(self, id: int) -> list:
        """Return all (show, season) joined rows for the given show id."""
        return self._fetch_all(
            "SELECT * from tv_shows_with_seasons_view WHERE show_id=?", (id,))

    def get_tv_show_season_with_episodes(self, id: int) -> list:
        """Return all (season, episode) joined rows for the given season id."""
        return self._fetch_all(
            "SELECT * from tv_show_seasons_with_episodes_view WHERE season_id=?", (id,))

    def get_season_episodes(self, season_id: int) -> list:
        """Return all episode rows for the given season id."""
        return self._fetch_all(
            "SELECT * FROM tv_show_season_episodes WHERE season_id=?", (season_id,))

    # ------------------------------------------------------------------
    # deletes (cascades handle dependents, see create_schema)
    # ------------------------------------------------------------------
    def delete_movie(self, id: int) -> None:
        """Delete the movie with the given id."""
        self._execute("DELETE FROM movies WHERE id=?", (id,))

    def delete_tv_show(self, id: int) -> None:
        """Delete the tv show with the given id (seasons/episodes cascade)."""
        self._execute("DELETE FROM tv_shows WHERE id=?", (id,))

    def delete_season(self, id: int) -> None:
        """Delete the season with the given id (episodes cascade)."""
        self._execute("DELETE FROM tv_show_seasons WHERE id=?", (id,))

    def delete_episode(self, id: int) -> None:
        """Delete the episode with the given id."""
        self._execute("DELETE FROM tv_show_season_episodes WHERE id=?", (id,))

    # ------------------------------------------------------------------
    # inserts
    # ------------------------------------------------------------------
    def add_movie(self, name: str, max_size_mb: int, resolution_profile: str, imdbid: str, cover_url: str) -> int:
        """Insert a movie and return its new row id."""
        cur = self._execute(
            """
            INSERT INTO movies(name,max_size_mb,resolution_profile, imdbid, cover_url)
            VALUES(?,?,?,?,?)
            """,
            (name, max_size_mb, resolution_profile, imdbid, cover_url),
        )
        return cur.lastrowid

    def add_tv_show(self, name: str, max_episode_size_mb: int, resolution_profile: str, imdbid: str, cover_url: str) -> int:
        """Insert a tv show and return its new row id."""
        cur = self._execute(
            """
            INSERT INTO tv_shows(name,max_episode_size_mb,resolution_profile, imdbid, cover_url)
            VALUES(?,?,?,?,?)
            """,
            (name, max_episode_size_mb, resolution_profile, imdbid, cover_url),
        )
        return cur.lastrowid

    def add_tv_show_season(self, show_id: int, season_number: int, season_number_episodes: int) -> int:
        """Insert a season for *show_id* and return its new row id."""
        cur = self._execute(
            """
            INSERT INTO tv_show_seasons(show_id,season_number, season_number_episodes)
            VALUES(?,?,?)
            """,
            (show_id, season_number, season_number_episodes),
        )
        return cur.lastrowid

    def add_season_episode(self, season_id: int, episode_name: str, episode_number: int, air_date: str) -> int:
        """Insert an episode for *season_id* and return its new row id."""
        cur = self._execute(
            """
            INSERT INTO tv_show_season_episodes(season_id, name, episode_number, air_date)
            VALUES(?,?,?,?)
            """,
            (season_id, episode_name, episode_number, air_date),
        )
        return cur.lastrowid

    def get_season_id(self, show_id: int, season_number: int) -> int:
        """Return the season id for (show_id, season_number).

        NOTE(review): as before, this raises (TypeError on the missing row)
        when no such season exists rather than returning None.
        """
        row = self._fetch_one(
            "SELECT id FROM tv_show_seasons WHERE show_id=? AND season_number=?",
            (show_id, season_number),
        )
        return row['id']

    # ------------------------------------------------------------------
    # updates (all delegate to _update with the table's allow-list)
    # ------------------------------------------------------------------
    def update_movie(self, id: int, **kwargs: dict) -> None:
        """Update movie columns; see MOVIE_COLUMNS for the allowed keys."""
        self._update("movies", self.MOVIE_COLUMNS, id, kwargs)

    def update_tv_show(self, id: int, **kwargs: dict) -> None:
        """Update tv show columns; see TV_SHOW_COLUMNS for the allowed keys."""
        self._update("tv_shows", self.TV_SHOW_COLUMNS, id, kwargs)

    def update_show_season(self, id: int, **kwargs: dict) -> None:
        """Update season columns; see SEASON_COLUMNS for the allowed keys."""
        self._update("tv_show_seasons", self.SEASON_COLUMNS, id, kwargs)

    def update_tv_show_season_episode(self, id: int, **kwargs: dict) -> None:
        """Update episode columns; see EPISODE_COLUMNS for the allowed keys."""
        self._update("tv_show_season_episodes", self.EPISODE_COLUMNS, id, kwargs)

    # ------------------------------------------------------------------
    # aggregated state / numbering helpers
    # ------------------------------------------------------------------
    def get_season_states(self, show_id: int) -> set:
        """Return the set of season states for the given show."""
        rows = self._fetch_all(
            "SELECT season_state FROM tv_shows_with_seasons_view WHERE show_id=?",
            (show_id,))
        return {row['season_state'] for row in rows}

    def get_season_episodes_states(self, season_id: int) -> set:
        """Return the set of episode states for the given season."""
        rows = self._fetch_all(
            "SELECT state FROM tv_show_season_episodes WHERE season_id=?",
            (season_id,))
        return {row['state'] for row in rows}

    def get_tv_show_season_numbers(self, show_id: int) -> set:
        """Return the set of season numbers known for the given show."""
        rows = self._fetch_all(
            "SELECT season_number FROM tv_show_seasons WHERE show_id=?",
            (show_id,))
        return {row['season_number'] for row in rows}

    def get_tv_show_season_episode_numbers(self, season_id: int) -> set:
        """Return the set of episode numbers known for the given season."""
        rows = self._fetch_all(
            "SELECT episode_number FROM tv_show_season_episodes WHERE season_id=?",
            (season_id,))
        return {row['episode_number'] for row in rows}

    def close(self) -> None:
        """Close the database connection."""
        self.connection.close()
def dict_factory(cursor, row) -> dict:
    """sqlite3 row factory mapping each result row to {column_name: value}.

    Args:
        cursor: the sqlite3 cursor that produced ``row``; its ``description``
            attribute supplies the column names
        row: the raw result tuple

    Returns:
        dict: column name -> value for this row
    """
    column_names = (descriptor[0] for descriptor in cursor.description)
    return dict(zip(column_names, row))
if __name__ == "__main__":
import sys
if len(sys.argv) < 2:
print("Usage: python database.py <db_path>", file=sys.stderr)
exit(0)
db = TBDatabase(sys.argv[1])
db.create_schema()
print(db.get_all_movies())
db.close()
| 33.28 | 124 | 0.575561 | import sqlite3
class TorrentState:
SEARCHING = "SEARCHING"
DOWNLOADING = "DOWNLOADING"
SEEDING = "SEEDING"
COMPLETED = "COMPLETED"
DELETING = "DELETING"
PAUSED = "PAUSED"
@staticmethod
def get_states() -> list:
return [
TorrentState.SEARCHING,
TorrentState.DOWNLOADING,
TorrentState.SEEDING,
TorrentState.COMPLETED,
TorrentState.DELETING,
TorrentState.PAUSED
]
class TBDatabase:
def __init__(self, db_file_path: str) -> None:
self.db_file_path = db_file_path
self.connection = sqlite3.connect(self.db_file_path)
self.connection.execute("PRAGMA foreign_keys = ON")
self.connection.row_factory = dict_factory
self.states = TorrentState()
def create_schema(self) -> None:
cur = self.connection.cursor()
sql = f"""CREATE TABLE IF NOT EXISTS movies (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"name" TEXT UNIQUE NOT NULL,
"max_size_mb" INTEGER NOT NULL,
"resolution_profile" TEXT NOT NULL,
"state" TEXT NOT NULL DEFAULT '{self.states.SEARCHING}',
"imdbid" INTEGER UNIQUE NOT NULL,
"cover_url" TEXT,
"hash" TEXT)
"""
cur.execute(sql)
sql = f"""CREATE TABLE IF NOT EXISTS tv_shows (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"name" TEXT UNIQUE NOT NULL,
"max_episode_size_mb" INTEGER NOT NULL,
"resolution_profile" TEXT NOT NULL,
"imdbid" INTEGER UNIQUE NOT NULL,
"state" TEXT NOT NULL DEFAULT '{self.states.SEARCHING}',
"cover_url" TEXT
)
"""
cur.execute(sql)
sql = f"""CREATE TABLE IF NOT EXISTS tv_show_seasons (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"show_id" INTEGER,
"season_number" INTEGER NOT NULL,
"season_number_episodes" INTEGER NOT NULL,
"state" TEXT NOT NULL DEFAULT '{self.states.SEARCHING}',
"hash" TEXT,
FOREIGN KEY(show_id) REFERENCES tv_shows(id) ON DELETE CASCADE,
UNIQUE(show_id, season_number))
"""
cur.execute(sql)
sql = f"""CREATE TABLE IF NOT EXISTS tv_show_season_episodes (
"id" INTEGER PRIMARY KEY AUTOINCREMENT,
"season_id" INTEGER,
"name" TEXT NOT NULL,
"episode_number" INTEGER NOT NULL,
"air_date" TEXT NOT NULL,
"state" TEXT NOT NULL DEFAULT '{self.states.SEARCHING}',
"hash" TEXT,
FOREIGN KEY(season_id) REFERENCES tv_show_seasons(id) ON DELETE CASCADE,
UNIQUE(season_id, episode_number))
"""
cur.execute(sql)
sql = f"""CREATE VIEW IF NOT EXISTS tv_shows_with_seasons_view
AS
SELECT
tv_shows.id as show_id,
tv_shows.name as show_name,
tv_shows.state as show_state,
tv_shows.resolution_profile as resolution_profile,
tv_shows.name as show_name,
tv_shows.max_episode_size_mb as max_episode_size_mb,
tv_shows.imdbid as show_imdbid,
tv_show_seasons.id as season_id,
tv_show_seasons.season_number as season_number,
tv_show_seasons.season_number_episodes as season_number_episodes,
tv_show_seasons.state as season_state,
tv_show_seasons.hash as season_hash
FROM tv_shows
INNER JOIN tv_show_seasons on tv_shows.id = tv_show_seasons.show_id;
"""
cur.execute(sql)
sql = f"""CREATE VIEW IF NOT EXISTS tv_show_seasons_with_episodes_view
AS
SELECT
tv_show_seasons.id as season_id,
tv_show_seasons.hash as season_hash,
tv_show_seasons.state as season_state,
tv_show_seasons.season_number as season_number,
tv_show_season_episodes.id as episode_id,
tv_show_season_episodes.name as episode_name,
tv_show_season_episodes.air_date as episode_air_date,
tv_show_season_episodes.episode_number as episode_number,
tv_show_season_episodes.state as episode_state,
tv_show_season_episodes.hash as episode_hash
FROM tv_show_seasons
INNER JOIN tv_show_season_episodes on tv_show_seasons.id = tv_show_season_episodes.season_id;
"""
cur.execute(sql)
self.connection.commit()
def get_all_movies(self) -> list:
cur = self.connection.cursor()
cur.execute("SELECT * FROM movies")
return cur.fetchall()
def get_all_tv_shows(self) -> list:
cur = self.connection.cursor()
cur.execute("SELECT * FROM tv_shows;")
return cur.fetchall()
def get_all_seasons(self) -> list:
cur = self.connection.cursor()
cur.execute("SELECT * FROM tv_show_seasons;")
return cur.fetchall()
def get_all_episodes(self) -> list:
cur = self.connection.cursor()
cur.execute("SELECT * FROM tv_show_season_episodes;")
return cur.fetchall()
def get_all_tv_shows_with_seasons(self) -> list:
cur = self.connection.cursor()
cur.execute("SELECT * FROM tv_shows_with_seasons_view;")
return cur.fetchall()
def get_all_tv_shows_season_episodes(self) -> list:
cur = self.connection.cursor()
cur.execute("SELECT * FROM tv_shows_season_with_episodes_view;")
return cur.fetchall()
def get_movies_by_state(self, state: str) -> list:
if state not in self.states.get_states():
raise Exception(f"Non allowed state={state}!")
cur = self.connection.cursor()
cur.execute("SELECT * FROM movies WHERE state=?", (state,))
return cur.fetchall()
def get_tv_shows_by_state(self, state: str) -> list:
if state not in self.states.get_states():
raise Exception(f"Non allowed state={state}!")
cur = self.connection.cursor()
cur.execute("SELECT * FROM tv_shows WHERE state=?", (state,))
return cur.fetchall()
def get_tv_show_with_seasons_by_state(self, state: str) -> list:
if state not in self.states.get_states():
raise Exception(f"Non allowed state={state}!")
cur = self.connection.cursor()
cur.execute(
"SELECT * FROM tv_shows_with_seasons_view WHERE season_state=?", (state,))
return cur.fetchall()
def get_tv_show_seasons_with_episodes_by_state(self, state: str) -> list:
if state not in self.states.get_states():
raise Exception(f"Non allowed state={state}!")
cur = self.connection.cursor()
cur.execute(
"SELECT * FROM tv_show_seasons_with_episodes_view WHERE season_state=?", (state,))
return cur.fetchall()
def get_movie(self, id: int) -> dict:
cur = self.connection.cursor()
cur.execute("SELECT * FROM movies WHERE id=?", (id,))
return cur.fetchone()
def get_tv_show(self, id: int) -> dict:
cur = self.connection.cursor()
cur.execute("SELECT * FROM tv_shows WHERE id=?", (id,))
return cur.fetchone()
def get_tv_show_season(self, id: str) -> dict:
cur = self.connection.cursor()
cur.execute("SELECT * FROM tv_show_seasons WHERE id=?", (id,))
return cur.fetchone()
def get_tv_show_with_seasons(self, id: str) -> list:
cur = self.connection.cursor()
cur.execute("SELECT * from tv_shows_with_seasons_view WHERE show_id=?", (id,))
return cur.fetchall()
def get_tv_show_season_with_episodes(self, id: str) -> list:
cur = self.connection.cursor()
cur.execute("SELECT * from tv_show_seasons_with_episodes_view WHERE season_id=?", (id,))
return cur.fetchall()
def get_season_episodes(self, season_id: str) -> list:
cur = self.connection.cursor()
cur.execute("SELECT * FROM tv_show_season_episodes WHERE season_id=?", (season_id,))
result = cur.fetchall()
return result
def delete_movie(self, id: int) -> None:
cur = self.connection.cursor()
cur.execute("DELETE FROM movies WHERE id=?", (id,))
self.connection.commit()
def delete_tv_show(self, id: int) -> None:
cur = self.connection.cursor()
cur.execute("DELETE FROM tv_shows WHERE id=?", (id,))
self.connection.commit()
def delete_season(self, id: int):
cur = self.connection.cursor()
cur.execute("DELETE FROM tv_show_seasons WHERE id=?", (id,))
self.connection.commit()
def delete_episode(self, id: int):
cur = self.connection.cursor()
cur.execute("DELETE FROM tv_show_season_episodes WHERE id=?", (id,))
self.connection.commit()
def add_movie(self, name: str, max_size_mb: int, resolution_profile: str, imdbid: str, cover_url: str) -> int:
cur = self.connection.cursor()
cur.execute(
"""
INSERT INTO movies(name,max_size_mb,resolution_profile, imdbid, cover_url)
VALUES(?,?,?,?,?)
""",
(name, max_size_mb, resolution_profile, imdbid,cover_url),
)
self.connection.commit()
return cur.execute('SELECT last_insert_rowid() as id').fetchone()['id']
def add_tv_show(self, name: str, max_episode_size_mb: int, resolution_profile: str, imdbid: str, cover_url: str) -> int:
cur = self.connection.cursor()
cur.execute(
"""
INSERT INTO tv_shows(name,max_episode_size_mb,resolution_profile, imdbid, cover_url)
VALUES(?,?,?,?, ?)
""",
(name, max_episode_size_mb, resolution_profile, imdbid, cover_url),
)
self.connection.commit()
return cur.execute('SELECT last_insert_rowid() as id').fetchone()['id']
def add_tv_show_season(self, show_id: int, season_number: str, season_number_episodes: int) -> int:
cur = self.connection.cursor()
cur.execute(
"""
INSERT INTO tv_show_seasons(show_id,season_number, season_number_episodes)
VALUES(?,?,?)
""",
(show_id, season_number, season_number_episodes),
)
self.connection.commit()
return cur.execute('SELECT last_insert_rowid() as id').fetchone()['id']
def add_season_episode(self, season_id: int, episode_name: str, episode_number: int, air_date: str) -> int:
cur = self.connection.cursor()
cur.execute(
"""
INSERT INTO tv_show_season_episodes(season_id, name, episode_number, air_date)
VALUES(?,?,?,?)
""",
(season_id, episode_name, episode_number, air_date),
)
self.connection.commit()
return cur.execute('SELECT last_insert_rowid() as id').fetchone()['id']
def get_season_id(self, show_id: int, season_number: int) -> int:
cur = self.connection.cursor()
row = cur.execute(
"""
SELECT id FROM tv_show_seasons WHERE show_id=? AND season_number=?
""",
(show_id, season_number),
).fetchone()
return row['id']
def update_movie(self, id: int, **kwargs: dict) -> None:
movie_table_columns = ["name", "max_size_mb",
"resolution_profile", "state", "hash", "imdbid", "cover_url"]
self.connection.row_factory = dict_factory
cur = self.connection.cursor()
columns_to_update = ""
values = ()
if not kwargs:
raise Exception("At least one argument must be specified")
for key, value in kwargs.items():
if key not in movie_table_columns:
raise Exception(
f"The key argument must be one of the following: {movie_table_columns}"
)
columns_to_update += f"{key}=?, "
values += (value,)
values += (id,)
columns_to_update = columns_to_update[:-2]
cur.execute(
f"UPDATE movies SET {columns_to_update} WHERE id=?",
values,
)
self.connection.commit()
def update_tv_show(self, id: int, **kwargs: dict) -> None:
tv_shows_table_columns = ["name", "max_episode_size_mb",
"resolution_profile", "state", "imdbid", "cover_url"]
self.connection.row_factory = dict_factory
cur = self.connection.cursor()
columns_to_update = ""
values = ()
if not kwargs:
raise Exception("At least one argument must be specified")
for key, value in kwargs.items():
if key not in tv_shows_table_columns:
raise Exception(
f"The key argument must be one of the following: {tv_shows_table_columns}"
)
columns_to_update += f"{key}=?, "
values += (value,)
values += (id,)
columns_to_update = columns_to_update[:-2]
cur.execute(
f"UPDATE tv_shows SET {columns_to_update} WHERE id=?",
values,
)
self.connection.commit()
def update_show_season(self, id: int, **kwargs: dict) -> None:
tv_show_season_table_columns = ["season_number",
"season_number_episodes", "state", "hash"]
self.connection.row_factory = dict_factory
cur = self.connection.cursor()
columns_to_update = ""
values = ()
if not kwargs:
raise Exception("At least one argument must be specified")
for key, value in kwargs.items():
if key not in tv_show_season_table_columns:
raise Exception(
f"The key argument must be one of the following: {tv_show_season_table_columns}"
)
columns_to_update += f"{key}=?, "
values += (value,)
values += (id,)
columns_to_update = columns_to_update[:-2]
cur.execute(
f"UPDATE tv_show_seasons SET {columns_to_update} WHERE id=?",
values,
)
self.connection.commit()
def update_tv_show_season_episode(self, id: int, **kwargs: dict) -> None:
tv_show_season_episode_table_columns = [
"season_id", "episode_number", "air_date", "state", "hash"]
self.connection.row_factory = dict_factory
cur = self.connection.cursor()
columns_to_update = ""
values = ()
if not kwargs:
raise Exception("At least one argument must be specified")
for key, value in kwargs.items():
if key not in tv_show_season_episode_table_columns:
raise Exception(
f"The key argument must be one of the following: {tv_show_season_episode_table_columns}"
)
columns_to_update += f"{key}=?, "
values += (value,)
values += (id,)
columns_to_update = columns_to_update[:-2]
cur.execute(
f"UPDATE tv_show_season_episodes SET {columns_to_update} WHERE id=?",
values,
)
self.connection.commit()
def get_season_states(self, show_id: int) -> set:
cur = self.connection.cursor()
cur.execute(
"SELECT season_state FROM tv_shows_with_seasons_view WHERE show_id=?", (show_id,))
result = cur.fetchall()
state_set = set()
for row in result:
state_set.add(row['season_state'])
return state_set
def get_season_episodes_states(self, season_id) -> set:
cur = self.connection.cursor()
cur.execute(
"SELECT state FROM tv_show_season_episodes WHERE season_id=?", (season_id,))
result = cur.fetchall()
state_set = set()
for row in result:
state_set.add(row['state'])
return state_set
def get_tv_show_season_numbers(self, show_id: int) -> set:
cur = self.connection.cursor()
cur.execute("SELECT season_number FROM tv_show_seasons WHERE show_id=?", (show_id,))
result = cur.fetchall()
seasons = set()
for row in result:
seasons.add(row['season_number'])
return seasons
def get_tv_show_season_episode_numbers(self, season_id: int) -> set:
cur = self.connection.cursor()
cur.execute(
"SELECT episode_number FROM tv_show_season_episodes WHERE season_id=?", (season_id,))
result = cur.fetchall()
episodes = set()
for row in result:
episodes.add(row['episode_number'])
return episodes
    def close(self) -> None:
        """Close the underlying database connection; the instance is unusable afterwards."""
        self.connection.close()
def dict_factory(cursor, row) -> dict:
    """sqlite3 row factory that maps column names to row values.

    Suitable for ``connection.row_factory``. The first field of each
    7-tuple in ``cursor.description`` is the column name.
    """
    return {col[0]: row[idx] for idx, col in enumerate(cursor.description)}
if __name__ == "__main__":
    import sys

    # Minimal smoke test / CLI: create the schema in the given db file
    # and dump the movies table.
    if len(sys.argv) < 2:
        print("Usage: python database.py <db_path>", file=sys.stderr)
        # Bug fix: a usage error must exit non-zero (was exit(0), which
        # signalled success); sys.exit is also preferred over the
        # site-provided exit() builtin in scripts.
        sys.exit(1)
    db = TBDatabase(sys.argv[1])
    db.create_schema()
    print(db.get_all_movies())
    db.close()
| true | true |
f71b389bb26a910057e54795c0cf91314386c30f | 11,436 | py | Python | sparseConv/multitask/semseg/models/res16unet.py | ShengyuH/Scene-Recognition-in-3D | 8fb869e1f8e8ff48c6f1082bb75f60a562875fc5 | [
"MIT"
] | 48 | 2020-03-02T23:05:59.000Z | 2022-02-22T11:23:17.000Z | sparseConv/multitask/semseg/models/res16unet.py | HenrryBryant/Scene-Recognition-in-3D | 8fb869e1f8e8ff48c6f1082bb75f60a562875fc5 | [
"MIT"
] | 5 | 2020-10-29T14:19:04.000Z | 2022-01-25T05:33:59.000Z | sparseConv/multitask/semseg/models/res16unet.py | HenrryBryant/Scene-Recognition-in-3D | 8fb869e1f8e8ff48c6f1082bb75f60a562875fc5 | [
"MIT"
] | 7 | 2020-06-18T05:23:01.000Z | 2021-05-13T01:26:32.000Z | from models.resnet import ResNetBase, get_norm
from models.modules.common import ConvType, NormType, conv, conv_tr
from models.modules.resnet_block import BasicBlock, Bottleneck, BasicBlockIN, BottleneckIN, BasicBlockLN
from MinkowskiEngine import MinkowskiReLU
import MinkowskiEngine.MinkowskiOps as me
class Res16UNetBase(ResNetBase):
  """Sparse residual U-Net base with a 4x-down / 4x-up encoder-decoder.

  The encoder halves the resolution four times (stride-2 sparse convs,
  pixel dist 1 -> 16); the decoder restores it with four transposed
  convolutions, concatenating the encoder feature map of the matching
  resolution (U-Net skip connection, ``me.cat``) before each decoder
  residual stage. Concrete subclasses choose the residual block type
  (BLOCK), per-stage widths (PLANES) and per-stage depths (LAYERS).
  """

  BLOCK = None  # residual block class (BasicBlock/Bottleneck); set by subclasses
  PLANES = (32, 64, 128, 256, 256, 256, 256, 256)  # stage widths: 4 encoder + 4 decoder
  DILATIONS = (1, 1, 1, 1, 1, 1, 1, 1)  # per-stage dilation
  LAYERS = (2, 2, 2, 2, 2, 2, 2, 2)  # residual blocks per stage
  INIT_DIM = 32  # stem (conv0) output channels
  OUT_PIXEL_DIST = 1  # output stride: predictions at input resolution
  NORM_TYPE = NormType.BATCH_NORM
  NON_BLOCK_CONV_TYPE = ConvType.SPATIAL_HYPERCUBE
  CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS

  # To use the model, must call initialize_coords before forward pass.
  # Once data is processed, call clear to reset the model before calling initialize_coords
  def __init__(self, in_channels, out_channels, config, D=3, **kwargs):
    # D=3 is purely spatial; D=4 adds a temporal axis (see Temporal/ST subclasses).
    super(Res16UNetBase, self).__init__(in_channels, out_channels, config, D)

  def network_initialization(self, in_channels, out_channels, config, D):
    """Instantiate every layer of the network (invoked by ResNetBase)."""
    # Setup net_metadata
    dilations = self.DILATIONS
    bn_momentum = config['bn_momentum']

    def space_n_time_m(n, m):
      # n along the spatial axes; for 4D nets, m along the temporal axis.
      return n if D == 3 else [n, n, n, m]

    if D == 4:
      self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1)

    # Output of the first conv concated to conv6
    self.inplanes = self.INIT_DIM
    # Stem: full-resolution conv (pixel dist 1, stride 1).
    self.conv0p1s1 = conv(
        in_channels,
        self.inplanes,
        kernel_size=space_n_time_m(config['conv1_kernel_size'], 1),
        stride=1,
        dilation=1,
        conv_type=self.NON_BLOCK_CONV_TYPE,
        D=D)

    self.bn0 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)

    # Encoder stage 1: downsample to pixel dist 2.
    self.conv1p1s2 = conv(
        self.inplanes,
        self.inplanes,
        kernel_size=space_n_time_m(2, 1),
        stride=space_n_time_m(2, 1),
        dilation=1,
        conv_type=self.NON_BLOCK_CONV_TYPE,
        D=D)
    self.bn1 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)

    self.block1 = self._make_layer(
        self.BLOCK,
        self.PLANES[0],
        self.LAYERS[0],
        dilation=dilations[0],
        norm_type=self.NORM_TYPE,
        bn_momentum=bn_momentum)

    # Encoder stage 2: downsample to pixel dist 4.
    self.conv2p2s2 = conv(
        self.inplanes,
        self.inplanes,
        kernel_size=space_n_time_m(2, 1),
        stride=space_n_time_m(2, 1),
        dilation=1,
        conv_type=self.NON_BLOCK_CONV_TYPE,
        D=D)
    self.bn2 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)

    self.block2 = self._make_layer(
        self.BLOCK,
        self.PLANES[1],
        self.LAYERS[1],
        dilation=dilations[1],
        norm_type=self.NORM_TYPE,
        bn_momentum=bn_momentum)

    # Encoder stage 3: downsample to pixel dist 8.
    self.conv3p4s2 = conv(
        self.inplanes,
        self.inplanes,
        kernel_size=space_n_time_m(2, 1),
        stride=space_n_time_m(2, 1),
        dilation=1,
        conv_type=self.NON_BLOCK_CONV_TYPE,
        D=D)
    self.bn3 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)

    self.block3 = self._make_layer(
        self.BLOCK,
        self.PLANES[2],
        self.LAYERS[2],
        dilation=dilations[2],
        norm_type=self.NORM_TYPE,
        bn_momentum=bn_momentum)

    # Encoder stage 4 (bottleneck): downsample to pixel dist 16.
    self.conv4p8s2 = conv(
        self.inplanes,
        self.inplanes,
        kernel_size=space_n_time_m(2, 1),
        stride=space_n_time_m(2, 1),
        dilation=1,
        conv_type=self.NON_BLOCK_CONV_TYPE,
        D=D)
    self.bn4 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)

    self.block4 = self._make_layer(
        self.BLOCK,
        self.PLANES[3],
        self.LAYERS[3],
        dilation=dilations[3],
        norm_type=self.NORM_TYPE,
        bn_momentum=bn_momentum)

    # Decoder stage 1: upsample to pixel dist 8; skip from block3.
    self.convtr4p16s2 = conv_tr(
        self.inplanes,
        self.PLANES[4],
        kernel_size=space_n_time_m(2, 1),
        upsample_stride=space_n_time_m(2, 1),
        dilation=1,
        bias=False,
        conv_type=self.NON_BLOCK_CONV_TYPE,
        D=D)
    self.bntr4 = get_norm(self.NORM_TYPE, self.PLANES[4], D, bn_momentum=bn_momentum)

    # inplanes accounts for the concatenated skip-connection channels.
    self.inplanes = self.PLANES[4] + self.PLANES[2] * self.BLOCK.expansion
    self.block5 = self._make_layer(
        self.BLOCK,
        self.PLANES[4],
        self.LAYERS[4],
        dilation=dilations[4],
        norm_type=self.NORM_TYPE,
        bn_momentum=bn_momentum)

    # Decoder stage 2: upsample to pixel dist 4; skip from block2.
    self.convtr5p8s2 = conv_tr(
        self.inplanes,
        self.PLANES[5],
        kernel_size=space_n_time_m(2, 1),
        upsample_stride=space_n_time_m(2, 1),
        dilation=1,
        bias=False,
        conv_type=self.NON_BLOCK_CONV_TYPE,
        D=D)
    self.bntr5 = get_norm(self.NORM_TYPE, self.PLANES[5], D, bn_momentum=bn_momentum)

    self.inplanes = self.PLANES[5] + self.PLANES[1] * self.BLOCK.expansion
    self.block6 = self._make_layer(
        self.BLOCK,
        self.PLANES[5],
        self.LAYERS[5],
        dilation=dilations[5],
        norm_type=self.NORM_TYPE,
        bn_momentum=bn_momentum)

    # Decoder stage 3: upsample to pixel dist 2; skip from block1.
    self.convtr6p4s2 = conv_tr(
        self.inplanes,
        self.PLANES[6],
        kernel_size=space_n_time_m(2, 1),
        upsample_stride=space_n_time_m(2, 1),
        dilation=1,
        bias=False,
        conv_type=self.NON_BLOCK_CONV_TYPE,
        D=D)
    self.bntr6 = get_norm(self.NORM_TYPE, self.PLANES[6], D, bn_momentum=bn_momentum)

    self.inplanes = self.PLANES[6] + self.PLANES[0] * self.BLOCK.expansion
    self.block7 = self._make_layer(
        self.BLOCK,
        self.PLANES[6],
        self.LAYERS[6],
        dilation=dilations[6],
        norm_type=self.NORM_TYPE,
        bn_momentum=bn_momentum)

    # Decoder stage 4: upsample to pixel dist 1; skip from the stem output.
    self.convtr7p2s2 = conv_tr(
        self.inplanes,
        self.PLANES[7],
        kernel_size=space_n_time_m(2, 1),
        upsample_stride=space_n_time_m(2, 1),
        dilation=1,
        bias=False,
        conv_type=self.NON_BLOCK_CONV_TYPE,
        D=D)
    self.bntr7 = get_norm(self.NORM_TYPE, self.PLANES[7], D, bn_momentum=bn_momentum)

    self.inplanes = self.PLANES[7] + self.INIT_DIM
    self.block8 = self._make_layer(
        self.BLOCK,
        self.PLANES[7],
        self.LAYERS[7],
        dilation=dilations[7],
        norm_type=self.NORM_TYPE,
        bn_momentum=bn_momentum)

    # 1x1 classifier conv producing out_channels logits per point.
    self.final = conv(self.PLANES[7], out_channels, kernel_size=1, stride=1, bias=True, D=D)
    self.relu = MinkowskiReLU(inplace=True)

  def forward(self, x):
    """Encoder/decoder pass with U-Net skip concatenations (``me.cat``)."""
    out = self.conv0p1s1(x)
    out = self.bn0(out)
    out_p1 = self.relu(out)  # kept for the final skip connection

    out = self.conv1p1s2(out_p1)
    out = self.bn1(out)
    out = self.relu(out)
    out_b1p2 = self.block1(out)

    out = self.conv2p2s2(out_b1p2)
    out = self.bn2(out)
    out = self.relu(out)
    out_b2p4 = self.block2(out)

    out = self.conv3p4s2(out_b2p4)
    out = self.bn3(out)
    out = self.relu(out)
    out_b3p8 = self.block3(out)

    # pixel_dist=16
    out = self.conv4p8s2(out_b3p8)
    out = self.bn4(out)
    out = self.relu(out)
    out = self.block4(out)

    # pixel_dist=8
    out = self.convtr4p16s2(out)
    out = self.bntr4(out)
    out = self.relu(out)

    out = me.cat(out, out_b3p8)
    out = self.block5(out)

    # pixel_dist=4
    out = self.convtr5p8s2(out)
    out = self.bntr5(out)
    out = self.relu(out)

    out = me.cat(out, out_b2p4)
    out = self.block6(out)

    # pixel_dist=2
    out = self.convtr6p4s2(out)
    out = self.bntr6(out)
    out = self.relu(out)

    out = me.cat(out, out_b1p2)
    out = self.block7(out)

    # pixel_dist=1
    out = self.convtr7p2s2(out)
    out = self.bntr7(out)
    out = self.relu(out)

    out = me.cat(out, out_p1)
    out = self.block8(out)

    return self.final(out)
# Depth variants: LAYERS tuples mirror ResNet-14/18/34/50/101 stage depths.
class Res16UNet14(Res16UNetBase):
  BLOCK = BasicBlock
  LAYERS = (1, 1, 1, 1, 1, 1, 1, 1)


class Res16UNet18(Res16UNetBase):
  BLOCK = BasicBlock
  LAYERS = (2, 2, 2, 2, 2, 2, 2, 2)


class Res16UNet34(Res16UNetBase):
  BLOCK = BasicBlock
  LAYERS = (2, 3, 4, 6, 2, 2, 2, 2)


class Res16UNet50(Res16UNetBase):
  BLOCK = Bottleneck
  LAYERS = (2, 3, 4, 6, 2, 2, 2, 2)


class Res16UNet101(Res16UNetBase):
  BLOCK = Bottleneck
  LAYERS = (2, 3, 4, 23, 2, 2, 2, 2)


# Width variants: the letter suffix selects alternative PLANES (mainly
# different decoder channel widths); a trailing digit also tweaks LAYERS.
class Res16UNet14A(Res16UNet14):
  PLANES = (32, 64, 128, 256, 128, 128, 96, 96)


class Res16UNet14A2(Res16UNet14A):
  LAYERS = (1, 1, 1, 1, 2, 2, 2, 2)


class Res16UNet14B(Res16UNet14):
  PLANES = (32, 64, 128, 256, 128, 128, 128, 128)


class Res16UNet14B2(Res16UNet14B):
  LAYERS = (1, 1, 1, 1, 2, 2, 2, 2)


class Res16UNet14B3(Res16UNet14B):
  LAYERS = (2, 2, 2, 2, 1, 1, 1, 1)


class Res16UNet14C(Res16UNet14):
  PLANES = (32, 64, 128, 256, 192, 192, 128, 128)


class Res16UNet14D(Res16UNet14):
  PLANES = (32, 64, 128, 256, 384, 384, 384, 384)


class Res16UNet18A(Res16UNet18):
  PLANES = (32, 64, 128, 256, 128, 128, 96, 96)


class Res16UNet18B(Res16UNet18):
  PLANES = (32, 64, 128, 256, 128, 128, 128, 128)


class Res16UNet18D(Res16UNet18):
  PLANES = (32, 64, 128, 256, 384, 384, 384, 384)


class Res16UNet34A(Res16UNet34):
  PLANES = (32, 64, 128, 256, 256, 128, 64, 64)


class Res16UNet34B(Res16UNet34):
  PLANES = (32, 64, 128, 256, 256, 128, 64, 32)


class Res16UNet34C(Res16UNet34):
  PLANES = (32, 64, 128, 256, 256, 128, 96, 96)
# Experimentally, worse than others
class Res16UNetLN14(Res16UNet14):
  # Layer-norm flavour of the 14-layer net.
  NORM_TYPE = NormType.SPARSE_LAYER_NORM
  BLOCK = BasicBlockLN


class Res16UNetTemporalBase(Res16UNetBase):
  """
  Res16UNet that can take 4D independently. No temporal convolution.
  """
  CONV_TYPE = ConvType.SPATIAL_HYPERCUBE

  def __init__(self, in_channels, out_channels, config, D=4, **kwargs):
    # Default D=4: inputs carry a temporal axis even though kernels stay spatial.
    super(Res16UNetTemporalBase, self).__init__(in_channels, out_channels, config, D, **kwargs)


class Res16UNetTemporal14(Res16UNet14, Res16UNetTemporalBase):
  pass


class Res16UNetTemporal18(Res16UNet18, Res16UNetTemporalBase):
  pass


class Res16UNetTemporal34(Res16UNet34, Res16UNetTemporalBase):
  pass


class Res16UNetTemporal50(Res16UNet50, Res16UNetTemporalBase):
  pass


class Res16UNetTemporal101(Res16UNet101, Res16UNetTemporalBase):
  pass


# Instance-norm flavours of the temporal nets.
class Res16UNetTemporalIN14(Res16UNetTemporal14):
  NORM_TYPE = NormType.SPARSE_INSTANCE_NORM
  BLOCK = BasicBlockIN


class Res16UNetTemporalIN18(Res16UNetTemporal18):
  NORM_TYPE = NormType.SPARSE_INSTANCE_NORM
  BLOCK = BasicBlockIN


class Res16UNetTemporalIN34(Res16UNetTemporal34):
  NORM_TYPE = NormType.SPARSE_INSTANCE_NORM
  BLOCK = BasicBlockIN


class Res16UNetTemporalIN50(Res16UNetTemporal50):
  NORM_TYPE = NormType.SPARSE_INSTANCE_NORM
  BLOCK = BottleneckIN


class Res16UNetTemporalIN101(Res16UNetTemporal101):
  NORM_TYPE = NormType.SPARSE_INSTANCE_NORM
  BLOCK = BottleneckIN
class STRes16UNetBase(Res16UNetBase):
  """Spatio-temporal variant: 4D inputs with a hypercross temporal kernel."""
  CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS

  def __init__(self, in_channels, out_channels, config, D=4, **kwargs):
    super(STRes16UNetBase, self).__init__(in_channels, out_channels, config, D, **kwargs)


class STRes16UNet14(STRes16UNetBase, Res16UNet14):
  pass


class STRes16UNet18(STRes16UNetBase, Res16UNet18):
  pass


class STRes16UNet34(STRes16UNetBase, Res16UNet34):
  pass


class STRes16UNet50(STRes16UNetBase, Res16UNet50):
  pass


class STRes16UNet101(STRes16UNetBase, Res16UNet101):
  pass


class STRes16UNet18A(STRes16UNet18):
  PLANES = (32, 64, 128, 256, 128, 128, 96, 96)


class STResTesseract16UNetBase(STRes16UNetBase):
  # Full hypercube (tesseract) kernels across space and time.
  CONV_TYPE = ConvType.HYPERCUBE


class STResTesseract16UNet18A(STRes16UNet18A, STResTesseract16UNetBase):
  pass
| 26.411085 | 104 | 0.669902 | from models.resnet import ResNetBase, get_norm
from models.modules.common import ConvType, NormType, conv, conv_tr
from models.modules.resnet_block import BasicBlock, Bottleneck, BasicBlockIN, BottleneckIN, BasicBlockLN
from MinkowskiEngine import MinkowskiReLU
import MinkowskiEngine.MinkowskiOps as me
class Res16UNetBase(ResNetBase):
BLOCK = None
PLANES = (32, 64, 128, 256, 256, 256, 256, 256)
DILATIONS = (1, 1, 1, 1, 1, 1, 1, 1)
LAYERS = (2, 2, 2, 2, 2, 2, 2, 2)
INIT_DIM = 32
OUT_PIXEL_DIST = 1
NORM_TYPE = NormType.BATCH_NORM
NON_BLOCK_CONV_TYPE = ConvType.SPATIAL_HYPERCUBE
CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS
def __init__(self, in_channels, out_channels, config, D=3, **kwargs):
super(Res16UNetBase, self).__init__(in_channels, out_channels, config, D)
def network_initialization(self, in_channels, out_channels, config, D):
dilations = self.DILATIONS
bn_momentum = config['bn_momentum']
def space_n_time_m(n, m):
return n if D == 3 else [n, n, n, m]
if D == 4:
self.OUT_PIXEL_DIST = space_n_time_m(self.OUT_PIXEL_DIST, 1)
self.inplanes = self.INIT_DIM
self.conv0p1s1 = conv(
in_channels,
self.inplanes,
kernel_size=space_n_time_m(config['conv1_kernel_size'], 1),
stride=1,
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn0 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.conv1p1s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn1 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block1 = self._make_layer(
self.BLOCK,
self.PLANES[0],
self.LAYERS[0],
dilation=dilations[0],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.conv2p2s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn2 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block2 = self._make_layer(
self.BLOCK,
self.PLANES[1],
self.LAYERS[1],
dilation=dilations[1],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.conv3p4s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn3 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block3 = self._make_layer(
self.BLOCK,
self.PLANES[2],
self.LAYERS[2],
dilation=dilations[2],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.conv4p8s2 = conv(
self.inplanes,
self.inplanes,
kernel_size=space_n_time_m(2, 1),
stride=space_n_time_m(2, 1),
dilation=1,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bn4 = get_norm(self.NORM_TYPE, self.inplanes, D, bn_momentum=bn_momentum)
self.block4 = self._make_layer(
self.BLOCK,
self.PLANES[3],
self.LAYERS[3],
dilation=dilations[3],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.convtr4p16s2 = conv_tr(
self.inplanes,
self.PLANES[4],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr4 = get_norm(self.NORM_TYPE, self.PLANES[4], D, bn_momentum=bn_momentum)
self.inplanes = self.PLANES[4] + self.PLANES[2] * self.BLOCK.expansion
self.block5 = self._make_layer(
self.BLOCK,
self.PLANES[4],
self.LAYERS[4],
dilation=dilations[4],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.convtr5p8s2 = conv_tr(
self.inplanes,
self.PLANES[5],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr5 = get_norm(self.NORM_TYPE, self.PLANES[5], D, bn_momentum=bn_momentum)
self.inplanes = self.PLANES[5] + self.PLANES[1] * self.BLOCK.expansion
self.block6 = self._make_layer(
self.BLOCK,
self.PLANES[5],
self.LAYERS[5],
dilation=dilations[5],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.convtr6p4s2 = conv_tr(
self.inplanes,
self.PLANES[6],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr6 = get_norm(self.NORM_TYPE, self.PLANES[6], D, bn_momentum=bn_momentum)
self.inplanes = self.PLANES[6] + self.PLANES[0] * self.BLOCK.expansion
self.block7 = self._make_layer(
self.BLOCK,
self.PLANES[6],
self.LAYERS[6],
dilation=dilations[6],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.convtr7p2s2 = conv_tr(
self.inplanes,
self.PLANES[7],
kernel_size=space_n_time_m(2, 1),
upsample_stride=space_n_time_m(2, 1),
dilation=1,
bias=False,
conv_type=self.NON_BLOCK_CONV_TYPE,
D=D)
self.bntr7 = get_norm(self.NORM_TYPE, self.PLANES[7], D, bn_momentum=bn_momentum)
self.inplanes = self.PLANES[7] + self.INIT_DIM
self.block8 = self._make_layer(
self.BLOCK,
self.PLANES[7],
self.LAYERS[7],
dilation=dilations[7],
norm_type=self.NORM_TYPE,
bn_momentum=bn_momentum)
self.final = conv(self.PLANES[7], out_channels, kernel_size=1, stride=1, bias=True, D=D)
self.relu = MinkowskiReLU(inplace=True)
def forward(self, x):
out = self.conv0p1s1(x)
out = self.bn0(out)
out_p1 = self.relu(out)
out = self.conv1p1s2(out_p1)
out = self.bn1(out)
out = self.relu(out)
out_b1p2 = self.block1(out)
out = self.conv2p2s2(out_b1p2)
out = self.bn2(out)
out = self.relu(out)
out_b2p4 = self.block2(out)
out = self.conv3p4s2(out_b2p4)
out = self.bn3(out)
out = self.relu(out)
out_b3p8 = self.block3(out)
out = self.conv4p8s2(out_b3p8)
out = self.bn4(out)
out = self.relu(out)
out = self.block4(out)
out = self.convtr4p16s2(out)
out = self.bntr4(out)
out = self.relu(out)
out = me.cat(out, out_b3p8)
out = self.block5(out)
out = self.convtr5p8s2(out)
out = self.bntr5(out)
out = self.relu(out)
out = me.cat(out, out_b2p4)
out = self.block6(out)
out = self.convtr6p4s2(out)
out = self.bntr6(out)
out = self.relu(out)
out = me.cat(out, out_b1p2)
out = self.block7(out)
out = self.convtr7p2s2(out)
out = self.bntr7(out)
out = self.relu(out)
out = me.cat(out, out_p1)
out = self.block8(out)
return self.final(out)
class Res16UNet14(Res16UNetBase):
BLOCK = BasicBlock
LAYERS = (1, 1, 1, 1, 1, 1, 1, 1)
class Res16UNet18(Res16UNetBase):
BLOCK = BasicBlock
LAYERS = (2, 2, 2, 2, 2, 2, 2, 2)
class Res16UNet34(Res16UNetBase):
BLOCK = BasicBlock
LAYERS = (2, 3, 4, 6, 2, 2, 2, 2)
class Res16UNet50(Res16UNetBase):
BLOCK = Bottleneck
LAYERS = (2, 3, 4, 6, 2, 2, 2, 2)
class Res16UNet101(Res16UNetBase):
BLOCK = Bottleneck
LAYERS = (2, 3, 4, 23, 2, 2, 2, 2)
class Res16UNet14A(Res16UNet14):
PLANES = (32, 64, 128, 256, 128, 128, 96, 96)
class Res16UNet14A2(Res16UNet14A):
LAYERS = (1, 1, 1, 1, 2, 2, 2, 2)
class Res16UNet14B(Res16UNet14):
PLANES = (32, 64, 128, 256, 128, 128, 128, 128)
class Res16UNet14B2(Res16UNet14B):
LAYERS = (1, 1, 1, 1, 2, 2, 2, 2)
class Res16UNet14B3(Res16UNet14B):
LAYERS = (2, 2, 2, 2, 1, 1, 1, 1)
class Res16UNet14C(Res16UNet14):
PLANES = (32, 64, 128, 256, 192, 192, 128, 128)
class Res16UNet14D(Res16UNet14):
PLANES = (32, 64, 128, 256, 384, 384, 384, 384)
class Res16UNet18A(Res16UNet18):
PLANES = (32, 64, 128, 256, 128, 128, 96, 96)
class Res16UNet18B(Res16UNet18):
PLANES = (32, 64, 128, 256, 128, 128, 128, 128)
class Res16UNet18D(Res16UNet18):
PLANES = (32, 64, 128, 256, 384, 384, 384, 384)
class Res16UNet34A(Res16UNet34):
PLANES = (32, 64, 128, 256, 256, 128, 64, 64)
class Res16UNet34B(Res16UNet34):
PLANES = (32, 64, 128, 256, 256, 128, 64, 32)
class Res16UNet34C(Res16UNet34):
PLANES = (32, 64, 128, 256, 256, 128, 96, 96)
class Res16UNetLN14(Res16UNet14):
NORM_TYPE = NormType.SPARSE_LAYER_NORM
BLOCK = BasicBlockLN
class Res16UNetTemporalBase(Res16UNetBase):
CONV_TYPE = ConvType.SPATIAL_HYPERCUBE
def __init__(self, in_channels, out_channels, config, D=4, **kwargs):
super(Res16UNetTemporalBase, self).__init__(in_channels, out_channels, config, D, **kwargs)
class Res16UNetTemporal14(Res16UNet14, Res16UNetTemporalBase):
pass
class Res16UNetTemporal18(Res16UNet18, Res16UNetTemporalBase):
pass
class Res16UNetTemporal34(Res16UNet34, Res16UNetTemporalBase):
pass
class Res16UNetTemporal50(Res16UNet50, Res16UNetTemporalBase):
pass
class Res16UNetTemporal101(Res16UNet101, Res16UNetTemporalBase):
pass
class Res16UNetTemporalIN14(Res16UNetTemporal14):
NORM_TYPE = NormType.SPARSE_INSTANCE_NORM
BLOCK = BasicBlockIN
class Res16UNetTemporalIN18(Res16UNetTemporal18):
NORM_TYPE = NormType.SPARSE_INSTANCE_NORM
BLOCK = BasicBlockIN
class Res16UNetTemporalIN34(Res16UNetTemporal34):
NORM_TYPE = NormType.SPARSE_INSTANCE_NORM
BLOCK = BasicBlockIN
class Res16UNetTemporalIN50(Res16UNetTemporal50):
NORM_TYPE = NormType.SPARSE_INSTANCE_NORM
BLOCK = BottleneckIN
class Res16UNetTemporalIN101(Res16UNetTemporal101):
NORM_TYPE = NormType.SPARSE_INSTANCE_NORM
BLOCK = BottleneckIN
class STRes16UNetBase(Res16UNetBase):
CONV_TYPE = ConvType.SPATIAL_HYPERCUBE_TEMPORAL_HYPERCROSS
def __init__(self, in_channels, out_channels, config, D=4, **kwargs):
super(STRes16UNetBase, self).__init__(in_channels, out_channels, config, D, **kwargs)
class STRes16UNet14(STRes16UNetBase, Res16UNet14):
pass
class STRes16UNet18(STRes16UNetBase, Res16UNet18):
pass
class STRes16UNet34(STRes16UNetBase, Res16UNet34):
pass
class STRes16UNet50(STRes16UNetBase, Res16UNet50):
pass
class STRes16UNet101(STRes16UNetBase, Res16UNet101):
pass
class STRes16UNet18A(STRes16UNet18):
PLANES = (32, 64, 128, 256, 128, 128, 96, 96)
class STResTesseract16UNetBase(STRes16UNetBase):
CONV_TYPE = ConvType.HYPERCUBE
class STResTesseract16UNet18A(STRes16UNet18A, STResTesseract16UNetBase):
pass
| true | true |
f71b38a82f278340c42a84945a9e9d87c7755673 | 5,609 | py | Python | pinyinsplit.py | throput/pinyinsplit | e500da5b4b37e4d7762790825e1efd1e6e0f4765 | [
"MIT"
] | 5 | 2018-11-12T19:33:37.000Z | 2021-05-26T05:03:48.000Z | pinyinsplit.py | throput/pinyinsplit | e500da5b4b37e4d7762790825e1efd1e6e0f4765 | [
"MIT"
] | null | null | null | pinyinsplit.py | throput/pinyinsplit | e500da5b4b37e4d7762790825e1efd1e6e0f4765 | [
"MIT"
] | 1 | 2021-08-10T07:15:18.000Z | 2021-08-10T07:15:18.000Z | from pygtrie import CharTrie
import copy
"""
Split a Chinese Pinyin phrase into a list of possible permutations of Pinyin words.
For example,
>>> from pinyinsplit import PinyinSplit
>>> pys = PinyinSplit()
>>> pys.split('XiangGangDaXue')
[['Xiang', 'Gang', 'Da', 'Xue'], ['Xiang', 'Gang', 'Da', 'Xu', 'e'], ['Xi', 'ang', 'Gang', 'Da', 'Xue'], ['Xi', 'ang', 'Gang', 'Da', 'Xu', 'e']]
"""
class PinyinSplit:
    """Split a Chinese Pinyin phrase into a list of possible permutations of Pinyin words.

    It returns a list of all possible permutations of valid Pinyin words.
    If the Pinyin phrase cannot be exhaustively split into valid Pinyin words, an empty list will be returned.

    >>> from pinyinsplit import PinyinSplit
    >>> pys = PinyinSplit()
    >>> pys.split('shediaoyingxiongchuan')
    [['she', 'diao', 'ying', 'xiong', 'chuan'], ['she', 'diao', 'ying', 'xiong', 'chu', 'an'], ['she', 'di', 'ao', 'ying', 'xiong', 'chuan'], ['she', 'di', 'ao', 'ying', 'xiong', 'chu', 'an']]
    >>> pys.split('shediaoyingxiongchuanxyz')
    []
    """

    # Every syllable that counts as a valid Pinyin word.
    pylist = [
        'a', 'ai', 'an', 'ang', 'ao',
        'ba', 'bai', 'ban', 'bang', 'bao', 'bei', 'ben', 'beng',
        'bi', 'bian', 'biang', 'biao', 'bie', 'bin', 'bing', 'bo', 'bu',
        'ca', 'cai', 'can', 'cang', 'cao', 'ce', 'cen', 'ceng',
        'cha', 'chai', 'chan', 'chang', 'chao', 'che', 'chen', 'cheng',
        'chi', 'chong', 'chou', 'chu', 'chua', 'chuai', 'chuan', 'chuang', 'chui', 'chun', 'chuo',
        'ci', 'cong', 'cou', 'cu', 'cuan', 'cui', 'cun', 'cuo',
        'da', 'dai', 'dan', 'dang', 'dao', 'de', 'dei', 'den', 'deng',
        'di', 'dia', 'dian', 'diang', 'diao', 'die', 'ding', 'diu',
        'dong', 'dou', 'du', 'duan', 'dui', 'dun', 'duo',
        'e', 'ei', 'en', 'eng', 'er',
        'fa', 'fan', 'fang', 'fei', 'fen', 'feng', 'fiao',
        'fo', 'fou', 'fu', 'ga', 'gai', 'gan', 'gang', 'gao',
        'ge', 'gei', 'gen', 'geng', 'gong', 'gou',
        'gu', 'gua', 'guai', 'guan', 'guang', 'gui', 'gun', 'guo',
        'ha', 'hai', 'han', 'hang', 'hao', 'he', 'hei', 'hen', 'heng',
        'hong', 'hou', 'hu', 'hua', 'huai', 'huan', 'huang', 'hui', 'hun', 'huo',
        'ji', 'jia', 'jian', 'jiang', 'jiao', 'jie', 'jin', 'jing', 'jiong', 'jiu', 'ju', 'juan', 'jue', 'jun',
        'ka', 'kai', 'kan', 'kang', 'kao', 'ke', 'kei', 'ken', 'keng',
        'kong', 'kou', 'ku', 'kua', 'kuai', 'kuan', 'kuang', 'kui', 'kun', 'kuo',
        'la', 'lai', 'lan', 'lang', 'lao', 'le', 'lei', 'leng',
        'li', 'lia', 'lian', 'liang', 'liao', 'lie', 'lin', 'ling', 'liu', 'long', 'lou',
        'lu', 'luan', 'lue', 'lun', 'luo', 'lv', 'lve', 'lvn', 'lü', 'lüe', 'lün',
        'ma', 'mai', 'man', 'mang', 'mao', 'me', 'mei', 'men', 'meng',
        'mi', 'mian', 'miao', 'mie', 'min', 'ming', 'miu', 'mo', 'mou', 'mu',
        'na', 'nai', 'nan', 'nang', 'nao', 'ne', 'nei', 'nen', 'neng',
        'ni', 'nia', 'nian', 'niang', 'niao', 'nie', 'nin', 'ning', 'niu',
        'nong', 'nou', 'nu', 'nuan', 'nue', 'nun', 'nuo', 'nv', 'nve', 'nü', 'nüe', 'ou',
        'pa', 'pai', 'pan', 'pang', 'pao', 'pei', 'pen', 'peng',
        'pi', 'pian', 'piao', 'pie', 'pin', 'ping', 'po', 'pou', 'pu',
        'qi', 'qia', 'qian', 'qiang', 'qiao', 'qie',
        'qin', 'qing', 'qiong', 'qiu', 'qu', 'quan', 'que', 'qun',
        'ran', 'rang', 'rao', 're', 'ren', 'reng', 'ri', 'rong', 'rou',
        'ru', 'rua', 'ruan', 'rui', 'run', 'ruo',
        'sa', 'sai', 'san', 'sang', 'sao', 'se', 'sei', 'sen', 'seng',
        'sha', 'shai', 'shan', 'shang', 'shao', 'she', 'shei', 'shen', 'sheng', 'shi',
        'shong', 'shou', 'shu', 'shua', 'shuai', 'shuan', 'shuang', 'shui', 'shun', 'shuo',
        'si', 'song', 'sou', 'su', 'suan', 'sui', 'sun', 'suo',
        'ta', 'tai', 'tan', 'tang', 'tao', 'te', 'tei', 'teng',
        'ti', 'tian', 'tiao', 'tie', 'ting', 'tong', 'tou',
        'tu', 'tuan', 'tui', 'tun', 'tuo',
        'wa', 'wai', 'wan', 'wang', 'wei', 'wen', 'weng', 'wo', 'wu',
        'xi', 'xia', 'xian', 'xiang', 'xiao', 'xie', 'xin', 'xing', 'xiong', 'xiu', 'xu', 'xuan', 'xue', 'xun',
        'ya', 'yai', 'yan', 'yang', 'yao', 'ye', 'yi', 'yin', 'ying',
        'yo', 'yong', 'you', 'yu', 'yuan', 'yue', 'yun',
        'za', 'zai', 'zan', 'zang', 'zao', 'ze', 'zei', 'zen', 'zeng',
        'zha', 'zhai', 'zhan', 'zhang', 'zhao', 'zhe', 'zhei', 'zhen', 'zheng',
        'zhi', 'zhong', 'zhou', 'zhu', 'zhua', 'zhuai', 'zhuan', 'zhuang', 'zhui', 'zhun', 'zhuo',
        'zi', 'zong', 'zou', 'zu', 'zuan', 'zui', 'zun', 'zuo', 'ê'
    ]

    def __init__(self):
        # Map every valid syllable to its length so a trie prefix lookup
        # immediately yields how many characters the match consumes.
        self.trie = CharTrie()
        for py in self.pylist:
            self.trie[py] = len(py)

    def split(self, phrase):
        """Return all exhaustive segmentations of ``phrase`` into Pinyin words.

        Matching is case-insensitive, but the original casing of ``phrase``
        is preserved in the returned words. Returns ``[]`` when the phrase
        cannot be split exhaustively into valid syllables.
        """
        phrase_lc = phrase.lower()
        # Depth-first stack of (remaining text, lowercased remainder, words so far).
        pending = []
        results = []
        if phrase:
            pending.append((phrase, phrase_lc, []))
        while pending:
            phrase, phrase_lc, words = pending.pop()
            for match in self.trie.prefixes(phrase_lc):
                n = match[1]  # stored value: length of the matched syllable
                word = phrase[:n]
                tail = phrase[n:]
                tail_lc = phrase_lc[n:]
                # The accumulated words are immutable strings, so a new list via
                # concatenation suffices; the original used copy.deepcopy, which
                # deep-walks the list on every branch for no benefit.
                words_copy = words + [word]
                if tail:
                    pending.append((tail, tail_lc, words_copy))
                else:
                    results.append(words_copy)
        return results
if __name__ == "__main__":
import doctest
doctest.testmod() | 50.080357 | 192 | 0.456409 | from pygtrie import CharTrie
import copy
class PinyinSplit:
    """Split a Chinese Pinyin phrase into all possible lists of valid Pinyin words.

    ``split`` returns every exhaustive segmentation; if the phrase cannot be
    fully split into valid syllables, an empty list is returned.
    """

    # Every syllable that counts as a valid Pinyin word.
    pylist = [
        'a', 'ai', 'an', 'ang', 'ao',
        'ba', 'bai', 'ban', 'bang', 'bao', 'bei', 'ben', 'beng',
        'bi', 'bian', 'biang', 'biao', 'bie', 'bin', 'bing', 'bo', 'bu',
        'ca', 'cai', 'can', 'cang', 'cao', 'ce', 'cen', 'ceng',
        'cha', 'chai', 'chan', 'chang', 'chao', 'che', 'chen', 'cheng',
        'chi', 'chong', 'chou', 'chu', 'chua', 'chuai', 'chuan', 'chuang', 'chui', 'chun', 'chuo',
        'ci', 'cong', 'cou', 'cu', 'cuan', 'cui', 'cun', 'cuo',
        'da', 'dai', 'dan', 'dang', 'dao', 'de', 'dei', 'den', 'deng',
        'di', 'dia', 'dian', 'diang', 'diao', 'die', 'ding', 'diu',
        'dong', 'dou', 'du', 'duan', 'dui', 'dun', 'duo',
        'e', 'ei', 'en', 'eng', 'er',
        'fa', 'fan', 'fang', 'fei', 'fen', 'feng', 'fiao',
        'fo', 'fou', 'fu', 'ga', 'gai', 'gan', 'gang', 'gao',
        'ge', 'gei', 'gen', 'geng', 'gong', 'gou',
        'gu', 'gua', 'guai', 'guan', 'guang', 'gui', 'gun', 'guo',
        'ha', 'hai', 'han', 'hang', 'hao', 'he', 'hei', 'hen', 'heng',
        'hong', 'hou', 'hu', 'hua', 'huai', 'huan', 'huang', 'hui', 'hun', 'huo',
        'ji', 'jia', 'jian', 'jiang', 'jiao', 'jie', 'jin', 'jing', 'jiong', 'jiu', 'ju', 'juan', 'jue', 'jun',
        'ka', 'kai', 'kan', 'kang', 'kao', 'ke', 'kei', 'ken', 'keng',
        'kong', 'kou', 'ku', 'kua', 'kuai', 'kuan', 'kuang', 'kui', 'kun', 'kuo',
        'la', 'lai', 'lan', 'lang', 'lao', 'le', 'lei', 'leng',
        'li', 'lia', 'lian', 'liang', 'liao', 'lie', 'lin', 'ling', 'liu', 'long', 'lou',
        'lu', 'luan', 'lue', 'lun', 'luo', 'lv', 'lve', 'lvn', 'lü', 'lüe', 'lün',
        'ma', 'mai', 'man', 'mang', 'mao', 'me', 'mei', 'men', 'meng',
        'mi', 'mian', 'miao', 'mie', 'min', 'ming', 'miu', 'mo', 'mou', 'mu',
        'na', 'nai', 'nan', 'nang', 'nao', 'ne', 'nei', 'nen', 'neng',
        'ni', 'nia', 'nian', 'niang', 'niao', 'nie', 'nin', 'ning', 'niu',
        'nong', 'nou', 'nu', 'nuan', 'nue', 'nun', 'nuo', 'nv', 'nve', 'nü', 'nüe', 'ou',
        'pa', 'pai', 'pan', 'pang', 'pao', 'pei', 'pen', 'peng',
        'pi', 'pian', 'piao', 'pie', 'pin', 'ping', 'po', 'pou', 'pu',
        'qi', 'qia', 'qian', 'qiang', 'qiao', 'qie',
        'qin', 'qing', 'qiong', 'qiu', 'qu', 'quan', 'que', 'qun',
        'ran', 'rang', 'rao', 're', 'ren', 'reng', 'ri', 'rong', 'rou',
        'ru', 'rua', 'ruan', 'rui', 'run', 'ruo',
        'sa', 'sai', 'san', 'sang', 'sao', 'se', 'sei', 'sen', 'seng',
        'sha', 'shai', 'shan', 'shang', 'shao', 'she', 'shei', 'shen', 'sheng', 'shi',
        'shong', 'shou', 'shu', 'shua', 'shuai', 'shuan', 'shuang', 'shui', 'shun', 'shuo',
        'si', 'song', 'sou', 'su', 'suan', 'sui', 'sun', 'suo',
        'ta', 'tai', 'tan', 'tang', 'tao', 'te', 'tei', 'teng',
        'ti', 'tian', 'tiao', 'tie', 'ting', 'tong', 'tou',
        'tu', 'tuan', 'tui', 'tun', 'tuo',
        'wa', 'wai', 'wan', 'wang', 'wei', 'wen', 'weng', 'wo', 'wu',
        'xi', 'xia', 'xian', 'xiang', 'xiao', 'xie', 'xin', 'xing', 'xiong', 'xiu', 'xu', 'xuan', 'xue', 'xun',
        'ya', 'yai', 'yan', 'yang', 'yao', 'ye', 'yi', 'yin', 'ying',
        'yo', 'yong', 'you', 'yu', 'yuan', 'yue', 'yun',
        'za', 'zai', 'zan', 'zang', 'zao', 'ze', 'zei', 'zen', 'zeng',
        'zha', 'zhai', 'zhan', 'zhang', 'zhao', 'zhe', 'zhei', 'zhen', 'zheng',
        'zhi', 'zhong', 'zhou', 'zhu', 'zhua', 'zhuai', 'zhuan', 'zhuang', 'zhui', 'zhun', 'zhuo',
        'zi', 'zong', 'zou', 'zu', 'zuan', 'zui', 'zun', 'zuo', 'ê'
    ]

    def __init__(self):
        # Map every valid syllable to its length so a trie prefix lookup
        # immediately yields how many characters the match consumes.
        self.trie = CharTrie()
        for py in self.pylist:
            self.trie[py] = len(py)

    def split(self, phrase):
        """Return all exhaustive segmentations of ``phrase`` into Pinyin words.

        Matching is case-insensitive, but the original casing of ``phrase``
        is preserved in the returned words. Returns ``[]`` when the phrase
        cannot be split exhaustively into valid syllables.
        """
        phrase_lc = phrase.lower()
        # Depth-first stack of (remaining text, lowercased remainder, words so far).
        pending = []
        results = []
        if phrase:
            pending.append((phrase, phrase_lc, []))
        while pending:
            phrase, phrase_lc, words = pending.pop()
            for match in self.trie.prefixes(phrase_lc):
                n = match[1]  # stored value: length of the matched syllable
                word = phrase[:n]
                tail = phrase[n:]
                tail_lc = phrase_lc[n:]
                # The accumulated words are immutable strings, so a new list via
                # concatenation suffices; the original used copy.deepcopy, which
                # deep-walks the list on every branch for no benefit.
                words_copy = words + [word]
                if tail:
                    pending.append((tail, tail_lc, words_copy))
                else:
                    results.append(words_copy)
        return results
if __name__ == "__main__":
import doctest
doctest.testmod() | true | true |
f71b39087446ecc9cc6e057576d78b80e52404ee | 340 | py | Python | src/chenv/__init__.py | jonathan-shemer/chenv | e2b86b7a53031a35def1be21ece87a05d74d2919 | [
"MIT"
] | 3 | 2020-10-15T07:46:48.000Z | 2021-09-06T20:49:05.000Z | src/chenv/__init__.py | jonathan-shemer/chenv | e2b86b7a53031a35def1be21ece87a05d74d2919 | [
"MIT"
] | 5 | 2021-01-27T11:47:12.000Z | 2021-08-30T08:49:37.000Z | src/chenv/__init__.py | jonathan-shemer/chenv | e2b86b7a53031a35def1be21ece87a05d74d2919 | [
"MIT"
] | 1 | 2022-03-15T09:29:19.000Z | 2022-03-15T09:29:19.000Z | """chenv."""
# Prefer the stdlib importlib.metadata (Python 3.8+); fall back to the
# importlib_metadata backport on older interpreters.
try:
    from importlib.metadata import version, PackageNotFoundError  # type: ignore
except ImportError:  # pragma: no cover
    from importlib_metadata import version, PackageNotFoundError  # type: ignore

# Resolve the installed distribution's version; "unknown" when the package
# metadata is unavailable (e.g. running from a source checkout).
try:
    __version__ = version(__name__)
except PackageNotFoundError:  # pragma: no cover
    __version__ = "unknown"
| 28.333333 | 80 | 0.744118 | try:
from importlib.metadata import version, PackageNotFoundError
except ImportError:
from importlib_metadata import version, PackageNotFoundError
try:
__version__ = version(__name__)
except PackageNotFoundError:
__version__ = "unknown"
| true | true |
f71b39d53d82554ce904392600c340709e0534bb | 2,018 | py | Python | pandas/tests/indexing/multiindex/test_chaining_and_caching.py | oricou/pandas | 9405e58d9268041f5416711c051cf5429a19bf49 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2 | 2021-05-07T04:58:36.000Z | 2021-05-07T04:58:59.000Z | pandas/tests/indexing/multiindex/test_chaining_and_caching.py | oricou/pandas | 9405e58d9268041f5416711c051cf5429a19bf49 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/tests/indexing/multiindex/test_chaining_and_caching.py | oricou/pandas | 9405e58d9268041f5416711c051cf5429a19bf49 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2 | 2021-06-16T07:19:12.000Z | 2021-12-16T10:24:44.000Z | import numpy as np
import pytest
from pandas import (
DataFrame,
MultiIndex,
Series,
)
import pandas._testing as tm
import pandas.core.common as com
def test_detect_chained_assignment():
    # Inplace ops, originally from:
    # https://stackoverflow.com/questions/20508968/series-fillna-in-a-multiindex-dataframe-does-not-fill-is-this-a-bug
    # Build a 2x4 frame whose columns form a (part, side) MultiIndex.
    a = [12, 23]
    b = [123, None]
    c = [1234, 2345]
    d = [12345, 23456]
    tuples = [("eyes", "left"), ("eyes", "right"), ("ears", "left"), ("ears", "right")]
    events = {
        ("eyes", "left"): a,
        ("eyes", "right"): b,
        ("ears", "left"): c,
        ("ears", "right"): d,
    }
    multiind = MultiIndex.from_tuples(tuples, names=["part", "side"])
    zed = DataFrame(events, index=["a", "b"], columns=multiind)

    # The chained selection zed["eyes"]["right"] is flagged as operating on a
    # copy, so an inplace fillna must raise SettingWithCopyError instead of
    # silently modifying nothing.
    msg = "A value is trying to be set on a copy of a slice from a DataFrame"
    with pytest.raises(com.SettingWithCopyError, match=msg):
        zed["eyes"]["right"].fillna(value=555, inplace=True)
def test_cache_updating():
    # GH 5216
    # make sure that we don't try to set a dead cache
    a = np.random.rand(10, 3)
    df = DataFrame(a, columns=["x", "y", "z"])
    tuples = [(i, j) for i in range(5) for j in range(2)]
    index = MultiIndex.from_tuples(tuples)
    df.index = index

    # setting via chained assignment
    # but actually works, since everything is a view at this point
    df.loc[0]["z"].iloc[0] = 1.0
    result = df.loc[(0, 0), "z"]
    assert result == 1

    # correct (non-chained) setting through a single .loc call
    df.loc[(0, 0), "z"] = 2
    result = df.loc[(0, 0), "z"]
    assert result == 2
@pytest.mark.arm_slow
def test_indexer_caching():
    # GH 5727
    # make sure that indexers are in the _internal_names_set
    # A large n so the cached-indexer code paths are actually exercised.
    n = 1000001
    arrays = (range(n), range(n))
    index = MultiIndex.from_tuples(zip(*arrays))
    s = Series(np.zeros(n), index=index)
    str(s)  # force a repr first -- presumably warms internal caches before the setitem; confirm

    # setitem: a boolean-mask assignment should update every element.
    expected = Series(np.ones(n), index=index)
    s = Series(np.zeros(n), index=index)
    s[s == 0] = 1
    tm.assert_series_equal(s, expected)
| 28.422535 | 118 | 0.606541 | import numpy as np
import pytest
from pandas import (
DataFrame,
MultiIndex,
Series,
)
import pandas._testing as tm
import pandas.core.common as com
def test_detect_chained_assignment():
    # Regression test for inplace fillna through a chained selection on a
    # frame with a (part, side) column MultiIndex (see SO question 20508968).
    a = [12, 23]
    b = [123, None]
    c = [1234, 2345]
    d = [12345, 23456]
    tuples = [("eyes", "left"), ("eyes", "right"), ("ears", "left"), ("ears", "right")]
    events = {
        ("eyes", "left"): a,
        ("eyes", "right"): b,
        ("ears", "left"): c,
        ("ears", "right"): d,
    }
    multiind = MultiIndex.from_tuples(tuples, names=["part", "side"])
    zed = DataFrame(events, index=["a", "b"], columns=multiind)
    # The chained selection is flagged as a copy, so the inplace fillna must
    # raise SettingWithCopyError rather than silently doing nothing.
    msg = "A value is trying to be set on a copy of a slice from a DataFrame"
    with pytest.raises(com.SettingWithCopyError, match=msg):
        zed["eyes"]["right"].fillna(value=555, inplace=True)
def test_cache_updating():
    # GH 5216: make sure we don't try to set through a dead cache.
    a = np.random.rand(10, 3)
    df = DataFrame(a, columns=["x", "y", "z"])
    tuples = [(i, j) for i in range(5) for j in range(2)]
    index = MultiIndex.from_tuples(tuples)
    df.index = index
    # setting via chained assignment
    # but actually works, since everything is a view
    df.loc[0]["z"].iloc[0] = 1.0
    result = df.loc[(0, 0), "z"]
    assert result == 1
    # correct setting
    df.loc[(0, 0), "z"] = 2
    result = df.loc[(0, 0), "z"]
    assert result == 2
@pytest.mark.arm_slow
def test_indexer_caching():
    # GH5727
    # make sure that indexers are in the _internal_names_set
    n = 1000001
    arrays = (range(n), range(n))
    index = MultiIndex.from_tuples(zip(*arrays))
    s = Series(np.zeros(n), index=index)
    str(s)  # force a repr first -- presumably warms internal caches before the setitem; confirm
    # setitem: a boolean-mask assignment should update every element.
    expected = Series(np.ones(n), index=index)
    s = Series(np.zeros(n), index=index)
    s[s == 0] = 1
    tm.assert_series_equal(s, expected)
| true | true |
f71b3ac0d45395a6a7e2ff0955877634c8665bca | 3,872 | py | Python | gym-kinova-gripper/plotting_code/other_plots.py | OSUrobotics/KinovaGrasping | f22af60d3683fdc4ffecf49ccff179fbc6750748 | [
"Linux-OpenIB"
] | 16 | 2020-05-16T00:40:31.000Z | 2022-02-22T11:59:03.000Z | gym-kinova-gripper/plotting_code/other_plots.py | OSUrobotics/KinovaGrasping | f22af60d3683fdc4ffecf49ccff179fbc6750748 | [
"Linux-OpenIB"
] | 9 | 2020-08-10T08:33:55.000Z | 2021-08-17T02:10:50.000Z | gym-kinova-gripper/plotting_code/other_plots.py | OSUrobotics/KinovaGrasping | f22af60d3683fdc4ffecf49ccff179fbc6750748 | [
"Linux-OpenIB"
] | 7 | 2020-07-27T09:45:05.000Z | 2021-06-21T21:42:50.000Z | import matplotlib.pyplot as plt
import numpy as np
## Extra plotting functions that can be called for quick analysis
def plot_timestep_distribution(success_timesteps=None, fail_timesteps=None, all_timesteps=None, expert_saving_dir=None):
    """Plot the distribution of time steps over successful and failed episodes.

    If ``all_timesteps`` is None, all three arrays are loaded from ``.npy``
    files previously saved in ``expert_saving_dir``. One histogram is saved
    per category (all / success / fail) into ``expert_saving_dir``.
    """
    if all_timesteps is None:
        # Fall back to the arrays saved by a previous run.
        success_timesteps = np.load(expert_saving_dir + "/success_timesteps.npy")
        fail_timesteps = np.load(expert_saving_dir + "/fail_timesteps.npy")
        all_timesteps = np.load(expert_saving_dir + "/all_timesteps.npy")

    n_bins = 40
    # (data, color, title, output file, x-limit) for each histogram.  Only
    # the all-episode plot pins the x-axis range, matching prior behavior.
    plots = [
        (all_timesteps, "g", "Total time steps distribution for all episodes (3x speed)",
         "/total_timestep_distribution", (0, 800)),
        (success_timesteps, "b", "Time steps distribution for Successful episodes (3x speed)",
         "/success_timestep_distribution", None),
        (fail_timesteps, "r", "Time steps distribution for Failed episodes (3x speed)",
         "/fail_timestep_distribution", None),
    ]
    for data, color, title, filename, xlim in plots:
        plt.hist(data, bins=n_bins, color=color)
        plt.title(title, weight='bold')
        plt.xlabel('# of time steps per episode')
        plt.ylabel('# of episodes with the time step count')
        if xlim is not None:
            plt.xlim(*xlim)
        plt.savefig(expert_saving_dir + filename)
        plt.clf()
'''
# Plot the average velocity over an episode
def plot_average_velocity(replay_buffer,num_timesteps):
""" Plot the average velocity over a certain number of episodes """
velocity_dir = "./expert_average_velocity"
if not os.path.isdir(velocity_dir):
os.mkdir(velocity_dir)
#num_episodes = len(f1_vels)
#plt.plot(np.arrange(len(f1_vels)), f1_vels)
max_timesteps = 30
timestep_vel_count = np.zeros(max_timesteps)
wrist_avg_vels = np.zeros(max_timesteps)
f1_avg_vels = np.zeros(max_timesteps)
f2_avg_vels = np.zeros(max_timesteps)
f3_avg_vels = np.zeros(max_timesteps)
for episode_actions in replay_buffer.action:
for timestep_idx in range(len(episode_actions)):
timestep_vel_count[timestep_idx] += 1
wrist_avg_vels[timestep_idx] = (wrist_avg_vels[timestep_idx] + episode_actions[timestep_idx][0]) / timestep_vel_count[timestep_idx]
f1_avg_vels[timestep_idx] = (f1_avg_vels[timestep_idx] + episode_actions[timestep_idx][1]) / \
timestep_vel_count[timestep_idx]
f2_avg_vels[timestep_idx] = (f2_avg_vels[timestep_idx] + episode_actions[timestep_idx][2]) / \
timestep_vel_count[timestep_idx]
f3_avg_vels[timestep_idx] = (f3_avg_vels[timestep_idx] + episode_actions[timestep_idx][3]) / \
timestep_vel_count[timestep_idx]
num_episodes = len(replay_buffer.action)
print("replay_buffer.action: ",replay_buffer.action)
print("f1_avg_vels: ",f1_avg_vels)
plt.plot(np.arange(num_timesteps), f1_avg_vels, color="r", label="Finger1")
plt.plot(np.arange(num_timesteps), f2_avg_vels, color="b", label="Finger2")
plt.plot(np.arange(num_timesteps), f3_avg_vels, color="g", label="Finger3")
plt.plot(np.arange(num_timesteps), wrist_avg_vels, color="y", label="Wrist")
plt.legend()
plt.title("Average velocity over "+str(num_episodes)+" episodes", weight='bold')
plt.xlabel('Timestep within an episode')
plt.ylabel('Average Velocity at Timestep')
#plt.savefig(velocity_dir + "/velocity_plot")
#plt.clf()
plt.show()
''' | 46.650602 | 143 | 0.698089 | import matplotlib.pyplot as plt
import numpy as np
mesteps=None, all_timesteps=None, expert_saving_dir=None):
if all_timesteps is None:
success_timesteps = np.load(expert_saving_dir + "/success_timesteps.npy")
fail_timesteps = np.load(expert_saving_dir + "/fail_timesteps.npy")
all_timesteps = np.load(expert_saving_dir + "/all_timesteps.npy")
n_bins = 40
plt.hist(all_timesteps, bins=n_bins, color="g")
plt.title("Total time steps distribution for all episodes (3x speed)", weight='bold')
plt.xlabel('# of time steps per episode')
plt.ylabel('# of episodes with the time step count')
plt.xlim(0, 800)
plt.savefig(expert_saving_dir + "/total_timestep_distribution")
plt.clf()
plt.hist(success_timesteps, bins=n_bins, color="b")
plt.title("Time steps distribution for Successful episodes (3x speed)", weight='bold')
plt.xlabel('# of time steps per episode')
plt.ylabel('# of episodes with the time step count')
plt.savefig(expert_saving_dir + "/success_timestep_distribution")
plt.clf()
plt.hist(fail_timesteps, bins=n_bins, color="r")
plt.title("Time steps distribution for Failed episodes (3x speed)", weight='bold')
plt.xlabel('# of time steps per episode')
plt.ylabel('# of episodes with the time step count')
plt.savefig(expert_saving_dir + "/fail_timestep_distribution")
plt.clf()
| true | true |
f71b3b475439ea9ed08d69fbc7b9ab409bb33d5a | 2,223 | py | Python | src/utils.py | wenyuC94/LogConcComp | b17d6ba6a102ba83a8415774b0e6da27a362bd5d | [
"MIT"
] | null | null | null | src/utils.py | wenyuC94/LogConcComp | b17d6ba6a102ba83a8415774b0e6da27a362bd5d | [
"MIT"
] | null | null | null | src/utils.py | wenyuC94/LogConcComp | b17d6ba6a102ba83a8415774b0e6da27a362bd5d | [
"MIT"
] | null | null | null | import os
import numpy as np
import numba as nb
def create_folder(storage_path):
    """Create the output directory tree used for experiment results.

    Ensures ``storage_path`` exists and contains ``info``, ``hist``, ``soln``
    and ``figs`` subdirectories, with ``figs/crop`` and ``figs/raw`` inside
    ``figs``. Existing directories are left untouched.
    """
    # os.path.join works whether or not storage_path carries a trailing
    # separator; the original string concatenation (storage_path + item)
    # silently created mis-named sibling directories (e.g. "runinfo") when
    # the trailing "/" was missing.  exist_ok=True also makes the previous
    # os.listdir membership checks redundant.
    for item in ["info", "hist", "soln", "figs"]:
        os.makedirs(os.path.join(storage_path, item), exist_ok=True)
    os.makedirs(os.path.join(storage_path, "figs", "crop"), exist_ok=True)
    os.makedirs(os.path.join(storage_path, "figs", "raw"), exist_ok=True)
def time_to_string(runtime):
    """Format a duration in seconds as an ``HH:MM:SS.ss`` string."""
    total_minutes, seconds = divmod(runtime, 60)
    hours, minutes = divmod(total_minutes, 60)
    return "%.2d:%.2d:%05.2f" % (int(hours), int(minutes), seconds)
def multivariate_laplace(n, d, rng=None, random_state=None):
    """Draw ``n`` samples of a ``d``-dimensional multivariate Laplace variate.

    Uses the Gaussian scale-mixture construction: standard normal draws are
    scaled row-wise by the square root of an exponential mixing variable.
    """
    if rng is None:
        rng = np.random.RandomState(random_state)
    gauss = rng.randn(n, d)
    mixing = rng.exponential(size=(n, 1))
    return gauss * np.sqrt(mixing)
@nb.njit(cache=True)
def np_apply_along_axis(func1d, axis, arr):
    # Apply a scalar-returning func1d along one axis of a 2-D array, written
    # as explicit loops -- presumably to stay compatible with numba's
    # nopython mode (confirm); kept structurally as-is for that reason.
    assert arr.ndim == 2
    assert axis in [0, 1]
    if axis == 0:
        # One result per column: func1d consumes each column vector.
        result = np.empty(arr.shape[1])
        for i in range(len(result)):
            result[i] = func1d(arr[:, i])
    else:
        # One result per row.
        result = np.empty(arr.shape[0])
        for i in range(len(result)):
            result[i] = func1d(arr[i, :])
    return result
@nb.njit(cache=True)
def np_apply_along_axis_kd(funckd, axis, arr, k = -1):
    # Vector-valued variant of np_apply_along_axis: funckd maps a 1-D slice
    # to a length-k vector.  k <= 0 means "same length as the input slice".
    # Kept as explicit loops for numba nopython compatibility (see above).
    assert arr.ndim == 2
    assert axis in [0, 1]
    if axis == 0:
        k = k if k > 0 else arr.shape[0]
        result = np.empty((k,arr.shape[1]))
        for i in range(arr.shape[1]):
            result[:, i] = funckd(arr[:, i])
    else:
        k = k if k > 0 else arr.shape[1]
        result = np.empty((arr.shape[0],k))
        for i in range(arr.shape[0]):
            result[i, :] = funckd(arr[i, :])
    return result
@nb.njit(cache=True)
def split(n, B):
    # Partition n items into B contiguous batches as evenly as possible and
    # return the cumulative end index of each batch; the first n % B batches
    # receive one extra item, and indices[-1] == n.
    sep = n//B
    rem = n%B
    indices = []
    last = 0
    cur = 0
    for i in range(B):
        # (i < rem) contributes 1 for the first `rem` batches, 0 afterwards.
        cur = last + sep + (i < rem)
        indices.append(cur)
        last = cur
    return indices
| 28.5 | 78 | 0.559154 | import os
import numpy as np
import numba as nb
def create_folder(storage_path):
    """Create the output directory tree used for experiment results.

    Ensures ``storage_path`` exists and contains ``info``, ``hist``, ``soln``
    and ``figs`` subdirectories, with ``figs/crop`` and ``figs/raw`` inside
    ``figs``. Existing directories are left untouched.
    """
    # os.path.join works whether or not storage_path carries a trailing
    # separator; the original string concatenation (storage_path + item)
    # silently created mis-named sibling directories (e.g. "runinfo") when
    # the trailing "/" was missing.  exist_ok=True also makes the previous
    # os.listdir membership checks redundant.
    for item in ["info", "hist", "soln", "figs"]:
        os.makedirs(os.path.join(storage_path, item), exist_ok=True)
    os.makedirs(os.path.join(storage_path, "figs", "crop"), exist_ok=True)
    os.makedirs(os.path.join(storage_path, "figs", "raw"), exist_ok=True)
def time_to_string(runtime):
    """Format a duration in seconds as an ``HH:MM:SS.ss`` string."""
    total_minutes, seconds = divmod(runtime, 60)
    hours, minutes = divmod(total_minutes, 60)
    return "%.2d:%.2d:%05.2f" % (int(hours), int(minutes), seconds)
def multivariate_laplace(n, d, rng=None, random_state=None):
    """Draw ``n`` samples of a ``d``-dimensional multivariate Laplace variate.

    Uses the Gaussian scale-mixture construction: standard normal draws are
    scaled row-wise by the square root of an exponential mixing variable.
    """
    if rng is None:
        rng = np.random.RandomState(random_state)
    gauss = rng.randn(n, d)
    mixing = rng.exponential(size=(n, 1))
    return gauss * np.sqrt(mixing)
@nb.njit(cache=True)
def np_apply_along_axis(func1d, axis, arr):
    # Apply a scalar-returning func1d along one axis of a 2-D array, written
    # as explicit loops -- presumably to stay compatible with numba's
    # nopython mode (confirm); kept structurally as-is for that reason.
    assert arr.ndim == 2
    assert axis in [0, 1]
    if axis == 0:
        # One result per column: func1d consumes each column vector.
        result = np.empty(arr.shape[1])
        for i in range(len(result)):
            result[i] = func1d(arr[:, i])
    else:
        # One result per row.
        result = np.empty(arr.shape[0])
        for i in range(len(result)):
            result[i] = func1d(arr[i, :])
    return result
@nb.njit(cache=True)
def np_apply_along_axis_kd(funckd, axis, arr, k = -1):
    # Vector-valued variant of np_apply_along_axis: funckd maps a 1-D slice
    # to a length-k vector.  k <= 0 means "same length as the input slice".
    # Kept as explicit loops for numba nopython compatibility (see above).
    assert arr.ndim == 2
    assert axis in [0, 1]
    if axis == 0:
        k = k if k > 0 else arr.shape[0]
        result = np.empty((k,arr.shape[1]))
        for i in range(arr.shape[1]):
            result[:, i] = funckd(arr[:, i])
    else:
        k = k if k > 0 else arr.shape[1]
        result = np.empty((arr.shape[0],k))
        for i in range(arr.shape[0]):
            result[i, :] = funckd(arr[i, :])
    return result
@nb.njit(cache=True)
def split(n, B):
    # Partition n items into B contiguous batches as evenly as possible and
    # return the cumulative end index of each batch; the first n % B batches
    # receive one extra item, and indices[-1] == n.
    sep = n//B
    rem = n%B
    indices = []
    last = 0
    cur = 0
    for i in range(B):
        # (i < rem) contributes 1 for the first `rem` batches, 0 afterwards.
        cur = last + sep + (i < rem)
        indices.append(cur)
        last = cur
    return indices
| true | true |
f71b3b5ffd9a0d39ef3e2a5f01e2965d34e8b74e | 1,448 | py | Python | make_string_alphabetic.py | Maffey/FunPythonScripts | 7682ab1f8dc7924eb4b3fb19d58ebbabe3901a22 | [
"Apache-2.0"
] | 1 | 2022-01-29T21:07:38.000Z | 2022-01-29T21:07:38.000Z | make_string_alphabetic.py | Maffey/fun-python-scripts | 7682ab1f8dc7924eb4b3fb19d58ebbabe3901a22 | [
"Apache-2.0"
] | 2 | 2019-05-15T13:48:58.000Z | 2019-05-29T18:33:34.000Z | make_string_alphabetic.py | Maffey/FunPythonScripts | 7682ab1f8dc7924eb4b3fb19d58ebbabe3901a22 | [
"Apache-2.0"
] | 1 | 2020-08-04T11:09:17.000Z | 2020-08-04T11:09:17.000Z | #! python
# Removes letter from word to make characters go alphabetically.
# It doesn't work all the time, but is efficient.
import unittest
class TestRemoveLettersAlphabet(unittest.TestCase):
    # Expected removal counts for letters_to_remove (defined below).
    # Method names are the unittest-discovered interface; kept unchanged.

    def test_object1(self):
        self.assertEqual(letters_to_remove('mateusz'), 3)

    def test_object2(self):
        self.assertEqual(letters_to_remove('cba'), 2)

    def test_object3(self):
        self.assertEqual(letters_to_remove('dirt'), 0)

    def test_object4(self):
        self.assertEqual(letters_to_remove('jablko'), 2)

    def test_repeating_letters1(self):
        self.assertEqual(letters_to_remove('gabriela'), 5)

    def test_repeating_letters2(self):
        self.assertEqual(letters_to_remove('banana'), 3)

    def test_repeating_letters3(self):
        self.assertEqual(letters_to_remove('apple'), 2)
def letters_to_remove(string: str) -> int:
    # Greedily count how many letters must be removed so the remaining
    # characters read in non-descending (alphabetical) order.
    # NOTE(review): the file header admits this heuristic "doesn't work all
    # the time"; it also removes from sorted_string while iterating over it,
    # so the iteration order is fragile -- kept byte-identical on purpose.
    string = list(string)  # rebind the parameter to a mutable list of chars
    sorted_string = sorted(string)
    letters_removed = 0
    remaining_string = ""
    for character in sorted_string:
        # Everything before the first occurrence of `character` is dropped.
        index = string.index(character)
        to_remove = string[:index]
        letters_removed += len(to_remove)
        for letter in to_remove:
            string.remove(letter)
            sorted_string.remove(letter)  # mutates the sequence being iterated
        remaining_string += character
        string.remove(character)
    print(f"[+] Remaining string: {remaining_string}")
    return letters_removed
if __name__ == "__main__":
unittest.main()
| 27.320755 | 64 | 0.685083 |
import unittest
class TestRemoveLettersAlphabet(unittest.TestCase):
    # Expected removal counts for letters_to_remove (defined below).
    # Method names are the unittest-discovered interface; kept unchanged.

    def test_object1(self):
        self.assertEqual(letters_to_remove('mateusz'), 3)

    def test_object2(self):
        self.assertEqual(letters_to_remove('cba'), 2)

    def test_object3(self):
        self.assertEqual(letters_to_remove('dirt'), 0)

    def test_object4(self):
        self.assertEqual(letters_to_remove('jablko'), 2)

    def test_repeating_letters1(self):
        self.assertEqual(letters_to_remove('gabriela'), 5)

    def test_repeating_letters2(self):
        self.assertEqual(letters_to_remove('banana'), 3)

    def test_repeating_letters3(self):
        self.assertEqual(letters_to_remove('apple'), 2)
def letters_to_remove(string: str) -> int:
    # Greedily count how many letters must be removed so the remaining
    # characters read in non-descending (alphabetical) order.
    # NOTE(review): this heuristic is known to be unreliable for some inputs;
    # it also removes from sorted_string while iterating over it, so the
    # iteration order is fragile -- kept byte-identical on purpose.
    string = list(string)  # rebind the parameter to a mutable list of chars
    sorted_string = sorted(string)
    letters_removed = 0
    remaining_string = ""
    for character in sorted_string:
        # Everything before the first occurrence of `character` is dropped.
        index = string.index(character)
        to_remove = string[:index]
        letters_removed += len(to_remove)
        for letter in to_remove:
            string.remove(letter)
            sorted_string.remove(letter)  # mutates the sequence being iterated
        remaining_string += character
        string.remove(character)
    print(f"[+] Remaining string: {remaining_string}")
    return letters_removed
if __name__ == "__main__":
unittest.main()
| true | true |
f71b3ba93636d762ed00bb7c81089b7edadd08c2 | 1,407 | py | Python | verifiers/statusCode.py | CalConnect/caldavtester | a17683554df8a9b80fceab91085de99945fefe48 | [
"Apache-2.0"
] | 2 | 2019-04-23T04:06:08.000Z | 2020-08-26T17:30:45.000Z | verifiers/statusCode.py | CalConnect/caldavtester | a17683554df8a9b80fceab91085de99945fefe48 | [
"Apache-2.0"
] | 3 | 2017-09-28T13:23:39.000Z | 2017-10-03T15:59:36.000Z | verifiers/statusCode.py | CalConnect/caldavtester | a17683554df8a9b80fceab91085de99945fefe48 | [
"Apache-2.0"
] | 2 | 2017-02-08T04:47:31.000Z | 2021-08-05T20:09:10.000Z | ##
# Copyright (c) 2006-2016 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Verifier that checks the response status code for a specific value.
"""
class Verifier(object):
    """Verifier that checks the HTTP response status code against expected values."""

    def verify(self, manager, uri, response, respdata, args): # @UnusedVariable
        """Return (True, "") when the status matches, else (False, message).

        ``args["status"]`` may list exact codes ("301") or class patterns
        ("2xx", meaning any code whose hundreds digit matches).
        """
        # If no status verification requested, then assume all 2xx codes are OK
        teststatus = args.get("status", ["2xx"])

        for test in teststatus:
            if test[1:3] == "xx":
                # Class pattern: keep only the hundreds digit.
                test = int(test[0])
            else:
                test = int(test)
            if test < 100:
                # Compare status classes with floor division: plain "/" is
                # true division on Python 3 (207 / 100 == 2.07), which made
                # every class check fail.
                result = ((response.status // 100) == test)
            else:
                result = (response.status == test)
            if result:
                return True, ""

        return False, " HTTP Status Code Wrong: %d expected one of %s" % (response.status, ", ".join(teststatus))
| 34.317073 | 120 | 0.628998 |
class Verifier(object):
    """Verifier that checks the HTTP response status code against expected values."""

    def verify(self, manager, uri, response, respdata, args):
        """Return (True, "") when the status matches, else (False, message).

        ``args["status"]`` may list exact codes ("301") or class patterns
        ("2xx", meaning any code whose hundreds digit matches); the default
        accepts any 2xx response.
        """
        teststatus = args.get("status", ["2xx"])
        for test in teststatus:
            if test[1:3] == "xx":
                # Class pattern: keep only the hundreds digit.
                test = int(test[0])
            else:
                test = int(test)
            if test < 100:
                # Compare status classes with floor division: plain "/" is
                # true division on Python 3 (207 / 100 == 2.07), which made
                # every class check fail.
                result = ((response.status // 100) == test)
            else:
                result = (response.status == test)
            if result:
                return True, ""
        return False, " HTTP Status Code Wrong: %d expected one of %s" % (response.status, ", ".join(teststatus))
| true | true |
f71b3d10ad093122f20e0962b3f6645057d8279b | 613 | py | Python | classifier_stgcn_real_only/utils/temp.py | 1suancaiyu/STEP | 54195112990feaee137f5137775c736d07c2d26f | [
"MIT"
] | 32 | 2020-02-21T16:12:13.000Z | 2022-03-11T09:00:47.000Z | classifier_stgcn_real_only/utils/temp.py | 1suancaiyu/STEP | 54195112990feaee137f5137775c736d07c2d26f | [
"MIT"
] | 12 | 2020-06-23T08:11:25.000Z | 2022-03-26T11:34:42.000Z | classifier_stgcn_real_only/utils/temp.py | 1suancaiyu/STEP | 54195112990feaee137f5137775c736d07c2d26f | [
"MIT"
] | 13 | 2020-04-01T16:51:50.000Z | 2022-03-03T10:15:10.000Z | import h5py
import os
import numpy as np
base_path = os.path.dirname(os.path.realpath(__file__))

feature_file = '/media/uttaran/FCE1-7BF3/Gamma/Gait/classifier_stgcn/model_classifier_stgcn/featuresCombineddeep_features.txt'
# One row of deep features per sample; assumed to be in the same order as
# the keys of the combined-features file below -- TODO confirm.
f = np.loadtxt(feature_file)

fCombined = h5py.File('/media/uttaran/FCE1-7BF3/Gamma/Gait/data/featuresCombined.h5', 'r')
fkeys = fCombined.keys()
dfCombined = h5py.File('/media/uttaran/FCE1-7BF3/Gamma/Gait/data/deepFeaturesCombined.h5', 'w')
for i, fkey in enumerate(fkeys):
    # fkey is already the dataset name; the original wrapped it as
    # [fkey][0], which is a no-op.
    dfCombined.create_dataset(fkey, data=f[i, :])
dfCombined.close()
fCombined.close()  # the input handle was previously left open
| 38.3125 | 126 | 0.76509 | import h5py
import os
import numpy as np
base_path = os.path.dirname(os.path.realpath(__file__))
feature_file = '/media/uttaran/FCE1-7BF3/Gamma/Gait/classifier_stgcn/model_classifier_stgcn/featuresCombineddeep_features.txt'
# One row of deep features per sample; assumed to be in the same order as
# the keys of the combined-features file below -- TODO confirm.
f = np.loadtxt(feature_file)
fCombined = h5py.File('/media/uttaran/FCE1-7BF3/Gamma/Gait/data/featuresCombined.h5', 'r')
fkeys = fCombined.keys()
dfCombined = h5py.File('/media/uttaran/FCE1-7BF3/Gamma/Gait/data/deepFeaturesCombined.h5', 'w')
for i, fkey in enumerate(fkeys):
    # fkey is already the dataset name; the original wrapped it as
    # [fkey][0], which is a no-op.
    dfCombined.create_dataset(fkey, data=f[i, :])
dfCombined.close()
fCombined.close()  # the input handle was previously left open
| true | true |
f71b3da82b76452958e6ff8037b2e5c369373cfd | 2,253 | py | Python | python/hostconfig/machines/alexf.py | stu-l/Chaste | 8efa8b440660553af66804067639f237c855f557 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | python/hostconfig/machines/alexf.py | stu-l/Chaste | 8efa8b440660553af66804067639f237c855f557 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | python/hostconfig/machines/alexf.py | stu-l/Chaste | 8efa8b440660553af66804067639f237c855f557 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # Configuration for Finarfin
"""Copyright (c) 2005-2022, University of Oxford.
All rights reserved.
University of Oxford means the Chancellor, Masters and Scholars of the
University of Oxford, having an administrative office at Wellington
Square, Oxford OX1 2JD, UK.
This file is part of Chaste.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the University of Oxford nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
# Machine-specific build configuration: library locations and tool paths.
petsc_2_2_path = None  # PETSc 2.2 not available on this machine
petsc_2_3_path = '/home/alex/petsc-2.3.2-p4/'
petsc_build_name = 'linux-gnu'
petsc_build_name_optimized = 'linux-gnu-opt'
dealii_path = None
metis_path = None
intel_path = None #'/opt/intel_cc_80'
#icpc = 'icpc'
other_includepaths = []
other_libpaths = ['/home/alex/hdf5/lib']
blas_lapack = ['lapack', 'blas']
other_libraries = ['boost_serialization', 'boost_filesystem', 'xerces-c']
# External tools invoked by the build/test scripts.
tools = {'mpirun': '/home/alex/mpi/bin/mpirun',
         'mpicxx': '/home/alex/mpi/bin/mpicxx'}
| 44.176471 | 79 | 0.782956 |
petsc_2_2_path = None
petsc_2_3_path = '/home/alex/petsc-2.3.2-p4/'
petsc_build_name = 'linux-gnu'
petsc_build_name_optimized = 'linux-gnu-opt'
dealii_path = None
metis_path = None
intel_path = None
other_includepaths = []
other_libpaths = ['/home/alex/hdf5/lib']
blas_lapack = ['lapack', 'blas']
other_libraries = ['boost_serialization', 'boost_filesystem', 'xerces-c']
tools = {'mpirun': '/home/alex/mpi/bin/mpirun',
'mpicxx': '/home/alex/mpi/bin/mpicxx'}
| true | true |
f71b400d9193c88784e05204664ee4a910ee628d | 1,831 | py | Python | awss3upload-capi/s3upload.py | krlex/aws-python-examples | 1310e82c748e3d0898809bbe909870fe4c7cb37b | [
"MIT"
] | 21 | 2021-02-13T04:11:01.000Z | 2022-03-28T09:13:53.000Z | awss3upload-capi/s3upload.py | krlex/aws-python-examples | 1310e82c748e3d0898809bbe909870fe4c7cb37b | [
"MIT"
] | null | null | null | awss3upload-capi/s3upload.py | krlex/aws-python-examples | 1310e82c748e3d0898809bbe909870fe4c7cb37b | [
"MIT"
] | 17 | 2019-07-26T06:02:27.000Z | 2022-03-23T00:06:12.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# s3upload.py
# It is an example that handles S3 buckets on AWS.
# It uses Client API (low-level) of Boto3.
# Upload a local file to a S3 bucket.
# You must provide 1 parameter:
# BUCKET_NAME = Name of the bucket
# OBJECT_NAME = Object file name in the bucket
# LOCAL_FILE_NAME = Local file name
import sys
import os
import boto3
import botocore
def main():
    """Upload a local file to an S3 bucket using the Boto3 Client API.

    Expects three command-line arguments: the bucket name, the object
    key to create, and the path of the local file to upload.  Exits
    with status 1 on a usage error or when the local file is missing.
    """
    # Command-line arguments, without the script name itself.
    params = sys.argv[1:]
    if len(params) < 3:
        print('Not enough parameters.\n'
              'Proper Usage is: python s3upload.py '
              '<BUCKET_NAME> <OBJECT_NAME> <LOCAL_FILE_NAME>')
        sys.exit(1)

    bucket_name = params[0]
    key_name = params[1]
    local_file_name = params[2]
    print('Bucket: ' + bucket_name)
    print('Object/Key: ' + key_name)
    print('Local file: ' + local_file_name)

    # Low-level S3 client.
    s3_client = boto3.client('s3')

    if not os.path.isfile(local_file_name):
        print("Error: File Not Found!!")
        sys.exit(1)

    # Map the most common S3 failures to human-readable messages; any
    # other client error is re-raised unchanged.
    error_messages = {
        "NoSuchBucket": "Error: Bucket does not exist!!",
        "InvalidBucketName": "Error: Invalid Bucket name!!",
        "AllAccessDisabled": "Error: You do not have access to the Bucket!!",
    }
    try:
        print('Uploading object ...')
        s3_client.upload_file(local_file_name, bucket_name, key_name)
        print('Uploaded')
    except botocore.exceptions.ClientError as err:
        code = err.response['Error']['Code']
        if code in error_messages:
            print(error_messages[code])
        else:
            raise
    return


# Standard script entry point.
if __name__ == '__main__':
    main()
| 28.609375 | 69 | 0.621518 |
import sys
import os
import boto3
import botocore
def main():
args = sys.argv[1:]
if len(args) < 3:
print('Not enough parameters.\n'\
'Proper Usage is: python s3upload.py '\
'<BUCKET_NAME> <OBJECT_NAME> <LOCAL_FILE_NAME>')
sys.exit(1)
bucket_name = args[0]
key_name = args[1]
local_file_name = args[2]
print('Bucket: ' + bucket_name)
print('Object/Key: ' + key_name)
print('Local file: ' + local_file_name)
s3_client = boto3.client('s3')
if not os.path.isfile(local_file_name):
print("Error: File Not Found!!")
sys.exit(1)
try:
print('Uploading object ...')
s3_client.upload_file(local_file_name, bucket_name, key_name)
print('Uploaded')
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "NoSuchBucket":
print("Error: Bucket does not exist!!")
elif e.response['Error']['Code'] == "InvalidBucketName":
print("Error: Invalid Bucket name!!")
elif e.response['Error']['Code'] == "AllAccessDisabled":
print("Error: You do not have access to the Bucket!!")
else:
raise
return
if __name__ == '__main__':
main()
| true | true |
f71b4058fcf0bfe5202371ab731ffe619ab85852 | 827 | py | Python | src/lambda/face-detector-function/main/image_ops/resizer.py | gai6948/video-analytics-for-ppe-compliance | bb41ac010f5917bd7e85adfed689489f24830617 | [
"Apache-2.0"
] | null | null | null | src/lambda/face-detector-function/main/image_ops/resizer.py | gai6948/video-analytics-for-ppe-compliance | bb41ac010f5917bd7e85adfed689489f24830617 | [
"Apache-2.0"
] | null | null | null | src/lambda/face-detector-function/main/image_ops/resizer.py | gai6948/video-analytics-for-ppe-compliance | bb41ac010f5917bd7e85adfed689489f24830617 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import timeit
import cv2
from PIL import Image
from aws_lambda_powertools.logging import Logger
from aws_lambda_powertools.tracing import Tracer
logger = Logger(service='face-detector', child=True)
tracer = Tracer(service='face-detector')
@tracer.capture_method(capture_response=False)
def resize_image(frame: np.ndarray, target_image_width: int, target_image_height: int) -> np.ndarray:
    """Return *frame* resized to the requested dimensions.

    Uses OpenCV bilinear interpolation and logs how long the resize took.

    :param frame: frame data as a numpy array
    :param target_image_width: output width in pixels
    :param target_image_height: output height in pixels
    :return: the resized frame as a numpy array
    """
    started = timeit.default_timer()
    target_size = (target_image_width, target_image_height)
    resized: np.ndarray = cv2.resize(
        frame, dsize=target_size, interpolation=cv2.INTER_LINEAR)
    logger.info(f'Resized frame after: {timeit.default_timer() - started}')
    return resized
| 33.08 | 101 | 0.759371 | import numpy as np
import timeit
import cv2
from PIL import Image
from aws_lambda_powertools.logging import Logger
from aws_lambda_powertools.tracing import Tracer
logger = Logger(service='face-detector', child=True)
tracer = Tracer(service='face-detector')
@tracer.capture_method(capture_response=False)
def resize_image(frame: np.ndarray, target_image_width: int, target_image_height: int) -> np.ndarray:
start_time = timeit.default_timer()
new_frame: np.ndarray = cv2.resize(frame, dsize=(
target_image_width, target_image_height), interpolation=cv2.INTER_LINEAR)
logger.info(f'Resized frame after: {timeit.default_timer() - start_time}')
return new_frame
| true | true |
f71b4158a84075698aa6f4b4d391c6b10747b9c5 | 2,190 | py | Python | config/settings/local.py | suyash143/Base_todo | 3284f24f8b5c611088af6189a2f264a280fbbbd6 | [
"MIT"
] | 1 | 2022-03-16T10:22:34.000Z | 2022-03-16T10:22:34.000Z | config/settings/local.py | suyash143/Base_todo | 3284f24f8b5c611088af6189a2f264a280fbbbd6 | [
"MIT"
] | 1 | 2022-03-30T21:29:43.000Z | 2022-03-30T21:29:43.000Z | config/settings/local.py | suyash143/Base_todo | 3284f24f8b5c611088af6189a2f264a280fbbbd6 | [
"MIT"
] | null | null | null | from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# The hard-coded default below is only a fallback for local development;
# set DJANGO_SECRET_KEY in the environment everywhere else.
SECRET_KEY = env(
    "DJANGO_SECRET_KEY",
    default="00nep4XZom6FM9dVyJO6Y7kqt5JV8TN5GTNmcDnDhH0jTq3cDYEGLsyOsUYnOAsM",
)
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ["localhost", "0.0.0.0", "127.0.0.1"]
# CACHES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "",
}
}
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env(
"DJANGO_EMAIL_BACKEND", default="django.core.mail.backends.console.EmailBackend"
)
# django-debug-toolbar
# ------------------------------------------------------------------------------
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#prerequisites
INSTALLED_APPS += ["debug_toolbar"] # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#middleware
MIDDLEWARE += ["debug_toolbar.middleware.DebugToolbarMiddleware"] # noqa F405
# https://django-debug-toolbar.readthedocs.io/en/latest/configuration.html#debug-toolbar-config
DEBUG_TOOLBAR_CONFIG = {
"DISABLE_PANELS": ["debug_toolbar.panels.redirects.RedirectsPanel"],
"SHOW_TEMPLATE_CONTEXT": True,
}
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html#internal-ips
INTERNAL_IPS = ["127.0.0.1", "10.0.2.2"]
# django-extensions
# ------------------------------------------------------------------------------
# https://django-extensions.readthedocs.io/en/latest/installation_instructions.html#configuration
INSTALLED_APPS += ["django_extensions"] # noqa F405
# Your stuff...
# ------------------------------------------------------------------------------
| 39.818182 | 97 | 0.583105 | from .base import *
from .base import env
= True
= env(
"DJANGO_SECRET_KEY",
default="00nep4XZom6FM9dVyJO6Y7kqt5JV8TN5GTNmcDnDhH0jTq3cDYEGLsyOsUYnOAsM",
)
= ["localhost", "0.0.0.0", "127.0.0.1"]
= {
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "",
}
}
= env(
"DJANGO_EMAIL_BACKEND", default="django.core.mail.backends.console.EmailBackend"
)
S += ["debug_toolbar"]
+= ["debug_toolbar.middleware.DebugToolbarMiddleware"]
= {
"DISABLE_PANELS": ["debug_toolbar.panels.redirects.RedirectsPanel"],
"SHOW_TEMPLATE_CONTEXT": True,
}
= ["127.0.0.1", "10.0.2.2"]
S += ["django_extensions"]
| true | true |
f71b41b1f6968010d18f796949243c59c3f77265 | 2,847 | py | Python | wagtail/hooks.py | stevedya/wagtail | 52e5abfe62547cdfd90ea7dfeb8bf5a52f16324c | [
"BSD-3-Clause"
] | 1 | 2022-02-09T05:25:30.000Z | 2022-02-09T05:25:30.000Z | wagtail/hooks.py | stevedya/wagtail | 52e5abfe62547cdfd90ea7dfeb8bf5a52f16324c | [
"BSD-3-Clause"
] | null | null | null | wagtail/hooks.py | stevedya/wagtail | 52e5abfe62547cdfd90ea7dfeb8bf5a52f16324c | [
"BSD-3-Clause"
] | null | null | null | from contextlib import ContextDecorator
from operator import itemgetter
from wagtail.utils.apps import get_app_submodules
_hooks = {}
def register(hook_name, fn=None, order=0):
    """Register ``fn`` under ``hook_name``.

    Can be used as a decorator::

        @register('hook_name')
        def my_hook(...):
            pass

    or as a plain function call::

        def my_hook(...):
            pass

        register('hook_name', my_hook)
    """
    if fn is None:
        # Only a hook name was supplied: behave as a decorator factory.
        def _decorator(hook_fn):
            register(hook_name, hook_fn, order=order)
            return hook_fn
        return _decorator

    _hooks.setdefault(hook_name, []).append((fn, order))
class TemporaryHook(ContextDecorator):
    """Context manager / decorator that registers a set of hooks on entry
    and removes them again on exit.

    ``hooks`` is a list of ``(hook_name, fn)`` pairs; ``order`` is the
    registration order applied to every pair.
    """

    def __init__(self, hooks, order):
        self.hooks = hooks
        self.order = order

    def __enter__(self):
        for name, hook_fn in self.hooks:
            _hooks.setdefault(name, []).append((hook_fn, self.order))

    def __exit__(self, exc_type, exc_value, traceback):
        for name, hook_fn in self.hooks:
            _hooks[name].remove((hook_fn, self.order))
def register_temporarily(hook_name_or_hooks, fn=None, *, order=0):
    """Register hook(s) temporarily; useful for testing hooks.

    Can be used as a decorator::

        @hooks.register_temporarily('hook_name', my_hook)
        def test_my_hook(self):
            pass

    or as a context manager::

        with hooks.register_temporarily('hook_name', my_hook):
            ...  # hook is registered here
        # hook is unregistered here

    To register several hooks at once, pass a list of
    ``(hook_name, fn)`` 2-tuples as the first argument::

        with hooks.register_temporarily([
            ('hook_name', my_hook),
            ('hook_name', my_other_hook),
        ]):
            ...
    """
    if isinstance(hook_name_or_hooks, list) or fn is None:
        # Already a list of (name, fn) pairs.
        hooks = hook_name_or_hooks
    else:
        # Single name + function: normalise to a one-element list.
        hooks = [(hook_name_or_hooks, fn)]
    return TemporaryHook(hooks, order)
_searched_for_hooks = False
def search_for_hooks():
    """Import every installed app's ``wagtail_hooks`` submodule, at most
    once per process, so their registrations take effect."""
    global _searched_for_hooks
    if _searched_for_hooks:
        return
    list(get_app_submodules("wagtail_hooks"))
    _searched_for_hooks = True
def get_hooks(hook_name):
    """Return the functions registered for ``hook_name``, sorted by their
    registration order."""
    search_for_hooks()
    registered = _hooks.get(hook_name, [])
    ordered = sorted(registered, key=lambda pair: pair[1])
    return [hook_fn for hook_fn, _order in ordered]
| 24.543103 | 82 | 0.60555 | from contextlib import ContextDecorator
from operator import itemgetter
from wagtail.utils.apps import get_app_submodules
_hooks = {}
def register(hook_name, fn=None, order=0):
if fn is None:
def decorator(fn):
register(hook_name, fn, order=order)
return fn
return decorator
if hook_name not in _hooks:
_hooks[hook_name] = []
_hooks[hook_name].append((fn, order))
class TemporaryHook(ContextDecorator):
def __init__(self, hooks, order):
self.hooks = hooks
self.order = order
def __enter__(self):
for hook_name, fn in self.hooks:
if hook_name not in _hooks:
_hooks[hook_name] = []
_hooks[hook_name].append((fn, self.order))
def __exit__(self, exc_type, exc_value, traceback):
for hook_name, fn in self.hooks:
_hooks[hook_name].remove((fn, self.order))
def register_temporarily(hook_name_or_hooks, fn=None, *, order=0):
if not isinstance(hook_name_or_hooks, list) and fn is not None:
hooks = [(hook_name_or_hooks, fn)]
else:
hooks = hook_name_or_hooks
return TemporaryHook(hooks, order)
_searched_for_hooks = False
def search_for_hooks():
global _searched_for_hooks
if not _searched_for_hooks:
list(get_app_submodules("wagtail_hooks"))
_searched_for_hooks = True
def get_hooks(hook_name):
search_for_hooks()
hooks = _hooks.get(hook_name, [])
hooks = sorted(hooks, key=itemgetter(1))
return [hook[0] for hook in hooks]
| true | true |
f71b41c7e4894d5f578583e52382342d197a0a53 | 373 | py | Python | tests/schema/mutation/snapshots/snap_test_maps_delete.py | TaiSakuma/acondbs | 990ab44ce4081cc0e04148a8375f7ce7081c2dee | [
"MIT"
] | null | null | null | tests/schema/mutation/snapshots/snap_test_maps_delete.py | TaiSakuma/acondbs | 990ab44ce4081cc0e04148a8375f7ce7081c2dee | [
"MIT"
] | null | null | null | tests/schema/mutation/snapshots/snap_test_maps_delete.py | TaiSakuma/acondbs | 990ab44ce4081cc0e04148a8375f7ce7081c2dee | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from snapshottest import Snapshot
snapshots = Snapshot()

# Auto-generated snapshottest data for the deleteMap mutation test;
# regenerate via the snapshot tooling rather than editing by hand.
# Step 1: the mutation reports a successful deletion.
snapshots['test_schema[deleteMap] 1'] = {
    'data': {
        'deleteMap': {
            'ok': True
        }
    }
}

# Step 2: querying the map afterwards yields None.
snapshots['test_schema[deleteMap] 2'] = {
    'data': {
        'map': None
    }
}
| 16.217391 | 42 | 0.571046 |
from __future__ import unicode_literals
from snapshottest import Snapshot
snapshots = Snapshot()
snapshots['test_schema[deleteMap] 1'] = {
'data': {
'deleteMap': {
'ok': True
}
}
}
snapshots['test_schema[deleteMap] 2'] = {
'data': {
'map': None
}
}
| true | true |
f71b4212c0a7a5b644b48292c8eaeacc6cbdda01 | 1,035 | py | Python | src/products/mixins.py | bopopescu/django-estore | c092ffa965b8ef68e71d27d34a17fde1beacd90e | [
"MIT"
] | null | null | null | src/products/mixins.py | bopopescu/django-estore | c092ffa965b8ef68e71d27d34a17fde1beacd90e | [
"MIT"
] | null | null | null | src/products/mixins.py | bopopescu/django-estore | c092ffa965b8ef68e71d27d34a17fde1beacd90e | [
"MIT"
] | 2 | 2019-04-29T14:16:10.000Z | 2020-07-23T12:04:17.000Z | from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.http import Http404
class StaffRequiredMixin(object):
    """View mixin that only lets authenticated staff users through;
    non-staff users get a 404."""

    @classmethod
    def as_view(cls, *args, **kwargs):
        # Wrap the resolved view so anonymous users hit the login flow.
        view = super(StaffRequiredMixin, cls).as_view(*args, **kwargs)
        return login_required(view)

    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        if not request.user.is_staff:
            # Hide the page's existence from non-staff users.
            raise Http404
        return super(StaffRequiredMixin, self).dispatch(request, *args, **kwargs)
class LoginRequiredMixin(object):
    """View mixin that requires an authenticated user for every request."""

    @classmethod
    def as_view(cls, *args, **kwargs):
        # Wrap the resolved view so anonymous users hit the login flow.
        wrapped = super(LoginRequiredMixin, cls).as_view(*args, **kwargs)
        return login_required(wrapped)

    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        return super(LoginRequiredMixin, self).dispatch(request, *args, **kwargs)
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.http import Http404
class StaffRequiredMixin(object):
@classmethod
def as_view(self, *args, **kwargs):
view = super(StaffRequiredMixin, self).as_view(*args, **kwargs)
return login_required(view)
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
if request.user.is_staff:
return super(StaffRequiredMixin, self).dispatch(request, *args, **kwargs)
else:
raise Http404
class LoginRequiredMixin(object):
@classmethod
def as_view(self, *args, **kwargs):
view = super(LoginRequiredMixin, self).as_view(*args, **kwargs)
return login_required(view)
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(LoginRequiredMixin, self).dispatch(request, *args, **kwargs) | true | true |
f71b4286b66f22ca7786f177364f1945a65ac3fc | 3,123 | py | Python | app/app/settings.py | M0narc/recipe-api | 3b6c204ca76d98310d26fcbeaa4537646a93e023 | [
"MIT"
] | 1 | 2022-03-11T20:38:42.000Z | 2022-03-11T20:38:42.000Z | app/app/settings.py | M0narc/recipe-api | 3b6c204ca76d98310d26fcbeaa4537646a93e023 | [
"MIT"
] | null | null | null | app/app/settings.py | M0narc/recipe-api | 3b6c204ca76d98310d26fcbeaa4537646a93e023 | [
"MIT"
] | null | null | null | """
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.1.15.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before deploying anywhere non-local.
SECRET_KEY = 'p2-y-!@#t0dny#e+nx-txbsphwp(yt(9t939=o_*sf%&3z2_p%'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'core',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
# Use the project's custom user model from the ``core`` app.
AUTH_USER_MODEL = 'core.User'
| 25.185484 | 91 | 0.693244 |
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'p2-y-!@#t0dny#e+nx-txbsphwp(yt(9t939=o_*sf%&3z2_p%'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'core',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'core.User'
| true | true |
f71b43813e699367c1eaeee665cc3b3fd3d5c5d0 | 4,573 | py | Python | hgext/fsmonitor/state.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | hgext/fsmonitor/state.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | hgext/fsmonitor/state.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | # state.py - fsmonitor persistent state
#
# Copyright 2013-2016 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
import errno
import os
import socket
import struct
from mercurial.i18n import _
from mercurial import (
pathutil,
util,
)
_version = 4
_versionformat = ">I"
class state(object):
    """Persistent fsmonitor state kept in ``.hg/fsmonitor.state``.

    The file layout (see :meth:`set`) is a 4-byte big-endian version
    number followed by NUL-terminated fields: hostname, watchman clock,
    ignore-file hash, then a NUL-terminated list of note files.
    """

    def __init__(self, repo):
        self._vfs = repo.vfs
        self._ui = repo.ui
        self._rootdir = pathutil.normasprefix(repo.root)
        # Last watchman clock handed to setlastclock(); not persisted here.
        self._lastclock = None
        # Stat identity of the state file as of the last read; used by
        # set() to detect concurrent modification.
        self._identity = util.filestat(None)
        # User-tunable knobs from the [fsmonitor] config section.
        self.mode = self._ui.config('fsmonitor', 'mode', default='on')
        self.walk_on_invalidate = self._ui.configbool(
            'fsmonitor', 'walk_on_invalidate', False)
        self.timeout = float(self._ui.config(
            'fsmonitor', 'timeout', default='2'))

    def get(self):
        """Read the saved state.

        Returns a ``(clock, ignorehash, notefiles)`` tuple, or
        ``(None, None, None)`` when the file is absent, truncated, from a
        different format version, or from a different host (in which case
        the stale file is also deleted via invalidate()).
        """
        try:
            file = self._vfs('fsmonitor.state', 'rb')
        except IOError as inst:
            self._identity = util.filestat(None)
            if inst.errno != errno.ENOENT:
                raise
            return None, None, None

        self._identity = util.filestat.fromfp(file)

        versionbytes = file.read(4)
        if len(versionbytes) < 4:
            # state file should contain a version number; treat anything
            # shorter as corrupt and start over.
            self._ui.log(
                'fsmonitor', 'fsmonitor: state file only has %d bytes, '
                'nuking state\n' % len(versionbytes))
            self.invalidate()
            return None, None, None
        try:
            diskversion = struct.unpack(_versionformat, versionbytes)[0]
            if diskversion != _version:
                # different version, nuke state and start over
                self._ui.log(
                    'fsmonitor', 'fsmonitor: version switch from %d to '
                    '%d, nuking state\n' % (diskversion, _version))
                self.invalidate()
                return None, None, None

            state = file.read().split('\0')
            # state = hostname\0clock\0ignorehash\0 + list of files, each
            # followed by a \0
            if len(state) < 3:
                self._ui.log(
                    'fsmonitor', 'fsmonitor: state file truncated (expected '
                    '3 chunks, found %d), nuking state\n', len(state))
                self.invalidate()
                return None, None, None
            diskhostname = state[0]
            hostname = socket.gethostname()
            if diskhostname != hostname:
                # file got moved to a different host
                self._ui.log('fsmonitor', 'fsmonitor: stored hostname "%s" '
                             'different from current "%s", nuking state\n' %
                             (diskhostname, hostname))
                self.invalidate()
                return None, None, None

            clock = state[1]
            ignorehash = state[2]
            # discard the value after the last \0
            notefiles = state[3:-1]
        finally:
            file.close()

        return clock, ignorehash, notefiles

    def set(self, clock, ignorehash, notefiles):
        """Atomically write the state file.

        A ``None`` clock invalidates instead of writing.  The write is
        skipped when the on-disk file changed since the last get(), to
        avoid clobbering another process's update.
        """
        if clock is None:
            self.invalidate()
            return

        # Read the identity from the file on disk rather than from the open file
        # pointer below, because the latter is actually a brand new file.
        identity = util.filestat.frompath(self._vfs.join('fsmonitor.state'))
        if identity != self._identity:
            self._ui.debug('skip updating fsmonitor.state: identity mismatch\n')
            return

        try:
            file = self._vfs('fsmonitor.state', 'wb', atomictemp=True,
                             checkambig=True)
        except (IOError, OSError):
            self._ui.warn(_("warning: unable to write out fsmonitor state\n"))
            return

        with file:
            file.write(struct.pack(_versionformat, _version))
            file.write(socket.gethostname() + '\0')
            file.write(clock + '\0')
            file.write(ignorehash + '\0')
            if notefiles:
                file.write('\0'.join(notefiles))
                file.write('\0')

    def invalidate(self):
        """Delete the on-disk state file (a missing file is not an error)
        and reset the cached identity."""
        try:
            os.unlink(os.path.join(self._rootdir, '.hg', 'fsmonitor.state'))
        except OSError as inst:
            if inst.errno != errno.ENOENT:
                raise
        self._identity = util.filestat(None)

    def setlastclock(self, clock):
        # Remember the most recent watchman clock for this process.
        self._lastclock = clock

    def getlastclock(self):
        return self._lastclock
| 33.625 | 80 | 0.557839 |
from __future__ import absolute_import
import errno
import os
import socket
import struct
from mercurial.i18n import _
from mercurial import (
pathutil,
util,
)
_version = 4
_versionformat = ">I"
class state(object):
def __init__(self, repo):
self._vfs = repo.vfs
self._ui = repo.ui
self._rootdir = pathutil.normasprefix(repo.root)
self._lastclock = None
self._identity = util.filestat(None)
self.mode = self._ui.config('fsmonitor', 'mode', default='on')
self.walk_on_invalidate = self._ui.configbool(
'fsmonitor', 'walk_on_invalidate', False)
self.timeout = float(self._ui.config(
'fsmonitor', 'timeout', default='2'))
def get(self):
try:
file = self._vfs('fsmonitor.state', 'rb')
except IOError as inst:
self._identity = util.filestat(None)
if inst.errno != errno.ENOENT:
raise
return None, None, None
self._identity = util.filestat.fromfp(file)
versionbytes = file.read(4)
if len(versionbytes) < 4:
self._ui.log(
'fsmonitor', 'fsmonitor: state file only has %d bytes, '
'nuking state\n' % len(versionbytes))
self.invalidate()
return None, None, None
try:
diskversion = struct.unpack(_versionformat, versionbytes)[0]
if diskversion != _version:
self._ui.log(
'fsmonitor', 'fsmonitor: version switch from %d to '
'%d, nuking state\n' % (diskversion, _version))
self.invalidate()
return None, None, None
state = file.read().split('\0')
if len(state) < 3:
self._ui.log(
'fsmonitor', 'fsmonitor: state file truncated (expected '
'3 chunks, found %d), nuking state\n', len(state))
self.invalidate()
return None, None, None
diskhostname = state[0]
hostname = socket.gethostname()
if diskhostname != hostname:
self._ui.log('fsmonitor', 'fsmonitor: stored hostname "%s" '
'different from current "%s", nuking state\n' %
(diskhostname, hostname))
self.invalidate()
return None, None, None
clock = state[1]
ignorehash = state[2]
notefiles = state[3:-1]
finally:
file.close()
return clock, ignorehash, notefiles
def set(self, clock, ignorehash, notefiles):
if clock is None:
self.invalidate()
return
identity = util.filestat.frompath(self._vfs.join('fsmonitor.state'))
if identity != self._identity:
self._ui.debug('skip updating fsmonitor.state: identity mismatch\n')
return
try:
file = self._vfs('fsmonitor.state', 'wb', atomictemp=True,
checkambig=True)
except (IOError, OSError):
self._ui.warn(_("warning: unable to write out fsmonitor state\n"))
return
with file:
file.write(struct.pack(_versionformat, _version))
file.write(socket.gethostname() + '\0')
file.write(clock + '\0')
file.write(ignorehash + '\0')
if notefiles:
file.write('\0'.join(notefiles))
file.write('\0')
def invalidate(self):
try:
os.unlink(os.path.join(self._rootdir, '.hg', 'fsmonitor.state'))
except OSError as inst:
if inst.errno != errno.ENOENT:
raise
self._identity = util.filestat(None)
def setlastclock(self, clock):
self._lastclock = clock
def getlastclock(self):
return self._lastclock
| true | true |
f71b45acabf22bbb1840898ea73829472d8e7060 | 731 | py | Python | tests/test_python3_dbb.py | bopopescu/NovalIDE | 590c2adb69d54fa4a6c9dad5459198be057b1329 | [
"MulanPSL-1.0"
] | null | null | null | tests/test_python3_dbb.py | bopopescu/NovalIDE | 590c2adb69d54fa4a6c9dad5459198be057b1329 | [
"MulanPSL-1.0"
] | null | null | null | tests/test_python3_dbb.py | bopopescu/NovalIDE | 590c2adb69d54fa4a6c9dad5459198be057b1329 | [
"MulanPSL-1.0"
] | null | null | null | import inspect
class _D:
def _m(self): pass
class _C:
def _m(self): pass
_x = _C()
_x2 = _D()
a=121111
r = input('hahah')
print(r)
raise AttributeError('Provider test already registered')
print (type(_C),_x.__class__,dir(_x),"------------")
import types
###print (dir(types))
print (type(inspect))
###print type(inspect) is types.InstanceType,"============="
print (type(_x),type(type))
print (inspect.isclass(_x))
print (inspect.isclass(_x2))
print (inspect.isclass(_D))
print (inspect.isclass(_C))
print (inspect.ismodule(_C))
print (isinstance(inspect,object),"------------")
print (1)
###print (g)
print (2)
print (3)
print (4)
print (6)
print (7)
print (8)
print (9)
print (10)
print ("11111111111111") | 17.404762 | 60 | 0.642955 | import inspect
class _D:
def _m(self): pass
class _C:
def _m(self): pass
_x = _C()
_x2 = _D()
a=121111
r = input('hahah')
print(r)
raise AttributeError('Provider test already registered')
print (type(_C),_x.__class__,dir(_x),"------------")
import types
lass(_C))
print (inspect.ismodule(_C))
print (isinstance(inspect,object),"------------")
print (1)
print (4)
print (6)
print (7)
print (8)
print (9)
print (10)
print ("11111111111111") | true | true |
f71b45baa1e78f59775296f091528acf3ccf2008 | 718 | py | Python | meus_projetos/projetos_python/lista_de_tarefas/minhas_funcoes.py | SabinoEduardo/Python | f46e47f166150afdf0f4c4358b5848d52667a764 | [
"MIT"
] | null | null | null | meus_projetos/projetos_python/lista_de_tarefas/minhas_funcoes.py | SabinoEduardo/Python | f46e47f166150afdf0f4c4358b5848d52667a764 | [
"MIT"
] | null | null | null | meus_projetos/projetos_python/lista_de_tarefas/minhas_funcoes.py | SabinoEduardo/Python | f46e47f166150afdf0f4c4358b5848d52667a764 | [
"MIT"
] | null | null | null | def adicionar_tarefa(lista_tarefas, tarefas):
lista_tarefas.append(tarefas)
def deletar_tarefa(lista_tarefas, tarefas_deletadas):
    """Remove the last task from the task list, keeping a copy of it in
    the deleted-tasks list so that it can be restored later."""
    if not lista_tarefas:
        print("Nada a deletar")
        return
    # pop() both removes and returns the last task in one step.
    tarefas_deletadas.append(lista_tarefas.pop())
def repor_tarefa(lista_tarefas, tarefas_deletadas):
    """Move the most recently deleted task back onto the task list.

    Prints a notice and does nothing when there is no deleted task.
    """
    if not tarefas_deletadas:
        print("Nada a repor")
    else:
        lista_tarefas.append(tarefas_deletadas.pop())
| 27.615385 | 100 | 0.682451 | def adicionar_tarefa(lista_tarefas, tarefas):
lista_tarefas.append(tarefas)
def deletar_tarefa(lista_tarefas, tarefas_deletadas):
if not lista_tarefas:
print("Nada a deletar")
return
tarefas_deletadas.append(lista_tarefas[-1])
lista_tarefas.pop()
def repor_tarefa(lista_tarefas, tarefas_deletadas):
if not tarefas_deletadas:
print("Nada a repor")
return
else:
lista_tarefas.append(tarefas_deletadas[-1])
tarefas_deletadas.pop()
| true | true |
f71b47bfd1af85c0318ff27db55a1a089a9d0ee9 | 4,549 | py | Python | pyuvm/s09_phasing.py | ktbarrett/pyuvm | 725e6e4b8088aa085a5ce16861b46db49ce46672 | [
"Apache-2.0"
] | 4 | 2021-11-07T13:22:27.000Z | 2022-02-17T08:51:24.000Z | pyuvm/s09_phasing.py | ktbarrett/pyuvm | 725e6e4b8088aa085a5ce16861b46db49ce46672 | [
"Apache-2.0"
] | null | null | null | pyuvm/s09_phasing.py | ktbarrett/pyuvm | 725e6e4b8088aa085a5ce16861b46db49ce46672 | [
"Apache-2.0"
] | null | null | null | from pyuvm.s05_base_classes import uvm_object
import pyuvm.error_classes as error_classes
import cocotb
# 9.1
#
# This is a dramatically simplified version of UVM phasing. We don't have
# to deal with simulation time and we are not going to deal with a generalized
# phasing system.
#
# So this system simply traverses the common phases, calling the appropriate
# method in each component.
#
# Much of the work in the SV phasing code has to do with handling the passage
# of time. There is no timewheel in Python, so all of that code can go
# away.
#
# Also, the generalized phasing system is rarely used and so that
# is left as an exercise for future developers. Instead we have a simple
# topdown and bottom up traversal of calling methods in component
# classes based on the phase name.
#
# We're not doing schedules or domains. We're just creating a list of classes
# and traversing them in order. The order it dependent upon whether they
# are topdown or bottom up phases.
# 9.3.1.2 Class declaration
class uvm_phase(uvm_object):
    # Strips the "uvm_" from this class's name and uses the remainder
    # to get a function call out of the component and execute it.
    # 'uvm_run_phase' becomes 'run_phase' and is called as 'run_phase()'
    @classmethod
    def execute(cls, comp):
        """
        Look up this phase's method on the component and call it.

        :param comp: The component whose turn it is to execute
        :raises error_classes.UVMBadPhase: if comp lacks the phase method
        """
        method_name = cls.__name__[4:]
        try:
            method = getattr(comp, method_name)
        except AttributeError:
            raise error_classes.UVMBadPhase(
                f"{comp.get_name()} is missing {method_name} function")
        method()

    def __str__(self):
        # Bug fix: instances do not carry __name__ (only classes do), so
        # the old `self.__name__` raised AttributeError on str(phase).
        return type(self).__name__[4:]
class uvm_topdown_phase(uvm_phase):
    """
    Runs phases from the top down.
    """
    @classmethod
    def traverse(cls, comp):
        """
        Given a component, we traverse the component tree
        top to bottom calling the phase functions as we go
        :param comp: The component whose hierarchy will be traversed
        """
        cls.execute(comp) # first we execute this node then its children
        for child in comp.get_children():
            cls.traverse(child)  # pre-order recursion into each child
class uvm_bottomup_phase(uvm_phase):
    """
    Runs the phases from bottom up.
    """
    @classmethod
    def traverse(cls, comp):
        """Post-order traversal: recurse into children, then run comp."""
        for child in comp.get_children():
            cls.traverse(child)
        cls.execute(comp)
class uvm_threaded_execute_phase(uvm_phase):
    """
    Launches the phase method as a coroutine via cocotb.start_soon()
    instead of calling it inline, so run-type phases from several
    components execute concurrently. Nothing is returned to the caller.
    """
    @classmethod
    def execute(cls, comp):
        """Schedule comp.<phase>() as a cocotb task instead of calling it."""
        phase_name = cls.__name__
        assert phase_name.startswith("uvm_"), \
            "We only support phases whose names start with uvm_"
        method_name = cls.__name__[4:]
        try:
            method = getattr(comp, method_name)
        except AttributeError:
            raise error_classes.UVMBadPhase(
                f"{comp.get_name()} is missing {method_name} function")
        cocotb.start_soon(method())
# 9.8 Predefined Phases
# 9.8.1 Common Phases
# The common phases are described in the order of their execution.
# 9.8.1.1
class uvm_build_phase(uvm_topdown_phase):
...
# 9.8.1.2
class uvm_connect_phase(uvm_bottomup_phase):
...
# 9.8.1.3
class uvm_end_of_elaboration_phase(uvm_topdown_phase):
...
# 9.8.1.4
class uvm_start_of_simulation_phase(uvm_topdown_phase):
...
# 9.8.1.5
class uvm_run_phase(uvm_threaded_execute_phase, uvm_bottomup_phase):
...
# 9.8.1.6
class uvm_extract_phase(uvm_topdown_phase):
...
# 9.8.1.7
class uvm_check_phase(uvm_topdown_phase):
...
# 9.8.1.8
class uvm_report_phase(uvm_topdown_phase):
...
# 9.8.1.9
class uvm_final_phase(uvm_topdown_phase):
...
# 9.8.2
# UVM run-time phases are left as an exercise for an enterprising soul
# I cannot imagine why anyone would implement this.
# One could add phases by simply extending uvm_topdown_phase
# or uvm_bottom_up phase with a new phase named 'uvm_my_phase' and adding
# the my_phase() method to a uvm component with setattr.
# The nine common phases in execution order; a run simply walks this list,
# traversing the component tree once per phase.
uvm_common_phases = [uvm_build_phase,
                     uvm_connect_phase,
                     uvm_end_of_elaboration_phase,
                     uvm_start_of_simulation_phase,
                     uvm_run_phase,
                     uvm_extract_phase,
                     uvm_check_phase,
                     uvm_report_phase,
                     uvm_final_phase]
| 27.737805 | 78 | 0.667619 | from pyuvm.s05_base_classes import uvm_object
import pyuvm.error_classes as error_classes
import cocotb
# to deal with simulation time and we are not going to deal with a generalized
# phasing system.
#
# So this system simply traverses the common phases, calling the appropriate
# method in each component.
#
# Much of the work in the SV phasing code has to do with handling the passage
# of time. There is no timewheel in Python, so all of that code can go
# away.
#
# Also, the generalized phasing system is rarely used and so that
# is left as an exercise for future developers. Instead we have a simple
# topdown and bottom up traversal of calling methods in component
# classes based on the phase name.
#
# We're not doing schedules or domains. We're just creating a list of classes
# and traversing them in order. The order it dependent upon whether they
# are topdown or bottom up phases.
# 9.3.1.2 Class declaration
class uvm_phase(uvm_object):
# Strips the "uvm_" from this class's name and uses the remainder
@classmethod
def execute(cls, comp):
method_name = cls.__name__[4:]
try:
method = getattr(comp, method_name)
except AttributeError:
raise error_classes.UVMBadPhase(
f"{comp.get_name()} is missing {method_name} function")
method()
def __str__(self):
return self.__name__[4:]
class uvm_topdown_phase(uvm_phase):
@classmethod
def traverse(cls, comp):
cls.execute(comp)
for child in comp.get_children():
cls.traverse(child)
class uvm_bottomup_phase(uvm_phase):
@classmethod
def traverse(cls, comp):
for child in comp.get_children():
cls.traverse(child)
cls.execute(comp)
class uvm_threaded_execute_phase(uvm_phase):
@classmethod
def execute(cls, comp):
phase_name = cls.__name__
assert phase_name.startswith("uvm_"), \
"We only support phases whose names start with uvm_"
method_name = cls.__name__[4:]
try:
method = getattr(comp, method_name)
except AttributeError:
raise error_classes.UVMBadPhase(
f"{comp.get_name()} is missing {method_name} function")
cocotb.start_soon(method())
class uvm_build_phase(uvm_topdown_phase):
...
class uvm_connect_phase(uvm_bottomup_phase):
...
class uvm_end_of_elaboration_phase(uvm_topdown_phase):
...
class uvm_start_of_simulation_phase(uvm_topdown_phase):
...
class uvm_run_phase(uvm_threaded_execute_phase, uvm_bottomup_phase):
...
class uvm_extract_phase(uvm_topdown_phase):
...
class uvm_check_phase(uvm_topdown_phase):
...
class uvm_report_phase(uvm_topdown_phase):
...
class uvm_final_phase(uvm_topdown_phase):
...
uvm_common_phases = [uvm_build_phase,
uvm_connect_phase,
uvm_end_of_elaboration_phase,
uvm_start_of_simulation_phase,
uvm_run_phase,
uvm_extract_phase,
uvm_check_phase,
uvm_report_phase,
uvm_final_phase]
| true | true |
f71b486fd1687af447da51c84625b2c67f3d5401 | 3,199 | py | Python | homeassistant/components/harmony/subscriber.py | PiotrMachowski/core | b9d7d0cae2ccd2d88e90e49cc09e154a27ed809b | [
"Apache-2.0"
] | 3 | 2020-11-27T06:26:27.000Z | 2020-12-09T14:55:16.000Z | homeassistant/components/harmony/subscriber.py | PiotrMachowski/core | b9d7d0cae2ccd2d88e90e49cc09e154a27ed809b | [
"Apache-2.0"
] | 277 | 2021-10-04T06:39:33.000Z | 2021-12-28T22:04:17.000Z | homeassistant/components/harmony/subscriber.py | PiotrMachowski/core | b9d7d0cae2ccd2d88e90e49cc09e154a27ed809b | [
"Apache-2.0"
] | 3 | 2022-01-02T18:49:54.000Z | 2022-01-25T02:03:54.000Z | """Mixin class for handling harmony callback subscriptions."""
import asyncio
import logging
# pylint: disable-next=deprecated-typing-alias
# Issue with Python 3.9.0 and 3.9.1 with collections.abc.Callable
# https://bugs.python.org/issue42965
from typing import Any, Callable, NamedTuple, Optional
from homeassistant.core import callback
_LOGGER = logging.getLogger(__name__)
NoParamCallback = Optional[Callable[[object], Any]]
ActivityCallback = Optional[Callable[[object, tuple], Any]]
class HarmonyCallback(NamedTuple):
    """Callback type for Harmony Hub notifications."""
    # Each field is either None or a callable invoked via async_run_job.
    connected: NoParamCallback
    disconnected: NoParamCallback
    config_updated: NoParamCallback
    # Activity callbacks additionally receive a tuple of activity info;
    # the exact tuple shape comes from the Harmony client — TODO confirm.
    activity_starting: ActivityCallback
    activity_started: ActivityCallback
class HarmonySubscriberMixin:
    """Base implementation for a subscriber.

    Keeps a list of HarmonyCallback bundles and fans hub events out to
    them on the Home Assistant event loop.
    """
    def __init__(self, hass):
        """Initialize the subscriber."""
        super().__init__()
        self._hass = hass
        self._subscriptions = []
        # Lock serializing activity starts; released on connect/disconnect
        # and when an activity finishes starting.
        self._activity_lock = asyncio.Lock()
    async def async_lock_start_activity(self):
        """Acquire the lock."""
        await self._activity_lock.acquire()
    @callback
    def async_unlock_start_activity(self):
        """Release the lock."""
        if self._activity_lock.locked():
            self._activity_lock.release()
    @callback
    def async_subscribe(self, update_callbacks: HarmonyCallback) -> Callable:
        """Add a callback subscriber.

        Returns a zero-argument function that removes this subscription.
        """
        self._subscriptions.append(update_callbacks)
        def _unsubscribe():
            self.async_unsubscribe(update_callbacks)
        return _unsubscribe
    @callback
    def async_unsubscribe(self, update_callback: HarmonyCallback):
        """Remove a callback subscriber."""
        self._subscriptions.remove(update_callback)
    def _config_updated(self, _=None) -> None:
        _LOGGER.debug("config_updated")
        self._call_callbacks("config_updated")
    def _connected(self, _=None) -> None:
        _LOGGER.debug("connected")
        self.async_unlock_start_activity()
        # NOTE(review): _available is assigned here but never initialized in
        # __init__ — presumably declared by the concrete subclass; confirm.
        self._available = True
        self._call_callbacks("connected")
    def _disconnected(self, _=None) -> None:
        _LOGGER.debug("disconnected")
        self.async_unlock_start_activity()
        self._available = False
        self._call_callbacks("disconnected")
    def _activity_starting(self, activity_info: tuple) -> None:
        _LOGGER.debug("activity %s starting", activity_info)
        self._call_callbacks("activity_starting", activity_info)
    def _activity_started(self, activity_info: tuple) -> None:
        _LOGGER.debug("activity %s started", activity_info)
        self.async_unlock_start_activity()
        self._call_callbacks("activity_started", activity_info)
    def _call_callbacks(self, callback_func_name: str, argument: tuple = None):
        # Schedule the named callback of every subscription on the event
        # loop; None entries are skipped. Note an empty tuple argument is
        # falsy and would be treated as "no argument" here.
        for subscription in self._subscriptions:
            current_callback = getattr(subscription, callback_func_name)
            if current_callback:
                if argument:
                    self._hass.async_run_job(current_callback, argument)
                else:
                    self._hass.async_run_job(current_callback)
| 32.979381 | 79 | 0.689278 |
import asyncio
import logging
from typing import Any, Callable, NamedTuple, Optional
from homeassistant.core import callback
_LOGGER = logging.getLogger(__name__)
NoParamCallback = Optional[Callable[[object], Any]]
ActivityCallback = Optional[Callable[[object, tuple], Any]]
class HarmonyCallback(NamedTuple):
connected: NoParamCallback
disconnected: NoParamCallback
config_updated: NoParamCallback
activity_starting: ActivityCallback
activity_started: ActivityCallback
class HarmonySubscriberMixin:
def __init__(self, hass):
super().__init__()
self._hass = hass
self._subscriptions = []
self._activity_lock = asyncio.Lock()
async def async_lock_start_activity(self):
await self._activity_lock.acquire()
@callback
def async_unlock_start_activity(self):
if self._activity_lock.locked():
self._activity_lock.release()
@callback
def async_subscribe(self, update_callbacks: HarmonyCallback) -> Callable:
self._subscriptions.append(update_callbacks)
def _unsubscribe():
self.async_unsubscribe(update_callbacks)
return _unsubscribe
@callback
def async_unsubscribe(self, update_callback: HarmonyCallback):
self._subscriptions.remove(update_callback)
def _config_updated(self, _=None) -> None:
_LOGGER.debug("config_updated")
self._call_callbacks("config_updated")
def _connected(self, _=None) -> None:
_LOGGER.debug("connected")
self.async_unlock_start_activity()
self._available = True
self._call_callbacks("connected")
def _disconnected(self, _=None) -> None:
_LOGGER.debug("disconnected")
self.async_unlock_start_activity()
self._available = False
self._call_callbacks("disconnected")
def _activity_starting(self, activity_info: tuple) -> None:
_LOGGER.debug("activity %s starting", activity_info)
self._call_callbacks("activity_starting", activity_info)
def _activity_started(self, activity_info: tuple) -> None:
_LOGGER.debug("activity %s started", activity_info)
self.async_unlock_start_activity()
self._call_callbacks("activity_started", activity_info)
def _call_callbacks(self, callback_func_name: str, argument: tuple = None):
for subscription in self._subscriptions:
current_callback = getattr(subscription, callback_func_name)
if current_callback:
if argument:
self._hass.async_run_job(current_callback, argument)
else:
self._hass.async_run_job(current_callback)
| true | true |
f71b492ed7fe05c2dd8f787b6d743c15e42b6651 | 23,428 | py | Python | test/test_framework/util.py | vpubchain/Phore | 7819f046e629ccb5a00fa4f89a7399a7732b4113 | [
"MIT"
] | null | null | null | test/test_framework/util.py | vpubchain/Phore | 7819f046e629ccb5a00fa4f89a7399a7732b4113 | [
"MIT"
] | null | null | null | test/test_framework/util.py | vpubchain/Phore | 7819f046e629ccb5a00fa4f89a7399a7732b4113 | [
"MIT"
] | 1 | 2019-08-10T08:20:56.000Z | 2019-08-10T08:20:56.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
import os
import sys
from binascii import hexlify, unhexlify
from base64 import b64encode
from decimal import Decimal, ROUND_DOWN
import json
import http.client
import random
import shutil
import subprocess
import time
import re
import errno
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
COVERAGE_DIR = None
# The maximum number of nodes a single test can spawn
MAX_NODES = 8
# Don't assign rpc or p2p ports lower than this
PORT_MIN = 11000
# The number of ports to "reserve" for p2p and rpc, each
PORT_RANGE = 5000
VPUBD_PROC_WAIT_TIMEOUT = 60
class PortSeed:
    """Holds the per-process seed used to spread port assignments."""
    # Must be initialized with a unique integer for each process
    n = None
#Set Mocktime default to OFF.
#MOCKTIME is only needed for scripts that use the
#cached version of the blockchain. If the cached
#version of the blockchain is used without MOCKTIME
#then the mempools will not sync due to IBD.
MOCKTIME = 0  # 0 = mocktime disabled (use real time)
def enable_mocktime():
    # For backward compatibility of the python scripts with previous
    # versions of the cache, set MOCKTIME to Jan 1, 2014 + (201 * 10 * 60)
    global MOCKTIME
    MOCKTIME = 1388534400 + (201 * 10 * 60)
def disable_mocktime():
    """Turn mocktime off again (0 means real time)."""
    global MOCKTIME
    MOCKTIME = 0
def get_mocktime():
    """Return the current mocktime value (0 when disabled)."""
    return MOCKTIME
def enable_coverage(dirname):
    """Maintain a log of which RPC calls are made during testing.

    dirname: directory where per-node coverage log files are written.
    """
    global COVERAGE_DIR
    COVERAGE_DIR = dirname
def get_rpc_proxy(url, node_number, timeout=None):
    """Build an AuthServiceProxy for *url*, wrapped for coverage logging.

    Args:
        url (str): URL of the RPC server to call
        node_number (int): the node number (or id) that this calls to

    Kwargs:
        timeout (int): HTTP timeout in seconds

    Returns:
        AuthServiceProxy. convenience object for making RPC calls.
    """
    kwargs = {} if timeout is None else {'timeout': timeout}
    proxy = AuthServiceProxy(url, **kwargs)
    proxy.url = url  # remember the endpoint on the proxy for debugging
    if COVERAGE_DIR:
        logfile = coverage.get_filename(COVERAGE_DIR, node_number)
    else:
        logfile = None
    return coverage.AuthServiceProxyWrapper(proxy, logfile)
def p2p_port(n):
    """P2P listen port for node *n*, offset by this process's PortSeed."""
    assert(n <= MAX_NODES)
    return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_port(n):
    """RPC listen port for node *n*, one PORT_RANGE above the p2p ports."""
    return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def check_json_precision():
    """Fail fast if the json module loses precision on BTC-sized floats."""
    value = Decimal("20000000.00000003")
    round_tripped = json.loads(json.dumps(float(value)))
    if int(round_tripped * 1.0e8) != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
    """Return the number of bytes encoded by *hex_string*."""
    return len(bytes.fromhex(hex_string))
def bytes_to_hex_str(byte_str):
    """Encode a bytes object as a lowercase ASCII hex string."""
    return str(hexlify(byte_str), 'ascii')
def hex_str_to_bytes(hex_str):
    """Decode an ASCII hex string into the bytes it represents."""
    return unhexlify(bytes(hex_str, 'ascii'))
def str_to_b64str(string):
    """UTF-8 encode *string* and return its base64 form as text."""
    encoded = string.encode('utf-8')
    return b64encode(encoded).decode('ascii')
def sync_blocks(rpc_connections, wait=1, timeout=60):
    """Poll every node until they all report the same best block hash.

    Raises AssertionError after *timeout* seconds without convergence.
    """
    remaining = timeout
    while remaining > 0:
        hashes = set(node.getbestblockhash() for node in rpc_connections)
        if len(hashes) <= 1:
            return True
        time.sleep(wait)
        remaining -= wait
    raise AssertionError("Block sync failed")
def sync_mempools(rpc_connections, wait=1, timeout=60):
    """Poll until every node's mempool matches node 0's mempool.

    Raises AssertionError after *timeout* seconds without convergence.
    """
    remaining = timeout
    while remaining > 0:
        reference = set(rpc_connections[0].getrawmempool())
        matching = sum(
            1 for node in rpc_connections[1:]
            if set(node.getrawmempool()) == reference)
        if matching + 1 == len(rpc_connections):
            return True
        time.sleep(wait)
        remaining -= wait
    raise AssertionError("Mempool sync failed")
vpubd_processes = {}
def initialize_datadir(dirname, n):
    """Create dirname/nodeN and write a regtest vpub.conf for node *n*.

    Returns the datadir path.
    """
    datadir = os.path.join(dirname, "node"+str(n))
    if not os.path.isdir(datadir):
        os.makedirs(datadir)
    rpc_u, rpc_p = rpc_auth_pair(n)
    with open(os.path.join(datadir, "vpub.conf"), 'w', encoding='utf8') as f:
        f.write("regtest=1\n")
        f.write("server=1\n")
        f.write("rpcuser=" + rpc_u + "\n")
        f.write("rpcpassword=" + rpc_p + "\n")
        f.write("port="+str(p2p_port(n))+"\n")
        f.write("rpcport="+str(rpc_port(n))+"\n")
        f.write("listenonion=0\n")
    return datadir
def rpc_auth_pair(n):
    """Return the deterministic (rpcuser, rpcpassword) pair for node *n*."""
    return 'rpcuser{}'.format(n), 'rpcpass{}'.format(n)
def rpc_url(i, rpchost=None):
    """Build the authenticated RPC URL for node *i*.

    *rpchost* may be "host" or "host:port"; defaults to localhost and
    the node's standard rpc port.
    """
    user, password = rpc_auth_pair(i)
    host = '127.0.0.1'
    port = rpc_port(i)
    if rpchost:
        pieces = rpchost.split(':')
        if len(pieces) == 2:
            host, port = pieces
        else:
            host = rpchost
    return "http://%s:%s@%s:%d" % (user, password, host, int(port))
def wait_for_vpubd_start(process, url, i):
    '''
    Wait for vpubd to start. This means that RPC is accessible and fully initialized.
    Raise an exception if vpubd exits during initialization.
    '''
    while True:
        if process.poll() is not None:
            raise Exception('vpubd exited with status %i during initialization' % process.returncode)
        try:
            rpc = get_rpc_proxy(url, i)
            blocks = rpc.getblockcount()
            break # break out of loop on success
        except IOError as e:
            if e.errno != errno.ECONNREFUSED: # Port not yet open?
                raise # unknown IO error
        except JSONRPCException as e: # Initialization phase
            if e.error['code'] != -28: # RPC in warmup?
                raise # unknown JSON RPC exception
        time.sleep(0.25)
def initialize_chain(test_dir, num_nodes):
    """
    Create a cache of a 200-block-long chain (with wallet) for MAX_NODES
    Afterward, create num_nodes copies from the cache
    """
    assert num_nodes <= MAX_NODES
    # Rebuild the cache whenever any node's cache directory is missing.
    create_cache = False
    for i in range(MAX_NODES):
        if not os.path.isdir(os.path.join('cache', 'node'+str(i))):
            create_cache = True
            break
    if create_cache:
        # find and delete old cache directories if any exist
        for i in range(MAX_NODES):
            if os.path.isdir(os.path.join("cache","node"+str(i))):
                shutil.rmtree(os.path.join("cache","node"+str(i)))
        # Create cache directories, run vpubds:
        for i in range(MAX_NODES):
            datadir=initialize_datadir("cache", i)
            args = [ os.getenv("VPUBD", "vpubd"), "-server", "-keypool=1", "-datadir="+datadir, "-discover=0" ]
            if i > 0:
                args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
            vpubd_processes[i] = subprocess.Popen(args)
            if os.getenv("PYTHON_DEBUG", ""):
                print("initialize_chain: vpubd started, waiting for RPC to come up")
            wait_for_vpubd_start(vpubd_processes[i], rpc_url(i), i)
            if os.getenv("PYTHON_DEBUG", ""):
                print("initialize_chain: RPC succesfully started")
        rpcs = []
        for i in range(MAX_NODES):
            url = rpc_url(i)  # bug fix: 'url' was previously undefined in the handler below
            try:
                rpcs.append(get_rpc_proxy(url, i))
            except Exception:
                sys.stderr.write("Error connecting to "+url+"\n")
                sys.exit(1)
        # Create a 200-block-long chain; each of the 4 first nodes
        # gets 25 mature blocks and 25 immature.
        # Note: To preserve compatibility with older versions of
        # initialize_chain, only 4 nodes will generate coins.
        #
        # blocks are created with timestamps 10 minutes apart
        # starting from 2010 minutes in the past
        enable_mocktime()
        block_time = get_mocktime() - (201 * 10 * 60)
        for i in range(2):
            for peer in range(4):
                for j in range(25):
                    set_node_times(rpcs, block_time)
                    rpcs[peer].setgenerate(True, 1)
                    block_time += 10*60
                # Must sync before next peer starts generating blocks
                sync_blocks(rpcs)
        # Shut them down, and clean up cache directories:
        stop_nodes(rpcs)
        disable_mocktime()
        for i in range(MAX_NODES):
            os.remove(log_filename("cache", i, "debug.log"))
            os.remove(log_filename("cache", i, "db.log"))
            os.remove(log_filename("cache", i, "peers.dat"))
            os.remove(log_filename("cache", i, "fee_estimates.dat"))
    for i in range(num_nodes):
        from_dir = os.path.join("cache", "node"+str(i))
        to_dir = os.path.join(test_dir, "node"+str(i))
        shutil.copytree(from_dir, to_dir)
        initialize_datadir(test_dir, i) # Overwrite port/rpcport in vpub.conf
def initialize_chain_clean(test_dir, num_nodes):
    """
    Create an empty blockchain and num_nodes wallets.
    Useful if a test case wants complete control over initialization.
    """
    # The unused 'datadir' local was dropped; only the side effect matters.
    for i in range(num_nodes):
        initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
match = re.match('(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
    """
    Start a vpubd and return RPC connection to it

    i: node index (determines datadir and ports)
    dirname: parent directory holding node<i> datadirs
    extra_args: optional extra command-line flags for the daemon
    rpchost: optional "host[:port]" override for the RPC endpoint
    timewait: HTTP timeout passed to the RPC proxy
    binary: path to the daemon; defaults to $VPUBD or "vpubd"
    """
    datadir = os.path.join(dirname, "node"+str(i))
    if binary is None:
        binary = os.getenv("VPUBD", "vpubd")
    # Note: the regtest spork key below is hard-coded for deterministic tests.
    args = [ binary, "-datadir="+datadir, "-server", "-keypool=1", "-discover=0", "-rest", "-mocktime="+str(get_mocktime()), "-regtest", "-sporkkey=923EhWh2bJHynX6d4Tqt2Q75bhTDCT1b4kff3qzDKDZHZ6pkQs7"]
    if extra_args is not None: args.extend(extra_args)
    vpubd_processes[i] = subprocess.Popen(args)
    if os.getenv("PYTHON_DEBUG", ""):
        print("start_node: phroed started, waiting for RPC to come up")
    url = rpc_url(i, rpchost)
    wait_for_vpubd_start(vpubd_processes[i], url, i)
    if os.getenv("PYTHON_DEBUG", ""):
        print("start_node: RPC succesfully started")
    proxy = get_rpc_proxy(url, i, timeout=timewait)
    if COVERAGE_DIR:
        coverage.write_all_rpc_commands(COVERAGE_DIR, proxy)
    return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
    """Start multiple vpubds and return RPC connections to them.

    If any node fails to start, the ones already running are stopped
    before the exception propagates.
    """
    if extra_args is None:
        extra_args = [None] * num_nodes
    if binary is None:
        binary = [None] * num_nodes
    rpcs = []
    try:
        for i in range(num_nodes):
            rpcs.append(start_node(i, dirname, extra_args[i], rpchost,
                                   timewait=timewait, binary=binary[i]))
    except BaseException:
        # If one node failed to start, stop the others.
        stop_nodes(rpcs)
        raise
    return rpcs
def log_filename(dirname, n_node, logname):
    """Path of *logname* inside node *n_node*'s regtest datadir."""
    return os.path.join(dirname, "node{}".format(n_node), "regtest", logname)
def stop_node(node, i):
    """Politely stop node *i* via RPC and wait for its process to exit."""
    try:
        node.stop()
    except http.client.CannotSendRequest as e:
        # The connection may already be gone; report and keep going.
        print("WARN: Unable to stop node: " + repr(e))
    vpubd_processes[i].wait(timeout=VPUBD_PROC_WAIT_TIMEOUT)
    del vpubd_processes[i]
def stop_nodes(nodes):
    """Stop every node via RPC, drop the connections, and wait for exit."""
    for node in nodes:
        try:
            node.stop()
        except http.client.CannotSendRequest as e:
            print("WARN: Unable to stop node: " + repr(e))
    del nodes[:] # Emptying array closes connections as a side effect
    wait_vpubds()
def set_node_times(nodes, t):
    """Point every node's mock clock at timestamp *t*."""
    for rpc in nodes:
        rpc.setmocktime(t)
def wait_vpubds():
    """Block until every tracked vpubd process exits, then forget them."""
    # Wait for all vpubds to cleanly exit
    for vpubd in vpubd_processes.values():
        vpubd.wait(timeout=VPUBD_PROC_WAIT_TIMEOUT)
    vpubd_processes.clear()
def connect_nodes(from_connection, node_num):
    """Ask *from_connection* to dial node *node_num*, then wait for handshake."""
    target = "127.0.0.1:%d" % p2p_port(node_num)
    from_connection.addnode(target, "onetry")
    # Poll until the version handshake completes, to avoid race conditions
    # with transaction relaying.
    while not all(peer['version'] != 0 for peer in from_connection.getpeerinfo()):
        time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
    """Connect nodes[a] and nodes[b] to each other (both directions)."""
    for src, dst in ((a, b), (b, a)):
        connect_nodes(nodes[src], dst)
def find_output(node, txid, amount):
    """Return the vout index of *txid* whose value equals *amount*.

    Raises RuntimeError when no such output exists.
    """
    txdata = node.getrawtransaction(txid, 1)
    for index, vout in enumerate(txdata["vout"]):
        if vout["value"] == amount:
            return index
    raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
    """Pick a random set of unspent txouts covering *amount_needed*.

    Returns (total_in, inputs); raises RuntimeError when the wallet
    cannot cover the amount.
    """
    assert(confirmations_required >=0)
    candidates = from_node.listunspent(confirmations_required)
    random.shuffle(candidates)
    total_in = Decimal("0.00000000")
    inputs = []
    while candidates and total_in < amount_needed:
        utxo = candidates.pop()
        total_in += utxo["amount"]
        inputs.append({"txid": utxo["txid"], "vout": utxo["vout"], "address": utxo["address"]})
    if total_in < amount_needed:
        raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
    return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
    """Build the change output(s) for a hand-rolled transaction.

    When change exceeds twice the spent amount, it is split across two
    fresh addresses to break up big inputs.
    """
    outputs = {}
    spent = amount_out + fee
    change = amount_in - spent
    if change > spent * 2:
        # Create an extra change output to break up big inputs.
        extra_address = from_node.getnewaddress()
        # Split change in two, being careful of rounding:
        outputs[extra_address] = Decimal(change / 2).quantize(
            Decimal('0.00000001'), rounding=ROUND_DOWN)
        change = amount_in - spent - outputs[extra_address]
    if change > 0:
        outputs[from_node.getnewaddress()] = change
    return outputs
def send_zeropri_transaction(from_node, to_node, amount, fee):
"""
Create&broadcast a zero-priority transaction.
Returns (txid, hex-encoded-txdata)
Ensures transaction is zero-priority by first creating a send-to-self,
then using its output
"""
# Create a send-to-self with confirmed inputs:
self_address = from_node.getnewaddress()
(total_in, inputs) = gather_inputs(from_node, amount+fee*2)
outputs = make_change(from_node, total_in, amount+fee, fee)
outputs[self_address] = float(amount+fee)
self_rawtx = from_node.createrawtransaction(inputs, outputs)
self_signresult = from_node.signrawtransaction(self_rawtx)
self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
vout = find_output(from_node, self_txid, amount+fee)
# Now immediately spend the output to create a 1-input, 1-output
# zero-priority transaction:
inputs = [ { "txid" : self_txid, "vout" : vout } ]
outputs = { to_node.getnewaddress() : float(amount) }
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random zero-priority transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount+fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
def assert_fee_amount(fee, tx_size, fee_per_kB):
    """Assert the fee was in range for a *tx_size*-byte tx at *fee_per_kB*."""
    target_fee = tx_size * fee_per_kB / 1000
    # allow the wallet's estimation to be at most 2 bytes off
    upper_bound = (tx_size + 2) * fee_per_kB / 1000
    if fee < target_fee:
        raise AssertionError("Fee of %s BTC too low! (Should be %s BTC)"%(str(fee), str(target_fee)))
    if fee > upper_bound:
        raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)"%(str(fee), str(target_fee)))
def assert_equal(thing1, thing2):
    """Raise AssertionError showing both values when they differ."""
    if thing1 != thing2:
        raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
def assert_greater_than(thing1, thing2):
    """Raise AssertionError unless thing1 > thing2."""
    if thing1 <= thing2:
        raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
    # Convenience wrapper: assert the call raises *exc*; message unchecked.
    assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
    """Assert that fun(*args, **kwds) raises *exc*.

    When *message* is not None it must occur in e.error['message'], so
    *exc* is expected to be a JSONRPCException-style exception carrying
    an ``error`` dict.
    """
    try:
        fun(*args, **kwds)
    except exc as e:
        if message is not None and message not in e.error['message']:
            raise AssertionError("Expected substring not found:"+e.error['message'])
    except Exception as e:
        raise AssertionError("Unexpected exception raised: "+type(e).__name__)
    else:
        raise AssertionError("No exception raised")
def assert_is_hex_string(string):
    """Assert *string* parses as a hexadecimal number."""
    try:
        int(string, 16)
    except Exception as e:
        raise AssertionError(
            "Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
def assert_is_hash_string(string, length=64):
    """Assert *string* looks like a lowercase hex hash of *length* chars.

    Pass a falsy *length* (None/0) to skip the length check.
    """
    if not isinstance(string, str):
        raise AssertionError("Expected a string, got type %r" % type(string))
    elif length and len(string) != length:
        raise AssertionError(
            "String of length %d expected; got %d" % (length, len(string)))
    elif not re.match('[abcdef0-9]+$', string):
        raise AssertionError(
            "String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find = False):
    """
    Pass in array of JSON objects, a dictionary with key/value pairs
    to match against, and another dictionary with expected key/value
    pairs.
    If the should_not_find flag is true, to_match should not be found
    in object_array
    """
    if should_not_find == True:
        # In the negative mode no expected values make sense.
        assert_equal(expected, { })
    num_matched = 0
    for item in object_array:
        all_match = True
        for key,value in to_match.items():
            if item[key] != value:
                all_match = False
        if not all_match:
            continue
        elif should_not_find == True:
            # Note: a matching item in negative mode is counted here AND
            # again after the (empty) expected loop below; harmless since
            # only num_matched > 0 is tested for this mode.
            num_matched = num_matched+1
        for key,value in expected.items():
            if item[key] != value:
                raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
            num_matched = num_matched+1
    if num_matched == 0 and should_not_find != True:
        raise AssertionError("No objects matched %s"%(str(to_match)))
    if num_matched > 0 and should_not_find == True:
        raise AssertionError("Objects were found %s"%(str(to_match)))
def satoshi_round(amount):
    """Quantize *amount* to 8 decimal places (1 satoshi), rounding down."""
    return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
    """Ensure *node* owns at least *count* confirmed utxos; return them.

    fee: Decimal fee paid on each splitting transaction (must be enough
         for relay/mining).
    node: RPC proxy of the node to fund.
    count: minimum number of spendable outputs required.
    """
    # Mine enough blocks for ~count/2 coinbases plus 101 for maturity.
    node.setgenerate(True, int(0.5*count)+101)
    utxos = node.listunspent()
    iterations = count - len(utxos)
    addr1 = node.getnewaddress()
    addr2 = node.getnewaddress()
    if iterations <= 0:
        return utxos
    for i in range(iterations):
        # Split one existing utxo into two outputs: nets +1 utxo per pass.
        t = utxos.pop()
        inputs = []
        inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
        outputs = {}
        send_value = t['amount'] - fee
        outputs[addr1] = satoshi_round(send_value/2)
        outputs[addr2] = satoshi_round(send_value/2)
        raw_tx = node.createrawtransaction(inputs, outputs)
        signed_tx = node.signrawtransaction(raw_tx)["hex"]
        txid = node.sendrawtransaction(signed_tx)
    # Mine until the mempool is empty so every new output is confirmed.
    while (node.getmempoolinfo()['size'] > 0):
        node.setgenerate(True, 1)
    utxos = node.listunspent()
    assert(len(utxos) >= count)
    return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
    """Build the hex serialization of 128 large OP_RETURN txouts.

    Returns a 0x81 (= 129) output-count byte followed by 128 zero-value
    outputs each carrying a 512-byte OP_RETURN script, ready to splice
    into a raw transaction before its change output to inflate its size.
    """
    # OP_RETURN OP_PUSHDATA2 512 (0x6a 0x4d 0x0200) + 512 bytes of 0x01.
    script_pubkey = "6a4d0200" + "01" * 512
    # One txout: 8-byte zero value + compact-size script length
    # (0xfd 0x0402 = 516 little-endian) + the script itself.
    txout = "0000000000000000" + "fd0402" + script_pubkey
    # "81" is the new output count (128 padding outputs + 1 change).
    return "81" + txout * 128
def create_tx(node, coinbase, to_address, amount):
    """Create and sign a tx spending output 0 of *coinbase* to *to_address*.

    Returns the signed raw transaction hex; asserts signing completed.
    """
    inputs = [{ "txid" : coinbase, "vout" : 0}]
    outputs = { to_address : amount }
    rawtx = node.createrawtransaction(inputs, outputs)
    signresult = node.signrawtransaction(rawtx)
    assert_equal(signresult["complete"], True)
    return signresult["hex"]
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, fee):
    """Spend every utxo in *utxos* as a very large transaction.

    txouts: hex blob from gen_return_txouts() spliced into each raw tx.
    Returns the list of broadcast txids.  Note: *utxos* is consumed.
    """
    addr = node.getnewaddress()
    txids = []
    for i in range(len(utxos)):
        t = utxos.pop()
        inputs = []
        inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
        outputs = {}
        send_value = t['amount'] - fee
        outputs[addr] = satoshi_round(send_value)
        rawtx = node.createrawtransaction(inputs, outputs)
        # Splice the padding outputs after the 92-hex-char prefix; the two
        # dropped chars at [92:94] are the original 1-output count byte,
        # replaced by the count byte leading *txouts*.
        newtx = rawtx[0:92]
        newtx = newtx + txouts
        newtx = newtx + rawtx[94:]
        # SIGHASH "NONE": signature stays valid despite the added outputs.
        signresult = node.signrawtransaction(newtx, None, None, "NONE")
        txid = node.sendrawtransaction(signresult["hex"], True)
        txids.append(txid)
    return txids
def get_bip9_status(node, key):
    """Return the bip9_softforks entry named *key* from *node*."""
    chain_info = node.getblockchaininfo()
    return chain_info['bip9_softforks'][key]
| 35.713415 | 201 | 0.651742 |
import os
import sys
from binascii import hexlify, unhexlify
from base64 import b64encode
from decimal import Decimal, ROUND_DOWN
import json
import http.client
import random
import shutil
import subprocess
import time
import re
import errno
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
COVERAGE_DIR = None
MAX_NODES = 8
PORT_MIN = 11000
# The number of ports to "reserve" for p2p and rpc, each
PORT_RANGE = 5000
VPUBD_PROC_WAIT_TIMEOUT = 60
class PortSeed:
# Must be initialized with a unique integer for each process
n = None
#Set Mocktime default to OFF.
#MOCKTIME is only needed for scripts that use the
#cached version of the blockchain. If the cached
#version of the blockchain is used without MOCKTIME
#then the mempools will not sync due to IBD.
MOCKTIME = 0
def enable_mocktime():
#For backwared compatibility of the python scripts
#with previous versions of the cache, set MOCKTIME
#to Jan 1, 2014 + (201 * 10 * 60)
global MOCKTIME
MOCKTIME = 1388534400 + (201 * 10 * 60)
def disable_mocktime():
global MOCKTIME
MOCKTIME = 0
def get_mocktime():
return MOCKTIME
def enable_coverage(dirname):
global COVERAGE_DIR
COVERAGE_DIR = dirname
def get_rpc_proxy(url, node_number, timeout=None):
proxy_kwargs = {}
if timeout is not None:
proxy_kwargs['timeout'] = timeout
proxy = AuthServiceProxy(url, **proxy_kwargs)
proxy.url = url # store URL on proxy for info
coverage_logfile = coverage.get_filename(
COVERAGE_DIR, node_number) if COVERAGE_DIR else None
return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def p2p_port(n):
assert(n <= MAX_NODES)
return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_port(n):
return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def check_json_precision():
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
return len(bytearray.fromhex(hex_string))
def bytes_to_hex_str(byte_str):
return hexlify(byte_str).decode('ascii')
def hex_str_to_bytes(hex_str):
return unhexlify(hex_str.encode('ascii'))
def str_to_b64str(string):
return b64encode(string.encode('utf-8')).decode('ascii')
def sync_blocks(rpc_connections, wait=1, timeout=60):
    """Wait until every node reports the same best block hash.

    Polls each proxy every *wait* seconds; raises AssertionError after
    *timeout* seconds without convergence.
    """
    while timeout > 0:
        tips = [ x.getbestblockhash() for x in rpc_connections ]
        if tips == [ tips[0] ]*len(tips):
            return True
        time.sleep(wait)
        timeout -= wait
    raise AssertionError("Block sync failed")
def sync_mempools(rpc_connections, wait=1, timeout=60):
while timeout > 0:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match+1
if num_match == len(rpc_connections):
return True
time.sleep(wait)
timeout -= wait
raise AssertionError("Mempool sync failed")
vpubd_processes = {}
def initialize_datadir(dirname, n):
    """Create dirname/nodeN with a regtest vpub.conf; return the path.

    The conf file pins per-node p2p/rpc ports and credentials derived
    from the node index.
    """
    datadir = os.path.join(dirname, "node"+str(n))
    if not os.path.isdir(datadir):
        os.makedirs(datadir)
    rpc_u, rpc_p = rpc_auth_pair(n)
    with open(os.path.join(datadir, "vpub.conf"), 'w', encoding='utf8') as f:
        f.write("regtest=1\n")
        f.write("server=1\n")
        f.write("rpcuser=" + rpc_u + "\n")
        f.write("rpcpassword=" + rpc_p + "\n")
        f.write("port="+str(p2p_port(n))+"\n")
        f.write("rpcport="+str(rpc_port(n))+"\n")
        f.write("listenonion=0\n")
    return datadir
def rpc_auth_pair(n):
return 'rpcuser' + str(n), 'rpcpass' + str(n)
def rpc_url(i, rpchost=None):
    """Return the http RPC URL for node *i*.

    *rpchost* may override the default 127.0.0.1 host, optionally with a
    port as 'host:port'.
    """
    user, password = rpc_auth_pair(i)
    host = '127.0.0.1'
    port = rpc_port(i)
    if rpchost:
        pieces = rpchost.split(':')
        if len(pieces) == 2:
            host, port = pieces
        else:
            host = rpchost
    return "http://%s:%s@%s:%d" % (user, password, host, int(port))
def wait_for_vpubd_start(process, url, i):
    """Block until the vpubd *process* answers RPC at *url*.

    Raises if the process exits during startup.  Connection-refused and
    RPC-in-warmup (-28) errors are tolerated while polling every 0.25 s.
    """
    while True:
        if process.poll() is not None:
            raise Exception('vpubd exited with status %i during initialization' % process.returncode)
        try:
            rpc = get_rpc_proxy(url, i)
            blocks = rpc.getblockcount()
            break # break out of loop on success
        except IOError as e:
            if e.errno != errno.ECONNREFUSED: # Port not yet open?
                raise # unknown IO error
        except JSONRPCException as e: # Initialization phase
            if e.error['code'] != -28: # RPC in warmup?
                raise # unknown JSON RPC exception
        time.sleep(0.25)
def initialize_chain(test_dir, num_nodes):
    """Create (or reuse) a cached 200-block regtest chain and copy the
    first *num_nodes* node datadirs from 'cache' into *test_dir*.

    The cache is (re)built with MAX_NODES nodes; only the first 4 nodes
    generate coins (25 mature + 25 immature blocks each), with mocked
    timestamps 10 minutes apart starting 2010 minutes in the past.
    """
    assert num_nodes <= MAX_NODES
    create_cache = False
    for i in range(MAX_NODES):
        if not os.path.isdir(os.path.join('cache', 'node'+str(i))):
            create_cache = True
            break
    if create_cache:
        # Find and delete old cache directories if any exist.
        for i in range(MAX_NODES):
            if os.path.isdir(os.path.join("cache","node"+str(i))):
                shutil.rmtree(os.path.join("cache","node"+str(i)))
        # Create cache directories, run vpubds:
        for i in range(MAX_NODES):
            datadir=initialize_datadir("cache", i)
            args = [ os.getenv("VPUBD", "vpubd"), "-server", "-keypool=1", "-datadir="+datadir, "-discover=0" ]
            if i > 0:
                args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
            vpubd_processes[i] = subprocess.Popen(args)
            if os.getenv("PYTHON_DEBUG", ""):
                print("initialize_chain: vpubd started, waiting for RPC to come up")
            wait_for_vpubd_start(vpubd_processes[i], rpc_url(i), i)
            if os.getenv("PYTHON_DEBUG", ""):
                print("initialize_chain: RPC succesfully started")
        rpcs = []
        for i in range(MAX_NODES):
            # BUG FIX: compute the url before use -- previously 'url' was
            # undefined here, so any connection error became a NameError.
            url = rpc_url(i)
            try:
                rpcs.append(get_rpc_proxy(url, i))
            except Exception:
                sys.stderr.write("Error connecting to "+url+"\n")
                sys.exit(1)
        # Create a 200-block-long chain; each of the 4 first nodes
        # gets 25 mature blocks and 25 immature.
        # Note: To preserve compatibility with older versions of
        # initialize_chain, only 4 nodes will generate coins.
        enable_mocktime()
        block_time = get_mocktime() - (201 * 10 * 60)
        for i in range(2):
            for peer in range(4):
                for j in range(25):
                    set_node_times(rpcs, block_time)
                    rpcs[peer].setgenerate(True, 1)
                    block_time += 10*60
                # Must sync before next peer starts generating blocks.
                sync_blocks(rpcs)
        # Shut them down, and clean up cache directories:
        stop_nodes(rpcs)
        disable_mocktime()
        for i in range(MAX_NODES):
            os.remove(log_filename("cache", i, "debug.log"))
            os.remove(log_filename("cache", i, "db.log"))
            os.remove(log_filename("cache", i, "peers.dat"))
            os.remove(log_filename("cache", i, "fee_estimates.dat"))
    for i in range(num_nodes):
        from_dir = os.path.join("cache", "node"+str(i))
        to_dir = os.path.join(test_dir, "node"+str(i))
        shutil.copytree(from_dir, to_dir)
        initialize_datadir(test_dir, i) # Overwrite port/rpcport in vpub.conf
def initialize_chain_clean(test_dir, num_nodes):
for i in range(num_nodes):
datadir=initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
if rpchost is None:
return []
match = re.match('(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
    """Start regtest vpubd node *i* under *dirname*; return its RPC proxy.

    extra_args: additional command-line options.
    rpchost: optional 'host[:port]' RPC override.
    timewait: RPC timeout in seconds.
    binary: alternative vpubd executable (default: $VPUBD or 'vpubd').
    """
    datadir = os.path.join(dirname, "node"+str(i))
    if binary is None:
        binary = os.getenv("VPUBD", "vpubd")
    # Fixed spork key so spork-related functional tests are deterministic.
    args = [ binary, "-datadir="+datadir, "-server", "-keypool=1", "-discover=0", "-rest", "-mocktime="+str(get_mocktime()), "-regtest", "-sporkkey=923EhWh2bJHynX6d4Tqt2Q75bhTDCT1b4kff3qzDKDZHZ6pkQs7"]
    if extra_args is not None: args.extend(extra_args)
    vpubd_processes[i] = subprocess.Popen(args)
    if os.getenv("PYTHON_DEBUG", ""):
        print("start_node: phroed started, waiting for RPC to come up")
    url = rpc_url(i, rpchost)
    wait_for_vpubd_start(vpubd_processes[i], url, i)
    if os.getenv("PYTHON_DEBUG", ""):
        print("start_node: RPC succesfully started")
    proxy = get_rpc_proxy(url, i, timeout=timewait)
    if COVERAGE_DIR:
        coverage.write_all_rpc_commands(COVERAGE_DIR, proxy)
    return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
if extra_args is None: extra_args = [ None for _ in range(num_nodes) ]
if binary is None: binary = [ None for _ in range(num_nodes) ]
rpcs = []
try:
for i in range(num_nodes):
rpcs.append(start_node(i, dirname, extra_args[i], rpchost, timewait=timewait, binary=binary[i]))
except: # If one node failed to start, stop the others
stop_nodes(rpcs)
raise
return rpcs
def log_filename(dirname, n_node, logname):
return os.path.join(dirname, "node"+str(n_node), "regtest", logname)
def stop_node(node, i):
try:
node.stop()
except http.client.CannotSendRequest as e:
print("WARN: Unable to stop node: " + repr(e))
vpubd_processes[i].wait(timeout=VPUBD_PROC_WAIT_TIMEOUT)
del vpubd_processes[i]
def stop_nodes(nodes):
for node in nodes:
try:
node.stop()
except http.client.CannotSendRequest as e:
print("WARN: Unable to stop node: " + repr(e))
del nodes[:] # Emptying array closes connections as a side effect
wait_vpubds()
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def wait_vpubds():
# Wait for all vpubds to cleanly exit
for vpubd in vpubd_processes.values():
vpubd.wait(timeout=VPUBD_PROC_WAIT_TIMEOUT)
vpubd_processes.clear()
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:"+str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
    """Return the vout index of *txid* whose value equals *amount*."""
    tx_data = node.getrawtransaction(txid, 1)
    for index, vout in enumerate(tx_data["vout"]):
        if vout["value"] == amount:
            return index
    raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
    """Collect confirmed utxos from *from_node* covering *amount_needed*.

    Returns (total_in, inputs); raises RuntimeError when the wallet
    cannot cover the amount.  Utxos are shuffled for variety.
    """
    assert(confirmations_required >=0)
    utxo = from_node.listunspent(confirmations_required)
    random.shuffle(utxo)
    inputs = []
    total_in = Decimal("0.00000000")
    while total_in < amount_needed and len(utxo) > 0:
        t = utxo.pop()
        total_in += t["amount"]
        inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
    if total_in < amount_needed:
        raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
    return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
    """Build a change-output dict worth amount_in - amount_out - fee.

    When the change is large (more than twice the spend), it is split
    across two fresh addresses to break up big inputs.  May return an
    empty dict when there is no change.
    """
    outputs = {}
    amount = amount_out+fee
    change = amount_in - amount
    if change > amount*2:
        # Create an extra change output to break up big inputs
        change_address = from_node.getnewaddress()
        # Split change in two, being careful of rounding:
        outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
        change = amount_in - amount - outputs[change_address]
    if change > 0:
        outputs[from_node.getnewaddress()] = change
    return outputs
def send_zeropri_transaction(from_node, to_node, amount, fee):
# Create a send-to-self with confirmed inputs:
self_address = from_node.getnewaddress()
(total_in, inputs) = gather_inputs(from_node, amount+fee*2)
outputs = make_change(from_node, total_in, amount+fee, fee)
outputs[self_address] = float(amount+fee)
self_rawtx = from_node.createrawtransaction(inputs, outputs)
self_signresult = from_node.signrawtransaction(self_rawtx)
self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
vout = find_output(from_node, self_txid, amount+fee)
# Now immediately spend the output to create a 1-input, 1-output
# zero-priority transaction:
inputs = [ { "txid" : self_txid, "vout" : vout } ]
outputs = { to_node.getnewaddress() : float(amount) }
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount+fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
def assert_fee_amount(fee, tx_size, fee_per_kB):
    """Assert *fee* matches tx_size * fee_per_kB within 2 bytes of slack."""
    target_fee = tx_size * fee_per_kB / 1000
    # Allow the wallet's estimation to be at most 2 bytes off.
    upper_bound = (tx_size + 2) * fee_per_kB / 1000
    if fee < target_fee:
        raise AssertionError("Fee of %s BTC too low! (Should be %s BTC)"%(str(fee), str(target_fee)))
    if fee > upper_bound:
        raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)"%(str(fee), str(target_fee)))
def assert_equal(thing1, thing2):
    """Raise AssertionError when the two values compare unequal."""
    if thing1 != thing2:
        message = "%s != %s" % (str(thing1), str(thing2))
        raise AssertionError(message)
def assert_greater_than(thing1, thing2):
    """Raise AssertionError unless thing1 > thing2."""
    if thing1 <= thing2:
        message = "%s <= %s" % (str(thing1), str(thing2))
        raise AssertionError(message)
def assert_raises(exc, fun, *args, **kwds):
assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
    """Assert that fun(*args, **kwds) raises *exc*.

    When *message* is not None it must occur in the JSON-RPC error text
    (e.error['message'] -- so *exc* is expected to be JSONRPCException
    in that case).  Any other exception type, or no exception, fails.
    """
    try:
        fun(*args, **kwds)
    except exc as e:
        if message is not None and message not in e.error['message']:
            raise AssertionError("Expected substring not found:"+e.error['message'])
    except Exception as e:
        raise AssertionError("Unexpected exception raised: "+type(e).__name__)
    else:
        raise AssertionError("No exception raised")
def assert_is_hex_string(string):
try:
int(string, 16)
except Exception as e:
raise AssertionError(
"Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
def assert_is_hash_string(string, length=64):
if not isinstance(string, str):
raise AssertionError("Expected a string, got type %r" % type(string))
elif length and len(string) != length:
raise AssertionError(
"String of length %d expected; got %d" % (length, len(string)))
elif not re.match('[abcdef0-9]+$', string):
raise AssertionError(
"String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find = False):
    """Assert that some object in *object_array* matching all key/value
    pairs of *to_match* also carries the *expected* key/value pairs.

    With should_not_find=True, assert instead that no object matches
    *to_match* (and *expected* must be empty).
    """
    if should_not_find:
        assert_equal(expected, { })
    num_matched = 0
    for item in object_array:
        # Skip items that fail any of the to_match filters.
        if not all(item[key] == value for key, value in to_match.items()):
            continue
        if should_not_find:
            num_matched = num_matched + 1
        for key, value in expected.items():
            if item[key] != value:
                raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
            num_matched = num_matched + 1
    if num_matched == 0 and not should_not_find:
        raise AssertionError("No objects matched %s"%(str(to_match)))
    if num_matched > 0 and should_not_find:
        raise AssertionError("Objects were found %s"%(str(to_match)))
def satoshi_round(amount):
    """Round *amount* down to 8 decimal places (one satoshi)."""
    one_satoshi = Decimal('0.00000001')
    return Decimal(amount).quantize(one_satoshi, rounding=ROUND_DOWN)
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
node.setgenerate(True, int(0.5*count)+101)
utxos = node.listunspent()
iterations = count - len(utxos)
addr1 = node.getnewaddress()
addr2 = node.getnewaddress()
if iterations <= 0:
return utxos
for i in range(iterations):
t = utxos.pop()
inputs = []
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr1] = satoshi_round(send_value/2)
outputs[addr2] = satoshi_round(send_value/2)
raw_tx = node.createrawtransaction(inputs, outputs)
signed_tx = node.signrawtransaction(raw_tx)["hex"]
txid = node.sendrawtransaction(signed_tx)
while (node.getmempoolinfo()['size'] > 0):
node.setgenerate(True, 1)
utxos = node.listunspent()
assert(len(utxos) >= count)
return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
    """Build the hex serialization of 128 large OP_RETURN txouts.

    Returns a 0x81 (= 129) output-count byte followed by 128 zero-value
    outputs each carrying a 512-byte OP_RETURN script, ready to splice
    into a raw transaction to inflate its size.
    """
    # OP_RETURN OP_PUSHDATA2 512 (0x6a 0x4d 0x0200) + 512 bytes of 0x01.
    script_pubkey = "6a4d0200" + "01" * 512
    # One txout: 8-byte zero value + compact-size script length
    # (0xfd 0x0402 = 516 little-endian) + the script itself.
    txout = "0000000000000000" + "fd0402" + script_pubkey
    # "81" is the new output count (128 padding outputs + 1 change).
    return "81" + txout * 128
def create_tx(node, coinbase, to_address, amount):
inputs = [{ "txid" : coinbase, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransaction(rawtx)
assert_equal(signresult["complete"], True)
return signresult["hex"]
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, fee):
addr = node.getnewaddress()
txids = []
for i in range(len(utxos)):
t = utxos.pop()
inputs = []
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr] = satoshi_round(send_value)
rawtx = node.createrawtransaction(inputs, outputs)
newtx = rawtx[0:92]
newtx = newtx + txouts
newtx = newtx + rawtx[94:]
signresult = node.signrawtransaction(newtx, None, None, "NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
txids.append(txid)
return txids
def get_bip9_status(node, key):
    """Return the bip9_softforks entry named *key* from *node*."""
    chain_info = node.getblockchaininfo()
    return chain_info['bip9_softforks'][key]
| true | true |
f71b49b3ed0e3ba6b9a8f90ca5e35100450fb249 | 13,385 | py | Python | skimage/segmentation/tests/test_random_walker.py | thewtex/scikit-image | 22bb6b94698b8889cbdf26b25d9e4fdb8b968d97 | [
"BSD-3-Clause"
] | 1 | 2019-01-12T13:17:32.000Z | 2019-01-12T13:17:32.000Z | skimage/segmentation/tests/test_random_walker.py | thewtex/scikit-image | 22bb6b94698b8889cbdf26b25d9e4fdb8b968d97 | [
"BSD-3-Clause"
] | null | null | null | skimage/segmentation/tests/test_random_walker.py | thewtex/scikit-image | 22bb6b94698b8889cbdf26b25d9e4fdb8b968d97 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
from skimage.segmentation import random_walker
from skimage.transform import resize
from skimage._shared._warnings import expected_warnings
from skimage._shared import testing
# older versions of scipy raise a warning with new NumPy because they use
# numpy.rank() instead of arr.ndim or numpy.linalg.matrix_rank.
SCIPY_EXPECTED = 'numpy.linalg.matrix_rank|\A\Z'
PYAMG_EXPECTED_WARNING = 'pyamg|\A\Z'
PYAMG_SCIPY_EXPECTED = SCIPY_EXPECTED + '|' + PYAMG_EXPECTED_WARNING
def make_2d_syntheticdata(lx, ly=None):
    """Build a noisy 2D image with a hollow bright square and two seeds
    (label 1 outside the square, label 2 inside it)."""
    if ly is None:
        ly = lx
    np.random.seed(1234)
    # Background: zero mean Gaussian noise at amplitude 0.1.
    data = np.zeros((lx, ly)) + 0.1 * np.random.randn(lx, ly)
    small_l = int(lx // 5)
    cx, cy = lx // 2, ly // 2
    # Bright square of side 2*small_l centred in the image...
    data[cx - small_l:cx + small_l, cy - small_l:cy + small_l] = 1
    # ...hollowed out so only a one-pixel-wide bright frame remains.
    interior = 0.1 * np.random.randn(2 * small_l - 2, 2 * small_l - 2)
    data[cx - small_l + 1:cx + small_l - 1,
         cy - small_l + 1:cy + small_l - 1] = interior
    # Cut a small gap in the top edge of the frame.
    data[cx - small_l, cy - small_l // 8:cy + small_l // 8] = 0
    seeds = np.zeros_like(data)
    seeds[lx // 5, ly // 5] = 1
    seeds[cx + small_l // 4, cy - small_l // 4] = 2
    return data, seeds
def make_3d_syntheticdata(lx, ly=None, lz=None):
    """Build a noisy 3D volume with a hollow bright cube and two seed
    voxels (label 1 outside the cube, label 2 inside it)."""
    if ly is None:
        ly = lx
    if lz is None:
        lz = lx
    np.random.seed(1234)
    # Background: zero mean Gaussian noise at amplitude 0.1.
    data = np.zeros((lx, ly, lz)) + 0.1 * np.random.randn(lx, ly, lz)
    small_l = int(lx // 5)
    cx, cy, cz = lx // 2, ly // 2, lz // 2
    # Solid bright cube of side 2*small_l centred in the volume...
    data[cx - small_l:cx + small_l,
         cy - small_l:cy + small_l,
         cz - small_l:cz + small_l] = 1
    # ...emptied so only a one-voxel-thick shell remains.
    data[cx - small_l + 1:cx + small_l - 1,
         cy - small_l + 1:cy + small_l - 1,
         cz - small_l + 1:cz + small_l - 1] = 0
    # Punch a hole through one face of the shell.
    hole_size = np.max([1, small_l // 8])
    data[cx - small_l,
         cy - hole_size:cy + hole_size,
         cz - hole_size:cz + hole_size] = 0
    seeds = np.zeros_like(data)
    seeds[lx // 5, ly // 5, lz // 5] = 1
    seeds[cx + small_l // 4,
          cy - small_l // 4,
          cz - small_l // 4] = 2
    return data, seeds
def test_2d_bf():
    """Brute-force mode segments the 2D synthetic square and returns
    per-label probabilities when requested."""
    lx = 70
    ly = 100
    data, labels = make_2d_syntheticdata(lx, ly)
    labels_bf = random_walker(data, labels, beta=90, mode='bf')
    # The hollow-square interior must all get seed label 2.
    assert (labels_bf[25:45, 40:60] == 2).all()
    assert data.shape == labels.shape
    full_prob_bf = random_walker(data, labels, beta=90, mode='bf',
                                 return_full_prob=True)
    # Inside the square, label 2 must be at least as probable as label 1.
    assert (full_prob_bf[1, 25:45, 40:60] >=
            full_prob_bf[0, 25:45, 40:60]).all()
    assert data.shape == labels.shape
    # Now test with more than two labels
    labels[55, 80] = 3
    full_prob_bf = random_walker(data, labels, beta=90, mode='bf',
                                 return_full_prob=True)
    assert (full_prob_bf[1, 25:45, 40:60] >=
            full_prob_bf[0, 25:45, 40:60]).all()
    # One probability map per label.
    assert len(full_prob_bf) == 3
    assert data.shape == labels.shape
def test_2d_cg():
lx = 70
ly = 100
data, labels = make_2d_syntheticdata(lx, ly)
with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
labels_cg = random_walker(data, labels, beta=90, mode='cg')
assert (labels_cg[25:45, 40:60] == 2).all()
assert data.shape == labels.shape
with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
full_prob = random_walker(data, labels, beta=90, mode='cg',
return_full_prob=True)
assert (full_prob[1, 25:45, 40:60] >=
full_prob[0, 25:45, 40:60]).all()
assert data.shape == labels.shape
return data, labels_cg
def test_2d_cg_mg():
lx = 70
ly = 100
data, labels = make_2d_syntheticdata(lx, ly)
expected = 'scipy.sparse.sparsetools|%s' % PYAMG_SCIPY_EXPECTED
with expected_warnings([expected]):
labels_cg_mg = random_walker(data, labels, beta=90, mode='cg_mg')
assert (labels_cg_mg[25:45, 40:60] == 2).all()
assert data.shape == labels.shape
with expected_warnings([expected]):
full_prob = random_walker(data, labels, beta=90, mode='cg_mg',
return_full_prob=True)
assert (full_prob[1, 25:45, 40:60] >=
full_prob[0, 25:45, 40:60]).all()
assert data.shape == labels.shape
return data, labels_cg_mg
def test_types():
lx = 70
ly = 100
data, labels = make_2d_syntheticdata(lx, ly)
data = 255 * (data - data.min()) // (data.max() - data.min())
data = data.astype(np.uint8)
with expected_warnings([PYAMG_SCIPY_EXPECTED]):
labels_cg_mg = random_walker(data, labels, beta=90, mode='cg_mg')
assert (labels_cg_mg[25:45, 40:60] == 2).all()
assert data.shape == labels.shape
return data, labels_cg_mg
def test_reorder_labels():
lx = 70
ly = 100
data, labels = make_2d_syntheticdata(lx, ly)
labels[labels == 2] = 4
labels_bf = random_walker(data, labels, beta=90, mode='bf')
assert (labels_bf[25:45, 40:60] == 2).all()
assert data.shape == labels.shape
return data, labels_bf
def test_2d_inactive():
lx = 70
ly = 100
data, labels = make_2d_syntheticdata(lx, ly)
labels[10:20, 10:20] = -1
labels[46:50, 33:38] = -2
labels = random_walker(data, labels, beta=90)
assert (labels.reshape((lx, ly))[25:45, 40:60] == 2).all()
assert data.shape == labels.shape
return data, labels
def test_3d():
n = 30
lx, ly, lz = n, n, n
data, labels = make_3d_syntheticdata(lx, ly, lz)
with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
labels = random_walker(data, labels, mode='cg')
assert (labels.reshape(data.shape)[13:17, 13:17, 13:17] == 2).all()
assert data.shape == labels.shape
return data, labels
def test_3d_inactive():
n = 30
lx, ly, lz = n, n, n
data, labels = make_3d_syntheticdata(lx, ly, lz)
old_labels = np.copy(labels)
labels[5:25, 26:29, 26:29] = -1
after_labels = np.copy(labels)
with expected_warnings(['"cg" mode|CObject type' + '|' + SCIPY_EXPECTED]):
labels = random_walker(data, labels, mode='cg')
assert (labels.reshape(data.shape)[13:17, 13:17, 13:17] == 2).all()
assert data.shape == labels.shape
return data, labels, old_labels, after_labels
def test_multispectral_2d():
lx, ly = 70, 100
data, labels = make_2d_syntheticdata(lx, ly)
data = data[..., np.newaxis].repeat(2, axis=-1) # Expect identical output
with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
multi_labels = random_walker(data, labels, mode='cg',
multichannel=True)
assert data[..., 0].shape == labels.shape
with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
single_labels = random_walker(data[..., 0], labels, mode='cg')
assert (multi_labels.reshape(labels.shape)[25:45, 40:60] == 2).all()
assert data[..., 0].shape == labels.shape
return data, multi_labels, single_labels, labels
def test_multispectral_3d():
n = 30
lx, ly, lz = n, n, n
data, labels = make_3d_syntheticdata(lx, ly, lz)
data = data[..., np.newaxis].repeat(2, axis=-1) # Expect identical output
with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
multi_labels = random_walker(data, labels, mode='cg',
multichannel=True)
assert data[..., 0].shape == labels.shape
with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
single_labels = random_walker(data[..., 0], labels, mode='cg')
assert (multi_labels.reshape(labels.shape)[13:17, 13:17, 13:17] == 2).all()
assert (single_labels.reshape(labels.shape)[13:17, 13:17, 13:17] == 2).all()
assert data[..., 0].shape == labels.shape
return data, multi_labels, single_labels, labels
def test_spacing_0():
n = 30
lx, ly, lz = n, n, n
data, _ = make_3d_syntheticdata(lx, ly, lz)
# Rescale `data` along Z axis
data_aniso = np.zeros((n, n, n // 2))
for i, yz in enumerate(data):
data_aniso[i, :, :] = resize(yz, (n, n // 2),
mode='constant',
anti_aliasing=False)
# Generate new labels
small_l = int(lx // 5)
labels_aniso = np.zeros_like(data_aniso)
labels_aniso[lx // 5, ly // 5, lz // 5] = 1
labels_aniso[lx // 2 + small_l // 4,
ly // 2 - small_l // 4,
lz // 4 - small_l // 8] = 2
# Test with `spacing` kwarg
with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
labels_aniso = random_walker(data_aniso, labels_aniso, mode='cg',
spacing=(1., 1., 0.5))
assert (labels_aniso[13:17, 13:17, 7:9] == 2).all()
def test_spacing_1():
n = 30
lx, ly, lz = n, n, n
data, _ = make_3d_syntheticdata(lx, ly, lz)
# Rescale `data` along Y axis
# `resize` is not yet 3D capable, so this must be done by looping in 2D.
data_aniso = np.zeros((n, n * 2, n))
for i, yz in enumerate(data):
data_aniso[i, :, :] = resize(yz, (n * 2, n),
mode='constant',
anti_aliasing=False)
# Generate new labels
small_l = int(lx // 5)
labels_aniso = np.zeros_like(data_aniso)
labels_aniso[lx // 5, ly // 5, lz // 5] = 1
labels_aniso[lx // 2 + small_l // 4,
ly - small_l // 2,
lz // 2 - small_l // 4] = 2
# Test with `spacing` kwarg
# First, anisotropic along Y
with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
labels_aniso = random_walker(data_aniso, labels_aniso, mode='cg',
spacing=(1., 2., 1.))
assert (labels_aniso[13:17, 26:34, 13:17] == 2).all()
# Rescale `data` along X axis
# `resize` is not yet 3D capable, so this must be done by looping in 2D.
data_aniso = np.zeros((n, n * 2, n))
for i in range(data.shape[1]):
data_aniso[i, :, :] = resize(data[:, 1, :], (n * 2, n),
mode='constant',
anti_aliasing=False)
# Generate new labels
small_l = int(lx // 5)
labels_aniso2 = np.zeros_like(data_aniso)
labels_aniso2[lx // 5, ly // 5, lz // 5] = 1
labels_aniso2[lx - small_l // 2,
ly // 2 + small_l // 4,
lz // 2 - small_l // 4] = 2
# Anisotropic along X
with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
labels_aniso2 = random_walker(data_aniso,
labels_aniso2,
mode='cg', spacing=(2., 1., 1.))
assert (labels_aniso2[26:34, 13:17, 13:17] == 2).all()
def test_trivial_cases():
    """Fully-labeled images are passed through unchanged (with warning)."""
    # When all voxels are labeled
    img = np.ones((10, 10))
    labels = np.ones((10, 10))
    with expected_warnings(["Returning provided labels"]):
        pass_through = random_walker(img, labels)
    np.testing.assert_array_equal(pass_through, labels)
    # When all voxels are labeled AND return_full_prob is True
    labels[:, :5] = 3
    expected = np.concatenate(((labels == 1)[..., np.newaxis],
                               (labels == 3)[..., np.newaxis]), axis=2)
    with expected_warnings(["Returning provided labels"]):
        test = random_walker(img, labels, return_full_prob=True)
    np.testing.assert_array_equal(test, expected)
def test_length2_spacing():
# If this passes without raising an exception (warnings OK), the new
# spacing code is working properly.
np.random.seed(42)
img = np.ones((10, 10)) + 0.2 * np.random.normal(size=(10, 10))
labels = np.zeros((10, 10), dtype=np.uint8)
labels[2, 4] = 1
labels[6, 8] = 4
random_walker(img, labels, spacing=(1., 2.))
def test_bad_inputs():
    """Invalid dimensionality, spacing length, or mode raise ValueError."""
    # Too few dimensions
    img = np.ones(10)
    labels = np.arange(10)
    with testing.raises(ValueError):
        random_walker(img, labels)
    with testing.raises(ValueError):
        random_walker(img, labels, multichannel=True)
    # Too many dimensions
    np.random.seed(42)
    img = np.random.normal(size=(3, 3, 3, 3, 3))
    labels = np.arange(3 ** 5).reshape(img.shape)
    with testing.raises(ValueError):
        random_walker(img, labels)
    with testing.raises(ValueError):
        random_walker(img, labels, multichannel=True)
    # Spacing incorrect length
    img = np.random.normal(size=(10, 10))
    labels = np.zeros((10, 10))
    labels[2, 4] = 2
    labels[6, 8] = 5
    with testing.raises(ValueError):
        random_walker(img, labels, spacing=(1,))
    # Invalid mode
    img = np.random.normal(size=(10, 10))
    labels = np.zeros((10, 10))
    with testing.raises(ValueError):
        random_walker(img, labels, mode='bad')
def test_isolated_seeds():
np.random.seed(0)
a = np.random.random((7, 7))
mask = - np.ones(a.shape)
# This pixel is an isolated seed
mask[1, 1] = 1
# Unlabeled pixels
mask[3:, 3:] = 0
# Seeds connected to unlabeled pixels
mask[4, 4] = 2
mask[6, 6] = 1
# Test that no error is raised, and that labels of isolated seeds are OK
res = random_walker(a, mask)
assert res[1, 1] == 1
res = random_walker(a, mask, return_full_prob=True)
assert res[0, 1, 1] == 1
assert res[1, 1, 1] == 0
| 36.175676 | 80 | 0.582219 | import numpy as np
from skimage.segmentation import random_walker
from skimage.transform import resize
from skimage._shared._warnings import expected_warnings
from skimage._shared import testing
SCIPY_EXPECTED = 'numpy.linalg.matrix_rank|\A\Z'
PYAMG_EXPECTED_WARNING = 'pyamg|\A\Z'
PYAMG_SCIPY_EXPECTED = SCIPY_EXPECTED + '|' + PYAMG_EXPECTED_WARNING
def make_2d_syntheticdata(lx, ly=None):
if ly is None:
ly = lx
np.random.seed(1234)
data = np.zeros((lx, ly)) + 0.1 * np.random.randn(lx, ly)
small_l = int(lx // 5)
data[lx // 2 - small_l:lx // 2 + small_l,
ly // 2 - small_l:ly // 2 + small_l] = 1
data[lx // 2 - small_l + 1:lx // 2 + small_l - 1,
ly // 2 - small_l + 1:ly // 2 + small_l - 1] = (
0.1 * np.random.randn(2 * small_l - 2, 2 * small_l - 2))
data[lx // 2 - small_l, ly // 2 - small_l // 8:ly // 2 + small_l // 8] = 0
seeds = np.zeros_like(data)
seeds[lx // 5, ly // 5] = 1
seeds[lx // 2 + small_l // 4, ly // 2 - small_l // 4] = 2
return data, seeds
def make_3d_syntheticdata(lx, ly=None, lz=None):
if ly is None:
ly = lx
if lz is None:
lz = lx
np.random.seed(1234)
data = np.zeros((lx, ly, lz)) + 0.1 * np.random.randn(lx, ly, lz)
small_l = int(lx // 5)
data[lx // 2 - small_l:lx // 2 + small_l,
ly // 2 - small_l:ly // 2 + small_l,
lz // 2 - small_l:lz // 2 + small_l] = 1
data[lx // 2 - small_l + 1:lx // 2 + small_l - 1,
ly // 2 - small_l + 1:ly // 2 + small_l - 1,
lz // 2 - small_l + 1:lz // 2 + small_l - 1] = 0
hole_size = np.max([1, small_l // 8])
data[lx // 2 - small_l,
ly // 2 - hole_size:ly // 2 + hole_size,
lz // 2 - hole_size:lz // 2 + hole_size] = 0
seeds = np.zeros_like(data)
seeds[lx // 5, ly // 5, lz // 5] = 1
seeds[lx // 2 + small_l // 4,
ly // 2 - small_l // 4,
lz // 2 - small_l // 4] = 2
return data, seeds
def test_2d_bf():
lx = 70
ly = 100
data, labels = make_2d_syntheticdata(lx, ly)
labels_bf = random_walker(data, labels, beta=90, mode='bf')
assert (labels_bf[25:45, 40:60] == 2).all()
assert data.shape == labels.shape
full_prob_bf = random_walker(data, labels, beta=90, mode='bf',
return_full_prob=True)
assert (full_prob_bf[1, 25:45, 40:60] >=
full_prob_bf[0, 25:45, 40:60]).all()
assert data.shape == labels.shape
labels[55, 80] = 3
full_prob_bf = random_walker(data, labels, beta=90, mode='bf',
return_full_prob=True)
assert (full_prob_bf[1, 25:45, 40:60] >=
full_prob_bf[0, 25:45, 40:60]).all()
assert len(full_prob_bf) == 3
assert data.shape == labels.shape
def test_2d_cg():
lx = 70
ly = 100
data, labels = make_2d_syntheticdata(lx, ly)
with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
labels_cg = random_walker(data, labels, beta=90, mode='cg')
assert (labels_cg[25:45, 40:60] == 2).all()
assert data.shape == labels.shape
with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
full_prob = random_walker(data, labels, beta=90, mode='cg',
return_full_prob=True)
assert (full_prob[1, 25:45, 40:60] >=
full_prob[0, 25:45, 40:60]).all()
assert data.shape == labels.shape
return data, labels_cg
def test_2d_cg_mg():
lx = 70
ly = 100
data, labels = make_2d_syntheticdata(lx, ly)
expected = 'scipy.sparse.sparsetools|%s' % PYAMG_SCIPY_EXPECTED
with expected_warnings([expected]):
labels_cg_mg = random_walker(data, labels, beta=90, mode='cg_mg')
assert (labels_cg_mg[25:45, 40:60] == 2).all()
assert data.shape == labels.shape
with expected_warnings([expected]):
full_prob = random_walker(data, labels, beta=90, mode='cg_mg',
return_full_prob=True)
assert (full_prob[1, 25:45, 40:60] >=
full_prob[0, 25:45, 40:60]).all()
assert data.shape == labels.shape
return data, labels_cg_mg
def test_types():
lx = 70
ly = 100
data, labels = make_2d_syntheticdata(lx, ly)
data = 255 * (data - data.min()) // (data.max() - data.min())
data = data.astype(np.uint8)
with expected_warnings([PYAMG_SCIPY_EXPECTED]):
labels_cg_mg = random_walker(data, labels, beta=90, mode='cg_mg')
assert (labels_cg_mg[25:45, 40:60] == 2).all()
assert data.shape == labels.shape
return data, labels_cg_mg
def test_reorder_labels():
lx = 70
ly = 100
data, labels = make_2d_syntheticdata(lx, ly)
labels[labels == 2] = 4
labels_bf = random_walker(data, labels, beta=90, mode='bf')
assert (labels_bf[25:45, 40:60] == 2).all()
assert data.shape == labels.shape
return data, labels_bf
def test_2d_inactive():
lx = 70
ly = 100
data, labels = make_2d_syntheticdata(lx, ly)
labels[10:20, 10:20] = -1
labels[46:50, 33:38] = -2
labels = random_walker(data, labels, beta=90)
assert (labels.reshape((lx, ly))[25:45, 40:60] == 2).all()
assert data.shape == labels.shape
return data, labels
def test_3d():
n = 30
lx, ly, lz = n, n, n
data, labels = make_3d_syntheticdata(lx, ly, lz)
with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
labels = random_walker(data, labels, mode='cg')
assert (labels.reshape(data.shape)[13:17, 13:17, 13:17] == 2).all()
assert data.shape == labels.shape
return data, labels
def test_3d_inactive():
n = 30
lx, ly, lz = n, n, n
data, labels = make_3d_syntheticdata(lx, ly, lz)
old_labels = np.copy(labels)
labels[5:25, 26:29, 26:29] = -1
after_labels = np.copy(labels)
with expected_warnings(['"cg" mode|CObject type' + '|' + SCIPY_EXPECTED]):
labels = random_walker(data, labels, mode='cg')
assert (labels.reshape(data.shape)[13:17, 13:17, 13:17] == 2).all()
assert data.shape == labels.shape
return data, labels, old_labels, after_labels
def test_multispectral_2d():
lx, ly = 70, 100
data, labels = make_2d_syntheticdata(lx, ly)
data = data[..., np.newaxis].repeat(2, axis=-1)
with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
multi_labels = random_walker(data, labels, mode='cg',
multichannel=True)
assert data[..., 0].shape == labels.shape
with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
single_labels = random_walker(data[..., 0], labels, mode='cg')
assert (multi_labels.reshape(labels.shape)[25:45, 40:60] == 2).all()
assert data[..., 0].shape == labels.shape
return data, multi_labels, single_labels, labels
def test_multispectral_3d():
n = 30
lx, ly, lz = n, n, n
data, labels = make_3d_syntheticdata(lx, ly, lz)
data = data[..., np.newaxis].repeat(2, axis=-1)
with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
multi_labels = random_walker(data, labels, mode='cg',
multichannel=True)
assert data[..., 0].shape == labels.shape
with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
single_labels = random_walker(data[..., 0], labels, mode='cg')
assert (multi_labels.reshape(labels.shape)[13:17, 13:17, 13:17] == 2).all()
assert (single_labels.reshape(labels.shape)[13:17, 13:17, 13:17] == 2).all()
assert data[..., 0].shape == labels.shape
return data, multi_labels, single_labels, labels
def test_spacing_0():
n = 30
lx, ly, lz = n, n, n
data, _ = make_3d_syntheticdata(lx, ly, lz)
data_aniso = np.zeros((n, n, n // 2))
for i, yz in enumerate(data):
data_aniso[i, :, :] = resize(yz, (n, n // 2),
mode='constant',
anti_aliasing=False)
small_l = int(lx // 5)
labels_aniso = np.zeros_like(data_aniso)
labels_aniso[lx // 5, ly // 5, lz // 5] = 1
labels_aniso[lx // 2 + small_l // 4,
ly // 2 - small_l // 4,
lz // 4 - small_l // 8] = 2
with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
labels_aniso = random_walker(data_aniso, labels_aniso, mode='cg',
spacing=(1., 1., 0.5))
assert (labels_aniso[13:17, 13:17, 7:9] == 2).all()
def test_spacing_1():
n = 30
lx, ly, lz = n, n, n
data, _ = make_3d_syntheticdata(lx, ly, lz)
data_aniso = np.zeros((n, n * 2, n))
for i, yz in enumerate(data):
data_aniso[i, :, :] = resize(yz, (n * 2, n),
mode='constant',
anti_aliasing=False)
small_l = int(lx // 5)
labels_aniso = np.zeros_like(data_aniso)
labels_aniso[lx // 5, ly // 5, lz // 5] = 1
labels_aniso[lx // 2 + small_l // 4,
ly - small_l // 2,
lz // 2 - small_l // 4] = 2
with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
labels_aniso = random_walker(data_aniso, labels_aniso, mode='cg',
spacing=(1., 2., 1.))
assert (labels_aniso[13:17, 26:34, 13:17] == 2).all()
data_aniso = np.zeros((n, n * 2, n))
for i in range(data.shape[1]):
data_aniso[i, :, :] = resize(data[:, 1, :], (n * 2, n),
mode='constant',
anti_aliasing=False)
small_l = int(lx // 5)
labels_aniso2 = np.zeros_like(data_aniso)
labels_aniso2[lx // 5, ly // 5, lz // 5] = 1
labels_aniso2[lx - small_l // 2,
ly // 2 + small_l // 4,
lz // 2 - small_l // 4] = 2
with expected_warnings(['"cg" mode' + '|' + SCIPY_EXPECTED]):
labels_aniso2 = random_walker(data_aniso,
labels_aniso2,
mode='cg', spacing=(2., 1., 1.))
assert (labels_aniso2[26:34, 13:17, 13:17] == 2).all()
def test_trivial_cases():
img = np.ones((10, 10))
labels = np.ones((10, 10))
with expected_warnings(["Returning provided labels"]):
pass_through = random_walker(img, labels)
np.testing.assert_array_equal(pass_through, labels)
labels[:, :5] = 3
expected = np.concatenate(((labels == 1)[..., np.newaxis],
(labels == 3)[..., np.newaxis]), axis=2)
with expected_warnings(["Returning provided labels"]):
test = random_walker(img, labels, return_full_prob=True)
np.testing.assert_array_equal(test, expected)
def test_length2_spacing():
np.random.seed(42)
img = np.ones((10, 10)) + 0.2 * np.random.normal(size=(10, 10))
labels = np.zeros((10, 10), dtype=np.uint8)
labels[2, 4] = 1
labels[6, 8] = 4
random_walker(img, labels, spacing=(1., 2.))
def test_bad_inputs():
img = np.ones(10)
labels = np.arange(10)
with testing.raises(ValueError):
random_walker(img, labels)
with testing.raises(ValueError):
random_walker(img, labels, multichannel=True)
np.random.seed(42)
img = np.random.normal(size=(3, 3, 3, 3, 3))
labels = np.arange(3 ** 5).reshape(img.shape)
with testing.raises(ValueError):
random_walker(img, labels)
with testing.raises(ValueError):
random_walker(img, labels, multichannel=True)
img = np.random.normal(size=(10, 10))
labels = np.zeros((10, 10))
labels[2, 4] = 2
labels[6, 8] = 5
with testing.raises(ValueError):
random_walker(img, labels, spacing=(1,))
img = np.random.normal(size=(10, 10))
labels = np.zeros((10, 10))
with testing.raises(ValueError):
random_walker(img, labels, mode='bad')
def test_isolated_seeds():
np.random.seed(0)
a = np.random.random((7, 7))
mask = - np.ones(a.shape)
mask[1, 1] = 1
mask[3:, 3:] = 0
mask[4, 4] = 2
mask[6, 6] = 1
res = random_walker(a, mask)
assert res[1, 1] == 1
res = random_walker(a, mask, return_full_prob=True)
assert res[0, 1, 1] == 1
assert res[1, 1, 1] == 0
| true | true |
f71b4ba03c952835c47e87d25a7a9beba942d977 | 8,309 | py | Python | mmdet/models/necks/mscatfpn.py | JHuang-CV/OD | 290bf90a5f210199b6a3750c88152f7dd2fbc276 | [
"Apache-2.0"
] | null | null | null | mmdet/models/necks/mscatfpn.py | JHuang-CV/OD | 290bf90a5f210199b6a3750c88152f7dd2fbc276 | [
"Apache-2.0"
] | null | null | null | mmdet/models/necks/mscatfpn.py | JHuang-CV/OD | 290bf90a5f210199b6a3750c88152f7dd2fbc276 | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import xavier_init
from mmdet.core import auto_fp16
from ..registry import NECKS
from ..utils import ConvModule
from mmdet.ops.context_block import ContextBlock
from mmdet.models.plugins.squeeze_excitation import ChannelSELayer
@NECKS.register_module
class MSCATFPN(nn.Module):
def __init__(self,
in_channels,
out_channels,
num_outs,
start_level=0,
end_level=-1,
add_extra_convs=False,
extra_convs_on_inputs=True,
relu_before_extra_convs=False,
no_norm_on_lateral=False,
conv_cfg=None,
norm_cfg=None,
activation=None):
super(MSCATFPN, self).__init__()
assert isinstance(in_channels, list)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_ins = len(in_channels)
self.num_outs = num_outs
self.activation = activation
self.relu_before_extra_convs = relu_before_extra_convs
self.no_norm_on_lateral = no_norm_on_lateral
self.norm_cfg = norm_cfg
self.fp16_enabled = False
self.epsilon = 1e-4
self.se = ChannelSELayer(768)
if end_level == -1:
self.backbone_end_level = self.num_ins
assert num_outs >= self.num_ins - start_level
else:
# if end_level < inputs, no extra level is allowed
self.backbone_end_level = end_level
assert end_level <= len(in_channels)
assert num_outs == end_level - start_level
self.start_level = start_level
self.end_level = end_level
self.add_extra_convs = add_extra_convs
self.extra_convs_on_inputs = extra_convs_on_inputs
self.lateral_convs = nn.ModuleList()
self.fpn_convs = nn.ModuleList()
self.cat_convs = nn.ModuleList()
self.add_convs = nn.ModuleList()
#self.gc_block = nn.ModuleList()
self.relu = nn.ReLU()
self.gc_block1 = ContextBlock(inplanes=256, ratio=1./4.)
self.gc_block2 = ContextBlock(inplanes=256, ratio=1. / 4.)
self.scat_conv = ConvModule(
out_channels * (self.backbone_end_level-self.start_level),
out_channels,
3,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
activation=self.activation,
inplace=False)
self.c3_w = nn.Parameter(torch.ones(3, dtype=torch.float32), requires_grad=True)
self.c4_w = nn.Parameter(torch.ones(3, dtype=torch.float32), requires_grad=True)
self.c5_w = nn.Parameter(torch.ones(3, dtype=torch.float32), requires_grad=True)
for i in range(self.start_level, self.backbone_end_level):
l_conv = ConvModule(
in_channels[i],
out_channels,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg if not self.no_norm_on_lateral else None,
activation=self.activation,
inplace=False)
cat_conv = ConvModule(
out_channels * (self.backbone_end_level-self.start_level),
out_channels,
3,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
activation=self.activation,
inplace=False)
add_conv = ConvModule(
out_channels,
out_channels,
3,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
activation=self.activation,
inplace=False)
self.cat_convs.append(cat_conv)
self.lateral_convs.append(l_conv)
self.add_convs.append(add_conv)
#self.gc_block.append(ContextBlock(inplanes=256, ratio=1./4.))
# add extra conv layers (e.g., RetinaNet)
extra_levels = num_outs - self.backbone_end_level + self.start_level
if add_extra_convs and extra_levels >= 1:
for i in range(extra_levels):
if i == 0 and self.extra_convs_on_inputs:
in_channels = self.in_channels[self.backbone_end_level - 1]
else:
in_channels = out_channels
extra_fpn_conv = ConvModule(
in_channels,
out_channels,
3,
stride=2,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
activation=self.activation,
inplace=False)
self.fpn_convs.append(extra_fpn_conv)
# default init_weights for conv(msra) and norm in ConvModule
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
xavier_init(m, distribution='uniform')
@auto_fp16()
def forward(self, inputs):
assert len(inputs) == len(self.in_channels)
# build laterals
laterals = [
lateral_conv(inputs[i + self.start_level])
for i, lateral_conv in enumerate(self.lateral_convs)
]
used_backbone_levels = len(laterals)
mulscale_per_level = []
for i in range(used_backbone_levels):
level = []
m = i - 0
n = used_backbone_levels - 1 - i
level.append(laterals[i])
for x in range(m):
level.insert(0, F.interpolate(level[0], scale_factor=2, mode='nearest'))
for y in range(n):
level.append(F.max_pool2d(level[-1], 2, stride=2))
mulscale_per_level.append(level)
sglscale_per_level = list(zip(*mulscale_per_level))
feat_cat = [torch.cat(scale, 1)for scale in sglscale_per_level]
#channel_se = [self.se(cat_ft) for cat_ft in feat_cat]
mcat = [cat_conv(feat_cat[i]) for i, cat_conv in enumerate(self.cat_convs)]
#outs = [gc(outs[i]) for i, gc in enumerate(self.gc_block)]
mcat = [self.gc_block1(ft) for ft in mcat]
single_list = []
level = used_backbone_levels // 2
for i in range(used_backbone_levels):
if i < level:
single_list.append(F.max_pool2d(laterals[i], 2, stride=2))
elif i == level:
single_list.append(laterals[i])
else:
single_list.append(F.interpolate(laterals[i], scale_factor=2, mode='nearest'))
single_cat = torch.cat(single_list, 1)
single_cat = self.scat_conv(single_cat)
single_cat = self.gc_block2(single_cat)
m = level - 0
n = used_backbone_levels - 1 - level
scat = [single_cat]
for x in range(m):
scat.insert(0, F.interpolate(scat[0], scale_factor=2, mode='nearest'))
for y in range(n):
scat.append(F.max_pool2d(scat[-1], 2, stride=2))
# outs = [scat[i]+lateral for i, lateral in enumerate(laterals)]
# outs = [add_conv(outs[i]) for i, add_conv in enumerate(self.add_convs)]
outs = []
for i, (m, s, l) in enumerate(zip(mcat, scat, laterals)):
outs.append(
self.add_convs[i](m.sigmoid()*s/2 + l / 2)
)
if self.num_outs > used_backbone_levels:
if not self.add_extra_convs:
for i in range(self.num_outs - used_backbone_levels):
outs.append(F.max_pool2d(outs[-1], 1, stride=2))
else:
if self.extra_convs_on_inputs:
orig = inputs[self.backbone_end_level - 1]
outs.append(self.fpn_convs[0](orig))
else:
outs.append(self.fpn_convs[0](outs[-1]))
for i in range(1, self.num_outs-used_backbone_levels):
if self.relu_before_extra_convs:
outs.append(self.fpn_convs[i](F.relu(outs[-1])))
else:
outs.append(self.fpn_convs[i](outs[-1]))
return tuple(outs)
| 37.768182 | 94 | 0.564087 | import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import xavier_init
from mmdet.core import auto_fp16
from ..registry import NECKS
from ..utils import ConvModule
from mmdet.ops.context_block import ContextBlock
from mmdet.models.plugins.squeeze_excitation import ChannelSELayer
@NECKS.register_module
class MSCATFPN(nn.Module):
def __init__(self,
in_channels,
out_channels,
num_outs,
start_level=0,
end_level=-1,
add_extra_convs=False,
extra_convs_on_inputs=True,
relu_before_extra_convs=False,
no_norm_on_lateral=False,
conv_cfg=None,
norm_cfg=None,
activation=None):
super(MSCATFPN, self).__init__()
assert isinstance(in_channels, list)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_ins = len(in_channels)
self.num_outs = num_outs
self.activation = activation
self.relu_before_extra_convs = relu_before_extra_convs
self.no_norm_on_lateral = no_norm_on_lateral
self.norm_cfg = norm_cfg
self.fp16_enabled = False
self.epsilon = 1e-4
self.se = ChannelSELayer(768)
if end_level == -1:
self.backbone_end_level = self.num_ins
assert num_outs >= self.num_ins - start_level
else:
self.backbone_end_level = end_level
assert end_level <= len(in_channels)
assert num_outs == end_level - start_level
self.start_level = start_level
self.end_level = end_level
self.add_extra_convs = add_extra_convs
self.extra_convs_on_inputs = extra_convs_on_inputs
self.lateral_convs = nn.ModuleList()
self.fpn_convs = nn.ModuleList()
self.cat_convs = nn.ModuleList()
self.add_convs = nn.ModuleList()
self.relu = nn.ReLU()
self.gc_block1 = ContextBlock(inplanes=256, ratio=1./4.)
self.gc_block2 = ContextBlock(inplanes=256, ratio=1. / 4.)
self.scat_conv = ConvModule(
out_channels * (self.backbone_end_level-self.start_level),
out_channels,
3,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
activation=self.activation,
inplace=False)
self.c3_w = nn.Parameter(torch.ones(3, dtype=torch.float32), requires_grad=True)
self.c4_w = nn.Parameter(torch.ones(3, dtype=torch.float32), requires_grad=True)
self.c5_w = nn.Parameter(torch.ones(3, dtype=torch.float32), requires_grad=True)
for i in range(self.start_level, self.backbone_end_level):
l_conv = ConvModule(
in_channels[i],
out_channels,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg if not self.no_norm_on_lateral else None,
activation=self.activation,
inplace=False)
cat_conv = ConvModule(
out_channels * (self.backbone_end_level-self.start_level),
out_channels,
3,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
activation=self.activation,
inplace=False)
add_conv = ConvModule(
out_channels,
out_channels,
3,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
activation=self.activation,
inplace=False)
self.cat_convs.append(cat_conv)
self.lateral_convs.append(l_conv)
self.add_convs.append(add_conv)
extra_levels = num_outs - self.backbone_end_level + self.start_level
if add_extra_convs and extra_levels >= 1:
for i in range(extra_levels):
if i == 0 and self.extra_convs_on_inputs:
in_channels = self.in_channels[self.backbone_end_level - 1]
else:
in_channels = out_channels
extra_fpn_conv = ConvModule(
in_channels,
out_channels,
3,
stride=2,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
activation=self.activation,
inplace=False)
self.fpn_convs.append(extra_fpn_conv)
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
xavier_init(m, distribution='uniform')
@auto_fp16()
def forward(self, inputs):
assert len(inputs) == len(self.in_channels)
laterals = [
lateral_conv(inputs[i + self.start_level])
for i, lateral_conv in enumerate(self.lateral_convs)
]
used_backbone_levels = len(laterals)
mulscale_per_level = []
for i in range(used_backbone_levels):
level = []
m = i - 0
n = used_backbone_levels - 1 - i
level.append(laterals[i])
for x in range(m):
level.insert(0, F.interpolate(level[0], scale_factor=2, mode='nearest'))
for y in range(n):
level.append(F.max_pool2d(level[-1], 2, stride=2))
mulscale_per_level.append(level)
sglscale_per_level = list(zip(*mulscale_per_level))
feat_cat = [torch.cat(scale, 1)for scale in sglscale_per_level]
mcat = [cat_conv(feat_cat[i]) for i, cat_conv in enumerate(self.cat_convs)]
mcat = [self.gc_block1(ft) for ft in mcat]
single_list = []
level = used_backbone_levels // 2
for i in range(used_backbone_levels):
if i < level:
single_list.append(F.max_pool2d(laterals[i], 2, stride=2))
elif i == level:
single_list.append(laterals[i])
else:
single_list.append(F.interpolate(laterals[i], scale_factor=2, mode='nearest'))
single_cat = torch.cat(single_list, 1)
single_cat = self.scat_conv(single_cat)
single_cat = self.gc_block2(single_cat)
m = level - 0
n = used_backbone_levels - 1 - level
scat = [single_cat]
for x in range(m):
scat.insert(0, F.interpolate(scat[0], scale_factor=2, mode='nearest'))
for y in range(n):
scat.append(F.max_pool2d(scat[-1], 2, stride=2))
outs = []
for i, (m, s, l) in enumerate(zip(mcat, scat, laterals)):
outs.append(
self.add_convs[i](m.sigmoid()*s/2 + l / 2)
)
if self.num_outs > used_backbone_levels:
if not self.add_extra_convs:
for i in range(self.num_outs - used_backbone_levels):
outs.append(F.max_pool2d(outs[-1], 1, stride=2))
else:
if self.extra_convs_on_inputs:
orig = inputs[self.backbone_end_level - 1]
outs.append(self.fpn_convs[0](orig))
else:
outs.append(self.fpn_convs[0](outs[-1]))
for i in range(1, self.num_outs-used_backbone_levels):
if self.relu_before_extra_convs:
outs.append(self.fpn_convs[i](F.relu(outs[-1])))
else:
outs.append(self.fpn_convs[i](outs[-1]))
return tuple(outs)
| true | true |
f71b4bb600bb418ed1ef7e86a5615b6ad8bfabf3 | 2,753 | py | Python | python/dgl/nn/mxnet/conv/agnnconv.py | jinghuix/dgl | fae26dd15caac92458a08ad34889086e1e333ddd | [
"Apache-2.0"
] | 2 | 2020-07-24T19:26:51.000Z | 2021-08-21T21:04:11.000Z | python/dgl/nn/mxnet/conv/agnnconv.py | jinghuix/dgl | fae26dd15caac92458a08ad34889086e1e333ddd | [
"Apache-2.0"
] | null | null | null | python/dgl/nn/mxnet/conv/agnnconv.py | jinghuix/dgl | fae26dd15caac92458a08ad34889086e1e333ddd | [
"Apache-2.0"
] | 1 | 2021-03-09T12:42:46.000Z | 2021-03-09T12:42:46.000Z | """MXNet Module for Attention-based Graph Neural Network layer"""
# pylint: disable= no-member, arguments-differ, invalid-name
import mxnet as mx
from mxnet.gluon import nn
from .... import function as fn
from ..softmax import edge_softmax
from ..utils import normalize
from ....utils import expand_as_pair
class AGNNConv(nn.Block):
r"""Attention-based Graph Neural Network layer from paper `Attention-based
Graph Neural Network for Semi-Supervised Learning
<https://arxiv.org/abs/1803.03735>`__.
.. math::
H^{l+1} = P H^{l}
where :math:`P` is computed as:
.. math::
P_{ij} = \mathrm{softmax}_i ( \beta \cdot \cos(h_i^l, h_j^l))
Parameters
----------
init_beta : float, optional
The :math:`\beta` in the formula.
learn_beta : bool, optional
If True, :math:`\beta` will be learnable parameter.
"""
def __init__(self,
init_beta=1.,
learn_beta=True):
super(AGNNConv, self).__init__()
with self.name_scope():
self.beta = self.params.get('beta',
shape=(1,),
grad_req='write' if learn_beta else 'null',
init=mx.init.Constant(init_beta))
def forward(self, graph, feat):
r"""Compute AGNN Layer.
Parameters
----------
graph : DGLGraph
The graph.
feat : mxnet.NDArray
The input feature of shape :math:`(N, *)` :math:`N` is the
number of nodes, and :math:`*` could be of any shape.
If a pair of mxnet.NDArray is given, the pair must contain two tensors of shape
:math:`(N_{in}, *)` and :math:`(N_{out}, *})`, the the :math:`*` in the later
tensor must equal the previous one.
Returns
-------
mxnet.NDArray
The output feature of shape :math:`(N, *)` where :math:`*`
should be the same as input shape.
"""
with graph.local_scope():
feat_src, feat_dst = expand_as_pair(feat, graph)
graph.srcdata['h'] = feat_src
graph.srcdata['norm_h'] = normalize(feat_src, p=2, axis=-1)
if isinstance(feat, tuple) or graph.is_block:
graph.dstdata['norm_h'] = normalize(feat_dst, p=2, axis=-1)
# compute cosine distance
graph.apply_edges(fn.u_dot_v('norm_h', 'norm_h', 'cos'))
cos = graph.edata.pop('cos')
e = self.beta.data(feat_src.context) * cos
graph.edata['p'] = edge_softmax(graph, e)
graph.update_all(fn.u_mul_e('h', 'p', 'm'), fn.sum('m', 'h'))
return graph.dstdata.pop('h')
| 36.706667 | 91 | 0.553578 |
import mxnet as mx
from mxnet.gluon import nn
from .... import function as fn
from ..softmax import edge_softmax
from ..utils import normalize
from ....utils import expand_as_pair
class AGNNConv(nn.Block):
def __init__(self,
init_beta=1.,
learn_beta=True):
super(AGNNConv, self).__init__()
with self.name_scope():
self.beta = self.params.get('beta',
shape=(1,),
grad_req='write' if learn_beta else 'null',
init=mx.init.Constant(init_beta))
def forward(self, graph, feat):
with graph.local_scope():
feat_src, feat_dst = expand_as_pair(feat, graph)
graph.srcdata['h'] = feat_src
graph.srcdata['norm_h'] = normalize(feat_src, p=2, axis=-1)
if isinstance(feat, tuple) or graph.is_block:
graph.dstdata['norm_h'] = normalize(feat_dst, p=2, axis=-1)
graph.apply_edges(fn.u_dot_v('norm_h', 'norm_h', 'cos'))
cos = graph.edata.pop('cos')
e = self.beta.data(feat_src.context) * cos
graph.edata['p'] = edge_softmax(graph, e)
graph.update_all(fn.u_mul_e('h', 'p', 'm'), fn.sum('m', 'h'))
return graph.dstdata.pop('h')
| true | true |
f71b4d5f7826768bee64b7feec106bd5368db512 | 1,099 | py | Python | test/test_arrow_result.py | mariusvniekerk/snowflake-connector-python | 4c6b728f9ca7ac9c8a318741924a963a5574e216 | [
"Apache-2.0"
] | null | null | null | test/test_arrow_result.py | mariusvniekerk/snowflake-connector-python | 4c6b728f9ca7ac9c8a318741924a963a5574e216 | [
"Apache-2.0"
] | null | null | null | test/test_arrow_result.py | mariusvniekerk/snowflake-connector-python | 4c6b728f9ca7ac9c8a318741924a963a5574e216 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2019 Snowflake Computing Inc. All right reserved.
#
import pytest
@pytest.mark.skip(
reason="Cython is not enabled in build env")
def test_select_with_num(conn_cnx):
with conn_cnx() as json_cnx:
with conn_cnx() as arrow_cnx:
row_count = 50000
sql_text = ("select seq4() as c1, uniform(1, 10, random(12)) as c2 from " +
"table(generator(rowcount=>50000)) order by c1")
cursor_json = json_cnx.cursor()
cursor_json.execute("alter session set query_result_format='JSON'")
cursor_json.execute(sql_text)
cursor_arrow = arrow_cnx.cursor()
cursor_arrow.execute("alter session set query_result_format='ARROW_FORCE'")
cursor_arrow.execute(sql_text)
for i in range(0, row_count):
(json_c1, json_c2) = cursor_json.fetchone()
(arrow_c1, arrow_c2) = cursor_arrow.fetchone()
assert json_c1 == arrow_c1
assert json_c2 == arrow_c2
| 36.633333 | 87 | 0.610555 |
import pytest
@pytest.mark.skip(
reason="Cython is not enabled in build env")
def test_select_with_num(conn_cnx):
with conn_cnx() as json_cnx:
with conn_cnx() as arrow_cnx:
row_count = 50000
sql_text = ("select seq4() as c1, uniform(1, 10, random(12)) as c2 from " +
"table(generator(rowcount=>50000)) order by c1")
cursor_json = json_cnx.cursor()
cursor_json.execute("alter session set query_result_format='JSON'")
cursor_json.execute(sql_text)
cursor_arrow = arrow_cnx.cursor()
cursor_arrow.execute("alter session set query_result_format='ARROW_FORCE'")
cursor_arrow.execute(sql_text)
for i in range(0, row_count):
(json_c1, json_c2) = cursor_json.fetchone()
(arrow_c1, arrow_c2) = cursor_arrow.fetchone()
assert json_c1 == arrow_c1
assert json_c2 == arrow_c2
| true | true |
f71b4eb0cc83cbb94a84bbec221dd9f3a3147026 | 25,712 | py | Python | draco/core/io.py | sjforeman/draco | b0ab40b6984637642b28a5485af1c09c9cf183f2 | [
"MIT"
] | null | null | null | draco/core/io.py | sjforeman/draco | b0ab40b6984637642b28a5485af1c09c9cf183f2 | [
"MIT"
] | null | null | null | draco/core/io.py | sjforeman/draco | b0ab40b6984637642b28a5485af1c09c9cf183f2 | [
"MIT"
] | null | null | null | """Tasks for reading and writing data.
Tasks
=====
.. autosummary::
:toctree:
LoadFiles
LoadMaps
LoadFilesFromParams
Save
Print
LoadBeamTransfer
File Groups
===========
Several tasks accept groups of files as arguments. These are specified in the YAML file as a dictionary like below.
.. code-block:: yaml
list_of_file_groups:
- tag: first_group # An optional tag naming the group
files:
- 'file1.h5'
- 'file[3-4].h5' # Globs are processed
- 'file7.h5'
- files: # No tag specified, implicitly gets the tag 'group_1'
- 'another_file1.h5'
- 'another_file2.h5'
single_group:
files: ['file1.h5', 'file2.h5']
"""
import os.path
import h5py
import numpy as np
from yaml import dump as yamldump
from caput import pipeline
from caput import config
from cora.util import units
from . import task
from ..util.truncate import bit_truncate_weights, bit_truncate_fixed
from .containers import SiderealStream, TimeStream, TrackBeam
# Per-container truncation parameters, keyed by container class.
# For each container:
#   dataset           - names of the datasets whose values get bit-truncated
#   weight_dataset    - for each entry of `dataset`, the weight dataset used
#                       to set the per-sample precision (None -> fall back to
#                       the fixed relative precision below)
#   fixed_precision   - relative precision used when no weight dataset applies
#   variance_increase - allowed fractional variance increase when truncating
#                       against the weights
TRUNC_SPEC = {
    SiderealStream: {
        "dataset": ["vis", "vis_weight"],
        "weight_dataset": ["vis_weight", None],
        "fixed_precision": 1e-4,
        "variance_increase": 1e-3,
    },
    TimeStream: {
        "dataset": ["vis", "vis_weight"],
        "weight_dataset": ["vis_weight", None],
        "fixed_precision": 1e-4,
        "variance_increase": 1e-3,
    },
    TrackBeam: {
        "dataset": ["beam", "weight"],
        "weight_dataset": ["weight", None],
        "fixed_precision": 1e-4,
        "variance_increase": 1e-3,
    },
}
def _list_of_filelists(files):
# Take in a list of lists/glob patterns of filenames
import glob
f2 = []
for filelist in files:
if isinstance(filelist, str):
filelist = glob.glob(filelist)
elif isinstance(filelist, list):
pass
else:
raise Exception("Must be list or glob pattern.")
f2.append(filelist)
return f2
def _list_or_glob(files):
# Take in a list of lists/glob patterns of filenames
import glob
if isinstance(files, str):
files = sorted(glob.glob(files))
elif isinstance(files, list):
pass
else:
raise ValueError("Argument must be list or glob pattern, got %s" % repr(files))
return files
def _list_of_filegroups(groups):
# Process a file group/groups
import glob
# Convert to list if the group was not included in a list
if not isinstance(groups, list):
groups = [groups]
# Iterate over groups, set the tag if needed, and process the file list
# through glob
for gi, group in enumerate(groups):
files = group["files"]
if "tag" not in group:
group["tag"] = "group_%i" % gi
flist = []
for fname in files:
flist += glob.glob(fname)
if not len(flist):
raise RuntimeError("No files in group exist (%s)." % files)
group["files"] = flist
return groups
class LoadMaps(task.MPILoggedTask):
    """Load a series of maps from files given in the tasks parameters.

    Maps are given as one, or a list of `File Groups` (see
    :mod:`draco.core.io`). Maps within the same group are added together
    before being passed on.

    Attributes
    ----------
    maps : list or dict
        A dictionary specifying a file group, or a list of them.
    """

    maps = config.Property(proptype=_list_of_filegroups)

    def next(self):
        """Load the next group of maps from disk and pass them on.

        Returns
        -------
        map : :class:`containers.Map`
            The element-wise sum of all maps in the group, tagged with the
            group's tag.

        Raises
        ------
        RuntimeError
            If maps within a group have inconsistent frequencies,
            polarisations or pixelisation.
        """

        from . import containers

        # Exit this task if we have eaten all the file groups
        if len(self.maps) == 0:
            raise pipeline.PipelineStopIteration

        group = self.maps.pop(0)

        map_stack = None

        # Iterate over all the files in the group, load them into a Map
        # container and add them all together
        for mfile in group["files"]:

            self.log.debug("Loading file %s", mfile)

            current_map = containers.Map.from_file(mfile, distributed=True)
            current_map.redistribute("freq")

            # Start the stack if needed
            if map_stack is None:
                map_stack = current_map

            # Otherwise, check that the new map has consistent frequencies,
            # nside and pol and stack up.
            else:

                # NOTE: these checks previously used `.all()`, which only
                # raised when *every* entry disagreed; `.any()` catches a
                # single mismatched entry.
                if (current_map.freq != map_stack.freq).any():
                    raise RuntimeError("Maps do not have consistent frequencies.")

                if (
                    current_map.index_map["pol"] != map_stack.index_map["pol"]
                ).any():
                    raise RuntimeError("Maps do not have the same polarisations.")

                if (
                    current_map.index_map["pixel"] != map_stack.index_map["pixel"]
                ).any():
                    raise RuntimeError("Maps do not have the same pixelisation.")

                map_stack.map[:] += current_map.map[:]

        # Assign a tag to the stack of maps
        map_stack.attrs["tag"] = group["tag"]

        return map_stack
class LoadFITSCatalog(task.SingleTask):
    """Load an SDSS-style FITS source catalog.

    Catalogs are given as one, or a list of `File Groups` (see
    :mod:`draco.core.io`). Catalogs within the same group are combined together
    before being passed on.

    Attributes
    ----------
    catalogs : list or dict
        A dictionary specifying a file group, or a list of them.
    z_range : list, optional
        Select only sources with a redshift within the given range.
    freq_range : list, optional
        Select only sources with a 21cm line freq within the given range. Overrides
        `z_range`.
    """

    catalogs = config.Property(proptype=_list_of_filegroups)
    z_range = config.list_type(type_=float, length=2, default=None)
    freq_range = config.list_type(type_=float, length=2, default=None)

    def process(self):
        """Load the groups of catalogs from disk, concatenate them and pass them on.

        Returns
        -------
        catalog : :class:`containers.SpectroscopicCatalog`
        """
        from astropy.io import fits
        from . import containers

        # Exit this task if we have eaten all the file groups
        if len(self.catalogs) == 0:
            raise pipeline.PipelineStopIteration

        group = self.catalogs.pop(0)

        # Set the redshift selection
        if self.freq_range:
            # Convert the 21cm frequency interval to the equivalent redshift
            # interval (frequency and redshift are inversely ordered)
            zl = units.nu21 / self.freq_range[1] - 1
            zh = units.nu21 / self.freq_range[0] - 1
            self.z_range = (zl, zh)

        if self.z_range:
            zl, zh = self.z_range
            self.log.info(f"Applying redshift selection {zl:.2f} <= z <= {zh:.2f}")

        # Load the data only on rank=0 and then broadcast
        if self.comm.rank == 0:
            # Read each catalog file and stack the (RA, DEC, Z) position arrays
            catalog_stack = []
            for cfile in group["files"]:

                self.log.debug("Loading file %s", cfile)

                # TODO: read out the weights from the catalogs
                with fits.open(cfile, mode="readonly") as cat:
                    pos = np.array([cat[1].data[col] for col in ["RA", "DEC", "Z"]])

                # Apply any redshift selection to the objects
                if self.z_range:
                    zsel = (pos[2] >= self.z_range[0]) & (pos[2] <= self.z_range[1])
                    pos = pos[:, zsel]

                catalog_stack.append(pos)

            # NOTE: this one is tricky, for some reason the concatenate in here
            # produces a non C contiguous array, so we need to ensure that otherwise
            # the broadcasting will get very confused
            catalog_array = np.concatenate(catalog_stack, axis=-1).astype(np.float64)
            catalog_array = np.ascontiguousarray(catalog_array)
            num_objects = catalog_array.shape[-1]
        else:
            num_objects = None
            catalog_array = None

        # Broadcast the size of the catalog to all ranks, create the target array and
        # broadcast into it
        num_objects = self.comm.bcast(num_objects, root=0)
        self.log.debug(f"Constructing catalog with {num_objects} objects.")

        if self.comm.rank != 0:
            catalog_array = np.zeros((3, num_objects), dtype=np.float64)
        self.comm.Bcast(catalog_array, root=0)

        catalog = containers.SpectroscopicCatalog(object_id=num_objects)
        catalog["position"]["ra"] = catalog_array[0]
        catalog["position"]["dec"] = catalog_array[1]
        catalog["redshift"]["z"] = catalog_array[2]
        # No per-object redshift errors are read from the FITS input
        catalog["redshift"]["z_error"] = 0

        # Assign a tag to the stack of catalogs
        catalog.attrs["tag"] = group["tag"]

        return catalog
class LoadFilesFromParams(task.SingleTask):
    """Load data from files given in the tasks parameters.

    Attributes
    ----------
    files : glob pattern, or list
        Can either be a glob pattern, or lists of actual files.
    distributed : bool, optional
        Whether the file should be loaded distributed across ranks.
    convert_strings : bool, optional
        Convert strings to unicode when loading.
    selections : dict, optional
        A dictionary of axis selections. See the section below for details.

    Selections
    ----------
    Selections can be given to limit the data read to specified subsets. They can be
    given for any named axis in the container.

    Selections can be given as a slice with an `<axis name>_range` key with either
    `[start, stop]` or `[start, stop, step]` as the value. Alternatively a list of
    explicit indices to extract can be given with the `<axis name>_index` key, and
    the value is a list of the indices. If both `<axis name>_range` and `<axis
    name>_index` keys are given the former will take precedence, but you should
    clearly avoid doing this.

    Additionally index based selections currently don't work for distributed reads.

    Here's an example in the YAML format that the pipeline uses:

    .. code-block:: yaml

        selections:
            freq_range: [256, 512, 4]  # A strided slice
            stack_index: [1, 2, 4, 9, 16, 25, 36, 49, 64]  # A sparse selection
            stack_range: [1, 14]  # Will override the selection above
    """

    files = config.Property(proptype=_list_or_glob)
    distributed = config.Property(proptype=bool, default=True)
    convert_strings = config.Property(proptype=bool, default=True)
    selections = config.Property(proptype=dict, default=None)

    def setup(self):
        """Resolve the selections."""
        self._sel = self._resolve_sel()

    def process(self):
        """Load the given files in turn and pass on.

        Returns
        -------
        cont : subclass of `memh5.BasicCont`
        """
        from caput import memh5

        # Garbage collect to workaround leaking memory from containers.
        # TODO: find actual source of leak
        import gc

        gc.collect()

        if len(self.files) == 0:
            raise pipeline.PipelineStopIteration

        # Fetch and remove the first item in the list
        file_ = self.files.pop(0)

        self.log.info(f"Loading file {file_}")
        self.log.debug(f"Reading with selections: {self._sel}")

        # If we are applying selections we need to dispatch the `from_file` via the
        # correct subclass, rather than relying on the internal detection of the
        # subclass. To minimise the number of files being opened this is only done on
        # rank=0 and is then broadcast
        if self._sel:
            if self.comm.rank == 0:
                with h5py.File(file_, "r") as fh:
                    clspath = memh5.MemDiskGroup._detect_subclass_path(fh)
            else:
                clspath = None
            clspath = self.comm.bcast(clspath, root=0)
            new_cls = memh5.MemDiskGroup._resolve_subclass(clspath)
        else:
            new_cls = memh5.BasicCont

        cont = new_cls.from_file(
            file_,
            distributed=self.distributed,
            comm=self.comm,
            convert_attribute_strings=self.convert_strings,
            convert_dataset_strings=self.convert_strings,
            **self._sel,
        )

        if "tag" not in cont.attrs:
            # Get the first part of the actual filename and use it as the tag
            tag = os.path.splitext(os.path.basename(file_))[0]
            cont.attrs["tag"] = tag

        return cont

    def _resolve_sel(self):
        """Turn the selection parameters into actual selectable types."""
        sel = {}

        sel_parsers = {"range": self._parse_range, "index": self._parse_index}

        # To enforce the precedence of range vs index selections, we rely on the fact
        # that a sort will place the axis_range keys after axis_index keys
        for k in sorted(self.selections or []):

            # Parse the key to get the axis name and type, accounting for the fact the
            # axis name may contain an underscore
            *axis, type_ = k.split("_")
            axis_name = "_".join(axis)

            if type_ not in sel_parsers:
                raise ValueError(
                    f'Unsupported selection type "{type_}", or invalid key "{k}"'
                )

            sel[f"{axis_name}_sel"] = sel_parsers[type_](self.selections[k])

        return sel

    def _parse_range(self, x):
        """Parse and validate a range type selection into a `slice`."""
        if not isinstance(x, (list, tuple)) or len(x) > 3 or len(x) < 2:
            raise ValueError(
                f"Range spec must be a length 2 or 3 list or tuple. Got {x}."
            )
        for v in x:
            if not isinstance(v, int):
                raise ValueError(f"All elements of range spec must be ints. Got {x}")
        return slice(*x)

    def _parse_index(self, x):
        """Parse and validate an index type selection into a list of ints."""
        if not isinstance(x, (list, tuple)) or len(x) == 0:
            raise ValueError(f"Index spec must be a non-empty list or tuple. Got {x}.")
        for v in x:
            if not isinstance(v, int):
                raise ValueError(f"All elements of index spec must be ints. Got {x}")
        return list(x)
# Backwards-compatible alias: LoadBasicCont is the old name of
# LoadFilesFromParams, kept so existing pipeline configs keep working.
LoadBasicCont = LoadFilesFromParams
class FindFiles(pipeline.TaskBase):
    """Pass on a list of files given in the configuration.

    Takes a glob or an explicit list of files from the ``files`` parameter
    and returns it from :meth:`setup` for consumption by other tasks.

    Parameters
    ----------
    files : list or glob
    """

    files = config.Property(proptype=_list_or_glob)

    def setup(self):
        """Return list of files specified in the parameters."""
        if isinstance(self.files, (list, tuple)):
            return self.files
        raise RuntimeError("Argument must be list of files.")
class LoadFiles(LoadFilesFromParams):
    """Load data from files passed into the setup routine.

    File must be a serialised subclass of :class:`memh5.BasicCont`.
    """

    # Overridden so the `files` config property is not required
    files = None

    def setup(self, files):
        """Set the list of files to load.

        Parameters
        ----------
        files : list
        """
        # Resolve any axis selections in the base class first
        super().setup()

        if isinstance(files, (list, tuple)):
            self.files = files
        else:
            raise RuntimeError(f'Argument must be list of files. Got "{files}"')
class Save(pipeline.TaskBase):
    """Save out the input, and pass it on.

    Assumes that the input has a `to_hdf5` method. Appends a *tag* if there is
    a `tag` entry in the attributes, otherwise just uses a count.

    Attributes
    ----------
    root : str
        Root of the file name to output to.
    """

    root = config.Property(proptype=str)

    # Running counter used when the incoming data carries no tag
    count = 0

    def next(self, data):
        """Write out the data file.

        Assumes it has an MPIDataset interface.

        Parameters
        ----------
        data : mpidataset.MPIDataset
            Data to write out.
        """
        if "tag" in data.attrs:
            tag = data.attrs["tag"]
        else:
            tag = self.count
            self.count += 1

        data.to_hdf5("%s_%s.h5" % (self.root, str(tag)))

        return data
class Print(pipeline.TaskBase):
    """Debugging task: prints whatever it receives and passes it on unchanged."""

    def next(self, input_):
        print(input_)
        return input_
class LoadBeamTransfer(pipeline.TaskBase):
    """Loads a beam transfer manager from disk.

    Attributes
    ----------
    product_directory : str
        Path to the saved Beam Transfer products.
    """

    product_directory = config.Property(proptype=str)

    def setup(self):
        """Load the beam transfer matrices.

        Returns
        -------
        tel : TransitTelescope
            Object describing the telescope.
        bt : BeamTransfer
            BeamTransfer manager.
        feed_info : list, optional
            Optional list providing additional information about each feed.
        """
        import os

        from drift.core import beamtransfer

        if not os.path.exists(self.product_directory):
            raise RuntimeError("BeamTransfers do not exist.")

        bt = beamtransfer.BeamTransfer(self.product_directory)

        tel = bt.telescope

        # Not every telescope defines `feeds`; fall back to a 2-tuple if absent
        try:
            return tel, bt, tel.feeds
        except AttributeError:
            return tel, bt
class LoadProductManager(pipeline.TaskBase):
    """Loads a driftscan product manager from disk.

    Attributes
    ----------
    product_directory : str
        Path to the root of the products. This is the same as the output
        directory used by ``drift-makeproducts``.
    """

    product_directory = config.Property(proptype=str)

    def setup(self):
        """Load the beam transfer matrices.

        Returns
        -------
        manager : ProductManager
            Object describing the telescope.
        """
        import os

        from drift.core import manager

        if not os.path.exists(self.product_directory):
            raise RuntimeError("Products do not exist.")

        # Load ProductManager and Timestream
        pm = manager.ProductManager.from_config(self.product_directory)

        return pm
class Truncate(task.SingleTask):
    """Precision truncate data prior to saving with bitshuffle compression.

    If no configuration is provided, will look for preset values for the
    input container. Any properties defined in the config will override the
    presets.

    If available, each specified dataset will be truncated relative to a
    (specified) weight dataset with the truncation increasing the variance up
    to the specified maximum in `variance_increase`. If there is no specified
    weight dataset then the truncation falls back to using the
    `fixed_precision`.

    Attributes
    ----------
    dataset : list of str
        Datasets to truncate.
    weight_dataset : list of str
        Datasets to use as inverse variance for truncation precision.
    fixed_precision : float
        Relative precision to truncate to (default 1e-4).
    variance_increase : float
        Maximum fractional increase in variance from numerical truncation.
    """

    dataset = config.Property(proptype=list, default=None)
    weight_dataset = config.Property(proptype=list, default=None)
    fixed_precision = config.Property(proptype=float, default=None)
    variance_increase = config.Property(proptype=float, default=None)

    # Internal flag: ensures the uniform-distribution variance factor is
    # applied to `variance_increase` exactly once (see `_get_params`).
    _variance_scaled = False

    def _get_params(self, container):
        """Load truncation parameters from config or container defaults.

        Parameters
        ----------
        container : type
            Container class of the data being truncated.
        """
        if container in TRUNC_SPEC:
            self.log.info("Truncating from preset for container {}".format(container))
            for key in [
                "dataset",
                "weight_dataset",
                "fixed_precision",
                "variance_increase",
            ]:
                attr = getattr(self, key)
                if attr is None:
                    setattr(self, key, TRUNC_SPEC[container][key])
                else:
                    self.log.info("Overriding container default for '{}'.".format(key))
        else:
            if (
                self.dataset is None
                or self.fixed_precision is None
                or self.variance_increase is None
            ):
                raise pipeline.PipelineConfigError(
                    "Container {} has no preset values. You must define all of 'dataset', "
                    "'fixed_precision', and 'variance_increase' properties.".format(
                        container
                    )
                )
        # Factor of 3 for variance over uniform distribution of truncation errors.
        # BUGFIX: guarded so repeated calls (this runs once per container
        # processed) do not silently compound the allowed variance increase.
        if not self._variance_scaled:
            self.variance_increase *= 3
            self._variance_scaled = True

    def process(self, data):
        """Truncate the incoming data.

        The truncation is done *in place*.

        Parameters
        ----------
        data : containers.ContainerBase
            Data to truncate.

        Returns
        -------
        truncated_data : containers.ContainerBase
            Truncated data.
        """
        # get truncation parameters from config or container defaults
        self._get_params(type(data))

        if self.weight_dataset is None:
            self.weight_dataset = [None] * len(self.dataset)

        for dset, wgt in zip(self.dataset, self.weight_dataset):
            old_shape = data[dset].local_shape
            val = np.ndarray.reshape(data[dset][:], data[dset][:].size)

            if wgt is None:
                # No weights available: truncate to a fixed relative precision
                if np.iscomplexobj(data[dset]):
                    data[dset][:].real = bit_truncate_fixed(
                        val.real, self.fixed_precision
                    ).reshape(old_shape)
                    data[dset][:].imag = bit_truncate_fixed(
                        val.imag, self.fixed_precision
                    ).reshape(old_shape)
                else:
                    data[dset][:] = bit_truncate_fixed(
                        val, self.fixed_precision
                    ).reshape(old_shape)
            else:
                if data[dset][:].shape != data[wgt][:].shape:
                    raise pipeline.PipelineRuntimeError(
                        "Dataset and weight arrays must have same shape ({} != {})".format(
                            data[dset].shape, data[wgt].shape
                        )
                    )
                invvar = np.ndarray.reshape(data[wgt][:], data[dset][:].size)
                if np.iscomplexobj(data[dset]):
                    # The factor of 2 splits the variance budget between the
                    # real and imaginary components
                    data[dset][:].real = bit_truncate_weights(
                        val.real,
                        invvar * 2.0 / self.variance_increase,
                        self.fixed_precision,
                    ).reshape(old_shape)
                    data[dset][:].imag = bit_truncate_weights(
                        val.imag,
                        invvar * 2.0 / self.variance_increase,
                        self.fixed_precision,
                    ).reshape(old_shape)
                else:
                    data[dset][:] = bit_truncate_weights(
                        val, invvar / self.variance_increase, self.fixed_precision
                    ).reshape(old_shape)

        return data
class SaveModuleVersions(task.SingleTask):
    """Write module versions to a YAML file.

    The list of modules should be added to the configuration under key 'save_versions'.
    The version strings are written to a YAML file.

    Attributes
    ----------
    root : str
        Root of the file name to output to.
    """

    root = config.Property(proptype=str)

    done = True

    def setup(self):
        """Save module versions."""
        fname = "{}_versions.yml".format(self.root)

        # Context manager guarantees the file is closed even if dumping fails
        with open(fname, "w") as f:
            f.write(yamldump(self.versions))

        self.done = True

    def process(self):
        """Do nothing."""
        self.done = True
        return
class SaveConfig(task.SingleTask):
    """Write pipeline config to a text file.

    Yaml configuration document is written to a text file.

    Attributes
    ----------
    root : str
        Root of the file name to output to.
    """

    root = config.Property(proptype=str)

    done = True

    def setup(self):
        """Save the pipeline configuration."""
        fname = "{}_config.yml".format(self.root)

        # Context manager guarantees the file is closed even if dumping fails
        with open(fname, "w") as f:
            f.write(yamldump(self.pipeline_config))

        self.done = True

    def process(self):
        """Do nothing."""
        self.done = True
        return
def get_telescope(obj):
    """Return a telescope object out of the input (either `ProductManager`,
    `BeamTransfer` or `TransitTelescope`).
    """
    from drift.core import telescope

    # First try to unwrap via a BeamTransfer; if that fails, the object may
    # already be a bare telescope instance.
    try:
        return get_beamtransfer(obj).telescope
    except RuntimeError:
        pass

    if isinstance(obj, telescope.TransitTelescope):
        return obj

    raise RuntimeError("Could not get telescope instance out of %s" % repr(obj))
def get_beamtransfer(obj):
    """Return a BeamTransfer object out of the input (either `ProductManager`,
    `BeamTransfer`).
    """
    from drift.core import manager, beamtransfer

    if isinstance(obj, beamtransfer.BeamTransfer):
        return obj

    # A ProductManager wraps a BeamTransfer; unwrap it
    if isinstance(obj, manager.ProductManager):
        return obj.beamtransfer

    raise RuntimeError("Could not get BeamTransfer instance out of %s" % repr(obj))
| 30.285041 | 115 | 0.592253 |
import os.path
import h5py
import numpy as np
from yaml import dump as yamldump
from caput import pipeline
from caput import config
from cora.util import units
from . import task
from ..util.truncate import bit_truncate_weights, bit_truncate_fixed
from .containers import SiderealStream, TimeStream, TrackBeam
# Default truncation parameters per container type, consumed by the Truncate
# task when the pipeline config does not override them. "dataset" lists the
# datasets to truncate; "weight_dataset" gives the matching inverse-variance
# dataset (or None to fall back to fixed-precision truncation).
TRUNC_SPEC = {
    SiderealStream: {
        "dataset": ["vis", "vis_weight"],
        "weight_dataset": ["vis_weight", None],
        "fixed_precision": 1e-4,
        "variance_increase": 1e-3,
    },
    TimeStream: {
        "dataset": ["vis", "vis_weight"],
        "weight_dataset": ["vis_weight", None],
        "fixed_precision": 1e-4,
        "variance_increase": 1e-3,
    },
    TrackBeam: {
        "dataset": ["beam", "weight"],
        "weight_dataset": ["weight", None],
        "fixed_precision": 1e-4,
        "variance_increase": 1e-3,
    },
}
def _list_of_filelists(files):
    """Expand a list of glob patterns / explicit file lists into file lists."""
    import glob

    f2 = []
    for filelist in files:
        if isinstance(filelist, str):
            # A string entry is treated as a glob pattern
            filelist = glob.glob(filelist)
        elif isinstance(filelist, list):
            pass
        else:
            raise Exception("Must be list or glob pattern.")
        f2.append(filelist)
    return f2
def _list_or_glob(files):
    """Return *files* if it is a list, else expand it as a (sorted) glob."""
    import glob

    if isinstance(files, str):
        files = sorted(glob.glob(files))
    elif isinstance(files, list):
        pass
    else:
        raise ValueError("Argument must be list or glob pattern, got %s" % repr(files))
    return files
def _list_of_filegroups(groups):
    """Normalise a file group or list of groups: default tags, expand globs."""
    import glob

    # Convert to list if the group was not included in a list
    if not isinstance(groups, list):
        groups = [groups]

    # Set a default tag if needed, and expand each group's glob patterns
    for gi, group in enumerate(groups):
        files = group["files"]
        if "tag" not in group:
            group["tag"] = "group_%i" % gi
        flist = []
        for fname in files:
            flist += glob.glob(fname)
        if not len(flist):
            raise RuntimeError("No files in group exist (%s)." % files)
        group["files"] = flist
    return groups
class LoadMaps(task.MPILoggedTask):
    """Load a series of maps from files given in the tasks parameters.

    Maps are given as one, or a list of file groups; maps within the same
    group are added together before being passed on.

    Attributes
    ----------
    maps : list or dict
        A dictionary specifying a file group, or a list of them.
    """

    maps = config.Property(proptype=_list_of_filegroups)

    def next(self):
        """Load the groups of maps from disk and pass them on.

        Returns
        -------
        map : :class:`containers.Map`
        """
        from . import containers

        # Stop the pipeline once all file groups have been consumed
        if len(self.maps) == 0:
            raise pipeline.PipelineStopIteration

        group = self.maps.pop(0)
        map_stack = None

        # Load every map in the group and accumulate them onto the stack
        for mfile in group["files"]:
            self.log.debug("Loading file %s", mfile)
            current_map = containers.Map.from_file(mfile, distributed=True)
            current_map.redistribute("freq")
            if map_stack is None:
                map_stack = current_map
            else:
                # BUGFIX: use .any() so a single mismatched entry triggers the
                # error; the previous .all() only fired when *every* entry
                # differed, letting partially inconsistent maps through.
                if (current_map.freq != map_stack.freq).any():
                    raise RuntimeError("Maps do not have consistent frequencies.")
                if (current_map.index_map["pol"] != map_stack.index_map["pol"]).any():
                    raise RuntimeError("Maps do not have the same polarisations.")
                if (
                    current_map.index_map["pixel"] != map_stack.index_map["pixel"]
                ).any():
                    raise RuntimeError("Maps do not have the same pixelisation.")
                map_stack.map[:] += current_map.map[:]

        map_stack.attrs["tag"] = group["tag"]
        return map_stack
class LoadFITSCatalog(task.SingleTask):
    """Load an SDSS-style FITS source catalog.

    Catalogs are given as one, or a list of file groups; catalogs within the
    same group are combined together before being passed on.

    Attributes
    ----------
    catalogs : list or dict
        A dictionary specifying a file group, or a list of them.
    z_range : list, optional
        Select only sources with a redshift within the given range.
    freq_range : list, optional
        Select only sources with a 21cm line freq within the given range.
        Overrides `z_range`.
    """

    catalogs = config.Property(proptype=_list_of_filegroups)
    z_range = config.list_type(type_=float, length=2, default=None)
    freq_range = config.list_type(type_=float, length=2, default=None)

    def process(self):
        """Load the groups of catalogs from disk, concatenate and pass them on.

        Returns
        -------
        catalog : :class:`containers.SpectroscopicCatalog`
        """
        from astropy.io import fits
        from . import containers

        # Stop the pipeline once all file groups have been consumed
        if len(self.catalogs) == 0:
            raise pipeline.PipelineStopIteration
        group = self.catalogs.pop(0)

        # A 21cm frequency range overrides any explicit redshift range
        if self.freq_range:
            zl = units.nu21 / self.freq_range[1] - 1
            zh = units.nu21 / self.freq_range[0] - 1
            self.z_range = (zl, zh)
        if self.z_range:
            zl, zh = self.z_range
            self.log.info(f"Applying redshift selection {zl:.2f} <= z <= {zh:.2f}")

        # Read the catalogs on rank 0 only; the result is broadcast below
        if self.comm.rank == 0:
            catalog_stack = []
            for cfile in group["files"]:
                self.log.debug("Loading file %s", cfile)
                with fits.open(cfile, mode="readonly") as cat:
                    pos = np.array([cat[1].data[col] for col in ["RA", "DEC", "Z"]])
                # Apply any redshift selection to the objects
                if self.z_range:
                    zsel = (pos[2] >= self.z_range[0]) & (pos[2] <= self.z_range[1])
                    pos = pos[:, zsel]
                catalog_stack.append(pos)
            # Force a C-contiguous float64 array so the MPI broadcast works
            catalog_array = np.concatenate(catalog_stack, axis=-1).astype(np.float64)
            catalog_array = np.ascontiguousarray(catalog_array)
            num_objects = catalog_array.shape[-1]
        else:
            num_objects = None
            catalog_array = None

        # Broadcast the catalog size, then the data itself, to all ranks
        num_objects = self.comm.bcast(num_objects, root=0)
        self.log.debug(f"Constructing catalog with {num_objects} objects.")
        if self.comm.rank != 0:
            catalog_array = np.zeros((3, num_objects), dtype=np.float64)
        self.comm.Bcast(catalog_array, root=0)

        catalog = containers.SpectroscopicCatalog(object_id=num_objects)
        catalog["position"]["ra"] = catalog_array[0]
        catalog["position"]["dec"] = catalog_array[1]
        catalog["redshift"]["z"] = catalog_array[2]
        # No per-object redshift errors are available from these catalogs
        catalog["redshift"]["z_error"] = 0
        catalog.attrs["tag"] = group["tag"]
        return catalog
class LoadFilesFromParams(task.SingleTask):
    """Load data from files given in the tasks parameters.

    Attributes
    ----------
    files : glob pattern, or list
        Can either be a glob pattern, or lists of actual files.
    distributed : bool, optional
        Whether the file should be loaded distributed across ranks.
    convert_strings : bool, optional
        Convert strings to unicode when loading.
    selections : dict, optional
        Axis selections, given as `<axis>_range` ([start, stop[, step]]) or
        `<axis>_index` (explicit indices); `_range` takes precedence when
        both are given for the same axis.
    """

    files = config.Property(proptype=_list_or_glob)
    distributed = config.Property(proptype=bool, default=True)
    convert_strings = config.Property(proptype=bool, default=True)
    selections = config.Property(proptype=dict, default=None)

    def setup(self):
        """Resolve the selections."""
        self._sel = self._resolve_sel()

    def process(self):
        """Load the given files in turn and pass on.

        Returns
        -------
        cont : subclass of `memh5.BasicCont`
        """
        from caput import memh5

        # Garbage collect to workaround leaking memory from containers.
        # TODO: find actual source of leak
        import gc
        gc.collect()

        if len(self.files) == 0:
            raise pipeline.PipelineStopIteration

        # Fetch and remove the first item in the list
        file_ = self.files.pop(0)
        self.log.info(f"Loading file {file_}")
        self.log.debug(f"Reading with selections: {self._sel}")

        # With selections, from_file must be dispatched via the correct
        # subclass; detect it on rank 0 only and broadcast the class path
        if self._sel:
            if self.comm.rank == 0:
                with h5py.File(file_, "r") as fh:
                    clspath = memh5.MemDiskGroup._detect_subclass_path(fh)
            else:
                clspath = None
            clspath = self.comm.bcast(clspath, root=0)
            new_cls = memh5.MemDiskGroup._resolve_subclass(clspath)
        else:
            new_cls = memh5.BasicCont

        cont = new_cls.from_file(
            file_,
            distributed=self.distributed,
            comm=self.comm,
            convert_attribute_strings=self.convert_strings,
            convert_dataset_strings=self.convert_strings,
            **self._sel,
        )

        if "tag" not in cont.attrs:
            # Use the stem of the file name as the tag
            tag = os.path.splitext(os.path.basename(file_))[0]
            cont.attrs["tag"] = tag
        return cont

    def _resolve_sel(self):
        """Turn the selection parameters into actual selectable types."""
        sel = {}
        sel_parsers = {"range": self._parse_range, "index": self._parse_index}

        # Sorting places axis_range keys after axis_index keys, which gives
        # range selections precedence over index ones
        for k in sorted(self.selections or []):
            # The axis name itself may contain underscores, so only the last
            # component is the selection type
            *axis, type_ = k.split("_")
            axis_name = "_".join(axis)
            if type_ not in sel_parsers:
                raise ValueError(
                    f'Unsupported selection type "{type_}", or invalid key "{k}"'
                )
            sel[f"{axis_name}_sel"] = sel_parsers[type_](self.selections[k])
        return sel

    def _parse_range(self, x):
        """Parse and validate a range type selection into a `slice`."""
        if not isinstance(x, (list, tuple)) or len(x) > 3 or len(x) < 2:
            raise ValueError(
                f"Range spec must be a length 2 or 3 list or tuple. Got {x}."
            )
        for v in x:
            if not isinstance(v, int):
                raise ValueError(f"All elements of range spec must be ints. Got {x}")
        return slice(*x)

    def _parse_index(self, x):
        """Parse and validate an index type selection into a list of ints."""
        if not isinstance(x, (list, tuple)) or len(x) == 0:
            raise ValueError(f"Index spec must be a non-empty list or tuple. Got {x}.")
        for v in x:
            if not isinstance(v, int):
                raise ValueError(f"All elements of index spec must be ints. Got {x}")
        return list(x)
LoadBasicCont = LoadFilesFromParams
class FindFiles(pipeline.TaskBase):
    """Pass on a glob or list of files given in the configuration.

    Parameters
    ----------
    files : list or glob
    """

    files = config.Property(proptype=_list_or_glob)

    def setup(self):
        """Return list of files specified in the parameters."""
        if not isinstance(self.files, (list, tuple)):
            raise RuntimeError("Argument must be list of files.")
        return self.files
class LoadFiles(LoadFilesFromParams):
    """Load data from files passed into the setup routine.

    File must be a serialised subclass of :class:`memh5.BasicCont`.
    """

    # Overridden so the `files` config property is not required
    files = None

    def setup(self, files):
        """Set the list of files to load.

        Parameters
        ----------
        files : list
        """
        # Call the baseclass setup to resolve any selections
        super().setup()
        if not isinstance(files, (list, tuple)):
            raise RuntimeError(f'Argument must be list of files. Got "{files}"')
        self.files = files
class Save(pipeline.TaskBase):
    """Save out the input, and pass it on.

    Assumes that the input has a `to_hdf5` method. Appends a *tag* if there
    is a `tag` entry in the attributes, otherwise just uses a count.

    Attributes
    ----------
    root : str
        Root of the file name to output to.
    """

    root = config.Property(proptype=str)

    # Running counter used when the incoming data carries no tag
    count = 0

    def next(self, data):
        """Write out the data file and pass the data through unchanged."""
        if "tag" not in data.attrs:
            tag = self.count
            self.count += 1
        else:
            tag = data.attrs["tag"]
        fname = "%s_%s.h5" % (self.root, str(tag))
        data.to_hdf5(fname)
        return data
class Print(pipeline.TaskBase):
    """Debugging task: prints whatever it receives and passes it on unchanged."""

    def next(self, input_):
        print(input_)
        return input_
class LoadBeamTransfer(pipeline.TaskBase):
    """Loads a beam transfer manager from disk.

    Attributes
    ----------
    product_directory : str
        Path to the saved Beam Transfer products.
    """

    product_directory = config.Property(proptype=str)

    def setup(self):
        """Load the beam transfer matrices.

        Returns
        -------
        tel : TransitTelescope
            Object describing the telescope.
        bt : BeamTransfer
            BeamTransfer manager.
        feed_info : list, optional
            Optional list providing additional information about each feed.
        """
        import os
        from drift.core import beamtransfer

        if not os.path.exists(self.product_directory):
            raise RuntimeError("BeamTransfers do not exist.")
        bt = beamtransfer.BeamTransfer(self.product_directory)
        tel = bt.telescope
        # Not every telescope defines `feeds`; fall back to a 2-tuple if absent
        try:
            return tel, bt, tel.feeds
        except AttributeError:
            return tel, bt
class LoadProductManager(pipeline.TaskBase):
    """Loads a driftscan product manager from disk.

    Attributes
    ----------
    product_directory : str
        Path to the root of the products, i.e. the output directory used by
        ``drift-makeproducts``.
    """

    product_directory = config.Property(proptype=str)

    def setup(self):
        """Load the product manager.

        Returns
        -------
        manager : ProductManager
            Object describing the telescope.
        """
        import os
        from drift.core import manager

        if not os.path.exists(self.product_directory):
            raise RuntimeError("Products do not exist.")
        # Load ProductManager and Timestream
        pm = manager.ProductManager.from_config(self.product_directory)
        return pm
class Truncate(task.SingleTask):
    """Precision truncate data prior to saving with bitshuffle compression.

    If no configuration is provided, will look for preset values for the
    input container (see ``TRUNC_SPEC``). Any properties defined in the
    config override the presets. Datasets with a weight dataset are
    truncated relative to the inverse variance, limited by
    `variance_increase`; otherwise `fixed_precision` is used.

    Attributes
    ----------
    dataset : list of str
        Datasets to truncate.
    weight_dataset : list of str
        Datasets to use as inverse variance for truncation precision.
    fixed_precision : float
        Relative precision to truncate to (default 1e-4).
    variance_increase : float
        Maximum fractional increase in variance from numerical truncation.
    """

    dataset = config.Property(proptype=list, default=None)
    weight_dataset = config.Property(proptype=list, default=None)
    fixed_precision = config.Property(proptype=float, default=None)
    variance_increase = config.Property(proptype=float, default=None)

    # Internal flag: ensures the uniform-distribution variance factor is
    # applied to `variance_increase` exactly once (see `_get_params`).
    _variance_scaled = False

    def _get_params(self, container):
        """Load truncation parameters from config or container defaults."""
        if container in TRUNC_SPEC:
            self.log.info("Truncating from preset for container {}".format(container))
            for key in [
                "dataset",
                "weight_dataset",
                "fixed_precision",
                "variance_increase",
            ]:
                attr = getattr(self, key)
                if attr is None:
                    setattr(self, key, TRUNC_SPEC[container][key])
                else:
                    self.log.info("Overriding container default for '{}'.".format(key))
        else:
            if (
                self.dataset is None
                or self.fixed_precision is None
                or self.variance_increase is None
            ):
                raise pipeline.PipelineConfigError(
                    "Container {} has no preset values. You must define all of 'dataset', "
                    "'fixed_precision', and 'variance_increase' properties.".format(
                        container
                    )
                )
        # Factor of 3 for variance over uniform distribution of truncation errors.
        # BUGFIX: guarded so repeated calls (this runs once per container
        # processed) do not silently compound the allowed variance increase.
        if not self._variance_scaled:
            self.variance_increase *= 3
            self._variance_scaled = True

    def process(self, data):
        """Truncate the incoming data *in place* and return it.

        Parameters
        ----------
        data : containers.ContainerBase
            Data to truncate.

        Returns
        -------
        truncated_data : containers.ContainerBase
            Truncated data.
        """
        # get truncation parameters from config or container defaults
        self._get_params(type(data))
        if self.weight_dataset is None:
            self.weight_dataset = [None] * len(self.dataset)
        for dset, wgt in zip(self.dataset, self.weight_dataset):
            old_shape = data[dset].local_shape
            val = np.ndarray.reshape(data[dset][:], data[dset][:].size)
            if wgt is None:
                # No weights available: truncate to a fixed relative precision
                if np.iscomplexobj(data[dset]):
                    data[dset][:].real = bit_truncate_fixed(
                        val.real, self.fixed_precision
                    ).reshape(old_shape)
                    data[dset][:].imag = bit_truncate_fixed(
                        val.imag, self.fixed_precision
                    ).reshape(old_shape)
                else:
                    data[dset][:] = bit_truncate_fixed(
                        val, self.fixed_precision
                    ).reshape(old_shape)
            else:
                if data[dset][:].shape != data[wgt][:].shape:
                    raise pipeline.PipelineRuntimeError(
                        "Dataset and weight arrays must have same shape ({} != {})".format(
                            data[dset].shape, data[wgt].shape
                        )
                    )
                invvar = np.ndarray.reshape(data[wgt][:], data[dset][:].size)
                if np.iscomplexobj(data[dset]):
                    # The factor of 2 splits the variance budget between the
                    # real and imaginary components
                    data[dset][:].real = bit_truncate_weights(
                        val.real,
                        invvar * 2.0 / self.variance_increase,
                        self.fixed_precision,
                    ).reshape(old_shape)
                    data[dset][:].imag = bit_truncate_weights(
                        val.imag,
                        invvar * 2.0 / self.variance_increase,
                        self.fixed_precision,
                    ).reshape(old_shape)
                else:
                    data[dset][:] = bit_truncate_weights(
                        val, invvar / self.variance_increase, self.fixed_precision
                    ).reshape(old_shape)
        return data
class SaveModuleVersions(task.SingleTask):
    """Write module versions to a YAML file.

    The list of modules should be added to the configuration under key
    'save_versions'. The version strings are written to a YAML file.

    Attributes
    ----------
    root : str
        Root of the file name to output to.
    """

    root = config.Property(proptype=str)

    done = True

    def setup(self):
        """Save module versions."""
        fname = "{}_versions.yml".format(self.root)
        # Context manager guarantees the file is closed even if dumping fails
        with open(fname, "w") as f:
            f.write(yamldump(self.versions))
        self.done = True

    def process(self):
        """Do nothing."""
        self.done = True
        return
class SaveConfig(task.SingleTask):
    """Write pipeline config to a text file.

    Yaml configuration document is written to a text file.

    Attributes
    ----------
    root : str
        Root of the file name to output to.
    """

    root = config.Property(proptype=str)

    done = True

    def setup(self):
        """Save the pipeline configuration."""
        fname = "{}_config.yml".format(self.root)
        # Context manager guarantees the file is closed even if dumping fails
        with open(fname, "w") as f:
            f.write(yamldump(self.pipeline_config))
        self.done = True

    def process(self):
        """Do nothing."""
        self.done = True
        return
def get_telescope(obj):
    """Return a telescope object out of the input (either `ProductManager`,
    `BeamTransfer` or `TransitTelescope`).
    """
    from drift.core import telescope

    # First try to unwrap via a BeamTransfer; if that fails, the object may
    # already be a bare telescope instance.
    try:
        return get_beamtransfer(obj).telescope
    except RuntimeError:
        if isinstance(obj, telescope.TransitTelescope):
            return obj
    raise RuntimeError("Could not get telescope instance out of %s" % repr(obj))
def get_beamtransfer(obj):
    """Return a BeamTransfer object out of the input (either `ProductManager`,
    `BeamTransfer`).
    """
    from drift.core import manager, beamtransfer

    if isinstance(obj, beamtransfer.BeamTransfer):
        return obj
    # A ProductManager wraps a BeamTransfer; unwrap it
    if isinstance(obj, manager.ProductManager):
        return obj.beamtransfer
    raise RuntimeError("Could not get BeamTransfer instance out of %s" % repr(obj))
| true | true |
f71b4f651dc252f16edc83bd218126c89ab19ffc | 2,507 | py | Python | scripts/remove_orphans.py | dbaio/portsfallout | 2512036a9983b833f4ece2a0801541dca4d8d58c | [
"BSD-2-Clause"
] | 6 | 2020-10-11T07:54:50.000Z | 2022-01-25T22:03:18.000Z | scripts/remove_orphans.py | dbaio/portsfallout | 2512036a9983b833f4ece2a0801541dca4d8d58c | [
"BSD-2-Clause"
] | null | null | null | scripts/remove_orphans.py | dbaio/portsfallout | 2512036a9983b833f4ece2a0801541dca4d8d58c | [
"BSD-2-Clause"
] | null | null | null | # Copyright (c) 2020-2021 Danilo G. Baio <dbaio@bsd.com.br>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import requests
import bz2

# Make the Django project importable and point at its settings module;
# this must happen before django.setup() and the model import below.
sys.path.insert(1, r'../')
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'portsfallout.settings')
# Bootstrap Django so the ORM models can be used from a standalone script
import django
django.setup()
from ports.models import Port
def fetch_index():
    """Download the FreeBSD ports INDEX file into the working directory."""
    url = "https://www.FreeBSD.org/ports/INDEX-13.bz2"
    # Fail fast on network stalls and HTTP errors instead of silently
    # writing an error page (or nothing) to disk.
    r = requests.get(url, allow_redirects=True, timeout=60)
    r.raise_for_status()
    with open('INDEX-13.bz2', 'wb') as fp:
        fp.write(r.content)
def populate_set():
    """Return the set of all port origins currently stored in the database."""
    rows = Port.objects.all().values('origin').order_by('origin')
    return {row['origin'] for row in rows}
def read_index(sPorts):
    """Drop from *sPorts* every origin present in the INDEX; return the rest.

    Whatever remains afterwards exists in the database but not in the ports
    tree any more, i.e. is an orphan.
    """
    with bz2.open('INDEX-13.bz2', mode='rt') as index_file:
        for row in index_file:
            # Second |-separated field is the port directory, e.g.
            # /usr/ports/www/foo -> origin www/foo
            origin = row.split("|")[1].replace("/usr/ports/", "")
            sPorts.discard(origin)
    return sPorts
def remove_orphans(sPortsOrp):
    """Delete every orphaned port (identified by origin) from the database."""
    for origin in sPortsOrp:
        print('Removing {}'.format(origin))
        Port.objects.filter(origin=origin).delete()
if __name__ == "__main__":
fetch_index()
sPorts = populate_set()
sPortsOrp = read_index(sPorts)
remove_orphans(sPortsOrp)
| 32.986842 | 81 | 0.723574 |
import os
import sys
import requests
import bz2

# Make the Django project importable and point at its settings module;
# this must happen before django.setup() and the model import below.
sys.path.insert(1, r'../')
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'portsfallout.settings')
# Bootstrap Django so the ORM models can be used from a standalone script
import django
django.setup()
from ports.models import Port
def fetch_index():
    """Download the FreeBSD ports INDEX file into the working directory."""
    url = "https://www.FreeBSD.org/ports/INDEX-13.bz2"
    # Fail fast on network stalls and HTTP errors instead of silently
    # writing an error page (or nothing) to disk.
    r = requests.get(url, allow_redirects=True, timeout=60)
    r.raise_for_status()
    with open('INDEX-13.bz2', 'wb') as fp:
        fp.write(r.content)
def populate_set():
Ports = Port.objects.all().values('origin').order_by('origin')
sPorts = set()
for port in Ports:
sPorts.add(port['origin'])
return sPorts
def read_index(sPorts):
with bz2.open('INDEX-13.bz2', mode='rt') as index_file:
for row in index_file:
row_list = row.split("|")
p_origin = row_list[1].replace("/usr/ports/", "")
if p_origin in sPorts:
sPorts.remove(p_origin)
return sPorts
def remove_orphans(sPortsOrp):
for port in sPortsOrp:
print('Removing {}'.format(port))
Port.objects.filter(origin=port).delete()
if __name__ == "__main__":
fetch_index()
sPorts = populate_set()
sPortsOrp = read_index(sPorts)
remove_orphans(sPortsOrp)
| true | true |
f71b510ba2e775050928e7d131af51fdd10e9af6 | 4,481 | py | Python | scripts/mergemessages.py | burakozdemir32/django-internationalflavor | 4c5d29519050c929a608d2054c14faa44ee273c9 | [
"BSD-3-Clause"
] | null | null | null | scripts/mergemessages.py | burakozdemir32/django-internationalflavor | 4c5d29519050c929a608d2054c14faa44ee273c9 | [
"BSD-3-Clause"
] | null | null | null | scripts/mergemessages.py | burakozdemir32/django-internationalflavor | 4c5d29519050c929a608d2054c14faa44ee273c9 | [
"BSD-3-Clause"
] | null | null | null | import os
import sys
import polib
import django
from django.conf import settings
from django.core.management.base import BaseCommand
from django.utils import translation
# This is almost a management command, but we do not want it to be added to the django-admin namespace for the simple
# reason that it is not expected to be executed by package users, only by the package maintainers.
# We use a thin __main__ wrapper to make it work (ish) like a management command.
MODULE_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'internationalflavor')
LOCALE_PATH = os.path.join(MODULE_PATH, 'locale')
def mark_entry(entry):
    """Strip the fuzzy flag from *entry* and stamp it as CLDR-generated."""
    try:
        entry.flags.remove('fuzzy')
    except ValueError:
        # Flag was not set; nothing to strip.
        pass
    entry.comment = "auto-generated from CLDR -- see docs before updating"
class Command(BaseCommand):
    """Merge auto-generated CLDR translations into the combined Django PO files.

    For each configured language three catalogs are involved:

    * ``django.po``      -- the combined catalog that is saved and compiled,
    * ``cldr.po``        -- messages generated from the CLDR,
    * ``django_only.po`` -- hand-maintained (non-CLDR) messages.

    CLDR entries overwrite combined entries (unless the translator marked the
    entry 'manual' in its comment), the hand-maintained entries are re-applied
    on top, and the result is written back as ``django.po`` plus a compiled
    ``django.mo``.
    """

    help = 'Updates messages in the PO file with messages from the CLDR'

    def handle(self, *args, **options):
        """Process every configured language (or just the one given via -l)."""
        translation.deactivate_all()
        if options['l']:
            # Trailing comma builds a one-element tuple of (code, name).
            languages = (options['l'], dict(settings.LANGUAGES)[options['l']]),
        else:
            languages = settings.LANGUAGES

        for lc, language in languages:
            try:
                self.stdout.write("Parsing language %s [%s]" % (language, lc))

                # The pofile is our combined file.
                pofile = polib.pofile(os.path.join(LOCALE_PATH, lc, 'LC_MESSAGES', 'django.po'))
                # The cldrfile contains only messages from CLDR.
                cldrfile = polib.pofile(os.path.join(LOCALE_PATH, lc, 'LC_MESSAGES', 'cldr.po'))
                # The djangofile will only contain messages not from CLDR.
                try:
                    djangofile = polib.pofile(os.path.join(LOCALE_PATH, lc, 'LC_MESSAGES', 'django_only.po'))
                except IOError:
                    # First run for this language: start an empty catalog that
                    # inherits the combined file's metadata and header.
                    djangofile = polib.POFile()
                    djangofile.metadata = pofile.metadata
                    djangofile.header = pofile.header

                # Merge all non-CLDR messages into the djangofile.
                django_only_messages = polib.POFile()
                for entry in pofile:
                    # Idiom fix: `'fuzzy' not in ...` instead of `not 'fuzzy' in ...`.
                    if cldrfile.find(entry.msgid) is None and not entry.obsolete and 'fuzzy' not in entry.flags:
                        django_only_messages.append(entry)
                djangofile.merge(django_only_messages)
                djangofile.save(os.path.join(LOCALE_PATH, lc, 'LC_MESSAGES', 'django_only.po'))

                # Add all entries from the CLDR file to the combined file.
                for entry in cldrfile:
                    e = pofile.find(entry.msgid)
                    if e is None:
                        e = polib.POEntry()
                        e.msgid = entry.msgid
                        pofile.append(e)
                    elif 'manual' in e.tcomment.lower():
                        # The translator opted this entry out of CLDR updates.
                        self.stdout.write("-- Skipping %s of %s" % (e.msgid, language))
                        continue
                    e.obsolete = False
                    e.msgstr = entry.msgstr
                    e.comment = entry.comment
                    if 'fuzzy' in e.flags:
                        e.flags.remove('fuzzy')

                # Re-apply hand-maintained entries on top of the combined file.
                for entry in djangofile:
                    e = pofile.find(entry.msgid)
                    # If not in the main file, then skip.
                    if e is None:
                        continue
                    e.obsolete = entry.obsolete
                    e.msgstr = entry.msgstr
                    e.comment = entry.comment
                    e.flags = entry.flags

                # We copy over the header and metadata from the djangofile.
                pofile.metadata = djangofile.metadata
                pofile.header = djangofile.header

                pofile.save()
                pofile.save_as_mofile(os.path.join(LOCALE_PATH, lc, 'LC_MESSAGES', 'django.mo'))
            except IOError as e:
                self.stderr.write("Error while handling %s: %s (possibly no valid .po file)" % (language, e))
            except Exception as e:
                self.stderr.write("Error while handling %s: %s" % (language, e))

    def add_arguments(self, parser):
        """Register the optional ``-l <language-code>`` filter."""
        parser.add_argument('-l')
if __name__ == '__main__':
    # Minimal Django bootstrap so the command can run outside a project.
    settings.configure()
    django.setup()
    # Emulate `django-admin mergemessages [...]` using this module's Command.
    Command().run_from_argv(["django-admin.py", "mergemessages"] + sys.argv[1:])
| 41.110092 | 117 | 0.567061 | import os
import sys
import polib
import django
from django.conf import settings
from django.core.management.base import BaseCommand
from django.utils import translation
MODULE_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'internationalflavor')
LOCALE_PATH = os.path.join(MODULE_PATH, 'locale')
def mark_entry(entry):
if 'fuzzy' in entry.flags:
entry.flags.remove('fuzzy')
entry.comment = "auto-generated from CLDR -- see docs before updating"
class Command(BaseCommand):
help = 'Updates messages in the PO file with messages from the CLDR'
def handle(self, *args, **options):
translation.deactivate_all()
if options['l']:
languages = (options['l'], dict(settings.LANGUAGES)[options['l']]),
else:
languages = settings.LANGUAGES
for lc, language in languages:
try:
self.stdout.write("Parsing language %s [%s]" % (language, lc))
pofile = polib.pofile(os.path.join(LOCALE_PATH, lc, 'LC_MESSAGES', 'django.po'))
cldrfile = polib.pofile(os.path.join(LOCALE_PATH, lc, 'LC_MESSAGES', 'cldr.po'))
try:
djangofile = polib.pofile(os.path.join(LOCALE_PATH, lc, 'LC_MESSAGES', 'django_only.po'))
except IOError:
djangofile = polib.POFile()
djangofile.metadata = pofile.metadata
djangofile.header = pofile.header
django_only_messages = polib.POFile()
for entry in pofile:
if cldrfile.find(entry.msgid) is None and not entry.obsolete and not 'fuzzy' in entry.flags:
django_only_messages.append(entry)
djangofile.merge(django_only_messages)
djangofile.save(os.path.join(LOCALE_PATH, lc, 'LC_MESSAGES', 'django_only.po'))
for entry in cldrfile:
e = pofile.find(entry.msgid)
if e is None:
e = polib.POEntry()
e.msgid = entry.msgid
pofile.append(e)
elif 'manual' in e.tcomment.lower():
self.stdout.write("-- Skipping %s of %s" % (e.msgid, language))
continue
e.obsolete = False
e.msgstr = entry.msgstr
e.comment = entry.comment
if 'fuzzy' in e.flags:
e.flags.remove('fuzzy')
for entry in djangofile:
e = pofile.find(entry.msgid)
if e is None:
continue
e.obsolete = entry.obsolete
e.msgstr = entry.msgstr
e.comment = entry.comment
e.flags = entry.flags
pofile.metadata = djangofile.metadata
pofile.header = djangofile.header
pofile.save()
pofile.save_as_mofile(os.path.join(LOCALE_PATH, lc, 'LC_MESSAGES', 'django.mo'))
except IOError as e:
self.stderr.write("Error while handling %s: %s (possibly no valid .po file)" % (language, e))
except Exception as e:
self.stderr.write("Error while handling %s: %s" % (language, e))
def add_arguments(self, parser):
parser.add_argument('-l')
if __name__ == '__main__':
settings.configure()
django.setup()
Command().run_from_argv(["django-admin.py", "mergemessages"] + sys.argv[1:])
| true | true |
f71b518086dd6b504569820b3ca2a1c860242389 | 213 | py | Python | wadeem/wadeem/doctype/coordinators/test_coordinators.py | siddhantsinha-oodles/Wadeem-app | eee05dead7ccee8878cf3630d3cdf32adb155c7f | [
"MIT"
] | null | null | null | wadeem/wadeem/doctype/coordinators/test_coordinators.py | siddhantsinha-oodles/Wadeem-app | eee05dead7ccee8878cf3630d3cdf32adb155c7f | [
"MIT"
] | null | null | null | wadeem/wadeem/doctype/coordinators/test_coordinators.py | siddhantsinha-oodles/Wadeem-app | eee05dead7ccee8878cf3630d3cdf32adb155c7f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Siddhant and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestCoordinators(unittest.TestCase):
	"""Placeholder test case for the Coordinators doctype; no tests exist yet."""
	pass
| 19.363636 | 47 | 0.765258 |
from __future__ import unicode_literals
import unittest
class TestCoordinators(unittest.TestCase):
pass
| true | true |
f71b5347247e075de1cdb3afefef4dff57d2b7aa | 802 | py | Python | multiqc_npm/modules/npm_calculate_callability.py | c-BIG/MultiQC_NPM | c8dbf15c2b2ce03f2932db0bb50d49dcab865d75 | [
"MIT"
] | null | null | null | multiqc_npm/modules/npm_calculate_callability.py | c-BIG/MultiQC_NPM | c8dbf15c2b2ce03f2932db0bb50d49dcab865d75 | [
"MIT"
] | null | null | null | multiqc_npm/modules/npm_calculate_callability.py | c-BIG/MultiQC_NPM | c8dbf15c2b2ce03f2932db0bb50d49dcab865d75 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Parser for calculate_callability.py
"""
import logging
import json
from multiqc.utils import report
log = logging.getLogger(__name__)
def parse_reports(self):
    """Collect calculate_callability outputs, attach them to the module,
    persist them via MultiQC, and return the number of samples found."""
    self.calculate_callability = {}

    # Each matching log file carries a JSON payload in "f" keyed by sample
    # name in "s_name".
    for logfile in self.find_log_files('multiqc_npm/calculate_callability'):
        self.calculate_callability[logfile["s_name"]] = json.loads(logfile["f"])

    # Only write a data file when at least one sample was detected.
    if self.calculate_callability:
        self.write_data_file(self.calculate_callability, 'multiqc_npm_calculate_callability')

    return len(self.calculate_callability)
| 22.914286 | 93 | 0.706983 |
import logging
import json
from multiqc.utils import report
log = logging.getLogger(__name__)
def parse_reports(self):
self.calculate_callability = dict()
for f in self.find_log_files('multiqc_npm/calculate_callability'):
parsed_data = json.loads(f["f"])
s_name = f["s_name"]
self.calculate_callability[s_name] = parsed_data
if len(self.calculate_callability) > 0:
self.write_data_file(self.calculate_callability, 'multiqc_npm_calculate_callability')
return len(self.calculate_callability)
| true | true |
f71b54183262fd12198dc0255f4b1059568df90a | 14,252 | py | Python | airflow/models/connection.py | dorranh/airflow | 1a9a2cadcf8606cfcb729d1323dd33dfacc64633 | [
"Apache-2.0"
] | null | null | null | airflow/models/connection.py | dorranh/airflow | 1a9a2cadcf8606cfcb729d1323dd33dfacc64633 | [
"Apache-2.0"
] | 1 | 2019-05-14T14:32:40.000Z | 2019-05-14T14:32:40.000Z | airflow/models/connection.py | dorranh/airflow | 1a9a2cadcf8606cfcb729d1323dd33dfacc64633 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
from urllib.parse import parse_qsl, quote, unquote, urlencode, urlparse
from sqlalchemy import Boolean, Column, Integer, String
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import synonym
from airflow.exceptions import AirflowException
from airflow.models.base import ID_LEN, Base
from airflow.models.crypto import get_fernet
from airflow.utils.log.logging_mixin import LoggingMixin
# Python automatically converts all letters to lowercase in hostname
# See: https://issues.apache.org/jira/browse/AIRFLOW-3615
def parse_netloc_to_hostname(uri_parts):
    """Return the decoded hostname from parsed URI parts.

    urlparse lower-cases the hostname and cannot represent an encoded '/'
    (see AIRFLOW-3615), so when the decoded hostname contains a slash we
    fall back to dissecting the raw netloc by hand.
    """
    hostname = unquote(uri_parts.hostname or '')
    if '/' in hostname:
        # Strip userinfo (everything up to the last '@') and the port, then
        # decode.  rsplit/split leave the string untouched when the
        # separator is absent, matching the original membership checks.
        hostname = uri_parts.netloc.rsplit("@", 1)[-1].split(":", 1)[0]
        hostname = unquote(hostname)
    return hostname
class Connection(Base, LoggingMixin):
    """
    Placeholder to store information about different database instances
    connection information. The idea here is that scripts use references to
    database instances (conn_id) instead of hard coding hostname, logins and
    passwords when using operators or hooks.
    """
    __tablename__ = "connection"

    id = Column(Integer(), primary_key=True)
    conn_id = Column(String(ID_LEN))
    conn_type = Column(String(500))
    host = Column(String(500))
    schema = Column(String(500))
    login = Column(String(500))
    # Stored under the plain 'password' column; accessed via the `password`
    # synonym below so values are transparently Fernet-encrypted.
    _password = Column('password', String(5000))
    port = Column(Integer())
    is_encrypted = Column(Boolean, unique=False, default=False)
    is_extra_encrypted = Column(Boolean, unique=False, default=False)
    # Same encryption scheme as `_password`, exposed via the `extra` synonym.
    _extra = Column('extra', String(5000))

    # (conn_type value, human-readable label) pairs for known connection types.
    _types = [
        ('docker', 'Docker Registry'),
        ('fs', 'File (path)'),
        ('ftp', 'FTP'),
        ('google_cloud_platform', 'Google Cloud Platform'),
        ('hdfs', 'HDFS'),
        ('http', 'HTTP'),
        ('pig_cli', 'Pig Client Wrapper'),
        ('hive_cli', 'Hive Client Wrapper'),
        ('hive_metastore', 'Hive Metastore Thrift'),
        ('hiveserver2', 'Hive Server 2 Thrift'),
        ('jdbc', 'JDBC Connection'),
        ('odbc', 'ODBC Connection'),
        ('jenkins', 'Jenkins'),
        ('mysql', 'MySQL'),
        ('postgres', 'Postgres'),
        ('oracle', 'Oracle'),
        ('vertica', 'Vertica'),
        ('presto', 'Presto'),
        ('s3', 'S3'),
        ('samba', 'Samba'),
        ('sqlite', 'Sqlite'),
        ('ssh', 'SSH'),
        ('cloudant', 'IBM Cloudant'),
        ('mssql', 'Microsoft SQL Server'),
        ('mesos_framework-id', 'Mesos Framework ID'),
        ('jira', 'JIRA'),
        ('redis', 'Redis'),
        ('wasb', 'Azure Blob Storage'),
        ('databricks', 'Databricks'),
        ('aws', 'Amazon Web Services'),
        ('emr', 'Elastic MapReduce'),
        ('snowflake', 'Snowflake'),
        ('segment', 'Segment'),
        ('sqoop', 'Sqoop'),
        ('azure_data_lake', 'Azure Data Lake'),
        ('azure_container_instances', 'Azure Container Instances'),
        ('azure_cosmos', 'Azure CosmosDB'),
        ('cassandra', 'Cassandra'),
        ('qubole', 'Qubole'),
        ('mongo', 'MongoDB'),
        ('gcpcloudsql', 'Google Cloud SQL'),
        ('grpc', 'GRPC Connection'),
    ]

    def __init__(
            self, conn_id=None, conn_type=None,
            host=None, login=None, password=None,
            schema=None, port=None, extra=None,
            uri=None):
        """Build a connection from discrete fields, or from *uri* if given.

        When *uri* is provided it takes precedence and every field except
        *conn_id* is parsed out of it.
        """
        self.conn_id = conn_id
        if uri:
            self.parse_from_uri(uri)
        else:
            self.conn_type = conn_type
            self.host = host
            self.login = login
            self.password = password
            self.schema = schema
            self.port = port
            self.extra = extra

    def parse_from_uri(self, uri):
        """Populate fields from a ``type://login:password@host:port/schema?extra`` URI."""
        uri_parts = urlparse(uri)
        conn_type = uri_parts.scheme
        if conn_type == 'postgresql':
            conn_type = 'postgres'
        elif '-' in conn_type:
            # URI schemes cannot contain '_', so '-' is used on the wire and
            # mapped back to the stored conn_type form here.
            conn_type = conn_type.replace('-', '_')
        self.conn_type = conn_type
        self.host = parse_netloc_to_hostname(uri_parts)
        quoted_schema = uri_parts.path[1:]
        self.schema = unquote(quoted_schema) if quoted_schema else quoted_schema
        self.login = unquote(uri_parts.username) \
            if uri_parts.username else uri_parts.username
        self.password = unquote(uri_parts.password) \
            if uri_parts.password else uri_parts.password
        self.port = uri_parts.port
        if uri_parts.query:
            # Query string becomes the JSON-encoded `extra` payload.
            self.extra = json.dumps(dict(parse_qsl(uri_parts.query, keep_blank_values=True)))

    def get_uri(self) -> str:
        """Serialize this connection back to URI form (inverse of parse_from_uri)."""
        uri = '{}://'.format(str(self.conn_type).lower().replace('_', '-'))

        authority_block = ''
        if self.login is not None:
            authority_block += quote(self.login, safe='')

        if self.password is not None:
            authority_block += ':' + quote(self.password, safe='')

        # NOTE(review): `> ''` behaves like a non-empty check for str here.
        if authority_block > '':
            authority_block += '@'

            uri += authority_block

        host_block = ''
        if self.host:
            host_block += quote(self.host, safe='')

        if self.port:
            if host_block > '':
                host_block += ':{}'.format(self.port)
            else:
                # Port without host still needs the authority separator.
                host_block += '@:{}'.format(self.port)

        if self.schema:
            host_block += '/{}'.format(quote(self.schema, safe=''))

        uri += host_block

        if self.extra_dejson:
            uri += '?{}'.format(urlencode(self.extra_dejson))

        return uri

    def get_password(self):
        """Return the password, decrypting it if it was stored encrypted."""
        if self._password and self.is_encrypted:
            fernet = get_fernet()
            if not fernet.is_encrypted:
                raise AirflowException(
                    "Can't decrypt encrypted password for login={}, \
                    FERNET_KEY configuration is missing".format(self.login))
            return fernet.decrypt(bytes(self._password, 'utf-8')).decode()
        else:
            return self._password

    def set_password(self, value):
        """Encrypt *value* (if truthy) and store it.

        NOTE(review): unlike set_extra, a falsy value leaves the previously
        stored password unchanged -- confirm whether that is intended.
        """
        if value:
            fernet = get_fernet()
            self._password = fernet.encrypt(bytes(value, 'utf-8')).decode()
            self.is_encrypted = fernet.is_encrypted

    @declared_attr
    def password(cls):
        # Expose get/set_password as the transparent `password` attribute.
        return synonym('_password',
                       descriptor=property(cls.get_password, cls.set_password))

    def get_extra(self):
        """Return the extra field, decrypting it if it was stored encrypted."""
        if self._extra and self.is_extra_encrypted:
            fernet = get_fernet()
            if not fernet.is_encrypted:
                raise AirflowException(
                    "Can't decrypt `extra` params for login={},\
                    FERNET_KEY configuration is missing".format(self.login))
            return fernet.decrypt(bytes(self._extra, 'utf-8')).decode()
        else:
            return self._extra

    def set_extra(self, value):
        """Encrypt and store *value*; a falsy value clears the field."""
        if value:
            fernet = get_fernet()
            self._extra = fernet.encrypt(bytes(value, 'utf-8')).decode()
            self.is_extra_encrypted = fernet.is_encrypted
        else:
            self._extra = value
            self.is_extra_encrypted = False

    @declared_attr
    def extra(cls):
        # Expose get/set_extra as the transparent `extra` attribute.
        return synonym('_extra',
                       descriptor=property(cls.get_extra, cls.set_extra))

    def rotate_fernet_key(self):
        """Re-encrypt stored secrets with the current (rotated) Fernet key."""
        fernet = get_fernet()
        if self._password and self.is_encrypted:
            self._password = fernet.rotate(self._password.encode('utf-8')).decode()
        if self._extra and self.is_extra_encrypted:
            self._extra = fernet.rotate(self._extra.encode('utf-8')).decode()

    def get_hook(self):
        """Return the hook instance matching ``conn_type``.

        Imports are local so that only the chosen provider package must be
        installed.

        Raises:
            AirflowException: if ``conn_type`` has no known hook.
        """
        if self.conn_type == 'mysql':
            from airflow.providers.mysql.hooks.mysql import MySqlHook
            return MySqlHook(mysql_conn_id=self.conn_id)
        elif self.conn_type == 'google_cloud_platform':
            from airflow.providers.google.cloud.hooks.bigquery import BigQueryHook
            return BigQueryHook(bigquery_conn_id=self.conn_id)
        elif self.conn_type == 'postgres':
            from airflow.providers.postgres.hooks.postgres import PostgresHook
            return PostgresHook(postgres_conn_id=self.conn_id)
        elif self.conn_type == 'pig_cli':
            from airflow.providers.apache.pig.hooks.pig import PigCliHook
            return PigCliHook(pig_cli_conn_id=self.conn_id)
        elif self.conn_type == 'hive_cli':
            from airflow.providers.apache.hive.hooks.hive import HiveCliHook
            return HiveCliHook(hive_cli_conn_id=self.conn_id)
        elif self.conn_type == 'presto':
            from airflow.providers.presto.hooks.presto import PrestoHook
            return PrestoHook(presto_conn_id=self.conn_id)
        elif self.conn_type == 'hiveserver2':
            from airflow.providers.apache.hive.hooks.hive import HiveServer2Hook
            return HiveServer2Hook(hiveserver2_conn_id=self.conn_id)
        elif self.conn_type == 'sqlite':
            from airflow.providers.sqlite.hooks.sqlite import SqliteHook
            return SqliteHook(sqlite_conn_id=self.conn_id)
        elif self.conn_type == 'jdbc':
            from airflow.providers.jdbc.hooks.jdbc import JdbcHook
            return JdbcHook(jdbc_conn_id=self.conn_id)
        elif self.conn_type == 'mssql':
            from airflow.providers.microsoft.mssql.hooks.mssql import MsSqlHook
            return MsSqlHook(mssql_conn_id=self.conn_id)
        elif self.conn_type == 'odbc':
            from airflow.providers.odbc.hooks.odbc import OdbcHook
            return OdbcHook(odbc_conn_id=self.conn_id)
        elif self.conn_type == 'oracle':
            from airflow.providers.oracle.hooks.oracle import OracleHook
            return OracleHook(oracle_conn_id=self.conn_id)
        elif self.conn_type == 'vertica':
            from airflow.providers.vertica.hooks.vertica import VerticaHook
            return VerticaHook(vertica_conn_id=self.conn_id)
        elif self.conn_type == 'cloudant':
            from airflow.providers.cloudant.hooks.cloudant import CloudantHook
            return CloudantHook(cloudant_conn_id=self.conn_id)
        elif self.conn_type == 'jira':
            from airflow.providers.jira.hooks.jira import JiraHook
            return JiraHook(jira_conn_id=self.conn_id)
        elif self.conn_type == 'redis':
            from airflow.providers.redis.hooks.redis import RedisHook
            return RedisHook(redis_conn_id=self.conn_id)
        elif self.conn_type == 'wasb':
            from airflow.providers.microsoft.azure.hooks.wasb import WasbHook
            return WasbHook(wasb_conn_id=self.conn_id)
        elif self.conn_type == 'docker':
            from airflow.providers.docker.hooks.docker import DockerHook
            return DockerHook(docker_conn_id=self.conn_id)
        elif self.conn_type == 'azure_data_lake':
            from airflow.providers.microsoft.azure.hooks.azure_data_lake import AzureDataLakeHook
            return AzureDataLakeHook(azure_data_lake_conn_id=self.conn_id)
        elif self.conn_type == 'azure_cosmos':
            from airflow.providers.microsoft.azure.hooks.azure_cosmos import AzureCosmosDBHook
            return AzureCosmosDBHook(azure_cosmos_conn_id=self.conn_id)
        elif self.conn_type == 'cassandra':
            from airflow.providers.apache.cassandra.hooks.cassandra import CassandraHook
            return CassandraHook(cassandra_conn_id=self.conn_id)
        elif self.conn_type == 'mongo':
            from airflow.providers.mongo.hooks.mongo import MongoHook
            return MongoHook(conn_id=self.conn_id)
        elif self.conn_type == 'gcpcloudsql':
            from airflow.providers.google.cloud.hooks.cloud_sql import CloudSQLDatabaseHook
            return CloudSQLDatabaseHook(gcp_cloudsql_conn_id=self.conn_id)
        elif self.conn_type == 'grpc':
            from airflow.providers.grpc.hooks.grpc import GrpcHook
            return GrpcHook(grpc_conn_id=self.conn_id)
        raise AirflowException("Unknown hook type {}".format(self.conn_type))

    def __repr__(self):
        return self.conn_id

    def log_info(self):
        """Return a loggable summary with password and extra masked."""
        return ("id: {}. Host: {}, Port: {}, Schema: {}, "
                "Login: {}, Password: {}, extra: {}".
                format(self.conn_id,
                       self.host,
                       self.port,
                       self.schema,
                       self.login,
                       "XXXXXXXX" if self.password else None,
                       "XXXXXXXX" if self.extra_dejson else None))

    def debug_info(self):
        """Return a summary with password masked but `extra` shown in clear.

        NOTE: unlike log_info, the decoded extra payload is included -- it may
        contain secrets, so use for debugging only.
        """
        return ("id: {}. Host: {}, Port: {}, Schema: {}, "
                "Login: {}, Password: {}, extra: {}".
                format(self.conn_id,
                       self.host,
                       self.port,
                       self.schema,
                       self.login,
                       "XXXXXXXX" if self.password else None,
                       self.extra_dejson))

    @property
    def extra_dejson(self):
        """Returns the extra property by deserializing json."""
        obj = {}
        if self.extra:
            try:
                obj = json.loads(self.extra)
            except Exception as e:
                # Malformed extra is logged and treated as empty rather than
                # propagating the parse error to callers.
                self.log.exception(e)
                self.log.error("Failed parsing the json for conn_id %s", self.conn_id)
        return obj
| 40.72 | 97 | 0.613388 |
import json
from urllib.parse import parse_qsl, quote, unquote, urlencode, urlparse
from sqlalchemy import Boolean, Column, Integer, String
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import synonym
from airflow.exceptions import AirflowException
from airflow.models.base import ID_LEN, Base
from airflow.models.crypto import get_fernet
from airflow.utils.log.logging_mixin import LoggingMixin
def parse_netloc_to_hostname(uri_parts):
hostname = unquote(uri_parts.hostname or '')
if '/' in hostname:
hostname = uri_parts.netloc
if "@" in hostname:
hostname = hostname.rsplit("@", 1)[1]
if ":" in hostname:
hostname = hostname.split(":", 1)[0]
hostname = unquote(hostname)
return hostname
class Connection(Base, LoggingMixin):
__tablename__ = "connection"
id = Column(Integer(), primary_key=True)
conn_id = Column(String(ID_LEN))
conn_type = Column(String(500))
host = Column(String(500))
schema = Column(String(500))
login = Column(String(500))
_password = Column('password', String(5000))
port = Column(Integer())
is_encrypted = Column(Boolean, unique=False, default=False)
is_extra_encrypted = Column(Boolean, unique=False, default=False)
_extra = Column('extra', String(5000))
_types = [
('docker', 'Docker Registry'),
('fs', 'File (path)'),
('ftp', 'FTP'),
('google_cloud_platform', 'Google Cloud Platform'),
('hdfs', 'HDFS'),
('http', 'HTTP'),
('pig_cli', 'Pig Client Wrapper'),
('hive_cli', 'Hive Client Wrapper'),
('hive_metastore', 'Hive Metastore Thrift'),
('hiveserver2', 'Hive Server 2 Thrift'),
('jdbc', 'JDBC Connection'),
('odbc', 'ODBC Connection'),
('jenkins', 'Jenkins'),
('mysql', 'MySQL'),
('postgres', 'Postgres'),
('oracle', 'Oracle'),
('vertica', 'Vertica'),
('presto', 'Presto'),
('s3', 'S3'),
('samba', 'Samba'),
('sqlite', 'Sqlite'),
('ssh', 'SSH'),
('cloudant', 'IBM Cloudant'),
('mssql', 'Microsoft SQL Server'),
('mesos_framework-id', 'Mesos Framework ID'),
('jira', 'JIRA'),
('redis', 'Redis'),
('wasb', 'Azure Blob Storage'),
('databricks', 'Databricks'),
('aws', 'Amazon Web Services'),
('emr', 'Elastic MapReduce'),
('snowflake', 'Snowflake'),
('segment', 'Segment'),
('sqoop', 'Sqoop'),
('azure_data_lake', 'Azure Data Lake'),
('azure_container_instances', 'Azure Container Instances'),
('azure_cosmos', 'Azure CosmosDB'),
('cassandra', 'Cassandra'),
('qubole', 'Qubole'),
('mongo', 'MongoDB'),
('gcpcloudsql', 'Google Cloud SQL'),
('grpc', 'GRPC Connection'),
]
def __init__(
self, conn_id=None, conn_type=None,
host=None, login=None, password=None,
schema=None, port=None, extra=None,
uri=None):
self.conn_id = conn_id
if uri:
self.parse_from_uri(uri)
else:
self.conn_type = conn_type
self.host = host
self.login = login
self.password = password
self.schema = schema
self.port = port
self.extra = extra
def parse_from_uri(self, uri):
uri_parts = urlparse(uri)
conn_type = uri_parts.scheme
if conn_type == 'postgresql':
conn_type = 'postgres'
elif '-' in conn_type:
conn_type = conn_type.replace('-', '_')
self.conn_type = conn_type
self.host = parse_netloc_to_hostname(uri_parts)
quoted_schema = uri_parts.path[1:]
self.schema = unquote(quoted_schema) if quoted_schema else quoted_schema
self.login = unquote(uri_parts.username) \
if uri_parts.username else uri_parts.username
self.password = unquote(uri_parts.password) \
if uri_parts.password else uri_parts.password
self.port = uri_parts.port
if uri_parts.query:
self.extra = json.dumps(dict(parse_qsl(uri_parts.query, keep_blank_values=True)))
def get_uri(self) -> str:
uri = '{}://'.format(str(self.conn_type).lower().replace('_', '-'))
authority_block = ''
if self.login is not None:
authority_block += quote(self.login, safe='')
if self.password is not None:
authority_block += ':' + quote(self.password, safe='')
if authority_block > '':
authority_block += '@'
uri += authority_block
host_block = ''
if self.host:
host_block += quote(self.host, safe='')
if self.port:
if host_block > '':
host_block += ':{}'.format(self.port)
else:
host_block += '@:{}'.format(self.port)
if self.schema:
host_block += '/{}'.format(quote(self.schema, safe=''))
uri += host_block
if self.extra_dejson:
uri += '?{}'.format(urlencode(self.extra_dejson))
return uri
def get_password(self):
if self._password and self.is_encrypted:
fernet = get_fernet()
if not fernet.is_encrypted:
raise AirflowException(
"Can't decrypt encrypted password for login={}, \
FERNET_KEY configuration is missing".format(self.login))
return fernet.decrypt(bytes(self._password, 'utf-8')).decode()
else:
return self._password
def set_password(self, value):
if value:
fernet = get_fernet()
self._password = fernet.encrypt(bytes(value, 'utf-8')).decode()
self.is_encrypted = fernet.is_encrypted
@declared_attr
def password(cls):
return synonym('_password',
descriptor=property(cls.get_password, cls.set_password))
def get_extra(self):
if self._extra and self.is_extra_encrypted:
fernet = get_fernet()
if not fernet.is_encrypted:
raise AirflowException(
"Can't decrypt `extra` params for login={},\
FERNET_KEY configuration is missing".format(self.login))
return fernet.decrypt(bytes(self._extra, 'utf-8')).decode()
else:
return self._extra
def set_extra(self, value):
if value:
fernet = get_fernet()
self._extra = fernet.encrypt(bytes(value, 'utf-8')).decode()
self.is_extra_encrypted = fernet.is_encrypted
else:
self._extra = value
self.is_extra_encrypted = False
@declared_attr
def extra(cls):
return synonym('_extra',
descriptor=property(cls.get_extra, cls.set_extra))
def rotate_fernet_key(self):
fernet = get_fernet()
if self._password and self.is_encrypted:
self._password = fernet.rotate(self._password.encode('utf-8')).decode()
if self._extra and self.is_extra_encrypted:
self._extra = fernet.rotate(self._extra.encode('utf-8')).decode()
def get_hook(self):
if self.conn_type == 'mysql':
from airflow.providers.mysql.hooks.mysql import MySqlHook
return MySqlHook(mysql_conn_id=self.conn_id)
elif self.conn_type == 'google_cloud_platform':
from airflow.providers.google.cloud.hooks.bigquery import BigQueryHook
return BigQueryHook(bigquery_conn_id=self.conn_id)
elif self.conn_type == 'postgres':
from airflow.providers.postgres.hooks.postgres import PostgresHook
return PostgresHook(postgres_conn_id=self.conn_id)
elif self.conn_type == 'pig_cli':
from airflow.providers.apache.pig.hooks.pig import PigCliHook
return PigCliHook(pig_cli_conn_id=self.conn_id)
elif self.conn_type == 'hive_cli':
from airflow.providers.apache.hive.hooks.hive import HiveCliHook
return HiveCliHook(hive_cli_conn_id=self.conn_id)
elif self.conn_type == 'presto':
from airflow.providers.presto.hooks.presto import PrestoHook
return PrestoHook(presto_conn_id=self.conn_id)
elif self.conn_type == 'hiveserver2':
from airflow.providers.apache.hive.hooks.hive import HiveServer2Hook
return HiveServer2Hook(hiveserver2_conn_id=self.conn_id)
elif self.conn_type == 'sqlite':
from airflow.providers.sqlite.hooks.sqlite import SqliteHook
return SqliteHook(sqlite_conn_id=self.conn_id)
elif self.conn_type == 'jdbc':
from airflow.providers.jdbc.hooks.jdbc import JdbcHook
return JdbcHook(jdbc_conn_id=self.conn_id)
elif self.conn_type == 'mssql':
from airflow.providers.microsoft.mssql.hooks.mssql import MsSqlHook
return MsSqlHook(mssql_conn_id=self.conn_id)
elif self.conn_type == 'odbc':
from airflow.providers.odbc.hooks.odbc import OdbcHook
return OdbcHook(odbc_conn_id=self.conn_id)
elif self.conn_type == 'oracle':
from airflow.providers.oracle.hooks.oracle import OracleHook
return OracleHook(oracle_conn_id=self.conn_id)
elif self.conn_type == 'vertica':
from airflow.providers.vertica.hooks.vertica import VerticaHook
return VerticaHook(vertica_conn_id=self.conn_id)
elif self.conn_type == 'cloudant':
from airflow.providers.cloudant.hooks.cloudant import CloudantHook
return CloudantHook(cloudant_conn_id=self.conn_id)
elif self.conn_type == 'jira':
from airflow.providers.jira.hooks.jira import JiraHook
return JiraHook(jira_conn_id=self.conn_id)
elif self.conn_type == 'redis':
from airflow.providers.redis.hooks.redis import RedisHook
return RedisHook(redis_conn_id=self.conn_id)
elif self.conn_type == 'wasb':
from airflow.providers.microsoft.azure.hooks.wasb import WasbHook
return WasbHook(wasb_conn_id=self.conn_id)
elif self.conn_type == 'docker':
from airflow.providers.docker.hooks.docker import DockerHook
return DockerHook(docker_conn_id=self.conn_id)
elif self.conn_type == 'azure_data_lake':
from airflow.providers.microsoft.azure.hooks.azure_data_lake import AzureDataLakeHook
return AzureDataLakeHook(azure_data_lake_conn_id=self.conn_id)
elif self.conn_type == 'azure_cosmos':
from airflow.providers.microsoft.azure.hooks.azure_cosmos import AzureCosmosDBHook
return AzureCosmosDBHook(azure_cosmos_conn_id=self.conn_id)
elif self.conn_type == 'cassandra':
from airflow.providers.apache.cassandra.hooks.cassandra import CassandraHook
return CassandraHook(cassandra_conn_id=self.conn_id)
elif self.conn_type == 'mongo':
from airflow.providers.mongo.hooks.mongo import MongoHook
return MongoHook(conn_id=self.conn_id)
elif self.conn_type == 'gcpcloudsql':
from airflow.providers.google.cloud.hooks.cloud_sql import CloudSQLDatabaseHook
return CloudSQLDatabaseHook(gcp_cloudsql_conn_id=self.conn_id)
elif self.conn_type == 'grpc':
from airflow.providers.grpc.hooks.grpc import GrpcHook
return GrpcHook(grpc_conn_id=self.conn_id)
raise AirflowException("Unknown hook type {}".format(self.conn_type))
def __repr__(self):
return self.conn_id
def log_info(self):
return ("id: {}. Host: {}, Port: {}, Schema: {}, "
"Login: {}, Password: {}, extra: {}".
format(self.conn_id,
self.host,
self.port,
self.schema,
self.login,
"XXXXXXXX" if self.password else None,
"XXXXXXXX" if self.extra_dejson else None))
def debug_info(self):
return ("id: {}. Host: {}, Port: {}, Schema: {}, "
"Login: {}, Password: {}, extra: {}".
format(self.conn_id,
self.host,
self.port,
self.schema,
self.login,
"XXXXXXXX" if self.password else None,
self.extra_dejson))
@property
def extra_dejson(self):
obj = {}
if self.extra:
try:
obj = json.loads(self.extra)
except Exception as e:
self.log.exception(e)
self.log.error("Failed parsing the json for conn_id %s", self.conn_id)
return obj
| true | true |
f71b5426abe6a21a2aafe3e9e5c4721319783d05 | 5,751 | py | Python | original_author_notes/yolo_video.py | adam-blinzler/simple-lane-detection | 8814e0aaf7ac56b7e5be59634e363ca17839effb | [
"MIT"
] | null | null | null | original_author_notes/yolo_video.py | adam-blinzler/simple-lane-detection | 8814e0aaf7ac56b7e5be59634e363ca17839effb | [
"MIT"
] | null | null | null | original_author_notes/yolo_video.py | adam-blinzler/simple-lane-detection | 8814e0aaf7ac56b7e5be59634e363ca17839effb | [
"MIT"
] | null | null | null | # USAGE
# USAGE
# python yolo_video.py --input videos/airport.mp4 --output output/airport_output.avi --object_detection object_detection-coco

# Run a YOLOv3 (COCO) object detector over every frame of a video file and
# write an annotated copy of the video to disk.

# import the necessary packages
import numpy as np
import argparse
import imutils
import time
import cv2
import os

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--input", required=True,
    help="path to input video")
ap.add_argument("-o", "--output", required=True,
    help="path to output video")
ap.add_argument("-y", "--object_detection", required=True,
    help="base path to YOLO directory")
ap.add_argument("-c", "--confidence", type=float, default=0.5,
    help="minimum probability to filter weak detections")
ap.add_argument("-t", "--threshold", type=float, default=0.3,
    help="threshold when applying non-maxima suppression")
args = vars(ap.parse_args())

# load the COCO class labels our YOLO model was trained on
labelsPath = os.path.sep.join([args["object_detection"], "coco.names"])
LABELS = open(labelsPath).read().strip().split("\n")

# initialize a list of colors to represent each possible class label
np.random.seed(42)
COLORS = np.random.randint(0, 255, size=(len(LABELS), 3),
    dtype="uint8")

# derive the paths to the YOLO weights and model configuration
weightsPath = os.path.sep.join([args["object_detection"], "yolov3.weights"])
configPath = os.path.sep.join([args["object_detection"], "yolov3.cfg"])

# load our YOLO object detector trained on COCO dataset (80 classes)
# and determine only the *output* layer names that we need from YOLO
print("[INFO] loading YOLO from disk...")
net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)
ln = net.getLayerNames()
# getUnconnectedOutLayers() returns a column vector on OpenCV 3.x and a flat
# array on 4.x; flatten() copes with both shapes (the old i[0] indexing
# raised IndexError on OpenCV 4.x)
ln = [ln[i - 1] for i in net.getUnconnectedOutLayers().flatten()]

# initialize the video stream, pointer to output video file, and
# frame dimensions
vs = cv2.VideoCapture(args["input"])
writer = None
(W, H) = (None, None)

# try to determine the total number of frames in the video file
try:
    prop = cv2.cv.CV_CAP_PROP_FRAME_COUNT if imutils.is_cv2() \
        else cv2.CAP_PROP_FRAME_COUNT
    total = int(vs.get(prop))
    print("[INFO] {} total frames in video".format(total))
# an error occurred while trying to determine the total number of frames;
# catch Exception (not a bare except) so Ctrl-C still interrupts the script
except Exception:
    print("[INFO] could not determine # of frames in video")
    print("[INFO] no approx. completion time can be provided")
    total = -1

# loop over frames from the video file stream
while True:
    # read the next frame from the file
    (grabbed, frame) = vs.read()

    # if the frame was not grabbed, then we have reached the end
    # of the stream
    if not grabbed:
        break

    # if the frame dimensions are empty, grab them
    if W is None or H is None:
        (H, W) = frame.shape[:2]

    # construct a blob from the input frame and then perform a forward
    # pass of the YOLO object detector, giving us our bounding boxes
    # and associated probabilities
    blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416),
        swapRB=True, crop=False)
    net.setInput(blob)
    start = time.time()
    layerOutputs = net.forward(ln)
    end = time.time()

    # initialize our lists of detected bounding boxes, confidences,
    # and class IDs, respectively
    boxes = []
    confidences = []
    classIDs = []

    # loop over each of the layer outputs
    for output in layerOutputs:
        # loop over each of the detections
        for detection in output:
            # extract the class ID and confidence (i.e., probability)
            # of the current object detection
            scores = detection[5:]
            classID = np.argmax(scores)
            confidence = scores[classID]

            # filter out weak predictions by ensuring the detected
            # probability is greater than the minimum probability
            if confidence > args["confidence"]:
                # scale the bounding box coordinates back relative to
                # the size of the image; YOLO returns the center (x, y)
                # of the box followed by its width and height
                box = detection[0:4] * np.array([W, H, W, H])
                (centerX, centerY, width, height) = box.astype("int")

                # use the center (x, y)-coordinates to derive the top
                # left corner of the bounding box
                x = int(centerX - (width / 2))
                y = int(centerY - (height / 2))

                # update our list of bounding box coordinates,
                # confidences, and class IDs
                boxes.append([x, y, int(width), int(height)])
                confidences.append(float(confidence))
                classIDs.append(classID)

    # apply non-maxima suppression to suppress weak, overlapping
    # bounding boxes
    idxs = cv2.dnn.NMSBoxes(boxes, confidences, args["confidence"],
        args["threshold"])

    # ensure at least one detection exists
    if len(idxs) > 0:
        # loop over the indexes we are keeping
        for i in idxs.flatten():
            # extract the bounding box coordinates
            (x, y) = (boxes[i][0], boxes[i][1])
            (w, h) = (boxes[i][2], boxes[i][3])

            # draw a bounding box rectangle and label on the frame
            color = [int(c) for c in COLORS[classIDs[i]]]
            cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
            text = "{}: {:.4f}".format(LABELS[classIDs[i]],
                confidences[i])
            cv2.putText(frame, text, (x, y - 5),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

    # check if the video writer is None
    if writer is None:
        # initialize our video writer
        fourcc = cv2.VideoWriter_fourcc(*"MJPG")
        writer = cv2.VideoWriter(args["output"], fourcc, 30,
            (frame.shape[1], frame.shape[0]), True)

        # some information on processing a single frame
        if total > 0:
            elap = (end - start)
            print("[INFO] single frame took {:.4f} seconds".format(elap))
            print("[INFO] estimated total time to finish: {:.4f}".format(
                elap * total))

    # write the output frame to disk
    writer.write(frame)

# release the file pointers
print("[INFO] cleaning up...")
# the writer is only created once a frame was grabbed; guard against empty
# or unreadable videos (calling release() on None raised AttributeError)
if writer is not None:
    writer.release()
vs.release()
# USAGE
# python yolo_video.py --input videos/airport.mp4 --output output/airport_output.avi --object_detection object_detection-coco

# Run a YOLOv3 (COCO) object detector over every frame of a video file and
# write an annotated copy of the video to disk.

# import the necessary packages
import numpy as np
import argparse
import imutils
import time
import cv2
import os

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--input", required=True,
    help="path to input video")
ap.add_argument("-o", "--output", required=True,
    help="path to output video")
ap.add_argument("-y", "--object_detection", required=True,
    help="base path to YOLO directory")
ap.add_argument("-c", "--confidence", type=float, default=0.5,
    help="minimum probability to filter weak detections")
ap.add_argument("-t", "--threshold", type=float, default=0.3,
    help="threshold when applying non-maxima suppression")
args = vars(ap.parse_args())

# load the COCO class labels our YOLO model was trained on
labelsPath = os.path.sep.join([args["object_detection"], "coco.names"])
LABELS = open(labelsPath).read().strip().split("\n")

# initialize a list of colors to represent each possible class label
np.random.seed(42)
COLORS = np.random.randint(0, 255, size=(len(LABELS), 3),
    dtype="uint8")

# derive the paths to the YOLO weights and model configuration
weightsPath = os.path.sep.join([args["object_detection"], "yolov3.weights"])
configPath = os.path.sep.join([args["object_detection"], "yolov3.cfg"])

# load our YOLO object detector trained on COCO dataset (80 classes)
# and determine only the *output* layer names that we need from YOLO
print("[INFO] loading YOLO from disk...")
net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)
ln = net.getLayerNames()
# getUnconnectedOutLayers() returns a column vector on OpenCV 3.x and a flat
# array on 4.x; flatten() copes with both shapes (the old i[0] indexing
# raised IndexError on OpenCV 4.x)
ln = [ln[i - 1] for i in net.getUnconnectedOutLayers().flatten()]

# initialize the video stream, pointer to output video file, and
# frame dimensions
vs = cv2.VideoCapture(args["input"])
writer = None
(W, H) = (None, None)

# try to determine the total number of frames in the video file
try:
    prop = cv2.cv.CV_CAP_PROP_FRAME_COUNT if imutils.is_cv2() \
        else cv2.CAP_PROP_FRAME_COUNT
    total = int(vs.get(prop))
    print("[INFO] {} total frames in video".format(total))
# an error occurred while trying to determine the total number of frames;
# catch Exception (not a bare except) so Ctrl-C still interrupts the script
except Exception:
    print("[INFO] could not determine # of frames in video")
    print("[INFO] no approx. completion time can be provided")
    total = -1

# loop over frames from the video file stream
while True:
    # read the next frame from the file
    (grabbed, frame) = vs.read()

    # if the frame was not grabbed, then we have reached the end
    # of the stream
    if not grabbed:
        break

    # if the frame dimensions are empty, grab them
    if W is None or H is None:
        (H, W) = frame.shape[:2]

    # construct a blob from the input frame and then perform a forward
    # pass of the YOLO object detector, giving us our bounding boxes
    # and associated probabilities
    blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416),
        swapRB=True, crop=False)
    net.setInput(blob)
    start = time.time()
    layerOutputs = net.forward(ln)
    end = time.time()

    # initialize our lists of detected bounding boxes, confidences,
    # and class IDs, respectively
    boxes = []
    confidences = []
    classIDs = []

    # loop over each of the layer outputs
    for output in layerOutputs:
        # loop over each of the detections
        for detection in output:
            # extract the class ID and confidence (i.e., probability)
            # of the current object detection
            scores = detection[5:]
            classID = np.argmax(scores)
            confidence = scores[classID]

            # filter out weak predictions by ensuring the detected
            # probability is greater than the minimum probability
            if confidence > args["confidence"]:
                # scale the bounding box coordinates back relative to
                # the size of the image; YOLO returns the center (x, y)
                # of the box followed by its width and height
                box = detection[0:4] * np.array([W, H, W, H])
                (centerX, centerY, width, height) = box.astype("int")

                # use the center (x, y)-coordinates to derive the top
                # left corner of the bounding box
                x = int(centerX - (width / 2))
                y = int(centerY - (height / 2))

                # update our list of bounding box coordinates,
                # confidences, and class IDs
                boxes.append([x, y, int(width), int(height)])
                confidences.append(float(confidence))
                classIDs.append(classID)

    # apply non-maxima suppression to suppress weak, overlapping
    # bounding boxes
    idxs = cv2.dnn.NMSBoxes(boxes, confidences, args["confidence"],
        args["threshold"])

    # ensure at least one detection exists
    if len(idxs) > 0:
        # loop over the indexes we are keeping
        for i in idxs.flatten():
            # extract the bounding box coordinates
            (x, y) = (boxes[i][0], boxes[i][1])
            (w, h) = (boxes[i][2], boxes[i][3])

            # draw a bounding box rectangle and label on the frame
            color = [int(c) for c in COLORS[classIDs[i]]]
            cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
            text = "{}: {:.4f}".format(LABELS[classIDs[i]],
                confidences[i])
            cv2.putText(frame, text, (x, y - 5),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

    # check if the video writer is None
    if writer is None:
        # initialize our video writer
        fourcc = cv2.VideoWriter_fourcc(*"MJPG")
        writer = cv2.VideoWriter(args["output"], fourcc, 30,
            (frame.shape[1], frame.shape[0]), True)

        # some information on processing a single frame
        if total > 0:
            elap = (end - start)
            print("[INFO] single frame took {:.4f} seconds".format(elap))
            print("[INFO] estimated total time to finish: {:.4f}".format(
                elap * total))

    # write the output frame to disk
    writer.write(frame)

# release the file pointers
print("[INFO] cleaning up...")
# the writer is only created once a frame was grabbed; guard against empty
# or unreadable videos (calling release() on None raised AttributeError)
if writer is not None:
    writer.release()
vs.release()
f71b5468bb09f935a4b8dd8609a936248498eb63 | 7,498 | py | Python | tessia/server/api/views/auth.py | tessia-project/tessia | b9ded8dc7f0b9a7a0ea00d95b5ccc4af4d2e7540 | [
"Apache-2.0"
] | 5 | 2020-06-04T10:20:33.000Z | 2020-10-26T15:09:19.000Z | tessia/server/api/views/auth.py | tessia-project/tessia | b9ded8dc7f0b9a7a0ea00d95b5ccc4af4d2e7540 | [
"Apache-2.0"
] | null | null | null | tessia/server/api/views/auth.py | tessia-project/tessia | b9ded8dc7f0b9a7a0ea00d95b5ccc4af4d2e7540 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016, 2017 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Authentication routines
"""
#
# IMPORTS
#
from base64 import b64decode
from flask import g as flask_global
from flask import request as flask_request
from sqlalchemy.sql import func
from tessia.server import auth
from tessia.server.api.db import API_DB
from tessia.server.api.exceptions import UnauthorizedError
from tessia.server.config import CONF
from tessia.server.db.models import User
from tessia.server.db.models import UserKey
# use the exception directly so that potion custom error handler can catch
# it and convert to a valid json response
from werkzeug.exceptions import BadRequest
#
# CONSTANTS AND DEFINITIONS
#
#
# CODE
#
class _LoginManager:
    """
    Wrapper around the configured auth backend plus the two HTTP
    Authorization schemes accepted by the API: basic credentials
    (delegated to the login manager) and API keys (checked in the DB).
    """
    # holds the login manager object (created lazily on first use)
    _manager = None
    @classmethod
    def get_login_manager(cls):
        """
        Return the login manager object, as defined by the auth module.
        The manager is created on first call and cached in ``_manager``.
        """
        if cls._manager is None:
            cls._manager = auth.get_manager()
        return cls._manager
    # get_login_manager()
    @classmethod
    def authenticate_basic(cls, auth_value):
        """
        Basic authentication with username and password, validated against
        the login manager defined in the config file (usually LDAP).

        Args:
            auth_value (str): the value part of the Authorization header in
                the form username:password, base64 encoded

        Raises:
            BadRequest: if the value is malformed
            UnauthorizedError: if credentials are invalid, or the user is
                not registered and auto-creation is disabled

        Returns:
            User: instance of User's sqlalchemy model
        """
        try:
            # http headers are always ascii
            user, passwd = b64decode(auth_value).decode(
                'ascii').split(':', 1)
        except Exception:
            raise BadRequest()
        case_sensitive = CONF.get_config().get(
            'auth', {}).get('case_sensitive', False)
        if not case_sensitive:
            # logins should be case-insensitive
            user = user.lower()
        # user authentication with login provider failed: return unauthorized
        result = cls.get_login_manager().authenticate(user, passwd)
        if result is None:
            raise UnauthorizedError()
        # find user entry in database
        user_entry = User.query.filter_by(login=user).first()
        if user_entry is not None:
            # update db in case user information has changed
            changed = False
            if user_entry.name != result['fullname']:
                changed = True
                user_entry.name = result['fullname']
            if user_entry.title != result.get('title', None):
                changed = True
                user_entry.title = result.get('title', None)
            if changed:
                API_DB.db.session.add(user_entry)
                API_DB.db.session.commit()
            return user_entry
        allow_auto_create = CONF.get_config().get(
            'auth', {}).get('allow_user_auto_create', False)
        # auto creation of users not allowed: report unauthorized
        if not allow_auto_create:
            raise UnauthorizedError(
                msg='User authenticated but not registered in database')
        # create user in database
        new_user = User()
        if case_sensitive:
            new_user.login = result['login']
        else:
            # save login as lowercase to avoid duplicates or user having to
            # worry about entering the right case
            new_user.login = result['login'].lower()
        new_user.name = result['fullname']
        # job title is optional
        new_user.title = result.get('title', None)
        new_user.restricted = False
        new_user.admin = False
        API_DB.db.session.add(new_user)
        API_DB.db.session.commit()
        return new_user
    # authenticate_basic()
    @classmethod
    def authenticate_key(cls, auth_value):
        """
        API key-based authentication.

        Args:
            auth_value (str): the value part of the Authorization header in
                the form key_id:key_secret

        Raises:
            BadRequest: if the value is malformed
            UnauthorizedError: if credentials are invalid

        Returns:
            User: instance of User's sqlalchemy model (the key's owner)
        """
        try:
            # http headers are always ascii
            key_id, key_secret = auth_value.split(':', 1)
        except Exception:
            raise BadRequest()
        # NOTE(review): the secret is matched via a plain DB equality
        # filter; a constant-time comparison would be safer -- confirm
        # whether this matters for the deployment model
        key_entry = UserKey.query.filter_by(
            key_id=key_id, key_secret=key_secret).first()
        if key_entry is None:
            raise UnauthorizedError()
        # record when the key was last used, for auditing
        key_entry.last_used = func.now()
        API_DB.db.session.add(key_entry)
        API_DB.db.session.commit()
        return key_entry.user_rel
    # authenticate_key()
# _LoginManager
def authorize(decorated_view):
    """
    A decorator which implements the authorization routine: the request's
    Authorization header is validated before the wrapped view runs.

    Args:
        decorated_view (method): the view function to be decorated

    Returns:
        method: the authenticate wrapper containing the original view

    Raises:
        None
    """
    def authenticate(*view_args, **view_kwargs):
        """
        Validate the request credentials, expose the authenticated user via
        the flask globals, then execute the wrapped view.

        Args:
            view_args: packed args for the decorated view
            view_kwargs: packed keyword args for the decorated view

        Returns:
            any: the return value of the decorated view

        Raises:
            UnauthorizedError: if the auth header is missing or invalid
        """
        header = flask_request.headers.get('Authorization', None)
        # no credentials provided: reply that authorization is needed; the
        # exception advertises the allowed scheme via WWW-Authenticate
        if not header:
            raise UnauthorizedError(auth_provided=False)
        try:
            scheme, value = header.split(None, 1)
        except ValueError:
            raise UnauthorizedError()
        # normalize the scheme for a case-insensitive comparison
        scheme = scheme.lower()
        handlers = {
            'basic': _LoginManager.authenticate_basic,
            'x-key': _LoginManager.authenticate_key,
        }
        try:
            handler = handlers[scheme]
        except KeyError:
            # scheme not supported
            raise UnauthorizedError()
        user_entry = handler(value)
        # set model as session variable
        flask_global.auth_user = user_entry  # pylint: disable=assigning-non-slot
        # the method used might be relevant to the view, e.g. api key
        # operations are only allowed after basic (password) auth
        flask_global.auth_method = scheme  # pylint: disable=assigning-non-slot
        return decorated_view(*view_args, **view_kwargs)
    # authenticate()
    return authenticate
# authorize()
| 31.771186 | 83 | 0.634169 |
from base64 import b64decode
from flask import g as flask_global
from flask import request as flask_request
from sqlalchemy.sql import func
from tessia.server import auth
from tessia.server.api.db import API_DB
from tessia.server.api.exceptions import UnauthorizedError
from tessia.server.config import CONF
from tessia.server.db.models import User
from tessia.server.db.models import UserKey
from werkzeug.exceptions import BadRequest
class _LoginManager:
    """
    Wrapper around the configured auth backend plus the two HTTP
    Authorization schemes accepted by the API: basic credentials
    (delegated to the login manager) and API keys (checked in the DB).
    """
    # holds the login manager object (created lazily on first use)
    _manager = None
    @classmethod
    def get_login_manager(cls):
        """
        Return the login manager object, as defined by the auth module.
        The manager is created on first call and cached in ``_manager``.
        """
        if cls._manager is None:
            cls._manager = auth.get_manager()
        return cls._manager
    @classmethod
    def authenticate_basic(cls, auth_value):
        """
        Basic authentication with username and password, validated against
        the login manager defined in the config file (usually LDAP).

        Args:
            auth_value (str): the value part of the Authorization header in
                the form username:password, base64 encoded

        Raises:
            BadRequest: if the value is malformed
            UnauthorizedError: if credentials are invalid, or the user is
                not registered and auto-creation is disabled

        Returns:
            User: instance of User's sqlalchemy model
        """
        try:
            # http headers are always ascii
            user, passwd = b64decode(auth_value).decode(
                'ascii').split(':', 1)
        except Exception:
            raise BadRequest()
        case_sensitive = CONF.get_config().get(
            'auth', {}).get('case_sensitive', False)
        if not case_sensitive:
            # logins should be case-insensitive
            user = user.lower()
        result = cls.get_login_manager().authenticate(user, passwd)
        # authentication with the login provider failed: unauthorized
        if result is None:
            raise UnauthorizedError()
        user_entry = User.query.filter_by(login=user).first()
        if user_entry is not None:
            # update db in case user information has changed
            changed = False
            if user_entry.name != result['fullname']:
                changed = True
                user_entry.name = result['fullname']
            if user_entry.title != result.get('title', None):
                changed = True
                user_entry.title = result.get('title', None)
            if changed:
                API_DB.db.session.add(user_entry)
                API_DB.db.session.commit()
            return user_entry
        allow_auto_create = CONF.get_config().get(
            'auth', {}).get('allow_user_auto_create', False)
        # auto creation of users not allowed: report unauthorized
        if not allow_auto_create:
            raise UnauthorizedError(
                msg='User authenticated but not registered in database')
        # create user in database
        new_user = User()
        if case_sensitive:
            new_user.login = result['login']
        else:
            # save login as lowercase to avoid duplicates
            new_user.login = result['login'].lower()
        new_user.name = result['fullname']
        # job title is optional
        new_user.title = result.get('title', None)
        new_user.restricted = False
        new_user.admin = False
        API_DB.db.session.add(new_user)
        API_DB.db.session.commit()
        return new_user
    @classmethod
    def authenticate_key(cls, auth_value):
        """
        API key-based authentication.

        Args:
            auth_value (str): the value part of the Authorization header in
                the form key_id:key_secret

        Raises:
            BadRequest: if the value is malformed
            UnauthorizedError: if credentials are invalid

        Returns:
            User: instance of User's sqlalchemy model (the key's owner)
        """
        try:
            key_id, key_secret = auth_value.split(':', 1)
        except Exception:
            raise BadRequest()
        # NOTE(review): the secret is matched via a plain DB equality
        # filter; a constant-time comparison would be safer -- confirm
        # whether this matters for the deployment model
        key_entry = UserKey.query.filter_by(
            key_id=key_id, key_secret=key_secret).first()
        if key_entry is None:
            raise UnauthorizedError()
        # record when the key was last used, for auditing
        key_entry.last_used = func.now()
        API_DB.db.session.add(key_entry)
        API_DB.db.session.commit()
        return key_entry.user_rel
def authorize(decorated_view):
    """
    A decorator which implements the authorization routine: the request's
    Authorization header is validated before the wrapped view runs.

    Args:
        decorated_view (method): the view function to be decorated

    Returns:
        method: the authenticate wrapper containing the original view
    """
    def authenticate(*args, **kwargs):
        """
        Validate the request credentials, expose the authenticated user via
        the flask globals, then execute the wrapped view.

        Raises:
            UnauthorizedError: if the auth header is missing or invalid
        """
        auth_header = flask_request.headers.get('Authorization', None)
        # no credentials provided: reply that authorization is needed; the
        # exception advertises the allowed scheme via WWW-Authenticate
        if not auth_header:
            raise UnauthorizedError(auth_provided=False)
        try:
            auth_scheme, auth_value = auth_header.split(None, 1)
        except ValueError:
            raise UnauthorizedError()
        # normalize the scheme for a case-insensitive comparison
        auth_scheme = auth_scheme.lower()
        if auth_scheme == 'basic':
            user_entry = _LoginManager.authenticate_basic(auth_value)
        elif auth_scheme == 'x-key':
            user_entry = _LoginManager.authenticate_key(auth_value)
        else:
            # scheme not supported
            raise UnauthorizedError()
        # make the authenticated user and the scheme used available to the
        # views via the flask request globals
        flask_global.auth_user = user_entry
        flask_global.auth_method = auth_scheme
        return decorated_view(*args, **kwargs)
    return authenticate
| true | true |
f71b54b467c00b5f3318a9985c87b77ec8bf71f6 | 853 | py | Python | med2img/tests/test_med2img.py | mohitchandarana/pl-med2img | c8c9df64a5e244d57e2fdb9ffadeea3c455eab23 | [
"MIT"
] | 4 | 2017-07-06T20:32:08.000Z | 2021-09-18T02:46:55.000Z | med2img/tests/test_med2img.py | mohitchandarana/pl-med2img | c8c9df64a5e244d57e2fdb9ffadeea3c455eab23 | [
"MIT"
] | 1 | 2021-11-07T20:17:16.000Z | 2021-11-08T20:11:48.000Z | med2img/tests/test_med2img.py | mohitchandarana/pl-med2img | c8c9df64a5e244d57e2fdb9ffadeea3c455eab23 | [
"MIT"
] | 3 | 2020-09-01T07:13:19.000Z | 2021-03-25T16:06:39.000Z |
from unittest import TestCase
from unittest import mock
from med2img.med2img import Med2img
class Med2imgTests(TestCase):
    """Unit tests for the Med2img plugin app."""

    def setUp(self):
        # a fresh plugin instance for every test case
        self.app = Med2img()

    def test_run(self):
        """Exercise the plugin's run() entry point end to end."""
        cli_args = []
        # apps whose TYPE is 'ds' take positional input/output directories
        # (the directory names here are placeholders for the fixture paths)
        if self.app.TYPE == 'ds':
            cli_args.extend(['inputdir', 'outputdir'])
        options = self.app.parse_args(cli_args)
        self.app.run(options)
        self.assertEqual(options.outputdir, 'outputdir')
from unittest import TestCase
from unittest import mock
from med2img.med2img import Med2img
class Med2imgTests(TestCase):
    """
    Unit tests for the Med2img plugin app.
    """
    def setUp(self):
        # a fresh plugin instance for every test case
        self.app = Med2img()
    def test_run(self):
        """
        Exercise the plugin's run() entry point end to end.
        """
        args = []
        # apps whose TYPE is 'ds' take positional input/output directories
        if self.app.TYPE == 'ds':
            args.append('inputdir')
            args.append('outputdir')
        options = self.app.parse_args(args)
        self.app.run(options)
        # the parsed options must reflect the positional output directory
        self.assertEqual(options.outputdir, 'outputdir')
| true | true |
f71b555a4703e5eb655f1c62d6c59060c0f772cf | 6,488 | py | Python | MPL/MPL_envs/reach/reach_v0.py | vikashplus/MPL | 4a784fd94dc7a5988a1eca85851ee546ca1992f9 | [
"Apache-2.0"
] | null | null | null | MPL/MPL_envs/reach/reach_v0.py | vikashplus/MPL | 4a784fd94dc7a5988a1eca85851ee546ca1992f9 | [
"Apache-2.0"
] | null | null | null | MPL/MPL_envs/reach/reach_v0.py | vikashplus/MPL | 4a784fd94dc7a5988a1eca85851ee546ca1992f9 | [
"Apache-2.0"
] | null | null | null | import numpy as np
from gym import utils
from mjrl.envs import mujoco_env
from mujoco_py import MjViewer
from MPL.MPL_robot.robot import Robot
import os
# TODO: Action normalization is missing
class sallyReachEnv(mujoco_env.MujocoEnv, utils.EzPickle):
    """Bimanual MPL reaching task.

    The right and left grasp sites of the robot must be driven onto the
    ``Rtarget`` and ``Ltarget`` sites.  Target placement is delegated to
    subclasses through ``reset_model``.
    """

    def __init__(self, noise_scale=0.0):
        """Load the model/robot pair.

        Args:
            noise_scale (float): magnitude of the sensor noise applied
                when observations are read.
        """
        # prep
        utils.EzPickle.__init__(self)
        self._noise_scale = noise_scale
        self.initializing = True
        curr_dir = os.path.dirname(os.path.abspath(__file__))
        # placeholder site ids; resolved once the model is loaded below
        self.Rtarget = 0
        self.Ltarget = 0
        self.Rgrasp = 0
        self.Lgrasp = 0
        # acquire robot
        self.mpl = Robot(name='sallyReach', model_file=curr_dir+'/reach_v0.xml', config_file=curr_dir+'/reach_v0.config')
        # acquire env
        mujoco_env.MujocoEnv.__init__(self, curr_dir+'/reach_v0.xml', 20)
        self.Rtarget = self.sim.model.site_name2id('Rtarget')
        self.Ltarget = self.sim.model.site_name2id('Ltarget')
        self.Rgrasp = self.sim.model.site_name2id('Rgrasp')
        self.Lgrasp = self.sim.model.site_name2id('Lgrasp')
        # env ready
        self.initializing = False

    def step(self, a):
        """Apply action ``a`` for one control step and score the result."""
        self.mpl.step(self, a, self.frame_skip*self.sim.model.opt.timestep)
        obs = self.get_obs()
        score, reward_dict, solved, done = self._get_score_reward_solved_done(self.obs_dict)
        # finalize step
        env_info = {
            'time': self.obs_dict['t'],
            'obs_dict': self.obs_dict,
            'rewards': reward_dict,
            'score': score,
            'solved': solved
        }
        return obs, reward_dict['total'], done, env_info

    # query robot and populate observations
    def get_obs(self):
        """Read robot sensors and return the flat observation vector."""
        # ask robot for sensor data
        sen = self.mpl.get_sensors(self, noise_scale=self._noise_scale)
        # parse sensor data into obs dict
        self.obs_dict = {}
        self.obs_dict['t'] = sen['time']
        self.obs_dict['Tmpl_pos'] = sen['Tmpl_pos']
        self.obs_dict['Rmpl_pos'] = sen['Rmpl_pos']
        self.obs_dict['Lmpl_pos'] = sen['Lmpl_pos']
        self.obs_dict['Tmpl_vel'] = sen['Tmpl_vel']
        self.obs_dict['Rmpl_vel'] = sen['Rmpl_vel']
        self.obs_dict['Lmpl_vel'] = sen['Lmpl_vel']
        # world-frame errors between each target site and its grasp site
        self.obs_dict['Rerr'] = self.sim.data.site_xpos[self.Rtarget]-self.sim.data.site_xpos[self.Rgrasp]
        self.obs_dict['Lerr'] = self.sim.data.site_xpos[self.Ltarget]-self.sim.data.site_xpos[self.Lgrasp]
        # vectorize observations
        return np.concatenate([
            self.obs_dict['Tmpl_pos'],
            self.obs_dict['Rmpl_pos'],
            self.obs_dict['Lmpl_pos'],
            self.obs_dict['Tmpl_vel'],
            self.obs_dict['Rmpl_vel'],
            self.obs_dict['Lmpl_vel'],
            self.obs_dict['Lerr'],
            self.obs_dict['Rerr']])

    # evaluate observations
    def _get_score_reward_solved_done(self, obs, act=None):
        """Score an observation dict.

        Returns:
            tuple: (score, reward_dict, solved, done)
        """
        Ldist = np.linalg.norm(obs['Lerr'])
        Rdist = np.linalg.norm(obs['Rerr'])
        # terminate early if either hand strays more than 1m from target
        done = (bool(Ldist > 1.0) or bool(Rdist > 1.0)) \
            if not self.initializing else False
        reward_dict = {}
        avg_dist = (Ldist+Rdist)/2.0
        score = -1.* avg_dist
        reward_dict["avg_dist"] = score
        # per-hand proximity bonus, plus a joint bonus when both are close
        reward_dict["small_bonus"] = 2.0*(Ldist<.1) + 2.0*(Rdist<.1)
        reward_dict["big_bonus"] = 2.0*(Ldist<.1) * 2.0*(Rdist<.1)
        reward_dict["total"] = reward_dict["avg_dist"] + reward_dict["small_bonus"] + reward_dict["big_bonus"] - 50.0 * int(done)
        solved = bool(avg_dist<0.100)
        return score, reward_dict, solved, done

    # reset model
    def reset_model(self):
        """Place targets and reset state; for child classes to define."""
        raise NotImplementedError

    # evaluate a path
    def compute_path_rewards(self, paths):
        """Recompute ``paths["rewards"]`` from ``paths["observations"]``.

        path["observations"] : (num_traj, horizon, obs_dim)
        path["rewards"] should have shape (num_traj, horizon)
        """
        obs = paths["observations"]
        # BUGFIX: _get_score_reward_solved_done returns four values; the
        # previous three-name unpacking raised ValueError on every call.
        score, rewards, solved, done = self._get_score_reward_solved_done(obs)
        # NOTE(review): this assumes vectorized rewards exposing .shape;
        # confirm the scoring helper supports batched obs dicts upstream
        paths["rewards"] = rewards if rewards.shape[0] > 1 else rewards.ravel()

    # evaluate policy's success from a collection of paths
    def evaluate_success(self, paths, logger=None):
        """Return (or log) the percentage of paths solved near their end."""
        success = 0.0
        for p in paths:
            # a path succeeds if it is solved within its last 4 steps
            if np.mean(p['env_infos']['solved'][-4:]) > 0.0:
                success += 1.0
        success_rate = 100.0*success/len(paths)
        if logger is None:
            # nowhere to log so return the value
            return success_rate
        else:
            # log the success
            # can log multiple statistics here if needed
            logger.log_kv('success_rate', success_rate)
            return None

    # --------------------------------
    # get and set states
    # --------------------------------
    def get_env_state(self):
        """Snapshot the simulator state (positions and velocities)."""
        return dict(qp=self.data.qpos.copy(), qv=self.data.qvel.copy())

    def set_env_state(self, state):
        """Restore a snapshot produced by ``get_env_state``."""
        self.sim.reset()
        qp = state['qp'].copy()
        qv = state['qv'].copy()
        self.set_state(qp, qv)
        self.sim.forward()

    # --------------------------------
    # utility functions
    # --------------------------------
    def get_env_infos(self):
        """Return auxiliary info (currently just the env state)."""
        return dict(state=self.get_env_state())

    def mj_viewer_setup(self):
        """Create and position the interactive MuJoCo viewer."""
        self.viewer = MjViewer(self.sim)
        self.viewer.cam.azimuth = -90
        self.viewer.cam.distance = 2.5
        self.viewer.cam.elevation = -30
        self.sim.forward()

    def close_env(self):
        """Nothing beyond the simulator to release."""
        pass
# Reach at fixed targets
class sallyReachEnvFixed(sallyReachEnv):
    """Reach task variant with fixed target locations."""

    def __init__(self):
        super().__init__()

    def reset_model(self):
        """Place both targets at fixed positions and reset the robot pose."""
        right_target_pos = np.array([0.15, 0.2, .6])
        left_target_pos = np.array([-0.15, 0.2, .3])
        self.sim.model.site_pos[self.Rtarget] = right_target_pos
        self.sim.model.site_pos[self.Ltarget] = left_target_pos
        self.set_state(self.init_qpos, self.init_qvel)
        self.sim.forward()
        return self.get_obs()
# Reach at random targets
class sallyReachEnvRandom(sallyReachEnv):
    """Reach task variant with randomized target locations."""

    def __init__(self):
        super().__init__()

    def reset_model(self):
        """Sample fresh targets in each hand's workspace, then reset."""
        rng = self.np_random
        # right hand works the +x half-space, left hand the -x half-space
        self.sim.model.site_pos[self.Rtarget] = rng.uniform(low=[0, .1, .3], high=[0.5, .5, .6])
        self.sim.model.site_pos[self.Ltarget] = rng.uniform(low=[-.5, .1, .3], high=[0, .5, .6])
        self.set_state(self.init_qpos, self.init_qvel)
        self.sim.forward()
        return self.get_obs()
| 33.968586 | 130 | 0.594945 | import numpy as np
from gym import utils
from mjrl.envs import mujoco_env
from mujoco_py import MjViewer
from MPL.MPL_robot.robot import Robot
import os
class sallyReachEnv(mujoco_env.MujocoEnv, utils.EzPickle):
    """Bimanual MPL reaching task.

    The right and left grasp sites of the robot must be driven onto the
    ``Rtarget`` and ``Ltarget`` sites.  Target placement is delegated to
    subclasses through ``reset_model``.
    """

    def __init__(self, noise_scale=0.0):
        """Load the model/robot pair.

        Args:
            noise_scale (float): magnitude of the sensor noise applied
                when observations are read.
        """
        # prep
        utils.EzPickle.__init__(self)
        self._noise_scale = noise_scale
        self.initializing = True
        curr_dir = os.path.dirname(os.path.abspath(__file__))
        # placeholder site ids; resolved once the model is loaded below
        self.Rtarget = 0
        self.Ltarget = 0
        self.Rgrasp = 0
        self.Lgrasp = 0
        # acquire robot
        self.mpl = Robot(name='sallyReach', model_file=curr_dir+'/reach_v0.xml', config_file=curr_dir+'/reach_v0.config')
        # acquire env
        mujoco_env.MujocoEnv.__init__(self, curr_dir+'/reach_v0.xml', 20)
        self.Rtarget = self.sim.model.site_name2id('Rtarget')
        self.Ltarget = self.sim.model.site_name2id('Ltarget')
        self.Rgrasp = self.sim.model.site_name2id('Rgrasp')
        self.Lgrasp = self.sim.model.site_name2id('Lgrasp')
        # env ready
        self.initializing = False

    def step(self, a):
        """Apply action ``a`` for one control step and score the result."""
        self.mpl.step(self, a, self.frame_skip*self.sim.model.opt.timestep)
        obs = self.get_obs()
        score, reward_dict, solved, done = self._get_score_reward_solved_done(self.obs_dict)
        # finalize step
        env_info = {
            'time': self.obs_dict['t'],
            'obs_dict': self.obs_dict,
            'rewards': reward_dict,
            'score': score,
            'solved': solved
        }
        return obs, reward_dict['total'], done, env_info

    # query robot and populate observations
    def get_obs(self):
        """Read robot sensors and return the flat observation vector."""
        # ask robot for sensor data
        sen = self.mpl.get_sensors(self, noise_scale=self._noise_scale)
        # parse sensor data into obs dict
        self.obs_dict = {}
        self.obs_dict['t'] = sen['time']
        self.obs_dict['Tmpl_pos'] = sen['Tmpl_pos']
        self.obs_dict['Rmpl_pos'] = sen['Rmpl_pos']
        self.obs_dict['Lmpl_pos'] = sen['Lmpl_pos']
        self.obs_dict['Tmpl_vel'] = sen['Tmpl_vel']
        self.obs_dict['Rmpl_vel'] = sen['Rmpl_vel']
        self.obs_dict['Lmpl_vel'] = sen['Lmpl_vel']
        # world-frame errors between each target site and its grasp site
        self.obs_dict['Rerr'] = self.sim.data.site_xpos[self.Rtarget]-self.sim.data.site_xpos[self.Rgrasp]
        self.obs_dict['Lerr'] = self.sim.data.site_xpos[self.Ltarget]-self.sim.data.site_xpos[self.Lgrasp]
        # vectorize observations
        return np.concatenate([
            self.obs_dict['Tmpl_pos'],
            self.obs_dict['Rmpl_pos'],
            self.obs_dict['Lmpl_pos'],
            self.obs_dict['Tmpl_vel'],
            self.obs_dict['Rmpl_vel'],
            self.obs_dict['Lmpl_vel'],
            self.obs_dict['Lerr'],
            self.obs_dict['Rerr']])

    # evaluate observations
    def _get_score_reward_solved_done(self, obs, act=None):
        """Score an observation dict.

        Returns:
            tuple: (score, reward_dict, solved, done)
        """
        Ldist = np.linalg.norm(obs['Lerr'])
        Rdist = np.linalg.norm(obs['Rerr'])
        # terminate early if either hand strays more than 1m from target
        done = (bool(Ldist > 1.0) or bool(Rdist > 1.0)) \
            if not self.initializing else False
        reward_dict = {}
        avg_dist = (Ldist+Rdist)/2.0
        score = -1.* avg_dist
        reward_dict["avg_dist"] = score
        # per-hand proximity bonus, plus a joint bonus when both are close
        reward_dict["small_bonus"] = 2.0*(Ldist<.1) + 2.0*(Rdist<.1)
        reward_dict["big_bonus"] = 2.0*(Ldist<.1) * 2.0*(Rdist<.1)
        reward_dict["total"] = reward_dict["avg_dist"] + reward_dict["small_bonus"] + reward_dict["big_bonus"] - 50.0 * int(done)
        solved = bool(avg_dist<0.100)
        return score, reward_dict, solved, done

    # reset model
    def reset_model(self):
        """Place targets and reset state; for child classes to define."""
        raise NotImplementedError

    # evaluate a path
    def compute_path_rewards(self, paths):
        """Recompute ``paths["rewards"]`` from ``paths["observations"]``.

        path["observations"] : (num_traj, horizon, obs_dim)
        path["rewards"] should have shape (num_traj, horizon)
        """
        obs = paths["observations"]
        # BUGFIX: _get_score_reward_solved_done returns four values; the
        # previous three-name unpacking raised ValueError on every call.
        score, rewards, solved, done = self._get_score_reward_solved_done(obs)
        # NOTE(review): this assumes vectorized rewards exposing .shape;
        # confirm the scoring helper supports batched obs dicts upstream
        paths["rewards"] = rewards if rewards.shape[0] > 1 else rewards.ravel()

    # evaluate policy's success from a collection of paths
    def evaluate_success(self, paths, logger=None):
        """Return (or log) the percentage of paths solved near their end."""
        success = 0.0
        for p in paths:
            # a path succeeds if it is solved within its last 4 steps
            if np.mean(p['env_infos']['solved'][-4:]) > 0.0:
                success += 1.0
        success_rate = 100.0*success/len(paths)
        if logger is None:
            # nowhere to log so return the value
            return success_rate
        else:
            # log the success
            # can log multiple statistics here if needed
            logger.log_kv('success_rate', success_rate)
            return None

    # --------------------------------
    # get and set states
    # --------------------------------
    def get_env_state(self):
        """Snapshot the simulator state (positions and velocities)."""
        return dict(qp=self.data.qpos.copy(), qv=self.data.qvel.copy())

    def set_env_state(self, state):
        """Restore a snapshot produced by ``get_env_state``."""
        self.sim.reset()
        qp = state['qp'].copy()
        qv = state['qv'].copy()
        self.set_state(qp, qv)
        self.sim.forward()

    # --------------------------------
    # utility functions
    # --------------------------------
    def get_env_infos(self):
        """Return auxiliary info (currently just the env state)."""
        return dict(state=self.get_env_state())

    def mj_viewer_setup(self):
        """Create and position the interactive MuJoCo viewer."""
        self.viewer = MjViewer(self.sim)
        self.viewer.cam.azimuth = -90
        self.viewer.cam.distance = 2.5
        self.viewer.cam.elevation = -30
        self.sim.forward()

    def close_env(self):
        """Nothing beyond the simulator to release."""
        pass
# Reach at fixed targets
class sallyReachEnvFixed(sallyReachEnv):
    """Reach task variant with fixed target locations."""

    def __init__(self):
        super().__init__()

    def reset_model(self):
        """Place both targets at fixed positions and reset the robot pose."""
        right_target_pos = np.array([0.15, 0.2, .6])
        left_target_pos = np.array([-0.15, 0.2, .3])
        self.sim.model.site_pos[self.Rtarget] = right_target_pos
        self.sim.model.site_pos[self.Ltarget] = left_target_pos
        self.set_state(self.init_qpos, self.init_qvel)
        self.sim.forward()
        return self.get_obs()
# Reach at random targets
class sallyReachEnvRandom(sallyReachEnv):
    """Reach task variant with randomized target locations."""

    def __init__(self):
        super().__init__()

    def reset_model(self):
        """Sample fresh targets in each hand's workspace, then reset."""
        rng = self.np_random
        # right hand works the +x half-space, left hand the -x half-space
        self.sim.model.site_pos[self.Rtarget] = rng.uniform(low=[0, .1, .3], high=[0.5, .5, .6])
        self.sim.model.site_pos[self.Ltarget] = rng.uniform(low=[-.5, .1, .3], high=[0, .5, .6])
        self.set_state(self.init_qpos, self.init_qvel)
        self.sim.forward()
        return self.get_obs()
| true | true |
f71b560fdbc2811c07e38208065e693e4befa940 | 40,786 | py | Python | metaflow/plugins/aws/step_functions/step_functions.py | cclauss/metaflow | 5186c6c5bba36d9e77077413ee2495dc79da3dca | [
"Apache-2.0"
] | 1 | 2021-11-29T22:37:54.000Z | 2021-11-29T22:37:54.000Z | metaflow/plugins/aws/step_functions/step_functions.py | sappier/metaflow | dfe1a216e342555d5fb127607b35491a4ef11627 | [
"Apache-2.0"
] | null | null | null | metaflow/plugins/aws/step_functions/step_functions.py | sappier/metaflow | dfe1a216e342555d5fb127607b35491a4ef11627 | [
"Apache-2.0"
] | 1 | 2021-11-29T22:37:50.000Z | 2021-11-29T22:37:50.000Z | import os
from collections import defaultdict
import sys
import hashlib
import json
import time
import string
import random
import uuid
from metaflow.exception import MetaflowException, MetaflowInternalError
from metaflow.plugins import ResourcesDecorator, BatchDecorator, RetryDecorator
from metaflow.parameters import deploy_time_eval
from metaflow.decorators import flow_decorators
from metaflow.util import compress_list, dict_to_cli_options, to_pascalcase
from metaflow.metaflow_config import (
SFN_IAM_ROLE,
EVENTS_SFN_ACCESS_IAM_ROLE,
SFN_DYNAMO_DB_TABLE,
SFN_EXECUTION_LOG_GROUP_ARN,
)
from metaflow import R
from .step_functions_client import StepFunctionsClient
from .event_bridge_client import EventBridgeClient
from ..batch.batch import Batch
class StepFunctionsException(MetaflowException):
    """Raised for errors while compiling, deploying or triggering a flow on AWS Step Functions."""

    headline = "AWS Step Functions error"
class StepFunctionsSchedulingException(MetaflowException):
    """Raised when attaching a cron schedule (EventBridge rule) to a deployed workflow fails."""

    headline = "AWS Step Functions scheduling error"
class StepFunctions(object):
def __init__(
    self,
    name,
    graph,
    flow,
    code_package_sha,
    code_package_url,
    production_token,
    metadata,
    flow_datastore,
    environment,
    event_logger,
    monitor,
    tags=None,
    namespace=None,
    username=None,
    max_workers=None,
    workflow_timeout=None,
    is_project=False,
):
    """Compile a Metaflow DAG into an AWS Step Functions state machine.

    Stores the deployment context (code package, datastore, metadata
    service, logging/monitoring hooks) and eagerly compiles the workflow
    definition; `deploy()`/`schedule()` push it to AWS afterwards.
    """
    self.name = name
    self.graph = graph
    self.flow = flow
    self.code_package_sha = code_package_sha
    self.code_package_url = code_package_url
    self.production_token = production_token
    self.metadata = metadata
    self.flow_datastore = flow_datastore
    self.environment = environment
    self.event_logger = event_logger
    self.monitor = monitor
    self.tags = tags
    self.namespace = namespace
    self.username = username
    self.max_workers = max_workers
    self.workflow_timeout = workflow_timeout

    self._client = StepFunctionsClient()
    # _compile() runs first because _process_parameters() (reached via
    # _batch) still needs the *method* self._cron at this point.
    self._workflow = self._compile()
    # NOTE(review): this rebinds the method `_cron` to its return value;
    # after this line self._cron is a cron string (or None), not callable.
    self._cron = self._cron()
    self._state_machine_arn = None
def to_json(self):
    """Return the compiled state-machine definition as pretty-printed JSON."""
    return self._workflow.to_json(pretty=True)
def trigger_explanation(self):
if self._cron:
# Sometime in the future, we should vendor (or write) a utility
# that can translate cron specifications into a human readable
# format and push to the user for a better UX, someday.
return (
"This workflow triggers automatically "
"via a cron schedule *%s* defined in AWS EventBridge."
% self.event_bridge_rule
)
else:
return "No triggers defined. " "You need to launch this workflow manually."
def deploy(self, log_execution_history):
    """Create or update the state machine on AWS Step Functions.

    Requires the SFN_IAM_ROLE config value; when `log_execution_history`
    is truthy, also requires SFN_EXECUTION_LOG_GROUP_ARN so executions can
    emit logs to CloudWatch. On success, stashes the resulting state
    machine ARN on `self._state_machine_arn` for `schedule()`.

    Raises:
        StepFunctionsException: if configuration is missing or the push
            to AWS fails.
    """
    if SFN_IAM_ROLE is None:
        raise StepFunctionsException(
            "No IAM role found for AWS Step "
            "Functions. You can create one "
            "following the instructions listed at "
            "*https://admin-docs.metaflow.org/meta"
            "flow-on-aws/deployment-guide/manual-d"
            "eployment#scheduling* and "
            "re-configure Metaflow using "
            "*metaflow configure aws* on your "
            "terminal."
        )
    if log_execution_history:
        if SFN_EXECUTION_LOG_GROUP_ARN is None:
            raise StepFunctionsException(
                "No AWS CloudWatch Logs log "
                "group ARN found for emitting "
                "state machine execution logs for "
                "your workflow. You can set it in "
                "your environment by using the "
                "METAFLOW_SFN_EXECUTION_LOG_GROUP_ARN "
                "environment variable."
            )
    try:
        self._state_machine_arn = self._client.push(
            name=self.name,
            definition=self.to_json(),
            role_arn=SFN_IAM_ROLE,
            log_execution_history=log_execution_history,
        )
    except Exception as e:
        raise StepFunctionsException(repr(e))
def schedule(self):
    """Attach the cron trigger (if any) to the deployed state machine.

    Must run after `deploy()` (uses `self._state_machine_arn`). Requires
    the EVENTS_SFN_ACCESS_IAM_ROLE config value.

    Raises:
        StepFunctionsSchedulingException: if the IAM role is missing or
            the EventBridge call fails.
    """
    # Scheduling is currently enabled via AWS Event Bridge.
    if EVENTS_SFN_ACCESS_IAM_ROLE is None:
        raise StepFunctionsSchedulingException(
            "No IAM role found for AWS "
            "Events Bridge. You can "
            "create one following the "
            "instructions listed at "
            "*https://admin-docs.metaflo"
            "w.org/metaflow-on-aws/deplo"
            "yment-guide/manual-deployme"
            "nt#scheduling* and "
            "re-configure Metaflow "
            "using *metaflow configure "
            "aws* on your terminal."
        )
    try:
        self.event_bridge_rule = (
            EventBridgeClient(self.name)
            .cron(self._cron)
            .role_arn(EVENTS_SFN_ACCESS_IAM_ROLE)
            .state_machine_arn(self._state_machine_arn)
            .schedule()
        )
    except Exception as e:
        raise StepFunctionsSchedulingException(repr(e))
@classmethod
def trigger(cls, name, parameters):
    """Start an execution of the already-deployed workflow *name*.

    *parameters* (dict) is JSON-encoded under the `Parameters` key of the
    state machine input.

    Raises:
        StepFunctionsException: if the workflow doesn't exist, the input
            exceeds the size limit, or the AWS call fails.
    """
    try:
        state_machine = StepFunctionsClient().get(name)
    except Exception as e:
        raise StepFunctionsException(repr(e))
    if state_machine is None:
        raise StepFunctionsException(
            "The workflow *%s* doesn't exist "
            "on AWS Step Functions. Please "
            "deploy your flow first." % name
        )
    # Dump parameters into `Parameters` input field.
    input = json.dumps({"Parameters": json.dumps(parameters)})
    # AWS Step Functions limits input to be 32KiB, but AWS Batch
    # has its own limitation of 30KiB for job specification length.
    # Reserving 10KiB for the rest of the job specification leaves 20KiB
    # for us, which should be enough for most use cases for now.
    if len(input) > 20480:
        raise StepFunctionsException(
            "Length of parameter names and "
            "values shouldn't exceed 20480 as "
            "imposed by AWS Step Functions."
        )
    try:
        state_machine_arn = state_machine.get("stateMachineArn")
        return StepFunctionsClient().trigger(state_machine_arn, input)
    except Exception as e:
        raise StepFunctionsException(repr(e))
@classmethod
def list(cls, name, states):
    """Return the executions of workflow *name*, filtered to *states*.

    Raises:
        StepFunctionsException: if the workflow doesn't exist or the AWS
            call fails.
    """
    try:
        state_machine = StepFunctionsClient().get(name)
    except Exception as e:
        raise StepFunctionsException(repr(e))
    if state_machine is None:
        raise StepFunctionsException(
            "The workflow *%s* doesn't exist " "on AWS Step Functions." % name
        )
    try:
        state_machine_arn = state_machine.get("stateMachineArn")
        return StepFunctionsClient().list_executions(state_machine_arn, states)
    except Exception as e:
        raise StepFunctionsException(repr(e))
@classmethod
def get_existing_deployment(cls, name):
    """Return (owner, production_token) of an existing deployment, or None.

    Reads the values stashed as `Parameters` on the `start` state of the
    deployed state machine definition; a same-named state machine without
    that structure is assumed to be non-Metaflow and rejected.
    """
    workflow = StepFunctionsClient().get(name)
    if workflow is not None:
        try:
            start = json.loads(workflow["definition"])["States"]["start"]
            parameters = start["Parameters"]["Parameters"]
            return parameters.get("metaflow.owner"), parameters.get(
                "metaflow.production_token"
            )
        except KeyError as e:
            raise StepFunctionsException(
                "An existing non-metaflow "
                "workflow with the same name as "
                "*%s* already exists in AWS Step "
                "Functions. Please modify the "
                "name of this flow or delete your "
                "existing workflow on AWS Step "
                "Functions." % name
            )
    return None
def _compile(self):
    """Recursively translate the flow DAG into a `Workflow` (Amazon States Language)."""
    # Visit every node of the flow and recursively build the state machine.
    def _visit(node, workflow, exit_node=None):
        # Assign an AWS Batch job to the AWS Step Functions state
        # and pass the intermediate state by exposing `JobId` and
        # `Parameters` to the child job(s) as outputs. `Index` and
        # `SplitParentTaskId` are populated optionally, when available.

        # We can't modify the names of keys in AWS Step Functions aside
        # from a blessed few which are set as `Parameters` for the Map
        # state. That's why even though `JobId` refers to the parent task
        # id, we can't call it as such. Similar situation for `Parameters`.
        state = (
            State(node.name)
            .batch(self._batch(node))
            .output_path(
                "$.['JobId', " "'Parameters', " "'Index', " "'SplitParentTaskId']"
            )
        )
        # End the (sub)workflow if we have reached the end of the flow or
        # the parent step of matching_join of the sub workflow.
        if node.type == "end" or exit_node in node.out_funcs:
            workflow.add_state(state.end())
        # Continue linear assignment within the (sub)workflow if the node
        # doesn't branch or fork.
        elif node.type in ("linear", "join"):
            workflow.add_state(state.next(node.out_funcs[0]))
            _visit(self.graph[node.out_funcs[0]], workflow, exit_node)
        # Create a `Parallel` state and assign sub workflows if the node
        # branches out.
        elif node.type == "split-and":
            # Deterministic branch name derived from the branch step names.
            branch_name = hashlib.sha224(
                "&".join(node.out_funcs).encode("utf-8")
            ).hexdigest()
            workflow.add_state(state.next(branch_name))
            branch = Parallel(branch_name).next(node.matching_join)
            # Generate as many sub workflows as branches and recurse.
            for n in node.out_funcs:
                branch.branch(
                    _visit(
                        self.graph[n], Workflow(n).start_at(n), node.matching_join
                    )
                )
            workflow.add_state(branch)
            # Continue the traversal from the matching_join.
            _visit(self.graph[node.matching_join], workflow, exit_node)
        # Create a `Map` state and assign sub workflow if the node forks.
        elif node.type == "foreach":
            # Fetch runtime cardinality via an AWS DynamoDb Get call before
            # configuring the node
            cardinality_state_name = "#%s" % node.out_funcs[0]
            workflow.add_state(state.next(cardinality_state_name))
            cardinality_state = (
                State(cardinality_state_name)
                .dynamo_db(SFN_DYNAMO_DB_TABLE, "$.JobId", "for_each_cardinality")
                .result_path("$.Result")
            )
            iterator_name = "*%s" % node.out_funcs[0]
            workflow.add_state(cardinality_state.next(iterator_name))
            workflow.add_state(
                Map(iterator_name)
                .items_path("$.Result.Item.for_each_cardinality.NS")
                .parameter("JobId.$", "$.JobId")
                .parameter("SplitParentTaskId.$", "$.JobId")
                .parameter("Parameters.$", "$.Parameters")
                .parameter("Index.$", "$$.Map.Item.Value")
                .next(node.matching_join)
                .iterator(
                    _visit(
                        self.graph[node.out_funcs[0]],
                        Workflow(node.out_funcs[0]).start_at(node.out_funcs[0]),
                        node.matching_join,
                    )
                )
                .max_concurrency(self.max_workers)
                .output_path("$.[0]")
            )
            # Continue the traversal from the matching_join.
            _visit(self.graph[node.matching_join], workflow, exit_node)
        # We shouldn't ideally ever get here.
        else:
            raise StepFunctionsException(
                "Node type *%s* for step *%s* "
                "is not currently supported by "
                "AWS Step Functions." % (node.type, node.name)
            )
        return workflow

    workflow = Workflow(self.name).start_at("start")
    if self.workflow_timeout:
        workflow.timeout_seconds(self.workflow_timeout)
    return _visit(self.graph["start"], workflow)
def _cron(self):
schedule = self.flow._flow_decorators.get("schedule")
if schedule:
return schedule.schedule
return None
def _process_parameters(self):
    """Resolve flow parameters to their deploy-time values.

    Returns a list of {"name": ..., "value": ...} dicts. Rejects
    duplicate (case-insensitive) parameter names and, on scheduled flows,
    required parameters without defaults, since EventBridge can't supply
    inputs at trigger time.

    NOTE(review): relies on being called while `self._cron` is still the
    bound method (i.e. during `_compile()` inside `__init__`, before
    `self._cron` is rebound to its value); calling it later would raise.
    """
    parameters = []
    has_schedule = self._cron() is not None
    seen = set()
    for var, param in self.flow._get_parameters():
        # Throw an exception if the parameter is specified twice.
        norm = param.name.lower()
        if norm in seen:
            raise MetaflowException(
                "Parameter *%s* is specified twice. "
                "Note that parameter names are "
                "case-insensitive." % param.name
            )
        seen.add(norm)
        is_required = param.kwargs.get("required", False)
        # Throw an exception if a schedule is set for a flow with required
        # parameters with no defaults. We currently don't have any notion
        # of data triggers in AWS Event Bridge.
        if "default" not in param.kwargs and is_required and has_schedule:
            raise MetaflowException(
                "The parameter *%s* does not have a "
                "default and is required. Scheduling "
                "such parameters via AWS Event Bridge "
                "is not currently supported." % param.name
            )
        value = deploy_time_eval(param.kwargs.get("default"))
        parameters.append(dict(name=param.name, value=value))
    return parameters
def _batch(self, node):
    """Build the AWS Batch job object for *node*.

    Wires up the environment variables and AWS Batch `parameters` (attrs)
    that carry Step Functions state (run id, parent task ids, foreach
    indices) into the container, then delegates to `Batch.create_job`.
    NOTE(review): nesting below is reconstructed from the upstream
    Metaflow source; the original dump had its indentation stripped.
    """
    attrs = {
        # metaflow.user is only used for setting the AWS Job Name.
        # Since job executions are no longer tied to a specific user
        # identity, we will just set their user to `SFN`. We still do need
        # access to the owner of the workflow for production tokens, which
        # we can stash in metaflow.owner.
        "metaflow.user": "SFN",
        "metaflow.owner": self.username,
        "metaflow.flow_name": self.flow.name,
        "metaflow.step_name": node.name,
        "metaflow.run_id.$": "$$.Execution.Name",
        # Unfortunately we can't set the task id here since AWS Step
        # Functions lacks any notion of run-scoped task identifiers. We
        # instead co-opt the AWS Batch job id as the task id. This also
        # means that the AWS Batch job name will have missing fields since
        # the job id is determined at job execution, but since the job id is
        # part of the job description payload, we don't lose much except for
        # a few ugly looking black fields in the AWS Batch UI.
        # Also, unfortunately we can't set the retry count since
        # `$$.State.RetryCount` resolves to an int dynamically and
        # AWS Batch job specification only accepts strings. We handle
        # retries/catch within AWS Batch to get around this limitation.
        "metaflow.version": self.environment.get_environment_info()[
            "metaflow_version"
        ],
        # We rely on step names and task ids of parent steps to construct
        # input paths for a task. Since the only information we can pass
        # between states (via `InputPath` and `ResultPath`) in AWS Step
        # Functions is the job description, we run the risk of exceeding
        # 32K state size limit rather quickly if we don't filter the job
        # description to a minimal set of fields. Unfortunately, the partial
        # `JsonPath` implementation within AWS Step Functions makes this
        # work a little non-trivial; it doesn't like dots in keys, so we
        # have to add the field again.
        # This pattern is repeated in a lot of other places, where we use
        # AWS Batch parameters to store AWS Step Functions state
        # information, since this field is the only field in the AWS Batch
        # specification that allows us to set key-values.
        "step_name": node.name,
    }
    # Store production token within the `start` step, so that subsequent
    # `step-functions create` calls can perform a rudimentary authorization
    # check.
    if node.name == "start":
        attrs["metaflow.production_token"] = self.production_token

    # Add env vars from the optional @environment decorator.
    env_deco = [deco for deco in node.decorators if deco.name == "environment"]
    env = {}
    if env_deco:
        env = env_deco[0].attributes["vars"]

    if node.name == "start":
        # Initialize parameters for the flow in the `start` step.
        parameters = self._process_parameters()
        if parameters:
            # Get user-defined parameters from State Machine Input.
            # Since AWS Step Functions doesn't allow for optional inputs
            # currently, we have to unfortunately place an artificial
            # constraint that every parameterized workflow needs to include
            # `Parameters` as a key in the input to the workflow.
            # `step-functions trigger` already takes care of this
            # requirement, but within the UI, the users will be required to
            # specify an input with key as `Parameters` and value as a
            # stringified json of the actual parameters -
            # {"Parameters": "{\"alpha\": \"beta\"}"}
            env["METAFLOW_PARAMETERS"] = "$.Parameters"
            default_parameters = {}
            for parameter in parameters:
                if parameter["value"] is not None:
                    default_parameters[parameter["name"]] = parameter["value"]
            # Dump the default values specified in the flow.
            env["METAFLOW_DEFAULT_PARAMETERS"] = json.dumps(default_parameters)
        # `start` step has no upstream input dependencies aside from
        # parameters.
        input_paths = None
    else:
        # We need to rely on the `InputPath` of the AWS Step Functions
        # specification to grab task ids and the step names of the parent
        # to properly construct input_paths at runtime. Thanks to the
        # JsonPath-foo embedded in the parent states, we have this
        # information easily available.

        # Handle foreach join.
        if (
            node.type == "join"
            and self.graph[node.split_parents[-1]].type == "foreach"
        ):
            input_paths = (
                "sfn-${METAFLOW_RUN_ID}/%s/:"
                "${METAFLOW_PARENT_TASK_IDS}" % node.in_funcs[0]
            )
            # Unfortunately, AWS Batch only allows strings as value types
            # in it's specification and we don't have any way to concatenate
            # the task ids array from the parent steps within AWS Step
            # Functions and pass it down to AWS Batch. We instead have to
            # rely on publishing the state to DynamoDb and fetching it back
            # in within the AWS Batch entry point to set
            # `METAFLOW_PARENT_TASK_IDS`. The state is scoped to the parent
            # foreach task `METAFLOW_SPLIT_PARENT_TASK_ID`. We decided on
            # AWS DynamoDb and not AWS Lambdas, because deploying and
            # debugging Lambdas would be a nightmare as far as OSS support
            # is concerned.
            env["METAFLOW_SPLIT_PARENT_TASK_ID"] = (
                "$.Parameters.split_parent_task_id_%s" % node.split_parents[-1]
            )
        else:
            # Set appropriate environment variables for runtime replacement.
            if len(node.in_funcs) == 1:
                input_paths = (
                    "sfn-${METAFLOW_RUN_ID}/%s/${METAFLOW_PARENT_TASK_ID}"
                    % node.in_funcs[0]
                )
                env["METAFLOW_PARENT_TASK_ID"] = "$.JobId"
            else:
                # Generate the input paths in a quasi-compressed format.
                # See util.decompress_list for why this is written the way
                # it is.
                input_paths = "sfn-${METAFLOW_RUN_ID}:" + ",".join(
                    "/${METAFLOW_PARENT_%s_STEP}/"
                    "${METAFLOW_PARENT_%s_TASK_ID}" % (idx, idx)
                    for idx, _ in enumerate(node.in_funcs)
                )
                for idx, _ in enumerate(node.in_funcs):
                    env["METAFLOW_PARENT_%s_TASK_ID" % idx] = "$.[%s].JobId" % idx
                    env["METAFLOW_PARENT_%s_STEP" % idx] = (
                        "$.[%s].Parameters.step_name" % idx
                    )
        env["METAFLOW_INPUT_PATHS"] = input_paths

        if node.is_inside_foreach:
            # Set the task id of the parent job of the foreach split in
            # our favorite dumping ground, the AWS Batch attrs. For
            # subsequent descendent tasks, this attrs blob becomes the
            # input to those descendent tasks. We set and propagate the
            # task ids pointing to split_parents through every state.
            if any(self.graph[n].type == "foreach" for n in node.in_funcs):
                attrs[
                    "split_parent_task_id_%s.$" % node.split_parents[-1]
                ] = "$.SplitParentTaskId"
                for parent in node.split_parents[:-1]:
                    if self.graph[parent].type == "foreach":
                        attrs["split_parent_task_id_%s.$" % parent] = (
                            "$.Parameters.split_parent_task_id_%s" % parent
                        )
            elif node.type == "join":
                if self.graph[node.split_parents[-1]].type == "foreach":
                    # A foreach join only gets one set of input from the
                    # parent tasks. We filter the Map state to only output
                    # `$.[0]`, since we don't need any of the other outputs,
                    # that information is available to us from AWS DynamoDB.
                    # This has a nice side-effect of making our foreach
                    # splits infinitely scalable because otherwise we would
                    # be bounded by the 32K state limit for the outputs. So,
                    # instead of referencing `Parameters` fields by index
                    # (like in `split-and`), we can just reference them
                    # directly.
                    attrs["split_parent_task_id_%s.$" % node.split_parents[-1]] = (
                        "$.Parameters.split_parent_task_id_%s"
                        % node.split_parents[-1]
                    )
                    for parent in node.split_parents[:-1]:
                        if self.graph[parent].type == "foreach":
                            attrs["split_parent_task_id_%s.$" % parent] = (
                                "$.Parameters.split_parent_task_id_%s" % parent
                            )
                else:
                    for parent in node.split_parents:
                        if self.graph[parent].type == "foreach":
                            attrs["split_parent_task_id_%s.$" % parent] = (
                                "$.[0].Parameters.split_parent_task_id_%s" % parent
                            )
            else:
                for parent in node.split_parents:
                    if self.graph[parent].type == "foreach":
                        attrs["split_parent_task_id_%s.$" % parent] = (
                            "$.Parameters.split_parent_task_id_%s" % parent
                        )
            # Set `METAFLOW_SPLIT_PARENT_TASK_ID_FOR_FOREACH_JOIN` if the
            # next transition is to a foreach join, so that the
            # stepfunctions decorator can write the mapping for input path
            # to DynamoDb.
            if any(
                self.graph[n].type == "join"
                and self.graph[self.graph[n].split_parents[-1]].type == "foreach"
                for n in node.out_funcs
            ):
                env["METAFLOW_SPLIT_PARENT_TASK_ID_FOR_FOREACH_JOIN"] = attrs[
                    "split_parent_task_id_%s.$"
                    % self.graph[node.out_funcs[0]].split_parents[-1]
                ]
        # Set ttl for the values we set in AWS DynamoDB.
        if node.type == "foreach":
            if self.workflow_timeout:
                env["METAFLOW_SFN_WORKFLOW_TIMEOUT"] = self.workflow_timeout

        # Handle split index for for-each.
        if any(self.graph[n].type == "foreach" for n in node.in_funcs):
            env["METAFLOW_SPLIT_INDEX"] = "$.Index"

    env["METAFLOW_CODE_URL"] = self.code_package_url
    env["METAFLOW_FLOW_NAME"] = attrs["metaflow.flow_name"]
    env["METAFLOW_STEP_NAME"] = attrs["metaflow.step_name"]
    env["METAFLOW_RUN_ID"] = attrs["metaflow.run_id.$"]
    env["METAFLOW_PRODUCTION_TOKEN"] = self.production_token
    env["SFN_STATE_MACHINE"] = self.name
    env["METAFLOW_OWNER"] = attrs["metaflow.owner"]
    # Can't set `METAFLOW_TASK_ID` due to lack of run-scoped identifiers.
    # We will instead rely on `AWS_BATCH_JOB_ID` as the task identifier.
    # Can't set `METAFLOW_RETRY_COUNT` either due to integer casting issue.
    metadata_env = self.metadata.get_runtime_environment("step-functions")
    env.update(metadata_env)

    metaflow_version = self.environment.get_environment_info()
    metaflow_version["flow_name"] = self.graph.name
    metaflow_version["production_token"] = self.production_token
    env["METAFLOW_VERSION"] = json.dumps(metaflow_version)

    # Set AWS DynamoDb Table Name for state tracking for for-eaches.
    # There are three instances when metaflow runtime directly interacts
    # with AWS DynamoDB.
    #   1. To set the cardinality of foreaches (which are subsequently)
    #      read prior to the instantiation of the Map state by AWS Step
    #      Functions.
    #   2. To set the input paths from the parent steps of a foreach join.
    #   3. To read the input paths in a foreach join.
    if (
        node.type == "foreach"
        or (
            node.is_inside_foreach
            and any(
                self.graph[n].type == "join"
                and self.graph[self.graph[n].split_parents[-1]].type == "foreach"
                for n in node.out_funcs
            )
        )
        or (
            node.type == "join"
            and self.graph[node.split_parents[-1]].type == "foreach"
        )
    ):
        if SFN_DYNAMO_DB_TABLE is None:
            raise StepFunctionsException(
                "An AWS DynamoDB table is needed "
                "to support foreach in your flow. "
                "You can create one following the "
                "instructions listed at *https://a"
                "dmin-docs.metaflow.org/metaflow-o"
                "n-aws/deployment-guide/manual-dep"
                "loyment#scheduling* and "
                "re-configure Metaflow using "
                "*metaflow configure aws* on your "
                "terminal."
            )
        env["METAFLOW_SFN_DYNAMO_DB_TABLE"] = SFN_DYNAMO_DB_TABLE

    # Resolve AWS Batch resource requirements.
    batch_deco = [deco for deco in node.decorators if deco.name == "batch"][0]
    resources = batch_deco.attributes
    # Resolve retry strategy.
    user_code_retries, total_retries = self._get_retries(node)

    task_spec = {
        "flow_name": attrs["metaflow.flow_name"],
        "step_name": attrs["metaflow.step_name"],
        "run_id": "sfn-$METAFLOW_RUN_ID",
        # Use AWS Batch job identifier as the globally unique
        # task identifier.
        "task_id": "$AWS_BATCH_JOB_ID",
        # Since retries are handled by AWS Batch, we can rely on
        # AWS_BATCH_JOB_ATTEMPT as the job counter.
        "retry_count": "$((AWS_BATCH_JOB_ATTEMPT-1))",
    }

    return (
        Batch(self.metadata, self.environment)
        .create_job(
            step_name=node.name,
            step_cli=self._step_cli(
                node, input_paths, self.code_package_url, user_code_retries
            ),
            task_spec=task_spec,
            code_package_sha=self.code_package_sha,
            code_package_url=self.code_package_url,
            code_package_ds=self.flow_datastore.TYPE,
            image=resources["image"],
            queue=resources["queue"],
            iam_role=resources["iam_role"],
            execution_role=resources["execution_role"],
            cpu=resources["cpu"],
            gpu=resources["gpu"],
            memory=resources["memory"],
            run_time_limit=batch_deco.run_time_limit,
            shared_memory=resources["shared_memory"],
            max_swap=resources["max_swap"],
            swappiness=resources["swappiness"],
            env=env,
            attrs=attrs,
            host_volumes=resources["host_volumes"],
        )
        .attempts(total_retries + 1)
    )
def _get_retries(self, node):
max_user_code_retries = 0
max_error_retries = 0
# Different decorators may have different retrying strategies, so take
# the max of them.
for deco in node.decorators:
user_code_retries, error_retries = deco.step_task_retry_count()
max_user_code_retries = max(max_user_code_retries, user_code_retries)
max_error_retries = max(max_error_retries, error_retries)
return max_user_code_retries, max_user_code_retries + max_error_retries
def _step_cli(self, node, paths, code_package_url, user_code_retries):
    """Build the shell command that runs *node*'s step inside its AWS Batch container.

    *paths* are the input paths (parent step/task ids, with ${...}
    placeholders resolved at container runtime). For the `start` step, an
    idempotent `init` command materializing the `_parameters` task is
    prepended; for foreach joins, a command exporting the parent task ids
    from DynamoDB is prepended. Returns the commands joined with ` && `.
    """
    cmds = []
    script_name = os.path.basename(sys.argv[0])
    executable = self.environment.executable(node.name)
    if R.use_r():
        entrypoint = [R.entrypoint()]
    else:
        entrypoint = [executable, script_name]

    # Use AWS Batch job identifier as the globally unique task identifier.
    task_id = "${AWS_BATCH_JOB_ID}"

    # FlowDecorators can define their own top-level options. They are
    # responsible for adding their own top-level options and values through
    # the get_top_level_options() hook. See similar logic in runtime.py.
    top_opts_dict = {}
    for deco in flow_decorators():
        top_opts_dict.update(deco.get_top_level_options())
    top_opts = list(dict_to_cli_options(top_opts_dict))

    if node.name == "start":
        # We need a separate unique ID for the special _parameters task
        task_id_params = "%s-params" % task_id
        # Export user-defined parameters into runtime environment
        param_file = "".join(
            random.choice(string.ascii_lowercase) for _ in range(10)
        )
        export_params = (
            "python -m "
            "metaflow.plugins.aws.step_functions.set_batch_environment "
            "parameters %s && . `pwd`/%s" % (param_file, param_file)
        )
        params = (
            entrypoint
            + top_opts
            + [
                "--quiet",
                "--metadata=%s" % self.metadata.TYPE,
                "--environment=%s" % self.environment.TYPE,
                "--datastore=s3",
                "--event-logger=%s" % self.event_logger.logger_type,
                "--monitor=%s" % self.monitor.monitor_type,
                "--no-pylint",
                "init",
                "--run-id sfn-$METAFLOW_RUN_ID",
                "--task-id %s" % task_id_params,
            ]
        )
        # Assign tags to run objects.
        if self.tags:
            params.extend("--tag %s" % tag for tag in self.tags)

        # If the start step gets retried, we must be careful not to
        # regenerate multiple parameters tasks. Hence we check first if
        # _parameters exists already.
        exists = entrypoint + [
            "dump",
            "--max-value-size=0",
            "sfn-${METAFLOW_RUN_ID}/_parameters/%s" % (task_id_params),
        ]
        cmd = "if ! %s >/dev/null 2>/dev/null; then %s && %s; fi" % (
            " ".join(exists),
            export_params,
            " ".join(params),
        )
        cmds.append(cmd)
        paths = "sfn-${METAFLOW_RUN_ID}/_parameters/%s" % (task_id_params)

    if node.type == "join" and self.graph[node.split_parents[-1]].type == "foreach":
        parent_tasks_file = "".join(
            random.choice(string.ascii_lowercase) for _ in range(10)
        )
        export_parent_tasks = (
            "python -m "
            "metaflow.plugins.aws.step_functions.set_batch_environment "
            "parent_tasks %s && . `pwd`/%s" % (parent_tasks_file, parent_tasks_file)
        )
        cmds.append(export_parent_tasks)

    top_level = top_opts + [
        "--quiet",
        "--metadata=%s" % self.metadata.TYPE,
        "--environment=%s" % self.environment.TYPE,
        "--datastore=%s" % self.flow_datastore.TYPE,
        "--datastore-root=%s" % self.flow_datastore.datastore_root,
        "--event-logger=%s" % self.event_logger.logger_type,
        "--monitor=%s" % self.monitor.monitor_type,
        "--no-pylint",
        "--with=step_functions_internal",
    ]

    step = [
        "step",
        node.name,
        "--run-id sfn-$METAFLOW_RUN_ID",
        "--task-id %s" % task_id,
        # Since retries are handled by AWS Batch, we can rely on
        # AWS_BATCH_JOB_ATTEMPT as the job counter.
        "--retry-count $((AWS_BATCH_JOB_ATTEMPT-1))",
        "--max-user-code-retries %d" % user_code_retries,
        "--input-paths %s" % paths,
        # Set decorator to batch to execute `task_*` hooks for batch
        # decorator.
        "--with=batch",
    ]
    if any(self.graph[n].type == "foreach" for n in node.in_funcs):
        # We set the `METAFLOW_SPLIT_INDEX` through JSONPath-foo
        # to pass the state from the parent DynamoDb state for for-each.
        step.append("--split-index $METAFLOW_SPLIT_INDEX")
    if self.tags:
        step.extend("--tag %s" % tag for tag in self.tags)
    if self.namespace is not None:
        step.append("--namespace=%s" % self.namespace)
    cmds.append(" ".join(entrypoint + top_level + step))
    return " && ".join(cmds)
class Workflow(object):
    """Builder for a top-level Amazon States Language state machine."""

    def __init__(self, name):
        self.name = name
        # Autovivifying nested dict so states can be assigned by key.
        tree = lambda: defaultdict(tree)
        self.payload = tree()

    def start_at(self, start_at):
        self.payload["StartAt"] = start_at
        return self

    def add_state(self, state):
        self.payload["States"][state.name] = state.payload
        return self

    def timeout_seconds(self, timeout_seconds):
        self.payload["TimeoutSeconds"] = timeout_seconds
        return self

    def to_json(self, pretty=False):
        indent = 4 if pretty else None
        return json.dumps(self.payload, indent=indent)
class State(object):
    """Builder for a single `Task` state in Amazon States Language."""

    def __init__(self, name):
        self.name = name
        # Autovivifying nested dict: allows payload["Parameters"][k] = v.
        tree = lambda: defaultdict(tree)
        self.payload = tree()
        self.payload["Type"] = "Task"

    def _set(self, key, value):
        # Shared fluent setter.
        self.payload[key] = value
        return self

    def resource(self, resource):
        return self._set("Resource", resource)

    def next(self, state):
        return self._set("Next", state)

    def end(self):
        return self._set("End", True)

    def parameter(self, name, value):
        self.payload["Parameters"][name] = value
        return self

    def output_path(self, output_path):
        return self._set("OutputPath", output_path)

    def result_path(self, result_path):
        return self._set("ResultPath", result_path)

    def _partition(self):
        # This is needed to support AWS Gov Cloud and AWS CN regions
        return SFN_IAM_ROLE.split(":")[1]

    def batch(self, job):
        """Configure this state to submit *job* via batch:submitJob.sync."""
        self.resource("arn:%s:states:::batch:submitJob.sync" % self._partition())
        self.parameter("JobDefinition", job.payload["jobDefinition"])
        self.parameter("JobName", job.payload["jobName"])
        self.parameter("JobQueue", job.payload["jobQueue"])
        self.parameter("Parameters", job.payload["parameters"])
        self.parameter(
            "ContainerOverrides", to_pascalcase(job.payload["containerOverrides"])
        )
        self.parameter("RetryStrategy", to_pascalcase(job.payload["retryStrategy"]))
        self.parameter("Timeout", to_pascalcase(job.payload["timeout"]))
        # tags may not be present in all scenarios
        if "tags" in job.payload:
            self.parameter("Tags", job.payload["tags"])
        return self

    def dynamo_db(self, table_name, primary_key, values):
        """Configure this state as a DynamoDB getItem on *table_name*."""
        self.resource("arn:%s:states:::dynamodb:getItem" % self._partition())
        self.parameter("TableName", table_name)
        self.parameter("Key", {"pathspec": {"S.$": primary_key}})
        self.parameter("ConsistentRead", True)
        self.parameter("ProjectionExpression", values)
        return self
class Parallel(object):
    """Builder for a `Parallel` state that fans out into branch workflows."""

    def __init__(self, name):
        self.name = name
        # Autovivifying nested dict keeps assignment sites compact.
        tree = lambda: defaultdict(tree)
        self.payload = tree()
        self.payload["Type"] = "Parallel"

    def _set(self, key, value):
        # Shared fluent setter.
        self.payload[key] = value
        return self

    def branch(self, workflow):
        # Lazily create the branch list so empty Parallels stay minimal.
        self.payload.setdefault("Branches", []).append(workflow.payload)
        return self

    def next(self, state):
        return self._set("Next", state)

    def output_path(self, output_path):
        return self._set("OutputPath", output_path)

    def result_path(self, result_path):
        return self._set("ResultPath", result_path)
class Map(object):
    """Builder for a `Map` state that fans out over a foreach split."""

    def __init__(self, name):
        self.name = name
        # Autovivifying nested dict: allows payload["Parameters"][k] = v.
        tree = lambda: defaultdict(tree)
        self.payload = tree()
        self.payload["Type"] = "Map"
        # 0 means "no concurrency limit" in Amazon States Language.
        self.payload["MaxConcurrency"] = 0

    def _set(self, key, value):
        # Shared fluent setter.
        self.payload[key] = value
        return self

    def iterator(self, workflow):
        return self._set("Iterator", workflow.payload)

    def next(self, state):
        return self._set("Next", state)

    def items_path(self, items_path):
        return self._set("ItemsPath", items_path)

    def parameter(self, name, value):
        self.payload["Parameters"][name] = value
        return self

    def max_concurrency(self, max_concurrency):
        return self._set("MaxConcurrency", max_concurrency)

    def output_path(self, output_path):
        return self._set("OutputPath", output_path)

    def result_path(self, result_path):
        return self._set("ResultPath", result_path)
| 43.205508 | 88 | 0.558721 | import os
from collections import defaultdict
import sys
import hashlib
import json
import time
import string
import random
import uuid
from metaflow.exception import MetaflowException, MetaflowInternalError
from metaflow.plugins import ResourcesDecorator, BatchDecorator, RetryDecorator
from metaflow.parameters import deploy_time_eval
from metaflow.decorators import flow_decorators
from metaflow.util import compress_list, dict_to_cli_options, to_pascalcase
from metaflow.metaflow_config import (
SFN_IAM_ROLE,
EVENTS_SFN_ACCESS_IAM_ROLE,
SFN_DYNAMO_DB_TABLE,
SFN_EXECUTION_LOG_GROUP_ARN,
)
from metaflow import R
from .step_functions_client import StepFunctionsClient
from .event_bridge_client import EventBridgeClient
from ..batch.batch import Batch
class StepFunctionsException(MetaflowException):
headline = "AWS Step Functions error"
class StepFunctionsSchedulingException(MetaflowException):
headline = "AWS Step Functions scheduling error"
class StepFunctions(object):
def __init__(
self,
name,
graph,
flow,
code_package_sha,
code_package_url,
production_token,
metadata,
flow_datastore,
environment,
event_logger,
monitor,
tags=None,
namespace=None,
username=None,
max_workers=None,
workflow_timeout=None,
is_project=False,
):
self.name = name
self.graph = graph
self.flow = flow
self.code_package_sha = code_package_sha
self.code_package_url = code_package_url
self.production_token = production_token
self.metadata = metadata
self.flow_datastore = flow_datastore
self.environment = environment
self.event_logger = event_logger
self.monitor = monitor
self.tags = tags
self.namespace = namespace
self.username = username
self.max_workers = max_workers
self.workflow_timeout = workflow_timeout
self._client = StepFunctionsClient()
self._workflow = self._compile()
self._cron = self._cron()
self._state_machine_arn = None
def to_json(self):
return self._workflow.to_json(pretty=True)
def trigger_explanation(self):
if self._cron:
return (
"This workflow triggers automatically "
"via a cron schedule *%s* defined in AWS EventBridge."
% self.event_bridge_rule
)
else:
return "No triggers defined. " "You need to launch this workflow manually."
def deploy(self, log_execution_history):
if SFN_IAM_ROLE is None:
raise StepFunctionsException(
"No IAM role found for AWS Step "
"Functions. You can create one "
"following the instructions listed at "
"*https://admin-docs.metaflow.org/meta"
"flow-on-aws/deployment-guide/manual-d"
"eployment#scheduling* and "
"re-configure Metaflow using "
"*metaflow configure aws* on your "
"terminal."
)
if log_execution_history:
if SFN_EXECUTION_LOG_GROUP_ARN is None:
raise StepFunctionsException(
"No AWS CloudWatch Logs log "
"group ARN found for emitting "
"state machine execution logs for "
"your workflow. You can set it in "
"your environment by using the "
"METAFLOW_SFN_EXECUTION_LOG_GROUP_ARN "
"environment variable."
)
try:
self._state_machine_arn = self._client.push(
name=self.name,
definition=self.to_json(),
role_arn=SFN_IAM_ROLE,
log_execution_history=log_execution_history,
)
except Exception as e:
raise StepFunctionsException(repr(e))
    def schedule(self):
        """Attach (or refresh) an EventBridge cron rule that triggers the
        deployed state machine.

        Reads the *attribute* ``self._cron`` (the value computed during
        ``__init__``, not the shadowed method) and ``self._state_machine_arn``
        set by ``deploy()``, so this must be called after deployment.
        Raises StepFunctionsSchedulingException when the EventBridge IAM role
        is missing or the rule creation fails.
        """
        if EVENTS_SFN_ACCESS_IAM_ROLE is None:
            raise StepFunctionsSchedulingException(
                "No IAM role found for AWS "
                "Events Bridge. You can "
                "create one following the "
                "instructions listed at "
                "*https://admin-docs.metaflo"
                "w.org/metaflow-on-aws/deplo"
                "yment-guide/manual-deployme"
                "nt#scheduling* and "
                "re-configure Metaflow "
                "using *metaflow configure "
                "aws* on your terminal."
            )
        try:
            # Fluent builder chain; kept intact because the intermediate
            # return values are owned by EventBridgeClient.
            self.event_bridge_rule = (
                EventBridgeClient(self.name)
                .cron(self._cron)
                .role_arn(EVENTS_SFN_ACCESS_IAM_ROLE)
                .state_machine_arn(self._state_machine_arn)
                .schedule()
            )
        except Exception as e:
            raise StepFunctionsSchedulingException(repr(e))
@classmethod
def trigger(cls, name, parameters):
try:
state_machine = StepFunctionsClient().get(name)
except Exception as e:
raise StepFunctionsException(repr(e))
if state_machine is None:
raise StepFunctionsException(
"The workflow *%s* doesn't exist "
"on AWS Step Functions. Please "
"deploy your flow first." % name
)
# Dump parameters into `Parameters` input field.
input = json.dumps({"Parameters": json.dumps(parameters)})
# AWS Step Functions limits input to be 32KiB, but AWS Batch
# has it's own limitation of 30KiB for job specification length.
if len(input) > 20480:
raise StepFunctionsException(
"Length of parameter names and "
"values shouldn't exceed 20480 as "
"imposed by AWS Step Functions."
)
try:
state_machine_arn = state_machine.get("stateMachineArn")
return StepFunctionsClient().trigger(state_machine_arn, input)
except Exception as e:
raise StepFunctionsException(repr(e))
@classmethod
def list(cls, name, states):
try:
state_machine = StepFunctionsClient().get(name)
except Exception as e:
raise StepFunctionsException(repr(e))
if state_machine is None:
raise StepFunctionsException(
"The workflow *%s* doesn't exist " "on AWS Step Functions." % name
)
try:
state_machine_arn = state_machine.get("stateMachineArn")
return StepFunctionsClient().list_executions(state_machine_arn, states)
except Exception as e:
raise StepFunctionsException(repr(e))
@classmethod
def get_existing_deployment(cls, name):
workflow = StepFunctionsClient().get(name)
if workflow is not None:
try:
start = json.loads(workflow["definition"])["States"]["start"]
parameters = start["Parameters"]["Parameters"]
return parameters.get("metaflow.owner"), parameters.get(
"metaflow.production_token"
)
except KeyError as e:
raise StepFunctionsException(
"An existing non-metaflow "
"workflow with the same name as "
"*%s* already exists in AWS Step "
"Functions. Please modify the "
"name of this flow or delete your "
"existing workflow on AWS Step "
"Functions." % name
)
return None
    def _compile(self):
        """Recursively translate the Metaflow step graph into an Amazon
        States Language workflow.

        Each step becomes a Task state that submits an AWS Batch job;
        split-and steps become Parallel states, and foreach steps become a
        DynamoDB cardinality lookup followed by a Map state.
        """

        def _visit(node, workflow, exit_node=None):
            # Every state filters its output down to the handful of fields
            # that downstream states (and the Map state's `Parameters`)
            # need, to stay under the Step Functions state-size limit.
            state = (
                State(node.name)
                .batch(self._batch(node))
                .output_path(
                    "$.['JobId', " "'Parameters', " "'Index', " "'SplitParentTaskId']"
                )
            )
            # End the (sub)workflow if we have reached the end of the flow or
            # the parent step of matching_join of the sub workflow.
            if node.type == "end" or exit_node in node.out_funcs:
                workflow.add_state(state.end())
            # Continue linear assignment within the (sub)workflow if the node
            # doesn't branch or fork.
            elif node.type in ("linear", "join"):
                workflow.add_state(state.next(node.out_funcs[0]))
                _visit(self.graph[node.out_funcs[0]], workflow, exit_node)
            elif node.type == "split-and":
                # Deterministic branch-state name derived from the child
                # step names, so re-deploys produce the same definition.
                branch_name = hashlib.sha224(
                    "&".join(node.out_funcs).encode("utf-8")
                ).hexdigest()
                workflow.add_state(state.next(branch_name))
                branch = Parallel(branch_name).next(node.matching_join)
                # Each branch is compiled as a sub-workflow that terminates
                # just before the matching join.
                for n in node.out_funcs:
                    branch.branch(
                        _visit(
                            self.graph[n], Workflow(n).start_at(n), node.matching_join
                        )
                    )
                workflow.add_state(branch)
                _visit(self.graph[node.matching_join], workflow, exit_node)
            elif node.type == "foreach":
                # First read the foreach cardinality (written by the parent
                # task into DynamoDB) so the Map state knows how many
                # iterations to fan out.
                cardinality_state_name = "#%s" % node.out_funcs[0]
                workflow.add_state(state.next(cardinality_state_name))
                cardinality_state = (
                    State(cardinality_state_name)
                    .dynamo_db(SFN_DYNAMO_DB_TABLE, "$.JobId", "for_each_cardinality")
                    .result_path("$.Result")
                )
                iterator_name = "*%s" % node.out_funcs[0]
                workflow.add_state(cardinality_state.next(iterator_name))
                workflow.add_state(
                    Map(iterator_name)
                    .items_path("$.Result.Item.for_each_cardinality.NS")
                    .parameter("JobId.$", "$.JobId")
                    .parameter("SplitParentTaskId.$", "$.JobId")
                    .parameter("Parameters.$", "$.Parameters")
                    .parameter("Index.$", "$$.Map.Item.Value")
                    .next(node.matching_join)
                    .iterator(
                        _visit(
                            self.graph[node.out_funcs[0]],
                            Workflow(node.out_funcs[0]).start_at(node.out_funcs[0]),
                            node.matching_join,
                        )
                    )
                    .max_concurrency(self.max_workers)
                    # The Map state emits a list; keep only the first
                    # element as the payload for the join step.
                    .output_path("$.[0]")
                )
                _visit(self.graph[node.matching_join], workflow, exit_node)
            else:
                raise StepFunctionsException(
                    "Node type *%s* for step *%s* "
                    "is not currently supported by "
                    "AWS Step Functions." % (node.type, node.name)
                )
            return workflow

        workflow = Workflow(self.name).start_at("start")
        if self.workflow_timeout:
            workflow.timeout_seconds(self.workflow_timeout)
        return _visit(self.graph["start"], workflow)
def _cron(self):
schedule = self.flow._flow_decorators.get("schedule")
if schedule:
return schedule.schedule
return None
def _process_parameters(self):
parameters = []
has_schedule = self._cron() is not None
seen = set()
for var, param in self.flow._get_parameters():
# Throw an exception if the parameter is specified twice.
norm = param.name.lower()
if norm in seen:
raise MetaflowException(
"Parameter *%s* is specified twice. "
"Note that parameter names are "
"case-insensitive." % param.name
)
seen.add(norm)
is_required = param.kwargs.get("required", False)
# Throw an exception if a schedule is set for a flow with required
# parameters with no defaults. We currently don't have any notion
if "default" not in param.kwargs and is_required and has_schedule:
raise MetaflowException(
"The parameter *%s* does not have a "
"default and is required. Scheduling "
"such parameters via AWS Event Bridge "
"is not currently supported." % param.name
)
value = deploy_time_eval(param.kwargs.get("default"))
parameters.append(dict(name=param.name, value=value))
return parameters
    def _batch(self, node):
        """Build the AWS Batch job specification for *node*.

        Assembles two channels of per-task state:
        - ``attrs``: AWS Batch `parameters` key-values, the only free-form
          field in a Batch job spec, co-opted here to carry Step Functions
          bookkeeping (owner, run id, parent split task ids, ...).
        - ``env``: environment variables consumed by the task at runtime
          (input paths, split index, code package location, ...).

        Returns a Batch job builder with the retry count already applied.
        """
        attrs = {
            "metaflow.user": "SFN",
            "metaflow.owner": self.username,
            "metaflow.flow_name": self.flow.name,
            "metaflow.step_name": node.name,
            # The Step Functions execution name doubles as the run id; Step
            # Functions has no notion of run-scoped task identifiers, so the
            # AWS Batch job id is co-opted as the task id elsewhere.
            "metaflow.run_id.$": "$$.Execution.Name",
            "metaflow.version": self.environment.get_environment_info()[
                "metaflow_version"
            ],
            # `step_name` is repeated here (outside the metaflow.* namespace)
            # because downstream states read it back out of `Parameters` when
            # constructing input paths for joins.
            "step_name": node.name,
        }
        # Store production token within the `start` step, so that subsequent
        # `step-functions create` calls can perform a rudimentary
        # authorization check.
        if node.name == "start":
            attrs["metaflow.production_token"] = self.production_token
        # Add env vars from the optional @environment decorator.
        env_deco = [deco for deco in node.decorators if deco.name == "environment"]
        env = {}
        if env_deco:
            env = env_deco[0].attributes["vars"]
        if node.name == "start":
            # Initialize parameters for the flow in the `start` step.
            parameters = self._process_parameters()
            if parameters:
                # User-supplied values arrive via the state machine input;
                # defaults are shipped separately since Step Functions has
                # no notion of optional inputs.
                env["METAFLOW_PARAMETERS"] = "$.Parameters"
                default_parameters = {}
                for parameter in parameters:
                    if parameter["value"] is not None:
                        default_parameters[parameter["name"]] = parameter["value"]
                env["METAFLOW_DEFAULT_PARAMETERS"] = json.dumps(default_parameters)
            # `start` has no parent task, hence no input paths.
            input_paths = None
        else:
            # Compute METAFLOW_INPUT_PATHS from the parent step(s).
            if (
                node.type == "join"
                and self.graph[node.split_parents[-1]].type == "foreach"
            ):
                # Foreach join: the parent task ids are recovered at runtime
                # from DynamoDB (via METAFLOW_PARENT_TASK_IDS).
                input_paths = (
                    "sfn-${METAFLOW_RUN_ID}/%s/:"
                    "${METAFLOW_PARENT_TASK_IDS}" % node.in_funcs[0]
                )
                env["METAFLOW_SPLIT_PARENT_TASK_ID"] = (
                    "$.Parameters.split_parent_task_id_%s" % node.split_parents[-1]
                )
            else:
                if len(node.in_funcs) == 1:
                    # Single parent: its Batch job id is the parent task id.
                    input_paths = (
                        "sfn-${METAFLOW_RUN_ID}/%s/${METAFLOW_PARENT_TASK_ID}"
                        % node.in_funcs[0]
                    )
                    env["METAFLOW_PARENT_TASK_ID"] = "$.JobId"
                else:
                    # Static join: one step/task-id pair per parent, indexed
                    # into the Parallel state's output list.
                    input_paths = "sfn-${METAFLOW_RUN_ID}:" + ",".join(
                        "/${METAFLOW_PARENT_%s_STEP}/"
                        "${METAFLOW_PARENT_%s_TASK_ID}" % (idx, idx)
                        for idx, _ in enumerate(node.in_funcs)
                    )
                    for idx, _ in enumerate(node.in_funcs):
                        env["METAFLOW_PARENT_%s_TASK_ID" % idx] = "$.[%s].JobId" % idx
                        env["METAFLOW_PARENT_%s_STEP" % idx] = (
                            "$.[%s].Parameters.step_name" % idx
                        )
            env["METAFLOW_INPUT_PATHS"] = input_paths
            if node.is_inside_foreach:
                # Propagate the task id of each enclosing foreach split
                # through `attrs` so descendants can address their split.
                if any(self.graph[n].type == "foreach" for n in node.in_funcs):
                    attrs[
                        "split_parent_task_id_%s.$" % node.split_parents[-1]
                    ] = "$.SplitParentTaskId"
                    for parent in node.split_parents[:-1]:
                        if self.graph[parent].type == "foreach":
                            attrs["split_parent_task_id_%s.$" % parent] = (
                                "$.Parameters.split_parent_task_id_%s" % parent
                            )
                elif node.type == "join":
                    if self.graph[node.split_parents[-1]].type == "foreach":
                        # Foreach join: parent split ids are read from
                        # `Parameters` directly rather than by list index —
                        # a nice side effect is that foreach splits scale
                        # beyond the 32K output-size limit because the per-
                        # branch outputs never need to be aggregated here.
                        attrs["split_parent_task_id_%s.$" % node.split_parents[-1]] = (
                            "$.Parameters.split_parent_task_id_%s"
                            % node.split_parents[-1]
                        )
                        for parent in node.split_parents[:-1]:
                            if self.graph[parent].type == "foreach":
                                attrs["split_parent_task_id_%s.$" % parent] = (
                                    "$.Parameters.split_parent_task_id_%s" % parent
                                )
                    else:
                        # Static join inside a foreach: read from the first
                        # branch's Parameters.
                        for parent in node.split_parents:
                            if self.graph[parent].type == "foreach":
                                attrs["split_parent_task_id_%s.$" % parent] = (
                                    "$.[0].Parameters.split_parent_task_id_%s" % parent
                                )
                else:
                    for parent in node.split_parents:
                        if self.graph[parent].type == "foreach":
                            attrs["split_parent_task_id_%s.$" % parent] = (
                                "$.Parameters.split_parent_task_id_%s" % parent
                            )
                # Set `METAFLOW_SPLIT_PARENT_TASK_ID_FOR_FOREACH_JOIN` if the
                # next transition is to a foreach join, so that the
                # stepfunctions decorator can write the mapping for input
                # path to DynamoDb.
                if any(
                    self.graph[n].type == "join"
                    and self.graph[self.graph[n].split_parents[-1]].type == "foreach"
                    for n in node.out_funcs
                ):
                    env["METAFLOW_SPLIT_PARENT_TASK_ID_FOR_FOREACH_JOIN"] = attrs[
                        "split_parent_task_id_%s.$"
                        % self.graph[node.out_funcs[0]].split_parents[-1]
                    ]
            # Set ttl for the values we set in AWS DynamoDB.
            if node.type == "foreach":
                if self.workflow_timeout:
                    env["METAFLOW_SFN_WORKFLOW_TIMEOUT"] = self.workflow_timeout
        # Handle split index for for-each.
        if any(self.graph[n].type == "foreach" for n in node.in_funcs):
            env["METAFLOW_SPLIT_INDEX"] = "$.Index"
        env["METAFLOW_CODE_URL"] = self.code_package_url
        env["METAFLOW_FLOW_NAME"] = attrs["metaflow.flow_name"]
        env["METAFLOW_STEP_NAME"] = attrs["metaflow.step_name"]
        env["METAFLOW_RUN_ID"] = attrs["metaflow.run_id.$"]
        env["METAFLOW_PRODUCTION_TOKEN"] = self.production_token
        env["SFN_STATE_MACHINE"] = self.name
        env["METAFLOW_OWNER"] = attrs["metaflow.owner"]
        # Can't set `METAFLOW_TASK_ID` due to lack of run-scoped identifiers.
        metadata_env = self.metadata.get_runtime_environment("step-functions")
        env.update(metadata_env)
        metaflow_version = self.environment.get_environment_info()
        metaflow_version["flow_name"] = self.graph.name
        metaflow_version["production_token"] = self.production_token
        env["METAFLOW_VERSION"] = json.dumps(metaflow_version)
        # Set AWS DynamoDb Table Name for state tracking for for-eaches.
        # There are three instances when metaflow runtime directly interacts
        # with AWS DynamoDB.
        # 1. To set the cardinality of foreaches (which are subsequently)
        #    read prior to the instantiation of the Map state by AWS Step
        #    Functions.
        # 2. To set the input paths from the parent steps of a foreach join.
        # 3. To read the input paths in a foreach join.
        if (
            node.type == "foreach"
            or (
                node.is_inside_foreach
                and any(
                    self.graph[n].type == "join"
                    and self.graph[self.graph[n].split_parents[-1]].type == "foreach"
                    for n in node.out_funcs
                )
            )
            or (
                node.type == "join"
                and self.graph[node.split_parents[-1]].type == "foreach"
            )
        ):
            if SFN_DYNAMO_DB_TABLE is None:
                raise StepFunctionsException(
                    "An AWS DynamoDB table is needed "
                    "to support foreach in your flow. "
                    "You can create one following the "
                    "instructions listed at *https://a"
                    "dmin-docs.metaflow.org/metaflow-o"
                    "n-aws/deployment-guide/manual-dep"
                    "loyment#scheduling* and "
                    "re-configure Metaflow using "
                    "*metaflow configure aws* on your "
                    "terminal."
                )
            env["METAFLOW_SFN_DYNAMO_DB_TABLE"] = SFN_DYNAMO_DB_TABLE
        # Resolve AWS Batch resource requirements.
        batch_deco = [deco for deco in node.decorators if deco.name == "batch"][0]
        resources = batch_deco.attributes
        # Resolve retry strategy.
        user_code_retries, total_retries = self._get_retries(node)
        task_spec = {
            "flow_name": attrs["metaflow.flow_name"],
            "step_name": attrs["metaflow.step_name"],
            "run_id": "sfn-$METAFLOW_RUN_ID",
            # Use AWS Batch job identifier as the globally unique
            # task identifier.
            "task_id": "$AWS_BATCH_JOB_ID",
            # Since retries are handled by AWS Batch, we can rely on
            # AWS_BATCH_JOB_ATTEMPT as the job counter.
            "retry_count": "$((AWS_BATCH_JOB_ATTEMPT-1))",
        }
        return (
            Batch(self.metadata, self.environment)
            .create_job(
                step_name=node.name,
                step_cli=self._step_cli(
                    node, input_paths, self.code_package_url, user_code_retries
                ),
                task_spec=task_spec,
                code_package_sha=self.code_package_sha,
                code_package_url=self.code_package_url,
                code_package_ds=self.flow_datastore.TYPE,
                image=resources["image"],
                queue=resources["queue"],
                iam_role=resources["iam_role"],
                execution_role=resources["execution_role"],
                cpu=resources["cpu"],
                gpu=resources["gpu"],
                memory=resources["memory"],
                run_time_limit=batch_deco.run_time_limit,
                shared_memory=resources["shared_memory"],
                max_swap=resources["max_swap"],
                swappiness=resources["swappiness"],
                env=env,
                attrs=attrs,
                host_volumes=resources["host_volumes"],
            )
            .attempts(total_retries + 1)
        )
def _get_retries(self, node):
max_user_code_retries = 0
max_error_retries = 0
# Different decorators may have different retrying strategies, so take
# the max of them.
for deco in node.decorators:
user_code_retries, error_retries = deco.step_task_retry_count()
max_user_code_retries = max(max_user_code_retries, user_code_retries)
max_error_retries = max(max_error_retries, error_retries)
return max_user_code_retries, max_user_code_retries + max_error_retries
    def _step_cli(self, node, paths, code_package_url, user_code_retries):
        """Build the shell command line that the AWS Batch container runs
        to execute *node*.

        For the `start` step this additionally (and idempotently) creates
        the special `_parameters` task; for foreach joins it first exports
        the parent task ids recovered from DynamoDB. The individual commands
        are joined with `&&` so any failure aborts the task.
        """
        cmds = []
        script_name = os.path.basename(sys.argv[0])
        executable = self.environment.executable(node.name)
        if R.use_r():
            entrypoint = [R.entrypoint()]
        else:
            entrypoint = [executable, script_name]
        # Use AWS Batch job identifier as the globally unique task identifier.
        task_id = "${AWS_BATCH_JOB_ID}"
        # FlowDecorators can define their own top-level options. They are
        # responsible for adding their own top-level options and values
        # through the get_top_level_options() hook. See similar logic in
        # runtime.py.
        top_opts_dict = {}
        for deco in flow_decorators():
            top_opts_dict.update(deco.get_top_level_options())
        top_opts = list(dict_to_cli_options(top_opts_dict))
        if node.name == "start":
            # We need a separate unique ID for the special _parameters task.
            task_id_params = "%s-params" % task_id
            # Export user-defined parameters into the runtime environment
            # via a randomly-named temp file that is then sourced.
            param_file = "".join(
                random.choice(string.ascii_lowercase) for _ in range(10)
            )
            export_params = (
                "python -m "
                "metaflow.plugins.aws.step_functions.set_batch_environment "
                "parameters %s && . `pwd`/%s" % (param_file, param_file)
            )
            params = (
                entrypoint
                + top_opts
                + [
                    "--quiet",
                    "--metadata=%s" % self.metadata.TYPE,
                    "--environment=%s" % self.environment.TYPE,
                    "--datastore=s3",
                    "--event-logger=%s" % self.event_logger.logger_type,
                    "--monitor=%s" % self.monitor.monitor_type,
                    "--no-pylint",
                    "init",
                    "--run-id sfn-$METAFLOW_RUN_ID",
                    "--task-id %s" % task_id_params,
                ]
            )
            # Assign tags to run objects.
            if self.tags:
                params.extend("--tag %s" % tag for tag in self.tags)
            # If the start step gets retried, we must be careful not to
            # regenerate multiple parameters tasks. Hence we check first if
            # _parameters exists already.
            exists = entrypoint + [
                "dump",
                "--max-value-size=0",
                "sfn-${METAFLOW_RUN_ID}/_parameters/%s" % (task_id_params),
            ]
            cmd = "if ! %s >/dev/null 2>/dev/null; then %s && %s; fi" % (
                " ".join(exists),
                export_params,
                " ".join(params),
            )
            cmds.append(cmd)
            # The start step's input path is the _parameters task it just
            # (possibly) created.
            paths = "sfn-${METAFLOW_RUN_ID}/_parameters/%s" % (task_id_params)
        if node.type == "join" and self.graph[node.split_parents[-1]].type == "foreach":
            # Foreach join: export parent task ids (read from DynamoDB) into
            # the environment before running the step.
            parent_tasks_file = "".join(
                random.choice(string.ascii_lowercase) for _ in range(10)
            )
            export_parent_tasks = (
                "python -m "
                "metaflow.plugins.aws.step_functions.set_batch_environment "
                "parent_tasks %s && . `pwd`/%s" % (parent_tasks_file, parent_tasks_file)
            )
            cmds.append(export_parent_tasks)
        top_level = top_opts + [
            "--quiet",
            "--metadata=%s" % self.metadata.TYPE,
            "--environment=%s" % self.environment.TYPE,
            "--datastore=%s" % self.flow_datastore.TYPE,
            "--datastore-root=%s" % self.flow_datastore.datastore_root,
            "--event-logger=%s" % self.event_logger.logger_type,
            "--monitor=%s" % self.monitor.monitor_type,
            "--no-pylint",
            "--with=step_functions_internal",
        ]
        step = [
            "step",
            node.name,
            "--run-id sfn-$METAFLOW_RUN_ID",
            "--task-id %s" % task_id,
            # Since retries are handled by AWS Batch, we can rely on
            # AWS_BATCH_JOB_ATTEMPT as the job counter.
            "--retry-count $((AWS_BATCH_JOB_ATTEMPT-1))",
            "--max-user-code-retries %d" % user_code_retries,
            "--input-paths %s" % paths,
            # Set decorator to batch to execute `task_*` hooks for batch
            # decorator.
            "--with=batch",
        ]
        if any(self.graph[n].type == "foreach" for n in node.in_funcs):
            # We set the `METAFLOW_SPLIT_INDEX` through JSONPath-foo
            # to pass the state from the parent DynamoDb state for for-each.
            step.append("--split-index $METAFLOW_SPLIT_INDEX")
        if self.tags:
            step.extend("--tag %s" % tag for tag in self.tags)
        if self.namespace is not None:
            step.append("--namespace=%s" % self.namespace)
        cmds.append(" ".join(entrypoint + top_level + step))
        return " && ".join(cmds)
class Workflow(object):
    """Builder for a (sub-)workflow in the Amazon States Language.

    All setters return ``self`` so calls can be chained.
    """

    def __init__(self, name):
        self.name = name

        # Autovivifying nested dict: states can be assigned at any depth
        # without pre-creating the intermediate levels.
        def _tree():
            return defaultdict(_tree)

        self.payload = _tree()

    def start_at(self, start_at):
        """Set the workflow's entry state name."""
        self.payload["StartAt"] = start_at
        return self

    def add_state(self, state):
        """Register *state* (anything with .name and .payload) in `States`."""
        self.payload["States"][state.name] = state.payload
        return self

    def timeout_seconds(self, timeout_seconds):
        """Cap the total execution time of the workflow."""
        self.payload["TimeoutSeconds"] = timeout_seconds
        return self

    def to_json(self, pretty=False):
        """Serialize the definition; *pretty* adds 4-space indentation."""
        indent = 4 if pretty else None
        return json.dumps(self.payload, indent=indent)
class State(object):
    """Builder for a single `Task` state in the Amazon States Language.

    All setters return ``self`` so calls can be chained.
    """

    def __init__(self, name):
        self.name = name

        # Autovivifying nested dict so e.g. payload["Parameters"][k] works
        # without initializing "Parameters" first.
        def _tree():
            return defaultdict(_tree)

        self.payload = _tree()
        self.payload["Type"] = "Task"

    def resource(self, resource):
        """Set the service-integration Resource ARN."""
        self.payload["Resource"] = resource
        return self

    def next(self, state):
        """Name the state to transition to."""
        self.payload["Next"] = state
        return self

    def end(self):
        """Mark this state as terminal."""
        self.payload["End"] = True
        return self

    def parameter(self, name, value):
        """Set one entry under the state's `Parameters`."""
        self.payload["Parameters"][name] = value
        return self

    def output_path(self, output_path):
        self.payload["OutputPath"] = output_path
        return self

    def result_path(self, result_path):
        self.payload["ResultPath"] = result_path
        return self

    def _partition(self):
        # This is needed to support AWS Gov Cloud and AWS CN regions
        return SFN_IAM_ROLE.split(":")[1]

    def batch(self, job):
        """Configure this state to submit *job* via `batch:submitJob.sync`."""
        self.resource("arn:%s:states:::batch:submitJob.sync" % self._partition())
        self.parameter("JobDefinition", job.payload["jobDefinition"])
        self.parameter("JobName", job.payload["jobName"])
        self.parameter("JobQueue", job.payload["jobQueue"])
        self.parameter("Parameters", job.payload["parameters"])
        self.parameter(
            "ContainerOverrides", to_pascalcase(job.payload["containerOverrides"])
        )
        self.parameter("RetryStrategy", to_pascalcase(job.payload["retryStrategy"]))
        self.parameter("Timeout", to_pascalcase(job.payload["timeout"]))
        # tags may not be present in all scenarios
        if "tags" in job.payload:
            self.parameter("Tags", job.payload["tags"])
        return self

    def dynamo_db(self, table_name, primary_key, values):
        """Configure this state as a `dynamodb:getItem` call."""
        self.resource("arn:%s:states:::dynamodb:getItem" % self._partition())
        self.parameter("TableName", table_name)
        self.parameter("Key", {"pathspec": {"S.$": primary_key}})
        self.parameter("ConsistentRead", True)
        self.parameter("ProjectionExpression", values)
        return self
class Parallel(object):
    """Builder for a `Parallel` state; setters return ``self`` for chaining."""

    def __init__(self, name):
        self.name = name

        # Autovivifying nested dict, mirroring Workflow/State payloads.
        def _tree():
            return defaultdict(_tree)

        self.payload = _tree()
        self.payload["Type"] = "Parallel"

    def branch(self, workflow):
        """Append *workflow*'s payload as one parallel branch."""
        branches = self.payload.setdefault("Branches", [])
        branches.append(workflow.payload)
        return self

    def next(self, state):
        self.payload["Next"] = state
        return self

    def output_path(self, output_path):
        self.payload["OutputPath"] = output_path
        return self

    def result_path(self, result_path):
        self.payload["ResultPath"] = result_path
        return self
class Map(object):
    """Builder for a `Map` state; setters return ``self`` for chaining."""

    def __init__(self, name):
        self.name = name

        # Autovivifying nested dict, mirroring Workflow/State payloads.
        def _tree():
            return defaultdict(_tree)

        self.payload = _tree()
        self.payload["Type"] = "Map"
        # 0 means no concurrency cap until max_concurrency() is called.
        self.payload["MaxConcurrency"] = 0

    def iterator(self, workflow):
        """Set the sub-workflow executed per input item."""
        self.payload["Iterator"] = workflow.payload
        return self

    def next(self, state):
        self.payload["Next"] = state
        return self

    def items_path(self, items_path):
        """JSONPath selecting the array to iterate over."""
        self.payload["ItemsPath"] = items_path
        return self

    def parameter(self, name, value):
        self.payload["Parameters"][name] = value
        return self

    def max_concurrency(self, max_concurrency):
        self.payload["MaxConcurrency"] = max_concurrency
        return self

    def output_path(self, output_path):
        self.payload["OutputPath"] = output_path
        return self

    def result_path(self, result_path):
        self.payload["ResultPath"] = result_path
        return self
| true | true |
f71b5640c381e4a1a513cc6857ecd00c92aa7029 | 279 | py | Python | circle_core/writer/base.py | glucoseinc/CircleCore | 577f814ce2944efb6e5997f3d7838c71ce9aea6a | [
"MIT"
] | 3 | 2019-01-11T04:30:18.000Z | 2019-01-11T04:31:18.000Z | circle_core/writer/base.py | glucoseinc/CircleCore | 577f814ce2944efb6e5997f3d7838c71ce9aea6a | [
"MIT"
] | 16 | 2018-11-21T11:47:18.000Z | 2021-09-01T03:52:35.000Z | circle_core/writer/base.py | glucoseinc/CircleCore | 577f814ce2944efb6e5997f3d7838c71ce9aea6a | [
"MIT"
] | null | null | null | import abc
class DBWriter(metaclass=abc.ABCMeta):
    """Abstract interface for asynchronous database writers.

    Concrete subclasses implement buffered persistence of messages and an
    explicit flush of any buffered writes.
    """

    @abc.abstractmethod
    async def store(self, message_box, message) -> bool:
        """Persist *message* (belonging to *message_box*); return True on
        success. Exact argument types are defined by the caller — presumably
        CircleCore message objects; confirm against implementations."""
        raise NotImplementedError

    @abc.abstractmethod
    async def flush(self, flush_all=False) -> None:
        """Flush buffered writes; *flush_all* forces flushing everything."""
        raise NotImplementedError
| 21.461538 | 56 | 0.706093 | import abc
class DBWriter(metaclass=abc.ABCMeta):
    """Abstract interface for asynchronous database writers."""

    @abc.abstractmethod
    async def store(self, message_box, message) -> bool:
        """Persist *message* for *message_box*; return True on success."""
        raise NotImplementedError

    @abc.abstractmethod
    async def flush(self, flush_all=False) -> None:
        """Flush buffered writes; *flush_all* forces flushing everything."""
        raise NotImplementedError
| true | true |
f71b56535865b78456cbc8ac2192f63ee5287cfd | 588 | py | Python | src/UI_Code_Q2/Test_code_smaller_parts/PythonWriteUSB.py | KevinEwoudLee/HU3-UI | 16d63e0be8c515540daf4f9cfcff2d0a85c1cbab | [
"MIT"
] | 1 | 2019-12-11T15:27:53.000Z | 2019-12-11T15:27:53.000Z | src/UI_Code_Q2/Test_code_smaller_parts/PythonWriteUSB.py | KevinEwoudLee/HU3-UI | 16d63e0be8c515540daf4f9cfcff2d0a85c1cbab | [
"MIT"
] | null | null | null | src/UI_Code_Q2/Test_code_smaller_parts/PythonWriteUSB.py | KevinEwoudLee/HU3-UI | 16d63e0be8c515540daf4f9cfcff2d0a85c1cbab | [
"MIT"
] | 1 | 2019-12-11T15:23:56.000Z | 2019-12-11T15:23:56.000Z | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 26 09:36:21 2019
@author: kevin
"""
import os
import time
from time import sleep
from datetime import datetime
file = open("E:/test2.csv", "a")
i=0
if os.stat("E:/test2.csv").st_size == 0:
file.write("Time,Sensor1,Sensor2,Sensor3,Sensor4,Sensor5\n")
while True:
i=i+1
now = datetime.now()
file.write(str(now)+","+str(i)+","+str(-i)+","+str(i-10)+","+str(i+5)+","+str(i*i)+"\n")
file.flush()
time.sleep(1)
if (i>=10):
break
file.close()
| 22.615385 | 97 | 0.52551 |
import os
import time
from time import sleep
from datetime import datetime

# Append ten timestamped dummy sensor rows (one per second) to the CSV,
# writing a header first when the file is empty.
target_path = "E:/test2.csv"
file = open(target_path, "a")
if os.stat(target_path).st_size == 0:
    file.write("Time,Sensor1,Sensor2,Sensor3,Sensor4,Sensor5\n")
for i in range(1, 11):
    now = datetime.now()
    row = ",".join(
        [str(now), str(i), str(-i), str(i - 10), str(i + 5), str(i * i)]
    )
    file.write(row + "\n")
    file.flush()
    time.sleep(1)
file.close()
| true | true |
f71b56ae1d0e79e35e8bd9e7c4c05e6ff33f45bf | 3,286 | py | Python | src/util/pos_util.py | tiefenauer/ip7-python | 512105ba39110ec77d2ea0961dd7c2a42d4ec26d | [
"MIT"
] | null | null | null | src/util/pos_util.py | tiefenauer/ip7-python | 512105ba39110ec77d2ea0961dd7c2a42d4ec26d | [
"MIT"
] | null | null | null | src/util/pos_util.py | tiefenauer/ip7-python | 512105ba39110ec77d2ea0961dd7c2a42d4ec26d | [
"MIT"
] | null | null | null | import collections
from src.importer.known_jobs import KnownJobs
from src.preprocessing import preproc
from src.util import loe_util, jobtitle_util
# Gender-marker tokens that job ads commonly append to a title
# (e.g. "Informatiker (m/w)"); used as anchors to locate job titles.
mw_tokens = ['m/w', 'w/m', 'm/f', 'f/m',
             'M/W', 'W/M', 'M/F', 'F/M']
def find_jobs(sentence):
    """Find job-title candidates in *sentence*.

    Returns a list of (job_name, source) tuples, where source names the
    anchor that produced the hit: 'known-job' (known-title list), 'mw'
    (gender marker like "m/w"), or 'loe' (workload percentage like "80%").
    Dead commented-out experiments from the original were removed.
    """
    jobs = []
    # 1) Direct matches against the known job titles.
    for hit in find_job_by_keyword(sentence, KnownJobs()):
        jobs.append((hit, 'known-job'))
    # 2) Gender markers such as "m/w"; percentages are stripped first so
    #    they do not end up inside the expanded title.
    sentence_without_percentage = loe_util.remove_percentage(sentence)
    for hit in find_job_by_keyword(sentence_without_percentage, mw_tokens):
        jobs.append((hit, 'mw'))
    # 3) Workload percentages such as "80%"; gender markers are stripped
    #    first, mirroring step 2.
    sentence_without_mw = jobtitle_util.remove_mw(sentence)
    for hit in find_job_by_keyword(sentence_without_mw, loe_util.find_all_loe(sentence_without_mw)):
        jobs.append((hit, 'loe'))
    return jobs
def find_job_by_keyword(sentence, keywords):
    """Yield the expanded job name around each keyword found in *sentence*.

    Keywords absent from the sentence, and expansions that come back empty,
    are skipped.
    """
    for keyword in keywords:
        if keyword not in sentence:
            continue
        candidate = expand_left_right(keyword, sentence)
        if candidate:
            yield candidate
def expand_left_right(token, sentence):
    """Expand *token* to the surrounding noun-like phrase in *sentence*.

    Tokenizes and POS-tags the sentence (via the project's `preproc`
    helpers — exact tagset assumed to be noun tags starting with N/F;
    confirm against preproc), then walks left and right from the token's
    position collecting adjacent name parts. Returns the space-joined
    phrase, or None when *token* does not occur in *sentence*.
    """
    if token not in sentence:
        return None
    job_name_tokens = preproc.to_words(token)
    # Parentheses are dropped so positions line up with the POS-tagged list.
    sentence_tokens = [word for word in preproc.to_words(sentence) if word not in ['(', ')']]
    ix_from, ix_to = calculate_positions(job_name_tokens, sentence_tokens)
    sentence_pos = preproc.pos_tag(sentence_tokens)
    left = sentence_pos[:ix_from]
    right = sentence_pos[ix_to:]
    # A bare gender marker or percentage is only an anchor, not part of the
    # job title, so don't seed the result with it.
    initial_content = [token] if token not in mw_tokens and not loe_util.is_percentate(token) else []
    tokens = collections.deque(initial_content)
    search_left(left, tokens)
    search_right(right, tokens)
    return ' '.join(tokens)
def search_left(pos_tagged_words, tokens=None):
    """Walk *pos_tagged_words* right-to-left, prepending consecutive
    name-part tokens to *tokens* until a non-name token is hit.

    Bug fix: the default used to be a shared mutable ``collections.deque()``
    evaluated once at definition time, so repeated calls without an explicit
    *tokens* accumulated results across calls. A fresh deque is now created
    per call; passing a deque explicitly behaves exactly as before.
    """
    if tokens is None:
        tokens = collections.deque()
    i = len(pos_tagged_words) - 1
    while 0 <= i:
        word, pos_tag = pos_tagged_words[i]
        if is_part_of_name(word, pos_tag):
            tokens.appendleft(word)
        else:
            break
        i -= 1
    return tokens
def search_right(pos_tagged_words, tokens=None):
    """Walk *pos_tagged_words* left-to-right, appending consecutive
    name-part tokens to *tokens* until a non-name token is hit.

    Bug fix: the default used to be a shared mutable ``collections.deque()``
    evaluated once at definition time, so repeated calls without an explicit
    *tokens* accumulated results across calls. A fresh deque is now created
    per call; passing a deque explicitly behaves exactly as before.
    """
    if tokens is None:
        tokens = collections.deque()
    i = 0
    while 0 <= i < len(pos_tagged_words):
        word, pos_tag = pos_tagged_words[i]
        if is_part_of_name(word, pos_tag):
            tokens.append(word)
        else:
            break
        i += 1
    return tokens
def is_part_of_name(word, pos_tag):
    """A token belongs to a job name if it is noun-like or a literal slash."""
    return is_noun(pos_tag) or word == '/'
def is_noun(pos_tag):
    """True when the POS tag begins with 'N' or 'F' (noun-like tags).

    Like the original, raises IndexError on an empty tag string.
    """
    first_char = pos_tag[0]
    return first_char == 'N' or first_char == 'F'
def is_punctuation(pos_tag):
    """True when the POS tag marks punctuation (tag begins with '$')."""
    return pos_tag[:1] == '$'
def calculate_positions(job_name_tokens, sentence_tokens):
    """Locate the job-name token span inside the tokenized sentence.

    Matches the first sentence token that *contains* (substring match) the
    job name's first token, and returns (start, start + len(job_name_tokens)).
    Raises IndexError when no sentence token matches (same as the original).
    """
    first_token = job_name_tokens[0]
    matches = [i for i, word in enumerate(sentence_tokens) if first_token in word]
    ix_from = matches[0]
    return ix_from, ix_from + len(job_name_tokens)
| 30.71028 | 101 | 0.676811 | import collections
from src.importer.known_jobs import KnownJobs
from src.preprocessing import preproc
from src.util import loe_util, jobtitle_util
# Gender-marker tokens ("m/w" etc.) used as anchors to locate job titles.
mw_tokens = ['m/w', 'w/m', 'm/f', 'f/m',
             'M/W', 'W/M', 'M/F', 'F/M']
def find_jobs(sentence):
    """Find job-title candidates; returns (job_name, source) tuples with
    source in {'known-job', 'mw', 'loe'}."""
    jobs = []
    # Direct matches against the known job titles.
    for hit in find_job_by_keyword(sentence, KnownJobs()):
        jobs.append((hit, 'known-job'))
    # Gender markers such as "m/w" (percentages removed first).
    sentence_without_percentage = loe_util.remove_percentage(sentence)
    for hit in find_job_by_keyword(sentence_without_percentage, mw_tokens):
        jobs.append((hit, 'mw'))
    # Workload percentages such as "80%" (gender markers removed first).
    sentence_without_mw = jobtitle_util.remove_mw(sentence)
    for hit in find_job_by_keyword(sentence_without_mw, loe_util.find_all_loe(sentence_without_mw)):
        jobs.append((hit, 'loe'))
    return jobs
def find_job_by_keyword(sentence, keywords):
    """Yield expanded (non-empty) job names around each keyword present in
    *sentence*."""
    for keyword in keywords:
        if keyword in sentence:
            job_name = expand_left_right(keyword, sentence)
            if job_name:
                yield job_name
def expand_left_right(token, sentence):
    """Expand *token* to the surrounding noun-like phrase in *sentence*;
    returns None when the token does not occur. Anchor-only tokens (gender
    markers, percentages) are excluded from the result itself."""
    if token not in sentence:
        return None
    job_name_tokens = preproc.to_words(token)
    # Parentheses are dropped so positions line up with the POS-tagged list.
    sentence_tokens = [word for word in preproc.to_words(sentence) if word not in ['(', ')']]
    ix_from, ix_to = calculate_positions(job_name_tokens, sentence_tokens)
    sentence_pos = preproc.pos_tag(sentence_tokens)
    left = sentence_pos[:ix_from]
    right = sentence_pos[ix_to:]
    initial_content = [token] if token not in mw_tokens and not loe_util.is_percentate(token) else []
    tokens = collections.deque(initial_content)
    search_left(left, tokens)
    search_right(right, tokens)
    return ' '.join(tokens)
def search_left(pos_tagged_words, tokens=None):
    """Prepend consecutive name-part tokens, scanning right-to-left.

    Bug fix: replaces the shared mutable default ``collections.deque()``
    (evaluated once, accumulating across calls) with a per-call deque.
    """
    if tokens is None:
        tokens = collections.deque()
    i = len(pos_tagged_words) - 1
    while 0 <= i:
        word, pos_tag = pos_tagged_words[i]
        if is_part_of_name(word, pos_tag):
            tokens.appendleft(word)
        else:
            break
        i -= 1
    return tokens
def search_right(pos_tagged_words, tokens=None):
    """Append consecutive name-part tokens, scanning left-to-right.

    Bug fix: replaces the shared mutable default ``collections.deque()``
    (evaluated once, accumulating across calls) with a per-call deque.
    """
    if tokens is None:
        tokens = collections.deque()
    i = 0
    while 0 <= i < len(pos_tagged_words):
        word, pos_tag = pos_tagged_words[i]
        if is_part_of_name(word, pos_tag):
            tokens.append(word)
        else:
            break
        i += 1
    return tokens
def is_part_of_name(word, pos_tag):
    """A token belongs to a job name if it is noun-like or a literal slash."""
    return is_noun(pos_tag) or word in ['/']
def is_noun(pos_tag):
    """True when the POS tag begins with 'N' or 'F' (noun-like tags)."""
    return pos_tag[0] in ['N', 'F']
def is_punctuation(pos_tag):
    """True when the POS tag marks punctuation (tag begins with '$')."""
    return pos_tag.startswith('$')
def calculate_positions(job_name_tokens, sentence_tokens):
    """Locate the job name inside the token list.

    Returns ``(ix_from, ix_to)`` where ``ix_from`` is the index of the first
    sentence token containing the job name's first token (substring match)
    and ``ix_to`` is ``ix_from`` plus the job name's length. Raises
    IndexError when no token matches.
    """
    first_part = job_name_tokens[0]
    hits = [i for i, tok in enumerate(sentence_tokens) if first_part in tok]
    ix_from = hits[0]
    return ix_from, ix_from + len(job_name_tokens)
| true | true |
f71b56c4085612ca2aacc209126330286fb3b4f9 | 2,980 | py | Python | webvep/main/settings.py | IanVermes/vep_api | 9d9d31eddd969aad1c462278ea1f1fb09153e054 | [
"MIT"
] | null | null | null | webvep/main/settings.py | IanVermes/vep_api | 9d9d31eddd969aad1c462278ea1f1fb09153e054 | [
"MIT"
] | 1 | 2020-03-30T10:52:58.000Z | 2020-03-30T16:46:31.000Z | webvep/main/settings.py | IanVermes/vep_api | 9d9d31eddd969aad1c462278ea1f1fb09153e054 | [
"MIT"
] | null | null | null | """
Django settings for webvep project.
Generated by 'django-admin startproject' using Django 3.0.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository; rotate it and load
# from the environment before any production deployment.
SECRET_KEY = "ec)mzu6ls4qaj!8)txrke(uxxtb1gmz^2a_^1$lqe9&ys17^!$"

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition
# Project apps: webvep_frontend (UI) and webvep_api (DRF endpoints).
INSTALLED_APPS = [
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    "rest_framework",
    "webvep_frontend",
    "webvep_api",
]

MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
]

ROOT_URLCONF = "main.urls"

TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
            ],
        },
    },
]

WSGI_APPLICATION = "main.wsgi.application"

# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
## DONT NEED ONE

# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
    },
    {"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",},
    {"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",},
    {"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",},
]

# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = "/static/"
| 26.607143 | 91 | 0.711409 |
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = "ec)mzu6ls4qaj!8)txrke(uxxtb1gmz^2a_^1$lqe9&ys17^!$"
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"rest_framework",
"webvep_frontend",
"webvep_api",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "main.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "main.wsgi.application"
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
## DONT NEED ONE
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = "/static/"
| true | true |
f71b57614025c4aecf90a7ea7cf4fe3d2b9c8499 | 77 | py | Python | zmon_aws_agent/__main__.py | jrake-revelant/zmon-aws-agent | 67ed3d0230f3bec9b3a3950c3eefa48404ee9d6b | [
"Apache-2.0"
] | 9 | 2016-07-28T09:28:20.000Z | 2022-02-27T23:40:09.000Z | zmon_aws_agent/__main__.py | jrake-revelant/zmon-aws-agent | 67ed3d0230f3bec9b3a3950c3eefa48404ee9d6b | [
"Apache-2.0"
] | 146 | 2016-05-23T13:43:42.000Z | 2020-03-10T09:47:07.000Z | zmon_aws_agent/__main__.py | jrake-revelant/zmon-aws-agent | 67ed3d0230f3bec9b3a3950c3eefa48404ee9d6b | [
"Apache-2.0"
] | 11 | 2017-02-18T12:46:27.000Z | 2020-01-22T13:13:36.000Z | from zmon_aws_agent.main import main
# Script entry point: delegate to the agent's main() when executed directly.
if __name__ == '__main__':
    main()
| 12.833333 | 36 | 0.701299 | from zmon_aws_agent.main import main
if __name__ == '__main__':
main()
| true | true |
f71b584d40b3c272f7646b95950ee740aeb0fc1c | 5,834 | py | Python | tests/model_connectors/test_spawn_ets.py | macintoshpie/geojson-modelica-translator | 5ed02d53f06961b8d0f3705343368e4c920e7d7d | [
"BSD-3-Clause"
] | null | null | null | tests/model_connectors/test_spawn_ets.py | macintoshpie/geojson-modelica-translator | 5ed02d53f06961b8d0f3705343368e4c920e7d7d | [
"BSD-3-Clause"
] | null | null | null | tests/model_connectors/test_spawn_ets.py | macintoshpie/geojson-modelica-translator | 5ed02d53f06961b8d0f3705343368e4c920e7d7d | [
"BSD-3-Clause"
] | null | null | null | """
****************************************************************************************************
:copyright (c) 2019-2020 URBANopt, Alliance for Sustainable Energy, LLC, and other contributors.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted
provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list of conditions
and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this list of conditions
and the following disclaimer in the documentation and/or other materials provided with the
distribution.
Neither the name of the copyright holder nor the names of its contributors may be used to endorse
or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
****************************************************************************************************
"""
import os
import shutil
import unittest
from pathlib import Path
from geojson_modelica_translator.geojson_modelica_translator import (
GeoJsonModelicaTranslator
)
from geojson_modelica_translator.model_connectors.spawnBui_ETS_Coupling import (
SpawnConnectorETS
)
from geojson_modelica_translator.modelica.modelica_runner import ModelicaRunner
from geojson_modelica_translator.system_parameters.system_parameters import (
SystemParameters
)
class SpawnModelConnectorSingleBuildingTest(unittest.TestCase):
    """End-to-end test of the Spawn/ETS connector for a single building."""

    def setUp(self):
        """Scaffold a fresh project from the single-building example GeoJSON."""
        self.data_dir = os.path.join(os.path.dirname(__file__), 'data')
        self.output_dir = os.path.join(os.path.dirname(__file__), 'output')
        project_name = "spawn_single"
        # start from a clean output directory for repeatable runs
        if os.path.exists(os.path.join(self.output_dir, project_name)):
            shutil.rmtree(os.path.join(self.output_dir, project_name))

        # load in the example geojson with a single office building
        filename = os.path.join(self.data_dir, "spawn_geojson_ex1.json")
        self.gj = GeoJsonModelicaTranslator.from_geojson(filename)

        # use the GeoJson translator to scaffold out the directory
        self.gj.scaffold_directory(self.output_dir, project_name)

        # load system parameter data
        filename = os.path.join(self.data_dir, "spawn_system_params_ex1.json")
        sys_params = SystemParameters(filename)

        # now test the spawn connector (independent of the larger geojson translator)
        self.spawn = SpawnConnectorETS(sys_params)

        for b in self.gj.buildings:
            self.spawn.add_building(b)

    def test_spawn_init(self):
        """The connector exists and picked up the Spawn load model."""
        self.assertIsNotNone(self.spawn)
        self.assertEqual(self.spawn.system_parameters.get_param("buildings.custom")[0]["load_model"], "Spawn")

    def test_spawn_to_modelica_and_run(self):
        """Generated Modelica compiles and simulates inside Docker (exit 0)."""
        self.spawn.to_modelica(self.gj.scaffold)

        # make sure the model can run using the ModelicaRunner class
        mr = ModelicaRunner()
        file_to_run = os.path.abspath(
            os.path.join(
                self.gj.scaffold.loads_path.files_dir, 'B5a6b99ec37f4de7f94020090', 'CouplingETS_SpawnBuilding.mo'
            )
        )
        run_path = Path(os.path.abspath(self.gj.scaffold.project_path)).parent
        exitcode = mr.run_in_docker(file_to_run, run_path=run_path)
        self.assertEqual(0, exitcode)
class SpawnModelConnectorTwoBuildingTest(unittest.TestCase):
    """End-to-end test of the Spawn/ETS connector for two buildings."""

    def setUp(self):
        """Scaffold a fresh project from the two-building example GeoJSON."""
        self.data_dir = os.path.join(os.path.dirname(__file__), 'data')
        self.output_dir = os.path.join(os.path.dirname(__file__), 'output')
        project_name = "spawn_two_building"
        # start from a clean output directory for repeatable runs
        if os.path.exists(os.path.join(self.output_dir, project_name)):
            shutil.rmtree(os.path.join(self.output_dir, project_name))

        # load in the example geojson with two buildings
        filename = os.path.join(self.data_dir, "spawn_geojson_ex2.json")
        self.gj = GeoJsonModelicaTranslator.from_geojson(filename)

        # use the GeoJson translator to scaffold out the directory
        self.gj.scaffold_directory(self.output_dir, project_name)

        # load system parameter data
        filename = os.path.join(self.data_dir, "spawn_system_params_ex2.json")
        sys_params = SystemParameters(filename)

        # now test the spawn connector (independent of the larger geojson translator)
        self.spawn = SpawnConnectorETS(sys_params)

        for b in self.gj.buildings:
            self.spawn.add_building(b)

    def test_spawn_to_modelica_and_run(self):
        """Generated Modelica compiles and simulates inside Docker (exit 0)."""
        self.spawn.to_modelica(self.gj.scaffold)

        # make sure the model can run using the ModelicaRunner class
        mr = ModelicaRunner()
        file_to_run = os.path.abspath(
            os.path.join(
                self.gj.scaffold.loads_path.files_dir, 'B5a6b99ec37f4de7f94021950', 'CouplingETS_SpawnBuilding.mo'
            )
        )
        run_path = Path(os.path.abspath(self.gj.scaffold.project_path)).parent
        exitcode = mr.run_in_docker(file_to_run, run_path=run_path)
        self.assertEqual(0, exitcode)
| 44.876923 | 114 | 0.711176 |
import os
import shutil
import unittest
from pathlib import Path
from geojson_modelica_translator.geojson_modelica_translator import (
GeoJsonModelicaTranslator
)
from geojson_modelica_translator.model_connectors.spawnBui_ETS_Coupling import (
SpawnConnectorETS
)
from geojson_modelica_translator.modelica.modelica_runner import ModelicaRunner
from geojson_modelica_translator.system_parameters.system_parameters import (
SystemParameters
)
class SpawnModelConnectorSingleBuildingTest(unittest.TestCase):
def setUp(self):
self.data_dir = os.path.join(os.path.dirname(__file__), 'data')
self.output_dir = os.path.join(os.path.dirname(__file__), 'output')
project_name = "spawn_single"
if os.path.exists(os.path.join(self.output_dir, project_name)):
shutil.rmtree(os.path.join(self.output_dir, project_name))
filename = os.path.join(self.data_dir, "spawn_geojson_ex1.json")
self.gj = GeoJsonModelicaTranslator.from_geojson(filename)
self.gj.scaffold_directory(self.output_dir, project_name)
filename = os.path.join(self.data_dir, "spawn_system_params_ex1.json")
sys_params = SystemParameters(filename)
self.spawn = SpawnConnectorETS(sys_params)
for b in self.gj.buildings:
self.spawn.add_building(b)
def test_spawn_init(self):
self.assertIsNotNone(self.spawn)
self.assertEqual(self.spawn.system_parameters.get_param("buildings.custom")[0]["load_model"], "Spawn")
def test_spawn_to_modelica_and_run(self):
self.spawn.to_modelica(self.gj.scaffold)
mr = ModelicaRunner()
file_to_run = os.path.abspath(
os.path.join(
self.gj.scaffold.loads_path.files_dir, 'B5a6b99ec37f4de7f94020090', 'CouplingETS_SpawnBuilding.mo'
)
)
run_path = Path(os.path.abspath(self.gj.scaffold.project_path)).parent
exitcode = mr.run_in_docker(file_to_run, run_path=run_path)
self.assertEqual(0, exitcode)
class SpawnModelConnectorTwoBuildingTest(unittest.TestCase):
def setUp(self):
self.data_dir = os.path.join(os.path.dirname(__file__), 'data')
self.output_dir = os.path.join(os.path.dirname(__file__), 'output')
project_name = "spawn_two_building"
if os.path.exists(os.path.join(self.output_dir, project_name)):
shutil.rmtree(os.path.join(self.output_dir, project_name))
filename = os.path.join(self.data_dir, "spawn_geojson_ex2.json")
self.gj = GeoJsonModelicaTranslator.from_geojson(filename)
self.gj.scaffold_directory(self.output_dir, project_name)
filename = os.path.join(self.data_dir, "spawn_system_params_ex2.json")
sys_params = SystemParameters(filename)
self.spawn = SpawnConnectorETS(sys_params)
for b in self.gj.buildings:
self.spawn.add_building(b)
def test_spawn_to_modelica_and_run(self):
self.spawn.to_modelica(self.gj.scaffold)
mr = ModelicaRunner()
file_to_run = os.path.abspath(
os.path.join(
self.gj.scaffold.loads_path.files_dir, 'B5a6b99ec37f4de7f94021950', 'CouplingETS_SpawnBuilding.mo'
)
)
run_path = Path(os.path.abspath(self.gj.scaffold.project_path)).parent
exitcode = mr.run_in_docker(file_to_run, run_path=run_path)
self.assertEqual(0, exitcode)
| true | true |
f71b58bc6969e1f4e15e0b32876c55ba66d9757e | 18,715 | py | Python | OLD/losses.py | ivankreso/semseg | fcd2889a1e9e03c3d1a71d19d68d15ce25a5dc79 | [
"MIT"
] | 2 | 2017-11-17T06:55:44.000Z | 2019-06-11T13:07:05.000Z | OLD/losses.py | ivankreso/semseg | fcd2889a1e9e03c3d1a71d19d68d15ce25a5dc79 | [
"MIT"
] | null | null | null | OLD/losses.py | ivankreso/semseg | fcd2889a1e9e03c3d1a71d19d68d15ce25a5dc79 | [
"MIT"
] | null | null | null | import tensorflow as tf
import slim
FLAGS = tf.app.flags.FLAGS
def add_loss_summaries(total_loss):
  """Attach moving averages and TensorBoard summaries to all losses.

  Args:
    total_loss: scalar tensor holding the combined loss.
  Returns:
    The op that updates the exponential moving averages of all losses.
  """
  ema = tf.train.ExponentialMovingAverage(0.9, name='avg')
  individual_losses = tf.get_collection('losses')
  loss_averages_op = ema.apply(individual_losses + [total_loss])
  # Summarize each loss twice: the raw value and its smoothed average.
  for loss in individual_losses + [total_loss]:
    tf.summary.scalar(loss.op.name + ' (raw)', loss)
    tf.summary.scalar(loss.op.name, ema.average(loss))
  return loss_averages_op
def total_loss_sum(losses):
  """Sum the given losses together with all registered regularization losses."""
  reg_losses = tf.losses.get_regularization_losses()
  return tf.add_n(losses + reg_losses, name='total_loss')
def cross_entropy_loss(logits, labels):
  """Mean softmax cross-entropy over pixels with a valid class label.

  Pixels whose label is >= FLAGS.num_classes are treated as ignore
  pixels and excluded before the mean is taken.
  """
  print('loss: cross-entropy')
  flat = -1  # flatten batch and spatial dimensions
  with tf.name_scope(None, 'CrossEntropyLoss', [logits, labels]):
    labels = tf.reshape(labels, shape=[flat])
    logits = tf.reshape(logits, [flat, FLAGS.num_classes])
    # Indices of pixels carrying a real class label.
    valid_idx = tf.where(labels < FLAGS.num_classes)
    # gather_nd keeps only valid pixels; labels round-trip through float
    # (as in the original pipeline) before being cast back to int32.
    labels = tf.to_float(labels)
    labels = tf.gather_nd(labels, valid_idx)
    labels = tf.to_int32(labels)
    logits = tf.gather_nd(logits, valid_idx)
    onehot_labels = tf.one_hot(labels, FLAGS.num_classes)
    xent = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=onehot_labels)
    return tf.reduce_mean(xent)
def weighted_cross_entropy_loss(logits, labels, class_hist=None, max_weight=1):
  """Pixel-wise softmax cross-entropy with (currently disabled) balancing.

  Class weights are all ones with a trailing zero so that pixels labelled
  FLAGS.num_classes (the ignore class) contribute nothing. `class_hist`
  is accepted for interface compatibility but unused, and `max_weight`
  must stay at 1 (larger values raise ValueError).
  """
  print('loss: cross-entropy')
  print('Using balanced loss with max weight = ', max_weight)
  flat = -1
  with tf.name_scope(None, 'CrossEntropyLoss', [logits, labels]):
    labels = tf.reshape(labels, shape=[flat])
    onehot_labels = tf.one_hot(labels, FLAGS.num_classes)
    logits = tf.reshape(logits, [flat, FLAGS.num_classes])
    xent = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=onehot_labels)
    # one_hot yields an all-zero row for ignore labels, so this counts
    # only pixels with a real class.
    num_labels = tf.reduce_sum(onehot_labels)
    # Every real class weighs 1; the appended 0 zeroes out ignore pixels.
    class_weights = tf.ones([FLAGS.num_classes])
    class_weights = tf.concat([class_weights, [0]], axis=0)
    weights = tf.gather(class_weights, labels)
    if max_weight > 1:
      raise ValueError()
    # Renormalize so the weights sum to the number of labelled pixels.
    wgt_sum = tf.reduce_sum(weights)
    norm_factor = num_labels / wgt_sum
    weights = tf.multiply(weights, norm_factor)
    xent = tf.multiply(weights, xent)
    return tf.reduce_sum(xent) / num_labels
def weighted_cross_entropy_loss_dense(logits, labels, weights=None,
                                      num_labels=None, max_weight=100):
  """Softmax cross-entropy scaled by a dense per-pixel weight map.

  Weights are clipped at `max_weight` and renormalized so they sum to the
  number of labelled pixels before scaling the per-pixel cross-entropy.
  """
  print('loss: cross-entropy')
  flat = -1
  with tf.name_scope(None, 'CrossEntropyLoss', [logits, labels]):
    labels = tf.reshape(labels, shape=[flat])
    onehot_labels = tf.one_hot(labels, FLAGS.num_classes)
    logits = tf.reshape(logits, [flat, FLAGS.num_classes])
    xent = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=onehot_labels)
    if num_labels is None:
      # Count labelled pixels from the one-hot matrix itself.
      num_labels = tf.reduce_sum(onehot_labels)
    else:
      num_labels = tf.reduce_sum(num_labels)
    print('Using balanced loss with max weight = ', max_weight)
    weights = tf.reshape(weights, shape=[flat])
    weights = tf.minimum(tf.to_float(max_weight), weights)
    # Renormalize the clipped weights to sum to the pixel count.
    norm_factor = num_labels / tf.reduce_sum(weights)
    weights = tf.multiply(weights, norm_factor)
    xent = tf.multiply(weights, xent)
    xent = tf.reduce_sum(xent) / num_labels
    print(xent)
    return xent
def cross_entropy_loss_old(logits, labels, weights, num_labels):
  """Legacy weighted cross-entropy: weighted sum divided by the label count."""
  print('loss: cross-entropy')
  flat = -1
  with tf.name_scope(None, 'CrossEntropyLoss', [logits, labels, num_labels]):
    labels = tf.reshape(labels, shape=[flat])
    onehot_labels = tf.one_hot(labels, FLAGS.num_classes)
    logits = tf.reshape(logits, [flat, FLAGS.num_classes])
    xent = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=onehot_labels)
    weights = tf.reshape(weights, shape=[flat])
    xent = tf.multiply(weights, xent)
    xent = tf.reduce_sum(xent) / tf.reduce_sum(num_labels)
    print(xent)
    return xent
def mse(yp, yt):
  """Mean squared error between predictions `yp` and targets `yt`."""
  n = FLAGS.batch_size * FLAGS.img_height * FLAGS.img_width
  with tf.name_scope('MeanSquareError'):
    flat_true = tf.reshape(yt, shape=[n])
    flat_pred = tf.reshape(yp, shape=[n])
    return tf.reduce_mean(tf.square(flat_true - flat_pred))
def weighted_cross_entropy_loss_deprecated(logits, labels, weights=None, max_weight=100):
  """Deprecated weighted cross-entropy built from an explicit log-softmax.

  Superseded by `weighted_cross_entropy_loss`; kept for old experiments.

  Args:
    logits: unnormalized class scores, reshaped to [-1, num_classes].
    labels: integer label map, flattened internally.
    weights: optional per-pixel weight map; clipped at `max_weight`.
    max_weight: upper bound applied to `weights`.
  Returns:
    Scalar loss tensor named 'value'.
  """
  print('loss: Weighted Cross Entropy Loss')
  shape = labels.get_shape().as_list()
  print(shape)
  num_examples = -1  # flatten all leading dimensions
  with tf.name_scope(None, 'WeightedCrossEntropyLoss', [logits, labels]):
    labels = tf.reshape(labels, shape=[num_examples])
    one_hot_labels = tf.one_hot(tf.to_int64(labels), FLAGS.num_classes, 1, 0)
    one_hot_labels = tf.reshape(one_hot_labels, [num_examples, FLAGS.num_classes])
    num_labels = tf.to_float(tf.reduce_sum(one_hot_labels))
    logits_1d = tf.reshape(logits, [num_examples, FLAGS.num_classes])
    # log_softmax is numerically stable, unlike log(softmax(x)).
    log_softmax = tf.nn.log_softmax(logits_1d)
    xent = tf.reduce_sum(-tf.multiply(tf.to_float(one_hot_labels), log_softmax), 1)
    # BUG FIX: `weights != None` compared a tensor elementwise instead of
    # testing for presence; use an identity check. tf.mul was also replaced
    # by tf.multiply (tf.mul no longer exists in the TensorFlow API).
    if weights is not None:
      weights = tf.reshape(weights, shape=[num_examples])
      xent = tf.multiply(tf.minimum(tf.to_float(max_weight), weights), xent)
    total_loss = tf.div(tf.reduce_sum(xent), tf.to_float(num_labels), name='value')
    print(total_loss)
    return total_loss
def flip_xent_loss(logits, labels, weights, max_weight=10):
  """Weighted cross-entropy for a batch of two images (image + its flip).

  Args:
    logits: class scores for both batch entries.
    labels: integer labels, flattened internally.
    weights: per-pixel weights, clipped at `max_weight`.
    max_weight: upper bound applied to `weights`.
  Returns:
    Scalar loss tensor named 'value'.
  """
  print('Loss: Weighted Cross Entropy Loss')
  assert(FLAGS.batch_size == 2)
  num_examples = FLAGS.batch_size * FLAGS.img_height * FLAGS.img_width
  labels = tf.reshape(labels, shape=[num_examples])
  weights = tf.reshape(weights, shape=[num_examples])
  # BUG FIX: tf.name_scope takes (name, default_name, values); the values
  # list was previously passed in the default_name position.
  with tf.name_scope(None, 'FlipXentLoss', [logits, labels]):
    one_hot_labels = tf.one_hot(tf.to_int64(labels), FLAGS.num_classes, 1, 0)
    one_hot_labels = tf.reshape(one_hot_labels, [num_examples, FLAGS.num_classes])
    num_labels = tf.to_float(tf.reduce_sum(one_hot_labels))
    logits_1d = tf.reshape(logits, [num_examples, FLAGS.num_classes])
    log_softmax = tf.nn.log_softmax(logits_1d)
    # Positive log-likelihood, negated once at the end.
    xent = tf.reduce_sum(tf.multiply(tf.to_float(one_hot_labels), log_softmax), 1)
    weighted_xent = tf.multiply(tf.minimum(tf.to_float(max_weight), weights), xent)
    total_loss = - tf.div(tf.reduce_sum(weighted_xent), num_labels, name='value')
    return total_loss
def slim_cross_entropy_loss(logits, labels, num_labels):
  """Cross-entropy computed through the project-local `slim` loss helper.

  NOTE(review): `num_labels` is accepted but unused; the slim helper
  presumably handles normalization itself — verify against its source.
  """
  print('Loss: Cross Entropy Loss')
  num_examples = FLAGS.batch_size * FLAGS.img_height * FLAGS.img_width
  one_hot_labels = tf.one_hot(tf.to_int64(labels), FLAGS.num_classes, 1, 0)
  logits_1d = tf.reshape(logits, [num_examples, FLAGS.num_classes])
  xent_loss = slim.losses.cross_entropy_loss(logits_1d, one_hot_labels)
  return xent_loss
def softmax(logits):
  """Per-pixel softmax, reshaped back to [img_height, img_width, num_classes].

  BUG FIX: tf.op_scope was removed from TensorFlow; use tf.name_scope with
  the (name, default_name, values) convention used elsewhere in this file.
  """
  num_examples = FLAGS.batch_size * FLAGS.img_height * FLAGS.img_width
  with tf.name_scope(None, 'Softmax', [logits]):
    logits_1d = tf.reshape(logits, [num_examples, FLAGS.num_classes])
    softmax_1d = tf.nn.softmax(logits_1d)
    softmax_2d = tf.reshape(softmax_1d, [FLAGS.img_height, FLAGS.img_width, FLAGS.num_classes])
    return softmax_2d
def multiclass_hinge_loss(logits, labels, weights):
  """Weighted multiclass squared hinge loss over valid pixels.

  Pixels with label < 0 are ignored. For each remaining pixel the squared
  hinge max(0, s_j - s_y + 1)^2 is summed over all wrong classes j, then
  averaged with per-pixel weights clipped at 100.

  BUG FIX: tf.op_scope and tf.mul were removed from the TensorFlow API;
  replaced with tf.name_scope / tf.multiply as used elsewhere in this file.
  """
  print('loss: Hinge loss')
  num_classes = FLAGS.num_classes
  with tf.name_scope(None, 'MulticlassHingeLoss', [logits, labels]):
    logits = tf.reshape(logits, [-1, num_classes])
    labels = tf.reshape(labels, [-1])
    weights = tf.reshape(weights, [-1])
    # Keep only pixels with a valid (non-negative) label.
    select_mask = tf.greater_equal(labels, 0)
    logits = tf.boolean_mask(logits, select_mask)
    labels = tf.boolean_mask(labels, select_mask)
    weights = tf.boolean_mask(weights, select_mask)
    # Partition each row into the true-class score and the other scores.
    partitions = tf.one_hot(tf.to_int64(labels), FLAGS.num_classes, 1, 0, dtype=tf.int32)
    num_partitions = 2
    scores, score_yt = tf.dynamic_partition(logits, partitions, num_partitions)
    scores = tf.reshape(scores, [-1, num_classes - 1])
    score_yt = tf.reshape(score_yt, [-1, 1])
    # Squared hinge with margin 1, summed over the wrong classes.
    hinge_loss = tf.square(tf.maximum(0.0, scores - score_yt + 1.0))
    hinge_loss = tf.reduce_sum(hinge_loss, 1)
    total_loss = tf.reduce_mean(tf.multiply(tf.minimum(100.0, weights), hinge_loss))
    return total_loss
def metric_hinge_loss(logits, labels, weights, num_labels):
  """Experimental metric-learning hinge loss on L2-normalized logits.

  WARNING(review): this function was left unfinished upstream — the
  pairwise class-margin loop body was never written, so the function
  raises as soon as num_classes > 0. Fixed here only so the failure is
  explicit: the original `raise valueerror(1)` was itself a NameError,
  `num_classes` was undefined, and tf.op_scope / tf.mul no longer exist
  in the TensorFlow API.
  """
  print('loss: Hinge loss')
  num_examples = FLAGS.batch_size * FLAGS.img_height * FLAGS.img_width
  num_classes = FLAGS.num_classes
  with tf.name_scope(None, 'WeightedHingeLoss', [logits, labels]):
    one_hot_labels = tf.one_hot(tf.to_int64(labels), FLAGS.num_classes, 1, 0)
    logits_1d = tf.reshape(logits, [num_examples, FLAGS.num_classes])
    # Compare L2-normalized codes against the one-hot target.
    codes = tf.nn.l2_normalize(logits_1d, 1)
    l2_dist = tf.sqrt(tf.reduce_sum(tf.square(tf.to_float(one_hot_labels) - codes), 1))
    m = 0.2  # margin
    for i in range(num_classes):
      for j in range(num_classes):
        raise ValueError('pairwise class-margin term not implemented')
    hinge_loss = tf.maximum(tf.to_float(0), l2_dist - m)
    total_loss = tf.reduce_sum(tf.multiply(weights, hinge_loss))
    total_loss = tf.div(total_loss, tf.to_float(num_labels), name='value')
    tf.add_to_collection(slim.losses.LOSSES_COLLECTION, total_loss)
    return total_loss
#def weighted_hinge_loss(logits, labels, weights, num_labels):
# print('Loss: Hinge Loss')
# num_examples = FLAGS.batch_size * FLAGS.img_height * FLAGS.img_width
# with tf.op_scope([logits, labels], None, 'WeightedHingeLoss'):
# weights = tf.reshape(weights, shape=[num_examples])
# labels = tf.reshape(labels, shape=[num_examples])
# num_labels = tf.to_float(tf.reduce_sum(num_labels))
# one_hot_labels = tf.one_hot(tf.to_int64(labels), FLAGS.num_classes, 1, 0)
# one_hot_labels = tf.reshape(one_hot_labels, [num_examples, FLAGS.num_classes])
# logits_1d = tf.reshape(logits, [num_examples, FLAGS.num_classes])
# #codes = tf.nn.softmax(logits_1d)
# codes = tf.nn.l2_normalize(logits_1d, 1)
# # works worse
# #l2_dist = tf.sqrt(tf.reduce_sum(tf.square(tf.to_float(one_hot_labels) - codes), 1))
# #m = 0.2
# l2_dist = tf.reduce_sum(tf.square(tf.to_float(one_hot_labels) - codes), 1)
# m = 0.2 ** 2
# #m = 0.1 ** 2
# #m = 0.3 ** 2
# hinge_loss = tf.maximum(tf.to_float(0), l2_dist - m)
# total_loss = tf.reduce_sum(tf.mul(weights, hinge_loss))
#
# total_loss = tf.div(total_loss, tf.to_float(num_labels), name='value')
# tf.add_to_collection(slim.losses.LOSSES_COLLECTION, total_loss)
#
# #tf.nn.l2_normalize(x, dim, epsilon=1e-12, name=None)
# #tf.nn.l2_loss(t, name=None)
# return total_loss
def flip_xent_loss_symmetric(logits, labels, weights, num_labels):
  """Cross-entropy on the original image plus a flip-consistency term.

  Expects a batch of two images: index 0 is the original, index 1 its
  horizontal flip. The supervised term uses the original's labels; the
  consistency term pushes the flipped image's softmax toward the
  original's log-softmax (pixel-aligned after un-flipping).

  BUG FIX: tf.op_scope and tf.mul were removed from the TensorFlow API;
  replaced with tf.name_scope / tf.multiply as used elsewhere in this file.
  """
  print('Loss: Weighted Cross Entropy Loss')
  num_examples = FLAGS.img_height * FLAGS.img_width
  with tf.name_scope(None, 'WeightedCrossEntropyLoss', [logits, labels]):
    labels = tf.reshape(labels, shape=[2, num_examples])
    weights = tf.reshape(weights, shape=[2, num_examples])
    num_labels = tf.to_float(tf.reduce_sum(num_labels))
    logits_flip = logits[1,:,:,:]
    logits = logits[0,:,:,:]
    weights = weights[0,:]
    labels = labels[0,:]
    one_hot_labels = tf.one_hot(tf.to_int64(labels), FLAGS.num_classes, 1, 0)
    one_hot_labels = tf.reshape(one_hot_labels, [num_examples, FLAGS.num_classes])
    # Undo the horizontal flip so both logit maps are pixel-aligned.
    logits_flip = tf.image.flip_left_right(logits_flip)
    logits_1d = tf.reshape(logits, [num_examples, FLAGS.num_classes])
    logits_1d_flip = tf.reshape(logits_flip, [num_examples, FLAGS.num_classes])
    log_softmax = tf.nn.log_softmax(logits_1d)
    softmax_flip = tf.nn.softmax(logits_1d_flip)
    # Supervised term on the original image (weights clipped at 100).
    xent = tf.reduce_sum(tf.multiply(tf.to_float(one_hot_labels), log_softmax), 1)
    weighted_xent = tf.multiply(tf.minimum(tf.to_float(100), weights), xent)
    # Consistency term between the flip's softmax and the original's log-softmax.
    xent_flip = tf.reduce_sum(tf.multiply(softmax_flip, log_softmax), 1)
    xent_flip = tf.multiply(tf.minimum(tf.to_float(100), weights), xent_flip)
    total_loss = - tf.div(tf.reduce_sum(weighted_xent) + tf.reduce_sum(xent_flip),
                          num_labels, name='value')
    tf.add_to_collection(slim.losses.LOSSES_COLLECTION, total_loss)
    return total_loss
| 42.924312 | 107 | 0.704996 | import tensorflow as tf
import slim
FLAGS = tf.app.flags.FLAGS
def add_loss_summaries(total_loss):
  """Attach scalar summaries and an exponential moving average to the losses.

  Every tensor in the 'losses' collection, plus `total_loss`, gets two
  summaries: its raw value and its 0.9-decay moving average.  Returns the op
  that updates all the averages.
  """
  ema = tf.train.ExponentialMovingAverage(0.9, name='avg')
  tracked = tf.get_collection('losses') + [total_loss]
  update_averages_op = ema.apply(tracked)
  for loss in tracked:
    tf.summary.scalar(loss.op.name + ' (raw)', loss)
    tf.summary.scalar(loss.op.name, ema.average(loss))
  return update_averages_op
def total_loss_sum(losses):
  """Return one tensor summing `losses` plus all regularization losses."""
  reg_losses = tf.losses.get_regularization_losses()
  return tf.add_n(losses + reg_losses, name='total_loss')
def cross_entropy_loss(logits, labels):
  # NOTE(review): this block is corrupted (comment-stripping dropped the
  # left-hand side of two assignments and left a stray bare `sum` plus
  # references to undefined `weights`/`norm_factor`).  As written it does not
  # even parse.  The intact variant of this logic appears to be
  # weighted_cross_entropy_loss_dense below — restore from there or from VCS.
  print('loss: cross-entropy')
  num_pixels = -1
  with tf.name_scope(None, 'CrossEntropyLoss', [logits, labels]):
    labels = tf.reshape(labels, shape=[num_pixels])
    logits = tf.reshape(logits, [num_pixels, FLAGS.num_classes])
    # Keep only pixels whose label is a valid class id (< num_classes).
    mask = labels < FLAGS.num_classes
    idx = tf.where(mask)
    # NOTE(review): missing assignment target — presumably `labels = ...`.
    = tf.gather_nd(labels, idx)
    labels = tf.to_int32(labels)
    logits = tf.gather_nd(logits, idx)
    onehot_labels = tf.one_hot(labels, FLAGS.num_classes)
    xent = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=onehot_labels)
    # NOTE(review): missing assignment target — presumably `onehot_labels = ...`.
    = tf.one_hot(labels, FLAGS.num_classes)
    logits = tf.reshape(logits, [num_pixels, FLAGS.num_classes])
    xent = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=onehot_labels)
    num_labels = tf.reduce_sum(onehot_labels)
    # NOTE(review): stray token; a weight-normalization block was lost here.
    sum
    weights = tf.multiply(weights, norm_factor)
    xent = tf.multiply(weights, xent)
    xent = tf.reduce_sum(xent) / num_labels
    return xent
def weighted_cross_entropy_loss_dense(logits, labels, weights=None,
                                      num_labels=None, max_weight=100):
  """Dense softmax cross-entropy with optional per-pixel class balancing.

  Args:
    logits: Per-pixel class scores; reshaped to [num_pixels, num_classes].
    labels: Per-pixel integer class ids; flattened to [num_pixels].
    weights: Optional per-pixel weights.  When given, they are capped at
      `max_weight` and rescaled so their sum equals the number of labels
      (mean weight == 1) before multiplying the per-pixel losses.
    num_labels: Optional tensor whose sum is used as the normalizer; when
      None, the normalizer is the number of one-hot labels.
    max_weight: Cap applied to each weight before normalization.

  Returns:
    Scalar loss tensor (weighted sum of per-pixel cross-entropies divided by
    the number of labels).

  Fix: `weights` defaulted to None but was used unconditionally, so calling
  with the default crashed in tf.reshape; the weighting block is now guarded.
  """
  print('loss: cross-entropy')
  num_pixels = -1
  with tf.name_scope(None, 'CrossEntropyLoss', [logits, labels]):
    labels = tf.reshape(labels, shape=[num_pixels])
    onehot_labels = tf.one_hot(labels, FLAGS.num_classes)
    logits = tf.reshape(logits, [num_pixels, FLAGS.num_classes])
    xent = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=onehot_labels)
    if num_labels is None:
      num_labels = tf.reduce_sum(onehot_labels)
    else:
      num_labels = tf.reduce_sum(num_labels)
    if weights is not None:
      print('Using balanced loss with max weight = ', max_weight)
      weights = tf.reshape(weights, shape=[num_pixels])
      weights = tf.minimum(tf.to_float(max_weight), weights)
      wgt_sum = tf.reduce_sum(weights)
      # Rescale so the weights sum to num_labels (average weight of 1).
      norm_factor = num_labels / wgt_sum
      weights = tf.multiply(weights, norm_factor)
      xent = tf.multiply(weights, xent)
    xent = tf.reduce_sum(xent) / num_labels
    print(xent)
    return xent
def cross_entropy_loss_old(logits, labels, weights, num_labels):
  """Per-pixel weighted softmax cross-entropy, normalized by sum(num_labels)."""
  print('loss: cross-entropy')
  flat = -1  # let TF infer the flattened pixel count
  with tf.name_scope(None, 'CrossEntropyLoss', [logits, labels, num_labels]):
    flat_labels = tf.reshape(labels, shape=[flat])
    one_hot = tf.one_hot(flat_labels, FLAGS.num_classes)
    flat_logits = tf.reshape(logits, [flat, FLAGS.num_classes])
    per_pixel = tf.nn.softmax_cross_entropy_with_logits(logits=flat_logits,
                                                        labels=one_hot)
    per_pixel = tf.multiply(tf.reshape(weights, shape=[flat]), per_pixel)
    loss = tf.reduce_sum(per_pixel) / tf.reduce_sum(num_labels)
    print(loss)
    return loss
def mse(yp, yt):
  """Mean squared error between predictions `yp` and targets `yt`."""
  flat_count = FLAGS.batch_size * FLAGS.img_height * FLAGS.img_width
  with tf.name_scope('MeanSquareError'):
    flat_targets = tf.reshape(yt, shape=[flat_count])
    flat_preds = tf.reshape(yp, shape=[flat_count])
    return tf.reduce_mean(tf.square(flat_targets - flat_preds))
def weighted_cross_entropy_loss_deprecated(logits, labels, weights=None, max_weight=100):
  """Weighted cross-entropy computed by hand from log-softmax (deprecated).

  Flattens labels/logits, builds one-hot targets, and sums
  -one_hot * log_softmax per pixel; if `weights` is given, each pixel's loss
  is scaled by min(weight, max_weight).  The total is divided by the number
  of one-hot labels.

  Fix: the weights check used `weights != None`.  Besides violating PEP 8,
  `!=` on a tensor builds an elementwise comparison instead of testing for
  the missing argument; use an identity test with None.
  """
  print('loss: Weighted Cross Entropy Loss')
  shape = labels.get_shape().as_list()
  print(shape)
  num_examples = -1
  with tf.name_scope(None, 'WeightedCrossEntropyLoss', [logits, labels]):
    labels = tf.reshape(labels, shape=[num_examples])
    one_hot_labels = tf.one_hot(tf.to_int64(labels), FLAGS.num_classes, 1, 0)
    one_hot_labels = tf.reshape(one_hot_labels, [num_examples, FLAGS.num_classes])
    num_labels = tf.to_float(tf.reduce_sum(one_hot_labels))
    logits_1d = tf.reshape(logits, [num_examples, FLAGS.num_classes])
    log_softmax = tf.nn.log_softmax(logits_1d)
    xent = tf.reduce_sum(-tf.multiply(tf.to_float(one_hot_labels), log_softmax), 1)
    if weights is not None:
      weights = tf.reshape(weights, shape=[num_examples])
      xent = tf.mul(tf.minimum(tf.to_float(max_weight), weights), xent)
    total_loss = tf.div(tf.reduce_sum(xent), tf.to_float(num_labels), name='value')
    print(total_loss)
    return total_loss
def flip_xent_loss(logits, labels, weights, max_weight=10):
  """Weighted cross-entropy over a two-image (normal + flipped) batch.

  Requires FLAGS.batch_size == 2; labels and weights cover both images and
  per-pixel weights are capped at `max_weight`.
  """
  print('Loss: Weighted Cross Entropy Loss')
  assert(FLAGS.batch_size == 2)
  total_pixels = FLAGS.batch_size * FLAGS.img_height * FLAGS.img_width
  flat_labels = tf.reshape(labels, shape=[total_pixels])
  flat_weights = tf.reshape(weights, shape=[total_pixels])
  with tf.name_scope('FlipXentLoss', [logits, flat_labels]):
    one_hot = tf.one_hot(tf.to_int64(flat_labels), FLAGS.num_classes, 1, 0)
    one_hot = tf.reshape(one_hot, [total_pixels, FLAGS.num_classes])
    num_labels = tf.to_float(tf.reduce_sum(one_hot))
    flat_logits = tf.reshape(logits, [total_pixels, FLAGS.num_classes])
    log_probs = tf.nn.log_softmax(flat_logits)
    per_pixel = tf.reduce_sum(tf.mul(tf.to_float(one_hot), log_probs), 1)
    capped = tf.minimum(tf.to_float(max_weight), flat_weights)
    weighted = tf.mul(capped, per_pixel)
    return - tf.div(tf.reduce_sum(weighted), num_labels, name='value')
def slim_cross_entropy_loss(logits, labels, num_labels):
  """Plain (unweighted) cross-entropy via the slim losses helper."""
  print('Loss: Cross Entropy Loss')
  pixel_count = FLAGS.batch_size * FLAGS.img_height * FLAGS.img_width
  one_hot = tf.one_hot(tf.to_int64(labels), FLAGS.num_classes, 1, 0)
  flat_logits = tf.reshape(logits, [pixel_count, FLAGS.num_classes])
  return slim.losses.cross_entropy_loss(flat_logits, one_hot)
def softmax(logits):
  """Softmax over the class dimension, returned as an HxWxC probability map."""
  flat_count = FLAGS.batch_size * FLAGS.img_height * FLAGS.img_width
  with tf.op_scope([logits], None, 'Softmax'):
    flat = tf.reshape(logits, [flat_count, FLAGS.num_classes])
    probs = tf.nn.softmax(flat)
    return tf.reshape(
        probs, [FLAGS.img_height, FLAGS.img_width, FLAGS.num_classes])
def multiclass_hinge_loss(logits, labels, weights):
  """Weighted multiclass squared hinge loss over valid pixels.

  Pixels with negative labels are ignored.  For each remaining pixel the loss
  is sum_j max(0, s_j - s_yt + 1)^2 over the wrong classes j, scaled by the
  pixel's weight (capped at 100) and averaged over pixels.

  NOTE(review): uses legacy TF 1.x ops (tf.op_scope, tf.mul, tf.to_int32).
  """
  print('loss: Hinge loss')
  num_examples = FLAGS.batch_size * FLAGS.img_height * FLAGS.img_width
  num_classes = FLAGS.num_classes
  with tf.op_scope([logits, labels], None, 'MulticlassHingeLoss'):
    # Flatten everything to one row per pixel.
    logits = tf.reshape(logits, [-1, num_classes])
    labels = tf.reshape(labels, [-1])
    weights = tf.reshape(weights, [-1])
    # Drop pixels with negative ("ignore") labels.
    select_mask = tf.greater_equal(labels, 0)
    logits = tf.boolean_mask(logits, select_mask)
    labels = tf.boolean_mask(labels, select_mask)
    weights = tf.boolean_mask(weights, select_mask)
    num_examples = tf.reduce_sum(tf.to_int32(select_mask))
    # dynamic_partition with a one-hot partition vector splits each row into
    # the num_classes-1 "wrong class" scores (partition 0) and the single
    # ground-truth score (partition 1).
    partitions = tf.one_hot(tf.to_int64(labels), FLAGS.num_classes, 1, 0, dtype=tf.int32)
    num_partitions = 2
    scores, score_yt = tf.dynamic_partition(logits, partitions, num_partitions)
    scores = tf.reshape(scores, [-1, num_classes - 1])
    score_yt = tf.reshape(score_yt, [-1, 1])
    # Squared hinge with margin 1, summed over classes per pixel.
    hinge_loss = tf.square(tf.maximum(0.0, scores - score_yt + 1.0))
    hinge_loss = tf.reduce_sum(hinge_loss, 1)
    total_loss = tf.reduce_mean(tf.mul(tf.minimum(100.0, weights), hinge_loss))
    return total_loss
def metric_hinge_loss(logits, labels, weights, num_labels):
  """Weighted hinge loss on the L2 distance between the L2-normalized logits
  ("codes") and the one-hot label vectors, with margin m = 0.2.

  The loss is registered in the slim losses collection and also returned.

  Fix: removed a corrupted leftover double loop that unconditionally executed
  `raise valueerror(1)` over an undefined local `num_classes` — every call
  died with a NameError; the commented-out reference implementation of this
  loss earlier in the file contains no such loop.
  """
  print('loss: Hinge loss')
  num_examples = FLAGS.batch_size * FLAGS.img_height * FLAGS.img_width
  with tf.op_scope([logits, labels], None, 'weightedhingeloss'):
    one_hot_labels = tf.one_hot(tf.to_int64(labels), FLAGS.num_classes, 1, 0)
    logits_1d = tf.reshape(logits, [num_examples, FLAGS.num_classes])
    # Project logits onto the unit sphere and measure distance to the target.
    codes = tf.nn.l2_normalize(logits_1d, 1)
    l2_dist = tf.sqrt(tf.reduce_sum(tf.square(tf.to_float(one_hot_labels) - codes), 1))
    m = 0.2  # margin: distances below m incur no penalty
    hinge_loss = tf.maximum(tf.to_float(0), l2_dist - m)
    total_loss = tf.reduce_sum(tf.mul(weights, hinge_loss))
    # Normalize by the number of labeled pixels and register with slim.
    total_loss = tf.div(total_loss, tf.to_float(num_labels), name='value')
    tf.add_to_collection(slim.losses.LOSSES_COLLECTION, total_loss)
    return total_loss
pyLoss'):
labels = tf.reshape(labels, shape=[2, num_examples])
weights = tf.reshape(weights, shape=[2, num_examples])
num_labels = tf.to_float(tf.reduce_sum(num_labels))
logits_flip = logits[1,:,:,:]
logits = logits[0,:,:,:]
weights = weights[0,:]
labels = labels[0,:]
one_hot_labels = tf.one_hot(tf.to_int64(labels), FLAGS.num_classes, 1, 0)
one_hot_labels = tf.reshape(one_hot_labels, [num_examples, FLAGS.num_classes])
logits_flip = tf.image.flip_left_right(logits_flip)
logits_1d = tf.reshape(logits, [num_examples, FLAGS.num_classes])
logits_1d_flip = tf.reshape(logits_flip, [num_examples, FLAGS.num_classes])
log_softmax = tf.nn.log_softmax(logits_1d)
softmax_flip = tf.nn.softmax(logits_1d_flip)
xent = tf.reduce_sum(tf.mul(tf.to_float(one_hot_labels), log_softmax), 1)
weighted_xent = tf.mul(tf.minimum(tf.to_float(100), weights), xent)
xent_flip = tf.reduce_sum(tf.mul(softmax_flip, log_softmax), 1)
xent_flip = tf.mul(tf.minimum(tf.to_float(100), weights), xent_flip)
total_loss = - tf.div(tf.reduce_sum(weighted_xent) + tf.reduce_sum(xent_flip),
num_labels, name='value')
tf.add_to_collection(slim.losses.LOSSES_COLLECTION, total_loss)
return total_loss
| true | true |
f71b599c49ef3382050c2d01eff0c192906c1d7b | 1,580 | py | Python | aiLogic/tankAI.py | JoelEager/pyTanks.Player | a35a653e9df2416c63204aba87a95f33e6815b63 | [
"MIT"
] | 2 | 2017-03-09T15:32:55.000Z | 2017-09-04T11:25:41.000Z | aiLogic/tankAI.py | JoelEager/pyTanks.Player | a35a653e9df2416c63204aba87a95f33e6815b63 | [
"MIT"
] | null | null | null | aiLogic/tankAI.py | JoelEager/pyTanks.Player | a35a653e9df2416c63204aba87a95f33e6815b63 | [
"MIT"
] | 4 | 2017-05-16T15:10:09.000Z | 2017-07-06T15:24:50.000Z | """
The player's AI code
Functions here are called by clock.py to run the AI code
"""
import random
import math
from clientLogic.logging import logPrint
from clientLogic import clientData, commands
def onConnect():
    """Runs once after connecting to the server, before the first spawn."""
    info = ("Python player instance running the example AI.\n"
            "Fork me at https://github.com/JoelEager/pyTanks.Player")
    commands.setInfo(info)
def onSpawn():
    """Runs each time the tank spawns into a new game; no setup is required."""
    pass
def onTick(elapsedTime):
    """
    Called once every frame while the tank is alive.

    :param elapsedTime: The time elapsed, in seconds, since the last frame
    """
    gs = clientData.gameState

    # Collided (or just spawned), so pick a random 45-degree heading and move
    if not gs.myTank.moving:
        commands.turn((math.pi / 4) * random.randint(0, 7))
        commands.go()
        logPrint("Turned and starting moving", 2)

    # Shooting logic: roughly a 1-in-5 chance per frame once the gun is ready
    if gs.myTank.canShoot and random.randint(0, 4) == 0:
        # Select a random living tank as the target
        random.shuffle(gs.tanks)
        for target in gs.tanks:
            if target.alive:
                # atan2 handles every quadrant, including targets directly
                # above or below us (deltaX == 0) — the old atan()-based math
                # bailed out of the whole tick in that case.  The y delta is
                # inverted because the board's y axis points down.
                angle = math.atan2(gs.myTank.y - target.y,
                                   target.x - gs.myTank.x)
                commands.fire(angle)
                logPrint("Fired", 2)
                break
| 27.719298 | 92 | 0.588608 |
import random
import math
from clientLogic.logging import logPrint
from clientLogic import clientData, commands
def onConnect():
    """Called once when the player connects, before the tank first spawns."""
    commands.setInfo("Python player instance running the example AI.\n" +
                     "Fork me at https://github.com/JoelEager/pyTanks.Player")
def onSpawn():
    """Called when the tank spawns in a new game; no per-spawn setup needed."""
    pass
def onTick(elapsedTime):
    """Per-frame AI: recover from collisions, then maybe fire at a tank.

    :param elapsedTime: seconds elapsed since the last frame (unused here)
    """
    gs = clientData.gameState
    # Not moving means we collided (or just spawned): pick one of eight
    # 45-degree headings at random and start driving again.
    if not gs.myTank.moving:
        commands.turn((math.pi / 4) * random.randint(0, 7))
        commands.go()
        logPrint("Turned and starting moving", 2)
    # Roughly a 1-in-5 chance per frame of shooting once the gun is ready.
    if gs.myTank.canShoot and random.randint(0, 4) == 0:
        # Shuffle so the chosen living target is random.
        random.shuffle(gs.tanks)
        for target in gs.tanks:
            if target.alive:
                deltaX = abs(gs.myTank.x - target.x)
                # NOTE(review): this `return` aborts the whole tick (not just
                # this target) whenever the chosen target is vertically
                # aligned; math.atan2 would handle deltaX == 0 — confirm.
                if deltaX == 0: return
                # y delta is inverted: board coordinates have y pointing down.
                deltaY = gs.myTank.y - target.y
                angle = math.atan(deltaY / deltaX)
                # Mirror the angle when the target is to our left.
                if target.x < gs.myTank.x:
                    angle = math.pi - angle
                commands.fire(angle)
                logPrint("Fired", 2)
                break
| true | true |
f71b5a856153c3564ce7371764011afc06ba93ae | 12,294 | py | Python | tests/unit/common_utils.py | dannielarriola/uai-coursebuilder | fbd440a8bfe1a928ac52985aea2949d5e91ad203 | [
"Apache-2.0"
] | null | null | null | tests/unit/common_utils.py | dannielarriola/uai-coursebuilder | fbd440a8bfe1a928ac52985aea2949d5e91ad203 | [
"Apache-2.0"
] | 27 | 2016-08-31T19:04:46.000Z | 2016-09-29T00:22:32.000Z | tests/unit/common_utils.py | dannielarriola/uai-coursebuilder | fbd440a8bfe1a928ac52985aea2949d5e91ad203 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for common.tags."""
__author__ = 'Mike Gainer (mgainer@google.com)'
import datetime
import os
import unittest
import appengine_config
from common import utils
class CommonUnitTests(unittest.TestCase):
    """Tests for the text/list conversion helpers in common.utils."""

    # --------------------------- String-to-list.
    def test_list_parsing(self):
        # Inputs that must all collapse to a single 'foo' token with the
        # default splitter: surrounding spaces, tabs, newlines and commas are
        # all treated as separators.
        single_foo = [
            'foo', ' foo', 'foo ', ' foo ',
            'foo\t', '\tfoo', '\tfoo\t',
            'foo  ', '  foo', '  foo  ',
            'foo\n', '\nfoo', '\nfoo\n',
            'foo,', ',foo', ',foo,',
            ' foo ,\n', '\tfoo,\t\n', ',foo,\n',
        ]
        for text in single_foo:
            self.assertListEqual(['foo'], utils.text_to_list(text))

        # The backward-compatible splitter additionally strips [brackets].
        for text in ('[foo]', '[foo],', '[foo], ', '[foo],\n', '[foo], \n'):
            self.assertListEqual(
                ['foo'],
                utils.text_to_list(text, utils.BACKWARD_COMPATIBLE_SPLITTER))

        # Two-token inputs with every separator style.
        foo_bar = [
            'foo bar', ' foo bar', 'foo bar ',
            'foo\tbar', '\tfoo\tbar', 'foo\tbar\t',
            'foo\nbar\n', '\nfoo\nbar\n',
            '\n foo\n bar\n', ' \n foo \n bar \n',
        ]
        for text in foo_bar:
            self.assertListEqual(['foo', 'bar'], utils.text_to_list(text))

        for text in ('[foo][bar]', ' [foo] [bar] ',
                     '\n[foo]\n[bar]\n', '\n,[foo],\n[bar],\n'):
            self.assertListEqual(
                ['foo', 'bar'],
                utils.text_to_list(text, utils.BACKWARD_COMPATIBLE_SPLITTER))

    def test_none_split(self):
        self.assertListEqual([], utils.text_to_list(None))

    def test_empty_split(self):
        self.assertListEqual([], utils.text_to_list(''))

    def test_all_separators_split(self):
        self.assertListEqual([], utils.text_to_list(' ,,, \t\t\n\t '))

    def test_one_item_split(self):
        self.assertListEqual(['x'], utils.text_to_list('x'))

    def test_join_none(self):
        self.assertEquals('', utils.list_to_text(None))

    def test_join_empty(self):
        self.assertEquals('', utils.list_to_text([]))

    def test_join_one(self):
        self.assertEquals('x', utils.list_to_text(['x']))

    def test_join_two(self):
        self.assertEquals('x y', utils.list_to_text(['x', 'y']))

    def test_join_split(self):
        # Round trip: list -> text -> list.
        items = ['a', 'b', 'c']
        self.assertListEqual(
            items, utils.text_to_list(utils.list_to_text(items)))

    def test_split_join(self):
        # Round trip: text -> list -> text.
        text = 'a b c'
        self.assertEquals(text, utils.list_to_text(utils.text_to_list(text)))
class ZipAwareOpenTests(unittest.TestCase):
    """Tests for utils.ZipAwareOpen, which patches open() to read into zips."""

    def test_find_in_lib_without_relative_path(self):
        path = os.path.join(
            appengine_config.BUNDLE_ROOT, 'lib', 'babel-0.9.6.zip',
            'babel', 'localedata', 'root.dat')

        # Outside the context manager a plain open() cannot see into the zip.
        with self.assertRaises(IOError):
            open(path)

        # Every calling convention of open() must work while patched.
        open_variants = [
            lambda: open(path),
            lambda: open(path, 'r'),
            lambda: open(path, mode='r'),
            lambda: open(name=path, mode='r'),
            lambda: open(name=path),
        ]
        with utils.ZipAwareOpen():
            for variant in open_variants:
                self.assertEquals(17490, len(variant().read()))

        # The patch must be undone once the context exits.
        with self.assertRaises(IOError):
            open(path)

    def test_find_in_lib_with_relative_path(self):
        path = os.path.join(
            appengine_config.BUNDLE_ROOT, 'lib', 'markdown-2.5.zip',
            'setup.cfg')
        with self.assertRaises(IOError):
            open(path)
        with utils.ZipAwareOpen():
            self.assertEquals(12, len(open(path).read()))
class ParseTimedeltaTests(unittest.TestCase):
    """Tests for utils.parse_timedelta_string."""

    def _check(self, text, **delta_args):
        """Assert that `text` parses to datetime.timedelta(**delta_args)."""
        self.assertEquals(utils.parse_timedelta_string(text),
                          datetime.timedelta(**delta_args))

    def test_parse_empty_string(self):
        self._check('')

    def test_parse_zero(self):
        self._check('0')

    def test_parse_gibberish(self):
        self._check('Amidst the mists and coldest frosts')

    def test_parse_leading_valid_partial_gibberish(self):
        self._check('5 days and a partridge in a pear tree', days=5)

    def test_parse_trailing_valid_partial_gibberish(self):
        self._check('we will leave in 5 days', days=5)

    def test_parse_units(self):
        for unit in ('week', 'day', 'hour', 'minute', 'second'):
            self._test_parse_units(unit)

    def _test_parse_units(self, unit):
        one = {unit + 's': 1}
        two = {unit + 's': 2}
        # Abbreviated and spelled-out forms, singular and plural, with and
        # without whitespace between the count and the unit name.
        self._check('1%s' % unit[0], **one)
        self._check('1%s' % unit, **one)
        self._check('2%ss' % unit, **two)
        self._check('2 %s' % unit[0], **two)
        self._check('1 %s' % unit, **one)
        self._check('2 %s' % unit, **two)
        self._check('2 \t\t\n %ss' % unit, **two)

    def test_parse_out_of_bounds_handled_successfully(self):
        # Values exceeding a unit's natural range roll over into larger units.
        self._check('86400s', days=1)
        self._check('19d, 86400s', weeks=2, days=6)

    def test_parse_combinations(self):
        for text in ('3w1d3m', '3w, 1d, 3m', '3 w 1 d 3 m',
                     '3 weeks 1 day 3 minutes', '3 weeks, 1 day, 3 minutes'):
            self._check(text, weeks=3, days=1, minutes=3)
class ValidateTimedeltaTests(unittest.TestCase):
    """Tests for utils.ValidateTimedelta.validate."""

    def _errors_for(self, value):
        """Run the validator and return the list of errors it reported."""
        errors = []
        utils.ValidateTimedelta.validate(value, errors)
        return errors

    def test_blank_is_allowed(self):
        self.assertEquals(0, len(self._errors_for('')))

    def test_none_is_allowed(self):
        self.assertEquals(0, len(self._errors_for(None)))

    def test_bare_numbers_not_allowed(self):
        # A count without a unit suffix is ambiguous and must be rejected.
        for value in ('0', '1', '-1', '100'):
            self.assertEquals(1, len(self._errors_for(value)))

    def test_valid_items_allowed(self):
        errors = []
        for value in ('1s', '2m', '3h', '4d', '5w',
                      '5 Weeks, 1D,2HOURS 3 seconds'):
            utils.ValidateTimedelta.validate(value, errors)
        self.assertEquals(0, len(errors))

    def test_invalid_items_disallowed(self):
        for value in ('1t', '1 year'):
            self.assertEquals(1, len(self._errors_for(value)))

    def test_parse_months_gives_error(self):
        self.assertEquals(1, len(self._errors_for('3 months')))
| 38.299065 | 80 | 0.577843 |
__author__ = 'Mike Gainer (mgainer@google.com)'
import datetime
import os
import unittest
import appengine_config
from common import utils
class CommonUnitTests(unittest.TestCase):
    """Tests for the text/list conversion helpers in common.utils."""

    def test_list_parsing(self):
        # Inputs that must all collapse to a single 'foo' token with the
        # default splitter: surrounding spaces, tabs, newlines and commas are
        # all treated as separators.
        single_foo = [
            'foo', ' foo', 'foo ', ' foo ',
            'foo\t', '\tfoo', '\tfoo\t',
            'foo  ', '  foo', '  foo  ',
            'foo\n', '\nfoo', '\nfoo\n',
            'foo,', ',foo', ',foo,',
            ' foo ,\n', '\tfoo,\t\n', ',foo,\n',
        ]
        for text in single_foo:
            self.assertListEqual(['foo'], utils.text_to_list(text))

        # The backward-compatible splitter additionally strips [brackets].
        for text in ('[foo]', '[foo],', '[foo], ', '[foo],\n', '[foo], \n'):
            self.assertListEqual(
                ['foo'],
                utils.text_to_list(text, utils.BACKWARD_COMPATIBLE_SPLITTER))

        # Two-token inputs with every separator style.
        foo_bar = [
            'foo bar', ' foo bar', 'foo bar ',
            'foo\tbar', '\tfoo\tbar', 'foo\tbar\t',
            'foo\nbar\n', '\nfoo\nbar\n',
            '\n foo\n bar\n', ' \n foo \n bar \n',
        ]
        for text in foo_bar:
            self.assertListEqual(['foo', 'bar'], utils.text_to_list(text))

        for text in ('[foo][bar]', ' [foo] [bar] ',
                     '\n[foo]\n[bar]\n', '\n,[foo],\n[bar],\n'):
            self.assertListEqual(
                ['foo', 'bar'],
                utils.text_to_list(text, utils.BACKWARD_COMPATIBLE_SPLITTER))

    def test_none_split(self):
        self.assertListEqual([], utils.text_to_list(None))

    def test_empty_split(self):
        self.assertListEqual([], utils.text_to_list(''))

    def test_all_separators_split(self):
        self.assertListEqual([], utils.text_to_list(' ,,, \t\t\n\t '))

    def test_one_item_split(self):
        self.assertListEqual(['x'], utils.text_to_list('x'))

    def test_join_none(self):
        self.assertEquals('', utils.list_to_text(None))

    def test_join_empty(self):
        self.assertEquals('', utils.list_to_text([]))

    def test_join_one(self):
        self.assertEquals('x', utils.list_to_text(['x']))

    def test_join_two(self):
        self.assertEquals('x y', utils.list_to_text(['x', 'y']))

    def test_join_split(self):
        # Round trip: list -> text -> list.
        items = ['a', 'b', 'c']
        self.assertListEqual(
            items, utils.text_to_list(utils.list_to_text(items)))

    def test_split_join(self):
        # Round trip: text -> list -> text.
        text = 'a b c'
        self.assertEquals(text, utils.list_to_text(utils.text_to_list(text)))
class ZipAwareOpenTests(unittest.TestCase):
    """Tests for utils.ZipAwareOpen, which patches open() to read into zips."""

    def test_find_in_lib_without_relative_path(self):
        path = os.path.join(
            appengine_config.BUNDLE_ROOT, 'lib', 'babel-0.9.6.zip',
            'babel', 'localedata', 'root.dat')

        # Outside the context manager a plain open() cannot see into the zip.
        with self.assertRaises(IOError):
            open(path)

        # Every calling convention of open() must work while patched.
        open_variants = [
            lambda: open(path),
            lambda: open(path, 'r'),
            lambda: open(path, mode='r'),
            lambda: open(name=path, mode='r'),
            lambda: open(name=path),
        ]
        with utils.ZipAwareOpen():
            for variant in open_variants:
                self.assertEquals(17490, len(variant().read()))

        # The patch must be undone once the context exits.
        with self.assertRaises(IOError):
            open(path)

    def test_find_in_lib_with_relative_path(self):
        path = os.path.join(
            appengine_config.BUNDLE_ROOT, 'lib', 'markdown-2.5.zip',
            'setup.cfg')
        with self.assertRaises(IOError):
            open(path)
        with utils.ZipAwareOpen():
            self.assertEquals(12, len(open(path).read()))
class ParseTimedeltaTests(unittest.TestCase):
    """Tests for utils.parse_timedelta_string."""

    def _check(self, text, **delta_args):
        """Assert that `text` parses to datetime.timedelta(**delta_args)."""
        self.assertEquals(utils.parse_timedelta_string(text),
                          datetime.timedelta(**delta_args))

    def test_parse_empty_string(self):
        self._check('')

    def test_parse_zero(self):
        self._check('0')

    def test_parse_gibberish(self):
        self._check('Amidst the mists and coldest frosts')

    def test_parse_leading_valid_partial_gibberish(self):
        self._check('5 days and a partridge in a pear tree', days=5)

    def test_parse_trailing_valid_partial_gibberish(self):
        self._check('we will leave in 5 days', days=5)

    def test_parse_units(self):
        for unit in ('week', 'day', 'hour', 'minute', 'second'):
            self._test_parse_units(unit)

    def _test_parse_units(self, unit):
        one = {unit + 's': 1}
        two = {unit + 's': 2}
        # Abbreviated and spelled-out forms, singular and plural, with and
        # without whitespace between the count and the unit name.
        self._check('1%s' % unit[0], **one)
        self._check('1%s' % unit, **one)
        self._check('2%ss' % unit, **two)
        self._check('2 %s' % unit[0], **two)
        self._check('1 %s' % unit, **one)
        self._check('2 %s' % unit, **two)
        self._check('2 \t\t\n %ss' % unit, **two)

    def test_parse_out_of_bounds_handled_successfully(self):
        # Values exceeding a unit's natural range roll over into larger units.
        self._check('86400s', days=1)
        self._check('19d, 86400s', weeks=2, days=6)

    def test_parse_combinations(self):
        for text in ('3w1d3m', '3w, 1d, 3m', '3 w 1 d 3 m',
                     '3 weeks 1 day 3 minutes', '3 weeks, 1 day, 3 minutes'):
            self._check(text, weeks=3, days=1, minutes=3)
class ValidateTimedeltaTests(unittest.TestCase):
    """Tests for utils.ValidateTimedelta.validate."""

    def _errors_for(self, value):
        """Run the validator and return the list of errors it reported."""
        errors = []
        utils.ValidateTimedelta.validate(value, errors)
        return errors

    def test_blank_is_allowed(self):
        self.assertEquals(0, len(self._errors_for('')))

    def test_none_is_allowed(self):
        self.assertEquals(0, len(self._errors_for(None)))

    def test_bare_numbers_not_allowed(self):
        # A count without a unit suffix is ambiguous and must be rejected.
        for value in ('0', '1', '-1', '100'):
            self.assertEquals(1, len(self._errors_for(value)))

    def test_valid_items_allowed(self):
        errors = []
        for value in ('1s', '2m', '3h', '4d', '5w',
                      '5 Weeks, 1D,2HOURS 3 seconds'):
            utils.ValidateTimedelta.validate(value, errors)
        self.assertEquals(0, len(errors))

    def test_invalid_items_disallowed(self):
        for value in ('1t', '1 year'):
            self.assertEquals(1, len(self._errors_for(value)))

    def test_parse_months_gives_error(self):
        self.assertEquals(1, len(self._errors_for('3 months')))
| true | true |
f71b5b1995d1ecc2a1ba880156e9343a02f0c212 | 578 | py | Python | bark_ml/commons/tracer.py | bark-simulator/rl | 84f9c74b60becbc4bc758e19b201d85a21880717 | [
"MIT"
] | null | null | null | bark_ml/commons/tracer.py | bark-simulator/rl | 84f9c74b60becbc4bc758e19b201d85a21880717 | [
"MIT"
] | null | null | null | bark_ml/commons/tracer.py | bark-simulator/rl | 84f9c74b60becbc4bc758e19b201d85a21880717 | [
"MIT"
] | null | null | null | # Copyright (c) 2020 fortiss GmbH
#
# Authors: Patrick Hart
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
import pickle
class Tracer:
  """Collects evaluation dictionaries ("states") logged during episodes."""

  def __init__(self, states=None, trace_history=True):
    """Creates the tracer.

    Args:
      states: Optional list of previously traced states to start from
        (previously this argument was silently ignored).
      trace_history: If False, Trace() calls are no-ops.
    """
    self._trace_history = trace_history
    # Copy so later Trace() calls do not mutate the caller's list.
    self._states = list(states) if states is not None else []

  def Trace(self, eval_dict):
    """Stores `eval_dict` if tracing is enabled."""
    if self._trace_history:
      self._states.append(eval_dict)

  def Reset(self):
    """Discards all traced states; the tracing on/off flag is preserved.

    Fixes the original implementation, which overwrote the boolean
    `_trace_history` flag with an empty list (silently disabling tracing)
    and never cleared `_states`.
    """
    self._states = []
import pickle
class Tracer:
  """Accumulates per-episode evaluation dicts when tracing is enabled."""

  def __init__(self, states=None, trace_history=True):
    # trace_history is a boolean switch; states optionally seeds the log
    # (it was previously accepted but ignored).
    self._trace_history = trace_history
    self._states = list(states) if states is not None else []

  def Trace(self, eval_dict):
    """Appends `eval_dict` to the log unless tracing is disabled."""
    if self._trace_history:
      self._states.append(eval_dict)

  def Reset(self):
    """Clears the logged states without touching the tracing flag.

    Bug fix: the original assigned `self._trace_history = []`, which both
    disabled tracing (empty list is falsy) and left `_states` untouched.
    """
    self._states = []
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.