hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7112440105b7f7dbd74bf0e92d94f8666cfac68 | 5,020 | py | Python | third_party/blink/tools/blinkpy/style/checkers/jsonchecker_unittest.py | zipated/src | 2b8388091c71e442910a21ada3d97ae8bc1845d3 | [
"BSD-3-Clause"
] | 2,151 | 2020-04-18T07:31:17.000Z | 2022-03-31T08:39:18.000Z | third_party/blink/tools/blinkpy/style/checkers/jsonchecker_unittest.py | cangulcan/src | 2b8388091c71e442910a21ada3d97ae8bc1845d3 | [
"BSD-3-Clause"
] | 395 | 2020-04-18T08:22:18.000Z | 2021-12-08T13:04:49.000Z | third_party/blink/tools/blinkpy/style/checkers/jsonchecker_unittest.py | cangulcan/src | 2b8388091c71e442910a21ada3d97ae8bc1845d3 | [
"BSD-3-Clause"
] | 338 | 2020-04-18T08:03:10.000Z | 2022-03-29T12:33:22.000Z | # Copyright (C) 2010 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for jsonchecker.py."""
import unittest
from blinkpy.style.checkers import jsonchecker
class MockErrorHandler(object):
def __init__(self, handle_style_error):
self.turned_off_filtering = False
self._handle_style_error = handle_style_error
def turn_off_line_filtering(self):
self.turned_off_filtering = True
def __call__(self, line_number, category, confidence, message):
self._handle_style_error(self, line_number, category, confidence, message)
return True
class JSONCheckerTest(unittest.TestCase):
"""Tests JSONChecker class."""
def test_line_number_from_json_exception(self):
tests = (
(0, 'No JSON object could be decoded'),
(2, 'Expecting property name: line 2 column 1 (char 2)'),
(3, 'Expecting object: line 3 column 1 (char 15)'),
(9, 'Expecting property name: line 9 column 21 (char 478)'),
)
for expected_line, message in tests:
self.assertEqual(expected_line, jsonchecker.JSONChecker.line_number_from_json_exception(ValueError(message)))
def assert_no_error(self, json_data):
def handle_style_error(mock_error_handler, line_number, category, confidence, message):
self.fail('Unexpected error: %d %s %d %s' % (line_number, category, confidence, message))
error_handler = MockErrorHandler(handle_style_error)
checker = jsonchecker.JSONChecker('foo.json', error_handler)
checker.check(json_data.split('\n'))
self.assertTrue(error_handler.turned_off_filtering)
def assert_error(self, expected_line_number, expected_category, json_data):
def handle_style_error(mock_error_handler, line_number, category, confidence, message):
mock_error_handler.had_error = True
self.assertEqual(expected_line_number, line_number)
self.assertEqual(expected_category, category)
self.assertIn(category, jsonchecker.JSONChecker.categories)
error_handler = MockErrorHandler(handle_style_error)
error_handler.had_error = False
checker = jsonchecker.JSONChecker('foo.json', error_handler)
checker.check(json_data.split('\n'))
self.assertTrue(error_handler.had_error)
self.assertTrue(error_handler.turned_off_filtering)
def mock_handle_style_error(self):
pass
def test_conflict_marker(self):
self.assert_error(0, 'json/syntax', '<<<<<<< HEAD\n{\n}\n')
def test_single_quote(self):
self.assert_error(2, 'json/syntax', "{\n'slaves': []\n}\n")
def test_init(self):
error_handler = MockErrorHandler(self.mock_handle_style_error)
checker = jsonchecker.JSONChecker('foo.json', error_handler)
self.assertEqual(checker._handle_style_error, error_handler)
def test_no_error(self):
self.assert_no_error("""{
"slaves": [ { "name": "test-slave", "platform": "*" },
{ "name": "apple-xserve-4", "platform": "mac-snowleopard" }
],
"builders": [ { "name": "SnowLeopard Intel Release (Build)", "type": "Build", "builddir": "snowleopard-intel-release",
"platform": "mac-snowleopard", "configuration": "release", "architectures": ["x86_64"],
"slavenames": ["apple-xserve-4"]
}
],
"schedulers": [ { "type": "PlatformSpecificScheduler", "platform": "mac-snowleopard", "branch": "trunk", "treeStableTimer": 45.0,
"builderNames": ["SnowLeopard Intel Release (Build)", "SnowLeopard Intel Debug (Build)"]
}
]
}
""")
| 44.424779 | 133 | 0.684462 |
import unittest
from blinkpy.style.checkers import jsonchecker
class MockErrorHandler(object):
def __init__(self, handle_style_error):
self.turned_off_filtering = False
self._handle_style_error = handle_style_error
def turn_off_line_filtering(self):
self.turned_off_filtering = True
def __call__(self, line_number, category, confidence, message):
self._handle_style_error(self, line_number, category, confidence, message)
return True
class JSONCheckerTest(unittest.TestCase):
def test_line_number_from_json_exception(self):
tests = (
(0, 'No JSON object could be decoded'),
(2, 'Expecting property name: line 2 column 1 (char 2)'),
(3, 'Expecting object: line 3 column 1 (char 15)'),
(9, 'Expecting property name: line 9 column 21 (char 478)'),
)
for expected_line, message in tests:
self.assertEqual(expected_line, jsonchecker.JSONChecker.line_number_from_json_exception(ValueError(message)))
def assert_no_error(self, json_data):
def handle_style_error(mock_error_handler, line_number, category, confidence, message):
self.fail('Unexpected error: %d %s %d %s' % (line_number, category, confidence, message))
error_handler = MockErrorHandler(handle_style_error)
checker = jsonchecker.JSONChecker('foo.json', error_handler)
checker.check(json_data.split('\n'))
self.assertTrue(error_handler.turned_off_filtering)
def assert_error(self, expected_line_number, expected_category, json_data):
def handle_style_error(mock_error_handler, line_number, category, confidence, message):
mock_error_handler.had_error = True
self.assertEqual(expected_line_number, line_number)
self.assertEqual(expected_category, category)
self.assertIn(category, jsonchecker.JSONChecker.categories)
error_handler = MockErrorHandler(handle_style_error)
error_handler.had_error = False
checker = jsonchecker.JSONChecker('foo.json', error_handler)
checker.check(json_data.split('\n'))
self.assertTrue(error_handler.had_error)
self.assertTrue(error_handler.turned_off_filtering)
def mock_handle_style_error(self):
pass
def test_conflict_marker(self):
self.assert_error(0, 'json/syntax', '<<<<<<< HEAD\n{\n}\n')
def test_single_quote(self):
self.assert_error(2, 'json/syntax', "{\n'slaves': []\n}\n")
def test_init(self):
error_handler = MockErrorHandler(self.mock_handle_style_error)
checker = jsonchecker.JSONChecker('foo.json', error_handler)
self.assertEqual(checker._handle_style_error, error_handler)
def test_no_error(self):
self.assert_no_error("""{
"slaves": [ { "name": "test-slave", "platform": "*" },
{ "name": "apple-xserve-4", "platform": "mac-snowleopard" }
],
"builders": [ { "name": "SnowLeopard Intel Release (Build)", "type": "Build", "builddir": "snowleopard-intel-release",
"platform": "mac-snowleopard", "configuration": "release", "architectures": ["x86_64"],
"slavenames": ["apple-xserve-4"]
}
],
"schedulers": [ { "type": "PlatformSpecificScheduler", "platform": "mac-snowleopard", "branch": "trunk", "treeStableTimer": 45.0,
"builderNames": ["SnowLeopard Intel Release (Build)", "SnowLeopard Intel Debug (Build)"]
}
]
}
""")
| true | true |
f711250e17869c60f5d238e81eb16be393f3d0db | 8,373 | py | Python | oc_config_validate/oc_config_validate/__main__.py | wenovus/gnxi | 6b0be2d26413d2467ed2ab803df61450035431b1 | [
"Apache-2.0"
] | 1 | 2019-08-06T09:25:43.000Z | 2019-08-06T09:25:43.000Z | oc_config_validate/oc_config_validate/__main__.py | jihaix/gnxi | a4392bf8ac8d207c8368c69387ecc4efb29c22cb | [
"Apache-2.0"
] | null | null | null | oc_config_validate/oc_config_validate/__main__.py | jihaix/gnxi | a4392bf8ac8d207c8368c69387ecc4efb29c22cb | [
"Apache-2.0"
] | null | null | null | """Copyright 2021 Google LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import logging
import os
import sys
import time
from typing import Any, Dict
import yaml
from oc_config_validate import (context, formatter, runner, schema, target,
testbase)
__version__ = "2.0.0"
LOGGING_FORMAT = "%(levelname)s(%(filename)s:%(lineno)d):%(message)s"
def createArgsParser() -> argparse.ArgumentParser:
"""Create parser for arguments passed into the program from the CLI.
Returns:
argparse.ArgumentParser object.
"""
parser = argparse.ArgumentParser(
description="OpenConfig Configuration Validation utility.")
parser.add_argument(
"-tgt",
"--target",
type=str,
help="The gNMI Target, as hostname:port.",
)
parser.add_argument(
"-user",
"--username",
type=str,
help="Username to use when establishing a gNMI Channel to the Target.",
)
parser.add_argument(
"-pass",
"--password",
type=str,
help="Password to use when establishing a gNMI Channel to the Target.",
)
parser.add_argument(
"-key",
"--private_key",
type=str,
help="Path to the Private key to use when establishing"
"a gNMI Channel to the Target.",
)
parser.add_argument(
"-ca",
"--root_ca_cert",
type=str,
help="Path to Root CA to use when building the gNMI Channel.",
)
parser.add_argument(
"-cert",
"--cert_chain",
type=str,
help="Path to Certificate chain to use when"
"establishing a gNMI Channel to the Target.")
parser.add_argument(
"-tests",
"--tests_file",
type=str,
action="store",
help="YAML file to read the test to run.")
parser.add_argument(
"-init",
"--init_config_file",
type=str,
action="store",
help="JSON file with the initial full OpenConfig configuration to "
"apply.")
parser.add_argument(
"-xpath",
"--init_config_xpath",
type=str,
action="store",
help="gNMI xpath where to apply the initial config.",
default="/")
parser.add_argument(
"-results",
"--results_file",
type=str,
action="store",
help="Filename where to write the test results.")
parser.add_argument(
"-f",
"--format",
type=str,
action="store",
help="Format "
"of the GetResponse to be printed. Default=JSON.",
choices=["json", "protobuff"],
default="json")
parser.add_argument(
"-v", "--version", help="Print program version", action="store_true")
parser.add_argument(
"-V", "--verbose", help="Enable gRPC debugging and extra logging.",
action="store_true")
parser.add_argument(
"-models", "--oc_models_versions", help="Print OC models versions.",
action="store_true")
parser.add_argument(
"--no_tls", help="gRPC insecure mode.", action="store_true")
parser.add_argument(
"-o",
"--tls_host_override",
type=str,
action="store",
help="Hostname to use during the TLS certificate check.",
)
parser.add_argument(
"-set_cooldown",
"--gnmi_set_cooldown_secs",
type=int,
action="store",
help="Seconds to wait after a successful gNMI Set message.",
)
parser.add_argument(
"--stop_on_error",
action="store_true",
help="Stop the execution if a test fails.",
)
parser.add_argument(
"--log_gnmi",
action="store_true",
help="Log the gnmi requests to the tests results.",
)
return parser
def validateArgs(args: Dict[str, Any]):
"""Returns True if the arguments are valid.
Raises:
ValueError if any argument is invalid.
IOError is unable to open a file given in argument.
"""
def isFileOK(filename: str, writable: bool = False):
try:
file = open(filename, "w+" if writable else "r", encoding="utf8")
file.close()
except IOError as io_error:
logging.error("Unable to open %s: %s", filename, io_error)
raise
# Mandatory args for tests
for arg, write in [("tests_file", False), ("results_file", True)]:
if not args[arg]:
raise ValueError("Needed --%s file" % arg)
isFileOK(args[arg], write)
if args["init_config_file"]:
isFileOK(args["init_config_file"], False)
# Output format supported
if (args["format"] and
args["format"].lower() not in formatter.SUPPORTED_FORMATS):
raise ValueError("Output format %s is not supported.")
def main(): # noqa
"""Executes this library."""
argparser = createArgsParser()
args = vars(argparser.parse_args())
if args["version"]:
print(__version__)
sys.exit()
if args["oc_models_versions"]:
print(schema.getOcModelsVersions())
sys.exit()
if args["verbose"]:
# os.environ["GRPC_TRACE"] = "all"
os.environ["GRPC_VERBOSITY"] = "DEBUG"
logging.basicConfig(
level=logging.DEBUG if args["verbose"] else logging.INFO,
format=LOGGING_FORMAT)
try:
validateArgs(args)
except (IOError, ValueError) as error:
sys.exit("Invalid arguments: %s" % error)
if args["log_gnmi"]:
testbase.LOG_GNMI = args["log_gnmi"]
try:
ctx = context.fromFile(args["tests_file"])
except IOError as io_error:
sys.exit("Unable to read %s: %s" % (args["tests_file"], io_error))
except yaml.YAMLError as yaml_error:
sys.exit("Unable to parse YAML file %s: %s" % (args["tests_file"],
yaml_error))
logging.info("Read tests file '%s': %d tests to run",
args["tests_file"], len(ctx.tests))
if not ctx.target:
ctx.target = context.Target()
# Override Target options
for arg in ["target", "username", "password", "no_tls", "private_key",
"cert_chain", "root_ca_cert", "tls_host_override",
"gnmi_set_cooldown_secs"]:
if args[arg]:
setattr(ctx.target, arg, args[arg])
tgt = target.TestTarget(ctx.target)
try:
tgt.validate()
except ValueError as error:
sys.exit("Invalid Target: %s" % error)
logging.info("Testing gNMI Target %s.", tgt)
if tgt.gnmi_set_cooldown_secs:
logging.info("Using gNMI Set Cooldown of %d secs",
tgt.gnmi_set_cooldown_secs)
# Apply initial configuration
if args["init_config_file"]:
ctx.init_configs.append(context.InitConfig(args["init_config_file"],
args["init_config_xpath"]))
if not runner.setInitConfigs(ctx, tgt,
stop_on_error=args["stop_on_error"]):
sys.exit(1)
start_t = time.time()
results = runner.runTests(ctx, tgt, stop_on_error=args["stop_on_error"])
end_t = time.time()
test_run = testbase.TestRun(ctx)
test_run.copyResults(results, start_t, end_t)
logging.info("Results Summary: %s", test_run.summary())
try:
fmtr = formatter.makeFormatter(args["format"])
fmtr.writeResultsToFile(test_run, args["results_file"])
logging.info("Test results written to %s", args["results_file"])
except IOError as io_error:
logging.exception("Unable to write file %s: %s", args["results_file"],
io_error)
except TypeError as type_error:
logging.exception("Unable to parse results into a JSON text: %s",
type_error)
if __name__ == "__main__":
main()
| 30.67033 | 79 | 0.601696 | import argparse
import logging
import os
import sys
import time
from typing import Any, Dict
import yaml
from oc_config_validate import (context, formatter, runner, schema, target,
testbase)
__version__ = "2.0.0"
LOGGING_FORMAT = "%(levelname)s(%(filename)s:%(lineno)d):%(message)s"
def createArgsParser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(
description="OpenConfig Configuration Validation utility.")
parser.add_argument(
"-tgt",
"--target",
type=str,
help="The gNMI Target, as hostname:port.",
)
parser.add_argument(
"-user",
"--username",
type=str,
help="Username to use when establishing a gNMI Channel to the Target.",
)
parser.add_argument(
"-pass",
"--password",
type=str,
help="Password to use when establishing a gNMI Channel to the Target.",
)
parser.add_argument(
"-key",
"--private_key",
type=str,
help="Path to the Private key to use when establishing"
"a gNMI Channel to the Target.",
)
parser.add_argument(
"-ca",
"--root_ca_cert",
type=str,
help="Path to Root CA to use when building the gNMI Channel.",
)
parser.add_argument(
"-cert",
"--cert_chain",
type=str,
help="Path to Certificate chain to use when"
"establishing a gNMI Channel to the Target.")
parser.add_argument(
"-tests",
"--tests_file",
type=str,
action="store",
help="YAML file to read the test to run.")
parser.add_argument(
"-init",
"--init_config_file",
type=str,
action="store",
help="JSON file with the initial full OpenConfig configuration to "
"apply.")
parser.add_argument(
"-xpath",
"--init_config_xpath",
type=str,
action="store",
help="gNMI xpath where to apply the initial config.",
default="/")
parser.add_argument(
"-results",
"--results_file",
type=str,
action="store",
help="Filename where to write the test results.")
parser.add_argument(
"-f",
"--format",
type=str,
action="store",
help="Format "
"of the GetResponse to be printed. Default=JSON.",
choices=["json", "protobuff"],
default="json")
parser.add_argument(
"-v", "--version", help="Print program version", action="store_true")
parser.add_argument(
"-V", "--verbose", help="Enable gRPC debugging and extra logging.",
action="store_true")
parser.add_argument(
"-models", "--oc_models_versions", help="Print OC models versions.",
action="store_true")
parser.add_argument(
"--no_tls", help="gRPC insecure mode.", action="store_true")
parser.add_argument(
"-o",
"--tls_host_override",
type=str,
action="store",
help="Hostname to use during the TLS certificate check.",
)
parser.add_argument(
"-set_cooldown",
"--gnmi_set_cooldown_secs",
type=int,
action="store",
help="Seconds to wait after a successful gNMI Set message.",
)
parser.add_argument(
"--stop_on_error",
action="store_true",
help="Stop the execution if a test fails.",
)
parser.add_argument(
"--log_gnmi",
action="store_true",
help="Log the gnmi requests to the tests results.",
)
return parser
def validateArgs(args: Dict[str, Any]):
def isFileOK(filename: str, writable: bool = False):
try:
file = open(filename, "w+" if writable else "r", encoding="utf8")
file.close()
except IOError as io_error:
logging.error("Unable to open %s: %s", filename, io_error)
raise
for arg, write in [("tests_file", False), ("results_file", True)]:
if not args[arg]:
raise ValueError("Needed --%s file" % arg)
isFileOK(args[arg], write)
if args["init_config_file"]:
isFileOK(args["init_config_file"], False)
if (args["format"] and
args["format"].lower() not in formatter.SUPPORTED_FORMATS):
raise ValueError("Output format %s is not supported.")
def main():
argparser = createArgsParser()
args = vars(argparser.parse_args())
if args["version"]:
print(__version__)
sys.exit()
if args["oc_models_versions"]:
print(schema.getOcModelsVersions())
sys.exit()
if args["verbose"]:
os.environ["GRPC_VERBOSITY"] = "DEBUG"
logging.basicConfig(
level=logging.DEBUG if args["verbose"] else logging.INFO,
format=LOGGING_FORMAT)
try:
validateArgs(args)
except (IOError, ValueError) as error:
sys.exit("Invalid arguments: %s" % error)
if args["log_gnmi"]:
testbase.LOG_GNMI = args["log_gnmi"]
try:
ctx = context.fromFile(args["tests_file"])
except IOError as io_error:
sys.exit("Unable to read %s: %s" % (args["tests_file"], io_error))
except yaml.YAMLError as yaml_error:
sys.exit("Unable to parse YAML file %s: %s" % (args["tests_file"],
yaml_error))
logging.info("Read tests file '%s': %d tests to run",
args["tests_file"], len(ctx.tests))
if not ctx.target:
ctx.target = context.Target()
for arg in ["target", "username", "password", "no_tls", "private_key",
"cert_chain", "root_ca_cert", "tls_host_override",
"gnmi_set_cooldown_secs"]:
if args[arg]:
setattr(ctx.target, arg, args[arg])
tgt = target.TestTarget(ctx.target)
try:
tgt.validate()
except ValueError as error:
sys.exit("Invalid Target: %s" % error)
logging.info("Testing gNMI Target %s.", tgt)
if tgt.gnmi_set_cooldown_secs:
logging.info("Using gNMI Set Cooldown of %d secs",
tgt.gnmi_set_cooldown_secs)
if args["init_config_file"]:
ctx.init_configs.append(context.InitConfig(args["init_config_file"],
args["init_config_xpath"]))
if not runner.setInitConfigs(ctx, tgt,
stop_on_error=args["stop_on_error"]):
sys.exit(1)
start_t = time.time()
results = runner.runTests(ctx, tgt, stop_on_error=args["stop_on_error"])
end_t = time.time()
test_run = testbase.TestRun(ctx)
test_run.copyResults(results, start_t, end_t)
logging.info("Results Summary: %s", test_run.summary())
try:
fmtr = formatter.makeFormatter(args["format"])
fmtr.writeResultsToFile(test_run, args["results_file"])
logging.info("Test results written to %s", args["results_file"])
except IOError as io_error:
logging.exception("Unable to write file %s: %s", args["results_file"],
io_error)
except TypeError as type_error:
logging.exception("Unable to parse results into a JSON text: %s",
type_error)
if __name__ == "__main__":
main()
| true | true |
f7112541ccf3fa57a52b0d8b4db34cff4b6eeffa | 3,607 | py | Python | samples/basic/crud/models/openconfig/openconfig-mpls/nc-create-oc-mpls-54-ydk.py | maccioni/ydk-py-samples | d1758694bef97327c5477e65649326c7595ce499 | [
"Apache-2.0"
] | 1 | 2021-07-08T14:02:12.000Z | 2021-07-08T14:02:12.000Z | samples/basic/crud/models/openconfig/openconfig-mpls/nc-create-oc-mpls-54-ydk.py | maccioni/ydk-py-samples | d1758694bef97327c5477e65649326c7595ce499 | [
"Apache-2.0"
] | null | null | null | samples/basic/crud/models/openconfig/openconfig-mpls/nc-create-oc-mpls-54-ydk.py | maccioni/ydk-py-samples | d1758694bef97327c5477e65649326c7595ce499 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Copyright 2016 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Create configuration for model openconfig-mpls.
usage: nc-create-oc-mpls-54-ydk.py [-h] [-v] device
positional arguments:
device NETCONF device (ssh://user:password@host:port)
optional arguments:
-h, --help show this help message and exit
-v, --verbose print debugging messages
"""
from argparse import ArgumentParser
from urlparse import urlparse
from ydk.services import CRUDService
from ydk.providers import NetconfServiceProvider
from ydk.models.openconfig import openconfig_mpls \
as oc_mpls
from ydk.models.openconfig import openconfig_mpls_types as oc_mpls_types
import logging
def config_mpls(mpls):
"""Add config data to mpls object."""
# tunnel with protection requested
tunnel = mpls.lsps.constrained_path.Tunnel()
tunnel.name = "LER1-LER2-t54"
tunnel.config.name = "LER1-LER2-t54"
tunnel.config.type = oc_mpls_types.P2P()
tunnel.config.protection_style_requested = oc_mpls_types.LinkProtectionRequested()
tunnel.type = oc_mpls_types.P2P()
p2p_primary_paths = tunnel.p2p_tunnel_attributes.P2PPrimaryPaths()
p2p_primary_paths.name = "DYNAMIC"
p2p_primary_paths.config.name = "DYNAMIC"
p2p_primary_paths.config.preference = 10
path_computation_method = oc_mpls.LocallyComputed()
p2p_primary_paths.config.path_computation_method = path_computation_method
tunnel.p2p_tunnel_attributes.p2p_primary_paths.append(p2p_primary_paths)
tunnel.p2p_tunnel_attributes.config.destination = "172.16.255.2"
tunnel.bandwidth.config.set_bandwidth = 100000
mpls.lsps.constrained_path.tunnel.append(tunnel)
if __name__ == "__main__":
"""Execute main program."""
parser = ArgumentParser()
parser.add_argument("-v", "--verbose", help="print debugging messages",
action="store_true")
parser.add_argument("device",
help="NETCONF device (ssh://user:password@host:port)")
args = parser.parse_args()
device = urlparse(args.device)
# log debug messages if verbose argument specified
if args.verbose:
logger = logging.getLogger("ydk")
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
formatter = logging.Formatter(("%(asctime)s - %(name)s - "
"%(levelname)s - %(message)s"))
handler.setFormatter(formatter)
logger.addHandler(handler)
# create NETCONF provider
provider = NetconfServiceProvider(address=device.hostname,
port=device.port,
username=device.username,
password=device.password,
protocol=device.scheme)
# create CRUD service
crud = CRUDService()
mpls = oc_mpls.Mpls() # create object
config_mpls(mpls) # add object configuration
# create configuration on NETCONF device
crud.create(provider, mpls)
exit()
# End of script
| 35.712871 | 86 | 0.689215 |
from argparse import ArgumentParser
from urlparse import urlparse
from ydk.services import CRUDService
from ydk.providers import NetconfServiceProvider
from ydk.models.openconfig import openconfig_mpls \
as oc_mpls
from ydk.models.openconfig import openconfig_mpls_types as oc_mpls_types
import logging
def config_mpls(mpls):
tunnel = mpls.lsps.constrained_path.Tunnel()
tunnel.name = "LER1-LER2-t54"
tunnel.config.name = "LER1-LER2-t54"
tunnel.config.type = oc_mpls_types.P2P()
tunnel.config.protection_style_requested = oc_mpls_types.LinkProtectionRequested()
tunnel.type = oc_mpls_types.P2P()
p2p_primary_paths = tunnel.p2p_tunnel_attributes.P2PPrimaryPaths()
p2p_primary_paths.name = "DYNAMIC"
p2p_primary_paths.config.name = "DYNAMIC"
p2p_primary_paths.config.preference = 10
path_computation_method = oc_mpls.LocallyComputed()
p2p_primary_paths.config.path_computation_method = path_computation_method
tunnel.p2p_tunnel_attributes.p2p_primary_paths.append(p2p_primary_paths)
tunnel.p2p_tunnel_attributes.config.destination = "172.16.255.2"
tunnel.bandwidth.config.set_bandwidth = 100000
mpls.lsps.constrained_path.tunnel.append(tunnel)
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("-v", "--verbose", help="print debugging messages",
action="store_true")
parser.add_argument("device",
help="NETCONF device (ssh://user:password@host:port)")
args = parser.parse_args()
device = urlparse(args.device)
if args.verbose:
logger = logging.getLogger("ydk")
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
formatter = logging.Formatter(("%(asctime)s - %(name)s - "
"%(levelname)s - %(message)s"))
handler.setFormatter(formatter)
logger.addHandler(handler)
provider = NetconfServiceProvider(address=device.hostname,
port=device.port,
username=device.username,
password=device.password,
protocol=device.scheme)
crud = CRUDService()
mpls = oc_mpls.Mpls()
config_mpls(mpls)
crud.create(provider, mpls)
exit()
| true | true |
f71126725faac245baf3d0b86e42241cb62b491f | 3,155 | py | Python | startupmoney/startupmoney/settings.py | RanjithaRao22/TestWebApp | 581edcec8fb39001917d9132b7f371aabc506e51 | [
"MIT"
] | 1 | 2020-04-13T06:33:15.000Z | 2020-04-13T06:33:15.000Z | startupmoney/startupmoney/settings.py | vatsamail/TestWebApp | 581edcec8fb39001917d9132b7f371aabc506e51 | [
"MIT"
] | 7 | 2020-04-12T23:26:42.000Z | 2022-02-10T12:18:08.000Z | startupmoney/startupmoney/settings.py | vatsamail/TestWebApp | 581edcec8fb39001917d9132b7f371aabc506e51 | [
"MIT"
] | null | null | null | """
Django settings for startupmoney project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '7_bcd_om-v=oud6403zs5#snm5(&_&d(l38#&qc2=(xb77g)^j'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admindocs',
'handlemoney',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'startupmoney.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'startupmoney.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| 25.650407 | 91 | 0.696672 |
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = '7_bcd_om-v=oud6403zs5#snm5(&_&d(l38#&qc2=(xb77g)^j'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admindocs',
'handlemoney',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'startupmoney.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'startupmoney.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| true | true |
f711269d3ecd4d7485d60302c52b1eb2a9b6db66 | 6,178 | py | Python | cfgov/data_research/tests/test_forms.py | thephillipsequation/cfgov-refresh | 1412dd4215fce5597c0ec704b0d480cf00aeb82c | [
"CC0-1.0"
] | 37 | 2020-08-18T19:52:39.000Z | 2022-03-23T08:08:41.000Z | cfgov/data_research/tests/test_forms.py | thephillipsequation/cfgov-refresh | 1412dd4215fce5597c0ec704b0d480cf00aeb82c | [
"CC0-1.0"
] | 338 | 2020-08-14T20:46:36.000Z | 2022-03-31T20:49:32.000Z | cfgov/data_research/tests/test_forms.py | raft-tech/cfgov-refresh | 7c63c31fd6bb95ed4f7d368f1e1252175f0c71ca | [
"CC0-1.0"
] | 14 | 2020-10-21T15:27:03.000Z | 2022-03-17T03:16:36.000Z | from django.test import TestCase
from core.govdelivery import MockGovDelivery
from data_research.forms import ConferenceRegistrationForm
from data_research.models import ConferenceRegistration
class ConferenceRegistrationFormTests(TestCase):
capacity = 100
govdelivery_code = 'TEST-CODE'
govdelivery_question_id = '12345'
govdelivery_answer_id = '67890'
def test_invalid_form_if_fields_are_missing(self):
form = ConferenceRegistrationForm(
capacity=self.capacity,
govdelivery_code=self.govdelivery_code,
govdelivery_question_id=self.govdelivery_question_id,
govdelivery_answer_id=self.govdelivery_answer_id,
data={'foo': 'bar'}
)
self.assertFalse(form.is_valid())
def get_valid_form(
self,
attendee_type=ConferenceRegistrationForm.ATTENDEE_IN_PERSON,
govdelivery_question_id=None,
govdelivery_answer_id=None
):
return ConferenceRegistrationForm(
capacity=self.capacity,
govdelivery_code=self.govdelivery_code,
govdelivery_question_id=govdelivery_question_id,
govdelivery_answer_id=govdelivery_answer_id,
data={
'attendee_type': attendee_type,
'name': 'A User',
'organization': 'An Organization',
'email': 'user@domain.com',
}
)
def test_valid_form_if_required_fields_are_provided(self):
form = self.get_valid_form()
self.assertTrue(form.is_valid())
def test_form_save_commit_false_doesnt_save_user(self):
form = self.get_valid_form()
form.is_valid()
form.save(commit=False)
self.assertFalse(ConferenceRegistration.objects.exists())
def test_form_save_commit_false_doesnt_subscribe_to_govdelivery(self):
calls_before = list(MockGovDelivery.calls)
form = self.get_valid_form()
form.is_valid()
form.save(commit=False)
self.assertEqual(MockGovDelivery.calls, calls_before)
def test_form_save_sets_registration_code_and_details(self):
form = self.get_valid_form()
form.is_valid()
registrant = form.save(commit=False)
self.assertEqual(registrant.govdelivery_code, 'TEST-CODE')
self.assertEqual(registrant.details, {
'attendee_type': ConferenceRegistrationForm.ATTENDEE_IN_PERSON,
'name': 'A User',
'organization': 'An Organization',
'email': 'user@domain.com',
'dietary_restrictions': [],
'other_dietary_restrictions': '',
'accommodations': [],
'other_accommodations': '',
})
def test_form_save_commit_true_saves_to_db(self):
form = self.get_valid_form()
form.is_valid()
registrant = form.save()
self.assertEqual(registrant, ConferenceRegistration.objects.first())
def test_form_save_commit_true_subscribes_to_gd(self):
form = self.get_valid_form()
form.is_valid()
form.save()
self.assertEqual(
MockGovDelivery.calls,
[(
'set_subscriber_topics',
(),
{
'contact_details': 'user@domain.com',
'topic_codes': ['TEST-CODE'],
'send_notifications': True,
}
)]
)
def test_form_save_commit_true_subscribes_and_sets_question(self):
form = self.get_valid_form(
govdelivery_question_id='12345',
govdelivery_answer_id='67890'
)
form.is_valid()
form.save()
self.assertEqual(MockGovDelivery.calls, [
(
'set_subscriber_topics',
(),
{
'contact_details': 'user@domain.com',
'topic_codes': ['TEST-CODE'],
'send_notifications': True,
}
),
(
'set_subscriber_answer_to_select_question',
(),
{
'contact_details': 'user@domain.com',
'question_id': '12345',
'answer_id': '67890',
}
),
])
def make_capacity_registrants(self, govdelivery_code, attendee_type):
registrant = ConferenceRegistration(
govdelivery_code=govdelivery_code,
details={'attendee_type': attendee_type}
)
ConferenceRegistration.objects.bulk_create(
[registrant] * self.capacity
)
def test_form_not_at_capacity(self):
self.assertFalse(self.get_valid_form().at_capacity)
def test_form_at_capacity(self):
self.make_capacity_registrants(
self.govdelivery_code,
ConferenceRegistrationForm.ATTENDEE_IN_PERSON
)
self.assertTrue(self.get_valid_form().at_capacity)
def test_form_at_capacity_for_some_other_code(self):
self.make_capacity_registrants(
'some-other-code',
ConferenceRegistrationForm.ATTENDEE_IN_PERSON
)
self.assertFalse(self.get_valid_form().at_capacity)
def test_form_at_capacity_invalid(self):
self.make_capacity_registrants(
self.govdelivery_code,
ConferenceRegistrationForm.ATTENDEE_IN_PERSON
)
form = self.get_valid_form()
self.assertFalse(form.is_valid())
def test_form_at_capacity_still_valid_for_virtual_attendees(self):
self.make_capacity_registrants(
self.govdelivery_code,
ConferenceRegistrationForm.ATTENDEE_IN_PERSON
)
form = self.get_valid_form(
attendee_type=ConferenceRegistrationForm.ATTENDEE_VIRTUALLY
)
self.assertTrue(form.is_valid())
def test_form_virtual_attendees_dont_count_against_capacity(self):
self.make_capacity_registrants(
self.govdelivery_code,
ConferenceRegistrationForm.ATTENDEE_VIRTUALLY
)
self.assertFalse(self.get_valid_form().at_capacity)
| 34.132597 | 76 | 0.621075 | from django.test import TestCase
from core.govdelivery import MockGovDelivery
from data_research.forms import ConferenceRegistrationForm
from data_research.models import ConferenceRegistration
class ConferenceRegistrationFormTests(TestCase):
capacity = 100
govdelivery_code = 'TEST-CODE'
govdelivery_question_id = '12345'
govdelivery_answer_id = '67890'
def test_invalid_form_if_fields_are_missing(self):
form = ConferenceRegistrationForm(
capacity=self.capacity,
govdelivery_code=self.govdelivery_code,
govdelivery_question_id=self.govdelivery_question_id,
govdelivery_answer_id=self.govdelivery_answer_id,
data={'foo': 'bar'}
)
self.assertFalse(form.is_valid())
def get_valid_form(
self,
attendee_type=ConferenceRegistrationForm.ATTENDEE_IN_PERSON,
govdelivery_question_id=None,
govdelivery_answer_id=None
):
return ConferenceRegistrationForm(
capacity=self.capacity,
govdelivery_code=self.govdelivery_code,
govdelivery_question_id=govdelivery_question_id,
govdelivery_answer_id=govdelivery_answer_id,
data={
'attendee_type': attendee_type,
'name': 'A User',
'organization': 'An Organization',
'email': 'user@domain.com',
}
)
def test_valid_form_if_required_fields_are_provided(self):
form = self.get_valid_form()
self.assertTrue(form.is_valid())
def test_form_save_commit_false_doesnt_save_user(self):
form = self.get_valid_form()
form.is_valid()
form.save(commit=False)
self.assertFalse(ConferenceRegistration.objects.exists())
def test_form_save_commit_false_doesnt_subscribe_to_govdelivery(self):
calls_before = list(MockGovDelivery.calls)
form = self.get_valid_form()
form.is_valid()
form.save(commit=False)
self.assertEqual(MockGovDelivery.calls, calls_before)
def test_form_save_sets_registration_code_and_details(self):
form = self.get_valid_form()
form.is_valid()
registrant = form.save(commit=False)
self.assertEqual(registrant.govdelivery_code, 'TEST-CODE')
self.assertEqual(registrant.details, {
'attendee_type': ConferenceRegistrationForm.ATTENDEE_IN_PERSON,
'name': 'A User',
'organization': 'An Organization',
'email': 'user@domain.com',
'dietary_restrictions': [],
'other_dietary_restrictions': '',
'accommodations': [],
'other_accommodations': '',
})
def test_form_save_commit_true_saves_to_db(self):
form = self.get_valid_form()
form.is_valid()
registrant = form.save()
self.assertEqual(registrant, ConferenceRegistration.objects.first())
def test_form_save_commit_true_subscribes_to_gd(self):
form = self.get_valid_form()
form.is_valid()
form.save()
self.assertEqual(
MockGovDelivery.calls,
[(
'set_subscriber_topics',
(),
{
'contact_details': 'user@domain.com',
'topic_codes': ['TEST-CODE'],
'send_notifications': True,
}
)]
)
def test_form_save_commit_true_subscribes_and_sets_question(self):
form = self.get_valid_form(
govdelivery_question_id='12345',
govdelivery_answer_id='67890'
)
form.is_valid()
form.save()
self.assertEqual(MockGovDelivery.calls, [
(
'set_subscriber_topics',
(),
{
'contact_details': 'user@domain.com',
'topic_codes': ['TEST-CODE'],
'send_notifications': True,
}
),
(
'set_subscriber_answer_to_select_question',
(),
{
'contact_details': 'user@domain.com',
'question_id': '12345',
'answer_id': '67890',
}
),
])
def make_capacity_registrants(self, govdelivery_code, attendee_type):
registrant = ConferenceRegistration(
govdelivery_code=govdelivery_code,
details={'attendee_type': attendee_type}
)
ConferenceRegistration.objects.bulk_create(
[registrant] * self.capacity
)
def test_form_not_at_capacity(self):
self.assertFalse(self.get_valid_form().at_capacity)
def test_form_at_capacity(self):
self.make_capacity_registrants(
self.govdelivery_code,
ConferenceRegistrationForm.ATTENDEE_IN_PERSON
)
self.assertTrue(self.get_valid_form().at_capacity)
def test_form_at_capacity_for_some_other_code(self):
self.make_capacity_registrants(
'some-other-code',
ConferenceRegistrationForm.ATTENDEE_IN_PERSON
)
self.assertFalse(self.get_valid_form().at_capacity)
def test_form_at_capacity_invalid(self):
self.make_capacity_registrants(
self.govdelivery_code,
ConferenceRegistrationForm.ATTENDEE_IN_PERSON
)
form = self.get_valid_form()
self.assertFalse(form.is_valid())
def test_form_at_capacity_still_valid_for_virtual_attendees(self):
self.make_capacity_registrants(
self.govdelivery_code,
ConferenceRegistrationForm.ATTENDEE_IN_PERSON
)
form = self.get_valid_form(
attendee_type=ConferenceRegistrationForm.ATTENDEE_VIRTUALLY
)
self.assertTrue(form.is_valid())
def test_form_virtual_attendees_dont_count_against_capacity(self):
self.make_capacity_registrants(
self.govdelivery_code,
ConferenceRegistrationForm.ATTENDEE_VIRTUALLY
)
self.assertFalse(self.get_valid_form().at_capacity)
| true | true |
f711279795ac54742a452b547b1f96fda8fcd72e | 1,218 | py | Python | venv/lib/python3.6/site-packages/sqlalchemy/dialects/mysql/__init__.py | tchengatcincoai/cryptocoin-prices-compare | f295fecc7213a877bf717af0eb98414e9137b554 | [
"MIT"
] | 78 | 2017-08-19T03:46:13.000Z | 2020-02-19T04:29:45.000Z | desktop/core/ext-py/SQLAlchemy-1.2.0b3/lib/sqlalchemy/dialects/mysql/__init__.py | zks888/hue | 93a8c370713e70b216c428caa2f75185ef809deb | [
"Apache-2.0"
] | 5 | 2017-08-21T16:33:08.000Z | 2018-06-21T18:37:18.000Z | desktop/core/ext-py/SQLAlchemy-1.2.0b3/lib/sqlalchemy/dialects/mysql/__init__.py | zks888/hue | 93a8c370713e70b216c428caa2f75185ef809deb | [
"Apache-2.0"
] | 43 | 2018-02-05T23:23:46.000Z | 2021-07-28T22:51:42.000Z | # mysql/__init__.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from . import base, mysqldb, oursql, \
pyodbc, zxjdbc, mysqlconnector, pymysql,\
gaerdbms, cymysql
# default dialect
base.dialect = mysqldb.dialect
from .base import \
BIGINT, BINARY, BIT, BLOB, BOOLEAN, CHAR, DATE, DATETIME, \
DECIMAL, DOUBLE, ENUM, DECIMAL,\
FLOAT, INTEGER, INTEGER, JSON, LONGBLOB, LONGTEXT, MEDIUMBLOB, \
MEDIUMINT, MEDIUMTEXT, NCHAR, \
NVARCHAR, NUMERIC, SET, SMALLINT, REAL, TEXT, TIME, TIMESTAMP, \
TINYBLOB, TINYINT, TINYTEXT,\
VARBINARY, VARCHAR, YEAR, dialect
from .dml import insert, Insert
__all__ = (
'BIGINT', 'BINARY', 'BIT', 'BLOB', 'BOOLEAN', 'CHAR', 'DATE', 'DATETIME',
'DECIMAL', 'DOUBLE', 'ENUM', 'DECIMAL', 'FLOAT', 'INTEGER', 'INTEGER',
'JSON', 'LONGBLOB', 'LONGTEXT', 'MEDIUMBLOB', 'MEDIUMINT', 'MEDIUMTEXT',
'NCHAR', 'NVARCHAR', 'NUMERIC', 'SET', 'SMALLINT', 'REAL', 'TEXT', 'TIME',
'TIMESTAMP', 'TINYBLOB', 'TINYINT', 'TINYTEXT', 'VARBINARY', 'VARCHAR',
'YEAR', 'dialect'
)
| 35.823529 | 78 | 0.668309 |
from . import base, mysqldb, oursql, \
pyodbc, zxjdbc, mysqlconnector, pymysql,\
gaerdbms, cymysql
base.dialect = mysqldb.dialect
from .base import \
BIGINT, BINARY, BIT, BLOB, BOOLEAN, CHAR, DATE, DATETIME, \
DECIMAL, DOUBLE, ENUM, DECIMAL,\
FLOAT, INTEGER, INTEGER, JSON, LONGBLOB, LONGTEXT, MEDIUMBLOB, \
MEDIUMINT, MEDIUMTEXT, NCHAR, \
NVARCHAR, NUMERIC, SET, SMALLINT, REAL, TEXT, TIME, TIMESTAMP, \
TINYBLOB, TINYINT, TINYTEXT,\
VARBINARY, VARCHAR, YEAR, dialect
from .dml import insert, Insert
__all__ = (
'BIGINT', 'BINARY', 'BIT', 'BLOB', 'BOOLEAN', 'CHAR', 'DATE', 'DATETIME',
'DECIMAL', 'DOUBLE', 'ENUM', 'DECIMAL', 'FLOAT', 'INTEGER', 'INTEGER',
'JSON', 'LONGBLOB', 'LONGTEXT', 'MEDIUMBLOB', 'MEDIUMINT', 'MEDIUMTEXT',
'NCHAR', 'NVARCHAR', 'NUMERIC', 'SET', 'SMALLINT', 'REAL', 'TEXT', 'TIME',
'TIMESTAMP', 'TINYBLOB', 'TINYINT', 'TINYTEXT', 'VARBINARY', 'VARCHAR',
'YEAR', 'dialect'
)
| true | true |
f7112864b35d42a4077973fcef95554472aa1dad | 2,769 | py | Python | aliyun-python-sdk-emr/aliyunsdkemr/request/v20160408/ModifyFlowProjectClusterSettingRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-emr/aliyunsdkemr/request/v20160408/ModifyFlowProjectClusterSettingRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-emr/aliyunsdkemr/request/v20160408/ModifyFlowProjectClusterSettingRequest.py | liumihust/aliyun-openapi-python-sdk | c7b5dd4befae4b9c59181654289f9272531207ef | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkemr.endpoint import endpoint_data
class ModifyFlowProjectClusterSettingRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Emr', '2016-04-08', 'ModifyFlowProjectClusterSetting','emr')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_UserLists(self):
return self.get_query_params().get('UserLists')
def set_UserLists(self,UserLists):
for i in range(len(UserLists)):
if UserLists[i] is not None:
self.add_query_param('UserList.' + str(i + 1) , UserLists[i]);
def get_QueueLists(self):
return self.get_query_params().get('QueueLists')
def set_QueueLists(self,QueueLists):
for i in range(len(QueueLists)):
if QueueLists[i] is not None:
self.add_query_param('QueueList.' + str(i + 1) , QueueLists[i]);
def get_HostLists(self):
return self.get_query_params().get('HostLists')
def set_HostLists(self,HostLists):
for i in range(len(HostLists)):
if HostLists[i] is not None:
self.add_query_param('HostList.' + str(i + 1) , HostLists[i]);
def get_ClusterId(self):
return self.get_query_params().get('ClusterId')
def set_ClusterId(self,ClusterId):
self.add_query_param('ClusterId',ClusterId)
def get_DefaultQueue(self):
return self.get_query_params().get('DefaultQueue')
def set_DefaultQueue(self,DefaultQueue):
self.add_query_param('DefaultQueue',DefaultQueue)
def get_ProjectId(self):
return self.get_query_params().get('ProjectId')
def set_ProjectId(self,ProjectId):
self.add_query_param('ProjectId',ProjectId)
def get_DefaultUser(self):
return self.get_query_params().get('DefaultUser')
def set_DefaultUser(self,DefaultUser):
self.add_query_param('DefaultUser',DefaultUser) | 35.050633 | 90 | 0.737811 |
from aliyunsdkcore.request import RpcRequest
from aliyunsdkemr.endpoint import endpoint_data
class ModifyFlowProjectClusterSettingRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Emr', '2016-04-08', 'ModifyFlowProjectClusterSetting','emr')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_UserLists(self):
return self.get_query_params().get('UserLists')
def set_UserLists(self,UserLists):
for i in range(len(UserLists)):
if UserLists[i] is not None:
self.add_query_param('UserList.' + str(i + 1) , UserLists[i]);
def get_QueueLists(self):
return self.get_query_params().get('QueueLists')
def set_QueueLists(self,QueueLists):
for i in range(len(QueueLists)):
if QueueLists[i] is not None:
self.add_query_param('QueueList.' + str(i + 1) , QueueLists[i]);
def get_HostLists(self):
return self.get_query_params().get('HostLists')
def set_HostLists(self,HostLists):
for i in range(len(HostLists)):
if HostLists[i] is not None:
self.add_query_param('HostList.' + str(i + 1) , HostLists[i]);
def get_ClusterId(self):
return self.get_query_params().get('ClusterId')
def set_ClusterId(self,ClusterId):
self.add_query_param('ClusterId',ClusterId)
def get_DefaultQueue(self):
return self.get_query_params().get('DefaultQueue')
def set_DefaultQueue(self,DefaultQueue):
self.add_query_param('DefaultQueue',DefaultQueue)
def get_ProjectId(self):
return self.get_query_params().get('ProjectId')
def set_ProjectId(self,ProjectId):
self.add_query_param('ProjectId',ProjectId)
def get_DefaultUser(self):
return self.get_query_params().get('DefaultUser')
def set_DefaultUser(self,DefaultUser):
self.add_query_param('DefaultUser',DefaultUser) | true | true |
f71128db7d81033360ad6c8a01962fa528633b2b | 822 | py | Python | Chapter 10/Chap10_Example10.38.py | Anancha/Programming-Techniques-using-Python | e80c329d2a27383909d358741a5cab03cb22fd8b | [
"MIT"
] | null | null | null | Chapter 10/Chap10_Example10.38.py | Anancha/Programming-Techniques-using-Python | e80c329d2a27383909d358741a5cab03cb22fd8b | [
"MIT"
] | null | null | null | Chapter 10/Chap10_Example10.38.py | Anancha/Programming-Techniques-using-Python | e80c329d2a27383909d358741a5cab03cb22fd8b | [
"MIT"
] | null | null | null | from threading import Thread, Event
from time import sleep
def func1():
sleep(2) # Initially sleep for 2 secs
myeventobj.set() # E2
print("func1 sleeping for 3 secs....")
sleep(3) # E3
myeventobj.clear() # E4
def func2():
print("Initially myeventobj is: ", myeventobj.isSet()) # E1
myeventobj.wait()
if myeventobj.isSet(): # E5
print("True when myeventobj.set() is called from func1 .i.e. Internal flag is set")
print("func2 sleeping for 4 secs....")
sleep(4) # E6
if myeventobj.isSet() == False: # E7
print("False when myeventobj.clear() is called from func1.i.e. Internal flag is reset")
myeventobj = Event()
myt1 = Thread(target=func1)
myt2 = Thread(target=func2)
myt1.start()
myt2.start()
myt1.join()
myt2.join()
print("Main Thread Completed")
| 25.6875 | 95 | 0.653285 | from threading import Thread, Event
from time import sleep
def func1():
sleep(2)
myeventobj.set()
print("func1 sleeping for 3 secs....")
sleep(3)
myeventobj.clear()
def func2():
print("Initially myeventobj is: ", myeventobj.isSet())
myeventobj.wait()
if myeventobj.isSet():
print("True when myeventobj.set() is called from func1 .i.e. Internal flag is set")
print("func2 sleeping for 4 secs....")
sleep(4)
if myeventobj.isSet() == False:
print("False when myeventobj.clear() is called from func1.i.e. Internal flag is reset")
myeventobj = Event()
myt1 = Thread(target=func1)
myt2 = Thread(target=func2)
myt1.start()
myt2.start()
myt1.join()
myt2.join()
print("Main Thread Completed")
| true | true |
f71129c57862d43432ef1e52df9e6edb6a786838 | 2,181 | py | Python | package/spack-r-phantompeakqualtools/package.py | ctuning/ck-spack | 307934efce1be2d4f104251275c82fbc70127105 | [
"BSD-3-Clause"
] | 1 | 2018-07-17T07:45:09.000Z | 2018-07-17T07:45:09.000Z | package/spack-r-phantompeakqualtools/package.py | ctuning/ck-spack | 307934efce1be2d4f104251275c82fbc70127105 | [
"BSD-3-Clause"
] | null | null | null | package/spack-r-phantompeakqualtools/package.py | ctuning/ck-spack | 307934efce1be2d4f104251275c82fbc70127105 | [
"BSD-3-Clause"
] | null | null | null | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RPhantompeakqualtools(RPackage):
"""Computes informative enrichment and quality measures for
ChIP-seq/DNase-seq/FAIRE-seq/MNase-seq data. This is a modified version
of r-spp to be used in conjunction with the phantompeakqualtools
package."""
homepage = "https://github.com/kundajelab/phantompeakqualtools"
url = "https://github.com/kundajelab/phantompeakqualtools/raw/master/spp_1.14.tar.gz"
version('1.14', '4de207d570999170c1bf45bcba8c6d2d')
depends_on('boost@1.41.0:')
depends_on('r-catools', type=('build', 'run'))
depends_on('r-snow', type=('build', 'run'))
depends_on('r-snowfall', type=('build', 'run'))
depends_on('r-bitops', type=('build', 'run'))
depends_on('r-rsamtools', type=('build', 'run'))
conflicts('%gcc@6:')
def setup_environment(self, spack_env, run_env):
spack_env.set('BOOST_ROOT', self.spec['boost'].prefix)
| 43.62 | 94 | 0.674461 | true | true | |
f7112b1cfcd98e70b9d0057ad3b84f430dded29a | 868 | py | Python | tests/test_tasks/test_supervised_task.py | hp2500/openml-python | 62cc534cd18e6e011a88a83816fec95a90399a9b | [
"BSD-3-Clause"
] | 1 | 2019-09-02T00:28:26.000Z | 2019-09-02T00:28:26.000Z | tests/test_tasks/test_supervised_task.py | hp2500/openml-python | 62cc534cd18e6e011a88a83816fec95a90399a9b | [
"BSD-3-Clause"
] | 8 | 2019-05-23T08:03:24.000Z | 2019-09-20T10:14:43.000Z | tests/test_tasks/test_supervised_task.py | hp2500/openml-python | 62cc534cd18e6e011a88a83816fec95a90399a9b | [
"BSD-3-Clause"
] | 2 | 2019-06-19T11:10:47.000Z | 2019-07-08T10:31:01.000Z | from typing import Tuple
import unittest
import numpy as np
from openml.tasks import get_task
from .test_task import OpenMLTaskTest
class OpenMLSupervisedTaskTest(OpenMLTaskTest):
"""
A helper class. The methods of the test case
are only executed in subclasses of the test case.
"""
__test__ = False
@classmethod
def setUpClass(cls):
if cls is OpenMLSupervisedTaskTest:
raise unittest.SkipTest(
"Skip OpenMLSupervisedTaskTest tests,"
" it's a base class"
)
super(OpenMLSupervisedTaskTest, cls).setUpClass()
def setUp(self, n_levels: int = 1):
super(OpenMLSupervisedTaskTest, self).setUp()
def test_get_X_and_Y(self) -> Tuple[np.ndarray, np.ndarray]:
task = get_task(self.task_id)
X, Y = task.get_X_and_y()
return X, Y
| 24.111111 | 64 | 0.652074 | from typing import Tuple
import unittest
import numpy as np
from openml.tasks import get_task
from .test_task import OpenMLTaskTest
class OpenMLSupervisedTaskTest(OpenMLTaskTest):
__test__ = False
@classmethod
def setUpClass(cls):
if cls is OpenMLSupervisedTaskTest:
raise unittest.SkipTest(
"Skip OpenMLSupervisedTaskTest tests,"
" it's a base class"
)
super(OpenMLSupervisedTaskTest, cls).setUpClass()
def setUp(self, n_levels: int = 1):
super(OpenMLSupervisedTaskTest, self).setUp()
def test_get_X_and_Y(self) -> Tuple[np.ndarray, np.ndarray]:
task = get_task(self.task_id)
X, Y = task.get_X_and_y()
return X, Y
| true | true |
f7112c22edd7f1c3e79cb1392172dface9da9a6f | 40,707 | py | Python | tensorflow/python/distribute/collective_all_reduce_strategy.py | neochristou/tensorflow | 50b55bfc5c9132c3bd82505181380bffbb47a5ff | [
"Apache-2.0"
] | 4 | 2021-06-30T10:53:39.000Z | 2021-09-19T16:52:00.000Z | tensorflow/python/distribute/collective_all_reduce_strategy.py | donny-stacks/tensorflow | 1fb338b1c42930c0eef4d0b4d8d5fdf24a678654 | [
"Apache-2.0"
] | 1 | 2020-08-28T18:17:58.000Z | 2020-08-28T18:17:58.000Z | tensorflow/python/distribute/collective_all_reduce_strategy.py | donny-stacks/tensorflow | 1fb338b1c42930c0eef4d0b4d8d5fdf24a678654 | [
"Apache-2.0"
] | 4 | 2022-01-13T11:23:44.000Z | 2022-03-02T11:11:42.000Z | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class CollectiveAllReduceStrategy implementing DistributionStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import threading
import time
import weakref
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.core.protobuf import tensorflow_server_pb2
from tensorflow.python.distribute import collective_util
from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
from tensorflow.python.distribute import cross_device_utils
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import distribute_utils
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.distribute import input_lib
from tensorflow.python.distribute import mirrored_strategy
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.distribute import numpy_dataset
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import values
from tensorflow.python.distribute.cluster_resolver import ClusterResolver
from tensorflow.python.distribute.cluster_resolver import SimpleClusterResolver
from tensorflow.python.distribute.cluster_resolver import TFConfigClusterResolver
from tensorflow.python.eager import context
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import collective_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.tracking import base
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
# pylint: disable=line-too-long
@tf_export("distribute.MultiWorkerMirroredStrategy", v1=[])
class CollectiveAllReduceStrategy(distribute_lib.Strategy):
"""A distribution strategy for synchronous training on multiple workers.
This strategy implements synchronous distributed training across multiple
workers, each with potentially multiple GPUs. Similar to
`tf.distribute.MirroredStrategy`, it replicates all variables and computations
to each local device. The difference is that it uses a distributed collective
implementation (e.g. all-reduce), so that multiple workers can work together.
You need to launch your program on each worker and configure
`cluster_resolver` correctly. For example, if you are using
`tf.distribute.cluster_resolver.TFConfigClusterResolver`, each worker needs to
have its corresponding `task_type` and `task_id` set in the `TF_CONFIG`
environment variable. An example TF_CONFIG on worker-0 of a two worker cluster
is:
```
TF_CONFIG = '{"cluster": {"worker": ["localhost:12345", "localhost:23456"]}, "task": {"type": "worker", "index": 0} }'
```
Your program runs on each worker as-is. Note that collectives require each
worker to participate. All `tf.distribute` and non `tf.distribute` API may use
collectives internally, e.g. checkpointing and saving since reading a
`tf.Variable` with `tf.VariableSynchronization.ON_READ` all-reduces the value.
Therefore it's recommended to run exactly the same program on each worker.
Dispatching based on `task_type` or `task_id` of the worker is error-prone.
`cluster_resolver.num_accelerators()` determines the number of GPUs the
strategy uses. If it's zero, the strategy uses the CPU. All workers need to
use the same number of devices, otherwise the behavior is undefined.
This strategy is not intended for TPU. Use `tf.distribute.TPUStrategy`
instead.
After setting up TF_CONFIG, using this strategy is similar to using
`tf.distribute.MirroredStrategy` and `tf.distribute.TPUStrategy`.
```
strategy = tf.distribute.MultiWorkerMirroredStrategy()
with strategy.scope():
model = tf.keras.Sequential([
tf.keras.layers.Dense(2, input_shape=(5,)),
])
optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
def dataset_fn(ctx):
x = np.random.random((2, 5)).astype(np.float32)
y = np.random.randint(2, size=(2, 1))
dataset = tf.data.Dataset.from_tensor_slices((x, y))
return dataset.repeat().batch(1, drop_remainder=True)
dist_dataset = strategy.distribute_datasets_from_function(dataset_fn)
model.compile()
model.fit(dist_dataset)
```
You can also write your own training loop:
```
@tf.function
def train_step(iterator):
def step_fn(inputs):
features, labels = inputs
with tf.GradientTape() as tape:
logits = model(features, training=True)
loss = tf.keras.losses.sparse_categorical_crossentropy(
labels, logits)
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
strategy.run(step_fn, args=(next(iterator),))
for _ in range(NUM_STEP):
train_step(iterator)
```
See
[Multi-worker training with Keras](https://www.tensorflow.org/tutorials/distribute/multi_worker_with_keras)
for a detailed tutorial.
__Saving__
You need to save and checkpoint on all workers instead of just one. This is
because variables whose synchronization=ON_READ triggers aggregation during
saving. It's recommended to save to a different path on each worker to avoid
race conditions. Each worker saves the same thing. See
[Multi-worker training with Keras](https://www.tensorflow.org/tutorials/distribute/multi_worker_with_keras#model_saving_and_loading)
tutorial for examples.
__Known Issues__
* `tf.distribute.cluster_resolver.TFConfigClusterResolver` does not return the
correct number of accelerators. The strategy uses all available GPUs if
`cluster_resolver` is `tf.distribute.cluster_resolver.TFConfigClusterResolver`
or `None`.
* In eager mode, the strategy needs to be created before calling any other
Tensorflow API.
"""
# pylint: enable=line-too-long
# TODO(anjalisridhar): Update our guides with examples showing how we can use
# the cluster_resolver argument.
# The starting number for collective keys. This should only be set in tests.
_collective_key_base = 0
  def __init__(self,
               cluster_resolver=None,
               communication_options=None):
    """Creates the strategy.

    Args:
      cluster_resolver: optional
        `tf.distribute.cluster_resolver.ClusterResolver`. If `None`,
        `tf.distribute.cluster_resolver.TFConfigClusterResolver` is used.
      communication_options: optional
        `tf.distribute.experimental.CommunicationOptions`. This configures the
        default options for cross device communications. It can be overridden by
        options provided to the communication APIs like
        `tf.distribute.ReplicaContext.all_reduce`. See
        `tf.distribute.experimental.CommunicationOptions` for details.
    """
    if communication_options is None:
      communication_options = collective_util.Options()
    super(CollectiveAllReduceStrategy, self).__init__(
        CollectiveAllReduceExtended(
            self,
            cluster_resolver=cluster_resolver,
            communication_options=communication_options))

    # Record usage metrics: which strategy was instantiated and the cluster
    # shape it resolved to. These must run after super().__init__ so that
    # `self.extended` is populated.
    distribute_lib.distribution_strategy_gauge.get_cell("V2").set(
        "MultiWorkerMirroredStrategy")
    # pylint: disable=protected-access
    distribute_lib.distribution_strategy_replica_gauge.get_cell(
        "num_workers").set(self.extended._num_workers)
    distribute_lib.distribution_strategy_replica_gauge.get_cell(
        "num_replicas_per_worker").set(self.extended._num_gpus_per_worker)
@classmethod
def _from_local_devices(cls, devices, communication_options=None):
"""A convenience method to create an object with a list of devices."""
obj = cls(communication_options=communication_options)
obj.extended._initialize_local(TFConfigClusterResolver(), devices=devices) # pylint: disable=protected-access
return obj
  @property
  def cluster_resolver(self):
    """Returns the cluster resolver associated with this strategy.

    As a multi-worker strategy, `tf.distribute.MultiWorkerMirroredStrategy`
    provides the associated `tf.distribute.cluster_resolver.ClusterResolver`. If
    the user provides one in `__init__`, that instance is returned; if the user
    does not, a default `TFConfigClusterResolver` is provided.
    """
    # The resolver is stored by CollectiveAllReduceExtended.__init__.
    return self.extended._cluster_resolver  # pylint: disable=protected-access
class _CollectiveAllReduceStrategyExperimentalMeta(type):
  """Metaclass that keeps `isinstance` checks backward compatible."""

  @classmethod
  def __instancecheck__(cls, instance):
    # This is to make isinstance(tf.distribute.MultiWorkerMirroredStrategy(),
    # tf.distribute.experimental.MultiWorkerMirroredStrategy) return True.
    # Some libraries perform such a check.
    return isinstance(instance, CollectiveAllReduceStrategy)
@tf_export("distribute.experimental.MultiWorkerMirroredStrategy", v1=[])
class _CollectiveAllReduceStrategyExperimental(
    CollectiveAllReduceStrategy,
    metaclass=_CollectiveAllReduceStrategyExperimentalMeta):

  # Deprecated alias of `tf.distribute.MultiWorkerMirroredStrategy`; shares its
  # documentation.
  __doc__ = CollectiveAllReduceStrategy.__doc__

  @deprecation.deprecated(
      None, "use distribute.MultiWorkerMirroredStrategy instead")
  def __init__(self,
               communication=collective_util.CommunicationImplementation.AUTO,
               cluster_resolver=None):
    """Creates the strategy.

    Args:
      communication: optional
        `tf.distribute.experimental.CommunicationImplementation`. This is a hint
        on the preferred collective communication implementation. Possible
        values include `AUTO`, `RING`, and `NCCL`.
      cluster_resolver: optional
        `tf.distribute.cluster_resolver.ClusterResolver`. If `None`,
        `tf.distribute.cluster_resolver.TFConfigClusterResolver` is used.
    """
    # Translate the legacy `communication` enum into the options object used by
    # the non-experimental implementation, then delegate to it.
    communication_options = collective_util.Options(
        implementation=communication)
    super(_CollectiveAllReduceStrategyExperimental,
          self).__init__(cluster_resolver, communication_options)

  @classmethod
  def _from_local_devices(
      cls,
      devices,
      communication=collective_util.CommunicationImplementation.AUTO):
    """A convenience method to create an object with a list of devices."""
    obj = cls(communication)
    obj.extended._initialize_local(TFConfigClusterResolver(), devices=devices)  # pylint: disable=protected-access
    return obj
# Make the deprecated experimental alias report the same class name as the
# non-experimental symbol in reprs and error messages.
_CollectiveAllReduceStrategyExperimental.__name__ = CollectiveAllReduceStrategy.__name__
@tf_export(v1=["distribute.experimental.MultiWorkerMirroredStrategy"])  # pylint: disable=missing-docstring
class CollectiveAllReduceStrategyV1(distribute_lib.StrategyV1):

  # TF1 variant; shares the documentation of the V2 strategy.
  __doc__ = CollectiveAllReduceStrategy.__doc__

  # The starting number for collective keys. This should only be set in tests.
  _collective_key_base = 0

  def __init__(self,
               communication=collective_util.CommunicationImplementation.AUTO,
               cluster_resolver=None):
    """Initializes the object.

    Args:
      communication: optional
        `tf.distribute.experimental.CommunicationImplementation`, a hint on the
        preferred collective communication implementation (`AUTO`, `RING` or
        `NCCL`).
      cluster_resolver: optional
        `tf.distribute.cluster_resolver.ClusterResolver`. If `None`,
        `tf.distribute.cluster_resolver.TFConfigClusterResolver` is used.
    """
    communication_options = collective_util.Options(
        implementation=communication)
    super(CollectiveAllReduceStrategyV1, self).__init__(
        CollectiveAllReduceExtended(
            self,
            cluster_resolver=cluster_resolver,
            communication_options=communication_options))
    # Record usage metrics for this strategy and its cluster shape.
    distribute_lib.distribution_strategy_gauge.get_cell("V1").set(
        "MultiWorkerMirroredStrategy")
    # pylint: disable=protected-access
    distribute_lib.distribution_strategy_replica_gauge.get_cell(
        "num_workers").set(self.extended._num_workers)
    distribute_lib.distribution_strategy_replica_gauge.get_cell(
        "num_gpu_per_worker").set(self.extended._num_gpus_per_worker)
class CollectiveAllReduceExtended(mirrored_strategy.MirroredExtended):
  """Implementation of CollectiveAllReduceStrategy."""

  # Whether to periodically check the health of the cluster. If any worker is
  # not reachable, collectives are aborted and the user program should get a
  # tf.errors.UnavailableError. It's required to restart in order to recover.
  _enable_check_health = True
  # Check health interval in seconds.
  _check_health_interval = 30
  # Timeout in seconds for the first check health. The first check health needs
  # to wait for the cluster, which may take a longer time.
  _check_health_initial_timeout = 0
  # Times to retry before considering the peer is down.
  _check_health_retry_limit = 3
  # Timeout in seconds for each check health.
  _check_health_timeout = 10

  def __init__(self, container_strategy, cluster_resolver,
               communication_options):
    if not isinstance(communication_options, collective_util.Options):
      raise ValueError("communication_options must be an instance of "
                       "tf.distribute.experimental.CommunicationOptions")
    self._cluster_resolver = cluster_resolver or TFConfigClusterResolver()
    if not isinstance(self._cluster_resolver, ClusterResolver):
      raise ValueError("cluster_resolver must be an instance of "
                       "tf.distribute.cluster_resolver.ClusterResolver")
    distribute_lib.StrategyExtendedV1.__init__(self, container_strategy)
    self._communication_options = communication_options
    self._collective_key_base = container_strategy._collective_key_base  # pylint: disable=protected-access
    self._initialize_strategy(self._cluster_resolver)
    self._cfer_fn_cache = weakref.WeakKeyDictionary()
    self.experimental_enable_get_next_as_optional = True
    assert isinstance(self._cross_device_ops,
                      cross_device_ops_lib.CollectiveAllReduce)

  def _use_merge_call(self):
    """XLA is not supported for multi-worker strategy."""
    return True

  def _initialize_strategy(self, cluster_resolver):
    # An empty cluster spec means a single-worker (local) setup.
    if cluster_resolver.cluster_spec().as_dict():
      self._initialize_multi_worker(cluster_resolver)
    else:
      self._initialize_local(cluster_resolver)

  def _initialize_local(self, cluster_resolver, devices=None):
    """Initializes the object for local training."""
    self._is_chief = True
    self._num_workers = 1

    if ops.executing_eagerly_outside_functions():
      try:
        context.context().configure_collective_ops(
            scoped_allocator_enabled_ops=("CollectiveReduce",))
      except RuntimeError:
        logging.warning("Collective ops is not configured at program startup. "
                        "Some performance features may not be enabled.")
      self._collective_ops_configured = True

    # TODO(b/126786766): TFConfigClusterResolver returns wrong number of GPUs in
    # some cases.
    if isinstance(cluster_resolver, TFConfigClusterResolver):
      num_gpus = context.num_gpus()
    else:
      num_gpus = cluster_resolver.num_accelerators().get("GPU", 0)

    if devices:
      local_devices = devices
    else:
      if num_gpus:
        local_devices = tuple("/device:GPU:%d" % i for i in range(num_gpus))
      else:
        local_devices = ("/device:CPU:0",)

    self._worker_device = device_util.canonicalize("/device:CPU:0")
    self._host_input_device = numpy_dataset.SingleDevice(self._worker_device)

    self._collective_keys = cross_device_utils.CollectiveKeys(
        group_key_start=1 + self._collective_key_base)
    self._cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(
        devices=local_devices,
        group_size=len(local_devices),
        collective_keys=self._collective_keys)
    # CrossDeviceOps for per host tensors.
    self._host_cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(
        devices=[self._worker_device],
        group_size=self._num_workers,
        collective_keys=self._collective_keys)
    super(CollectiveAllReduceExtended, self)._initialize_single_worker(
        local_devices)

    self._cluster_spec = None
    self._task_type = None
    self._task_id = None
    self._id_in_cluster = 0

    # This is a mark to tell whether we are running with standalone client or
    # independent worker. Right now with standalone client, strategy object is
    # created as local strategy and then turn into multi-worker strategy via
    # configure call.
    self._local_or_standalone_client_mode = True

    # Save the num_gpus_per_worker and rpc_layer for configure method.
    self._num_gpus_per_worker = num_gpus
    self._rpc_layer = cluster_resolver.rpc_layer
    self._warn_nccl_no_gpu()

    logging.info(
        "Single-worker MultiWorkerMirroredStrategy with local_devices "
        "= %r, communication = %s", local_devices,
        self._communication_options.implementation)

  def _initialize_multi_worker(self, cluster_resolver):
    """Initializes the object for multi-worker training."""
    cluster_spec = multi_worker_util.normalize_cluster_spec(
        cluster_resolver.cluster_spec())
    task_type = cluster_resolver.task_type
    task_id = cluster_resolver.task_id
    if task_type is None or task_id is None:
      raise ValueError("When `cluster_spec` is given, you must also specify "
                       "`task_type` and `task_id`.")
    self._cluster_spec = cluster_spec
    self._task_type = task_type
    self._task_id = task_id
    self._id_in_cluster = multi_worker_util.id_in_cluster(
        self._cluster_spec, self._task_type, self._task_id)

    self._num_workers = multi_worker_util.worker_count(cluster_spec, task_type)
    if not self._num_workers:
      raise ValueError("No `worker`, `chief` or `evaluator` tasks can be found "
                       "in `cluster_spec`.")

    self._is_chief = multi_worker_util.is_chief(cluster_spec, task_type,
                                                task_id)

    self._worker_device = "/job:%s/task:%d" % (task_type, task_id)
    self._host_input_device = numpy_dataset.SingleDevice(self._worker_device)

    if (ops.executing_eagerly_outside_functions() and
        not getattr(self, "_local_or_standalone_client_mode", False)):
      context.context().configure_collective_ops(
          collective_leader=multi_worker_util.collective_leader(
              cluster_spec, task_type, task_id),
          scoped_allocator_enabled_ops=("CollectiveReduce",),
          device_filters=("/job:%s/task:%d" % (task_type, task_id),))
      self._collective_ops_configured = True

    # Starting a std server in eager mode and in independent worker mode.
    if (context.executing_eagerly() and
        not getattr(self, "_std_server_started", False) and
        not getattr(self, "_local_or_standalone_client_mode", False)):
      # Checking _local_or_standalone_client_mode as well because we should not
      # create the std server in standalone client mode.
      config_proto = copy.deepcopy(context.context().config)
      config_proto = self._update_config_proto(config_proto)

      # If coordination service is enabled, use its internal heartbeat to detect
      # peer failures instead of the Python-level health check.
      if config_proto.experimental.coordination_service:
        self._enable_check_health = False

      if hasattr(cluster_resolver, "port"):
        port = cluster_resolver.port
      else:
        port = 0
      server_def = tensorflow_server_pb2.ServerDef(
          cluster=cluster_spec.as_cluster_def(),
          default_session_config=config_proto,
          job_name=task_type,
          task_index=task_id,
          protocol=cluster_resolver.rpc_layer or "grpc",
          port=port)
      context.context().enable_collective_ops(server_def)
      self._std_server_started = True
      # The `ensure_initialized` is needed before calling
      # `context.context().devices()`.
      context.context().ensure_initialized()
      logging.info(
          "Enabled multi-worker collective ops with available devices: %r",
          context.context().devices())

    # TODO(yuefengz): The `num_gpus` is only for this particular task. It
    # assumes all workers have the same number of GPUs. We should remove this
    # assumption by querying all tasks for their numbers of GPUs.
    # TODO(b/126786766): TFConfigClusterResolver returns wrong number of GPUs in
    # some cases.
    if isinstance(cluster_resolver, TFConfigClusterResolver):
      num_gpus = context.num_gpus()
    else:
      num_gpus = cluster_resolver.num_accelerators().get("GPU", 0)

    if num_gpus:
      local_devices = tuple("%s/device:GPU:%d" % (self._worker_device, i)
                            for i in range(num_gpus))
    else:
      local_devices = (self._worker_device,)

    self._collective_keys = cross_device_utils.CollectiveKeys(
        group_key_start=1 + self._collective_key_base)
    self._cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(
        devices=local_devices,
        group_size=len(local_devices) * self._num_workers,
        collective_keys=self._collective_keys)
    # CrossDeviceOps for per host tensors.
    self._host_cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(
        devices=[self._worker_device],
        group_size=self._num_workers,
        collective_keys=self._collective_keys)
    super(CollectiveAllReduceExtended, self)._initialize_single_worker(
        local_devices)

    # Add a default device so that ops without specified devices will not end up
    # on other workers.
    self._default_device = "/job:%s/task:%d" % (task_type, task_id)

    # Save the num_gpus_per_worker and rpc_layer for configure method.
    self._num_gpus_per_worker = num_gpus
    self._rpc_layer = cluster_resolver.rpc_layer
    self._warn_nccl_no_gpu()

    if self._enable_check_health and context.executing_eagerly():
      self._start_check_health_thread()
    else:
      logging.info("Check health not enabled.")

    logging.info(
        "MultiWorkerMirroredStrategy with cluster_spec = %r, task_type = %r, "
        "task_id = %r, num_workers = %r, local_devices = %r, "
        "communication = %s", cluster_spec.as_dict(), task_type, task_id,
        self._num_workers, local_devices,
        self._communication_options.implementation)

  def __del__(self):
    self._stop_check_health_thread()

  def _input_workers_with_options(self, options=None):
    host_device = device_util.get_host_for_device(self._worker_device)
    if not options or options.experimental_fetch_to_device:
      return input_lib.InputWorkers([(host_device, self.worker_devices)])
    else:
      return input_lib.InputWorkers([(
          host_device,
          [device_util.get_host_for_device(worker) for worker in
           self.worker_devices])])

  @property
  def _input_workers(self):
    return self._input_workers_with_options()

  def _get_variable_creator_initial_value(self,
                                          replica_id,
                                          device,
                                          primary_var,
                                          **kwargs):
    if replica_id == 0:  # First replica on each worker.
      assert device is not None
      assert primary_var is None

      def initial_value_fn():  # pylint: disable=g-missing-docstring
        # Only the first device participates in the broadcast of initial values.
        group_key = self._collective_keys.get_group_key([device])
        group_size = self._num_workers
        collective_instance_key = (
            self._collective_keys.get_instance_key(group_key, device))

        with ops.device(device):
          initial_value = kwargs["initial_value"]
          if callable(initial_value):
            initial_value = initial_value()
          if isinstance(initial_value, base.CheckpointInitialValue):
            initial_value = initial_value.wrapped_value
          assert not callable(initial_value)
          initial_value = ops.convert_to_tensor(
              initial_value, dtype=kwargs.get("dtype", None))

          if self._num_workers > 1:
            if self._is_chief:
              # Chief broadcasts its value; workers receive it so all replicas
              # start from identical variable values.
              bcast_send = collective_ops.broadcast_send(
                  initial_value, initial_value.shape, initial_value.dtype,
                  group_size, group_key, collective_instance_key)
              with ops.control_dependencies([bcast_send]):
                return array_ops.identity(initial_value)
            else:
              return collective_ops.broadcast_recv(initial_value.shape,
                                                   initial_value.dtype,
                                                   group_size, group_key,
                                                   collective_instance_key)
          return initial_value

      return initial_value_fn
    else:
      return super(CollectiveAllReduceExtended,
                   self)._get_variable_creator_initial_value(
                       replica_id=replica_id,
                       device=device,
                       primary_var=primary_var,
                       **kwargs)

  def _make_input_context(self):
    input_context = distribute_lib.InputContext(
        num_input_pipelines=self._num_workers,
        input_pipeline_id=self._id_in_cluster,
        num_replicas_in_sync=self._num_replicas_in_sync)
    return input_context

  def _experimental_distribute_dataset(self, dataset, options):
    if (options and options.experimental_replication_mode ==
        distribute_lib.InputReplicationMode.PER_REPLICA):
      raise NotImplementedError(
          "InputReplicationMode.PER_REPLICA "
          "is only supported in "
          "`distribute_datasets_from_function` "
          "of tf.distribute.MirroredStrategy"
      )
    input_context = self._make_input_context()
    return input_lib.get_distributed_dataset(
        dataset,
        self._input_workers_with_options(options),
        self._container_strategy(),
        num_replicas_in_sync=self._num_replicas_in_sync,
        input_context=input_context,
        options=options)

  def _distribute_datasets_from_function(self, dataset_fn, options):
    if (options and options.experimental_replication_mode ==
        distribute_lib.InputReplicationMode.PER_REPLICA):
      raise NotImplementedError(
          "InputReplicationMode.PER_REPLICA "
          "is only supported in "
          "`distribute_datasets_from_function` "
          "of tf.distribute.MirroredStrategy")
    input_context = self._make_input_context()
    return input_lib.get_distributed_datasets_from_function(
        dataset_fn=dataset_fn,
        input_workers=self._input_workers_with_options(options),
        input_contexts=[input_context],
        strategy=self._container_strategy(),
        options=options)

  def _experimental_distribute_values_from_function(self, value_fn):
    per_replica_values = []
    num_local_replicas = len(self.worker_devices)
    for local_replica_id in range(num_local_replicas):
      # Compute the global replica id from this worker's position in the
      # cluster and the local replica index.
      replica_id = (self._id_in_cluster * num_local_replicas +
                    local_replica_id)
      value_context = distribute_lib.ValueContext(
          replica_id, self._num_replicas_in_sync)
      per_replica_values.append(value_fn(value_context))
    return distribute_utils.regroup(per_replica_values, always_wrap=True)

  def _make_dataset_iterator(self, dataset):
    """Distributes the dataset to each local GPU."""
    input_context = self._make_input_context()
    return input_lib.DatasetIterator(
        dataset,
        self._input_workers,
        self._container_strategy(),
        num_replicas_in_sync=self._num_replicas_in_sync,
        input_context=input_context)

  def _make_input_fn_iterator(
      self,
      input_fn,
      replication_mode=distribute_lib.InputReplicationMode.PER_WORKER):
    """Distributes the input function to each local GPU."""
    input_context = self._make_input_context()
    return input_lib.InputFunctionIterator(input_fn, self._input_workers,
                                           [input_context],
                                           self._container_strategy())

  def _configure(self,
                 session_config=None,
                 cluster_spec=None,
                 task_type=None,
                 task_id=None):
    """Configures the object.

    Args:
      session_config: a `tf.compat.v1.ConfigProto`
      cluster_spec: a dict, ClusterDef or ClusterSpec object specifying the
        cluster configurations.
      task_type: the current task type, such as "worker".
      task_id: the current task id.

    Raises:
      ValueError: if `task_type` is not in the `cluster_spec`.
    """
    if cluster_spec:
      # Use the num_gpus_per_worker recorded in constructor since _configure
      # doesn't take num_gpus.
      cluster_resolver = SimpleClusterResolver(
          cluster_spec=multi_worker_util.normalize_cluster_spec(cluster_spec),
          task_type=task_type,
          task_id=task_id,
          num_accelerators={"GPU": self._num_gpus_per_worker},
          rpc_layer=self._rpc_layer)
      self._initialize_multi_worker(cluster_resolver)
      assert isinstance(self._cross_device_ops,
                        cross_device_ops_lib.CollectiveAllReduce)

    if session_config:
      session_config.CopyFrom(self._update_config_proto(session_config))

  def _update_config_proto(self, config_proto):
    updated_config = copy.deepcopy(config_proto)
    # Enable the scoped allocator optimization for CollectiveOps.  This
    # optimization converts many small all-reduces into fewer larger
    # all-reduces.
    rewrite_options = updated_config.graph_options.rewrite_options
    rewrite_options.scoped_allocator_optimization = (
        rewriter_config_pb2.RewriterConfig.ON)
    # We turn on ScopedAllocator only for CollectiveReduce op, i.e. enable_op =
    # ["CollectiveReduce"].  Since we can't assign to a repeated proto field, we
    # clear and then append.
    del rewrite_options.scoped_allocator_opts.enable_op[:]
    rewrite_options.scoped_allocator_opts.enable_op.append("CollectiveReduce")

    if (not ops.executing_eagerly_outside_functions() and
        self._communication_options.implementation ==
        collective_util.CommunicationImplementation.NCCL):
      updated_config.experimental.collective_nccl = True

    if not self._cluster_spec:
      return updated_config

    assert self._task_type
    assert self._task_id is not None

    # Collective group leader is needed for collective ops to coordinate
    # workers.
    updated_config.experimental.collective_group_leader = (
        multi_worker_util.collective_leader(self._cluster_spec, self._task_type,
                                            self._task_id))

    # The device filters prevent communication between workers.
    del updated_config.device_filters[:]
    updated_config.device_filters.append(
        "/job:%s/task:%d" % (self._task_type, self._task_id))

    return updated_config

  def _get_cross_device_ops(self, value):
    # CollectiveAllReduce works on a predefined set of devices. In most cases
    # they should be the compute devices, but certain use cases may reduce host
    # tensors as well (e.g. early stopping). We infer the cross_device_ops to
    # use based on the number of devices, since inputs don't always have device
    # annotations. The compute devices one is preferred since we can potentially
    # leverage NCCL.
    if isinstance(value, values.DistributedValues):
      num_devices = len(value._values)  # pylint: disable=protected-access
    else:
      num_devices = 1
    if num_devices == len(self.worker_devices):
      return self._cross_device_ops
    else:
      return self._host_cross_device_ops

  def _gather_to_implementation(self, value, destinations, axis, options):
    return self._get_cross_device_ops(value)._gather(  # pylint: disable=protected-access
        value,
        destinations=destinations,
        axis=axis,
        options=options)

  def _reduce_to(self, reduce_op, value, destinations, options):
    if (isinstance(value, values.Mirrored) and
        reduce_op == reduce_util.ReduceOp.MEAN):
      return value
    assert not isinstance(value, values.Mirrored)

    if (isinstance(value, values.DistributedValues) and
        len(self.worker_devices) == 1):
      value = value.values[0]

    # When there are multiple workers, we need to reduce across workers using
    # collective ops.
    if (not isinstance(value, values.DistributedValues) and
        self._num_workers == 1):
      # This function handles reducing values that are not PerReplica or
      # Mirrored values. For example, the same value could be present on all
      # replicas in which case `value` would be a single value or value could
      # be 0.
      return cross_device_ops_lib.reduce_non_distributed_value(
          reduce_op, value, destinations, len(self.worker_devices))
    return self._get_cross_device_ops(value).reduce(
        reduce_op,
        value,
        destinations=destinations,
        options=self._communication_options.merge(options))

  def _replica_ctx_all_reduce(self, reduce_op, value, options=None):
    """Implements `StrategyExtendedV2._replica_ctx_all_reduce`."""
    # This implementation avoids using `merge_call` and just launches collective
    # ops in one replica.
    if options is None:
      options = collective_util.Options()

    if context.executing_eagerly():
      # In eager mode, falls back to the default implementation that uses
      # `merge_call`. Replica functions are running sequentially in eager mode,
      # and due to the blocking nature of collective ops, execution will hang if
      # collective ops are to be launched sequentially.
      return super()._replica_ctx_all_reduce(reduce_op, value, options)

    replica_context = ds_context.get_replica_context()
    assert replica_context, (
        "`StrategyExtended._replica_ctx_all_reduce` must be called in a "
        "replica context")
    return self._cross_device_ops._all_reduce(  # pylint: disable=protected-access
        reduce_op,
        value,
        replica_context._replica_id,  # pylint: disable=protected-access
        options)

  def _check_health(self):
    # Background loop that pings every task in the cluster; aborts collectives
    # if a peer is unreachable so pending collectives fail fast instead of
    # hanging.
    while True:
      if self._check_health_thread_should_stop.is_set():
        return
      for job in self._cluster_spec.jobs:
        for task_id in range(self._cluster_spec.num_tasks(job)):
          peer = "/job:{}/replica:0/task:{}".format(job, task_id)
          attempts = 0
          while True:
            attempts += 1
            try:
              context.context().check_collective_ops_peer_health(
                  peer, timeout_in_ms=self._check_health_timeout * 1000)
              # If check_collective_ops_peer_health doesn't raise an Exception,
              # the peer is healthy.
              break
            except (errors.UnavailableError, errors.FailedPreconditionError,
                    errors.DeadlineExceededError) as e:
              # TODO(b/151232436): Always raise UnavailableError when a peer
              # fails. Now there could be many kinds of errors:
              # - Unavailable: when the peer is not reachable, e.g. it's down.
              # - FailedPrecondition: when the peer has restarted.
              if attempts < self._check_health_retry_limit:
                logging.warning("%s seems down, retrying %d/%d", peer, attempts,
                                self._check_health_retry_limit)
                continue
              logging.error(
                  "Cluster check alive failed, %s is down, "
                  "aborting collectives: %s", peer, e)
              context.context().abort_collective_ops(
                  errors.UNAVAILABLE,
                  "cluster check alive failed, {} is down".format(peer))
              return
            except Exception as e:  # pylint: disable=broad-except
              logging.error("Unexpected exception in check alive: %s", e)
              context.context().abort_collective_ops(
                  errors.INTERNAL,
                  # Fixed typo ("unexecpted") in this user-visible message.
                  "unexpected exception in check alive: %s" % e)
              return
      time.sleep(self._check_health_interval)

  def _start_check_health_thread(self):
    # Use a dummy all-reduce as a barrier to wait for all workers to be up,
    # otherwise the check health may fail immediately.

    # Use array_ops.identity to create the dummy tensor so that we have a new
    # Tensor. If we use constant it may be cached on a /job:localhost device,
    # which will cause some code that relies on tensor.device to error.
    #
    # TODO(b/151232436): change to an explicit barrier if we have it.
    dummy_value = array_ops.identity([])
    logging.info("Waiting for the cluster, timeout = %s",
                 self._check_health_initial_timeout or "inf")
    try:
      self._host_cross_device_ops.reduce(
          reduce_util.ReduceOp.SUM,
          dummy_value,
          dummy_value,
          options=collective_util.Options(
              timeout_seconds=self._check_health_initial_timeout,
              implementation=collective_util.CommunicationImplementation.RING))
      if context.is_async():
        context.async_wait()
    except errors.DeadlineExceededError:
      raise RuntimeError(
          "Timeout waiting for the cluster, timeout is %d seconds" %
          self._check_health_initial_timeout)
    logging.info("Cluster is ready.")
    self._check_health_thread_should_stop = threading.Event()
    # Start the thread as daemon to avoid it blocking the program from exiting.
    # We try best to shutdown the thread but __del__ is not guaranteed to be
    # called when the program exits.
    self._check_health_thread = threading.Thread(
        target=self._check_health,
        daemon=True)
    self._check_health_thread.start()

  def _stop_check_health_thread(self):
    if getattr(self, "_check_health_thread", None):
      logging.info("stopping check health thread")
      self._check_health_thread_should_stop.set()
      self._check_health_thread.join()
      self._check_health_thread = None
      logging.info("check health thread stopped")

  def _warn_nccl_no_gpu(self):
    if ((self._communication_options.implementation ==
         collective_util.CommunicationImplementation.NCCL) and
        self._num_gpus_per_worker == 0):
      logging.warning("Enabled NCCL communication but no GPUs detected/"
                      "specified.")

  def _in_multi_worker_mode(self):
    """Whether this strategy indicates working in multi-worker settings."""
    return self._num_workers > 1

  @property
  def experimental_between_graph(self):
    return True

  @property
  def experimental_should_init(self):
    return True

  @property
  def should_checkpoint(self):
    return self._is_chief

  @property
  def should_save_summary(self):
    return self._is_chief

  @property
  def _num_replicas_in_sync(self):
    return len(self.worker_devices) * self._num_workers

  # TODO(priyag): Delete this once all strategies use global batch size.
  @property
  def _global_batch_size(self):
    """`make_dataset_iterator` and `make_numpy_iterator` use global batch size.

    `make_input_fn_iterator` assumes per-replica batching.

    Returns:
      Boolean.
    """
    return True

  def _get_replica_id_in_sync_group(self, replica_id):
    return self._id_in_cluster * len(self.worker_devices) + replica_id

  def _get_local_replica_id(self, replica_id_in_sync_group):
    return (replica_id_in_sync_group -
            self._id_in_cluster * len(self.worker_devices))

  def __deepcopy__(self, memo):
    # We check the check health thread instead of whether we are in eager mode
    # to limit the backward incompatibility.
    if hasattr(self, "_check_health_thread"):
      raise ValueError(
          "MultiWorkerMirroredStrategy cannot be deep copied in eager mode. "
          "If you're using Estimator and see this error message, call "
          "tf.compat.v1.disable_eager_execution() at the beginning of your "
          "program")
    # Otherwise, do a regular deepcopy.
    cls = self.__class__
    result = cls.__new__(cls)
    memo[id(self)] = result
    for k, v in self.__dict__.items():
      setattr(result, k, copy.deepcopy(v, memo))
    return result
| 42.271028 | 134 | 0.714717 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import threading
import time
import weakref
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.core.protobuf import tensorflow_server_pb2
from tensorflow.python.distribute import collective_util
from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
from tensorflow.python.distribute import cross_device_utils
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import distribute_utils
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.distribute import input_lib
from tensorflow.python.distribute import mirrored_strategy
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.distribute import numpy_dataset
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import values
from tensorflow.python.distribute.cluster_resolver import ClusterResolver
from tensorflow.python.distribute.cluster_resolver import SimpleClusterResolver
from tensorflow.python.distribute.cluster_resolver import TFConfigClusterResolver
from tensorflow.python.eager import context
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import collective_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.tracking import base
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
@tf_export("distribute.MultiWorkerMirroredStrategy", v1=[])
class CollectiveAllReduceStrategy(distribute_lib.Strategy):
_collective_key_base = 0
def __init__(self,
cluster_resolver=None,
communication_options=None):
if communication_options is None:
communication_options = collective_util.Options()
super(CollectiveAllReduceStrategy, self).__init__(
CollectiveAllReduceExtended(
self,
cluster_resolver=cluster_resolver,
communication_options=communication_options))
distribute_lib.distribution_strategy_gauge.get_cell("V2").set(
"MultiWorkerMirroredStrategy")
distribute_lib.distribution_strategy_replica_gauge.get_cell(
"num_workers").set(self.extended._num_workers)
distribute_lib.distribution_strategy_replica_gauge.get_cell(
"num_replicas_per_worker").set(self.extended._num_gpus_per_worker)
@classmethod
def _from_local_devices(cls, devices, communication_options=None):
obj = cls(communication_options=communication_options)
obj.extended._initialize_local(TFConfigClusterResolver(), devices=devices)
return obj
@property
def cluster_resolver(self):
return self.extended._cluster_resolver
class _CollectiveAllReduceStrategyExperimentalMeta(type):
@classmethod
def __instancecheck__(cls, instance):
return isinstance(instance, CollectiveAllReduceStrategy)
@tf_export("distribute.experimental.MultiWorkerMirroredStrategy", v1=[])
class _CollectiveAllReduceStrategyExperimental(
CollectiveAllReduceStrategy,
metaclass=_CollectiveAllReduceStrategyExperimentalMeta):
__doc__ = CollectiveAllReduceStrategy.__doc__
@deprecation.deprecated(
None, "use distribute.MultiWorkerMirroredStrategy instead")
def __init__(self,
communication=collective_util.CommunicationImplementation.AUTO,
cluster_resolver=None):
communication_options = collective_util.Options(
implementation=communication)
super(_CollectiveAllReduceStrategyExperimental,
self).__init__(cluster_resolver, communication_options)
@classmethod
def _from_local_devices(
cls,
devices,
communication=collective_util.CommunicationImplementation.AUTO):
obj = cls(communication)
obj.extended._initialize_local(TFConfigClusterResolver(), devices=devices)
return obj
_CollectiveAllReduceStrategyExperimental.__name__ = CollectiveAllReduceStrategy.__name__
@tf_export(v1=["distribute.experimental.MultiWorkerMirroredStrategy"])
class CollectiveAllReduceStrategyV1(distribute_lib.StrategyV1):
__doc__ = CollectiveAllReduceStrategy.__doc__
_collective_key_base = 0
def __init__(self,
communication=collective_util.CommunicationImplementation.AUTO,
cluster_resolver=None):
communication_options = collective_util.Options(
implementation=communication)
super(CollectiveAllReduceStrategyV1, self).__init__(
CollectiveAllReduceExtended(
self,
cluster_resolver=cluster_resolver,
communication_options=communication_options))
distribute_lib.distribution_strategy_gauge.get_cell("V1").set(
"MultiWorkerMirroredStrategy")
distribute_lib.distribution_strategy_replica_gauge.get_cell(
"num_workers").set(self.extended._num_workers)
distribute_lib.distribution_strategy_replica_gauge.get_cell(
"num_gpu_per_worker").set(self.extended._num_gpus_per_worker)
class CollectiveAllReduceExtended(mirrored_strategy.MirroredExtended):
_enable_check_health = True
# Check health interval in seconds.
_check_health_interval = 30
# Timeout in seconds for the first check health. The first check health needs
# to wait for cluster, which may make a longer time.
_check_health_initial_timeout = 0
# Times to retry before considering the peer is down.
_check_health_retry_limit = 3
# Timeout in seconds the each check health.
_check_health_timeout = 10
def __init__(self, container_strategy, cluster_resolver,
communication_options):
if not isinstance(communication_options, collective_util.Options):
raise ValueError("communication_options must be an instance of "
"tf.distribute.experimental.CommunicationOptions")
self._cluster_resolver = cluster_resolver or TFConfigClusterResolver()
if not isinstance(self._cluster_resolver, ClusterResolver):
raise ValueError("cluster_resolver must be an instance of "
"tf.distribute.cluster_resolver.ClusterResolver")
distribute_lib.StrategyExtendedV1.__init__(self, container_strategy)
self._communication_options = communication_options
self._collective_key_base = container_strategy._collective_key_base # pylint: disable=protected-access
self._initialize_strategy(self._cluster_resolver)
self._cfer_fn_cache = weakref.WeakKeyDictionary()
self.experimental_enable_get_next_as_optional = True
assert isinstance(self._cross_device_ops,
cross_device_ops_lib.CollectiveAllReduce)
def _use_merge_call(self):
return True
def _initialize_strategy(self, cluster_resolver):
if cluster_resolver.cluster_spec().as_dict():
self._initialize_multi_worker(cluster_resolver)
else:
self._initialize_local(cluster_resolver)
def _initialize_local(self, cluster_resolver, devices=None):
self._is_chief = True
self._num_workers = 1
if ops.executing_eagerly_outside_functions():
try:
context.context().configure_collective_ops(
scoped_allocator_enabled_ops=("CollectiveReduce",))
except RuntimeError:
logging.warning("Collective ops is not configured at program startup. "
"Some performance features may not be enabled.")
self._collective_ops_configured = True
# TODO(b/126786766): TFConfigClusterResolver returns wrong number of GPUs in
# some cases.
if isinstance(cluster_resolver, TFConfigClusterResolver):
num_gpus = context.num_gpus()
else:
num_gpus = cluster_resolver.num_accelerators().get("GPU", 0)
if devices:
local_devices = devices
else:
if num_gpus:
local_devices = tuple("/device:GPU:%d" % i for i in range(num_gpus))
else:
local_devices = ("/device:CPU:0",)
self._worker_device = device_util.canonicalize("/device:CPU:0")
self._host_input_device = numpy_dataset.SingleDevice(self._worker_device)
self._collective_keys = cross_device_utils.CollectiveKeys(
group_key_start=1 + self._collective_key_base)
self._cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(
devices=local_devices,
group_size=len(local_devices),
collective_keys=self._collective_keys)
# CrossDeviceOps for per host tensors.
self._host_cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(
devices=[self._worker_device],
group_size=self._num_workers,
collective_keys=self._collective_keys)
super(CollectiveAllReduceExtended, self)._initialize_single_worker(
local_devices)
self._cluster_spec = None
self._task_type = None
self._task_id = None
self._id_in_cluster = 0
# This is a mark to tell whether we are running with standalone client or
# independent worker. Right now with standalone client, strategy object is
# created as local strategy and then turn into multi-worker strategy via
# configure call.
self._local_or_standalone_client_mode = True
# Save the num_gpus_per_worker and rpc_layer for configure method.
self._num_gpus_per_worker = num_gpus
self._rpc_layer = cluster_resolver.rpc_layer
self._warn_nccl_no_gpu()
logging.info(
"Single-worker MultiWorkerMirroredStrategy with local_devices "
"= %r, communication = %s", local_devices,
self._communication_options.implementation)
def _initialize_multi_worker(self, cluster_resolver):
cluster_spec = multi_worker_util.normalize_cluster_spec(
cluster_resolver.cluster_spec())
task_type = cluster_resolver.task_type
task_id = cluster_resolver.task_id
if task_type is None or task_id is None:
raise ValueError("When `cluster_spec` is given, you must also specify "
"`task_type` and `task_id`.")
self._cluster_spec = cluster_spec
self._task_type = task_type
self._task_id = task_id
self._id_in_cluster = multi_worker_util.id_in_cluster(
self._cluster_spec, self._task_type, self._task_id)
self._num_workers = multi_worker_util.worker_count(cluster_spec, task_type)
if not self._num_workers:
raise ValueError("No `worker`, `chief` or `evaluator` tasks can be found "
"in `cluster_spec`.")
self._is_chief = multi_worker_util.is_chief(cluster_spec, task_type,
task_id)
self._worker_device = "/job:%s/task:%d" % (task_type, task_id)
self._host_input_device = numpy_dataset.SingleDevice(self._worker_device)
if (ops.executing_eagerly_outside_functions() and
not getattr(self, "_local_or_standalone_client_mode", False)):
context.context().configure_collective_ops(
collective_leader=multi_worker_util.collective_leader(
cluster_spec, task_type, task_id),
scoped_allocator_enabled_ops=("CollectiveReduce",),
device_filters=("/job:%s/task:%d" % (task_type, task_id),))
self._collective_ops_configured = True
# Starting a std server in eager mode and in independent worker mode.
if (context.executing_eagerly() and
not getattr(self, "_std_server_started", False) and
not getattr(self, "_local_or_standalone_client_mode", False)):
# Checking _local_or_standalone_client_mode as well because we should not
# create the std server in standalone client mode.
config_proto = copy.deepcopy(context.context().config)
config_proto = self._update_config_proto(config_proto)
# If coordination service is enabled, use its internal heartbeat to detect
# peer failures instead of the Python-level health check.
if config_proto.experimental.coordination_service:
self._enable_check_health = False
if hasattr(cluster_resolver, "port"):
port = cluster_resolver.port
else:
port = 0
server_def = tensorflow_server_pb2.ServerDef(
cluster=cluster_spec.as_cluster_def(),
default_session_config=config_proto,
job_name=task_type,
task_index=task_id,
protocol=cluster_resolver.rpc_layer or "grpc",
port=port)
context.context().enable_collective_ops(server_def)
self._std_server_started = True
# The `ensure_initialized` is needed before calling
# `context.context().devices()`.
context.context().ensure_initialized()
logging.info(
"Enabled multi-worker collective ops with available devices: %r",
context.context().devices())
# TODO(yuefengz): The `num_gpus` is only for this particular task. It
# assumes all workers have the same number of GPUs. We should remove this
# assumption by querying all tasks for their numbers of GPUs.
# TODO(b/126786766): TFConfigClusterResolver returns wrong number of GPUs in
# some cases.
if isinstance(cluster_resolver, TFConfigClusterResolver):
num_gpus = context.num_gpus()
else:
num_gpus = cluster_resolver.num_accelerators().get("GPU", 0)
if num_gpus:
local_devices = tuple("%s/device:GPU:%d" % (self._worker_device, i)
for i in range(num_gpus))
else:
local_devices = (self._worker_device,)
self._collective_keys = cross_device_utils.CollectiveKeys(
group_key_start=1 + self._collective_key_base)
self._cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(
devices=local_devices,
group_size=len(local_devices) * self._num_workers,
collective_keys=self._collective_keys)
# CrossDeviceOps for per host tensors.
self._host_cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(
devices=[self._worker_device],
group_size=self._num_workers,
collective_keys=self._collective_keys)
super(CollectiveAllReduceExtended, self)._initialize_single_worker(
local_devices)
# Add a default device so that ops without specified devices will not end up
# on other workers.
self._default_device = "/job:%s/task:%d" % (task_type, task_id)
# Save the num_gpus_per_worker and rpc_layer for configure method.
self._num_gpus_per_worker = num_gpus
self._rpc_layer = cluster_resolver.rpc_layer
self._warn_nccl_no_gpu()
if self._enable_check_health and context.executing_eagerly():
self._start_check_health_thread()
else:
logging.info("Check health not enabled.")
logging.info(
"MultiWorkerMirroredStrategy with cluster_spec = %r, task_type = %r, "
"task_id = %r, num_workers = %r, local_devices = %r, "
"communication = %s", cluster_spec.as_dict(), task_type, task_id,
self._num_workers, local_devices,
self._communication_options.implementation)
def __del__(self):
self._stop_check_health_thread()
def _input_workers_with_options(self, options=None):
host_device = device_util.get_host_for_device(self._worker_device)
if not options or options.experimental_fetch_to_device:
return input_lib.InputWorkers([(host_device, self.worker_devices)])
else:
return input_lib.InputWorkers([(
host_device,
[device_util.get_host_for_device(worker) for worker in
self.worker_devices])])
@property
def _input_workers(self):
return self._input_workers_with_options()
def _get_variable_creator_initial_value(self,
replica_id,
device,
primary_var,
**kwargs):
if replica_id == 0: # First replica on each worker.
assert device is not None
assert primary_var is None
def initial_value_fn(): # pylint: disable=g-missing-docstring
# Only the first device participates in the broadcast of initial values.
group_key = self._collective_keys.get_group_key([device])
group_size = self._num_workers
collective_instance_key = (
self._collective_keys.get_instance_key(group_key, device))
with ops.device(device):
initial_value = kwargs["initial_value"]
if callable(initial_value):
initial_value = initial_value()
if isinstance(initial_value, base.CheckpointInitialValue):
initial_value = initial_value.wrapped_value
assert not callable(initial_value)
initial_value = ops.convert_to_tensor(
initial_value, dtype=kwargs.get("dtype", None))
if self._num_workers > 1:
if self._is_chief:
bcast_send = collective_ops.broadcast_send(
initial_value, initial_value.shape, initial_value.dtype,
group_size, group_key, collective_instance_key)
with ops.control_dependencies([bcast_send]):
return array_ops.identity(initial_value)
else:
return collective_ops.broadcast_recv(initial_value.shape,
initial_value.dtype,
group_size, group_key,
collective_instance_key)
return initial_value
return initial_value_fn
else:
return super(CollectiveAllReduceExtended,
self)._get_variable_creator_initial_value(
replica_id=replica_id,
device=device,
primary_var=primary_var,
**kwargs)
def _make_input_context(self):
input_context = distribute_lib.InputContext(
num_input_pipelines=self._num_workers,
input_pipeline_id=self._id_in_cluster,
num_replicas_in_sync=self._num_replicas_in_sync)
return input_context
def _experimental_distribute_dataset(self, dataset, options):
if (options and options.experimental_replication_mode ==
distribute_lib.InputReplicationMode.PER_REPLICA):
raise NotImplementedError(
"InputReplicationMode.PER_REPLICA "
"is only supported in "
"`distribute_datasets_from_function` "
"of tf.distribute.MirroredStrategy"
)
input_context = self._make_input_context()
return input_lib.get_distributed_dataset(
dataset,
self._input_workers_with_options(options),
self._container_strategy(),
num_replicas_in_sync=self._num_replicas_in_sync,
input_context=input_context,
options=options)
def _distribute_datasets_from_function(self, dataset_fn, options):
if (options and options.experimental_replication_mode ==
distribute_lib.InputReplicationMode.PER_REPLICA):
raise NotImplementedError(
"InputReplicationMode.PER_REPLICA "
"is only supported in "
"`distribute_datasets_from_function` "
"of tf.distribute.MirroredStrategy")
input_context = self._make_input_context()
return input_lib.get_distributed_datasets_from_function(
dataset_fn=dataset_fn,
input_workers=self._input_workers_with_options(options),
input_contexts=[input_context],
strategy=self._container_strategy(),
options=options)
def _experimental_distribute_values_from_function(self, value_fn):
per_replica_values = []
num_local_replicas = len(self.worker_devices)
for local_replica_id in range(num_local_replicas):
replica_id = (self._id_in_cluster * num_local_replicas +
local_replica_id)
value_context = distribute_lib.ValueContext(
replica_id, self._num_replicas_in_sync)
per_replica_values.append(value_fn(value_context))
return distribute_utils.regroup(per_replica_values, always_wrap=True)
def _make_dataset_iterator(self, dataset):
input_context = self._make_input_context()
return input_lib.DatasetIterator(
dataset,
self._input_workers,
self._container_strategy(),
num_replicas_in_sync=self._num_replicas_in_sync,
input_context=input_context)
def _make_input_fn_iterator(
self,
input_fn,
replication_mode=distribute_lib.InputReplicationMode.PER_WORKER):
input_context = self._make_input_context()
return input_lib.InputFunctionIterator(input_fn, self._input_workers,
[input_context],
self._container_strategy())
def _configure(self,
session_config=None,
cluster_spec=None,
task_type=None,
task_id=None):
if cluster_spec:
# Use the num_gpus_per_worker recorded in constructor since _configure
# doesn't take num_gpus.
cluster_resolver = SimpleClusterResolver(
cluster_spec=multi_worker_util.normalize_cluster_spec(cluster_spec),
task_type=task_type,
task_id=task_id,
num_accelerators={"GPU": self._num_gpus_per_worker},
rpc_layer=self._rpc_layer)
self._initialize_multi_worker(cluster_resolver)
assert isinstance(self._cross_device_ops,
cross_device_ops_lib.CollectiveAllReduce)
if session_config:
session_config.CopyFrom(self._update_config_proto(session_config))
def _update_config_proto(self, config_proto):
updated_config = copy.deepcopy(config_proto)
rewrite_options = updated_config.graph_options.rewrite_options
rewrite_options.scoped_allocator_optimization = (
rewriter_config_pb2.RewriterConfig.ON)
# clear and then append.
del rewrite_options.scoped_allocator_opts.enable_op[:]
rewrite_options.scoped_allocator_opts.enable_op.append("CollectiveReduce")
if (not ops.executing_eagerly_outside_functions() and
self._communication_options.implementation ==
collective_util.CommunicationImplementation.NCCL):
updated_config.experimental.collective_nccl = True
if not self._cluster_spec:
return updated_config
assert self._task_type
assert self._task_id is not None
# Collective group leader is needed for collective ops to coordinate
# workers.
updated_config.experimental.collective_group_leader = (
multi_worker_util.collective_leader(self._cluster_spec, self._task_type,
self._task_id))
# The device filters prevent communication between workers.
del updated_config.device_filters[:]
updated_config.device_filters.append(
"/job:%s/task:%d" % (self._task_type, self._task_id))
return updated_config
def _get_cross_device_ops(self, value):
# CollectiveAllReduce works on a predefined set of devices. In most cases
# they should be the compute devices, but certain use cases may reduce host
# tensors as well (e.g. early stopping). We infer the cross_device_ops to
# use based on the number of devices, since inputs don't always have device
if isinstance(value, values.DistributedValues):
num_devices = len(value._values)
else:
num_devices = 1
if num_devices == len(self.worker_devices):
return self._cross_device_ops
else:
return self._host_cross_device_ops
def _gather_to_implementation(self, value, destinations, axis, options):
return self._get_cross_device_ops(value)._gather(
value,
destinations=destinations,
axis=axis,
options=options)
def _reduce_to(self, reduce_op, value, destinations, options):
if (isinstance(value, values.Mirrored) and
reduce_op == reduce_util.ReduceOp.MEAN):
return value
assert not isinstance(value, values.Mirrored)
if (isinstance(value, values.DistributedValues) and
len(self.worker_devices) == 1):
value = value.values[0]
if (not isinstance(value, values.DistributedValues) and
self._num_workers == 1):
return cross_device_ops_lib.reduce_non_distributed_value(
reduce_op, value, destinations, len(self.worker_devices))
return self._get_cross_device_ops(value).reduce(
reduce_op,
value,
destinations=destinations,
options=self._communication_options.merge(options))
def _replica_ctx_all_reduce(self, reduce_op, value, options=None):
if options is None:
options = collective_util.Options()
if context.executing_eagerly():
return super()._replica_ctx_all_reduce(reduce_op, value, options)
replica_context = ds_context.get_replica_context()
assert replica_context, (
"`StrategyExtended._replica_ctx_all_reduce` must be called in a "
"replica context")
return self._cross_device_ops._all_reduce(
reduce_op,
value,
replica_context._replica_id,
options)
def _check_health(self):
while True:
if self._check_health_thread_should_stop.is_set():
return
for job in self._cluster_spec.jobs:
for task_id in range(self._cluster_spec.num_tasks(job)):
peer = "/job:{}/replica:0/task:{}".format(job, task_id)
attempts = 0
while True:
attempts += 1
try:
context.context().check_collective_ops_peer_health(
peer, timeout_in_ms=self._check_health_timeout * 1000)
# the peer is healthy.
break
except (errors.UnavailableError, errors.FailedPreconditionError,
errors.DeadlineExceededError) as e:
# TODO(b/151232436): Always raise UnavailableError when a peer
# fails. Now there could be many kinds of errors:
# - Unavailable: when the peer is not reachable, e.g. it's down.
if attempts < self._check_health_retry_limit:
logging.warning("%s seems down, retrying %d/%d", peer, attempts,
self._check_health_retry_limit)
continue
logging.error(
"Cluster check alive failed, %s is down, "
"aborting collectives: %s", peer, e)
context.context().abort_collective_ops(
errors.UNAVAILABLE,
"cluster check alive failed, {} is down".format(peer))
return
except Exception as e:
logging.error("Unexpected exception in check alive: %s", e)
context.context().abort_collective_ops(
errors.INTERNAL,
"unexecpted exception in check alive: %s" % e)
return
time.sleep(self._check_health_interval)
def _start_check_health_thread(self):
dummy_value = array_ops.identity([])
logging.info("Waiting for the cluster, timeout = %s",
self._check_health_initial_timeout or "inf")
try:
self._host_cross_device_ops.reduce(
reduce_util.ReduceOp.SUM,
dummy_value,
dummy_value,
options=collective_util.Options(
timeout_seconds=self._check_health_initial_timeout,
implementation=collective_util.CommunicationImplementation.RING))
if context.is_async():
context.async_wait()
except errors.DeadlineExceededError:
raise RuntimeError(
"Timeout waiting for the cluster, timeout is %d seconds" %
self._check_health_initial_timeout)
logging.info("Cluster is ready.")
self._check_health_thread_should_stop = threading.Event()
self._check_health_thread = threading.Thread(
target=self._check_health,
daemon=True)
self._check_health_thread.start()
def _stop_check_health_thread(self):
if getattr(self, "_check_health_thread", None):
logging.info("stopping check health thread")
self._check_health_thread_should_stop.set()
self._check_health_thread.join()
self._check_health_thread = None
logging.info("check health thread stopped")
def _warn_nccl_no_gpu(self):
if ((self._communication_options.implementation ==
collective_util.CommunicationImplementation.NCCL) and
self._num_gpus_per_worker == 0):
logging.warning("Enabled NCCL communication but no GPUs detected/"
"specified.")
def _in_multi_worker_mode(self):
return self._num_workers > 1
@property
def experimental_between_graph(self):
return True
@property
def experimental_should_init(self):
return True
@property
def should_checkpoint(self):
return self._is_chief
@property
def should_save_summary(self):
return self._is_chief
@property
def _num_replicas_in_sync(self):
return len(self.worker_devices) * self._num_workers
@property
def _global_batch_size(self):
return True
def _get_replica_id_in_sync_group(self, replica_id):
return self._id_in_cluster * len(self.worker_devices) + replica_id
def _get_local_replica_id(self, replica_id_in_sync_group):
return (replica_id_in_sync_group -
self._id_in_cluster * len(self.worker_devices))
def __deepcopy__(self, memo):
if hasattr(self, "_check_health_thread"):
raise ValueError(
"MultiWorkerMirroredStrategy cannot be deep copied in eager mode. "
"If you're using Estimator and see this error message, call "
"tf.compat.v1.disable_eager_execution() at the beginning of your "
"program")
# Otherwise, do a regular deepcopy.
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
setattr(result, k, copy.deepcopy(v, memo))
return result
| true | true |
f7112ca75afc4d177c66e264175abe37bea02fdb | 1,299 | py | Python | instastalk/constants.py | jjkoh95/instastalk | e16662d8b0eb22f4d80a2a760674538601f8bb00 | [
"MIT"
] | 4 | 2019-12-19T03:06:24.000Z | 2020-12-08T01:59:52.000Z | instastalk/constants.py | jjkoh95/instastalk | e16662d8b0eb22f4d80a2a760674538601f8bb00 | [
"MIT"
] | null | null | null | instastalk/constants.py | jjkoh95/instastalk | e16662d8b0eb22f4d80a2a760674538601f8bb00 | [
"MIT"
] | 2 | 2019-12-31T02:01:24.000Z | 2020-03-13T07:41:44.000Z | QUERY_HASH = '42323d64886122307be10013ad2dcc44'
STORIES_QUERY_HASH = '45246d3fe16ccc6577e0bd297a5db1ab'
SHORTCODE_QUERY_HASH = 'fead941d698dc1160a298ba7bec277ac'
BASE_URL = "https://www.instagram.com"
LOGIN_REFERER = f'{BASE_URL}/accounts/login'
LOGIN_URL = f'{BASE_URL}/accounts/login/ajax/'
LOGOUT_URL = f'{BASE_URL}/accounts/logout/'
QUERY_URL = f'{BASE_URL}/graphql/query/'
QUERY_POST_URL = f'{QUERY_URL}?' + \
f'query_hash={QUERY_HASH}&' + \
'variables=%7B"id"%3A"{id}"%2C"first"%3A{first}%2C"after"%3A"{after}"%7D'
SHORTCODE_URL = f'{QUERY_URL}?' + \
f'query_hash={SHORTCODE_QUERY_HASH}&' + \
'variables=%7B"shortcode"%3A"{shortcode}"%2C"child_comment_count"%3A{child_comment_count}%2C"fetch_comment_count"%3A{fetch_comment_count}%2C"parent_comment_count"%3A{parent_comment_count}%2C"has_threaded_comments"%3A{has_threaded_comments}%7D'
STORIES_API_URL = BASE_URL + '/graphql/query/?' + \
f'query_hash={STORIES_QUERY_HASH}&' + \
'variables=%7B%22' + \
'reel_ids%22%3A%5B%22{id}%22%5D%2C%22' + \
'tag_names%22%3A%5B%5D%2C%22' + \
'location_ids%22%3A%5B%5D%2C%22' + \
'highlight_reel_ids%22%3A%5B%5D%2C%22' + \
'precomposed_overlay%22%3Afalse%7D'
# make my life easy
# think python might already handle this
null = None
true = True
false = False
| 39.363636 | 247 | 0.722864 | QUERY_HASH = '42323d64886122307be10013ad2dcc44'
STORIES_QUERY_HASH = '45246d3fe16ccc6577e0bd297a5db1ab'
SHORTCODE_QUERY_HASH = 'fead941d698dc1160a298ba7bec277ac'
BASE_URL = "https://www.instagram.com"
LOGIN_REFERER = f'{BASE_URL}/accounts/login'
LOGIN_URL = f'{BASE_URL}/accounts/login/ajax/'
LOGOUT_URL = f'{BASE_URL}/accounts/logout/'
QUERY_URL = f'{BASE_URL}/graphql/query/'
QUERY_POST_URL = f'{QUERY_URL}?' + \
f'query_hash={QUERY_HASH}&' + \
'variables=%7B"id"%3A"{id}"%2C"first"%3A{first}%2C"after"%3A"{after}"%7D'
SHORTCODE_URL = f'{QUERY_URL}?' + \
f'query_hash={SHORTCODE_QUERY_HASH}&' + \
'variables=%7B"shortcode"%3A"{shortcode}"%2C"child_comment_count"%3A{child_comment_count}%2C"fetch_comment_count"%3A{fetch_comment_count}%2C"parent_comment_count"%3A{parent_comment_count}%2C"has_threaded_comments"%3A{has_threaded_comments}%7D'
STORIES_API_URL = BASE_URL + '/graphql/query/?' + \
f'query_hash={STORIES_QUERY_HASH}&' + \
'variables=%7B%22' + \
'reel_ids%22%3A%5B%22{id}%22%5D%2C%22' + \
'tag_names%22%3A%5B%5D%2C%22' + \
'location_ids%22%3A%5B%5D%2C%22' + \
'highlight_reel_ids%22%3A%5B%5D%2C%22' + \
'precomposed_overlay%22%3Afalse%7D'
null = None
true = True
false = False
| true | true |
f7112ccc230534981bbe0dd4d71f57e20b5aabfa | 4,913 | py | Python | haas_lib_bundles/python/docs/examples/smart_fan/esp32/code/main.py | wstong999/AliOS-Things | 6554769cb5b797e28a30a4aa89b3f4cb2ef2f5d9 | [
"Apache-2.0"
] | null | null | null | haas_lib_bundles/python/docs/examples/smart_fan/esp32/code/main.py | wstong999/AliOS-Things | 6554769cb5b797e28a30a4aa89b3f4cb2ef2f5d9 | [
"Apache-2.0"
] | null | null | null | haas_lib_bundles/python/docs/examples/smart_fan/esp32/code/main.py | wstong999/AliOS-Things | 6554769cb5b797e28a30a4aa89b3f4cb2ef2f5d9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- encoding: utf-8 -*-
'''
@File : main.py
@Author : guoliang.wgl
@version : 1.0
@Description: smart_fan案例 - 智能控制小风扇
board.json - 硬件资源配置文件
'''
from fan import Fan
from aht21b import AHT21B
from driver import PWM, I2C
import time
from aliyunIoT import Device # iot组件是连接阿里云物联网平台的组件
import json
# Flag flipped by on_connect() once the link to the IoT platform is up.
iot_connected = False
wlan = None
# Device "triple" credentials issued by Alibaba Cloud IoT Platform.
# The placeholder strings below must be replaced with real values.
productKey = "产品密钥"
deviceName = "设备名称"
deviceSecret = "设备密钥"
# aliyunIoT Device handle, created in connect_lk().
device = None
# Wi-Fi SSID and password of the router to join (placeholders).
wifiSsid = "请输入您的路由器名称"
wifiPassword = "请输入您的路由器密码"
# Fan gear temperature thresholds (°C); adjustable from the cloud via
# on_props().  (The original comment here described alarm time windows from
# a different demo and did not apply to these values.)
gear1_temp = 22
gear2_temp = 27
gear3_temp = 32
# Property identifiers matching the IoT platform's Thing model.
FLAG_CUR_TEMP = "cur_temp"
FLAG_GEAR1 = "gear1"
FLAG_GEAR2 = "gear2"
FLAG_GEAR3 = "gear3"
# Gear currently applied to the fan (0-3; see the main loop).
cur_gear = 0
# Block until the board's Wi-Fi station interface has joined the router.
def get_wifi_status():
    """Connect `wlan` to (wifiSsid, wifiPassword) and wait for association.

    Polls wlan.isconnected() every 0.5 s and returns only once connected,
    after printing the interface configuration for debugging.
    """
    global wlan
    wifi_connected = False
    wlan.active(True)  # bring the station interface up
    wlan.scan()  # scan for access points
    #print("start to connect ", wifiSsid)
    # Join the router identified by wifiSsid / wifiPassword.
    wlan.connect(wifiSsid, wifiPassword)
    while True:
        wifi_connected = wlan.isconnected()  # poll association state
        if wifi_connected:  # leave the loop once associated
            break
        else:
            time.sleep(0.5)
    print("wifi_connected:", wifi_connected)
    ifconfig = wlan.ifconfig()  # (IP, netmask, gateway, DNS) tuple
    print(ifconfig)
    time.sleep(0.5)
# Callback registered with Device.ON_CONNECT; runs once the connection to
# the IoT platform succeeds.
def on_connect(data):
    """Flag the platform link as established; polled by connect_lk()."""
    global iot_connected
    iot_connected = True
# Property-set handler: called by aliyunIoT when the cloud pushes property
# values down to the device.
def on_props(request):
    """Apply gear thresholds pushed from the cloud.

    Args:
        request: dict from aliyunIoT; request['params'] is a JSON string
            such as '{"gear1": 25}' carrying one or more properties.

    Side effects: updates the global gear*_temp thresholds and echoes the
    current values back to the cloud via post_default_value().
    """
    global FLAG_GEAR1, FLAG_GEAR2, FLAG_GEAR3, gear1_temp, gear2_temp, gear3_temp
    try:
        # Parse with json.loads instead of eval: the payload is plain JSON
        # (eval would both execute arbitrary code received from the network
        # and choke on JSON literals like true/false/null).
        props = json.loads(request['params'])
        # Independent "if"s (not elif): a single downlink message may carry
        # several properties at once, and all of them must be applied.
        if FLAG_GEAR1 in props:
            gear1_temp = props[FLAG_GEAR1]
            print('on_props: name is {},value is {}'.format(
                FLAG_GEAR1, gear1_temp))
        if FLAG_GEAR2 in props:
            gear2_temp = props[FLAG_GEAR2]
            print('on_props: name is {},value is {}'.format(
                FLAG_GEAR2, gear2_temp))
        if FLAG_GEAR3 in props:
            gear3_temp = props[FLAG_GEAR3]
            print('on_props: name is {},value is {}'.format(
                FLAG_GEAR3, gear3_temp))
        # Report the (possibly updated) thresholds back so the cloud-side
        # device shadow stays in sync.
        post_default_value()
    except Exception as e:
        print(e)
def post_props(data):
    """Report properties to the IoT platform.

    A plain dict of property values is wrapped into the
    {'params': <json string>} envelope that aliyunIoT expects; anything
    else is assumed to be pre-wrapped and forwarded unchanged.
    """
    global device
    payload = {'params': json.dumps(data)} if isinstance(data, dict) else data
    return device.postProps(payload)
def connect_lk(productKey, deviceName, deviceSecret):
    """Create the aliyunIoT Device, connect to the IoT platform and block
    until on_connect reports the connection as established.

    Args:
        productKey, deviceName, deviceSecret: the device "triple"
            credentials issued by Alibaba Cloud IoT Platform.
    """
    global device, iot_connected
    key_info = {
        'region': 'cn-shanghai',
        'productKey': productKey,
        'deviceName': deviceName,
        'deviceSecret': deviceSecret,
        'keepaliveSec': 60
    }
    # Hand the triple to the aliyunIoT component.
    device = Device()
    # Register the connect callback: on_connect flips the iot_connected
    # flag that the wait loop below polls.
    device.on(Device.ON_CONNECT, on_connect)
    # Register the property-set callback: on_props runs whenever the cloud
    # pushes property values down to this device.
    device.on(Device.ON_PROPS, on_props)
    # Kick off the (asynchronous) connection to the IoT platform.
    device.connect(key_info)
    # Busy-wait until the connection is up.
    while True:
        if iot_connected:
            print('物联网平台连接成功')
            break
        else:
            print('sleep for 1 s')
            time.sleep(1)
    time.sleep(2)
def post_default_value():
    """Push the three current gear thresholds to the cloud, one message each
    (gear1, then gear2, then gear3)."""
    global FLAG_GEAR1, FLAG_GEAR2, FLAG_GEAR3, gear1_temp, gear2_temp, gear3_temp
    for flag, threshold in ((FLAG_GEAR1, gear1_temp),
                            (FLAG_GEAR2, gear2_temp),
                            (FLAG_GEAR3, gear3_temp)):
        post_props({flag: threshold})
def upload_temp(temp):
    """Report the latest temperature reading to the cloud as `cur_temp`."""
    post_props({FLAG_CUR_TEMP: temp})
if __name__ == '__main__':
    # MicroPython Wi-Fi driver. Imported here because only the station
    # setup below needs it -- and the original file never imported it at
    # all, which made the next line raise NameError.
    import network
    wlan = network.WLAN(network.STA_IF)  # station-mode WLAN object
    # Join the router, connect to Alibaba Cloud IoT, then publish the
    # initial gear thresholds so the cloud-side shadow starts in sync.
    get_wifi_status()
    connect_lk(productKey, deviceName, deviceSecret)
    post_default_value()
    # Fan is driven by the PWM channel named "fan" in board.json; start it
    # stopped (gear 0).
    pwmObj = PWM()
    pwmObj.open("fan")
    fan = Fan(pwmObj)
    fan.control(0)
    # AHT21B temperature sensor on the I2C bus named 'aht21b'.
    i2c = I2C()
    i2c.open('aht21b')
    aht = AHT21B(i2c)
    # Poll loop: read temperature, report it, map it onto a fan gear.
    while True:
        temp = aht.getTemperature()
        print('cur temp is {}'.format(temp))
        upload_temp(temp)
        # Derive the target gear from the (cloud-adjustable) thresholds;
        # this replaces four near-identical if/elif branches.
        if temp <= gear1_temp:
            target_gear = 0
        elif temp <= gear2_temp:
            target_gear = 1
        elif temp <= gear3_temp:
            target_gear = 2
        else:
            target_gear = 3
        # Only touch the PWM (and log) when the gear actually changes.
        if target_gear != cur_gear:
            cur_gear = target_gear
            fan.control(cur_gear)
            print('fan change to gear {}'.format(cur_gear))
        # Throttle to one reading per second: the original tight loop
        # hammered the sensor and flooded the IoT platform with postProps.
        time.sleep(1)
| 27.446927 | 81 | 0.630572 |
from fan import Fan
from aht21b import AHT21B
from driver import PWM, I2C
import time
from aliyunIoT import Device
import json
iot_connected = False
wlan = None
productKey = "产品密钥"
deviceName = "设备名称"
deviceSecret = "设备密钥"
device = None
wifiSsid = "请输入您的路由器名称"
wifiPassword = "请输入您的路由器密码"
gear1_temp = 22
gear2_temp = 27
gear3_temp = 32
FLAG_CUR_TEMP = "cur_temp"
FLAG_GEAR1 = "gear1"
FLAG_GEAR2 = "gear2"
FLAG_GEAR3 = "gear3"
cur_gear = 0
def get_wifi_status():
    """Bring up the Wi-Fi station interface and block until it has joined
    the access point configured by wifiSsid/wifiPassword, then print the
    resulting interface configuration."""
    global wlan
    wlan.active(True)
    wlan.scan()
    wlan.connect(wifiSsid, wifiPassword)
    # Poll the association state every half second until connected.
    connected = wlan.isconnected()
    while not connected:
        time.sleep(0.5)
        connected = wlan.isconnected()
    print("wifi_connected:", connected)
    ifconfig = wlan.ifconfig()
    print(ifconfig)
    time.sleep(0.5)
def on_connect(data):
    # Device.ON_CONNECT callback: flag the cloud link as established so the
    # polling loop in connect_lk() can stop. `data` (event payload) is unused.
    global iot_connected
    iot_connected = True
def on_props(request):
    """Device.ON_PROPS callback: apply gear-threshold property changes pushed
    from the cloud and report the resulting values back.

    request['params'] is a JSON document mapping property identifiers
    (gear1/gear2/gear3) to new temperature thresholds.
    """
    global FLAG_GEAR1, FLAG_GEAR2, FLAG_GEAR3, gear1_temp, gear2_temp, gear3_temp
    try:
        # Fix: json.loads replaces the original eval(). eval() executes
        # arbitrary expressions received from the network and also fails on
        # JSON literals such as true/false/null.
        props = json.loads(request['params'])
        # Independent ifs (the original used elif) so a single message can
        # update several thresholds; single-property messages behave the same.
        if FLAG_GEAR1 in props:
            gear1_temp = props[FLAG_GEAR1]
            print('on_props: name is {},value is {}'.format(FLAG_GEAR1, gear1_temp))
        if FLAG_GEAR2 in props:
            gear2_temp = props[FLAG_GEAR2]
            print('on_props: name is {},value is {}'.format(FLAG_GEAR2, gear2_temp))
        if FLAG_GEAR3 in props:
            gear3_temp = props[FLAG_GEAR3]
            print('on_props: name is {},value is {}'.format(FLAG_GEAR3, gear3_temp))
        # Echo the (possibly updated) thresholds so the device shadow stays
        # in sync even if the message contained no known key.
        post_default_value()
    except Exception as e:
        print(e)
def post_props(data):
    """Report property values to the cloud.

    Dict payloads are serialized into the {'params': <json>} envelope the
    aliyunIoT SDK expects; anything else is passed through untouched.
    Returns whatever Device.postProps returns.
    """
    global device
    payload = {'params': json.dumps(data)} if isinstance(data, dict) else data
    return device.postProps(payload)
def connect_lk(productKey, deviceName, deviceSecret):
    """Connect the device to the Alibaba Cloud IoT platform (cn-shanghai)
    and block until the ON_CONNECT callback reports the link is up."""
    global device, iot_connected
    device = Device()
    # Register callbacks before connecting so no event is missed.
    device.on(Device.ON_CONNECT, on_connect)
    device.on(Device.ON_PROPS, on_props)
    device.connect({
        'region': 'cn-shanghai',
        'productKey': productKey,
        'deviceName': deviceName,
        'deviceSecret': deviceSecret,
        'keepaliveSec': 60,
    })
    # iot_connected is flipped asynchronously by on_connect().
    while not iot_connected:
        print('sleep for 1 s')
        time.sleep(1)
    print('物联网平台连接成功')
    time.sleep(2)
def post_default_value():
    """Report all three gear temperature thresholds so the cloud device
    shadow matches the values currently in effect on the device."""
    global FLAG_GEAR1, FLAG_GEAR2, FLAG_GEAR3, gear1_temp, gear2_temp, gear3_temp
    for flag, threshold in ((FLAG_GEAR1, gear1_temp),
                            (FLAG_GEAR2, gear2_temp),
                            (FLAG_GEAR3, gear3_temp)):
        post_props({flag: threshold})
def upload_temp(temp):
    """Publish the current temperature reading under FLAG_CUR_TEMP."""
    post_props({FLAG_CUR_TEMP: temp})
if __name__ == '__main__':
    # `network` is provided by the MicroPython firmware; the import is added
    # to the header (it was missing, which made this line raise NameError).
    wlan = network.WLAN(network.STA_IF)  # station-mode WLAN interface
    get_wifi_status()
    connect_lk(productKey, deviceName, deviceSecret)
    post_default_value()
    # Fan driven through the PWM channel named "fan" in the board config.
    pwmObj = PWM()
    pwmObj.open("fan")
    fan = Fan(pwmObj)
    fan.control(0)
    # AHT21B temperature sensor on the I2C port named "aht21b".
    i2c = I2C()
    i2c.open('aht21b')
    aht = AHT21B(i2c)
    # Main loop: read, report, and switch the fan gear when the reading
    # crosses one of the (cloud-adjustable) thresholds.
    # NOTE(review): there is no delay here, so the device publishes as fast
    # as the sensor reads — consider throttling with time.sleep().
    while True:
        temp = aht.getTemperature()
        print('cur temp is {}'.format(temp))
        upload_temp(temp)
        # Map the reading onto a gear: 0 at/below gear1_temp, then one step
        # per threshold crossed, capped at 3.
        if temp <= gear1_temp:
            target_gear = 0
        elif temp <= gear2_temp:
            target_gear = 1
        elif temp <= gear3_temp:
            target_gear = 2
        else:
            target_gear = 3
        if target_gear != cur_gear:
            cur_gear = target_gear
            fan.control(cur_gear)
            print('fan change to gear {}'.format(cur_gear))
| true | true |
f7112d60b47eb6fb4ad1be2a0ffbcd3b4d41a3f4 | 21,283 | py | Python | reclor_trainer_base_v2.py | SparkJiao/MERIt | e887dd11bd2969345a5fb07c47d49bd0245e41e6 | [
"MIT"
] | 8 | 2022-03-01T09:02:44.000Z | 2022-03-18T14:41:56.000Z | reclor_trainer_base_v2.py | SparkJiao/MERIt | e887dd11bd2969345a5fb07c47d49bd0245e41e6 | [
"MIT"
] | 1 | 2022-03-09T12:12:22.000Z | 2022-03-10T09:08:42.000Z | reclor_trainer_base_v2.py | SparkJiao/MERIt | e887dd11bd2969345a5fb07c47d49bd0245e41e6 | [
"MIT"
] | 2 | 2022-03-02T01:46:52.000Z | 2022-03-02T13:51:53.000Z | # coding=utf-8
#
# Copyright 2020 Heinrich Heine University Duesseldorf
#
# Part of this code is based on the source code of BERT-DST
# (arXiv:1907.03040)
# Part of this code is based on the source code of Transformers
# (arXiv:1910.03771)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import json
import logging
import os
import sys
from typing import Dict, Union
import hydra
import numpy as np
import torch
import transformers
from fairscale.nn.data_parallel.fully_sharded_data_parallel import FullyShardedDataParallel as FullyShardedDDP
from fairscale.nn.wrap.auto_wrap import auto_wrap
from fairscale.optim.grad_scaler import ShardedGradScaler
from omegaconf import DictConfig, OmegaConf
from torch import distributed as dist
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm, trange
from transformers import (get_linear_schedule_with_warmup, AutoTokenizer, PreTrainedTokenizer)
from general_util.logger import setting_logger
from general_util.training_utils import batch_to_device, unwrap_model, set_seed, note_best_checkpoint, initialize_optimizer
# Module-level logger; assigned in main() via setting_logger(). The bare
# annotation keeps the name visible to type checkers before assignment.
logger: logging.Logger
# transformers.logging.set_verbosity_error()
def save_model(model: Union[torch.nn.Module, FullyShardedDDP], cfg: DictConfig, output_dir: str, tokenizer: PreTrainedTokenizer = None):
    """Persist the model (and optionally tokenizer + training config) to output_dir.

    In distributed mode state_dict() is called on every rank while only rank 0
    writes the files — presumably because FSDP gathers shards collectively;
    TODO confirm against the fairscale docs.
    """
    # Save model checkpoint.
    if cfg.local_rank != -1:
        state_dict = model.state_dict()
        if cfg.local_rank == 0:
            unwrap_model(model).save_pretrained(output_dir, state_dict=state_dict)
    else:
        model.save_pretrained(output_dir)
    # Save tokenizer and training args.
    if cfg.local_rank in [-1, 0]:
        if tokenizer is not None:
            tokenizer.save_pretrained(output_dir)
        OmegaConf.save(cfg, os.path.join(output_dir, "training_config.yaml"))
        logger.info("Saving model checkpoint to %s", output_dir)
def forward_step(model, inputs: Dict[str, torch.Tensor], cfg, scaler):
    """Run one forward/backward pass and return the scalar loss value.

    The returned loss is already divided by the gradient-accumulation factor
    and averaged over DataParallel replicas. Under fp16 the backward pass is
    routed through the GradScaler so gradients are loss-scaled.
    """
    # autocast(enabled=...) collapses the original fp16/fp32 forward branches
    # into one; with enabled=False it is a no-op.
    with torch.cuda.amp.autocast(enabled=cfg.fp16):
        outputs = model(**inputs)
    loss = outputs["loss"]
    if cfg.n_gpu > 1:
        # DataParallel returns one loss per replica.
        loss = loss.mean()
    if cfg.gradient_accumulation_steps > 1:
        loss = loss / cfg.gradient_accumulation_steps
    if cfg.fp16:
        scaler.scale(loss).backward()
    else:
        loss.backward()
    return loss.item()
def train(cfg, train_dataset, features, model, tokenizer, continue_from_global_step=0):
    """Train the model with gradient accumulation, optional fp16 and FSDP.

    Returns (global_step, average training loss). Checkpoints are written
    every cfg.save_steps optimizer steps; evaluation runs every
    cfg.eval_steps steps when cfg.evaluate_during_training is set.
    """
    # TensorBoard logs go to <root>/runs/<rest of output_dir>, rank 0 only.
    if cfg.local_rank in [-1, 0]:
        _dir_splits = cfg.output_dir.split('/')
        _log_dir = '/'.join([_dir_splits[0], 'runs'] + _dir_splits[1:])
        tb_writer = SummaryWriter(log_dir=_log_dir)
    else:
        tb_writer = None
    cfg.train_batch_size = cfg.per_gpu_train_batch_size * max(1, cfg.n_gpu)
    train_sampler = RandomSampler(train_dataset) if cfg.local_rank == -1 else DistributedSampler(train_dataset)
    train_collator = hydra.utils.instantiate(cfg.collator) if "collator" in cfg and cfg.collator else None
    train_dataloader = DataLoader(dataset=train_dataset, sampler=train_sampler, batch_size=cfg.train_batch_size,
                                  collate_fn=train_collator, num_workers=cfg.num_workers, pin_memory=True,
                                  prefetch_factor=cfg.prefetch_factor)
    if "extended_vocab" in cfg and cfg.extended_vocab:
        logger.info(f"Extended extra vocab size: {cfg.extended_vocab}")
        model.resize_token_embeddings(model.config.vocab_size + cfg.extended_vocab)
    # t_total = number of optimizer steps; either fixed by max_steps or
    # derived from epochs x (batches / accumulation).
    if cfg.max_steps > 0:
        t_total = cfg.max_steps
        cfg.num_train_epochs = cfg.max_steps // (len(train_dataloader) // cfg.gradient_accumulation_steps) + 1
    else:
        t_total = len(train_dataloader) // cfg.gradient_accumulation_steps * cfg.num_train_epochs
    num_warmup_steps = int(t_total * cfg.warmup_proportion) if cfg.warmup_proportion else cfg.warmup_steps
    optimizer = scheduler = None
    # Prepare optimizer and schedule (linear warmup and decay).
    # Single-process path builds them now; the FSDP path below rebuilds them
    # after wrapping so parameter references stay valid.
    if cfg.local_rank == -1:
        no_decay = ['bias', 'LayerNorm.weight', 'layer_norm.weight']
        optimizer_grouped_parameters = [
            {
                'params': [p for n, p in model.named_parameters() if (not any(nd in n for nd in no_decay)) and p.requires_grad],
                'weight_decay': cfg.weight_decay
            },
            {
                'params': [p for n, p in model.named_parameters() if (any(nd in n for nd in no_decay)) and p.requires_grad],
                'weight_decay': 0.0
            }
        ]
        optimizer = initialize_optimizer(cfg, optimizer_grouped_parameters)
        scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=t_total)
    # Sharded scaler is required when gradients are sharded across ranks.
    if cfg.fp16:
        if cfg.local_rank != -1:
            scaler = ShardedGradScaler()
        else:
            from torch.cuda.amp.grad_scaler import GradScaler
            scaler = GradScaler()
    else:
        scaler = None
    # multi-gpu training (should be after apex fp16 initialization)
    model_single_gpu = model
    if cfg.n_gpu > 1:
        model = torch.nn.DataParallel(model_single_gpu)
    # Distributed training (should be after apex fp16 initialization)
    if cfg.local_rank != -1:
        model = auto_wrap(model)
        model = FullyShardedDDP(model,
                                mixed_precision=cfg.fp16,
                                flatten_parameters=getattr(cfg, "flatten_parameters", True),
                                reshard_after_forward=cfg.reshard_after_forward,
                                move_grads_to_cpu=cfg.move_grads_to_cpu,
                                move_params_to_cpu=cfg.move_params_to_cpu)
        if not cfg.move_params_to_cpu:
            model = model.to(cfg.device)
        no_decay = ['bias', 'LayerNorm.weight', 'layer_norm.weight']
        optimizer_grouped_parameters = [
            {
                'params': [p for n, p in model.named_parameters() if (not any(nd in n for nd in no_decay)) and p.requires_grad],
                'weight_decay': cfg.weight_decay
            },
            {
                'params': [p for n, p in model.named_parameters() if (any(nd in n for nd in no_decay)) and p.requires_grad],
                'weight_decay': 0.0
            }
        ]
        optimizer = initialize_optimizer(cfg, optimizer_grouped_parameters)
        scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=t_total)
    logger.info(optimizer)
    # Train!
    logger.info("***** Running training *****")
    logger.info(" Num examples = %d", len(train_dataset))
    logger.info(" Num Epochs = %d", cfg.num_train_epochs)
    logger.info(" Instantaneous batch size per GPU = %d", cfg.per_gpu_train_batch_size)
    logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
                cfg.train_batch_size * cfg.gradient_accumulation_steps * (dist.get_world_size() if cfg.local_rank != -1 else 1))
    logger.info(" Gradient Accumulation steps = %d", cfg.gradient_accumulation_steps)
    logger.info(" Total optimization steps = %d", t_total)
    logger.info(" Warmup steps = %d", num_warmup_steps)
    if continue_from_global_step > 0:
        logger.info("Fast forwarding to global step %d to resume training from latest checkpoint...", continue_from_global_step)
    global_step = 0
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    train_iterator = trange(int(cfg.num_train_epochs), desc="Epoch", disable=cfg.local_rank not in [-1, 0])
    set_seed(cfg)  # Added here for reproducibility (even between python 2 and 3)
    for epoch in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=cfg.local_rank not in [-1, 0], dynamic_ncols=True)
        if cfg.local_rank != -1:
            train_dataloader.sampler.set_epoch(epoch)
        for step, batch in enumerate(epoch_iterator):
            # If training is continued from a checkpoint, fast forward
            # to the state of that checkpoint (advance scheduler only).
            if global_step < continue_from_global_step:
                if (step + 1) % cfg.gradient_accumulation_steps == 0:
                    scheduler.step()  # Update learning rate schedule
                    global_step += 1
                continue
            model.train()
            batch = batch_to_device(batch, cfg.device)
            if (step + 1) % cfg.gradient_accumulation_steps != 0 and cfg.local_rank != -1:
                # Avoid unnecessary DDP synchronization since there will be no backward pass on this example.
                with model.no_sync():
                    loss = forward_step(model, batch, cfg, scaler)
            else:
                loss = forward_step(model, batch, cfg, scaler)
            tr_loss += loss
            # Optimizer step only on accumulation boundaries.
            if (step + 1) % cfg.gradient_accumulation_steps == 0:
                if cfg.fp16:
                    scaler.unscale_(optimizer)
                # Clip with whichever API the wrapped optimizer/model exposes.
                if cfg.max_grad_norm:
                    if hasattr(optimizer, "clip_grad_norm"):
                        optimizer.clip_grad_norm(cfg.max_grad_norm)
                    elif hasattr(model, "clip_grad_norm_"):
                        model.clip_grad_norm_(cfg.max_grad_norm)
                    else:
                        torch.nn.utils.clip_grad_norm_(model.parameters(), cfg.max_grad_norm)
                if cfg.fp16:
                    scaler.step(optimizer)
                    scaler.update()
                else:
                    optimizer.step()
                scheduler.step()  # Update learning rate schedule
                model.zero_grad(set_to_none=True)
                global_step += 1
                # Log metrics
                if cfg.local_rank in [-1, 0] and cfg.logging_steps > 0 and global_step % cfg.logging_steps == 0:
                    tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)
                    tb_writer.add_scalar('loss', (tr_loss - logging_loss) / cfg.logging_steps, global_step)
                    logging_loss = tr_loss
                # Save model checkpoint
                if cfg.save_steps > 0 and global_step % cfg.save_steps == 0:
                    output_dir = os.path.join(cfg.output_dir, 'checkpoint-{}'.format(global_step))
                    if cfg.local_rank in [-1, 0] and not os.path.exists(output_dir):
                        os.makedirs(output_dir)
                    save_model(model, cfg, output_dir, tokenizer)
                # Evaluation (state_dict gathered on all ranks; rank 0 evaluates)
                if cfg.evaluate_during_training and cfg.eval_steps > 0 and global_step % cfg.eval_steps == 0:
                    state_dict = model.state_dict()
                    if cfg.local_rank in [-1, 0]:
                        results = evaluate(cfg, model, tokenizer, prefix=str(global_step), _split="dev")
                        for key, value in results.items():
                            tb_writer.add_scalar(f"eval/{key}", value, global_step)
                        sub_path = os.path.join(cfg.output_dir, 'checkpoint-{}'.format(global_step))
                        flag = note_best_checkpoint(cfg, results, sub_path)
                        if cfg.save_best and flag:
                            if cfg.local_rank == 0:
                                unwrap_model(model).save_pretrained(cfg.output_dir, state_dict=state_dict)
                            else:
                                model.save_pretrained(cfg.output_dir)
                            tokenizer.save_pretrained(cfg.output_dir)
                            OmegaConf.save(cfg, os.path.join(cfg.output_dir, "training_config.yaml"))
                            logger.info("Saving best model checkpoint to %s", cfg.output_dir)
            if 0 < cfg.max_steps < global_step:
                epoch_iterator.close()
                break
        if 0 < cfg.max_steps < global_step:
            train_iterator.close()
            break
    if cfg.local_rank in [-1, 0]:
        tb_writer.close()
    return global_step, tr_loss / global_step
def evaluate(cfg, model, tokenizer: PreTrainedTokenizer, prefix="", _split="dev"):
    """Run inference on the requested split and dump predictions.

    Writes eval_predictions.npy (argmax labels) and eval_probs.json (max
    softmax probability per example) under cfg.output_dir/prefix, and returns
    the metrics dict produced by the model's get_eval_log().
    """
    dataset, features = load_and_cache_examples(cfg, tokenizer, _split=_split)
    if not os.path.exists(os.path.join(cfg.output_dir, prefix)):
        os.makedirs(os.path.join(cfg.output_dir, prefix))
    cfg.eval_batch_size = cfg.per_gpu_eval_batch_size
    eval_sampler = SequentialSampler(dataset)  # Note that DistributedSampler samples randomly
    eval_collator = hydra.utils.instantiate(cfg.collator) if "collator" in cfg and cfg.collator else None
    eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=cfg.eval_batch_size,
                                 collate_fn=eval_collator)
    single_model_gpu = unwrap_model(model)
    single_model_gpu.get_eval_log(reset=True)  # clear metric accumulators left over from training
    # Eval!
    torch.cuda.empty_cache()
    logger.info("***** Running evaluation {}.{} *****".format(_split, prefix))
    logger.info(" Num examples = %d", len(dataset))
    logger.info(" Batch size = %d", cfg.eval_batch_size)
    # Seems FSDP does not need to unwrap the model for evaluating.
    model.eval()
    pred_list = []
    prob_list = []
    for batch in tqdm(eval_dataloader, desc="Evaluating", dynamic_ncols=True):
        batch = batch_to_device(batch, cfg.device)
        with torch.cuda.amp.autocast():
            with torch.no_grad():
                outputs = model(**batch)
                probs = outputs["logits"].softmax(dim=-1).detach().float().cpu()
                prob, pred = probs.max(dim=-1)
        pred_list.extend(pred.tolist())
        prob_list.extend(prob.tolist())
    metric_log, results = single_model_gpu.get_eval_log(reset=True)
    logger.info("****** Evaluation Results ******")
    logger.info(f"Global Steps: {prefix}")
    logger.info(metric_log)
    prediction_file = os.path.join(cfg.output_dir, prefix, "eval_predictions.npy")
    np.save(prediction_file, pred_list)
    # Fix: close the probabilities file deterministically instead of leaking
    # the handle returned by a bare open() passed to json.dump.
    with open(os.path.join(cfg.output_dir, prefix, "eval_probs.json"), "w") as f:
        json.dump(prob_list, f)
    return results
def load_and_cache_examples(cfg, tokenizer: PreTrainedTokenizer, _split="train"):
    """Build a TensorDataset for the requested split ("train"/"dev"/"test").

    Reading/tokenization is delegated to the callable configured at
    cfg.read_tensor (invoked through hydra); returns (dataset, features).
    """
    if cfg.local_rank not in [-1, 0] and _split == "train":
        dist.barrier()  # Make sure only the first process in distributed training process the dataset, and the others will use the cache
    if _split == "train":
        input_file = cfg.train_file
    elif _split == "dev":
        input_file = cfg.dev_file
    elif _split == "test":
        input_file = cfg.test_file
    else:
        raise RuntimeError(_split)
    examples, features, tensors = hydra.utils.call(cfg.read_tensor, file_path=input_file, tokenizer=tokenizer)
    if cfg.local_rank == 0 and _split == "train":
        dist.barrier()  # Make sure only the first process in distributed training process the dataset, and the others will use the cache
    dataset = TensorDataset(*tensors)
    return dataset, features
@hydra.main(config_path="conf", config_name="config")
def main(cfg: DictConfig):
    """Entry point: set up device/distributed state, optionally train, then
    evaluate the selected checkpoints. Returns the metrics dict."""
    # Device & distributed initialization.
    if cfg.local_rank == -1 or cfg.no_cuda:
        device = str(torch.device("cuda" if torch.cuda.is_available() and not cfg.no_cuda else "cpu"))
        cfg.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.cuda.set_device(cfg.local_rank)
        device = str(torch.device("cuda", cfg.local_rank))
        dist.init_process_group(backend='nccl')
        cfg.n_gpu = 1
        cfg.world_size = dist.get_world_size()
    cfg.device = device
    global logger
    logger = setting_logger(cfg.output_dir, local_rank=cfg.local_rank)
    logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
                   cfg.local_rank, device, cfg.n_gpu, bool(cfg.local_rank != -1), cfg.fp16)
    # Set seed
    set_seed(cfg)
    # Load pre-trained model and tokenizer
    if cfg.local_rank not in [-1, 0]:
        dist.barrier()  # Make sure only the first process in distributed training will download model & vocab
    if cfg.pretrain:
        pretrain_state_dict = torch.load(cfg.pretrain, map_location='cpu')
    else:
        pretrain_state_dict = None
    tokenizer = AutoTokenizer.from_pretrained(cfg.model_name_or_path)
    model = hydra.utils.call(cfg.model, cfg.model_name_or_path, state_dict=pretrain_state_dict)
    if cfg.local_rank == 0:
        dist.barrier()  # Make sure only the first process in distributed training will download model & vocab
    if cfg.local_rank == -1:  # For FullyShardedDDP, place the model on cpu first.
        model.to(cfg.device)
    # logger.info("Training/evaluation parameters %s", OmegaConf.to_yaml(cfg))
    if cfg.local_rank in [-1, 0] and cfg.do_train:
        if not os.path.exists(cfg.output_dir):
            os.makedirs(cfg.output_dir)
        OmegaConf.save(cfg, os.path.join(cfg.output_dir, "training_config.yaml"))
    # Training
    if cfg.do_train:
        # TODO: Add option for continuously training from checkpoint.
        #  The operation should be introduced in ``train`` method since both the state dict
        #  of schedule and optimizer (and scaler, if any) should be loaded.
        # If output files already exists, assume to continue training from latest checkpoint (unless overwrite_output_dir is set)
        continue_from_global_step = 0  # If set to 0, start training from the beginning
        # if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:
        #     checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/*/' + WEIGHTS_NAME, recursive=True)))
        #     if len(checkpoints) > 0:
        #         checkpoint = checkpoints[-1]
        #         logger.info("Resuming training from the latest checkpoint: %s", checkpoint)
        #         continue_from_global_step = int(checkpoint.split('-')[-1])
        #         model = model_class.from_pretrained(checkpoint)
        #         model.to(args.device)
        train_dataset, features = load_and_cache_examples(cfg, tokenizer, _split="train")
        global_step, tr_loss = train(cfg, train_dataset, features, model, tokenizer, continue_from_global_step)
        logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
    # Test
    results = {}
    if cfg.do_eval and cfg.local_rank in [-1, 0]:
        # Checkpoint selection priority: best (output_dir) > configured best
        # checkpoint path > glob over eval_sub_path.
        checkpoints = [cfg.output_dir]
        if cfg.save_best:
            logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN)  # Reduce logging
        elif cfg.prediction_cfg.best_checkpoint and os.path.exists(cfg.prediction_cfg.best_checkpoint):
            checkpoints = [cfg.prediction_cfg.best_checkpoint]
            logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN)  # Reduce logging
        elif cfg.eval_sub_path:
            checkpoints = list(
                os.path.dirname(c) for c in
                sorted(glob.glob(cfg.output_dir + f"/{cfg.eval_sub_path}/" + "pytorch_model.bin", recursive=True))
            )
            logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN)  # Reduce logging
        logger.info(" the following checkpoints: %s", checkpoints)
        for checkpoint in checkpoints:
            global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
            prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else ""
            split = "dev"
            model = hydra.utils.call(cfg.model, checkpoint)
            model.to(device)
            # A configured test_file switches evaluation to the test split.
            if cfg.test_file:
                prefix = f'test' + (f'-{prefix}' if prefix != "" else "")
                split = "test"
            result = evaluate(cfg, model, tokenizer, prefix=prefix, _split=split)
            result = dict((k + "_{}".format(global_step), v) for k, v in result.items())
            results.update(result)
    return results
if __name__ == "__main__":
    # torch.distributed.launch passes flags as "--key=value"; hydra expects
    # bare "key=value" overrides, so strip the leading dashes.
    sys.argv = [arg[len("--"):] if arg.startswith("--") else arg for arg in sys.argv]
    main()
| 45.091102 | 137 | 0.644881 |
import glob
import json
import logging
import os
import sys
from typing import Dict, Union
import hydra
import numpy as np
import torch
import transformers
from fairscale.nn.data_parallel.fully_sharded_data_parallel import FullyShardedDataParallel as FullyShardedDDP
from fairscale.nn.wrap.auto_wrap import auto_wrap
from fairscale.optim.grad_scaler import ShardedGradScaler
from omegaconf import DictConfig, OmegaConf
from torch import distributed as dist
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm, trange
from transformers import (get_linear_schedule_with_warmup, AutoTokenizer, PreTrainedTokenizer)
from general_util.logger import setting_logger
from general_util.training_utils import batch_to_device, unwrap_model, set_seed, note_best_checkpoint, initialize_optimizer
# Module-level logger; assigned in main() via setting_logger().
logger: logging.Logger
def save_model(model: Union[torch.nn.Module, FullyShardedDDP], cfg: DictConfig, output_dir: str, tokenizer: PreTrainedTokenizer = None):
    """Write model weights to output_dir; rank -1/0 also writes the tokenizer
    and the training config YAML."""
    distributed = cfg.local_rank != -1
    if not distributed:
        model.save_pretrained(output_dir)
    else:
        # Every rank materializes the state dict; only rank 0 writes it.
        state_dict = model.state_dict()
        if cfg.local_rank == 0:
            unwrap_model(model).save_pretrained(output_dir, state_dict=state_dict)
    if cfg.local_rank in (-1, 0):
        if tokenizer is not None:
            tokenizer.save_pretrained(output_dir)
        OmegaConf.save(cfg, os.path.join(output_dir, "training_config.yaml"))
        logger.info("Saving model checkpoint to %s", output_dir)
def forward_step(model, inputs: Dict[str, torch.Tensor], cfg, scaler):
    """Run one forward/backward pass and return the scalar loss value.

    The loss is averaged over DataParallel replicas and divided by the
    gradient-accumulation factor before backward.
    """
    if cfg.fp16:
        # Mixed-precision forward; backward is scaled to avoid fp16 underflow.
        with torch.cuda.amp.autocast():
            outputs = model(**inputs)
            loss = outputs["loss"]
    else:
        outputs = model(**inputs)
        loss = outputs["loss"]
    if cfg.n_gpu > 1:
        loss = loss.mean()  # DataParallel returns one loss per replica
    if cfg.gradient_accumulation_steps > 1:
        loss = loss / cfg.gradient_accumulation_steps
    if cfg.fp16:
        scaler.scale(loss).backward()
    else:
        loss.backward()
    return loss.item()
def train(cfg, train_dataset, features, model, tokenizer, continue_from_global_step=0):
    """Train the model with gradient accumulation, optional fp16 and FSDP.

    Returns (global_step, average training loss).
    """
    # TensorBoard logs go to <root>/runs/<rest of output_dir>, rank 0 only.
    if cfg.local_rank in [-1, 0]:
        _dir_splits = cfg.output_dir.split('/')
        _log_dir = '/'.join([_dir_splits[0], 'runs'] + _dir_splits[1:])
        tb_writer = SummaryWriter(log_dir=_log_dir)
    else:
        tb_writer = None
    cfg.train_batch_size = cfg.per_gpu_train_batch_size * max(1, cfg.n_gpu)
    train_sampler = RandomSampler(train_dataset) if cfg.local_rank == -1 else DistributedSampler(train_dataset)
    train_collator = hydra.utils.instantiate(cfg.collator) if "collator" in cfg and cfg.collator else None
    train_dataloader = DataLoader(dataset=train_dataset, sampler=train_sampler, batch_size=cfg.train_batch_size,
                                  collate_fn=train_collator, num_workers=cfg.num_workers, pin_memory=True,
                                  prefetch_factor=cfg.prefetch_factor)
    if "extended_vocab" in cfg and cfg.extended_vocab:
        logger.info(f"Extended extra vocab size: {cfg.extended_vocab}")
        model.resize_token_embeddings(model.config.vocab_size + cfg.extended_vocab)
    # t_total = number of optimizer steps; either fixed by max_steps or
    # derived from epochs x (batches / accumulation).
    if cfg.max_steps > 0:
        t_total = cfg.max_steps
        cfg.num_train_epochs = cfg.max_steps // (len(train_dataloader) // cfg.gradient_accumulation_steps) + 1
    else:
        t_total = len(train_dataloader) // cfg.gradient_accumulation_steps * cfg.num_train_epochs
    num_warmup_steps = int(t_total * cfg.warmup_proportion) if cfg.warmup_proportion else cfg.warmup_steps
    optimizer = scheduler = None
    # Single-process path builds optimizer/scheduler now; the FSDP path below
    # rebuilds them after wrapping so parameter references stay valid.
    if cfg.local_rank == -1:
        no_decay = ['bias', 'LayerNorm.weight', 'layer_norm.weight']
        optimizer_grouped_parameters = [
            {
                'params': [p for n, p in model.named_parameters() if (not any(nd in n for nd in no_decay)) and p.requires_grad],
                'weight_decay': cfg.weight_decay
            },
            {
                'params': [p for n, p in model.named_parameters() if (any(nd in n for nd in no_decay)) and p.requires_grad],
                'weight_decay': 0.0
            }
        ]
        optimizer = initialize_optimizer(cfg, optimizer_grouped_parameters)
        scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=t_total)
    # Sharded scaler when gradients are sharded across ranks.
    if cfg.fp16:
        if cfg.local_rank != -1:
            scaler = ShardedGradScaler()
        else:
            from torch.cuda.amp.grad_scaler import GradScaler
            scaler = GradScaler()
    else:
        scaler = None
    # Multi-GPU (DataParallel) wrapping.
    model_single_gpu = model
    if cfg.n_gpu > 1:
        model = torch.nn.DataParallel(model_single_gpu)
    # Distributed (FSDP) wrapping.
    if cfg.local_rank != -1:
        model = auto_wrap(model)
        model = FullyShardedDDP(model,
                                mixed_precision=cfg.fp16,
                                flatten_parameters=getattr(cfg, "flatten_parameters", True),
                                reshard_after_forward=cfg.reshard_after_forward,
                                move_grads_to_cpu=cfg.move_grads_to_cpu,
                                move_params_to_cpu=cfg.move_params_to_cpu)
        if not cfg.move_params_to_cpu:
            model = model.to(cfg.device)
        no_decay = ['bias', 'LayerNorm.weight', 'layer_norm.weight']
        optimizer_grouped_parameters = [
            {
                'params': [p for n, p in model.named_parameters() if (not any(nd in n for nd in no_decay)) and p.requires_grad],
                'weight_decay': cfg.weight_decay
            },
            {
                'params': [p for n, p in model.named_parameters() if (any(nd in n for nd in no_decay)) and p.requires_grad],
                'weight_decay': 0.0
            }
        ]
        optimizer = initialize_optimizer(cfg, optimizer_grouped_parameters)
        scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=t_total)
    logger.info(optimizer)
    logger.info("***** Running training *****")
    logger.info(" Num examples = %d", len(train_dataset))
    logger.info(" Num Epochs = %d", cfg.num_train_epochs)
    logger.info(" Instantaneous batch size per GPU = %d", cfg.per_gpu_train_batch_size)
    logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
                cfg.train_batch_size * cfg.gradient_accumulation_steps * (dist.get_world_size() if cfg.local_rank != -1 else 1))
    logger.info(" Gradient Accumulation steps = %d", cfg.gradient_accumulation_steps)
    logger.info(" Total optimization steps = %d", t_total)
    logger.info(" Warmup steps = %d", num_warmup_steps)
    if continue_from_global_step > 0:
        logger.info("Fast forwarding to global step %d to resume training from latest checkpoint...", continue_from_global_step)
    global_step = 0
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    train_iterator = trange(int(cfg.num_train_epochs), desc="Epoch", disable=cfg.local_rank not in [-1, 0])
    set_seed(cfg)  # reseed here for reproducibility
    for epoch in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=cfg.local_rank not in [-1, 0], dynamic_ncols=True)
        if cfg.local_rank != -1:
            train_dataloader.sampler.set_epoch(epoch)
        for step, batch in enumerate(epoch_iterator):
            # Resume fast-forward: only advance the scheduler/step counter.
            if global_step < continue_from_global_step:
                if (step + 1) % cfg.gradient_accumulation_steps == 0:
                    scheduler.step()
                    global_step += 1
                continue
            model.train()
            batch = batch_to_device(batch, cfg.device)
            # Skip gradient synchronization on non-boundary accumulation steps.
            if (step + 1) % cfg.gradient_accumulation_steps != 0 and cfg.local_rank != -1:
                with model.no_sync():
                    loss = forward_step(model, batch, cfg, scaler)
            else:
                loss = forward_step(model, batch, cfg, scaler)
            tr_loss += loss
            # Optimizer step only on accumulation boundaries.
            if (step + 1) % cfg.gradient_accumulation_steps == 0:
                if cfg.fp16:
                    scaler.unscale_(optimizer)
                # Clip with whichever API the wrapped optimizer/model exposes.
                if cfg.max_grad_norm:
                    if hasattr(optimizer, "clip_grad_norm"):
                        optimizer.clip_grad_norm(cfg.max_grad_norm)
                    elif hasattr(model, "clip_grad_norm_"):
                        model.clip_grad_norm_(cfg.max_grad_norm)
                    else:
                        torch.nn.utils.clip_grad_norm_(model.parameters(), cfg.max_grad_norm)
                if cfg.fp16:
                    scaler.step(optimizer)
                    scaler.update()
                else:
                    optimizer.step()
                scheduler.step()
                model.zero_grad(set_to_none=True)
                global_step += 1
                # Periodic TensorBoard logging (rank 0 only).
                if cfg.local_rank in [-1, 0] and cfg.logging_steps > 0 and global_step % cfg.logging_steps == 0:
                    tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)
                    tb_writer.add_scalar('loss', (tr_loss - logging_loss) / cfg.logging_steps, global_step)
                    logging_loss = tr_loss
                # Periodic checkpointing.
                if cfg.save_steps > 0 and global_step % cfg.save_steps == 0:
                    output_dir = os.path.join(cfg.output_dir, 'checkpoint-{}'.format(global_step))
                    if cfg.local_rank in [-1, 0] and not os.path.exists(output_dir):
                        os.makedirs(output_dir)
                    save_model(model, cfg, output_dir, tokenizer)
                # Periodic evaluation (state_dict on all ranks; rank 0 evaluates).
                if cfg.evaluate_during_training and cfg.eval_steps > 0 and global_step % cfg.eval_steps == 0:
                    state_dict = model.state_dict()
                    if cfg.local_rank in [-1, 0]:
                        results = evaluate(cfg, model, tokenizer, prefix=str(global_step), _split="dev")
                        for key, value in results.items():
                            tb_writer.add_scalar(f"eval/{key}", value, global_step)
                        sub_path = os.path.join(cfg.output_dir, 'checkpoint-{}'.format(global_step))
                        flag = note_best_checkpoint(cfg, results, sub_path)
                        if cfg.save_best and flag:
                            if cfg.local_rank == 0:
                                unwrap_model(model).save_pretrained(cfg.output_dir, state_dict=state_dict)
                            else:
                                model.save_pretrained(cfg.output_dir)
                            tokenizer.save_pretrained(cfg.output_dir)
                            OmegaConf.save(cfg, os.path.join(cfg.output_dir, "training_config.yaml"))
                            logger.info("Saving best model checkpoint to %s", cfg.output_dir)
            if 0 < cfg.max_steps < global_step:
                epoch_iterator.close()
                break
        if 0 < cfg.max_steps < global_step:
            train_iterator.close()
            break
    if cfg.local_rank in [-1, 0]:
        tb_writer.close()
    return global_step, tr_loss / global_step
def evaluate(cfg, model, tokenizer: PreTrainedTokenizer, prefix="", _split="dev"):
    """Run inference on the requested split and dump predictions.

    Writes eval_predictions.npy (argmax labels) and eval_probs.json (max
    softmax probability per example) under cfg.output_dir/prefix, and returns
    the metrics dict produced by the model's get_eval_log().
    """
    dataset, features = load_and_cache_examples(cfg, tokenizer, _split=_split)
    if not os.path.exists(os.path.join(cfg.output_dir, prefix)):
        os.makedirs(os.path.join(cfg.output_dir, prefix))
    cfg.eval_batch_size = cfg.per_gpu_eval_batch_size
    eval_sampler = SequentialSampler(dataset)
    eval_collator = hydra.utils.instantiate(cfg.collator) if "collator" in cfg and cfg.collator else None
    eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=cfg.eval_batch_size,
                                 collate_fn=eval_collator)
    single_model_gpu = unwrap_model(model)
    single_model_gpu.get_eval_log(reset=True)  # clear metric accumulators left over from training
    torch.cuda.empty_cache()
    logger.info("***** Running evaluation {}.{} *****".format(_split, prefix))
    logger.info(" Num examples = %d", len(dataset))
    logger.info(" Batch size = %d", cfg.eval_batch_size)
    model.eval()
    pred_list = []
    prob_list = []
    for batch in tqdm(eval_dataloader, desc="Evaluating", dynamic_ncols=True):
        batch = batch_to_device(batch, cfg.device)
        with torch.cuda.amp.autocast():
            with torch.no_grad():
                outputs = model(**batch)
                probs = outputs["logits"].softmax(dim=-1).detach().float().cpu()
                prob, pred = probs.max(dim=-1)
        pred_list.extend(pred.tolist())
        prob_list.extend(prob.tolist())
    metric_log, results = single_model_gpu.get_eval_log(reset=True)
    logger.info("****** Evaluation Results ******")
    logger.info(f"Global Steps: {prefix}")
    logger.info(metric_log)
    prediction_file = os.path.join(cfg.output_dir, prefix, "eval_predictions.npy")
    np.save(prediction_file, pred_list)
    # Fix: close the probabilities file deterministically instead of leaking
    # the handle returned by a bare open() passed to json.dump.
    with open(os.path.join(cfg.output_dir, prefix, "eval_probs.json"), "w") as f:
        json.dump(prob_list, f)
    return results
def load_and_cache_examples(cfg, tokenizer: PreTrainedTokenizer, _split="train"):
    """Build a TensorDataset for the requested split ("train"/"dev"/"test").

    Reading/tokenization is delegated to the callable configured at
    cfg.read_tensor (invoked through hydra); returns (dataset, features).
    """
    # Non-primary ranks wait so rank 0 processes the training data first.
    if cfg.local_rank not in [-1, 0] and _split == "train":
        dist.barrier()
    # Resolve the split name to the matching config attribute.
    attr_name = {"train": "train_file", "dev": "dev_file", "test": "test_file"}.get(_split)
    if attr_name is None:
        raise RuntimeError(_split)
    input_file = getattr(cfg, attr_name)
    examples, features, tensors = hydra.utils.call(cfg.read_tensor, file_path=input_file, tokenizer=tokenizer)
    # Rank 0 releases the other ranks once processing is done.
    if cfg.local_rank == 0 and _split == "train":
        dist.barrier()
    return TensorDataset(*tensors), features
@hydra.main(config_path="conf", config_name="config")
def main(cfg: DictConfig):
    """Entry point: set up devices/distributed state, optionally train, then
    evaluate one or more checkpoints and return the merged result dict."""
    # --- Device / distributed setup -------------------------------------
    # local_rank == -1 means single-process execution (all visible GPUs);
    # otherwise one process per GPU with an NCCL process group.
    if cfg.local_rank == -1 or cfg.no_cuda:
        device = str(torch.device("cuda" if torch.cuda.is_available() and not cfg.no_cuda else "cpu"))
        cfg.n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(cfg.local_rank)
        device = str(torch.device("cuda", cfg.local_rank))
        dist.init_process_group(backend='nccl')
        cfg.n_gpu = 1
        cfg.world_size = dist.get_world_size()
    cfg.device = device

    # Module-level logger is (re)bound here so helper functions share it.
    global logger
    logger = setting_logger(cfg.output_dir, local_rank=cfg.local_rank)
    logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
                   cfg.local_rank, device, cfg.n_gpu, bool(cfg.local_rank != -1), cfg.fp16)

    set_seed(cfg)

    # Only rank 0 loads/downloads model artifacts; other ranks block here
    # and are released by the matching barrier below.
    if cfg.local_rank not in [-1, 0]:
        dist.barrier()

    # Optional warm-start weights (state dict loaded to CPU first).
    if cfg.pretrain:
        pretrain_state_dict = torch.load(cfg.pretrain, map_location='cpu')
    else:
        pretrain_state_dict = None

    tokenizer = AutoTokenizer.from_pretrained(cfg.model_name_or_path)
    model = hydra.utils.call(cfg.model, cfg.model_name_or_path, state_dict=pretrain_state_dict)

    # Release the non-master ranks (pairs with the barrier above).
    if cfg.local_rank == 0:
        dist.barrier()

    # NOTE(review): the model is moved to the device only in the
    # non-distributed case here — presumably distributed placement/wrapping
    # happens inside train(); confirm against the training loop.
    if cfg.local_rank == -1:
        model.to(cfg.device)

    # Persist the resolved config once, from the master process.
    if cfg.local_rank in [-1, 0] and cfg.do_train:
        if not os.path.exists(cfg.output_dir):
            os.makedirs(cfg.output_dir)
        OmegaConf.save(cfg, os.path.join(cfg.output_dir, "training_config.yaml"))

    # --- Training --------------------------------------------------------
    if cfg.do_train:
        continue_from_global_step = 0
        train_dataset, features = load_and_cache_examples(cfg, tokenizer, _split="train")
        global_step, tr_loss = train(cfg, train_dataset, features, model, tokenizer, continue_from_global_step)
        logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)

    # --- Evaluation (master process only) --------------------------------
    results = {}
    if cfg.do_eval and cfg.local_rank in [-1, 0]:
        # Default: evaluate the weights saved in output_dir; the branches
        # below may redirect to a best checkpoint or a glob of sub-dirs.
        checkpoints = [cfg.output_dir]
        if cfg.save_best:
            logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN)
        elif cfg.prediction_cfg.best_checkpoint and os.path.exists(cfg.prediction_cfg.best_checkpoint):
            checkpoints = [cfg.prediction_cfg.best_checkpoint]
            logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN)
        elif cfg.eval_sub_path:
            checkpoints = list(
                os.path.dirname(c) for c in
                sorted(glob.glob(cfg.output_dir + f"/{cfg.eval_sub_path}/" + "pytorch_model.bin", recursive=True))
            )
            logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN)
        logger.info(" the following checkpoints: %s", checkpoints)
        for checkpoint in checkpoints:
            # Step/prefix labels derived from the checkpoint path name.
            global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
            prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else ""
            split = "dev"
            model = hydra.utils.call(cfg.model, checkpoint)
            model.to(device)
            # A configured test file switches evaluation to the test split.
            if cfg.test_file:
                prefix = f'test' + (f'-{prefix}' if prefix != "" else "")
                split = "test"
            result = evaluate(cfg, model, tokenizer, prefix=prefix, _split=split)
            # Suffix metric keys with the step so checkpoints don't collide.
            result = dict((k + "_{}".format(global_step), v) for k, v in result.items())
            results.update(result)

    return results
if __name__ == "__main__":
    # Hydra expects "key=value" overrides rather than argparse-style
    # "--key=value" flags, so strip a leading "--" from every CLI token.
    sys.argv = [arg[2:] if arg.startswith("--") else arg for arg in sys.argv]
    main()
| true | true |
f7112f4e2a985351fa67abafbc227e54c3565741 | 1,599 | py | Python | xlsxwriter/test/comparison/test_chart_axis17.py | shareablee/XlsxWriter | 3cfcbe18fbc4526158ffbb5e7bb5227f78e3f5f9 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | xlsxwriter/test/comparison/test_chart_axis17.py | shareablee/XlsxWriter | 3cfcbe18fbc4526158ffbb5e7bb5227f78e3f5f9 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | xlsxwriter/test/comparison/test_chart_axis17.py | shareablee/XlsxWriter | 3cfcbe18fbc4526158ffbb5e7bb5227f78e3f5f9 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        self.maxDiff = None

        filename = 'chart_axis17.xlsx'
        test_dir = 'xlsxwriter/test/comparison/'
        # Generated workbook vs. the Excel-authored reference workbook.
        self.got_filename = f"{test_dir}_test_{filename}"
        self.exp_filename = f"{test_dir}xlsx_files/{filename}"

        self.ignore_files = []
        self.ignore_elements = {}

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file."""

        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()
        chart = workbook.add_chart({'type': 'column'})

        # Pin the axis ids so the generated XML matches the reference file.
        chart.axis_ids = [43812736, 45705088]

        data = [
            [1, 2, 3, 4, 5],
            [2, 4, 6, 8, 10],
            [3, 6, 9, 12, 15],
        ]

        # Write each data set down its own column (A, B, C) and add one
        # chart series per column, in sheet order.
        for column, values in zip('ABC', data):
            worksheet.write_column(column + '1', values)
        for column in 'ABC':
            chart.add_series({'values': '=Sheet1!$%s$1:$%s$5' % (column, column)})

        # Logarithmic value axis is the feature under test.
        chart.set_y_axis({'log_base': 10})

        worksheet.insert_chart('E9', chart)

        workbook.close()

        self.assertExcelEqual()
| 25.790323 | 79 | 0.565353 | true | true | |
f711301c61a91d397fabaa9789e2c1cc33e29329 | 20,549 | py | Python | log_complete_bcl2/model_17.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | log_complete_bcl2/model_17.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | log_complete_bcl2/model_17.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | [
"MIT"
] | null | null | null | # exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
# Instantiate the self-exporting PySB model; every component declared below
# registers itself with this Model instance.
Model()

# --- Monomers -----------------------------------------------------------
# Species of the extrinsic-apoptosis / Bcl-2 network.  The list gives each
# monomer's named binding sites (used by the Rules further down).
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('Bcl2', ['BidM', 'BaxA'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
# BaxA carries two self-association sites (BaxA_1/BaxA_2) used to chain
# activated Bax into tetrameric pores, plus cargo sites for Smac and CytoC.
Monomer('BaxA', ['BaxM', 'Bcl2', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM', 'Bcl2'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6A', ['C8pro'])
Monomer('C6pro', ['C3A'])
# --- Rate parameters ----------------------------------------------------
# All kinetic rate constants are placeholders (1.0); they are presumably
# calibrated elsewhere (e.g. by Bayesian parameter inference).  Naming
# convention: <rule>_2kf = bimolecular forward, _1kr = reverse,
# _1kc = unimolecular catalytic step.
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_2kf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_1kr', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2kf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1kr', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
# --- Initial amounts ----------------------------------------------------
# Copy numbers for each species at t=0 (the *_0 parameters are consumed by
# the Initial(...) declarations at the bottom of the file).
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 4250.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('Bcl2_0', 328000.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6A_0', 0.0)
Parameter('C6pro_0', 100.0)
# --- Observables --------------------------------------------------------
# One total-amount observable per monomer; the empty site pattern (e.g.
# Ligand()) matches the species in any binding state.
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('Bcl2_obs', Bcl2())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6A_obs', C6A())
Observable('C6pro_obs', C6pro())
# --- Rules --------------------------------------------------------------
# Reaction rules of the network.  "|" denotes a reversible rule, ">>" an
# irreversible one; "%" joins monomers into a complex via matching bond
# numbers.  Catalysis is modeled as a reversible binding step (rule *_0)
# followed by an irreversible product-release step (rule *_1).

# Receptor engagement: Ligand binds Receptor, which then recruits Fadd,
# which binds and activates procaspase-8 (C8pro -> C8A).
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)

# Active caspase-8 truncates Bid (BidU -> BidT).
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)

# Apoptosome assembly (CytoC + ApafI -> ApafA; ApafA + C9 -> Apop) and
# Apop-driven activation of procaspase-3; XIAP inhibits both Smac-bound
# and Apop-bound states and ubiquitinates active caspase-3.
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)

# Active caspase-3 cleaves PARP (ParpU -> ParpC).
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)

# Mitochondrial step: tBid translocates (BidT <-> BidM), activates Bax
# (BaxM -> BaxA), and activated Bax auto-activates more Bax; Bcl2
# sequesters both BidM and BaxA.
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None, Bcl2=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None, Bcl2=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None, Bcl2=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('inhibition_0_Bcl2_inhibitor_BidM_inh_target', Bcl2(BidM=None, BaxA=None) + BidM(BaxM=None, Bcl2=None) | Bcl2(BidM=1, BaxA=None) % BidM(BaxM=None, Bcl2=1), inhibition_0_Bcl2_inhibitor_BidM_inh_target_2kf, inhibition_0_Bcl2_inhibitor_BidM_inh_target_1kr)
Rule('inhibition_0_Bcl2_inhibitor_BaxA_inh_target', Bcl2(BidM=None, BaxA=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | Bcl2(BidM=None, BaxA=1) % BaxA(BaxM=None, Bcl2=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2kf, inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1kr)

# Pore assembly: activated Bax chains via its BaxA_1/BaxA_2 sites into
# dimer, trimer and finally a closed tetramer (the pore).
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)

# Transport through the Bax tetramer pore: mitochondrial Smac and CytoC
# bind the pore and are released in their cytosolic forms.
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)

# Feedback loop: C8A also activates C3pro directly; C3A activates C6pro,
# and C6A in turn activates more C8pro.
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
# --- Initial conditions -------------------------------------------------
# Each species starts fully unbound, at the copy number given by its *_0
# parameter declared above.
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(Bcl2(BidM=None, BaxA=None), Bcl2_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None, Bcl2=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C6pro(C3A=None), C6pro_0)
| 95.134259 | 798 | 0.804127 |
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('Bcl2', ['BidM', 'BaxA'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'Bcl2', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM', 'Bcl2'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6A', ['C8pro'])
Monomer('C6pro', ['C3A'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_2kf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_1kr', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2kf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1kr', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 4250.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('Bcl2_0', 328000.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6A_0', 0.0)
Parameter('C6pro_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('Bcl2_obs', Bcl2())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6A_obs', C6A())
Observable('C6pro_obs', C6pro())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None, Bcl2=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None, Bcl2=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None, Bcl2=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('inhibition_0_Bcl2_inhibitor_BidM_inh_target', Bcl2(BidM=None, BaxA=None) + BidM(BaxM=None, Bcl2=None) | Bcl2(BidM=1, BaxA=None) % BidM(BaxM=None, Bcl2=1), inhibition_0_Bcl2_inhibitor_BidM_inh_target_2kf, inhibition_0_Bcl2_inhibitor_BidM_inh_target_1kr)
Rule('inhibition_0_Bcl2_inhibitor_BaxA_inh_target', Bcl2(BidM=None, BaxA=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | Bcl2(BidM=None, BaxA=1) % BaxA(BaxM=None, Bcl2=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2kf, inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1kr)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(Bcl2(BidM=None, BaxA=None), Bcl2_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None, Bcl2=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C6pro(C3A=None), C6pro_0)
| true | true |
f7113193431e64e1b2be0b5b98c20cb05d9b30f6 | 5,751 | py | Python | deblurring_celeba_algorithm_1.py | ChandreyeeB/Blind-Image-Deconvolution-using-Deep-Generative-Priors | 4198bd2d325a32ffc4e714c486540e63440ab110 | [
"MIT"
] | 24 | 2019-01-10T14:18:57.000Z | 2021-12-07T13:56:23.000Z | deblurring_celeba_algorithm_1.py | ChandreyeeB/Blind-Image-Deconvolution-using-Deep-Generative-Priors | 4198bd2d325a32ffc4e714c486540e63440ab110 | [
"MIT"
] | 4 | 2019-02-01T22:21:05.000Z | 2021-06-09T13:00:10.000Z | deblurring_celeba_algorithm_1.py | ChandreyeeB/Blind-Image-Deconvolution-using-Deep-Generative-Priors | 4198bd2d325a32ffc4e714c486540e63440ab110 | [
"MIT"
] | 13 | 2019-01-28T12:23:51.000Z | 2022-03-23T04:38:47.000Z | import tensorflow as tf
import keras.backend as K
import numpy as np
from Utils import *
from generators.MotionBlurGenerator import *
from generators.CelebAGenerator import *
K.set_learning_phase(0)
from glob import glob
import os
# paths
Orig_Path = './results/CelebA/Original Images/*.png'
Range_Path = './results/CelebA/Range Images/*.png'
Blur_Path = './results/CelebA/Original Blurs/Test Blurs.npy'
# constants
REGULARIZORS = [0.01 , 0.01]
RANDOM_RESTARTS = 10
NOISE_STD = 0.01
STEPS = 10000
IMAGE_RANGE = [-1,1]
def step_size(t):
return 0.01 * np.exp( - t / 1000 )
SAVE_PATH = './results/CelebA/deblurring - alg1 - ' +str(int(NOISE_STD*100)) + 'perc noise - ' +str(RANDOM_RESTARTS) + 'RR/deblurring_'
# -----------------------------------------------------------------------
# loading test blur images
W = np.load(Blur_Path)
BLUR_RES = W.shape[1]
# loading test celeba images
X_Orig = np.array([ imread(path) for path in glob(Orig_Path)])/255
X_Range = np.array([ imread(path) for path in glob(Range_Path)])/255
IMAGE_RES = X_Orig.shape[1]
CHANNELS = X_Orig.shape[-1]
# loading celeba generator
CelebAGen = CelebAGenerator()
CelebAGen.GenerateModel()
CelebAGen.LoadWeights()
CelebAGAN = CelebAGen.GetModels()
celeba_latent_dim = CelebAGen.latent_dim
# loading motion blur generator
BLURGen = MotionBlur()
BLURGen.GenerateModel()
BLURGen.LoadWeights()
blur_vae, blur_encoder, blur_decoder = BLURGen.GetModels()
blur_latent_dim = BLURGen.latent_dim
# check if save dir exists, if not create a new one
try:
os.stat(SAVE_PATH[:-11])
except:
os.mkdir(SAVE_PATH[:-11])
# generating blurry images from test
Y_np = []
Blurry_Images = []
for i in tqdm(range(len(X_Orig)), ascii=True, desc ='Gen-Test-Blurry'):
x_np = X_Orig[i]
w_np = W[i]
y_np, y_f = GenerateBlurry(x_np, w_np, noise_std = NOISE_STD )
Y_np.append(y_np)
for _ in range(RANDOM_RESTARTS):
Blurry_Images.append(y_f)
Y_np = np.array(Y_np)
Blurry_Images = np.array(Blurry_Images)
# generating blurry images from range
Blurry_Images_range = []
Y_np_range = []
for i in tqdm(range(len(X_Orig)), ascii=True, desc ='Gen-Range-Blurry'):
y_np, y_f = GenerateBlurry(X_Range[i], W[i], noise_std = NOISE_STD )
Y_np_range.append(y_np)
for _ in range(RANDOM_RESTARTS):
Blurry_Images_range.append(y_f)
Y_np_range = np.array(Y_np_range)
Blurry_Images_range = np.array(Blurry_Images_range)
# alternating gradient descent for test images
image_gradients, blur_gradients, get_loss = Generate_Gradient_Functions(rr = Blurry_Images.shape[0],
reg = REGULARIZORS, image_range = IMAGE_RANGE,
decoder = CelebAGAN, blur_decoder = blur_decoder,
image_res = IMAGE_RES, blur_res = BLUR_RES,
channels = CHANNELS)
m_hat, h_hat, Loss = Optimize_Parallel(blurry_fourier = Blurry_Images, stepsize=step_size,steps = STEPS,
image_grad = image_gradients , blur_grad = blur_gradients,
getloss = get_loss, latent_image_dim = celeba_latent_dim , latent_blur_dim = blur_latent_dim)
X_hat_test = []
W_hat_test = []
for i in range(len(X_Orig)):
m_hat_i = m_hat[i*RANDOM_RESTARTS:(i+1)*RANDOM_RESTARTS]
h_hat_i = h_hat[i*RANDOM_RESTARTS:(i+1)*RANDOM_RESTARTS]
Loss_i = Loss[i*RANDOM_RESTARTS:(i+1)*RANDOM_RESTARTS]
x_hat_test, w_hat_test, loss_last_iter_test = Get_Min_Loss(Loss_i, m_hat_i, h_hat_i, decoder = CelebAGAN, blur_decoder = blur_decoder,
latent_image_dim = celeba_latent_dim, latent_blur_dim = blur_latent_dim, print_grad=False)
X_hat_test.append(x_hat_test)
W_hat_test.append(w_hat_test)
X_hat_test = np.array(X_hat_test)
W_hat_test = np.array(W_hat_test)
# alternating gradient descent for range images
m_hat, h_hat, Loss = Optimize_Parallel(blurry_fourier = Blurry_Images_range, stepsize=step_size,steps = STEPS,
image_grad = image_gradients , blur_grad = blur_gradients,
getloss = get_loss, latent_image_dim = celeba_latent_dim , latent_blur_dim = blur_latent_dim)
X_hat_range = []
W_hat_range = []
for i in range(len(X_Orig)):
m_hat_i = m_hat[i*RANDOM_RESTARTS:(i+1)*RANDOM_RESTARTS]
h_hat_i = h_hat[i*RANDOM_RESTARTS:(i+1)*RANDOM_RESTARTS]
Loss_i = Loss[i*RANDOM_RESTARTS:(i+1)*RANDOM_RESTARTS]
x_hat_range, w_hat_range, loss_last_iter_range = Get_Min_Loss(Loss_i, m_hat_i, h_hat_i, decoder = CelebAGAN, blur_decoder = blur_decoder,
latent_image_dim = celeba_latent_dim, latent_blur_dim = blur_latent_dim, print_grad=False)
X_hat_range.append(x_hat_range)
W_hat_range.append(w_hat_range)
X_hat_range = np.array(X_hat_range)
W_hat_range = np.array(W_hat_range)
X_hat_test = (X_hat_test + 1)/2
X_hat_range = (X_hat_range + 1)/2
Max = 10**len(str(len(X_Orig)-1))
# saving results
for i in range(len(X_Orig)):
Save_Results(path = SAVE_PATH + str(i+Max)[1:],
x_np = None,
w_np = None,
y_np = Y_np[i],
y_np_range = Y_np_range[i] ,
x_hat_test = X_hat_test[i],
w_hat_test = W_hat_test[i],
x_range = None,
x_hat_range = X_hat_range[i],
w_hat_range = W_hat_range[i], clip=True) | 40.216783 | 158 | 0.636933 | import tensorflow as tf
import keras.backend as K
import numpy as np
from Utils import *
from generators.MotionBlurGenerator import *
from generators.CelebAGenerator import *
K.set_learning_phase(0)
from glob import glob
import os
Orig_Path = './results/CelebA/Original Images/*.png'
Range_Path = './results/CelebA/Range Images/*.png'
Blur_Path = './results/CelebA/Original Blurs/Test Blurs.npy'
REGULARIZORS = [0.01 , 0.01]
RANDOM_RESTARTS = 10
NOISE_STD = 0.01
STEPS = 10000
IMAGE_RANGE = [-1,1]
def step_size(t):
return 0.01 * np.exp( - t / 1000 )
SAVE_PATH = './results/CelebA/deblurring - alg1 - ' +str(int(NOISE_STD*100)) + 'perc noise - ' +str(RANDOM_RESTARTS) + 'RR/deblurring_'
W = np.load(Blur_Path)
BLUR_RES = W.shape[1]
X_Orig = np.array([ imread(path) for path in glob(Orig_Path)])/255
X_Range = np.array([ imread(path) for path in glob(Range_Path)])/255
IMAGE_RES = X_Orig.shape[1]
CHANNELS = X_Orig.shape[-1]
CelebAGen = CelebAGenerator()
CelebAGen.GenerateModel()
CelebAGen.LoadWeights()
CelebAGAN = CelebAGen.GetModels()
celeba_latent_dim = CelebAGen.latent_dim
BLURGen = MotionBlur()
BLURGen.GenerateModel()
BLURGen.LoadWeights()
blur_vae, blur_encoder, blur_decoder = BLURGen.GetModels()
blur_latent_dim = BLURGen.latent_dim
try:
os.stat(SAVE_PATH[:-11])
except:
os.mkdir(SAVE_PATH[:-11])
Y_np = []
Blurry_Images = []
for i in tqdm(range(len(X_Orig)), ascii=True, desc ='Gen-Test-Blurry'):
x_np = X_Orig[i]
w_np = W[i]
y_np, y_f = GenerateBlurry(x_np, w_np, noise_std = NOISE_STD )
Y_np.append(y_np)
for _ in range(RANDOM_RESTARTS):
Blurry_Images.append(y_f)
Y_np = np.array(Y_np)
Blurry_Images = np.array(Blurry_Images)
Blurry_Images_range = []
Y_np_range = []
for i in tqdm(range(len(X_Orig)), ascii=True, desc ='Gen-Range-Blurry'):
y_np, y_f = GenerateBlurry(X_Range[i], W[i], noise_std = NOISE_STD )
Y_np_range.append(y_np)
for _ in range(RANDOM_RESTARTS):
Blurry_Images_range.append(y_f)
Y_np_range = np.array(Y_np_range)
Blurry_Images_range = np.array(Blurry_Images_range)
image_gradients, blur_gradients, get_loss = Generate_Gradient_Functions(rr = Blurry_Images.shape[0],
reg = REGULARIZORS, image_range = IMAGE_RANGE,
decoder = CelebAGAN, blur_decoder = blur_decoder,
image_res = IMAGE_RES, blur_res = BLUR_RES,
channels = CHANNELS)
m_hat, h_hat, Loss = Optimize_Parallel(blurry_fourier = Blurry_Images, stepsize=step_size,steps = STEPS,
image_grad = image_gradients , blur_grad = blur_gradients,
getloss = get_loss, latent_image_dim = celeba_latent_dim , latent_blur_dim = blur_latent_dim)
X_hat_test = []
W_hat_test = []
for i in range(len(X_Orig)):
m_hat_i = m_hat[i*RANDOM_RESTARTS:(i+1)*RANDOM_RESTARTS]
h_hat_i = h_hat[i*RANDOM_RESTARTS:(i+1)*RANDOM_RESTARTS]
Loss_i = Loss[i*RANDOM_RESTARTS:(i+1)*RANDOM_RESTARTS]
x_hat_test, w_hat_test, loss_last_iter_test = Get_Min_Loss(Loss_i, m_hat_i, h_hat_i, decoder = CelebAGAN, blur_decoder = blur_decoder,
latent_image_dim = celeba_latent_dim, latent_blur_dim = blur_latent_dim, print_grad=False)
X_hat_test.append(x_hat_test)
W_hat_test.append(w_hat_test)
X_hat_test = np.array(X_hat_test)
W_hat_test = np.array(W_hat_test)
m_hat, h_hat, Loss = Optimize_Parallel(blurry_fourier = Blurry_Images_range, stepsize=step_size,steps = STEPS,
image_grad = image_gradients , blur_grad = blur_gradients,
getloss = get_loss, latent_image_dim = celeba_latent_dim , latent_blur_dim = blur_latent_dim)
X_hat_range = []
W_hat_range = []
for i in range(len(X_Orig)):
m_hat_i = m_hat[i*RANDOM_RESTARTS:(i+1)*RANDOM_RESTARTS]
h_hat_i = h_hat[i*RANDOM_RESTARTS:(i+1)*RANDOM_RESTARTS]
Loss_i = Loss[i*RANDOM_RESTARTS:(i+1)*RANDOM_RESTARTS]
x_hat_range, w_hat_range, loss_last_iter_range = Get_Min_Loss(Loss_i, m_hat_i, h_hat_i, decoder = CelebAGAN, blur_decoder = blur_decoder,
latent_image_dim = celeba_latent_dim, latent_blur_dim = blur_latent_dim, print_grad=False)
X_hat_range.append(x_hat_range)
W_hat_range.append(w_hat_range)
X_hat_range = np.array(X_hat_range)
W_hat_range = np.array(W_hat_range)
X_hat_test = (X_hat_test + 1)/2
X_hat_range = (X_hat_range + 1)/2
Max = 10**len(str(len(X_Orig)-1))
for i in range(len(X_Orig)):
Save_Results(path = SAVE_PATH + str(i+Max)[1:],
x_np = None,
w_np = None,
y_np = Y_np[i],
y_np_range = Y_np_range[i] ,
x_hat_test = X_hat_test[i],
w_hat_test = W_hat_test[i],
x_range = None,
x_hat_range = X_hat_range[i],
w_hat_range = W_hat_range[i], clip=True) | true | true |
f71132d4c7e735b30abb14add36214b3cc1d70d4 | 23,887 | py | Python | ironic/drivers/modules/pxe_base.py | calsoft-internal/ironic | 6222d57a74368264b132885b6140b204f429911f | [
"Apache-2.0"
] | null | null | null | ironic/drivers/modules/pxe_base.py | calsoft-internal/ironic | 6222d57a74368264b132885b6140b204f429911f | [
"Apache-2.0"
] | null | null | null | ironic/drivers/modules/pxe_base.py | calsoft-internal/ironic | 6222d57a74368264b132885b6140b204f429911f | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base PXE Interface Methods
"""
from futurist import periodics
from ironic_lib import metrics_utils
from oslo_config import cfg
from oslo_log import log as logging
from ironic.common import boot_devices
from ironic.common import dhcp_factory
from ironic.common import exception
from ironic.common.glance_service import service_utils
from ironic.common.i18n import _
from ironic.common import pxe_utils
from ironic.common import states
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.drivers.modules import boot_mode_utils
from ironic.drivers.modules import deploy_utils
from ironic.drivers import utils as driver_utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
METRICS = metrics_utils.get_metrics_logger(__name__)
REQUIRED_PROPERTIES = {
'deploy_kernel': _("UUID (from Glance) of the deployment kernel. "
"Required."),
'deploy_ramdisk': _("UUID (from Glance) of the ramdisk that is "
"mounted at boot time. Required."),
}
RESCUE_PROPERTIES = {
'rescue_kernel': _('UUID (from Glance) of the rescue kernel. This value '
'is required for rescue mode.'),
'rescue_ramdisk': _('UUID (from Glance) of the rescue ramdisk with agent '
'that is used at node rescue time. This value is '
'required for rescue mode.'),
}
OPTIONAL_PROPERTIES = {
'kernel_append_params': _("Additional kernel parameters to pass down to "
"instance kernel. These parameters can be "
"consumed by the kernel or by the applications "
"by reading /proc/cmdline. Mind severe cmdline "
"size limit. Overrides "
"[pxe]/kernel_append_params ironic "
"option."),
}
COMMON_PROPERTIES = REQUIRED_PROPERTIES.copy()
COMMON_PROPERTIES.update(driver_utils.OPTIONAL_PROPERTIES)
COMMON_PROPERTIES.update(RESCUE_PROPERTIES)
COMMON_PROPERTIES.update(OPTIONAL_PROPERTIES)
class PXEBaseMixin(object):
ipxe_enabled = False
def get_properties(self):
"""Return the properties of the interface.
:returns: dictionary of <property name>:<property description> entries.
"""
return COMMON_PROPERTIES
@METRICS.timer('PXEBaseMixin.clean_up_ramdisk')
def clean_up_ramdisk(self, task):
"""Cleans up the boot of ironic ramdisk.
This method cleans up the PXE environment that was setup for booting
the deploy or rescue ramdisk. It unlinks the deploy/rescue
kernel/ramdisk in the node's directory in tftproot and removes it's PXE
config.
:param task: a task from TaskManager.
:param mode: Label indicating a deploy or rescue operation
was carried out on the node. Supported values are 'deploy' and
'rescue'. Defaults to 'deploy', indicating deploy operation was
carried out.
:returns: None
"""
node = task.node
mode = deploy_utils.rescue_or_deploy_mode(node)
try:
images_info = pxe_utils.get_image_info(
node, mode=mode, ipxe_enabled=self.ipxe_enabled)
except exception.MissingParameterValue as e:
LOG.warning('Could not get %(mode)s image info '
'to clean up images for node %(node)s: %(err)s',
{'mode': mode, 'node': node.uuid, 'err': e})
else:
pxe_utils.clean_up_pxe_env(
task, images_info, ipxe_enabled=self.ipxe_enabled)
@METRICS.timer('PXEBaseMixin.clean_up_instance')
def clean_up_instance(self, task):
"""Cleans up the boot of instance.
This method cleans up the environment that was setup for booting
the instance. It unlinks the instance kernel/ramdisk in node's
directory in tftproot and removes the PXE config.
:param task: a task from TaskManager.
:returns: None
"""
node = task.node
try:
images_info = pxe_utils.get_instance_image_info(
task, ipxe_enabled=self.ipxe_enabled)
except exception.MissingParameterValue as e:
LOG.warning('Could not get instance image info '
'to clean up images for node %(node)s: %(err)s',
{'node': node.uuid, 'err': e})
else:
pxe_utils.clean_up_pxe_env(task, images_info,
ipxe_enabled=self.ipxe_enabled)
boot_mode_utils.deconfigure_secure_boot_if_needed(task)
@METRICS.timer('PXEBaseMixin.prepare_ramdisk')
def prepare_ramdisk(self, task, ramdisk_params):
"""Prepares the boot of Ironic ramdisk using PXE.
This method prepares the boot of the deploy or rescue kernel/ramdisk
after reading relevant information from the node's driver_info and
instance_info.
:param task: a task from TaskManager.
:param ramdisk_params: the parameters to be passed to the ramdisk.
pxe driver passes these parameters as kernel command-line
arguments.
:returns: None
:raises: MissingParameterValue, if some information is missing in
node's driver_info or instance_info.
:raises: InvalidParameterValue, if some information provided is
invalid.
:raises: IronicException, if some power or set boot boot device
operation failed on the node.
"""
node = task.node
# Label indicating a deploy or rescue operation being carried out on
# the node, 'deploy' or 'rescue'. Unless the node is in a rescue like
# state, the mode is set to 'deploy', indicating deploy operation is
# being carried out.
mode = deploy_utils.rescue_or_deploy_mode(node)
if self.ipxe_enabled:
# NOTE(mjturek): At this point, the ipxe boot script should
# already exist as it is created at startup time. However, we
# call the boot script create method here to assert its
# existence and handle the unlikely case that it wasn't created
# or was deleted.
pxe_utils.create_ipxe_boot_script()
# Generate options for both IPv4 and IPv6, and they can be
# filtered down later based upon the port options.
# TODO(TheJulia): This should be re-tooled during the Victoria
# development cycle so that we call a single method and return
# combined options. The method we currently call is relied upon
# by two eternal projects, to changing the behavior is not ideal.
dhcp_opts = pxe_utils.dhcp_options_for_instance(
task, ipxe_enabled=self.ipxe_enabled, ip_version=4)
dhcp_opts += pxe_utils.dhcp_options_for_instance(
task, ipxe_enabled=self.ipxe_enabled, ip_version=6)
provider = dhcp_factory.DHCPFactory()
provider.update_dhcp(task, dhcp_opts)
pxe_info = pxe_utils.get_image_info(node, mode=mode,
ipxe_enabled=self.ipxe_enabled)
# NODE: Try to validate and fetch instance images only
# if we are in DEPLOYING state.
if node.provision_state == states.DEPLOYING:
pxe_info.update(
pxe_utils.get_instance_image_info(
task, ipxe_enabled=self.ipxe_enabled))
boot_mode_utils.sync_boot_mode(task)
pxe_options = pxe_utils.build_pxe_config_options(
task, pxe_info, ipxe_enabled=self.ipxe_enabled,
ramdisk_params=ramdisk_params)
# TODO(dtantsur): backwards compability hack, remove in the V release
if ramdisk_params.get("ipa-api-url"):
pxe_options["ipa-api-url"] = ramdisk_params["ipa-api-url"]
if self.ipxe_enabled:
pxe_config_template = deploy_utils.get_ipxe_config_template(node)
else:
pxe_config_template = deploy_utils.get_pxe_config_template(node)
pxe_utils.create_pxe_config(task, pxe_options,
pxe_config_template,
ipxe_enabled=self.ipxe_enabled)
manager_utils.node_set_boot_device(task, boot_devices.PXE,
persistent=False)
if self.ipxe_enabled and CONF.pxe.ipxe_use_swift:
kernel_label = '%s_kernel' % mode
ramdisk_label = '%s_ramdisk' % mode
pxe_info.pop(kernel_label, None)
pxe_info.pop(ramdisk_label, None)
if pxe_info:
pxe_utils.cache_ramdisk_kernel(task, pxe_info,
ipxe_enabled=self.ipxe_enabled)
LOG.debug('Ramdisk (i)PXE boot for node %(node)s has been prepared '
'with kernel params %(params)s',
{'node': node.uuid, 'params': pxe_options})
    @METRICS.timer('PXEBaseMixin.prepare_instance')
    def prepare_instance(self, task):
        """Prepares the boot of instance.

        This method prepares the boot of the instance after reading
        relevant information from the node's instance_info. In case of netboot,
        it updates the dhcp entries and switches the PXE config. In case of
        localboot, it cleans up the PXE config.

        :param task: a task from TaskManager.
        :returns: None
        """
        boot_mode_utils.sync_boot_mode(task)
        boot_mode_utils.configure_secure_boot_if_needed(task)
        node = task.node
        boot_option = deploy_utils.get_boot_option(node)
        boot_device = None
        instance_image_info = {}
        # Ramdisk/kickstart deployments boot the instance artifacts directly
        # over the network, so fetch and cache them up front.
        if boot_option == "ramdisk" or boot_option == "kickstart":
            instance_image_info = pxe_utils.get_instance_image_info(
                task, ipxe_enabled=self.ipxe_enabled)
            pxe_utils.cache_ramdisk_kernel(task, instance_image_info,
                                           ipxe_enabled=self.ipxe_enabled)
            if 'ks_template' in instance_image_info:
                # Index 1 holds the local path of the cached template.
                ks_cfg = pxe_utils.validate_kickstart_template(
                    instance_image_info['ks_template'][1]
                )
                pxe_utils.validate_kickstart_file(ks_cfg)
        if (deploy_utils.is_iscsi_boot(task) or boot_option == "ramdisk"
                or boot_option == "kickstart"):
            pxe_utils.prepare_instance_pxe_config(
                task, instance_image_info,
                iscsi_boot=deploy_utils.is_iscsi_boot(task),
                ramdisk_boot=(boot_option == "ramdisk"),
                anaconda_boot=(boot_option == "kickstart"),
                ipxe_enabled=self.ipxe_enabled)
            pxe_utils.prepare_instance_kickstart_config(
                task, instance_image_info,
                anaconda_boot=(boot_option == "kickstart"))
            boot_device = boot_devices.PXE
        elif boot_option != "local":
            # Netboot of a deployed image: switch the PXE configuration from
            # deployment mode to booting the instance kernel/ramdisk.
            if task.driver.storage.should_write_image(task):
                # Make sure that the instance kernel/ramdisk is cached.
                # This is for the takeover scenario for active nodes.
                instance_image_info = pxe_utils.get_instance_image_info(
                    task, ipxe_enabled=self.ipxe_enabled)
                pxe_utils.cache_ramdisk_kernel(task, instance_image_info,
                                               ipxe_enabled=self.ipxe_enabled)
            # If it's going to PXE boot we need to update the DHCP server
            dhcp_opts = pxe_utils.dhcp_options_for_instance(
                task, ipxe_enabled=self.ipxe_enabled, ip_version=4)
            dhcp_opts += pxe_utils.dhcp_options_for_instance(
                task, ipxe_enabled=self.ipxe_enabled, ip_version=6)
            provider = dhcp_factory.DHCPFactory()
            provider.update_dhcp(task, dhcp_opts)
            iwdi = task.node.driver_internal_info.get('is_whole_disk_image')
            try:
                root_uuid_or_disk_id = task.node.driver_internal_info[
                    'root_uuid_or_disk_id'
                ]
            except KeyError:
                if not task.driver.storage.should_write_image(task):
                    # No image is being written by ironic, so a missing
                    # root ID is expected here; nothing to do.
                    pass
                elif not iwdi:
                    LOG.warning("The UUID for the root partition can't be "
                                "found, unable to switch the pxe config from "
                                "deployment mode to service (boot) mode for "
                                "node %(node)s", {"node": task.node.uuid})
                else:
                    LOG.warning("The disk id for the whole disk image can't "
                                "be found, unable to switch the pxe config "
                                "from deployment mode to service (boot) mode "
                                "for node %(node)s. Booting the instance "
                                "from disk.", {"node": task.node.uuid})
                    pxe_utils.clean_up_pxe_config(
                        task, ipxe_enabled=self.ipxe_enabled)
                    boot_device = boot_devices.DISK
            else:
                pxe_utils.build_service_pxe_config(
                    task, instance_image_info, root_uuid_or_disk_id,
                    ipxe_enabled=self.ipxe_enabled)
                boot_device = boot_devices.PXE
        else:
            # NOTE(dtantsur): create a PXE configuration as a safety net for
            # hardware incapable of persistent boot. If on a reboot it will
            # try to boot from PXE, this configuration will return it back.
            if CONF.pxe.enable_netboot_fallback:
                pxe_utils.build_service_pxe_config(
                    task, instance_image_info,
                    task.node.driver_internal_info.get('root_uuid_or_disk_id'),
                    ipxe_enabled=self.ipxe_enabled,
                    # PXE config for whole disk images is identical to what
                    # we need to boot from local disk, so use True even
                    # for partition images.
                    is_whole_disk_image=True)
            else:
                # Clean up the deployment configuration
                pxe_utils.clean_up_pxe_config(
                    task, ipxe_enabled=self.ipxe_enabled)
            boot_device = boot_devices.DISK
        # NOTE(pas-ha) do not re-set boot device on ACTIVE nodes
        # during takeover
        if boot_device and task.node.provision_state != states.ACTIVE:
            manager_utils.node_set_boot_device(task, boot_device,
                                               persistent=True)
    def _validate_common(self, task):
        """Validate PXE parameters common to deploy, rescue and inspection.

        :param task: a TaskManager instance with the node to validate.
        :raises: MissingParameterValue if the node has no ports or a
            required configuration option is not set.
        :raises: InvalidParameterValue if incompatible parameters are set.
        """
        node = task.node
        if not driver_utils.get_node_mac_addresses(task):
            raise exception.MissingParameterValue(
                _("Node %s does not have any port associated with it.")
                % node.uuid)
        if self.ipxe_enabled:
            # iPXE fetches kernel/ramdisk over HTTP, so the HTTP server
            # must be configured.
            if not CONF.deploy.http_url or not CONF.deploy.http_root:
                raise exception.MissingParameterValue(_(
                    "iPXE boot is enabled but no HTTP URL or HTTP "
                    "root was specified."))
        # NOTE(zer0c00l): When 'kickstart' boot option is used we need to store
        # kickstart and squashfs files in http_root directory. These files
        # will be eventually requested by anaconda installer during deployment
        # over http(s).
        if deploy_utils.get_boot_option(node) == 'kickstart':
            if not CONF.deploy.http_url or not CONF.deploy.http_root:
                raise exception.MissingParameterValue(_(
                    "'kickstart' boot option is set on the node but no HTTP "
                    "URL or HTTP root was specified."))
            if not CONF.anaconda.default_ks_template:
                raise exception.MissingParameterValue(_(
                    "'kickstart' boot option is set on the node but no "
                    "default kickstart template is specified."))
        # Check the trusted_boot capabilities value.
        deploy_utils.validate_capabilities(node)
        if deploy_utils.is_trusted_boot_requested(node):
            # Check if 'boot_option' and boot mode is compatible with
            # trusted boot.
            if self.ipxe_enabled:
                # NOTE(TheJulia): So in theory (huge theory here, not put to
                # practice or tested), that one can define the kernel as tboot
                # and define the actual kernel and ramdisk as appended data.
                # Similar to how one can iPXE load the XEN hypervisor.
                # tboot mailing list seem to indicate pxe/ipxe support, or
                # more specifically avoiding breaking the scenarios of use,
                # but there is also no definitive documentation on the subject.
                LOG.warning('Trusted boot has been requested for %(node)s in '
                            'concert with iPXE. This is not a supported '
                            'configuration for an ironic deployment.',
                            {'node': node.uuid})
            pxe_utils.validate_boot_parameters_for_trusted_boot(node)
        # Check if we have invalid parameters being passed which will not work
        # for ramdisk configurations.
        if (node.instance_info.get('image_source')
                and node.instance_info.get('boot_iso')):
            raise exception.InvalidParameterValue(_(
                "An 'image_source' and 'boot_iso' parameter may not be "
                "specified at the same time."))
        pxe_utils.parse_driver_info(node)
@METRICS.timer('PXEBaseMixin.validate')
def validate(self, task):
"""Validate the PXE-specific info for booting deploy/instance images.
This method validates the PXE-specific info for booting the
ramdisk and instance on the node. If invalid, raises an
exception; otherwise returns None.
:param task: a task from TaskManager.
:returns: None
:raises: InvalidParameterValue, if some parameters are invalid.
:raises: MissingParameterValue, if some required parameters are
missing.
"""
self._validate_common(task)
node = task.node
# NOTE(TheJulia): If we're not writing an image, we can skip
# the remainder of this method.
# NOTE(dtantsur): if we're are writing an image with local boot
# the boot interface does not care about image parameters and
# must not validate them.
boot_option = deploy_utils.get_boot_option(node)
if (not task.driver.storage.should_write_image(task)
or boot_option == 'local'):
return
d_info = deploy_utils.get_image_instance_info(node)
if node.driver_internal_info.get('is_whole_disk_image'):
props = []
elif d_info.get('boot_iso'):
props = ['boot_iso']
elif service_utils.is_glance_image(d_info['image_source']):
props = ['kernel_id', 'ramdisk_id']
if boot_option == 'kickstart':
props.append('squashfs_id')
else:
props = ['kernel', 'ramdisk']
deploy_utils.validate_image_properties(task.context, d_info, props)
    @METRICS.timer('PXEBaseMixin.validate_rescue')
    def validate_rescue(self, task):
        """Validate that the node has required properties for rescue.

        :param task: a TaskManager instance with the node being checked
        :raises: MissingParameterValue if node is missing one or more required
            parameters
        """
        # Only the rescue kernel/ramdisk parameters need to be checked here.
        pxe_utils.parse_driver_info(task.node, mode='rescue')
    @METRICS.timer('PXEBaseMixin.validate_inspection')
    def validate_inspection(self, task):
        """Validate that the node has required properties for inspection.

        :param task: A TaskManager instance with the node being checked
        :raises: UnsupportedDriverExtension if the common PXE parameters
            required for managed inspection are missing
        """
        try:
            self._validate_common(task)
        except exception.MissingParameterValue:
            # Fall back to non-managed in-band inspection
            raise exception.UnsupportedDriverExtension(
                driver=task.node.driver, extension='inspection')
_RETRY_ALLOWED_STATES = {states.DEPLOYWAIT, states.CLEANWAIT,
states.RESCUEWAIT}
    @METRICS.timer('PXEBaseMixin._check_boot_timeouts')
    @periodics.periodic(spacing=CONF.pxe.boot_retry_check_interval,
                        enabled=bool(CONF.pxe.boot_retry_timeout))
    def _check_boot_timeouts(self, manager, context):
        """Periodically checks whether boot has timed out and retry it.

        :param manager: conductor manager.
        :param context: request context.
        """
        # Only consider nodes that are waiting on a ramdisk and have been
        # in that state longer than the configured retry timeout.
        filters = {'provision_state_in': self._RETRY_ALLOWED_STATES,
                   'reserved': False,
                   'maintenance': False,
                   'provisioned_before': CONF.pxe.boot_retry_timeout}
        node_iter = manager.iter_nodes(filters=filters)
        for node_uuid, driver, conductor_group in node_iter:
            try:
                lock_purpose = 'checking PXE boot status'
                # A shared lock suffices for the check; _check_boot_status
                # upgrades it only when a retry is actually needed.
                with task_manager.acquire(context, node_uuid,
                                          shared=True,
                                          purpose=lock_purpose) as task:
                    self._check_boot_status(task)
            except (exception.NodeLocked, exception.NodeNotFound):
                # Another conductor owns the node, or it was deleted: skip.
                continue
    def _check_boot_status(self, task):
        """Retry (i)PXE boot of one node if it appears to have timed out.

        :param task: a TaskManager instance, initially holding a shared
            lock on the node.
        """
        # Only act on nodes whose boot interface is PXE-based.
        if not isinstance(task.driver.boot, PXEBaseMixin):
            return
        if not _should_retry_boot(task.node):
            return
        task.upgrade_lock(purpose='retrying PXE boot')
        # Retry critical checks after acquiring the exclusive lock.
        if (task.node.maintenance or task.node.provision_state
                not in self._RETRY_ALLOWED_STATES
                or not _should_retry_boot(task.node)):
            return
        LOG.info('Booting the ramdisk on node %(node)s is taking more than '
                 '%(timeout)d seconds, retrying boot',
                 {'node': task.node.uuid,
                  'timeout': CONF.pxe.boot_retry_timeout})
        # Power-cycle with a one-time PXE boot device to restart the boot.
        manager_utils.node_power_action(task, states.POWER_OFF)
        manager_utils.node_set_boot_device(task, boot_devices.PXE,
                                           persistent=False)
        manager_utils.node_power_action(task, states.POWER_ON)
def _should_retry_boot(node):
    """Tell whether a seemingly stuck ramdisk boot should be retried.

    The retry is skipped when either the agent heartbeat or the last power
    state change happened within the configured retry timeout, i.e. the
    node still looks alive.

    :param node: the node being checked.
    :returns: True if boot should be retried, False otherwise.
    """
    # NOTE(dtantsur): this assumes IPA, do we need to make it generic?
    timeout = CONF.pxe.boot_retry_timeout
    for event in ('agent_last_heartbeat', 'last_power_state_change'):
        if not manager_utils.value_within_timeout(
                node.driver_internal_info.get(event), timeout):
            continue
        # Alive and heartbeating, probably busy with something long
        LOG.debug('Not retrying PXE boot for node %(node)s; its '
                  '%(event)s happened less than %(timeout)d seconds ago',
                  {'node': node.uuid, 'event': event,
                   'timeout': timeout})
        return False
    return True
| 45.155009 | 79 | 0.618495 |
from futurist import periodics
from ironic_lib import metrics_utils
from oslo_config import cfg
from oslo_log import log as logging
from ironic.common import boot_devices
from ironic.common import dhcp_factory
from ironic.common import exception
from ironic.common.glance_service import service_utils
from ironic.common.i18n import _
from ironic.common import pxe_utils
from ironic.common import states
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.drivers.modules import boot_mode_utils
from ironic.drivers.modules import deploy_utils
from ironic.drivers import utils as driver_utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
METRICS = metrics_utils.get_metrics_logger(__name__)
REQUIRED_PROPERTIES = {
'deploy_kernel': _("UUID (from Glance) of the deployment kernel. "
"Required."),
'deploy_ramdisk': _("UUID (from Glance) of the ramdisk that is "
"mounted at boot time. Required."),
}
RESCUE_PROPERTIES = {
'rescue_kernel': _('UUID (from Glance) of the rescue kernel. This value '
'is required for rescue mode.'),
'rescue_ramdisk': _('UUID (from Glance) of the rescue ramdisk with agent '
'that is used at node rescue time. This value is '
'required for rescue mode.'),
}
OPTIONAL_PROPERTIES = {
'kernel_append_params': _("Additional kernel parameters to pass down to "
"instance kernel. These parameters can be "
"consumed by the kernel or by the applications "
"by reading /proc/cmdline. Mind severe cmdline "
"size limit. Overrides "
"[pxe]/kernel_append_params ironic "
"option."),
}
COMMON_PROPERTIES = REQUIRED_PROPERTIES.copy()
COMMON_PROPERTIES.update(driver_utils.OPTIONAL_PROPERTIES)
COMMON_PROPERTIES.update(RESCUE_PROPERTIES)
COMMON_PROPERTIES.update(OPTIONAL_PROPERTIES)
class PXEBaseMixin(object):
ipxe_enabled = False
def get_properties(self):
return COMMON_PROPERTIES
@METRICS.timer('PXEBaseMixin.clean_up_ramdisk')
def clean_up_ramdisk(self, task):
node = task.node
mode = deploy_utils.rescue_or_deploy_mode(node)
try:
images_info = pxe_utils.get_image_info(
node, mode=mode, ipxe_enabled=self.ipxe_enabled)
except exception.MissingParameterValue as e:
LOG.warning('Could not get %(mode)s image info '
'to clean up images for node %(node)s: %(err)s',
{'mode': mode, 'node': node.uuid, 'err': e})
else:
pxe_utils.clean_up_pxe_env(
task, images_info, ipxe_enabled=self.ipxe_enabled)
@METRICS.timer('PXEBaseMixin.clean_up_instance')
def clean_up_instance(self, task):
node = task.node
try:
images_info = pxe_utils.get_instance_image_info(
task, ipxe_enabled=self.ipxe_enabled)
except exception.MissingParameterValue as e:
LOG.warning('Could not get instance image info '
'to clean up images for node %(node)s: %(err)s',
{'node': node.uuid, 'err': e})
else:
pxe_utils.clean_up_pxe_env(task, images_info,
ipxe_enabled=self.ipxe_enabled)
boot_mode_utils.deconfigure_secure_boot_if_needed(task)
@METRICS.timer('PXEBaseMixin.prepare_ramdisk')
def prepare_ramdisk(self, task, ramdisk_params):
node = task.node
mode = deploy_utils.rescue_or_deploy_mode(node)
if self.ipxe_enabled:
# or was deleted.
pxe_utils.create_ipxe_boot_script()
# Generate options for both IPv4 and IPv6, and they can be
# filtered down later based upon the port options.
# TODO(TheJulia): This should be re-tooled during the Victoria
# development cycle so that we call a single method and return
# combined options. The method we currently call is relied upon
# by two eternal projects, to changing the behavior is not ideal.
dhcp_opts = pxe_utils.dhcp_options_for_instance(
task, ipxe_enabled=self.ipxe_enabled, ip_version=4)
dhcp_opts += pxe_utils.dhcp_options_for_instance(
task, ipxe_enabled=self.ipxe_enabled, ip_version=6)
provider = dhcp_factory.DHCPFactory()
provider.update_dhcp(task, dhcp_opts)
pxe_info = pxe_utils.get_image_info(node, mode=mode,
ipxe_enabled=self.ipxe_enabled)
# NODE: Try to validate and fetch instance images only
# if we are in DEPLOYING state.
if node.provision_state == states.DEPLOYING:
pxe_info.update(
pxe_utils.get_instance_image_info(
task, ipxe_enabled=self.ipxe_enabled))
boot_mode_utils.sync_boot_mode(task)
pxe_options = pxe_utils.build_pxe_config_options(
task, pxe_info, ipxe_enabled=self.ipxe_enabled,
ramdisk_params=ramdisk_params)
# TODO(dtantsur): backwards compability hack, remove in the V release
if ramdisk_params.get("ipa-api-url"):
pxe_options["ipa-api-url"] = ramdisk_params["ipa-api-url"]
if self.ipxe_enabled:
pxe_config_template = deploy_utils.get_ipxe_config_template(node)
else:
pxe_config_template = deploy_utils.get_pxe_config_template(node)
pxe_utils.create_pxe_config(task, pxe_options,
pxe_config_template,
ipxe_enabled=self.ipxe_enabled)
manager_utils.node_set_boot_device(task, boot_devices.PXE,
persistent=False)
if self.ipxe_enabled and CONF.pxe.ipxe_use_swift:
kernel_label = '%s_kernel' % mode
ramdisk_label = '%s_ramdisk' % mode
pxe_info.pop(kernel_label, None)
pxe_info.pop(ramdisk_label, None)
if pxe_info:
pxe_utils.cache_ramdisk_kernel(task, pxe_info,
ipxe_enabled=self.ipxe_enabled)
LOG.debug('Ramdisk (i)PXE boot for node %(node)s has been prepared '
'with kernel params %(params)s',
{'node': node.uuid, 'params': pxe_options})
@METRICS.timer('PXEBaseMixin.prepare_instance')
def prepare_instance(self, task):
boot_mode_utils.sync_boot_mode(task)
boot_mode_utils.configure_secure_boot_if_needed(task)
node = task.node
boot_option = deploy_utils.get_boot_option(node)
boot_device = None
instance_image_info = {}
if boot_option == "ramdisk" or boot_option == "kickstart":
instance_image_info = pxe_utils.get_instance_image_info(
task, ipxe_enabled=self.ipxe_enabled)
pxe_utils.cache_ramdisk_kernel(task, instance_image_info,
ipxe_enabled=self.ipxe_enabled)
if 'ks_template' in instance_image_info:
ks_cfg = pxe_utils.validate_kickstart_template(
instance_image_info['ks_template'][1]
)
pxe_utils.validate_kickstart_file(ks_cfg)
if (deploy_utils.is_iscsi_boot(task) or boot_option == "ramdisk"
or boot_option == "kickstart"):
pxe_utils.prepare_instance_pxe_config(
task, instance_image_info,
iscsi_boot=deploy_utils.is_iscsi_boot(task),
ramdisk_boot=(boot_option == "ramdisk"),
anaconda_boot=(boot_option == "kickstart"),
ipxe_enabled=self.ipxe_enabled)
pxe_utils.prepare_instance_kickstart_config(
task, instance_image_info,
anaconda_boot=(boot_option == "kickstart"))
boot_device = boot_devices.PXE
elif boot_option != "local":
if task.driver.storage.should_write_image(task):
# Make sure that the instance kernel/ramdisk is cached.
# This is for the takeover scenario for active nodes.
instance_image_info = pxe_utils.get_instance_image_info(
task, ipxe_enabled=self.ipxe_enabled)
pxe_utils.cache_ramdisk_kernel(task, instance_image_info,
ipxe_enabled=self.ipxe_enabled)
# If it's going to PXE boot we need to update the DHCP server
dhcp_opts = pxe_utils.dhcp_options_for_instance(
task, ipxe_enabled=self.ipxe_enabled, ip_version=4)
dhcp_opts += pxe_utils.dhcp_options_for_instance(
task, ipxe_enabled=self.ipxe_enabled, ip_version=6)
provider = dhcp_factory.DHCPFactory()
provider.update_dhcp(task, dhcp_opts)
iwdi = task.node.driver_internal_info.get('is_whole_disk_image')
try:
root_uuid_or_disk_id = task.node.driver_internal_info[
'root_uuid_or_disk_id'
]
except KeyError:
if not task.driver.storage.should_write_image(task):
pass
elif not iwdi:
LOG.warning("The UUID for the root partition can't be "
"found, unable to switch the pxe config from "
"deployment mode to service (boot) mode for "
"node %(node)s", {"node": task.node.uuid})
else:
LOG.warning("The disk id for the whole disk image can't "
"be found, unable to switch the pxe config "
"from deployment mode to service (boot) mode "
"for node %(node)s. Booting the instance "
"from disk.", {"node": task.node.uuid})
pxe_utils.clean_up_pxe_config(
task, ipxe_enabled=self.ipxe_enabled)
boot_device = boot_devices.DISK
else:
pxe_utils.build_service_pxe_config(
task, instance_image_info, root_uuid_or_disk_id,
ipxe_enabled=self.ipxe_enabled)
boot_device = boot_devices.PXE
else:
if CONF.pxe.enable_netboot_fallback:
pxe_utils.build_service_pxe_config(
task, instance_image_info,
task.node.driver_internal_info.get('root_uuid_or_disk_id'),
ipxe_enabled=self.ipxe_enabled,
is_whole_disk_image=True)
else:
pxe_utils.clean_up_pxe_config(
task, ipxe_enabled=self.ipxe_enabled)
boot_device = boot_devices.DISK
if boot_device and task.node.provision_state != states.ACTIVE:
manager_utils.node_set_boot_device(task, boot_device,
persistent=True)
def _validate_common(self, task):
node = task.node
if not driver_utils.get_node_mac_addresses(task):
raise exception.MissingParameterValue(
_("Node %s does not have any port associated with it.")
% node.uuid)
if self.ipxe_enabled:
if not CONF.deploy.http_url or not CONF.deploy.http_root:
raise exception.MissingParameterValue(_(
"iPXE boot is enabled but no HTTP URL or HTTP "
"root was specified."))
if deploy_utils.get_boot_option(node) == 'kickstart':
if not CONF.deploy.http_url or not CONF.deploy.http_root:
raise exception.MissingParameterValue(_(
"'kickstart' boot option is set on the node but no HTTP "
"URL or HTTP root was specified."))
if not CONF.anaconda.default_ks_template:
raise exception.MissingParameterValue(_(
"'kickstart' boot option is set on the node but no "
"default kickstart template is specified."))
deploy_utils.validate_capabilities(node)
if deploy_utils.is_trusted_boot_requested(node):
if self.ipxe_enabled:
LOG.warning('Trusted boot has been requested for %(node)s in '
'concert with iPXE. This is not a supported '
'configuration for an ironic deployment.',
{'node': node.uuid})
pxe_utils.validate_boot_parameters_for_trusted_boot(node)
if (node.instance_info.get('image_source')
and node.instance_info.get('boot_iso')):
raise exception.InvalidParameterValue(_(
"An 'image_source' and 'boot_iso' parameter may not be "
"specified at the same time."))
pxe_utils.parse_driver_info(node)
@METRICS.timer('PXEBaseMixin.validate')
def validate(self, task):
self._validate_common(task)
node = task.node
# the remainder of this method.
# NOTE(dtantsur): if we're are writing an image with local boot
boot_option = deploy_utils.get_boot_option(node)
if (not task.driver.storage.should_write_image(task)
or boot_option == 'local'):
return
d_info = deploy_utils.get_image_instance_info(node)
if node.driver_internal_info.get('is_whole_disk_image'):
props = []
elif d_info.get('boot_iso'):
props = ['boot_iso']
elif service_utils.is_glance_image(d_info['image_source']):
props = ['kernel_id', 'ramdisk_id']
if boot_option == 'kickstart':
props.append('squashfs_id')
else:
props = ['kernel', 'ramdisk']
deploy_utils.validate_image_properties(task.context, d_info, props)
@METRICS.timer('PXEBaseMixin.validate_rescue')
def validate_rescue(self, task):
pxe_utils.parse_driver_info(task.node, mode='rescue')
@METRICS.timer('PXEBaseMixin.validate_inspection')
def validate_inspection(self, task):
try:
self._validate_common(task)
except exception.MissingParameterValue:
raise exception.UnsupportedDriverExtension(
driver=task.node.driver, extension='inspection')
_RETRY_ALLOWED_STATES = {states.DEPLOYWAIT, states.CLEANWAIT,
states.RESCUEWAIT}
@METRICS.timer('PXEBaseMixin._check_boot_timeouts')
@periodics.periodic(spacing=CONF.pxe.boot_retry_check_interval,
enabled=bool(CONF.pxe.boot_retry_timeout))
def _check_boot_timeouts(self, manager, context):
filters = {'provision_state_in': self._RETRY_ALLOWED_STATES,
'reserved': False,
'maintenance': False,
'provisioned_before': CONF.pxe.boot_retry_timeout}
node_iter = manager.iter_nodes(filters=filters)
for node_uuid, driver, conductor_group in node_iter:
try:
lock_purpose = 'checking PXE boot status'
with task_manager.acquire(context, node_uuid,
shared=True,
purpose=lock_purpose) as task:
self._check_boot_status(task)
except (exception.NodeLocked, exception.NodeNotFound):
continue
def _check_boot_status(self, task):
if not isinstance(task.driver.boot, PXEBaseMixin):
return
if not _should_retry_boot(task.node):
return
task.upgrade_lock(purpose='retrying PXE boot')
if (task.node.maintenance or task.node.provision_state
not in self._RETRY_ALLOWED_STATES
or not _should_retry_boot(task.node)):
return
LOG.info('Booting the ramdisk on node %(node)s is taking more than '
'%(timeout)d seconds, retrying boot',
{'node': task.node.uuid,
'timeout': CONF.pxe.boot_retry_timeout})
manager_utils.node_power_action(task, states.POWER_OFF)
manager_utils.node_set_boot_device(task, boot_devices.PXE,
persistent=False)
manager_utils.node_power_action(task, states.POWER_ON)
def _should_retry_boot(node):
for field in ('agent_last_heartbeat', 'last_power_state_change'):
if manager_utils.value_within_timeout(
node.driver_internal_info.get(field),
CONF.pxe.boot_retry_timeout):
LOG.debug('Not retrying PXE boot for node %(node)s; its '
'%(event)s happened less than %(timeout)d seconds ago',
{'node': node.uuid, 'event': field,
'timeout': CONF.pxe.boot_retry_timeout})
return False
return True
| true | true |
f7113319ab5a3109c6a12ffd7309beed2c6268f7 | 4,269 | py | Python | source/functions/encryption.py | GucciHsuan/CampusCyberInspectionTool2021 | 86636f777192e492f4342519e30a975a6a58b8ab | [
"MIT"
] | null | null | null | source/functions/encryption.py | GucciHsuan/CampusCyberInspectionTool2021 | 86636f777192e492f4342519e30a975a6a58b8ab | [
"MIT"
] | null | null | null | source/functions/encryption.py | GucciHsuan/CampusCyberInspectionTool2021 | 86636f777192e492f4342519e30a975a6a58b8ab | [
"MIT"
] | null | null | null | class cryto:
def decryp_Vige() :
cyphertext=input("cyphertext=")
key=input("key=")
print("plaintext=",end='')
j=0
for i in cyphertext :
c=ord(key[j])
if c < 97 :
c=c+32
c=c-97
x=ord(i)+26
if x < 123 :
x=x-c
if x > 90 :
x=x-26
else :
x=x-c
if x > 122 :
x=x-26
print(chr(x),end='')
j=j+1
print("\n")
def encryp_Vige() :
plaintext=input("plaintext=")
key=input("key=")
print()
print("cyphertext=",end='')
j=0
for i in plaintext :
c=ord(key[j])
if c < 97 :
c=c+32
c=c-97
x=ord(i)-26
if x < 65 :
x=x+c
if x < 65 :
x=x+26
else :
x=x+c
if x < 97 :
x=x+26
print(chr(x),end='')
j=j+1
print("\n")
def Make_a_rsa() :
print("公鑰(n,e) 只能加密小於n的整数m!!!")
while(1) :
p,q=map(int,input("choose two Prime number :(split with space)").split())
if p > 1 :
t=0
for i in range ( 2 , p ) :
if ( p % i ) == 0 :
print ( "請輸入質數",end="")
t=1
break
if t == 1 :
continue
if q > 1 :
t=0
for i in range ( 2 , q ) :
if ( q % i ) == 0 :
print ( "請輸入質數",end="")
t=1
break
if t == 1 :
continue
break
n=p*q
r=(p-1)*(q-1)
e=0
d=0
for i in range ( 2 , r ) :
if ( r-int(r/i)*i ) == 1 :
e=i
break
for i in range ( 2 , r ) :
if ( (i*e) % r ) == 1 :
d=i
break
print("Public key(N,e)=({0},{1})\nPrivate key(N,d)=({2},{3})".format(n, e, n, d))
def rsa_send() :
import math
import array as arr
n,k=map(int,input("input your key :(split with space)").split())
name=input("enter the path of your bin :(Don't use the used name of bin!)")
output_file = open(name+".bin", 'wb')
text=input("plaintext/cyphertext=")
fb=[]
for i in text :
i=ord(i)
i=pow(i,k,n)
fb.append(i)
int_array = arr.array('i', fb)
int_array.tofile(output_file)
output_file.close()
def rsa_read() :
n,k=map(int,input("input your key :(split with space)").split())
name=input("enter the path of your bin :")
with open(name + ".bin" , 'rb') as file:
int_bytes = file.read()
for i in int_bytes :
if i == 0 :
continue
i=pow(i,k,n)
print(chr(i), end="")
def linr_radom() :
text=input("plaintext/cyphertext=")
LFSR=input("LFSR_4=")
print()
print("cyphertext/plaintext=",end='')
a=int(LFSR[0])
b=int(LFSR[1])
c=int(LFSR[2])
d=int(LFSR[3])
for i in text :
print(int(i) ^ a,end="")
t= a ^ d
d=a
a=b
b=c
c=t
print()
def wood_decry() :
text=input("input the cryto :")
n=0
for i in text :
if n%4==0 :
print(i,end="")
n=n+1
def wood_encry() :
import random
text=input("input the plaintext :")
l=[]
for i in range(48,122) :
if (i>48 and i<57) or (i>65 and i<90) or (i>97 and i<122) :
l.append(i)
for i in text :
print(i,end="")
for j in range(3) :
r=random.choice(l)
print(chr(r),end="")
| 27.018987 | 89 | 0.35465 | class cryto:
def decryp_Vige() :
cyphertext=input("cyphertext=")
key=input("key=")
print("plaintext=",end='')
j=0
for i in cyphertext :
c=ord(key[j])
if c < 97 :
c=c+32
c=c-97
x=ord(i)+26
if x < 123 :
x=x-c
if x > 90 :
x=x-26
else :
x=x-c
if x > 122 :
x=x-26
print(chr(x),end='')
j=j+1
print("\n")
def encryp_Vige() :
plaintext=input("plaintext=")
key=input("key=")
print()
print("cyphertext=",end='')
j=0
for i in plaintext :
c=ord(key[j])
if c < 97 :
c=c+32
c=c-97
x=ord(i)-26
if x < 65 :
x=x+c
if x < 65 :
x=x+26
else :
x=x+c
if x < 97 :
x=x+26
print(chr(x),end='')
j=j+1
print("\n")
def Make_a_rsa() :
print("公鑰(n,e) 只能加密小於n的整数m!!!")
while(1) :
p,q=map(int,input("choose two Prime number :(split with space)").split())
if p > 1 :
t=0
for i in range ( 2 , p ) :
if ( p % i ) == 0 :
print ( "請輸入質數",end="")
t=1
break
if t == 1 :
continue
if q > 1 :
t=0
for i in range ( 2 , q ) :
if ( q % i ) == 0 :
print ( "請輸入質數",end="")
t=1
break
if t == 1 :
continue
break
n=p*q
r=(p-1)*(q-1)
e=0
d=0
for i in range ( 2 , r ) :
if ( r-int(r/i)*i ) == 1 :
e=i
break
for i in range ( 2 , r ) :
if ( (i*e) % r ) == 1 :
d=i
break
print("Public key(N,e)=({0},{1})\nPrivate key(N,d)=({2},{3})".format(n, e, n, d))
def rsa_send() :
import math
import array as arr
n,k=map(int,input("input your key :(split with space)").split())
name=input("enter the path of your bin :(Don't use the used name of bin!)")
output_file = open(name+".bin", 'wb')
text=input("plaintext/cyphertext=")
fb=[]
for i in text :
i=ord(i)
i=pow(i,k,n)
fb.append(i)
int_array = arr.array('i', fb)
int_array.tofile(output_file)
output_file.close()
def rsa_read() :
n,k=map(int,input("input your key :(split with space)").split())
name=input("enter the path of your bin :")
with open(name + ".bin" , 'rb') as file:
int_bytes = file.read()
for i in int_bytes :
if i == 0 :
continue
i=pow(i,k,n)
print(chr(i), end="")
def linr_radom() :
text=input("plaintext/cyphertext=")
LFSR=input("LFSR_4=")
print()
print("cyphertext/plaintext=",end='')
a=int(LFSR[0])
b=int(LFSR[1])
c=int(LFSR[2])
d=int(LFSR[3])
for i in text :
print(int(i) ^ a,end="")
t= a ^ d
d=a
a=b
b=c
c=t
print()
def wood_decry() :
text=input("input the cryto :")
n=0
for i in text :
if n%4==0 :
print(i,end="")
n=n+1
def wood_encry() :
import random
text=input("input the plaintext :")
l=[]
for i in range(48,122) :
if (i>48 and i<57) or (i>65 and i<90) or (i>97 and i<122) :
l.append(i)
for i in text :
print(i,end="")
for j in range(3) :
r=random.choice(l)
print(chr(r),end="")
| true | true |
f71133f3b8e1efb4829caeb82d8460761e5bcacc | 2,828 | py | Python | crpm/pvalue.py | dmontemayor/CRPM | e896831fad7bed42d17574b137e600fc5adbf6b0 | [
"MIT"
] | null | null | null | crpm/pvalue.py | dmontemayor/CRPM | e896831fad7bed42d17574b137e600fc5adbf6b0 | [
"MIT"
] | null | null | null | crpm/pvalue.py | dmontemayor/CRPM | e896831fad7bed42d17574b137e600fc5adbf6b0 | [
"MIT"
] | null | null | null | """ Calcualte p-values, ROC, AUC, and proportion of significant observations for
a set of observations given the null hypothesis distribution
Args:
variable: array of observed values
hypothesis: optional null hypotheis distribution (beta distribution by default)
alpha: optional significance parameter (.05 by default)
Returns:
pvalues: for every observation in variable
ROC: on a grid of 1000 points
AUC: integral of ROC
proportion of significant observations
"""
import numpy as np
def pvalue(variable=None, hypothesis=None, alpha=.05):
    """Calculate p-values, ROC, AUC and fraction of significant observations.

    Args:
        variable: array of observed values (default: Beta(3, 5) sample).
        hypothesis: null-hypothesis sample (default: Beta(5, 5) sample).
        alpha: significance threshold (default 0.05).

    Returns:
        tuple of (left-tail p-value for every observation,
                  ROC evaluated on a 1000-point alpha grid,
                  AUC -- the integral of the ROC over alpha,
                  proportion of observations with p <= alpha).
    """
    # set model
    if variable is None:
        variable = np.random.beta(a=3, b=5, size=5000)
    else:
        variable = np.array(variable)
    # set null-hypothesis
    if hypothesis is None:
        hypothesis = np.random.beta(a=5, b=5, size=1000)
    else:
        hypothesis = np.array(hypothesis)
    # calculate prob of left-tail event p(H<=x|H) for every instance of X
    prob = []
    for var in variable:
        prob.append((hypothesis <= var).sum())
    # normalize p
    prob = np.divide(prob, hypothesis.size)
    # scan alpha from 0 to 1 and find prob(p<=alpha) -- this is the ROC
    scanprob = []
    alphagrid = np.linspace(0, 1, num=1000)
    for val in alphagrid:
        # calculate prob p<=alpha
        scanprob.append((prob <= val).sum() / variable.size)
    # BUG FIX: the AUC promised by the docstring is the integral of the ROC
    # over the alpha grid; the original code summed the p-values instead
    # (and divided by the size of an unrelated array).
    auc = np.sum(scanprob) / alphagrid.size
    return prob, scanprob, auc, (prob <= alpha).sum() / variable.size
def lefttailpvalue(variable=None, hypothesis=None):
    """Return the left-tail p-value p(H <= x | H) for each observation x.

    Args:
        variable: observed values; a Beta(3, 5) sample by default.
        hypothesis: null-hypothesis sample; a Beta(5, 5) sample by default.

    Returns:
        numpy array with one p-value per observation.
    """
    obs = (np.random.beta(a=3, b=5, size=5000) if variable is None
           else np.array(variable))
    null = (np.random.beta(a=5, b=5, size=1000) if hypothesis is None
            else np.array(hypothesis))
    # Fraction of the null sample at or below each observed value.
    counts = [(null <= value).sum() for value in obs]
    return np.divide(counts, null.size)
def righttailpvalue(variable=None, hypothesis=None):
    """Return the right-tail p-value p(H >= x | H) for each observation x.

    Args:
        variable: observed values; a Beta(3, 5) sample by default.
        hypothesis: null-hypothesis sample; a Beta(5, 5) sample by default.

    Returns:
        numpy array with one p-value per observation.
    """
    obs = (np.random.beta(a=3, b=5, size=5000) if variable is None
           else np.array(variable))
    null = (np.random.beta(a=5, b=5, size=1000) if hypothesis is None
            else np.array(hypothesis))
    # Fraction of the null sample at or above each observed value.
    counts = [(null >= value).sum() for value in obs]
    return np.divide(counts, null.size)
| 28.857143 | 94 | 0.640382 |
import numpy as np
def pvalue(variable=None, hypothesis=None, alpha=.05):
    """Calculate empirical left-tail p-values, their mean over an alpha
    grid, and the fraction of observations significant at ``alpha``.
    """
    # Default observation sample when none is supplied.
    if variable is None:
        variable = np.random.beta(a=3, b=5, size=5000)
    else:
        variable = np.array(variable)
    # Default null-hypothesis sample.
    if hypothesis is None:
        hypothesis = np.random.beta(a=5, b=5, size=1000)
    else:
        hypothesis = np.array(hypothesis)
    # Count, for each observation x, the null draws with h <= x.
    prob = []
    for var in variable:
        prob.append((hypothesis <= var).sum())
    # Normalize counts into probabilities (the p-values).
    prob = np.divide(prob, hypothesis.size)
    # Scan a over [0, 1] and record P(p <= a) at each grid point.
    scanprob = []
    alphagrid = np.linspace(0, 1, num=1000)
    for val in alphagrid:
        scanprob.append((prob <= val).sum() / variable.size)
    # NOTE(review): the third value divides sum(prob) by the grid size
    # (1000), not by len(prob) -- preserved as written; confirm intent.
    return prob, scanprob, np.sum(prob) / alphagrid.size, (prob <= alpha).sum() /variable.size
def lefttailpvalue(variable=None, hypothesis=None):
    """Calculate empirical left-tail p-values P(H <= x) per observation."""
    # Default observation sample when none is supplied.
    if variable is None:
        variable = np.random.beta(a=3, b=5, size=5000)
    else:
        variable = np.array(variable)
    # Default null-hypothesis sample.
    if hypothesis is None:
        hypothesis = np.random.beta(a=5, b=5, size=1000)
    else:
        hypothesis = np.array(hypothesis)
    # Count null draws at or below each observation.
    prob = []
    for var in variable:
        prob.append((hypothesis <= var).sum())
    # Normalize counts into probabilities.
    prob = np.divide(prob, hypothesis.size)
    return prob
def righttailpvalue(variable=None, hypothesis=None):
    """Calculate empirical right-tail p-values P(H >= x) per observation."""
    # Default observation sample when none is supplied.
    if variable is None:
        variable = np.random.beta(a=3, b=5, size=5000)
    else:
        variable = np.array(variable)
    # Default null-hypothesis sample.
    if hypothesis is None:
        hypothesis = np.random.beta(a=5, b=5, size=1000)
    else:
        hypothesis = np.array(hypothesis)
    # Count null draws at or above each observation (right tail).
    prob = []
    for var in variable:
        prob.append((hypothesis >= var).sum())
    # Normalize counts into probabilities.
    prob = np.divide(prob, hypothesis.size)
    return prob
| true | true |
f71133f82623f384ba4feeea0b52c7871bf3ea83 | 3,948 | py | Python | book/linreg_poly_vs_degree.py | tywang89/pyprobml | 82cfdcb8daea653cda8f77e8737e585418476ca7 | [
"MIT"
] | 1 | 2019-05-07T12:40:01.000Z | 2019-05-07T12:40:01.000Z | book/linreg_poly_vs_degree.py | tywang89/pyprobml | 82cfdcb8daea653cda8f77e8737e585418476ca7 | [
"MIT"
] | null | null | null | book/linreg_poly_vs_degree.py | tywang89/pyprobml | 82cfdcb8daea653cda8f77e8737e585418476ca7 | [
"MIT"
] | null | null | null | # Plot polynomial regression on 1d problem
# Based on https://github.com/probml/pmtk3/blob/master/demos/linregPolyVsDegree.m
import numpy as np
import matplotlib.pyplot as plt
from pyprobml_utils import save_fig
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import MinMaxScaler
import sklearn.metrics
from sklearn.metrics import mean_squared_error as mse
def make_1dregression_data(n=21):
    """Sample a noisy quadratic regression problem.

    The target is f(x) = -1.5 x + x^2 / 9 observed with Gaussian noise of
    variance 4; the RNG is seeded so the data set is reproducible.

    Args:
        n: number of training points, evenly spaced on [0, 20].

    Returns:
        (xtrain, ytrain, xtest, ytest); the test inputs are a 0.1-step grid.
    """
    np.random.seed(0)
    xtrain = np.linspace(0.0, 20, n)
    xtest = np.arange(0.0, 20, 0.1)
    noise_std = np.sqrt(4)
    coeffs = np.array([-1.5, 1/9.])

    def true_fn(x):
        return coeffs[0] * x + coeffs[1] * np.square(x)

    # Draw the training noise first, then the test noise, matching the
    # original call order so the seeded stream stays identical.
    ytrain = true_fn(xtrain) + np.random.normal(0, 1, xtrain.shape) * noise_std
    ytest = true_fn(xtest) + np.random.normal(0, 1, xtest.shape) * noise_std
    return xtrain, ytrain, xtest, ytest
# Fit polynomial regressors of degree 1..20 and record train/test MSE.
xtrain, ytrain, xtest, ytest = make_1dregression_data(n=21)
# Rescale inputs to [-1, 1] so high-degree polynomial features stay bounded.
scaler = MinMaxScaler(feature_range=(-1, 1))
Xtrain = scaler.fit_transform(xtrain.reshape(-1, 1))
Xtest = scaler.transform(xtest.reshape(-1, 1))
degs = np.arange(1, 21, 1)
ndegs = np.max(degs)
mse_train = np.empty(ndegs)
mse_test = np.empty(ndegs)
# Per-degree predictions are stored so the plots below can reuse them.
ytest_pred_stored = np.empty(ndegs, dtype=np.ndarray)
ytrain_pred_stored = np.empty(ndegs, dtype=np.ndarray)
for deg in degs:
    model = LinearRegression()
    poly_features = PolynomialFeatures(degree=deg, include_bias=False)
    Xtrain_poly = poly_features.fit_transform(Xtrain)
    model.fit(Xtrain_poly, ytrain)
    ytrain_pred = model.predict(Xtrain_poly)
    ytrain_pred_stored[deg-1] = ytrain_pred
    Xtest_poly = poly_features.transform(Xtest)
    ytest_pred = model.predict(Xtest_poly)
    mse_train[deg-1] = mse(ytrain_pred, ytrain)
    mse_test[deg-1] = mse(ytest_pred, ytest)
    ytest_pred_stored[deg-1] = ytest_pred
# Plot train/test MSE against polynomial degree; degrees above 15 are
# omitted because their test MSE explodes and hides the rest of the curve.
fig, ax = plt.subplots()
mask = degs <= 15
ax.plot(degs[mask], mse_test[mask], color = 'r', marker = 'x',label='test')
ax.plot(degs[mask], mse_train[mask], color='b', marker = 's', label='train')
ax.legend(loc='upper right', shadow=True)
plt.xlabel('degree')
plt.ylabel('mse')
save_fig('polyfitVsDegree.pdf')
plt.show()
# Plot the fitted curve over the training scatter for selected degrees.
chosen_degs = [1, 2, 14, 20]
for deg in chosen_degs:
    fig, ax = plt.subplots()
    ax.scatter(xtrain, ytrain)
    ax.plot(xtest, ytest_pred_stored[deg-1])
    ax.set_ylim((-10, 15))
    plt.title('degree {}'.format(deg))
    save_fig('polyfitDegree{}.pdf'.format(deg))
    plt.show()
# Residual plots (training set) -- a pattern in the residuals signals a
# misspecified model; see
# https://blog.minitab.com/blog/adventures-in-statistics-2/why-you-need-to-check-your-residual-plots-for-regression-analysis
chosen_degs = [1, 2, 14, 20]
for deg in chosen_degs:
    fig, ax = plt.subplots()
    ypred = ytrain_pred_stored[deg-1]
    residuals = ytrain - ypred
    ax.plot(ypred, residuals, 'o')
    ax.set_xlabel('predicted y')
    ax.set_ylabel('residual')
    plt.title('degree {}. Predictions on the training set'.format(deg))
    save_fig('polyfitDegree{}Residuals.pdf'.format(deg))
    plt.show()
# Predicted-vs-actual scatter with a dashed identity guide line and R^2 in
# the title, for both the train and test splits; see
# https://blog.minitab.com/blog/adventures-in-statistics-2/regression-analysis-how-do-i-interpret-r-squared-and-assess-the-goodness-of-fit
chosen_degs = [1, 2, 14, 20]
for deg in chosen_degs:
    for train in [True, False]:
        if train:
            ytrue = ytrain
            ypred = ytrain_pred_stored[deg-1]
            dataset = 'Train'
        else:
            ytrue = ytest
            ypred = ytest_pred_stored[deg-1]
            dataset = 'Test'
        fig, ax = plt.subplots()
        ax.scatter(ytrue, ypred)
        # Dashed y=x reference line spanning the current axis limits.
        ax.plot(ax.get_xlim(), ax.get_ylim(), ls="--", c=".3")
        ax.set_xlabel('true y')
        ax.set_ylabel('predicted y')
        r2 = sklearn.metrics.r2_score(ytrue, ypred)
        plt.title('degree {}. R2 on {} = {:0.3f}'.format(deg, dataset, r2))
        save_fig('polyfitDegree{}FitVsActual{}.pdf'.format(deg, dataset))
plt.show() | 34.938053 | 140 | 0.678318 |
import numpy as np
import matplotlib.pyplot as plt
from pyprobml_utils import save_fig
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import MinMaxScaler
import sklearn.metrics
from sklearn.metrics import mean_squared_error as mse
def make_1dregression_data(n=21):
    """Generate a reproducible noisy quadratic regression data set.

    Args:
        n: number of training points, evenly spaced on [0, 20].

    Returns:
        (xtrain, ytrain, xtest, ytest); the test inputs are a 0.1-step grid.
    """
    # Fixed seed so every run produces the same data.
    np.random.seed(0)
    xtrain = np.linspace(0.0, 20, n)
    xtest = np.arange(0.0, 20, 0.1)
    # Noise variance and true weights: f(x) = -1.5 x + x^2 / 9.
    sigma2 = 4
    w = np.array([-1.5, 1/9.])
    fun = lambda x: w[0]*x + w[1]*np.square(x)
    ytrain = fun(xtrain) + np.random.normal(0, 1, xtrain.shape) * \
        np.sqrt(sigma2)
    ytest= fun(xtest) + np.random.normal(0, 1, xtest.shape) * \
        np.sqrt(sigma2)
    return xtrain, ytrain, xtest, ytest
xtrain, ytrain, xtest, ytest = make_1dregression_data(n=21)
scaler = MinMaxScaler(feature_range=(-1, 1))
Xtrain = scaler.fit_transform(xtrain.reshape(-1, 1))
Xtest = scaler.transform(xtest.reshape(-1, 1))
degs = np.arange(1, 21, 1)
ndegs = np.max(degs)
mse_train = np.empty(ndegs)
mse_test = np.empty(ndegs)
ytest_pred_stored = np.empty(ndegs, dtype=np.ndarray)
ytrain_pred_stored = np.empty(ndegs, dtype=np.ndarray)
for deg in degs:
model = LinearRegression()
poly_features = PolynomialFeatures(degree=deg, include_bias=False)
Xtrain_poly = poly_features.fit_transform(Xtrain)
model.fit(Xtrain_poly, ytrain)
ytrain_pred = model.predict(Xtrain_poly)
ytrain_pred_stored[deg-1] = ytrain_pred
Xtest_poly = poly_features.transform(Xtest)
ytest_pred = model.predict(Xtest_poly)
mse_train[deg-1] = mse(ytrain_pred, ytrain)
mse_test[deg-1] = mse(ytest_pred, ytest)
ytest_pred_stored[deg-1] = ytest_pred
fig, ax = plt.subplots()
mask = degs <= 15
ax.plot(degs[mask], mse_test[mask], color = 'r', marker = 'x',label='test')
ax.plot(degs[mask], mse_train[mask], color='b', marker = 's', label='train')
ax.legend(loc='upper right', shadow=True)
plt.xlabel('degree')
plt.ylabel('mse')
save_fig('polyfitVsDegree.pdf')
plt.show()
chosen_degs = [1, 2, 14, 20]
for deg in chosen_degs:
fig, ax = plt.subplots()
ax.scatter(xtrain, ytrain)
ax.plot(xtest, ytest_pred_stored[deg-1])
ax.set_ylim((-10, 15))
plt.title('degree {}'.format(deg))
save_fig('polyfitDegree{}.pdf'.format(deg))
plt.show()
chosen_degs = [1, 2, 14, 20]
for deg in chosen_degs:
fig, ax = plt.subplots()
ypred = ytrain_pred_stored[deg-1]
residuals = ytrain - ypred
ax.plot(ypred, residuals, 'o')
ax.set_xlabel('predicted y')
ax.set_ylabel('residual')
plt.title('degree {}. Predictions on the training set'.format(deg))
save_fig('polyfitDegree{}Residuals.pdf'.format(deg))
plt.show()
chosen_degs = [1, 2, 14, 20]
for deg in chosen_degs:
for train in [True, False]:
if train:
ytrue = ytrain
ypred = ytrain_pred_stored[deg-1]
dataset = 'Train'
else:
ytrue = ytest
ypred = ytest_pred_stored[deg-1]
dataset = 'Test'
fig, ax = plt.subplots()
ax.scatter(ytrue, ypred)
ax.plot(ax.get_xlim(), ax.get_ylim(), ls="--", c=".3")
ax.set_xlabel('true y')
ax.set_ylabel('predicted y')
r2 = sklearn.metrics.r2_score(ytrue, ypred)
plt.title('degree {}. R2 on {} = {:0.3f}'.format(deg, dataset, r2))
save_fig('polyfitDegree{}FitVsActual{}.pdf'.format(deg, dataset))
plt.show() | true | true |
f71134778da67a8817b7931130ea8e8dcc0520e7 | 13,724 | py | Python | logicmonitor_sdk/models/widget.py | JeremyTangCD/lm-sdk-python | 2a15e055e5a3f72d2f2e4fb43bdbed203c5a9983 | [
"Apache-2.0"
] | null | null | null | logicmonitor_sdk/models/widget.py | JeremyTangCD/lm-sdk-python | 2a15e055e5a3f72d2f2e4fb43bdbed203c5a9983 | [
"Apache-2.0"
] | null | null | null | logicmonitor_sdk/models/widget.py | JeremyTangCD/lm-sdk-python | 2a15e055e5a3f72d2f2e4fb43bdbed203c5a9983 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
LogicMonitor REST API
LogicMonitor is a SaaS-based performance monitoring platform that provides full visibility into complex, hybrid infrastructures, offering granular performance monitoring and actionable data and insights. logicmonitor_sdk enables you to manage your LogicMonitor account programmatically. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Widget(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'last_updated_by': 'str',
'user_permission': 'str',
'dashboard_id': 'int',
'name': 'str',
'description': 'str',
'last_updated_on': 'int',
'theme': 'str',
'interval': 'int',
'id': 'int',
'type': 'str',
'timescale': 'str'
}
attribute_map = {
'last_updated_by': 'lastUpdatedBy',
'user_permission': 'userPermission',
'dashboard_id': 'dashboardId',
'name': 'name',
'description': 'description',
'last_updated_on': 'lastUpdatedOn',
'theme': 'theme',
'interval': 'interval',
'id': 'id',
'type': 'type',
'timescale': 'timescale'
}
discriminator_value_class_map = {
'batchjob': 'BatchJobWidget',
'netflow': 'NetflowWidget',
'html': 'HtmlWidget',
'sgraph': 'WebsiteGraphWidget',
'devicesla': 'DeviceSLAWidget',
'groupnetflowgraph': 'NetflowGroupGraphWidget',
'gauge': 'GaugeWidget',
'ograph': 'OverviewGraphWidget',
'statsd': 'StatsDWidget',
'netflowgraph': 'NetflowGraphWidget',
'devicestatus': 'DeviceStatus',
'text': 'TextWidget',
'flash': 'FlashWidget',
'ngraph': 'NormalGraphWidget',
'groupnetflow': 'NetflowGroupWidget',
'bignumber': 'BigNumberWidget',
'cgraph': 'CustomerGraphWidget',
'dynamictable': 'DynamicTableWidget',
'table': 'TableWidget',
'gmap': 'GoogleMapWidget',
'noc': 'NOCWidget',
'': 'ServiceAlert',
'alert': 'AlertWidget',
'websiteindividualstatus': 'WebsiteIndividualsStatusWidget',
'websiteoverallstatus': 'WebsiteOverallStatusWidget',
'piechart': 'PieChartWidget',
'websiteoverview': 'WebsiteOverviewWidget',
'websitesla': 'WebsiteSLAWidget'
}
def __init__(self, last_updated_by=None, user_permission=None, dashboard_id=None, name=None, description=None, last_updated_on=None, theme=None, interval=None, id=None, type=None, timescale=None): # noqa: E501
"""Widget - a model defined in Swagger""" # noqa: E501
self._last_updated_by = None
self._user_permission = None
self._dashboard_id = None
self._name = None
self._description = None
self._last_updated_on = None
self._theme = None
self._interval = None
self._id = None
self._type = None
self._timescale = None
self.discriminator = 'type'
if last_updated_by is not None:
self.last_updated_by = last_updated_by
if user_permission is not None:
self.user_permission = user_permission
self.dashboard_id = dashboard_id
self.name = name
if description is not None:
self.description = description
if last_updated_on is not None:
self.last_updated_on = last_updated_on
if theme is not None:
self.theme = theme
if interval is not None:
self.interval = interval
if id is not None:
self.id = id
self.type = type
if timescale is not None:
self.timescale = timescale
@property
def last_updated_by(self):
"""Gets the last_updated_by of this Widget. # noqa: E501
The user that last updated the widget # noqa: E501
:return: The last_updated_by of this Widget. # noqa: E501
:rtype: str
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, last_updated_by):
"""Sets the last_updated_by of this Widget.
The user that last updated the widget # noqa: E501
:param last_updated_by: The last_updated_by of this Widget. # noqa: E501
:type: str
"""
self._last_updated_by = last_updated_by
@property
def user_permission(self):
"""Gets the user_permission of this Widget. # noqa: E501
The permission level of the user who last modified the widget # noqa: E501
:return: The user_permission of this Widget. # noqa: E501
:rtype: str
"""
return self._user_permission
@user_permission.setter
def user_permission(self, user_permission):
"""Sets the user_permission of this Widget.
The permission level of the user who last modified the widget # noqa: E501
:param user_permission: The user_permission of this Widget. # noqa: E501
:type: str
"""
self._user_permission = user_permission
@property
def dashboard_id(self):
"""Gets the dashboard_id of this Widget. # noqa: E501
The id of the dashboard the widget belongs to # noqa: E501
:return: The dashboard_id of this Widget. # noqa: E501
:rtype: int
"""
return self._dashboard_id
@dashboard_id.setter
def dashboard_id(self, dashboard_id):
"""Sets the dashboard_id of this Widget.
The id of the dashboard the widget belongs to # noqa: E501
:param dashboard_id: The dashboard_id of this Widget. # noqa: E501
:type: int
"""
if dashboard_id is None:
raise ValueError("Invalid value for `dashboard_id`, must not be `None`") # noqa: E501
self._dashboard_id = dashboard_id
@property
def name(self):
"""Gets the name of this Widget. # noqa: E501
The name of the widget # noqa: E501
:return: The name of this Widget. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this Widget.
The name of the widget # noqa: E501
:param name: The name of this Widget. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def description(self):
"""Gets the description of this Widget. # noqa: E501
The description of the widget # noqa: E501
:return: The description of this Widget. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this Widget.
The description of the widget # noqa: E501
:param description: The description of this Widget. # noqa: E501
:type: str
"""
self._description = description
@property
def last_updated_on(self):
"""Gets the last_updated_on of this Widget. # noqa: E501
The time that corresponds to when the widget was last updated, in epoch format # noqa: E501
:return: The last_updated_on of this Widget. # noqa: E501
:rtype: int
"""
return self._last_updated_on
@last_updated_on.setter
def last_updated_on(self, last_updated_on):
"""Sets the last_updated_on of this Widget.
The time that corresponds to when the widget was last updated, in epoch format # noqa: E501
:param last_updated_on: The last_updated_on of this Widget. # noqa: E501
:type: int
"""
self._last_updated_on = last_updated_on
@property
def theme(self):
"""Gets the theme of this Widget. # noqa: E501
The color scheme of the widget. Options are: borderPurple | borderGray | borderBlue | solidPurple | solidGray | solidBlue | simplePurple | simpleBlue | simpleGray | newBorderGray | newBorderBlue | newBorderDarkBlue | newSolidGray | newSolidBlue | newSolidDarkBlue | newSimpleGray | newSimpleBlue |newSimpleDarkBlue # noqa: E501
:return: The theme of this Widget. # noqa: E501
:rtype: str
"""
return self._theme
@theme.setter
def theme(self, theme):
"""Sets the theme of this Widget.
The color scheme of the widget. Options are: borderPurple | borderGray | borderBlue | solidPurple | solidGray | solidBlue | simplePurple | simpleBlue | simpleGray | newBorderGray | newBorderBlue | newBorderDarkBlue | newSolidGray | newSolidBlue | newSolidDarkBlue | newSimpleGray | newSimpleBlue |newSimpleDarkBlue # noqa: E501
:param theme: The theme of this Widget. # noqa: E501
:type: str
"""
self._theme = theme
@property
def interval(self):
"""Gets the interval of this Widget. # noqa: E501
The refresh interval of the widget, in minutes # noqa: E501
:return: The interval of this Widget. # noqa: E501
:rtype: int
"""
return self._interval
@interval.setter
def interval(self, interval):
"""Sets the interval of this Widget.
The refresh interval of the widget, in minutes # noqa: E501
:param interval: The interval of this Widget. # noqa: E501
:type: int
"""
self._interval = interval
@property
def id(self):
"""Gets the id of this Widget. # noqa: E501
The Id of the widget # noqa: E501
:return: The id of this Widget. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this Widget.
The Id of the widget # noqa: E501
:param id: The id of this Widget. # noqa: E501
:type: int
"""
self._id = id
@property
def type(self):
"""Gets the type of this Widget. # noqa: E501
alert | deviceNOC | html | serviceOverallStatus | sgraph | ngraph | serviceNOC | serviceSLA | bigNumber | gmap | serviceIndividualStatus | gauge | pieChart | ngraph | batchjob # noqa: E501
:return: The type of this Widget. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this Widget.
alert | deviceNOC | html | serviceOverallStatus | sgraph | ngraph | serviceNOC | serviceSLA | bigNumber | gmap | serviceIndividualStatus | gauge | pieChart | ngraph | batchjob # noqa: E501
:param type: The type of this Widget. # noqa: E501
:type: str
"""
if type is None:
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
@property
def timescale(self):
"""Gets the timescale of this Widget. # noqa: E501
The default timescale of the widget # noqa: E501
:return: The timescale of this Widget. # noqa: E501
:rtype: str
"""
return self._timescale
@timescale.setter
def timescale(self, timescale):
"""Sets the timescale of this Widget.
The default timescale of the widget # noqa: E501
:param timescale: The timescale of this Widget. # noqa: E501
:type: str
"""
self._timescale = timescale
def get_real_child_model(self, data):
"""Returns the real base class specified by the discriminator"""
discriminator_value = data[self.discriminator].lower()
return self.discriminator_value_class_map.get(discriminator_value)
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Widget, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Widget):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 31.405034 | 336 | 0.600627 |
import pprint
import re
import six
class Widget(object):
swagger_types = {
'last_updated_by': 'str',
'user_permission': 'str',
'dashboard_id': 'int',
'name': 'str',
'description': 'str',
'last_updated_on': 'int',
'theme': 'str',
'interval': 'int',
'id': 'int',
'type': 'str',
'timescale': 'str'
}
attribute_map = {
'last_updated_by': 'lastUpdatedBy',
'user_permission': 'userPermission',
'dashboard_id': 'dashboardId',
'name': 'name',
'description': 'description',
'last_updated_on': 'lastUpdatedOn',
'theme': 'theme',
'interval': 'interval',
'id': 'id',
'type': 'type',
'timescale': 'timescale'
}
discriminator_value_class_map = {
'batchjob': 'BatchJobWidget',
'netflow': 'NetflowWidget',
'html': 'HtmlWidget',
'sgraph': 'WebsiteGraphWidget',
'devicesla': 'DeviceSLAWidget',
'groupnetflowgraph': 'NetflowGroupGraphWidget',
'gauge': 'GaugeWidget',
'ograph': 'OverviewGraphWidget',
'statsd': 'StatsDWidget',
'netflowgraph': 'NetflowGraphWidget',
'devicestatus': 'DeviceStatus',
'text': 'TextWidget',
'flash': 'FlashWidget',
'ngraph': 'NormalGraphWidget',
'groupnetflow': 'NetflowGroupWidget',
'bignumber': 'BigNumberWidget',
'cgraph': 'CustomerGraphWidget',
'dynamictable': 'DynamicTableWidget',
'table': 'TableWidget',
'gmap': 'GoogleMapWidget',
'noc': 'NOCWidget',
'': 'ServiceAlert',
'alert': 'AlertWidget',
'websiteindividualstatus': 'WebsiteIndividualsStatusWidget',
'websiteoverallstatus': 'WebsiteOverallStatusWidget',
'piechart': 'PieChartWidget',
'websiteoverview': 'WebsiteOverviewWidget',
'websitesla': 'WebsiteSLAWidget'
}
def __init__(self, last_updated_by=None, user_permission=None, dashboard_id=None, name=None, description=None, last_updated_on=None, theme=None, interval=None, id=None, type=None, timescale=None):
self._last_updated_by = None
self._user_permission = None
self._dashboard_id = None
self._name = None
self._description = None
self._last_updated_on = None
self._theme = None
self._interval = None
self._id = None
self._type = None
self._timescale = None
self.discriminator = 'type'
if last_updated_by is not None:
self.last_updated_by = last_updated_by
if user_permission is not None:
self.user_permission = user_permission
self.dashboard_id = dashboard_id
self.name = name
if description is not None:
self.description = description
if last_updated_on is not None:
self.last_updated_on = last_updated_on
if theme is not None:
self.theme = theme
if interval is not None:
self.interval = interval
if id is not None:
self.id = id
self.type = type
if timescale is not None:
self.timescale = timescale
@property
def last_updated_by(self):
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, last_updated_by):
self._last_updated_by = last_updated_by
@property
def user_permission(self):
return self._user_permission
@user_permission.setter
def user_permission(self, user_permission):
self._user_permission = user_permission
@property
def dashboard_id(self):
return self._dashboard_id
@dashboard_id.setter
def dashboard_id(self, dashboard_id):
if dashboard_id is None:
raise ValueError("Invalid value for `dashboard_id`, must not be `None`")
self._dashboard_id = dashboard_id
@property
def name(self):
return self._name
@name.setter
def name(self, name):
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`")
self._name = name
@property
def description(self):
return self._description
@description.setter
def description(self, description):
self._description = description
@property
def last_updated_on(self):
return self._last_updated_on
@last_updated_on.setter
def last_updated_on(self, last_updated_on):
self._last_updated_on = last_updated_on
@property
def theme(self):
return self._theme
@theme.setter
def theme(self, theme):
self._theme = theme
@property
def interval(self):
return self._interval
@interval.setter
def interval(self, interval):
self._interval = interval
@property
def id(self):
return self._id
@id.setter
def id(self, id):
self._id = id
@property
def type(self):
return self._type
@type.setter
def type(self, type):
if type is None:
raise ValueError("Invalid value for `type`, must not be `None`")
self._type = type
@property
def timescale(self):
return self._timescale
@timescale.setter
def timescale(self, timescale):
self._timescale = timescale
def get_real_child_model(self, data):
discriminator_value = data[self.discriminator].lower()
return self.discriminator_value_class_map.get(discriminator_value)
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Widget, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, Widget):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
f71134a742f050558c8c7b7b88a6923832e58fdb | 2,461 | py | Python | msgflow/service/webapi_service.py | colorfulscoop/msgflow | b275748afcdf3aa5aec1f80436cb7e0bd03fc69f | [
"MIT"
] | 5 | 2021-01-01T12:34:23.000Z | 2022-03-08T13:02:11.000Z | msgflow/service/webapi_service.py | colorfulscoop/msgflow | b275748afcdf3aa5aec1f80436cb7e0bd03fc69f | [
"MIT"
] | null | null | null | msgflow/service/webapi_service.py | colorfulscoop/msgflow | b275748afcdf3aa5aec1f80436cb7e0bd03fc69f | [
"MIT"
] | 1 | 2021-01-01T12:34:27.000Z | 2021-01-01T12:34:27.000Z | from pydantic import BaseModel
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
import uvicorn
import pkg_resources
from typing import Any
def build_api(handler, endpoint):
    """Create the FastAPI application and mount ``handler`` at ``endpoint``.

    Args:
        handler: object exposing a ``handle(req)`` method used as the route.
        endpoint: URL path (e.g. "/handle") served via POST.

    Returns:
        The configured FastAPI application.
    """
    def _msgflow_version():
        # Fall back to a placeholder when msgflow is not installed as a
        # distribution (e.g. when running from a source checkout).
        pkg_name = "msgflow"
        try:
            return pkg_resources.get_distribution(pkg_name).version
        except pkg_resources.DistributionNotFound:
            print(f"Package name not found: {pkg_name}")
            return "package version info not found"

    application = FastAPI(
        title="msgFlow",
        description="",
        version=_msgflow_version(),
    )
    application.add_api_route(endpoint, handler.handle, methods=["POST"])
    return application
class Request(BaseModel):
    """Incoming chat request payload.

    text: the user utterance to handle.
    dialog_id: conversation identifier; defaults to "0".
    data: optional arbitrary extra payload forwarded with the request.
    """
    text: str
    # Bug fix: the default was the int 0 on a `str` field; pydantic does
    # not validate defaults, so `Request(text=...).dialog_id` was an int.
    dialog_id: str = "0"
    data: dict[str, Any] = None  # `= None` makes this implicitly Optional (pydantic v1)
class Response(BaseModel):
    """Response payload: the bot's reply texts plus an echo of the request."""
    # Replies produced by the bot for this request (may be empty).
    texts: list[str]
    # The originating request, echoed back for client-side correlation.
    request: Request
class Handler:
    """Adapts a msgflow bot to the FastAPI route signature."""

    def __init__(self, bot):
        self._bot = bot

    def handle(self, req: Request):
        """Dispatch one request to the bot and collect its replies."""
        message = WebapiMessage(text=req.text, dialog_id=req.dialog_id, req=req)
        self._bot.handle(message)
        return Response(texts=message.msgs, request=req)
class WebapiMessage:
    """A single incoming message plus the replies accumulated for it."""

    def __init__(self, text: str, dialog_id: str, req):
        self._message_text = text
        self._dialog_id = dialog_id
        self._request = req
        self._responses = []

    @property
    def text(self):
        """Raw text of the incoming message."""
        return self._message_text

    @property
    def dialog_id(self) -> str:
        """Identifier of the conversation this message belongs to."""
        return self._dialog_id

    def respond(self, text):
        """Queue ``text`` as a reply to this message."""
        self._responses.append(text)

    @property
    def source(self) -> Any:
        """The original request object this message was built from."""
        return self._request

    @property
    def msgs(self):
        """All replies queued so far, in order."""
        return self._responses
class WebapiService:
    """Serves a msgflow bot over HTTP via FastAPI/uvicorn."""

    def __init__(self, config):
        """Store the validated service configuration.

        Args:
            config: a WebapiConfig instance.
        """
        self._config = config

    @classmethod
    def from_config(cls, config: dict[str, object]):
        """Build the service from a plain config mapping."""
        return cls(config=WebapiConfig(**config))

    def flow(self, bot):
        """Run the HTTP server, routing each POST request to ``bot``."""
        app = build_api(Handler(bot=bot), endpoint=self._config.endpoint)
        uvicorn.run(app=app, host=self._config.host, port=self._config.port)

    def post(self, text):
        # Outbound posting is not supported by this service.
        raise NotImplementedError()
class WebapiConfig(BaseModel):
    """Validated settings for WebapiService."""
    # Interface to bind the HTTP server to, e.g. "0.0.0.0".
    host: str
    # TCP port for the server.
    port: int
    # URL path where the bot handler is mounted.
    endpoint: str = "/handle"
| 22.577982 | 76 | 0.611946 | from pydantic import BaseModel
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
import uvicorn
import pkg_resources
from typing import Any
def build_api(handler, endpoint):
def get_version():
pkg_name = "msgflow"
try:
version = pkg_resources.get_distribution(pkg_name).version
except pkg_resources.DistributionNotFound:
print(f"Package name not found: {pkg_name}")
version = "package version info not found"
return version
app = FastAPI(
title="msgFlow",
description="",
version=get_version(),
)
app.add_api_route(endpoint, handler.handle, methods=["POST"])
return app
class Request(BaseModel):
text: str
dialog_id: str = 0
data: dict[str, Any] = None
class Response(BaseModel):
texts: list[str]
request: Request
class Handler:
def __init__(self, bot):
self._bot = bot
def handle(self, req: Request):
msg = WebapiMessage(text=req.text, dialog_id=req.dialog_id, req=req)
self._bot.handle(msg)
return Response(texts=msg.msgs, request=req)
class WebapiMessage:
def __init__(self, text: str, dialog_id: str, req):
self._text = text
self._cid = dialog_id
self._req = req
self._msgs = []
@property
def text(self):
return self._text
@property
def dialog_id(self) -> str:
return self._cid
def respond(self, text):
self._msgs.append(text)
@property
def source(self) -> Any:
return self._req
@property
def msgs(self):
return self._msgs
class WebapiService:
def __init__(self, config):
# Set attributes
self._config = config
@classmethod
def from_config(cls, config: dict[str, object]):
cfg = WebapiConfig(**config)
return cls(config=cfg)
def flow(self, bot):
handler = Handler(bot=bot)
app = build_api(
handler,
endpoint=self._config.endpoint,
)
uvicorn.run(app=app, host=self._config.host, port=self._config.port)
def post(self, text):
raise NotImplementedError()
class WebapiConfig(BaseModel):
host: str
port: int
endpoint: str = "/handle"
| true | true |
f71134f7f0b02eb570f36f87275fe61904ada617 | 1,339 | py | Python | tests/test_gui.py | prosodylab/Montreal-Forced-Aligner | 58e95c941924d7cb4db5672e28fb0dbbcf9c42f3 | [
"MIT"
] | null | null | null | tests/test_gui.py | prosodylab/Montreal-Forced-Aligner | 58e95c941924d7cb4db5672e28fb0dbbcf9c42f3 | [
"MIT"
] | null | null | null | tests/test_gui.py | prosodylab/Montreal-Forced-Aligner | 58e95c941924d7cb4db5672e28fb0dbbcf9c42f3 | [
"MIT"
] | null | null | null | import os
from montreal_forced_aligner.corpus.acoustic_corpus import AcousticCorpus
def test_save_text_lab(
    basic_corpus_dir,
    generated_dir,
):
    """Loading a corpus and saving a file back should not raise."""
    output_directory = os.path.join(generated_dir, "gui_tests")
    corpus = AcousticCorpus(
        corpus_directory=basic_corpus_dir,
        use_mp=True,
        temporary_directory=output_directory,
    )
    corpus._load_corpus()
    # The assertion here is implicit: save() must complete without error.
    corpus.get_file(name="acoustic_corpus").save()
def test_file_properties(
    stereo_corpus_dir,
    generated_dir,
):
    """A stereo file should expose two channels, two speakers, and its utterances."""
    output_directory = os.path.join(generated_dir, "gui_tests")
    corpus = AcousticCorpus(
        corpus_directory=stereo_corpus_dir,
        use_mp=True,
        temporary_directory=output_directory,
    )
    corpus._load_corpus()
    file = corpus.get_file(name="michaelandsickmichael")
    assert file.sound_file.num_channels == 2
    assert file.num_speakers == 2
    assert file.num_utterances == 7
    # The normalized waveform keeps one row per channel.
    x, y = file.sound_file.normalized_waveform()
    assert y.shape[0] == 2
def test_flac_tg(flac_tg_corpus_dir, generated_dir):
    """A FLAC + TextGrid corpus should load and save a file without error."""
    output_directory = os.path.join(generated_dir, "gui_tests")
    corpus = AcousticCorpus(
        corpus_directory=flac_tg_corpus_dir,
        use_mp=True,
        temporary_directory=output_directory,
    )
    corpus._load_corpus()
    # Implicit assertion: save() must complete without raising.
    corpus.get_file(name="61-70968-0000").save()
| 27.895833 | 73 | 0.716953 | import os
from montreal_forced_aligner.corpus.acoustic_corpus import AcousticCorpus
def test_save_text_lab(
basic_corpus_dir,
generated_dir,
):
output_directory = os.path.join(generated_dir, "gui_tests")
corpus = AcousticCorpus(
corpus_directory=basic_corpus_dir,
use_mp=True,
temporary_directory=output_directory,
)
corpus._load_corpus()
corpus.get_file(name="acoustic_corpus").save()
def test_file_properties(
    stereo_corpus_dir,
    generated_dir,
):
    """Verify channel, speaker, and utterance counts for a known stereo file."""
    corpus = AcousticCorpus(
        corpus_directory=stereo_corpus_dir,
        use_mp=True,
        temporary_directory=os.path.join(generated_dir, "gui_tests"),
    )
    corpus._load_corpus()
    sound = corpus.get_file(name="michaelandsickmichael")
    assert sound.sound_file.num_channels == 2
    assert sound.num_speakers == 2
    assert sound.num_utterances == 7
    # normalized_waveform() returns (times, samples); stereo => 2 sample rows.
    _, samples = sound.sound_file.normalized_waveform()
    assert samples.shape[0] == 2
def test_flac_tg(flac_tg_corpus_dir, generated_dir):
    """Load a FLAC + TextGrid corpus and save one file's annotations back."""
    corpus = AcousticCorpus(
        corpus_directory=flac_tg_corpus_dir,
        use_mp=True,
        temporary_directory=os.path.join(generated_dir, "gui_tests"),
    )
    corpus._load_corpus()
    # Saving must complete without raising.
    flac_file = corpus.get_file(name="61-70968-0000")
    flac_file.save()
| true | true |
f711359dc17042272390f6f50314b5f2c746d6b9 | 934 | py | Python | CHAP06/wrapper/azdo.py | dotcs/Terraform-Cookbook | 16938bf044353b1552f3ffb676153f922e147700 | [
"MIT"
] | 86 | 2020-02-05T15:00:16.000Z | 2022-03-28T12:06:14.000Z | CHAP06/wrapper/azdo.py | dotcs/Terraform-Cookbook | 16938bf044353b1552f3ffb676153f922e147700 | [
"MIT"
] | 1 | 2021-01-14T16:49:50.000Z | 2021-01-14T16:49:50.000Z | CHAP06/wrapper/azdo.py | dotcs/Terraform-Cookbook | 16938bf044353b1552f3ffb676153f922e147700 | [
"MIT"
] | 113 | 2020-02-09T12:34:19.000Z | 2022-03-22T18:42:59.000Z | import os
def tfoutputtoAzdo(outputlist, jsonObject):
"""
This function convert a dict to Azure DevOps pipelines variable
outputlist : dict { terraform_output : azure devpops variable}
jsonOject : the terraform output in Json format (terraform output -json)
"""
if(len(outputlist) > 0):
for k, v in outputlist.items():
tfoutput_name = k
azdovar = str(v)
if tfoutput_name in jsonObject.keys():
var_value = jsonObject[tfoutput_name]["value"]
print(
"Run [echo ##vso[task.setvariable variable="+azdovar+";]"+var_value+"]")
os.system(
"echo ##vso[task.setvariable variable="+azdovar+";]"+var_value+"")
else:
print("key {} is not present in terraform output".format(
tfoutput_name))
| 37.36 | 96 | 0.541756 | import os
def tfoutputtoAzdo(outputlist, jsonObject):
    """Publish mapped Terraform outputs as Azure DevOps pipeline variables.

    Args:
        outputlist (dict): Terraform output name -> Azure DevOps variable name.
        jsonObject (dict): Parsed ``terraform output -json`` result.
    """
    if not outputlist:
        return
    for tfoutput_name, target_var in outputlist.items():
        azdovar = str(target_var)
        if tfoutput_name in jsonObject.keys():
            var_value = jsonObject[tfoutput_name]["value"]
            # Echo the Azure DevOps logging command so the agent sets the variable.
            print(
                "Run [echo ##vso[task.setvariable variable="+azdovar+";]"+var_value+"]")
            os.system(
                "echo ##vso[task.setvariable variable="+azdovar+";]"+var_value+"")
        else:
            print("key {} is not present in terraform output".format(
                tfoutput_name))
f71135d130be1b16ed52e91265050b1eeb02e001 | 2,702 | py | Python | baseline/utils/mainFunctions.py | haymrpig/Pytorch_template | 9a0eda43b2da27807461b305ed42e1bd7c1341dd | [
"MIT"
] | null | null | null | baseline/utils/mainFunctions.py | haymrpig/Pytorch_template | 9a0eda43b2da27807461b305ed42e1bd7c1341dd | [
"MIT"
] | null | null | null | baseline/utils/mainFunctions.py | haymrpig/Pytorch_template | 9a0eda43b2da27807461b305ed42e1bd7c1341dd | [
"MIT"
] | null | null | null | import numpy as np
import torch
import torch.nn as nn
from torch.nn import functional as F
from tqdm import tqdm
class _BaseWrapper():
    """Base class for CAM-style visualizers wrapping a classification model."""
    def __init__(self, model):
        super().__init__()
        self.model = model
        # Hook handles registered by subclasses, kept so they can be removed.
        self.handlers = []
    def forward(self, images):
        """Run the model; return class probabilities sorted descending.

        Side effects: caches ``image_shape`` (spatial dims), ``logits`` and
        ``probs`` on the instance for a later ``backward`` call.
        """
        self.image_shape = images.shape[2:]
        print(self.image_shape)
        self.logits = self.model(images)
        self.probs = F.softmax(self.logits, dim=1)
        return self.probs.sort(dim=1, descending=True)
    def backward(self, ids):
        """Backpropagate a one-hot gradient for the class ids of interest."""
        one_hot = F.one_hot(ids, self.logits.shape[-1])
        one_hot = one_hot.squeeze()
        self.model.zero_grad()
        self.logits.backward(gradient=one_hot, retain_graph=True)
        # The one-hot `gradient` means backpropagation differentiates only
        # with respect to the selected class index — i.e. it measures how
        # much each feature map influenced the class we want to inspect.
    def generate(self):
        # Subclasses produce the actual saliency/CAM map here.
        raise NotImplementedError
class GradCAM(_BaseWrapper):
    """Grad-CAM: class activation maps from forward features x pooled gradients."""
    def __init__(self, model, layers=None):
        super().__init__(model)
        # layer name -> detached forward activation / backward gradient.
        self.feature_map = {}
        self.grad_map = {}
        # If None, hooks are attached to every named module.
        self.layers = layers
        def save_fmaps(key):
            def forward_hook(module, input, output):
                self.feature_map[key]=output.detach()
            return forward_hook
        def save_grads(key):
            # NOTE: parameter name "modeul" is a typo for "module" (unused).
            def backward_hook(modeul, grad_in, grad_out):
                self.grad_map[key] = grad_out[0].detach()
            return backward_hook
        for name, module in self.model.named_modules():
            if self.layers is None or name in self.layers:
                self.handlers.append(module.register_forward_hook(save_fmaps(name)))
                self.handlers.append(module.register_backward_hook(save_grads(name)))
    def findLayers(self, layers, target_layer):
        """Return the stored map for ``target_layer`` or raise ValueError."""
        if target_layer in layers.keys():
            return layers[target_layer]
        else:
            raise ValueError(f"{target_layer} not exists")
    def generate(self, target_layer):
        """Build the Grad-CAM heatmap for ``target_layer``.

        Requires forward() and backward() to have been called first so the
        feature/gradient maps are populated.
        """
        feature_maps = self.findLayers(self.feature_map, target_layer)
        grad_maps = self.findLayers(self.grad_map, target_layer)
        # Global-average-pool the gradients into per-channel weights.
        weights = F.adaptive_avg_pool2d(grad_maps, 1)
        grad_cam = torch.mul(feature_maps, weights).sum(dim=1, keepdim=True)
        grad_cam = F.relu(grad_cam)
        grad_cam = F.interpolate(grad_cam, self.image_shape, mode="bilinear", align_corners=False)
        B, C, H, W = grad_cam.shape
        # C should be 1 here after the channel sum above — TODO confirm.
        grad_cam = grad_cam.view(B, -1)
        grad_cam -= grad_cam.min(dim=1, keepdim=True)[0]
        # Subtracting the min makes values non-negative before scaling.
        grad_cam /= grad_cam.max(dim=1, keepdim=True)[0]
        grad_cam = grad_cam.view(B, C, H, W)
        return grad_cam
| 33.775 | 98 | 0.631384 | import numpy as np
import torch
import torch.nn as nn
from torch.nn import functional as F
from tqdm import tqdm
class _BaseWrapper():
    """Base class for CAM-style visualizers wrapping a classification model."""
    def __init__(self, model):
        super().__init__()
        self.model = model
        # Hook handles registered by subclasses, kept so they can be removed.
        self.handlers = []
    def forward(self, images):
        """Run the model; return class probabilities sorted descending.

        Side effects: caches ``image_shape``, ``logits`` and ``probs`` on the
        instance for a later ``backward`` call.
        """
        self.image_shape = images.shape[2:]
        print(self.image_shape)
        self.logits = self.model(images)
        self.probs = F.softmax(self.logits, dim=1)
        return self.probs.sort(dim=1, descending=True)
    def backward(self, ids):
        """Backpropagate a one-hot gradient for the class ids of interest."""
        one_hot = F.one_hot(ids, self.logits.shape[-1])
        one_hot = one_hot.squeeze()
        self.model.zero_grad()
        # One-hot `gradient` restricts backprop to the selected class only.
        self.logits.backward(gradient=one_hot, retain_graph=True)
    def generate(self):
        # Subclasses produce the actual saliency/CAM map here.
        raise NotImplementedError
class GradCAM(_BaseWrapper):
    """Grad-CAM: class activation maps from forward features x pooled gradients."""
    def __init__(self, model, layers=None):
        super().__init__(model)
        # layer name -> detached forward activation / backward gradient.
        self.feature_map = {}
        self.grad_map = {}
        # If None, hooks are attached to every named module.
        self.layers = layers
        def save_fmaps(key):
            def forward_hook(module, input, output):
                self.feature_map[key]=output.detach()
            return forward_hook
        def save_grads(key):
            def backward_hook(modeul, grad_in, grad_out):
                self.grad_map[key] = grad_out[0].detach()
            return backward_hook
        for name, module in self.model.named_modules():
            if self.layers is None or name in self.layers:
                self.handlers.append(module.register_forward_hook(save_fmaps(name)))
                self.handlers.append(module.register_backward_hook(save_grads(name)))
    def findLayers(self, layers, target_layer):
        """Return the stored map for ``target_layer`` or raise ValueError."""
        if target_layer in layers.keys():
            return layers[target_layer]
        else:
            raise ValueError(f"{target_layer} not exists")
    def generate(self, target_layer):
        """Build the Grad-CAM heatmap for ``target_layer``.

        Requires forward() and backward() to have been called first so the
        feature/gradient maps are populated.
        """
        feature_maps = self.findLayers(self.feature_map, target_layer)
        grad_maps = self.findLayers(self.grad_map, target_layer)
        # Global-average-pool the gradients into per-channel weights.
        weights = F.adaptive_avg_pool2d(grad_maps, 1)
        grad_cam = torch.mul(feature_maps, weights).sum(dim=1, keepdim=True)
        grad_cam = F.relu(grad_cam)
        grad_cam = F.interpolate(grad_cam, self.image_shape, mode="bilinear", align_corners=False)
        B, C, H, W = grad_cam.shape
        # Min-max normalize each map to [0, 1] before restoring the shape.
        grad_cam = grad_cam.view(B, -1)
        grad_cam -= grad_cam.min(dim=1, keepdim=True)[0]
        grad_cam /= grad_cam.max(dim=1, keepdim=True)[0]
        grad_cam = grad_cam.view(B, C, H, W)
        return grad_cam
| true | true |
f71135dc8e414cd1dc043aa36791209c2ac417ba | 3,026 | py | Python | hmc/tests/test_cox_poisson.py | JamesBrofos/Thresholds-in-Hamiltonian-Monte-Carlo | 7ee1b530db0eb536666dbc872fbf8200e53dd49b | [
"MIT"
] | 1 | 2021-11-23T15:40:07.000Z | 2021-11-23T15:40:07.000Z | hmc/tests/test_cox_poisson.py | JamesBrofos/Thresholds-in-Hamiltonian-Monte-Carlo | 7ee1b530db0eb536666dbc872fbf8200e53dd49b | [
"MIT"
] | null | null | null | hmc/tests/test_cox_poisson.py | JamesBrofos/Thresholds-in-Hamiltonian-Monte-Carlo | 7ee1b530db0eb536666dbc872fbf8200e53dd49b | [
"MIT"
] | null | null | null | import unittest
import numpy as np
from hmc.applications.cox_poisson import forward_transform, inverse_transform, generate_data, gaussian_posterior_factory, hyperparameter_posterior_factory
from hmc.applications.cox_poisson.prior import log_prior, grad_log_prior, hess_log_prior, grad_hess_log_prior
class TestCoxPoisson(unittest.TestCase):
    """Finite-difference checks of analytic derivatives for the Cox-Poisson model."""
    def test_prior(self):
        """Check prior gradient, Hessian, and Hessian-gradient via central differences."""
        def transformed_log_prior(qt):
            return log_prior(*inverse_transform(qt)[0])
        transformed_grad_log_prior = lambda qt: grad_log_prior(*qt)
        transformed_hess_log_prior = lambda qt: hess_log_prior(*qt)
        transformed_grad_hess_log_prior = lambda qt: grad_hess_log_prior(*qt)
        q = np.random.uniform(size=(2, ))
        qt, _ = forward_transform(q)
        delta = 1e-5
        # Random direction for a directional central finite difference.
        u = np.random.normal(size=qt.shape)
        fd = (transformed_log_prior(qt + 0.5*delta*u) - transformed_log_prior(qt - 0.5*delta*u)) / delta
        dd = transformed_grad_log_prior(qt)@u
        self.assertTrue(np.allclose(fd, dd))
        fd = (transformed_grad_log_prior(qt + 0.5*delta*u) - transformed_grad_log_prior(qt - 0.5*delta*u)) / delta
        dd = transformed_hess_log_prior(qt)@u
        self.assertTrue(np.allclose(fd, dd))
        fd = (transformed_hess_log_prior(qt + 0.5*delta*u) - transformed_hess_log_prior(qt - 0.5*delta*u)) / delta
        dd = transformed_grad_hess_log_prior(qt)@u
        self.assertTrue(np.allclose(fd, dd))
    def test_gaussian_posterior(self):
        """Check the Gaussian-posterior gradient against a central difference."""
        sigmasq, beta = np.random.uniform(size=(2, ))
        mu = np.log(126.0) - sigmasq / 2.0
        dist, x, y = generate_data(10, mu, beta, sigmasq)
        euclidean_auxiliaries, metric = gaussian_posterior_factory(dist, mu, sigmasq, beta, y)
        log_posterior = lambda x: euclidean_auxiliaries(x)[0]
        grad_log_posterior = lambda x: euclidean_auxiliaries(x)[1]
        delta = 1e-6
        u = np.random.normal(size=x.shape)
        fd = (log_posterior(x + 0.5*delta*u) - log_posterior(x - 0.5*delta*u)) / delta
        dd = grad_log_posterior(x)@u
        self.assertTrue(np.allclose(fd, dd))
    def test_hyperparameter_posterior(self):
        """Check hyperparameter posterior gradient and metric derivative numerically."""
        sigmasq, beta = np.random.uniform(size=(2, ))
        mu = np.log(126.0) - sigmasq / 2.0
        dist, x, y = generate_data(16, mu, beta, sigmasq)
        log_posterior, metric, _, euclidean_auxiliaries, riemannian_auxiliaries = hyperparameter_posterior_factory(dist, mu, x, y)
        grad_log_posterior = lambda qt: euclidean_auxiliaries(qt)[1]
        grad_metric = lambda qt: riemannian_auxiliaries(qt)[3]
        q = np.array([sigmasq, beta])
        qt, _ = forward_transform(q)
        delta = 1e-4
        u = np.random.normal(size=(2, ))
        fd = (log_posterior(qt + 0.5*delta*u) - log_posterior(qt - 0.5*delta*u)) / delta
        dd = grad_log_posterior(qt)@u
        self.assertTrue(np.allclose(fd, dd))
        fd = (metric(qt + 0.5*delta*u) - metric(qt - 0.5*delta*u)) / delta
        dd = grad_metric(qt)@u
        self.assertTrue(np.allclose(fd, dd))
| 41.452055 | 154 | 0.663913 | import unittest
import numpy as np
from hmc.applications.cox_poisson import forward_transform, inverse_transform, generate_data, gaussian_posterior_factory, hyperparameter_posterior_factory
from hmc.applications.cox_poisson.prior import log_prior, grad_log_prior, hess_log_prior, grad_hess_log_prior
class TestCoxPoisson(unittest.TestCase):
    """Finite-difference checks of analytic derivatives for the Cox-Poisson model."""
    def test_prior(self):
        """Check prior gradient, Hessian, and Hessian-gradient via central differences."""
        def transformed_log_prior(qt):
            return log_prior(*inverse_transform(qt)[0])
        transformed_grad_log_prior = lambda qt: grad_log_prior(*qt)
        transformed_hess_log_prior = lambda qt: hess_log_prior(*qt)
        transformed_grad_hess_log_prior = lambda qt: grad_hess_log_prior(*qt)
        q = np.random.uniform(size=(2, ))
        qt, _ = forward_transform(q)
        delta = 1e-5
        # Random direction for a directional central finite difference.
        u = np.random.normal(size=qt.shape)
        fd = (transformed_log_prior(qt + 0.5*delta*u) - transformed_log_prior(qt - 0.5*delta*u)) / delta
        dd = transformed_grad_log_prior(qt)@u
        self.assertTrue(np.allclose(fd, dd))
        fd = (transformed_grad_log_prior(qt + 0.5*delta*u) - transformed_grad_log_prior(qt - 0.5*delta*u)) / delta
        dd = transformed_hess_log_prior(qt)@u
        self.assertTrue(np.allclose(fd, dd))
        fd = (transformed_hess_log_prior(qt + 0.5*delta*u) - transformed_hess_log_prior(qt - 0.5*delta*u)) / delta
        dd = transformed_grad_hess_log_prior(qt)@u
        self.assertTrue(np.allclose(fd, dd))
    def test_gaussian_posterior(self):
        """Check the Gaussian-posterior gradient against a central difference."""
        sigmasq, beta = np.random.uniform(size=(2, ))
        mu = np.log(126.0) - sigmasq / 2.0
        dist, x, y = generate_data(10, mu, beta, sigmasq)
        euclidean_auxiliaries, metric = gaussian_posterior_factory(dist, mu, sigmasq, beta, y)
        log_posterior = lambda x: euclidean_auxiliaries(x)[0]
        grad_log_posterior = lambda x: euclidean_auxiliaries(x)[1]
        delta = 1e-6
        u = np.random.normal(size=x.shape)
        fd = (log_posterior(x + 0.5*delta*u) - log_posterior(x - 0.5*delta*u)) / delta
        dd = grad_log_posterior(x)@u
        self.assertTrue(np.allclose(fd, dd))
    def test_hyperparameter_posterior(self):
        """Check hyperparameter posterior gradient and metric derivative numerically."""
        sigmasq, beta = np.random.uniform(size=(2, ))
        mu = np.log(126.0) - sigmasq / 2.0
        dist, x, y = generate_data(16, mu, beta, sigmasq)
        log_posterior, metric, _, euclidean_auxiliaries, riemannian_auxiliaries = hyperparameter_posterior_factory(dist, mu, x, y)
        grad_log_posterior = lambda qt: euclidean_auxiliaries(qt)[1]
        grad_metric = lambda qt: riemannian_auxiliaries(qt)[3]
        q = np.array([sigmasq, beta])
        qt, _ = forward_transform(q)
        delta = 1e-4
        u = np.random.normal(size=(2, ))
        fd = (log_posterior(qt + 0.5*delta*u) - log_posterior(qt - 0.5*delta*u)) / delta
        dd = grad_log_posterior(qt)@u
        self.assertTrue(np.allclose(fd, dd))
        fd = (metric(qt + 0.5*delta*u) - metric(qt - 0.5*delta*u)) / delta
        dd = grad_metric(qt)@u
        self.assertTrue(np.allclose(fd, dd))
| true | true |
f711370932f8b4c113c4541c13a5de315eff195e | 1,653 | py | Python | Object detection and depth estimation/catkin_ws/src/f110-fall2018-skeltons/labs/wall_following/scripts/utils/other.py | UF-f1tenth/F1tenth-UFL | 93b0a822c67b2b425664642955342138e65974f4 | [
"Apache-2.0"
] | null | null | null | Object detection and depth estimation/catkin_ws/src/f110-fall2018-skeltons/labs/wall_following/scripts/utils/other.py | UF-f1tenth/F1tenth-UFL | 93b0a822c67b2b425664642955342138e65974f4 | [
"Apache-2.0"
] | null | null | null | Object detection and depth estimation/catkin_ws/src/f110-fall2018-skeltons/labs/wall_following/scripts/utils/other.py | UF-f1tenth/F1tenth-UFL | 93b0a822c67b2b425664642955342138e65974f4 | [
"Apache-2.0"
] | null | null | null | """
Created on Fri Oct 29 18:54:18 2021
@author: Krishna Nuthalapati
"""
import numpy as np
def iou(boxA, boxB):
    """Intersection-over-union of two [x1, y1, x2, y2] boxes.

    Coordinates are treated as inclusive pixel indices (hence the +1 terms).
    Returns a float in [0, 1].
    """
    # Corners of the overlap rectangle.
    left, top = max(boxA[0], boxB[0]), max(boxA[1], boxB[1])
    right, bottom = min(boxA[2], boxB[2]), min(boxA[3], boxB[3])
    # Clamp to zero when the boxes do not overlap.
    inter = max(0, right - left + 1) * max(0, bottom - top + 1)
    area_a = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
    area_b = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
    return inter / float(area_a + area_b - inter)
def nms(boxes, scores, thresh):
    """Greedy non-maximum suppression.

    Args:
        boxes: numpy array of shape (N, 4), boxes as [x1, y1, x2, y2].
        scores: array-like of shape (N,), one confidence score per box.
        thresh: IoU threshold at or above which the lower-scoring box is
            suppressed.

    Returns:
        numpy array of ascending indices of the surviving boxes.

    Note: the previous implementation only returned boxes that had been
    marked during an overlap comparison, so any box overlapping nothing
    (including the single-box case) was silently dropped; such boxes are
    now kept, matching standard NMS semantics.
    """
    num_boxes = boxes.shape[0]
    keep = np.ones(num_boxes, dtype=bool)
    # Visit boxes from highest to lowest score; each survivor suppresses
    # its lower-scoring overlaps.
    order = np.argsort(scores)[::-1]
    for pos in range(num_boxes):
        i = order[pos]
        if not keep[i]:
            continue
        for j in order[pos + 1:]:
            if keep[j] and iou(boxes[i], boxes[j]) >= thresh:
                keep[j] = False
    return np.where(keep)[0]
| 30.611111 | 65 | 0.566243 |
import numpy as np
def iou(boxA, boxB):
    """Intersection-over-union of two [x1, y1, x2, y2] boxes.

    Coordinates are treated as inclusive pixel indices (hence the +1 terms).
    Returns a float in [0, 1].
    """
    # Corners of the intersection rectangle.
    xA = max(boxA[0], boxB[0])
    yA = max(boxA[1], boxB[1])
    xB = min(boxA[2], boxB[2])
    yB = min(boxA[3], boxB[3])
    # Intersection area, clamped to zero when the boxes do not overlap.
    interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)
    boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
    boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
    # Union = sum of areas minus the double-counted intersection.
    iou_score = interArea / float(boxAArea + boxBArea - interArea)
    return iou_score
def nms(boxes, scores, thresh):
    """Greedy non-maximum suppression.

    Args:
        boxes: numpy array of shape (N, 4), boxes as [x1, y1, x2, y2].
        scores: array-like of shape (N,), one confidence score per box.
        thresh: IoU threshold at or above which the lower-scoring box is
            suppressed.

    Returns:
        numpy array of ascending indices of the surviving boxes.

    Note: the previous implementation only returned boxes that had been
    marked during an overlap comparison, so any box overlapping nothing
    (including the single-box case) was silently dropped; such boxes are
    now kept, matching standard NMS semantics.
    """
    num_boxes = boxes.shape[0]
    keep = np.ones(num_boxes, dtype=bool)
    # Visit boxes from highest to lowest score; each survivor suppresses
    # its lower-scoring overlaps.
    order = np.argsort(scores)[::-1]
    for pos in range(num_boxes):
        i = order[pos]
        if not keep[i]:
            continue
        for j in order[pos + 1:]:
            if keep[j] and iou(boxes[i], boxes[j]) >= thresh:
                keep[j] = False
    return np.where(keep)[0]
| true | true |
f711371b1ee98e180d6a5e26233698cd11df382f | 3,458 | py | Python | dedupe/blocking.py | daherman/dedupe | 053d373aaed47201f720c5b6d1a568fc49742cc3 | [
"MIT"
] | null | null | null | dedupe/blocking.py | daherman/dedupe | 053d373aaed47201f720c5b6d1a568fc49742cc3 | [
"MIT"
] | null | null | null | dedupe/blocking.py | daherman/dedupe | 053d373aaed47201f720c5b6d1a568fc49742cc3 | [
"MIT"
] | 1 | 2020-03-12T11:14:37.000Z | 2020-03-12T11:14:37.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
from future.utils import viewvalues
from collections import defaultdict
import logging
import time
logger = logging.getLogger(__name__)
def index_list():
    """Factory returning a defaultdict(list).

    Kept at module level (rather than a lambda) presumably so objects holding
    it remain picklable — TODO confirm that is the motivation.
    """
    return defaultdict(list)
class Blocker:
    '''Takes in a record and returns all blocks that record belongs to'''
    def __init__(self, predicates):
        self.predicates = predicates
        self.index_fields = defaultdict(index_list)
        self.index_predicates = []
        # Group predicates that carry an index (canopy/TF-IDF style) by the
        # field they cover so index()/unindex() can find them later.
        for full_predicate in predicates:
            for predicate in full_predicate:
                if hasattr(predicate, 'index'):
                    self.index_fields[predicate.field][predicate.type].append(
                        predicate)
                    self.index_predicates.append(predicate)
    def __call__(self, records, target=False):
        """Yield (block_key, record_id) pairs for every predicate match."""
        # time.clock() was deprecated in 3.3 and removed in Python 3.8;
        # time.time() is available on every supported interpreter.
        start_time = time.time()
        predicates = [(':' + str(i), predicate)
                      for i, predicate
                      in enumerate(self.predicates)]
        for i, record in enumerate(records):
            record_id, instance = record
            for pred_id, predicate in predicates:
                block_keys = predicate(instance, target=target)
                for block_key in block_keys:
                    # Suffix the key with the predicate id so keys produced
                    # by different predicates can never collide.
                    yield block_key + pred_id, record_id
            if i and i % 10000 == 0:
                logger.info('%(iteration)d, %(elapsed)f2 seconds',
                            {'iteration': i,
                             'elapsed': time.time() - start_time})
    def resetIndices(self):
        # clear canopies to reduce memory usage
        for predicate in self.index_predicates:
            predicate.reset()
    def index(self, data, field):
        '''Creates TF/IDF index of a given set of data'''
        indices = extractIndices(self.index_fields[field])
        for doc in data:
            if doc:
                for _, index, preprocess in indices:
                    index.index(preprocess(doc))
        for index_type, index, _ in indices:
            index.initSearch()
            for predicate in self.index_fields[field][index_type]:
                logger.debug("Canopy: %s", str(predicate))
                predicate.index = index
    def unindex(self, data, field):
        '''Remove index of a given set of data'''
        indices = extractIndices(self.index_fields[field])
        for doc in data:
            if doc:
                for _, index, preprocess in indices:
                    index.unindex(preprocess(doc))
        for index_type, index, _ in indices:
            # NOTE(review): index() calls index.initSearch() at this point
            # but this path goes through index._index — confirm which of the
            # two is intended; left unchanged here.
            index._index.initSearch()
            for predicate in self.index_fields[field][index_type]:
                logger.debug("Canopy: %s", str(predicate))
                predicate.index = index
    def indexAll(self, data_d):
        """Index every distinct non-empty value of each indexed field."""
        for field in self.index_fields:
            unique_fields = {record[field]
                             for record
                             in viewvalues(data_d)
                             if record[field]}
            self.index(unique_fields, field)
def extractIndices(index_fields):
    """Collect one (index_type, index, preprocess) triple per index type.

    The first predicate of each type is taken as representative; a fresh
    index is created via ``initIndex()`` when it does not have one yet.
    """
    triples = []
    for index_type, predicates in index_fields.items():
        representative = predicates[0]
        if representative.index is None:
            index = representative.initIndex()
        else:
            index = representative.index
        triples.append((index_type, index, representative.preprocess))
    return triples
| 30.333333 | 78 | 0.571139 |
from future.utils import viewvalues
from collections import defaultdict
import logging
import time
logger = logging.getLogger(__name__)
def index_list():
    """Factory returning a defaultdict(list).

    Kept at module level (rather than a lambda) presumably so objects holding
    it remain picklable — TODO confirm that is the motivation.
    """
    return defaultdict(list)
class Blocker:
    """Takes in a record and yields all blocks that record belongs to."""
    def __init__(self, predicates):
        self.predicates = predicates
        self.index_fields = defaultdict(index_list)
        self.index_predicates = []
        # Group predicates that carry an index by the field they cover so
        # index()/unindex() can find them later.
        for full_predicate in predicates:
            for predicate in full_predicate:
                if hasattr(predicate, 'index'):
                    self.index_fields[predicate.field][predicate.type].append(
                        predicate)
                    self.index_predicates.append(predicate)
    def __call__(self, records, target=False):
        # NOTE(review): time.clock() was removed in Python 3.8 — this will
        # raise AttributeError on modern interpreters; use time.time().
        start_time = time.clock()
        predicates = [(':' + str(i), predicate)
                      for i, predicate
                      in enumerate(self.predicates)]
        for i, record in enumerate(records):
            record_id, instance = record
            for pred_id, predicate in predicates:
                block_keys = predicate(instance, target=target)
                for block_key in block_keys:
                    # Suffix the key with the predicate id so keys from
                    # different predicates cannot collide.
                    yield block_key + pred_id, record_id
            if i and i % 10000 == 0:
                logger.info('%(iteration)d, %(elapsed)f2 seconds',
                            {'iteration': i,
                             'elapsed': time.clock() - start_time})
    def resetIndices(self):
        # Clear canopies to reduce memory usage.
        for predicate in self.index_predicates:
            predicate.reset()
    def index(self, data, field):
        """Create a TF/IDF-style index over the given set of field values."""
        indices = extractIndices(self.index_fields[field])
        for doc in data:
            if doc:
                for _, index, preprocess in indices:
                    index.index(preprocess(doc))
        for index_type, index, _ in indices:
            index.initSearch()
            for predicate in self.index_fields[field][index_type]:
                logger.debug("Canopy: %s", str(predicate))
                predicate.index = index
    def unindex(self, data, field):
        """Remove the given set of field values from the index."""
        indices = extractIndices(self.index_fields[field])
        for doc in data:
            if doc:
                for _, index, preprocess in indices:
                    index.unindex(preprocess(doc))
        for index_type, index, _ in indices:
            # NOTE(review): index() calls index.initSearch() here but this
            # path goes through index._index — confirm which is intended.
            index._index.initSearch()
            for predicate in self.index_fields[field][index_type]:
                logger.debug("Canopy: %s", str(predicate))
                predicate.index = index
    def indexAll(self, data_d):
        # Index every distinct non-empty value of each indexed field.
        for field in self.index_fields:
            unique_fields = {record[field]
                             for record
                             in viewvalues(data_d)
                             if record[field]}
            self.index(unique_fields, field)
def extractIndices(index_fields):
    """Collect one (index_type, index, preprocess) triple per index type.

    The first predicate of each type is taken as representative; a fresh
    index is created via ``initIndex()`` when it does not have one yet.
    """
    indices = []
    for index_type, predicates in index_fields.items():
        predicate = predicates[0]
        index = predicate.index
        preprocess = predicate.preprocess
        if predicate.index is None:
            index = predicate.initIndex()
        indices.append((index_type, index, preprocess))
    return indices
| true | true |
f71137f8453b7453a7288e056250a0b4f1b5adfe | 688 | py | Python | openapi_documentor/users/models.py | codeasashu/openapi-documentor | dde825edaac85bb117d06adf0a4eabf1f5da44f5 | [
"MIT"
] | null | null | null | openapi_documentor/users/models.py | codeasashu/openapi-documentor | dde825edaac85bb117d06adf0a4eabf1f5da44f5 | [
"MIT"
] | 5 | 2021-04-06T07:46:47.000Z | 2022-03-02T13:12:20.000Z | openapi_documentor/users/models.py | codeasashu/openapi-documentor | dde825edaac85bb117d06adf0a4eabf1f5da44f5 | [
"MIT"
] | null | null | null | from django.contrib.auth.models import AbstractUser
from django.db.models import CharField
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
class User(AbstractUser):
    """Default user for Openapi Documentor."""
    #: First and last name do not cover name patterns around the globe
    name = CharField(_("Name of User"), blank=True, max_length=255)
    # Setting the inherited fields to None removes them from the model.
    first_name = None  # type: ignore
    last_name = None  # type: ignore
    def get_absolute_url(self):
        """Get url for user's detail view.
        Returns:
            str: URL for user detail.
        """
        return reverse("users:detail", kwargs={"username": self.username})
| 29.913043 | 74 | 0.686047 | from django.contrib.auth.models import AbstractUser
from django.db.models import CharField
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
class User(AbstractUser):
    """Default user model with a single free-form ``name`` field."""
    # Single name field; first/last names do not cover global name patterns.
    name = CharField(_("Name of User"), blank=True, max_length=255)
    # Setting the inherited fields to None removes them from the model.
    first_name = None
    last_name = None
    def get_absolute_url(self):
        """Return the URL of this user's detail view as a str."""
        return reverse("users:detail", kwargs={"username": self.username})
| true | true |
f7113827be9d6a1cee1e09d156ea82251b27fde6 | 6,164 | py | Python | fgh_gnn/data/graph_builder.py | alstonlo/fgh-gnn | 099aee925a3c5077070803d31b6e45793972239c | [
"MIT"
] | null | null | null | fgh_gnn/data/graph_builder.py | alstonlo/fgh-gnn | 099aee925a3c5077070803d31b6e45793972239c | [
"MIT"
] | null | null | null | fgh_gnn/data/graph_builder.py | alstonlo/fgh-gnn | 099aee925a3c5077070803d31b6e45793972239c | [
"MIT"
] | null | null | null | import itertools
import dgl
import torch
from rdkit import Chem
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import minimum_spanning_tree
from fgh_gnn.utils import FGROUP_MOLS, get_ring_fragments, ogb_graph_to_mol
class FGroupHetGraphBuilder:
    """Builds DGL heterographs with atom nodes plus cluster (functional-group/
    ring/atom) nodes from OGB-style molecular graphs, using a vocabulary
    DataFrame with 'type' and 'name' columns."""
    def __init__(self, vocab):
        self.vocab = vocab
        self.fgroup_vocab = vocab.loc[vocab['type'] == 'fgroup']
        self.ring_vocab = vocab.loc[vocab['type'] == 'ring']
        self.ring_smiles_set = set(self.ring_vocab['name'].unique())
        # Last vocabulary row is the catch-all index for unknown rings.
        self.misc_ring_idx = len(vocab) - 1
    def build_fgroup_heterograph(self, raw_graph):
        """Convert one raw OGB graph dict into a DGL heterograph with
        'atom' and 'cluster' node types and bond/refine/pool/overlap edges."""
        atom_feats = torch.from_numpy(raw_graph['node_feat'])
        bond_feats = torch.from_numpy(raw_graph['edge_feat'])
        a2a_edges = torch.from_numpy(raw_graph['edge_index'])
        # build tree
        mol = ogb_graph_to_mol(raw_graph)
        clusters = self._make_clusters(mol)
        cluster_feats = torch.tensor([c.features for c in clusters],
                                     dtype=torch.long)
        c2atom_edges, atom2c_edges = self._make_inter_edges(clusters)
        c2c_edges, overlap_feats = \
            self._make_intracluster_edges(raw_graph, clusters)
        data_dict = {
            ('atom', 'bond', 'atom'): (a2a_edges[0], a2a_edges[1]),
            ('cluster', 'refine', 'atom'): (c2atom_edges[0], c2atom_edges[1]),
            ('atom', 'pool', 'cluster'): (atom2c_edges[0], atom2c_edges[1]),
            ('cluster', 'overlap', 'cluster'): (c2c_edges[0], c2c_edges[1])
        }
        num_nodes_dict = {
            'atom': raw_graph['num_nodes'],
            'cluster': len(clusters)
        }
        g = dgl.heterograph(data_dict=data_dict, num_nodes_dict=num_nodes_dict)
        g.nodes['atom'].data['x'] = atom_feats
        g.nodes['cluster'].data['x'] = cluster_feats
        g.edges['bond'].data['x'] = bond_feats
        g.edges['overlap'].data['x'] = overlap_feats
        return g
    def _make_clusters(self, mol):
        """Cover the molecule with clusters: fgroup matches, rings, then
        any atoms left over as singleton clusters."""
        clusters = []
        # add all functional groups
        for row in self.fgroup_vocab.itertuples():
            row_idx = row.Index
            fgroup_query = FGROUP_MOLS[row.name]
            matches = mol.GetSubstructMatches(fgroup_query)
            for match_idxs in matches:
                clusters.append(Cluster(row_idx, 'fgroup', match_idxs))
        # add all rings
        for ring_idxs in get_ring_fragments(mol):
            ring_smiles = Chem.MolFragmentToSmiles(mol, list(ring_idxs),
                                                   isomericSmiles=False,
                                                   kekuleSmiles=True)
            if ring_smiles in self.ring_smiles_set:
                row_idx = self.ring_vocab.index[self.ring_vocab['name']
                                                == ring_smiles]
                row_idx = int(row_idx[0])
            else:
                # Unknown ring: fall back to the catch-all vocabulary entry.
                row_idx = self.misc_ring_idx
            clusters.append(Cluster(row_idx, 'ring', ring_idxs))
        # add all remaining singular atoms
        leftover_atoms = set(range(mol.GetNumAtoms()))
        for cluster in clusters:
            leftover_atoms.difference_update(cluster.atom_idxs)
        for atom_idx in leftover_atoms:
            atomic_num = mol.GetAtomWithIdx(atom_idx).GetAtomicNum()
            clusters.append(Cluster(atomic_num, 'atom', (atom_idx,)))
        return clusters
    def _make_inter_edges(self, clusters):
        """Build cluster->atom ('refine') and atom->cluster ('pool') edge
        index tensors, one pair per (cluster, member atom)."""
        c2atom_edges = [[], []]
        atom2c_edges = [[], []]
        for cluster_idx, cluster in enumerate(clusters):
            for atom_idx in cluster.atom_idxs:
                c2atom_edges[0].append(cluster_idx)
                c2atom_edges[1].append(atom_idx)
                atom2c_edges[0].append(atom_idx)
                atom2c_edges[1].append(cluster_idx)
        c2atom_edges = torch.tensor(c2atom_edges, dtype=torch.long)
        atom2c_edges = torch.tensor(atom2c_edges, dtype=torch.long)
        return c2atom_edges, atom2c_edges
    def _make_intracluster_edges(self, raw_graph, clusters):
        """Connect clusters that share/neighbour atoms, reduce to a minimum
        spanning tree, and return bidirectional edge indices + weights."""
        edge_index = raw_graph['edge_index']
        edge_dict = {i: set() for i in range(raw_graph['num_nodes'])}
        for i, j in zip(edge_index[0], edge_index[1]):
            edge_dict[i].add(j)
        num_clusters = len(clusters)
        adj_matrix = [[0] * num_clusters for _ in range(num_clusters)]
        # Each cluster's atoms plus their one-hop neighbours.
        cluster_neighbours = []
        for cluster in clusters:
            neighbours = set()
            for atom_idx in cluster.atom_idxs:
                neighbours.add(atom_idx)
                neighbours.update(edge_dict[atom_idx])
            cluster_neighbours.append(neighbours)
        for i, j in itertools.combinations(range(num_clusters), r=2):
            ci, cj = clusters[i], clusters[j]
            # Overlapping clusters get weight = overlap size + 1; merely
            # adjacent clusters get weight 1; otherwise no edge.
            if ci.atom_idxs & cj.atom_idxs:
                edge_weight = len(ci.atom_idxs & cj.atom_idxs) + 1
            elif cluster_neighbours[i] & cluster_neighbours[j]:
                edge_weight = 1
            else:
                continue
            adj_matrix[i][j] = edge_weight
            adj_matrix[j][i] = edge_weight
        # build spanning tree
        adj_matrix = csr_matrix(adj_matrix)
        span_tree = minimum_spanning_tree(adj_matrix, overwrite=True)
        adj_matrix = torch.from_numpy(span_tree.toarray()).long()
        adj_matrix = to_bidirectional(adj_matrix)
        # represent as sparse matrix
        adj_matrix = adj_matrix.to_sparse().coalesce()
        edge_index = adj_matrix.indices()
        edge_feats = adj_matrix.values()
        return edge_index, edge_feats
class Cluster:
    """A group of atoms (functional group, ring, or single atom) in a molecule.

    Attributes:
        vocab_id: Vocabulary row index (or atomic number for 'atom' clusters).
        cluster_type_idx: Integer encoding of the type (0=fgroup, 1=ring, 2=atom).
        atom_idxs: Immutable set of atom indices covered by this cluster.
        features: [vocab_id, cluster_type_idx] feature vector for the node.
    """
    _TYPES = ('fgroup', 'ring', 'atom')
    def __init__(self, vocab_id, cluster_type, atom_idxs):
        # Sanity guard: reject non-int ids (e.g. numpy integers) with a
        # message instead of the previous bare ValueError().
        if not isinstance(vocab_id, int):
            raise ValueError(
                f"vocab_id must be int, got {type(vocab_id).__name__}")
        if cluster_type not in self._TYPES:
            raise ValueError(f"unknown cluster_type: {cluster_type!r}")
        self.vocab_id = vocab_id
        self.cluster_type_idx = self._TYPES.index(cluster_type)
        self.atom_idxs = frozenset(atom_idxs)
        self.features = [self.vocab_id, self.cluster_type_idx]
# Helper Method
def to_bidirectional(X):
    """Symmetrize a weighted adjacency matrix.

    Where both directions carry positive weight, keep the smaller of the two;
    otherwise use X + X^T so a one-sided edge appears in both directions with
    its original weight.
    """
    transposed = X.t()
    pairwise_min = torch.min(X, transposed)
    return torch.where(pairwise_min > 0, pairwise_min, X + transposed)
| 32.613757 | 79 | 0.602531 | import itertools
import dgl
import torch
from rdkit import Chem
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import minimum_spanning_tree
from fgh_gnn.utils import FGROUP_MOLS, get_ring_fragments, ogb_graph_to_mol
class FGroupHetGraphBuilder:
def __init__(self, vocab):
self.vocab = vocab
self.fgroup_vocab = vocab.loc[vocab['type'] == 'fgroup']
self.ring_vocab = vocab.loc[vocab['type'] == 'ring']
self.ring_smiles_set = set(self.ring_vocab['name'].unique())
self.misc_ring_idx = len(vocab) - 1
def build_fgroup_heterograph(self, raw_graph):
atom_feats = torch.from_numpy(raw_graph['node_feat'])
bond_feats = torch.from_numpy(raw_graph['edge_feat'])
a2a_edges = torch.from_numpy(raw_graph['edge_index'])
mol = ogb_graph_to_mol(raw_graph)
clusters = self._make_clusters(mol)
cluster_feats = torch.tensor([c.features for c in clusters],
dtype=torch.long)
c2atom_edges, atom2c_edges = self._make_inter_edges(clusters)
c2c_edges, overlap_feats = \
self._make_intracluster_edges(raw_graph, clusters)
data_dict = {
('atom', 'bond', 'atom'): (a2a_edges[0], a2a_edges[1]),
('cluster', 'refine', 'atom'): (c2atom_edges[0], c2atom_edges[1]),
('atom', 'pool', 'cluster'): (atom2c_edges[0], atom2c_edges[1]),
('cluster', 'overlap', 'cluster'): (c2c_edges[0], c2c_edges[1])
}
num_nodes_dict = {
'atom': raw_graph['num_nodes'],
'cluster': len(clusters)
}
g = dgl.heterograph(data_dict=data_dict, num_nodes_dict=num_nodes_dict)
g.nodes['atom'].data['x'] = atom_feats
g.nodes['cluster'].data['x'] = cluster_feats
g.edges['bond'].data['x'] = bond_feats
g.edges['overlap'].data['x'] = overlap_feats
return g
def _make_clusters(self, mol):
clusters = []
for row in self.fgroup_vocab.itertuples():
row_idx = row.Index
fgroup_query = FGROUP_MOLS[row.name]
matches = mol.GetSubstructMatches(fgroup_query)
for match_idxs in matches:
clusters.append(Cluster(row_idx, 'fgroup', match_idxs))
for ring_idxs in get_ring_fragments(mol):
ring_smiles = Chem.MolFragmentToSmiles(mol, list(ring_idxs),
isomericSmiles=False,
kekuleSmiles=True)
if ring_smiles in self.ring_smiles_set:
row_idx = self.ring_vocab.index[self.ring_vocab['name']
== ring_smiles]
row_idx = int(row_idx[0])
else:
row_idx = self.misc_ring_idx
clusters.append(Cluster(row_idx, 'ring', ring_idxs))
leftover_atoms = set(range(mol.GetNumAtoms()))
for cluster in clusters:
leftover_atoms.difference_update(cluster.atom_idxs)
for atom_idx in leftover_atoms:
atomic_num = mol.GetAtomWithIdx(atom_idx).GetAtomicNum()
clusters.append(Cluster(atomic_num, 'atom', (atom_idx,)))
return clusters
def _make_inter_edges(self, clusters):
c2atom_edges = [[], []]
atom2c_edges = [[], []]
for cluster_idx, cluster in enumerate(clusters):
for atom_idx in cluster.atom_idxs:
c2atom_edges[0].append(cluster_idx)
c2atom_edges[1].append(atom_idx)
atom2c_edges[0].append(atom_idx)
atom2c_edges[1].append(cluster_idx)
c2atom_edges = torch.tensor(c2atom_edges, dtype=torch.long)
atom2c_edges = torch.tensor(atom2c_edges, dtype=torch.long)
return c2atom_edges, atom2c_edges
    def _make_intracluster_edges(self, raw_graph, clusters):
        """Connect clusters by a minimum spanning tree over their overlaps.

        Returns ``(edge_index, edge_feats)``: a (2, E) index tensor and a
        per-edge weight tensor for bidirectional cluster-cluster edges.
        Assumes raw_graph is a dict with 'edge_index' and 'num_nodes'
        (OGB-style) — TODO confirm against the caller.
        """
        edge_index = raw_graph['edge_index']
        # atom -> set of directly bonded atoms, as given by edge_index
        # (presumably already stored bidirectionally upstream — verify).
        edge_dict = {i: set() for i in range(raw_graph['num_nodes'])}
        for i, j in zip(edge_index[0], edge_index[1]):
            edge_dict[i].add(j)
        num_clusters = len(clusters)
        adj_matrix = [[0] * num_clusters for _ in range(num_clusters)]
        # For each cluster: its atoms plus every atom adjacent to them.
        cluster_neighbours = []
        for cluster in clusters:
            neighbours = set()
            for atom_idx in cluster.atom_idxs:
                neighbours.add(atom_idx)
                neighbours.update(edge_dict[atom_idx])
            cluster_neighbours.append(neighbours)
        # Weight cluster pairs: shared atoms => len(overlap)+1, merely
        # adjacent => 1, otherwise no edge.  The *minimum* spanning tree
        # below therefore prefers adjacency links over heavy overlaps.
        for i, j in itertools.combinations(range(num_clusters), r=2):
            ci, cj = clusters[i], clusters[j]
            if ci.atom_idxs & cj.atom_idxs:
                edge_weight = len(ci.atom_idxs & cj.atom_idxs) + 1
            elif cluster_neighbours[i] & cluster_neighbours[j]:
                edge_weight = 1
            else:
                continue
            adj_matrix[i][j] = edge_weight
            adj_matrix[j][i] = edge_weight
        adj_matrix = csr_matrix(adj_matrix)
        # scipy returns a directed tree; mirror it so both directions exist.
        span_tree = minimum_spanning_tree(adj_matrix, overwrite=True)
        adj_matrix = torch.from_numpy(span_tree.toarray()).long()
        adj_matrix = to_bidirectional(adj_matrix)
        # Sparse COO form gives us the (2, E) indices and E weights directly.
        adj_matrix = adj_matrix.to_sparse().coalesce()
        edge_index = adj_matrix.indices()
        edge_feats = adj_matrix.values()
        return edge_index, edge_feats
class Cluster:
    """A group of atom indices tagged with a vocabulary id and a type.

    ``cluster_type`` must be one of 'fgroup', 'ring' or 'atom'; its
    position in that tuple becomes ``cluster_type_idx``.  ``features``
    is the ``[vocab_id, cluster_type_idx]`` pair consumed downstream as
    the cluster's node features.
    """

    _TYPES = ('fgroup', 'ring', 'atom')

    def __init__(self, vocab_id, cluster_type, atom_idxs):
        # Bug fix: the original raised a bare ValueError() with no message,
        # which made the failure hard to diagnose.
        if not isinstance(vocab_id, int):
            raise ValueError(
                "vocab_id must be an int, got {!r}".format(vocab_id))
        self.vocab_id = vocab_id
        # .index() raises ValueError for an unknown cluster_type, as before.
        self.cluster_type_idx = self._TYPES.index(cluster_type)
        # Stored as a frozenset: downstream code relies on set operations
        # (&, difference_update) over the atom indices.
        self.atom_idxs = frozenset(atom_idxs)
        self.features = [self.vocab_id, self.cluster_type_idx]
def to_bidirectional(X):
    """Symmetrize a weighted adjacency matrix.

    Where both X[i, j] and X[j, i] are positive the smaller weight is
    kept; everywhere else the two entries are summed, so a one-sided
    edge is mirrored to the other direction unchanged.
    """
    transposed = X.t()
    elementwise_min = torch.min(X, transposed)
    return torch.where(elementwise_min > 0, elementwise_min, X + transposed)
| true | true |
f71138b533e46adf6dde35d54e02d04a62c01bd9 | 1,408 | py | Python | translate/cloud-client/translate_v3_get_supported_languages_with_target.py | summersab/python-docs-samples | 7c1e9685fe190f7789d8e1dbcfe8c01a20e3dc66 | [
"Apache-2.0"
] | 2 | 2020-09-19T04:22:52.000Z | 2020-09-23T14:04:17.000Z | translate/cloud-client/translate_v3_get_supported_languages_with_target.py | summersab/python-docs-samples | 7c1e9685fe190f7789d8e1dbcfe8c01a20e3dc66 | [
"Apache-2.0"
] | 1 | 2020-07-23T10:47:32.000Z | 2020-07-23T10:47:32.000Z | translate/cloud-client/translate_v3_get_supported_languages_with_target.py | summersab/python-docs-samples | 7c1e9685fe190f7789d8e1dbcfe8c01a20e3dc66 | [
"Apache-2.0"
] | 2 | 2020-09-13T03:47:22.000Z | 2020-09-23T14:04:19.000Z | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START translate_v3_get_supported_languages_for_target]
from google.cloud import translate
def get_supported_languages_with_target(project_id="YOUR_PROJECT_ID"):
    """Print every supported language code with its display name
    localized to Icelandic ("is")."""
    client = translate.TranslationServiceClient()
    location = client.location_path(project_id, "global")
    # Supported language codes: https://cloud.google.com/translate/docs/languages
    supported = client.get_supported_languages(
        display_language_code="is",  # language the display names come back in
        parent=location,
    )
    for language in supported.languages:
        print(u"Language Code: {}".format(language.language_code))
        print(u"Display Name: {}".format(language.display_name))
# [END translate_v3_get_supported_languages_for_target]
| 39.111111 | 81 | 0.758523 |
from google.cloud import translate
def get_supported_languages_with_target(project_id="YOUR_PROJECT_ID"):
    """Print supported languages with display names localized to Icelandic."""
    client = translate.TranslationServiceClient()
    # "global" location under the given GCP project.
    parent = client.location_path(project_id, "global")
    response = client.get_supported_languages(
        display_language_code="is",
        parent=parent
    )
    for language in response.languages:
        print(u"Language Code: {}".format(language.language_code))
        print(u"Display Name: {}".format(language.display_name))
| true | true |
f711396a297eb7913d70fb420d60db3044534bfe | 209 | py | Python | src/clikit/__init__.py | abn/clikit | c9f96ee7a39a0d59d6cf7b5888589a030f36f051 | [
"MIT"
] | null | null | null | src/clikit/__init__.py | abn/clikit | c9f96ee7a39a0d59d6cf7b5888589a030f36f051 | [
"MIT"
] | null | null | null | src/clikit/__init__.py | abn/clikit | c9f96ee7a39a0d59d6cf7b5888589a030f36f051 | [
"MIT"
] | null | null | null | from .api.config.application_config import ApplicationConfig
from .console_application import ConsoleApplication
from .config.default_application_config import DefaultApplicationConfig
__version__ = "0.2.4"
| 29.857143 | 71 | 0.866029 | from .api.config.application_config import ApplicationConfig
from .console_application import ConsoleApplication
from .config.default_application_config import DefaultApplicationConfig
__version__ = "0.2.4"
| true | true |
f7113a0b6eae6c1acd7f596dd110305a0730d168 | 10,904 | py | Python | mdparser.py | galeo/pagedown-editor-only | e053bb61a48e257011a76f82bd0c546d6f044042 | [
"MIT"
] | 2 | 2015-03-02T10:52:52.000Z | 2016-03-13T11:44:01.000Z | mdparser.py | galeo/pagedown-editor-only | e053bb61a48e257011a76f82bd0c546d6f044042 | [
"MIT"
] | null | null | null | mdparser.py | galeo/pagedown-editor-only | e053bb61a48e257011a76f82bd0c546d6f044042 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Markdown parsers.
#
#
# Author: Moogen Tian <http://blog.galeo.me>
#
# Legal:
#
# This file is published under BSD License.
#
# And the code structure references:
#
# * pagewise (by ainm <ainm at gmx.com>, with personal public license)
#
# * mynt (by Andrew Fricke, the author of Hoep, with BSD license)
#
# please NOTICE that!
#
# Hoep only accepts and returns *unicode* objects in Python 2 and
# *str* objects in Python 3.
from __future__ import unicode_literals
import re
import sys
#
# Error handling.
#
class MDParserException(Exception):
    """Raised (via error()) for any failure reported by this module."""
    pass
def error(message, *args):
    """Signal a fatal parsing problem.

    Raises MDParserException carrying ``message % args`` as its text.
    """
    text = message % args
    raise MDParserException(text)
def warning(message, *args):
    """Write a non-fatal diagnostic (no trailing newline) to stderr."""
    text = message % args
    sys.stderr.write("WARNING: " + text)
def halt(message, *args):
    """Write a fatal diagnostic to stderr, then exit with status 1."""
    text = message % args
    sys.stderr.write("FATAL: " + text)
    sys.exit(1)
#
# Markup support.
#
# Tables with bootstrap
def tablestrap(content, class_=''):
    """Wrap *content* (table rows as HTML) in a <table> tag with
    Bootstrap classes.

    *class_* is a whitespace-separated string of CSS classes; 'table'
    is prepended when missing.  An empty *class_* yields a bare
    ``<table >`` tag, matching the historical output.
    """
    names = class_.split() if class_ else class_
    if isinstance(names, list):
        if 'table' not in names:
            names = ['table'] + names
        names = ' '.join(names)
    attr = 'class="%s"' % names if names else names
    return ''.join(['<table ', attr, '>\n', content, '\n</table>'])
# Pygments.
HAVE_PYGMENTS = True
try:
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import get_lexer_by_name
except ImportError:
HAVE_PYGMENTS = False
def require_pygments():
    """Fail (via error()) when Pygments is not installed.

    Called before enabling features whose rendering needs Pygments.
    """
    if HAVE_PYGMENTS:
        return
    error("please, install Pygments <http://pygments.org/>.")
def hl_with_pygments(text, lang, fmt_options={}):
    """Render *text* as highlighted HTML using the Pygments lexer for *lang*.

    Unknown languages fall back to the plain-text lexer, and the output
    is prefixed with an inline HTML error notice.
    """
    prefix = ''
    formatter = HtmlFormatter(**fmt_options)
    try:
        lexer = get_lexer_by_name(lang, stripall=True)
    except ValueError:
        prefix = '<div class="highlight"><span class="err">'\
                 'Error: language "%s" is not supported</span></div>' % lang
        lexer = get_lexer_by_name('text', stripall=True)
    return ''.join([prefix, highlight(text, lexer, formatter)])
# Available renderers will add themselves to this hash.
# The key is the renderer name, the value is another hash
# with two keys/values, the renderer constructor/options.
MARKUP_RENDERERS = {}
def xlate_exts_flags(exts_flags_opts, parser_exts_flags):
    """Translate symbolic extension/flag names into a parser's bitmasks.

    Returns ``(extensions_mask, render_flags_mask)``.  Names unknown to
    the parser are skipped with a warning.
    """
    exts = exts_flags_opts['extensions']
    flags = exts_flags_opts['render_flags']
    known_exts = parser_exts_flags['extensions']
    known_flags = parser_exts_flags['render_flags']
    # Both of these features rely on Pygments for code highlighting.
    if ('fenced_code' in exts) or ('tables' in exts):
        require_pygments()
    ext_mask = 0
    for name in exts:
        if name in known_exts:
            ext_mask |= known_exts[name]
        else:
            warning("ignoring unknown extension: %s", str(name))
    flag_mask = 0
    for name in flags:
        if name in known_flags:
            flag_mask |= known_flags[name]
        else:
            warning("ignoring unknown render flag: %s", str(name))
    return ext_mask, flag_mask
#
# Misaka.
#
HAVE_MISAKA = True
try:
    import misaka
    from misaka import HtmlRenderer
    # Symbolic option names used by this module, mapped onto Misaka's
    # bitmask constants (consumed by xlate_exts_flags()).
    MISAKA_EXTS_FLAGS = {
        'extensions': {
            'tables': misaka.EXT_TABLES,
            'fenced_code': misaka.EXT_FENCED_CODE,
            'footnotes': misaka.EXT_FOOTNOTES,
            'autolink': misaka.EXT_AUTOLINK,
            'strikethrough': misaka.EXT_STRIKETHROUGH,
            'underline': misaka.EXT_UNDERLINE,
            'highlight': misaka.EXT_HIGHLIGHT,
            'quote': misaka.EXT_QUOTE,
            'superscript': misaka.EXT_SUPERSCRIPT,
            'math': misaka.EXT_MATH,
            'no_intra_emphasis': misaka.EXT_NO_INTRA_EMPHASIS,
            'space_headers': misaka.EXT_SPACE_HEADERS,
            'math_explicit': misaka.EXT_MATH_EXPLICIT,
            'disable_indented_code': misaka.EXT_DISABLE_INDENTED_CODE
        },
        'render_flags': {
            'skip_html': misaka.HTML_SKIP_HTML,
            'escape': misaka.HTML_ESCAPE,
            'hard_wrap': misaka.HTML_HARD_WRAP,
            'use_xhtml': misaka.HTML_USE_XHTML,
        }
    }
    class MisakaRenderer(HtmlRenderer):
        """Misaka HTML renderer wired to Bootstrap tables and, when
        Pygments is available, highlighted code blocks."""
        def __init__(self, tbl_class='', fmt_options={}, *args, **kwargs):
            super(MisakaRenderer, self).__init__(*args, **kwargs)
            self.tbl_class = tbl_class
            self.fmt_options = fmt_options
        # The override is created at class-definition time only when
        # Pygments imported successfully; otherwise Misaka's default
        # code-block rendering stays in effect.
        if HAVE_PYGMENTS:
            def blockcode(self, text, lang):
                return hl_with_pygments(text, lang, self.fmt_options)
        def table(self, content):
            return tablestrap(content, self.tbl_class)
    def misaka_renderer(options, tbl_class='', fmt_options={}):
        """
        Returns a function that can be used to transform Markdown to HTML
        using Misaka, preconfigured with the given extensions/flags.
        """
        Renderer = MisakaRenderer
        used_exts, used_flags = xlate_exts_flags(options, MISAKA_EXTS_FLAGS)
        return misaka.Markdown(Renderer(tbl_class, fmt_options, used_flags), used_exts)
    # Register under the name MarkupProvider looks up.
    MARKUP_RENDERERS['misaka'] = {
        'renderer': misaka_renderer,
        'options': ['extensions', 'render_flags'],
    }
except ImportError:
    HAVE_MISAKA = False
#
# hoep
#
HAVE_HOEP = True
try:
    import hoep as h
    # Symbolic option names mapped onto Hoep's bitmask constants
    # (consumed by xlate_exts_flags()).
    HOEP_EXTS_FLAGS = {
        'extensions': {
            'autolink': h.EXT_AUTOLINK,
            'disable_indented_code': h.EXT_DISABLE_INDENTED_CODE,
            'fenced_code': h.EXT_FENCED_CODE,
            'footnotes': h.EXT_FOOTNOTES,
            'highlight': h.EXT_HIGHLIGHT,
            'lax_spacing': h.EXT_LAX_SPACING,
            'no_intra_emphasis': h.EXT_NO_INTRA_EMPHASIS,
            'quote': h.EXT_QUOTE,
            'space_headers': h.EXT_SPACE_HEADERS,
            'strikethrough': h.EXT_STRIKETHROUGH,
            'superscript': h.EXT_SUPERSCRIPT,
            'tables': h.EXT_TABLES,
            'underline': h.EXT_UNDERLINE
        },
        'render_flags': {
            'escape': h.HTML_ESCAPE,
            'expand_tabs': h.HTML_EXPAND_TABS,
            'hard_wrap': h.HTML_HARD_WRAP,
            'safelink': h.HTML_SAFELINK,
            'skip_html': h.HTML_SKIP_HTML,
            'skip_images': h.HTML_SKIP_IMAGES,
            'skip_links': h.HTML_SKIP_LINKS,
            'skip_style': h.HTML_SKIP_STYLE,
            'smartypants': h.HTML_SMARTYPANTS,
            'toc': h.HTML_TOC,
            'use_xhtml': h.HTML_USE_XHTML
        }
    }
    class HoepRenderer(h.Hoep):
        """Hoep HTML renderer: Bootstrap tables, optional Pygments code
        highlighting, and anchored headers when the 'toc' flag is set."""
        def __init__(self, extensions=0, render_flags=0, tbl_class='',
                     fmt_options={}):
            super(HoepRenderer, self).__init__(extensions, render_flags)
            # Header identifiers already emitted, so duplicates can be
            # suffixed; reset per document in preprocess().
            self._toc_ids = {}
            # (pattern, replacement) pairs applied in order to turn header
            # text into an HTML id: strip tags, drop disallowed characters,
            # hyphenate whitespace, trim leading non-letters, and default
            # an empty result to 'section'.
            self._toc_patterns = (
                (r'<[^<]+?>', ''),
                (r'[^a-z0-9_.\s-]', ''),
                (r'\s+', '-'),
                (r'^[^a-z]+', ''),
                (r'^$', 'section')
            )
            self.tbl_class = tbl_class
            self.fmt_options = fmt_options
        # Defined only when Pygments imported successfully; otherwise
        # Hoep's default code-block rendering stays in effect.
        if HAVE_PYGMENTS:
            def block_code(self, text, lang):
                """Highlight code with pygments.
                """
                return hl_with_pygments(text, lang, self.fmt_options)
        def table(self, header, body):
            content = header + body
            return tablestrap(content, self.tbl_class)
        def header(self, text, level):
            """Render <hN>; with the 'toc' flag, attach a slug id plus a
            self-link anchor, de-duplicating repeated slugs."""
            if self.render_flags & h.HTML_TOC:
                identifier = text.lower()
                for pattern, replace in self._toc_patterns:
                    identifier = re.sub(pattern, replace, identifier)
                if identifier in self._toc_ids:
                    self._toc_ids[identifier] += 1
                    identifier = '{0}-{1}'.format(identifier, self._toc_ids[identifier])
                else:
                    self._toc_ids[identifier] = 1
                return ('<h{0} id="{1}">{2}'
                        '<a class="headerlink" href="#{1}" title="Link to header title.">¶</a>'
                        '</h{0}>').format(level, identifier, text)
            else:
                return '<h{0}>{1}</h{0}>'.format(level, text)
        def preprocess(self, markdown):
            # Reset per-document header bookkeeping before each render.
            self._toc_ids.clear()
            return markdown
    def hoep_renderer(options, **kwargs):
        """
        Returns a function that can be used to transform Markdown to HTML
        using Hoep, preconfigured with the given extensions/flags.
        """
        used_exts, used_flags = xlate_exts_flags(options, HOEP_EXTS_FLAGS)
        return HoepRenderer(used_exts, used_flags, **kwargs).render
    # Register under the name MarkupProvider looks up.
    MARKUP_RENDERERS['hoep'] = {
        'renderer': hoep_renderer,
        'options': ['extensions', 'render_flags']
    }
except ImportError:
    HAVE_HOEP = False
class MarkupProvider(object):
    """Factory for Markdown-rendering callables.

    `markup` selects a registered renderer ('misaka' | 'hoep');
    `options` holds the default 'extensions' and 'render_flags' lists
    used when a get_renderer() call does not override them.
    """
    def __init__(self, markup, options):
        """
        Arguments:
        - `markup`: str, 'misaka' | 'hoep'.
        - `options`: dict, has the keys: 'extensions' and 'render_flags'.
        """
        if markup not in MARKUP_RENDERERS:
            error("Unavailable markup renderer: %s", markup)
        self.markup = markup
        # Bug fix: the original used `and`, so a dict missing only one of
        # the two required keys slipped through validation and later
        # failed with a bare KeyError inside _get_option().
        if ('extensions' not in options) or ('render_flags' not in options):
            error("Key error in options, must contain 'extensions' and 'render_flags'.")
        self.options = options
    def _get_option(self, option, markup_options={}):
        """
        Lookup 'option' in 'markup_options' (a dict)
        but fall back to the default option if unbound.
        (markup_options is never mutated, so the mutable default is safe.)
        """
        if markup_options and (option in markup_options):
            return markup_options[option]
        else:
            return self.options[option]
    def get_renderer(self, markup_options={}, **kwargs):
        """
        Return a render function configured from 'markup_options'.
        All unspecified options fall back to the base configuration;
        extra keyword arguments are forwarded to the renderer factory.
        """
        options = {}
        for option in MARKUP_RENDERERS[self.markup]['options']:
            options[option] = self._get_option(option, markup_options)
        return MARKUP_RENDERERS[self.markup]['renderer'](options, **kwargs)
| 30.543417 | 95 | 0.573184 |
from __future__ import unicode_literals
import re
import sys
class MDParserException(Exception):
    """Raised (via error()) for any failure reported by this module."""
    pass
def error(message, *args):
    """Raise an MDParserException whose text is ``message % args``."""
    raise MDParserException(message % args)
def warning(message, *args):
    """Write a non-fatal warning (no trailing newline) to standard error."""
    sys.stderr.write("WARNING: " + message % args)
def halt(message, *args):
    """Write a fatal error to standard error and exit with status 1."""
    sys.stderr.write("FATAL: " + message % args)
    sys.exit(1)
def tablestrap(content, class_=''):
    """Wrap *content* (table rows as HTML) in a <table> tag carrying
    Bootstrap classes; 'table' is prepended to *class_* when missing."""
    if class_:
        # Split the whitespace-separated class string into a list.
        class_ = class_.split()
    if isinstance(class_, list):
        if 'table' not in class_:
            class_ = ['table'] + class_
        class_ = ' '.join(class_)
    if class_:
        class_ = 'class="%s"' % class_
    return ''.join(['<table ', class_, '>\n',
                    content, '\n</table>'])
HAVE_PYGMENTS = True
try:
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import get_lexer_by_name
except ImportError:
HAVE_PYGMENTS = False
def require_pygments():
    """Abort (via error()) when Pygments failed to import."""
    if not HAVE_PYGMENTS:
        error("please, install Pygments <http://pygments.org/>.")
def hl_with_pygments(text, lang, fmt_options={}):
    """Highlight *text* for language *lang* with Pygments.

    Unknown languages fall back to the plain-text lexer; the output is
    then prefixed with an inline HTML error notice.
    """
    s = ''
    formatter = HtmlFormatter(**fmt_options)
    try:
        lexer = get_lexer_by_name(lang, stripall=True)
    except ValueError:
        s = '<div class="highlight"><span class="err">'\
            'Error: language "%s" is not supported</span></div>' % lang
        lexer = get_lexer_by_name('text', stripall=True)
    return ''.join([s, highlight(text, lexer, formatter)])
MARKUP_RENDERERS = {}
def xlate_exts_flags(exts_flags_opts, parser_exts_flags):
    """Translate symbolic extension/render-flag names into the parser's
    bitmask constants; returns ``(extensions, render_flags)`` masks.
    Names unknown to the parser are skipped with a warning."""
    actual_exts = 0
    actual_flags = 0
    exts = exts_flags_opts['extensions']
    flags = exts_flags_opts['render_flags']
    parser_exts = parser_exts_flags['extensions']
    parser_flags = parser_exts_flags['render_flags']
    # Fenced code blocks and tables are highlighted via Pygments.
    if ('fenced_code' in exts) or ('tables' in exts):
        require_pygments()
    for ext in exts:
        if ext in parser_exts:
            actual_exts |= parser_exts[ext]
        else:
            warning("ignoring unknown extension: %s", str(ext))
    for flag in flags:
        if flag in parser_flags:
            actual_flags |= parser_flags[flag]
        else:
            warning("ignoring unknown render flag: %s", str(flag))
    return actual_exts, actual_flags
HAVE_MISAKA = True
try:
    import misaka
    from misaka import HtmlRenderer
    # Symbolic option names used by this module, mapped onto Misaka's
    # bitmask constants (consumed by xlate_exts_flags()).
    MISAKA_EXTS_FLAGS = {
        'extensions': {
            'tables': misaka.EXT_TABLES,
            'fenced_code': misaka.EXT_FENCED_CODE,
            'footnotes': misaka.EXT_FOOTNOTES,
            'autolink': misaka.EXT_AUTOLINK,
            'strikethrough': misaka.EXT_STRIKETHROUGH,
            'underline': misaka.EXT_UNDERLINE,
            'highlight': misaka.EXT_HIGHLIGHT,
            'quote': misaka.EXT_QUOTE,
            'superscript': misaka.EXT_SUPERSCRIPT,
            'math': misaka.EXT_MATH,
            'no_intra_emphasis': misaka.EXT_NO_INTRA_EMPHASIS,
            'space_headers': misaka.EXT_SPACE_HEADERS,
            'math_explicit': misaka.EXT_MATH_EXPLICIT,
            'disable_indented_code': misaka.EXT_DISABLE_INDENTED_CODE
        },
        'render_flags': {
            'skip_html': misaka.HTML_SKIP_HTML,
            'escape': misaka.HTML_ESCAPE,
            'hard_wrap': misaka.HTML_HARD_WRAP,
            'use_xhtml': misaka.HTML_USE_XHTML,
        }
    }
    class MisakaRenderer(HtmlRenderer):
        """Misaka HTML renderer wired to Bootstrap tables and, when
        Pygments is available, highlighted code blocks."""
        def __init__(self, tbl_class='', fmt_options={}, *args, **kwargs):
            super(MisakaRenderer, self).__init__(*args, **kwargs)
            self.tbl_class = tbl_class
            self.fmt_options = fmt_options
        # The override is created at class-definition time only when
        # Pygments imported successfully; otherwise Misaka's default
        # code-block rendering stays in effect.
        if HAVE_PYGMENTS:
            def blockcode(self, text, lang):
                return hl_with_pygments(text, lang, self.fmt_options)
        def table(self, content):
            return tablestrap(content, self.tbl_class)
    def misaka_renderer(options, tbl_class='', fmt_options={}):
        """Return a Markdown->HTML callable preconfigured with the given
        extensions/render flags."""
        Renderer = MisakaRenderer
        used_exts, used_flags = xlate_exts_flags(options, MISAKA_EXTS_FLAGS)
        return misaka.Markdown(Renderer(tbl_class, fmt_options, used_flags), used_exts)
    # Register under the name MarkupProvider looks up.
    MARKUP_RENDERERS['misaka'] = {
        'renderer': misaka_renderer,
        'options': ['extensions', 'render_flags'],
    }
except ImportError:
    HAVE_MISAKA = False
HAVE_HOEP = True
try:
    import hoep as h
    # Symbolic option names mapped onto Hoep's bitmask constants
    # (consumed by xlate_exts_flags()).
    HOEP_EXTS_FLAGS = {
        'extensions': {
            'autolink': h.EXT_AUTOLINK,
            'disable_indented_code': h.EXT_DISABLE_INDENTED_CODE,
            'fenced_code': h.EXT_FENCED_CODE,
            'footnotes': h.EXT_FOOTNOTES,
            'highlight': h.EXT_HIGHLIGHT,
            'lax_spacing': h.EXT_LAX_SPACING,
            'no_intra_emphasis': h.EXT_NO_INTRA_EMPHASIS,
            'quote': h.EXT_QUOTE,
            'space_headers': h.EXT_SPACE_HEADERS,
            'strikethrough': h.EXT_STRIKETHROUGH,
            'superscript': h.EXT_SUPERSCRIPT,
            'tables': h.EXT_TABLES,
            'underline': h.EXT_UNDERLINE
        },
        'render_flags': {
            'escape': h.HTML_ESCAPE,
            'expand_tabs': h.HTML_EXPAND_TABS,
            'hard_wrap': h.HTML_HARD_WRAP,
            'safelink': h.HTML_SAFELINK,
            'skip_html': h.HTML_SKIP_HTML,
            'skip_images': h.HTML_SKIP_IMAGES,
            'skip_links': h.HTML_SKIP_LINKS,
            'skip_style': h.HTML_SKIP_STYLE,
            'smartypants': h.HTML_SMARTYPANTS,
            'toc': h.HTML_TOC,
            'use_xhtml': h.HTML_USE_XHTML
        }
    }
    class HoepRenderer(h.Hoep):
        """Hoep HTML renderer: Bootstrap tables, optional Pygments code
        highlighting, and anchored headers when the 'toc' flag is set."""
        def __init__(self, extensions=0, render_flags=0, tbl_class='',
                     fmt_options={}):
            super(HoepRenderer, self).__init__(extensions, render_flags)
            # Header identifiers already emitted, so duplicates can be
            # suffixed; reset per document in preprocess().
            self._toc_ids = {}
            # (pattern, replacement) pairs applied in order to turn header
            # text into an HTML id: strip tags, drop disallowed characters,
            # hyphenate whitespace, trim leading non-letters, and default
            # an empty result to 'section'.
            self._toc_patterns = (
                (r'<[^<]+?>', ''),
                (r'[^a-z0-9_.\s-]', ''),
                (r'\s+', '-'),
                (r'^[^a-z]+', ''),
                (r'^$', 'section')
            )
            self.tbl_class = tbl_class
            self.fmt_options = fmt_options
        # Defined only when Pygments imported successfully; otherwise
        # Hoep's default code-block rendering stays in effect.
        if HAVE_PYGMENTS:
            def block_code(self, text, lang):
                """Highlight code with pygments."""
                return hl_with_pygments(text, lang, self.fmt_options)
        def table(self, header, body):
            content = header + body
            return tablestrap(content, self.tbl_class)
        def header(self, text, level):
            """Render <hN>; with the 'toc' flag, attach a slug id plus a
            self-link anchor, de-duplicating repeated slugs."""
            if self.render_flags & h.HTML_TOC:
                identifier = text.lower()
                for pattern, replace in self._toc_patterns:
                    identifier = re.sub(pattern, replace, identifier)
                if identifier in self._toc_ids:
                    self._toc_ids[identifier] += 1
                    identifier = '{0}-{1}'.format(identifier, self._toc_ids[identifier])
                else:
                    self._toc_ids[identifier] = 1
                return ('<h{0} id="{1}">{2}'
                        '<a class="headerlink" href="#{1}" title="Link to header title.">¶</a>'
                        '</h{0}>').format(level, identifier, text)
            else:
                return '<h{0}>{1}</h{0}>'.format(level, text)
        def preprocess(self, markdown):
            # Reset per-document header bookkeeping before each render.
            self._toc_ids.clear()
            return markdown
    def hoep_renderer(options, **kwargs):
        """Return a Markdown->HTML callable preconfigured with the given
        extensions/render flags."""
        used_exts, used_flags = xlate_exts_flags(options, HOEP_EXTS_FLAGS)
        return HoepRenderer(used_exts, used_flags, **kwargs).render
    # Register under the name MarkupProvider looks up.
    MARKUP_RENDERERS['hoep'] = {
        'renderer': hoep_renderer,
        'options': ['extensions', 'render_flags']
    }
except ImportError:
    HAVE_HOEP = False
class MarkupProvider(object):
    """Factory for Markdown-rendering callables.

    `markup` selects a registered renderer ('misaka' | 'hoep');
    `options` holds the default 'extensions' and 'render_flags' lists
    used when a get_renderer() call does not override them.
    """
    def __init__(self, markup, options):
        """
        Arguments:
        - `markup`: str, 'misaka' | 'hoep'.
        - `options`: dict, has the keys: 'extensions' and 'render_flags'.
        """
        if markup not in MARKUP_RENDERERS:
            error("Unavailable markup renderer: %s", markup)
        self.markup = markup
        # Bug fix: the original used `and`, so a dict missing only one of
        # the two required keys slipped through validation and later
        # failed with a bare KeyError inside _get_option().
        if ('extensions' not in options) or ('render_flags' not in options):
            error("Key error in options, must contain 'extensions' and 'render_flags'.")
        self.options = options
    def _get_option(self, option, markup_options={}):
        """
        Lookup 'option' in 'markup_options' (a dict)
        but fall back to the default option if unbound.
        (markup_options is never mutated, so the mutable default is safe.)
        """
        if markup_options and (option in markup_options):
            return markup_options[option]
        else:
            return self.options[option]
    def get_renderer(self, markup_options={}, **kwargs):
        """
        Return a render function configured from 'markup_options'.
        All unspecified options fall back to the base configuration;
        extra keyword arguments are forwarded to the renderer factory.
        """
        options = {}
        for option in MARKUP_RENDERERS[self.markup]['options']:
            options[option] = self._get_option(option, markup_options)
        return MARKUP_RENDERERS[self.markup]['renderer'](options, **kwargs)
| true | true |
f7113a19ca443354c370f38ad63f77db03ae42db | 5,269 | py | Python | moment/test/test_isSameOrBefore.py | KrixTam/pymoment | b938cafc4c772df55feb3daa41286eade6f3e310 | [
"MIT"
] | 1 | 2021-04-24T17:51:08.000Z | 2021-04-24T17:51:08.000Z | moment/test/test_isSameOrBefore.py | KrixTam/pymoment | b938cafc4c772df55feb3daa41286eade6f3e310 | [
"MIT"
] | null | null | null | moment/test/test_isSameOrBefore.py | KrixTam/pymoment | b938cafc4c772df55feb3daa41286eade6f3e310 | [
"MIT"
] | null | null | null | import unittest
from moment import moment
class TestIsSameOrBefore(unittest.TestCase):
    """Tests for moment.isSameOrBefore across every supported granularity.

    Every granularity test compared the same reference instant against
    two other instants with the exact same assertion pattern; that
    pattern now lives in _check_unit() instead of being repeated ten
    times.  The assertions themselves are unchanged.
    """

    # Reference instant used by every granularity test below.
    REF = '2021-04-22 04:02:09.957000 +0800'

    def _check_unit(self, other, unit):
        """Assert the original pattern: the exclusive comparison at
        *unit* granularity is False, and the variant with the third
        positional flag set is True.  (The flag flips the result in
        every original case — presumably an inclusivity/strictness
        toggle; TODO confirm against the pymoment docs.)"""
        a = moment(self.REF)
        b = moment(other)
        self.assertFalse(a.isSameOrBefore(b, unit))
        self.assertTrue(a.isSameOrBefore(b, unit, True))

    def test_default(self):
        a = moment(self.REF)
        b = moment('2021-2-2 13:02:09.957000 +0800')
        self.assertTrue(a.isSameOrBefore([2021, 5, 1]))
        self.assertFalse(a.isSameOrBefore(b))
        a = moment(self.REF)
        b = moment('2021-2-2 13:02:09.957000 +0800')
        self.assertTrue(a.isSameOrBefore('2021-04-22 04:02:09.957000 +0800'))
        self.assertFalse(a.isSameOrBefore(b))

    def test_year(self):
        self._check_unit('2021-2-2 13:02:09.957000 +0800', 'year')
        self._check_unit('2021-1-1 0:0:0.0 +0800', 'year')

    def test_month(self):
        self._check_unit('2021-4-2 13:02:09.957000 +0800', 'month')
        self._check_unit('2021-4-1 0:0:0.0 +0800', 'month')

    def test_quarter(self):
        self._check_unit('2021-5-2 13:02:09.957000 +0800', 'quarter')
        self._check_unit('2021-4-1 0:0:0.0 +0800', 'quarter')

    def test_week(self):
        self._check_unit('2021-4-21 13:02:09.957000 +0800', 'week')
        self._check_unit('2021-4-18 0:0:0.0 +0800', 'week')

    def test_isoWeek(self):
        self._check_unit('2021-4-21 13:02:09.957000 +0800', 'isoWeek')
        self._check_unit('2021-4-19 0:0:0.0 +0800', 'isoWeek')

    def test_day(self):
        self._check_unit('2021-4-22 13:02:09.957000 +0800', 'day')
        self._check_unit('2021-4-22 0:0:0.0 +0800', 'day')

    def test_date(self):
        self._check_unit('2021-4-22 13:02:09.957000 +0800', 'date')
        self._check_unit('2021-4-22 0:0:0.0 +0800', 'date')

    def test_hour(self):
        self._check_unit('2021-4-22 4:12:09.957000 +0800', 'hour')
        self._check_unit('2021-4-22 4:0:0.0 +0800', 'hour')

    def test_minute(self):
        self._check_unit('2021-4-22 4:2:39.957000 +0800', 'minute')
        self._check_unit('2021-4-22 4:2:0.0 +0800', 'minute')

    def test_second(self):
        self._check_unit('2021-4-22 4:2:9.957000 +0800', 'second')
        self._check_unit('2021-4-22 4:2:9.0 +0800', 'second')
if __name__ == '__main__':
unittest.main()
| 43.908333 | 77 | 0.605808 | import unittest
from moment import moment
class TestIsSameOrBefore(unittest.TestCase):
    """Tests for moment.isSameOrBefore across every supported granularity.

    Every granularity test compared the same reference instant against
    two other instants with the exact same assertion pattern; that
    pattern now lives in _check_unit() instead of being repeated ten
    times.  The assertions themselves are unchanged.
    """

    # Reference instant used by every granularity test below.
    REF = '2021-04-22 04:02:09.957000 +0800'

    def _check_unit(self, other, unit):
        """Assert the original pattern: the exclusive comparison at
        *unit* granularity is False, and the variant with the third
        positional flag set is True.  (The flag flips the result in
        every original case — presumably an inclusivity/strictness
        toggle; TODO confirm against the pymoment docs.)"""
        a = moment(self.REF)
        b = moment(other)
        self.assertFalse(a.isSameOrBefore(b, unit))
        self.assertTrue(a.isSameOrBefore(b, unit, True))

    def test_default(self):
        a = moment(self.REF)
        b = moment('2021-2-2 13:02:09.957000 +0800')
        self.assertTrue(a.isSameOrBefore([2021, 5, 1]))
        self.assertFalse(a.isSameOrBefore(b))
        a = moment(self.REF)
        b = moment('2021-2-2 13:02:09.957000 +0800')
        self.assertTrue(a.isSameOrBefore('2021-04-22 04:02:09.957000 +0800'))
        self.assertFalse(a.isSameOrBefore(b))

    def test_year(self):
        self._check_unit('2021-2-2 13:02:09.957000 +0800', 'year')
        self._check_unit('2021-1-1 0:0:0.0 +0800', 'year')

    def test_month(self):
        self._check_unit('2021-4-2 13:02:09.957000 +0800', 'month')
        self._check_unit('2021-4-1 0:0:0.0 +0800', 'month')

    def test_quarter(self):
        self._check_unit('2021-5-2 13:02:09.957000 +0800', 'quarter')
        self._check_unit('2021-4-1 0:0:0.0 +0800', 'quarter')

    def test_week(self):
        self._check_unit('2021-4-21 13:02:09.957000 +0800', 'week')
        self._check_unit('2021-4-18 0:0:0.0 +0800', 'week')

    def test_isoWeek(self):
        self._check_unit('2021-4-21 13:02:09.957000 +0800', 'isoWeek')
        self._check_unit('2021-4-19 0:0:0.0 +0800', 'isoWeek')

    def test_day(self):
        self._check_unit('2021-4-22 13:02:09.957000 +0800', 'day')
        self._check_unit('2021-4-22 0:0:0.0 +0800', 'day')

    def test_date(self):
        self._check_unit('2021-4-22 13:02:09.957000 +0800', 'date')
        self._check_unit('2021-4-22 0:0:0.0 +0800', 'date')

    def test_hour(self):
        self._check_unit('2021-4-22 4:12:09.957000 +0800', 'hour')
        self._check_unit('2021-4-22 4:0:0.0 +0800', 'hour')

    def test_minute(self):
        self._check_unit('2021-4-22 4:2:39.957000 +0800', 'minute')
        self._check_unit('2021-4-22 4:2:0.0 +0800', 'minute')

    def test_second(self):
        self._check_unit('2021-4-22 4:2:9.957000 +0800', 'second')
        self._check_unit('2021-4-22 4:2:9.0 +0800', 'second')
if __name__ == '__main__':
unittest.main()
| true | true |
f7113a7ec84a5912d102e4fdfaf67e71bdf1c10e | 59 | py | Python | dev/ideal.py | baltiloka/fisica | 96e8bb1d4eec9963afa4732e19fb474b3ead1b31 | [
"MIT"
] | null | null | null | dev/ideal.py | baltiloka/fisica | 96e8bb1d4eec9963afa4732e19fb474b3ead1b31 | [
"MIT"
] | null | null | null | dev/ideal.py | baltiloka/fisica | 96e8bb1d4eec9963afa4732e19fb474b3ead1b31 | [
"MIT"
] | null | null | null | """
Version Software: 0.0.0
Version Python: 3.7
"""
| 11.8 | 26 | 0.542373 | true | true | |
f7113ad4dce58e0ca6134660c1d3384f46c82957 | 2,323 | py | Python | g_CNN/Optimizers.py | wangjiangtao-NJPI/MachineLearning | 78124b56a26ec68efb3c517a4a2420860b6e4a75 | [
"MIT"
] | null | null | null | g_CNN/Optimizers.py | wangjiangtao-NJPI/MachineLearning | 78124b56a26ec68efb3c517a4a2420860b6e4a75 | [
"MIT"
] | null | null | null | g_CNN/Optimizers.py | wangjiangtao-NJPI/MachineLearning | 78124b56a26ec68efb3c517a4a2420860b6e4a75 | [
"MIT"
] | null | null | null | import os
import sys
root_path = os.path.abspath("../")
if root_path not in sys.path:
sys.path.append(root_path)
import tensorflow as tf
class Optimizer:
    """Thin wrapper around a TensorFlow v1 ``tf.train`` optimizer.

    Subclasses construct the concrete optimizer and store it in
    ``self._opt``; this base class only keeps the learning rate and
    delegates ``minimize``.
    """
    def __init__(self, lr=1e-3):
        self._lr = lr
        self._opt = None  # assigned by subclasses

    @property
    def name(self):
        """Human-readable name: the concrete class name."""
        return str(self)

    def minimize(self, x, *args, **kwargs):
        """Delegate to the wrapped optimizer's minimize()."""
        return self._opt.minimize(x, *args, **kwargs)

    def __str__(self):
        return type(self).__name__

    def __repr__(self):
        return str(self)
class MBGD(Optimizer):
    """Mini-batch gradient descent via tf.train.GradientDescentOptimizer."""
    def __init__(self, lr=1e-3):
        super(MBGD, self).__init__(lr)
        self._opt = tf.train.GradientDescentOptimizer(self._lr)
class Momentum(Optimizer):
    """SGD with classical momentum via tf.train.MomentumOptimizer."""
    def __init__(self, lr=1e-3, momentum=0.8):
        super(Momentum, self).__init__(lr)
        self._opt = tf.train.MomentumOptimizer(self._lr, momentum)
class NAG(Optimizer):
    """Nesterov accelerated gradient (MomentumOptimizer, use_nesterov=True)."""
    def __init__(self, lr=1e-3, momentum=0.8):
        super(NAG, self).__init__(lr)
        self._opt = tf.train.MomentumOptimizer(self._lr, momentum, use_nesterov=True)
class AdaDelta(Optimizer):
    """AdaDelta via tf.train.AdadeltaOptimizer."""
    def __init__(self, lr=1e-3, rho=0.95, eps=1e-8):
        super(AdaDelta, self).__init__(lr)
        self._opt = tf.train.AdadeltaOptimizer(self._lr, rho, eps)
class AdaGrad(Optimizer):
    """AdaGrad optimizer; ``init`` is the initial accumulator value."""
    def __init__(self, lr=1e-3, init=0.1):
        Optimizer.__init__(self, lr)
        self._opt = tf.train.AdagradOptimizer(self._lr, init)
class Adam(Optimizer):
    """Adam optimizer with the usual first/second moment decay rates beta1/beta2."""
    def __init__(self, lr=1e-3, beta1=0.9, beta2=0.999, eps=1e-8):
        Optimizer.__init__(self, lr)
        self._opt = tf.train.AdamOptimizer(self._lr, beta1, beta2, eps)
class RMSProp(Optimizer):
    """RMSProp optimizer (gradient-history ``decay``, optional momentum)."""
    def __init__(self, lr=1e-3, decay=0.9, momentum=0.0, eps=1e-10):
        Optimizer.__init__(self, lr)
        self._opt = tf.train.RMSPropOptimizer(self._lr, decay, momentum, eps)
# Factory
class OptFactory:
    """Factory that maps optimizer names to their wrapper classes."""

    available_optimizers = {
        "MBGD": MBGD, "Momentum": Momentum, "NAG": NAG,
        "AdaDelta": AdaDelta, "AdaGrad": AdaGrad,
        "Adam": Adam, "RMSProp": RMSProp
    }

    def get_optimizer_by_name(self, name, lr, *args, **kwargs):
        """Instantiate the optimizer registered under *name*.

        :param name: key into ``available_optimizers``.
        :param lr: learning rate forwarded to the optimizer constructor.
        :raises NotImplementedError: if *name* is not a registered optimizer.
        """
        # Look the class up first so that a KeyError raised *inside* an
        # optimizer constructor is not misreported as an unknown name
        # (the original try-block wrapped the constructor call as well).
        try:
            optimizer_cls = self.available_optimizers[name]
        except KeyError:
            raise NotImplementedError("Undefined Optimizer '{}' found".format(name))
        return optimizer_cls(lr, *args, **kwargs)
| 26.701149 | 85 | 0.643134 | import os
import sys
root_path = os.path.abspath("../")
if root_path not in sys.path:
sys.path.append(root_path)
import tensorflow as tf
class Optimizer:
def __init__(self, lr=1e-3):
self._lr = lr
self._opt = None
@property
def name(self):
return str(self)
def minimize(self, x, *args, **kwargs):
return self._opt.minimize(x, *args, **kwargs)
def __str__(self):
return self.__class__.__name__
def __repr__(self):
return str(self)
class MBGD(Optimizer):
def __init__(self, lr=1e-3):
Optimizer.__init__(self, lr)
self._opt = tf.train.GradientDescentOptimizer(self._lr)
class Momentum(Optimizer):
def __init__(self, lr=1e-3, momentum=0.8):
Optimizer.__init__(self, lr)
self._opt = tf.train.MomentumOptimizer(self._lr, momentum)
class NAG(Optimizer):
def __init__(self, lr=1e-3, momentum=0.8):
Optimizer.__init__(self, lr)
self._opt = tf.train.MomentumOptimizer(self._lr, momentum, use_nesterov=True)
class AdaDelta(Optimizer):
def __init__(self, lr=1e-3, rho=0.95, eps=1e-8):
Optimizer.__init__(self, lr)
self._opt = tf.train.AdadeltaOptimizer(self._lr, rho, eps)
class AdaGrad(Optimizer):
def __init__(self, lr=1e-3, init=0.1):
Optimizer.__init__(self, lr)
self._opt = tf.train.AdagradOptimizer(self._lr, init)
class Adam(Optimizer):
def __init__(self, lr=1e-3, beta1=0.9, beta2=0.999, eps=1e-8):
Optimizer.__init__(self, lr)
self._opt = tf.train.AdamOptimizer(self._lr, beta1, beta2, eps)
class RMSProp(Optimizer):
def __init__(self, lr=1e-3, decay=0.9, momentum=0.0, eps=1e-10):
Optimizer.__init__(self, lr)
self._opt = tf.train.RMSPropOptimizer(self._lr, decay, momentum, eps)
class OptFactory:
available_optimizers = {
"MBGD": MBGD, "Momentum": Momentum, "NAG": NAG,
"AdaDelta": AdaDelta, "AdaGrad": AdaGrad,
"Adam": Adam, "RMSProp": RMSProp
}
def get_optimizer_by_name(self, name, lr, *args, **kwargs):
try:
optimizer = self.available_optimizers[name](lr, *args, **kwargs)
return optimizer
except KeyError:
raise NotImplementedError("Undefined Optimizer '{}' found".format(name))
| true | true |
f7113ae3426f27603355965e27500b97e47f2abe | 1,296 | py | Python | setup.py | dxxxm/opencv_wrapper | 4838185cf37b8d93190b5761dcc815ba285ff0cf | [
"MIT"
] | 16 | 2019-04-03T18:34:57.000Z | 2021-11-24T09:24:10.000Z | setup.py | anbergem/cvhelper | 4838185cf37b8d93190b5761dcc815ba285ff0cf | [
"MIT"
] | 7 | 2019-04-04T10:31:48.000Z | 2020-06-21T10:16:18.000Z | setup.py | anbergem/cvhelper | 4838185cf37b8d93190b5761dcc815ba285ff0cf | [
"MIT"
] | 3 | 2019-12-20T13:42:19.000Z | 2021-08-13T08:37:14.000Z | import os.path
import sys
from setuptools import setup
# Long description shown on PyPI comes straight from the README.
with open("README.md", encoding="utf-8") as fh:
    long_description = fh.read()

requirements = ["numpy"]
# The dataclasses backport is only needed on Python 3.6. Compare the full
# (major, minor) tuple: the original `sys.version_info[1] == 6` would also
# match e.g. Python 2.6 or a future 4.6.
if sys.version_info[:2] == (3, 6):
    requirements.append("dataclasses")

here = os.path.abspath(os.path.dirname(__file__))

# Load package metadata from opencv_wrapper/__version__.py without importing
# the package itself (importing would require its dependencies installed).
about = {}
with open(os.path.join(here, "opencv_wrapper", "__version__.py"), "r") as f:
    exec(f.read(), about)

setup(
    name=about["__title__"],
    version=about["__version__"],
    author=about["__author__"],
    author_email=about["__author_email__"],
    description=about["__description__"],
    license=about["__license__"],
    long_description=long_description,
    long_description_content_type="text/markdown",
    url=about["__url__"],
    packages=["opencv_wrapper"],
    classifiers=[
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Natural Language :: English",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Topic :: Scientific/Engineering",
        "Topic :: Scientific/Engineering :: Image Recognition",
        "Typing :: Typed",
    ],
    keywords="OpenCV",
    install_requires=requirements,
    python_requires=">=3.6",
)
| 27.574468 | 76 | 0.652778 | import os.path
import sys
from setuptools import setup
with open("README.md", encoding="utf-8") as fh:
long_description = fh.read()
requirements = ["numpy"]
if sys.version_info[1] == 6:
requirements.append("dataclasses")
here = os.path.abspath(os.path.dirname(__file__))
about = {}
with open(os.path.join(here, "opencv_wrapper", "__version__.py"), "r") as f:
exec(f.read(), about)
setup(
name=about["__title__"],
version=about["__version__"],
author=about["__author__"],
author_email=about["__author_email__"],
description=about["__description__"],
license=about["__license__"],
long_description=long_description,
long_description_content_type="text/markdown",
url=about["__url__"],
packages=["opencv_wrapper"],
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Image Recognition",
"Typing :: Typed",
],
keywords="OpenCV",
install_requires=requirements,
python_requires=">=3.6",
)
| true | true |
f7113b355a34d5cbd98cf0956b535f8beac0f567 | 26,950 | py | Python | integration-tests/run-intg-test.py | wso2-incubator/sp-test-integration | 7460ab98df55945e0a2c7351571bb765529a5f45 | [
"Apache-2.0"
] | null | null | null | integration-tests/run-intg-test.py | wso2-incubator/sp-test-integration | 7460ab98df55945e0a2c7351571bb765529a5f45 | [
"Apache-2.0"
] | null | null | null | integration-tests/run-intg-test.py | wso2-incubator/sp-test-integration | 7460ab98df55945e0a2c7351571bb765529a5f45 | [
"Apache-2.0"
] | 2 | 2018-09-05T04:52:17.000Z | 2018-09-05T06:16:53.000Z | # Copyright (c) 2018, WSO2 Inc. (http://wso2.com) All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# importing required modules
import sys
from xml.etree import ElementTree as ET
import subprocess
import wget
import logging
import inspect
import os
import shutil
import pymysql
import sqlparse
import glob
import ast
import stat
import re
from pathlib import Path
import urllib.request as urllib2
from xml.dom import minidom
import configure_product as cp
from subprocess import Popen, PIPE
from const import TEST_PLAN_PROPERTY_FILE_NAME, INFRA_PROPERTY_FILE_NAME, LOG_FILE_NAME, DB_META_DATA, \
PRODUCT_STORAGE_DIR_NAME, DEFAULT_DB_USERNAME, LOG_STORAGE, TESTNG_DIST_XML_PATH, TESTNG_SERVER_MGT_DIST, LOG_FILE_PATHS, DIST_POM_PATH, NS, ZIP_FILE_EXTENSION, DATABASE_NAME
# ---------------------------------------------------------------------------
# Module-level configuration state. These are populated by
# read_proprty_files() from the test-plan / infra property files and are read
# by the rest of the script.
# ---------------------------------------------------------------------------
git_repo_url = None
git_branch = None
os_type = None
workspace = None
dist_name = None
dist_zip_name = None
product_id = None
log_file_name = None
target_path = None
db_engine = None
db_engine_version = None
latest_product_release_api = None
latest_product_build_artifacts_api = None
sql_driver_location = None
db_host = None
db_port = None
db_username = None
db_password = None
tag_name = None
test_mode = None  # one of "WUM", "RELEASE", "SNAPSHOT" (branched on in main())
wum_product_version = None
use_custom_testng_file = None
database_config = {}  # filled in by construct_db_config()
def read_proprty_files():
    """Populate the module-level configuration globals from the property files.

    Reads ``TEST_PLAN_PROPERTY_FILE_NAME`` and ``INFRA_PROPERTY_FILE_NAME``
    from the current working directory and assigns the recognised key/value
    pairs to the globals used by the rest of the script. Unknown keys are
    silently ignored (same as the original behavior).

    :raises Exception: if either property file is missing from the workspace.
    """
    global db_engine
    global db_engine_version
    global git_repo_url
    global git_branch
    global latest_product_release_api
    global latest_product_build_artifacts_api
    global sql_driver_location
    global db_host
    global db_port
    global db_username
    global db_password
    global workspace
    global product_id
    global database_config
    global wum_product_version
    global test_mode
    global use_custom_testng_file
    workspace = os.getcwd()
    test_plan_prop_path = Path(workspace + "/" + TEST_PLAN_PROPERTY_FILE_NAME)
    infra_prop_path = Path(workspace + "/" + INFRA_PROPERTY_FILE_NAME)
    if not (Path.exists(test_plan_prop_path) and Path.exists(infra_prop_path)):
        raise Exception("Test Plan Property file or Infra Property file is not in the workspace: " + workspace)
    property_file_paths = [test_plan_prop_path, infra_prop_path]
    for path in property_file_paths:
        with open(path, 'r') as filehandle:
            for line in filehandle:
                # Skip comments and blank/malformed lines (the original code
                # raised IndexError on lines without an '=').
                if line.startswith("#") or "=" not in line:
                    continue
                # Split only on the first '=' so values (e.g. URLs) that
                # themselves contain '=' are preserved intact.
                key, val = line.split("=", 1)
                key = key.strip()
                if key == "DBEngine":
                    db_engine = val.strip()
                elif key == "DBEngineVersion":
                    # strip() added for consistency with the other values
                    db_engine_version = val.strip()
                elif key == "PRODUCT_GIT_URL":
                    git_repo_url = val.strip().replace('\\', '')
                    product_id = git_repo_url.split("/")[-1].split('.')[0]
                elif key == "PRODUCT_GIT_BRANCH":
                    git_branch = val.strip()
                elif key == "LATEST_PRODUCT_RELEASE_API":
                    latest_product_release_api = val.strip().replace('\\', '')
                elif key == "LATEST_PRODUCT_BUILD_ARTIFACTS_API":
                    latest_product_build_artifacts_api = val.strip().replace('\\', '')
                elif key == "SQL_DRIVERS_LOCATION_UNIX" and not sys.platform.startswith('win'):
                    sql_driver_location = val.strip()
                elif key == "SQL_DRIVERS_LOCATION_WINDOWS" and sys.platform.startswith('win'):
                    sql_driver_location = val.strip()
                elif key == "DatabaseHost":
                    db_host = val.strip()
                elif key == "DatabasePort":
                    db_port = val.strip()
                elif key == "DBUsername":
                    db_username = val.strip()
                elif key == "DBPassword":
                    db_password = val.strip()
                elif key == "TEST_MODE":
                    test_mode = val.strip()
                elif key == "WUM_PRODUCT_VERSION":
                    wum_product_version = val.strip()
                elif key == "USE_CUSTOM_TESTNG":
                    use_custom_testng_file = val.strip()
def validate_property_readings():
    """Check that every mandatory property was read from the property files.

    Returns True when all required globals are set; otherwise logs the list
    of missing keys and returns False.
    """
    required = [
        (db_engine, " -DBEngine- "),
        (git_repo_url, " -PRODUCT_GIT_URL- "),
        (product_id, " -product-id- "),
        (git_branch, " -PRODUCT_GIT_BRANCH- "),
        (latest_product_release_api, " -LATEST_PRODUCT_RELEASE_API- "),
        (latest_product_build_artifacts_api, " -LATEST_PRODUCT_BUILD_ARTIFACTS_API- "),
        (sql_driver_location, " -SQL_DRIVERS_LOCATION_<OS_Type>- "),
        (db_host, " -DatabaseHost- "),
        (db_port, " -DatabasePort- "),
        (db_password, " -DBPassword- "),
        (test_mode, " -TEST_MODE- "),
        (wum_product_version, " -WUM_PRODUCT_VERSION- "),
        (use_custom_testng_file, " -USE_CUSTOM_TESTNG- "),
    ]
    missing_values = "".join(label for value, label in required if value is None)
    if missing_values:
        logger.error('Invalid property file is found. Missing values: %s ', missing_values)
        return False
    return True
def get_db_meta_data(argument):
    """Return the metadata dict for the given DB engine name, or False if unknown."""
    return DB_META_DATA.get(argument, False)
def construct_url(prefix):
    """Build the JDBC URL by appending the configured DB host and port to *prefix*."""
    # str concatenation via join keeps the original TypeError behavior if a
    # component is still unset (None).
    return "".join([prefix, db_host, ":", db_port])
def function_logger(file_level, console_level=None):
    """Create a logger named after the *calling* function.

    A FileHandler writing to ``<caller_name>.log`` is always attached at
    *file_level*; a console StreamHandler is added only when *console_level*
    is given.

    NOTE(review): calling this twice from the same function attaches duplicate
    handlers to the same logger (logging.getLogger returns the same object) —
    the script only calls it once, from main().
    """
    global log_file_name
    log_file_name = LOG_FILE_NAME
    # inspect.stack()[1][3] is the name of the immediate caller.
    function_name = inspect.stack()[1][3]
    logger = logging.getLogger(function_name)
    # By default, the logger itself passes all messages; handlers filter.
    logger.setLevel(logging.DEBUG)
    if console_level != None:
        # StreamHandler logs to console
        ch = logging.StreamHandler()
        ch.setLevel(console_level)
        ch_format = logging.Formatter('%(asctime)s - %(message)s')
        ch.setFormatter(ch_format)
        logger.addHandler(ch)
    # log in to a file named after the caller
    fh = logging.FileHandler("{0}.log".format(function_name))
    fh.setLevel(file_level)
    fh_format = logging.Formatter('%(asctime)s - %(lineno)d - %(levelname)-8s - %(message)s')
    fh.setFormatter(fh_format)
    logger.addHandler(fh)
    return logger
def download_file(url, destination):
    """Download a file using wget package.

    Download the given file in _url_ as the directory+name provided in _destination_
    """
    wget.download(url, destination)
def get_db_hostname(url, db_type):
    """Retrieve the database hostname embedded in a JDBC URL.

    Oracle URLs carry the host in the fourth colon-separated field (prefixed
    with '@'); every other engine keeps it in the third (prefixed with '//').
    """
    fields = url.split(':')
    if db_type == 'ORACLE':
        return fields[3].replace("@", "")
    return fields[2].replace("//", "")
def run_sqlserver_commands(query):
    """Run SQL_SERVER commands using the sqlcmd utility.

    Connects to the configured host with the credentials from database_config
    and executes *query* via ``sqlcmd -Q``.
    """
    subprocess.call(
        ['sqlcmd', '-S', db_host, '-U', database_config['user'], '-P', database_config['password'], '-Q', query])
def get_mysql_connection(db_name=None):
    """Open a pymysql connection to the configured server.

    When *db_name* is given the connection selects that database; otherwise
    no database is selected (used for CREATE DATABASE statements).
    """
    connect_kwargs = {
        "host": get_db_hostname(database_config['url'], 'MYSQL'),
        "user": database_config['user'],
        "passwd": database_config['password'],
    }
    if db_name is not None:
        connect_kwargs["db"] = db_name
    return pymysql.connect(**connect_kwargs)
def run_mysql_commands(query):
    """Execute a single SQL statement against the MySQL server (no DB selected)."""
    connection = get_mysql_connection()
    cursor = connection.cursor()
    cursor.execute(query)
    connection.close()
def get_ora_user_carete_query(database):
    """Return the SQL that creates an Oracle user named *database*.

    The password comes from the global database_config. (The 'carete' typo in
    the function name is kept for caller compatibility.)
    """
    return "CREATE USER {0} IDENTIFIED BY {1};".format(database, database_config["password"])
def get_ora_grant_query(database):
    """Return the SQL that grants CONNECT, RESOURCE and DBA roles to *database*."""
    return "GRANT CONNECT, RESOURCE, DBA TO {0};".format(database)
def execute_oracle_command(query):
    """Run an Oracle command through the sqlplus64 client as the admin user.

    Connects with the database_config credentials to the ORCL service and
    feeds *query* to sqlplus on stdin. Returns the (stdout, stderr) tuple
    from Popen.communicate().
    """
    connect_string = "{0}/{1}@//{2}/{3}".format(database_config["user"], database_config["password"],
                                                db_host, "ORCL")
    session = Popen(['sqlplus64', '-S', connect_string], stdin=PIPE, stdout=PIPE, stderr=PIPE)
    session.stdin.write(bytes(query, 'utf-8'))
    return session.communicate()
def create_oracle_user(database):
    """Create the Oracle user *database* and grant it CONNECT/RESOURCE/DBA.

    Returns the (stdout, stderr) of the grant command.
    """
    user_creating_query = get_ora_user_carete_query(database)
    print("User_creating query is: "+user_creating_query)
    logger.info(execute_oracle_command(user_creating_query))
    permission_granting_query = get_ora_grant_query(database)
    return execute_oracle_command(permission_granting_query)
def run_oracle_script(script, database):
    """Run an Oracle command through sqlplus, connecting as the *database* user.

    *script* is typically an '@/path/to/file.sql' directive. Returns the
    (stdout, stderr) tuple from Popen.communicate().
    """
    connect_string = "{0}/{1}@//{2}/{3}".format(database, database_config["password"],
                                                db_host, "ORCL")
    session = Popen(['sqlplus', '-S', connect_string], stdin=PIPE, stdout=PIPE, stderr=PIPE)
    session.stdin.write(bytes(script, 'utf-8'))
    return session.communicate()
def run_sqlserver_script_file(db_name, script_path):
    """Run a SQL Server script file against *db_name* via ``sqlcmd -i``."""
    subprocess.call(
        ['sqlcmd', '-S', db_host, '-U', database_config["user"], '-P', database_config["password"], '-d', db_name, '-i',
         script_path])
def run_mysql_script_file(db_name, script_path):
    """Execute every statement in the SQL script at *script_path* against *db_name*.

    Statements are split with sqlparse so multi-statement scripts work even
    though pymysql executes one statement per call; empty fragments are skipped.
    """
    conn = get_mysql_connection(db_name)
    try:
        cursor = conn.cursor()
        # 'with' guarantees the script file handle is released (the original
        # leaked the handle returned by open() until garbage collection).
        with open(script_path) as script_file:
            sql = script_file.read()
        for sql_part in sqlparse.split(sql):
            if sql_part.strip() == '':
                continue
            cursor.execute(sql_part)
    finally:
        # Close the connection even if a statement fails.
        conn.close()
def copy_file(source, target):
    """Copy *source* to *target*.

    On Windows both paths are converted to extended-length form first so long
    paths do not fail; on other platforms the paths are used as-is.
    """
    if sys.platform.startswith('win'):
        source = cp.winapi_path(source)
        target = cp.winapi_path(target)
    shutil.copy(source, target)
def get_dist_name():
    """Derive the distribution name from the product's distribution pom.xml.

    Sets the module globals dist_name (``<artifactId>-<version>``),
    dist_zip_name and product_version, and returns dist_name. The version is
    taken from the pom's <parent><version> element.
    """
    global dist_name
    global dist_zip_name
    global product_version
    dist_pom_path = Path(workspace + "/" + product_id + "/" + DIST_POM_PATH[product_id])
    print(dist_pom_path)
    if sys.platform.startswith('win'):
        # extended-length path so long Windows paths don't break ET.parse
        dist_pom_path = cp.winapi_path(dist_pom_path)
    ET.register_namespace('', NS['d'])
    artifact_tree = ET.parse(dist_pom_path)
    artifact_root = artifact_tree.getroot()
    parent = artifact_root.find('d:parent', NS)
    artifact_id = artifact_root.find('d:artifactId', NS).text
    print("ArtifactID" + artifact_id)
    product_version = parent.find('d:version', NS).text
    print("ProdVersion" + product_version)
    dist_name = artifact_id + "-" + product_version
    dist_zip_name = dist_name + ZIP_FILE_EXTENSION
    return dist_name
def get_dist_name_wum():
    """Derive the distribution name from the first *.zip in the storage dir (WUM mode).

    Sets product_version from the WUM_PRODUCT_VERSION property and dist_name
    from the zip file name (extension stripped), then returns dist_name.
    NOTE(review): this chdir()s into PRODUCT_STORAGE_DIR_NAME and does not
    change back — later code relies on absolute paths.
    """
    global dist_name
    global product_version
    product_version=wum_product_version
    os.chdir(PRODUCT_STORAGE_DIR_NAME)
    name = glob.glob('*.zip')[0]
    dist_name=os.path.splitext(name)[0]
    logger.info("dist_name:" + dist_name)
    return dist_name
def setup_databases(db_names):
    """Create the required databases and run their setup scripts.

    For each name in *db_names*, looks up its script list in the engine's
    DB_SETUP metadata (from the constant file). Databases with an empty
    script list are only created; otherwise the listed scripts from the
    distribution's dbscripts directory are executed as well.

    :raises Exception: when the engine metadata or the product's DB setup
        configuration is missing from the constant file.
    """
    base_path = Path(workspace + "/" + PRODUCT_STORAGE_DIR_NAME + "/" + dist_name + "/" + 'dbscripts')
    print("Base path is: "+str(base_path))
    engine = db_engine.upper()
    print("Engine is: "+engine)
    db_meta_data = get_db_meta_data(engine)
    print("DB metadata is: "+str(db_meta_data))
    if db_meta_data:
        databases = db_meta_data["DB_SETUP"][product_id]
        print("Databases is: "+str(databases))
        if databases:
            for db_name in db_names:
                db_scripts = databases[db_name]
                if len(db_scripts) == 0:
                    # no scripts: only create the database / user
                    if engine == 'SQLSERVER-SE':
                        # create database for MsSQL
                        run_sqlserver_commands('CREATE DATABASE {0}'.format(db_name))
                    elif engine == 'MYSQL':
                        # create database for MySQL
                        run_mysql_commands('CREATE DATABASE IF NOT EXISTS {0};'.format(db_name))
                    elif engine == 'ORACLE-SE2':
                        # create database for Oracle
                        print("DB_Name is: "+db_name)
                        create_oracle_user(db_name)
                else:
                    if engine == 'SQLSERVER-SE':
                        # create database for MsSQL
                        run_sqlserver_commands('CREATE DATABASE {0}'.format(db_name))
                        for db_script in db_scripts:
                            path = base_path / db_script
                            # run db scripts
                            run_sqlserver_script_file(db_name, str(path))
                    elif engine == 'MYSQL':
                        # create database for MySQL
                        run_mysql_commands('CREATE DATABASE IF NOT EXISTS {0};'.format(db_name))
                        # run db scripts
                        for db_script in db_scripts:
                            path = base_path / db_script
                            run_mysql_script_file(db_name, str(path))
                    elif engine == 'ORACLE-SE2':
                        # create oracle schema
                        create_oracle_user(db_name)
                        # run db script
                        for db_script in db_scripts:
                            path = base_path / db_script
                            run_oracle_script('@{0}'.format(str(path)), db_name)
            logger.info('Database setting up is done.')
        else:
            raise Exception("Database setup configuration is not defined in the constant file")
    else:
        raise Exception("Database meta data is not defined in the constant file")
def construct_db_config():
    """Build the global database_config dict from the properties read earlier.

    Fills driver class, password, driver jar location, JDBC URL, engine name
    and user (falling back to DEFAULT_DB_USERNAME when no DBUsername was
    given).

    :raises BaseException: when the property file's DB engine name has no
        entry in the DB_META_DATA constant.
    """
    db_meta_data = get_db_meta_data(db_engine.upper())
    if db_meta_data:
        database_config["driver_class_name"] = db_meta_data["driverClassName"]
        database_config["password"] = db_password
        database_config["sql_driver_location"] = sql_driver_location + "/" + db_meta_data["jarName"]
        database_config["url"] = construct_url(db_meta_data["prefix"])
        database_config["db_engine"] = db_engine
        if db_username is None:
            database_config["user"] = DEFAULT_DB_USERNAME
        else:
            database_config["user"] = db_username
    else:
        raise BaseException(
            "DB config parsing is failed. DB engine name in the property file doesn't match with the constant: " + str(
                db_engine.upper()))
def build_module(module_path):
    """Run ``mvn clean install`` for the Maven module at *module_path*."""
    logger.info('Start building a module. Module: ' + str(module_path))
    maven_command = ['mvn', 'clean', 'install', '-B',
                     '-Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn']
    # On Windows mvn is a batch file, so it must be invoked through the shell.
    subprocess.call(maven_command, shell=sys.platform.startswith('win'), cwd=module_path)
    logger.info('Module build is completed. Module: ' + str(module_path))
def save_log_files():
    """Copy the product's log files into the workspace log storage directory.

    Creates the LOG_STORAGE directory when missing; missing source files are
    logged as errors rather than raising.
    """
    log_storage = Path(workspace + "/" + LOG_STORAGE)
    if not Path.exists(log_storage):
        Path(log_storage).mkdir(parents=True, exist_ok=True)
    log_file_paths = LOG_FILE_PATHS[product_id]
    if log_file_paths:
        for file in log_file_paths:
            absolute_file_path = Path(workspace + "/" + product_id + "/" + file)
            if Path.exists(absolute_file_path):
                copy_file(absolute_file_path, log_storage)
            else:
                logger.error("File doesn't contain in the given location: " + str(absolute_file_path))
def clone_repo():
    """Clone the product git repository (configured branch) into the workspace.

    Errors are logged, not raised, so the caller continues regardless.
    """
    try:
        subprocess.call(['git', 'clone', '--branch', git_branch, git_repo_url], cwd=workspace)
        logger.info('product repository cloning is done.')
    except Exception as e:
        logger.error("Error occurred while cloning the product repo: ", exc_info=True)
def checkout_to_tag(name):
    """Fetch and check out the given git tag in the cloned product repo.

    Errors are logged, not raised.
    """
    try:
        git_path = Path(workspace + "/" + product_id)
        tag = "tags/" + name
        subprocess.call(["git", "fetch", "origin", tag], cwd=git_path)
        subprocess.call(["git", "checkout", "-B", tag, name], cwd=git_path)
        logger.info('checkout to the branch: ' + tag)
    except Exception as e:
        logger.error("Error occurred while cloning the product repo and checkout to the latest tag of the branch",
                     exc_info=True)
def get_latest_tag_name(product):
    """Return (and store in the tag_name global) the most recent git tag of *product*.

    Uses ``git rev-list --tags --max-count=1`` to find the newest tagged
    commit, then ``git describe --tags`` to resolve its tag name.
    NOTE(review): the two Popen pipes are read but never wait()ed/closed,
    leaving zombie processes until interpreter exit.
    """
    global tag_name
    git_path = Path(workspace + "/" + product)
    latest_rev = subprocess.Popen(["git", "rev-list", "--tags", "--max-count=1"], stdout=subprocess.PIPE, cwd=git_path)
    binary_val_of_tag_name = subprocess.Popen(
        ["git", "describe", "--tags", latest_rev.stdout.read().strip().decode("utf-8")], stdout=subprocess.PIPE,
        cwd=git_path)
    tag_name = binary_val_of_tag_name.stdout.read().strip().decode("utf-8")
    print(tag_name)
    return tag_name
def get_product_file_path():
    """Return the absolute path of the distribution zip inside the storage directory.

    Creates the storage directory when it does not yet exist.
    """
    # product download path and file name constructing
    product_download_dir = Path(workspace + "/" + PRODUCT_STORAGE_DIR_NAME)
    if not Path.exists(product_download_dir):
        Path(product_download_dir).mkdir(parents=True, exist_ok=True)
    return product_download_dir / dist_zip_name
def get_relative_path_of_dist_storage(xml_path):
    """Return the relativePath of the first <artifact> in the Jenkins API XML.

    NOTE(review): the active condition compares a value to itself and is
    always True, so the FIRST artifact's relativePath is returned regardless
    of file name; the commented-out line shows the original dist_zip_name
    comparison this replaced — confirm this workaround is still intended.
    """
    print("xml_path is: "+xml_path)
    dom = minidom.parse(urllib2.urlopen(xml_path))  # parse the data
    artifact_elements = dom.getElementsByTagName('artifact')
    for artifact in artifact_elements:
        file_name_elements = artifact.getElementsByTagName("fileName")
        for file_name in file_name_elements:
            print("file_name.firstChild.nodeValue is: "+file_name.firstChild.nodeValue)
            print("dist_zip_name: "+dist_zip_name)
            #if file_name.firstChild.nodeValue == dist_zip_name:
            if file_name.firstChild.nodeValue == file_name.firstChild.nodeValue:
                parent_node = file_name.parentNode
                print("disStorage:==" + parent_node.getElementsByTagName("relativePath")[0].firstChild.nodeValue)
                return parent_node.getElementsByTagName("relativePath")[0].firstChild.nodeValue
    return None
def get_latest_released_dist():
    """Download the latest released distribution from Jenkins into the storage dir.

    :raises Exception: if the relative path of the artifact cannot be resolved.
    """
    # construct the distribution downloading url
    relative_path = get_relative_path_of_dist_storage(latest_product_release_api + "xml")
    # Check for None BEFORE using the value: the original concatenated it
    # into the print first, which raised TypeError instead of this Exception.
    if relative_path is None:
        raise Exception("Error occured while getting relative path")
    print("relatine path is "+relative_path)
    dist_downl_url = latest_product_release_api.split('/api')[0] + "/artifact/" + relative_path
    # download the last released pack from Jenkins
    download_file(dist_downl_url, str(get_product_file_path()))
    logger.info('downloading the latest released pack from Jenkins is completed.')
def get_latest_stable_artifacts_api():
    """Return the Jenkins build URL whose mainArtifact matches the product pom.

    Walks the <mainArtifact> elements of the build-artifacts API XML and
    returns the sibling <url> of the entry whose canonicalName equals
    ``<dist_name>.pom``; None when no entry matches.
    """
    dom = minidom.parse(urllib2.urlopen(latest_product_build_artifacts_api + "xml"))
    main_artifact_elements = dom.getElementsByTagName('mainArtifact')
    print("Main artifact elements: "+str(main_artifact_elements))
    for main_artifact in main_artifact_elements:
        canonical_name_elements = main_artifact.getElementsByTagName("canonicalName")
        print("Canonical name: "+str(canonical_name_elements))
        for canonical_name in canonical_name_elements:
            print("canonical_name.firstChild.nodeValue is: "+canonical_name.firstChild.nodeValue)
            print("dist_name is: "+dist_name)
            if canonical_name.firstChild.nodeValue == dist_name + ".pom":
                #if canonical_name.firstChild.nodeValue == dist_name + "-rc4-SNAPSHOT.pom":
                parent_node = main_artifact.parentNode
                print("printing msg "+parent_node.getElementsByTagName("url")[0].firstChild.nodeValue)
                return parent_node.getElementsByTagName("url")[0].firstChild.nodeValue
    return None
def get_latest_stable_dist():
    """Download the latest stable (snapshot) distribution from Jenkins.

    Resolves the newest stable build's artifact API, rewrites raw
    ``http://<ip>:<port>`` prefixes to https://wso2.org, then downloads the
    distribution zip into the storage directory.

    :raises Exception: when the build artifact API or the artifact's relative
        path cannot be resolved.
    """
    build_num_artifact = get_latest_stable_artifacts_api()
    print("buildnumArti: "+ str(build_num_artifact))
    # Check for None BEFORE re.sub: the original passed None into re.sub
    # first, which raised TypeError instead of this Exception.
    if build_num_artifact is None:
        raise Exception("Error occured while getting latest stable build artifact API path")
    build_num_artifact = re.sub(r'http.//(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}):(\d{1,5})', "https://wso2.org", build_num_artifact)
    relative_path = get_relative_path_of_dist_storage(build_num_artifact + "api/xml")
    # Same ordering fix: check None before concatenating into the print.
    if relative_path is None:
        raise Exception("Error occured while getting relative path")
    print("relativePath:" + relative_path)
    dist_downl_url = build_num_artifact + "artifact/" + relative_path
    print("dist_downl_url is: "+dist_downl_url)
    download_file(dist_downl_url, str(get_product_file_path()))
    logger.info('downloading the latest stable pack from Jenkins is completed.')
def create_output_property_fle():
    """Create the output property file consumed when generating the e-mail.

    Writes GIT_LOCATION and GIT_REVISION into ``output.properties``. In WUM
    mode any ``user:password@`` credentials embedded in the git URL are
    stripped before it is published.
    """
    # 'with' ensures the file is closed even if a write fails (the original
    # only closed it on the success path).
    with open("output.properties", "w+") as output_property_file:
        if test_mode == "WUM":
            logger.info("PRODUCT GIT URL: " + git_repo_url)
            # temporally fix. Needs to be change: rebuild the URL as
            # <scheme>//<host-and-path> to drop any user:password@ segment.
            scheme = git_repo_url.partition('//')[0]
            remainder = git_repo_url.partition('@')[2]
            git_url = scheme + "//" + remainder
            git_url = git_url + "/tree/" + git_branch
            logger.info("GIT URL: " + git_url)
            output_property_file.write("GIT_LOCATION=%s\r\n" % git_url)
            output_property_file.write("GIT_REVISION=%s\r\n" % git_branch)
        else:
            git_url = git_repo_url + "/tree/" + git_branch
            output_property_file.write("GIT_LOCATION=%s\r\n" % git_url)
            output_property_file.write("GIT_REVISION=%s\r\n" % tag_name)
def replace_file(source, destination):
    """Move *source* over *destination* (replacing the destination file).

    On Windows both paths are first converted to extended-length form so long
    paths do not fail.
    """
    logger.info('replacing files from:' + str(source) + "to: " + str(destination))
    if sys.platform.startswith('win'):
        source = cp.winapi_path(source)
        destination = cp.winapi_path(destination)
    shutil.move(source, destination)
def set_custom_testng():
    """Install the workspace's custom testng files into the product checkout.

    Only acts when the USE_CUSTOM_TESTNG property is the string "TRUE"; then
    the workspace testng.xml and testng-server-mgt.xml replace the product's
    copies at TESTNG_DIST_XML_PATH / TESTNG_SERVER_MGT_DIST.
    """
    if use_custom_testng_file == "TRUE":
        testng_source = Path(workspace + "/" + "testng.xml")
        testng_destination = Path(workspace + "/" + product_id + "/" + TESTNG_DIST_XML_PATH)
        testng_server_mgt_source = Path(workspace + "/" + "testng-server-mgt.xml")
        testng_server_mgt_destination = Path(workspace + "/" + product_id + "/" + TESTNG_SERVER_MGT_DIST)
        # replace testng source
        replace_file(testng_source, testng_destination)
        # replace testng server mgt source
        replace_file(testng_server_mgt_source, testng_server_mgt_destination)
def main():
    """Entry point: read properties, configure the product, set up databases
    and build/run the integration test module. All failures are logged."""
    try:
        global logger
        global dist_name
        logger = function_logger(logging.DEBUG, logging.DEBUG)
        if sys.version_info < (3, 6):
            # str() is required: the original concatenated the version_info
            # tuple to a string, raising TypeError instead of this Exception.
            raise Exception(
                "To run run-intg-test.py script you must have Python 3.6 or latest. "
                "Current version info: " + str(sys.version_info))
        read_proprty_files()
        if not validate_property_readings():
            raise Exception(
                "Property file doesn't have mandatory key-value pair. Please verify the content of the property file "
                "and the format")
        # construct database configuration
        construct_db_config()
        # clone the repository
        clone_repo()
        # set the custom testng.xml or the product testng.xml
        set_custom_testng()
        # resolve the distribution according to the configured test mode
        if test_mode == "WUM":
            dist_name = get_dist_name_wum()
        elif test_mode == "RELEASE":
            checkout_to_tag(get_latest_tag_name(product_id))
            dist_name = get_dist_name()
            get_latest_released_dist()
        elif test_mode == "SNAPSHOT":
            dist_name = get_dist_name()
            print("getDistNameMain: "+ dist_name)
            get_latest_stable_dist()
        db_names = cp.configure_product(dist_name, product_id, database_config, workspace, product_version)
        print("DB names is: "+str(db_names))
        if db_names is None or not db_names:
            raise Exception("Failed the product configuring")
        setup_databases(db_names)
        intg_module_path = Path(workspace + "/" + product_id + "/" + 'modules/integration')
        build_module(intg_module_path)
        save_log_files()
        create_output_property_fle()
    except Exception:
        logger.error("Error occurred while running the run-intg.py script", exc_info=True)
    except BaseException:
        # e.g. the BaseException raised by construct_db_config
        logger.error("Error occurred while doing the configuration", exc_info=True)
# Script entry point: run the full integration-test pipeline.
if __name__ == "__main__":
    main()
| 39.985163 | 178 | 0.648646 |
import sys
from xml.etree import ElementTree as ET
import subprocess
import wget
import logging
import inspect
import os
import shutil
import pymysql
import sqlparse
import glob
import ast
import stat
import re
from pathlib import Path
import urllib.request as urllib2
from xml.dom import minidom
import configure_product as cp
from subprocess import Popen, PIPE
from const import TEST_PLAN_PROPERTY_FILE_NAME, INFRA_PROPERTY_FILE_NAME, LOG_FILE_NAME, DB_META_DATA, \
PRODUCT_STORAGE_DIR_NAME, DEFAULT_DB_USERNAME, LOG_STORAGE, TESTNG_DIST_XML_PATH, TESTNG_SERVER_MGT_DIST, LOG_FILE_PATHS, DIST_POM_PATH, NS, ZIP_FILE_EXTENSION, DATABASE_NAME
git_repo_url = None
git_branch = None
os_type = None
workspace = None
dist_name = None
dist_zip_name = None
product_id = None
log_file_name = None
target_path = None
db_engine = None
db_engine_version = None
latest_product_release_api = None
latest_product_build_artifacts_api = None
sql_driver_location = None
db_host = None
db_port = None
db_username = None
db_password = None
tag_name = None
test_mode = None
wum_product_version = None
use_custom_testng_file = None
database_config = {}
def read_proprty_files():
global db_engine
global db_engine_version
global git_repo_url
global git_branch
global latest_product_release_api
global latest_product_build_artifacts_api
global sql_driver_location
global db_host
global db_port
global db_username
global db_password
global workspace
global product_id
global database_config
global wum_product_version
global test_mode
global use_custom_testng_file
workspace = os.getcwd()
property_file_paths = []
test_plan_prop_path = Path(workspace + "/" + TEST_PLAN_PROPERTY_FILE_NAME)
infra_prop_path = Path(workspace + "/" + INFRA_PROPERTY_FILE_NAME)
if Path.exists(test_plan_prop_path) and Path.exists(infra_prop_path):
property_file_paths.append(test_plan_prop_path)
property_file_paths.append(infra_prop_path)
for path in property_file_paths:
with open(path, 'r') as filehandle:
for line in filehandle:
if line.startswith("#"):
continue
prop = line.split("=")
key = prop[0]
val = prop[1]
if key == "DBEngine":
db_engine = val.strip()
elif key == "DBEngineVersion":
db_engine_version = val
elif key == "PRODUCT_GIT_URL":
git_repo_url = val.strip().replace('\\', '')
product_id = git_repo_url.split("/")[-1].split('.')[0]
elif key == "PRODUCT_GIT_BRANCH":
git_branch = val.strip()
elif key == "LATEST_PRODUCT_RELEASE_API":
latest_product_release_api = val.strip().replace('\\', '')
elif key == "LATEST_PRODUCT_BUILD_ARTIFACTS_API":
latest_product_build_artifacts_api = val.strip().replace('\\', '')
elif key == "SQL_DRIVERS_LOCATION_UNIX" and not sys.platform.startswith('win'):
sql_driver_location = val.strip()
elif key == "SQL_DRIVERS_LOCATION_WINDOWS" and sys.platform.startswith('win'):
sql_driver_location = val.strip()
elif key == "DatabaseHost":
db_host = val.strip()
elif key == "DatabasePort":
db_port = val.strip()
elif key == "DBUsername":
db_username = val.strip()
elif key == "DBPassword":
db_password = val.strip()
elif key == "TEST_MODE":
test_mode = val.strip()
elif key == "WUM_PRODUCT_VERSION":
wum_product_version = val.strip()
elif key == "USE_CUSTOM_TESTNG":
use_custom_testng_file = val.strip()
else:
raise Exception("Test Plan Property file or Infra Property file is not in the workspace: " + workspace)
def validate_property_readings():
missing_values = ""
if db_engine is None:
missing_values += " -DBEngine- "
if git_repo_url is None:
missing_values += " -PRODUCT_GIT_URL- "
if product_id is None:
missing_values += " -product-id- "
if git_branch is None:
missing_values += " -PRODUCT_GIT_BRANCH- "
if latest_product_release_api is None:
missing_values += " -LATEST_PRODUCT_RELEASE_API- "
if latest_product_build_artifacts_api is None:
missing_values += " -LATEST_PRODUCT_BUILD_ARTIFACTS_API- "
if sql_driver_location is None:
missing_values += " -SQL_DRIVERS_LOCATION_<OS_Type>- "
if db_host is None:
missing_values += " -DatabaseHost- "
if db_port is None:
missing_values += " -DatabasePort- "
if db_password is None:
missing_values += " -DBPassword- "
if test_mode is None:
missing_values += " -TEST_MODE- "
if wum_product_version is None:
missing_values += " -WUM_PRODUCT_VERSION- "
if use_custom_testng_file is None:
missing_values += " -USE_CUSTOM_TESTNG- "
if missing_values != "":
logger.error('Invalid property file is found. Missing values: %s ', missing_values)
return False
else:
return True
def get_db_meta_data(argument):
switcher = DB_META_DATA
return switcher.get(argument, False)
def construct_url(prefix):
url = prefix + db_host + ":" + db_port
return url
def function_logger(file_level, console_level=None):
global log_file_name
log_file_name = LOG_FILE_NAME
function_name = inspect.stack()[1][3]
logger = logging.getLogger(function_name)
logger.setLevel(logging.DEBUG)
if console_level != None:
ch = logging.StreamHandler()
ch.setLevel(console_level)
ch_format = logging.Formatter('%(asctime)s - %(message)s')
ch.setFormatter(ch_format)
logger.addHandler(ch)
fh = logging.FileHandler("{0}.log".format(function_name))
fh.setLevel(file_level)
fh_format = logging.Formatter('%(asctime)s - %(lineno)d - %(levelname)-8s - %(message)s')
fh.setFormatter(fh_format)
logger.addHandler(fh)
return logger
def download_file(url, destination):
wget.download(url, destination)
def get_db_hostname(url, db_type):
if db_type == 'ORACLE':
hostname = url.split(':')[3].replace("@", "")
else:
hostname = url.split(':')[2].replace("//", "")
return hostname
def run_sqlserver_commands(query):
subprocess.call(
['sqlcmd', '-S', db_host, '-U', database_config['user'], '-P', database_config['password'], '-Q', query])
def get_mysql_connection(db_name=None):
if db_name is not None:
conn = pymysql.connect(host=get_db_hostname(database_config['url'], 'MYSQL'), user=database_config['user'],
passwd=database_config['password'], db=db_name)
else:
conn = pymysql.connect(host=get_db_hostname(database_config['url'], 'MYSQL'), user=database_config['user'],
passwd=database_config['password'])
return conn
def run_mysql_commands(query):
conn = get_mysql_connection()
conectr = conn.cursor()
conectr.execute(query)
conn.close()
def get_ora_user_carete_query(database):
query = "CREATE USER {0} IDENTIFIED BY {1};".format(
database, database_config["password"])
return query
def get_ora_grant_query(database):
query = "GRANT CONNECT, RESOURCE, DBA TO {0};".format(
database)
return query
def execute_oracle_command(query):
connect_string = "{0}/{1}@//{2}/{3}".format(database_config["user"], database_config["password"],
db_host, "ORCL")
session = Popen(['sqlplus64', '-S', connect_string], stdin=PIPE, stdout=PIPE, stderr=PIPE)
session.stdin.write(bytes(query, 'utf-8'))
return session.communicate()
def create_oracle_user(database):
user_creating_query = get_ora_user_carete_query(database)
print("User_creating query is: "+user_creating_query)
logger.info(execute_oracle_command(user_creating_query))
permission_granting_query = get_ora_grant_query(database)
return execute_oracle_command(permission_granting_query)
def run_oracle_script(script, database):
connect_string = "{0}/{1}@//{2}/{3}".format(database, database_config["password"],
db_host, "ORCL")
session = Popen(['sqlplus', '-S', connect_string], stdin=PIPE, stdout=PIPE, stderr=PIPE)
session.stdin.write(bytes(script, 'utf-8'))
return session.communicate()
def run_sqlserver_script_file(db_name, script_path):
subprocess.call(
['sqlcmd', '-S', db_host, '-U', database_config["user"], '-P', database_config["password"], '-d', db_name, '-i',
script_path])
def run_mysql_script_file(db_name, script_path):
conn = get_mysql_connection(db_name)
connector = conn.cursor()
sql = open(script_path).read()
sql_parts = sqlparse.split(sql)
for sql_part in sql_parts:
if sql_part.strip() == '':
continue
connector.execute(sql_part)
conn.close()
def copy_file(source, target):
if sys.platform.startswith('win'):
source = cp.winapi_path(source)
target = cp.winapi_path(target)
shutil.copy(source, target)
else:
shutil.copy(source, target)
def get_dist_name():
global dist_name
global dist_zip_name
global product_version
dist_pom_path = Path(workspace + "/" + product_id + "/" + DIST_POM_PATH[product_id])
print(dist_pom_path)
if sys.platform.startswith('win'):
dist_pom_path = cp.winapi_path(dist_pom_path)
ET.register_namespace('', NS['d'])
artifact_tree = ET.parse(dist_pom_path)
artifact_root = artifact_tree.getroot()
parent = artifact_root.find('d:parent', NS)
artifact_id = artifact_root.find('d:artifactId', NS).text
print("ArtifactID" + artifact_id)
product_version = parent.find('d:version', NS).text
print("ProdVersion" + product_version)
dist_name = artifact_id + "-" + product_version
dist_zip_name = dist_name + ZIP_FILE_EXTENSION
return dist_name
def get_dist_name_wum():
global dist_name
global product_version
product_version=wum_product_version
os.chdir(PRODUCT_STORAGE_DIR_NAME)
name = glob.glob('*.zip')[0]
dist_name=os.path.splitext(name)[0]
logger.info("dist_name:" + dist_name)
return dist_name
def setup_databases(db_names):
base_path = Path(workspace + "/" + PRODUCT_STORAGE_DIR_NAME + "/" + dist_name + "/" + 'dbscripts')
print("Base path is: "+str(base_path))
engine = db_engine.upper()
print("Engine is: "+engine)
db_meta_data = get_db_meta_data(engine)
print("DB metadata is: "+str(db_meta_data))
if db_meta_data:
databases = db_meta_data["DB_SETUP"][product_id]
print("Databases is: "+str(databases))
if databases:
for db_name in db_names:
db_scripts = databases[db_name]
if len(db_scripts) == 0:
if engine == 'SQLSERVER-SE':
run_sqlserver_commands('CREATE DATABASE {0}'.format(db_name))
elif engine == 'MYSQL':
run_mysql_commands('CREATE DATABASE IF NOT EXISTS {0};'.format(db_name))
elif engine == 'ORACLE-SE2':
print("DB_Name is: "+db_name)
create_oracle_user(db_name)
else:
if engine == 'SQLSERVER-SE':
run_sqlserver_commands('CREATE DATABASE {0}'.format(db_name))
for db_script in db_scripts:
path = base_path / db_script
run_sqlserver_script_file(db_name, str(path))
elif engine == 'MYSQL':
run_mysql_commands('CREATE DATABASE IF NOT EXISTS {0};'.format(db_name))
for db_script in db_scripts:
path = base_path / db_script
run_mysql_script_file(db_name, str(path))
elif engine == 'ORACLE-SE2':
create_oracle_user(db_name)
for db_script in db_scripts:
path = base_path / db_script
run_oracle_script('@{0}'.format(str(path)), db_name)
logger.info('Database setting up is done.')
else:
raise Exception("Database setup configuration is not defined in the constant file")
else:
raise Exception("Database meta data is not defined in the constant file")
def construct_db_config():
db_meta_data = get_db_meta_data(db_engine.upper())
if db_meta_data:
database_config["driver_class_name"] = db_meta_data["driverClassName"]
database_config["password"] = db_password
database_config["sql_driver_location"] = sql_driver_location + "/" + db_meta_data["jarName"]
database_config["url"] = construct_url(db_meta_data["prefix"])
database_config["db_engine"] = db_engine
if db_username is None:
database_config["user"] = DEFAULT_DB_USERNAME
else:
database_config["user"] = db_username
else:
raise BaseException(
"DB config parsing is failed. DB engine name in the property file doesn't match with the constant: " + str(
db_engine.upper()))
def build_module(module_path):
logger.info('Start building a module. Module: ' + str(module_path))
if sys.platform.startswith('win'):
subprocess.call(['mvn', 'clean', 'install', '-B',
'-Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn'],
shell=True, cwd=module_path)
else:
subprocess.call(['mvn', 'clean', 'install', '-B',
'-Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn'],
cwd=module_path)
logger.info('Module build is completed. Module: ' + str(module_path))
def save_log_files():
log_storage = Path(workspace + "/" + LOG_STORAGE)
if not Path.exists(log_storage):
Path(log_storage).mkdir(parents=True, exist_ok=True)
log_file_paths = LOG_FILE_PATHS[product_id]
if log_file_paths:
for file in log_file_paths:
absolute_file_path = Path(workspace + "/" + product_id + "/" + file)
if Path.exists(absolute_file_path):
copy_file(absolute_file_path, log_storage)
else:
logger.error("File doesn't contain in the given location: " + str(absolute_file_path))
def clone_repo():
try:
subprocess.call(['git', 'clone', '--branch', git_branch, git_repo_url], cwd=workspace)
logger.info('product repository cloning is done.')
except Exception as e:
logger.error("Error occurred while cloning the product repo: ", exc_info=True)
def checkout_to_tag(name):
try:
git_path = Path(workspace + "/" + product_id)
tag = "tags/" + name
subprocess.call(["git", "fetch", "origin", tag], cwd=git_path)
subprocess.call(["git", "checkout", "-B", tag, name], cwd=git_path)
logger.info('checkout to the branch: ' + tag)
except Exception as e:
logger.error("Error occurred while cloning the product repo and checkout to the latest tag of the branch",
exc_info=True)
def get_latest_tag_name(product):
global tag_name
git_path = Path(workspace + "/" + product)
latest_rev = subprocess.Popen(["git", "rev-list", "--tags", "--max-count=1"], stdout=subprocess.PIPE, cwd=git_path)
binary_val_of_tag_name = subprocess.Popen(
["git", "describe", "--tags", latest_rev.stdout.read().strip().decode("utf-8")], stdout=subprocess.PIPE,
cwd=git_path)
tag_name = binary_val_of_tag_name.stdout.read().strip().decode("utf-8")
print(tag_name)
return tag_name
def get_product_file_path():
product_download_dir = Path(workspace + "/" + PRODUCT_STORAGE_DIR_NAME)
if not Path.exists(product_download_dir):
Path(product_download_dir).mkdir(parents=True, exist_ok=True)
return product_download_dir / dist_zip_name
def get_relative_path_of_dist_storage(xml_path):
print("xml_path is: "+xml_path)
dom = minidom.parse(urllib2.urlopen(xml_path))
artifact_elements = dom.getElementsByTagName('artifact')
for artifact in artifact_elements:
file_name_elements = artifact.getElementsByTagName("fileName")
for file_name in file_name_elements:
print("file_name.firstChild.nodeValue is: "+file_name.firstChild.nodeValue)
print("dist_zip_name: "+dist_zip_name)
if file_name.firstChild.nodeValue == file_name.firstChild.nodeValue:
parent_node = file_name.parentNode
print("disStorage:==" + parent_node.getElementsByTagName("relativePath")[0].firstChild.nodeValue)
return parent_node.getElementsByTagName("relativePath")[0].firstChild.nodeValue
return None
def get_latest_released_dist():
relative_path = get_relative_path_of_dist_storage(latest_product_release_api + "xml")
print("relatine path is "+relative_path)
if relative_path is None:
raise Exception("Error occured while getting relative path")
dist_downl_url = latest_product_release_api.split('/api')[0] + "/artifact/" + relative_path
download_file(dist_downl_url, str(get_product_file_path()))
logger.info('downloading the latest released pack from Jenkins is completed.')
def get_latest_stable_artifacts_api():
dom = minidom.parse(urllib2.urlopen(latest_product_build_artifacts_api + "xml"))
main_artifact_elements = dom.getElementsByTagName('mainArtifact')
print("Main artifact elements: "+str(main_artifact_elements))
for main_artifact in main_artifact_elements:
canonical_name_elements = main_artifact.getElementsByTagName("canonicalName")
print("Canonical name: "+str(canonical_name_elements))
for canonical_name in canonical_name_elements:
print("canonical_name.firstChild.nodeValue is: "+canonical_name.firstChild.nodeValue)
print("dist_name is: "+dist_name)
if canonical_name.firstChild.nodeValue == dist_name + ".pom":
parent_node = main_artifact.parentNode
print("printing msg "+parent_node.getElementsByTagName("url")[0].firstChild.nodeValue)
return parent_node.getElementsByTagName("url")[0].firstChild.nodeValue
return None
def get_latest_stable_dist():
build_num_artifact = get_latest_stable_artifacts_api()
print("buildnumArti: "+ str(build_num_artifact))
build_num_artifact = re.sub(r'http.//(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}):(\d{1,5})', "https://wso2.org", build_num_artifact)
if build_num_artifact is None:
raise Exception("Error occured while getting latest stable build artifact API path")
relative_path = get_relative_path_of_dist_storage(build_num_artifact + "api/xml")
print("relativePath:" + relative_path)
if relative_path is None:
raise Exception("Error occured while getting relative path")
dist_downl_url = build_num_artifact + "artifact/" + relative_path
print("dist_downl_url is: "+dist_downl_url)
download_file(dist_downl_url, str(get_product_file_path()))
logger.info('downloading the latest stable pack from Jenkins is completed.')
def create_output_property_fle():
output_property_file = open("output.properties", "w+")
if test_mode == "WUM":
logger.info("PRODUCT GIT URL: " + git_repo_url)
head, sep, tail = git_repo_url.partition('//')
uri=head
head, sep, tail = git_repo_url.partition('@')
urn=tail
git_url=uri+"//"+urn
git_url = git_url + "/tree/" + git_branch
logger.info("GIT URL: " + git_url)
output_property_file.write("GIT_LOCATION=%s\r\n" % git_url)
output_property_file.write("GIT_REVISION=%s\r\n" % git_branch)
else:
git_url = git_repo_url + "/tree/" + git_branch
output_property_file.write("GIT_LOCATION=%s\r\n" % git_url)
output_property_file.write("GIT_REVISION=%s\r\n" % tag_name)
output_property_file.close()
def replace_file(source, destination):
logger.info('replacing files from:' + str(source) + "to: " + str(destination))
if sys.platform.startswith('win'):
source = cp.winapi_path(source)
destination = cp.winapi_path(destination)
shutil.move(source, destination)
def set_custom_testng():
if use_custom_testng_file == "TRUE":
testng_source = Path(workspace + "/" + "testng.xml")
testng_destination = Path(workspace + "/" + product_id + "/" + TESTNG_DIST_XML_PATH)
testng_server_mgt_source = Path(workspace + "/" + "testng-server-mgt.xml")
testng_server_mgt_destination = Path(workspace + "/" + product_id + "/" + TESTNG_SERVER_MGT_DIST)
replace_file(testng_source, testng_destination)
replace_file(testng_server_mgt_source, testng_server_mgt_destination)
def main():
try:
global logger
global dist_name
logger = function_logger(logging.DEBUG, logging.DEBUG)
if sys.version_info < (3, 6):
raise Exception(
"To run run-intg-test.py script you must have Python 3.6 or latest. Current version info: " + sys.version_info)
read_proprty_files()
if not validate_property_readings():
raise Exception(
"Property file doesn't have mandatory key-value pair. Please verify the content of the property file "
"and the format")
# construct database configuration
construct_db_config()
# clone the repository
clone_repo()
# set the custom testng.xml or the product testng.xml
set_custom_testng()
if test_mode == "WUM":
dist_name = get_dist_name_wum()
elif test_mode == "RELEASE":
checkout_to_tag(get_latest_tag_name(product_id))
dist_name = get_dist_name()
get_latest_released_dist()
elif test_mode == "SNAPSHOT":
dist_name = get_dist_name()
print("getDistNameMain: "+ dist_name)
get_latest_stable_dist()
db_names = cp.configure_product(dist_name, product_id, database_config, workspace, product_version)
print("DB names is: "+str(db_names))
if db_names is None or not db_names:
raise Exception("Failed the product configuring")
setup_databases(db_names)
intg_module_path = Path(workspace + "/" + product_id + "/" + 'modules/integration')
build_module(intg_module_path)
save_log_files()
create_output_property_fle()
except Exception as e:
logger.error("Error occurred while running the run-intg.py script", exc_info=True)
except BaseException as e:
logger.error("Error occurred while doing the configuration", exc_info=True)
if __name__ == "__main__":
main()
| true | true |
f7113c4e2ecd8677cdceb6cc10f2089325dd123b | 7,683 | py | Python | BNN/forget.py | fshp971/mcmc-unlearning | 3113dedca6de33bcaf316b804cb9c1e636db7fd5 | [
"MIT"
] | 5 | 2022-03-16T02:28:27.000Z | 2022-03-29T08:36:57.000Z | BNN/forget.py | fshp971/mcmc-unlearning | 3113dedca6de33bcaf316b804cb9c1e636db7fd5 | [
"MIT"
] | null | null | null | BNN/forget.py | fshp971/mcmc-unlearning | 3113dedca6de33bcaf316b804cb9c1e636db7fd5 | [
"MIT"
] | null | null | null | from datetime import datetime
import os
import pickle
import argparse
import numpy as np
import torch
import torch.nn.functional as F
from mcmc_unlearner import sgmcmcUnlearner
import utils
import models
class myUnlearner(sgmcmcUnlearner):
def _apply_sample(self, z):
x, y = z
if not self.cpu: x, y = x.cuda(), y.cuda()
self.model.train()
lo = -self.model.log_prior() + F.cross_entropy(self.model(x), y) * self.model.n
self.optimizer.zero_grad()
lo.backward()
self.optimizer.step()
def _fun(self, z):
x, y = z
if not self.cpu: x, y = x.cuda(), y.cuda()
self.model.train()
return -self.model.log_prior() + F.cross_entropy(self.model(x), y) * self.model.n
def _z_fun(self, z):
x, y = z
if not self.cpu: x, y = x.cuda(), y.cuda()
self.model.train()
return F.cross_entropy(self.model(x), y, reduction='sum')
def get_args():
parser = argparse.ArgumentParser()
utils.add_shared_args(parser)
parser.add_argument('--rm-idx-path', type=str, default=None)
parser.add_argument('--save-freq', type=int, default=-1)
return parser.parse_args()
def get_forget_idx(dataset, kill_num):
kill_val = 0
if 'targets' in vars(dataset).keys():
labels = np.array(dataset.targets)
elif 'labels' in vars(dataset).keys():
labels = np.array(dataset.labels)
else:
raise NotImplementedError
randidx = np.random.permutation( np.where(labels==kill_val)[0] )
return randidx[:kill_num]
def evaluate(model, loader, cpu):
''' average log predictive probability '''
loss = utils.AverageMeter()
acc = utils.AverageMeter()
n = len(loader.sampler.indices)
model.eval()
for x, y in loader:
if not cpu: x, y = x.cuda(), y.cuda()
with torch.no_grad():
_y = model(x)
lo = - model.log_prior() + F.cross_entropy(_y,y) * n
lo = lo.item()
ac = (_y.argmax(dim=1) == y).sum().item() / len(y)
loss.update(lo, len(y))
acc.update(ac, len(y))
return loss.average(), acc.average()
def forget_eval_one_time(model, train_loader, forgetted_train_loader, test_loader, log):
remain_train_loss, remain_train_acc = evaluate(model, train_loader, args.cpu)
forgetted_train_loss, forgetted_train_acc = evaluate(model, forgetted_train_loader, args.cpu)
test_loss, test_acc = evaluate(model, test_loader, args.cpu)
utils.add_log(log, 'remain_train_loss', remain_train_loss)
utils.add_log(log, 'remain_train_acc', remain_train_acc)
utils.add_log(log,'forgetted_train_loss', forgetted_train_loss)
utils.add_log(log,'forgetted_train_acc', forgetted_train_acc)
utils.add_log(log, 'test_loss', test_loss)
utils.add_log(log, 'test_acc', test_acc)
logger.info('remaining train loss {:.2e} \t train acc {:.2%}'
.format(remain_train_loss, remain_train_acc))
logger.info('forgetted train loss {:.2e} \t train acc {:.2%}'
.format(forgetted_train_loss, forgetted_train_acc))
logger.info('test loss {:.2e} \t test acc {:.2%}'
.format(test_loss, test_acc))
logger.info('')
def save_checkpoint(save_dir, save_name, log, model, optimizer):
with open('{}/{}-log.pkl'.format(save_dir, save_name), 'wb') as f:
pickle.dump(log, f)
torch.save({
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
}, '{}/{}-model.pkl'.format(save_dir, save_name))
def main(args, logger):
''' retrieve lots of data '''
trainset, testset = utils.get_dataset(args.dataset)
if args.rm_idx_path is not None:
with open(args.rm_idx_path, 'rb') as f:
forgetted_idx = pickle.load(f)
else:
forgetted_idx = get_forget_idx(trainset, args.ifs_kill_num)
forgetted_idx_loader = utils.IndexBatchSampler(
batch_size=args.ifs_rm_bs, indices=forgetted_idx)
train_sampler = utils.DataSampler(trainset, args.batch_size)
train_loader = utils.DataLoader(trainset, args.batch_size)
train_loader.remove(forgetted_idx)
forgetted_train_loader = utils.DataLoader(trainset, args.batch_size)
forgetted_train_loader.set_sampler_indices(forgetted_idx)
test_loader = utils.DataLoader(testset, args.batch_size)
''' end of retrieving data '''
model = utils.get_mcmc_bnn_arch(args.arch, args.dataset, args.prior_sig)
if not args.cpu:
model.cuda()
args.lr /= len(trainset)
optimizer = utils.get_optim(model.parameters(), args.optim,
lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay, sghmc_alpha=args.sghmc_alpha)
model.n = len(train_sampler)
''' restore model / sampler '''
state_dict = torch.load(args.resume_path)
model.load_state_dict(state_dict['model_state_dict'])
optimizer.load_state_dict(state_dict['optimizer_state_dict'])
''' for backward compatibility '''
for group in optimizer.param_groups:
if 'lr_decay' in group:
group['lr'] *= group['lr_decay']
group.pop('lr_decay')
del state_dict
unlearner = myUnlearner(
model = model,
optimizer = optimizer,
params = model.parameters(),
cpu = args.cpu,
iter_T = args.ifs_iter_T,
scaling = args.ifs_scaling,
samp_T = args.ifs_samp_T,)
log = dict()
log['user_time'] = 0
utils.add_log(log, 'forgetted_idx', forgetted_idx)
forget_eval_one_time(model, train_loader, forgetted_train_loader, test_loader, log)
removed_nums = 0
freq_counter = 0
for ii in forgetted_idx_loader:
''' create forget-batch '''
xx, yy = [], []
for i in ii:
x, y = trainset[i]
if len(x.shape) == 3: x = x.reshape(1, *x.shape)
xx.append(x)
yy.append(y)
xx, yy = torch.cat(xx), torch.tensor(yy)
''' end '''
scaling = args.ifs_scaling / len(train_sampler)
unlearner.param_dict['scaling'] = scaling
''' start calculation of time '''
start_time = datetime.now()
unlearner.remove([xx,yy], train_sampler)
torch.cuda.synchronize()
end_time = datetime.now()
user_time = (end_time - start_time).total_seconds()
''' end calculation of time '''
log['user_time'] += user_time
train_sampler.remove(ii)
''' after removal, update the number of remaining datums '''
unlearner.model.n = len(train_sampler)
removed_nums += len(ii)
freq_counter += len(ii)
''' update mcmc sampler '''
for group in unlearner.optimizer.param_groups:
group['lr'] *= (len(train_sampler) + len(ii)) / len(train_sampler)
logger.info('remaining trainset size {}'.format(len(train_sampler)))
logger.info('user time {:.3f} sec \t'
'cumulated user time {:.3f} mins'
.format(user_time, log['user_time']/60) )
if (args.save_freq > 0) and (freq_counter >= args.save_freq):
freq_counter = 0
save_checkpoint(args.save_dir, '{}-ckpt-{}'.format(args.save_name, removed_nums), log, model, optimizer)
forget_eval_one_time(model, train_loader, forgetted_train_loader, test_loader, log)
save_checkpoint(args.save_dir, args.save_name, log, model, optimizer)
return
if __name__ == '__main__':
args = get_args()
logger = utils.generic_init(args)
try:
main(args, logger)
except Exception as e:
logger.exception('Unexpected exception! %s', e)
| 31.879668 | 116 | 0.63647 | from datetime import datetime
import os
import pickle
import argparse
import numpy as np
import torch
import torch.nn.functional as F
from mcmc_unlearner import sgmcmcUnlearner
import utils
import models
class myUnlearner(sgmcmcUnlearner):
def _apply_sample(self, z):
x, y = z
if not self.cpu: x, y = x.cuda(), y.cuda()
self.model.train()
lo = -self.model.log_prior() + F.cross_entropy(self.model(x), y) * self.model.n
self.optimizer.zero_grad()
lo.backward()
self.optimizer.step()
def _fun(self, z):
x, y = z
if not self.cpu: x, y = x.cuda(), y.cuda()
self.model.train()
return -self.model.log_prior() + F.cross_entropy(self.model(x), y) * self.model.n
def _z_fun(self, z):
x, y = z
if not self.cpu: x, y = x.cuda(), y.cuda()
self.model.train()
return F.cross_entropy(self.model(x), y, reduction='sum')
def get_args():
parser = argparse.ArgumentParser()
utils.add_shared_args(parser)
parser.add_argument('--rm-idx-path', type=str, default=None)
parser.add_argument('--save-freq', type=int, default=-1)
return parser.parse_args()
def get_forget_idx(dataset, kill_num):
kill_val = 0
if 'targets' in vars(dataset).keys():
labels = np.array(dataset.targets)
elif 'labels' in vars(dataset).keys():
labels = np.array(dataset.labels)
else:
raise NotImplementedError
randidx = np.random.permutation( np.where(labels==kill_val)[0] )
return randidx[:kill_num]
def evaluate(model, loader, cpu):
loss = utils.AverageMeter()
acc = utils.AverageMeter()
n = len(loader.sampler.indices)
model.eval()
for x, y in loader:
if not cpu: x, y = x.cuda(), y.cuda()
with torch.no_grad():
_y = model(x)
lo = - model.log_prior() + F.cross_entropy(_y,y) * n
lo = lo.item()
ac = (_y.argmax(dim=1) == y).sum().item() / len(y)
loss.update(lo, len(y))
acc.update(ac, len(y))
return loss.average(), acc.average()
def forget_eval_one_time(model, train_loader, forgetted_train_loader, test_loader, log):
remain_train_loss, remain_train_acc = evaluate(model, train_loader, args.cpu)
forgetted_train_loss, forgetted_train_acc = evaluate(model, forgetted_train_loader, args.cpu)
test_loss, test_acc = evaluate(model, test_loader, args.cpu)
utils.add_log(log, 'remain_train_loss', remain_train_loss)
utils.add_log(log, 'remain_train_acc', remain_train_acc)
utils.add_log(log,'forgetted_train_loss', forgetted_train_loss)
utils.add_log(log,'forgetted_train_acc', forgetted_train_acc)
utils.add_log(log, 'test_loss', test_loss)
utils.add_log(log, 'test_acc', test_acc)
logger.info('remaining train loss {:.2e} \t train acc {:.2%}'
.format(remain_train_loss, remain_train_acc))
logger.info('forgetted train loss {:.2e} \t train acc {:.2%}'
.format(forgetted_train_loss, forgetted_train_acc))
logger.info('test loss {:.2e} \t test acc {:.2%}'
.format(test_loss, test_acc))
logger.info('')
def save_checkpoint(save_dir, save_name, log, model, optimizer):
with open('{}/{}-log.pkl'.format(save_dir, save_name), 'wb') as f:
pickle.dump(log, f)
torch.save({
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
}, '{}/{}-model.pkl'.format(save_dir, save_name))
def main(args, logger):
trainset, testset = utils.get_dataset(args.dataset)
if args.rm_idx_path is not None:
with open(args.rm_idx_path, 'rb') as f:
forgetted_idx = pickle.load(f)
else:
forgetted_idx = get_forget_idx(trainset, args.ifs_kill_num)
forgetted_idx_loader = utils.IndexBatchSampler(
batch_size=args.ifs_rm_bs, indices=forgetted_idx)
train_sampler = utils.DataSampler(trainset, args.batch_size)
train_loader = utils.DataLoader(trainset, args.batch_size)
train_loader.remove(forgetted_idx)
forgetted_train_loader = utils.DataLoader(trainset, args.batch_size)
forgetted_train_loader.set_sampler_indices(forgetted_idx)
test_loader = utils.DataLoader(testset, args.batch_size)
model = utils.get_mcmc_bnn_arch(args.arch, args.dataset, args.prior_sig)
if not args.cpu:
model.cuda()
args.lr /= len(trainset)
optimizer = utils.get_optim(model.parameters(), args.optim,
lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay, sghmc_alpha=args.sghmc_alpha)
model.n = len(train_sampler)
state_dict = torch.load(args.resume_path)
model.load_state_dict(state_dict['model_state_dict'])
optimizer.load_state_dict(state_dict['optimizer_state_dict'])
for group in optimizer.param_groups:
if 'lr_decay' in group:
group['lr'] *= group['lr_decay']
group.pop('lr_decay')
del state_dict
unlearner = myUnlearner(
model = model,
optimizer = optimizer,
params = model.parameters(),
cpu = args.cpu,
iter_T = args.ifs_iter_T,
scaling = args.ifs_scaling,
samp_T = args.ifs_samp_T,)
log = dict()
log['user_time'] = 0
utils.add_log(log, 'forgetted_idx', forgetted_idx)
forget_eval_one_time(model, train_loader, forgetted_train_loader, test_loader, log)
removed_nums = 0
freq_counter = 0
for ii in forgetted_idx_loader:
xx, yy = [], []
for i in ii:
x, y = trainset[i]
if len(x.shape) == 3: x = x.reshape(1, *x.shape)
xx.append(x)
yy.append(y)
xx, yy = torch.cat(xx), torch.tensor(yy)
scaling = args.ifs_scaling / len(train_sampler)
unlearner.param_dict['scaling'] = scaling
start_time = datetime.now()
unlearner.remove([xx,yy], train_sampler)
torch.cuda.synchronize()
end_time = datetime.now()
user_time = (end_time - start_time).total_seconds()
log['user_time'] += user_time
train_sampler.remove(ii)
unlearner.model.n = len(train_sampler)
removed_nums += len(ii)
freq_counter += len(ii)
for group in unlearner.optimizer.param_groups:
group['lr'] *= (len(train_sampler) + len(ii)) / len(train_sampler)
logger.info('remaining trainset size {}'.format(len(train_sampler)))
logger.info('user time {:.3f} sec \t'
'cumulated user time {:.3f} mins'
.format(user_time, log['user_time']/60) )
if (args.save_freq > 0) and (freq_counter >= args.save_freq):
freq_counter = 0
save_checkpoint(args.save_dir, '{}-ckpt-{}'.format(args.save_name, removed_nums), log, model, optimizer)
forget_eval_one_time(model, train_loader, forgetted_train_loader, test_loader, log)
save_checkpoint(args.save_dir, args.save_name, log, model, optimizer)
return
if __name__ == '__main__':
args = get_args()
logger = utils.generic_init(args)
try:
main(args, logger)
except Exception as e:
logger.exception('Unexpected exception! %s', e)
| true | true |
f7113d3eadd9f0bc689a14dfac6d67b4d3b2ca7f | 2,323 | py | Python | src/timessquare/worker/main.py | lsst-sqre/times-square | 4a8d6183d9ae073d7e6968506e29c671d196446a | [
"MIT"
] | null | null | null | src/timessquare/worker/main.py | lsst-sqre/times-square | 4a8d6183d9ae073d7e6968506e29c671d196446a | [
"MIT"
] | 6 | 2021-12-13T20:19:41.000Z | 2022-03-28T20:09:01.000Z | src/timessquare/worker/main.py | lsst-sqre/times-square | 4a8d6183d9ae073d7e6968506e29c671d196446a | [
"MIT"
] | null | null | null | """Arq-based queue worker lifecycle configuration."""
from __future__ import annotations
import uuid
from typing import Any, Dict
import httpx
import structlog
from safir.dependencies.db_session import db_session_dependency
from safir.logging import configure_logging
from timessquare.config import config
from timessquare.dependencies.redis import redis_dependency
from .functions import (
ping,
pull_request_sync,
repo_added,
repo_push,
repo_removed,
)
async def startup(ctx: Dict[Any, Any]) -> None:
"""Runs during working start-up to set up the worker context."""
configure_logging(
profile=config.profile,
log_level=config.log_level,
name="timessquare",
)
logger = structlog.get_logger("timessquare")
# The instance key uniquely identifies this worker in logs
instance_key = uuid.uuid4().hex
logger = logger.bind(worker_instance=instance_key)
logger.info("Starting up worker")
http_client = httpx.AsyncClient()
ctx["http_client"] = http_client
ctx["logger"] = logger
logger.info("Start up complete")
# Set up FastAPI dependencies; we can use them "manually" with
# arq to provide resources similarly to FastAPI endpoints
await db_session_dependency.initialize(
config.database_url, config.database_password.get_secret_value()
)
await redis_dependency.initialize(config.redis_url)
async def shutdown(ctx: Dict[Any, Any]) -> None:
"""Runs during worker shut-down to resources."""
if "logger" in ctx.keys():
logger = ctx["logger"]
else:
logger = structlog.get_logger("timessquare")
logger.info("Running worker shutdown.")
await db_session_dependency.aclose()
await redis_dependency.close()
try:
await ctx["http_client"].aclose()
except Exception as e:
logger.warning("Issue closing the http_client: %s", str(e))
logger.info("Worker shutdown complete.")
class WorkerSettings:
"""Configuration for a Times Square arq worker.
See `arq.worker.Worker` for details on these attributes.
"""
functions = [ping, repo_push, repo_added, repo_removed, pull_request_sync]
redis_settings = config.arq_redis_settings
queue_name = config.queue_name
on_startup = startup
on_shutdown = shutdown
| 26.701149 | 78 | 0.713732 |
from __future__ import annotations
import uuid
from typing import Any, Dict
import httpx
import structlog
from safir.dependencies.db_session import db_session_dependency
from safir.logging import configure_logging
from timessquare.config import config
from timessquare.dependencies.redis import redis_dependency
from .functions import (
ping,
pull_request_sync,
repo_added,
repo_push,
repo_removed,
)
async def startup(ctx: Dict[Any, Any]) -> None:
    """Runs during worker start-up to set up the worker context.

    Populates ``ctx`` with a bound structlog logger and a shared
    httpx.AsyncClient, then initializes DB and Redis dependencies.
    """
    configure_logging(
        profile=config.profile,
        log_level=config.log_level,
        name="timessquare",
    )
    logger = structlog.get_logger("timessquare")
    # Unique key to identify this worker instance in logs.
    instance_key = uuid.uuid4().hex
    logger = logger.bind(worker_instance=instance_key)
    logger.info("Starting up worker")
    # Shared async HTTP client; closed again in shutdown().
    http_client = httpx.AsyncClient()
    ctx["http_client"] = http_client
    ctx["logger"] = logger
    logger.info("Start up complete")
    # FastAPI-style dependencies used manually from arq workers.
    await db_session_dependency.initialize(
        config.database_url, config.database_password.get_secret_value()
    )
    await redis_dependency.initialize(config.redis_url)
async def shutdown(ctx: Dict[Any, Any]) -> None:
    """Runs during worker shut-down to release resources.

    ``ctx`` is the arq worker context populated by ``startup``; entries
    may be missing if start-up failed part-way, so everything here is
    looked up defensively.
    """
    # Idiomatic single lookup instead of `"logger" in ctx.keys()` + index.
    logger = ctx.get("logger") or structlog.get_logger("timessquare")
    logger.info("Running worker shutdown.")
    await db_session_dependency.aclose()
    await redis_dependency.close()
    # Best effort: the HTTP client may be absent or already closed.
    try:
        await ctx["http_client"].aclose()
    except Exception as e:
        logger.warning("Issue closing the http_client: %s", str(e))
    logger.info("Worker shutdown complete.")
class WorkerSettings:
    """Configuration for a Times Square arq worker; see `arq.worker.Worker`."""

    # Task functions this worker is able to execute.
    functions = [ping, repo_push, repo_added, repo_removed, pull_request_sync]
    # Queue/connection settings come from the application config.
    redis_settings = config.arq_redis_settings
    queue_name = config.queue_name
    # Worker lifecycle hooks defined above.
    on_startup = startup
    on_shutdown = shutdown
| true | true |
f7113d8051dae56c9c2db4dc6bfcbafa078a0893 | 2,015 | py | Python | pymongo_opentracing/tracing.py | khvn26/python-pymongo | d878249b6e1cb11007ab00fe44bdd858f6a78724 | [
"Apache-2.0"
] | null | null | null | pymongo_opentracing/tracing.py | khvn26/python-pymongo | d878249b6e1cb11007ab00fe44bdd858f6a78724 | [
"Apache-2.0"
] | null | null | null | pymongo_opentracing/tracing.py | khvn26/python-pymongo | d878249b6e1cb11007ab00fe44bdd858f6a78724 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2018 SignalFx, Inc. All rights reserved.
from bson import json_util as json
from opentracing.ext import tags
import pymongo.monitoring
from six import text_type
import opentracing
class CommandTracing(pymongo.monitoring.CommandListener):
    """PyMongo command listener that emits one OpenTracing span per command.

    A span is opened in ``started`` and closed in ``succeeded``/``failed``;
    the two halves are correlated via the command's ``request_id``.
    """

    # Class-level map request_id -> active scope, shared by all instances.
    _scopes = {}

    def __init__(self, tracer=None, span_tags=None):
        # Fall back to the process-global tracer; opentracing < 2.0 exposes
        # it as `opentracing.tracer` rather than `global_tracer()`.
        try:
            global_tracer = opentracing.global_tracer()
        except AttributeError:
            global_tracer = opentracing.tracer
        self._tracer = tracer or global_tracer
        # Extra tags applied to every span (e.g. deployment metadata).
        self._span_tags = span_tags or {}

    def started(self, event):
        """Open a span for the command and tag it with DB metadata."""
        scope = self._tracer.start_active_span(event.command_name)
        self._scopes[event.request_id] = scope
        span = scope.span
        span.set_tag(tags.DATABASE_TYPE, 'mongodb')
        span.set_tag(tags.COMPONENT, 'PyMongo')
        span.set_tag(tags.DATABASE_INSTANCE, event.database_name)
        for tag, value in self._span_tags.items():
            span.set_tag(tag, value)
        if not event.command:
            return
        # Assumes the command document's first key/value pair is
        # (command name, target collection) — TODO confirm for all commands.
        command_name, collection = next(iter(event.command.items()))
        span.set_tag('command.name', command_name)
        namespace = text_type('{}.{}').format(event.database_name, collection)
        span.set_tag('namespace', namespace)
        # Truncate the serialized command to bound span size.
        span.set_tag('command', json.dumps(event.command)[:512])

    def succeeded(self, event):
        """Close the span for a successful command; no-op if none was opened."""
        scope = self._scopes.pop(event.request_id, None)
        if scope is None:
            return
        span = scope.span
        span.set_tag('event.reply', json.dumps(event.reply)[:512])
        span.set_tag('reported_duration', event.duration_micros)
        scope.close()

    def failed(self, event):
        """Close the span for a failed command, tagging it as an error."""
        scope = self._scopes.pop(event.request_id, None)
        if scope is None:
            return
        span = scope.span
        span.set_tag(tags.ERROR, True)
        span.set_tag('event.failure', json.dumps(event.failure))
        span.set_tag('reported_duration', event.duration_micros)
        scope.close()
| 33.583333 | 78 | 0.653598 |
from bson import json_util as json
from opentracing.ext import tags
import pymongo.monitoring
from six import text_type
import opentracing
class CommandTracing(pymongo.monitoring.CommandListener):
    """PyMongo command listener emitting one OpenTracing span per command,
    correlated between started/succeeded/failed via the request_id."""

    # Class-level map request_id -> active scope, shared by all instances.
    _scopes = {}

    def __init__(self, tracer=None, span_tags=None):
        # opentracing < 2.0 exposes the global tracer as `opentracing.tracer`.
        try:
            global_tracer = opentracing.global_tracer()
        except AttributeError:
            global_tracer = opentracing.tracer
        self._tracer = tracer or global_tracer
        self._span_tags = span_tags or {}

    def started(self, event):
        # Open the span and record database metadata plus user-supplied tags.
        scope = self._tracer.start_active_span(event.command_name)
        self._scopes[event.request_id] = scope
        span = scope.span
        span.set_tag(tags.DATABASE_TYPE, 'mongodb')
        span.set_tag(tags.COMPONENT, 'PyMongo')
        span.set_tag(tags.DATABASE_INSTANCE, event.database_name)
        for tag, value in self._span_tags.items():
            span.set_tag(tag, value)
        if not event.command:
            return
        # Assumes the first key/value pair is (command name, collection).
        command_name, collection = next(iter(event.command.items()))
        span.set_tag('command.name', command_name)
        namespace = text_type('{}.{}').format(event.database_name, collection)
        span.set_tag('namespace', namespace)
        # Truncated to 512 chars to bound span size.
        span.set_tag('command', json.dumps(event.command)[:512])

    def succeeded(self, event):
        # Close the matching span; no-op when none was recorded.
        scope = self._scopes.pop(event.request_id, None)
        if scope is None:
            return
        span = scope.span
        span.set_tag('event.reply', json.dumps(event.reply)[:512])
        span.set_tag('reported_duration', event.duration_micros)
        scope.close()

    def failed(self, event):
        # Close the matching span, marking it as errored.
        scope = self._scopes.pop(event.request_id, None)
        if scope is None:
            return
        span = scope.span
        span.set_tag(tags.ERROR, True)
        span.set_tag('event.failure', json.dumps(event.failure))
        span.set_tag('reported_duration', event.duration_micros)
        scope.close()
| true | true |
f7113ddd020fdfec1185ec67da4041271d3b1e1d | 3,183 | py | Python | files/regressao_linear/regressaolinear1.py | Nina-pinheiro/Data-Science-Python | b6b2bc28f2f8f925e1b43408330641bd72388232 | [
"MIT"
] | 9 | 2021-01-29T14:01:57.000Z | 2022-03-26T00:46:00.000Z | files/regressao_linear/regressaolinear1.py | Nina-pinheiro/machine_learning_statistic_python | b6b2bc28f2f8f925e1b43408330641bd72388232 | [
"MIT"
] | null | null | null | files/regressao_linear/regressaolinear1.py | Nina-pinheiro/machine_learning_statistic_python | b6b2bc28f2f8f925e1b43408330641bd72388232 | [
"MIT"
] | 2 | 2020-07-28T11:25:55.000Z | 2020-08-03T20:04:11.000Z | # Importar as bibliotecas necessárias
# Linear-regression study of beer consumption vs. weather features.
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
# BUGFIX: train_test_split is used below but was never imported
# (the original had a duplicate LinearRegression import here instead).
from sklearn.model_selection import train_test_split
import seaborn as sns

# Read the dataset
df = pd.read_csv("dataset/consumo.csv")

# These columns use a comma as decimal separator; convert them to float
df['Temperatura Maxima (C)'] = df['Temperatura Maxima (C)'].str.replace(',','.').astype(float)
df['Temperatura Minima (C)'] = df['Temperatura Minima (C)'].str.replace(',','.').astype(float)
df['Precipitacao (mm)'] = df['Precipitacao (mm)'].str.replace(',','.').astype(float)
df['Temperatura Media (C)'] = df['Temperatura Media (C)'].str.replace(',','.').astype(float)

# Descriptive analysis
df.describe()
df.head()
df.dtypes
df.info()
df.tail()
df.shape

# Count missing values
df.isnull().sum()

# Drop rows where every column is missing
df.dropna(how = "all", inplace = True)

# Work on a copy for feature engineering
df_feature = df.copy()

# New feature: daily temperature range (max - min)
df_feature['variacao'] = (df_feature['Temperatura Maxima (C)']) - (df_feature['Temperatura Minima (C)'])
df_feature

# Plot the new feature against beer consumption
df_feature.plot(x='variacao', y = 'Consumo de cerveja (litros)')
plt.xlabel('variacao', fontsize = 15)
plt.ylabel('Consumo de cerveja (litros)',fontsize = 15)
plt.grid()

# Drop the date column
df_feature = df_feature.drop(columns = 'Data')

# Correlation matrix
df_feature.corr().round(3)

# Pairwise plots, colored by weekend flag
plt.figure()
sns.pairplot(df_feature,x_vars=['Temperatura Minima (C)','Temperatura Media (C)','Temperatura Maxima (C)','Precipitacao (mm)','variacao'],
             y_vars=['Consumo de cerveja (litros)'],hue='Final de Semana',diag_kind=None)

# Weekend vs. beer consumption
plt.figure(2)
sns.swarmplot(x='Final de Semana',y='Consumo de cerveja (litros)',data= df_feature)
plt.grid()
plt.xlabel('Final de semana')
plt.ylabel('Consumo de cerveja [L]')

# Weekend vs. the new temperature-range feature
plt.figure(3)
sns.swarmplot(x = 'Final de Semana', y = 'variacao', data = df_feature)
plt.grid()
plt.xlabel('Final de semana')
plt.ylabel('variacao')

# Linear regression model
modelo = LinearRegression()

# Target variable
y = df_feature['Consumo de cerveja (litros)'].values  # target

# Independent variables: everything except the target
x = df_feature.drop(columns='Consumo de cerveja (litros)').values  # features
xColunas = df_feature.drop(columns='Consumo de cerveja (litros)').columns

# Train/test split
xTrain,xTest,yTrain,yTest = train_test_split(x,y, test_size = 0.3, random_state = 54564541)

# Fit the model and predict on the held-out set
modelo.fit(xTrain,yTrain)
yPred = modelo.predict(xTest)

# Residuals
res = yPred - yTest

# Evaluation
print('Valor de R2: {}'.format(modelo.score(xTest,yTest)))
print('Valor MSE: {}' .format(mean_squared_error(yTest,yPred)))
print('Coeficientes da regressão: {}'.format(modelo.coef_))
| 28.168142 | 138 | 0.733585 |
# Linear-regression study of beer consumption vs. weather features.
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
import seaborn as sns
# BUGFIX: train_test_split is used below but was never imported
# (the original had a duplicate LinearRegression import here instead).
from sklearn.model_selection import train_test_split
# Load data; comma-decimal columns are converted to float.
df = pd.read_csv("dataset/consumo.csv")
df['Temperatura Maxima (C)'] = df['Temperatura Maxima (C)'].str.replace(',','.').astype(float)
df['Temperatura Minima (C)'] = df['Temperatura Minima (C)'].str.replace(',','.').astype(float)
df['Precipitacao (mm)'] = df['Precipitacao (mm)'].str.replace(',','.').astype(float)
df['Temperatura Media (C)'] = df['Temperatura Media (C)'].str.replace(',','.').astype(float)
# Descriptive exploration
df.describe()
df.head()
df.dtypes
df.info()
df.tail()
df.shape
df.isnull().sum()
# Drop rows where every column is missing
df.dropna(how = "all", inplace = True)
df_feature = df.copy()
# New feature: daily temperature range (max - min)
df_feature['variacao'] = (df_feature['Temperatura Maxima (C)']) - (df_feature['Temperatura Minima (C)'])
df_feature
df_feature.plot(x='variacao', y = 'Consumo de cerveja (litros)')
plt.xlabel('variacao', fontsize = 15)
plt.ylabel('Consumo de cerveja (litros)',fontsize = 15)
plt.grid()
# Drop the date column, inspect correlations, and plot
df_feature = df_feature.drop(columns = 'Data')
df_feature.corr().round(3)
plt.figure()
sns.pairplot(df_feature,x_vars=['Temperatura Minima (C)','Temperatura Media (C)','Temperatura Maxima (C)','Precipitacao (mm)','variacao'],
             y_vars=['Consumo de cerveja (litros)'],hue='Final de Semana',diag_kind=None)
plt.figure(2)
sns.swarmplot(x='Final de Semana',y='Consumo de cerveja (litros)',data= df_feature)
plt.grid()
plt.xlabel('Final de semana')
plt.ylabel('Consumo de cerveja [L]')
plt.figure(3)
sns.swarmplot(x = 'Final de Semana', y = 'variacao', data = df_feature)
plt.grid()
plt.xlabel('Final de semana')
plt.ylabel('variacao')
# Fit a linear regression of consumption on all remaining features
modelo = LinearRegression()
y = df_feature['Consumo de cerveja (litros)'].values
x = df_feature.drop(columns='Consumo de cerveja (litros)').values
xColunas = df_feature.drop(columns='Consumo de cerveja (litros)').columns
xTrain,xTest,yTrain,yTest = train_test_split(x,y, test_size = 0.3, random_state = 54564541)
modelo.fit(xTrain,yTrain)
yPred = modelo.predict(xTest)
res = yPred - yTest
# Evaluation
print('Valor de R2: {}'.format(modelo.score(xTest,yTest)))
print('Valor MSE: {}' .format(mean_squared_error(yTest,yPred)))
print('Coeficientes da regressão: {}'.format(modelo.coef_))
| true | true |
f7113fc325ff8b58e82cf2a55f7200040cf6703c | 9,272 | py | Python | generate_xfoil/naca4.py | nasa/airfoil-learning | a76dabc0474485d1e573471e70ec4826aeae0517 | [
"NASA-1.3"
] | null | null | null | generate_xfoil/naca4.py | nasa/airfoil-learning | a76dabc0474485d1e573471e70ec4826aeae0517 | [
"NASA-1.3"
] | null | null | null | generate_xfoil/naca4.py | nasa/airfoil-learning | a76dabc0474485d1e573471e70ec4826aeae0517 | [
"NASA-1.3"
] | null | null | null | """
Python 2 and 3 code to generate 4 and 5 digit NACA profiles
The NACA airfoils are airfoil shapes for aircraft wings developed
by the National Advisory Committee for Aeronautics (NACA).
The shape of the NACA airfoils is described using a series of
digits following the word "NACA". The parameters in the numerical
code can be entered into equations to precisely generate the
cross-section of the airfoil and calculate its properties.
https://en.wikipedia.org/wiki/NACA_airfoil
Pots of the Matlab code available here:
http://www.mathworks.com/matlabcentral/fileexchange/19915-naca-4-digit-airfoil-generator
http://www.mathworks.com/matlabcentral/fileexchange/23241-naca-5-digit-airfoil-generator
Copyright (C) 2011 by Dirk Gorissen <dgorissen@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from math import cos, sin, tan
from math import atan
from math import pi
from math import pow
from math import sqrt
def linspace(start,stop,np):
    """Return `np` evenly spaced samples from `start` to `stop`, inclusive.

    Mirrors Matlab's linspace; note the third parameter is a count, not numpy.
    """
    span = stop - start
    last = np - 1
    return [start + span * i / last for i in range(np)]
def interpolate(xa,ya,queryPoints):
    """
    Natural cubic-spline interpolation of the points (xa, ya) at queryPoints.

    xa must be sorted ascending. Returns one interpolated value per query
    point. Recalculates the spline coefficients on every call, which is far
    from efficient but does the job for small inputs.

    Adapted from Paint Mono's SplineInterpolator, itself adapted from
    "Numerical Recipes in C" (ISBN 0-521-43108-5), section 3.3.
    """
    n = len(xa)
    u, y2 = [0]*n, [0]*n

    # Decomposition loop of the tridiagonal algorithm; y2 and u hold the
    # decomposed factors (natural boundary condition: y2[0] = y2[n-1] = 0).
    for i in range(1,n-1):
        wx = xa[i + 1] - xa[i - 1]
        sig = (xa[i] - xa[i - 1]) / wx
        p = sig * y2[i - 1] + 2.0
        y2[i] = (sig - 1.0) / p
        ddydx = (ya[i + 1] - ya[i]) / (xa[i + 1] - xa[i]) - (ya[i] - ya[i - 1]) / (xa[i] - xa[i - 1])
        u[i] = (6.0 * ddydx / wx - sig * u[i - 1]) / p

    y2[n - 1] = 0

    # Backsubstitution loop of the tridiagonal algorithm.
    for i in range(n-2,-1,-1):
        y2[i] = y2[i] * y2[i + 1] + u[i]

    # BUGFIX: size the output by the number of query points, not by len(xa).
    # The original allocated [0]*n, which returned trailing zeros (or raised
    # IndexError) whenever len(queryPoints) != len(xa).
    results = [0] * len(queryPoints)

    # Evaluate the spline at each query point.
    for i in range(len(queryPoints)):
        # Bisection search for the bracketing interval [xa[klo], xa[khi]].
        klo = 0
        khi = n - 1
        while (khi - klo > 1):
            k = (khi + klo) >> 1
            if (xa[k] > queryPoints[i]):
                khi = k
            else:
                klo = k

        h = xa[khi] - xa[klo]
        a = (xa[khi] - queryPoints[i]) / h
        b = (queryPoints[i] - xa[klo]) / h

        # Cubic spline polynomial is now evaluated.
        results[i] = a * ya[klo] + b * ya[khi] + ((a * a * a - a) * y2[klo] + (b * b * b - b) * y2[khi]) * (h * h) / 6.0

    return results
def naca4(number, n, finite_TE = False, half_cosine_spacing = False):
    """
    Returns 2*n+1 outline points in [0 1] for the given 4 digit NACA string:
    digit 1 is max camber (% chord), digit 2 its position (tenths of chord),
    digits 3-4 the max thickness (% chord).
    """
    camber = float(number[0])/100.0
    camber_pos = float(number[1])/10.0
    thickness = float(number[2:])/100.0

    # Thickness-distribution polynomial coefficients.
    a0, a1, a2, a3 = 0.2969, -0.1260, -0.3516, 0.2843
    a4 = -0.1015 if finite_TE else -0.1036  # finite vs. zero-thickness TE

    if half_cosine_spacing:
        # Half-cosine spacing clusters points near leading/trailing edges.
        x = [(0.5*(1.0-cos(b))) for b in linspace(0.0,pi,n+1)]
    else:
        x = linspace(0.0,1.0,n+1)

    yt = [5*thickness*(a0*sqrt(xx)+a1*xx+a2*pow(xx,2)+a3*pow(xx,3)+a4*pow(xx,4)) for xx in x]

    fore = [xx for xx in x if xx <= camber_pos]
    aft = [xx for xx in x if xx > camber_pos]

    if camber_pos == 0:
        # Symmetric profile: camber line is the chord itself.
        xu, yu = x, yt
        xl, yl = x, [-v for v in yt]
        zc = [0]*len(fore + aft)
    else:
        # Camber line fore and aft of the maximum-camber station.
        zc = ([camber/pow(camber_pos,2)*xx*(2*camber_pos-xx) for xx in fore]
              + [camber/pow(1-camber_pos,2)*(1-2*camber_pos+xx)*(1-xx) for xx in aft])

        slope = ([camber/pow(camber_pos,2)*(2*camber_pos-2*xx) for xx in fore]
                 + [camber/pow(1-camber_pos,2)*(2*camber_pos-2*xx) for xx in aft])
        theta = [atan(s) for s in slope]

        # Offset the thickness perpendicular to the camber line.
        xu = [xx - t * sin(th) for xx,t,th in zip(x,yt,theta)]
        yu = [c + t * cos(th) for c,t,th in zip(zc,yt,theta)]
        xl = [xx + t * sin(th) for xx,t,th in zip(x,yt,theta)]
        yl = [c - t * cos(th) for c,t,th in zip(zc,yt,theta)]

    # Upper surface TE->LE, then lower surface LE->TE (shared LE point once).
    X = xu[::-1] + xl[1:]
    Z = yu[::-1] + yl[1:]

    return X,Z
def naca5(number, n, finite_TE = False, half_cosine_spacing = False):
    """
    Returns 2*n+1 points in [0 1] for the given 5 digit NACA number string.

    Digit 1 encodes the design lift coefficient (digit * 3/20), digits 2-3
    twice the position of maximum camber (% chord), digits 4-5 the maximum
    thickness (% chord).
    """
    naca1 = int(number[0])
    naca23 = int(number[1:3])
    naca45 = int(number[3:])

    cld = naca1*(3.0/2.0)/10.0   # design lift coefficient
    p = 0.5*naca23/100.0         # position of maximum camber
    t = naca45/100.0             # maximum thickness

    # Thickness-distribution coefficients (same as the 4-digit series).
    a0 = +0.2969
    a1 = -0.1260
    a2 = -0.3516
    a3 = +0.2843

    if finite_TE:
        a4 = -0.1015  # For finite thickness trailing edge
    else:
        a4 = -0.1036  # For zero thickness trailing edge

    if half_cosine_spacing:
        beta = linspace(0.0,pi,n+1)
        x = [(0.5*(1.0-cos(x))) for x in beta]  # Half cosine based spacing
    else:
        x = linspace(0.0,1.0,n+1)

    yt = [5*t*(a0*sqrt(xx)+a1*xx+a2*pow(xx,2)+a3*pow(xx,3)+a4*pow(xx,4)) for xx in x]

    # Tabulated camber-line constants of the standard 5-digit series,
    # interpolated at the requested camber position p.
    P = [0.05,0.1,0.15,0.2,0.25]
    M = [0.0580,0.1260,0.2025,0.2900,0.3910]
    K = [361.4,51.64,15.957,6.643,3.230]

    m = interpolate(P,M,[p])[0]
    k1 = interpolate(M,K,[m])[0]

    xc1 = [xx for xx in x if xx <= p]
    xc2 = [xx for xx in x if xx > p]
    xc = xc1 + xc2

    if p == 0:
        # Symmetric profile: camber line is the chord itself.
        xu = x
        yu = yt
        xl = x
        yl = [-x for x in yt]
        zc = [0]*len(xc)
    else:
        # Camber line fore/aft of the maximum-camber station.
        yc1 = [k1/6.0*(pow(xx,3)-3*m*pow(xx,2)+ pow(m,2)*(3-m)*xx) for xx in xc1]
        yc2 = [k1/6.0*pow(m,3)*(1-xx) for xx in xc2]
        zc = [cld/0.3 * xx for xx in yc1 + yc2]

        dyc1_dx = [cld/0.3*(1.0/6.0)*k1*(3*pow(xx,2)-6*m*xx+pow(m,2)*(3-m)) for xx in xc1]
        # BUGFIX: the aft camber line is k1/6*m^3*(1-x); its slope is the
        # constant -k1/6*m^3. The original omitted the minus sign.
        dyc2_dx = [-cld/0.3*(1.0/6.0)*k1*pow(m,3)]*len(xc2)

        dyc_dx = dyc1_dx + dyc2_dx
        theta = [atan(xx) for xx in dyc_dx]

        # Offset the thickness perpendicular to the camber line.
        xu = [xx - yy * sin(zz) for xx,yy,zz in zip(x,yt,theta)]
        yu = [xx + yy * cos(zz) for xx,yy,zz in zip(zc,yt,theta)]
        xl = [xx + yy * sin(zz) for xx,yy,zz in zip(x,yt,theta)]
        yl = [xx - yy * cos(zz) for xx,yy,zz in zip(zc,yt,theta)]

    # Upper surface TE->LE, then lower surface LE->TE.
    X = xu[::-1] + xl[1:]
    Z = yu[::-1] + yl[1:]

    return X,Z
def naca(number, n, finite_TE = False, half_cosine_spacing = False):
    """Dispatch to the 4- or 5-digit generator based on the designation length."""
    if len(number)==4:
        return naca4(number, n, finite_TE, half_cosine_spacing)
    elif len(number)==5:
        return naca5(number, n, finite_TE, half_cosine_spacing)
    else:
        # BUGFIX: raise an informative ValueError (a subclass of the bare
        # Exception previously raised, so existing handlers still work).
        raise ValueError("NACA designation must have 4 or 5 digits, got %r" % (number,))
class Display(object):
    """Matplotlib helper that accumulates labelled airfoil outlines on one axes."""

    def __init__(self):
        # Imported here so the rest of the module works without matplotlib.
        import matplotlib.pyplot as plt
        self.plt = plt
        self.h = []      # plotted line handles, for the legend
        self.label = []  # legend labels, parallel to self.h
        self.fig, self.ax = self.plt.subplots()
        self.plt.axis('equal')  # equal x/y scales so profiles are undistorted
        self.plt.xlabel('x')
        self.plt.ylabel('y')
        self.ax.grid(True)

    def plot(self, X, Y,label=''):
        """Add one outline (X, Y) with an optional legend label."""
        h, = self.plt.plot(X, Y, '-', linewidth = 1)
        self.h.append(h)
        self.label.append(label)

    def show(self):
        """Pad the x-range, attach the legend and display the figure."""
        self.plt.axis((-0.1,1.1)+self.plt.axis()[2:])
        self.ax.legend(self.h, self.label)
        self.plt.show()
def demo(profNaca = ['0009', '2414', '6409'], nPoints = 240, finite_TE = False, half_cosine_spacing = False):
    """Plot the given NACA designations together on one figure.

    Larger sample set:
    ['0009', '0012', '2414', '2415', '6409', '0006', '0008', '0010', '0012', '0015']
    """
    d = Display()
    for i,p in enumerate(profNaca):
        X,Y = naca(p, nPoints, finite_TE, half_cosine_spacing)
        d.plot(X, Y, p)
    d.show()
| 31.972414 | 120 | 0.586928 |
from math import cos, sin, tan
from math import atan
from math import pi
from math import pow
from math import sqrt
def linspace(start,stop,np):
    """Return `np` evenly spaced samples from `start` to `stop`, inclusive
    (Matlab-style; the third parameter is a count, not numpy)."""
    span = stop - start
    last = np - 1
    return [start + span * i / last for i in range(np)]
def interpolate(xa,ya,queryPoints):
    """Natural cubic-spline interpolation of (xa, ya) at queryPoints.

    xa must be sorted ascending; returns one value per query point.
    Adapted from Paint Mono / "Numerical Recipes in C" section 3.3.
    """
    n = len(xa)
    u, y2 = [0]*n, [0]*n
    # Decomposition loop of the tridiagonal algorithm (natural boundaries).
    for i in range(1,n-1):
        wx = xa[i + 1] - xa[i - 1]
        sig = (xa[i] - xa[i - 1]) / wx
        p = sig * y2[i - 1] + 2.0
        y2[i] = (sig - 1.0) / p
        ddydx = (ya[i + 1] - ya[i]) / (xa[i + 1] - xa[i]) - (ya[i] - ya[i - 1]) / (xa[i] - xa[i - 1])
        u[i] = (6.0 * ddydx / wx - sig * u[i - 1]) / p
    y2[n - 1] = 0
    # Backsubstitution loop.
    for i in range(n-2,-1,-1):
        y2[i] = y2[i] * y2[i + 1] + u[i]
    # BUGFIX: size the output by len(queryPoints), not len(xa); the original
    # returned trailing zeros (or raised IndexError) when the lengths differ.
    results = [0] * len(queryPoints)
    for i in range(len(queryPoints)):
        # Bisection search for the bracketing interval.
        klo = 0
        khi = n - 1
        while (khi - klo > 1):
            k = (khi + klo) >> 1
            if (xa[k] > queryPoints[i]):
                khi = k
            else:
                klo = k
        h = xa[khi] - xa[klo]
        a = (xa[khi] - queryPoints[i]) / h
        b = (queryPoints[i] - xa[klo]) / h
        # Evaluate the cubic spline polynomial.
        results[i] = a * ya[klo] + b * ya[khi] + ((a * a * a - a) * y2[klo] + (b * b * b - b) * y2[khi]) * (h * h) / 6.0
    return results
def naca4(number, n, finite_TE = False, half_cosine_spacing = False):
    """Return 2*n+1 outline points in [0, 1] for a 4-digit NACA designation."""
    camber = float(number[0])/100.0
    camber_pos = float(number[1])/10.0
    thickness = float(number[2:])/100.0

    # Thickness-distribution polynomial coefficients.
    a0, a1, a2, a3 = 0.2969, -0.1260, -0.3516, 0.2843
    a4 = -0.1015 if finite_TE else -0.1036  # finite vs. zero-thickness TE

    if half_cosine_spacing:
        # Half-cosine spacing clusters points near leading/trailing edges.
        x = [(0.5*(1.0-cos(b))) for b in linspace(0.0,pi,n+1)]
    else:
        x = linspace(0.0,1.0,n+1)

    yt = [5*thickness*(a0*sqrt(xx)+a1*xx+a2*pow(xx,2)+a3*pow(xx,3)+a4*pow(xx,4)) for xx in x]

    fore = [xx for xx in x if xx <= camber_pos]
    aft = [xx for xx in x if xx > camber_pos]

    if camber_pos == 0:
        # Symmetric profile: camber line is the chord itself.
        xu, yu = x, yt
        xl, yl = x, [-v for v in yt]
        zc = [0]*len(fore + aft)
    else:
        # Camber line fore and aft of the maximum-camber station.
        zc = ([camber/pow(camber_pos,2)*xx*(2*camber_pos-xx) for xx in fore]
              + [camber/pow(1-camber_pos,2)*(1-2*camber_pos+xx)*(1-xx) for xx in aft])
        slope = ([camber/pow(camber_pos,2)*(2*camber_pos-2*xx) for xx in fore]
                 + [camber/pow(1-camber_pos,2)*(2*camber_pos-2*xx) for xx in aft])
        theta = [atan(s) for s in slope]
        # Offset the thickness perpendicular to the camber line.
        xu = [xx - t * sin(th) for xx,t,th in zip(x,yt,theta)]
        yu = [c + t * cos(th) for c,t,th in zip(zc,yt,theta)]
        xl = [xx + t * sin(th) for xx,t,th in zip(x,yt,theta)]
        yl = [c - t * cos(th) for c,t,th in zip(zc,yt,theta)]

    # Upper surface TE->LE, then lower surface LE->TE.
    X = xu[::-1] + xl[1:]
    Z = yu[::-1] + yl[1:]
    return X,Z
def naca5(number, n, finite_TE = False, half_cosine_spacing = False):
    """Return 2*n+1 outline points in [0, 1] for a 5-digit NACA designation."""
    naca1 = int(number[0])
    naca23 = int(number[1:3])
    naca45 = int(number[3:])
    cld = naca1*(3.0/2.0)/10.0   # design lift coefficient
    p = 0.5*naca23/100.0         # position of maximum camber
    t = naca45/100.0             # maximum thickness
    # Thickness-distribution coefficients (same as the 4-digit series).
    a0 = +0.2969
    a1 = -0.1260
    a2 = -0.3516
    a3 = +0.2843
    if finite_TE:
        a4 = -0.1015  # finite-thickness trailing edge
    else:
        a4 = -0.1036  # zero-thickness trailing edge
    if half_cosine_spacing:
        beta = linspace(0.0,pi,n+1)
        x = [(0.5*(1.0-cos(x))) for x in beta]  # cluster points at LE/TE
    else:
        x = linspace(0.0,1.0,n+1)
    yt = [5*t*(a0*sqrt(xx)+a1*xx+a2*pow(xx,2)+a3*pow(xx,3)+a4*pow(xx,4)) for xx in x]
    # Tabulated camber-line constants, interpolated at camber position p.
    P = [0.05,0.1,0.15,0.2,0.25]
    M = [0.0580,0.1260,0.2025,0.2900,0.3910]
    K = [361.4,51.64,15.957,6.643,3.230]
    m = interpolate(P,M,[p])[0]
    k1 = interpolate(M,K,[m])[0]
    xc1 = [xx for xx in x if xx <= p]
    xc2 = [xx for xx in x if xx > p]
    xc = xc1 + xc2
    if p == 0:
        # Symmetric profile: camber line is the chord itself.
        xu = x
        yu = yt
        xl = x
        yl = [-x for x in yt]
        zc = [0]*len(xc)
    else:
        # Camber line fore/aft of the maximum-camber station.
        yc1 = [k1/6.0*(pow(xx,3)-3*m*pow(xx,2)+ pow(m,2)*(3-m)*xx) for xx in xc1]
        yc2 = [k1/6.0*pow(m,3)*(1-xx) for xx in xc2]
        zc = [cld/0.3 * xx for xx in yc1 + yc2]
        dyc1_dx = [cld/0.3*(1.0/6.0)*k1*(3*pow(xx,2)-6*m*xx+pow(m,2)*(3-m)) for xx in xc1]
        # BUGFIX: the aft camber line is k1/6*m^3*(1-x); its slope is the
        # constant -k1/6*m^3. The original omitted the minus sign.
        dyc2_dx = [-cld/0.3*(1.0/6.0)*k1*pow(m,3)]*len(xc2)
        dyc_dx = dyc1_dx + dyc2_dx
        theta = [atan(xx) for xx in dyc_dx]
        # Offset the thickness perpendicular to the camber line.
        xu = [xx - yy * sin(zz) for xx,yy,zz in zip(x,yt,theta)]
        yu = [xx + yy * cos(zz) for xx,yy,zz in zip(zc,yt,theta)]
        xl = [xx + yy * sin(zz) for xx,yy,zz in zip(x,yt,theta)]
        yl = [xx - yy * cos(zz) for xx,yy,zz in zip(zc,yt,theta)]
    # Upper surface TE->LE, then lower surface LE->TE.
    X = xu[::-1] + xl[1:]
    Z = yu[::-1] + yl[1:]
    return X,Z
def naca(number, n, finite_TE = False, half_cosine_spacing = False):
    """Dispatch to the 4- or 5-digit generator based on the designation length."""
    if len(number)==4:
        return naca4(number, n, finite_TE, half_cosine_spacing)
    elif len(number)==5:
        return naca5(number, n, finite_TE, half_cosine_spacing)
    else:
        # BUGFIX: raise an informative ValueError (a subclass of the bare
        # Exception previously raised, so existing handlers still work).
        raise ValueError("NACA designation must have 4 or 5 digits, got %r" % (number,))
class Display(object):
    """Matplotlib helper that accumulates labelled airfoil outlines on one axes."""

    def __init__(self):
        # Imported here so the rest of the module works without matplotlib.
        import matplotlib.pyplot as plt
        self.plt = plt
        self.h = []      # plotted line handles, for the legend
        self.label = []  # legend labels, parallel to self.h
        self.fig, self.ax = self.plt.subplots()
        self.plt.axis('equal')  # equal x/y scales so profiles are undistorted
        self.plt.xlabel('x')
        self.plt.ylabel('y')
        self.ax.grid(True)

    def plot(self, X, Y,label=''):
        """Add one outline (X, Y) with an optional legend label."""
        h, = self.plt.plot(X, Y, '-', linewidth = 1)
        self.h.append(h)
        self.label.append(label)

    def show(self):
        """Pad the x-range, attach the legend and display the figure."""
        self.plt.axis((-0.1,1.1)+self.plt.axis()[2:])
        self.ax.legend(self.h, self.label)
        self.plt.show()
def demo(profNaca = ['0009', '2414', '6409'], nPoints = 240, finite_TE = False, half_cosine_spacing = False):
    """Plot the given NACA designations together on one figure."""
    d = Display()
    for i,p in enumerate(profNaca):
        X,Y = naca(p, nPoints, finite_TE, half_cosine_spacing)
        d.plot(X, Y, p)
    d.show()
| true | true |
f71140f71757f91e0819a2fc215c3a2331a74823 | 2,033 | py | Python | Experimental setup/Window size test/data6.py | alancsouza/chip_clas | e6df8713ae7dd70a5719af83b3b6cb5686f87e29 | [
"MIT"
] | null | null | null | Experimental setup/Window size test/data6.py | alancsouza/chip_clas | e6df8713ae7dd70a5719af83b3b6cb5686f87e29 | [
"MIT"
] | null | null | null | Experimental setup/Window size test/data6.py | alancsouza/chip_clas | e6df8713ae7dd70a5719af83b3b6cb5686f87e29 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
data6 = Breast cancer
"""
from chip_clas_new import chip_clas_new
import statistics
from functions import remove_noise
from sklearn.model_selection import train_test_split, KFold
from sklearn.preprocessing import MinMaxScaler
import numpy as np
import pandas as pd
data_name = "Breast cancer"
print(data_name)
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data'
data1 = pd.read_csv(url, sep=',', header=None, skiprows=1)
data = data1.iloc[:,1:].copy() # the first is the id
# converting object data into category dtype
data.iloc[:,5] = data.iloc[:,5].astype('category')
# encoding labels
data.iloc[:,5] = data.iloc[:,5].cat.codes
X = data.iloc[:,:-1]
min_max_scaler = MinMaxScaler(feature_range=(-1, 1)) # Normalizing data between -1 and 1
X = pd.DataFrame(min_max_scaler.fit_transform(X))
y = data.iloc[:,-1].copy() # Class: (2 for benign, 4 for malignant cancer)
y[y == 2] = 1
y[y == 4] = -1
# Filtering data:
X_new, y_new = remove_noise(X, y)
X_train, X_test, y_train, y_test = train_test_split(X_new, y_new, test_size=0.2, random_state=42)
f = open("results_window_size.txt", "a+")
f.write("\n\nDatabase: %s \n" % data_name)
f.write("Size before filter: %d \n" % X.shape[0])
f.write("Dimension: %d \n" % X.shape[1])
f.write("Size after filter: %d \n" % X_new.shape[0])
f.write("Train Size: %d \n" % X_train.shape[0])
window_size = [50, 30, 20, 10, 5, 1]
for split in window_size:
y_hat, y_test, result, runtime, final_split_size, arestas_suporte_size = chip_clas_new(X_train, X_test, y_train, y_test, method = "parallel", split_size = split)
f.write("\nSplit: %d \n" % split)
f.write("AUC: %f \n" % result)
f.write("Runtime: %d \n" % runtime)
f.write("Final_split_size: %d \n" % final_split_size)
f.write("arestas_suporte_size: %d \n" % arestas_suporte_size)
f.write("#######################################################################")
f.close() | 32.269841 | 166 | 0.666503 |
# chip_clas window-size experiment on the UCI Wisconsin breast-cancer dataset;
# results are appended to results_window_size.txt.
from chip_clas_new import chip_clas_new
import statistics
from functions import remove_noise
from sklearn.model_selection import train_test_split, KFold
from sklearn.preprocessing import MinMaxScaler
import numpy as np
import pandas as pd
data_name = "Breast cancer"
print(data_name)
# NOTE(review): the UCI file has no header row, so skiprows=1 drops the
# first record — confirm this is intended.
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data'
data1 = pd.read_csv(url, sep=',', header=None, skiprows=1)
data = data1.iloc[:,1:].copy()  # first column is the sample id
# Column 5 contains non-numeric entries; encode as integer category codes.
data.iloc[:,5] = data.iloc[:,5].astype('category')
data.iloc[:,5] = data.iloc[:,5].cat.codes
X = data.iloc[:,:-1]
min_max_scaler = MinMaxScaler(feature_range=(-1, 1))  # normalize to [-1, 1]
X = pd.DataFrame(min_max_scaler.fit_transform(X))
y = data.iloc[:,-1].copy()  # class label: 2 = benign, 4 = malignant
y[y == 2] = 1
y[y == 4] = -1
# Filter noisy samples, then split.
X_new, y_new = remove_noise(X, y)
X_train, X_test, y_train, y_test = train_test_split(X_new, y_new, test_size=0.2, random_state=42)
# Append an experiment header to the shared results file.
f = open("results_window_size.txt", "a+")
f.write("\n\nDatabase: %s \n" % data_name)
f.write("Size before filter: %d \n" % X.shape[0])
f.write("Dimension: %d \n" % X.shape[1])
f.write("Size after filter: %d \n" % X_new.shape[0])
f.write("Train Size: %d \n" % X_train.shape[0])
# Evaluate each candidate window (split) size.
window_size = [50, 30, 20, 10, 5, 1]
for split in window_size:
    y_hat, y_test, result, runtime, final_split_size, arestas_suporte_size = chip_clas_new(X_train, X_test, y_train, y_test, method = "parallel", split_size = split)

    f.write("\nSplit: %d \n" % split)
    f.write("AUC: %f \n" % result)
    f.write("Runtime: %d \n" % runtime)
    f.write("Final_split_size: %d \n" % final_split_size)
    f.write("arestas_suporte_size: %d \n" % arestas_suporte_size)
    f.write("#######################################################################")
f.close() | true | true |
f7114124785a5e85f61ee49af63f4110720e5cb2 | 2,254 | py | Python | bufu/bufu.py | indigo13love/bufu | de005ec465b3ae0688aaf1821a64573ca014e56a | [
"Apache-2.0"
] | null | null | null | bufu/bufu.py | indigo13love/bufu | de005ec465b3ae0688aaf1821a64573ca014e56a | [
"Apache-2.0"
] | null | null | null | bufu/bufu.py | indigo13love/bufu | de005ec465b3ae0688aaf1821a64573ca014e56a | [
"Apache-2.0"
] | null | null | null | import fire
import snowflake.connector
import configparser
import secrets
import pathlib
class Bufu():
    """Small CLI helper around a Snowflake connection for managing stages."""

    def connect(self):
        """Open a Snowflake connection using credentials from ~/.snowsql/config."""
        cp = configparser.ConfigParser()
        path = pathlib.Path('~/.snowsql/config')
        cp.read(path.expanduser())
        conn = snowflake.connector.connect(
            user = cp['connections']['username'],
            password = cp['connections']['password'],
            account = cp['connections']['accountname'],
            database = cp['connections']['database'],
            schema = cp['connections']['schema'],
            role = cp['connections']['rolename'],
            warehouse = cp['connections']['warehouse']
        )
        return conn

    def __init__(self):
        # One connection per instance; each command below closes it when done,
        # so an instance is effectively single-use.
        self.conn = self.connect()

    def show(self, stage=None):
        """Print stages in the schema, or files in `stage` if given (first 100)."""
        cur = self.conn.cursor(snowflake.connector.DictCursor)
        if stage is None:
            try:
                cur.execute('SHOW STAGES IN SCHEMA')
                rs = cur.fetchmany(100)  # cap output at 100 rows
                for row in rs:
                    print(row['name'])
            finally:
                self.conn.close()
        else:
            try:
                cur.execute(f'LIST @{stage}')
                rs = cur.fetchmany(100)
                for row in rs:
                    print(row['name'])
            finally:
                self.conn.close()

    def put(self, file, stage=None):
        """Upload a local file to `stage`, creating a random bufu_* stage if omitted."""
        path = pathlib.Path(file)
        cur = self.conn.cursor()
        if stage is None:
            # Random suffix avoids collisions with existing stages.
            stage = f'bufu_{secrets.token_hex(8)}'
            cur.execute(f'CREATE STAGE {stage}')
            print(f'Stage "{stage}" created.')
        try:
            cur.execute(f'put {path.resolve().as_uri()} @{stage}')
            print(f'File "{path.resolve()}" was uploaded to stage "{stage}".')
        finally:
            self.conn.close()

    def create(self, stage):
        """Create a named stage."""
        try:
            cur = self.conn.cursor()
            cur.execute(f'CREATE STAGE {stage}')
            print(f'Stage "{stage}" created.')
        finally:
            self.conn.close()
def main():
    """CLI entry point: expose Bufu commands via python-fire, always closing the connection."""
    # BUGFIX: construct outside the try block. If Bufu() itself raised, `b`
    # was unbound and the finally clause failed with NameError, masking the
    # real error.
    b = Bufu()
    try:
        fire.Fire({
            'show': b.show,
            'create': b.create,
            'put': b.put
        })
    finally:
        b.conn.close()
| 29.657895 | 78 | 0.5 | import fire
import snowflake.connector
import configparser
import secrets
import pathlib
class Bufu():
    """Small CLI helper around a Snowflake connection for managing stages."""

    def connect(self):
        """Open a Snowflake connection using credentials from ~/.snowsql/config."""
        cp = configparser.ConfigParser()
        path = pathlib.Path('~/.snowsql/config')
        cp.read(path.expanduser())
        conn = snowflake.connector.connect(
            user = cp['connections']['username'],
            password = cp['connections']['password'],
            account = cp['connections']['accountname'],
            database = cp['connections']['database'],
            schema = cp['connections']['schema'],
            role = cp['connections']['rolename'],
            warehouse = cp['connections']['warehouse']
        )
        return conn

    def __init__(self):
        # One connection per instance; each command closes it, so the
        # instance is effectively single-use.
        self.conn = self.connect()

    def show(self, stage=None):
        """Print stages in the schema, or files in `stage` if given (first 100)."""
        cur = self.conn.cursor(snowflake.connector.DictCursor)
        if stage is None:
            try:
                cur.execute('SHOW STAGES IN SCHEMA')
                rs = cur.fetchmany(100)  # cap output at 100 rows
                for row in rs:
                    print(row['name'])
            finally:
                self.conn.close()
        else:
            try:
                cur.execute(f'LIST @{stage}')
                rs = cur.fetchmany(100)
                for row in rs:
                    print(row['name'])
            finally:
                self.conn.close()

    def put(self, file, stage=None):
        """Upload a local file to `stage`, creating a random bufu_* stage if omitted."""
        path = pathlib.Path(file)
        cur = self.conn.cursor()
        if stage is None:
            # Random suffix avoids collisions with existing stages.
            stage = f'bufu_{secrets.token_hex(8)}'
            cur.execute(f'CREATE STAGE {stage}')
            print(f'Stage "{stage}" created.')
        try:
            cur.execute(f'put {path.resolve().as_uri()} @{stage}')
            print(f'File "{path.resolve()}" was uploaded to stage "{stage}".')
        finally:
            self.conn.close()

    def create(self, stage):
        """Create a named stage."""
        try:
            cur = self.conn.cursor()
            cur.execute(f'CREATE STAGE {stage}')
            print(f'Stage "{stage}" created.')
        finally:
            self.conn.close()
def main():
try:
b = Bufu()
fire.Fire({
'show': b.show,
'create': b.create,
'put': b.put
})
finally:
b.conn.close()
| true | true |
f71142d1bd2737e13ea6097fca600ac378ba836b | 16,033 | py | Python | sfa/data_process/transformation.py | lhcezx/Deteciton_3D | e98b9bb0dd96dfa112e196ec93129caf1ffef39e | [
"MIT"
] | null | null | null | sfa/data_process/transformation.py | lhcezx/Deteciton_3D | e98b9bb0dd96dfa112e196ec93129caf1ffef39e | [
"MIT"
] | null | null | null | sfa/data_process/transformation.py | lhcezx/Deteciton_3D | e98b9bb0dd96dfa112e196ec93129caf1ffef39e | [
"MIT"
] | null | null | null | import os
import sys
import math
import numpy as np
import torch
# Make the project root (the directory named "sfa") importable no matter
# where this module is executed from.
src_dir = os.path.dirname(os.path.realpath(__file__))
while not src_dir.endswith("sfa"):
    src_dir = os.path.dirname(src_dir)
if src_dir not in sys.path:
    sys.path.append(src_dir)
from config import kitti_config as cnf  # KITTI calibration matrices / constants
def angle_in_limit(angle):
    """Normalize an angle (radians) into [-pi/2, pi/2).

    Angles within 5 degrees below -pi/2 after normalization snap to +pi/2
    (the two orientations are equivalent for a symmetric box).
    """
    half_pi = np.pi / 2
    # Single closed-form reduction instead of repeated +/- pi loops.
    angle = (angle + half_pi) % np.pi - half_pi
    if abs(angle + half_pi) < 5 / 180 * np.pi:
        angle = half_pi
    return angle
# Inverse of lidar_to_camera: rectified-camera point -> velodyne point.
def camera_to_lidar(x, y, z, V2C=None, R0=None, P2=None):
    """Transform one point from the rectified camera frame to the velodyne frame.

    Falls back to the fixed calibration in ``kitti_config`` when per-frame
    ``V2C``/``R0`` matrices are not supplied.  ``P2`` is accepted only for
    interface symmetry and is unused.
    """
    point = np.array([x, y, z, 1])  # homogeneous coordinates
    if V2C is None or R0 is None:
        point = np.matmul(cnf.R0_inv, point)
        point = np.matmul(cnf.Tr_velo_to_cam_inv, point)
    else:
        rect = np.eye(4)  # embed the 3x3 rectification into a 4x4 transform
        rect[:3, :3] = R0
        point = np.matmul(np.linalg.inv(rect), point)
        point = np.matmul(inverse_rigid_trans(V2C), point)
    return tuple(point[:3])
# Forward mapping: velodyne point -> rectified-camera point.
def lidar_to_camera(x, y, z, V2C=None, R0=None, P2=None):
    """Transform one point from the velodyne frame to the rectified camera frame.

    Uses the fixed ``kitti_config`` calibration unless per-frame ``V2C``/``R0``
    are supplied.  ``P2`` is accepted only for interface symmetry.
    """
    point = np.array([x, y, z, 1])  # homogeneous coordinates
    if V2C is None or R0 is None:
        point = cnf.R0 @ (cnf.Tr_velo_to_cam @ point)
    else:
        point = R0 @ (V2C @ point)
    return tuple(point[:3])
def camera_to_lidar_point(points):
    """Map an (N, 3) array of rectified-camera points into the velodyne frame
    using the fixed calibration from ``kitti_config``.
    """
    count = points.shape[0]
    homogeneous = np.hstack([points, np.ones((count, 1))]).T  # (N, 3) -> (4, N)
    lidar = cnf.Tr_velo_to_cam_inv @ (cnf.R0_inv @ homogeneous)
    return lidar.T[:, :3].reshape(-1, 3)
def lidar_to_camera_point(points, V2C=None, R0=None):
    """Map an (N, 3) array of velodyne points into the rectified camera frame.

    Uses the ``kitti_config`` calibration unless per-frame ``V2C``/``R0``
    matrices are supplied.
    """
    count = points.shape[0]
    homogeneous = np.hstack([points, np.ones((count, 1))]).T  # (4, N)
    if V2C is None or R0 is None:
        camera = (cnf.R0 @ (cnf.Tr_velo_to_cam @ homogeneous)).T
    else:
        camera = (R0 @ (V2C @ homogeneous)).T
    return camera[:, :3].reshape(-1, 3)
# Convert whole boxes from the camera frame (yaw about y) to lidar (yaw about z).
def camera_to_lidar_box(boxes, V2C=None, R0=None, P2=None):
    """Convert (N, 7) camera-frame boxes [x, y, z, h, w, l, ry] into
    velodyne-frame boxes [x, y, z, h, w, l, rz].
    """
    converted = []
    for x, y, z, h, w, l, ry in boxes:
        position = camera_to_lidar(x, y, z, V2C=V2C, R0=R0, P2=P2)
        rz = -ry - np.pi / 2  # camera yaw (about y) -> lidar yaw (about z)
        converted.append([*position, h, w, l, rz])
    return np.array(converted).reshape(-1, 7)
# Convert whole boxes from the lidar frame (yaw about z) to camera (yaw about y).
def lidar_to_camera_box(boxes, V2C=None, R0=None, P2=None):
    """Convert (N, 7) velodyne-frame boxes [x, y, z, h, w, l, rz] into
    camera-frame boxes [x, y, z, h, w, l, ry].
    """
    converted = []
    for x, y, z, h, w, l, rz in boxes:
        position = lidar_to_camera(x, y, z, V2C=V2C, R0=R0, P2=P2)
        ry = -rz - np.pi / 2  # lidar yaw (about z) -> camera yaw (about y)
        converted.append([*position, h, w, l, ry])
    return np.array(converted).reshape(-1, 7)
def center_to_corner_box2d(boxes_center, coordinate='lidar'):
    """Convert (N, 5) BEV boxes [x, y, w, l, r] into (N, 4, 2) corner form."""
    count = boxes_center.shape[0]
    boxes3d = np.zeros((count, 7))
    boxes3d[:, [0, 1, 4, 5, 6]] = boxes_center  # z and h stay zero
    corners3d = center_to_corner_box3d(boxes3d, coordinate=coordinate)
    return corners3d[:, 0:4, 0:2]  # bottom face, x/y columns only
# Expand center-parameterized boxes into their eight 3D corner points.
def center_to_corner_box3d(boxes_center, coordinate='lidar'):
    """Convert (N, 7) boxes [x, y, z, h, w, l, r] into (N, 8, 3) corner arrays.

    Corner generation happens in the velodyne frame; camera-frame input is
    converted in, and the resulting corners are converted back out.
    """
    count = boxes_center.shape[0]
    corners_all = np.zeros((count, 8, 3), dtype=np.float32)
    if coordinate == 'camera':
        boxes_center = camera_to_lidar_box(boxes_center)

    for i, box in enumerate(boxes_center):
        center = box[0:3]
        h, w, l = box[3:6]
        yaw = box[6]  # rotation about the lidar z axis
        # Axis-aligned corner template: bottom face first (z=0), then top (z=h).
        template = np.array([
            [-l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2],
            [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2],
            [0, 0, 0, 0, h, h, h, h]])
        rotation = np.array([
            [np.cos(yaw), -np.sin(yaw), 0.0],
            [np.sin(yaw), np.cos(yaw), 0.0],
            [0.0, 0.0, 1.0]])
        # Rotate about z, then translate every corner to the box center.
        corners_all[i] = (rotation @ template + center.reshape(3, 1)).T

    if coordinate == 'camera':
        for i in range(len(corners_all)):
            corners_all[i] = lidar_to_camera_point(corners_all[i])
    return corners_all
CORNER2CENTER_AVG = True
# Fit a center-parameterized box [x, y, z, h, w, l, r] back onto 8 corner points.
def corner_to_center_box3d(boxes_corner, coordinate='camera'):
    # (N, 8, 3) -> (N, 7) x,y,z,h,w,l,ry/z
    if coordinate == 'lidar':  # fitting happens in the camera frame; convert lidar input first
        for idx in range(len(boxes_corner)):
            boxes_corner[idx] = lidar_to_camera_point(boxes_corner[idx])

    ret = []
    for roi in boxes_corner:
        if CORNER2CENTER_AVG:  # average version: estimate each dimension from all parallel edges
            roi = np.array(roi)
            # Camera y axis is height: corners 0-3 lie on the bottom face,
            # corners 4-7 on the top, so the mean difference is the height.
            h = abs(np.sum(roi[:4, 1] - roi[4:, 1]) / 4)
            # Width: mean x/z-plane distance of the four width edges.
            # Columns [0, 2] select the x and z coordinates.
            w = np.sum(
                np.sqrt(np.sum((roi[0, [0, 2]] - roi[3, [0, 2]]) ** 2)) +
                np.sqrt(np.sum((roi[1, [0, 2]] - roi[2, [0, 2]]) ** 2)) +
                np.sqrt(np.sum((roi[4, [0, 2]] - roi[7, [0, 2]]) ** 2)) +
                np.sqrt(np.sum((roi[5, [0, 2]] - roi[6, [0, 2]]) ** 2))
            ) / 4
            # Length: mean x/z-plane distance of the four length edges.
            l = np.sum(
                np.sqrt(np.sum((roi[0, [0, 2]] - roi[1, [0, 2]]) ** 2)) +
                np.sqrt(np.sum((roi[2, [0, 2]] - roi[3, [0, 2]]) ** 2)) +
                np.sqrt(np.sum((roi[4, [0, 2]] - roi[5, [0, 2]]) ** 2)) +
                np.sqrt(np.sum((roi[6, [0, 2]] - roi[7, [0, 2]]) ** 2))
            ) / 4
            x = np.sum(roi[:, 0], axis=0) / 8  # center x: mean of all 8 corners
            y = np.sum(roi[0:4, 1], axis=0) / 4  # center y: mean of the 4 bottom corners
            z = np.sum(roi[:, 2], axis=0) / 8  # center z: mean of all 8 corners
            # Yaw: average of 8 edge-direction estimates.
            ry = np.sum(
                math.atan2(roi[2, 0] - roi[1, 0], roi[2, 2] - roi[1, 2]) +
                math.atan2(roi[6, 0] - roi[5, 0], roi[6, 2] - roi[5, 2]) +
                math.atan2(roi[3, 0] - roi[0, 0], roi[3, 2] - roi[0, 2]) +
                math.atan2(roi[7, 0] - roi[4, 0], roi[7, 2] - roi[4, 2]) +
                math.atan2(roi[0, 2] - roi[1, 2], roi[1, 0] - roi[0, 0]) +
                math.atan2(roi[4, 2] - roi[5, 2], roi[5, 0] - roi[4, 0]) +
                math.atan2(roi[3, 2] - roi[2, 2], roi[2, 0] - roi[3, 0]) +
                math.atan2(roi[7, 2] - roi[6, 2], roi[6, 0] - roi[7, 0])
            ) / 8

            # NOTE(review): BOTH branches below swap w and l, so the two
            # dimensions are exchanged whenever they differ; the max version
            # further down only swaps when w > l.  Confirm which is intended.
            if w > l:
                w, l = l, w
                ry = ry - np.pi / 2
            elif l > w:
                l, w = w, l
                ry = ry - np.pi / 2
            ret.append([x, y, z, h, w, l, ry])
        else:  # max version
            h = max(abs(roi[:4, 1] - roi[4:, 1]))
            # NOTE(review): the argument of np.max/np.sum below is a scalar
            # sum of the four edge lengths, so np.max is a no-op here —
            # presumably a per-edge max was intended; verify before relying on it.
            w = np.max(
                np.sqrt(np.sum((roi[0, [0, 2]] - roi[3, [0, 2]]) ** 2)) +
                np.sqrt(np.sum((roi[1, [0, 2]] - roi[2, [0, 2]]) ** 2)) +
                np.sqrt(np.sum((roi[4, [0, 2]] - roi[7, [0, 2]]) ** 2)) +
                np.sqrt(np.sum((roi[5, [0, 2]] - roi[6, [0, 2]]) ** 2))
            )
            l = np.max(
                np.sqrt(np.sum((roi[0, [0, 2]] - roi[1, [0, 2]]) ** 2)) +
                np.sqrt(np.sum((roi[2, [0, 2]] - roi[3, [0, 2]]) ** 2)) +
                np.sqrt(np.sum((roi[4, [0, 2]] - roi[5, [0, 2]]) ** 2)) +
                np.sqrt(np.sum((roi[6, [0, 2]] - roi[7, [0, 2]]) ** 2))
            )
            x = np.sum(roi[:, 0], axis=0) / 8
            y = np.sum(roi[0:4, 1], axis=0) / 4
            z = np.sum(roi[:, 2], axis=0) / 8
            ry = np.sum(
                math.atan2(roi[2, 0] - roi[1, 0], roi[2, 2] - roi[1, 2]) +
                math.atan2(roi[6, 0] - roi[5, 0], roi[6, 2] - roi[5, 2]) +
                math.atan2(roi[3, 0] - roi[0, 0], roi[3, 2] - roi[0, 2]) +
                math.atan2(roi[7, 0] - roi[4, 0], roi[7, 2] - roi[4, 2]) +
                math.atan2(roi[0, 2] - roi[1, 2], roi[1, 0] - roi[0, 0]) +
                math.atan2(roi[4, 2] - roi[5, 2], roi[5, 0] - roi[4, 0]) +
                math.atan2(roi[3, 2] - roi[2, 2], roi[2, 0] - roi[3, 0]) +
                math.atan2(roi[7, 2] - roi[6, 2], roi[6, 0] - roi[7, 0])
            ) / 8
            if w > l:
                w, l = l, w
                ry = angle_in_limit(ry + np.pi / 2)
            ret.append([x, y, z, h, w, l, ry])
    if coordinate == 'lidar':
        ret = camera_to_lidar_box(np.array(ret))

    return np.array(ret)
def point_transform(points, tx, ty, tz, rx=0, ry=0, rz=0):
    """Translate then rotate an (N, 3) point cloud (row-vector convention).

    The translation is applied first, then rotations about the x, y and z
    axes in that order.  Angles are in radians.  Returns the transformed
    (N, 3) array.
    """
    count = points.shape[0]
    homogeneous = np.hstack([points, np.ones((count, 1))])

    # Translation matrix with the offset in the bottom row (post-multiply form).
    translation = np.eye(4)
    translation[3, 0:3] = tx, ty, tz
    homogeneous = homogeneous @ translation

    cos, sin = np.cos, np.sin
    if rx != 0:
        # rotation about the x axis
        homogeneous = homogeneous @ np.array([
            [1, 0, 0, 0],
            [0, cos(rx), -sin(rx), 0],
            [0, sin(rx), cos(rx), 0],
            [0, 0, 0, 1],
        ])
    if ry != 0:
        # rotation about the y axis
        homogeneous = homogeneous @ np.array([
            [cos(ry), 0, sin(ry), 0],
            [0, 1, 0, 0],
            [-sin(ry), 0, cos(ry), 0],
            [0, 0, 0, 1],
        ])
    if rz != 0:
        # rotation about the z axis
        homogeneous = homogeneous @ np.array([
            [cos(rz), -sin(rz), 0, 0],
            [sin(rz), cos(rz), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ])
    return homogeneous[:, 0:3]
# Apply a rigid transform to labels; works in whichever frame the labels use.
def box_transform(boxes, tx, ty, tz, r=0, coordinate='lidar'):
    """Apply translation + rotation to (N, 7) boxes [x, y, z, h, w, l, rz/ry].

    The boxes are expanded to their 8 corners, each corner set is transformed
    (rotation about z in lidar coordinates, about y in camera coordinates),
    and the result is fitted back to center form.
    """
    corners = center_to_corner_box3d(boxes, coordinate=coordinate)  # (N, 8, 3)
    rotation_kw = 'rz' if coordinate == 'lidar' else 'ry'
    for i in range(len(corners)):
        corners[i] = point_transform(corners[i], tx, ty, tz, **{rotation_kw: r})

    return corner_to_center_box3d(corners, coordinate=coordinate)
# Invert a rigid-body transform without a general matrix inverse.
def inverse_rigid_trans(Tr):
    """Invert a 3x4 rigid transform [R|t]; the inverse is [R' | -R' t]."""
    rotation = Tr[0:3, 0:3]
    inverse = np.zeros_like(Tr)  # 3x4
    inverse[0:3, 0:3] = rotation.T
    inverse[0:3, 3] = -(rotation.T @ Tr[0:3, 3])
    return inverse
# Chain several augmentations behind a single probability gate.
class Compose(object):
    """Apply a list of ``(lidar, labels) -> (lidar, labels)`` transforms.

    With probability ``p`` the whole chain runs in order; otherwise the
    inputs pass through unchanged.
    """

    def __init__(self, transforms, p=1.0):
        self.transforms = transforms
        self.p = p

    def __call__(self, lidar, labels):
        if np.random.random() <= self.p:
            for transform in self.transforms:
                lidar, labels = transform(lidar, labels)
        return lidar, labels
# Apply exactly one randomly chosen augmentation behind a probability gate.
class OneOf(object):
    """With probability ``p``, apply one transform picked uniformly at random."""

    def __init__(self, transforms, p=1.0):
        self.transforms = transforms
        self.p = p

    def __call__(self, lidar, labels):
        if np.random.random() <= self.p:
            picked = self.transforms[np.random.randint(low=0, high=len(self.transforms))]
            lidar, labels = picked(lidar, labels)
        return lidar, labels
class Random_Rotation(object):
    """Randomly rotate the point cloud and its boxes about the z axis."""

    def __init__(self, limit_angle=np.pi / 4, p=0.5):
        self.limit_angle = limit_angle
        self.p = p

    def __call__(self, lidar, labels):
        """Rotate ``lidar`` xyz columns and ``labels`` (N', 7) by a random yaw.

        The yaw is drawn uniformly from [-limit_angle, limit_angle].
        """
        if np.random.random() <= self.p:
            yaw = np.random.uniform(-self.limit_angle, self.limit_angle)
            # Rotate the cloud about z, then rotate the matching boxes.
            lidar[:, 0:3] = point_transform(lidar[:, 0:3], 0, 0, 0, rz=yaw)
            labels = box_transform(labels, 0, 0, 0, r=yaw, coordinate='lidar')

        return lidar, labels
class Random_Scaling(object):
    """Randomly scale the point cloud and boxes by one global factor."""

    def __init__(self, scaling_range=(0.95, 1.05), p=0.5):
        self.scaling_range = scaling_range
        self.p = p

    def __call__(self, lidar, labels):
        """Scale ``lidar`` xyz columns and ``labels`` (N', 7) [x..l] in place.

        :param labels: (N', 7) boxes [x, y, z, h, w, l, r]; the yaw column is
            untouched because uniform scaling does not change angles.
        :return: the (possibly scaled) ``(lidar, labels)`` pair
        """
        if np.random.random() <= self.p:
            # Bug fix: the upper bound was previously ``scaling_range[0]`` as
            # well, so the factor was always the lower end of the range.
            factor = np.random.uniform(self.scaling_range[0], self.scaling_range[1])
            lidar[:, 0:3] = lidar[:, 0:3] * factor
            labels[:, 0:6] = labels[:, 0:6] * factor

        return lidar, labels
class Cutout(object):
    """Randomly mask out one or more rectangular patches from a BEV image.

    Targets whose (normalized) center falls inside a masked patch are dropped.

    Args:
        n_holes (int): number of patches to cut out of each image.
        ratio (float): patch size as a fraction of the image height/width.
        fill_value (float): value written into the masked area, in [0, 1].
        p (float): probability of applying the augmentation at all.

    Refer from: https://github.com/uoguelph-mlrg/Cutout/blob/master/util/cutout.py
    """

    def __init__(self, n_holes, ratio, fill_value=0., p=1.0):
        self.n_holes = n_holes
        self.ratio = ratio
        assert 0. <= fill_value <= 1., "the fill value is in a range of 0 to 1"
        self.fill_value = fill_value
        self.p = p

    def __call__(self, img, targets):
        """
        Args:
            img (Tensor): image tensor of size (C, H, W).
            targets: indexable target array; each row is unpacked as
                8 fields (_, _, x, y, w, l, _, _) with x/y presumably
                normalized centers — TODO confirm against the caller.
        Returns:
            (img, targets): image with ``n_holes`` patches filled with
            ``fill_value`` and the surviving targets.
        """
        if np.random.random() <= self.p:
            h = img.size(1)
            w = img.size(2)
            h_cutout = int(self.ratio * h)
            w_cutout = int(self.ratio * w)

            for n in range(self.n_holes):
                # Random patch center, clipped so the patch stays in bounds.
                y = np.random.randint(h)
                x = np.random.randint(w)
                y1 = np.clip(y - h_cutout // 2, 0, h)
                y2 = np.clip(y + h_cutout // 2, 0, h)
                x1 = np.clip(x - w_cutout // 2, 0, w)
                x2 = np.clip(x + w_cutout // 2, 0, w)
                img[:, y1: y2, x1: x2] = self.fill_value  # Zero out the selected area
                # Remove targets whose center lies inside the selected area
                keep_target = []
                for target_idx, target in enumerate(targets):
                    _, _, target_x, target_y, target_w, target_l, _, _ = target
                    if (x1 <= target_x * w <= x2) and (y1 <= target_y * h <= y2):
                        continue
                    keep_target.append(target_idx)
                targets = targets[keep_target]

        return img, targets
| 36.11036 | 122 | 0.51132 | import os
import sys
import math
import numpy as np
import torch
src_dir = os.path.dirname(os.path.realpath(__file__))
while not src_dir.endswith("sfa"):
src_dir = os.path.dirname(src_dir)
if src_dir not in sys.path:
sys.path.append(src_dir)
from config import kitti_config as cnf
def angle_in_limit(angle):
limit_degree = 5
while angle >= np.pi / 2:
angle -= np.pi
while angle < -np.pi / 2:
angle += np.pi
if abs(angle + np.pi / 2) < limit_degree / 180 * np.pi:
angle = np.pi / 2
return angle
def camera_to_lidar(x, y, z, V2C=None, R0=None, P2=None):
p = np.array([x, y, z, 1])
if V2C is None or R0 is None:
p = np.matmul(cnf.R0_inv, p)
p = np.matmul(cnf.Tr_velo_to_cam_inv, p)
else:
R0_i = np.zeros((4, 4))
R0_i[:3, :3] = R0
R0_i[3, 3] = 1
p = np.matmul(np.linalg.inv(R0_i), p)
p = np.matmul(inverse_rigid_trans(V2C), p)
p = p[0:3]
return tuple(p)
def lidar_to_camera(x, y, z, V2C=None, R0=None, P2=None):
p = np.array([x, y, z, 1])
if V2C is None or R0 is None:
p = np.matmul(cnf.Tr_velo_to_cam, p)
p = np.matmul(cnf.R0, p)
else:
p = np.matmul(V2C, p)
p = np.matmul(R0, p)
p = p[0:3]
return tuple(p)
def camera_to_lidar_point(points):
N = points.shape[0]
points = np.hstack([points, np.ones((N, 1))]).T
points = np.matmul(cnf.R0_inv, points)
points = np.matmul(cnf.Tr_velo_to_cam_inv, points).T
points = points[:, 0:3]
return points.reshape(-1, 3)
def lidar_to_camera_point(points, V2C=None, R0=None):
N = points.shape[0]
points = np.hstack([points, np.ones((N, 1))]).T
if V2C is None or R0 is None:
points = np.matmul(cnf.Tr_velo_to_cam, points)
points = np.matmul(cnf.R0, points).T
else:
points = np.matmul(V2C, points)
points = np.matmul(R0, points).T
points = points[:, 0:3]
return points.reshape(-1, 3)
def camera_to_lidar_box(boxes, V2C=None, R0=None, P2=None):
ret = []
for box in boxes:
x, y, z, h, w, l, ry = box
(x, y, z), h, w, l, rz = camera_to_lidar(x, y, z, V2C=V2C, R0=R0, P2=P2), h, w, l, -ry - np.pi / 2
ret.append([x, y, z, h, w, l, rz])
return np.array(ret).reshape(-1, 7)
def lidar_to_camera_box(boxes, V2C=None, R0=None, P2=None):
ret = []
for box in boxes:
x, y, z, h, w, l, rz = box
(x, y, z), h, w, l, ry = lidar_to_camera(x, y, z, V2C=V2C, R0=R0, P2=P2), h, w, l, -rz - np.pi / 2
ret.append([x, y, z, h, w, l, ry])
return np.array(ret).reshape(-1, 7)
def center_to_corner_box2d(boxes_center, coordinate='lidar'):
N = boxes_center.shape[0]
boxes3d_center = np.zeros((N, 7))
boxes3d_center[:, [0, 1, 4, 5, 6]] = boxes_center
boxes3d_corner = center_to_corner_box3d(boxes3d_center, coordinate=coordinate)
return boxes3d_corner[:, 0:4, 0:2]
def center_to_corner_box3d(boxes_center, coordinate='lidar'):
N = boxes_center.shape[0]
ret = np.zeros((N, 8, 3), dtype=np.float32)
if coordinate == 'camera':
boxes_center = camera_to_lidar_box(boxes_center)
for i in range(N):
box = boxes_center[i]
translation = box[0:3]
size = box[3:6]
rotation = [0, 0, box[-1]]
h, w, l = size[0], size[1], size[2]
trackletBox = np.array([
[-l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2], \
[w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2], \
[0, 0, 0, 0, h, h, h, h]])
yaw = rotation[2]
rotMat = np.array([
[np.cos(yaw), -np.sin(yaw), 0.0],
[np.sin(yaw), np.cos(yaw), 0.0],
[0.0, 0.0, 1.0]])
cornerPosInVelo = np.dot(rotMat, trackletBox) + np.tile(translation, (8, 1)).T
box3d = cornerPosInVelo.transpose()
ret[i] = box3d
if coordinate == 'camera':
for idx in range(len(ret)):
ret[idx] = lidar_to_camera_point(ret[idx])
return ret
CORNER2CENTER_AVG = True
def corner_to_center_box3d(boxes_corner, coordinate='camera'):
if coordinate == 'lidar':
for idx in range(len(boxes_corner)):
boxes_corner[idx] = lidar_to_camera_point(boxes_corner[idx])
ret = []
for roi in boxes_corner:
if CORNER2CENTER_AVG:
roi = np.array(roi)
h = abs(np.sum(roi[:4, 1] - roi[4:, 1]) / 4)
w = np.sum(
np.sqrt(np.sum((roi[0, [0, 2]] - roi[3, [0, 2]]) ** 2)) +
np.sqrt(np.sum((roi[1, [0, 2]] - roi[2, [0, 2]]) ** 2)) +
np.sqrt(np.sum((roi[4, [0, 2]] - roi[7, [0, 2]]) ** 2)) +
np.sqrt(np.sum((roi[5, [0, 2]] - roi[6, [0, 2]]) ** 2))
) / 4
l = np.sum(
np.sqrt(np.sum((roi[0, [0, 2]] - roi[1, [0, 2]]) ** 2)) +
np.sqrt(np.sum((roi[2, [0, 2]] - roi[3, [0, 2]]) ** 2)) +
np.sqrt(np.sum((roi[4, [0, 2]] - roi[5, [0, 2]]) ** 2)) +
np.sqrt(np.sum((roi[6, [0, 2]] - roi[7, [0, 2]]) ** 2))
) / 4
x = np.sum(roi[:, 0], axis=0) / 8
y = np.sum(roi[0:4, 1], axis=0) / 4
z = np.sum(roi[:, 2], axis=0) / 8
ry = np.sum(
math.atan2(roi[2, 0] - roi[1, 0], roi[2, 2] - roi[1, 2]) +
math.atan2(roi[6, 0] - roi[5, 0], roi[6, 2] - roi[5, 2]) +
math.atan2(roi[3, 0] - roi[0, 0], roi[3, 2] - roi[0, 2]) +
math.atan2(roi[7, 0] - roi[4, 0], roi[7, 2] - roi[4, 2]) +
math.atan2(roi[0, 2] - roi[1, 2], roi[1, 0] - roi[0, 0]) +
math.atan2(roi[4, 2] - roi[5, 2], roi[5, 0] - roi[4, 0]) +
math.atan2(roi[3, 2] - roi[2, 2], roi[2, 0] - roi[3, 0]) +
math.atan2(roi[7, 2] - roi[6, 2], roi[6, 0] - roi[7, 0])
) / 8
if w > l:
w, l = l, w
ry = ry - np.pi / 2
elif l > w:
l, w = w, l
ry = ry - np.pi / 2
ret.append([x, y, z, h, w, l, ry])
else:
h = max(abs(roi[:4, 1] - roi[4:, 1]))
w = np.max(
np.sqrt(np.sum((roi[0, [0, 2]] - roi[3, [0, 2]]) ** 2)) +
np.sqrt(np.sum((roi[1, [0, 2]] - roi[2, [0, 2]]) ** 2)) +
np.sqrt(np.sum((roi[4, [0, 2]] - roi[7, [0, 2]]) ** 2)) +
np.sqrt(np.sum((roi[5, [0, 2]] - roi[6, [0, 2]]) ** 2))
)
l = np.max(
np.sqrt(np.sum((roi[0, [0, 2]] - roi[1, [0, 2]]) ** 2)) +
np.sqrt(np.sum((roi[2, [0, 2]] - roi[3, [0, 2]]) ** 2)) +
np.sqrt(np.sum((roi[4, [0, 2]] - roi[5, [0, 2]]) ** 2)) +
np.sqrt(np.sum((roi[6, [0, 2]] - roi[7, [0, 2]]) ** 2))
)
x = np.sum(roi[:, 0], axis=0) / 8
y = np.sum(roi[0:4, 1], axis=0) / 4
z = np.sum(roi[:, 2], axis=0) / 8
ry = np.sum(
math.atan2(roi[2, 0] - roi[1, 0], roi[2, 2] - roi[1, 2]) +
math.atan2(roi[6, 0] - roi[5, 0], roi[6, 2] - roi[5, 2]) +
math.atan2(roi[3, 0] - roi[0, 0], roi[3, 2] - roi[0, 2]) +
math.atan2(roi[7, 0] - roi[4, 0], roi[7, 2] - roi[4, 2]) +
math.atan2(roi[0, 2] - roi[1, 2], roi[1, 0] - roi[0, 0]) +
math.atan2(roi[4, 2] - roi[5, 2], roi[5, 0] - roi[4, 0]) +
math.atan2(roi[3, 2] - roi[2, 2], roi[2, 0] - roi[3, 0]) +
math.atan2(roi[7, 2] - roi[6, 2], roi[6, 0] - roi[7, 0])
) / 8
if w > l:
w, l = l, w
ry = angle_in_limit(ry + np.pi / 2)
ret.append([x, y, z, h, w, l, ry])
if coordinate == 'lidar':
ret = camera_to_lidar_box(np.array(ret))
return np.array(ret)
def point_transform(points, tx, ty, tz, rx=0, ry=0, rz=0):
N = points.shape[0]
points = np.hstack([points, np.ones((N, 1))])
mat1 = np.eye(4)
mat1[3, 0:3] = tx, ty, tz
points = np.matmul(points, mat1)
if rx != 0:
mat = np.zeros((4, 4))
mat[0, 0] = 1
mat[3, 3] = 1
mat[1, 1] = np.cos(rx)
mat[1, 2] = -np.sin(rx)
mat[2, 1] = np.sin(rx)
mat[2, 2] = np.cos(rx)
points = np.matmul(points, mat)
if ry != 0:
mat = np.zeros((4, 4))
mat[1, 1] = 1
mat[3, 3] = 1
mat[0, 0] = np.cos(ry)
mat[0, 2] = np.sin(ry)
mat[2, 0] = -np.sin(ry)
mat[2, 2] = np.cos(ry)
points = np.matmul(points, mat)
if rz != 0:
mat = np.zeros((4, 4))
mat[2, 2] = 1
mat[3, 3] = 1
mat[0, 0] = np.cos(rz)
mat[0, 1] = -np.sin(rz)
mat[1, 0] = np.sin(rz)
mat[1, 1] = np.cos(rz)
points = np.matmul(points, mat)
return points[:, 0:3]
def box_transform(boxes, tx, ty, tz, r=0, coordinate='lidar'):
boxes_corner = center_to_corner_box3d(boxes, coordinate=coordinate)
for idx in range(len(boxes_corner)):
if coordinate == 'lidar':
boxes_corner[idx] = point_transform(boxes_corner[idx], tx, ty, tz, rz=r)
else:
boxes_corner[idx] = point_transform(boxes_corner[idx], tx, ty, tz, ry=r)
return corner_to_center_box3d(boxes_corner, coordinate=coordinate)
def inverse_rigid_trans(Tr):
inv_Tr = np.zeros_like(Tr)
inv_Tr[0:3, 0:3] = np.transpose(Tr[0:3, 0:3])
inv_Tr[0:3, 3] = np.dot(-np.transpose(Tr[0:3, 0:3]), Tr[0:3, 3])
return inv_Tr
class Compose(object):
def __init__(self, transforms, p=1.0):
self.transforms = transforms
self.p = p
def __call__(self, lidar, labels):
if np.random.random() <= self.p:
for t in self.transforms:
lidar, labels = t(lidar, labels)
return lidar, labels
class OneOf(object):
def __init__(self, transforms, p=1.0):
self.transforms = transforms
self.p = p
def __call__(self, lidar, labels):
if np.random.random() <= self.p:
choice = np.random.randint(low=0, high=len(self.transforms))
lidar, labels = self.transforms[choice](lidar, labels)
return lidar, labels
class Random_Rotation(object):
def __init__(self, limit_angle=np.pi / 4, p=0.5):
self.limit_angle = limit_angle
self.p = p
def __call__(self, lidar, labels):
if np.random.random() <= self.p:
angle = np.random.uniform(-self.limit_angle, self.limit_angle)
lidar[:, 0:3] = point_transform(lidar[:, 0:3], 0, 0, 0, rz=angle)
labels = box_transform(labels, 0, 0, 0, r=angle, coordinate='lidar')
return lidar, labels
class Random_Scaling(object):
def __init__(self, scaling_range=(0.95, 1.05), p=0.5):
self.scaling_range = scaling_range
self.p = p
def __call__(self, lidar, labels):
if np.random.random() <= self.p:
factor = np.random.uniform(self.scaling_range[0], self.scaling_range[0])
lidar[:, 0:3] = lidar[:, 0:3] * factor
labels[:, 0:6] = labels[:, 0:6] * factor
return lidar, labels
class Cutout(object):
def __init__(self, n_holes, ratio, fill_value=0., p=1.0):
self.n_holes = n_holes
self.ratio = ratio
assert 0. <= fill_value <= 1., "the fill value is in a range of 0 to 1"
self.fill_value = fill_value
self.p = p
def __call__(self, img, targets):
if np.random.random() <= self.p:
h = img.size(1)
w = img.size(2)
h_cutout = int(self.ratio * h)
w_cutout = int(self.ratio * w)
for n in range(self.n_holes):
y = np.random.randint(h)
x = np.random.randint(w)
y1 = np.clip(y - h_cutout // 2, 0, h)
y2 = np.clip(y + h_cutout // 2, 0, h)
x1 = np.clip(x - w_cutout // 2, 0, w)
x2 = np.clip(x + w_cutout // 2, 0, w)
img[:, y1: y2, x1: x2] = self.fill_value
keep_target = []
for target_idx, target in enumerate(targets):
_, _, target_x, target_y, target_w, target_l, _, _ = target
if (x1 <= target_x * w <= x2) and (y1 <= target_y * h <= y2):
continue
keep_target.append(target_idx)
targets = targets[keep_target]
return img, targets
| true | true |
f71142dc21ca7466db972f45836c427d9d863a33 | 9,539 | py | Python | src/neuro_comma/dataset.py | art-vish/neuro-comma | 148ff7150e92d734d926a576c50bcabf1ae0ec0a | [
"MIT"
] | 1 | 2021-11-12T21:05:33.000Z | 2021-11-12T21:05:33.000Z | src/neuro_comma/dataset.py | art-vish/neuro-comma | 148ff7150e92d734d926a576c50bcabf1ae0ec0a | [
"MIT"
] | null | null | null | src/neuro_comma/dataset.py | art-vish/neuro-comma | 148ff7150e92d734d926a576c50bcabf1ae0ec0a | [
"MIT"
] | null | null | null | from typing import Dict, List, Optional, Tuple, Union
from typing_extensions import TypedDict
import numpy as np
import torch
from torch import Tensor
from tqdm import tqdm
from transformers import PreTrainedTokenizer
from neuro_comma.augmentation import AUGMENTATIONS
from neuro_comma.pretrained import TOKEN_IDX
class BaseDataset(torch.utils.data.Dataset):
    """Token/punctuation dataset shared by training and inference.

    Reads tab-separated ``token<TAB>target`` lines, sub-tokenizes the words
    with a BERT-style tokenizer and packs them into fixed-length sequences.
    """

    def __init__(self,
                 files: Union[str, List[str]],
                 tokenizer: PreTrainedTokenizer,
                 targets: Dict[str, int],
                 sequence_len: int,
                 token_style: str,
                 *args,
                 **kwargs) -> None:
        """
        Args:
            files: single path or list of paths to tab-separated data files
            tokenizer: tokenizer that splits words into sub-word pieces
            targets: mapping from punctuation label to class index
            sequence_len: fixed length of each packed sequence
            token_style: key into ``pretrained.TOKEN_IDX`` for special tokens
        """
        self.tokenizer = tokenizer
        self.targets = targets
        self.seq_len = sequence_len
        self.token_style = token_style

        if isinstance(files, list):
            self.data = []
            for file in files:
                self.data += self._parse_data(file, *args, **kwargs)
        else:
            self.data = self._parse_data(files, *args, **kwargs)

    def _parse_data(self, file_path: str, *args, **kwargs) -> List[List[List[int]]]:
        """Parse one data file into packed training items.

        Args:
            file_path: text file with one ``token<TAB>target`` pair per line

        Returns:
            list of ``[x, word_ids, attn_mask, y, y_mask]`` items, each of
            length ``self.seq_len``; y_mask marks positions that carry a real
            target (padding and intermediate sub-word tokens are masked out).
        """
        words, labels = [], []
        with open(file_path, 'r', encoding='utf-8') as file:
            for line in file:
                line = line.strip()
                if not line:
                    continue
                parts = line.rsplit('\t', 1)
                # Skip malformed lines that lack a tab-separated target.
                if len(parts) != 2:
                    continue
                words.append(parts[0])
                labels.append(self.targets[parts[1]])
        return self.parse_tokens(words, self.tokenizer, self.seq_len, self.token_style, labels, *args, **kwargs)

    @classmethod
    def parse_tokens(cls,
                     tokens: Union[List[str], Tuple[str]],
                     tokenizer: PreTrainedTokenizer,
                     seq_len: int,
                     token_style: str,
                     targets: Optional[List[int]] = None,
                     *args,
                     **kwargs) -> List[List[List[int]]]:
        """Pack pre-split words into fixed-length model sequences.

        Args:
            tokens: pre-split words
            tokenizer: tokenizer that splits words into sub-word pieces
            seq_len: length of each packed sequence
            token_style: key into ``pretrained.TOKEN_IDX``
            targets: per-word class indices (omit for inference)

        Returns:
            list of ``[x, word_ids, attn_mask, y, y_mask]`` items::

                tokens    : [token token ##token PAD ]
                x         : [321   1233  23121   101 ]
                y         : [tar   0     tar     0   ]
                y_mask    : [1     0     1       0   ]
                attn_mask : [1     1     1       0   ]
        """
        data_items = []
        idx = 0  # index of the next whole word to consume
        debug = kwargs.get('debug')
        if debug:
            pbar = tqdm(total=len(tokens))

        # Loop until the entire text has been packed.
        while idx < len(tokens):
            x = [TOKEN_IDX[token_style]['START_SEQ']]
            w_id = [-1]  # word index per sub-token (-1 for special tokens)
            y = [0]
            y_mask = [1] if targets else [0]

            # Fill until seq_len - 1, reserving one slot for END_SEQ.
            while len(x) < seq_len - 1 and idx < len(tokens):
                word_pieces = tokenizer.tokenize(tokens[idx])
                # If this word's pieces would overflow the sequence, pad the
                # current sequence and start the next one from this word.
                if len(word_pieces) + len(x) >= seq_len:
                    break
                # Intermediate sub-word pieces carry no target (y_mask == 0);
                # only the last piece of each word is scored.
                for i in range(len(word_pieces) - 1):
                    x.append(tokenizer.convert_tokens_to_ids(word_pieces[i]))
                    w_id.append(idx)
                    y.append(0)
                    y_mask.append(0)
                if len(word_pieces) > 0:
                    x.append(tokenizer.convert_tokens_to_ids(word_pieces[-1]))
                else:
                    x.append(TOKEN_IDX[token_style]['UNK'])
                w_id.append(idx)
                if targets:
                    y.append(targets[idx])
                else:
                    y.append(0)
                y_mask.append(1)
                idx += 1
                if debug:
                    pbar.update(1)

            x.append(TOKEN_IDX[token_style]['END_SEQ'])
            w_id.append(-1)
            y.append(0)
            if targets:
                y_mask.append(1)
            else:
                y_mask.append(0)

            # Pad the remainder of the sequence.
            if len(x) < seq_len:
                x = x + [TOKEN_IDX[token_style]['PAD'] for _ in range(seq_len - len(x))]
                w_id = w_id + [-100 for _ in range(seq_len - len(w_id))]
                y = y + [0 for _ in range(seq_len - len(y))]
                y_mask = y_mask + [0 for _ in range(seq_len - len(y_mask))]

            attn_mask = [1 if token != TOKEN_IDX[token_style]['PAD'] else 0 for token in x]
            data_items.append([x, w_id, attn_mask, y, y_mask])

        if debug:
            pbar.close()

        return data_items

    def __len__(self) -> int:
        return len(self.data)

    def __getitem__(self, index: int) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
        # Item layout is [x, word_ids, attn_mask, y, y_mask]; word_ids are
        # bookkeeping only and not returned.
        x, _, attn_mask, y, y_mask = self.data[index]
        return (torch.tensor(x),
                torch.tensor(y),
                torch.tensor(attn_mask),
                torch.tensor(y_mask))
class RepunctDataset(BaseDataset):
    """Punctuation-restoration dataset with optional token-level augmentation."""

    def __init__(self,
                 files: Union[str, List[str]],
                 tokenizer: PreTrainedTokenizer,
                 targets: Dict[str, int],
                 sequence_len: int,
                 token_style: str,
                 is_train=False,
                 augment_rate=0.,
                 augment_type='substitute',
                 *args,
                 **kwargs) -> None:
        """Preprocess data for restoring punctuation.

        Args:
            files: single file or list of text files containing tokens and
                punctuations separated by tab in lines
            tokenizer: tokenizer that will be used to further tokenize words
                for BERT-like models
            targets: dict mapping punctuation label to class index
            sequence_len: length of each sequence
            token_style: key into ``pretrained.TOKEN_IDX`` for special tokens
            is_train: if False, augmentation is never applied. Defaults to False.
            augment_rate: fraction of tokens to augment. Defaults to 0.0.
            augment_type: key into ``AUGMENTATIONS``. Defaults to 'substitute'.
        """
        super().__init__(files, tokenizer, targets, sequence_len, token_style, *args, **kwargs)
        self.is_train = is_train
        self.augment_type = augment_type
        self.augment_rate = augment_rate

    def _augment(self, x, y, y_mask):
        """Apply token-level augmentation to one packed sequence.

        Each position is independently augmented with probability
        ``self.augment_rate``; insertions/deletions are compensated by
        truncating or re-padding back to ``self.seq_len``.
        """
        x_aug = []
        y_aug = []
        y_mask_aug = []
        for i in range(len(x)):
            r = np.random.rand()
            if r < self.augment_rate:
                AUGMENTATIONS[self.augment_type](x, y, y_mask, x_aug, y_aug, y_mask_aug, i, self.token_style)
            else:
                x_aug.append(x[i])
                y_aug.append(y[i])
                y_mask_aug.append(y_mask[i])

        if len(x_aug) > self.seq_len:
            # len increased due to insert
            x_aug = x_aug[:self.seq_len]
            y_aug = y_aug[:self.seq_len]
            y_mask_aug = y_mask_aug[:self.seq_len]
        elif len(x_aug) < self.seq_len:
            # len decreased due to delete
            x_aug = x_aug + [TOKEN_IDX[self.token_style]['PAD'] for _ in range(self.seq_len - len(x_aug))]
            y_aug = y_aug + [0 for _ in range(self.seq_len - len(y_aug))]
            y_mask_aug = y_mask_aug + [0 for _ in range(self.seq_len - len(y_mask_aug))]

        # Bug fix: the attention mask must reflect the AUGMENTED sequence;
        # it was previously computed from the original ``x``, misaligning
        # pad positions whenever an insert or delete occurred.
        attn_mask = [1 if token != TOKEN_IDX[self.token_style]['PAD'] else 0 for token in x_aug]
        return x_aug, y_aug, attn_mask, y_mask_aug

    def __getitem__(self, index: int) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
        x = self.data[index][0]
        attn_mask = self.data[index][2]
        y = self.data[index][3]
        y_mask = self.data[index][4]

        if self.is_train and self.augment_rate > 0:
            x, y, attn_mask, y_mask = self._augment(x, y, y_mask)

        return (torch.tensor(x),
                torch.tensor(y),
                torch.tensor(attn_mask),
                torch.tensor(y_mask))
| 38.156 | 163 | 0.528672 | from typing import Dict, List, Optional, Tuple, Union
from typing_extensions import TypedDict
import numpy as np
import torch
from torch import Tensor
from tqdm import tqdm
from transformers import PreTrainedTokenizer
from neuro_comma.augmentation import AUGMENTATIONS
from neuro_comma.pretrained import TOKEN_IDX
class BaseDataset(torch.utils.data.Dataset):
def __init__(self,
files: Union[str, List[str]],
tokenizer: PreTrainedTokenizer,
targets: Dict[str, int],
sequence_len: int,
token_style: str,
*args,
**kwargs) -> None:
self.tokenizer = tokenizer
self.targets = targets
self.seq_len = sequence_len
self.token_style = token_style
if isinstance(files, list):
self.data = []
for file in files:
self.data += self._parse_data(file, *args, **kwargs)
else:
self.data = self._parse_data(files, *args, **kwargs)
def _parse_data(self, file_path: str, *args, **kwargs) -> List[List[List[int]]]:
with open(file_path, 'r', encoding='utf-8') as file:
x, y = [], []
for i, line in enumerate(file):
if (line.strip()):
line = line.strip()
token = line.rsplit('\t', 1)
if len(token) == 2:
x.append(token[0])
target = self.targets[token[1]]
y.append(target)
else:
continue
data = self.parse_tokens(x, self.tokenizer, self.seq_len, self.token_style, y, *args, **kwargs)
return data
@classmethod
def parse_tokens(cls,
tokens: Union[List[str], Tuple[str]],
tokenizer: PreTrainedTokenizer,
seq_len: int,
token_style: str,
targets: Optional[List[int]] = None,
*args,
**kwargs) -> List[List[List[int]]]:
data_items = []
idx = 0
debug = kwargs.get('debug')
if debug:
pbar = tqdm(total=len(tokens))
while idx < len(tokens):
x = [TOKEN_IDX[token_style]['START_SEQ']]
w_id = [-1]
y = [0]
y_mask = [1] if targets else [0]
while len(x) < seq_len - 1 and idx < len(tokens):
word_pieces = tokenizer.tokenize(tokens[idx])
if len(word_pieces) + len(x) >= seq_len:
break
for i in range(len(word_pieces) - 1):
x.append(tokenizer.convert_tokens_to_ids(word_pieces[i]))
w_id.append(idx)
y.append(0)
y_mask.append(0)
if len(word_pieces) > 0:
x.append(tokenizer.convert_tokens_to_ids(word_pieces[-1]))
else:
x.append(TOKEN_IDX[token_style]['UNK'])
w_id.append(idx)
if targets:
y.append(targets[idx])
else:
y.append(0)
y_mask.append(1)
idx += 1
if debug:
pbar.update(1)
x.append(TOKEN_IDX[token_style]['END_SEQ'])
w_id.append(-1)
y.append(0)
if targets:
y_mask.append(1)
else:
y_mask.append(0)
if len(x) < seq_len:
x = x + [TOKEN_IDX[token_style]['PAD'] for _ in range(seq_len - len(x))]
w_id = w_id + [-100 for _ in range(seq_len - len(w_id))]
y = y + [0 for _ in range(seq_len - len(y))]
y_mask = y_mask + [0 for _ in range(seq_len - len(y_mask))]
attn_mask = [1 if token != TOKEN_IDX[token_style]['PAD'] else 0 for token in x]
data_items.append([x, w_id, attn_mask, y, y_mask])
if debug:
pbar.close()
return data_items
def __len__(self) -> int:
return len(self.data)
def __getitem__(self, index: int) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
x = self.data[index][0]
attn_mask = self.data[index][2]
y = self.data[index][3]
y_mask = self.data[index][4]
x = torch.tensor(x)
attn_mask = torch.tensor(attn_mask)
y = torch.tensor(y)
y_mask = torch.tensor(y_mask)
return x, y, attn_mask, y_mask
class RepunctDataset(BaseDataset):
    """Dataset variant that can apply random token-level augmentation.

    Augmentation (substitute/insert/delete) is applied per position with
    probability ``augment_rate``, and only when ``is_train`` is true.
    """

    def __init__(self,
                 files: Union[str, List[str]],
                 tokenizer: PreTrainedTokenizer,
                 targets: Dict[str, int],
                 sequence_len: int,
                 token_style: str,
                 is_train=False,
                 augment_rate=0.,
                 augment_type='substitute',
                 *args,
                 **kwargs) -> None:
        super().__init__(files, tokenizer, targets, sequence_len, token_style, *args, **kwargs)
        self.is_train = is_train
        self.augment_type = augment_type
        self.augment_rate = augment_rate

    def _augment(self, x, y, y_mask):
        """Return augmented ``(x, y, attn_mask, y_mask)``, each seq_len long."""
        x_aug = []
        y_aug = []
        y_mask_aug = []
        for i in range(len(x)):
            r = np.random.rand()
            if r < self.augment_rate:
                # The augmentation callback appends to the *_aug lists in place.
                AUGMENTATIONS[self.augment_type](x, y, y_mask, x_aug, y_aug, y_mask_aug, i, self.token_style)
            else:
                x_aug.append(x[i])
                y_aug.append(y[i])
                y_mask_aug.append(y_mask[i])
        if len(x_aug) > self.seq_len:
            # Length grew because of inserts: truncate back to seq_len.
            x_aug = x_aug[:self.seq_len]
            y_aug = y_aug[:self.seq_len]
            y_mask_aug = y_mask_aug[:self.seq_len]
        elif len(x_aug) < self.seq_len:
            # Length shrank because of deletes: pad back up to seq_len.
            x_aug = x_aug + [TOKEN_IDX[self.token_style]['PAD'] for _ in range(self.seq_len - len(x_aug))]
            y_aug = y_aug + [0 for _ in range(self.seq_len - len(y_aug))]
            y_mask_aug = y_mask_aug + [0 for _ in range(self.seq_len - len(y_mask_aug))]
        # Bug fix: mask padding of the *augmented* sequence, not the original.
        attn_mask = [1 if token != TOKEN_IDX[self.token_style]['PAD'] else 0 for token in x_aug]
        return x_aug, y_aug, attn_mask, y_mask_aug

    def __getitem__(self, index: int) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
        """Return one sample as tensors, augmenting on the fly during training."""
        x = self.data[index][0]
        attn_mask = self.data[index][2]
        y = self.data[index][3]
        y_mask = self.data[index][4]
        if self.is_train and self.augment_rate > 0:
            x, y, attn_mask, y_mask = self._augment(x, y, y_mask)
        x = torch.tensor(x)
        attn_mask = torch.tensor(attn_mask)
        y = torch.tensor(y)
        y_mask = torch.tensor(y_mask)
        return x, y, attn_mask, y_mask
| true | true |
f71143dd842b9d129c24fafa0c0a8b516d2a0087 | 418 | py | Python | 01-logica-de-programacao-e-algoritmos/Aula 05/5 Recursos avancados com funcoes/5.1 excecoes e erros/ex05.py | rafaelbarretomg/Uninter | 1f84b0103263177122663e991db3a8aeb106a959 | [
"MIT"
] | null | null | null | 01-logica-de-programacao-e-algoritmos/Aula 05/5 Recursos avancados com funcoes/5.1 excecoes e erros/ex05.py | rafaelbarretomg/Uninter | 1f84b0103263177122663e991db3a8aeb106a959 | [
"MIT"
] | null | null | null | 01-logica-de-programacao-e-algoritmos/Aula 05/5 Recursos avancados com funcoes/5.1 excecoes e erros/ex05.py | rafaelbarretomg/Uninter | 1f84b0103263177122663e991db3a8aeb106a959 | [
"MIT"
] | null | null | null | i = 0
while True:
    try:
        # Prompt for a name and an index into it, then print that character.
        nome = input('Por favor digite o seu nome: ')
        ind = int(input('Digite um indice do nome digitado: '))
        print(nome[ind])
        break  # success: leave the retry loop
    except ValueError:
        # int() failed: the index typed was not a number.
        print('Oops! Nome invalido. Tente novamente...')
    except IndexError:
        # The index was out of range for the given name.
        print('Oops! Indice invalido. Tente novamente...')
    finally:
        # Runs after every attempt, including the successful (break) one.
        print('Tentativa {}' .format(i))
        i = i + 1
| 27.866667 | 63 | 0.564593 | i = 0
while True:
try:
nome = input('Por favor digite o seu nome: ')
ind = int(input('Digite um indice do nome digitado: '))
print(nome[ind])
break
except ValueError:
print('Oops! Nome invalido. Tente novamente...')
except IndexError:
print('Oops! Indice invalido. Tente novamente...')
finally:
print('Tentativa {}' .format(i))
i = i + 1
| true | true |
f71144d3fcf748c12ca08f30fa4fdb097ffc23d5 | 9,496 | py | Python | mutagen/_vorbis.py | lucienimmink/scanner.py | cecaa0a570ba8058321dea1c8efa9f77868effb3 | [
"MIT"
] | 2 | 2020-09-16T07:00:41.000Z | 2020-12-20T19:56:03.000Z | mutagen/_vorbis.py | lucienimmink/scanner.py | cecaa0a570ba8058321dea1c8efa9f77868effb3 | [
"MIT"
] | null | null | null | mutagen/_vorbis.py | lucienimmink/scanner.py | cecaa0a570ba8058321dea1c8efa9f77868effb3 | [
"MIT"
] | 2 | 2020-09-17T08:27:12.000Z | 2021-08-23T11:13:52.000Z | # -*- coding: utf-8 -*-
# Copyright (C) 2005-2006 Joe Wreschnig
# 2013 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Read and write Vorbis comment data.
Vorbis comments are freeform key/value pairs; keys are
case-insensitive ASCII and values are Unicode strings. A key may have
multiple values.
The specification is at http://www.xiph.org/vorbis/doc/v-comment.html.
"""
import sys
from io import BytesIO
import mutagen
from mutagen._util import DictMixin, cdata, MutagenError, reraise
def is_valid_key(key):
    """Return True if `key` is a valid Vorbis comment key.

    A valid key is a non-empty text string of printable ASCII between
    0x20 (space) and 0x7D ('}'), with '=' excluded.

    Raises TypeError for bytes input.
    """
    if isinstance(key, bytes):
        raise TypeError("needs to be str not bytes")
    return bool(key) and all(" " <= c <= "}" and c != "=" for c in key)
istag = is_valid_key
class error(MutagenError):
pass
class VorbisUnsetFrameError(error):
pass
class VorbisEncodingError(error):
pass
class VComment(mutagen.Tags, list):
"""A Vorbis comment parser, accessor, and renderer.
All comment ordering is preserved. A VComment is a list of
key/value pairs, and so any Python list method can be used on it.
Vorbis comments are always wrapped in something like an Ogg Vorbis
bitstream or a FLAC metadata block, so this loads string data or a
file-like object, not a filename.
Attributes:
vendor (text): the stream 'vendor' (i.e. writer); default 'Mutagen'
"""
vendor = u"Mutagen " + mutagen.version_string
def __init__(self, data=None, *args, **kwargs):
self._size = 0
# Collect the args to pass to load, this lets child classes
# override just load and get equivalent magic for the
# constructor.
if data is not None:
if isinstance(data, bytes):
data = BytesIO(data)
elif not hasattr(data, 'read'):
raise TypeError("VComment requires bytes or a file-like")
start = data.tell()
self.load(data, *args, **kwargs)
self._size = data.tell() - start
def load(self, fileobj, errors='replace', framing=True):
"""Parse a Vorbis comment from a file-like object.
Arguments:
errors (str): 'strict', 'replace', or 'ignore'.
This affects Unicode decoding and how other malformed content
is interpreted.
framing (bool): if true, fail if a framing bit is not present
Framing bits are required by the Vorbis comment specification,
but are not used in FLAC Vorbis comment blocks.
"""
try:
vendor_length = cdata.uint_le(fileobj.read(4))
self.vendor = fileobj.read(vendor_length).decode('utf-8', errors)
count = cdata.uint_le(fileobj.read(4))
for i in range(count):
length = cdata.uint_le(fileobj.read(4))
try:
string = fileobj.read(length).decode('utf-8', errors)
except (OverflowError, MemoryError):
raise error("cannot read %d bytes, too large" % length)
try:
tag, value = string.split('=', 1)
except ValueError as err:
if errors == "ignore":
continue
elif errors == "replace":
tag, value = u"unknown%d" % i, string
else:
reraise(VorbisEncodingError, err, sys.exc_info()[2])
try:
tag = tag.encode('ascii', errors)
except UnicodeEncodeError:
raise VorbisEncodingError("invalid tag name %r" % tag)
else:
tag = tag.decode("ascii")
if is_valid_key(tag):
self.append((tag, value))
if framing and not bytearray(fileobj.read(1))[0] & 0x01:
raise VorbisUnsetFrameError("framing bit was unset")
except (cdata.error, TypeError):
raise error("file is not a valid Vorbis comment")
def validate(self):
"""Validate keys and values.
Check to make sure every key used is a valid Vorbis key, and
that every value used is a valid Unicode or UTF-8 string. If
any invalid keys or values are found, a ValueError is raised.
In Python 3 all keys and values have to be a string.
"""
if not isinstance(self.vendor, str):
raise ValueError("vendor needs to be str")
for key, value in self:
try:
if not is_valid_key(key):
raise ValueError("%r is not a valid key" % key)
except TypeError:
raise ValueError("%r is not a valid key" % key)
if not isinstance(value, str):
err = "%r needs to be str for key %r" % (value, key)
raise ValueError(err)
return True
def clear(self):
"""Clear all keys from the comment."""
for i in list(self):
self.remove(i)
def write(self, framing=True):
"""Return a string representation of the data.
Validation is always performed, so calling this function on
invalid data may raise a ValueError.
Arguments:
framing (bool): if true, append a framing bit (see load)
"""
self.validate()
def _encode(value):
if not isinstance(value, bytes):
return value.encode('utf-8')
return value
f = BytesIO()
vendor = _encode(self.vendor)
f.write(cdata.to_uint_le(len(vendor)))
f.write(vendor)
f.write(cdata.to_uint_le(len(self)))
for tag, value in self:
tag = _encode(tag)
value = _encode(value)
comment = tag + b"=" + value
f.write(cdata.to_uint_le(len(comment)))
f.write(comment)
if framing:
f.write(b"\x01")
return f.getvalue()
def pprint(self):
def _decode(value):
if not isinstance(value, str):
return value.decode('utf-8', 'replace')
return value
tags = [u"%s=%s" % (_decode(k), _decode(v)) for k, v in self]
return u"\n".join(tags)
class VCommentDict(VComment, DictMixin):
"""A VComment that looks like a dictionary.
This object differs from a dictionary in two ways. First,
len(comment) will still return the number of values, not the
number of keys. Secondly, iterating through the object will
iterate over (key, value) pairs, not keys. Since a key may have
multiple values, the same value may appear multiple times while
iterating.
Since Vorbis comment keys are case-insensitive, all keys are
normalized to lowercase ASCII.
"""
def __getitem__(self, key):
"""A list of values for the key.
This is a copy, so comment['title'].append('a title') will not
work.
"""
if isinstance(key, slice):
return VComment.__getitem__(self, key)
if not is_valid_key(key):
raise ValueError
key = key.lower()
values = [value for (k, value) in self if k.lower() == key]
if not values:
raise KeyError(key)
else:
return values
def __delitem__(self, key):
"""Delete all values associated with the key."""
if isinstance(key, slice):
return VComment.__delitem__(self, key)
if not is_valid_key(key):
raise ValueError
key = key.lower()
to_delete = [x for x in self if x[0].lower() == key]
if not to_delete:
raise KeyError(key)
else:
for item in to_delete:
self.remove(item)
def __contains__(self, key):
"""Return true if the key has any values."""
if not is_valid_key(key):
raise ValueError
key = key.lower()
for k, value in self:
if k.lower() == key:
return True
else:
return False
def __setitem__(self, key, values):
"""Set a key's value or values.
Setting a value overwrites all old ones. The value may be a
list of Unicode or UTF-8 strings, or a single Unicode or UTF-8
string.
"""
if isinstance(key, slice):
return VComment.__setitem__(self, key, values)
if not is_valid_key(key):
raise ValueError
if not isinstance(values, list):
values = [values]
try:
del(self[key])
except KeyError:
pass
for value in values:
self.append((key, value))
def keys(self):
"""Return all keys in the comment."""
return list(set([k.lower() for k, v in self]))
def as_dict(self):
"""Return a copy of the comment data in a real dict."""
return dict([(key, self[key]) for key in self.keys()])
| 30.731392 | 77 | 0.574031 |
import sys
from io import BytesIO
import mutagen
from mutagen._util import DictMixin, cdata, MutagenError, reraise
def is_valid_key(key):
    """Return True for a non-empty key of printable ASCII (space..'}') without '='."""
    if isinstance(key, bytes):
        raise TypeError("needs to be str not bytes")
    return bool(key) and all(" " <= c <= "}" and c != "=" for c in key)
istag = is_valid_key
class error(MutagenError):
pass
class VorbisUnsetFrameError(error):
pass
class VorbisEncodingError(error):
pass
class VComment(mutagen.Tags, list):
vendor = u"Mutagen " + mutagen.version_string
def __init__(self, data=None, *args, **kwargs):
self._size = 0
if data is not None:
if isinstance(data, bytes):
data = BytesIO(data)
elif not hasattr(data, 'read'):
raise TypeError("VComment requires bytes or a file-like")
start = data.tell()
self.load(data, *args, **kwargs)
self._size = data.tell() - start
def load(self, fileobj, errors='replace', framing=True):
try:
vendor_length = cdata.uint_le(fileobj.read(4))
self.vendor = fileobj.read(vendor_length).decode('utf-8', errors)
count = cdata.uint_le(fileobj.read(4))
for i in range(count):
length = cdata.uint_le(fileobj.read(4))
try:
string = fileobj.read(length).decode('utf-8', errors)
except (OverflowError, MemoryError):
raise error("cannot read %d bytes, too large" % length)
try:
tag, value = string.split('=', 1)
except ValueError as err:
if errors == "ignore":
continue
elif errors == "replace":
tag, value = u"unknown%d" % i, string
else:
reraise(VorbisEncodingError, err, sys.exc_info()[2])
try:
tag = tag.encode('ascii', errors)
except UnicodeEncodeError:
raise VorbisEncodingError("invalid tag name %r" % tag)
else:
tag = tag.decode("ascii")
if is_valid_key(tag):
self.append((tag, value))
if framing and not bytearray(fileobj.read(1))[0] & 0x01:
raise VorbisUnsetFrameError("framing bit was unset")
except (cdata.error, TypeError):
raise error("file is not a valid Vorbis comment")
def validate(self):
if not isinstance(self.vendor, str):
raise ValueError("vendor needs to be str")
for key, value in self:
try:
if not is_valid_key(key):
raise ValueError("%r is not a valid key" % key)
except TypeError:
raise ValueError("%r is not a valid key" % key)
if not isinstance(value, str):
err = "%r needs to be str for key %r" % (value, key)
raise ValueError(err)
return True
def clear(self):
for i in list(self):
self.remove(i)
def write(self, framing=True):
self.validate()
def _encode(value):
if not isinstance(value, bytes):
return value.encode('utf-8')
return value
f = BytesIO()
vendor = _encode(self.vendor)
f.write(cdata.to_uint_le(len(vendor)))
f.write(vendor)
f.write(cdata.to_uint_le(len(self)))
for tag, value in self:
tag = _encode(tag)
value = _encode(value)
comment = tag + b"=" + value
f.write(cdata.to_uint_le(len(comment)))
f.write(comment)
if framing:
f.write(b"\x01")
return f.getvalue()
def pprint(self):
def _decode(value):
if not isinstance(value, str):
return value.decode('utf-8', 'replace')
return value
tags = [u"%s=%s" % (_decode(k), _decode(v)) for k, v in self]
return u"\n".join(tags)
class VCommentDict(VComment, DictMixin):
def __getitem__(self, key):
if isinstance(key, slice):
return VComment.__getitem__(self, key)
if not is_valid_key(key):
raise ValueError
key = key.lower()
values = [value for (k, value) in self if k.lower() == key]
if not values:
raise KeyError(key)
else:
return values
def __delitem__(self, key):
if isinstance(key, slice):
return VComment.__delitem__(self, key)
if not is_valid_key(key):
raise ValueError
key = key.lower()
to_delete = [x for x in self if x[0].lower() == key]
if not to_delete:
raise KeyError(key)
else:
for item in to_delete:
self.remove(item)
def __contains__(self, key):
if not is_valid_key(key):
raise ValueError
key = key.lower()
for k, value in self:
if k.lower() == key:
return True
else:
return False
def __setitem__(self, key, values):
if isinstance(key, slice):
return VComment.__setitem__(self, key, values)
if not is_valid_key(key):
raise ValueError
if not isinstance(values, list):
values = [values]
try:
del(self[key])
except KeyError:
pass
for value in values:
self.append((key, value))
def keys(self):
return list(set([k.lower() for k, v in self]))
def as_dict(self):
return dict([(key, self[key]) for key in self.keys()])
| true | true |
f7114556d4b105fe47f72182510cb3fda36299a6 | 2,416 | py | Python | sdn-lab/2 - hub/hub2.py | chenyongzhouking/qqq | 19d0d80d8c8897ed198d9ac7f02eae3dd114635c | [
"Apache-2.0"
] | null | null | null | sdn-lab/2 - hub/hub2.py | chenyongzhouking/qqq | 19d0d80d8c8897ed198d9ac7f02eae3dd114635c | [
"Apache-2.0"
] | null | null | null | sdn-lab/2 - hub/hub2.py | chenyongzhouking/qqq | 19d0d80d8c8897ed198d9ac7f02eae3dd114635c | [
"Apache-2.0"
] | null | null | null | # Implementazione openflow di un hub tramite controller
#
# In ogni switch viene caricata un'unica regola
# di default (table miss) con azione di invio al controller
# dell'intero pacchetto. Il controller risponde con una
# packet out con azione flood
#
# NOTA: OpenVSwitch ignora l'opzione OFPCML_NO_BUFFER
# nelle regole table miss (priorita' 0); pertanto,
# carichiamo una regola con priorita' 1
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
class PolimiHub(app_manager.RyuApp):
    """OpenFlow implementation of a dumb hub via the controller.

    Each switch gets a single default rule that sends the whole packet to
    the controller; the controller answers with a packet-out whose action
    is FLOOD (send out of every port but the ingress one).
    """
    OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
    @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
    def switch_features_handler(self, ev):
        # Runs once per switch at connection time: install the default rule.
        datapath = ev.msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        match = parser.OFPMatch()  # empty match: matches every packet
        actions = [
            parser.OFPActionOutput(
                ofproto.OFPP_CONTROLLER,
                ofproto.OFPCML_NO_BUFFER
            )
        ]
        inst = [
            parser.OFPInstructionActions(
                ofproto.OFPIT_APPLY_ACTIONS,
                actions
            )
        ]
        # Priority 1 instead of 0 because Open vSwitch ignores
        # OFPCML_NO_BUFFER on priority-0 table-miss rules.
        mod = parser.OFPFlowMod(
            datapath=datapath,
            priority=1,
            match=match,
            instructions=inst
        )
        datapath.send_msg(mod)
    # Register a handler for the Packet-In event.
    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def _packet_in_handler(self, ev):
        msg = ev.msg
        datapath = msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        # Given the rule we installed, packets are never buffered
        # at the switch.
        assert msg.buffer_id == ofproto.OFP_NO_BUFFER
        # Recover the switch ingress port from the packet metadata.
        in_port = msg.match['in_port']
        actions = [
            parser.OFPActionOutput(
                ofproto.OFPP_FLOOD
            )
        ]
        out = parser.OFPPacketOut(
            datapath=datapath,
            buffer_id=msg.buffer_id,
            in_port=in_port,
            actions=actions,
            data=msg.data
        )
        datapath.send_msg(out)
| 30.974359 | 69 | 0.637003 |
# di default (table miss) con azione di invio al controller
# dell'intero pacchetto. Il controller risponde con una
# nelle regole table miss (priorita' 0); pertanto,
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
class PolimiHub(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def switch_features_handler(self, ev):
datapath = ev.msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
match = parser.OFPMatch()
actions = [
parser.OFPActionOutput(
ofproto.OFPP_CONTROLLER,
ofproto.OFPCML_NO_BUFFER
)
]
inst = [
parser.OFPInstructionActions(
ofproto.OFPIT_APPLY_ACTIONS,
actions
)
]
mod = parser.OFPFlowMod(
datapath=datapath,
priority=1,
match=match,
instructions=inst
)
datapath.send_msg(mod)
# Registriamo un handler dell'evento Packet In
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, ev):
msg = ev.msg
datapath = msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
assert msg.buffer_id == ofproto.OFP_NO_BUFFER
in_port = msg.match['in_port']
actions = [
parser.OFPActionOutput(
ofproto.OFPP_FLOOD
)
]
out = parser.OFPPacketOut(
datapath=datapath,
buffer_id=msg.buffer_id,
in_port=in_port,
actions=actions,
data=msg.data
)
datapath.send_msg(out)
| true | true |
f71145a2da3e9433195622624a9601d75fc97862 | 2,895 | py | Python | grouper/fe/handlers/service_account_create.py | bonniech3n/merou | 47d9de906686fd5b930a49299d3ffbcc0673ae8a | [
"Apache-2.0"
] | null | null | null | grouper/fe/handlers/service_account_create.py | bonniech3n/merou | 47d9de906686fd5b930a49299d3ffbcc0673ae8a | [
"Apache-2.0"
] | null | null | null | grouper/fe/handlers/service_account_create.py | bonniech3n/merou | 47d9de906686fd5b930a49299d3ffbcc0673ae8a | [
"Apache-2.0"
] | null | null | null | from grouper.fe.forms import ServiceAccountCreateForm
from grouper.fe.settings import settings
from grouper.fe.util import GrouperHandler
from grouper.models.group import Group
from grouper.service_account import (
BadMachineSet,
can_create_service_account,
create_service_account,
DuplicateServiceAccount,
)
class ServiceAccountCreate(GrouperHandler):
    """Handler for creating a service account owned by a group."""
    def get(self, group_id=None, name=None):
        """Render the creation form, after group and permission checks."""
        group = Group.get(self.session, group_id, name)
        if not group:
            return self.notfound()
        if not can_create_service_account(self.session, self.current_user, group):
            return self.forbidden()
        form = ServiceAccountCreateForm()
        return self.render("service-account-create.html", form=form, group=group)
    def post(self, group_id=None, name=None):
        """Validate the submitted form and create the service account."""
        group = Group.get(self.session, group_id, name)
        if not group:
            return self.notfound()
        # If no domain was given, append the configured one before validation.
        # NOTE(review): tornado request.arguments values are bytes in py3;
        # `"@" not in <bytes>` would raise -- presumably args are str here;
        # confirm against the framework version in use.
        if "@" not in self.request.arguments["name"][0]:
            self.request.arguments["name"][0] += "@" + settings.service_account_email_domain
        if not can_create_service_account(self.session, self.current_user, group):
            return self.forbidden()
        form = ServiceAccountCreateForm(self.request.arguments)
        if not form.validate():
            return self.render(
                "service-account-create.html",
                form=form,
                group=group,
                alerts=self.get_form_alerts(form.errors),
            )
        # Enforce that the account lives in the configured email domain.
        if form.data["name"].split("@")[-1] != settings.service_account_email_domain:
            form.name.errors.append(
                "All service accounts must have a username ending in {}".format(
                    settings.service_account_email_domain
                )
            )
            return self.render(
                "service-account-create.html",
                form=form,
                group=group,
                alerts=self.get_form_alerts(form.errors),
            )
        try:
            create_service_account(
                self.session,
                self.current_user,
                form.data["name"],
                form.data["description"],
                form.data["machine_set"],
                group,
            )
        except DuplicateServiceAccount:
            form.name.errors.append("A user with name {} already exists".format(form.data["name"]))
        except BadMachineSet as e:
            form.machine_set.errors.append(str(e))
        # Re-render the form if creation reported any errors above.
        if form.name.errors or form.machine_set.errors:
            return self.render(
                "service-account-create.html",
                form=form,
                group=group,
                alerts=self.get_form_alerts(form.errors),
            )
        url = "/groups/{}/service/{}?refresh=yes".format(group.name, form.data["name"])
        return self.redirect(url)
| 35.304878 | 99 | 0.591019 | from grouper.fe.forms import ServiceAccountCreateForm
from grouper.fe.settings import settings
from grouper.fe.util import GrouperHandler
from grouper.models.group import Group
from grouper.service_account import (
BadMachineSet,
can_create_service_account,
create_service_account,
DuplicateServiceAccount,
)
class ServiceAccountCreate(GrouperHandler):
def get(self, group_id=None, name=None):
group = Group.get(self.session, group_id, name)
if not group:
return self.notfound()
if not can_create_service_account(self.session, self.current_user, group):
return self.forbidden()
form = ServiceAccountCreateForm()
return self.render("service-account-create.html", form=form, group=group)
def post(self, group_id=None, name=None):
group = Group.get(self.session, group_id, name)
if not group:
return self.notfound()
if "@" not in self.request.arguments["name"][0]:
self.request.arguments["name"][0] += "@" + settings.service_account_email_domain
if not can_create_service_account(self.session, self.current_user, group):
return self.forbidden()
form = ServiceAccountCreateForm(self.request.arguments)
if not form.validate():
return self.render(
"service-account-create.html",
form=form,
group=group,
alerts=self.get_form_alerts(form.errors),
)
if form.data["name"].split("@")[-1] != settings.service_account_email_domain:
form.name.errors.append(
"All service accounts must have a username ending in {}".format(
settings.service_account_email_domain
)
)
return self.render(
"service-account-create.html",
form=form,
group=group,
alerts=self.get_form_alerts(form.errors),
)
try:
create_service_account(
self.session,
self.current_user,
form.data["name"],
form.data["description"],
form.data["machine_set"],
group,
)
except DuplicateServiceAccount:
form.name.errors.append("A user with name {} already exists".format(form.data["name"]))
except BadMachineSet as e:
form.machine_set.errors.append(str(e))
if form.name.errors or form.machine_set.errors:
return self.render(
"service-account-create.html",
form=form,
group=group,
alerts=self.get_form_alerts(form.errors),
)
url = "/groups/{}/service/{}?refresh=yes".format(group.name, form.data["name"])
return self.redirect(url)
| true | true |
f71145a80fcf271f5514d8ace4aea5cef26e4d8b | 394 | py | Python | generate_params_cont_bath.py | patryk-kubiczek/learning-GF | 779250d139307cb72e5b4e467f46825c984c87ec | [
"MIT"
] | 8 | 2019-08-13T22:20:53.000Z | 2020-07-22T01:48:41.000Z | generate_params_cont_bath.py | patryk-kubiczek/learning-GF | 779250d139307cb72e5b4e467f46825c984c87ec | [
"MIT"
] | null | null | null | generate_params_cont_bath.py | patryk-kubiczek/learning-GF | 779250d139307cb72e5b4e467f46825c984c87ec | [
"MIT"
] | null | null | null | from generate_params import *
n_params = 50
# Generate `n_params` random continuous-bath parameter files.
# NOTE(review): `beta`, `random_params_cont_bath` and `name` come from the
# star import of generate_params -- confirm their signatures there.
for _ in range(n_params):
    random_params_cont_bath(beta=beta,
                            U_range=[1., 8.],
                            eps_range=[-1., 1.],
                            D_range=[2. , 8.],
                            filename=name("params_cont_bath", beta, 0,
                                          parent="data_cont_bath/"))
| 30.307692 | 70 | 0.423858 | from generate_params import *
n_params = 50
for _ in range(n_params):
random_params_cont_bath(beta=beta,
U_range=[1., 8.],
eps_range=[-1., 1.],
D_range=[2. , 8.],
filename=name("params_cont_bath", beta, 0,
parent="data_cont_bath/"))
| true | true |
f71145cadf3e23956ba90f5e44b628c0b29a20a3 | 484 | py | Python | cmsplugin_seocheck/cms_toolbar.py | creimers/cmsplugin_seocheck | b97f38e55dec516ebf0c049cd26b74347e49b86e | [
"BSD-2-Clause"
] | 3 | 2015-05-11T19:46:59.000Z | 2016-07-26T00:20:00.000Z | cmsplugin_seocheck/cms_toolbar.py | creimers/cmsplugin_seocheck | b97f38e55dec516ebf0c049cd26b74347e49b86e | [
"BSD-2-Clause"
] | 2 | 2015-05-09T16:21:26.000Z | 2016-10-29T13:23:35.000Z | cmsplugin_seocheck/cms_toolbar.py | creimers/cmsplugin_seocheck | b97f38e55dec516ebf0c049cd26b74347e49b86e | [
"BSD-2-Clause"
] | 1 | 2018-03-03T16:18:59.000Z | 2018-03-03T16:18:59.000Z | # -*- coding: utf-8 -*-
from django.core.urlresolvers import reverse
from cms.toolbar_base import CMSToolbar
from cms.toolbar_pool import toolbar_pool
@toolbar_pool.register
class SeoCheckToolbar(CMSToolbar):
def populate(self):
seo_check_menu = self.toolbar.get_or_create_menu(
'seo_check',
'SEO'
)
url = reverse('cmsplugin_seocheck:check_modal')
seo_check_menu.add_modal_item(name='SEO-Check', url=url)
| 28.470588 | 64 | 0.673554 |
from django.core.urlresolvers import reverse
from cms.toolbar_base import CMSToolbar
from cms.toolbar_pool import toolbar_pool
@toolbar_pool.register
class SeoCheckToolbar(CMSToolbar):
def populate(self):
seo_check_menu = self.toolbar.get_or_create_menu(
'seo_check',
'SEO'
)
url = reverse('cmsplugin_seocheck:check_modal')
seo_check_menu.add_modal_item(name='SEO-Check', url=url)
| true | true |
f7114642c746c750ee68257580dadca691feecd7 | 737 | py | Python | pele_platform/gpcr/main.py | esguerra/pele_platform | c78a049d5e4000b42688f6ba240cf97b67739770 | [
"Apache-2.0"
] | 5 | 2020-03-06T17:26:42.000Z | 2020-10-28T16:24:39.000Z | pele_platform/gpcr/main.py | esguerra/pele_platform | c78a049d5e4000b42688f6ba240cf97b67739770 | [
"Apache-2.0"
] | 37 | 2019-11-28T11:07:47.000Z | 2020-11-23T16:22:50.000Z | pele_platform/gpcr/main.py | esguerra/pele_platform | c78a049d5e4000b42688f6ba240cf97b67739770 | [
"Apache-2.0"
] | 8 | 2019-11-27T15:16:30.000Z | 2020-10-27T10:29:52.000Z | from dataclasses import dataclass
import pele_platform.Adaptive.simulation as si
import pele_platform.Utilities.Parameters.parameters as pv
@dataclass
class GpcrLauncher:
    """Configures and launches a GPCR adaptive PELE simulation."""
    # Builder holding every user-supplied simulation parameter.
    args: pv.ParametersBuilder
    def run_gpcr_simulation(self) -> pv.ParametersBuilder:
        """Apply GPCR-specific settings, then run the adaptive simulation."""
        # Set parameters for GPCR and launch simulation
        self._set_parameters()
        simulation_parameters = si.run_adaptive(self.args)
        return simulation_parameters
    def _set_parameters(self) -> None:
        # Set box and initial ligand position
        # NOTE(review): orthosteric_site is stored on the launcher but not
        # used here -- presumably consumed downstream; confirm.
        self.orthosteric_site = self.args.orthosteric_site
        self.initial_site = self.args.initial_site
        self.args.center_of_interface = self.initial_site
        self.args.randomize = True
| 32.043478 | 58 | 0.739484 | from dataclasses import dataclass
import pele_platform.Adaptive.simulation as si
import pele_platform.Utilities.Parameters.parameters as pv
@dataclass
class GpcrLauncher:
args: pv.ParametersBuilder
def run_gpcr_simulation(self) -> pv.ParametersBuilder:
self._set_parameters()
simulation_parameters = si.run_adaptive(self.args)
return simulation_parameters
def _set_parameters(self) -> None:
self.orthosteric_site = self.args.orthosteric_site
self.initial_site = self.args.initial_site
self.args.center_of_interface = self.initial_site
self.args.randomize = True
| true | true |
f71146bbe42ce1d7a3023d062762ef23004c106b | 463 | py | Python | Python3/172.factorial-trailing-zeroes.py | 610yilingliu/leetcode | 30d071b3685c2131bd3462ba77c6c05114f3f227 | [
"MIT"
] | null | null | null | Python3/172.factorial-trailing-zeroes.py | 610yilingliu/leetcode | 30d071b3685c2131bd3462ba77c6c05114f3f227 | [
"MIT"
] | null | null | null | Python3/172.factorial-trailing-zeroes.py | 610yilingliu/leetcode | 30d071b3685c2131bd3462ba77c6c05114f3f227 | [
"MIT"
] | null | null | null | #
# @lc app=leetcode id=172 lang=python3
#
# [172] Factorial Trailing Zeroes
#
# @lc code=start
class Solution:
    def trailingZeroes(self, n):
        """Count trailing zeroes of n!.

        Each zero comes from a 2*5 factor pair; factors of 2 always
        outnumber factors of 5, so the answer is n//5 + n//25 + n//125 + ...
        """
        zeros = 0
        power_of_five = 5
        while power_of_five <= n:
            zeros += n // power_of_five
            power_of_five *= 5
        return zeros
return ans
if __name__ == '__main__':
a = Solution()
b = a.trailingZeroes(200)
print(b)
# @lc code=end
| 16.535714 | 38 | 0.5054 |
class Solution:
def trailingZeroes(self, n):
if n < 5:
return 0
ans = 0
base = 5
while n >= base:
ans += n//base
base *= 5
return ans
if __name__ == '__main__':
a = Solution()
b = a.trailingZeroes(200)
print(b)
| true | true |
f71146e12c601123c8c8e79c664e15a620ce0608 | 184 | py | Python | GPGO/__init__.py | FNTwin/Bayesian-Optimization | 2f89699648601d4499dcab285a1d7376f0e1ef4b | [
"MIT"
] | 3 | 2020-06-07T19:16:40.000Z | 2020-07-18T21:56:13.000Z | GPGO/__init__.py | FNTwin/Bayesian-Optimization | 2f89699648601d4499dcab285a1d7376f0e1ef4b | [
"MIT"
] | null | null | null | GPGO/__init__.py | FNTwin/Bayesian-Optimization | 2f89699648601d4499dcab285a1d7376f0e1ef4b | [
"MIT"
] | 2 | 2021-01-03T19:09:42.000Z | 2021-01-03T19:09:42.000Z | #from .Opt import BayesianOptimization
from .GaussianProcess import GP
from .GaussianProcess.Kernel import RBF
from .Opt import BayesianOptimization
from .Acquisition import Acquistion | 36.8 | 39 | 0.858696 |
from .GaussianProcess import GP
from .GaussianProcess.Kernel import RBF
from .Opt import BayesianOptimization
from .Acquisition import Acquistion | true | true |
f711480da10ac2397dde2ba6ad95be010af52580 | 5,095 | py | Python | submissions/Thompson/mySearches.py | CDeas9/aima-python | 91c89d898f46a8c472277c9b85c9a282af378937 | [
"MIT"
] | null | null | null | submissions/Thompson/mySearches.py | CDeas9/aima-python | 91c89d898f46a8c472277c9b85c9a282af378937 | [
"MIT"
] | null | null | null | submissions/Thompson/mySearches.py | CDeas9/aima-python | 91c89d898f46a8c472277c9b85c9a282af378937 | [
"MIT"
] | null | null | null | import search
import string
from math import(cos, pi)
# A sample map problem
# sumner_map = search.UndirectedGraph(dict(
# Portland=dict(Mitchellville=7, Fairfield=17, Cottontown=18),
# Cottontown=dict(Portland=18),
# Fairfield=dict(Mitchellville=21, Portland=17),
# Mitchellville=dict(Portland=7, Fairfield=21),
# ))
#
# sumner_puzzle = search.GraphProblem('Cottontown', 'Mitchellville', sumner_map)
#
# sumner_puzzle.label = 'Sumner'
# sumner_puzzle.description = '''
# An abbreviated map of Sumner County, TN.
# This map is unique, to the best of my knowledge.
# '''
#=========================================================================
#=========================================================================
norfolk_map = search.UndirectedGraph(dict(
Norfolk=dict(Suffolk=50,Chesapeake=15,VirginiaBeach=35),
Suffolk=dict(Norfolk=50,Chesapeake=35,Hampton=60,Moyock=150,Sunbury=120),
Chesapeake=dict(Suffolk=35,Norfolk=15,VirginiaBeach=40,Moyock=120),
VirginiaBeach=dict(Norfolk=35,Chesapeake=40),
Hampton=dict(Norfolk=30,Suffolk=60,NewportNews=15),
NewportNews=dict(Hampton=15,Jamestown=35,Williamsburg=30,Yorktown=15),
Jamestown=dict(NewportNews=35,Williamsburg=15),
Williamsburg=dict(Jamestown=15,NewportNews=30,Yorktown=20),
Yorktown=dict(Williamsburg=20,Newportnews=15),
Sunbury=dict(Suffolk=120, Moyock=45),
Moyock=dict(Suffolk=150,Chesapeak=120),
))
norfolk_puzzle = search.GraphProblem('Jamestown', 'Yorktown', norfolk_map)
norfolk_puzzle.label = 'Norfolk'
norfolk_puzzle.description = 'This is a map of the Norfolk, VA area.' \
'This map is unique to the best of my' \
'knowledge.'
#=========================================================================
#=========================================================================
romania_map = search.UndirectedGraph(dict(
A=dict(Z=75,S=140,T=118),
Z=dict(O=71,A=75),
S=dict(O=151,R=80,F=99),
T=dict(A=118,L=111),
O=dict(Z=71,S=151),
L=dict(T=111,M=70),
M=dict(L=70,D=75),
D=dict(M=75,C=120),
R=dict(S=80,C=146,P=97),
C=dict(R=146,P=138,D=120),
F=dict(S=99,B=211),
P=dict(R=97,C=138,B=101),
B=dict(G=90,P=101,F=211),
))
romania_puzzle = search.GraphProblem('A', 'B', romania_map)
romania_puzzle.label = 'Romania'
romania_puzzle.description = '''
The simplified map of Romania, per
Russall & Norvig, 3rd Ed., p. 68.
'''
# A trivial Problem definition
class LightSwitch(search.Problem):
def actions(self, state):
return ['up', 'down']
def result(self, state, action):
if action == 'up':
return 'on'
else:
return 'off'
def goal_test(self, state):
return state == 'on'
def h(self, node):
state = node.state
if self.goal_test(state):
return 0
else:
return 1
#swiss_puzzle = search.GraphProblem('A', 'Z', sumner_map)
switch_puzzle = LightSwitch('off')
switch_puzzle.label = 'Light Switch'
#===========================================================================================
#===========================================================================================
# class TrueOrFalse(search.Problem):
# def actions(self, state):
# return ['true', 'false']
#
# def result(self, state, action):
# if action == 'true':
# return 'true'
# else:
# return 'false'
#
# def goal_test(self, state):
# return state == 'true'
#
# def h(self, node):
# state = node.state
# if self.goal_test(state):
# return 0
# else:
# return 1
#
# #swiss_puzzle = search.GraphProblem('A', 'Z', sumner_map)
# trueorfalse_puzzle = TrueOrFalse('false')
# trueorfalse_puzzle.label = 'True or False'
cheese_map = search.UndirectedGraph(dict(
A1=dict(A2=10,A3=20,B1=10,B2=20,B3=30,C1=20,C2=30,C3=40),
A2=dict(A1=10,A3=10,B1=20,B2=10,B3=20,C1=30,C2=20,C3=30),
A3=dict(A1=20,A2=10,B1=30,B2=20,B3=10,C1=40,C2=30,C3=20),
B1=dict(A1=10,A2=20,A3=30,B2=10,B3=10,C1=10,C2=20,C3=30),
B2=dict(A2=10,A3=20,B1=10,A1=20,B3=10,C1=20,C2=10,C3=20),
B3=dict(A2=20,A3=10,B1=20,B2=10,A1=30,C1=30,C2=20,C3=10),
C1=dict(A2=20,A3=40,B1=10,B2=20,B3=30,A1=20,C2=10,C3=20),
C2=dict(A2=10,A3=20,B1=20,B2=10,B3=20,C1=10,A1=30,C3=10),
C3=dict(A2=30,A3=20,B1=30,B2=20,B3=10,C1=20,C2=10,A1=40),
))
import random
def guess_letter():
return random.choice('ABC')
def guess_number():
return random.choice('123')
a = guess_letter()
b = guess_number()
print(a + b)
cheese_puzzle = search.GraphProblem('A1', a+b , cheese_map)
cheese_puzzle.label = 'Cheese Puzzle'
#===========================================================================================
#===========================================================================================
mySearches = [
# swiss_puzzle,
# sumner_puzzle,
romania_puzzle,
switch_puzzle,
norfolk_puzzle,
#trueorfalse_puzzle,
cheese_puzzle,
]
| 29.970588 | 92 | 0.556035 | import search
import string
from math import(cos, pi)
# An abbreviated map of Sumner County, TN.
# This map is unique, to the best of my knowledge.
# '''
norfolk_map = search.UndirectedGraph(dict(
Norfolk=dict(Suffolk=50,Chesapeake=15,VirginiaBeach=35),
Suffolk=dict(Norfolk=50,Chesapeake=35,Hampton=60,Moyock=150,Sunbury=120),
Chesapeake=dict(Suffolk=35,Norfolk=15,VirginiaBeach=40,Moyock=120),
VirginiaBeach=dict(Norfolk=35,Chesapeake=40),
Hampton=dict(Norfolk=30,Suffolk=60,NewportNews=15),
NewportNews=dict(Hampton=15,Jamestown=35,Williamsburg=30,Yorktown=15),
Jamestown=dict(NewportNews=35,Williamsburg=15),
Williamsburg=dict(Jamestown=15,NewportNews=30,Yorktown=20),
Yorktown=dict(Williamsburg=20,Newportnews=15),
Sunbury=dict(Suffolk=120, Moyock=45),
Moyock=dict(Suffolk=150,Chesapeak=120),
))
norfolk_puzzle = search.GraphProblem('Jamestown', 'Yorktown', norfolk_map)
norfolk_puzzle.label = 'Norfolk'
norfolk_puzzle.description = 'This is a map of the Norfolk, VA area.' \
'This map is unique to the best of my' \
'knowledge.'
romania_map = search.UndirectedGraph(dict(
A=dict(Z=75,S=140,T=118),
Z=dict(O=71,A=75),
S=dict(O=151,R=80,F=99),
T=dict(A=118,L=111),
O=dict(Z=71,S=151),
L=dict(T=111,M=70),
M=dict(L=70,D=75),
D=dict(M=75,C=120),
R=dict(S=80,C=146,P=97),
C=dict(R=146,P=138,D=120),
F=dict(S=99,B=211),
P=dict(R=97,C=138,B=101),
B=dict(G=90,P=101,F=211),
))
romania_puzzle = search.GraphProblem('A', 'B', romania_map)
romania_puzzle.label = 'Romania'
romania_puzzle.description = '''
The simplified map of Romania, per
Russall & Norvig, 3rd Ed., p. 68.
'''
class LightSwitch(search.Problem):
def actions(self, state):
return ['up', 'down']
def result(self, state, action):
if action == 'up':
return 'on'
else:
return 'off'
def goal_test(self, state):
return state == 'on'
def h(self, node):
state = node.state
if self.goal_test(state):
return 0
else:
return 1
switch_puzzle = LightSwitch('off')
switch_puzzle.label = 'Light Switch'
(A2=10,A3=20,B1=10,B2=20,B3=30,C1=20,C2=30,C3=40),
A2=dict(A1=10,A3=10,B1=20,B2=10,B3=20,C1=30,C2=20,C3=30),
A3=dict(A1=20,A2=10,B1=30,B2=20,B3=10,C1=40,C2=30,C3=20),
B1=dict(A1=10,A2=20,A3=30,B2=10,B3=10,C1=10,C2=20,C3=30),
B2=dict(A2=10,A3=20,B1=10,A1=20,B3=10,C1=20,C2=10,C3=20),
B3=dict(A2=20,A3=10,B1=20,B2=10,A1=30,C1=30,C2=20,C3=10),
C1=dict(A2=20,A3=40,B1=10,B2=20,B3=30,A1=20,C2=10,C3=20),
C2=dict(A2=10,A3=20,B1=20,B2=10,B3=20,C1=10,A1=30,C3=10),
C3=dict(A2=30,A3=20,B1=30,B2=20,B3=10,C1=20,C2=10,A1=40),
))
import random
def guess_letter():
return random.choice('ABC')
def guess_number():
return random.choice('123')
a = guess_letter()
b = guess_number()
print(a + b)
cheese_puzzle = search.GraphProblem('A1', a+b , cheese_map)
cheese_puzzle.label = 'Cheese Puzzle'
mySearches = [
romania_puzzle,
switch_puzzle,
norfolk_puzzle,
cheese_puzzle,
]
| true | true |
f71148786432618607fcb5a1bef3949e8303ec9a | 12,558 | py | Python | venv/Lib/site-packages/streamlit/caching/hashing.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 19,099 | 2019-08-25T14:00:15.000Z | 2022-03-31T21:00:28.000Z | venv/Lib/site-packages/streamlit/caching/hashing.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 3,078 | 2019-08-25T19:50:14.000Z | 2022-03-31T23:26:14.000Z | venv/Lib/site-packages/streamlit/caching/hashing.py | ajayiagbebaku/NFL-Model | afcc67a85ca7138c58c3334d45988ada2da158ed | [
"MIT"
] | 1,892 | 2019-08-26T04:44:24.000Z | 2022-03-30T16:11:51.000Z | # Copyright 2018-2021 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hashing for st.memo and st.singleton."""
import collections
import functools
import hashlib
import inspect
import io
import os
import pickle
import sys
import tempfile
import threading
import unittest.mock
import weakref
from typing import Any, Pattern, Optional, Dict, List
from streamlit import type_util
from streamlit import util
from streamlit.logger import get_logger
from streamlit.uploaded_file_manager import UploadedFile
from .cache_errors import (
CacheType,
UnhashableTypeError,
)
_LOGGER = get_logger(__name__)
# If a dataframe has more than this many rows, we consider it large and hash a sample.
_PANDAS_ROWS_LARGE = 100000
_PANDAS_SAMPLE_SIZE = 10000
# Similar to dataframes, we also sample large numpy arrays.
_NP_SIZE_LARGE = 1000000
_NP_SAMPLE_SIZE = 100000
# Arbitrary item to denote where we found a cycle in a hashed object.
# This allows us to hash self-referencing lists, dictionaries, etc.
_CYCLE_PLACEHOLDER = b"streamlit-57R34ML17-hesamagicalponyflyingthroughthesky-CYCLE"
def update_hash(val: Any, hasher, cache_type: CacheType) -> None:
"""Updates a hashlib hasher with the hash of val.
This is the main entrypoint to hashing.py.
"""
ch = _CacheFuncHasher(cache_type)
ch.update(hasher, val)
class _HashStack:
"""Stack of what has been hashed, for debug and circular reference detection.
This internally keeps 1 stack per thread.
Internally, this stores the ID of pushed objects rather than the objects
themselves because otherwise the "in" operator inside __contains__ would
fail for objects that don't return a boolean for "==" operator. For
example, arr == 10 where arr is a NumPy array returns another NumPy array.
This causes the "in" to crash since it expects a boolean.
"""
def __init__(self):
self._stack: collections.OrderedDict[int, List[Any]] = collections.OrderedDict()
def __repr__(self) -> str:
return util.repr_(self)
def push(self, val: Any):
self._stack[id(val)] = val
def pop(self):
self._stack.popitem()
def __contains__(self, val: Any):
return id(val) in self._stack
class _HashStacks:
"""Stacks of what has been hashed, with at most 1 stack per thread."""
def __init__(self):
self._stacks: weakref.WeakKeyDictionary[
threading.Thread, _HashStack
] = weakref.WeakKeyDictionary()
def __repr__(self) -> str:
return util.repr_(self)
@property
def current(self) -> _HashStack:
current_thread = threading.current_thread()
stack = self._stacks.get(current_thread, None)
if stack is None:
stack = _HashStack()
self._stacks[current_thread] = stack
return stack
hash_stacks = _HashStacks()
def _int_to_bytes(i: int) -> bytes:
num_bytes = (i.bit_length() + 8) // 8
return i.to_bytes(num_bytes, "little", signed=True)
def _key(obj: Optional[Any]) -> Any:
"""Return key for memoization."""
if obj is None:
return None
def is_simple(obj):
return (
isinstance(obj, bytes)
or isinstance(obj, bytearray)
or isinstance(obj, str)
or isinstance(obj, float)
or isinstance(obj, int)
or isinstance(obj, bool)
or obj is None
)
if is_simple(obj):
return obj
if isinstance(obj, tuple):
if all(map(is_simple, obj)):
return obj
if isinstance(obj, list):
if all(map(is_simple, obj)):
return ("__l", tuple(obj))
if (
type_util.is_type(obj, "pandas.core.frame.DataFrame")
or type_util.is_type(obj, "numpy.ndarray")
or inspect.isbuiltin(obj)
or inspect.isroutine(obj)
or inspect.iscode(obj)
):
return id(obj)
return NoResult
class _CacheFuncHasher:
"""A hasher that can hash objects with cycles."""
def __init__(self, cache_type: CacheType):
self._hashes: Dict[Any, bytes] = {}
# The number of the bytes in the hash.
self.size = 0
self.cache_type = cache_type
def __repr__(self) -> str:
return util.repr_(self)
def to_bytes(self, obj: Any) -> bytes:
"""Add memoization to _to_bytes and protect against cycles in data structures."""
tname = type(obj).__qualname__.encode()
key = (tname, _key(obj))
# Memoize if possible.
if key[1] is not NoResult:
if key in self._hashes:
return self._hashes[key]
# Break recursive cycles.
if obj in hash_stacks.current:
return _CYCLE_PLACEHOLDER
hash_stacks.current.push(obj)
try:
# Hash the input
b = b"%s:%s" % (tname, self._to_bytes(obj))
# Hmmm... It's possible that the size calculation is wrong. When we
# call to_bytes inside _to_bytes things get double-counted.
self.size += sys.getsizeof(b)
if key[1] is not NoResult:
self._hashes[key] = b
finally:
# In case an UnhashableTypeError (or other) error is thrown, clean up the
# stack so we don't get false positives in future hashing calls
hash_stacks.current.pop()
return b
def update(self, hasher, obj: Any) -> None:
"""Update the provided hasher with the hash of an object."""
b = self.to_bytes(obj)
hasher.update(b)
def _to_bytes(self, obj: Any) -> bytes:
"""Hash objects to bytes, including code with dependencies.
Python's built in `hash` does not produce consistent results across
runs.
"""
if isinstance(obj, unittest.mock.Mock):
# Mock objects can appear to be infinitely
# deep, so we don't try to hash them at all.
return self.to_bytes(id(obj))
elif isinstance(obj, bytes) or isinstance(obj, bytearray):
return obj
elif isinstance(obj, str):
return obj.encode()
elif isinstance(obj, float):
return self.to_bytes(hash(obj))
elif isinstance(obj, int):
return _int_to_bytes(obj)
elif isinstance(obj, (list, tuple)):
h = hashlib.new("md5")
for item in obj:
self.update(h, item)
return h.digest()
elif isinstance(obj, dict):
h = hashlib.new("md5")
for item in obj.items():
self.update(h, item)
return h.digest()
elif obj is None:
return b"0"
elif obj is True:
return b"1"
elif obj is False:
return b"0"
elif type_util.is_type(obj, "pandas.core.frame.DataFrame") or type_util.is_type(
obj, "pandas.core.series.Series"
):
import pandas as pd
if len(obj) >= _PANDAS_ROWS_LARGE:
obj = obj.sample(n=_PANDAS_SAMPLE_SIZE, random_state=0)
try:
return b"%s" % pd.util.hash_pandas_object(obj).sum()
except TypeError:
# Use pickle if pandas cannot hash the object for example if
# it contains unhashable objects.
return b"%s" % pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
elif type_util.is_type(obj, "numpy.ndarray"):
h = hashlib.new("md5")
self.update(h, obj.shape)
if obj.size >= _NP_SIZE_LARGE:
import numpy as np
state = np.random.RandomState(0)
obj = state.choice(obj.flat, size=_NP_SAMPLE_SIZE)
self.update(h, obj.tobytes())
return h.digest()
elif inspect.isbuiltin(obj):
return bytes(obj.__name__.encode())
elif type_util.is_type(obj, "builtins.mappingproxy") or type_util.is_type(
obj, "builtins.dict_items"
):
return self.to_bytes(dict(obj))
elif type_util.is_type(obj, "builtins.getset_descriptor"):
return bytes(obj.__qualname__.encode())
elif isinstance(obj, UploadedFile):
# UploadedFile is a BytesIO (thus IOBase) but has a name.
# It does not have a timestamp so this must come before
# temproary files
h = hashlib.new("md5")
self.update(h, obj.name)
self.update(h, obj.tell())
self.update(h, obj.getvalue())
return h.digest()
elif hasattr(obj, "name") and (
isinstance(obj, io.IOBase)
# Handle temporary files used during testing
or isinstance(obj, tempfile._TemporaryFileWrapper)
):
# Hash files as name + last modification date + offset.
# NB: we're using hasattr("name") to differentiate between
# on-disk and in-memory StringIO/BytesIO file representations.
# That means that this condition must come *before* the next
# condition, which just checks for StringIO/BytesIO.
h = hashlib.new("md5")
obj_name = getattr(obj, "name", "wonthappen") # Just to appease MyPy.
self.update(h, obj_name)
self.update(h, os.path.getmtime(obj_name))
self.update(h, obj.tell())
return h.digest()
elif isinstance(obj, Pattern):
return self.to_bytes([obj.pattern, obj.flags])
elif isinstance(obj, io.StringIO) or isinstance(obj, io.BytesIO):
# Hash in-memory StringIO/BytesIO by their full contents
# and seek position.
h = hashlib.new("md5")
self.update(h, obj.tell())
self.update(h, obj.getvalue())
return h.digest()
elif type_util.is_type(obj, "numpy.ufunc"):
# For numpy.remainder, this returns remainder.
return bytes(obj.__name__.encode())
elif inspect.ismodule(obj):
# TODO: Figure out how to best show this kind of warning to the
# user. In the meantime, show nothing. This scenario is too common,
# so the current warning is quite annoying...
# st.warning(('Streamlit does not support hashing modules. '
# 'We did not hash `%s`.') % obj.__name__)
# TODO: Hash more than just the name for internal modules.
return self.to_bytes(obj.__name__)
elif inspect.isclass(obj):
# TODO: Figure out how to best show this kind of warning to the
# user. In the meantime, show nothing. This scenario is too common,
# (e.g. in every "except" statement) so the current warning is
# quite annoying...
# st.warning(('Streamlit does not support hashing classes. '
# 'We did not hash `%s`.') % obj.__name__)
# TODO: Hash more than just the name of classes.
return self.to_bytes(obj.__name__)
elif isinstance(obj, functools.partial):
# The return value of functools.partial is not a plain function:
# it's a callable object that remembers the original function plus
# the values you pickled into it. So here we need to special-case it.
h = hashlib.new("md5")
self.update(h, obj.args)
self.update(h, obj.func)
self.update(h, obj.keywords)
return h.digest()
else:
# As a last resort, hash the output of the object's __reduce__ method
h = hashlib.new("md5")
try:
reduce_data = obj.__reduce__()
except BaseException as e:
raise UnhashableTypeError() from e
for item in reduce_data:
self.update(h, item)
return h.digest()
class NoResult:
"""Placeholder class for return values when None is meaningful."""
pass
| 32.2 | 89 | 0.608218 |
import collections
import functools
import hashlib
import inspect
import io
import os
import pickle
import sys
import tempfile
import threading
import unittest.mock
import weakref
from typing import Any, Pattern, Optional, Dict, List
from streamlit import type_util
from streamlit import util
from streamlit.logger import get_logger
from streamlit.uploaded_file_manager import UploadedFile
from .cache_errors import (
CacheType,
UnhashableTypeError,
)
_LOGGER = get_logger(__name__)
_PANDAS_ROWS_LARGE = 100000
_PANDAS_SAMPLE_SIZE = 10000
_NP_SIZE_LARGE = 1000000
_NP_SAMPLE_SIZE = 100000
_CYCLE_PLACEHOLDER = b"streamlit-57R34ML17-hesamagicalponyflyingthroughthesky-CYCLE"
def update_hash(val: Any, hasher, cache_type: CacheType) -> None:
ch = _CacheFuncHasher(cache_type)
ch.update(hasher, val)
class _HashStack:
def __init__(self):
self._stack: collections.OrderedDict[int, List[Any]] = collections.OrderedDict()
def __repr__(self) -> str:
return util.repr_(self)
def push(self, val: Any):
self._stack[id(val)] = val
def pop(self):
self._stack.popitem()
def __contains__(self, val: Any):
return id(val) in self._stack
class _HashStacks:
def __init__(self):
self._stacks: weakref.WeakKeyDictionary[
threading.Thread, _HashStack
] = weakref.WeakKeyDictionary()
def __repr__(self) -> str:
return util.repr_(self)
@property
def current(self) -> _HashStack:
current_thread = threading.current_thread()
stack = self._stacks.get(current_thread, None)
if stack is None:
stack = _HashStack()
self._stacks[current_thread] = stack
return stack
hash_stacks = _HashStacks()
def _int_to_bytes(i: int) -> bytes:
num_bytes = (i.bit_length() + 8) // 8
return i.to_bytes(num_bytes, "little", signed=True)
def _key(obj: Optional[Any]) -> Any:
if obj is None:
return None
def is_simple(obj):
return (
isinstance(obj, bytes)
or isinstance(obj, bytearray)
or isinstance(obj, str)
or isinstance(obj, float)
or isinstance(obj, int)
or isinstance(obj, bool)
or obj is None
)
if is_simple(obj):
return obj
if isinstance(obj, tuple):
if all(map(is_simple, obj)):
return obj
if isinstance(obj, list):
if all(map(is_simple, obj)):
return ("__l", tuple(obj))
if (
type_util.is_type(obj, "pandas.core.frame.DataFrame")
or type_util.is_type(obj, "numpy.ndarray")
or inspect.isbuiltin(obj)
or inspect.isroutine(obj)
or inspect.iscode(obj)
):
return id(obj)
return NoResult
class _CacheFuncHasher:
def __init__(self, cache_type: CacheType):
self._hashes: Dict[Any, bytes] = {}
self.size = 0
self.cache_type = cache_type
def __repr__(self) -> str:
return util.repr_(self)
def to_bytes(self, obj: Any) -> bytes:
tname = type(obj).__qualname__.encode()
key = (tname, _key(obj))
if key[1] is not NoResult:
if key in self._hashes:
return self._hashes[key]
if obj in hash_stacks.current:
return _CYCLE_PLACEHOLDER
hash_stacks.current.push(obj)
try:
b = b"%s:%s" % (tname, self._to_bytes(obj))
# call to_bytes inside _to_bytes things get double-counted.
self.size += sys.getsizeof(b)
if key[1] is not NoResult:
self._hashes[key] = b
finally:
# In case an UnhashableTypeError (or other) error is thrown, clean up the
# stack so we don't get false positives in future hashing calls
hash_stacks.current.pop()
return b
def update(self, hasher, obj: Any) -> None:
b = self.to_bytes(obj)
hasher.update(b)
def _to_bytes(self, obj: Any) -> bytes:
if isinstance(obj, unittest.mock.Mock):
return self.to_bytes(id(obj))
elif isinstance(obj, bytes) or isinstance(obj, bytearray):
return obj
elif isinstance(obj, str):
return obj.encode()
elif isinstance(obj, float):
return self.to_bytes(hash(obj))
elif isinstance(obj, int):
return _int_to_bytes(obj)
elif isinstance(obj, (list, tuple)):
h = hashlib.new("md5")
for item in obj:
self.update(h, item)
return h.digest()
elif isinstance(obj, dict):
h = hashlib.new("md5")
for item in obj.items():
self.update(h, item)
return h.digest()
elif obj is None:
return b"0"
elif obj is True:
return b"1"
elif obj is False:
return b"0"
elif type_util.is_type(obj, "pandas.core.frame.DataFrame") or type_util.is_type(
obj, "pandas.core.series.Series"
):
import pandas as pd
if len(obj) >= _PANDAS_ROWS_LARGE:
obj = obj.sample(n=_PANDAS_SAMPLE_SIZE, random_state=0)
try:
return b"%s" % pd.util.hash_pandas_object(obj).sum()
except TypeError:
# Use pickle if pandas cannot hash the object for example if
# it contains unhashable objects.
return b"%s" % pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
elif type_util.is_type(obj, "numpy.ndarray"):
h = hashlib.new("md5")
self.update(h, obj.shape)
if obj.size >= _NP_SIZE_LARGE:
import numpy as np
state = np.random.RandomState(0)
obj = state.choice(obj.flat, size=_NP_SAMPLE_SIZE)
self.update(h, obj.tobytes())
return h.digest()
elif inspect.isbuiltin(obj):
return bytes(obj.__name__.encode())
elif type_util.is_type(obj, "builtins.mappingproxy") or type_util.is_type(
obj, "builtins.dict_items"
):
return self.to_bytes(dict(obj))
elif type_util.is_type(obj, "builtins.getset_descriptor"):
return bytes(obj.__qualname__.encode())
elif isinstance(obj, UploadedFile):
# UploadedFile is a BytesIO (thus IOBase) but has a name.
# It does not have a timestamp so this must come before
# temproary files
h = hashlib.new("md5")
self.update(h, obj.name)
self.update(h, obj.tell())
self.update(h, obj.getvalue())
return h.digest()
elif hasattr(obj, "name") and (
isinstance(obj, io.IOBase)
# Handle temporary files used during testing
or isinstance(obj, tempfile._TemporaryFileWrapper)
):
# Hash files as name + last modification date + offset.
# NB: we're using hasattr("name") to differentiate between
h = hashlib.new("md5")
obj_name = getattr(obj, "name", "wonthappen")
self.update(h, obj_name)
self.update(h, os.path.getmtime(obj_name))
self.update(h, obj.tell())
return h.digest()
elif isinstance(obj, Pattern):
return self.to_bytes([obj.pattern, obj.flags])
elif isinstance(obj, io.StringIO) or isinstance(obj, io.BytesIO):
h = hashlib.new("md5")
self.update(h, obj.tell())
self.update(h, obj.getvalue())
return h.digest()
elif type_util.is_type(obj, "numpy.ufunc"):
return bytes(obj.__name__.encode())
elif inspect.ismodule(obj):
return self.to_bytes(obj.__name__)
elif inspect.isclass(obj):
return self.to_bytes(obj.__name__)
elif isinstance(obj, functools.partial):
# the values you pickled into it. So here we need to special-case it.
h = hashlib.new("md5")
self.update(h, obj.args)
self.update(h, obj.func)
self.update(h, obj.keywords)
return h.digest()
else:
# As a last resort, hash the output of the object's __reduce__ method
h = hashlib.new("md5")
try:
reduce_data = obj.__reduce__()
except BaseException as e:
raise UnhashableTypeError() from e
for item in reduce_data:
self.update(h, item)
return h.digest()
class NoResult:
pass
| true | true |
f711488f270af532051f4ab5dbf2bb0e3eb47a6a | 24 | py | Python | src/test.py | nsde/latinum | c9c58e65b1ab6554f9e2d6bc540b2436aa6270a6 | [
"MIT"
] | null | null | null | src/test.py | nsde/latinum | c9c58e65b1ab6554f9e2d6bc540b2436aa6270a6 | [
"MIT"
] | null | null | null | src/test.py | nsde/latinum | c9c58e65b1ab6554f9e2d6bc540b2436aa6270a6 | [
"MIT"
] | null | null | null | print('a b'.split('.s')) | 24 | 24 | 0.541667 | print('a b'.split('.s')) | true | true |
f711491beb33a98ded1b75da64a40b5c95ce8390 | 934 | py | Python | BipHelp/urls.py | Fenn-CS/BipHelp | a343a1b6f4a1374f54a59d12b07ddbe46b4b0225 | [
"Apache-2.0"
] | null | null | null | BipHelp/urls.py | Fenn-CS/BipHelp | a343a1b6f4a1374f54a59d12b07ddbe46b4b0225 | [
"Apache-2.0"
] | null | null | null | BipHelp/urls.py | Fenn-CS/BipHelp | a343a1b6f4a1374f54a59d12b07ddbe46b4b0225 | [
"Apache-2.0"
] | 1 | 2018-10-30T21:51:10.000Z | 2018-10-30T21:51:10.000Z | """BipHelp URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from rest_framework import routers
router = routers.DefaultRouter()
urlpatterns = [
url(r'^', include(router.urls)),
url(r'^admin/', admin.site.urls),
url(r'^api-auth/', include('rest_framework.urls')),
]
| 35.923077 | 79 | 0.705567 | from django.conf.urls import url, include
from django.contrib import admin
from rest_framework import routers
router = routers.DefaultRouter()
urlpatterns = [
url(r'^', include(router.urls)),
url(r'^admin/', admin.site.urls),
url(r'^api-auth/', include('rest_framework.urls')),
]
| true | true |
f711491c5c5fc5ac07e56331455fd692c0815d01 | 370 | py | Python | web-frameworks/django/iWillTestEachApps/django_filter/models.py | suroegin-learning/learn-python | be5bda86add0dcd6f2fd3db737bb7d0d3ec5f853 | [
"MIT"
] | null | null | null | web-frameworks/django/iWillTestEachApps/django_filter/models.py | suroegin-learning/learn-python | be5bda86add0dcd6f2fd3db737bb7d0d3ec5f853 | [
"MIT"
] | null | null | null | web-frameworks/django/iWillTestEachApps/django_filter/models.py | suroegin-learning/learn-python | be5bda86add0dcd6f2fd3db737bb7d0d3ec5f853 | [
"MIT"
] | null | null | null | from django.db import models
class Manufacturer(models.Model):
name = models.CharField(max_length=255)
class Product(models.Model):
name = models.CharField(max_length=255)
price = models.DecimalField()
description = models.TextField()
release_date = models.DateField()
manufacturer = models.ForeignKey(Manufacturer, on_delete=models.CASCADE)
| 26.428571 | 76 | 0.748649 | from django.db import models
class Manufacturer(models.Model):
name = models.CharField(max_length=255)
class Product(models.Model):
name = models.CharField(max_length=255)
price = models.DecimalField()
description = models.TextField()
release_date = models.DateField()
manufacturer = models.ForeignKey(Manufacturer, on_delete=models.CASCADE)
| true | true |
f7114b1798d03f32f8126bb57de8a94ecc904af6 | 4,302 | py | Python | classification_ModelNet40/test.py | DeVriesMatt/pointMLP-pytorch | e9c09a2038551e83b072353f3fd7e3294463e892 | [
"Apache-2.0"
] | null | null | null | classification_ModelNet40/test.py | DeVriesMatt/pointMLP-pytorch | e9c09a2038551e83b072353f3fd7e3294463e892 | [
"Apache-2.0"
] | null | null | null | classification_ModelNet40/test.py | DeVriesMatt/pointMLP-pytorch | e9c09a2038551e83b072353f3fd7e3294463e892 | [
"Apache-2.0"
] | null | null | null | """
python test.py --model pointMLP --msg 20220209053148-404
"""
import argparse
import os
import datetime
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
from torch.utils.data import DataLoader
import models as models
from utils import progress_bar, IOStream
from data import ModelNet40
import sklearn.metrics as metrics
from helper import cal_loss
import numpy as np
import torch.nn.functional as F
model_names = sorted(
name for name in models.__dict__ if callable(models.__dict__[name])
)
def parse_args():
"""Parameters"""
parser = argparse.ArgumentParser("training")
parser.add_argument(
"-c",
"--checkpoint",
type=str,
metavar="PATH",
help="path to save checkpoint (default: checkpoint)",
)
parser.add_argument("--msg", type=str, help="message after checkpoint")
parser.add_argument(
"--batch_size", type=int, default=16, help="batch size in training"
)
parser.add_argument(
"--model", default="pointMLP", help="model name [default: pointnet_cls]"
)
parser.add_argument(
"--num_classes",
default=40,
type=int,
choices=[10, 40],
help="training on ModelNet10/40",
)
parser.add_argument("--num_points", type=int, default=1024, help="Point Number")
return parser.parse_args()
def main():
    """Entry point: rebuild the model, restore its best checkpoint and
    evaluate it on the ModelNet40 test split.

    The checkpoint directory is derived as ``checkpoints/<model><message>``
    where ``message`` is ``--msg`` (or a timestamp when absent), so ``--msg``
    must match the suffix used at training time.
    """
    args = parse_args()
    print(f"args: {args}")
    # Avoid HDF5 file-lock errors when several workers read the dataset.
    os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE"
    if torch.cuda.is_available():
        device = "cuda"
    else:
        device = "cpu"
    print(f"==> Using device: {device}")
    # Reconstruct the run-specific suffix used to name the checkpoint dir.
    if args.msg is None:
        message = str(datetime.datetime.now().strftime("-%Y%m%d%H%M%S"))
    else:
        message = "-" + args.msg
    args.checkpoint = "checkpoints/" + args.model + message
    print("==> Preparing data..")
    # Deterministic evaluation: no shuffling, keep the final partial batch.
    test_loader = DataLoader(
        ModelNet40(partition="test", num_points=args.num_points),
        num_workers=4,
        batch_size=args.batch_size,
        shuffle=False,
        drop_last=False,
    )
    # Model: looked up by name in the models package registry.
    print("==> Building model..")
    net = models.__dict__[args.model]()
    criterion = cal_loss
    net = net.to(device)
    # Load on CPU first so checkpoints restore regardless of GPU availability.
    checkpoint_path = os.path.join(args.checkpoint, "best_checkpoint.pth")
    checkpoint = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    # criterion = criterion.to(device)
    if device == "cuda":
        # Checkpoint keys were saved under DataParallel's "module." prefix,
        # so wrap before load_state_dict — presumably saved that way; confirm
        # against the training script if loading fails on CPU.
        net = torch.nn.DataParallel(net)
        cudnn.benchmark = True
    net.load_state_dict(checkpoint["net"])
    test_out = validate(net, test_loader, criterion, device)
    print(f"Vanilla out: {test_out}")
def validate(net, testloader, criterion, device):
    """Run one evaluation pass and return summary metrics.

    Returns a dict with mean ``loss``, overall ``acc`` (%), class-balanced
    ``acc_avg`` (%), and wall-clock ``time`` in whole seconds.

    NOTE(review): assumes ``testloader`` yields at least one batch —
    otherwise ``batch_idx`` is undefined at the final loss computation and
    the accuracy division by ``total`` would fail.
    """
    net.eval()
    test_loss = 0
    correct = 0
    total = 0
    test_true = []   # per-batch ground-truth label arrays
    test_pred = []   # per-batch predicted label arrays
    time_cost = datetime.datetime.now()
    with torch.no_grad():
        for batch_idx, (data, label) in enumerate(testloader):
            data, label = data.to(device), label.to(device).squeeze()
            # Loader yields (B, N, 3); the network expects channels-first (B, 3, N).
            data = data.permute(0, 2, 1)
            logits = net(data)
            loss = criterion(logits, label)
            test_loss += loss.item()
            # Predicted class = argmax over the class dimension.
            preds = logits.max(dim=1)[1]
            test_true.append(label.cpu().numpy())
            test_pred.append(preds.detach().cpu().numpy())
            total += label.size(0)
            correct += preds.eq(label).sum().item()
            # Live console progress with running mean loss and accuracy.
            progress_bar(
                batch_idx,
                len(testloader),
                "Loss: %.3f | Acc: %.3f%% (%d/%d)"
                % (
                    test_loss / (batch_idx + 1),
                    100.0 * correct / total,
                    correct,
                    total,
                ),
            )
    time_cost = int((datetime.datetime.now() - time_cost).total_seconds())
    test_true = np.concatenate(test_true)
    test_pred = np.concatenate(test_pred)
    # Values are rounded to 3 decimals via string formatting before
    # converting back to float (matches the project's reporting style).
    return {
        "loss": float("%.3f" % (test_loss / (batch_idx + 1))),
        "acc": float("%.3f" % (100.0 * metrics.accuracy_score(test_true, test_pred))),
        "acc_avg": float(
            "%.3f" % (100.0 * metrics.balanced_accuracy_score(test_true, test_pred))
        ),
        "time": time_cost,
    }
# Standard script entry point.
if __name__ == "__main__":
    main()
| 30.083916 | 86 | 0.600418 | import argparse
import os
import datetime
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
from torch.utils.data import DataLoader
import models as models
from utils import progress_bar, IOStream
from data import ModelNet40
import sklearn.metrics as metrics
from helper import cal_loss
import numpy as np
import torch.nn.functional as F
model_names = sorted(
name for name in models.__dict__ if callable(models.__dict__[name])
)
def parse_args():
parser = argparse.ArgumentParser("training")
parser.add_argument(
"-c",
"--checkpoint",
type=str,
metavar="PATH",
help="path to save checkpoint (default: checkpoint)",
)
parser.add_argument("--msg", type=str, help="message after checkpoint")
parser.add_argument(
"--batch_size", type=int, default=16, help="batch size in training"
)
parser.add_argument(
"--model", default="pointMLP", help="model name [default: pointnet_cls]"
)
parser.add_argument(
"--num_classes",
default=40,
type=int,
choices=[10, 40],
help="training on ModelNet10/40",
)
parser.add_argument("--num_points", type=int, default=1024, help="Point Number")
return parser.parse_args()
def main():
args = parse_args()
print(f"args: {args}")
os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE"
if torch.cuda.is_available():
device = "cuda"
else:
device = "cpu"
print(f"==> Using device: {device}")
if args.msg is None:
message = str(datetime.datetime.now().strftime("-%Y%m%d%H%M%S"))
else:
message = "-" + args.msg
args.checkpoint = "checkpoints/" + args.model + message
print("==> Preparing data..")
test_loader = DataLoader(
ModelNet40(partition="test", num_points=args.num_points),
num_workers=4,
batch_size=args.batch_size,
shuffle=False,
drop_last=False,
)
print("==> Building model..")
net = models.__dict__[args.model]()
criterion = cal_loss
net = net.to(device)
checkpoint_path = os.path.join(args.checkpoint, "best_checkpoint.pth")
checkpoint = torch.load(checkpoint_path, map_location=torch.device("cpu"))
if device == "cuda":
net = torch.nn.DataParallel(net)
cudnn.benchmark = True
net.load_state_dict(checkpoint["net"])
test_out = validate(net, test_loader, criterion, device)
print(f"Vanilla out: {test_out}")
def validate(net, testloader, criterion, device):
net.eval()
test_loss = 0
correct = 0
total = 0
test_true = []
test_pred = []
time_cost = datetime.datetime.now()
with torch.no_grad():
for batch_idx, (data, label) in enumerate(testloader):
data, label = data.to(device), label.to(device).squeeze()
data = data.permute(0, 2, 1)
logits = net(data)
loss = criterion(logits, label)
test_loss += loss.item()
preds = logits.max(dim=1)[1]
test_true.append(label.cpu().numpy())
test_pred.append(preds.detach().cpu().numpy())
total += label.size(0)
correct += preds.eq(label).sum().item()
progress_bar(
batch_idx,
len(testloader),
"Loss: %.3f | Acc: %.3f%% (%d/%d)"
% (
test_loss / (batch_idx + 1),
100.0 * correct / total,
correct,
total,
),
)
time_cost = int((datetime.datetime.now() - time_cost).total_seconds())
test_true = np.concatenate(test_true)
test_pred = np.concatenate(test_pred)
return {
"loss": float("%.3f" % (test_loss / (batch_idx + 1))),
"acc": float("%.3f" % (100.0 * metrics.accuracy_score(test_true, test_pred))),
"acc_avg": float(
"%.3f" % (100.0 * metrics.balanced_accuracy_score(test_true, test_pred))
),
"time": time_cost,
}
if __name__ == "__main__":
main()
| true | true |
f7114b855ef654c33ec2aa02a726cbcb0a885758 | 720 | py | Python | pydualsense/hidguardian.py | TheComputerDan/pydualsense | c1c10e4eacf37818e31b09f83c0e5aba7001fbad | [
"MIT"
] | null | null | null | pydualsense/hidguardian.py | TheComputerDan/pydualsense | c1c10e4eacf37818e31b09f83c0e5aba7001fbad | [
"MIT"
] | null | null | null | pydualsense/hidguardian.py | TheComputerDan/pydualsense | c1c10e4eacf37818e31b09f83c0e5aba7001fbad | [
"MIT"
] | null | null | null | import winreg
import sys
def check_hide() -> bool:
    """Return True when HidGuardian hides the DualSense controller.

    On Windows, reads HidGuardian's ``AffectedDevices`` registry value and
    reports whether both Sony's vendor id (054C) and the DualSense product
    id (0CE6) are listed.  Returns False on any other platform, when the
    key/value is missing, or when the controller is not hidden.
    """
    if not sys.platform.startswith('win32'):
        # Original fell through and implicitly returned None here,
        # violating the declared bool return type.
        return False
    # Lazy import: winreg only exists on Windows; importing here keeps this
    # function (and callers that guard on platform) usable elsewhere.  The
    # module-level `import winreg` at the top of the file still assumes
    # Windows — NOTE(review): consider guarding that import as well.
    import winreg
    try:
        access_reg = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
        # Raw string: the original literal relied on '\C', '\S', '\H', '\P'
        # not being recognized escapes, which raises SyntaxWarning on
        # modern CPython and is fragile.
        key_path = r'SYSTEM\CurrentControlSet\Services\HidGuardian\Parameters'
        access_key = winreg.OpenKey(access_reg, key_path, 0, winreg.KEY_READ)
        affected_devices = winreg.QueryValueEx(access_key, 'AffectedDevices')[0]
        return "054C" in affected_devices and "0CE6" in affected_devices
    except OSError as e:
        # Key absent or unreadable: treat as "not hidden" (best effort,
        # matching the original behavior of printing and returning False).
        print(e)
        return False
| 37.894737 | 135 | 0.605556 | import winreg
import sys
def check_hide() -> bool:
if sys.platform.startswith('win32'):
try:
access_reg = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
access_key = winreg.OpenKey(access_reg, 'SYSTEM\CurrentControlSet\Services\HidGuardian\Parameters', 0, winreg.KEY_READ)
affected_devices = winreg.QueryValueEx(access_key, 'AffectedDevices')[0]
if "054C" in affected_devices and "0CE6" in affected_devices:
return True
return False
except OSError as e:
print(e)
return False
| true | true |
f7114ceafa6e4c5af4457ca817ffc5d52ca75a2f | 4,600 | py | Python | util/rule_ctl/tests/rule_ctl_test.py | linuxgemini/coreruleset | 0873cbeae35c85de72b0292f0d17a26308026c83 | [
"Apache-2.0"
] | 930 | 2020-05-13T17:07:34.000Z | 2022-03-30T02:56:06.000Z | util/rule_ctl/tests/rule_ctl_test.py | linuxgemini/coreruleset | 0873cbeae35c85de72b0292f0d17a26308026c83 | [
"Apache-2.0"
] | 675 | 2020-05-13T20:32:11.000Z | 2022-03-31T22:07:20.000Z | util/rule_ctl/tests/rule_ctl_test.py | linuxgemini/coreruleset | 0873cbeae35c85de72b0292f0d17a26308026c83 | [
"Apache-2.0"
] | 216 | 2020-05-13T16:58:08.000Z | 2022-03-30T20:50:40.000Z | from .helpers import *
class TestFilterRuleId:
def test_filter_rule_id_exact_match(self):
arguments = [
"--filter-rule-id", "12",
"--append-tag", "foo"
]
rule_string = """
SecRule ARGS|ARGS:foo|!ARGS:bar "@rx foo" "id:12"
"""
expected = """
SecRule ARGS|ARGS:foo|!ARGS:bar "@rx foo" "id:12,tag:'foo'"
"""
context = create_context(arguments, rule_string)
assert expected == get_output(context)
def test_filter_rule_id_prefix_match(self):
arguments = [
"--filter-rule-id", "^12",
"--append-tag", "foo"
]
rule_string = """
SecRule ARGS|ARGS:foo|!ARGS:bar "@rx foo" "id:122"
"""
expected = """
SecRule ARGS|ARGS:foo|!ARGS:bar "@rx foo" "id:122,tag:'foo'"
"""
context = create_context(arguments, rule_string)
assert expected == get_output(context)
def test_filter_rule_id_suffix_match(self):
arguments = [
"--filter-rule-id", ".*22$",
"--append-tag", "foo"
]
rule_string = """
SecRule ARGS|ARGS:foo|!ARGS:bar "@rx foo" "id:122"
"""
expected = """
SecRule ARGS|ARGS:foo|!ARGS:bar "@rx foo" "id:122,tag:'foo'"
"""
context = create_context(arguments, rule_string)
assert expected == get_output(context)
def test_filter_rule_id_no_match(self):
arguments = [
"--filter-rule-id", "11",
"--append-tag", "foo"
]
rule_string = """
SecRule ARGS|ARGS:foo|!ARGS:bar "@rx foo" "id:12"
"""
expected = rule_string
context = create_context(arguments, rule_string)
assert expected == get_output(context)
class TestLineNumbers:
    """Tests that editing rules keeps backslash line continuations
    consistent when the rewritten rule grows or shrinks."""
    def test_line_numbers_identical(self):
        """Single-line rules stay single-line when a tag is appended."""
        arguments = [
            "--append-tag", "foo"
        ]
        rule_string = """
        SecRule ARGS|ARGS:foo|!ARGS:bar "@rx foo" "id:12"
        SecRule ARGS "@rx bar" "id:13"
        """
        expected = """
        SecRule ARGS|ARGS:foo|!ARGS:bar "@rx foo" "id:12,tag:'foo'"
        SecRule ARGS "@rx bar" "id:13,tag:'foo'"
        """
        context = create_context(arguments, rule_string)
        assert expected == get_output(context)
    def test_line_numbers_shifted_down(self):
        """Appending a tag to multi-line rules adds continuation lines."""
        arguments = [
            "--append-tag", "foo"
        ]
        rule_string = """
        SecRule ARGS|ARGS:foo|!ARGS:bar \\
            "@rx foo" \\
            "id:12"
        SecRule ARGS "@rx bar" \\
            "id:13"
        """
        expected = """
        SecRule ARGS|ARGS:foo|!ARGS:bar \\
            "@rx foo" \\
            "id:12,\\
            tag:'foo'"
        SecRule ARGS "@rx bar" \\
            "id:13,\\
            tag:'foo'"
        """
        context = create_context(arguments, rule_string)
        assert expected == get_output(context)
    def test_line_numbers_shifted_up(self):
        """Removing a tag from multi-line rules drops continuation lines."""
        arguments = [
            "--remove-tag", "foo"
        ]
        rule_string = """
        SecRule ARGS|ARGS:foo|!ARGS:bar \\
            "@rx foo" \\
            "id:12,\\
            tag:foo"
        SecRule ARGS "@rx bar" \\
            "id:13,\\
            tag:foo"
        """
        expected = """
        SecRule ARGS|ARGS:foo|!ARGS:bar \\
            "@rx foo" \\
            "id:12"
        SecRule ARGS "@rx bar" \\
            "id:13"
        """
        context = create_context(arguments, rule_string)
        assert expected == get_output(context)
class TestTargetFile:
    """Tests for where the rewritten rule set is written on disk."""
    def test_target_file(self, tmp_path):
        """--target-file writes the modified rules to the given path."""
        import os
        from rule_ctl import write_output
        file_path = str(tmp_path / 'foo.conf')
        arguments = [
            "--append-tag", "foo",
            "--target-file", file_path
        ]
        rule_string = """
        SecRule ARGS|ARGS:foo|!ARGS:bar \\
            "@rx foo" \\
            "id:12"
        """
        expected = """
        SecRule ARGS|ARGS:foo|!ARGS:bar \\
            "@rx foo" \\
            "id:12,\\
            tag:'foo'"
        """
        context = create_context(arguments, rule_string)
        write_output(context)
        assert os.path.exists(file_path)
        with open(file_path, 'r') as h:
            assert expected.rstrip() == h.read()
    def test_target_file_uses_config_as_default(self, tmp_path):
        """Without --target-file, output defaults to the --config path."""
        import os
        from rule_ctl import write_output
        file_path = str(tmp_path / 'foo.conf')
        arguments = [
            "--append-tag", "foo",
            "--config", file_path
        ]
        rule_string = """
        SecRule ARGS|ARGS:foo|!ARGS:bar \\
            "@rx foo" \\
            "id:12"
        """
        expected = """
        SecRule ARGS|ARGS:foo|!ARGS:bar \\
            "@rx foo" \\
            "id:12,\\
            tag:'foo'"
        """
        context = create_context(arguments, rule_string)
        write_output(context)
        assert os.path.exists(file_path)
        with open(file_path, 'r') as h:
            assert expected.rstrip() == h.read()
| 23.469388 | 64 | 0.553913 | from .helpers import *
class TestFilterRuleId:
def test_filter_rule_id_exact_match(self):
arguments = [
"--filter-rule-id", "12",
"--append-tag", "foo"
]
rule_string = """
SecRule ARGS|ARGS:foo|!ARGS:bar "@rx foo" "id:12"
"""
expected = """
SecRule ARGS|ARGS:foo|!ARGS:bar "@rx foo" "id:12,tag:'foo'"
"""
context = create_context(arguments, rule_string)
assert expected == get_output(context)
def test_filter_rule_id_prefix_match(self):
arguments = [
"--filter-rule-id", "^12",
"--append-tag", "foo"
]
rule_string = """
SecRule ARGS|ARGS:foo|!ARGS:bar "@rx foo" "id:122"
"""
expected = """
SecRule ARGS|ARGS:foo|!ARGS:bar "@rx foo" "id:122,tag:'foo'"
"""
context = create_context(arguments, rule_string)
assert expected == get_output(context)
def test_filter_rule_id_suffix_match(self):
arguments = [
"--filter-rule-id", ".*22$",
"--append-tag", "foo"
]
rule_string = """
SecRule ARGS|ARGS:foo|!ARGS:bar "@rx foo" "id:122"
"""
expected = """
SecRule ARGS|ARGS:foo|!ARGS:bar "@rx foo" "id:122,tag:'foo'"
"""
context = create_context(arguments, rule_string)
assert expected == get_output(context)
def test_filter_rule_id_no_match(self):
arguments = [
"--filter-rule-id", "11",
"--append-tag", "foo"
]
rule_string = """
SecRule ARGS|ARGS:foo|!ARGS:bar "@rx foo" "id:12"
"""
expected = rule_string
context = create_context(arguments, rule_string)
assert expected == get_output(context)
class TestLineNumbers:
def test_line_numbers_identical(self):
arguments = [
"--append-tag", "foo"
]
rule_string = """
SecRule ARGS|ARGS:foo|!ARGS:bar "@rx foo" "id:12"
SecRule ARGS "@rx bar" "id:13"
"""
expected = """
SecRule ARGS|ARGS:foo|!ARGS:bar "@rx foo" "id:12,tag:'foo'"
SecRule ARGS "@rx bar" "id:13,tag:'foo'"
"""
context = create_context(arguments, rule_string)
assert expected == get_output(context)
def test_line_numbers_shifted_down(self):
arguments = [
"--append-tag", "foo"
]
rule_string = """
SecRule ARGS|ARGS:foo|!ARGS:bar \\
"@rx foo" \\
"id:12"
SecRule ARGS "@rx bar" \\
"id:13"
"""
expected = """
SecRule ARGS|ARGS:foo|!ARGS:bar \\
"@rx foo" \\
"id:12,\\
tag:'foo'"
SecRule ARGS "@rx bar" \\
"id:13,\\
tag:'foo'"
"""
context = create_context(arguments, rule_string)
assert expected == get_output(context)
def test_line_numbers_shifted_up(self):
arguments = [
"--remove-tag", "foo"
]
rule_string = """
SecRule ARGS|ARGS:foo|!ARGS:bar \\
"@rx foo" \\
"id:12,\\
tag:foo"
SecRule ARGS "@rx bar" \\
"id:13,\\
tag:foo"
"""
expected = """
SecRule ARGS|ARGS:foo|!ARGS:bar \\
"@rx foo" \\
"id:12"
SecRule ARGS "@rx bar" \\
"id:13"
"""
context = create_context(arguments, rule_string)
assert expected == get_output(context)
class TestTargetFile:
def test_target_file(self, tmp_path):
import os
from rule_ctl import write_output
file_path = str(tmp_path / 'foo.conf')
arguments = [
"--append-tag", "foo",
"--target-file", file_path
]
rule_string = """
SecRule ARGS|ARGS:foo|!ARGS:bar \\
"@rx foo" \\
"id:12"
"""
expected = """
SecRule ARGS|ARGS:foo|!ARGS:bar \\
"@rx foo" \\
"id:12,\\
tag:'foo'"
"""
context = create_context(arguments, rule_string)
write_output(context)
assert os.path.exists(file_path)
with open(file_path, 'r') as h:
assert expected.rstrip() == h.read()
def test_target_file_uses_config_as_default(self, tmp_path):
import os
from rule_ctl import write_output
file_path = str(tmp_path / 'foo.conf')
arguments = [
"--append-tag", "foo",
"--config", file_path
]
rule_string = """
SecRule ARGS|ARGS:foo|!ARGS:bar \\
"@rx foo" \\
"id:12"
"""
expected = """
SecRule ARGS|ARGS:foo|!ARGS:bar \\
"@rx foo" \\
"id:12,\\
tag:'foo'"
"""
context = create_context(arguments, rule_string)
write_output(context)
assert os.path.exists(file_path)
with open(file_path, 'r') as h:
assert expected.rstrip() == h.read()
| true | true |
f7114d567227d1df1f9cb9f60ad3422ee6a8dcd9 | 1,784 | py | Python | backend/models/advices.py | jimbunny/AdminSystem | d9a42e2d8608cb0d9bc88f4c1945da48fb8cc925 | [
"MIT"
] | null | null | null | backend/models/advices.py | jimbunny/AdminSystem | d9a42e2d8608cb0d9bc88f4c1945da48fb8cc925 | [
"MIT"
] | null | null | null | backend/models/advices.py | jimbunny/AdminSystem | d9a42e2d8608cb0d9bc88f4c1945da48fb8cc925 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# author:jingtongyu
# datetime:2020/6/7 10:14 下午
# software: PyCharm
from flask import current_app
from . import db
from .base import BaseModel
from sqlalchemy.exc import SQLAlchemyError
from werkzeug.security import generate_password_hash, check_password_hash
import time
class AdvicesModel(db.Model, BaseModel):
    """SQLAlchemy model for user-submitted advice/feedback entries.

    Query helpers are defined as instance methods and delegate to the
    class-level ``query`` attribute provided by Flask-SQLAlchemy.
    """
    __tablename__ = 'advices'
    id = db.Column(db.Integer, primary_key=True)
    # Submitter contact email (free text, max 25 chars).
    email = db.Column(db.String(25), nullable=False)
    # Submitter display name.
    username = db.Column(db.String(25), nullable=False)
    # The advice/feedback text itself.
    advice = db.Column(db.String(500), nullable=False)
    def __init__(self, email, username, advice):
        self.email = email
        self.username = username
        self.advice = advice
    def __str__(self):
        return "Advices(id='%s')" % self.id
    def paginate(self, page, per_page):
        """Return a pagination object for the given page (never raises 404)."""
        return self.query.paginate(page=page, per_page=per_page, error_out=False)
    def filter_by_email(self, email):
        """Return all rows whose email contains the given substring."""
        return self.query.filter(self.email.like("%" + email + "%")).all()
    def filter_by_username(self, username):
        """Return all rows whose username contains the given substring."""
        return self.query.filter(self.username.like("%" + username + "%")).all()
    def get(self, _id):
        """Return the row with the given primary key, or None."""
        return self.query.filter_by(id=_id).first()
    def add(self, role):
        # NOTE(review): parameter is named `role` but it receives an
        # AdvicesModel instance — likely copied from a roles model.
        db.session.add(role)
        return session_commit()
    def update(self):
        """Flush pending attribute changes on this instance."""
        return session_commit()
    def delete(self, ids):
        """Bulk-delete all rows whose id is in `ids`."""
        # self.query.filter_by(id=id).delete()
        self.query.filter(self.id.in_(ids)).delete(synchronize_session=False)
        return session_commit()
def session_commit():
    """Commit the current DB session.

    Returns None on success; on SQLAlchemyError rolls back and returns the
    error message as a string (callers treat a truthy return as failure).
    """
    try:
        db.session.commit()
    except SQLAlchemyError as e:
        db.session.rollback()
        reason = str(e)
        # Logged at info level in the original; arguably should be error.
        current_app.logger.info(e)
        return reason
| 28.31746 | 81 | 0.661996 |
from flask import current_app
from . import db
from .base import BaseModel
from sqlalchemy.exc import SQLAlchemyError
from werkzeug.security import generate_password_hash, check_password_hash
import time
class AdvicesModel(db.Model, BaseModel):
__tablename__ = 'advices'
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(25), nullable=False)
username = db.Column(db.String(25), nullable=False)
advice = db.Column(db.String(500), nullable=False)
def __init__(self, email, username, advice):
self.email = email
self.username = username
self.advice = advice
def __str__(self):
return "Advices(id='%s')" % self.id
def paginate(self, page, per_page):
return self.query.paginate(page=page, per_page=per_page, error_out=False)
def filter_by_email(self, email):
return self.query.filter(self.email.like("%" + email + "%")).all()
def filter_by_username(self, username):
return self.query.filter(self.username.like("%" + username + "%")).all()
def get(self, _id):
return self.query.filter_by(id=_id).first()
def add(self, role):
db.session.add(role)
return session_commit()
def update(self):
return session_commit()
def delete(self, ids):
self.query.filter(self.id.in_(ids)).delete(synchronize_session=False)
return session_commit()
def session_commit():
try:
db.session.commit()
except SQLAlchemyError as e:
db.session.rollback()
reason = str(e)
current_app.logger.info(e)
return reason
| true | true |
f7114e75083e7594277297c3936a74615d8ca5a1 | 7,187 | py | Python | tests/toranj/test-038-clear-address-cache-for-sed.py | ctan-g/openthread | 376f35a49e5c0a5b8170c117d7a930e3a8b3b210 | [
"BSD-3-Clause"
] | 1 | 2020-08-12T06:15:53.000Z | 2020-08-12T06:15:53.000Z | tests/toranj/test-038-clear-address-cache-for-sed.py | ctan-g/openthread | 376f35a49e5c0a5b8170c117d7a930e3a8b3b210 | [
"BSD-3-Clause"
] | null | null | null | tests/toranj/test-038-clear-address-cache-for-sed.py | ctan-g/openthread | 376f35a49e5c0a5b8170c117d7a930e3a8b3b210 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
#
# Copyright (c) 2019, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import wpan
from wpan import verify
# -----------------------------------------------------------------------------------------------------------------------
# Test description: Address Cache Table
#
# This test verifies that address cache entry associated with a SED child
# addresses is removed from new parent node ensuring we would not have a
# routing loop.
test_name = __file__[:-3] if __file__.endswith('.py') else __file__
print('-' * 120)
print('Starting \'{}\''.format(test_name))
# -----------------------------------------------------------------------------------------------------------------------
# Creating `wpan.Nodes` instances
speedup = 4
wpan.Node.set_time_speedup_factor(speedup)
r1 = wpan.Node()
r2 = wpan.Node()
r3 = wpan.Node()
c = wpan.Node()
c3 = wpan.Node()
# -----------------------------------------------------------------------------------------------------------------------
# Init all nodes
wpan.Node.init_all_nodes()
# -----------------------------------------------------------------------------------------------------------------------
# Build network topology
#
# r3 ---- r1 ---- r2
# | |
# | |
# c3 c
#
# c is initially attached to r2 but it switches parent during test to r1 and then r3
# c3 is just added to make sure r3 become router quickly (not involved in test)
PREFIX = "fd00:1234::"
POLL_INTERVAL = 400
r1.form("addr-cache")
r1.add_prefix(PREFIX, stable=True, on_mesh=True, slaac=True, preferred=True)
r1.whitelist_node(r2)
r2.whitelist_node(r1)
r2.join_node(r1, wpan.JOIN_TYPE_ROUTER)
c.set(wpan.WPAN_POLL_INTERVAL, str(POLL_INTERVAL))
c.whitelist_node(r2)
r2.whitelist_node(c)
c.join_node(r2, wpan.JOIN_TYPE_SLEEPY_END_DEVICE)
r3.whitelist_node(r1)
r1.whitelist_node(r3)
r3.join_node(r1, wpan.JOIN_TYPE_ROUTER)
c3.whitelist_node(r3)
r3.whitelist_node(c3)
c3.join_node(r3, wpan.JOIN_TYPE_END_DEVICE)
# -----------------------------------------------------------------------------------------------------------------------
# Test implementation
#
ROUTER_TABLE_WAIT_TIME = 30 / speedup + 5
INVALID_ROUTER_ID = 63
verify(r1.get(wpan.WPAN_NODE_TYPE) == wpan.NODE_TYPE_LEADER)
verify(r2.get(wpan.WPAN_NODE_TYPE) == wpan.NODE_TYPE_ROUTER)
verify(r3.get(wpan.WPAN_NODE_TYPE) == wpan.NODE_TYPE_ROUTER)
verify(c.get(wpan.WPAN_NODE_TYPE) == wpan.NODE_TYPE_SLEEPY_END_DEVICE)
verify(c3.get(wpan.WPAN_NODE_TYPE) == wpan.NODE_TYPE_END_DEVICE)
r1_address = r1.find_ip6_address_with_prefix(PREFIX)
r2_address = r2.find_ip6_address_with_prefix(PREFIX)
c_address = c.find_ip6_address_with_prefix(PREFIX)
# Send a single UDP message from r1 to c
#
# This adds an address cache entry on r1 for c pointing to r2 (the current parent of c).
sender = r1.prepare_tx(r1_address, c_address, "Hi from r1 to c")
recver = c.prepare_rx(sender)
wpan.Node.perform_async_tx_rx()
verify(sender.was_successful and recver.was_successful)
# Force c to switch its parent from r2 to r1
#
# r3 ---- r1 ---- r2
# | |
# | |
# c3 c
CHILD_SUPERVISION_CHECK_TIMEOUT = 2
PARENT_SUPERVISION_INTERVAL = 1
REATTACH_WAIT_TIME = CHILD_SUPERVISION_CHECK_TIMEOUT / speedup + 6
c.set(wpan.WPAN_CHILD_SUPERVISION_CHECK_TIMEOUT, str(CHILD_SUPERVISION_CHECK_TIMEOUT))
r2.set(wpan.WPAN_CHILD_SUPERVISION_INTERVAL, str(PARENT_SUPERVISION_INTERVAL))
r1.set(wpan.WPAN_CHILD_SUPERVISION_INTERVAL, str(PARENT_SUPERVISION_INTERVAL))
r3.set(wpan.WPAN_CHILD_SUPERVISION_INTERVAL, str(PARENT_SUPERVISION_INTERVAL))
r2.un_whitelist_node(c)
r1.whitelist_node(c)
c.whitelist_node(r1)
# Wait for c to detach from r2 and attach to r1.
def check_c_is_removed_from_r2_child_table():
    """Verifier callback: passes once r2's child table is empty,
    i.e. sleepy child `c` has detached from r2."""
    child_table = wpan.parse_list(r2.get(wpan.WPAN_THREAD_CHILD_TABLE))
    verify(len(child_table) == 0)
wpan.verify_within(check_c_is_removed_from_r2_child_table, REATTACH_WAIT_TIME)
# check that c is now a child of r1
child_table = wpan.parse_list(r1.get(wpan.WPAN_THREAD_CHILD_TABLE))
verify(len(child_table) == 1)
# Send a single UDP message from r2 to c
#
# This adds an address cache entry on r2 for c pointing to r1 (the current parent of c).
sender = r2.prepare_tx(r2_address, c_address, "Hi from r2 to c")
recver = c.prepare_rx(sender)
wpan.Node.perform_async_tx_rx()
verify(sender.was_successful and recver.was_successful)
# Force c to switch its parent from r1 to r3
#
# r3 ---- r1 ---- r2
# | \
# | \
# c3 c
r1.un_whitelist_node(c)
r3.whitelist_node(c)
c.whitelist_node(r3)
# Wait for c to detach from r1 and attach to r3.
def check_c_is_removed_from_r1_child_table():
    """Verifier callback: passes once r1's child table is empty,
    i.e. sleepy child `c` has detached from r1."""
    child_table = wpan.parse_list(r1.get(wpan.WPAN_THREAD_CHILD_TABLE))
    verify(len(child_table) == 0)
wpan.verify_within(check_c_is_removed_from_r1_child_table, REATTACH_WAIT_TIME)
# check that c is now a child of r3 (r3 should have two child, c and c3)
child_table = wpan.parse_list(r3.get(wpan.WPAN_THREAD_CHILD_TABLE))
verify(len(child_table) == 2)
# Send a single UDP message from r1 to c
#
# If the r1 address cache entry is not cleared when c attached to r1,
# r1 will still have an entry pointing to r2, and r2 will have an entry
# pointing to r1, thus creating a loop (the msg will not be delivered to r3)
sender = r1.prepare_tx(r1_address, c_address, "Hi from r1 to c")
recver = c.prepare_rx(sender)
wpan.Node.perform_async_tx_rx()
verify(sender.was_successful and recver.was_successful)
# -----------------------------------------------------------------------------------------------------------------------
# Test finished
wpan.Node.finalize_all_nodes()
print('\'{}\' passed.'.format(test_name))
| 34.552885 | 121 | 0.681647 |
import wpan
from wpan import verify
test_name = __file__[:-3] if __file__.endswith('.py') else __file__
print('-' * 120)
print('Starting \'{}\''.format(test_name))
speedup = 4
wpan.Node.set_time_speedup_factor(speedup)
r1 = wpan.Node()
r2 = wpan.Node()
r3 = wpan.Node()
c = wpan.Node()
c3 = wpan.Node()
wpan.Node.init_all_nodes()
PREFIX = "fd00:1234::"
POLL_INTERVAL = 400
r1.form("addr-cache")
r1.add_prefix(PREFIX, stable=True, on_mesh=True, slaac=True, preferred=True)
r1.whitelist_node(r2)
r2.whitelist_node(r1)
r2.join_node(r1, wpan.JOIN_TYPE_ROUTER)
c.set(wpan.WPAN_POLL_INTERVAL, str(POLL_INTERVAL))
c.whitelist_node(r2)
r2.whitelist_node(c)
c.join_node(r2, wpan.JOIN_TYPE_SLEEPY_END_DEVICE)
r3.whitelist_node(r1)
r1.whitelist_node(r3)
r3.join_node(r1, wpan.JOIN_TYPE_ROUTER)
c3.whitelist_node(r3)
r3.whitelist_node(c3)
c3.join_node(r3, wpan.JOIN_TYPE_END_DEVICE)
ROUTER_TABLE_WAIT_TIME = 30 / speedup + 5
INVALID_ROUTER_ID = 63
verify(r1.get(wpan.WPAN_NODE_TYPE) == wpan.NODE_TYPE_LEADER)
verify(r2.get(wpan.WPAN_NODE_TYPE) == wpan.NODE_TYPE_ROUTER)
verify(r3.get(wpan.WPAN_NODE_TYPE) == wpan.NODE_TYPE_ROUTER)
verify(c.get(wpan.WPAN_NODE_TYPE) == wpan.NODE_TYPE_SLEEPY_END_DEVICE)
verify(c3.get(wpan.WPAN_NODE_TYPE) == wpan.NODE_TYPE_END_DEVICE)
r1_address = r1.find_ip6_address_with_prefix(PREFIX)
r2_address = r2.find_ip6_address_with_prefix(PREFIX)
c_address = c.find_ip6_address_with_prefix(PREFIX)
sender = r1.prepare_tx(r1_address, c_address, "Hi from r1 to c")
recver = c.prepare_rx(sender)
wpan.Node.perform_async_tx_rx()
verify(sender.was_successful and recver.was_successful)
CHILD_SUPERVISION_CHECK_TIMEOUT = 2
PARENT_SUPERVISION_INTERVAL = 1
REATTACH_WAIT_TIME = CHILD_SUPERVISION_CHECK_TIMEOUT / speedup + 6
c.set(wpan.WPAN_CHILD_SUPERVISION_CHECK_TIMEOUT, str(CHILD_SUPERVISION_CHECK_TIMEOUT))
r2.set(wpan.WPAN_CHILD_SUPERVISION_INTERVAL, str(PARENT_SUPERVISION_INTERVAL))
r1.set(wpan.WPAN_CHILD_SUPERVISION_INTERVAL, str(PARENT_SUPERVISION_INTERVAL))
r3.set(wpan.WPAN_CHILD_SUPERVISION_INTERVAL, str(PARENT_SUPERVISION_INTERVAL))
r2.un_whitelist_node(c)
r1.whitelist_node(c)
c.whitelist_node(r1)
def check_c_is_removed_from_r2_child_table():
child_table = wpan.parse_list(r2.get(wpan.WPAN_THREAD_CHILD_TABLE))
verify(len(child_table) == 0)
wpan.verify_within(check_c_is_removed_from_r2_child_table, REATTACH_WAIT_TIME)
child_table = wpan.parse_list(r1.get(wpan.WPAN_THREAD_CHILD_TABLE))
verify(len(child_table) == 1)
sender = r2.prepare_tx(r2_address, c_address, "Hi from r2 to c")
recver = c.prepare_rx(sender)
wpan.Node.perform_async_tx_rx()
verify(sender.was_successful and recver.was_successful)
r1.un_whitelist_node(c)
r3.whitelist_node(c)
c.whitelist_node(r3)
def check_c_is_removed_from_r1_child_table():
child_table = wpan.parse_list(r1.get(wpan.WPAN_THREAD_CHILD_TABLE))
verify(len(child_table) == 0)
wpan.verify_within(check_c_is_removed_from_r1_child_table, REATTACH_WAIT_TIME)
child_table = wpan.parse_list(r3.get(wpan.WPAN_THREAD_CHILD_TABLE))
verify(len(child_table) == 2)
sender = r1.prepare_tx(r1_address, c_address, "Hi from r1 to c")
recver = c.prepare_rx(sender)
wpan.Node.perform_async_tx_rx()
verify(sender.was_successful and recver.was_successful)
wpan.Node.finalize_all_nodes()
print('\'{}\' passed.'.format(test_name))
| true | true |
f7114f0b8e6cff50b6be4c21a07fc5c0f023fd01 | 864 | py | Python | env/lib/python3.8/site-packages/plotly/validators/volume/_lightposition.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 11,750 | 2015-10-12T07:03:39.000Z | 2022-03-31T20:43:15.000Z | env/lib/python3.8/site-packages/plotly/validators/volume/_lightposition.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 2,951 | 2015-10-12T00:41:25.000Z | 2022-03-31T22:19:26.000Z | env/lib/python3.8/site-packages/plotly/validators/volume/_lightposition.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 2,623 | 2015-10-15T14:40:27.000Z | 2022-03-28T16:05:50.000Z | import _plotly_utils.basevalidators
class LightpositionValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Auto-generated plotly validator for ``volume.lightposition``.

    Do not hand-edit behavior: files like this are produced by plotly's
    code generator.  Validates/coerces values into a Lightposition object.
    """
    def __init__(self, plotly_name="lightposition", parent_name="volume", **kwargs):
        super(LightpositionValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Lightposition"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            x
                Numeric vector, representing the X coordinate
                for each vertex.
            y
                Numeric vector, representing the Y coordinate
                for each vertex.
            z
                Numeric vector, representing the Z coordinate
                for each vertex.
""",
            ),
            **kwargs
        )
| 33.230769 | 84 | 0.568287 | import _plotly_utils.basevalidators
class LightpositionValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="lightposition", parent_name="volume", **kwargs):
super(LightpositionValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Lightposition"),
data_docs=kwargs.pop(
"data_docs",
"""
x
Numeric vector, representing the X coordinate
for each vertex.
y
Numeric vector, representing the Y coordinate
for each vertex.
z
Numeric vector, representing the Z coordinate
for each vertex.
""",
),
**kwargs
)
| true | true |
f7114f44e7d867f1454daedb6c4471877ffab72f | 5,380 | py | Python | scripts_hico/HICO_eval/bbox_utils.py | roy881020/VSGNet | a9ba741871d1d7ff401cecf23659f0b75576e7c3 | [
"MIT"
] | 111 | 2020-02-27T16:00:27.000Z | 2022-03-22T08:09:56.000Z | scripts_hico/HICO_eval/bbox_utils.py | roy881020/VSGNet | a9ba741871d1d7ff401cecf23659f0b75576e7c3 | [
"MIT"
] | 21 | 2020-04-24T11:37:59.000Z | 2022-02-28T03:10:08.000Z | scripts_hico/HICO_eval/bbox_utils.py | roy881020/VSGNet | a9ba741871d1d7ff401cecf23659f0b75576e7c3 | [
"MIT"
] | 23 | 2020-03-18T10:50:07.000Z | 2022-02-09T12:35:57.000Z | import numpy as np
#import skimage.draw as skdraw
def add_bbox(img,bbox,color=[0,0,0],fill=False,alpha=1):
x1,y1,x2,y2 = bbox
# Clockwise starting from top left
r = [y1,y1,y2,y2]
c = [x1,x2,x2,x1]
if fill:
coords = skdraw.polygon(r,c,shape=img.shape[0:2])
skdraw.set_color(img,coords,color,alpha=alpha)
return
peri_coords = skdraw.polygon_perimeter(r,c,shape=img.shape[0:2])
skdraw.set_color(img,peri_coords,color,alpha=alpha)
def compute_area(bbox,invalid=None):
x1,y1,x2,y2 = bbox
if (x2 <= x1) or (y2 <= y1):
area = invalid
else:
area = (x2 - x1 + 1) * (y2 - y1 + 1)
return area
def compute_iou(bbox1,bbox2,verbose=False):
x1,y1,x2,y2 = bbox1
x1_,y1_,x2_,y2_ = bbox2
x1_in = max(x1,x1_)
y1_in = max(y1,y1_)
x2_in = min(x2,x2_)
y2_in = min(y2,y2_)
intersection = compute_area(bbox=[x1_in,y1_in,x2_in,y2_in],invalid=0.0)
area1 = compute_area(bbox1)
area2 = compute_area(bbox2)
union = area1 + area2 - intersection
iou = intersection / (union + 1e-6)
if verbose:
return iou, intersection, union
return iou
def compute_area_batch(bbox):
x1,y1,x2,y2 = [bbox[:,i] for i in range(4)]
area = np.zeros(x1.shape[0])
valid_mask = np.logical_and(x2 > x1, y2 > y1)
area_ = (x2 - x1 + 1) * (y2 - y1 + 1)
area[valid_mask] = area_[valid_mask]
return area
def compute_iou_batch(bbox1,bbox2,verbose=False):
x1,y1,x2,y2 = [bbox1[:,i] for i in range(4)]
x1_,y1_,x2_,y2_ = [bbox2[:,i] for i in range(4)]
x1_in = np.maximum(x1,x1_)
y1_in = np.maximum(y1,y1_)
x2_in = np.minimum(x2,x2_)
y2_in = np.minimum(y2,y2_)
intersection_bbox = np.stack((x1_in,y1_in,x2_in,y2_in),1)
intersection = compute_area_batch(bbox=intersection_bbox)
area1 = compute_area_batch(bbox1)
area2 = compute_area_batch(bbox2)
union = area1 + area2 - intersection
iou = intersection / (union + 1e-6)
if verbose:
return iou, intersection, union
return iou
def vis_bbox(bbox,img,color=(0,0,0),modify=False):
im_h,im_w = img.shape[0:2]
x1,y1,x2,y2 = bbox
x1 = max(0,min(x1,im_w-1))
x2 = max(x1,min(x2,im_w-1))
y1 = max(0,min(y1,im_h-1))
y2 = max(y1,min(y2,im_h-1))
r = [y1,y1,y2,y2]
c = [x1,x2,x2,x1]
if modify:
img_ = img
else:
img_ = np.copy(img)
rr,cc = skdraw.polygon(r,c,img.shape[:2])
skdraw.set_color(img_,(rr,cc),color,alpha=0.2)
rr,cc = skdraw.polygon_perimeter(r,c,img.shape[:2])
for k in range(3):
img_[rr,cc,k] = color[k]
return img_
def vis_bboxes(bboxes,img,color=(0,0,0),modify=False):
if modify:
img_ = img
else:
img_ = np.copy(img)
for bbox in bboxes:
img_ = vis_bbox(bbox,img_,color,True)
return img_
def join_bboxes_by_line(bbox1,bbox2,img,color=(255,0,255),modify=False):
im_h,im_w = img.shape[0:2]
x1,y1,x2,y2 = bbox1
x1_,y1_,x2_,y2_ = bbox2
c0 = 0.5*(x1+x2)
r0 = 0.5*(y1+y2)
c1 = 0.5*(x1_+x2_)
r1 = 0.5*(y1_+y2_)
r0,c0,r1,c1 = [int(x) for x in [r0,c0,r1,c1]]
c0 = max(0,min(c0,im_w-1))
c1 = max(0,min(c1,im_w-1))
r0 = max(0,min(r0,im_h-1))
r1 = max(0,min(r1,im_h-1))
rr,cc,val = skdraw.draw.line_aa(r0,c0,r1,c1)
if modify:
img_ = img
else:
img_ = np.copy(img)
for k in range(3):
img_[rr,cc,k] = val*color[k]
rr,cc = skdraw.circle(r0,c0,4,img_.shape[:2])
for k in range(3):
img_[rr,cc,k] = color[k]
rr,cc = skdraw.circle(r1,c1,4,img_.shape[:2])
for k in range(3):
img_[rr,cc,k] = color[k]
return img_
def vis_sub_obj_bboxes(
sub_bboxes,
obj_bboxes,
img,
sub_color=(0,0,255),
obj_color=(255,0,0),
modify=False):
img_ = vis_bboxes(sub_bboxes,img,sub_color,modify)
img_ = vis_bboxes(obj_bboxes,img_,obj_color,modify=True)
for sub_bbox,obj_bbox in zip(sub_bboxes,obj_bboxes):
img_ = join_bboxes_by_line(sub_bbox,obj_bbox,img_,modify=True)
return img_
def vis_human_keypts(
img,
keypts,
radius=2,
pt_color=(0,255,255),
line_color=(0,255,255),
modify=False):
LINKS = [
(0,1),
(1,2),
(2,3),
(3,4),
(1,5),
(5,6),
(6,7),
(0,15),
(15,17),
(0,14),
(14,16),
(1,8),
(8,9),
(9,10),
(1,11),
(11,12),
(12,13),
(8,11)
]
if modify:
img_ = img
else:
img_ = np.copy(img)
h,w = img.shape[:2]
for i,j in LINKS:
c0,r0,conf0 = keypts[i]
c1,r1,conf1 = keypts[j]
r0,r1 = [max(0,min(h-1,int(v))) for v in [r0,r1]]
c0,c1 = [max(0,min(w-1,int(v))) for v in [c0,c1]]
if conf0 > 0 and conf1 > 0:
rr,cc,val = skdraw.draw.line_aa(r0,c0,r1,c1)
for k in range(3):
img_[rr,cc,k] = val*line_color[k]
num_keypts = keypts.shape[0]
for i in range(num_keypts):
c,r,conf = keypts[i]
if conf==0.0:
continue
rr,cc = skdraw.circle(r,c,radius,img_.shape[:2])
for k in range(3):
img_[rr,cc,k] = pt_color[k]
return img_
| 23.189655 | 75 | 0.554461 | import numpy as np
def add_bbox(img,bbox,color=[0,0,0],fill=False,alpha=1):
x1,y1,x2,y2 = bbox
r = [y1,y1,y2,y2]
c = [x1,x2,x2,x1]
if fill:
coords = skdraw.polygon(r,c,shape=img.shape[0:2])
skdraw.set_color(img,coords,color,alpha=alpha)
return
peri_coords = skdraw.polygon_perimeter(r,c,shape=img.shape[0:2])
skdraw.set_color(img,peri_coords,color,alpha=alpha)
def compute_area(bbox,invalid=None):
x1,y1,x2,y2 = bbox
if (x2 <= x1) or (y2 <= y1):
area = invalid
else:
area = (x2 - x1 + 1) * (y2 - y1 + 1)
return area
def compute_iou(bbox1,bbox2,verbose=False):
x1,y1,x2,y2 = bbox1
x1_,y1_,x2_,y2_ = bbox2
x1_in = max(x1,x1_)
y1_in = max(y1,y1_)
x2_in = min(x2,x2_)
y2_in = min(y2,y2_)
intersection = compute_area(bbox=[x1_in,y1_in,x2_in,y2_in],invalid=0.0)
area1 = compute_area(bbox1)
area2 = compute_area(bbox2)
union = area1 + area2 - intersection
iou = intersection / (union + 1e-6)
if verbose:
return iou, intersection, union
return iou
def compute_area_batch(bbox):
x1,y1,x2,y2 = [bbox[:,i] for i in range(4)]
area = np.zeros(x1.shape[0])
valid_mask = np.logical_and(x2 > x1, y2 > y1)
area_ = (x2 - x1 + 1) * (y2 - y1 + 1)
area[valid_mask] = area_[valid_mask]
return area
def compute_iou_batch(bbox1,bbox2,verbose=False):
x1,y1,x2,y2 = [bbox1[:,i] for i in range(4)]
x1_,y1_,x2_,y2_ = [bbox2[:,i] for i in range(4)]
x1_in = np.maximum(x1,x1_)
y1_in = np.maximum(y1,y1_)
x2_in = np.minimum(x2,x2_)
y2_in = np.minimum(y2,y2_)
intersection_bbox = np.stack((x1_in,y1_in,x2_in,y2_in),1)
intersection = compute_area_batch(bbox=intersection_bbox)
area1 = compute_area_batch(bbox1)
area2 = compute_area_batch(bbox2)
union = area1 + area2 - intersection
iou = intersection / (union + 1e-6)
if verbose:
return iou, intersection, union
return iou
def vis_bbox(bbox,img,color=(0,0,0),modify=False):
im_h,im_w = img.shape[0:2]
x1,y1,x2,y2 = bbox
x1 = max(0,min(x1,im_w-1))
x2 = max(x1,min(x2,im_w-1))
y1 = max(0,min(y1,im_h-1))
y2 = max(y1,min(y2,im_h-1))
r = [y1,y1,y2,y2]
c = [x1,x2,x2,x1]
if modify:
img_ = img
else:
img_ = np.copy(img)
rr,cc = skdraw.polygon(r,c,img.shape[:2])
skdraw.set_color(img_,(rr,cc),color,alpha=0.2)
rr,cc = skdraw.polygon_perimeter(r,c,img.shape[:2])
for k in range(3):
img_[rr,cc,k] = color[k]
return img_
def vis_bboxes(bboxes,img,color=(0,0,0),modify=False):
if modify:
img_ = img
else:
img_ = np.copy(img)
for bbox in bboxes:
img_ = vis_bbox(bbox,img_,color,True)
return img_
def join_bboxes_by_line(bbox1,bbox2,img,color=(255,0,255),modify=False):
im_h,im_w = img.shape[0:2]
x1,y1,x2,y2 = bbox1
x1_,y1_,x2_,y2_ = bbox2
c0 = 0.5*(x1+x2)
r0 = 0.5*(y1+y2)
c1 = 0.5*(x1_+x2_)
r1 = 0.5*(y1_+y2_)
r0,c0,r1,c1 = [int(x) for x in [r0,c0,r1,c1]]
c0 = max(0,min(c0,im_w-1))
c1 = max(0,min(c1,im_w-1))
r0 = max(0,min(r0,im_h-1))
r1 = max(0,min(r1,im_h-1))
rr,cc,val = skdraw.draw.line_aa(r0,c0,r1,c1)
if modify:
img_ = img
else:
img_ = np.copy(img)
for k in range(3):
img_[rr,cc,k] = val*color[k]
rr,cc = skdraw.circle(r0,c0,4,img_.shape[:2])
for k in range(3):
img_[rr,cc,k] = color[k]
rr,cc = skdraw.circle(r1,c1,4,img_.shape[:2])
for k in range(3):
img_[rr,cc,k] = color[k]
return img_
def vis_sub_obj_bboxes(
sub_bboxes,
obj_bboxes,
img,
sub_color=(0,0,255),
obj_color=(255,0,0),
modify=False):
img_ = vis_bboxes(sub_bboxes,img,sub_color,modify)
img_ = vis_bboxes(obj_bboxes,img_,obj_color,modify=True)
for sub_bbox,obj_bbox in zip(sub_bboxes,obj_bboxes):
img_ = join_bboxes_by_line(sub_bbox,obj_bbox,img_,modify=True)
return img_
def vis_human_keypts(
img,
keypts,
radius=2,
pt_color=(0,255,255),
line_color=(0,255,255),
modify=False):
LINKS = [
(0,1),
(1,2),
(2,3),
(3,4),
(1,5),
(5,6),
(6,7),
(0,15),
(15,17),
(0,14),
(14,16),
(1,8),
(8,9),
(9,10),
(1,11),
(11,12),
(12,13),
(8,11)
]
if modify:
img_ = img
else:
img_ = np.copy(img)
h,w = img.shape[:2]
for i,j in LINKS:
c0,r0,conf0 = keypts[i]
c1,r1,conf1 = keypts[j]
r0,r1 = [max(0,min(h-1,int(v))) for v in [r0,r1]]
c0,c1 = [max(0,min(w-1,int(v))) for v in [c0,c1]]
if conf0 > 0 and conf1 > 0:
rr,cc,val = skdraw.draw.line_aa(r0,c0,r1,c1)
for k in range(3):
img_[rr,cc,k] = val*line_color[k]
num_keypts = keypts.shape[0]
for i in range(num_keypts):
c,r,conf = keypts[i]
if conf==0.0:
continue
rr,cc = skdraw.circle(r,c,radius,img_.shape[:2])
for k in range(3):
img_[rr,cc,k] = pt_color[k]
return img_
| true | true |
f7114fba470e1fb01f21f644c0cf7c1507d1186c | 4,585 | py | Python | load_neo4j.py | newmanrs/cloudburst-graph | 6e3a6878b1ae04f07fa35f2c689243a906bd026e | [
"MIT"
] | 1 | 2021-05-30T17:35:20.000Z | 2021-05-30T17:35:20.000Z | load_neo4j.py | newmanrs/cloudburst-graph | 6e3a6878b1ae04f07fa35f2c689243a906bd026e | [
"MIT"
] | 14 | 2021-05-30T03:51:39.000Z | 2021-11-13T03:18:19.000Z | load_neo4j.py | newmanrs/cloudburst-graph | 6e3a6878b1ae04f07fa35f2c689243a906bd026e | [
"MIT"
] | null | null | null | from neo4j import GraphDatabase
import json
import os
def create_beers(tx):
""" Load from the results of cloudburst site scraper """
with open('data/beers.json', 'r') as f:
beer_hops = json.load(f)
beers = beer_hops['beers']
query = """
UNWIND $beers as beer
MERGE (b:Beer {name : beer.beer_name,
abv : beer.abv,
style : beer.beer_style,
description : beer.description
})
RETURN count(b) as c
"""
records = tx.run(query, beers=beers)
print(
'Merged {} Beer nodes'
.format(records.single()['c']))
def create_hops(tx):
""" Hops are loaded into the DB from multiple sources
First is a hand-curated hop list to get better coverage
of the cloudburst beer descriptions. Contains names only.
We also load from Yakima Chief, which makes nodes with
additional data on aroma profiles and a useful description
of the hop.
"""
with open('data/hopnames.txt') as f:
hoplist = f.read().splitlines()
hoplist = [h.title() for h in hoplist if len(h) > 0]
with open('data/yakimachiefhopdata.json', 'r') as f:
ych = json.load(f)
# This query is fast but definitely not idempotent
query_params = []
for i, hop in enumerate(ych['hops']):
query_params.append([i, hop])
query = """
UNWIND $query_params as params
MERGE (h:Hop { idx : params[0]})
SET h += params[1]
SET h.data_source = 'Yakima Chief'
SET h.data_file = 'yakimachiefhopdata.json'
"""
tx.run(query, query_params=query_params)
query = """
with $hoplist as hoplist
UNWIND hoplist as name
OPTIONAL MATCH (h:Hop {name:name})
with h,name where h is NULL
MERGE (new:Hop {name : name})
SET new.data_source = 'Curated List'
SET new.data_file = 'hopnames.txt'
"""
tx.run(query, hoplist=hoplist)
query = """
match (n:Hop)
return count(n) as c
"""
records = tx.run(query)
print("Merged {} Hop nodes".format(records.single()['c']))
def create_beer_contains_hop_edges(tx):
query = """
match (b:Beer)
match (h:Hop)
where b.description contains h.name
merge (b)-[e:CONTAINS]-(h)
return count(e) as c
"""
records = tx.run(query)
print(
'Merged {} (:Beer)-[:CONTAINS]-(:Hop) relationships'
.format(records.single()['c']))
def create_styles(tx):
query = """
match (b:Beer)
with distinct b.style as styles
MERGE (s:Style {style : styles})
with s
match (b:Beer) where b.style = s.style
MERGE (b)-[e:STYLE]->(s)
return count(e) as c
"""
records = tx.run(query)
print(
"Merged {} (:Beer)-[:STYLE]-(:Style) relationships"
.format(records.single()['c']))
def create_hop_aromas(tx):
query = """
match (h:Hop)
UNWIND h.aroma_profile as aromas
with distinct aromas as aroma
MERGE (a:Aroma {aroma : aroma})
with a
match (h:Hop) where a.aroma in h.aroma_profile
MERGE (h)-[e:HAS_AROMA]-(a)
return count(e) as c
"""
records = tx.run(query)
print(
"Merged {} (:Aroma)-[:RECOMMENDED]-(:Aroma) relationships"
.format(records.single()['c']))
def style_abv_stats(tx):
query = """
match (s:Style)-[:STYLE]-(b:Beer)
with s, avg(b.abv) as abv_mean, stDevP(b.abv) as abv_std
set s.abv_mean = abv_mean
set s.abv_std = abv_std
"""
tx.run(query)
print("Computed style mean/std abv.")
query = """
match (b:Beer)-[:STYLE]-(s:Style)
set b.style_abv_z_score = (b.abv - s.abv_mean) / s.abv_std
"""
tx.run(query)
print("Computed beer style_abv_z_score")
if __name__ == '__main__':
uri = "neo4j://localhost:7687"
try:
pw = os.environ['NEO4J_PW']
except KeyError as e:
msg = "No environment variable `NEO4J_PW` found. " \
"Export NEO4J_PW='yourpassword' " \
"in the current shell environment or in your shell config file."
raise KeyError(msg) from e
driver = GraphDatabase.driver(uri, auth=("neo4j", pw))
with driver.session() as session:
swt = session.write_transaction
swt(create_beers)
swt(create_hops)
swt(create_beer_contains_hop_edges)
swt(create_hop_aromas)
swt(create_styles)
swt(style_abv_stats)
driver.close()
| 26.50289 | 76 | 0.577317 | from neo4j import GraphDatabase
import json
import os
def create_beers(tx):
with open('data/beers.json', 'r') as f:
beer_hops = json.load(f)
beers = beer_hops['beers']
query = """
UNWIND $beers as beer
MERGE (b:Beer {name : beer.beer_name,
abv : beer.abv,
style : beer.beer_style,
description : beer.description
})
RETURN count(b) as c
"""
records = tx.run(query, beers=beers)
print(
'Merged {} Beer nodes'
.format(records.single()['c']))
def create_hops(tx):
with open('data/hopnames.txt') as f:
hoplist = f.read().splitlines()
hoplist = [h.title() for h in hoplist if len(h) > 0]
with open('data/yakimachiefhopdata.json', 'r') as f:
ych = json.load(f)
query_params = []
for i, hop in enumerate(ych['hops']):
query_params.append([i, hop])
query = """
UNWIND $query_params as params
MERGE (h:Hop { idx : params[0]})
SET h += params[1]
SET h.data_source = 'Yakima Chief'
SET h.data_file = 'yakimachiefhopdata.json'
"""
tx.run(query, query_params=query_params)
query = """
with $hoplist as hoplist
UNWIND hoplist as name
OPTIONAL MATCH (h:Hop {name:name})
with h,name where h is NULL
MERGE (new:Hop {name : name})
SET new.data_source = 'Curated List'
SET new.data_file = 'hopnames.txt'
"""
tx.run(query, hoplist=hoplist)
query = """
match (n:Hop)
return count(n) as c
"""
records = tx.run(query)
print("Merged {} Hop nodes".format(records.single()['c']))
def create_beer_contains_hop_edges(tx):
query = """
match (b:Beer)
match (h:Hop)
where b.description contains h.name
merge (b)-[e:CONTAINS]-(h)
return count(e) as c
"""
records = tx.run(query)
print(
'Merged {} (:Beer)-[:CONTAINS]-(:Hop) relationships'
.format(records.single()['c']))
def create_styles(tx):
query = """
match (b:Beer)
with distinct b.style as styles
MERGE (s:Style {style : styles})
with s
match (b:Beer) where b.style = s.style
MERGE (b)-[e:STYLE]->(s)
return count(e) as c
"""
records = tx.run(query)
print(
"Merged {} (:Beer)-[:STYLE]-(:Style) relationships"
.format(records.single()['c']))
def create_hop_aromas(tx):
query = """
match (h:Hop)
UNWIND h.aroma_profile as aromas
with distinct aromas as aroma
MERGE (a:Aroma {aroma : aroma})
with a
match (h:Hop) where a.aroma in h.aroma_profile
MERGE (h)-[e:HAS_AROMA]-(a)
return count(e) as c
"""
records = tx.run(query)
print(
"Merged {} (:Aroma)-[:RECOMMENDED]-(:Aroma) relationships"
.format(records.single()['c']))
def style_abv_stats(tx):
query = """
match (s:Style)-[:STYLE]-(b:Beer)
with s, avg(b.abv) as abv_mean, stDevP(b.abv) as abv_std
set s.abv_mean = abv_mean
set s.abv_std = abv_std
"""
tx.run(query)
print("Computed style mean/std abv.")
query = """
match (b:Beer)-[:STYLE]-(s:Style)
set b.style_abv_z_score = (b.abv - s.abv_mean) / s.abv_std
"""
tx.run(query)
print("Computed beer style_abv_z_score")
if __name__ == '__main__':
uri = "neo4j://localhost:7687"
try:
pw = os.environ['NEO4J_PW']
except KeyError as e:
msg = "No environment variable `NEO4J_PW` found. " \
"Export NEO4J_PW='yourpassword' " \
"in the current shell environment or in your shell config file."
raise KeyError(msg) from e
driver = GraphDatabase.driver(uri, auth=("neo4j", pw))
with driver.session() as session:
swt = session.write_transaction
swt(create_beers)
swt(create_hops)
swt(create_beer_contains_hop_edges)
swt(create_hop_aromas)
swt(create_styles)
swt(style_abv_stats)
driver.close()
| true | true |
f7115016c89676f8c77084533e51e99ca21fe5e2 | 517 | py | Python | sort_dict.py | taijiji/python-memo | 627c887cf318a56824c51fef3c486bd8160c340d | [
"MIT"
] | null | null | null | sort_dict.py | taijiji/python-memo | 627c887cf318a56824c51fef3c486bd8160c340d | [
"MIT"
] | null | null | null | sort_dict.py | taijiji/python-memo | 627c887cf318a56824c51fef3c486bd8160c340d | [
"MIT"
] | null | null | null | from pprint import pprint
d = {
'A' : 10,
'B' : 50,
'C' : 40,
}
print(sorted(d)) # ['A', 'B', 'C']
print(sorted(d, key=d.get)) # ['A', 'C', 'B']
print(sorted(d, key=d.get, reverse=True)) # ['B', 'C', 'A']
l = [
{
'id' : 'A',
'keyword' : 10,
},
{
'id' : 'B',
'keyword' : 50,
},
{
'id' : 'C',
'keyword' : 40,
},
]
pprint(sorted(l, key=lambda x:x['keyword'], reverse=True))
'''
[{'id': 'B', 'keyword': 50},
{'id': 'C', 'keyword': 40},
{'id': 'A', 'keyword': 10}]
'''
| 15.666667 | 59 | 0.419729 | from pprint import pprint
d = {
'A' : 10,
'B' : 50,
'C' : 40,
}
print(sorted(d))
print(sorted(d, key=d.get))
print(sorted(d, key=d.get, reverse=True))
l = [
{
'id' : 'A',
'keyword' : 10,
},
{
'id' : 'B',
'keyword' : 50,
},
{
'id' : 'C',
'keyword' : 40,
},
]
pprint(sorted(l, key=lambda x:x['keyword'], reverse=True))
| true | true |
f71151ed9546386e8ded0a2cd7402796a6e469b3 | 978 | py | Python | scripts/strelka-2.9.2.centos6_x86_64/lib/python/configBuildTimeInfo.py | dongxuemin666/RNA-combine | 13e178aae585e16a9a8eda8151d0f34316de0475 | [
"Apache-2.0"
] | 7 | 2021-09-03T09:11:00.000Z | 2022-02-14T15:02:12.000Z | scripts/strelka-2.9.2.centos6_x86_64/lib/python/configBuildTimeInfo.py | dongxuemin666/RNA-combine | 13e178aae585e16a9a8eda8151d0f34316de0475 | [
"Apache-2.0"
] | null | null | null | scripts/strelka-2.9.2.centos6_x86_64/lib/python/configBuildTimeInfo.py | dongxuemin666/RNA-combine | 13e178aae585e16a9a8eda8151d0f34316de0475 | [
"Apache-2.0"
] | 2 | 2022-01-10T13:07:29.000Z | 2022-01-11T22:14:11.000Z | #
# Strelka - Small Variant Caller
# Copyright (c) 2009-2018 Illumina, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
"""
This consolidates build-time config data such as git status
and build date. This is in contrast to cmake configuration-time
config data like relative paths and library/header availability.
"""
workflowVersion="2.9.2"
buildTime="2018-03-02T22:08:15.960987Z"
| 34.928571 | 71 | 0.762781 |
workflowVersion="2.9.2"
buildTime="2018-03-02T22:08:15.960987Z"
| true | true |
f7115465b95c95820af6207e54e3ffcd9ec5c3fc | 3,812 | py | Python | Section_07_code/speech_recognizer.py | PacktPublishing/Python-Machine-Learning-Solutions-V- | 8bb80a43a7c64032c25c1023faaa29bbfbd39d45 | [
"MIT"
] | 1 | 2022-03-16T02:10:30.000Z | 2022-03-16T02:10:30.000Z | Section_07_code/speech_recognizer.py | wensincai/Python-Machine-Learning-Solutions-V- | 130c9881757fa90bbb124d48ddd0c6c1136fa20c | [
"MIT"
] | null | null | null | Section_07_code/speech_recognizer.py | wensincai/Python-Machine-Learning-Solutions-V- | 130c9881757fa90bbb124d48ddd0c6c1136fa20c | [
"MIT"
] | 2 | 2019-05-28T11:58:59.000Z | 2020-09-23T17:21:19.000Z | import os
import argparse
import warnings
import numpy as np
from scipy.io import wavfile
from hmmlearn import hmm
from python_speech_features import mfcc
# Function to parse input arguments
def build_arg_parser():
parser = argparse.ArgumentParser(description='Trains the HMM classifier')
parser.add_argument("--input-folder", dest="input_folder", required=True,
help="Input folder containing the audio files in subfolders")
return parser
# Class to handle all HMM related processing
class HMMTrainer(object):
def __init__(self, model_name='GaussianHMM', n_components=4, cov_type='diag', n_iter=1000):
self.model_name = model_name
self.n_components = n_components
self.cov_type = cov_type
self.n_iter = n_iter
self.models = []
if self.model_name == 'GaussianHMM':
self.model = hmm.GaussianHMM(n_components=self.n_components,
covariance_type=self.cov_type, n_iter=self.n_iter)
else:
raise TypeError('Invalid model type')
# X is a 2D numpy array where each row is 13D
def train(self, X):
np.seterr(all='ignore')
self.models.append(self.model.fit(X))
# Run the model on input data
def get_score(self, input_data):
return self.model.score(input_data)
if __name__=='__main__':
args = build_arg_parser().parse_args()
input_folder = args.input_folder
hmm_models = []
# Parse the input directory
for dirname in os.listdir(input_folder):
# Get the name of the subfolder
subfolder = os.path.join(input_folder, dirname)
if not os.path.isdir(subfolder):
continue
# Extract the label
label = subfolder[subfolder.rfind('/') + 1:]
# Initialize variables
X = np.array([])
y_words = []
warnings.filterwarnings("ignore")
# Iterate through the audio files (leaving 1 file for testing in each class)
for filename in [x for x in os.listdir(subfolder) if x.endswith('.wav')][:-1]:
# Read the input file
filepath = os.path.join(subfolder, filename)
sampling_freq, audio = wavfile.read(filepath)
# Extract MFCC features
mfcc_features = mfcc(audio, sampling_freq)
# Append to the variable X
if len(X) == 0:
X = mfcc_features
else:
X = np.append(X, mfcc_features, axis=0)
# Append the label
y_words.append(label)
#print('X.shape =', X.shape)
# Train and save HMM model
hmm_trainer = HMMTrainer()
hmm_trainer.train(X)
hmm_models.append((hmm_trainer, label))
hmm_trainer = None
# Test files
input_files = [
'data/pineapple/pineapple15.wav',
'data/orange/orange15.wav',
'data/apple/apple15.wav',
'data/kiwi/kiwi15.wav'
]
# Classify input data
for input_file in input_files:
# Read input file
sampling_freq, audio = wavfile.read(input_file)
# Extract MFCC features
mfcc_features = mfcc(audio, sampling_freq)
# Define variables
max_score = [float("-inf")]
output_label = [float("-inf")]
# Iterate through all HMM models and pick
# the one with the highest score
for item in hmm_models:
hmm_model, label = item
score = hmm_model.get_score(mfcc_features)
if score > max_score:
max_score = score
output_label = label
# Print the output
print( "\nTrue:", input_file[input_file.find('/')+1:input_file.rfind('/')])
print("Predicted:", output_label)
warnings.filterwarnings("ignore")
| 31.766667 | 95 | 0.612802 | import os
import argparse
import warnings
import numpy as np
from scipy.io import wavfile
from hmmlearn import hmm
from python_speech_features import mfcc
def build_arg_parser():
parser = argparse.ArgumentParser(description='Trains the HMM classifier')
parser.add_argument("--input-folder", dest="input_folder", required=True,
help="Input folder containing the audio files in subfolders")
return parser
class HMMTrainer(object):
def __init__(self, model_name='GaussianHMM', n_components=4, cov_type='diag', n_iter=1000):
self.model_name = model_name
self.n_components = n_components
self.cov_type = cov_type
self.n_iter = n_iter
self.models = []
if self.model_name == 'GaussianHMM':
self.model = hmm.GaussianHMM(n_components=self.n_components,
covariance_type=self.cov_type, n_iter=self.n_iter)
else:
raise TypeError('Invalid model type')
def train(self, X):
np.seterr(all='ignore')
self.models.append(self.model.fit(X))
def get_score(self, input_data):
return self.model.score(input_data)
if __name__=='__main__':
args = build_arg_parser().parse_args()
input_folder = args.input_folder
hmm_models = []
for dirname in os.listdir(input_folder):
subfolder = os.path.join(input_folder, dirname)
if not os.path.isdir(subfolder):
continue
label = subfolder[subfolder.rfind('/') + 1:]
X = np.array([])
y_words = []
warnings.filterwarnings("ignore")
for filename in [x for x in os.listdir(subfolder) if x.endswith('.wav')][:-1]:
filepath = os.path.join(subfolder, filename)
sampling_freq, audio = wavfile.read(filepath)
mfcc_features = mfcc(audio, sampling_freq)
if len(X) == 0:
X = mfcc_features
else:
X = np.append(X, mfcc_features, axis=0)
y_words.append(label)
hmm_trainer = HMMTrainer()
hmm_trainer.train(X)
hmm_models.append((hmm_trainer, label))
hmm_trainer = None
input_files = [
'data/pineapple/pineapple15.wav',
'data/orange/orange15.wav',
'data/apple/apple15.wav',
'data/kiwi/kiwi15.wav'
]
for input_file in input_files:
sampling_freq, audio = wavfile.read(input_file)
mfcc_features = mfcc(audio, sampling_freq)
max_score = [float("-inf")]
output_label = [float("-inf")]
for item in hmm_models:
hmm_model, label = item
score = hmm_model.get_score(mfcc_features)
if score > max_score:
max_score = score
output_label = label
print( "\nTrue:", input_file[input_file.find('/')+1:input_file.rfind('/')])
print("Predicted:", output_label)
warnings.filterwarnings("ignore")
| true | true |
f71157014d09fab40e49d12085e7b79c88f79c03 | 4,010 | py | Python | src/third_party/beaengine/tests/0fd3.py | CrackerCat/rp | 5fe693c26d76b514efaedb4084f6e37d820db023 | [
"MIT"
] | 1 | 2022-01-17T17:40:29.000Z | 2022-01-17T17:40:29.000Z | src/third_party/beaengine/tests/0fd3.py | CrackerCat/rp | 5fe693c26d76b514efaedb4084f6e37d820db023 | [
"MIT"
] | null | null | null | src/third_party/beaengine/tests/0fd3.py | CrackerCat/rp | 5fe693c26d76b514efaedb4084f6e37d820db023 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
# @author : beaengine@gmail.com
from headers.BeaEnginePython import *
from nose.tools import *
class TestSuite:
def test(self):
# 66 0F d3 /r
# psrlq mm1, mm2/m64
Buffer = bytes.fromhex('660fd39011223344')
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(hex(myDisasm.infos.Instruction.Opcode), '0xfd3')
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'psrlq')
assert_equal(myDisasm.repr(), 'psrlq xmm2, xmmword ptr [rax+44332211h]')
# VEX.NDS.128.66.0F.WIG d3 /r
# vpsrlq xmm1, xmm2, xmm3/m128
Buffer = bytes.fromhex('c40101d30e')
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpsrlq')
assert_equal(myDisasm.repr(), 'vpsrlq xmm9, xmm15, xmmword ptr [r14]')
# VEX.NDS.256.66.0F.WIG d3 /r
# vpsrlq ymm1, ymm2, ymm3/m256
Buffer = bytes.fromhex('c40105d30e')
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpsrlq')
assert_equal(myDisasm.repr(), 'vpsrlq ymm9, ymm15, ymmword ptr [r14]')
# EVEX.NDS.128.66.0F.WIG d3 /r
# vpsrlq xmm1 {k1}{z}, xmm2, xmm3/m128
Buffer = bytes.fromhex('62010506d30e')
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Reserved_.EVEX.P0, 0x1)
assert_equal(myDisasm.infos.Reserved_.EVEX.P1, 0x5)
assert_equal(myDisasm.infos.Reserved_.EVEX.P2, 0x6)
assert_equal(myDisasm.infos.Reserved_.EVEX.pp, 0x1)
assert_equal(myDisasm.infos.Reserved_.EVEX.mm, 0x1)
assert_equal(hex(myDisasm.infos.Instruction.Opcode), '0xd3')
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpsrlq')
assert_equal(myDisasm.repr(), 'vpsrlq xmm25, xmm31, xmmword ptr [r14]')
# EVEX.NDS.256.66.0F.WIG d3 /r
# vpsrlq ymm1 {k1}{z}, ymm2, ymm3/m256
Buffer = bytes.fromhex('62010520d30e')
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Reserved_.EVEX.P0, 0x1)
assert_equal(myDisasm.infos.Reserved_.EVEX.P1, 0x5)
assert_equal(myDisasm.infos.Reserved_.EVEX.P2, 0x20)
assert_equal(myDisasm.infos.Reserved_.EVEX.pp, 0x1)
assert_equal(myDisasm.infos.Reserved_.EVEX.mm, 0x1)
assert_equal(hex(myDisasm.infos.Instruction.Opcode), '0xd3')
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpsrlq')
assert_equal(myDisasm.repr(), 'vpsrlq ymm25, ymm31, ymmword ptr [r14]')
# EVEX.NDS.512.66.0F.WIG d3 /r
# vpsrlq zmm1 {k1}{z}, zmm2, zmm3/m512
Buffer = bytes.fromhex('62010540d30e')
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Reserved_.EVEX.P0, 0x1)
assert_equal(myDisasm.infos.Reserved_.EVEX.P1, 0x5)
assert_equal(myDisasm.infos.Reserved_.EVEX.P2, 0x40)
assert_equal(myDisasm.infos.Reserved_.EVEX.pp, 0x1)
assert_equal(myDisasm.infos.Reserved_.EVEX.mm, 0x1)
assert_equal(hex(myDisasm.infos.Instruction.Opcode), '0xd3')
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpsrlq')
assert_equal(myDisasm.repr(), 'vpsrlq zmm25, zmm31, zmmword ptr [r14]')
| 45.05618 | 80 | 0.669327 |
from headers.BeaEnginePython import *
from nose.tools import *
class TestSuite:
def test(self):
Buffer = bytes.fromhex('660fd39011223344')
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(hex(myDisasm.infos.Instruction.Opcode), '0xfd3')
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'psrlq')
assert_equal(myDisasm.repr(), 'psrlq xmm2, xmmword ptr [rax+44332211h]')
Buffer = bytes.fromhex('c40101d30e')
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpsrlq')
assert_equal(myDisasm.repr(), 'vpsrlq xmm9, xmm15, xmmword ptr [r14]')
Buffer = bytes.fromhex('c40105d30e')
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpsrlq')
assert_equal(myDisasm.repr(), 'vpsrlq ymm9, ymm15, ymmword ptr [r14]')
Buffer = bytes.fromhex('62010506d30e')
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Reserved_.EVEX.P0, 0x1)
assert_equal(myDisasm.infos.Reserved_.EVEX.P1, 0x5)
assert_equal(myDisasm.infos.Reserved_.EVEX.P2, 0x6)
assert_equal(myDisasm.infos.Reserved_.EVEX.pp, 0x1)
assert_equal(myDisasm.infos.Reserved_.EVEX.mm, 0x1)
assert_equal(hex(myDisasm.infos.Instruction.Opcode), '0xd3')
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpsrlq')
assert_equal(myDisasm.repr(), 'vpsrlq xmm25, xmm31, xmmword ptr [r14]')
Buffer = bytes.fromhex('62010520d30e')
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Reserved_.EVEX.P0, 0x1)
assert_equal(myDisasm.infos.Reserved_.EVEX.P1, 0x5)
assert_equal(myDisasm.infos.Reserved_.EVEX.P2, 0x20)
assert_equal(myDisasm.infos.Reserved_.EVEX.pp, 0x1)
assert_equal(myDisasm.infos.Reserved_.EVEX.mm, 0x1)
assert_equal(hex(myDisasm.infos.Instruction.Opcode), '0xd3')
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpsrlq')
assert_equal(myDisasm.repr(), 'vpsrlq ymm25, ymm31, ymmword ptr [r14]')
Buffer = bytes.fromhex('62010540d30e')
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Reserved_.EVEX.P0, 0x1)
assert_equal(myDisasm.infos.Reserved_.EVEX.P1, 0x5)
assert_equal(myDisasm.infos.Reserved_.EVEX.P2, 0x40)
assert_equal(myDisasm.infos.Reserved_.EVEX.pp, 0x1)
assert_equal(myDisasm.infos.Reserved_.EVEX.mm, 0x1)
assert_equal(hex(myDisasm.infos.Instruction.Opcode), '0xd3')
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpsrlq')
assert_equal(myDisasm.repr(), 'vpsrlq zmm25, zmm31, zmmword ptr [r14]')
| true | true |
f711577e3e237a8ca848394b36f174ba9d6b998e | 4,645 | py | Python | PageBotNano-005-TextBox/MyTypeSpecimen.py | juandelperal/PageBotNano | 7f0d82755d6eb6962f206e5dd0d08c40c0947bde | [
"MIT"
] | null | null | null | PageBotNano-005-TextBox/MyTypeSpecimen.py | juandelperal/PageBotNano | 7f0d82755d6eb6962f206e5dd0d08c40c0947bde | [
"MIT"
] | null | null | null | PageBotNano-005-TextBox/MyTypeSpecimen.py | juandelperal/PageBotNano | 7f0d82755d6eb6962f206e5dd0d08c40c0947bde | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# -----------------------------------------------------------------------------
#
# P A G E B O T N A N O
#
# Copyright (c) 2020+ Buro Petr van Blokland + Claudia Mens
# www.pagebot.io
# Licensed under MIT conditions
#
# Supporting DrawBot, www.drawbot.com
# -----------------------------------------------------------------------------
#
# MyTypeSpecimen.py
#
# This MyTypeSpecimen.py shows an example how to import
# existing libaries, that contain knowledge about document,
# pages and the elements on the pages.
#
from random import random
#
# From the library we import the classes (=object factories)
# that we need for creating the type specimen.
# Classes can be recognised by their initial capital name.
from pagebotnano_005.document import Document
from pagebotnano_005.elements import Rect, Text, TextBox
from pagebotnano_005.toolbox.loremipsum import loremipsum
class TypeSpecimen(Document):
    """A type specimen document.

    Inherits all page and element handling from Document; kept as a
    subclass so specimen-specific behaviour can be added later.
    """
    # Class names start with a capital. See a class as a factory
    # of type specimen objects (name spelled with an initial lower case.)
    # Inheriting is one of the most powerful aspects of Python programming,
    # so an object can perform complex tasks without the need to add these
    # functions again for every new project.
    pass # For now it will do nothing, but that will change.
# Now we create a new type specimen, by executing the class.
# Compare that by letting a car factory produce a car. We only need
# one factory ("TypeSpecimen" name starting with capital), which
# then can product an inlimited number of typeSpecimen objects (name
# starting with a lower case.)
typeSpecimen = TypeSpecimen() # Execute the class/factory by adding "()"
# Typographic settings shared by the page-building functions below.
fontName = 'Georgia'
titleSize = 64
headSize = 24
bodyFontSize = 16
leading = 1.4 # Multiplier for the fontSize to get the line height.
padding = 80 # Padding of the page. In CSS this is called the "margin" of the page.
def makeCoverPage(doc, title):
    """Add a cover page to *doc*: a random dark background, the *title*
    set large in white, and a lighter framed square.

    The module-level settings (fontName, titleSize, padding, ...) and the
    element classes (Rect, Text, TextBox) are only read here, so the
    original ``global`` declarations were no-ops and have been removed;
    behaviour is unchanged.
    """
    page = doc.newPage()
    # Fill the page with a random dark color (< 50% for (r, g, b))
    fillColor = random()*0.5, random()*0.5, random()*0.5
    rectangleElement = Rect(0, 0, page.w, page.h, fill=fillColor)
    page.addElement(rectangleElement) # Add the rectangle element to the page.
    # Make a FormattedString for the text box
    fs = Text.FS(title,
        font=fontName, fontSize=titleSize, lineHeight=titleSize*1.1, fill=1)
    # Make a Text element with an (x, y) position and add it to the page.
    textElement = Text(fs, x=padding, y=page.h-1.5*padding)
    page.addElement(textElement) # Add the text element to the page.
    # Add square with light color (> 50% for (r, g, b)) and lighter frame.
    rx = ry = padding # Position from bottom-left
    rw = rh = page.w - 2*padding # Make a square, so w = h
    fillColor = 0.5+random()*0.5, 0.5+random()*0.5, 0.5+random()*0.5
    strokeColor = 0.75+random()*0.25, 0.75+random()*0.25, 0.75+random()*0.25
    rectangleElement = Rect(rx, ry, rw, rh, fill=fillColor,
        stroke=strokeColor, strokeWidth=5)
    page.addElement(rectangleElement) # Add the rectangle element to the page.
def makeBodyPages(doc, bodyText):
    """Flow *bodyText* into the document, one TextBox per page, adding
    new pages until the text no longer overflows. Each page also gets a
    centered page number at the bottom.
    """
    fs = Text.FS(bodyText, font=fontName, fontSize=bodyFontSize, lineHeight=bodyFontSize*leading)
    while True:
        page = doc.newPage()
        # Add text element with the page number, centered in the bottom padding.
        pn = TextBox.FS(str(page.pn), align='center', font=fontName, fontSize=bodyFontSize)
        page.addElement(Text(pn, page.w/2, padding/2))
        # The main text box fills the page inside the padding.
        e = TextBox(fs, x=padding, y=padding, w=page.w-2*padding, h=page.h-2*padding, fill=1)
        page.addElement(e)
        # Whatever did not fit becomes the text for the next page.
        fs = e.getOverflow(fs)
        if not fs:
            break
# Compose the specimen: cover page plus as many body pages as the text needs.
txt = loremipsum(doShuffle=True)
makeCoverPage(typeSpecimen, 'Type specimen\n'+fontName)
makeBodyPages(typeSpecimen, txt)
# Build the document, all pages and their contained elements.
typeSpecimen.build()
# The export call creates the "_export" folder if it does not exist yet.
# This Github repository is configured to not upload _export.
# Export the specimen as both PDF and PNG.
typeSpecimen.export('_export/MyTypeSpecimen.pdf')
typeSpecimen.export('_export/MyTypeSpecimen.png')
print('Done')
from random import random
from pagebotnano_005.document import Document
from pagebotnano_005.elements import Rect, Text, TextBox
from pagebotnano_005.toolbox.loremipsum import loremipsum
class TypeSpecimen(Document):
pass
typeSpecimen = TypeSpecimen()
fontName = 'Georgia'
titleSize = 64
headSize = 24
bodyFontSize = 16
leading = 1.4
padding = 80
def makeCoverPage(doc, title):
global Rect, Text, TextBox
global fontName, titleSize, headSize, bodyFontSize, leading, padding
page = doc.newPage()
fillColor = random()*0.5, random()*0.5, random()*0.5
rectangleElement = Rect(0, 0, page.w, page.h, fill=fillColor)
page.addElement(rectangleElement)
fs = Text.FS(title,
font=fontName, fontSize=titleSize, lineHeight=titleSize*1.1, fill=1)
textElement = Text(fs, x=padding, y=page.h-1.5*padding)
page.addElement(textElement)
rx = ry = padding
rw = rh = page.w - 2*padding
fillColor = 0.5+random()*0.5, 0.5+random()*0.5, 0.5+random()*0.5
strokeColor = 0.75+random()*0.25, 0.75+random()*0.25, 0.75+random()*0.25
rectangleElement = Rect(rx, ry, rw, rh, fill=fillColor,
stroke=strokeColor, strokeWidth=5)
page.addElement(rectangleElement)
def makeBodyPages(doc, bodyText):
fs = Text.FS(bodyText, font=fontName, fontSize=bodyFontSize, lineHeight=bodyFontSize*leading)
while True:
page = doc.newPage()
pn = TextBox.FS(str(page.pn), align='center', font=fontName, fontSize=bodyFontSize)
page.addElement(Text(pn, page.w/2, padding/2))
e = TextBox(fs, x=padding, y=padding, w=page.w-2*padding, h=page.h-2*padding, fill=1)
page.addElement(e)
fs = e.getOverflow(fs)
if not fs:
break
txt = loremipsum(doShuffle=True)
makeCoverPage(typeSpecimen, 'Type specimen\n'+fontName)
makeBodyPages(typeSpecimen, txt)
typeSpecimen.build()
typeSpecimen.export('_export/MyTypeSpecimen.pdf')
typeSpecimen.export('_export/MyTypeSpecimen.png')
print('Done') | true | true |
f7115956f5a17031f875ed574bd2db94bd8aaa40 | 142 | py | Python | hcfg/exceptions.py | hyper1923/hcfg | ad37e2bf4a5cc78c4f93331321611d642e52d7d3 | [
"MIT"
] | 4 | 2021-07-25T21:01:33.000Z | 2021-12-17T12:35:16.000Z | hcfg/exceptions.py | hyper1923/hcfg | ad37e2bf4a5cc78c4f93331321611d642e52d7d3 | [
"MIT"
] | null | null | null | hcfg/exceptions.py | hyper1923/hcfg | ad37e2bf4a5cc78c4f93331321611d642e52d7d3 | [
"MIT"
] | 1 | 2021-07-25T21:01:35.000Z | 2021-07-25T21:01:35.000Z |
class hypSyntaxError(Exception):
    """Raised when hcfg input contains invalid syntax."""
    pass
class hypFileError(Exception):
    """Raised for file-related hcfg errors."""
    pass
class hypObjectError(Exception):
    """Raised for invalid hcfg object access or construction."""
    pass
class hypSyntaxError(Exception):
pass
class hypFileError(Exception):
pass
class hypObjectError(Exception):
pass | true | true |
f7115ab9b4c5b56cdf3c82610fb39000f9062f83 | 3,873 | py | Python | Tests/benchmarks/bench_nbody.py | AlexWaygood/Pyjion | 974bd3cf434fad23fbfa1ea9acf43e3387a5c21f | [
"MIT"
] | null | null | null | Tests/benchmarks/bench_nbody.py | AlexWaygood/Pyjion | 974bd3cf434fad23fbfa1ea9acf43e3387a5c21f | [
"MIT"
] | null | null | null | Tests/benchmarks/bench_nbody.py | AlexWaygood/Pyjion | 974bd3cf434fad23fbfa1ea9acf43e3387a5c21f | [
"MIT"
] | null | null | null | # The Computer Language Benchmarks Game
# http://benchmarksgame.alioth.debian.org/
#
# originally by Kevin Carson
# modified by Tupteq, Fredrik Johansson, and Daniel Nanz
# modified by Maciej Fijalkowski
# 2to3
import pyjion
import timeit
import gc
def combinations(l):
    """Return all unordered pairs (l[i], l[j]) with i < j, in order.

    Equivalent to list(itertools.combinations(l, 2)); kept dependency-free
    here, but the manual append loop is replaced by a comprehension.
    """
    return [(l[x], y) for x in range(len(l) - 1) for y in l[x + 1:]]
# Units: distances in AU, time in years, masses relative to the Sun.
PI = 3.14159265358979323
SOLAR_MASS = 4 * PI * PI  # folds the gravitational constant into the mass
DAYS_PER_YEAR = 365.24
BODIES = {
'sun': ([0.0, 0.0, 0.0], [0.0, 0.0, 0.0], SOLAR_MASS),
'jupiter': ([4.84143144246472090e+00,
-1.16032004402742839e+00,
-1.03622044471123109e-01],
[1.66007664274403694e-03 * DAYS_PER_YEAR,
7.69901118419740425e-03 * DAYS_PER_YEAR,
-6.90460016972063023e-05 * DAYS_PER_YEAR],
9.54791938424326609e-04 * SOLAR_MASS),
'saturn': ([8.34336671824457987e+00,
4.12479856412430479e+00,
-4.03523417114321381e-01],
[-2.76742510726862411e-03 * DAYS_PER_YEAR,
4.99852801234917238e-03 * DAYS_PER_YEAR,
2.30417297573763929e-05 * DAYS_PER_YEAR],
2.85885980666130812e-04 * SOLAR_MASS),
'uranus': ([1.28943695621391310e+01,
-1.51111514016986312e+01,
-2.23307578892655734e-01],
[2.96460137564761618e-03 * DAYS_PER_YEAR,
2.37847173959480950e-03 * DAYS_PER_YEAR,
-2.96589568540237556e-05 * DAYS_PER_YEAR],
4.36624404335156298e-05 * SOLAR_MASS),
'neptune': ([1.53796971148509165e+01,
-2.59193146099879641e+01,
1.79258772950371181e-01],
[2.68067772490389322e-03 * DAYS_PER_YEAR,
1.62824170038242295e-03 * DAYS_PER_YEAR,
-9.51592254519715870e-05 * DAYS_PER_YEAR],
5.15138902046611451e-05 * SOLAR_MASS) }
SYSTEM = list(BODIES.values())  # mutable (position, velocity, mass) triples
PAIRS = combinations(SYSTEM)    # every unordered pair of bodies, precomputed
def advance(dt, n, bodies=SYSTEM, pairs=PAIRS):
    """Advance the system *n* time steps of size *dt*, mutating in place.

    Each step first updates every pair's velocities from their mutual
    gravitational pull, then moves every body along its updated velocity.
    Statement order matters for numeric reproducibility of the benchmark.
    """
    for i in range(n):
        for (([x1, y1, z1], v1, m1),
             ([x2, y2, z2], v2, m2)) in pairs:
            dx = x1 - x2
            dy = y1 - y2
            dz = z1 - z2
            # (dx*dx + dy*dy + dz*dz) ** -1.5 == 1 / |r|**3, so the force
            # components below are (dx, dy, dz) * m / |r|**3 scaled by dt.
            mag = dt * ((dx * dx + dy * dy + dz * dz) ** (-1.5))
            b1m = m1 * mag
            b2m = m2 * mag
            v1[0] -= dx * b2m
            v1[1] -= dy * b2m
            v1[2] -= dz * b2m
            v2[0] += dx * b1m
            v2[1] += dy * b1m
            v2[2] += dz * b1m
        # Position update uses the velocities just computed above.
        for (r, [vx, vy, vz], m) in bodies:
            r[0] += dt * vx
            r[1] += dt * vy
            r[2] += dt * vz
def report_energy(bodies=SYSTEM, pairs=PAIRS, e=0.0):
    """Print the system's total energy (kinetic minus pairwise potential)
    with nine decimal places. *e* is just the running accumulator.
    """
    for (((x1, y1, z1), v1, m1),
         ((x2, y2, z2), v2, m2)) in pairs:
        dx = x1 - x2
        dy = y1 - y2
        dz = z1 - z2
        # Pair potential: m1*m2/|r| (the gravitational constant is folded
        # into the mass units, see SOLAR_MASS above).
        e -= (m1 * m2) / ((dx * dx + dy * dy + dz * dz) ** 0.5)
    for (r, [vx, vy, vz], m) in bodies:
        # Kinetic energy: m * |v|**2 / 2
        e += m * (vx * vx + vy * vy + vz * vz) / 2.
    print("%.9f" % e)
def offset_momentum(ref, bodies=SYSTEM, px=0.0, py=0.0, pz=0.0):
    """Set *ref*'s velocity so the total momentum of *bodies* is zero.

    (px, py, pz) accumulate the negated momentum of all bodies; dividing
    by *ref*'s mass gives the compensating velocity, written in place.
    """
    for (r, [vx, vy, vz], m) in bodies:
        px -= vx * m
        py -= vy * m
        pz -= vz * m
    (r, v, m) = ref
    v[0] = px / m
    v[1] = py / m
    v[2] = pz / m
def main(n=50000, ref='sun'):
    """Run the benchmark: zero total momentum against *ref*, then report
    the energy before and after advancing *n* steps of 0.01.
    """
    offset_momentum(BODIES[ref])
    report_energy()
    advance(0.01, n)
    report_energy()
if __name__ == "__main__":
    # Benchmark harness: time main() under plain CPython, then with the
    # Pyjion JIT enabled, and finally dump Pyjion's per-function info.
    print("N-body took {0} without Pyjion".format(timeit.repeat(main, repeat=5, number=1)))
    pyjion.enable()
    pyjion.set_optimization_level(1)
    print("N-body took {0} with Pyjion".format(timeit.repeat(main, repeat=5, number=1)))
    pyjion.disable()
    print(pyjion.info(offset_momentum))
    print(pyjion.info(advance))
    print(pyjion.info(report_energy))
    gc.collect()
import pyjion
import timeit
import gc
def combinations(l):
result = []
for x in range(len(l) - 1):
ls = l[x+1:]
for y in ls:
result.append((l[x],y))
return result
PI = 3.14159265358979323
SOLAR_MASS = 4 * PI * PI
DAYS_PER_YEAR = 365.24
BODIES = {
'sun': ([0.0, 0.0, 0.0], [0.0, 0.0, 0.0], SOLAR_MASS),
'jupiter': ([4.84143144246472090e+00,
-1.16032004402742839e+00,
-1.03622044471123109e-01],
[1.66007664274403694e-03 * DAYS_PER_YEAR,
7.69901118419740425e-03 * DAYS_PER_YEAR,
-6.90460016972063023e-05 * DAYS_PER_YEAR],
9.54791938424326609e-04 * SOLAR_MASS),
'saturn': ([8.34336671824457987e+00,
4.12479856412430479e+00,
-4.03523417114321381e-01],
[-2.76742510726862411e-03 * DAYS_PER_YEAR,
4.99852801234917238e-03 * DAYS_PER_YEAR,
2.30417297573763929e-05 * DAYS_PER_YEAR],
2.85885980666130812e-04 * SOLAR_MASS),
'uranus': ([1.28943695621391310e+01,
-1.51111514016986312e+01,
-2.23307578892655734e-01],
[2.96460137564761618e-03 * DAYS_PER_YEAR,
2.37847173959480950e-03 * DAYS_PER_YEAR,
-2.96589568540237556e-05 * DAYS_PER_YEAR],
4.36624404335156298e-05 * SOLAR_MASS),
'neptune': ([1.53796971148509165e+01,
-2.59193146099879641e+01,
1.79258772950371181e-01],
[2.68067772490389322e-03 * DAYS_PER_YEAR,
1.62824170038242295e-03 * DAYS_PER_YEAR,
-9.51592254519715870e-05 * DAYS_PER_YEAR],
5.15138902046611451e-05 * SOLAR_MASS) }
SYSTEM = list(BODIES.values())
PAIRS = combinations(SYSTEM)
def advance(dt, n, bodies=SYSTEM, pairs=PAIRS):
for i in range(n):
for (([x1, y1, z1], v1, m1),
([x2, y2, z2], v2, m2)) in pairs:
dx = x1 - x2
dy = y1 - y2
dz = z1 - z2
mag = dt * ((dx * dx + dy * dy + dz * dz) ** (-1.5))
b1m = m1 * mag
b2m = m2 * mag
v1[0] -= dx * b2m
v1[1] -= dy * b2m
v1[2] -= dz * b2m
v2[0] += dx * b1m
v2[1] += dy * b1m
v2[2] += dz * b1m
for (r, [vx, vy, vz], m) in bodies:
r[0] += dt * vx
r[1] += dt * vy
r[2] += dt * vz
def report_energy(bodies=SYSTEM, pairs=PAIRS, e=0.0):
for (((x1, y1, z1), v1, m1),
((x2, y2, z2), v2, m2)) in pairs:
dx = x1 - x2
dy = y1 - y2
dz = z1 - z2
e -= (m1 * m2) / ((dx * dx + dy * dy + dz * dz) ** 0.5)
for (r, [vx, vy, vz], m) in bodies:
e += m * (vx * vx + vy * vy + vz * vz) / 2.
print("%.9f" % e)
def offset_momentum(ref, bodies=SYSTEM, px=0.0, py=0.0, pz=0.0):
for (r, [vx, vy, vz], m) in bodies:
px -= vx * m
py -= vy * m
pz -= vz * m
(r, v, m) = ref
v[0] = px / m
v[1] = py / m
v[2] = pz / m
def main(n=50000, ref='sun'):
offset_momentum(BODIES[ref])
report_energy()
advance(0.01, n)
report_energy()
if __name__ == "__main__":
print("N-body took {0} without Pyjion".format(timeit.repeat(main, repeat=5, number=1)))
pyjion.enable()
pyjion.set_optimization_level(1)
print("N-body took {0} with Pyjion".format(timeit.repeat(main, repeat=5, number=1)))
pyjion.disable()
print(pyjion.info(offset_momentum))
print(pyjion.info(advance))
print(pyjion.info(report_energy))
gc.collect()
| true | true |
f7115adab6ff8f96dbe31caef921ac48511b27db | 46,233 | py | Python | tests/unit/test_swift_store.py | citrix-openstack-build/glance_store | 475d144cfe2a3fb5fc49dd0ad0a95fa90790f5b7 | [
"Apache-2.0"
] | null | null | null | tests/unit/test_swift_store.py | citrix-openstack-build/glance_store | 475d144cfe2a3fb5fc49dd0ad0a95fa90790f5b7 | [
"Apache-2.0"
] | null | null | null | tests/unit/test_swift_store.py | citrix-openstack-build/glance_store | 475d144cfe2a3fb5fc49dd0ad0a95fa90790f5b7 | [
"Apache-2.0"
] | null | null | null | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests the Swift backend store"""
import copy
import fixtures
import hashlib
import httplib
import mock
import tempfile
import uuid
from oslo.config import cfg
from oslotest import moxstubout
import six
import stubout
import swiftclient
from glance_store._drivers.swift import store as swift
from glance_store._drivers.swift import utils as sutils
from glance_store import backend
from glance_store import BackendException
from glance_store.common import auth
from glance_store import exceptions
from glance_store.location import get_location_from_uri
from glance_store.openstack.common import context
from glance_store.openstack.common import units
from glance_store.tests import base
CONF = cfg.CONF

# NOTE(review): FAKE_UUID is a *callable*; the tests interpolate the object
# itself with '%s' (its repr), and the stubs below do the same, so the
# fixture keys still match — confirm this is intentional.
FAKE_UUID = lambda: str(uuid.uuid4())

Store = swift.Store
FIVE_KB = 5 * units.Ki
FIVE_GB = 5 * units.Gi
# Lowered temporarily by the zero-size large-object test.
MAX_SWIFT_OBJECT_SIZE = FIVE_GB
# Mutable counter, incremented by the fake_put_object stub.
SWIFT_PUT_OBJECT_CALLS = 0
# Baseline store configuration used by the tests. The original literal
# repeated 'swift_store_auth_address' and 'swift_store_container' with
# identical values; a dict literal keeps only the last occurrence, so
# dropping the duplicates leaves the resulting dict unchanged.
SWIFT_CONF = {'swift_store_auth_address': 'localhost:8080',
              'swift_store_container': 'glance',
              'swift_store_user': 'user',
              'swift_store_key': 'key',
              'swift_store_retry_get_count': 1,
              'default_swift_reference': 'ref1'
              }
# We stub out as little as possible to ensure that the code paths
# between swift and swiftclient are tested
# thoroughly
def stub_out_swiftclient(stubs, swift_store_auth_version):
    """Replace the swiftclient.client module functions with in-memory fakes.

    The fakes share the fixture_* closures below as their backing store:
    one pre-seeded 'glance' container holding a single 5KB object of '*'
    characters. Large-object manifests, ranged GETs and auth-version
    mismatches are all simulated so the Store code paths can be exercised
    without a real Swift endpoint.
    """
    fixture_containers = ['glance']
    fixture_container_headers = {}
    # NOTE(review): FAKE_UUID is a function object; '%s' interpolates its
    # repr(). The test URIs interpolate the same object, so keys match.
    fixture_headers = {
        'glance/%s' % FAKE_UUID: {
            'content-length': FIVE_KB,
            'etag': 'c2e5db72bd7fd153f53ede5da5a06de3'
        }
    }
    fixture_objects = {'glance/%s' % FAKE_UUID:
                       six.StringIO("*" * FIVE_KB)}
    def fake_head_container(url, token, container, **kwargs):
        if container not in fixture_containers:
            msg = "No container %s found" % container
            raise swiftclient.ClientException(msg,
                                              http_status=httplib.NOT_FOUND)
        return fixture_container_headers
    def fake_put_container(url, token, container, **kwargs):
        fixture_containers.append(container)
    def fake_post_container(url, token, container, headers, http_conn=None):
        for key, value in six.iteritems(headers):
            fixture_container_headers[key] = value
    def fake_put_object(url, token, container, name, contents, **kwargs):
        # PUT returns the ETag header for the newly-added object
        # Large object manifest...
        global SWIFT_PUT_OBJECT_CALLS
        SWIFT_PUT_OBJECT_CALLS += 1
        CHUNKSIZE = 64 * units.Ki
        fixture_key = "%s/%s" % (container, name)
        if fixture_key not in fixture_headers:
            if kwargs.get('headers'):
                # An explicit ETag header marks a large-object manifest.
                etag = kwargs['headers']['ETag']
                fixture_headers[fixture_key] = {'manifest': True,
                                                'etag': etag}
                return etag
            if hasattr(contents, 'read'):
                # File-like contents: stream in CHUNKSIZE pieces while
                # computing the md5 checksum.
                fixture_object = six.StringIO()
                chunk = contents.read(CHUNKSIZE)
                checksum = hashlib.md5()
                while chunk:
                    fixture_object.write(chunk)
                    checksum.update(chunk)
                    chunk = contents.read(CHUNKSIZE)
                etag = checksum.hexdigest()
            else:
                fixture_object = six.StringIO(contents)
                etag = hashlib.md5(fixture_object.getvalue()).hexdigest()
            read_len = fixture_object.len
            if read_len > MAX_SWIFT_OBJECT_SIZE:
                msg = ('Image size:%d exceeds Swift max:%d' %
                       (read_len, MAX_SWIFT_OBJECT_SIZE))
                raise swiftclient.ClientException(
                    msg, http_status=httplib.REQUEST_ENTITY_TOO_LARGE)
            fixture_objects[fixture_key] = fixture_object
            fixture_headers[fixture_key] = {
                'content-length': read_len,
                'etag': etag}
            return etag
        else:
            msg = ("Object PUT failed - Object with key %s already exists"
                   % fixture_key)
            raise swiftclient.ClientException(msg,
                                              http_status=httplib.CONFLICT)
    def fake_get_object(url, token, container, name, **kwargs):
        # GET returns the tuple (list of headers, file object)
        fixture_key = "%s/%s" % (container, name)
        if fixture_key not in fixture_headers:
            msg = "Object GET failed"
            raise swiftclient.ClientException(msg,
                                              http_status=httplib.NOT_FOUND)
        byte_range = None
        headers = kwargs.get('headers', dict())
        if headers is not None:
            headers = dict((k.lower(), v) for k, v in six.iteritems(headers))
            if 'range' in headers:
                byte_range = headers.get('range')
        fixture = fixture_headers[fixture_key]
        if 'manifest' in fixture:
            # Large object manifest... we return a file containing
            # all objects with prefix of this fixture key
            chunk_keys = sorted([k for k in fixture_headers.keys()
                                 if k.startswith(fixture_key) and
                                 k != fixture_key])
            result = six.StringIO()
            for key in chunk_keys:
                result.write(fixture_objects[key].getvalue())
        else:
            result = fixture_objects[fixture_key]
        if byte_range is not None:
            # Honor a 'Range: bytes=N-' header by slicing off the prefix.
            start = int(byte_range.split('=')[1].strip('-'))
            result = six.StringIO(result.getvalue()[start:])
            fixture_headers[fixture_key]['content-length'] = len(
                result.getvalue())
        return fixture_headers[fixture_key], result
    def fake_head_object(url, token, container, name, **kwargs):
        # HEAD returns the list of headers for an object
        try:
            fixture_key = "%s/%s" % (container, name)
            return fixture_headers[fixture_key]
        except KeyError:
            msg = "Object HEAD failed - Object does not exist"
            raise swiftclient.ClientException(msg,
                                              http_status=httplib.NOT_FOUND)
    def fake_delete_object(url, token, container, name, **kwargs):
        # DELETE returns nothing
        fixture_key = "%s/%s" % (container, name)
        if fixture_key not in fixture_headers:
            msg = "Object DELETE failed - Object does not exist"
            raise swiftclient.ClientException(msg,
                                              http_status=httplib.NOT_FOUND)
        else:
            del fixture_headers[fixture_key]
            del fixture_objects[fixture_key]
    def fake_http_connection(*args, **kwargs):
        return None
    def fake_get_auth(url, user, key, snet, auth_version, **kwargs):
        if url is None:
            return None, None
        if 'http' in url and '://' not in url:
            raise ValueError('Invalid url %s' % url)
        # Check the auth version against the configured value
        if swift_store_auth_version != auth_version:
            msg = 'AUTHENTICATION failed (version mismatch)'
            raise swiftclient.ClientException(msg)
        return None, None
    stubs.Set(swiftclient.client,
              'head_container', fake_head_container)
    stubs.Set(swiftclient.client,
              'put_container', fake_put_container)
    stubs.Set(swiftclient.client,
              'post_container', fake_post_container)
    stubs.Set(swiftclient.client,
              'put_object', fake_put_object)
    stubs.Set(swiftclient.client,
              'delete_object', fake_delete_object)
    stubs.Set(swiftclient.client,
              'head_object', fake_head_object)
    stubs.Set(swiftclient.client,
              'get_object', fake_get_object)
    stubs.Set(swiftclient.client,
              'get_auth', fake_get_auth)
    stubs.Set(swiftclient.client,
              'http_connection', fake_http_connection)
class SwiftTests(object):
    @property
    def swift_store_user(self):
        """Tenant-qualified user string used in the test store URIs."""
        return 'tenant:user1'
    def test_get_size(self):
        """
        Test that we can get the size of an object in the swift store
        """
        uri = "swift://%s:key@auth_address/glance/%s" % (
            self.swift_store_user, FAKE_UUID)
        loc = get_location_from_uri(uri)
        image_size = self.store.get_size(loc)
        # 5120 == FIVE_KB, the size of the canned fixture object.
        self.assertEqual(image_size, 5120)
    def test_validate_location_for_invalid_uri(self):
        """
        Test that validate location raises when the location contains
        any account reference.
        """
        # swift+config:// URIs carry an account reference and must be
        # rejected.
        uri = "swift+config://store_1/glance/%s"
        self.assertRaises(exceptions.BadStoreUri,
                          self.store.validate_location,
                          uri)
    def test_validate_location_for_valid_uri(self):
        """
        Test that validate location verifies that the location does not
        contain any account reference
        """
        uri = "swift://user:key@auth_address/glance/%s"
        # validate_location returns None on success; any exception is a
        # failure of this test, hence the explicit try/except.
        try:
            self.assertIsNone(self.store.validate_location(uri))
        except Exception:
            self.fail('Location uri validation failed')
    def test_get_size_with_multi_tenant_on(self):
        """Test that single tenant uris work with multi tenant on."""
        uri = ("swift://%s:key@auth_address/glance/%s" %
               (self.swift_store_user, FAKE_UUID))
        self.config(swift_store_multi_tenant=True)
        #NOTE(markwash): ensure the image is found
        size = backend.get_size_from_backend(uri, context={})
        # 5120 == FIVE_KB, the size of the canned fixture object.
        self.assertEqual(size, 5120)
    def test_get(self):
        """Test a "normal" retrieval of an image in chunks"""
        uri = "swift://%s:key@auth_address/glance/%s" % (
            self.swift_store_user, FAKE_UUID)
        loc = get_location_from_uri(uri)
        (image_swift, image_size) = self.store.get(loc)
        self.assertEqual(image_size, 5120)
        # Re-assemble the chunked iterator and compare against the fixture.
        expected_data = "*" * FIVE_KB
        data = ""
        for chunk in image_swift:
            data += chunk
        self.assertEqual(expected_data, data)
    def test_get_with_retry(self):
        """
        Test a retrieval where Swift does not get the full image in a single
        request.
        """
        uri = "swift://%s:key@auth_address/glance/%s" % (
            self.swift_store_user, FAKE_UUID)
        loc = get_location_from_uri(uri)
        ctxt = context.RequestContext()
        (image_swift, image_size) = self.store.get(loc, context=ctxt)
        # Simulate a truncated response: feed only the first half through
        # swift_retry_iter so the rest is fetched via a ranged GET.
        resp_full = ''.join([chunk for chunk in image_swift.wrapped])
        resp_half = resp_full[:len(resp_full) / 2]
        image_swift.wrapped = swift.swift_retry_iter(resp_half, image_size,
                                                     self.store,
                                                     loc.store_location,
                                                     ctxt)
        self.assertEqual(image_size, 5120)
        expected_data = "*" * FIVE_KB
        data = ""
        for chunk in image_swift:
            data += chunk
        self.assertEqual(expected_data, data)
    def test_get_with_http_auth(self):
        """
        Test a retrieval from Swift with an HTTP authurl. This is
        specified either via a Location header with swift+http:// or using
        http:// in the swift_store_auth_address config value
        """
        loc = get_location_from_uri("swift+http://%s:key@auth_address/"
                                    "glance/%s" %
                                    (self.swift_store_user, FAKE_UUID))
        ctxt = context.RequestContext()
        (image_swift, image_size) = self.store.get(loc, context=ctxt)
        self.assertEqual(image_size, 5120)
        # The fixture object is 5KB of '*' characters.
        expected_data = "*" * FIVE_KB
        data = ""
        for chunk in image_swift:
            data += chunk
        self.assertEqual(expected_data, data)
    def test_get_non_existing(self):
        """
        Test that trying to retrieve a swift object that doesn't exist
        raises an error
        """
        # 'noexist' is not seeded in the stubbed fixtures.
        loc = get_location_from_uri("swift://%s:key@authurl/glance/noexist" % (
            self.swift_store_user))
        self.assertRaises(exceptions.NotFound,
                          self.store.get,
                          loc)
    def test_add(self):
        """Test that we can add an image via the swift backend"""
        # Force single-account mode, then rebuild the store from config.
        sutils.is_multiple_swift_store_accounts_enabled = \
            mock.Mock(return_value=False)
        reload(swift)
        self.store = Store(self.conf)
        self.store.configure()
        expected_swift_size = FIVE_KB
        expected_swift_contents = "*" * expected_swift_size
        expected_checksum = hashlib.md5(expected_swift_contents).hexdigest()
        expected_image_id = str(uuid.uuid4())
        # '%%3A' is an escaped ':' in the tenant:user credential part.
        loc = "swift+https://tenant%%3Auser1:key@localhost:8080/glance/%s"
        expected_location = loc % (expected_image_id)
        image_swift = six.StringIO(expected_swift_contents)
        global SWIFT_PUT_OBJECT_CALLS
        SWIFT_PUT_OBJECT_CALLS = 0
        location, size, checksum, _ = self.store.add(expected_image_id,
                                                     image_swift,
                                                     expected_swift_size)
        self.assertEqual(expected_location, location)
        self.assertEqual(expected_swift_size, size)
        self.assertEqual(expected_checksum, checksum)
        # Expecting a single object to be created on Swift i.e. no chunking.
        self.assertEqual(SWIFT_PUT_OBJECT_CALLS, 1)
        loc = get_location_from_uri(expected_location)
        (new_image_swift, new_image_size) = self.store.get(loc)
        new_image_contents = ''.join([chunk for chunk in new_image_swift])
        new_image_swift_size = len(new_image_swift)
        self.assertEqual(expected_swift_contents, new_image_contents)
        self.assertEqual(expected_swift_size, new_image_swift_size)
    def test_add_multi_store(self):
        """Adding with a non-default swift reference yields a
        swift+config:// location naming that reference."""
        conf = copy.deepcopy(SWIFT_CONF)
        conf['default_swift_reference'] = 'store_2'
        self.config(**conf)
        reload(swift)
        self.store = Store(self.conf)
        self.store.configure()
        expected_swift_size = FIVE_KB
        expected_swift_contents = "*" * expected_swift_size
        expected_image_id = str(uuid.uuid4())
        image_swift = six.StringIO(expected_swift_contents)
        global SWIFT_PUT_OBJECT_CALLS
        SWIFT_PUT_OBJECT_CALLS = 0
        loc = 'swift+config://store_2/glance/%s'
        expected_location = loc % (expected_image_id)
        location, size, checksum, arg = self.store.add(expected_image_id,
                                                       image_swift,
                                                       expected_swift_size)
        self.assertEqual(expected_location, location)
    def test_add_auth_url_variations(self):
        """
        Test that we can add an image via the swift backend with
        a variety of different auth_address values
        """
        sutils.is_multiple_swift_store_accounts_enabled = \
            mock.Mock(return_value=True)
        conf = copy.deepcopy(SWIFT_CONF)
        self.config(**conf)
        # Map each swift reference name to the location the add() call is
        # expected to produce for it.
        variations = {
            'store_4': 'swift+config://store_4/glance/%s',
            'store_5': 'swift+config://store_5/glance/%s',
            'store_6': 'swift+config://store_6/glance/%s'
        }
        for variation, expected_location in variations.items():
            image_id = str(uuid.uuid4())
            expected_location = expected_location % image_id
            expected_swift_size = FIVE_KB
            expected_swift_contents = "*" * expected_swift_size
            expected_checksum = \
                hashlib.md5(expected_swift_contents).hexdigest()
            image_swift = six.StringIO(expected_swift_contents)
            global SWIFT_PUT_OBJECT_CALLS
            SWIFT_PUT_OBJECT_CALLS = 0
            # Rebuild the store with this reference as the default.
            conf['default_swift_reference'] = variation
            self.config(**conf)
            reload(swift)
            self.store = Store(self.conf)
            self.store.configure()
            location, size, checksum, _ = self.store.add(image_id, image_swift,
                                                         expected_swift_size)
            self.assertEqual(expected_location, location)
            self.assertEqual(expected_swift_size, size)
            self.assertEqual(expected_checksum, checksum)
            self.assertEqual(SWIFT_PUT_OBJECT_CALLS, 1)
            loc = get_location_from_uri(expected_location)
            (new_image_swift, new_image_size) = self.store.get(loc)
            new_image_contents = ''.join([chunk for chunk in new_image_swift])
            new_image_swift_size = len(new_image_swift)
            self.assertEqual(expected_swift_contents, new_image_contents)
            self.assertEqual(expected_swift_size, new_image_swift_size)
def test_add_no_container_no_create(self):
"""
Tests that adding an image with a non-existing container
raises an appropriate exception
"""
conf = copy.deepcopy(SWIFT_CONF)
conf['swift_store_user'] = 'tenant:user'
conf['swift_store_create_container_on_put'] = False
conf['swift_store_container'] = 'noexist'
self.config(**conf)
reload(swift)
self.store = Store(self.conf)
self.store.configure()
image_swift = six.StringIO("nevergonnamakeit")
global SWIFT_PUT_OBJECT_CALLS
SWIFT_PUT_OBJECT_CALLS = 0
# We check the exception text to ensure the container
# missing text is found in it, otherwise, we would have
# simply used self.assertRaises here
exception_caught = False
try:
self.store.add(str(uuid.uuid4()), image_swift, 0)
except BackendException as e:
exception_caught = True
self.assertIn("container noexist does not exist "
"in Swift", unicode(e))
self.assertTrue(exception_caught)
self.assertEqual(SWIFT_PUT_OBJECT_CALLS, 0)
    def test_add_no_container_and_create(self):
        """
        Tests that adding an image with a non-existing container
        creates the container automatically if flag is set
        """
        sutils.is_multiple_swift_store_accounts_enabled = \
            mock.Mock(return_value=True)
        expected_swift_size = FIVE_KB
        expected_swift_contents = "*" * expected_swift_size
        expected_checksum = hashlib.md5(expected_swift_contents).hexdigest()
        expected_image_id = str(uuid.uuid4())
        loc = 'swift+config://ref1/noexist/%s'
        expected_location = loc % (expected_image_id)
        image_swift = six.StringIO(expected_swift_contents)
        global SWIFT_PUT_OBJECT_CALLS
        SWIFT_PUT_OBJECT_CALLS = 0
        # Enable create-on-put and point at a container the stubs don't
        # know about yet.
        conf = copy.deepcopy(SWIFT_CONF)
        conf['swift_store_user'] = 'tenant:user'
        conf['swift_store_create_container_on_put'] = True
        conf['swift_store_container'] = 'noexist'
        self.config(**conf)
        reload(swift)
        self.store = Store(self.conf)
        self.store.configure()
        location, size, checksum, _ = self.store.add(expected_image_id,
                                                     image_swift,
                                                     expected_swift_size)
        self.assertEqual(expected_location, location)
        self.assertEqual(expected_swift_size, size)
        self.assertEqual(expected_checksum, checksum)
        self.assertEqual(SWIFT_PUT_OBJECT_CALLS, 1)
        loc = get_location_from_uri(expected_location)
        (new_image_swift, new_image_size) = self.store.get(loc)
        new_image_contents = ''.join([chunk for chunk in new_image_swift])
        new_image_swift_size = len(new_image_swift)
        self.assertEqual(expected_swift_contents, new_image_contents)
        self.assertEqual(expected_swift_size, new_image_swift_size)
    def test_add_large_object(self):
        """
        Tests adding a very large image. We simulate the large
        object by setting store.large_object_size to a small number
        and then verify that there have been a number of calls to
        put_object()...
        """
        sutils.is_multiple_swift_store_accounts_enabled = \
            mock.Mock(return_value=True)
        expected_swift_size = FIVE_KB
        expected_swift_contents = "*" * expected_swift_size
        expected_checksum = hashlib.md5(expected_swift_contents).hexdigest()
        expected_image_id = str(uuid.uuid4())
        loc = 'swift+config://ref1/glance/%s'
        expected_location = loc % (expected_image_id)
        image_swift = six.StringIO(expected_swift_contents)
        global SWIFT_PUT_OBJECT_CALLS
        SWIFT_PUT_OBJECT_CALLS = 0
        self.store = Store(self.conf)
        self.store.configure()
        # Shrink the chunking thresholds so a 5KB image is treated as a
        # large object; restore them afterwards.
        orig_max_size = self.store.large_object_size
        orig_temp_size = self.store.large_object_chunk_size
        try:
            self.store.large_object_size = 1024
            self.store.large_object_chunk_size = 1024
            location, size, checksum, _ = self.store.add(expected_image_id,
                                                         image_swift,
                                                         expected_swift_size)
        finally:
            self.store.large_object_chunk_size = orig_temp_size
            self.store.large_object_size = orig_max_size
        self.assertEqual(expected_location, location)
        self.assertEqual(expected_swift_size, size)
        self.assertEqual(expected_checksum, checksum)
        # Expecting 6 objects to be created on Swift -- 5 chunks and 1
        # manifest.
        self.assertEqual(SWIFT_PUT_OBJECT_CALLS, 6)
        loc = get_location_from_uri(expected_location)
        (new_image_swift, new_image_size) = self.store.get(loc)
        new_image_contents = ''.join([chunk for chunk in new_image_swift])
        new_image_swift_size = len(new_image_contents)
        self.assertEqual(expected_swift_contents, new_image_contents)
        self.assertEqual(expected_swift_size, new_image_swift_size)
    def test_add_large_object_zero_size(self):
        """
        Tests that adding an image to Swift which has both an unknown size and
        exceeds Swift's maximum limit of 5GB is correctly uploaded.
        We avoid the overhead of creating a 5GB object for this test by
        temporarily setting MAX_SWIFT_OBJECT_SIZE to 1KB, and then adding
        an object of 5KB.
        Bug lp:891738
        """
        # Set up a 'large' image of 5KB
        expected_swift_size = FIVE_KB
        expected_swift_contents = "*" * expected_swift_size
        expected_checksum = hashlib.md5(expected_swift_contents).hexdigest()
        expected_image_id = str(uuid.uuid4())
        loc = 'swift+config://ref1/glance/%s'
        expected_location = loc % (expected_image_id)
        image_swift = six.StringIO(expected_swift_contents)
        global SWIFT_PUT_OBJECT_CALLS
        SWIFT_PUT_OBJECT_CALLS = 0
        # Temporarily set Swift MAX_SWIFT_OBJECT_SIZE to 1KB and add our image,
        # explicitly setting the image_length to 0
        self.store = Store(self.conf)
        self.store.configure()
        orig_max_size = self.store.large_object_size
        orig_temp_size = self.store.large_object_chunk_size
        global MAX_SWIFT_OBJECT_SIZE
        orig_max_swift_object_size = MAX_SWIFT_OBJECT_SIZE
        try:
            MAX_SWIFT_OBJECT_SIZE = 1024
            self.store.large_object_size = 1024
            self.store.large_object_chunk_size = 1024
            # image_length of 0 tells the store the size is unknown.
            location, size, checksum, _ = self.store.add(expected_image_id,
                                                         image_swift, 0)
        finally:
            # Restore the module and store limits even if add() raises.
            self.store.large_object_chunk_size = orig_temp_size
            self.store.large_object_size = orig_max_size
            MAX_SWIFT_OBJECT_SIZE = orig_max_swift_object_size
        self.assertEqual(expected_location, location)
        self.assertEqual(expected_swift_size, size)
        self.assertEqual(expected_checksum, checksum)
        # Expecting 7 calls to put_object -- 5 chunks, a zero chunk which is
        # then deleted, and the manifest.  Note the difference with above
        # where the image_size is specified in advance (there's no zero chunk
        # in that case).
        self.assertEqual(SWIFT_PUT_OBJECT_CALLS, 7)
        loc = get_location_from_uri(expected_location)
        (new_image_swift, new_image_size) = self.store.get(loc)
        new_image_contents = ''.join([chunk for chunk in new_image_swift])
        new_image_swift_size = len(new_image_contents)
        self.assertEqual(expected_swift_contents, new_image_contents)
        self.assertEqual(expected_swift_size, new_image_swift_size)
def test_add_already_existing(self):
"""
Tests that adding an image with an existing identifier
raises an appropriate exception
"""
image_swift = six.StringIO("nevergonnamakeit")
self.assertRaises(exceptions.Duplicate,
self.store.add,
FAKE_UUID, image_swift, 0)
def _option_required(self, key):
conf = self.getConfig()
conf[key] = None
try:
self.config(**conf)
self.store = Store(self.conf)
return self.store.add == self.store.add_disabled
except Exception:
return False
return False
def test_no_store_credentials(self):
"""
Tests that options without a valid credentials disables the add method
"""
swift.SWIFT_STORE_REF_PARAMS = {'ref1': {'auth_address':
'authurl.com', 'user': '',
'key': ''}}
self.store = Store(self.conf)
self.store.configure()
self.assertEqual(self.store.add, self.store.add_disabled)
def test_no_auth_address(self):
"""
Tests that options without auth address disables the add method
"""
swift.SWIFT_STORE_REF_PARAMS = {'ref1': {'auth_address':
'', 'user': 'user1',
'key': 'key1'}}
self.store = Store(self.conf)
self.store.configure()
self.assertEqual(self.store.add, self.store.add_disabled)
def test_delete(self):
"""
Test we can delete an existing image in the swift store
"""
uri = "swift://%s:key@authurl/glance/%s" % (
self.swift_store_user, FAKE_UUID)
loc = get_location_from_uri(uri)
self.store.delete(loc)
self.assertRaises(exceptions.NotFound, self.store.get, loc)
    def test_delete_with_reference_params(self):
        """
        Deleting an image addressed via a swift+config:// reference URI
        removes it from the store.
        """
        uri = "swift+config://ref1/glance/%s" % (FAKE_UUID)
        loc = get_location_from_uri(uri)
        self.store.delete(loc)
        self.assertRaises(exceptions.NotFound, self.store.get, loc)
def test_delete_non_existing(self):
"""
Test that trying to delete a swift that doesn't exist
raises an error
"""
loc = get_location_from_uri("swift://%s:key@authurl/glance/noexist" % (
self.swift_store_user))
self.assertRaises(exceptions.NotFound, self.store.delete, loc)
def test_read_acl_public(self):
"""
Test that we can set a public read acl.
"""
self.config(swift_store_multi_tenant=True)
store = Store(self.conf)
store.configure()
uri = "swift+http://storeurl/glance/%s" % FAKE_UUID
loc = get_location_from_uri(uri)
ctxt = context.RequestContext()
store.set_acls(loc, public=True, context=ctxt)
container_headers = swiftclient.client.head_container('x', 'y',
'glance')
self.assertEqual(container_headers['X-Container-Read'],
".r:*,.rlistings")
def test_read_acl_tenants(self):
"""
Test that we can set read acl for tenants.
"""
self.config(swift_store_multi_tenant=True)
store = Store(self.conf)
store.configure()
uri = "swift+http://storeurl/glance/%s" % FAKE_UUID
loc = get_location_from_uri(uri)
read_tenants = ['matt', 'mark']
ctxt = context.RequestContext()
store.set_acls(loc, read_tenants=read_tenants, context=ctxt)
container_headers = swiftclient.client.head_container('x', 'y',
'glance')
self.assertEqual(container_headers['X-Container-Read'],
'matt:*,mark:*')
def test_write_acls(self):
"""
Test that we can set write acl for tenants.
"""
self.config(swift_store_multi_tenant=True)
store = Store(self.conf)
store.configure()
uri = "swift+http://storeurl/glance/%s" % FAKE_UUID
loc = get_location_from_uri(uri)
read_tenants = ['frank', 'jim']
ctxt = context.RequestContext()
store.set_acls(loc, write_tenants=read_tenants, context=ctxt)
container_headers = swiftclient.client.head_container('x', 'y',
'glance')
self.assertEqual(container_headers['X-Container-Write'],
'frank:*,jim:*')
class TestStoreAuthV1(base.StoreBaseTest, SwiftTests):
    """Run the shared SwiftTests mixin against Swift auth version 1."""
    _CONF = cfg.CONF
    def getConfig(self):
        # Base config plus the v1-specific auth settings.
        conf = SWIFT_CONF.copy()
        conf['swift_store_auth_version'] = '1'
        conf['swift_store_user'] = 'tenant:user1'
        return conf
    def setUp(self):
        """Establish a clean test environment with a stubbed swiftclient."""
        super(TestStoreAuthV1, self).setUp()
        conf = self.getConfig()
        conf_file = 'glance-swift.conf'
        self.swift_config_file = self.copy_data_file(conf_file, self.test_dir)
        conf.update({'swift_store_config_file': self.swift_config_file})
        # Replace swiftclient with in-memory fakes before the store is
        # configured so no network access happens.
        self.stubs = stubout.StubOutForTesting()
        stub_out_swiftclient(self.stubs, conf['swift_store_auth_version'])
        self.store = Store(self.conf)
        self.config(**conf)
        self.store.configure()
        self.addCleanup(self.stubs.UnsetAll)
        self.register_store_schemes(self.store)
        swift.SWIFT_STORE_REF_PARAMS = sutils.SwiftParams().params
        self.addCleanup(self.conf.reset)
class TestStoreAuthV2(TestStoreAuthV1):
    """Re-run the SwiftTests suite against Swift auth version 2."""
    def getConfig(self):
        conf = super(TestStoreAuthV2, self).getConfig()
        conf['swift_store_auth_version'] = '2'
        conf['swift_store_user'] = 'tenant:user1'
        return conf
    def test_v2_with_no_tenant(self):
        # v2 auth requires a tenant-qualified user; a bare user in the
        # URI must be rejected.
        uri = "swift://failme:key@auth_address/glance/%s" % (FAKE_UUID)
        loc = get_location_from_uri(uri)
        self.assertRaises(exceptions.BadStoreUri,
                          self.store.get,
                          loc)
    def test_v2_multi_tenant_location(self):
        # Multi-tenant URIs carry no credentials but still parse as the
        # 'swift' store.
        conf = self.getConfig()
        conf['swift_store_multi_tenant'] = True
        uri = "swift://auth_address/glance/%s" % (FAKE_UUID)
        loc = get_location_from_uri(uri)
        self.assertEqual('swift', loc.store_name)
class FakeConnection(object):
    """Minimal stand-in for ``swiftclient.Connection``.

    Records the constructor arguments that the store under test passes,
    so tests can assert on them; performs no network activity.  The
    ``retries``, ``starting_backoff`` and ``ssl_compression`` parameters
    are accepted for signature compatibility but not recorded.
    """

    def __init__(self, authurl, user, key, retries=5, preauthurl=None,
                 preauthtoken=None, snet=False, starting_backoff=1,
                 tenant_name=None, os_options=None, auth_version="1",
                 insecure=False, ssl_compression=True):
        self.authurl = authurl
        self.user = user
        self.key = key
        self.preauthurl = preauthurl
        self.preauthtoken = preauthtoken
        self.snet = snet
        self.tenant_name = tenant_name
        # swiftclient treats a missing os_options as an empty mapping.
        self.os_options = os_options if os_options is not None else {}
        self.auth_version = auth_version
        self.insecure = insecure
class TestSingleTenantStoreConnections(base.StoreBaseTest):
    """Verify the swiftclient connection parameters that
    SingleTenantStore builds for various configurations and locations."""
    _CONF = cfg.CONF
    def setUp(self):
        super(TestSingleTenantStoreConnections, self).setUp()
        moxfixture = self.useFixture(moxstubout.MoxStubout())
        self.stubs = moxfixture.stubs
        # FakeConnection records the kwargs instead of talking to Swift.
        self.stubs.Set(swiftclient, 'Connection', FakeConnection)
        self.store = swift.SingleTenantStore(self.conf)
        self.store.configure()
        specs = {'scheme': 'swift',
                 'auth_or_store_url': 'example.com/v2/',
                 'user': 'tenant:user1',
                 'key': 'key1',
                 'container': 'cont',
                 'obj': 'object'}
        self.location = swift.StoreLocation(specs)
        self.addCleanup(self.conf.reset)
    def test_basic_connection(self):
        connection = self.store.get_connection(self.location)
        self.assertEqual(connection.authurl, 'https://example.com/v2/')
        self.assertEqual(connection.auth_version, '2')
        self.assertEqual(connection.user, 'user1')
        self.assertEqual(connection.tenant_name, 'tenant')
        self.assertFalse(connection.snet)
        self.assertEqual(connection.key, 'key1')
        self.assertIsNone(connection.preauthurl)
        self.assertIsNone(connection.preauthtoken)
        self.assertFalse(connection.insecure)
        self.assertEqual(connection.os_options,
                         {'service_type': 'object-store',
                          'endpoint_type': 'publicURL'})
    def test_connection_with_no_trailing_slash(self):
        # The auth URL is normalized to end with a slash.
        self.location.auth_or_store_url = 'example.com/v2'
        connection = self.store.get_connection(self.location)
        self.assertEqual(connection.authurl, 'https://example.com/v2/')
    def test_connection_insecure(self):
        self.config(swift_store_auth_insecure=True)
        self.store.configure()
        connection = self.store.get_connection(self.location)
        self.assertTrue(connection.insecure)
    def test_connection_with_auth_v1(self):
        self.config(swift_store_auth_version='1')
        self.store.configure()
        # v1 auth carries no tenant component in the user name.
        self.location.user = 'auth_v1_user'
        connection = self.store.get_connection(self.location)
        self.assertEqual(connection.auth_version, '1')
        self.assertEqual(connection.user, 'auth_v1_user')
        self.assertIsNone(connection.tenant_name)
    def test_connection_invalid_user(self):
        self.store.configure()
        self.location.user = 'invalid:format:user'
        self.assertRaises(exceptions.BadStoreUri,
                          self.store.get_connection, self.location)
    def test_connection_missing_user(self):
        self.store.configure()
        self.location.user = None
        self.assertRaises(exceptions.BadStoreUri,
                          self.store.get_connection, self.location)
    def test_connection_with_region(self):
        self.config(swift_store_region='Sahara')
        self.store.configure()
        connection = self.store.get_connection(self.location)
        self.assertEqual(connection.os_options,
                         {'region_name': 'Sahara',
                          'service_type': 'object-store',
                          'endpoint_type': 'publicURL'})
    def test_connection_with_service_type(self):
        self.config(swift_store_service_type='shoe-store')
        self.store.configure()
        connection = self.store.get_connection(self.location)
        self.assertEqual(connection.os_options,
                         {'service_type': 'shoe-store',
                          'endpoint_type': 'publicURL'})
    def test_connection_with_endpoint_type(self):
        self.config(swift_store_endpoint_type='internalURL')
        self.store.configure()
        connection = self.store.get_connection(self.location)
        self.assertEqual(connection.os_options,
                         {'service_type': 'object-store',
                          'endpoint_type': 'internalURL'})
    def test_connection_with_snet(self):
        self.config(swift_enable_snet=True)
        self.store.configure()
        connection = self.store.get_connection(self.location)
        self.assertTrue(connection.snet)
    def test_bad_location_uri(self):
        self.store.configure()
        self.location.uri = 'http://bad_uri://'
        self.assertRaises(exceptions.BadStoreUri,
                          self.location.parse_uri,
                          self.location.uri)
    def test_bad_location_uri_invalid_credentials(self):
        self.store.configure()
        self.location.uri = 'swift://bad_creds@uri/cont/obj'
        self.assertRaises(exceptions.BadStoreUri,
                          self.location.parse_uri,
                          self.location.uri)
    def test_bad_location_uri_invalid_object_path(self):
        self.store.configure()
        self.location.uri = 'swift://user:key@uri/cont'
        self.assertRaises(exceptions.BadStoreUri,
                          self.location.parse_uri,
                          self.location.uri)
class TestMultiTenantStoreConnections(base.StoreBaseTest):
    """Verify the swiftclient connection parameters that
    MultiTenantStore derives from the request context."""
    def setUp(self):
        super(TestMultiTenantStoreConnections, self).setUp()
        moxfixture = self.useFixture(moxstubout.MoxStubout())
        self.stubs = moxfixture.stubs
        self.stubs.Set(swiftclient, 'Connection', FakeConnection)
        self.context = context.RequestContext(
            user='tenant:user1', tenant='tenant', auth_token='0123')
        self.store = swift.MultiTenantStore(self.conf)
        specs = {'scheme': 'swift',
                 'auth_or_store_url': 'example.com',
                 'container': 'cont',
                 'obj': 'object'}
        self.location = swift.StoreLocation(specs)
        self.addCleanup(self.conf.reset)
    def test_basic_connection(self):
        self.store.configure()
        connection = self.store.get_connection(self.location,
                                               context=self.context)
        # Multi-tenant mode pre-authenticates with the context's token
        # (preauthurl/preauthtoken) instead of passing credentials.
        self.assertIsNone(connection.authurl)
        self.assertEqual(connection.auth_version, '2')
        self.assertEqual(connection.user, 'tenant:user1')
        self.assertEqual(connection.tenant_name, 'tenant')
        self.assertIsNone(connection.key)
        self.assertFalse(connection.snet)
        self.assertEqual(connection.preauthurl, 'https://example.com')
        self.assertEqual(connection.preauthtoken, '0123')
        self.assertEqual(connection.os_options, {})
    def test_connection_with_snet(self):
        self.config(swift_enable_snet=True)
        self.store.configure()
        connection = self.store.get_connection(self.location,
                                               context=self.context)
        self.assertTrue(connection.snet)
class FakeGetEndpoint(object):
    """Callable stand-in for ``auth.get_endpoint``.

    Always returns the canned *response* and records the keyword
    arguments of the most recent call so tests can assert on them.
    """

    def __init__(self, response):
        # The endpoint URL every call will return.
        self.response = response

    def __call__(self, service_catalog, service_type=None,
                 endpoint_region=None, endpoint_type=None):
        # Remember what the store asked for; the catalog is ignored.
        self.endpoint_region = endpoint_region
        self.endpoint_type = endpoint_type
        self.service_type = service_type
        return self.response
class TestCreatingLocations(base.StoreBaseTest):
    """Verify the store locations generated for newly added images."""
    _CONF = cfg.CONF
    def setUp(self):
        super(TestCreatingLocations, self).setUp()
        moxfixture = self.useFixture(moxstubout.MoxStubout())
        self.stubs = moxfixture.stubs
        conf = copy.deepcopy(SWIFT_CONF)
        self.store = Store(self.conf)
        self.config(**conf)
        # Re-import so module-level state picks up the fresh config.
        reload(swift)
        self.addCleanup(self.conf.reset)
    def test_single_tenant_location(self):
        conf = copy.deepcopy(SWIFT_CONF)
        conf['swift_store_container'] = 'container'
        conf_file = "glance-swift.conf"
        self.swift_config_file = self.copy_data_file(conf_file, self.test_dir)
        conf.update({'swift_store_config_file': self.swift_config_file})
        conf['default_swift_reference'] = 'ref1'
        self.config(**conf)
        reload(swift)
        store = swift.SingleTenantStore(self.conf)
        store.configure()
        location = store.create_location('image-id')
        self.assertEqual(location.scheme, 'swift+https')
        self.assertEqual(location.swift_url, 'https://example.com')
        self.assertEqual(location.container, 'container')
        self.assertEqual(location.obj, 'image-id')
        self.assertEqual(location.user, 'tenant:user1')
        self.assertEqual(location.key, 'key1')
    def test_single_tenant_location_http(self):
        # 'ref2' in the data file uses a plain-http auth address.
        conf_file = "glance-swift.conf"
        test_dir = self.useFixture(fixtures.TempDir()).path
        self.swift_config_file = self.copy_data_file(conf_file, test_dir)
        self.config(swift_store_container='container',
                    default_swift_reference='ref2',
                    swift_store_config_file=self.swift_config_file)
        swift.SWIFT_STORE_REF_PARAMS = sutils.SwiftParams().params
        store = swift.SingleTenantStore(self.conf)
        store.configure()
        location = store.create_location('image-id')
        self.assertEqual(location.scheme, 'swift+http')
        self.assertEqual(location.swift_url, 'http://example.com')
    def test_multi_tenant_location(self):
        self.config(swift_store_container='container')
        fake_get_endpoint = FakeGetEndpoint('https://some_endpoint')
        self.stubs.Set(auth, 'get_endpoint', fake_get_endpoint)
        ctxt = context.RequestContext(
            user='user', tenant='tenant', auth_token='123',
            service_catalog={})
        store = swift.MultiTenantStore(self.conf)
        store.configure()
        location = store.create_location('image-id', context=ctxt)
        self.assertEqual(location.scheme, 'swift+https')
        self.assertEqual(location.swift_url, 'https://some_endpoint')
        # Multi-tenant containers are suffixed with the image id and the
        # location carries no credentials.
        self.assertEqual(location.container, 'container_image-id')
        self.assertEqual(location.obj, 'image-id')
        self.assertIsNone(location.user)
        self.assertIsNone(location.key)
        self.assertEqual(fake_get_endpoint.service_type, 'object-store')
    def test_multi_tenant_location_http(self):
        fake_get_endpoint = FakeGetEndpoint('http://some_endpoint')
        self.stubs.Set(auth, 'get_endpoint', fake_get_endpoint)
        ctxt = context.RequestContext(
            user='user', tenant='tenant', auth_token='123',
            service_catalog={})
        store = swift.MultiTenantStore(self.conf)
        store.configure()
        location = store.create_location('image-id', context=ctxt)
        self.assertEqual(location.scheme, 'swift+http')
        self.assertEqual(location.swift_url, 'http://some_endpoint')
    def test_multi_tenant_location_with_region(self):
        self.config(swift_store_region='WestCarolina')
        fake_get_endpoint = FakeGetEndpoint('https://some_endpoint')
        self.stubs.Set(auth, 'get_endpoint', fake_get_endpoint)
        ctxt = context.RequestContext(
            user='user', tenant='tenant', auth_token='123',
            service_catalog={})
        store = swift.MultiTenantStore(self.conf)
        store.configure()
        store._get_endpoint(ctxt)
        self.assertEqual(fake_get_endpoint.endpoint_region, 'WestCarolina')
    def test_multi_tenant_location_custom_service_type(self):
        self.config(swift_store_service_type='toy-store')
        fake_get_endpoint = FakeGetEndpoint('https://some_endpoint')
        self.stubs.Set(auth, 'get_endpoint', fake_get_endpoint)
        ctxt = context.RequestContext(
            user='user', tenant='tenant', auth_token='123',
            service_catalog={})
        store = swift.MultiTenantStore(self.conf)
        store.configure()
        store._get_endpoint(ctxt)
        self.assertEqual(fake_get_endpoint.service_type, 'toy-store')
    def test_multi_tenant_location_custom_endpoint_type(self):
        self.config(swift_store_endpoint_type='InternalURL')
        fake_get_endpoint = FakeGetEndpoint('https://some_endpoint')
        self.stubs.Set(auth, 'get_endpoint', fake_get_endpoint)
        ctxt = context.RequestContext(
            user='user', tenant='tenant', auth_token='123',
            service_catalog={})
        store = swift.MultiTenantStore(self.conf)
        store.configure()
        store._get_endpoint(ctxt)
        self.assertEqual(fake_get_endpoint.endpoint_type, 'InternalURL')
class TestChunkReader(base.StoreBaseTest):
    """Exercise swift.ChunkReader against a real temporary file."""

    _CONF = cfg.CONF

    def setUp(self):
        super(TestChunkReader, self).setUp()
        conf = copy.deepcopy(SWIFT_CONF)
        # Instantiating the store registers its config options; the
        # instance itself is not needed by these tests (the original
        # bound it to an unused local).
        Store(self.conf)
        self.config(**conf)

    def test_read_all_data(self):
        """
        Replicate what goes on in the Swift driver with the
        repeated creation of the ChunkReader object
        """
        CHUNKSIZE = 100
        checksum = hashlib.md5()
        data_file = tempfile.NamedTemporaryFile()
        data_file.write('*' * units.Ki)
        data_file.flush()
        bytes_read = 0
        # Close the input handle even on assertion failure (the
        # original leaked the file object opened here).
        with open(data_file.name, 'rb') as infile:
            while True:
                cr = swift.ChunkReader(infile, checksum, CHUNKSIZE)
                chunk = cr.read(CHUNKSIZE)
                bytes_read += len(chunk)
                if not chunk:
                    break
        self.assertEqual(1024, bytes_read)
        data_file.close()
| 40.272648 | 79 | 0.624165 |
import copy
import fixtures
import hashlib
import httplib
import mock
import tempfile
import uuid
from oslo.config import cfg
from oslotest import moxstubout
import six
import stubout
import swiftclient
from glance_store._drivers.swift import store as swift
from glance_store._drivers.swift import utils as sutils
from glance_store import backend
from glance_store import BackendException
from glance_store.common import auth
from glance_store import exceptions
from glance_store.location import get_location_from_uri
from glance_store.openstack.common import context
from glance_store.openstack.common import units
from glance_store.tests import base
CONF = cfg.CONF

# NOTE: FAKE_UUID is interpolated with %s (never called) when building
# test URIs and fixture keys, so every usage yields the same stable
# string within a single test run.
FAKE_UUID = lambda: str(uuid.uuid4())

Store = swift.Store
FIVE_KB = 5 * units.Ki
FIVE_GB = 5 * units.Gi
MAX_SWIFT_OBJECT_SIZE = FIVE_GB

# Number of calls made to the fake put_object(); reset by each test.
SWIFT_PUT_OBJECT_CALLS = 0

# Baseline store configuration shared by the test cases.  The original
# literal repeated the 'swift_store_auth_address' and
# 'swift_store_container' keys with identical values; the duplicate
# entries have been removed.
SWIFT_CONF = {'swift_store_auth_address': 'localhost:8080',
              'swift_store_container': 'glance',
              'swift_store_user': 'user',
              'swift_store_key': 'key',
              'swift_store_retry_get_count': 1,
              'default_swift_reference': 'ref1'
              }
def stub_out_swiftclient(stubs, swift_store_auth_version):
    """Stub swiftclient.client with in-memory fakes.

    Installs fake head/put/post container functions, fake
    put/get/head/delete object functions and a fake get_auth, all backed
    by closure-local fixture dicts pre-seeded with one 5KB object under
    'glance/<FAKE_UUID>'.  *swift_store_auth_version* is the auth version
    the fake get_auth will accept.
    """
    fixture_containers = ['glance']
    fixture_container_headers = {}
    fixture_headers = {
        'glance/%s' % FAKE_UUID: {
            'content-length': FIVE_KB,
            'etag': 'c2e5db72bd7fd153f53ede5da5a06de3'
        }
    }
    fixture_objects = {'glance/%s' % FAKE_UUID:
                       six.StringIO("*" * FIVE_KB)}

    def fake_head_container(url, token, container, **kwargs):
        if container not in fixture_containers:
            msg = "No container %s found" % container
            raise swiftclient.ClientException(msg,
                                              http_status=httplib.NOT_FOUND)
        return fixture_container_headers

    def fake_put_container(url, token, container, **kwargs):
        fixture_containers.append(container)

    def fake_post_container(url, token, container, headers, http_conn=None):
        for key, value in six.iteritems(headers):
            fixture_container_headers[key] = value

    def fake_put_object(url, token, container, name, contents, **kwargs):
        # Track the number of calls so tests can assert chunk counts.
        global SWIFT_PUT_OBJECT_CALLS
        SWIFT_PUT_OBJECT_CALLS += 1
        CHUNKSIZE = 64 * units.Ki
        fixture_key = "%s/%s" % (container, name)
        if fixture_key not in fixture_headers:
            if kwargs.get('headers'):
                # An explicit ETag header marks a manifest object; it is
                # recorded without any stored data.
                etag = kwargs['headers']['ETag']
                fixture_headers[fixture_key] = {'manifest': True,
                                                'etag': etag}
                return etag
            if hasattr(contents, 'read'):
                # File-like payload: consume and checksum it in chunks.
                fixture_object = six.StringIO()
                chunk = contents.read(CHUNKSIZE)
                checksum = hashlib.md5()
                while chunk:
                    fixture_object.write(chunk)
                    checksum.update(chunk)
                    chunk = contents.read(CHUNKSIZE)
                etag = checksum.hexdigest()
            else:
                fixture_object = six.StringIO(contents)
                etag = hashlib.md5(fixture_object.getvalue()).hexdigest()
            read_len = fixture_object.len
            if read_len > MAX_SWIFT_OBJECT_SIZE:
                # Mirror Swift's 413 on oversized single objects.
                msg = ('Image size:%d exceeds Swift max:%d' %
                       (read_len, MAX_SWIFT_OBJECT_SIZE))
                raise swiftclient.ClientException(
                    msg, http_status=httplib.REQUEST_ENTITY_TOO_LARGE)
            fixture_objects[fixture_key] = fixture_object
            fixture_headers[fixture_key] = {
                'content-length': read_len,
                'etag': etag}
            return etag
        else:
            msg = ("Object PUT failed - Object with key %s already exists"
                   % fixture_key)
            raise swiftclient.ClientException(msg,
                                              http_status=httplib.CONFLICT)

    def fake_get_object(url, token, container, name, **kwargs):
        fixture_key = "%s/%s" % (container, name)
        if fixture_key not in fixture_headers:
            msg = "Object GET failed"
            raise swiftclient.ClientException(msg,
                                              http_status=httplib.NOT_FOUND)

        byte_range = None
        headers = kwargs.get('headers', dict())
        if headers is not None:
            headers = dict((k.lower(), v) for k, v in six.iteritems(headers))
            if 'range' in headers:
                byte_range = headers.get('range')

        fixture = fixture_headers[fixture_key]
        if 'manifest' in fixture:
            # Simulate a large-object manifest: concatenate every chunk
            # object whose key shares the manifest's prefix.
            chunk_keys = sorted([k for k in fixture_headers.keys()
                                 if k.startswith(fixture_key) and
                                 k != fixture_key])
            result = six.StringIO()
            for key in chunk_keys:
                result.write(fixture_objects[key].getvalue())
        else:
            result = fixture_objects[fixture_key]

        if byte_range is not None:
            # Only 'bytes=N-' style open-ended ranges are handled here.
            start = int(byte_range.split('=')[1].strip('-'))
            result = six.StringIO(result.getvalue()[start:])
            fixture_headers[fixture_key]['content-length'] = len(
                result.getvalue())

        return fixture_headers[fixture_key], result

    def fake_head_object(url, token, container, name, **kwargs):
        try:
            fixture_key = "%s/%s" % (container, name)
            return fixture_headers[fixture_key]
        except KeyError:
            msg = "Object HEAD failed - Object does not exist"
            raise swiftclient.ClientException(msg,
                                              http_status=httplib.NOT_FOUND)

    def fake_delete_object(url, token, container, name, **kwargs):
        fixture_key = "%s/%s" % (container, name)
        if fixture_key not in fixture_headers:
            msg = "Object DELETE failed - Object does not exist"
            raise swiftclient.ClientException(msg,
                                              http_status=httplib.NOT_FOUND)
        else:
            del fixture_headers[fixture_key]
            del fixture_objects[fixture_key]

    def fake_http_connection(*args, **kwargs):
        return None

    def fake_get_auth(url, user, key, snet, auth_version, **kwargs):
        if url is None:
            return None, None
        if 'http' in url and '://' not in url:
            raise ValueError('Invalid url %s' % url)
        # Check the auth version requested against the configured one.
        if swift_store_auth_version != auth_version:
            msg = 'AUTHENTICATION failed (version mismatch)'
            raise swiftclient.ClientException(msg)
        return None, None

    stubs.Set(swiftclient.client,
              'head_container', fake_head_container)
    stubs.Set(swiftclient.client,
              'put_container', fake_put_container)
    stubs.Set(swiftclient.client,
              'post_container', fake_post_container)
    stubs.Set(swiftclient.client,
              'put_object', fake_put_object)
    stubs.Set(swiftclient.client,
              'delete_object', fake_delete_object)
    stubs.Set(swiftclient.client,
              'head_object', fake_head_object)
    stubs.Set(swiftclient.client,
              'get_object', fake_get_object)
    stubs.Set(swiftclient.client,
              'get_auth', fake_get_auth)
    stubs.Set(swiftclient.client,
              'http_connection', fake_http_connection)
class SwiftTests(object):
    @property
    def swift_store_user(self):
        """Tenant-qualified swift user shared by the test cases."""
        return 'tenant:user1'
def test_get_size(self):
uri = "swift://%s:key@auth_address/glance/%s" % (
self.swift_store_user, FAKE_UUID)
loc = get_location_from_uri(uri)
image_size = self.store.get_size(loc)
self.assertEqual(image_size, 5120)
def test_validate_location_for_invalid_uri(self):
uri = "swift+config://store_1/glance/%s"
self.assertRaises(exceptions.BadStoreUri,
self.store.validate_location,
uri)
def test_validate_location_for_valid_uri(self):
uri = "swift://user:key@auth_address/glance/%s"
try:
self.assertIsNone(self.store.validate_location(uri))
except Exception:
self.fail('Location uri validation failed')
def test_get_size_with_multi_tenant_on(self):
uri = ("swift://%s:key@auth_address/glance/%s" %
(self.swift_store_user, FAKE_UUID))
self.config(swift_store_multi_tenant=True)
size = backend.get_size_from_backend(uri, context={})
self.assertEqual(size, 5120)
def test_get(self):
uri = "swift://%s:key@auth_address/glance/%s" % (
self.swift_store_user, FAKE_UUID)
loc = get_location_from_uri(uri)
(image_swift, image_size) = self.store.get(loc)
self.assertEqual(image_size, 5120)
expected_data = "*" * FIVE_KB
data = ""
for chunk in image_swift:
data += chunk
self.assertEqual(expected_data, data)
    def test_get_with_retry(self):
        """A mid-stream retry still yields the complete image.

        The response iterator is drained, cut in half, and the wrapped
        iterator is replaced with a swift_retry_iter over that half so
        the remainder must be re-fetched; the reassembled data must
        still equal the full 5KB payload.
        """
        uri = "swift://%s:key@auth_address/glance/%s" % (
            self.swift_store_user, FAKE_UUID)
        loc = get_location_from_uri(uri)
        ctxt = context.RequestContext()
        (image_swift, image_size) = self.store.get(loc, context=ctxt)
        resp_full = ''.join([chunk for chunk in image_swift.wrapped])
        resp_half = resp_full[:len(resp_full) / 2]
        image_swift.wrapped = swift.swift_retry_iter(resp_half, image_size,
                                                     self.store,
                                                     loc.store_location,
                                                     ctxt)
        self.assertEqual(image_size, 5120)
        expected_data = "*" * FIVE_KB
        data = ""
        for chunk in image_swift:
            data += chunk
        self.assertEqual(expected_data, data)
def test_get_with_http_auth(self):
loc = get_location_from_uri("swift+http://%s:key@auth_address/"
"glance/%s" %
(self.swift_store_user, FAKE_UUID))
ctxt = context.RequestContext()
(image_swift, image_size) = self.store.get(loc, context=ctxt)
self.assertEqual(image_size, 5120)
expected_data = "*" * FIVE_KB
data = ""
for chunk in image_swift:
data += chunk
self.assertEqual(expected_data, data)
def test_get_non_existing(self):
loc = get_location_from_uri("swift://%s:key@authurl/glance/noexist" % (
self.swift_store_user))
self.assertRaises(exceptions.NotFound,
self.store.get,
loc)
    def test_add(self):
        """Adding a small image stores it in a single put_object call
        and returns the expected location, size and checksum."""
        sutils.is_multiple_swift_store_accounts_enabled = \
            mock.Mock(return_value=False)
        reload(swift)
        self.store = Store(self.conf)
        self.store.configure()
        expected_swift_size = FIVE_KB
        expected_swift_contents = "*" * expected_swift_size
        expected_checksum = hashlib.md5(expected_swift_contents).hexdigest()
        expected_image_id = str(uuid.uuid4())
        # %%3A is a literal '%3A' (url-encoded ':') in the location.
        loc = "swift+https://tenant%%3Auser1:key@localhost:8080/glance/%s"
        expected_location = loc % (expected_image_id)
        image_swift = six.StringIO(expected_swift_contents)
        global SWIFT_PUT_OBJECT_CALLS
        SWIFT_PUT_OBJECT_CALLS = 0
        location, size, checksum, _ = self.store.add(expected_image_id,
                                                     image_swift,
                                                     expected_swift_size)
        self.assertEqual(expected_location, location)
        self.assertEqual(expected_swift_size, size)
        self.assertEqual(expected_checksum, checksum)
        # A small image is uploaded as exactly one object.
        self.assertEqual(SWIFT_PUT_OBJECT_CALLS, 1)
        loc = get_location_from_uri(expected_location)
        (new_image_swift, new_image_size) = self.store.get(loc)
        new_image_contents = ''.join([chunk for chunk in new_image_swift])
        # NOTE(review): the large-object tests measure
        # len(new_image_contents) here; this line relies on the returned
        # iterable itself supporting len() -- confirm intent.
        new_image_swift_size = len(new_image_swift)
        self.assertEqual(expected_swift_contents, new_image_contents)
        self.assertEqual(expected_swift_size, new_image_swift_size)
    def test_add_multi_store(self):
        """Adding with a non-default swift reference produces a
        swift+config location naming that reference."""
        conf = copy.deepcopy(SWIFT_CONF)
        conf['default_swift_reference'] = 'store_2'
        self.config(**conf)
        reload(swift)
        self.store = Store(self.conf)
        self.store.configure()
        expected_swift_size = FIVE_KB
        expected_swift_contents = "*" * expected_swift_size
        expected_image_id = str(uuid.uuid4())
        image_swift = six.StringIO(expected_swift_contents)
        global SWIFT_PUT_OBJECT_CALLS
        SWIFT_PUT_OBJECT_CALLS = 0
        loc = 'swift+config://store_2/glance/%s'
        expected_location = loc % (expected_image_id)
        location, size, checksum, arg = self.store.add(expected_image_id,
                                                       image_swift,
                                                       expected_swift_size)
        self.assertEqual(expected_location, location)
    def test_add_auth_url_variations(self):
        """Adding succeeds for each configured swift reference variant,
        producing the matching swift+config location."""
        sutils.is_multiple_swift_store_accounts_enabled = \
            mock.Mock(return_value=True)
        conf = copy.deepcopy(SWIFT_CONF)
        self.config(**conf)
        variations = {
            'store_4': 'swift+config://store_4/glance/%s',
            'store_5': 'swift+config://store_5/glance/%s',
            'store_6': 'swift+config://store_6/glance/%s'
        }
        for variation, expected_location in variations.items():
            image_id = str(uuid.uuid4())
            expected_location = expected_location % image_id
            expected_swift_size = FIVE_KB
            expected_swift_contents = "*" * expected_swift_size
            expected_checksum = \
                hashlib.md5(expected_swift_contents).hexdigest()
            image_swift = six.StringIO(expected_swift_contents)
            global SWIFT_PUT_OBJECT_CALLS
            SWIFT_PUT_OBJECT_CALLS = 0
            # Reconfigure the store for this reference before adding.
            conf['default_swift_reference'] = variation
            self.config(**conf)
            reload(swift)
            self.store = Store(self.conf)
            self.store.configure()
            location, size, checksum, _ = self.store.add(image_id, image_swift,
                                                         expected_swift_size)
            self.assertEqual(expected_location, location)
            self.assertEqual(expected_swift_size, size)
            self.assertEqual(expected_checksum, checksum)
            self.assertEqual(SWIFT_PUT_OBJECT_CALLS, 1)
            loc = get_location_from_uri(expected_location)
            (new_image_swift, new_image_size) = self.store.get(loc)
            new_image_contents = ''.join([chunk for chunk in new_image_swift])
            # NOTE(review): relies on the returned iterable supporting
            # len(); other tests use len(new_image_contents) -- confirm.
            new_image_swift_size = len(new_image_swift)
            self.assertEqual(expected_swift_contents, new_image_contents)
            self.assertEqual(expected_swift_size, new_image_swift_size)
    def test_add_no_container_no_create(self):
        """With create-on-put disabled, adding to a missing container
        fails with a BackendException and uploads nothing."""
        conf = copy.deepcopy(SWIFT_CONF)
        conf['swift_store_user'] = 'tenant:user'
        conf['swift_store_create_container_on_put'] = False
        conf['swift_store_container'] = 'noexist'
        self.config(**conf)
        reload(swift)
        self.store = Store(self.conf)
        self.store.configure()
        image_swift = six.StringIO("nevergonnamakeit")
        global SWIFT_PUT_OBJECT_CALLS
        SWIFT_PUT_OBJECT_CALLS = 0
        exception_caught = False
        try:
            self.store.add(str(uuid.uuid4()), image_swift, 0)
        except BackendException as e:
            exception_caught = True
            self.assertIn("container noexist does not exist "
                          "in Swift", unicode(e))
        self.assertTrue(exception_caught)
        # No object may have been uploaded on the failure path.
        self.assertEqual(SWIFT_PUT_OBJECT_CALLS, 0)
    def test_add_no_container_and_create(self):
        """With create-on-put enabled, a missing container is created
        on the fly and the add succeeds."""
        sutils.is_multiple_swift_store_accounts_enabled = \
            mock.Mock(return_value=True)
        expected_swift_size = FIVE_KB
        expected_swift_contents = "*" * expected_swift_size
        expected_checksum = hashlib.md5(expected_swift_contents).hexdigest()
        expected_image_id = str(uuid.uuid4())
        loc = 'swift+config://ref1/noexist/%s'
        expected_location = loc % (expected_image_id)
        image_swift = six.StringIO(expected_swift_contents)
        global SWIFT_PUT_OBJECT_CALLS
        SWIFT_PUT_OBJECT_CALLS = 0
        conf = copy.deepcopy(SWIFT_CONF)
        conf['swift_store_user'] = 'tenant:user'
        conf['swift_store_create_container_on_put'] = True
        conf['swift_store_container'] = 'noexist'
        self.config(**conf)
        reload(swift)
        self.store = Store(self.conf)
        self.store.configure()
        location, size, checksum, _ = self.store.add(expected_image_id,
                                                     image_swift,
                                                     expected_swift_size)
        self.assertEqual(expected_location, location)
        self.assertEqual(expected_swift_size, size)
        self.assertEqual(expected_checksum, checksum)
        self.assertEqual(SWIFT_PUT_OBJECT_CALLS, 1)
        loc = get_location_from_uri(expected_location)
        (new_image_swift, new_image_size) = self.store.get(loc)
        new_image_contents = ''.join([chunk for chunk in new_image_swift])
        # NOTE(review): relies on the returned iterable supporting
        # len(); other tests use len(new_image_contents) -- confirm.
        new_image_swift_size = len(new_image_swift)
        self.assertEqual(expected_swift_contents, new_image_contents)
        self.assertEqual(expected_swift_size, new_image_swift_size)
    def test_add_large_object(self):
        """Images larger than ``large_object_size`` are uploaded chunked.

        The size/chunk thresholds are lowered to 1 KiB so the 5 KiB image
        is split into five segments plus a manifest object.
        """
        sutils.is_multiple_swift_store_accounts_enabled = \
            mock.Mock(return_value=True)
        expected_swift_size = FIVE_KB
        expected_swift_contents = "*" * expected_swift_size
        expected_checksum = hashlib.md5(expected_swift_contents).hexdigest()
        expected_image_id = str(uuid.uuid4())
        loc = 'swift+config://ref1/glance/%s'
        expected_location = loc % (expected_image_id)
        image_swift = six.StringIO(expected_swift_contents)
        # Reset the stub swiftclient's upload counter for this test.
        global SWIFT_PUT_OBJECT_CALLS
        SWIFT_PUT_OBJECT_CALLS = 0
        self.store = Store(self.conf)
        self.store.configure()
        orig_max_size = self.store.large_object_size
        orig_temp_size = self.store.large_object_chunk_size
        try:
            self.store.large_object_size = 1024
            self.store.large_object_chunk_size = 1024
            location, size, checksum, _ = self.store.add(expected_image_id,
                                                         image_swift,
                                                         expected_swift_size)
        finally:
            # Restore store-level thresholds even if add() raised.
            self.store.large_object_chunk_size = orig_temp_size
            self.store.large_object_size = orig_max_size
        self.assertEqual(expected_location, location)
        self.assertEqual(expected_swift_size, size)
        self.assertEqual(expected_checksum, checksum)
        # 6 PUTs expected: five 1 KiB segments plus the manifest object.
        self.assertEqual(SWIFT_PUT_OBJECT_CALLS, 6)
        loc = get_location_from_uri(expected_location)
        (new_image_swift, new_image_size) = self.store.get(loc)
        new_image_contents = ''.join([chunk for chunk in new_image_swift])
        new_image_swift_size = len(new_image_contents)
        self.assertEqual(expected_swift_contents, new_image_contents)
        self.assertEqual(expected_swift_size, new_image_swift_size)
    def test_add_large_object_zero_size(self):
        """Chunked upload still works when the caller reports size 0.

        With an unknown (zero) size the store must read until the input
        stream is exhausted rather than trusting the declared size.
        """
        expected_swift_size = FIVE_KB
        expected_swift_contents = "*" * expected_swift_size
        expected_checksum = hashlib.md5(expected_swift_contents).hexdigest()
        expected_image_id = str(uuid.uuid4())
        loc = 'swift+config://ref1/glance/%s'
        expected_location = loc % (expected_image_id)
        image_swift = six.StringIO(expected_swift_contents)
        # Reset the stub swiftclient's upload counter for this test.
        global SWIFT_PUT_OBJECT_CALLS
        SWIFT_PUT_OBJECT_CALLS = 0
        self.store = Store(self.conf)
        self.store.configure()
        orig_max_size = self.store.large_object_size
        orig_temp_size = self.store.large_object_chunk_size
        global MAX_SWIFT_OBJECT_SIZE
        orig_max_swift_object_size = MAX_SWIFT_OBJECT_SIZE
        try:
            MAX_SWIFT_OBJECT_SIZE = 1024
            self.store.large_object_size = 1024
            self.store.large_object_chunk_size = 1024
            location, size, checksum, _ = self.store.add(expected_image_id,
                                                         image_swift, 0)
        finally:
            # Restore module/store thresholds even if add() raised.
            self.store.large_object_chunk_size = orig_temp_size
            self.store.large_object_size = orig_max_size
            MAX_SWIFT_OBJECT_SIZE = orig_max_swift_object_size
        self.assertEqual(expected_location, location)
        self.assertEqual(expected_swift_size, size)
        self.assertEqual(expected_checksum, checksum)
        # 7 PUTs here (vs 6 in the known-size test): presumably the extra
        # call is a zero-length segment written when the stream runs dry,
        # plus the manifest -- confirm against the chunked-upload code.
        self.assertEqual(SWIFT_PUT_OBJECT_CALLS, 7)
        loc = get_location_from_uri(expected_location)
        (new_image_swift, new_image_size) = self.store.get(loc)
        new_image_contents = ''.join([chunk for chunk in new_image_swift])
        new_image_swift_size = len(new_image_contents)
        self.assertEqual(expected_swift_contents, new_image_contents)
        self.assertEqual(expected_swift_size, new_image_swift_size)
def test_add_already_existing(self):
image_swift = six.StringIO("nevergonnamakeit")
self.assertRaises(exceptions.Duplicate,
self.store.add,
FAKE_UUID, image_swift, 0)
def _option_required(self, key):
conf = self.getConfig()
conf[key] = None
try:
self.config(**conf)
self.store = Store(self.conf)
return self.store.add == self.store.add_disabled
except Exception:
return False
return False
    def test_no_store_credentials(self):
        """Empty user/key credentials disable the store's add() method."""
        # Overwrite the module-level reference params with blank credentials.
        swift.SWIFT_STORE_REF_PARAMS = {'ref1': {'auth_address':
                                                 'authurl.com', 'user': '',
                                                 'key': ''}}
        self.store = Store(self.conf)
        self.store.configure()
        self.assertEqual(self.store.add, self.store.add_disabled)
    def test_no_auth_address(self):
        """A missing auth address disables the store's add() method."""
        # Overwrite the module-level reference params with a blank auth URL.
        swift.SWIFT_STORE_REF_PARAMS = {'ref1': {'auth_address':
                                                 '', 'user': 'user1',
                                                 'key': 'key1'}}
        self.store = Store(self.conf)
        self.store.configure()
        self.assertEqual(self.store.add, self.store.add_disabled)
    def test_delete(self):
        """Deleting an existing image makes subsequent get() raise NotFound."""
        uri = "swift://%s:key@authurl/glance/%s" % (
            self.swift_store_user, FAKE_UUID)
        loc = get_location_from_uri(uri)
        self.store.delete(loc)
        self.assertRaises(exceptions.NotFound, self.store.get, loc)
    def test_delete_with_reference_params(self):
        """Delete also works for swift+config:// (reference-based) URIs."""
        uri = "swift+config://ref1/glance/%s" % (FAKE_UUID)
        loc = get_location_from_uri(uri)
        self.store.delete(loc)
        self.assertRaises(exceptions.NotFound, self.store.get, loc)
    def test_delete_non_existing(self):
        """Deleting a non-existent object raises NotFound."""
        loc = get_location_from_uri("swift://%s:key@authurl/glance/noexist" % (
            self.swift_store_user))
        self.assertRaises(exceptions.NotFound, self.store.delete, loc)
    def test_read_acl_public(self):
        """set_acls(public=True) grants world-read on the container."""
        self.config(swift_store_multi_tenant=True)
        store = Store(self.conf)
        store.configure()
        uri = "swift+http://storeurl/glance/%s" % FAKE_UUID
        loc = get_location_from_uri(uri)
        ctxt = context.RequestContext()
        store.set_acls(loc, public=True, context=ctxt)
        # The stubbed swiftclient records headers; inspect the container.
        container_headers = swiftclient.client.head_container('x', 'y',
                                                              'glance')
        # '.r:*,.rlistings' is Swift's public-read + listing ACL syntax.
        self.assertEqual(container_headers['X-Container-Read'],
                         ".r:*,.rlistings")
    def test_read_acl_tenants(self):
        """set_acls(read_tenants=...) grants per-tenant read ACLs."""
        self.config(swift_store_multi_tenant=True)
        store = Store(self.conf)
        store.configure()
        uri = "swift+http://storeurl/glance/%s" % FAKE_UUID
        loc = get_location_from_uri(uri)
        read_tenants = ['matt', 'mark']
        ctxt = context.RequestContext()
        store.set_acls(loc, read_tenants=read_tenants, context=ctxt)
        # The stubbed swiftclient records headers; inspect the container.
        container_headers = swiftclient.client.head_container('x', 'y',
                                                              'glance')
        # 'tenant:*' grants read access to every user of that tenant.
        self.assertEqual(container_headers['X-Container-Read'],
                         'matt:*,mark:*')
def test_write_acls(self):
self.config(swift_store_multi_tenant=True)
store = Store(self.conf)
store.configure()
uri = "swift+http://storeurl/glance/%s" % FAKE_UUID
loc = get_location_from_uri(uri)
read_tenants = ['frank', 'jim']
ctxt = context.RequestContext()
store.set_acls(loc, write_tenants=read_tenants, context=ctxt)
container_headers = swiftclient.client.head_container('x', 'y',
'glance')
self.assertEqual(container_headers['X-Container-Write'],
'frank:*,jim:*')
class TestStoreAuthV1(base.StoreBaseTest, SwiftTests):
    """Run the shared SwiftTests suite against Swift auth version 1.

    Sets up a stubbed-out swiftclient so no real Swift cluster is needed.
    """
    _CONF = cfg.CONF
    def getConfig(self):
        """Return the base store config for auth v1 with a tenant:user pair."""
        conf = SWIFT_CONF.copy()
        conf['swift_store_auth_version'] = '1'
        conf['swift_store_user'] = 'tenant:user1'
        return conf
    def setUp(self):
        super(TestStoreAuthV1, self).setUp()
        conf = self.getConfig()
        conf_file = 'glance-swift.conf'
        # Copy the reference-params fixture into the per-test directory.
        self.swift_config_file = self.copy_data_file(conf_file, self.test_dir)
        conf.update({'swift_store_config_file': self.swift_config_file})
        # Replace swiftclient with in-memory fakes for this auth version.
        self.stubs = stubout.StubOutForTesting()
        stub_out_swiftclient(self.stubs, conf['swift_store_auth_version'])
        self.store = Store(self.conf)
        self.config(**conf)
        self.store.configure()
        self.addCleanup(self.stubs.UnsetAll)
        self.register_store_schemes(self.store)
        # Reload the module-level reference params from the fixture file.
        swift.SWIFT_STORE_REF_PARAMS = sutils.SwiftParams().params
        self.addCleanup(self.conf.reset)
class TestStoreAuthV2(TestStoreAuthV1):
    """Re-run the shared suite with Swift auth version 2 plus v2 extras."""
    def getConfig(self):
        """Same as v1 config but with auth version forced to '2'."""
        conf = super(TestStoreAuthV2, self).getConfig()
        conf['swift_store_auth_version'] = '2'
        conf['swift_store_user'] = 'tenant:user1'
        return conf
    def test_v2_with_no_tenant(self):
        """Auth v2 requires a tenant in the user field; bare user fails."""
        uri = "swift://failme:key@auth_address/glance/%s" % (FAKE_UUID)
        loc = get_location_from_uri(uri)
        self.assertRaises(exceptions.BadStoreUri,
                          self.store.get,
                          loc)
    def test_v2_multi_tenant_location(self):
        """Multi-tenant URIs (no credentials embedded) parse as swift."""
        conf = self.getConfig()
        conf['swift_store_multi_tenant'] = True
        uri = "swift://auth_address/glance/%s" % (FAKE_UUID)
        loc = get_location_from_uri(uri)
        self.assertEqual('swift', loc.store_name)
class FakeConnection(object):
    """Record-only stand-in for ``swiftclient.client.Connection``.

    The constructor mirrors the real Connection signature and stores the
    arguments the tests later inspect as attributes. The retry/backoff and
    SSL-compression tuning parameters are accepted for signature
    compatibility but deliberately not recorded.
    """

    def __init__(self, authurl, user, key, retries=5, preauthurl=None,
                 preauthtoken=None, snet=False, starting_backoff=1,
                 tenant_name=None, os_options=None, auth_version="1",
                 insecure=False, ssl_compression=True):
        # Normalise the mutable default without sharing state across calls.
        self.os_options = os_options if os_options is not None else {}
        recorded = dict(authurl=authurl, user=user, key=key,
                        preauthurl=preauthurl, preauthtoken=preauthtoken,
                        snet=snet, tenant_name=tenant_name,
                        auth_version=auth_version, insecure=insecure)
        for attr, value in recorded.items():
            setattr(self, attr, value)
class TestSingleTenantStoreConnections(base.StoreBaseTest):
    """Verify SingleTenantStore builds swiftclient connections correctly.

    ``swiftclient.Connection`` is replaced with FakeConnection so each test
    can inspect exactly which arguments the store passed to it.
    """
    _CONF = cfg.CONF
    def setUp(self):
        super(TestSingleTenantStoreConnections, self).setUp()
        moxfixture = self.useFixture(moxstubout.MoxStubout())
        self.stubs = moxfixture.stubs
        # Swap the real swiftclient connection for the recording fake.
        self.stubs.Set(swiftclient, 'Connection', FakeConnection)
        self.store = swift.SingleTenantStore(self.conf)
        self.store.configure()
        specs = {'scheme': 'swift',
                 'auth_or_store_url': 'example.com/v2/',
                 'user': 'tenant:user1',
                 'key': 'key1',
                 'container': 'cont',
                 'obj': 'object'}
        self.location = swift.StoreLocation(specs)
        self.addCleanup(self.conf.reset)
    def test_basic_connection(self):
        """Default config: https, auth v2, tenant split out of the user."""
        connection = self.store.get_connection(self.location)
        self.assertEqual(connection.authurl, 'https://example.com/v2/')
        self.assertEqual(connection.auth_version, '2')
        self.assertEqual(connection.user, 'user1')
        self.assertEqual(connection.tenant_name, 'tenant')
        self.assertFalse(connection.snet)
        self.assertEqual(connection.key, 'key1')
        self.assertIsNone(connection.preauthurl)
        self.assertIsNone(connection.preauthtoken)
        self.assertFalse(connection.insecure)
        self.assertEqual(connection.os_options,
                         {'service_type': 'object-store',
                          'endpoint_type': 'publicURL'})
    def test_connection_with_no_trailing_slash(self):
        """A trailing slash is appended to the auth URL if missing."""
        self.location.auth_or_store_url = 'example.com/v2'
        connection = self.store.get_connection(self.location)
        self.assertEqual(connection.authurl, 'https://example.com/v2/')
    def test_connection_insecure(self):
        """swift_store_auth_insecure propagates to the connection."""
        self.config(swift_store_auth_insecure=True)
        self.store.configure()
        connection = self.store.get_connection(self.location)
        self.assertTrue(connection.insecure)
    def test_connection_with_auth_v1(self):
        """Auth v1 uses the user string verbatim and sets no tenant."""
        self.config(swift_store_auth_version='1')
        self.store.configure()
        self.location.user = 'auth_v1_user'
        connection = self.store.get_connection(self.location)
        self.assertEqual(connection.auth_version, '1')
        self.assertEqual(connection.user, 'auth_v1_user')
        self.assertIsNone(connection.tenant_name)
    def test_connection_invalid_user(self):
        """More than one ':' in the user field is rejected."""
        self.store.configure()
        self.location.user = 'invalid:format:user'
        self.assertRaises(exceptions.BadStoreUri,
                          self.store.get_connection, self.location)
    def test_connection_missing_user(self):
        """A missing user field is rejected."""
        self.store.configure()
        self.location.user = None
        self.assertRaises(exceptions.BadStoreUri,
                          self.store.get_connection, self.location)
    def test_connection_with_region(self):
        """swift_store_region is forwarded via os_options."""
        self.config(swift_store_region='Sahara')
        self.store.configure()
        connection = self.store.get_connection(self.location)
        self.assertEqual(connection.os_options,
                         {'region_name': 'Sahara',
                          'service_type': 'object-store',
                          'endpoint_type': 'publicURL'})
    def test_connection_with_service_type(self):
        """swift_store_service_type is forwarded via os_options."""
        self.config(swift_store_service_type='shoe-store')
        self.store.configure()
        connection = self.store.get_connection(self.location)
        self.assertEqual(connection.os_options,
                         {'service_type': 'shoe-store',
                          'endpoint_type': 'publicURL'})
    def test_connection_with_endpoint_type(self):
        """swift_store_endpoint_type is forwarded via os_options."""
        self.config(swift_store_endpoint_type='internalURL')
        self.store.configure()
        connection = self.store.get_connection(self.location)
        self.assertEqual(connection.os_options,
                         {'service_type': 'object-store',
                          'endpoint_type': 'internalURL'})
    def test_connection_with_snet(self):
        """swift_enable_snet turns on the service-network flag."""
        self.config(swift_enable_snet=True)
        self.store.configure()
        connection = self.store.get_connection(self.location)
        self.assertTrue(connection.snet)
    def test_bad_location_uri(self):
        """A URI with the wrong scheme/shape fails to parse."""
        self.store.configure()
        self.location.uri = 'http://bad_uri://'
        self.assertRaises(exceptions.BadStoreUri,
                          self.location.parse_uri,
                          self.location.uri)
    def test_bad_location_uri_invalid_credentials(self):
        """Credentials without a user:key split fail to parse."""
        self.store.configure()
        self.location.uri = 'swift://bad_creds@uri/cont/obj'
        self.assertRaises(exceptions.BadStoreUri,
                          self.location.parse_uri,
                          self.location.uri)
    def test_bad_location_uri_invalid_object_path(self):
        """A URI missing the object component fails to parse."""
        self.store.configure()
        self.location.uri = 'swift://user:key@uri/cont'
        self.assertRaises(exceptions.BadStoreUri,
                          self.location.parse_uri,
                          self.location.uri)
class TestMultiTenantStoreConnections(base.StoreBaseTest):
    """Verify MultiTenantStore builds connections from the request context.

    In multi-tenant mode the store authenticates with the caller's token
    rather than stored credentials, so the connection is pre-authenticated.
    """
    def setUp(self):
        super(TestMultiTenantStoreConnections, self).setUp()
        moxfixture = self.useFixture(moxstubout.MoxStubout())
        self.stubs = moxfixture.stubs
        # Swap the real swiftclient connection for the recording fake.
        self.stubs.Set(swiftclient, 'Connection', FakeConnection)
        self.context = context.RequestContext(
            user='tenant:user1', tenant='tenant', auth_token='0123')
        self.store = swift.MultiTenantStore(self.conf)
        specs = {'scheme': 'swift',
                 'auth_or_store_url': 'example.com',
                 'container': 'cont',
                 'obj': 'object'}
        self.location = swift.StoreLocation(specs)
        self.addCleanup(self.conf.reset)
    def test_basic_connection(self):
        """The context's token/URL pre-authenticate the connection."""
        self.store.configure()
        connection = self.store.get_connection(self.location,
                                               context=self.context)
        # No auth URL or key: the pre-auth token is used instead.
        self.assertIsNone(connection.authurl)
        self.assertEqual(connection.auth_version, '2')
        self.assertEqual(connection.user, 'tenant:user1')
        self.assertEqual(connection.tenant_name, 'tenant')
        self.assertIsNone(connection.key)
        self.assertFalse(connection.snet)
        self.assertEqual(connection.preauthurl, 'https://example.com')
        self.assertEqual(connection.preauthtoken, '0123')
        self.assertEqual(connection.os_options, {})
    def test_connection_with_snet(self):
        """swift_enable_snet turns on the service-network flag."""
        self.config(swift_enable_snet=True)
        self.store.configure()
        connection = self.store.get_connection(self.location,
                                               context=self.context)
        self.assertTrue(connection.snet)
class FakeGetEndpoint(object):
    """Callable stub for ``auth.get_endpoint`` that records its arguments.

    Each call captures the keyword arguments the store passed (service
    type, region, endpoint type) as attributes for later assertions, then
    returns the canned ``response`` supplied at construction time.
    """

    def __init__(self, response):
        self.response = response

    def __call__(self, service_catalog, service_type=None,
                 endpoint_region=None, endpoint_type=None):
        self.service_type, self.endpoint_region, self.endpoint_type = (
            service_type, endpoint_region, endpoint_type)
        return self.response
class TestCreatingLocations(base.StoreBaseTest):
    """Verify the single- and multi-tenant stores build StoreLocations.

    Multi-tenant tests stub ``auth.get_endpoint`` with FakeGetEndpoint so
    the endpoint lookup arguments can be asserted without a real catalog.
    """
    _CONF = cfg.CONF
    def setUp(self):
        super(TestCreatingLocations, self).setUp()
        moxfixture = self.useFixture(moxstubout.MoxStubout())
        self.stubs = moxfixture.stubs
        conf = copy.deepcopy(SWIFT_CONF)
        self.store = Store(self.conf)
        self.config(**conf)
        reload(swift)
        self.addCleanup(self.conf.reset)
    def test_single_tenant_location(self):
        """Single-tenant locations embed the reference's credentials."""
        conf = copy.deepcopy(SWIFT_CONF)
        conf['swift_store_container'] = 'container'
        conf_file = "glance-swift.conf"
        self.swift_config_file = self.copy_data_file(conf_file, self.test_dir)
        conf.update({'swift_store_config_file': self.swift_config_file})
        conf['default_swift_reference'] = 'ref1'
        self.config(**conf)
        reload(swift)
        store = swift.SingleTenantStore(self.conf)
        store.configure()
        location = store.create_location('image-id')
        self.assertEqual(location.scheme, 'swift+https')
        self.assertEqual(location.swift_url, 'https://example.com')
        self.assertEqual(location.container, 'container')
        self.assertEqual(location.obj, 'image-id')
        self.assertEqual(location.user, 'tenant:user1')
        self.assertEqual(location.key, 'key1')
    def test_single_tenant_location_http(self):
        """An http auth address in the reference yields swift+http."""
        conf_file = "glance-swift.conf"
        test_dir = self.useFixture(fixtures.TempDir()).path
        self.swift_config_file = self.copy_data_file(conf_file, test_dir)
        self.config(swift_store_container='container',
                    default_swift_reference='ref2',
                    swift_store_config_file=self.swift_config_file)
        swift.SWIFT_STORE_REF_PARAMS = sutils.SwiftParams().params
        store = swift.SingleTenantStore(self.conf)
        store.configure()
        location = store.create_location('image-id')
        self.assertEqual(location.scheme, 'swift+http')
        self.assertEqual(location.swift_url, 'http://example.com')
    def test_multi_tenant_location(self):
        """Multi-tenant locations use the catalog endpoint, no credentials."""
        self.config(swift_store_container='container')
        fake_get_endpoint = FakeGetEndpoint('https://some_endpoint')
        self.stubs.Set(auth, 'get_endpoint', fake_get_endpoint)
        ctxt = context.RequestContext(
            user='user', tenant='tenant', auth_token='123',
            service_catalog={})
        store = swift.MultiTenantStore(self.conf)
        store.configure()
        location = store.create_location('image-id', context=ctxt)
        self.assertEqual(location.scheme, 'swift+https')
        self.assertEqual(location.swift_url, 'https://some_endpoint')
        # Multi-tenant mode uses a per-image container name.
        self.assertEqual(location.container, 'container_image-id')
        self.assertEqual(location.obj, 'image-id')
        self.assertIsNone(location.user)
        self.assertIsNone(location.key)
        self.assertEqual(fake_get_endpoint.service_type, 'object-store')
    def test_multi_tenant_location_http(self):
        """An http catalog endpoint yields swift+http."""
        fake_get_endpoint = FakeGetEndpoint('http://some_endpoint')
        self.stubs.Set(auth, 'get_endpoint', fake_get_endpoint)
        ctxt = context.RequestContext(
            user='user', tenant='tenant', auth_token='123',
            service_catalog={})
        store = swift.MultiTenantStore(self.conf)
        store.configure()
        location = store.create_location('image-id', context=ctxt)
        self.assertEqual(location.scheme, 'swift+http')
        self.assertEqual(location.swift_url, 'http://some_endpoint')
    def test_multi_tenant_location_with_region(self):
        """swift_store_region is passed through to the endpoint lookup."""
        self.config(swift_store_region='WestCarolina')
        fake_get_endpoint = FakeGetEndpoint('https://some_endpoint')
        self.stubs.Set(auth, 'get_endpoint', fake_get_endpoint)
        ctxt = context.RequestContext(
            user='user', tenant='tenant', auth_token='123',
            service_catalog={})
        store = swift.MultiTenantStore(self.conf)
        store.configure()
        store._get_endpoint(ctxt)
        self.assertEqual(fake_get_endpoint.endpoint_region, 'WestCarolina')
    def test_multi_tenant_location_custom_service_type(self):
        """swift_store_service_type is passed through to the lookup."""
        self.config(swift_store_service_type='toy-store')
        fake_get_endpoint = FakeGetEndpoint('https://some_endpoint')
        self.stubs.Set(auth, 'get_endpoint', fake_get_endpoint)
        ctxt = context.RequestContext(
            user='user', tenant='tenant', auth_token='123',
            service_catalog={})
        store = swift.MultiTenantStore(self.conf)
        store.configure()
        store._get_endpoint(ctxt)
        self.assertEqual(fake_get_endpoint.service_type, 'toy-store')
    def test_multi_tenant_location_custom_endpoint_type(self):
        """swift_store_endpoint_type is passed through to the lookup."""
        self.config(swift_store_endpoint_type='InternalURL')
        fake_get_endpoint = FakeGetEndpoint('https://some_endpoint')
        self.stubs.Set(auth, 'get_endpoint', fake_get_endpoint)
        ctxt = context.RequestContext(
            user='user', tenant='tenant', auth_token='123',
            service_catalog={})
        store = swift.MultiTenantStore(self.conf)
        store.configure()
        store._get_endpoint(ctxt)
        self.assertEqual(fake_get_endpoint.endpoint_type, 'InternalURL')
class TestChunkReader(base.StoreBaseTest):
    """Exercise swift.ChunkReader against a real on-disk file."""
    _CONF = cfg.CONF

    def setUp(self):
        super(TestChunkReader, self).setUp()
        conf = copy.deepcopy(SWIFT_CONF)
        # Store() is constructed for its side effects (presumably option
        # registration) -- the instance itself is unused; confirm.
        Store(self.conf)
        self.config(**conf)

    def test_read_all_data(self):
        """Reading 100-byte chunks eventually consumes the whole 1 KiB file.

        Fix over the previous version: ``infile`` and ``data_file`` are now
        closed even when an assertion fails, instead of being leaked.
        """
        CHUNKSIZE = 100
        checksum = hashlib.md5()
        data_file = tempfile.NamedTemporaryFile()
        try:
            data_file.write('*' * units.Ki)
            data_file.flush()
            infile = open(data_file.name, 'rb')
            try:
                bytes_read = 0
                while True:
                    # A fresh ChunkReader per iteration, mirroring how the
                    # store hands out one reader per uploaded segment.
                    cr = swift.ChunkReader(infile, checksum, CHUNKSIZE)
                    chunk = cr.read(CHUNKSIZE)
                    bytes_read += len(chunk)
                    if not chunk:
                        break
                self.assertEqual(1024, bytes_read)
            finally:
                infile.close()
        finally:
            data_file.close()
| true | true |
f7115afd92f601168a3271828bcd6583a3f27954 | 8,006 | py | Python | magiclink/views.py | lmccartney/django-magiclink | 62b37ee8ed07fd41b259501fc0aba8deaec4bc5f | [
"MIT"
] | 34 | 2020-08-16T05:47:13.000Z | 2022-03-01T18:19:06.000Z | magiclink/views.py | lmccartney/django-magiclink | 62b37ee8ed07fd41b259501fc0aba8deaec4bc5f | [
"MIT"
] | 11 | 2021-01-04T23:51:50.000Z | 2021-09-19T14:21:44.000Z | magiclink/views.py | lmccartney/django-magiclink | 62b37ee8ed07fd41b259501fc0aba8deaec4bc5f | [
"MIT"
] | 5 | 2021-03-19T04:01:23.000Z | 2022-03-01T14:20:21.000Z | import logging
from django.conf import settings as django_settings
from django.contrib.auth import authenticate, get_user_model, login, logout
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.utils.decorators import method_decorator
from django.views.decorators.cache import never_cache
from django.views.generic import TemplateView
from django.views.generic.base import RedirectView
try:
from django.utils.http import url_has_allowed_host_and_scheme as safe_url
except ImportError: # pragma: no cover
from django.utils.http import is_safe_url as safe_url
from django.views.decorators.csrf import csrf_protect
from . import settings
from .forms import (
LoginForm, SignupForm, SignupFormEmailOnly, SignupFormFull,
SignupFormWithUsername
)
from .helpers import create_magiclink, get_or_create_user
from .models import MagicLink, MagicLinkError
from .utils import get_url_path
User = get_user_model()
log = logging.getLogger(__name__)
@method_decorator(csrf_protect, name='dispatch')
class Login(TemplateView):
    """Render the login form and email a magic link on submission."""
    template_name = settings.LOGIN_TEMPLATE_NAME
    def get(self, request, *args, **kwargs):
        """Show the login page with an empty login form."""
        context = self.get_context_data(**kwargs)
        context['login_form'] = LoginForm()
        context['require_signup'] = settings.REQUIRE_SIGNUP
        return self.render_to_response(context)
    def post(self, request, *args, **kwargs):
        """Validate the email, create a magic link, and send it.

        Any currently logged-in session is ended first. On success the
        user is redirected to the "link sent" page; form/link errors are
        re-rendered on the login template.
        """
        logout(request)
        context = self.get_context_data(**kwargs)
        context['require_signup'] = settings.REQUIRE_SIGNUP
        form = LoginForm(request.POST)
        if not form.is_valid():
            context['login_form'] = form
            return self.render_to_response(context)
        email = form.cleaned_data['email']
        if not settings.REQUIRE_SIGNUP:
            # Signup-less mode: silently create the account on first login.
            get_or_create_user(email)
        redirect_url = self.login_redirect_url(request.GET.get('next', ''))
        try:
            magiclink = create_magiclink(
                email, request, redirect_url=redirect_url
            )
        except MagicLinkError as e:
            form.add_error('email', str(e))
            context['login_form'] = form
            return self.render_to_response(context)
        magiclink.send(request)
        sent_url = get_url_path(settings.LOGIN_SENT_REDIRECT)
        response = HttpResponseRedirect(sent_url)
        if settings.REQUIRE_SAME_BROWSER:
            # Per-link cookie so verification can prove same-browser usage.
            cookie_name = f'magiclink{magiclink.pk}'
            response.set_cookie(cookie_name, magiclink.cookie_value)
            log.info(f'Cookie {cookie_name} set for {email}')
        return response
    def login_redirect_url(self, next_url) -> str:
        """Return ``next_url`` if it passes Django's open-redirect check.

        Returns '' (fall back to the default redirect) for unsafe URLs.
        With ALLOWED_HOSTS=['*'] only the current host is considered safe.
        """
        redirect_url = ''
        allowed_hosts = django_settings.ALLOWED_HOSTS
        if '*' in allowed_hosts:
            allowed_hosts = [self.request.get_host()]
        url_is_safe = safe_url(
            url=next_url,
            allowed_hosts=allowed_hosts,
            require_https=self.request.is_secure(),
        )
        if url_is_safe:
            redirect_url = next_url
        return redirect_url
class LoginSent(TemplateView):
    """Static confirmation page shown after a magic link email is sent."""
    template_name = settings.LOGIN_SENT_TEMPLATE_NAME
@method_decorator(never_cache, name='dispatch')
class LoginVerify(TemplateView):
    """Consume a magic link (token + email) and log the user in."""
    template_name = settings.LOGIN_FAILED_TEMPLATE_NAME
    def get(self, request, *args, **kwargs):
        """Authenticate via the magic link backend; render/redirect on failure."""
        token = request.GET.get('token')
        email = request.GET.get('email')
        user = authenticate(request, token=token, email=email)
        if not user:
            # Failure handling order: configured redirect, then 404 if no
            # failure template, then render the template with a diagnosis.
            if settings.LOGIN_FAILED_REDIRECT:
                redirect_url = get_url_path(settings.LOGIN_FAILED_REDIRECT)
                return HttpResponseRedirect(redirect_url)
            if not settings.LOGIN_FAILED_TEMPLATE_NAME:
                raise Http404()
            context = self.get_context_data(**kwargs)
            # The below settings are left in for backward compatibility
            context['ONE_TOKEN_PER_USER'] = settings.ONE_TOKEN_PER_USER
            context['REQUIRE_SAME_BROWSER'] = settings.REQUIRE_SAME_BROWSER
            context['REQUIRE_SAME_IP'] = settings.REQUIRE_SAME_IP
            context['ALLOW_SUPERUSER_LOGIN'] = settings.ALLOW_SUPERUSER_LOGIN # NOQA: E501
            context['ALLOW_STAFF_LOGIN'] = settings.ALLOW_STAFF_LOGIN
            # Re-run validation purely to surface a human-readable reason.
            try:
                magiclink = MagicLink.objects.get(token=token)
            except MagicLink.DoesNotExist:
                error = 'A magic link with that token could not be found'
                context['login_error'] = error
                return self.render_to_response(context)
            try:
                magiclink.validate(request, email)
            except MagicLinkError as error:
                context['login_error'] = str(error)
                return self.render_to_response(context)
            # NOTE(review): if authenticate() returned None but validate()
            # raised nothing, execution falls through to login(request, None)
            # below -- looks suspect; confirm intended behaviour.
        login(request, user)
        log.info(f'Login successful for {email}')
        response = self.login_complete_action()
        if settings.REQUIRE_SAME_BROWSER:
            # Clean up the per-link same-browser cookie set at send time.
            magiclink = MagicLink.objects.get(token=token)
            cookie_name = f'magiclink{magiclink.pk}'
            response.delete_cookie(cookie_name, magiclink.cookie_value)
        return response
    def login_complete_action(self) -> HttpResponse:
        """Redirect to the destination stored on the consumed magic link."""
        token = self.request.GET.get('token')
        magiclink = MagicLink.objects.get(token=token)
        return HttpResponseRedirect(magiclink.redirect_url)
@method_decorator(csrf_protect, name='dispatch')
class Signup(TemplateView):
    """Render the signup page and create users via a magic login link."""
    template_name = settings.SIGNUP_TEMPLATE_NAME

    # Whitelist of signup form classes selectable via the posted
    # ``form_name`` field. An explicit mapping (instead of ``getattr`` on a
    # dynamically imported forms module) fixes two defects: the fromlist
    # entry 'SignupForm, SignupFormEmailOnly' was one malformed string, and
    # a missing ``form_name`` made getattr raise TypeError (HTTP 500)
    # instead of being handled. It also stops request data from resolving
    # arbitrary attributes of the forms module.
    SIGNUP_FORMS = {
        'SignupForm': SignupForm,
        'SignupFormEmailOnly': SignupFormEmailOnly,
        'SignupFormWithUsername': SignupFormWithUsername,
        'SignupFormFull': SignupFormFull,
    }

    def get(self, request, *args, **kwargs):
        """Show the signup page with one instance of each signup form."""
        context = self.get_context_data(**kwargs)
        context['SignupForm'] = SignupForm()
        context['SignupFormEmailOnly'] = SignupFormEmailOnly()
        context['SignupFormWithUsername'] = SignupFormWithUsername()
        context['SignupFormFull'] = SignupFormFull()
        return self.render_to_response(context)

    def post(self, request, *args, **kwargs):
        """Create (or fetch) the user and email them a magic login link.

        The submitted ``form_name`` selects which signup form validates the
        POST data; unknown or missing names redirect back to this page.
        """
        logout(request)
        context = self.get_context_data(**kwargs)
        form_name = request.POST.get('form_name')
        form_class = self.SIGNUP_FORMS.get(form_name)
        if form_class is None:
            return HttpResponseRedirect(self.request.path_info)
        form = form_class(request.POST)
        if not form.is_valid():
            context[form_name] = form
            return self.render_to_response(context)
        email = form.cleaned_data['email']
        full_name = form.cleaned_data.get('name', '')
        try:
            first_name, last_name = full_name.split(' ', 1)
        except ValueError:
            # Single-word name: treat it all as the first name.
            first_name = full_name
            last_name = ''
        get_or_create_user(
            email=email,
            username=form.cleaned_data.get('username', ''),
            first_name=first_name,
            last_name=last_name
        )
        default_signup_redirect = get_url_path(settings.SIGNUP_LOGIN_REDIRECT)
        next_url = request.GET.get('next', default_signup_redirect)
        magiclink = create_magiclink(email, request, redirect_url=next_url)
        magiclink.send(request)
        sent_url = get_url_path(settings.LOGIN_SENT_REDIRECT)
        response = HttpResponseRedirect(sent_url)
        if settings.REQUIRE_SAME_BROWSER:
            # Per-link cookie so verification can prove same-browser usage.
            cookie_name = f'magiclink{magiclink.pk}'
            response.set_cookie(cookie_name, magiclink.cookie_value)
            log.info(f'Cookie {cookie_name} set for {email}')
        return response
class Logout(RedirectView):
    """End the session, then redirect to ?next= or LOGOUT_REDIRECT_URL."""

    def get(self, request, *args, **kwargs):
        logout(self.request)
        destination = request.GET.get('next')
        if not destination:
            destination = get_url_path(django_settings.LOGOUT_REDIRECT_URL)
        return HttpResponseRedirect(destination)
| 36.894009 | 91 | 0.66725 | import logging
from django.conf import settings as django_settings
from django.contrib.auth import authenticate, get_user_model, login, logout
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.utils.decorators import method_decorator
from django.views.decorators.cache import never_cache
from django.views.generic import TemplateView
from django.views.generic.base import RedirectView
try:
from django.utils.http import url_has_allowed_host_and_scheme as safe_url
except ImportError:
from django.utils.http import is_safe_url as safe_url
from django.views.decorators.csrf import csrf_protect
from . import settings
from .forms import (
LoginForm, SignupForm, SignupFormEmailOnly, SignupFormFull,
SignupFormWithUsername
)
from .helpers import create_magiclink, get_or_create_user
from .models import MagicLink, MagicLinkError
from .utils import get_url_path
User = get_user_model()
log = logging.getLogger(__name__)
@method_decorator(csrf_protect, name='dispatch')
class Login(TemplateView):
    """Render the login form and email a magic link on submission."""
    template_name = settings.LOGIN_TEMPLATE_NAME
    def get(self, request, *args, **kwargs):
        """Show the login page with an empty login form."""
        context = self.get_context_data(**kwargs)
        context['login_form'] = LoginForm()
        context['require_signup'] = settings.REQUIRE_SIGNUP
        return self.render_to_response(context)
    def post(self, request, *args, **kwargs):
        """Validate the email, create a magic link, and send it."""
        logout(request)
        context = self.get_context_data(**kwargs)
        context['require_signup'] = settings.REQUIRE_SIGNUP
        form = LoginForm(request.POST)
        if not form.is_valid():
            context['login_form'] = form
            return self.render_to_response(context)
        email = form.cleaned_data['email']
        if not settings.REQUIRE_SIGNUP:
            # Signup-less mode: silently create the account on first login.
            get_or_create_user(email)
        redirect_url = self.login_redirect_url(request.GET.get('next', ''))
        try:
            magiclink = create_magiclink(
                email, request, redirect_url=redirect_url
            )
        except MagicLinkError as e:
            form.add_error('email', str(e))
            context['login_form'] = form
            return self.render_to_response(context)
        magiclink.send(request)
        sent_url = get_url_path(settings.LOGIN_SENT_REDIRECT)
        response = HttpResponseRedirect(sent_url)
        if settings.REQUIRE_SAME_BROWSER:
            # Per-link cookie so verification can prove same-browser usage.
            cookie_name = f'magiclink{magiclink.pk}'
            response.set_cookie(cookie_name, magiclink.cookie_value)
            log.info(f'Cookie {cookie_name} set for {email}')
        return response
    def login_redirect_url(self, next_url) -> str:
        """Return ``next_url`` if safe per Django's open-redirect check, else ''."""
        redirect_url = ''
        allowed_hosts = django_settings.ALLOWED_HOSTS
        if '*' in allowed_hosts:
            # Wildcard hosts: only trust redirects to the current host.
            allowed_hosts = [self.request.get_host()]
        url_is_safe = safe_url(
            url=next_url,
            allowed_hosts=allowed_hosts,
            require_https=self.request.is_secure(),
        )
        if url_is_safe:
            redirect_url = next_url
        return redirect_url
class LoginSent(TemplateView):
    """Static confirmation page shown after a magic link email is sent."""
    template_name = settings.LOGIN_SENT_TEMPLATE_NAME
@method_decorator(never_cache, name='dispatch')
class LoginVerify(TemplateView):
    """Consume a magic link (token + email) and log the user in."""
    template_name = settings.LOGIN_FAILED_TEMPLATE_NAME
    def get(self, request, *args, **kwargs):
        """Authenticate via the magic link backend; render/redirect on failure."""
        token = request.GET.get('token')
        email = request.GET.get('email')
        user = authenticate(request, token=token, email=email)
        if not user:
            # Failure handling order: configured redirect, then 404 if no
            # failure template, then render the template with a diagnosis.
            if settings.LOGIN_FAILED_REDIRECT:
                redirect_url = get_url_path(settings.LOGIN_FAILED_REDIRECT)
                return HttpResponseRedirect(redirect_url)
            if not settings.LOGIN_FAILED_TEMPLATE_NAME:
                raise Http404()
            context = self.get_context_data(**kwargs)
            # These settings are exposed to the failure template.
            context['ONE_TOKEN_PER_USER'] = settings.ONE_TOKEN_PER_USER
            context['REQUIRE_SAME_BROWSER'] = settings.REQUIRE_SAME_BROWSER
            context['REQUIRE_SAME_IP'] = settings.REQUIRE_SAME_IP
            context['ALLOW_SUPERUSER_LOGIN'] = settings.ALLOW_SUPERUSER_LOGIN
            context['ALLOW_STAFF_LOGIN'] = settings.ALLOW_STAFF_LOGIN
            # Re-run validation purely to surface a human-readable reason.
            try:
                magiclink = MagicLink.objects.get(token=token)
            except MagicLink.DoesNotExist:
                error = 'A magic link with that token could not be found'
                context['login_error'] = error
                return self.render_to_response(context)
            try:
                magiclink.validate(request, email)
            except MagicLinkError as error:
                context['login_error'] = str(error)
                return self.render_to_response(context)
            # NOTE(review): if authenticate() returned None but validate()
            # raised nothing, execution falls through to login(request, None)
            # below -- looks suspect; confirm intended behaviour.
        login(request, user)
        log.info(f'Login successful for {email}')
        response = self.login_complete_action()
        if settings.REQUIRE_SAME_BROWSER:
            # Clean up the per-link same-browser cookie set at send time.
            magiclink = MagicLink.objects.get(token=token)
            cookie_name = f'magiclink{magiclink.pk}'
            response.delete_cookie(cookie_name, magiclink.cookie_value)
        return response
    def login_complete_action(self) -> HttpResponse:
        """Redirect to the destination stored on the consumed magic link."""
        token = self.request.GET.get('token')
        magiclink = MagicLink.objects.get(token=token)
        return HttpResponseRedirect(magiclink.redirect_url)
@method_decorator(csrf_protect, name='dispatch')
class Signup(TemplateView):
    """Render the signup page and create users via a magic login link."""
    template_name = settings.SIGNUP_TEMPLATE_NAME

    # Whitelist of signup form classes selectable via the posted
    # ``form_name`` field. An explicit mapping (instead of ``getattr`` on a
    # dynamically imported forms module) fixes two defects: the fromlist
    # entry 'SignupForm, SignupFormEmailOnly' was one malformed string, and
    # a missing ``form_name`` made getattr raise TypeError (HTTP 500)
    # instead of being handled. It also stops request data from resolving
    # arbitrary attributes of the forms module.
    SIGNUP_FORMS = {
        'SignupForm': SignupForm,
        'SignupFormEmailOnly': SignupFormEmailOnly,
        'SignupFormWithUsername': SignupFormWithUsername,
        'SignupFormFull': SignupFormFull,
    }

    def get(self, request, *args, **kwargs):
        """Show the signup page with one instance of each signup form."""
        context = self.get_context_data(**kwargs)
        context['SignupForm'] = SignupForm()
        context['SignupFormEmailOnly'] = SignupFormEmailOnly()
        context['SignupFormWithUsername'] = SignupFormWithUsername()
        context['SignupFormFull'] = SignupFormFull()
        return self.render_to_response(context)

    def post(self, request, *args, **kwargs):
        """Create (or fetch) the user and email them a magic login link."""
        logout(request)
        context = self.get_context_data(**kwargs)
        form_name = request.POST.get('form_name')
        form_class = self.SIGNUP_FORMS.get(form_name)
        if form_class is None:
            return HttpResponseRedirect(self.request.path_info)
        form = form_class(request.POST)
        if not form.is_valid():
            context[form_name] = form
            return self.render_to_response(context)
        email = form.cleaned_data['email']
        full_name = form.cleaned_data.get('name', '')
        try:
            first_name, last_name = full_name.split(' ', 1)
        except ValueError:
            # Single-word name: treat it all as the first name.
            first_name = full_name
            last_name = ''
        get_or_create_user(
            email=email,
            username=form.cleaned_data.get('username', ''),
            first_name=first_name,
            last_name=last_name
        )
        default_signup_redirect = get_url_path(settings.SIGNUP_LOGIN_REDIRECT)
        next_url = request.GET.get('next', default_signup_redirect)
        magiclink = create_magiclink(email, request, redirect_url=next_url)
        magiclink.send(request)
        sent_url = get_url_path(settings.LOGIN_SENT_REDIRECT)
        response = HttpResponseRedirect(sent_url)
        if settings.REQUIRE_SAME_BROWSER:
            # Per-link cookie so verification can prove same-browser usage.
            cookie_name = f'magiclink{magiclink.pk}'
            response.set_cookie(cookie_name, magiclink.cookie_value)
            log.info(f'Cookie {cookie_name} set for {email}')
        return response
class Logout(RedirectView):
    """End the session, then redirect to ?next= or LOGOUT_REDIRECT_URL."""
    def get(self, request, *args, **kwargs):
        """Log out and redirect; ?next= wins over the configured default."""
        logout(self.request)
        next_page = request.GET.get('next')
        if next_page:
            return HttpResponseRedirect(next_page)
        redirect_url = get_url_path(django_settings.LOGOUT_REDIRECT_URL)
        return HttpResponseRedirect(redirect_url)
| true | true |
f7115b5e8ed27896b0e81e52df3d7ff166edfb54 | 23,257 | py | Python | archiv/urls.py | acdh-oeaw/4dpuzzle | 7856bbd82c7dfa8da1d5f1ad40593219a35b3cfe | [
"MIT"
] | null | null | null | archiv/urls.py | acdh-oeaw/4dpuzzle | 7856bbd82c7dfa8da1d5f1ad40593219a35b3cfe | [
"MIT"
] | 6 | 2020-06-05T18:32:02.000Z | 2022-02-10T07:22:24.000Z | archiv/urls.py | acdh-oeaw/4dpuzzle | 7856bbd82c7dfa8da1d5f1ad40593219a35b3cfe | [
"MIT"
] | 1 | 2020-06-30T13:52:41.000Z | 2020-06-30T13:52:41.000Z | # generated by appcreator
from django.conf.urls import url

from . import views
from . import stats_views

app_name = 'archiv'

# One (url slug, view-class prefix) entry per model, in the original route
# order.  Every model exposes the same five CRUD views in ``views``:
# <Prefix>ListView, <Prefix>DetailView, <Prefix>Create, <Prefix>Update
# and <Prefix>Delete.
MODEL_ROUTES = [
    ('actor', 'Actor'),
    ('archaeologicalobject4dpuzzleid', 'ArchaeologicalObject4DPuzzleID'),
    ('archaeologicalobjectid', 'ArchaeologicalObjectID'),
    ('archiveinf', 'ArchiveINF'),
    ('autocad', 'AutoCAD'),
    ('convolutecards', 'Convolutecards'),
    ('datenbase', 'Datenbase'),
    ('document4dpuzzleid', 'Document4DPuzzleID'),
    ('documenttypes', 'DocumentTypes'),
    ('excavationobjectid', 'ExcavationObjectID'),
    ('excavationseasons', 'ExcavationSeasons'),
    ('fielddrawing', 'Fielddrawing'),
    ('film', 'Film'),
    ('finddrawing', 'Finddrawing'),
    ('findsheets', 'Findsheets'),
    ('fotoborndigital', 'Fotoborndigital'),
    ('fotosgescannt', 'Fotosgescannt'),
    ('fundinventar4dpuzzleid', 'Fundinventar4DPuzzleID'),
    ('fundinventarinventarnummern', 'FundinventarInventarnummern'),
    ('fundinventarkonvolutnummern', 'FundinventarKonvolutnummern'),
    ('fundinventarmaterialproben', 'FundinventarMaterialproben'),
    ('fundinventarsteininventar', 'FundinventarSteininventar'),
    ('gis', 'GIS'),
    ('geophysics', 'Geophysics'),
    ('inventorybooks', 'Inventorybooks'),
    ('phasenid', 'PhasenID'),
    ('protocols', 'Protocols'),
    ('stratenid', 'StratenID'),
    ('tables', 'Tables'),
    ('threedimensionalmodel', 'ThreeDimensionalModel'),
    ('videos', 'Videos'),
    ('wallpaintinginventory', 'WallpaintingInventory'),
]


def _crud_patterns(slug, prefix):
    """Build the browse/detail/create/edit/delete url() entries for one model.

    Produces exactly the patterns and route names the generated file spelled
    out by hand: ``^<slug>/$`` (browse), ``^<slug>/detail/(?P<pk>[0-9]+)$``
    (detail), ``^<slug>/create/$``, ``^<slug>/edit/(?P<pk>[0-9]+)$`` and
    ``^<slug>/delete/(?P<pk>[0-9]+)$``.
    """
    return [
        url(
            r'^%s/$' % slug,
            getattr(views, prefix + 'ListView').as_view(),
            name=slug + '_browse'
        ),
        url(
            r'^%s/detail/(?P<pk>[0-9]+)$' % slug,
            getattr(views, prefix + 'DetailView').as_view(),
            name=slug + '_detail'
        ),
        url(
            r'^%s/create/$' % slug,
            getattr(views, prefix + 'Create').as_view(),
            name=slug + '_create'
        ),
        url(
            r'^%s/edit/(?P<pk>[0-9]+)$' % slug,
            getattr(views, prefix + 'Update').as_view(),
            name=slug + '_edit'
        ),
        url(
            r'^%s/delete/(?P<pk>[0-9]+)$' % slug,
            getattr(views, prefix + 'Delete').as_view(),
            name=slug + '_delete'
        ),
    ]


# The stats route comes first, then each model's five CRUD routes in
# MODEL_ROUTES order — identical ordering to the hand-expanded original.
urlpatterns = [
    url(
        r'^match-binary/$',
        stats_views.MatchBinaryView.as_view(),
        name='match-binary'
    ),
]
for _slug, _prefix in MODEL_ROUTES:
    urlpatterns += _crud_patterns(_slug, _prefix)
| 29.740409 | 66 | 0.588812 |
from django.conf.urls import url
from . import views
from . import stats_views
app_name = 'archiv'
urlpatterns = [
url(
r'^match-binary/$',
stats_views.MatchBinaryView.as_view(),
name='match-binary'
),
url(
r'^actor/$',
views.ActorListView.as_view(),
name='actor_browse'
),
url(
r'^actor/detail/(?P<pk>[0-9]+)$',
views.ActorDetailView.as_view(),
name='actor_detail'
),
url(
r'^actor/create/$',
views.ActorCreate.as_view(),
name='actor_create'
),
url(
r'^actor/edit/(?P<pk>[0-9]+)$',
views.ActorUpdate.as_view(),
name='actor_edit'
),
url(
r'^actor/delete/(?P<pk>[0-9]+)$',
views.ActorDelete.as_view(),
name='actor_delete'),
url(
r'^archaeologicalobject4dpuzzleid/$',
views.ArchaeologicalObject4DPuzzleIDListView.as_view(),
name='archaeologicalobject4dpuzzleid_browse'
),
url(
r'^archaeologicalobject4dpuzzleid/detail/(?P<pk>[0-9]+)$',
views.ArchaeologicalObject4DPuzzleIDDetailView.as_view(),
name='archaeologicalobject4dpuzzleid_detail'
),
url(
r'^archaeologicalobject4dpuzzleid/create/$',
views.ArchaeologicalObject4DPuzzleIDCreate.as_view(),
name='archaeologicalobject4dpuzzleid_create'
),
url(
r'^archaeologicalobject4dpuzzleid/edit/(?P<pk>[0-9]+)$',
views.ArchaeologicalObject4DPuzzleIDUpdate.as_view(),
name='archaeologicalobject4dpuzzleid_edit'
),
url(
r'^archaeologicalobject4dpuzzleid/delete/(?P<pk>[0-9]+)$',
views.ArchaeologicalObject4DPuzzleIDDelete.as_view(),
name='archaeologicalobject4dpuzzleid_delete'),
url(
r'^archaeologicalobjectid/$',
views.ArchaeologicalObjectIDListView.as_view(),
name='archaeologicalobjectid_browse'
),
url(
r'^archaeologicalobjectid/detail/(?P<pk>[0-9]+)$',
views.ArchaeologicalObjectIDDetailView.as_view(),
name='archaeologicalobjectid_detail'
),
url(
r'^archaeologicalobjectid/create/$',
views.ArchaeologicalObjectIDCreate.as_view(),
name='archaeologicalobjectid_create'
),
url(
r'^archaeologicalobjectid/edit/(?P<pk>[0-9]+)$',
views.ArchaeologicalObjectIDUpdate.as_view(),
name='archaeologicalobjectid_edit'
),
url(
r'^archaeologicalobjectid/delete/(?P<pk>[0-9]+)$',
views.ArchaeologicalObjectIDDelete.as_view(),
name='archaeologicalobjectid_delete'),
url(
r'^archiveinf/$',
views.ArchiveINFListView.as_view(),
name='archiveinf_browse'
),
url(
r'^archiveinf/detail/(?P<pk>[0-9]+)$',
views.ArchiveINFDetailView.as_view(),
name='archiveinf_detail'
),
url(
r'^archiveinf/create/$',
views.ArchiveINFCreate.as_view(),
name='archiveinf_create'
),
url(
r'^archiveinf/edit/(?P<pk>[0-9]+)$',
views.ArchiveINFUpdate.as_view(),
name='archiveinf_edit'
),
url(
r'^archiveinf/delete/(?P<pk>[0-9]+)$',
views.ArchiveINFDelete.as_view(),
name='archiveinf_delete'),
url(
r'^autocad/$',
views.AutoCADListView.as_view(),
name='autocad_browse'
),
url(
r'^autocad/detail/(?P<pk>[0-9]+)$',
views.AutoCADDetailView.as_view(),
name='autocad_detail'
),
url(
r'^autocad/create/$',
views.AutoCADCreate.as_view(),
name='autocad_create'
),
url(
r'^autocad/edit/(?P<pk>[0-9]+)$',
views.AutoCADUpdate.as_view(),
name='autocad_edit'
),
url(
r'^autocad/delete/(?P<pk>[0-9]+)$',
views.AutoCADDelete.as_view(),
name='autocad_delete'),
url(
r'^convolutecards/$',
views.ConvolutecardsListView.as_view(),
name='convolutecards_browse'
),
url(
r'^convolutecards/detail/(?P<pk>[0-9]+)$',
views.ConvolutecardsDetailView.as_view(),
name='convolutecards_detail'
),
url(
r'^convolutecards/create/$',
views.ConvolutecardsCreate.as_view(),
name='convolutecards_create'
),
url(
r'^convolutecards/edit/(?P<pk>[0-9]+)$',
views.ConvolutecardsUpdate.as_view(),
name='convolutecards_edit'
),
url(
r'^convolutecards/delete/(?P<pk>[0-9]+)$',
views.ConvolutecardsDelete.as_view(),
name='convolutecards_delete'),
url(
r'^datenbase/$',
views.DatenbaseListView.as_view(),
name='datenbase_browse'
),
url(
r'^datenbase/detail/(?P<pk>[0-9]+)$',
views.DatenbaseDetailView.as_view(),
name='datenbase_detail'
),
url(
r'^datenbase/create/$',
views.DatenbaseCreate.as_view(),
name='datenbase_create'
),
url(
r'^datenbase/edit/(?P<pk>[0-9]+)$',
views.DatenbaseUpdate.as_view(),
name='datenbase_edit'
),
url(
r'^datenbase/delete/(?P<pk>[0-9]+)$',
views.DatenbaseDelete.as_view(),
name='datenbase_delete'),
url(
r'^document4dpuzzleid/$',
views.Document4DPuzzleIDListView.as_view(),
name='document4dpuzzleid_browse'
),
url(
r'^document4dpuzzleid/detail/(?P<pk>[0-9]+)$',
views.Document4DPuzzleIDDetailView.as_view(),
name='document4dpuzzleid_detail'
),
url(
r'^document4dpuzzleid/create/$',
views.Document4DPuzzleIDCreate.as_view(),
name='document4dpuzzleid_create'
),
url(
r'^document4dpuzzleid/edit/(?P<pk>[0-9]+)$',
views.Document4DPuzzleIDUpdate.as_view(),
name='document4dpuzzleid_edit'
),
url(
r'^document4dpuzzleid/delete/(?P<pk>[0-9]+)$',
views.Document4DPuzzleIDDelete.as_view(),
name='document4dpuzzleid_delete'),
url(
r'^documenttypes/$',
views.DocumentTypesListView.as_view(),
name='documenttypes_browse'
),
url(
r'^documenttypes/detail/(?P<pk>[0-9]+)$',
views.DocumentTypesDetailView.as_view(),
name='documenttypes_detail'
),
url(
r'^documenttypes/create/$',
views.DocumentTypesCreate.as_view(),
name='documenttypes_create'
),
url(
r'^documenttypes/edit/(?P<pk>[0-9]+)$',
views.DocumentTypesUpdate.as_view(),
name='documenttypes_edit'
),
url(
r'^documenttypes/delete/(?P<pk>[0-9]+)$',
views.DocumentTypesDelete.as_view(),
name='documenttypes_delete'),
url(
r'^excavationobjectid/$',
views.ExcavationObjectIDListView.as_view(),
name='excavationobjectid_browse'
),
url(
r'^excavationobjectid/detail/(?P<pk>[0-9]+)$',
views.ExcavationObjectIDDetailView.as_view(),
name='excavationobjectid_detail'
),
url(
r'^excavationobjectid/create/$',
views.ExcavationObjectIDCreate.as_view(),
name='excavationobjectid_create'
),
url(
r'^excavationobjectid/edit/(?P<pk>[0-9]+)$',
views.ExcavationObjectIDUpdate.as_view(),
name='excavationobjectid_edit'
),
url(
r'^excavationobjectid/delete/(?P<pk>[0-9]+)$',
views.ExcavationObjectIDDelete.as_view(),
name='excavationobjectid_delete'),
url(
r'^excavationseasons/$',
views.ExcavationSeasonsListView.as_view(),
name='excavationseasons_browse'
),
url(
r'^excavationseasons/detail/(?P<pk>[0-9]+)$',
views.ExcavationSeasonsDetailView.as_view(),
name='excavationseasons_detail'
),
url(
r'^excavationseasons/create/$',
views.ExcavationSeasonsCreate.as_view(),
name='excavationseasons_create'
),
url(
r'^excavationseasons/edit/(?P<pk>[0-9]+)$',
views.ExcavationSeasonsUpdate.as_view(),
name='excavationseasons_edit'
),
url(
r'^excavationseasons/delete/(?P<pk>[0-9]+)$',
views.ExcavationSeasonsDelete.as_view(),
name='excavationseasons_delete'),
url(
r'^fielddrawing/$',
views.FielddrawingListView.as_view(),
name='fielddrawing_browse'
),
url(
r'^fielddrawing/detail/(?P<pk>[0-9]+)$',
views.FielddrawingDetailView.as_view(),
name='fielddrawing_detail'
),
url(
r'^fielddrawing/create/$',
views.FielddrawingCreate.as_view(),
name='fielddrawing_create'
),
url(
r'^fielddrawing/edit/(?P<pk>[0-9]+)$',
views.FielddrawingUpdate.as_view(),
name='fielddrawing_edit'
),
url(
r'^fielddrawing/delete/(?P<pk>[0-9]+)$',
views.FielddrawingDelete.as_view(),
name='fielddrawing_delete'),
url(
r'^film/$',
views.FilmListView.as_view(),
name='film_browse'
),
url(
r'^film/detail/(?P<pk>[0-9]+)$',
views.FilmDetailView.as_view(),
name='film_detail'
),
url(
r'^film/create/$',
views.FilmCreate.as_view(),
name='film_create'
),
url(
r'^film/edit/(?P<pk>[0-9]+)$',
views.FilmUpdate.as_view(),
name='film_edit'
),
url(
r'^film/delete/(?P<pk>[0-9]+)$',
views.FilmDelete.as_view(),
name='film_delete'),
url(
r'^finddrawing/$',
views.FinddrawingListView.as_view(),
name='finddrawing_browse'
),
url(
r'^finddrawing/detail/(?P<pk>[0-9]+)$',
views.FinddrawingDetailView.as_view(),
name='finddrawing_detail'
),
url(
r'^finddrawing/create/$',
views.FinddrawingCreate.as_view(),
name='finddrawing_create'
),
url(
r'^finddrawing/edit/(?P<pk>[0-9]+)$',
views.FinddrawingUpdate.as_view(),
name='finddrawing_edit'
),
url(
r'^finddrawing/delete/(?P<pk>[0-9]+)$',
views.FinddrawingDelete.as_view(),
name='finddrawing_delete'),
url(
r'^findsheets/$',
views.FindsheetsListView.as_view(),
name='findsheets_browse'
),
url(
r'^findsheets/detail/(?P<pk>[0-9]+)$',
views.FindsheetsDetailView.as_view(),
name='findsheets_detail'
),
url(
r'^findsheets/create/$',
views.FindsheetsCreate.as_view(),
name='findsheets_create'
),
url(
r'^findsheets/edit/(?P<pk>[0-9]+)$',
views.FindsheetsUpdate.as_view(),
name='findsheets_edit'
),
url(
r'^findsheets/delete/(?P<pk>[0-9]+)$',
views.FindsheetsDelete.as_view(),
name='findsheets_delete'),
url(
r'^fotoborndigital/$',
views.FotoborndigitalListView.as_view(),
name='fotoborndigital_browse'
),
url(
r'^fotoborndigital/detail/(?P<pk>[0-9]+)$',
views.FotoborndigitalDetailView.as_view(),
name='fotoborndigital_detail'
),
url(
r'^fotoborndigital/create/$',
views.FotoborndigitalCreate.as_view(),
name='fotoborndigital_create'
),
url(
r'^fotoborndigital/edit/(?P<pk>[0-9]+)$',
views.FotoborndigitalUpdate.as_view(),
name='fotoborndigital_edit'
),
url(
r'^fotoborndigital/delete/(?P<pk>[0-9]+)$',
views.FotoborndigitalDelete.as_view(),
name='fotoborndigital_delete'),
url(
r'^fotosgescannt/$',
views.FotosgescanntListView.as_view(),
name='fotosgescannt_browse'
),
url(
r'^fotosgescannt/detail/(?P<pk>[0-9]+)$',
views.FotosgescanntDetailView.as_view(),
name='fotosgescannt_detail'
),
url(
r'^fotosgescannt/create/$',
views.FotosgescanntCreate.as_view(),
name='fotosgescannt_create'
),
url(
r'^fotosgescannt/edit/(?P<pk>[0-9]+)$',
views.FotosgescanntUpdate.as_view(),
name='fotosgescannt_edit'
),
url(
r'^fotosgescannt/delete/(?P<pk>[0-9]+)$',
views.FotosgescanntDelete.as_view(),
name='fotosgescannt_delete'),
url(
r'^fundinventar4dpuzzleid/$',
views.Fundinventar4DPuzzleIDListView.as_view(),
name='fundinventar4dpuzzleid_browse'
),
url(
r'^fundinventar4dpuzzleid/detail/(?P<pk>[0-9]+)$',
views.Fundinventar4DPuzzleIDDetailView.as_view(),
name='fundinventar4dpuzzleid_detail'
),
url(
r'^fundinventar4dpuzzleid/create/$',
views.Fundinventar4DPuzzleIDCreate.as_view(),
name='fundinventar4dpuzzleid_create'
),
url(
r'^fundinventar4dpuzzleid/edit/(?P<pk>[0-9]+)$',
views.Fundinventar4DPuzzleIDUpdate.as_view(),
name='fundinventar4dpuzzleid_edit'
),
url(
r'^fundinventar4dpuzzleid/delete/(?P<pk>[0-9]+)$',
views.Fundinventar4DPuzzleIDDelete.as_view(),
name='fundinventar4dpuzzleid_delete'),
url(
r'^fundinventarinventarnummern/$',
views.FundinventarInventarnummernListView.as_view(),
name='fundinventarinventarnummern_browse'
),
url(
r'^fundinventarinventarnummern/detail/(?P<pk>[0-9]+)$',
views.FundinventarInventarnummernDetailView.as_view(),
name='fundinventarinventarnummern_detail'
),
url(
r'^fundinventarinventarnummern/create/$',
views.FundinventarInventarnummernCreate.as_view(),
name='fundinventarinventarnummern_create'
),
url(
r'^fundinventarinventarnummern/edit/(?P<pk>[0-9]+)$',
views.FundinventarInventarnummernUpdate.as_view(),
name='fundinventarinventarnummern_edit'
),
url(
r'^fundinventarinventarnummern/delete/(?P<pk>[0-9]+)$',
views.FundinventarInventarnummernDelete.as_view(),
name='fundinventarinventarnummern_delete'),
url(
r'^fundinventarkonvolutnummern/$',
views.FundinventarKonvolutnummernListView.as_view(),
name='fundinventarkonvolutnummern_browse'
),
url(
r'^fundinventarkonvolutnummern/detail/(?P<pk>[0-9]+)$',
views.FundinventarKonvolutnummernDetailView.as_view(),
name='fundinventarkonvolutnummern_detail'
),
url(
r'^fundinventarkonvolutnummern/create/$',
views.FundinventarKonvolutnummernCreate.as_view(),
name='fundinventarkonvolutnummern_create'
),
url(
r'^fundinventarkonvolutnummern/edit/(?P<pk>[0-9]+)$',
views.FundinventarKonvolutnummernUpdate.as_view(),
name='fundinventarkonvolutnummern_edit'
),
url(
r'^fundinventarkonvolutnummern/delete/(?P<pk>[0-9]+)$',
views.FundinventarKonvolutnummernDelete.as_view(),
name='fundinventarkonvolutnummern_delete'),
url(
r'^fundinventarmaterialproben/$',
views.FundinventarMaterialprobenListView.as_view(),
name='fundinventarmaterialproben_browse'
),
url(
r'^fundinventarmaterialproben/detail/(?P<pk>[0-9]+)$',
views.FundinventarMaterialprobenDetailView.as_view(),
name='fundinventarmaterialproben_detail'
),
url(
r'^fundinventarmaterialproben/create/$',
views.FundinventarMaterialprobenCreate.as_view(),
name='fundinventarmaterialproben_create'
),
url(
r'^fundinventarmaterialproben/edit/(?P<pk>[0-9]+)$',
views.FundinventarMaterialprobenUpdate.as_view(),
name='fundinventarmaterialproben_edit'
),
url(
r'^fundinventarmaterialproben/delete/(?P<pk>[0-9]+)$',
views.FundinventarMaterialprobenDelete.as_view(),
name='fundinventarmaterialproben_delete'),
url(
r'^fundinventarsteininventar/$',
views.FundinventarSteininventarListView.as_view(),
name='fundinventarsteininventar_browse'
),
url(
r'^fundinventarsteininventar/detail/(?P<pk>[0-9]+)$',
views.FundinventarSteininventarDetailView.as_view(),
name='fundinventarsteininventar_detail'
),
url(
r'^fundinventarsteininventar/create/$',
views.FundinventarSteininventarCreate.as_view(),
name='fundinventarsteininventar_create'
),
url(
r'^fundinventarsteininventar/edit/(?P<pk>[0-9]+)$',
views.FundinventarSteininventarUpdate.as_view(),
name='fundinventarsteininventar_edit'
),
url(
r'^fundinventarsteininventar/delete/(?P<pk>[0-9]+)$',
views.FundinventarSteininventarDelete.as_view(),
name='fundinventarsteininventar_delete'),
url(
r'^gis/$',
views.GISListView.as_view(),
name='gis_browse'
),
url(
r'^gis/detail/(?P<pk>[0-9]+)$',
views.GISDetailView.as_view(),
name='gis_detail'
),
url(
r'^gis/create/$',
views.GISCreate.as_view(),
name='gis_create'
),
url(
r'^gis/edit/(?P<pk>[0-9]+)$',
views.GISUpdate.as_view(),
name='gis_edit'
),
url(
r'^gis/delete/(?P<pk>[0-9]+)$',
views.GISDelete.as_view(),
name='gis_delete'),
url(
r'^geophysics/$',
views.GeophysicsListView.as_view(),
name='geophysics_browse'
),
url(
r'^geophysics/detail/(?P<pk>[0-9]+)$',
views.GeophysicsDetailView.as_view(),
name='geophysics_detail'
),
url(
r'^geophysics/create/$',
views.GeophysicsCreate.as_view(),
name='geophysics_create'
),
url(
r'^geophysics/edit/(?P<pk>[0-9]+)$',
views.GeophysicsUpdate.as_view(),
name='geophysics_edit'
),
url(
r'^geophysics/delete/(?P<pk>[0-9]+)$',
views.GeophysicsDelete.as_view(),
name='geophysics_delete'),
url(
r'^inventorybooks/$',
views.InventorybooksListView.as_view(),
name='inventorybooks_browse'
),
url(
r'^inventorybooks/detail/(?P<pk>[0-9]+)$',
views.InventorybooksDetailView.as_view(),
name='inventorybooks_detail'
),
url(
r'^inventorybooks/create/$',
views.InventorybooksCreate.as_view(),
name='inventorybooks_create'
),
url(
r'^inventorybooks/edit/(?P<pk>[0-9]+)$',
views.InventorybooksUpdate.as_view(),
name='inventorybooks_edit'
),
url(
r'^inventorybooks/delete/(?P<pk>[0-9]+)$',
views.InventorybooksDelete.as_view(),
name='inventorybooks_delete'),
url(
r'^phasenid/$',
views.PhasenIDListView.as_view(),
name='phasenid_browse'
),
url(
r'^phasenid/detail/(?P<pk>[0-9]+)$',
views.PhasenIDDetailView.as_view(),
name='phasenid_detail'
),
url(
r'^phasenid/create/$',
views.PhasenIDCreate.as_view(),
name='phasenid_create'
),
url(
r'^phasenid/edit/(?P<pk>[0-9]+)$',
views.PhasenIDUpdate.as_view(),
name='phasenid_edit'
),
url(
r'^phasenid/delete/(?P<pk>[0-9]+)$',
views.PhasenIDDelete.as_view(),
name='phasenid_delete'),
url(
r'^protocols/$',
views.ProtocolsListView.as_view(),
name='protocols_browse'
),
url(
r'^protocols/detail/(?P<pk>[0-9]+)$',
views.ProtocolsDetailView.as_view(),
name='protocols_detail'
),
url(
r'^protocols/create/$',
views.ProtocolsCreate.as_view(),
name='protocols_create'
),
url(
r'^protocols/edit/(?P<pk>[0-9]+)$',
views.ProtocolsUpdate.as_view(),
name='protocols_edit'
),
url(
r'^protocols/delete/(?P<pk>[0-9]+)$',
views.ProtocolsDelete.as_view(),
name='protocols_delete'),
url(
r'^stratenid/$',
views.StratenIDListView.as_view(),
name='stratenid_browse'
),
url(
r'^stratenid/detail/(?P<pk>[0-9]+)$',
views.StratenIDDetailView.as_view(),
name='stratenid_detail'
),
url(
r'^stratenid/create/$',
views.StratenIDCreate.as_view(),
name='stratenid_create'
),
url(
r'^stratenid/edit/(?P<pk>[0-9]+)$',
views.StratenIDUpdate.as_view(),
name='stratenid_edit'
),
url(
r'^stratenid/delete/(?P<pk>[0-9]+)$',
views.StratenIDDelete.as_view(),
name='stratenid_delete'),
url(
r'^tables/$',
views.TablesListView.as_view(),
name='tables_browse'
),
url(
r'^tables/detail/(?P<pk>[0-9]+)$',
views.TablesDetailView.as_view(),
name='tables_detail'
),
url(
r'^tables/create/$',
views.TablesCreate.as_view(),
name='tables_create'
),
url(
r'^tables/edit/(?P<pk>[0-9]+)$',
views.TablesUpdate.as_view(),
name='tables_edit'
),
url(
r'^tables/delete/(?P<pk>[0-9]+)$',
views.TablesDelete.as_view(),
name='tables_delete'),
url(
r'^threedimensionalmodel/$',
views.ThreeDimensionalModelListView.as_view(),
name='threedimensionalmodel_browse'
),
url(
r'^threedimensionalmodel/detail/(?P<pk>[0-9]+)$',
views.ThreeDimensionalModelDetailView.as_view(),
name='threedimensionalmodel_detail'
),
url(
r'^threedimensionalmodel/create/$',
views.ThreeDimensionalModelCreate.as_view(),
name='threedimensionalmodel_create'
),
url(
r'^threedimensionalmodel/edit/(?P<pk>[0-9]+)$',
views.ThreeDimensionalModelUpdate.as_view(),
name='threedimensionalmodel_edit'
),
url(
r'^threedimensionalmodel/delete/(?P<pk>[0-9]+)$',
views.ThreeDimensionalModelDelete.as_view(),
name='threedimensionalmodel_delete'),
url(
r'^videos/$',
views.VideosListView.as_view(),
name='videos_browse'
),
url(
r'^videos/detail/(?P<pk>[0-9]+)$',
views.VideosDetailView.as_view(),
name='videos_detail'
),
url(
r'^videos/create/$',
views.VideosCreate.as_view(),
name='videos_create'
),
url(
r'^videos/edit/(?P<pk>[0-9]+)$',
views.VideosUpdate.as_view(),
name='videos_edit'
),
url(
r'^videos/delete/(?P<pk>[0-9]+)$',
views.VideosDelete.as_view(),
name='videos_delete'),
url(
r'^wallpaintinginventory/$',
views.WallpaintingInventoryListView.as_view(),
name='wallpaintinginventory_browse'
),
url(
r'^wallpaintinginventory/detail/(?P<pk>[0-9]+)$',
views.WallpaintingInventoryDetailView.as_view(),
name='wallpaintinginventory_detail'
),
url(
r'^wallpaintinginventory/create/$',
views.WallpaintingInventoryCreate.as_view(),
name='wallpaintinginventory_create'
),
url(
r'^wallpaintinginventory/edit/(?P<pk>[0-9]+)$',
views.WallpaintingInventoryUpdate.as_view(),
name='wallpaintinginventory_edit'
),
url(
r'^wallpaintinginventory/delete/(?P<pk>[0-9]+)$',
views.WallpaintingInventoryDelete.as_view(),
name='wallpaintinginventory_delete'),
]
| true | true |
f7115b80054333e64fee1293b4991149a2084c6b | 1,073 | py | Python | google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/services/services/conversion_upload_service/transports/__init__.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 7 | 2021-02-21T10:39:41.000Z | 2021-12-07T07:31:28.000Z | google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/services/services/conversion_upload_service/transports/__init__.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 6 | 2021-02-02T23:46:11.000Z | 2021-11-15T01:46:02.000Z | google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/services/services/conversion_upload_service/transports/__init__.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 4 | 2021-01-28T23:25:45.000Z | 2021-08-30T01:55:16.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type
from .base import ConversionUploadServiceTransport
from .grpc import ConversionUploadServiceGrpcTransport
# Registry of available transports for this service, keyed by protocol name.
_transport_registry = OrderedDict(
    [('grpc', ConversionUploadServiceGrpcTransport)]
)  # type: Dict[str, Type[ConversionUploadServiceTransport]]
__all__ = (
    'ConversionUploadServiceTransport',
    'ConversionUploadServiceGrpcTransport',
)
| 33.53125 | 94 | 0.78192 |
from collections import OrderedDict
from typing import Dict, Type
from .base import ConversionUploadServiceTransport
from .grpc import ConversionUploadServiceGrpcTransport
_transport_registry = OrderedDict()
_transport_registry['grpc'] = ConversionUploadServiceGrpcTransport
__all__ = (
'ConversionUploadServiceTransport',
'ConversionUploadServiceGrpcTransport',
)
| true | true |
f7115bc1b282ac71f3a3870ed4dc29099eb9633b | 963 | py | Python | djangofiles/BlogProject/blog/forms.py | manvith263/tricalidee | 69cf66a416be7917eb8cbb3562cff7d5a66df088 | [
"BSD-3-Clause"
] | 1 | 2021-05-11T01:52:35.000Z | 2021-05-11T01:52:35.000Z | djangofiles/BlogProject/blog/forms.py | manvith263/tricalidee | 69cf66a416be7917eb8cbb3562cff7d5a66df088 | [
"BSD-3-Clause"
] | null | null | null | djangofiles/BlogProject/blog/forms.py | manvith263/tricalidee | 69cf66a416be7917eb8cbb3562cff7d5a66df088 | [
"BSD-3-Clause"
] | null | null | null | from django import forms
from .models import Comment, Answer
class CommentForm(forms.ModelForm):
    """ModelForm for posting a comment; only the text body is user-editable."""

    class Meta:
        model = Comment
        fields = ('text',)
        widgets = {
            'text': forms.Textarea(attrs={'class': 'editable medium-editor-textarea'}),
        }
class AnonymousCommentForm(forms.ModelForm):
    """ModelForm for anonymous comments: free-text author name plus text body."""

    class Meta:
        model = Comment
        fields = ('author', 'text')
        widgets = {
            'author': forms.TextInput(attrs={'class': 'textinputclass'}),
            'text': forms.Textarea(attrs={'class': 'editable medium-editor-textarea'}),
        }
class AnswerForm(forms.Form):
    """Plain (non-model) form holding a single optional answer field."""

    answer_content = forms.CharField(
        label='',
        max_length=200,
        widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'New Answer ..'}),
        required=False,
    )

    def clean(self):
        """No extra validation beyond the base class; return the cleaned data."""
        cleaned_data = super(AnswerForm, self).clean()
        return cleaned_data
from .models import Comment, Answer
class CommentForm(forms.ModelForm):
class Meta:
model = Comment
fields = ('text',)
widgets = {
'text':forms.Textarea(attrs={'class':'editable medium-editor-textarea'}),
}
class AnonymousCommentForm(forms.ModelForm):
class Meta:
model = Comment
fields = ('author','text')
widgets = {
'author':forms.TextInput(attrs={'class':'textinputclass'}),
'text':forms.Textarea(attrs={'class':'editable medium-editor-textarea'}),
}
class AnswerForm(forms.Form):
answer_content = forms.CharField(label='',max_length=200,widget=forms.TextInput(attrs={'class': 'form-control','placeholder':'New Answer ..'}),required=False)
def clean(self):
cleaned_data = super(AnswerForm, self).clean()
return cleaned_data | true | true |
f7115cb7531270e4ff2173d2d1820592f1d4257b | 3,224 | py | Python | Strip.py | brandonskerritt51/Everything | c77141309f48d7cf4791bd73c574a8985d86cdc9 | [
"MIT"
] | 3 | 2020-12-26T18:54:12.000Z | 2021-12-22T16:10:01.000Z | Strip.py | brandonskerritt51/Everything | c77141309f48d7cf4791bd73c574a8985d86cdc9 | [
"MIT"
] | null | null | null | Strip.py | brandonskerritt51/Everything | c77141309f48d7cf4791bd73c574a8985d86cdc9 | [
"MIT"
] | 1 | 2020-02-28T10:58:11.000Z | 2020-02-28T10:58:11.000Z | # strip puncuation custom module
# 12 / 03 / 2015
# Brandon
# https://www.facebook.com/AiiYourBaseRBel0ngToUs
"""
This program was designed to strip puncuation
from a string
This program was made by Brandon in February 2015
and was finished in February 2015
If you have any suggestions or want to help
contact me at
https://www.facebook.com/AiiYourBaseRBel0ngToUs
This program abides by the rules of presentation for
PEP-8
shown here on
https://www.python.org/dev/peps/pep-0008/
You may use this code, or any features of this code
in your own work, as long as you link my page
and the BSD licensing, which can be copied directly
below.
https://www.facebook.com/AiiYourBaseRBel0ngToUs
*BSD licensed*
More info can be read here
http://opensource.org/licenses/BSD-3-Clause
"""
import sys
# Sys is required for Sys.exit() in close() function
def main():
    """Prompt the user for a message and print three stripped variants of it."""
    text = str(input("enter message here to strip "))
    # Same order as the original: punctuation-only, punctuation+space, space-only.
    print(strip(text))
    print(stripWithSpace(text))
    print(stripSpaceOnly(text))
    close()
def strip(message):
    """Return *message* with basic punctuation characters removed.

    Spaces are preserved; use stripWithSpace() to drop those as well.
    """
    # Characters to discard (the \ inside the literal is a literal backslash).
    punctuations = '''!()-[]{};:'"\,<>./?@#$%^&*_~'''
    # join() over a generator builds the result in one pass instead of the
    # original quadratic string concatenation inside a loop.
    return "".join(char for char in message if char not in punctuations)
def stripWithSpace(message):
    """Return *message* with punctuation AND space characters removed."""
    # Same character set as strip(), plus a leading space character.
    punctuations = ''' !()-[]{};:'"\,<>./?@#$%^&*_~'''
    # Single-pass join instead of quadratic += concatenation.
    return "".join(char for char in message if char not in punctuations)
def stripSpaceOnly(message):
    """Return *message* with every space character removed.

    Only the plain space ' ' is dropped; other whitespace such as tabs or
    newlines is kept, matching the original behaviour.
    """
    # str.replace does the same filtering in one C-level pass.
    return message.replace(" ", "")
def stripLetters(message):
    """Return *message* with every ASCII letter removed.

    The input is uppercased first (as in the original), then the characters
    A-Z are filtered out; digits, punctuation and whitespace pass through
    unchanged because upper() does not affect them.
    """
    letters = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
    # Single-pass join instead of quadratic += concatenation.
    return "".join(char for char in message.upper() if char not in letters)
def Reverse(message):
    """Return *message* reversed.

    Bug fix: the original built the reversed string in a while-loop but never
    returned it, so every call produced None. A reversing slice is the
    idiomatic (and correct) replacement.
    """
    return message[::-1]
def close():
    """Wait for a keypress (keeps the console window open), then exit."""
    input("Any key to exit! ")
    sys.exit()
if __name__ == '__main__':
    # Run the interactive demo only when executed as a script. (Also removes
    # dataset-extraction junk that was fused onto this line and broke syntax.)
    main()
import sys
def main():
message = str(input("enter message here to strip "))
message1 = strip(message)
message2 = stripWithSpace(message)
message3 = stripSpaceOnly(message)
print(message1)
print(message2)
print(message3)
close()
def strip(message):
punctuations = '''!()-[]{};:'"\,<>./?@#$%^&*_~'''
# creates empty variable
no_punct = ""
# for every charecter in MESSAGE
for char in message:
# if charecter is not in puncuations
if char not in punctuations:
no_punct = no_punct + char
# replaces puncuation with nothing
return no_punct
# returns non-puncuated string
def stripWithSpace(message):
# strips all puncuation with Space
# defines puncuations
punctuations = ''' !()-[]{};:'"\,<>./?@#$%^&*_~'''
no_punct = ""
for char in message:
if char not in punctuations:
no_punct = no_punct + char
return no_punct
def stripSpaceOnly(message):
punctuations = ''' '''
no_punct = ""
for char in message:
if char not in punctuations:
no_punct = no_punct + char
return no_punct
def stripLetters(message):
message = message.upper()
punctuations = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
no_punct = ""
for char in message:
if char not in punctuations:
no_punct = no_punct + char
return no_punct
def Reverse(message):
reverseTranslated = ''
i = len(message) - 1
while i >= 0:
reverseTranslated = reverseTranslated + message[i]
i = i - 1
def close():
input("Any key to exit! ")
sys.exit()
if __name__ == '__main__':
main() | true | true |
f7115d7c70566ed892299ff982e39d43adecf586 | 2,486 | py | Python | tests/test_reader_table.py | baklanovp/pystella | 47a8b9c3dcd343bf80fba80c8468b803f0f842ce | [
"MIT"
] | 1 | 2019-08-08T13:11:57.000Z | 2019-08-08T13:11:57.000Z | tests/test_reader_table.py | cradesto/pystella | f6f44ed12d9648585a52a09e15d494daa4c70c59 | [
"MIT"
] | 9 | 2015-07-11T16:39:57.000Z | 2021-11-23T07:31:49.000Z | tests/test_reader_table.py | cradesto/pystella | f6f44ed12d9648585a52a09e15d494daa4c70c59 | [
"MIT"
] | 1 | 2019-08-08T13:08:55.000Z | 2019-08-08T13:08:55.000Z | # coding=utf-8
import numpy as np
import unittest
import pystella as ps
# from pystella.rf import band
# from pystella.rf.lc import LightCurve
# from pystella.util.reader_table import read_table_header_float, table2curves, read_obs_table_header, curves2table
__author__ = 'bakl'
def lc_create(b, m=-19, dt=0.):
    """Build a flat LightCurve: 10 epochs over [dt, 200+dt] at magnitude m."""
    npts = 10
    epochs = np.linspace(0. + dt, 200. + dt, npts)
    return ps.LightCurve(b, epochs, m * np.ones(npts))
class TestReaderTable(unittest.TestCase):
    """Tests for pystella's table reading and table<->curves conversion helpers."""

    def test_read_table_header_float(self):
        """A .gri model table should parse into 15 named columns."""
        fname = 'data/stella/cat_R500_M15_Ni006_E12.gri'
        data = ps.util.read_table_header_float(fname)
        cols = len(data.dtype.names)
        self.assertTrue(cols == 15,
                        msg="The number of colums in the data should be 15, but it's : %d." % cols)

    def test_read_table_header_float_skiprows(self):
        """Skipping the 87-line preamble of a .tt file should leave 14 columns."""
        fname = 'data/stella/rednova_R3.2_M6_Ni0_E0.25.tt'
        data = ps.util.read_table_header_float(fname, skip=87)
        cols = len(data.dtype.names)
        self.assertTrue(cols == 14,
                        msg="The number of colums in [%s] should be 14, but it's : %d." % (fname, cols))

    def test_table2curves_no_bands(self):
        """Every band produced by table2curves must come from a table column."""
        ps.Band.load_settings()
        fname = 'data/stella/rednova_R3.2_M6_Ni0_E0.25.tt'
        data = ps.util.read_table_header_float(fname, skip=87)
        data.dtype.names = [col.replace('M', '') for col in data.dtype.names]
        curves = ps.table2curves('test', data)
        for bname in curves.BandNames:
            self.assertTrue(bname in data.dtype.names,
                            msg="No band %s in [%s] after table2curves." % (bname, ''.join(data.dtype.names)))

    def test_curves2table(self):
        """Converting curves back to a table keeps one column per curve."""
        ps.Band.load_settings()
        fname = 'data/stella/rednova_R3.2_M6_Ni0_E0.25.tt'
        data = ps.util.read_table_header_float(fname, skip=87)
        data.dtype.names = [col.replace('M', '') for col in data.dtype.names]
        curves = ps.table2curves('test', data, is_filter_zero=False)
        tbl = ps.curves2table(curves)
        # Bug fix: assertCountEqual requires two sequences, but len(tbl.names)
        # is an int, so the original call could never compare correctly;
        # plain equality is the evident intent.
        self.assertEqual(curves.Length, len(tbl.names))

    def test_read_obs_table_header(self):
        """An observation table must expose at least the JD and V columns."""
        fname = 'data/obs/1999em-uphHamuy.dat'
        tbl, cols_data = ps.util.read_obs_table_header(fname, is_out=True)
        for c in ('JD', 'V'):
            self.assertTrue(c in tbl.dtype.names,
                            msg="No band %s in [%s] after read_obs_table_header." % (c, ','.join(tbl.dtype.names)))
| 41.433333 | 115 | 0.641191 |
import numpy as np
import unittest
import pystella as ps
__author__ = 'bakl'
def lc_create(b, m=-19, dt=0.):
n = 10
time = np.linspace(0. + dt, 200. + dt, n)
mags = m * np.ones(n)
return ps.LightCurve(b, time, mags)
class TestReaderTable(unittest.TestCase):
def test_read_table_header_float(self):
fname = 'data/stella/cat_R500_M15_Ni006_E12.gri'
data = ps.util.read_table_header_float(fname)
cols = len(data.dtype.names)
self.assertTrue(cols == 15,
msg="The number of colums in the data should be 15, but it's : %d." % cols)
def test_read_table_header_float_skiprows(self):
fname = 'data/stella/rednova_R3.2_M6_Ni0_E0.25.tt'
data = ps.util.read_table_header_float(fname, skip=87)
cols = len(data.dtype.names)
self.assertTrue(cols == 14,
msg="The number of colums in [%s] should be 14, but it's : %d." % (fname, cols))
def test_table2curves_no_bands(self):
ps.Band.load_settings()
fname = 'data/stella/rednova_R3.2_M6_Ni0_E0.25.tt'
data = ps.util.read_table_header_float(fname, skip=87)
data.dtype.names = [col.replace('M', '') for col in data.dtype.names]
curves = ps.table2curves('test', data)
for bname in curves.BandNames:
self.assertTrue(bname in data.dtype.names,
msg="No band %s in [%s] after table2curves." % (bname, ''.join(data.dtype.names)))
def test_curves2table(self):
ps.Band.load_settings()
fname = 'data/stella/rednova_R3.2_M6_Ni0_E0.25.tt'
data = ps.util.read_table_header_float(fname, skip=87)
data.dtype.names = [col.replace('M', '') for col in data.dtype.names]
curves = ps.table2curves('test', data, is_filter_zero=False)
tbl = ps.curves2table(curves)
self.assertCountEqual(curves.Length, len(tbl.names))
def test_read_obs_table_header(self):
fname = 'data/obs/1999em-uphHamuy.dat'
tbl, cols_data = ps.util.read_obs_table_header(fname, is_out=True)
for c in ('JD', 'V'):
self.assertTrue(c in tbl.dtype.names,
msg="No band %s in [%s] after read_obs_table_header." % (c, ','.join(tbl.dtype.names)))
| true | true |
f7115da64a528c5832fc24488f6a9968dd730194 | 33,409 | py | Python | python/tvm/relay/op/nn/_nn.py | CaramelFc/tvm | 0b95de439499122c98857e9006331b53f3578dbc | [
"Apache-2.0"
] | 1 | 2020-09-02T11:58:01.000Z | 2020-09-02T11:58:01.000Z | python/tvm/relay/op/nn/_nn.py | CaramelFc/tvm | 0b95de439499122c98857e9006331b53f3578dbc | [
"Apache-2.0"
] | null | null | null | python/tvm/relay/op/nn/_nn.py | CaramelFc/tvm | 0b95de439499122c98857e9006331b53f3578dbc | [
"Apache-2.0"
] | 2 | 2020-11-26T00:35:02.000Z | 2020-12-07T03:15:56.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return, invalid-name, unused-argument, too-many-arguments, consider-using-in
"""Backend compiler related feature registration"""
from __future__ import absolute_import
from tvm import topi
from tvm.topi.utils import get_const_tuple
from tvm.runtime import convert
from tvm.te.hybrid import script
from .. import op as reg
from .. import strategy
from ..op import OpPattern
from .._tensor import elemwise_shape_func
from ..strategy.generic import is_depthwise_conv2d
from ...transform import LayoutConfig
# relu
reg.register_broadcast_schedule("nn.relu")
reg.register_pattern("nn.relu", OpPattern.ELEMWISE)
# softmax
reg.register_strategy("nn.softmax", strategy.softmax_strategy)
reg.register_pattern("nn.softmax", OpPattern.OPAQUE)
# log_softmax
reg.register_schedule("nn.log_softmax", strategy.schedule_log_softmax)
reg.register_pattern("nn.log_softmax", OpPattern.OPAQUE)
# dense
reg.register_strategy("nn.dense", strategy.dense_strategy)
reg.register_pattern("nn.dense", reg.OpPattern.OUT_ELEMWISE_FUSABLE)
# fifo_buffer
@reg.register_compute("nn.fifo_buffer")
def compute_fifo_buffer(attrs, inputs, out_type):
    """Compute definition of fifo_buffer: delegate to TOPI along the configured axis."""
    fifo_axis = attrs.get_int("axis")
    return [topi.nn.fifo_buffer(*inputs[:2], axis=fifo_axis)]
reg.register_injective_schedule("nn.fifo_buffer")
reg.register_pattern("nn.fifo_buffer", OpPattern.OPAQUE)
# batch_matmul
reg.register_strategy("nn.batch_matmul", strategy.batch_matmul_strategy)
reg.register_pattern("nn.batch_matmul", reg.OpPattern.OUT_ELEMWISE_FUSABLE)
# sparse_dense
@reg.register_compute("nn.sparse_dense")
def compute_sparse_dense(attrs, inputs, out_type):
    """Compute definition of sparse_dense"""
    # NOTE(review): the four inputs presumably are the dense tensor plus the
    # sparse weight's (data, indices, indptr) buffers -- confirm against the
    # op declaration; they are forwarded to TOPI verbatim.
    return [topi.nn.sparse_dense(*inputs[:4])]
reg.register_strategy("nn.sparse_dense", strategy.sparse_dense_strategy)
reg.register_pattern("nn.sparse_dense", reg.OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_alter_op_layout("nn.sparse_dense")
def alter_op_layout_sparse_dense(attrs, inputs, tinfos, out_type):
    """Alternate the layout of sparse_dense"""
    # Pure delegation: the target-specific layout rewrite lives in TOPI.
    return topi.nn.sparse_dense_alter_layout(attrs, inputs, tinfos, out_type)
@reg.register_compute("nn.internal.sparse_dense_padded")
def compute_sparse_dense_padded(attrs, inputs, out_type):
    """Compute definition of sparse_dense_padded"""
    # Deliberately no generic implementation: per the error message, only the
    # CUDA backend provides one (registered via the strategy below).
    raise NotImplementedError("nn.internal.sparse_dense_padded is only available on cuda")
reg.register_strategy("nn.internal.sparse_dense_padded", strategy.sparse_dense_padded_strategy)
reg.register_pattern("nn.internal.sparse_dense_padded", reg.OpPattern.OUT_ELEMWISE_FUSABLE)
# sparse_transpose
@reg.register_compute("nn.sparse_transpose")
def compute_sparse_transpose(attrs, inputs, out_type):
    """Compute definition of sparse_transpose"""
    # NOTE(review): inputs presumably hold the sparse matrix's
    # (data, indices, indptr) buffers -- confirm against the op declaration.
    return topi.nn.sparse_transpose(*inputs[:3])
reg.register_schedule("nn.sparse_transpose", strategy.schedule_sparse_transpose)
reg.register_pattern("nn.sparse_transpose", reg.OpPattern.OUT_ELEMWISE_FUSABLE)
# conv1d
reg.register_strategy("nn.conv1d", strategy.conv1d_strategy)
reg.register_pattern("nn.conv1d", OpPattern.OUT_ELEMWISE_FUSABLE)
# conv2d
reg.register_strategy("nn.conv2d", strategy.conv2d_strategy)
reg.register_pattern("nn.conv2d", OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_alter_op_layout("nn.conv2d")
def alter_op_layout_conv2d(attrs, inputs, tinfos, out_type):
    """Alternate the layout of conv2d"""
    # Pure delegation: the target-specific layout rewrite lives in TOPI.
    return topi.nn.conv2d_alter_layout(attrs, inputs, tinfos, out_type)
@reg.register_legalize("nn.conv2d")
def legalize_conv2d(attrs, inputs, types):
    """Legalize conv2d op.
    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of current convolution
    inputs : list of tvm.relay.Expr
        The args of the Relay expr to be legalized
    types : list of types
        List of input and output types
    Returns
    -------
    result : tvm.relay.Expr
        The legalized expr
    """
    # Pure delegation: targets hook their legalization rewrites into TOPI.
    return topi.nn.conv2d_legalize(attrs, inputs, types)
@reg.register_convert_op_layout("nn.conv2d")
def convert_conv2d(attrs, inputs, tinfos, desired_layouts):
    """Convert Layout pass registration for conv2d op.
    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of current convolution
    inputs : list of tvm.relay.Expr
        The args of the Relay expr to be legalized
    tinfos : list of types
        List of input and output types
    desired_layouts : list of layout strings
        List of layouts defining our desired
        layout for the data and kernel inputs respectively.
    Returns
    -------
    result : tvm.relay.Expr
        The transformed expr
    """
    # pylint: disable=import-outside-toplevel
    from tvm import relay
    data, weight = inputs
    # First check if there is a LayoutConfig scope, and if so, whether
    # it indicates we should ignore this layer or not.
    layout_config = LayoutConfig.current
    if layout_config is not None:
        skip_layer = layout_config.check_skip()
        if skip_layer:
            # Rebuild the op with its original attrs, i.e. leave it unconverted.
            return relay.nn.conv2d(data, weight, **attrs)
    # Prepare new layout.
    new_attrs = dict(attrs)
    assert len(desired_layouts) == 2, "A desired layout is expected for both of nn.conv2d's inputs"
    # desired_layouts is (data_layout, kernel_layout); a "default" kernel
    # layout means "derive a matching kernel layout below".
    desired_data_layout, desired_kernel_layout = map(str, desired_layouts)
    assert desired_data_layout != "default", "Data layout cannot be default"
    new_attrs["data_layout"] = desired_data_layout
    if desired_kernel_layout != "default":
        new_attrs["kernel_layout"] = desired_kernel_layout
        return relay.nn.conv2d(data, weight, **new_attrs)
    # Handle default kernel layouts
    if desired_data_layout == "NCHW":
        new_attrs["kernel_layout"] = "OIHW"
        return relay.nn.conv2d(data, weight, **new_attrs)
    elif desired_data_layout == "NHWC":
        # Check for depthwise convolution.
        # NOTE(review): depthwise conv is paired with HWOI, regular with HWIO.
        data_info, weight_info = tinfos
        if is_depthwise_conv2d(
            data_info.shape,
            attrs["data_layout"],
            weight_info.shape,
            attrs["kernel_layout"],
            attrs["groups"],
        ):
            new_attrs["kernel_layout"] = "HWOI"
        else:
            new_attrs["kernel_layout"] = "HWIO"
        return relay.nn.conv2d(data, weight, **new_attrs)
    elif desired_data_layout == "HWNC":
        new_attrs["kernel_layout"] = "HWOI"
        return relay.nn.conv2d(data, weight, **new_attrs)
    raise ValueError("Layout %s is not yet supported." % desired_data_layout)
# conv2d_transpose
reg.register_strategy("nn.conv2d_transpose", strategy.conv2d_transpose_strategy)
reg.register_pattern("nn.conv2d_transpose", OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_legalize("nn.conv2d_transpose")
def legalize_conv2d_transpose(attrs, inputs, types):
    """Legalize conv2d_transpose op.
    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of current Transposed convolution
    inputs : list of tvm.relay.Expr
        The args of the Relay expr to be legalized
    types : list of types
        List of input and output types
    Returns
    -------
    result : tvm.relay.Expr
        The legalized expr
    """
    # Pure delegation: targets hook their legalization rewrites into TOPI.
    return topi.nn.conv2d_transpose_legalize(attrs, inputs, types)
@reg.register_convert_op_layout("nn.conv2d_transpose")
def convert_conv2d_transpose(attrs, inputs, tinfos, desired_layouts):
    """Convert Layout pass registration for conv2d_transpose op.
    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of current convolution
    inputs : list of tvm.relay.Expr
        The args of the Relay expr to be legalized
    tinfos : list of types
        List of input and output types
    desired_layouts : list of layout strings
        List of layouts defining our desired
        layout for the data and kernel inputs respectively.
    Returns
    -------
    result : tvm.relay.Expr
        The transformed expr
    """
    # pylint: disable=import-outside-toplevel
    from tvm import relay
    data, weight = inputs
    new_attrs = dict(attrs)
    # NOTE(review): the assert message below says nn.conv2d although this hook
    # is for nn.conv2d_transpose; kept as-is since it is runtime-visible text.
    assert len(desired_layouts) == 2, "A desired layout is expected for both of nn.conv2d's inputs"
    # A "default" kernel layout means "derive one from the data layout below".
    desired_data_layout, desired_kernel_layout = map(str, desired_layouts)
    assert desired_data_layout != "default", "Data layout cannot be default"
    new_attrs["data_layout"] = desired_data_layout
    if desired_kernel_layout != "default":
        new_attrs["kernel_layout"] = desired_kernel_layout
        return relay.nn.conv2d_transpose(data, weight, **new_attrs)
    # Handle default kernel layouts
    if desired_data_layout == "NCHW":
        new_attrs["kernel_layout"] = "OIHW"
        return relay.nn.conv2d_transpose(data, weight, **new_attrs)
    elif desired_data_layout == "NHWC":
        new_attrs["kernel_layout"] = "HWIO"
        return relay.nn.conv2d_transpose(data, weight, **new_attrs)
    raise ValueError("Layout %s is not yet supported." % desired_data_layout)
# conv3d_transpose
reg.register_strategy("nn.conv3d_transpose", strategy.conv3d_transpose_strategy)
reg.register_pattern("nn.conv3d_transpose", OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_legalize("nn.conv3d_transpose")
def legalize_conv3d_transpose(attrs, inputs, types):
    """Legalize conv3d_transpose op.
    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of current Transposed convolution
    inputs : list of tvm.relay.Expr
        The args of the Relay expr to be legalized
    types : list of types
        List of input and output types
    Returns
    -------
    result : tvm.relay.Expr
        The legalized expr
    """
    # Pure delegation: targets hook their legalization rewrites into TOPI.
    return topi.nn.conv3d_transpose_legalize(attrs, inputs, types)
# conv3d
reg.register_strategy("nn.conv3d", strategy.conv3d_strategy)
reg.register_pattern("nn.conv3d", OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_alter_op_layout("nn.conv3d")
def alter_op_layout_conv3d(attrs, inputs, tinfos, out_type):
    """Alternate the layout of conv3d"""
    # Pure delegation: the target-specific layout rewrite lives in TOPI.
    return topi.nn.conv3d_alter_layout(attrs, inputs, tinfos, out_type)
@reg.register_convert_op_layout("nn.conv3d")
def convert_conv3d(attrs, inputs, tinfos, desired_layouts):
    """Convert Layout pass registration for conv3d op.
    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of current convolution
    inputs : list of tvm.relay.Expr
        The args of the Relay expr to be legalized
    tinfos : list of types
        List of input and output types
    desired_layouts : list of layout strings
        List of layouts defining our desired
        layout for the data and kernel inputs respectively.
    Returns
    -------
    result : tvm.relay.Expr
        The transformed expr
    """
    # pylint: disable=import-outside-toplevel
    from tvm import relay
    data, weight = inputs
    new_attrs = dict(attrs)
    assert len(desired_layouts) == 2, "A desired layout is expected for both of nn.conv3d's inputs"
    # A "default" kernel layout means "derive one from the data layout below".
    desired_data_layout, desired_kernel_layout = map(str, desired_layouts)
    assert desired_data_layout != "default", "Data layout cannot be default"
    new_attrs["data_layout"] = desired_data_layout
    if desired_kernel_layout != "default":
        new_attrs["kernel_layout"] = desired_kernel_layout
        return relay.nn.conv3d(data, weight, **new_attrs)
    # Handle default kernel layouts
    if desired_data_layout == "NCDHW":
        new_attrs["kernel_layout"] = "OIDHW"
        return relay.nn.conv3d(data, weight, **new_attrs)
    elif desired_data_layout == "NDHWC":
        new_attrs["kernel_layout"] = "DHWIO"
        return relay.nn.conv3d(data, weight, **new_attrs)
    raise ValueError("Layout %s is not yet supported" % desired_data_layout)
# conv3d_winograd related operators
reg.register_strategy(
"nn.contrib_conv3d_winograd_without_weight_transform",
strategy.conv3d_winograd_without_weight_transfrom_strategy,
)
reg.register_pattern(
"nn.contrib_conv3d_winograd_without_weight_transform", OpPattern.OUT_ELEMWISE_FUSABLE
)
@reg.register_compute("nn.contrib_conv3d_winograd_weight_transform")
def compute_contrib_conv3d_winograd_weight_transform(attrs, inputs, out_dtype):
    """Compute definition of contrib_conv3d_winograd_weight_transform"""
    tile_size = attrs.get_int("tile_size")
    return [topi.nn.conv3d_winograd_weight_transform(inputs[0], tile_size)]
reg.register_schedule(
"nn.contrib_conv3d_winograd_weight_transform",
strategy.schedule_conv3d_winograd_weight_transform,
)
reg.register_pattern("nn.contrib_conv3d_winograd_weight_transform", OpPattern.OUT_ELEMWISE_FUSABLE)
# conv1d_transpose
reg.register_strategy("nn.conv1d_transpose", strategy.conv1d_transpose_strategy)
reg.register_pattern("nn.conv1d_transpose", OpPattern.OUT_ELEMWISE_FUSABLE)
# bias_add
reg.register_injective_schedule("nn.bias_add")
reg.register_pattern("nn.bias_add", OpPattern.BROADCAST)
# max_pool1d
reg.register_schedule("nn.max_pool1d", strategy.schedule_pool)
reg.register_pattern("nn.max_pool1d", OpPattern.OUT_ELEMWISE_FUSABLE)
# max_pool2d
reg.register_schedule("nn.max_pool2d", strategy.schedule_pool)
reg.register_pattern("nn.max_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# max_pool3d
reg.register_schedule("nn.max_pool3d", strategy.schedule_pool)
reg.register_pattern("nn.max_pool3d", OpPattern.OUT_ELEMWISE_FUSABLE)
# avg_pool1d
reg.register_schedule("nn.avg_pool1d", strategy.schedule_pool)
reg.register_pattern("nn.avg_pool1d", OpPattern.OUT_ELEMWISE_FUSABLE)
# avg_pool2d
reg.register_schedule("nn.avg_pool2d", strategy.schedule_pool)
reg.register_pattern("nn.avg_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# avg_pool3d
reg.register_schedule("nn.avg_pool3d", strategy.schedule_pool)
reg.register_pattern("nn.avg_pool3d", OpPattern.OUT_ELEMWISE_FUSABLE)
# max_pool2d_grad
reg.register_schedule("nn.max_pool2d_grad", strategy.schedule_pool_grad)
reg.register_pattern("nn.max_pool2d_grad", OpPattern.OUT_ELEMWISE_FUSABLE)
# avg_pool2d_grad
reg.register_schedule("nn.avg_pool2d_grad", strategy.schedule_pool_grad)
reg.register_pattern("nn.avg_pool2d_grad", OpPattern.OUT_ELEMWISE_FUSABLE)
# global_max_pool2d
reg.register_schedule("nn.global_max_pool2d", strategy.schedule_adaptive_pool)
reg.register_pattern("nn.global_max_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# global_avg_pool2d
reg.register_schedule("nn.global_avg_pool2d", strategy.schedule_adaptive_pool)
reg.register_pattern("nn.global_avg_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# adaptive_max_pool2d
reg.register_schedule("nn.adaptive_max_pool2d", strategy.schedule_adaptive_pool)
reg.register_pattern("nn.adaptive_max_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# adaptive_avg_pool2d
reg.register_schedule("nn.adaptive_avg_pool2d", strategy.schedule_adaptive_pool)
reg.register_pattern("nn.adaptive_avg_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# adaptive_max_pool3d
reg.register_schedule("nn.adaptive_max_pool3d", strategy.schedule_adaptive_pool)
reg.register_pattern("nn.adaptive_max_pool3d", OpPattern.OUT_ELEMWISE_FUSABLE)
# adaptive_avg_pool3d
reg.register_schedule("nn.adaptive_avg_pool3d", strategy.schedule_adaptive_pool)
reg.register_pattern("nn.adaptive_avg_pool3d", OpPattern.OUT_ELEMWISE_FUSABLE)
# leaky_relu
reg.register_broadcast_schedule("nn.leaky_relu")
reg.register_pattern("nn.leaky_relu", OpPattern.ELEMWISE)
# prelu
reg.register_broadcast_schedule("nn.prelu")
reg.register_pattern("nn.prelu", OpPattern.BROADCAST)
# flatten
reg.register_broadcast_schedule("nn.batch_flatten")
reg.register_pattern("nn.batch_flatten", OpPattern.INJECTIVE)
# lrn
@reg.register_compute("nn.lrn")
def compute_lrn(attrs, inputs, out_dtype):
    """Compute definition of lrn (local response normalization)."""
    assert len(inputs) == 1
    result = topi.nn.lrn(inputs[0], attrs.size, attrs.axis, attrs.alpha, attrs.beta, attrs.bias)
    return [result]
reg.register_schedule("nn.lrn", strategy.schedule_lrn)
reg.register_pattern("nn.lrn", OpPattern.OPAQUE)
# upsampling
@reg.register_compute("nn.upsampling")
def compute_upsampling(attrs, inputs, out_dtype):
    """Compute definition of upsampling"""
    scale_h = attrs.scale_h
    scale_w = attrs.scale_w
    layout = attrs.layout
    method = attrs.method
    align_corners = attrs.align_corners
    return [topi.nn.upsampling(inputs[0], scale_h, scale_w, layout, method, align_corners)]
reg.register_injective_schedule("nn.upsampling")
# upsampling3d
@reg.register_compute("nn.upsampling3d")
def compute_upsampling3d(attrs, inputs, out_dtype):
    """Compute definition of upsampling3d"""
    scale_d = attrs.scale_d
    scale_h = attrs.scale_h
    scale_w = attrs.scale_w
    layout = attrs.layout
    method = attrs.method
    coordinate_transformation_mode = attrs.coordinate_transformation_mode
    return [
        topi.nn.upsampling3d(
            inputs[0], scale_d, scale_h, scale_w, layout, method, coordinate_transformation_mode
        )
    ]
reg.register_injective_schedule("nn.upsampling3d")
# pad
reg.register_broadcast_schedule("nn.pad")
# mirror_pad
@reg.register_compute("nn.mirror_pad")
def compute_mirror_pad(attrs, inputs, out_dtype):
    """Compute definition of mirror_pad"""
    # pad_width is a sequence of (before, after) pairs; unzip it into the
    # separate per-axis before/after tuples topi expects.
    pad_before, pad_after = list(zip(*attrs.pad_width))
    mode = attrs.mode
    out = topi.nn.mirror_pad(inputs[0], pad_before=pad_before, pad_after=pad_after, mode=mode)
    return [out]
reg.register_broadcast_schedule("nn.mirror_pad")
# Hybrid-script shape function for mirror_pad: every axis grows by its
# (before, after) padding amounts.
@script
def _mirror_pad_func(data_shape, pad_width):
    out = output_tensor((data_shape.shape[0],), "int64")
    for i in const_range(data_shape.shape[0]):
        out[i] = data_shape[i] + int64(pad_width[i][0]) + int64(pad_width[i][1])
    return out
@reg.register_shape_func("nn.mirror_pad", False)
def mirror_pad_func(attrs, inputs, _):
    """Shape function for mirror_pad op."""
    # Materialize the pad_width attribute rows as plain Python tuples before
    # handing them to the hybrid-script function.
    pad_width_tuple = [get_const_tuple(p) for p in attrs.pad_width]
    return [_mirror_pad_func(inputs[0], convert(pad_width_tuple))]
# conv2d_winograd related operators
reg.register_strategy(
"nn.contrib_conv2d_winograd_without_weight_transform",
strategy.conv2d_winograd_without_weight_transfrom_strategy,
)
reg.register_pattern(
"nn.contrib_conv2d_winograd_without_weight_transform", OpPattern.OUT_ELEMWISE_FUSABLE
)
# conv2d_gemm related operators
reg.register_strategy(
"nn.contrib_conv2d_gemm_without_weight_transform",
strategy.conv2d_gemm_without_weight_transform_strategy,
)
reg.register_pattern(
"nn.contrib_conv2d_gemm_without_weight_transform", OpPattern.OUT_ELEMWISE_FUSABLE
)
@reg.register_compute("nn.contrib_conv2d_gemm_weight_transform")
def compute_contrib_conv2d_gemm_weight_transform(attrs, inputs, out_dtype):
    """Compute definition of contrib_conv2d_gemm_weight_transform"""
    return [topi.nn.conv2d_gemm_weight_transform(inputs[0], attrs.tile_rows, attrs.tile_cols)]
reg.register_schedule(
"nn.contrib_conv2d_gemm_weight_transform", strategy.schedule_conv2d_gemm_weight_transform
)
reg.register_pattern("nn.contrib_conv2d_gemm_weight_transform", OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_compute("nn.contrib_conv2d_winograd_weight_transform")
def compute_contrib_conv2d_winograd_weight_transform(attrs, inputs, out_dtype):
    """Compute definition of contrib_conv2d_winograd_weight_transform"""
    tile_size = attrs.get_int("tile_size")
    return [topi.nn.conv2d_winograd_weight_transform(inputs[0], tile_size)]
reg.register_schedule(
"nn.contrib_conv2d_winograd_weight_transform",
strategy.schedule_conv2d_winograd_weight_transform,
)
reg.register_pattern("nn.contrib_conv2d_winograd_weight_transform", OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_compute("nn.contrib_conv2d_winograd_nnpack_weight_transform")
def compute_contrib_conv2d_winograd_nnpack_weight_transform(attrs, inputs, out_dtype):
    """Compute definition of contrib_conv2d_winograd_nnpack_weight_transform"""
    return [
        topi.nn.conv2d_winograd_nnpack_weight_transform(
            inputs[0], attrs.get_int("convolution_algorithm"), out_dtype
        )
    ]
reg.register_schedule(
"nn.contrib_conv2d_winograd_nnpack_weight_transform",
strategy.schedule_conv2d_winograd_nnpack_weight_transform,
)
reg.register_pattern("nn.contrib_conv2d_winograd_nnpack_weight_transform", OpPattern.OPAQUE)
# conv2d_NCHWc
reg.register_strategy("nn.contrib_conv2d_NCHWc", strategy.conv2d_NCHWc_strategy)
reg.register_pattern("nn.contrib_conv2d_NCHWc", OpPattern.OUT_ELEMWISE_FUSABLE)
# depthwise_conv2d_NCHWc
reg.register_strategy("nn.contrib_depthwise_conv2d_NCHWc", strategy.depthwise_conv2d_NCHWc_strategy)
reg.register_pattern("nn.contrib_depthwise_conv2d_NCHWc", OpPattern.OUT_ELEMWISE_FUSABLE)
# deformable_conv2d
reg.register_strategy("nn.deformable_conv2d", strategy.deformable_conv2d_strategy)
reg.register_pattern("nn.deformable_conv2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# bitpack
@reg.register_compute("nn.bitpack")
def compute_bitpack(attrs, inputs, out_dtype):
"""Compute definition for bitpack"""
bits = attrs.bits
pack_axis = attrs.pack_axis
bit_axis = attrs.bit_axis
pack_type = attrs.pack_type
name = attrs.name
out = topi.nn.bitpack(inputs[0], bits, pack_axis, bit_axis, pack_type, name)
return [out]
reg.register_schedule("nn.bitpack", strategy.schedule_bitpack)
reg.register_pattern("nn.bitpack", OpPattern.INJECTIVE)
# bitserial_conv2d
reg.register_strategy("nn.bitserial_conv2d", strategy.bitserial_conv2d_strategy)
reg.register_pattern("nn.bitserial_conv2d", OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_legalize("nn.bitserial_conv2d")
def legalize_bitserial_conv2d(attrs, inputs, types):
    """Legalize bitserial_conv2d op (delegates to the topi legalization).

    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of current convolution
    inputs : list of tvm.relay.Expr
        The args of the Relay expr to be legalized
    types : list of types
        List of input and output types

    Returns
    -------
    result : tvm.relay.Expr
        The legalized expr
    """
    legalized = topi.nn.bitserial_conv2d_legalize(attrs, inputs, types)
    return legalized
# bitserial_dense
reg.register_strategy("nn.bitserial_dense", strategy.bitserial_dense_strategy)
reg.register_pattern("nn.bitserial_dense", reg.OpPattern.OUT_ELEMWISE_FUSABLE)
# cross_entropy
@reg.register_compute("nn.cross_entropy")
def compute_cross_entropy(attrs, inputs, out_dtype):
    """Compute definition of cross_entropy: -sum(log(x) * y) / x.shape[0]."""
    x, y = inputs
    return [-topi.sum(topi.log(x) * y) / x.shape[0]]
reg.register_reduce_schedule("nn.cross_entropy")
reg.register_pattern("nn.cross_entropy", OpPattern.OPAQUE)
# dilate
@reg.register_compute("nn.dilate")
def compute_dilate(attrs, inputs, out_dtype):
    """Compute definition of dilate"""
    return [topi.nn.dilate(inputs[0], attrs.strides, attrs.dilation_value)]
reg.register_broadcast_schedule("nn.dilate")
reg.register_pattern("nn.dilate", OpPattern.INJECTIVE)
# cross_entropy_with_logits
@reg.register_compute("nn.cross_entropy_with_logits")
def compute_cross_entropy_with_logits(attrs, inputs, out_dtype):
    """Compute definition of cross_entropy_with_logits: -sum(x * y) / x.shape[0]."""
    x, y = inputs
    return [-topi.sum(x * y) / x.shape[0]]
reg.register_reduce_schedule("nn.cross_entropy_with_logits")
reg.register_pattern("nn.cross_entropy_with_logits", OpPattern.OPAQUE)
# depth_to_space
@reg.register_compute("nn.depth_to_space")
def compute_depth_to_space(attrs, inputs, out_dtype):
    """Compute definition of depth_to_space"""
    block_size = attrs.block_size
    layout = attrs.layout
    mode = attrs.mode
    return [topi.nn.depth_to_space(inputs[0], block_size, layout=layout, mode=mode)]
reg.register_injective_schedule("nn.depth_to_space")
reg.register_pattern("nn.depth_to_space", OpPattern.INJECTIVE)
# space_to_depth
@reg.register_compute("nn.space_to_depth")
def compute_space_to_depth(attrs, inputs, out_dtype):
    """Compute definition of space_to_depth"""
    block_size = attrs.block_size
    layout = attrs.layout
    return [topi.nn.space_to_depth(inputs[0], block_size, layout=layout)]
reg.register_injective_schedule("nn.space_to_depth")
reg.register_pattern("nn.space_to_depth", OpPattern.INJECTIVE)
# correlation
reg.register_strategy("nn.correlation", strategy.correlation_strategy)
reg.register_pattern("nn.correlation", OpPattern.OUT_ELEMWISE_FUSABLE)
# space_to_batch_nd and batch_to_space_nd
reg.register_injective_schedule("nn.space_to_batch_nd")
reg.register_injective_schedule("nn.batch_to_space_nd")
#####################
# Shape functions #
#####################
# Hybrid-script shape function shared by conv1d/conv2d/conv3d. Assumes a
# channel-first data layout (NCW/NCHW/NCDHW) and output channels on kernel
# axis 0 — that is what out[0]/out[1] and the axis-2 spatial loop encode.
@script
def _conv_shape_func(dshape, kshape, strides, padding, dilation):
    out = output_tensor((dshape.shape[0],), "int64")
    out[0] = dshape[0]  # batch passes through
    out[1] = kshape[0]  # output channels from the kernel
    for i in const_range(dshape.shape[0] - 2):
        # Effective kernel extent once dilation is applied.
        dilated_k = (kshape[i + 2] - 1) * dilation[i] + 1
        out[i + 2] = (dshape[i + 2] + 2 * padding[i] - dilated_k) // strides[i] + 1
    return out
def conv_shape_func(attrs, inputs, _):
    """
    Shape function for conv1d/conv2d/conv3d ops (registered below for
    nn.conv1d, nn.conv2d and nn.conv3d). Assumes channel-first layouts,
    matching _conv_shape_func.
    """
    # NOTE: the previous docstring said "contrib_conv2d_NCHWc" — a
    # copy-paste error; the NCHWc variant has its own shape function.
    strides = get_const_tuple(attrs.strides)
    padding = get_const_tuple(attrs.padding)
    dilation = get_const_tuple(attrs.dilation)
    return [
        _conv_shape_func(
            inputs[0],
            inputs[1],
            convert(strides),
            convert(padding),
            convert(dilation),
        )
    ]
reg.register_shape_func("nn.conv1d", False, conv_shape_func)
reg.register_shape_func("nn.conv2d", False, conv_shape_func)
reg.register_shape_func("nn.conv3d", False, conv_shape_func)
# Hybrid-script shape function for conv2d in the blocked NCHWc layout:
# dshape is (N, ic_chunk, H, W, ic_bn); the result is
# (N, oc_chunk, OH, OW, oc_bn).
@script
def _conv2d_NCHWc_shape_func(dshape, kshape, strides, padding, dilation, oc_bn):
    out = output_tensor((dshape.shape[0],), "int64")
    ic_chunk = dshape[1]
    height = dshape[2]
    width = dshape[3]
    ic_bn = dshape[4]
    kheight = kshape[2]
    kwidth = kshape[3]
    # Effective kernel extents under dilation.
    dilated_kh = (kheight - 1) * dilation[0] + 1
    dilated_kw = (kwidth - 1) * dilation[1] + 1
    # The kernel may itself be blocked, so recover the flat output-channel
    # count from the total number of kernel elements.
    kflatten = int64(1)
    for i in const_range(kshape.shape[0]):
        kflatten *= kshape[i]
    oc = kflatten // (kheight * kwidth * ic_chunk * ic_bn)
    oc_chunk = oc // oc_bn
    out_height = (height + 2 * padding[0] - dilated_kh) // strides[0] + 1
    out_width = (width + 2 * padding[1] - dilated_kw) // strides[1] + 1
    out[0] = dshape[0]
    out[1] = oc_chunk
    out[2] = out_height
    out[3] = out_width
    out[4] = int64(oc_bn)
    return out
@reg.register_shape_func("nn.contrib_conv2d_NCHWc", False)
def conv2d_NCHWc_shape_func(attrs, inputs, _):
    """
    Shape function for contrib_conv2d_NCHWc op.
    """
    strides = get_const_tuple(attrs.strides)
    padding = get_const_tuple(attrs.padding)
    dilation = get_const_tuple(attrs.dilation)
    out_layout = attrs.out_layout
    # out_layout looks like "NCHW8c": the digits between the axis letters and
    # the trailing "c" are the output-channel block size.
    oc_bn = int(out_layout[4:-1])
    return [
        _conv2d_NCHWc_shape_func(
            inputs[0],
            inputs[1],
            convert(strides),
            convert(padding),
            convert(dilation),
            convert(oc_bn),
        )
    ]
# Hybrid-script shape function for transposed 2D convolution in NCHW layout:
# the spatial extents invert the forward-conv formula and add output_padding.
@script
def _conv2d_transpose_nchw_shape_func(dshape, kshape, strides, padding, dilation, output_padding):
    out = output_tensor((dshape.shape[0],), "int64")
    kheight = kshape[2]
    kwidth = kshape[3]
    dilated_kh = (kheight - 1) * dilation[0] + 1
    dilated_kw = (kwidth - 1) * dilation[1] + 1
    out_height = strides[0] * (dshape[2] - 1) + dilated_kh - 2 * padding[0] + output_padding[0]
    out_width = strides[1] * (dshape[3] - 1) + dilated_kw - 2 * padding[1] + output_padding[1]
    out[0] = dshape[0]
    out[1] = kshape[1]  # output channels taken from kernel axis 1 (IOHW-style kernel, presumably — confirm)
    out[2] = out_height
    out[3] = out_width
    return out
@reg.register_shape_func("nn.conv2d_transpose", False)
def conv2d_transpose_nchw_shape_func(attrs, inputs, _):
    """
    Shape function for conv2d_transpose op.
    """
    # Pull the layout-independent attributes out as plain tuples, then hand
    # everything to the hybrid-script shape function.
    call_args = [
        inputs[0],
        inputs[1],
        convert(get_const_tuple(attrs.strides)),
        convert(get_const_tuple(attrs.padding)),
        convert(get_const_tuple(attrs.dilation)),
        convert(get_const_tuple(attrs.output_padding)),
    ]
    return [_conv2d_transpose_nchw_shape_func(*call_args)]
# Hybrid-script shape function for 2D pooling: only the H and W axes change,
# every other axis passes through. `padding` is the normalized 4-element
# (top, left, bottom, right) form produced by pool2d_shape_func.
@script
def _pool2d_shape_func(data_shape, pool_size, strides, padding, height_axis, width_axis):
    out = output_tensor((data_shape.shape[0],), "int64")
    for i in const_range(data_shape.shape[0]):
        if i == height_axis:
            out[i] = (data_shape[i] + padding[0] + padding[2] - pool_size[0]) // strides[0] + 1
        elif i == width_axis:
            out[i] = (data_shape[i] + padding[1] + padding[3] - pool_size[1]) // strides[1] + 1
        else:
            out[i] = data_shape[i]
    return out
def pool2d_shape_func(attrs, inputs, _):
    """
    Shape function for pool2d op.
    """
    pool_size = get_const_tuple(attrs.pool_size)
    strides = get_const_tuple(attrs.strides)
    padding = get_const_tuple(attrs.padding)
    layout = attrs.layout
    height_axis = layout.index("H")
    width_axis = layout.index("W")
    # Normalize padding to the 4-element (top, left, bottom, right) form:
    # a single value applies to all four sides, a (h, w) pair to both sides
    # of each axis.
    if len(padding) == 1:
        padding = list(padding) * 4
    elif len(padding) == 2:
        padding = list(padding) * 2
    return [
        _pool2d_shape_func(
            inputs[0],
            convert(pool_size),
            convert(strides),
            convert(padding),
            convert(height_axis),
            convert(width_axis),
        )
    ]
reg.register_shape_func("nn.max_pool2d", False, pool2d_shape_func)
reg.register_shape_func("nn.avg_pool2d", False, pool2d_shape_func)
# Hybrid-script shape function for global 2D pooling: the spatial axes
# collapse to extent 1, all other axes pass through.
@script
def _global_pool2d_shape_func(data_shape, height_axis, width_axis):
    out = output_tensor((data_shape.shape[0],), "int64")
    for i in const_range(out.shape[0]):
        if i == height_axis or i == width_axis:
            out[i] = int64(1)
        else:
            out[i] = data_shape[i]
    return out
def global_pool2d_shape_func(attrs, inputs, _):
    """
    Shape function for global pool2d op.
    """
    layout = attrs.layout
    # Default both axes to 1, then record the position of "H" and "W" in the
    # layout string (the last occurrence wins, as in the original scan).
    height_axis = 1
    width_axis = 1
    for axis, letter in enumerate(layout):
        if letter == "H":
            height_axis = axis
        elif letter == "W":
            width_axis = axis
    return [_global_pool2d_shape_func(inputs[0], convert(height_axis), convert(width_axis))]
reg.register_shape_func("nn.global_max_pool2d", False, global_pool2d_shape_func)
reg.register_shape_func("nn.global_avg_pool2d", False, global_pool2d_shape_func)
# Hybrid-script shape function for batch_flatten: the batch axis is kept and
# every remaining axis is multiplied into a single flat dimension.
@script
def _batch_flatten_shape_func(data_shape):
    out = output_tensor((2,), "int64")
    out[0] = data_shape[0]
    out[1] = int64(1)
    for i in const_range(data_shape.shape[0] - 1):
        out[1] *= data_shape[i + 1]
    return out
@reg.register_shape_func("nn.batch_flatten", False)
def batch_flatten_shape_func(attrs, inputs, _):
    """Shape function for batch_flatten op."""
    data = inputs[0]
    return [_batch_flatten_shape_func(data)]
# Hybrid-script shape function for dense: the output keeps data's leading
# axes and replaces the last axis with the number of weight rows (units).
@script
def _dense_shape_func(data_shape, weight_shape):
    out = output_tensor((data_shape.shape[0],), "int64")
    for i in const_range(out.shape[0] - 1):
        out[i] = data_shape[i]
    out[out.shape[0] - 1] = weight_shape[0]
    return out
@reg.register_shape_func("nn.dense", False)
def dense_shape_func(attrs, inputs, _):
    """
    Shape function for dense op.
    """
    return [_dense_shape_func(inputs[0], inputs[1])]
# Hybrid-script shape function for batch_matmul: the batch axis broadcasts
# (max of the two operands), the middle axes come from data, and the last
# axis is weight's second-to-last extent (consistent with a transposed
# weight operand — confirm against the op's compute definition).
@script
def _batch_matmul_shape_func(data_shape, weight_shape):
    out = output_tensor((data_shape.shape[0],), "int64")
    for i in const_range(out.shape[0] - 1):
        if i == 0:
            out[i] = max(data_shape[i], weight_shape[i])
        else:
            out[i] = data_shape[i]
    out[out.shape[0] - 1] = weight_shape[weight_shape.shape[0] - 2]
    return out
@reg.register_shape_func("nn.batch_matmul", False)
def batch_matmul_shape_func(attrs, inputs, _):
    """
    Shape function for batch_matmul op.
    """
    # Docstring previously said "dense op" — copy-paste error, fixed.
    return [_batch_matmul_shape_func(inputs[0], inputs[1])]
# Hybrid-script shape function for pad: each axis grows by its
# (before, after) padding amounts.
@script
def _pad_shape_func(data_shape, pad_width):
    out = output_tensor((data_shape.shape[0],), "int64")
    for i in const_range(out.shape[0]):
        out[i] = data_shape[i] + pad_width[i][0] + pad_width[i][1]
    return out
@reg.register_shape_func("nn.pad", False)
def pad_shape_func(attrs, inputs, _):
    """
    Shape function for pad op.
    """
    # Materialize the 2-D pad_width attribute as plain Python tuples.
    pad_width = [get_const_tuple(pair) for pair in attrs.pad_width]
    return [_pad_shape_func(inputs[0], convert(pad_width))]
# Hybrid-script shape function for dilate: inserting (stride - 1) zeros
# between elements gives new_extent = (extent - 1) * stride + 1 per axis.
@script
def _dilate_shape_func(data_shape, strides):
    out = output_tensor((data_shape.shape[0],), "int64")
    for i in const_range(out.shape[0]):
        out[i] = (data_shape[i] - 1) * strides[i] + 1
    return out
@reg.register_shape_func("nn.dilate", False)
def dilate_shape_func(attrs, inputs, _):
    """
    Shape function for dilate op.
    """
    strides = convert(attrs.strides)
    return [_dilate_shape_func(inputs[0], strides)]
reg.register_shape_func("nn.bias_add", False, elemwise_shape_func)
reg.register_shape_func("nn.softmax", False, elemwise_shape_func)
reg.register_shape_func("nn.relu", False, elemwise_shape_func)
| 31.458569 | 102 | 0.728097 |
from __future__ import absolute_import
from tvm import topi
from tvm.topi.utils import get_const_tuple
from tvm.runtime import convert
from tvm.te.hybrid import script
from .. import op as reg
from .. import strategy
from ..op import OpPattern
from .._tensor import elemwise_shape_func
from ..strategy.generic import is_depthwise_conv2d
from ...transform import LayoutConfig
reg.register_broadcast_schedule("nn.relu")
reg.register_pattern("nn.relu", OpPattern.ELEMWISE)
reg.register_strategy("nn.softmax", strategy.softmax_strategy)
reg.register_pattern("nn.softmax", OpPattern.OPAQUE)
reg.register_schedule("nn.log_softmax", strategy.schedule_log_softmax)
reg.register_pattern("nn.log_softmax", OpPattern.OPAQUE)
reg.register_strategy("nn.dense", strategy.dense_strategy)
reg.register_pattern("nn.dense", reg.OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_compute("nn.fifo_buffer")
def compute_fifo_buffer(attrs, inputs, out_type):
return [topi.nn.fifo_buffer(inputs[0], inputs[1], axis=attrs.get_int("axis"))]
reg.register_injective_schedule("nn.fifo_buffer")
reg.register_pattern("nn.fifo_buffer", OpPattern.OPAQUE)
reg.register_strategy("nn.batch_matmul", strategy.batch_matmul_strategy)
reg.register_pattern("nn.batch_matmul", reg.OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_compute("nn.sparse_dense")
def compute_sparse_dense(attrs, inputs, out_type):
return [topi.nn.sparse_dense(inputs[0], inputs[1], inputs[2], inputs[3])]
reg.register_strategy("nn.sparse_dense", strategy.sparse_dense_strategy)
reg.register_pattern("nn.sparse_dense", reg.OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_alter_op_layout("nn.sparse_dense")
def alter_op_layout_sparse_dense(attrs, inputs, tinfos, out_type):
return topi.nn.sparse_dense_alter_layout(attrs, inputs, tinfos, out_type)
@reg.register_compute("nn.internal.sparse_dense_padded")
def compute_sparse_dense_padded(attrs, inputs, out_type):
raise NotImplementedError("nn.internal.sparse_dense_padded is only available on cuda")
reg.register_strategy("nn.internal.sparse_dense_padded", strategy.sparse_dense_padded_strategy)
reg.register_pattern("nn.internal.sparse_dense_padded", reg.OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_compute("nn.sparse_transpose")
def compute_sparse_transpose(attrs, inputs, out_type):
return topi.nn.sparse_transpose(inputs[0], inputs[1], inputs[2])
reg.register_schedule("nn.sparse_transpose", strategy.schedule_sparse_transpose)
reg.register_pattern("nn.sparse_transpose", reg.OpPattern.OUT_ELEMWISE_FUSABLE)
reg.register_strategy("nn.conv1d", strategy.conv1d_strategy)
reg.register_pattern("nn.conv1d", OpPattern.OUT_ELEMWISE_FUSABLE)
reg.register_strategy("nn.conv2d", strategy.conv2d_strategy)
reg.register_pattern("nn.conv2d", OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_alter_op_layout("nn.conv2d")
def alter_op_layout_conv2d(attrs, inputs, tinfos, out_type):
return topi.nn.conv2d_alter_layout(attrs, inputs, tinfos, out_type)
@reg.register_legalize("nn.conv2d")
def legalize_conv2d(attrs, inputs, types):
return topi.nn.conv2d_legalize(attrs, inputs, types)
@reg.register_convert_op_layout("nn.conv2d")
def convert_conv2d(attrs, inputs, tinfos, desired_layouts):
    """Convert Layout pass registration for conv2d op.

    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of current convolution
    inputs : list of tvm.relay.Expr
        The args of the Relay expr to be legalized
    tinfos : list of types
        List of input and output types
    desired_layouts : list of layout strings
        Desired layouts for the data and kernel inputs respectively.

    Returns
    -------
    result : tvm.relay.Expr
        The transformed expr
    """
    # pylint: disable=import-outside-toplevel
    from tvm import relay
    data, weight = inputs
    # An active LayoutConfig may mark individual layers to be skipped;
    # in that case re-emit the op with its original attributes.
    layout_config = LayoutConfig.current
    if layout_config is not None:
        skip_layer = layout_config.check_skip()
        if skip_layer:
            return relay.nn.conv2d(data, weight, **attrs)
    new_attrs = dict(attrs)
    assert len(desired_layouts) == 2, "A desired layout is expected for both of nn.conv2d's inputs"
    desired_data_layout, desired_kernel_layout = map(str, desired_layouts)
    assert desired_data_layout != "default", "Data layout cannot be default"
    new_attrs["data_layout"] = desired_data_layout
    if desired_kernel_layout != "default":
        # Caller pinned the kernel layout explicitly; use it verbatim.
        new_attrs["kernel_layout"] = desired_kernel_layout
        return relay.nn.conv2d(data, weight, **new_attrs)
    # Handle default kernel layouts
    if desired_data_layout == "NCHW":
        new_attrs["kernel_layout"] = "OIHW"
        return relay.nn.conv2d(data, weight, **new_attrs)
    elif desired_data_layout == "NHWC":
        # Check for depthwise convolution.
        data_info, weight_info = tinfos
        if is_depthwise_conv2d(
            data_info.shape,
            attrs["data_layout"],
            weight_info.shape,
            attrs["kernel_layout"],
            attrs["groups"],
        ):
            new_attrs["kernel_layout"] = "HWOI"
        else:
            new_attrs["kernel_layout"] = "HWIO"
        return relay.nn.conv2d(data, weight, **new_attrs)
    elif desired_data_layout == "HWNC":
        new_attrs["kernel_layout"] = "HWOI"
        return relay.nn.conv2d(data, weight, **new_attrs)
    raise ValueError("Layout %s is not yet supported." % desired_data_layout)
# conv2d_transpose
reg.register_strategy("nn.conv2d_transpose", strategy.conv2d_transpose_strategy)
reg.register_pattern("nn.conv2d_transpose", OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_legalize("nn.conv2d_transpose")
def legalize_conv2d_transpose(attrs, inputs, types):
return topi.nn.conv2d_transpose_legalize(attrs, inputs, types)
@reg.register_convert_op_layout("nn.conv2d_transpose")
def convert_conv2d_transpose(attrs, inputs, tinfos, desired_layouts):
    """Convert Layout pass registration for conv2d_transpose op.

    Rewrites the op to the desired data layout; if the kernel layout is
    left as "default", the conventional kernel layout for the requested
    data layout is chosen (NCHW -> OIHW, NHWC -> HWIO).
    """
    # pylint: disable=import-outside-toplevel
    from tvm import relay
    data, weight = inputs
    new_attrs = dict(attrs)
    # NOTE(review): message says nn.conv2d — upstream copy-paste; text left
    # unchanged since callers may match on it.
    assert len(desired_layouts) == 2, "A desired layout is expected for both of nn.conv2d's inputs"
    desired_data_layout, desired_kernel_layout = map(str, desired_layouts)
    assert desired_data_layout != "default", "Data layout cannot be default"
    new_attrs["data_layout"] = desired_data_layout
    if desired_kernel_layout != "default":
        # Caller pinned the kernel layout explicitly; use it verbatim.
        new_attrs["kernel_layout"] = desired_kernel_layout
        return relay.nn.conv2d_transpose(data, weight, **new_attrs)
    # Kernel layout left as "default": pick it from the data layout.
    if desired_data_layout == "NCHW":
        new_attrs["kernel_layout"] = "OIHW"
        return relay.nn.conv2d_transpose(data, weight, **new_attrs)
    elif desired_data_layout == "NHWC":
        new_attrs["kernel_layout"] = "HWIO"
        return relay.nn.conv2d_transpose(data, weight, **new_attrs)
    raise ValueError("Layout %s is not yet supported." % desired_data_layout)
reg.register_strategy("nn.conv3d_transpose", strategy.conv3d_transpose_strategy)
reg.register_pattern("nn.conv3d_transpose", OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_legalize("nn.conv3d_transpose")
def legalize_conv3d_transpose(attrs, inputs, types):
return topi.nn.conv3d_transpose_legalize(attrs, inputs, types)
reg.register_strategy("nn.conv3d", strategy.conv3d_strategy)
reg.register_pattern("nn.conv3d", OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_alter_op_layout("nn.conv3d")
def alter_op_layout_conv3d(attrs, inputs, tinfos, out_type):
return topi.nn.conv3d_alter_layout(attrs, inputs, tinfos, out_type)
@reg.register_convert_op_layout("nn.conv3d")
def convert_conv3d(attrs, inputs, tinfos, desired_layouts):
from tvm import relay
data, weight = inputs
new_attrs = dict(attrs)
assert len(desired_layouts) == 2, "A desired layout is expected for both of nn.conv3d's inputs"
desired_data_layout, desired_kernel_layout = map(str, desired_layouts)
assert desired_data_layout != "default", "Data layout cannot be default"
new_attrs["data_layout"] = desired_data_layout
if desired_kernel_layout != "default":
new_attrs["kernel_layout"] = desired_kernel_layout
return relay.nn.conv3d(data, weight, **new_attrs)
# Handle default kernel layouts
if desired_data_layout == "NCDHW":
new_attrs["kernel_layout"] = "OIDHW"
return relay.nn.conv3d(data, weight, **new_attrs)
elif desired_data_layout == "NDHWC":
new_attrs["kernel_layout"] = "DHWIO"
return relay.nn.conv3d(data, weight, **new_attrs)
raise ValueError("Layout %s is not yet supported" % desired_data_layout)
# conv3d_winograd related operators
reg.register_strategy(
"nn.contrib_conv3d_winograd_without_weight_transform",
strategy.conv3d_winograd_without_weight_transfrom_strategy,
)
reg.register_pattern(
"nn.contrib_conv3d_winograd_without_weight_transform", OpPattern.OUT_ELEMWISE_FUSABLE
)
@reg.register_compute("nn.contrib_conv3d_winograd_weight_transform")
def compute_contrib_conv3d_winograd_weight_transform(attrs, inputs, out_dtype):
out = topi.nn.conv3d_winograd_weight_transform(inputs[0], attrs.get_int("tile_size"))
return [out]
reg.register_schedule(
"nn.contrib_conv3d_winograd_weight_transform",
strategy.schedule_conv3d_winograd_weight_transform,
)
reg.register_pattern("nn.contrib_conv3d_winograd_weight_transform", OpPattern.OUT_ELEMWISE_FUSABLE)
# conv1d_transpose
reg.register_strategy("nn.conv1d_transpose", strategy.conv1d_transpose_strategy)
reg.register_pattern("nn.conv1d_transpose", OpPattern.OUT_ELEMWISE_FUSABLE)
# bias_add
reg.register_injective_schedule("nn.bias_add")
reg.register_pattern("nn.bias_add", OpPattern.BROADCAST)
# max_pool1d
reg.register_schedule("nn.max_pool1d", strategy.schedule_pool)
reg.register_pattern("nn.max_pool1d", OpPattern.OUT_ELEMWISE_FUSABLE)
# max_pool2d
reg.register_schedule("nn.max_pool2d", strategy.schedule_pool)
reg.register_pattern("nn.max_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# max_pool3d
reg.register_schedule("nn.max_pool3d", strategy.schedule_pool)
reg.register_pattern("nn.max_pool3d", OpPattern.OUT_ELEMWISE_FUSABLE)
# avg_pool1d
reg.register_schedule("nn.avg_pool1d", strategy.schedule_pool)
reg.register_pattern("nn.avg_pool1d", OpPattern.OUT_ELEMWISE_FUSABLE)
# avg_pool2d
reg.register_schedule("nn.avg_pool2d", strategy.schedule_pool)
reg.register_pattern("nn.avg_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# avg_pool3d
reg.register_schedule("nn.avg_pool3d", strategy.schedule_pool)
reg.register_pattern("nn.avg_pool3d", OpPattern.OUT_ELEMWISE_FUSABLE)
# max_pool2d_grad
reg.register_schedule("nn.max_pool2d_grad", strategy.schedule_pool_grad)
reg.register_pattern("nn.max_pool2d_grad", OpPattern.OUT_ELEMWISE_FUSABLE)
# avg_pool2d_grad
reg.register_schedule("nn.avg_pool2d_grad", strategy.schedule_pool_grad)
reg.register_pattern("nn.avg_pool2d_grad", OpPattern.OUT_ELEMWISE_FUSABLE)
# global_max_pool2d
reg.register_schedule("nn.global_max_pool2d", strategy.schedule_adaptive_pool)
reg.register_pattern("nn.global_max_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# global_avg_pool2d
reg.register_schedule("nn.global_avg_pool2d", strategy.schedule_adaptive_pool)
reg.register_pattern("nn.global_avg_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# adaptive_max_pool2d
reg.register_schedule("nn.adaptive_max_pool2d", strategy.schedule_adaptive_pool)
reg.register_pattern("nn.adaptive_max_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# adaptive_avg_pool2d
reg.register_schedule("nn.adaptive_avg_pool2d", strategy.schedule_adaptive_pool)
reg.register_pattern("nn.adaptive_avg_pool2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# adaptive_max_pool3d
reg.register_schedule("nn.adaptive_max_pool3d", strategy.schedule_adaptive_pool)
reg.register_pattern("nn.adaptive_max_pool3d", OpPattern.OUT_ELEMWISE_FUSABLE)
# adaptive_avg_pool3d
reg.register_schedule("nn.adaptive_avg_pool3d", strategy.schedule_adaptive_pool)
reg.register_pattern("nn.adaptive_avg_pool3d", OpPattern.OUT_ELEMWISE_FUSABLE)
# leaky_relu
reg.register_broadcast_schedule("nn.leaky_relu")
reg.register_pattern("nn.leaky_relu", OpPattern.ELEMWISE)
# prelu
reg.register_broadcast_schedule("nn.prelu")
reg.register_pattern("nn.prelu", OpPattern.BROADCAST)
# flatten
reg.register_broadcast_schedule("nn.batch_flatten")
reg.register_pattern("nn.batch_flatten", OpPattern.INJECTIVE)
# lrn
@reg.register_compute("nn.lrn")
def compute_lrn(attrs, inputs, out_dtype):
assert len(inputs) == 1
return [topi.nn.lrn(inputs[0], attrs.size, attrs.axis, attrs.alpha, attrs.beta, attrs.bias)]
reg.register_schedule("nn.lrn", strategy.schedule_lrn)
reg.register_pattern("nn.lrn", OpPattern.OPAQUE)
# upsampling
@reg.register_compute("nn.upsampling")
def compute_upsampling(attrs, inputs, out_dtype):
scale_h = attrs.scale_h
scale_w = attrs.scale_w
layout = attrs.layout
method = attrs.method
align_corners = attrs.align_corners
return [topi.nn.upsampling(inputs[0], scale_h, scale_w, layout, method, align_corners)]
reg.register_injective_schedule("nn.upsampling")
# upsampling3d
@reg.register_compute("nn.upsampling3d")
def compute_upsampling3d(attrs, inputs, out_dtype):
scale_d = attrs.scale_d
scale_h = attrs.scale_h
scale_w = attrs.scale_w
layout = attrs.layout
method = attrs.method
coordinate_transformation_mode = attrs.coordinate_transformation_mode
return [
topi.nn.upsampling3d(
inputs[0], scale_d, scale_h, scale_w, layout, method, coordinate_transformation_mode
)
]
reg.register_injective_schedule("nn.upsampling3d")
# pad
reg.register_broadcast_schedule("nn.pad")
# mirror_pad
@reg.register_compute("nn.mirror_pad")
def compute_mirror_pad(attrs, inputs, out_dtype):
pad_before, pad_after = list(zip(*attrs.pad_width))
mode = attrs.mode
out = topi.nn.mirror_pad(inputs[0], pad_before=pad_before, pad_after=pad_after, mode=mode)
return [out]
reg.register_broadcast_schedule("nn.mirror_pad")
# TVM hybrid-script: output shape adds both pad sides on every axis.
@script
def _mirror_pad_func(data_shape, pad_width):
    out = output_tensor((data_shape.shape[0],), "int64")
    for i in const_range(data_shape.shape[0]):
        out[i] = data_shape[i] + int64(pad_width[i][0]) + int64(pad_width[i][1])
    return out
@reg.register_shape_func("nn.mirror_pad", False)
def mirror_pad_func(attrs, inputs, _):
    """Shape function for nn.mirror_pad (data-independent, hence False)."""
    pad_width_tuple = [get_const_tuple(p) for p in attrs.pad_width]
    return [_mirror_pad_func(inputs[0], convert(pad_width_tuple))]
# conv2d_winograd related operators
reg.register_strategy(
    "nn.contrib_conv2d_winograd_without_weight_transform",
    # NOTE(review): "transfrom" typo matches the actual strategy function
    # name defined elsewhere; do not "fix" the spelling here alone.
    strategy.conv2d_winograd_without_weight_transfrom_strategy,
)
reg.register_pattern(
    "nn.contrib_conv2d_winograd_without_weight_transform", OpPattern.OUT_ELEMWISE_FUSABLE
)
# conv2d_gemm related operators
reg.register_strategy(
    "nn.contrib_conv2d_gemm_without_weight_transform",
    strategy.conv2d_gemm_without_weight_transform_strategy,
)
reg.register_pattern(
    "nn.contrib_conv2d_gemm_without_weight_transform", OpPattern.OUT_ELEMWISE_FUSABLE
)
@reg.register_compute("nn.contrib_conv2d_gemm_weight_transform")
def compute_contrib_conv2d_gemm_weight_transform(attrs, inputs, out_dtype):
    """Compute definition of the GEMM conv2d weight transform."""
    out = topi.nn.conv2d_gemm_weight_transform(inputs[0], attrs.tile_rows, attrs.tile_cols)
    return [out]
reg.register_schedule(
    "nn.contrib_conv2d_gemm_weight_transform", strategy.schedule_conv2d_gemm_weight_transform
)
reg.register_pattern("nn.contrib_conv2d_gemm_weight_transform", OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_compute("nn.contrib_conv2d_winograd_weight_transform")
def compute_contrib_conv2d_winograd_weight_transform(attrs, inputs, out_dtype):
    """Compute definition of the Winograd conv2d weight transform."""
    out = topi.nn.conv2d_winograd_weight_transform(inputs[0], attrs.get_int("tile_size"))
    return [out]
reg.register_schedule(
    "nn.contrib_conv2d_winograd_weight_transform",
    strategy.schedule_conv2d_winograd_weight_transform,
)
reg.register_pattern("nn.contrib_conv2d_winograd_weight_transform", OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_compute("nn.contrib_conv2d_winograd_nnpack_weight_transform")
def compute_contrib_conv2d_winograd_nnpack_weight_transform(attrs, inputs, out_dtype):
    """Compute definition of the NNPACK Winograd conv2d weight transform."""
    convolution_algorithm = attrs.get_int("convolution_algorithm")
    out = topi.nn.conv2d_winograd_nnpack_weight_transform(
        inputs[0], convolution_algorithm, out_dtype
    )
    return [out]
reg.register_schedule(
    "nn.contrib_conv2d_winograd_nnpack_weight_transform",
    strategy.schedule_conv2d_winograd_nnpack_weight_transform,
)
reg.register_pattern("nn.contrib_conv2d_winograd_nnpack_weight_transform", OpPattern.OPAQUE)
# conv2d_NCHWc
reg.register_strategy("nn.contrib_conv2d_NCHWc", strategy.conv2d_NCHWc_strategy)
reg.register_pattern("nn.contrib_conv2d_NCHWc", OpPattern.OUT_ELEMWISE_FUSABLE)
# depthwise_conv2d_NCHWc
reg.register_strategy("nn.contrib_depthwise_conv2d_NCHWc", strategy.depthwise_conv2d_NCHWc_strategy)
reg.register_pattern("nn.contrib_depthwise_conv2d_NCHWc", OpPattern.OUT_ELEMWISE_FUSABLE)
# deformable_conv2d
reg.register_strategy("nn.deformable_conv2d", strategy.deformable_conv2d_strategy)
reg.register_pattern("nn.deformable_conv2d", OpPattern.OUT_ELEMWISE_FUSABLE)
# bitpack
@reg.register_compute("nn.bitpack")
def compute_bitpack(attrs, inputs, out_dtype):
    """Compute definition of nn.bitpack (pack bits along a new axis)."""
    bits = attrs.bits
    pack_axis = attrs.pack_axis
    bit_axis = attrs.bit_axis
    pack_type = attrs.pack_type
    name = attrs.name
    out = topi.nn.bitpack(inputs[0], bits, pack_axis, bit_axis, pack_type, name)
    return [out]
reg.register_schedule("nn.bitpack", strategy.schedule_bitpack)
reg.register_pattern("nn.bitpack", OpPattern.INJECTIVE)
# bitserial_conv2d
reg.register_strategy("nn.bitserial_conv2d", strategy.bitserial_conv2d_strategy)
reg.register_pattern("nn.bitserial_conv2d", OpPattern.OUT_ELEMWISE_FUSABLE)
@reg.register_legalize("nn.bitserial_conv2d")
def legalize_bitserial_conv2d(attrs, inputs, types):
    """Legalize bitserial_conv2d (delegates to the topi legalization)."""
    return topi.nn.bitserial_conv2d_legalize(attrs, inputs, types)
# bitserial_dense
reg.register_strategy("nn.bitserial_dense", strategy.bitserial_dense_strategy)
# NOTE(review): uses reg.OpPattern while the rest of the file uses the bare
# OpPattern import; same enum, but inconsistent style.
reg.register_pattern("nn.bitserial_dense", reg.OpPattern.OUT_ELEMWISE_FUSABLE)
# cross_entropy
@reg.register_compute("nn.cross_entropy")
def compute_cross_entropy(attrs, inputs, out_dtype):
    """Compute definition: mean of -sum(log(x) * y) over the batch axis."""
    x, y = inputs
    return [-topi.sum(topi.log(x) * y) / x.shape[0]]
reg.register_reduce_schedule("nn.cross_entropy")
reg.register_pattern("nn.cross_entropy", OpPattern.OPAQUE)
# dilate
@reg.register_compute("nn.dilate")
def compute_dilate(attrs, inputs, out_dtype):
    """Compute definition of nn.dilate (insert dilation_value between elements)."""
    return [topi.nn.dilate(inputs[0], attrs.strides, attrs.dilation_value)]
reg.register_broadcast_schedule("nn.dilate")
reg.register_pattern("nn.dilate", OpPattern.INJECTIVE)
# cross_entropy_with_logits
@reg.register_compute("nn.cross_entropy_with_logits")
def compute_cross_entropy_with_logits(attrs, inputs, out_dtype):
    """Compute definition: like cross_entropy, but x already holds logits."""
    x, y = inputs
    return [-topi.sum(x * y) / x.shape[0]]
reg.register_reduce_schedule("nn.cross_entropy_with_logits")
reg.register_pattern("nn.cross_entropy_with_logits", OpPattern.OPAQUE)
# depth_to_space
@reg.register_compute("nn.depth_to_space")
def compute_depth_to_space(attrs, inputs, out_dtype):
    """Compute definition of nn.depth_to_space."""
    block_size = attrs.block_size
    layout = attrs.layout
    mode = attrs.mode
    return [topi.nn.depth_to_space(inputs[0], block_size, layout=layout, mode=mode)]
reg.register_injective_schedule("nn.depth_to_space")
reg.register_pattern("nn.depth_to_space", OpPattern.INJECTIVE)
# space_to_depth
@reg.register_compute("nn.space_to_depth")
def compute_space_to_depth(attrs, inputs, out_dtype):
    """Compute definition of nn.space_to_depth (inverse of depth_to_space)."""
    block_size = attrs.block_size
    layout = attrs.layout
    return [topi.nn.space_to_depth(inputs[0], block_size, layout=layout)]
reg.register_injective_schedule("nn.space_to_depth")
reg.register_pattern("nn.space_to_depth", OpPattern.INJECTIVE)
# correlation
reg.register_strategy("nn.correlation", strategy.correlation_strategy)
reg.register_pattern("nn.correlation", OpPattern.OUT_ELEMWISE_FUSABLE)
# space_to_batch_nd and batch_to_space_nd
reg.register_injective_schedule("nn.space_to_batch_nd")
reg.register_injective_schedule("nn.batch_to_space_nd")
#####################
# Shape functions #
#####################
# Hybrid-script shape function shared by conv1d/conv2d/conv3d:
# N and out-channels pass through, each spatial dim uses the standard
# conv output formula with symmetric padding.
@script
def _conv_shape_func(dshape, kshape, strides, padding, dilation):
    out = output_tensor((dshape.shape[0],), "int64")
    out[0] = dshape[0]
    out[1] = kshape[0]
    for i in const_range(dshape.shape[0] - 2):
        dilated_k = (kshape[i + 2] - 1) * dilation[i] + 1
        out[i + 2] = (dshape[i + 2] + 2 * padding[i] - dilated_k) // strides[i] + 1
    return out
def conv_shape_func(attrs, inputs, _):
    """Shape function shared by conv1d/conv2d/conv3d (channel-first layouts)."""
    converted_attrs = [
        convert(get_const_tuple(getattr(attrs, name)))
        for name in ("strides", "padding", "dilation")
    ]
    return [_conv_shape_func(inputs[0], inputs[1], *converted_attrs)]
reg.register_shape_func("nn.conv1d", False, conv_shape_func)
reg.register_shape_func("nn.conv2d", False, conv_shape_func)
reg.register_shape_func("nn.conv3d", False, conv_shape_func)
# Hybrid-script shape function for conv2d in the blocked NCHWc layout.
# The kernel's output-channel count is recovered from the flattened kernel
# size because the kernel layout is not known symbolically here.
@script
def _conv2d_NCHWc_shape_func(dshape, kshape, strides, padding, dilation, oc_bn):
    out = output_tensor((dshape.shape[0],), "int64")
    ic_chunk = dshape[1]
    height = dshape[2]
    width = dshape[3]
    ic_bn = dshape[4]
    kheight = kshape[2]
    kwidth = kshape[3]
    dilated_kh = (kheight - 1) * dilation[0] + 1
    dilated_kw = (kwidth - 1) * dilation[1] + 1
    kflatten = int64(1)
    for i in const_range(kshape.shape[0]):
        kflatten *= kshape[i]
    # total output channels = kernel elements / (per-output-channel elements)
    oc = kflatten // (kheight * kwidth * ic_chunk * ic_bn)
    oc_chunk = oc // oc_bn
    out_height = (height + 2 * padding[0] - dilated_kh) // strides[0] + 1
    out_width = (width + 2 * padding[1] - dilated_kw) // strides[1] + 1
    out[0] = dshape[0]
    out[1] = oc_chunk
    out[2] = out_height
    out[3] = out_width
    out[4] = int64(oc_bn)
    return out
@reg.register_shape_func("nn.contrib_conv2d_NCHWc", False)
def conv2d_NCHWc_shape_func(attrs, inputs, _):
    """Shape function for contrib_conv2d_NCHWc."""
    strides = get_const_tuple(attrs.strides)
    padding = get_const_tuple(attrs.padding)
    dilation = get_const_tuple(attrs.dilation)
    out_layout = attrs.out_layout
    # assumes out_layout has the form "NCHW<n>c" (e.g. "NCHW8c"), so the
    # block size is the digits between index 4 and the trailing 'c'
    # — TODO(review): confirm no other out_layout spellings reach here
    oc_bn = int(out_layout[4:-1])
    return [
        _conv2d_NCHWc_shape_func(
            inputs[0],
            inputs[1],
            convert(strides),
            convert(padding),
            convert(dilation),
            convert(oc_bn),
        )
    ]
# Hybrid-script shape function for transposed conv2d (NCHW):
# spatial dims use the standard deconvolution output formula.
@script
def _conv2d_transpose_nchw_shape_func(dshape, kshape, strides, padding, dilation, output_padding):
    out = output_tensor((dshape.shape[0],), "int64")
    kheight = kshape[2]
    kwidth = kshape[3]
    dilated_kh = (kheight - 1) * dilation[0] + 1
    dilated_kw = (kwidth - 1) * dilation[1] + 1
    out_height = strides[0] * (dshape[2] - 1) + dilated_kh - 2 * padding[0] + output_padding[0]
    out_width = strides[1] * (dshape[3] - 1) + dilated_kw - 2 * padding[1] + output_padding[1]
    out[0] = dshape[0]
    # kernel layout is IOHW for conv2d_transpose, so axis 1 holds out-channels
    out[1] = kshape[1]
    out[2] = out_height
    out[3] = out_width
    return out
@reg.register_shape_func("nn.conv2d_transpose", False)
def conv2d_transpose_nchw_shape_func(attrs, inputs, _):
    """Shape function for nn.conv2d_transpose (NCHW layout)."""
    converted_attrs = [
        convert(get_const_tuple(getattr(attrs, name)))
        for name in ("strides", "padding", "dilation", "output_padding")
    ]
    return [_conv2d_transpose_nchw_shape_func(inputs[0], inputs[1], *converted_attrs)]
# Hybrid-script shape function for 2D pooling: only the H and W axes shrink;
# padding is the 4-element (top, left, bottom, right) form.
@script
def _pool2d_shape_func(data_shape, pool_size, strides, padding, height_axis, width_axis):
    out = output_tensor((data_shape.shape[0],), "int64")
    for i in const_range(data_shape.shape[0]):
        if i == height_axis:
            out[i] = (data_shape[i] + padding[0] + padding[2] - pool_size[0]) // strides[0] + 1
        elif i == width_axis:
            out[i] = (data_shape[i] + padding[1] + padding[3] - pool_size[1]) // strides[1] + 1
        else:
            out[i] = data_shape[i]
    return out
def pool2d_shape_func(attrs, inputs, _):
    """Shape function shared by max_pool2d and avg_pool2d."""
    layout = attrs.layout
    padding = list(get_const_tuple(attrs.padding))
    # Normalize padding to the 4-element (top, left, bottom, right) form.
    if len(padding) == 1:
        padding = padding * 4
    elif len(padding) == 2:
        padding = padding + padding
    return [
        _pool2d_shape_func(
            inputs[0],
            convert(get_const_tuple(attrs.pool_size)),
            convert(get_const_tuple(attrs.strides)),
            convert(padding),
            convert(layout.index("H")),
            convert(layout.index("W")),
        )
    ]
reg.register_shape_func("nn.max_pool2d", False, pool2d_shape_func)
reg.register_shape_func("nn.avg_pool2d", False, pool2d_shape_func)
# Hybrid-script shape function for global pooling: H and W collapse to 1,
# every other axis passes through unchanged.
@script
def _global_pool2d_shape_func(data_shape, height_axis, width_axis):
    out = output_tensor((data_shape.shape[0],), "int64")
    for i in const_range(out.shape[0]):
        if i == height_axis or i == width_axis:
            out[i] = int64(1)
        else:
            out[i] = data_shape[i]
    return out
def global_pool2d_shape_func(attrs, inputs, _):
    """Shape function shared by global_max_pool2d and global_avg_pool2d."""
    # Default both axes to 1 (matches the original behavior when a letter
    # is absent from the layout string).
    height_axis = width_axis = 1
    for axis, letter in enumerate(attrs.layout):
        if letter == "H":
            height_axis = axis
        elif letter == "W":
            width_axis = axis
    return [_global_pool2d_shape_func(inputs[0], convert(height_axis), convert(width_axis))]
reg.register_shape_func("nn.global_max_pool2d", False, global_pool2d_shape_func)
reg.register_shape_func("nn.global_avg_pool2d", False, global_pool2d_shape_func)
# Hybrid-script shape function for batch_flatten: keep the batch axis and
# multiply all remaining axes into one.
@script
def _batch_flatten_shape_func(data_shape):
    out = output_tensor((2,), "int64")
    out[0] = data_shape[0]
    out[1] = int64(1)
    for i in const_range(data_shape.shape[0] - 1):
        out[1] *= data_shape[i + 1]
    return out
@reg.register_shape_func("nn.batch_flatten", False)
def batch_flatten_shape_func(attrs, inputs, _):
    """Shape function for nn.batch_flatten."""
    return [_batch_flatten_shape_func(inputs[0])]
# Hybrid-script shape function for dense: the last data axis is replaced by
# the weight's output dimension (weight layout is (out_features, in_features)).
@script
def _dense_shape_func(data_shape, weight_shape):
    out = output_tensor((data_shape.shape[0],), "int64")
    for i in const_range(out.shape[0] - 1):
        out[i] = data_shape[i]
    out[out.shape[0] - 1] = weight_shape[0]
    return out
@reg.register_shape_func("nn.dense", False)
def dense_shape_func(attrs, inputs, _):
    """Shape function for nn.dense."""
    ret = [_dense_shape_func(inputs[0], inputs[1])]
    return ret
# Hybrid-script shape function for batch_matmul: the batch axis broadcasts
# (max of the two inputs); the last axis comes from the weight's row count.
@script
def _batch_matmul_shape_func(data_shape, weight_shape):
    out = output_tensor((data_shape.shape[0],), "int64")
    for i in const_range(out.shape[0] - 1):
        if i == 0:
            out[i] = max(data_shape[i], weight_shape[i])
        else:
            out[i] = data_shape[i]
    out[out.shape[0] - 1] = weight_shape[weight_shape.shape[0] - 2]
    return out
@reg.register_shape_func("nn.batch_matmul", False)
def batch_matmul_shape_func(attrs, inputs, _):
    """Shape function for nn.batch_matmul."""
    ret = [_batch_matmul_shape_func(inputs[0], inputs[1])]
    return ret
# Hybrid-script shape function for nn.pad: add both pad sides on every axis.
@script
def _pad_shape_func(data_shape, pad_width):
    out = output_tensor((data_shape.shape[0],), "int64")
    for i in const_range(out.shape[0]):
        out[i] = data_shape[i] + pad_width[i][0] + pad_width[i][1]
    return out
@reg.register_shape_func("nn.pad", False)
def pad_shape_func(attrs, inputs, _):
    """Shape function for nn.pad."""
    pad_width = [get_const_tuple(pair) for pair in attrs.pad_width]
    return [_pad_shape_func(inputs[0], convert(pad_width))]
# Hybrid-script shape function for nn.dilate.
@script
def _dilate_shape_func(data_shape, strides):
    out = output_tensor((data_shape.shape[0],), "int64")
    for i in const_range(out.shape[0]):
        out[i] = (data_shape[i] - 1) * strides[i] + 1
    return out
@reg.register_shape_func("nn.dilate", False)
def dilate_shape_func(attrs, inputs, _):
    """Shape function for nn.dilate."""
    return [_dilate_shape_func(inputs[0], convert(attrs.strides))]
# Elementwise ops: output shape equals input shape.
reg.register_shape_func("nn.bias_add", False, elemwise_shape_func)
reg.register_shape_func("nn.softmax", False, elemwise_shape_func)
reg.register_shape_func("nn.relu", False, elemwise_shape_func)
| true | true |
f7115dacfbfb5e34e0212cad048528683fc48da9 | 77 | py | Python | tests/test_tensorflow_v2_examples.py | awerdich/TensorFlow_v2_examples | ca8fb57728a821fe53ae01248fc1d8b4a45a0074 | [
"MIT"
] | null | null | null | tests/test_tensorflow_v2_examples.py | awerdich/TensorFlow_v2_examples | ca8fb57728a821fe53ae01248fc1d8b4a45a0074 | [
"MIT"
] | null | null | null | tests/test_tensorflow_v2_examples.py | awerdich/TensorFlow_v2_examples | ca8fb57728a821fe53ae01248fc1d8b4a45a0074 | [
"MIT"
] | null | null | null |
from tensorflow_v2_examples.cli import main
def test_main():
    # Smoke test: the CLI entry point runs without error on an empty argv.
    main([])
from tensorflow_v2_examples.cli import main
def test_main():
main([])
| true | true |
f7115de338f1de8c6d119b74bc44f2877d482c1c | 15,511 | py | Python | ibis/backends/clickhouse/tests/test_functions.py | jreback/ibis | fdcca59b085416b1311eb268be3886abad1db230 | [
"Apache-2.0"
] | 1 | 2020-08-19T03:36:26.000Z | 2020-08-19T03:36:26.000Z | ibis/backends/clickhouse/tests/test_functions.py | jreback/ibis | fdcca59b085416b1311eb268be3886abad1db230 | [
"Apache-2.0"
] | null | null | null | ibis/backends/clickhouse/tests/test_functions.py | jreback/ibis | fdcca59b085416b1311eb268be3886abad1db230 | [
"Apache-2.0"
] | 2 | 2020-11-27T22:21:50.000Z | 2021-04-03T09:36:25.000Z | import math
import operator
from datetime import date, datetime
from operator import methodcaller
import pandas as pd
import pandas.testing as tm
import pytest
from pytest import param
import ibis
import ibis.expr.datatypes as dt
import ibis.expr.types as ir
from ibis import literal as L
# Skip the whole module when the ClickHouse driver is not installed.
clickhouse_driver = pytest.importorskip('clickhouse_driver')
# Mark every test in this module as a ClickHouse backend test.
pytestmark = pytest.mark.clickhouse
@pytest.mark.parametrize(
    ('to_type', 'expected'),
    [
        ('int8', 'CAST(`double_col` AS Int8)'),
        ('int16', 'CAST(`double_col` AS Int16)'),
        ('float', 'CAST(`double_col` AS Float32)'),
        # alltypes.double_col is non-nullable
        (dt.Double(nullable=False), '`double_col`'),
    ],
)
def test_cast_double_col(alltypes, translate, to_type, expected):
    """Casting double_col compiles to the expected ClickHouse CAST SQL."""
    expr = alltypes.double_col.cast(to_type)
    assert translate(expr) == expected
@pytest.mark.parametrize(
    ('to_type', 'expected'),
    [
        ('int8', 'CAST(`string_col` AS Int8)'),
        ('int16', 'CAST(`string_col` AS Int16)'),
        (dt.String(nullable=False), '`string_col`'),
        ('timestamp', 'CAST(`string_col` AS DateTime)'),
        ('date', 'CAST(`string_col` AS Date)'),
    ],
)
def test_cast_string_col(alltypes, translate, to_type, expected):
    """Casting string_col compiles to the expected ClickHouse CAST SQL."""
    expr = alltypes.string_col.cast(to_type)
    assert translate(expr) == expected
@pytest.mark.xfail(
    raises=AssertionError, reason='Clickhouse doesn\'t have decimal type'
)
def test_decimal_cast():
    # Deliberate placeholder: always fails, recorded as xfail until
    # decimal support exists in the ClickHouse backend.
    assert False
@pytest.mark.parametrize(
    'column',
    [
        'index',
        'Unnamed: 0',
        'id',
        'bool_col',
        'tinyint_col',
        'smallint_col',
        'int_col',
        'bigint_col',
        'float_col',
        'double_col',
        'date_string_col',
        'string_col',
        'timestamp_col',
        'year',
        'month',
    ],
)
def test_noop_cast(alltypes, translate, column):
    """Casting a column to its own type is a no-op and emits no CAST."""
    col = alltypes[column]
    result = col.cast(col.type())
    assert result.equals(col)
    assert translate(result) == '`{}`'.format(column)
def test_timestamp_cast_noop(alltypes, translate):
    """Casting to non-nullable timestamp elides the CAST only when the
    source column is already a timestamp."""
    target_type = dt.Timestamp(nullable=False)
    casted_timestamp = alltypes.timestamp_col.cast(target_type)
    casted_int = alltypes.int_col.cast(target_type)
    for expr in (casted_timestamp, casted_int):
        assert isinstance(expr, ir.TimestampColumn)
    assert translate(casted_timestamp) == '`timestamp_col`'
    assert translate(casted_int) == 'CAST(`int_col` AS DateTime)'
def test_timestamp_now(con, translate):
    """ibis.now() compiles to ClickHouse's now() function."""
    expr = ibis.now()
    # now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    assert translate(expr) == 'now()'
    # assert con.execute(expr) == now
@pytest.mark.parametrize(
    ('unit', 'expected'),
    [
        ('y', '2009-01-01'),
        # month truncation currently broken, hence the xfail
        param('m', '2009-05-01', marks=pytest.mark.xfail),
        ('d', '2009-05-17'),
        ('w', '2009-05-11'),
        ('h', '2009-05-17 12:00:00'),
        ('minute', '2009-05-17 12:34:00'),
    ],
)
def test_timestamp_truncate(con, translate, unit, expected):
    """Timestamp truncation to each unit yields the expected boundary."""
    stamp = ibis.timestamp('2009-05-17 12:34:56')
    expr = stamp.truncate(unit)
    assert con.execute(expr) == pd.Timestamp(expected)
@pytest.mark.parametrize(
    ('func', 'expected'),
    [
        (methodcaller('year'), 2015),
        (methodcaller('month'), 9),
        (methodcaller('day'), 1),
        (methodcaller('hour'), 14),
        (methodcaller('minute'), 48),
        (methodcaller('second'), 5),
    ],
)
def test_simple_datetime_operations(con, func, expected):
    """Datetime field extraction.

    A timestamp literal with fractional seconds must be rejected with
    ValueError; a whole-second literal must yield the expected field.

    Bug fix: the final comparison was a bare expression
    (``con.execute(func(value)) == expected``) whose result was discarded,
    so the test could never fail on a wrong field value. It now asserts.
    """
    value = ibis.timestamp('2015-09-01 14:48:05.359')
    with pytest.raises(ValueError):
        con.execute(func(value))
    value = ibis.timestamp('2015-09-01 14:48:05')
    assert con.execute(func(value)) == expected
@pytest.mark.parametrize(('value', 'expected'), [(0, None), (5.5, 5.5)])
def test_nullifzero(con, value, expected):
    """nullifzero maps zero to NULL and passes other values through."""
    outcome = con.execute(L(value).nullifzero())
    expect_null = expected is None
    assert pd.isnull(outcome) == expect_null
    if not expect_null:
        assert outcome == expected
@pytest.mark.parametrize(
    ('expr', 'expected'),
    [
        (L(None).isnull(), True),
        (L(1).isnull(), False),
        (L(None).notnull(), False),
        (L(1).notnull(), True),
    ],
)
def test_isnull_notnull(con, expr, expected):
    """isnull/notnull on literals evaluate correctly."""
    assert con.execute(expr) == expected
@pytest.mark.parametrize(
    ('expr', 'expected'),
    [
        (ibis.coalesce(5, None, 4), 5),
        (ibis.coalesce(ibis.NA, 4, ibis.NA), 4),
        (ibis.coalesce(ibis.NA, ibis.NA, 3.14), 3.14),
    ],
)
def test_coalesce(con, expr, expected):
    """coalesce returns the first non-null argument."""
    assert con.execute(expr) == expected
@pytest.mark.parametrize(
    ('expr', 'expected'),
    [
        (ibis.NA.fillna(5), 5),
        (L(5).fillna(10), 5),
        (L(5).nullif(5), None),
        (L(10).nullif(5), 10),
    ],
)
def test_fillna_nullif(con, expr, expected):
    """fillna replaces nulls; nullif nulls out matching values."""
    result = con.execute(expr)
    if expected is None:
        assert pd.isnull(result)
    else:
        assert result == expected
@pytest.mark.parametrize(
    ('value', 'expected'),
    [
        (L('foo_bar'), 'String'),
        (L(5), 'UInt8'),
        (L(1.2345), 'Float64'),
        (L(datetime(2015, 9, 1, hour=14, minute=48, second=5)), 'DateTime'),
        (L(date(2015, 9, 1)), 'Date'),
        param(
            ibis.NA,
            'Null',
            marks=pytest.mark.xfail(
                raises=AssertionError,
                reason=(
                    'Client/server version mismatch not handled in the '
                    'clickhouse driver'
                ),
            ),
        ),
    ],
)
def test_typeof(con, value, expected):
    """typeof() reports the literal's ClickHouse type name."""
    assert con.execute(value.typeof()) == expected
@pytest.mark.parametrize(('value', 'expected'), [('foo_bar', 7), ('', 0)])
def test_string_length(con, value, expected):
    """String length, including the empty string."""
    assert con.execute(L(value).length()) == expected
@pytest.mark.parametrize(
    ('op', 'expected'),
    [
        (methodcaller('substr', 0, 3), 'foo'),
        (methodcaller('substr', 4, 3), 'bar'),
        (methodcaller('substr', 1), 'oo_bar'),
    ],
)
def test_string_substring(con, op, expected):
    """substr on a literal uses 0-based start indexing."""
    value = L('foo_bar')
    assert con.execute(op(value)) == expected
def test_string_column_substring(con, alltypes, translate):
    """Column substr compiles to 1-based ClickHouse substring()."""
    expr = alltypes.string_col.substr(2)
    assert translate(expr) == 'substring(`string_col`, 2 + 1)'
    assert len(con.execute(expr))
    expr = alltypes.string_col.substr(0, 3)
    assert translate(expr) == 'substring(`string_col`, 0 + 1, 3)'
    assert len(con.execute(expr))
def test_string_reverse(con):
    """String reversal."""
    assert con.execute(L('foo').reverse()) == 'oof'
def test_string_upper(con):
    """Uppercasing."""
    assert con.execute(L('foo').upper()) == 'FOO'
def test_string_lower(con):
    """Lowercasing."""
    assert con.execute(L('FOO').lower()) == 'foo'
# NOTE(review): "lenght" is a typo in the test name; harmless for pytest
# discovery, left as-is to avoid churn.
def test_string_lenght(con):
    assert con.execute(L('FOO').length()) == 3
@pytest.mark.parametrize(
    ('value', 'op', 'expected'),
    [
        (L('foobar'), methodcaller('contains', 'bar'), True),
        (L('foobar'), methodcaller('contains', 'foo'), True),
        (L('foobar'), methodcaller('contains', 'baz'), False),
        # '%' and '_' are LIKE metacharacters; contains must treat them literally
        (L('100%'), methodcaller('contains', '%'), True),
        (L('a_b_c'), methodcaller('contains', '_'), True),
    ],
)
def test_string_contains(con, op, value, expected):
    """Substring containment, including LIKE metacharacters."""
    assert con.execute(op(value)) == expected
# TODO: clickhouse-driver escaping bug
def test_re_replace(con, translate):
    """Regex replacement: every-char duplication and anchored prefix insert."""
    expr1 = L('Hello, World!').re_replace('.', '\\\\0\\\\0')
    expr2 = L('Hello, World!').re_replace('^', 'here: ')
    assert con.execute(expr1) == 'HHeelllloo,, WWoorrlldd!!'
    assert con.execute(expr2) == 'here: Hello, World!'
@pytest.mark.parametrize(
    ('value', 'expected'),
    [(L('a'), 0), (L('b'), 1), (L('d'), -1)],  # TODO: what's the expected?
)
def test_find_in_set(con, value, expected, translate):
    """find_in_set returns a 0-based index, -1 when absent."""
    vals = list('abc')
    expr = value.find_in_set(vals)
    assert con.execute(expr) == expected
def test_string_column_find_in_set(con, alltypes, translate):
    """Column find_in_set compiles to indexOf(...) - 1."""
    s = alltypes.string_col
    vals = list('abc')
    expr = s.find_in_set(vals)
    assert translate(expr) == "indexOf(['a','b','c'], `string_col`) - 1"
    assert len(con.execute(expr))
@pytest.mark.parametrize(
    ('url', 'extract', 'expected'),
    [
        (L('https://www.cloudera.com'), 'HOST', 'www.cloudera.com'),
        (L('https://www.cloudera.com'), 'PROTOCOL', 'https'),
        (
            L('https://www.youtube.com/watch?v=kEuEcWfewf8&t=10'),
            'PATH',
            '/watch',
        ),
        (
            L('https://www.youtube.com/watch?v=kEuEcWfewf8&t=10'),
            'QUERY',
            'v=kEuEcWfewf8&t=10',
        ),
    ],
)
def test_parse_url(con, translate, url, extract, expected):
    """parse_url extracts HOST/PROTOCOL/PATH/QUERY components."""
    expr = url.parse_url(extract)
    assert con.execute(expr) == expected
def test_parse_url_query_parameter(con, translate):
    """parse_url('QUERY', key) extracts a single query parameter value."""
    url = L('https://www.youtube.com/watch?v=kEuEcWfewf8&t=10')
    expr = url.parse_url('QUERY', 't')
    assert con.execute(expr) == '10'
    expr = url.parse_url('QUERY', 'v')
    assert con.execute(expr) == 'kEuEcWfewf8'
@pytest.mark.parametrize(
    ('expr', 'expected'),
    [
        (L('foobar').find('bar'), 3),
        (L('foobar').find('baz'), -1),
        (L('foobar').like('%bar'), True),
        (L('foobar').like('foo%'), True),
        (L('foobar').like('%baz%'), False),
        # a list of patterns matches if ANY pattern matches
        (L('foobar').like(['%bar']), True),
        (L('foobar').like(['foo%']), True),
        (L('foobar').like(['%baz%']), False),
        (L('foobar').like(['%bar', 'foo%']), True),
        (L('foobarfoo').replace('foo', 'H'), 'HbarH'),
    ],
)
def test_string_find_like(con, expr, expected):
    """find (0-based, -1 on miss), LIKE (scalar and list), and replace."""
    assert con.execute(expr) == expected
def test_string_column_like(con, alltypes, translate):
    """Column LIKE compiles to one clause per pattern joined with OR."""
    expr = alltypes.string_col.like('foo%')
    assert translate(expr) == "`string_col` LIKE 'foo%'"
    assert len(con.execute(expr))
    expr = alltypes.string_col.like(['foo%', '%bar'])
    expected = "`string_col` LIKE 'foo%' OR `string_col` LIKE '%bar'"
    assert translate(expr) == expected
    assert len(con.execute(expr))
def test_string_column_find(con, alltypes, translate):
    """Column find compiles to position(...) - 1 for both literal and
    column needles, and the expression executes."""
    col = alltypes.string_col
    cases = [
        (col.find('a'), "position(`string_col`, 'a') - 1"),
        (col.find(col), "position(`string_col`, `string_col`) - 1"),
    ]
    for expr, expected_sql in cases:
        assert translate(expr) == expected_sql
        assert len(con.execute(expr))
@pytest.mark.parametrize(
    ('call', 'expected'),
    [
        (methodcaller('log'), 'log(`double_col`)'),
        (methodcaller('log2'), 'log2(`double_col`)'),
        (methodcaller('log10'), 'log10(`double_col`)'),
        (methodcaller('round'), 'round(`double_col`)'),
        (methodcaller('round', 0), 'round(`double_col`, 0)'),
        (methodcaller('round', 2), 'round(`double_col`, 2)'),
        (methodcaller('exp'), 'exp(`double_col`)'),
        (methodcaller('abs'), 'abs(`double_col`)'),
        (methodcaller('ceil'), 'ceil(`double_col`)'),
        (methodcaller('floor'), 'floor(`double_col`)'),
        (methodcaller('sqrt'), 'sqrt(`double_col`)'),
        (
            # sign(x) is emulated via x / |x| with zero-safe integer division
            methodcaller('sign'),
            'intDivOrZero(`double_col`, abs(`double_col`))',
        ),
    ],
)
def test_translate_math_functions(con, alltypes, translate, call, expected):
    """Math methods compile to the corresponding ClickHouse functions."""
    expr = call(alltypes.double_col)
    assert translate(expr) == expected
    assert len(con.execute(expr))
@pytest.mark.parametrize(
    ('expr', 'expected'),
    [
        (L(-5).abs(), 5),
        (L(5).abs(), 5),
        (L(5.5).round(), 6.0),
        (L(5.556).round(2), 5.56),
        (L(5.556).ceil(), 6.0),
        (L(5.556).floor(), 5.0),
        (L(5.556).exp(), math.exp(5.556)),
        (L(5.556).sign(), 1),
        (L(-5.556).sign(), -1),
        (L(0).sign(), 0),
        (L(5.556).sqrt(), math.sqrt(5.556)),
        (L(5.556).log(2), math.log(5.556, 2)),
        (L(5.556).ln(), math.log(5.556)),
        (L(5.556).log2(), math.log(5.556, 2)),
        (L(5.556).log10(), math.log10(5.556)),
    ],
)
def test_math_functions(con, expr, expected, translate):
    """Math functions on literals agree with Python's math module."""
    assert con.execute(expr) == expected
def test_greatest(con, alltypes, translate):
    """ibis.greatest compiles to ClickHouse greatest() for literal and
    column arguments, and executes."""
    cases = [
        (ibis.greatest(alltypes.int_col, 10), "greatest(`int_col`, 10)"),
        (
            ibis.greatest(alltypes.int_col, alltypes.bigint_col),
            "greatest(`int_col`, `bigint_col`)",
        ),
    ]
    for expr, expected_sql in cases:
        assert translate(expr) == expected_sql
        assert len(con.execute(expr))
def test_least(con, alltypes, translate):
    """ibis.least compiles to ClickHouse least() for literal and column
    arguments, and executes."""
    cases = [
        (ibis.least(alltypes.int_col, 10), "least(`int_col`, 10)"),
        (
            ibis.least(alltypes.int_col, alltypes.bigint_col),
            "least(`int_col`, `bigint_col`)",
        ),
    ]
    for expr, expected_sql in cases:
        assert translate(expr) == expected_sql
        assert len(con.execute(expr))
# TODO: clickhouse-driver escaping bug
@pytest.mark.parametrize(
    ('expr', 'expected'),
    [
        (L('abcd').re_search('[a-z]'), True),
        (L('abcd').re_search(r'[\\d]+'), False),
        (L('1222').re_search(r'[\\d]+'), True),
    ],
)
def test_regexp(con, expr, expected):
    """re_search returns whether the pattern matches anywhere."""
    assert con.execute(expr) == expected
@pytest.mark.parametrize(
    ('expr', 'expected'),
    [
        (L('abcd').re_extract('([a-z]+)', 0), 'abcd'),
        # (L('abcd').re_extract('(ab)(cd)', 1), 'cd'),
        # valid group number but no match => empty string
        (L('abcd').re_extract(r'(\\d)', 0), ''),
        # match but not a valid group number => NULL
        # (L('abcd').re_extract('abcd', 3), None),
    ],
)
def test_regexp_extract(con, expr, expected, translate):
    """re_extract returns the matched group, or '' when there is no match."""
    assert con.execute(expr) == expected
def test_column_regexp_extract(con, alltypes, translate):
    """Column re_extract compiles to extractAll(...)[index + 1]."""
    expected = r"extractAll(`string_col`, '[\d]+')[3 + 1]"
    expr = alltypes.string_col.re_extract(r'[\d]+', 3)
    assert translate(expr) == expected
    assert len(con.execute(expr))
def test_column_regexp_replace(con, alltypes, translate):
    """Column re_replace compiles to replaceRegexpAll(...)."""
    expected = r"replaceRegexpAll(`string_col`, '[\d]+', 'aaa')"
    expr = alltypes.string_col.re_replace(r'[\d]+', 'aaa')
    assert translate(expr) == expected
    assert len(con.execute(expr))
def test_numeric_builtins_work(con, alltypes, df, translate):
    """Executing double_col round-trips to the pandas column.

    NOTE(review): the expected side fills NaN with 0, implying the backend
    returns 0 (not NaN) for nulls in a non-nullable column — confirm.
    """
    expr = alltypes.double_col
    result = expr.execute()
    expected = df.double_col.fillna(0)
    tm.assert_series_equal(result, expected)
def test_null_column(alltypes, translate):
    """A column of all-NULL literals executes to an all-None Series."""
    t = alltypes
    nrows = t.count().execute()
    expr = t.mutate(na_column=ibis.NA).na_column
    result = expr.execute()
    expected = pd.Series([None] * nrows, name='na_column')
    tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
    ('attr', 'expected'),
    [
        (operator.methodcaller('year'), {2009, 2010}),
        (operator.methodcaller('month'), set(range(1, 13))),
        (operator.methodcaller('day'), set(range(1, 32))),
    ],
)
def test_date_extract_field(db, alltypes, attr, expected):
    """Distinct extracted date fields cover the expected value sets."""
    t = alltypes
    expr = attr(t.timestamp_col.cast('date')).distinct()
    result = expr.execute().astype(int)
    assert set(result) == expected
def test_timestamp_from_integer(con, alltypes, translate):
    """Integer-to-timestamp conversion compiles to toDateTime()."""
    # timestamp_col has datetime type
    expr = alltypes.int_col.to_timestamp()
    assert translate(expr) == 'toDateTime(`int_col`)'
    assert len(con.execute(expr))
def test_count_distinct_with_filter(alltypes):
    """Filtered nunique matches a pandas-side filtered nunique."""
    string_col = alltypes.string_col
    predicate = string_col.cast('int64') > 1
    result = string_col.nunique(where=predicate).execute()
    raw = string_col.execute()
    expected = raw[raw.astype('int64') > 1].nunique()
    assert result == expected
| 28.938433 | 76 | 0.597318 | import math
import operator
from datetime import date, datetime
from operator import methodcaller
import pandas as pd
import pandas.testing as tm
import pytest
from pytest import param
import ibis
import ibis.expr.datatypes as dt
import ibis.expr.types as ir
from ibis import literal as L
clickhouse_driver = pytest.importorskip('clickhouse_driver')
pytestmark = pytest.mark.clickhouse
@pytest.mark.parametrize(
('to_type', 'expected'),
[
('int8', 'CAST(`double_col` AS Int8)'),
('int16', 'CAST(`double_col` AS Int16)'),
('float', 'CAST(`double_col` AS Float32)'),
(dt.Double(nullable=False), '`double_col`'),
],
)
def test_cast_double_col(alltypes, translate, to_type, expected):
expr = alltypes.double_col.cast(to_type)
assert translate(expr) == expected
@pytest.mark.parametrize(
('to_type', 'expected'),
[
('int8', 'CAST(`string_col` AS Int8)'),
('int16', 'CAST(`string_col` AS Int16)'),
(dt.String(nullable=False), '`string_col`'),
('timestamp', 'CAST(`string_col` AS DateTime)'),
('date', 'CAST(`string_col` AS Date)'),
],
)
def test_cast_string_col(alltypes, translate, to_type, expected):
expr = alltypes.string_col.cast(to_type)
assert translate(expr) == expected
@pytest.mark.xfail(
raises=AssertionError, reason='Clickhouse doesn\'t have decimal type'
)
def test_decimal_cast():
assert False
@pytest.mark.parametrize(
'column',
[
'index',
'Unnamed: 0',
'id',
'bool_col',
'tinyint_col',
'smallint_col',
'int_col',
'bigint_col',
'float_col',
'double_col',
'date_string_col',
'string_col',
'timestamp_col',
'year',
'month',
],
)
def test_noop_cast(alltypes, translate, column):
col = alltypes[column]
result = col.cast(col.type())
assert result.equals(col)
assert translate(result) == '`{}`'.format(column)
def test_timestamp_cast_noop(alltypes, translate):
target = dt.Timestamp(nullable=False)
result1 = alltypes.timestamp_col.cast(target)
result2 = alltypes.int_col.cast(target)
assert isinstance(result1, ir.TimestampColumn)
assert isinstance(result2, ir.TimestampColumn)
assert translate(result1) == '`timestamp_col`'
assert translate(result2) == 'CAST(`int_col` AS DateTime)'
def test_timestamp_now(con, translate):
expr = ibis.now()
# now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
assert translate(expr) == 'now()'
# assert con.execute(expr) == now
@pytest.mark.parametrize(
('unit', 'expected'),
[
('y', '2009-01-01'),
param('m', '2009-05-01', marks=pytest.mark.xfail),
('d', '2009-05-17'),
('w', '2009-05-11'),
('h', '2009-05-17 12:00:00'),
('minute', '2009-05-17 12:34:00'),
],
)
def test_timestamp_truncate(con, translate, unit, expected):
stamp = ibis.timestamp('2009-05-17 12:34:56')
expr = stamp.truncate(unit)
assert con.execute(expr) == pd.Timestamp(expected)
@pytest.mark.parametrize(
('func', 'expected'),
[
(methodcaller('year'), 2015),
(methodcaller('month'), 9),
(methodcaller('day'), 1),
(methodcaller('hour'), 14),
(methodcaller('minute'), 48),
(methodcaller('second'), 5),
],
)
def test_simple_datetime_operations(con, func, expected):
value = ibis.timestamp('2015-09-01 14:48:05.359')
with pytest.raises(ValueError):
con.execute(func(value))
value = ibis.timestamp('2015-09-01 14:48:05')
con.execute(func(value)) == expected
@pytest.mark.parametrize(('value', 'expected'), [(0, None), (5.5, 5.5)])
def test_nullifzero(con, value, expected):
result = con.execute(L(value).nullifzero())
if expected is None:
assert pd.isnull(result)
else:
assert result == expected
@pytest.mark.parametrize(
    ('expr', 'expected'),
    [
        (L(None).isnull(), True),
        (L(1).isnull(), False),
        (L(None).notnull(), False),
        (L(1).notnull(), True),
    ],
)
def test_isnull_notnull(con, expr, expected):
    """NULL-ness predicates on literals evaluate to plain booleans."""
    assert con.execute(expr) == expected
@pytest.mark.parametrize(
    ('expr', 'expected'),
    [
        (ibis.coalesce(5, None, 4), 5),
        (ibis.coalesce(ibis.NA, 4, ibis.NA), 4),
        (ibis.coalesce(ibis.NA, ibis.NA, 3.14), 3.14),
    ],
)
def test_coalesce(con, expr, expected):
    """coalesce() returns the first non-NULL argument."""
    assert con.execute(expr) == expected
@pytest.mark.parametrize(
    ('expr', 'expected'),
    [
        (ibis.NA.fillna(5), 5),
        (L(5).fillna(10), 5),
        (L(5).nullif(5), None),
        (L(10).nullif(5), 10),
    ],
)
def test_fillna_nullif(con, expr, expected):
    """fillna() replaces NULL; nullif() yields NULL on an exact match."""
    result = con.execute(expr)
    # A NULL result round-trips through the driver as NaN/None.
    if expected is None:
        assert pd.isnull(result)
    else:
        assert result == expected
@pytest.mark.parametrize(
    ('value', 'expected'),
    [
        (L('foo_bar'), 'String'),
        (L(5), 'UInt8'),
        (L(1.2345), 'Float64'),
        (L(datetime(2015, 9, 1, hour=14, minute=48, second=5)), 'DateTime'),
        (L(date(2015, 9, 1)), 'Date'),
        param(
            ibis.NA,
            'Null',
            marks=pytest.mark.xfail(
                raises=AssertionError,
                reason=(
                    'Client/server version mismatch not handled in the '
                    'clickhouse driver'
                ),
            ),
        ),
    ],
)
def test_typeof(con, value, expected):
    """typeof() reports the ClickHouse type name of a literal."""
    assert con.execute(value.typeof()) == expected
@pytest.mark.parametrize(('value', 'expected'), [('foo_bar', 7), ('', 0)])
def test_string_length(con, value, expected):
    """length() counts characters, including the empty-string case."""
    assert con.execute(L(value).length()) == expected
@pytest.mark.parametrize(
    ('op', 'expected'),
    [
        (methodcaller('substr', 0, 3), 'foo'),
        (methodcaller('substr', 4, 3), 'bar'),
        (methodcaller('substr', 1), 'oo_bar'),
    ],
)
def test_string_substring(con, op, expected):
    """substr(start[, length]) uses 0-based start offsets."""
    value = L('foo_bar')
    assert con.execute(op(value)) == expected
def test_string_column_substring(con, alltypes, translate):
    """Column substr() compiles to substring() with a `+ 1` offset shift
    (ClickHouse substring() is 1-based while ibis is 0-based)."""
    expr = alltypes.string_col.substr(2)
    assert translate(expr) == 'substring(`string_col`, 2 + 1)'
    assert len(con.execute(expr))
    expr = alltypes.string_col.substr(0, 3)
    assert translate(expr) == 'substring(`string_col`, 0 + 1, 3)'
    assert len(con.execute(expr))
def test_string_reverse(con):
    """reverse() flips the characters of a string literal."""
    assert con.execute(L('foo').reverse()) == 'oof'
def test_string_upper(con):
    """upper() folds a literal to upper case."""
    assert con.execute(L('foo').upper()) == 'FOO'
def test_string_lower(con):
    """lower() folds a literal to lower case."""
    assert con.execute(L('FOO').lower()) == 'foo'
def test_string_length_simple(con):
    """length() of a plain literal.

    Renamed from the misspelled ``test_string_lenght``; a distinct name is
    used because a parametrized ``test_string_length`` already exists above.
    """
    assert con.execute(L('FOO').length()) == 3
@pytest.mark.parametrize(
    ('value', 'op', 'expected'),
    [
        (L('foobar'), methodcaller('contains', 'bar'), True),
        (L('foobar'), methodcaller('contains', 'foo'), True),
        (L('foobar'), methodcaller('contains', 'baz'), False),
        (L('100%'), methodcaller('contains', '%'), True),
        (L('a_b_c'), methodcaller('contains', '_'), True),
    ],
)
def test_string_contains(con, op, value, expected):
    """contains() handles plain needles and LIKE metacharacters (% and _).

    NOTE: the parameter order differs from the parametrize tuple; pytest
    matches arguments by name, so this is harmless.
    """
    assert con.execute(op(value)) == expected
# TODO: clickhouse-driver escaping bug
def test_re_replace(con, translate):
    """re_replace() supports both literal patterns and regex anchors."""
    expr1 = L('Hello, World!').re_replace('.', '\\\\0\\\\0')
    expr2 = L('Hello, World!').re_replace('^', 'here: ')
    assert con.execute(expr1) == 'HHeelllloo,, WWoorrlldd!!'
    assert con.execute(expr2) == 'here: Hello, World!'
@pytest.mark.parametrize(
    ('value', 'expected'),
    [(L('a'), 0), (L('b'), 1), (L('d'), -1)],  # TODO: what's the expected?
)
def test_find_in_set(con, value, expected, translate):
    """find_in_set() returns a 0-based index, or -1 when absent."""
    vals = list('abc')
    expr = value.find_in_set(vals)
    assert con.execute(expr) == expected
def test_string_column_find_in_set(con, alltypes, translate):
    """Column find_in_set() compiles to indexOf(...) - 1 (0-based)."""
    s = alltypes.string_col
    vals = list('abc')
    expr = s.find_in_set(vals)
    assert translate(expr) == "indexOf(['a','b','c'], `string_col`) - 1"
    assert len(con.execute(expr))
@pytest.mark.parametrize(
    ('url', 'extract', 'expected'),
    [
        (L('https://www.cloudera.com'), 'HOST', 'www.cloudera.com'),
        (L('https://www.cloudera.com'), 'PROTOCOL', 'https'),
        (
            L('https://www.youtube.com/watch?v=kEuEcWfewf8&t=10'),
            'PATH',
            '/watch',
        ),
        (
            L('https://www.youtube.com/watch?v=kEuEcWfewf8&t=10'),
            'QUERY',
            'v=kEuEcWfewf8&t=10',
        ),
    ],
)
def test_parse_url(con, translate, url, extract, expected):
    """parse_url() extracts HOST/PROTOCOL/PATH/QUERY components."""
    expr = url.parse_url(extract)
    assert con.execute(expr) == expected
def test_parse_url_query_parameter(con, translate):
    """parse_url('QUERY', key) pulls a single query parameter's value."""
    url = L('https://www.youtube.com/watch?v=kEuEcWfewf8&t=10')
    expr = url.parse_url('QUERY', 't')
    assert con.execute(expr) == '10'
    expr = url.parse_url('QUERY', 'v')
    assert con.execute(expr) == 'kEuEcWfewf8'
@pytest.mark.parametrize(
    ('expr', 'expected'),
    [
        (L('foobar').find('bar'), 3),
        (L('foobar').find('baz'), -1),
        (L('foobar').like('%bar'), True),
        (L('foobar').like('foo%'), True),
        (L('foobar').like('%baz%'), False),
        (L('foobar').like(['%bar']), True),
        (L('foobar').like(['foo%']), True),
        (L('foobar').like(['%baz%']), False),
        (L('foobar').like(['%bar', 'foo%']), True),
        (L('foobarfoo').replace('foo', 'H'), 'HbarH'),
    ],
)
def test_string_find_like(con, expr, expected):
    """find()/like()/replace() on literals; like() accepts a pattern list
    that matches when any pattern matches."""
    assert con.execute(expr) == expected
def test_string_column_like(con, alltypes, translate):
    """Column like() compiles to LIKE; a list of patterns becomes an OR."""
    expr = alltypes.string_col.like('foo%')
    assert translate(expr) == "`string_col` LIKE 'foo%'"
    assert len(con.execute(expr))
    expr = alltypes.string_col.like(['foo%', '%bar'])
    expected = "`string_col` LIKE 'foo%' OR `string_col` LIKE '%bar'"
    assert translate(expr) == expected
    assert len(con.execute(expr))
def test_string_column_find(con, alltypes, translate):
    """Column find() compiles to position(...) - 1 (0-based result)."""
    s = alltypes.string_col
    expr = s.find('a')
    assert translate(expr) == "position(`string_col`, 'a') - 1"
    assert len(con.execute(expr))
    expr = s.find(s)
    assert translate(expr) == "position(`string_col`, `string_col`) - 1"
    assert len(con.execute(expr))
@pytest.mark.parametrize(
    ('call', 'expected'),
    [
        (methodcaller('log'), 'log(`double_col`)'),
        (methodcaller('log2'), 'log2(`double_col`)'),
        (methodcaller('log10'), 'log10(`double_col`)'),
        (methodcaller('round'), 'round(`double_col`)'),
        (methodcaller('round', 0), 'round(`double_col`, 0)'),
        (methodcaller('round', 2), 'round(`double_col`, 2)'),
        (methodcaller('exp'), 'exp(`double_col`)'),
        (methodcaller('abs'), 'abs(`double_col`)'),
        (methodcaller('ceil'), 'ceil(`double_col`)'),
        (methodcaller('floor'), 'floor(`double_col`)'),
        (methodcaller('sqrt'), 'sqrt(`double_col`)'),
        (
            methodcaller('sign'),
            'intDivOrZero(`double_col`, abs(`double_col`))',
        ),
    ],
)
def test_translate_math_functions(con, alltypes, translate, call, expected):
    """Math methods on a column compile to the expected ClickHouse SQL
    and execute without error."""
    expr = call(alltypes.double_col)
    assert translate(expr) == expected
    assert len(con.execute(expr))
@pytest.mark.parametrize(
    ('expr', 'expected'),
    [
        (L(-5).abs(), 5),
        (L(5).abs(), 5),
        (L(5.5).round(), 6.0),
        (L(5.556).round(2), 5.56),
        (L(5.556).ceil(), 6.0),
        (L(5.556).floor(), 5.0),
        (L(5.556).exp(), math.exp(5.556)),
        (L(5.556).sign(), 1),
        (L(-5.556).sign(), -1),
        (L(0).sign(), 0),
        (L(5.556).sqrt(), math.sqrt(5.556)),
        (L(5.556).log(2), math.log(5.556, 2)),
        (L(5.556).ln(), math.log(5.556)),
        (L(5.556).log2(), math.log(5.556, 2)),
        (L(5.556).log10(), math.log10(5.556)),
    ],
)
def test_math_functions(con, expr, expected, translate):
    """Scalar math functions round-trip through the backend.

    NOTE(review): float results are compared with ``==``; this assumes the
    server reproduces the same IEEE-754 values as Python's math module.
    """
    assert con.execute(expr) == expected
def test_greatest(con, alltypes, translate):
    """greatest() compiles and executes for scalar and column arguments."""
    cases = (
        (10, "greatest(`int_col`, 10)"),
        (alltypes.bigint_col, "greatest(`int_col`, `bigint_col`)"),
    )
    for other, expected_sql in cases:
        expr = ibis.greatest(alltypes.int_col, other)
        assert translate(expr) == expected_sql
        assert len(con.execute(expr))
def test_least(con, alltypes, translate):
    """least() compiles and executes for scalar and column arguments."""
    cases = (
        (10, "least(`int_col`, 10)"),
        (alltypes.bigint_col, "least(`int_col`, `bigint_col`)"),
    )
    for other, expected_sql in cases:
        expr = ibis.least(alltypes.int_col, other)
        assert translate(expr) == expected_sql
        assert len(con.execute(expr))
@pytest.mark.parametrize(
    ('expr', 'expected'),
    [
        (L('abcd').re_search('[a-z]'), True),
        (L('abcd').re_search(r'[\\d]+'), False),
        (L('1222').re_search(r'[\\d]+'), True),
    ],
)
def test_regexp(con, expr, expected):
    """re_search() reports whether the pattern matches anywhere."""
    assert con.execute(expr) == expected
@pytest.mark.parametrize(
    ('expr', 'expected'),
    [
        (L('abcd').re_extract('([a-z]+)', 0), 'abcd'),
        (L('abcd').re_extract(r'(\\d)', 0), ''),
    ],
)
def test_regexp_extract(con, expr, expected, translate):
    """re_extract() yields the group text, or '' when nothing matches."""
    assert con.execute(expr) == expected
def test_column_regexp_extract(con, alltypes, translate):
    """Column re_extract(pat, i) compiles to extractAll(...)[i + 1]
    (ClickHouse arrays are 1-based)."""
    expected = r"extractAll(`string_col`, '[\d]+')[3 + 1]"
    expr = alltypes.string_col.re_extract(r'[\d]+', 3)
    assert translate(expr) == expected
    assert len(con.execute(expr))
def test_column_regexp_replace(con, alltypes, translate):
    """Column re_replace() compiles to replaceRegexpAll."""
    expected = r"replaceRegexpAll(`string_col`, '[\d]+', 'aaa')"
    expr = alltypes.string_col.re_replace(r'[\d]+', 'aaa')
    assert translate(expr) == expected
    assert len(con.execute(expr))
def test_numeric_builtins_work(con, alltypes, df, translate):
    """Executing a numeric column matches pandas, with NULLs read as 0."""
    expr = alltypes.double_col
    result = expr.execute()
    expected = df.double_col.fillna(0)
    tm.assert_series_equal(result, expected)
def test_null_column(alltypes, translate):
    """A mutated all-NULL column executes to a Series of None values."""
    t = alltypes
    nrows = t.count().execute()
    expr = t.mutate(na_column=ibis.NA).na_column
    result = expr.execute()
    expected = pd.Series([None] * nrows, name='na_column')
    tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
    ('attr', 'expected'),
    [
        (operator.methodcaller('year'), {2009, 2010}),
        (operator.methodcaller('month'), set(range(1, 13))),
        (operator.methodcaller('day'), set(range(1, 32))),
    ],
)
def test_date_extract_field(db, alltypes, attr, expected):
    """Date-part extraction over a column yields the expected value set."""
    t = alltypes
    expr = attr(t.timestamp_col.cast('date')).distinct()
    result = expr.execute().astype(int)
    assert set(result) == expected
def test_timestamp_from_integer(con, alltypes, translate):
    """int_col.to_timestamp() compiles to toDateTime."""
    expr = alltypes.int_col.to_timestamp()
    assert translate(expr) == 'toDateTime(`int_col`)'
    assert len(con.execute(expr))
def test_count_distinct_with_filter(alltypes):
    """nunique(where=...) matches a pandas-side filtered nunique."""
    expr = alltypes.string_col.nunique(
        where=alltypes.string_col.cast('int64') > 1
    )
    result = expr.execute()
    expected = alltypes.string_col.execute()
    expected = expected[expected.astype('int64') > 1].nunique()
    assert result == expected
| true | true |
f7115e3d4ccc57f2d750c77603f31e4073f99d90 | 13,084 | py | Python | psutil/tests/test_unicode.py | ulisesh/psutil | f7e898b0987f97352c7551bdd9b29b594e1236f6 | [
"BSD-3-Clause"
] | 2 | 2019-12-04T16:24:44.000Z | 2020-04-06T21:49:34.000Z | psutil/tests/test_unicode.py | vsajip/psutil | 2597253a31bc9f49772242cd249f30331d58fd7c | [
"BSD-3-Clause"
] | 7 | 2020-02-12T03:06:52.000Z | 2021-06-10T19:33:14.000Z | psutil/tests/test_unicode.py | vsajip/psutil | 2597253a31bc9f49772242cd249f30331d58fd7c | [
"BSD-3-Clause"
] | 2 | 2018-05-27T00:13:34.000Z | 2018-05-27T00:18:32.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Notes about unicode handling in psutil
======================================
In psutil these are the APIs returning or dealing with a string
('not tested' means they are not tested to deal with non-ASCII strings):
* Process.cmdline()
* Process.connections('unix')
* Process.cwd()
* Process.environ()
* Process.exe()
* Process.memory_maps()
* Process.name()
* Process.open_files()
* Process.username() (not tested)
* disk_io_counters() (not tested)
* disk_partitions() (not tested)
* disk_usage(str)
* net_connections('unix')
* net_if_addrs() (not tested)
* net_if_stats() (not tested)
* net_io_counters() (not tested)
* sensors_fans() (not tested)
* sensors_temperatures() (not tested)
* users() (not tested)
* WindowsService.binpath() (not tested)
* WindowsService.description() (not tested)
* WindowsService.display_name() (not tested)
* WindowsService.name() (not tested)
* WindowsService.status() (not tested)
* WindowsService.username() (not tested)
In here we create a unicode path with a funky non-ASCII name and (where
possible) make psutil return it back (e.g. on name(), exe(), open_files(),
etc.) and make sure that:
* psutil never crashes with UnicodeDecodeError
* the returned path matches
For a detailed explanation of how psutil handles unicode see:
- https://github.com/giampaolo/psutil/issues/1040
- http://psutil.readthedocs.io/#unicode
"""
import os
import traceback
import warnings
from contextlib import closing
from psutil import BSD
from psutil import MACOS
from psutil import OPENBSD
from psutil import POSIX
from psutil import WINDOWS
from psutil._compat import PY3
from psutil._compat import u
from psutil.tests import APPVEYOR
from psutil.tests import ASCII_FS
from psutil.tests import bind_unix_socket
from psutil.tests import chdir
from psutil.tests import copyload_shared_lib
from psutil.tests import create_exe
from psutil.tests import get_test_subprocess
from psutil.tests import HAS_CONNECTIONS_UNIX
from psutil.tests import HAS_ENVIRON
from psutil.tests import HAS_MEMORY_MAPS
from psutil.tests import mock
from psutil.tests import PYPY
from psutil.tests import reap_children
from psutil.tests import safe_mkdir
from psutil.tests import safe_rmpath as _safe_rmpath
from psutil.tests import skip_on_access_denied
from psutil.tests import TESTFILE_PREFIX
from psutil.tests import TESTFN
from psutil.tests import TESTFN_UNICODE
from psutil.tests import TRAVIS
from psutil.tests import unittest
from psutil.tests import unix_socket_path
import psutil
def safe_rmpath(path):
    """Remove *path*, tolerating spurious WindowsError failures on AppVeyor.

    On AppVeyor, removal occasionally fails after reap_children() for
    unclear reasons (possibly STILL_ACTIVE process handles); see
    https://ci.appveyor.com/project/giampaolo/psutil/build/job/
    jiq2cgd6stsbtn60 and
    https://github.com/giampaolo/psutil/blob/
    68c7a70728a31d8b8b58f4be6c4c0baa2f449eda/psutil/arch/
    windows/process_info.c#L146
    In that environment the error is logged instead of raised.
    """
    if not APPVEYOR:
        return _safe_rmpath(path)
    try:
        return _safe_rmpath(path)
    except WindowsError:
        traceback.print_exc()
def subprocess_supports_unicode(name):
    """Return True if both the fs and the subprocess module can
    deal with a unicode file name.
    """
    # Python 3 handles unicode file names natively; no probe needed.
    if PY3:
        return True
    try:
        # Probe by actually creating an executable with that name and
        # spawning it; UnicodeEncodeError anywhere means "unsupported".
        safe_rmpath(name)
        create_exe(name)
        get_test_subprocess(cmd=[name])
    except UnicodeEncodeError:
        return False
    else:
        return True
    finally:
        # Always terminate the probe subprocess, whatever the outcome.
        reap_children()
# An invalid unicode string: TESTFN plus the byte sequence b"f\xc0\x80",
# which is not valid UTF-8.  On Python 3 it is smuggled into a str via
# the 'surrogateescape' error handler (PEP 383).
if PY3:
    INVALID_NAME = (TESTFN.encode('utf8') + b"f\xc0\x80").decode(
        'utf8', 'surrogateescape')
else:
    INVALID_NAME = TESTFN + "f\xc0\x80"
# ===================================================================
# FS APIs
# ===================================================================
class _BaseFSAPIsTests(object):
    """Base mixin exercising every fs-related psutil API against a file
    whose name is ``funky_name`` (set by subclasses to a valid-UTF8 or an
    invalid unicode path).  Each test checks that psutil does not crash
    and returns ``str``; exact path equality is only asserted when the
    subclass's expect_exact_path_match() says the platform supports it.
    """
    # Set by subclasses: the path name used throughout the tests.
    funky_name = None
    @classmethod
    def setUpClass(cls):
        safe_rmpath(cls.funky_name)
        create_exe(cls.funky_name)
    @classmethod
    def tearDownClass(cls):
        reap_children()
        safe_rmpath(cls.funky_name)
    def tearDown(self):
        reap_children()
    def expect_exact_path_match(self):
        """Whether returned paths should equal funky_name exactly."""
        raise NotImplementedError("must be implemented in subclass")
    def test_proc_exe(self):
        subp = get_test_subprocess(cmd=[self.funky_name])
        p = psutil.Process(subp.pid)
        exe = p.exe()
        self.assertIsInstance(exe, str)
        if self.expect_exact_path_match():
            self.assertEqual(exe, self.funky_name)
    def test_proc_name(self):
        subp = get_test_subprocess(cmd=[self.funky_name])
        if WINDOWS:
            # On Windows name() is determined from exe() first, because
            # it's faster; we want to overcome the internal optimization
            # and test name() instead of exe().
            with mock.patch("psutil._psplatform.cext.proc_exe",
                            side_effect=psutil.AccessDenied(os.getpid())) as m:
                name = psutil.Process(subp.pid).name()
                assert m.called
        else:
            name = psutil.Process(subp.pid).name()
        self.assertIsInstance(name, str)
        if self.expect_exact_path_match():
            self.assertEqual(name, os.path.basename(self.funky_name))
    def test_proc_cmdline(self):
        subp = get_test_subprocess(cmd=[self.funky_name])
        p = psutil.Process(subp.pid)
        cmdline = p.cmdline()
        for part in cmdline:
            self.assertIsInstance(part, str)
        if self.expect_exact_path_match():
            self.assertEqual(cmdline, [self.funky_name])
    def test_proc_cwd(self):
        # Use a sibling directory with a funky name as the cwd.
        dname = self.funky_name + "2"
        self.addCleanup(safe_rmpath, dname)
        safe_mkdir(dname)
        with chdir(dname):
            p = psutil.Process()
            cwd = p.cwd()
        self.assertIsInstance(p.cwd(), str)
        if self.expect_exact_path_match():
            self.assertEqual(cwd, dname)
    def test_proc_open_files(self):
        p = psutil.Process()
        start = set(p.open_files())
        with open(self.funky_name, 'rb'):
            new = set(p.open_files())
        # The newly-opened file is the set difference.
        path = (new - start).pop().path
        self.assertIsInstance(path, str)
        if BSD and not path:
            # XXX - see https://github.com/giampaolo/psutil/issues/595
            return self.skipTest("open_files on BSD is broken")
        if self.expect_exact_path_match():
            self.assertEqual(os.path.normcase(path),
                             os.path.normcase(self.funky_name))
    @unittest.skipIf(not POSIX, "POSIX only")
    def test_proc_connections(self):
        suffix = os.path.basename(self.funky_name)
        with unix_socket_path(suffix=suffix) as name:
            try:
                sock = bind_unix_socket(name)
            except UnicodeEncodeError:
                # On Python 2 the socket layer itself may not support
                # the name; that is not psutil's fault, so skip.
                if PY3:
                    raise
                else:
                    raise unittest.SkipTest("not supported")
            with closing(sock):
                conn = psutil.Process().connections('unix')[0]
                self.assertIsInstance(conn.laddr, str)
                # AF_UNIX addr not set on OpenBSD
                if not OPENBSD:
                    self.assertEqual(conn.laddr, name)
    @unittest.skipIf(not POSIX, "POSIX only")
    @unittest.skipIf(not HAS_CONNECTIONS_UNIX, "can't list UNIX sockets")
    @skip_on_access_denied()
    def test_net_connections(self):
        def find_sock(cons):
            # Locate our test socket among the system-wide connections.
            for conn in cons:
                if os.path.basename(conn.laddr).startswith(TESTFILE_PREFIX):
                    return conn
            raise ValueError("connection not found")
        suffix = os.path.basename(self.funky_name)
        with unix_socket_path(suffix=suffix) as name:
            try:
                sock = bind_unix_socket(name)
            except UnicodeEncodeError:
                if PY3:
                    raise
                else:
                    raise unittest.SkipTest("not supported")
            with closing(sock):
                cons = psutil.net_connections(kind='unix')
                # AF_UNIX addr not set on OpenBSD
                if not OPENBSD:
                    conn = find_sock(cons)
                    self.assertIsInstance(conn.laddr, str)
                    self.assertEqual(conn.laddr, name)
    def test_disk_usage(self):
        # disk_usage() should accept a funky directory path.
        dname = self.funky_name + "2"
        self.addCleanup(safe_rmpath, dname)
        safe_mkdir(dname)
        psutil.disk_usage(dname)
    @unittest.skipIf(not HAS_MEMORY_MAPS, "not supported")
    @unittest.skipIf(not PY3, "ctypes does not support unicode on PY2")
    def test_memory_maps(self):
        # XXX: on Python 2, using ctypes.CDLL with a unicode path
        # opens a message box which blocks the test run.
        with copyload_shared_lib(dst_prefix=self.funky_name) as funky_path:
            def normpath(p):
                return os.path.realpath(os.path.normcase(p))
            libpaths = [normpath(x.path)
                        for x in psutil.Process().memory_maps()]
            # ...just to have a clearer msg in case of failure
            libpaths = [x for x in libpaths if TESTFILE_PREFIX in x]
            self.assertIn(normpath(funky_path), libpaths)
            for path in libpaths:
                self.assertIsInstance(path, str)
# https://travis-ci.org/giampaolo/psutil/jobs/440073249
@unittest.skipIf(PYPY and TRAVIS, "unreliable on PYPY + TRAVIS")
@unittest.skipIf(MACOS and TRAVIS, "unreliable on TRAVIS")  # TODO
@unittest.skipIf(ASCII_FS, "ASCII fs")
@unittest.skipIf(not subprocess_supports_unicode(TESTFN_UNICODE),
                 "subprocess can't deal with unicode")
class TestFSAPIs(_BaseFSAPIsTests, unittest.TestCase):
    """Test FS APIs with a funky, valid, UTF8 path name."""
    funky_name = TESTFN_UNICODE
    @classmethod
    def expect_exact_path_match(cls):
        # Do not expect psutil to correctly handle unicode paths on
        # Python 2 if os.listdir() is not able either.
        if PY3:
            return True
        else:
            here = '.' if isinstance(cls.funky_name, str) else u('.')
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                return cls.funky_name in os.listdir(here)
@unittest.skipIf(PYPY and TRAVIS, "unreliable on PYPY + TRAVIS")
@unittest.skipIf(MACOS and TRAVIS, "unreliable on TRAVIS")  # TODO
@unittest.skipIf(not subprocess_supports_unicode(INVALID_NAME),
                 "subprocess can't deal with invalid unicode")
class TestFSAPIsWithInvalidPath(_BaseFSAPIsTests, unittest.TestCase):
    """Test FS APIs with a funky, invalid path name."""
    funky_name = INVALID_NAME
    @classmethod
    def expect_exact_path_match(cls):
        # Invalid unicode names are supposed to work on Python 2.
        return True
@unittest.skipIf(not WINDOWS, "WINDOWS only")
class TestWinProcessName(unittest.TestCase):
    """Windows-only check that Process.name() returns a native str even
    when the exe()-based fast path is unavailable."""
    def test_name_type(self):
        # On Windows name() is determined from exe() first, because
        # it's faster; we want to overcome the internal optimization
        # and test name() instead of exe().
        with mock.patch("psutil._psplatform.cext.proc_exe",
                        side_effect=psutil.AccessDenied(os.getpid())) as m:
            self.assertIsInstance(psutil.Process().name(), str)
            assert m.called
# ===================================================================
# Non fs APIs
# ===================================================================
class TestNonFSAPIS(unittest.TestCase):
    """Unicode tests for non fs-related APIs."""
    def tearDown(self):
        reap_children()
    @unittest.skipIf(not HAS_ENVIRON, "not supported")
    def test_proc_environ(self):
        # Note: differently from others, this test does not deal
        # with fs paths. On Python 2 subprocess module is broken as
        # it's not able to handle with non-ASCII env vars, so
        # we use "è", which is part of the extended ASCII table
        # (unicode point <= 255).
        env = os.environ.copy()
        funky_str = TESTFN_UNICODE if PY3 else 'è'
        env['FUNNY_ARG'] = funky_str
        sproc = get_test_subprocess(env=env)
        p = psutil.Process(sproc.pid)
        # Deliberately rebinds `env`: from here on it is the environ
        # as reported back by psutil, not the one we passed in.
        env = p.environ()
        for k, v in env.items():
            self.assertIsInstance(k, str)
            self.assertIsInstance(v, str)
        self.assertEqual(env['FUNNY_ARG'], funky_str)
if __name__ == '__main__':
    # Allow running this module directly via psutil's own test runner.
    from psutil.tests.runner import run
    run(__file__)
| 35.266846 | 79 | 0.626567 |
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import traceback
import warnings
from contextlib import closing
from psutil import BSD
from psutil import MACOS
from psutil import OPENBSD
from psutil import POSIX
from psutil import WINDOWS
from psutil._compat import PY3
from psutil._compat import u
from psutil.tests import APPVEYOR
from psutil.tests import ASCII_FS
from psutil.tests import bind_unix_socket
from psutil.tests import chdir
from psutil.tests import copyload_shared_lib
from psutil.tests import create_exe
from psutil.tests import get_test_subprocess
from psutil.tests import HAS_CONNECTIONS_UNIX
from psutil.tests import HAS_ENVIRON
from psutil.tests import HAS_MEMORY_MAPS
from psutil.tests import mock
from psutil.tests import PYPY
from psutil.tests import reap_children
from psutil.tests import safe_mkdir
from psutil.tests import safe_rmpath as _safe_rmpath
from psutil.tests import skip_on_access_denied
from psutil.tests import TESTFILE_PREFIX
from psutil.tests import TESTFN
from psutil.tests import TESTFN_UNICODE
from psutil.tests import TRAVIS
from psutil.tests import unittest
from psutil.tests import unix_socket_path
import psutil
def safe_rmpath(path):
if APPVEYOR:
# TODO - this is quite random and I'm not sure why it happens,
try:
return _safe_rmpath(path)
except WindowsError:
traceback.print_exc()
else:
return _safe_rmpath(path)
def subprocess_supports_unicode(name):
if PY3:
return True
try:
safe_rmpath(name)
create_exe(name)
get_test_subprocess(cmd=[name])
except UnicodeEncodeError:
return False
else:
return True
finally:
reap_children()
if PY3:
INVALID_NAME = (TESTFN.encode('utf8') + b"f\xc0\x80").decode(
'utf8', 'surrogateescape')
else:
INVALID_NAME = TESTFN + "f\xc0\x80"
class _BaseFSAPIsTests(object):
funky_name = None
@classmethod
def setUpClass(cls):
safe_rmpath(cls.funky_name)
create_exe(cls.funky_name)
@classmethod
def tearDownClass(cls):
reap_children()
safe_rmpath(cls.funky_name)
def tearDown(self):
reap_children()
def expect_exact_path_match(self):
raise NotImplementedError("must be implemented in subclass")
def test_proc_exe(self):
subp = get_test_subprocess(cmd=[self.funky_name])
p = psutil.Process(subp.pid)
exe = p.exe()
self.assertIsInstance(exe, str)
if self.expect_exact_path_match():
self.assertEqual(exe, self.funky_name)
def test_proc_name(self):
subp = get_test_subprocess(cmd=[self.funky_name])
if WINDOWS:
# and test name() instead of exe().
with mock.patch("psutil._psplatform.cext.proc_exe",
side_effect=psutil.AccessDenied(os.getpid())) as m:
name = psutil.Process(subp.pid).name()
assert m.called
else:
name = psutil.Process(subp.pid).name()
self.assertIsInstance(name, str)
if self.expect_exact_path_match():
self.assertEqual(name, os.path.basename(self.funky_name))
def test_proc_cmdline(self):
subp = get_test_subprocess(cmd=[self.funky_name])
p = psutil.Process(subp.pid)
cmdline = p.cmdline()
for part in cmdline:
self.assertIsInstance(part, str)
if self.expect_exact_path_match():
self.assertEqual(cmdline, [self.funky_name])
def test_proc_cwd(self):
dname = self.funky_name + "2"
self.addCleanup(safe_rmpath, dname)
safe_mkdir(dname)
with chdir(dname):
p = psutil.Process()
cwd = p.cwd()
self.assertIsInstance(p.cwd(), str)
if self.expect_exact_path_match():
self.assertEqual(cwd, dname)
def test_proc_open_files(self):
p = psutil.Process()
start = set(p.open_files())
with open(self.funky_name, 'rb'):
new = set(p.open_files())
path = (new - start).pop().path
self.assertIsInstance(path, str)
if BSD and not path:
# XXX - see https://github.com/giampaolo/psutil/issues/595
return self.skipTest("open_files on BSD is broken")
if self.expect_exact_path_match():
self.assertEqual(os.path.normcase(path),
os.path.normcase(self.funky_name))
@unittest.skipIf(not POSIX, "POSIX only")
def test_proc_connections(self):
suffix = os.path.basename(self.funky_name)
with unix_socket_path(suffix=suffix) as name:
try:
sock = bind_unix_socket(name)
except UnicodeEncodeError:
if PY3:
raise
else:
raise unittest.SkipTest("not supported")
with closing(sock):
conn = psutil.Process().connections('unix')[0]
self.assertIsInstance(conn.laddr, str)
# AF_UNIX addr not set on OpenBSD
if not OPENBSD:
self.assertEqual(conn.laddr, name)
@unittest.skipIf(not POSIX, "POSIX only")
@unittest.skipIf(not HAS_CONNECTIONS_UNIX, "can't list UNIX sockets")
@skip_on_access_denied()
def test_net_connections(self):
def find_sock(cons):
for conn in cons:
if os.path.basename(conn.laddr).startswith(TESTFILE_PREFIX):
return conn
raise ValueError("connection not found")
suffix = os.path.basename(self.funky_name)
with unix_socket_path(suffix=suffix) as name:
try:
sock = bind_unix_socket(name)
except UnicodeEncodeError:
if PY3:
raise
else:
raise unittest.SkipTest("not supported")
with closing(sock):
cons = psutil.net_connections(kind='unix')
if not OPENBSD:
conn = find_sock(cons)
self.assertIsInstance(conn.laddr, str)
self.assertEqual(conn.laddr, name)
def test_disk_usage(self):
dname = self.funky_name + "2"
self.addCleanup(safe_rmpath, dname)
safe_mkdir(dname)
psutil.disk_usage(dname)
@unittest.skipIf(not HAS_MEMORY_MAPS, "not supported")
@unittest.skipIf(not PY3, "ctypes does not support unicode on PY2")
def test_memory_maps(self):
with copyload_shared_lib(dst_prefix=self.funky_name) as funky_path:
def normpath(p):
return os.path.realpath(os.path.normcase(p))
libpaths = [normpath(x.path)
for x in psutil.Process().memory_maps()]
libpaths = [x for x in libpaths if TESTFILE_PREFIX in x]
self.assertIn(normpath(funky_path), libpaths)
for path in libpaths:
self.assertIsInstance(path, str)
@unittest.skipIf(PYPY and TRAVIS, "unreliable on PYPY + TRAVIS")
@unittest.skipIf(MACOS and TRAVIS, "unreliable on TRAVIS")
@unittest.skipIf(ASCII_FS, "ASCII fs")
@unittest.skipIf(not subprocess_supports_unicode(TESTFN_UNICODE),
"subprocess can't deal with unicode")
class TestFSAPIs(_BaseFSAPIsTests, unittest.TestCase):
funky_name = TESTFN_UNICODE
@classmethod
def expect_exact_path_match(cls):
# Do not expect psutil to correctly handle unicode paths on
# Python 2 if os.listdir() is not able either.
if PY3:
return True
else:
here = '.' if isinstance(cls.funky_name, str) else u('.')
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return cls.funky_name in os.listdir(here)
@unittest.skipIf(PYPY and TRAVIS, "unreliable on PYPY + TRAVIS")
@unittest.skipIf(MACOS and TRAVIS, "unreliable on TRAVIS") # TODO
@unittest.skipIf(not subprocess_supports_unicode(INVALID_NAME),
"subprocess can't deal with invalid unicode")
class TestFSAPIsWithInvalidPath(_BaseFSAPIsTests, unittest.TestCase):
funky_name = INVALID_NAME
@classmethod
def expect_exact_path_match(cls):
return True
@unittest.skipIf(not WINDOWS, "WINDOWS only")
class TestWinProcessName(unittest.TestCase):
def test_name_type(self):
# and test name() instead of exe().
with mock.patch("psutil._psplatform.cext.proc_exe",
side_effect=psutil.AccessDenied(os.getpid())) as m:
self.assertIsInstance(psutil.Process().name(), str)
assert m.called
# ===================================================================
# Non fs APIs
# ===================================================================
class TestNonFSAPIS(unittest.TestCase):
def tearDown(self):
reap_children()
@unittest.skipIf(not HAS_ENVIRON, "not supported")
def test_proc_environ(self):
# Note: differently from others, this test does not deal
# with fs paths. On Python 2 subprocess module is broken as
# it's not able to handle with non-ASCII env vars, so
env = os.environ.copy()
funky_str = TESTFN_UNICODE if PY3 else 'è'
env['FUNNY_ARG'] = funky_str
sproc = get_test_subprocess(env=env)
p = psutil.Process(sproc.pid)
env = p.environ()
for k, v in env.items():
self.assertIsInstance(k, str)
self.assertIsInstance(v, str)
self.assertEqual(env['FUNNY_ARG'], funky_str)
if __name__ == '__main__':
from psutil.tests.runner import run
run(__file__)
| true | true |
f7115ed0be73dd5f07ae66313df67de5eb1bd650 | 23,190 | py | Python | mittab/apps/tab/outround_pairing_views.py | DanielS6/mit-tab | f2b5bb609546514582697b998b8b50a66bc8a396 | [
"MIT"
] | 9 | 2015-01-22T01:19:15.000Z | 2017-11-01T20:09:47.000Z | mittab/apps/tab/outround_pairing_views.py | DanielS6/mit-tab | f2b5bb609546514582697b998b8b50a66bc8a396 | [
"MIT"
] | 152 | 2018-04-06T14:32:51.000Z | 2022-02-11T22:12:53.000Z | mittab/apps/tab/outround_pairing_views.py | DanielS6/mit-tab | f2b5bb609546514582697b998b8b50a66bc8a396 | [
"MIT"
] | 13 | 2015-09-14T00:40:06.000Z | 2018-01-24T04:05:32.000Z | import random
import math
from django.shortcuts import render, get_object_or_404
from django.http import JsonResponse
from django.contrib.auth.decorators import permission_required
from django.db.models import Q
from django.shortcuts import redirect, reverse
from django.utils import timezone
from mittab.apps.tab.helpers import redirect_and_flash_error, \
redirect_and_flash_success
from mittab.apps.tab.models import *
from mittab.libs.errors import *
from mittab.apps.tab.forms import OutroundResultEntryForm
import mittab.libs.tab_logic as tab_logic
import mittab.libs.outround_tab_logic as outround_tab_logic
from mittab.libs.outround_tab_logic import offset_to_quotient
import mittab.libs.backup as backup
@permission_required("tab.tab_settings.can_change", login_url="/403/")
def pair_next_outround(request, num_teams, type_of_round):
    """Pair the next outround bracket (of size num_teams / 2).

    POST: back up the tab state, delete any smaller brackets of this
    round type (they will be re-derived), pair, and redirect to the new
    bracket's pairing view.
    GET: render a readiness checklist (enough teams, judges, rooms, and
    all previous-bracket results entered).
    """
    if request.method == "POST":
        # num_teams is a power of two, so floor division is exact; using
        # ``//`` keeps the value an int and avoids a float (e.g. "4.0")
        # leaking into the backup file name under Python 3.
        backup.backup_round("before_pairing_%s_%s" %
                            (num_teams // 2, type_of_round))
        Outround.objects.filter(num_teams__lt=num_teams,
                                type_of_round=type_of_round).delete()
        outround_tab_logic.pair(type_of_round)
        return redirect_and_flash_success(
            request, "Success!", path=reverse("outround_pairing_view",
                                              kwargs={
                                                  "num_teams": num_teams // 2,
                                                  "type_of_round": type_of_round
                                              }))
    # See if we can pair the round.  All locals below are exposed to the
    # template via the ``locals()`` call at the bottom.
    title = "Pairing Outrounds"
    current_round_number = 0
    previous_round_number = TabSettings.get("tot_rounds", 5)
    check_status = []
    judges = outround_tab_logic.have_enough_judges_type(type_of_round)
    rooms = outround_tab_logic.have_enough_rooms_type(type_of_round)
    # Each helper returns (ok, (have, need)).
    msg = "Enough judges checked in for Out-rounds? Need {0}, have {1}".format(
        judges[1][1], judges[1][0])
    if num_teams <= 2:
        check_status.append(("Have more rounds?", "No", "Not enough teams"))
    else:
        check_status.append(("Have more rounds?", "Yes", "Have enough teams!"))
    if judges[0]:
        check_status.append((msg, "Yes", "Judges are checked in"))
    else:
        check_status.append((msg, "No", "Not enough judges"))
    msg = "N/2 Rooms available Round Out-rounds? Need {0}, have {1}".format(
        rooms[1][1], rooms[1][0])
    if rooms[0]:
        check_status.append((msg, "Yes", "Rooms are checked in"))
    else:
        check_status.append((msg, "No", "Not enough rooms"))
    # NOTE(review): assumes BreakingTeam.VARSITY is falsy, so a truthy
    # type_of_round labels the novice bracket -- confirm against model.
    round_label = "[%s] Ro%s" % ("N" if type_of_round else "V",
                                 num_teams)
    msg = "All Rounds properly entered for Round %s" % (
        round_label)
    ready_to_pair = "Yes"
    ready_to_pair_alt = "Checks passed!"
    try:
        outround_tab_logic.have_properly_entered_data(num_teams, type_of_round)
        check_status.append((msg, "Yes", "All rounds look good"))
    except PrevRoundNotEnteredError as e:
        ready_to_pair = "No"
        ready_to_pair_alt = str(e)
        check_status.append(
            (msg, "No", "Not all rounds are entered. %s" % str(e)))
    return render(request, "pairing/pair_round.html", locals())
def get_outround_options(var_teams_to_break,
                         nov_teams_to_break):
    """Return (url, label) menu entries for every existing outround
    bracket, varsity brackets first, largest bracket to smallest."""
    def _round_up_to_power_of_two(count):
        # Bump the break size until it is an exact power of two.
        while not math.log(count, 2) % 1 == 0:
            count += 1
        return count
    def _bracket_links(num_teams, type_of_round, label_prefix):
        # Collect a link for each bracket size that has been paired.
        links = []
        while num_teams > 1:
            if Outround.objects.filter(type_of_round=type_of_round,
                                       num_teams=num_teams).exists():
                url = reverse("outround_pairing_view",
                              kwargs={"type_of_round": type_of_round,
                                      "num_teams": int(num_teams)})
                links.append(
                    (url, "[%s] Ro%s" % (label_prefix, int(num_teams))))
            num_teams /= 2
        return links
    var_teams_to_break = _round_up_to_power_of_two(var_teams_to_break)
    nov_teams_to_break = _round_up_to_power_of_two(nov_teams_to_break)
    return (_bracket_links(var_teams_to_break, BreakingTeam.VARSITY, "V") +
            _bracket_links(nov_teams_to_break, BreakingTeam.NOVICE, "N"))
@permission_required("tab.tab_settings.can_change", login_url="/403/")
def break_teams(request):
    """Perform the outround break (POST) or show pre-break readiness checks (GET).

    POST: snapshots the database, runs the break logic, and redirects with a
    flash message.  GET: renders the pairing page with a readiness checklist
    (results entered, byes, no-shows, room availability).

    NOTE: this view renders with ``locals()``, so every local variable name
    here is part of the template context.
    """
    if request.method == "POST":
        # Perform the break; back up first so the state can be restored
        backup.backup_round("before_the_break_%s" % (timezone.now().strftime("%H:%M"),))
        success, msg = outround_tab_logic.perform_the_break()
        if success:
            return redirect_and_flash_success(
                request, msg, path="/outround_pairing"
            )
        return redirect_and_flash_error(
            request, msg, path="/"
        )
    # See if we can pair the round
    title = "Pairing Outrounds"
    current_round_number = 0
    previous_round_number = TabSettings.get("tot_rounds", 5)
    check_status = []
    msg = "All Rounds properly entered for Round %s" % (
        previous_round_number)
    ready_to_pair = "Yes"
    ready_to_pair_alt = "Checks passed!"
    try:
        tab_logic.have_properly_entered_data(current_round_number)
        check_status.append((msg, "Yes", "All rounds look good"))
    except PrevRoundNotEnteredError as e:
        ready_to_pair = "No"
        ready_to_pair_alt = str(e)
        check_status.append(
            (msg, "No", "Not all rounds are entered. %s" % str(e)))
    except ByeAssignmentError as e:
        ready_to_pair = "No"
        ready_to_pair_alt = str(e)
        check_status.append(
            (msg, "No", "You have a bye and results. %s" % str(e)))
    except NoShowAssignmentError as e:
        ready_to_pair = "No"
        ready_to_pair_alt = str(e)
        check_status.append(
            (msg, "No", "You have a noshow and results. %s" % str(e)))
    # Room check: rooms[0] is a boolean, rooms[1] is (have, need)
    rooms = outround_tab_logic.have_enough_rooms_before_break()
    msg = "N/2 Rooms available Round Out-rounds? Need {0}, have {1}".format(
        rooms[1][1], rooms[1][0])
    if rooms[0]:
        check_status.append((msg, "Yes", "Rooms are checked in"))
    else:
        check_status.append((msg, "No", "Not enough rooms"))
    return render(request, "pairing/pair_round.html", locals())
def outround_pairing_view(request,
                          type_of_round=BreakingTeam.VARSITY,
                          num_teams=None):
    """Admin pairing page for one outround bracket (division + bracket size).

    With no ``num_teams``, redirects to the varsity break bracket (rounded up
    to a power of two) so the size is explicit in the URL.  Otherwise gathers
    the bracket's rounds plus unplaced teams, free judges, non-checked-in
    judges, and available rooms for display.

    NOTE: renders with ``locals()``, so every local name here is part of the
    template context.
    """
    choice = TabSettings.get("choice", 0)
    if num_teams is None:
        num_teams = TabSettings.get("var_teams_to_break", 8)
        # Round up to the next power of two so the bracket is well-formed
        while not math.log(num_teams, 2) % 1 == 0:
            num_teams += 1
        return redirect("outround_pairing_view",
                        type_of_round=BreakingTeam.VARSITY,
                        num_teams=num_teams)
    # A bracket is "released" once the division's visibility threshold has
    # been lowered to (or below) this bracket's size
    pairing_released = False
    if type_of_round == BreakingTeam.VARSITY:
        pairing_released = TabSettings.get("var_teams_visible", 256) <= num_teams
    elif type_of_round == BreakingTeam.NOVICE:
        pairing_released = TabSettings.get("nov_teams_visible", 256) <= num_teams
    label = "[%s] Ro%s" % ("V" if type_of_round == BreakingTeam.VARSITY else "N",
                           num_teams)
    nov_teams_to_break = TabSettings.get("nov_teams_to_break")
    var_teams_to_break = TabSettings.get("var_teams_to_break")
    if not nov_teams_to_break or not var_teams_to_break:
        return redirect_and_flash_error(request,
                                        "Please check your break tab settings",
                                        path="/")
    outround_options = get_outround_options(var_teams_to_break,
                                            nov_teams_to_break)
    outrounds = Outround.objects.filter(type_of_round=type_of_round,
                                        num_teams=num_teams).all()
    judges_per_panel = TabSettings.get("var_panel_size", 3) \
        if type_of_round == BreakingTeam.VARSITY \
        else TabSettings.get("nov_panel_size", 3)
    judge_slots = [i for i in range(1, judges_per_panel + 1)]
    # Map this bracket onto the other division's concurrently-run bracket
    # (via the var:nov ratio) so its judges/rooms are treated as busy here
    var_to_nov = TabSettings.get("var_to_nov", 2)
    var_to_nov = offset_to_quotient(var_to_nov)
    other_round_num = num_teams / var_to_nov
    if type_of_round == BreakingTeam.NOVICE:
        other_round_num = num_teams * var_to_nov
    other_round_type = BreakingTeam.VARSITY \
        if type_of_round == BreakingTeam.NOVICE \
        else BreakingTeam.NOVICE
    pairing_exists = len(outrounds) > 0
    # Teams of this division that broke, have not lost an outround, and are
    # not yet placed (gov or opp) in any round of this bracket
    lost_outrounds = [t.loser.id for t in Outround.objects.all() if t.loser]
    excluded_teams = BreakingTeam.objects.filter(
        type_of_team=type_of_round
    ).exclude(
        team__id__in=lost_outrounds
    )
    excluded_teams = [t.team for t in excluded_teams]
    excluded_teams = [t for t in excluded_teams if not Outround.objects.filter(
        type_of_round=type_of_round,
        num_teams=num_teams,
        gov_team=t
    ).exists()]
    excluded_teams = [t for t in excluded_teams if not Outround.objects.filter(
        type_of_round=type_of_round,
        num_teams=num_teams,
        opp_team=t
    ).exists()]
    # Judges checked in (round_number 0 — presumably the outround check-in;
    # verify against the check-in model) but not yet on a panel in this
    # bracket or the other division's concurrent bracket
    excluded_judges = Judge.objects.exclude(
        judges_outrounds__num_teams=num_teams,
        judges_outrounds__type_of_round=type_of_round,
    ).exclude(
        judges_outrounds__type_of_round=other_round_type,
        judges_outrounds__num_teams=other_round_num
    ).filter(
        checkin__round_number=0
    )
    # Unassigned judges that are not checked in at all
    non_checkins = Judge.objects.exclude(
        judges_outrounds__num_teams=num_teams,
        judges_outrounds__type_of_round=type_of_round
    ).exclude(
        judges_outrounds__type_of_round=other_round_type,
        judges_outrounds__num_teams=other_round_num
    ).exclude(
        checkin__round_number=0
    )
    # Checked-in rooms not used by this bracket or the concurrent one
    available_rooms = Room.objects.exclude(
        rooms_outrounds__num_teams=num_teams,
        rooms_outrounds__type_of_round=type_of_round
    ).exclude(
        rooms_outrounds__num_teams=other_round_num,
        rooms_outrounds__type_of_round=other_round_type
    )
    checked_in_rooms = [r.room for r in RoomCheckIn.objects.filter(round_number=0)]
    available_rooms = [r for r in available_rooms if r in checked_in_rooms]
    # Pad the four columns to equal length so they zip into table rows
    size = max(list(
        map(
            len,
            [excluded_teams, excluded_judges, non_checkins, available_rooms]
        )))
    # The minimum rank you want to warn on
    warning = 5
    excluded_people = list(
        zip(*[
            x + [""] * (size - len(x)) for x in [
                list(excluded_teams),
                list(excluded_judges),
                list(non_checkins),
                list(available_rooms)
            ]
        ]))
    return render(request,
                  "outrounds/pairing_base.html",
                  locals())
def alternative_judges(request, round_id, judge_id=None):
    """Render the judge-swap dropdown for an outround.

    Splits checked-in judges into those already judging this bracket (or the
    other division's concurrent bracket) and those who are free, then drops
    judges scratched against either team.  Renders with ``locals()``, so
    local names are the template context.
    """
    round_obj = Outround.objects.get(id=int(round_id))
    round_gov, round_opp = round_obj.gov_team, round_obj.opp_team
    # All of these variables are for the convenience of the template
    try:
        current_judge_id = int(judge_id)
        current_judge_obj = Judge.objects.get(id=current_judge_id)
        current_judge_name = current_judge_obj.name
        current_judge_rank = current_judge_obj.rank
    except TypeError:
        # judge_id was None: int(None) raises TypeError
        current_judge_id, current_judge_obj, current_judge_rank = "", "", ""
        current_judge_name = "No judge"
    # Size of the other division's bracket running at the same time
    var_to_nov = TabSettings.get("var_to_nov", 2)
    var_to_nov = offset_to_quotient(var_to_nov)
    other_round_num = round_obj.num_teams / var_to_nov
    if round_obj.type_of_round == BreakingTeam.NOVICE:
        other_round_num = round_obj.num_teams * var_to_nov
    other_round_type = BreakingTeam.NOVICE \
        if round_obj.type_of_round == BreakingTeam.VARSITY \
        else BreakingTeam.VARSITY
    # Checked-in judges not already used by either concurrent bracket
    excluded_judges = Judge.objects.exclude(
        judges_outrounds__num_teams=round_obj.num_teams,
        judges_outrounds__type_of_round=round_obj.type_of_round
    ).exclude(
        judges_outrounds__num_teams=other_round_num,
        judges_outrounds__type_of_round=other_round_type
    ).filter(
        checkin__round_number=0
    )
    # Checked-in judges already judging in one of the two brackets
    query = Q(
        judges_outrounds__num_teams=round_obj.num_teams,
        judges_outrounds__type_of_round=round_obj.type_of_round
    )
    query = query | Q(
        judges_outrounds__num_teams=other_round_num,
        judges_outrounds__type_of_round=other_round_type
    )
    included_judges = Judge.objects.filter(query) \
        .filter(checkin__round_number=0) \
        .distinct()
    def can_judge(judge, team1, team2):
        # A judge scratched against either team cannot judge the round
        query = Q(judge=judge, team=team1) | Q(judge=judge, team=team2)
        return not Scratch.objects.filter(query).exists()
    excluded_judges = [(j.name, j.id, float(j.rank))
                       for j in excluded_judges if can_judge(j, round_gov, round_opp)]
    included_judges = [(j.name, j.id, float(j.rank))
                       for j in included_judges if can_judge(j, round_gov, round_opp)]
    # Highest-ranked judges first
    included_judges = sorted(included_judges, key=lambda x: -x[2])
    excluded_judges = sorted(excluded_judges, key=lambda x: -x[2])
    return render(request, "pairing/judge_dropdown.html", locals())
def alternative_teams(request, round_id, current_team_id, position):
    """Render the team-swap dropdown for one slot of an outround.

    ``excluded_teams`` are breaking teams of the same division that are not
    yet placed in a bracket of this size (presumably the available swaps —
    verify naming against the template); ``included_teams`` are the rest.
    Renders with ``locals()``, so local names are the template context.
    """
    round_obj = Outround.objects.get(pk=round_id)
    current_team = Team.objects.get(pk=current_team_id)
    # Limit choices to teams that broke in the same division as the team
    # being replaced
    breaking_teams_by_type = [t.team.id
                              for t in BreakingTeam.objects.filter(
                                  type_of_team=current_team.breaking_team.type_of_team
                              )]
    excluded_teams = Team.objects.filter(
        id__in=breaking_teams_by_type
    ).exclude(
        gov_team_outround__num_teams=round_obj.num_teams
    ).exclude(
        opp_team_outround__num_teams=round_obj.num_teams
    ).exclude(pk=current_team_id)
    included_teams = Team.objects.filter(
        id__in=breaking_teams_by_type
    ).exclude(
        pk__in=excluded_teams
    )
    return render(request, "pairing/team_dropdown.html", locals())
@permission_required("tab.tab_settings.can_change", login_url="/403/")
def assign_team(request, round_id, position, team_id):
    """Place a team into the gov or opp slot of an outround.

    Any failure (bad ids, bad position, save error) is logged and reported
    as ``{"success": false}``; success echoes the team's id and name.
    """
    try:
        outround = Outround.objects.get(id=int(round_id))
        team = Team.objects.get(id=int(team_id))
        slot = position.lower()
        if slot == "gov":
            outround.gov_team = team
        elif slot == "opp":
            outround.opp_team = team
        else:
            raise ValueError("Got invalid position: " + position)
        outround.save()
        data = {
            "success": True,
            "team": {
                "id": team.id,
                "name": team.name
            },
        }
    except Exception:
        # Boundary handler: log and report failure rather than 500
        emit_current_exception()
        data = {"success": False}
    return JsonResponse(data)
@permission_required("tab.tab_settings.can_change", login_url="/403/")
def assign_judge(request, round_id, judge_id, remove_id=None):
    """Add a judge to an outround panel, optionally swapping another out.

    If the outgoing judge chaired the panel, the highest-ranked remaining
    panelist is promoted; a judge added to a chairless panel becomes chair.
    Returns JSON describing the added judge and the resulting chair, or
    ``{"success": false}`` on any error (logged).
    """
    try:
        outround = Outround.objects.get(id=int(round_id))
        judge = Judge.objects.get(id=int(judge_id))
        outround.judges.add(judge)
        if remove_id is not None:
            # Swap: drop the outgoing judge and re-seat the chair if needed
            outgoing = Judge.objects.get(id=int(remove_id))
            outround.judges.remove(outgoing)
            if outgoing == outround.chair:
                outround.chair = outround.judges.order_by("-rank").first()
        elif not outround.chair:
            # First judge on a chairless panel chairs it
            outround.chair = judge
        outround.save()
        data = {
            "success": True,
            "chair_id": outround.chair.id,
            "round_id": outround.id,
            "judge_name": judge.name,
            "judge_rank": float(judge.rank),
            "judge_id": judge.id
        }
    except Exception:
        emit_current_exception()
        data = {"success": False}
    return JsonResponse(data)
def enter_result(request,
                 round_id,
                 form_class=OutroundResultEntryForm):
    """Show (GET) or save (POST) the ballot for the given outround.

    A valid POST stores the result and returns the user to the pairing view
    for this bracket; an unrecoverable result flashes an error.  Invalid
    forms fall through and re-render with errors.
    """
    round_obj = Outround.objects.get(id=round_id)
    redirect_to = reverse(
        "outround_pairing_view",
        kwargs={
            "num_teams": round_obj.num_teams,
            "type_of_round": round_obj.type_of_round
        })
    if request.method != "POST":
        form = form_class(round_instance=round_obj)
    else:
        form = form_class(request.POST, round_instance=round_obj)
        if form.is_valid():
            try:
                form.save()
            except ValueError:
                return redirect_and_flash_error(
                    request, "Invalid round result, could not remedy.")
            return redirect_and_flash_success(request,
                                              "Result entered successfully",
                                              path=redirect_to)
    context = {
        "form": form,
        "title": "Entering Ballot for {}".format(round_obj),
        "gov_team": round_obj.gov_team,
        "opp_team": round_obj.opp_team,
    }
    return render(request, "outrounds/ballot.html", context)
def pretty_pair(request, type_of_round=BreakingTeam.VARSITY, printable=False):
    """Public-facing pairing display for all released brackets of a division.

    Shows every bracket at least as large as the division's visibility
    threshold, each with its rounds and the teams not placed in it.
    Renders with ``locals()``, so local names are the template context.
    """
    gov_opp_display = TabSettings.get("gov_opp_display", 0)
    round_number = 256
    if type_of_round == BreakingTeam.VARSITY:
        round_number = TabSettings.get("var_teams_visible", 256)
    else:
        round_number = TabSettings.get("nov_teams_visible", 256)
    # Only brackets at/above the visibility threshold are public
    round_pairing = Outround.objects.filter(
        num_teams__gte=round_number,
        type_of_round=type_of_round
    )
    # Distinct bracket sizes present, largest first
    unique_values = round_pairing.values_list("num_teams")
    unique_values = list(set([value[0] for value in unique_values]))
    unique_values.sort(key=lambda v: v, reverse=True)
    outround_pairings = []
    for value in unique_values:
        # Teams that broke, have not lost, and are not yet placed (gov or
        # opp) in this bracket size — shown alongside the rounds
        lost_outrounds = [t.loser.id for t in Outround.objects.all() if t.loser]
        excluded_teams = BreakingTeam.objects.filter(
            type_of_team=type_of_round
        ).exclude(
            team__id__in=lost_outrounds
        )
        excluded_teams = [t.team for t in excluded_teams]
        excluded_teams = [t for t in excluded_teams if not Outround.objects.filter(
            type_of_round=type_of_round,
            num_teams=value,
            gov_team=t
        ).exists()]
        excluded_teams = [t for t in excluded_teams if not Outround.objects.filter(
            type_of_round=type_of_round,
            num_teams=value,
            opp_team=t
        ).exists()]
        outround_pairings.append({
            "label": "[%s] Ro%s" % ("N" if type_of_round else "V", value),
            "rounds": Outround.objects.filter(num_teams=value,
                                              type_of_round=type_of_round),
            "excluded": excluded_teams
        })
    label = "%s Outrounds Pairings" % ("Novice" if type_of_round else "Varsity",)
    round_pairing = list(round_pairing)
    # We want a random looking, but constant ordering of the rounds: a
    # fixed-seed shuffle followed by a stable sort on gov team name, so the
    # shuffle only affects rounds whose gov teams share a name
    random.seed(0xBEEF)
    random.shuffle(round_pairing)
    round_pairing.sort(key=lambda r: r.gov_team.name)
    paired_teams = [team.gov_team for team in round_pairing
                    ] + [team.opp_team for team in round_pairing]
    team_count = len(paired_teams)
    pairing_exists = True
    #pairing_exists = TabSettings.get("pairing_released", 0) == 1
    printable = printable  # no-op re-bind; printable is already a local
    sidelock = TabSettings.get("sidelock", 0)
    choice = TabSettings.get("choice", 0)
    return render(request, "outrounds/pretty_pairing.html", locals())
def pretty_pair_print(request, type_of_round=BreakingTeam.VARSITY):
    """Printable variant of pretty_pair (printable flag forced on)."""
    return pretty_pair(request, type_of_round, printable=True)
def toggle_pairing_released(request, type_of_round, num_teams):
    """Toggle public visibility of the ``num_teams`` bracket for a division.

    Visibility is stored as the smallest visible bracket size: a threshold
    of ``num_teams`` or less means this bracket is released.  Toggling a
    bracket released at exactly this size doubles the threshold (hiding
    it); toggling anything else lowers the threshold to ``num_teams``.

    Returns:
        JsonResponse with the new released state for this bracket.
    """
    # Varsity and novice visibility live under separate settings keys; the
    # original duplicated both branches verbatim — collapse on the key
    setting = "var_teams_visible" if type_of_round == BreakingTeam.VARSITY \
        else "nov_teams_visible"
    old = TabSettings.get(setting, 256)
    if old == num_teams:
        # Was released at exactly this bracket -> hide it again
        TabSettings.set(setting, num_teams * 2)
    else:
        TabSettings.set(setting, num_teams)
    data = {"success": True, "pairing_released": old != num_teams}
    return JsonResponse(data)
def update_choice(request, outround_id):
    """Cycle an outround's side-choice flag through its three values.

    Increments ``choice`` and wraps 3 back to 0, then returns the new
    human-readable value as JSON.
    """
    outround = get_object_or_404(Outround, pk=outround_id)
    next_choice = outround.choice + 1
    if next_choice == 3:
        next_choice = 0
    outround.choice = next_choice
    outround.save()
    return JsonResponse({
        "success": True,
        "data": "%s choice" % (
            outround.get_choice_display(),
        ),
    })
def forum_view(request, type_of_round):
    """Public results listing for one division, grouped by bracket size.

    Only rounds with a decided victor are shown; brackets are listed
    largest-first.  Renders with ``locals()``, so local names are the
    template context.
    """
    outrounds = Outround.objects.exclude(
        victor=Outround.UNKNOWN
    ).filter(
        type_of_round=type_of_round
    )
    # Distinct bracket sizes that have at least one decided round
    rounds = outrounds.values_list("num_teams")
    rounds = [r[0] for r in rounds]
    rounds = list(set(rounds))
    rounds.sort(key=lambda r: r, reverse=True)
    results = []
    for _round in rounds:
        to_add = {}
        to_display = outrounds.filter(num_teams=_round)
        to_add["label"] = "[%s] Ro%s" % ("N" if type_of_round else "V", _round)
        to_add["results"] = []
        for outround in to_display:
            # One sentence per decided round:
            # "[seed] loser (debaters) from school (side) drops to
            #  [seed] winner (debaters) from school (side)"
            to_add["results"] += [
                """[%s] %s (%s, %s) from %s%s (%s) drops to
                [%s] %s (%s, %s) from %s%s (%s)""" % (
                    outround.loser.breaking_team.seed,
                    outround.loser.display,
                    outround.loser.debaters.first().name,
                    outround.loser.debaters.last().name,
                    outround.loser.school.name,
                    " / " + outround.loser.hybrid_school.name \
                    if outround.loser.hybrid_school else "",
                    "GOV" if outround.loser == outround.gov_team else "OPP",
                    outround.winner.breaking_team.seed,
                    outround.winner.display,
                    outround.winner.debaters.first().name,
                    outround.winner.debaters.last().name,
                    outround.winner.school.name,
                    " / " + outround.winner.hybrid_school.name \
                    if outround.winner.hybrid_school else "",
                    "GOV" if outround.winner == outround.gov_team else "OPP",
                )
            ]
        results.append(to_add)
    return render(request,
                  "outrounds/forum_result.html",
                  locals())
| 34.924699 | 88 | 0.617119 | import random
import math
from django.shortcuts import render, get_object_or_404
from django.http import JsonResponse
from django.contrib.auth.decorators import permission_required
from django.db.models import Q
from django.shortcuts import redirect, reverse
from django.utils import timezone
from mittab.apps.tab.helpers import redirect_and_flash_error, \
redirect_and_flash_success
from mittab.apps.tab.models import *
from mittab.libs.errors import *
from mittab.apps.tab.forms import OutroundResultEntryForm
import mittab.libs.tab_logic as tab_logic
import mittab.libs.outround_tab_logic as outround_tab_logic
from mittab.libs.outround_tab_logic import offset_to_quotient
import mittab.libs.backup as backup
@permission_required("tab.tab_settings.can_change", login_url="/403/")
def pair_next_outround(request, num_teams, type_of_round):
    """Pair the next (half-sized) outround, or show readiness checks (GET).

    POST: backs up state, deletes any smaller (now stale) brackets of this
    division, runs the pairing logic, and redirects to the new bracket's
    pairing view.  GET: renders a checklist (enough teams, judges, rooms,
    and properly entered results).

    NOTE: renders with ``locals()``, so every local name here is part of
    the template context.
    """
    if request.method == "POST":
        backup.backup_round("before_pairing_%s_%s" %
                            (num_teams / 2, type_of_round))
        # Smaller brackets become stale once we re-pair; drop them first
        Outround.objects.filter(num_teams__lt=num_teams,
                                type_of_round=type_of_round).delete()
        outround_tab_logic.pair(type_of_round)
        return redirect_and_flash_success(
            request, "Success!", path=reverse("outround_pairing_view",
                                              kwargs={
                                                  "num_teams": int(num_teams / 2),
                                                  "type_of_round": type_of_round
                                              }))
    title = "Pairing Outrounds"
    current_round_number = 0
    previous_round_number = TabSettings.get("tot_rounds", 5)
    check_status = []
    # Each helper returns (ok_boolean, (have, need))
    judges = outround_tab_logic.have_enough_judges_type(type_of_round)
    rooms = outround_tab_logic.have_enough_rooms_type(type_of_round)
    msg = "Enough judges checked in for Out-rounds? Need {0}, have {1}".format(
        judges[1][1], judges[1][0])
    if num_teams <= 2:
        check_status.append(("Have more rounds?", "No", "Not enough teams"))
    else:
        check_status.append(("Have more rounds?", "Yes", "Have enough teams!"))
    if judges[0]:
        check_status.append((msg, "Yes", "Judges are checked in"))
    else:
        check_status.append((msg, "No", "Not enough judges"))
    msg = "N/2 Rooms available Round Out-rounds? Need {0}, have {1}".format(
        rooms[1][1], rooms[1][0])
    if rooms[0]:
        check_status.append((msg, "Yes", "Rooms are checked in"))
    else:
        check_status.append((msg, "No", "Not enough rooms"))
    round_label = "[%s] Ro%s" % ("N" if type_of_round else "V",
                                 num_teams)
    msg = "All Rounds properly entered for Round %s" % (
        round_label)
    ready_to_pair = "Yes"
    ready_to_pair_alt = "Checks passed!"
    try:
        outround_tab_logic.have_properly_entered_data(num_teams, type_of_round)
        check_status.append((msg, "Yes", "All rounds look good"))
    except PrevRoundNotEnteredError as e:
        ready_to_pair = "No"
        ready_to_pair_alt = str(e)
        check_status.append(
            (msg, "No", "Not all rounds are entered. %s" % str(e)))
    return render(request, "pairing/pair_round.html", locals())
def get_outround_options(var_teams_to_break,
nov_teams_to_break):
outround_options = []
while not math.log(var_teams_to_break, 2) % 1 == 0:
var_teams_to_break += 1
while not math.log(nov_teams_to_break, 2) % 1 == 0:
nov_teams_to_break += 1
while var_teams_to_break > 1:
if Outround.objects.filter(type_of_round=BreakingTeam.VARSITY,
num_teams=var_teams_to_break).exists():
outround_options.append(
(reverse("outround_pairing_view", kwargs={
"type_of_round": BreakingTeam.VARSITY,
"num_teams": int(var_teams_to_break)}),
"[V] Ro%s" % (int(var_teams_to_break),))
)
var_teams_to_break /= 2
while nov_teams_to_break > 1:
if Outround.objects.filter(type_of_round=BreakingTeam.NOVICE,
num_teams=nov_teams_to_break).exists():
outround_options.append(
(reverse("outround_pairing_view", kwargs={
"type_of_round": BreakingTeam.NOVICE,
"num_teams": int(nov_teams_to_break)}),
"[N] Ro%s" % (int(nov_teams_to_break),))
)
nov_teams_to_break /= 2
return outround_options
@permission_required("tab.tab_settings.can_change", login_url="/403/")
def break_teams(request):
if request.method == "POST":
backup.backup_round("before_the_break_%s" % (timezone.now().strftime("%H:%M"),))
success, msg = outround_tab_logic.perform_the_break()
if success:
return redirect_and_flash_success(
request, msg, path="/outround_pairing"
)
return redirect_and_flash_error(
request, msg, path="/"
)
title = "Pairing Outrounds"
current_round_number = 0
previous_round_number = TabSettings.get("tot_rounds", 5)
check_status = []
msg = "All Rounds properly entered for Round %s" % (
previous_round_number)
ready_to_pair = "Yes"
ready_to_pair_alt = "Checks passed!"
try:
tab_logic.have_properly_entered_data(current_round_number)
check_status.append((msg, "Yes", "All rounds look good"))
except PrevRoundNotEnteredError as e:
ready_to_pair = "No"
ready_to_pair_alt = str(e)
check_status.append(
(msg, "No", "Not all rounds are entered. %s" % str(e)))
except ByeAssignmentError as e:
ready_to_pair = "No"
ready_to_pair_alt = str(e)
check_status.append(
(msg, "No", "You have a bye and results. %s" % str(e)))
except NoShowAssignmentError as e:
ready_to_pair = "No"
ready_to_pair_alt = str(e)
check_status.append(
(msg, "No", "You have a noshow and results. %s" % str(e)))
rooms = outround_tab_logic.have_enough_rooms_before_break()
msg = "N/2 Rooms available Round Out-rounds? Need {0}, have {1}".format(
rooms[1][1], rooms[1][0])
if rooms[0]:
check_status.append((msg, "Yes", "Rooms are checked in"))
else:
check_status.append((msg, "No", "Not enough rooms"))
return render(request, "pairing/pair_round.html", locals())
def outround_pairing_view(request,
type_of_round=BreakingTeam.VARSITY,
num_teams=None):
choice = TabSettings.get("choice", 0)
if num_teams is None:
num_teams = TabSettings.get("var_teams_to_break", 8)
while not math.log(num_teams, 2) % 1 == 0:
num_teams += 1
return redirect("outround_pairing_view",
type_of_round=BreakingTeam.VARSITY,
num_teams=num_teams)
pairing_released = False
if type_of_round == BreakingTeam.VARSITY:
pairing_released = TabSettings.get("var_teams_visible", 256) <= num_teams
elif type_of_round == BreakingTeam.NOVICE:
pairing_released = TabSettings.get("nov_teams_visible", 256) <= num_teams
label = "[%s] Ro%s" % ("V" if type_of_round == BreakingTeam.VARSITY else "N",
num_teams)
nov_teams_to_break = TabSettings.get("nov_teams_to_break")
var_teams_to_break = TabSettings.get("var_teams_to_break")
if not nov_teams_to_break or not var_teams_to_break:
return redirect_and_flash_error(request,
"Please check your break tab settings",
path="/")
outround_options = get_outround_options(var_teams_to_break,
nov_teams_to_break)
outrounds = Outround.objects.filter(type_of_round=type_of_round,
num_teams=num_teams).all()
judges_per_panel = TabSettings.get("var_panel_size", 3) \
if type_of_round == BreakingTeam.VARSITY \
else TabSettings.get("nov_panel_size", 3)
judge_slots = [i for i in range(1, judges_per_panel + 1)]
var_to_nov = TabSettings.get("var_to_nov", 2)
var_to_nov = offset_to_quotient(var_to_nov)
other_round_num = num_teams / var_to_nov
if type_of_round == BreakingTeam.NOVICE:
other_round_num = num_teams * var_to_nov
other_round_type = BreakingTeam.VARSITY \
if type_of_round == BreakingTeam.NOVICE \
else BreakingTeam.NOVICE
pairing_exists = len(outrounds) > 0
lost_outrounds = [t.loser.id for t in Outround.objects.all() if t.loser]
excluded_teams = BreakingTeam.objects.filter(
type_of_team=type_of_round
).exclude(
team__id__in=lost_outrounds
)
excluded_teams = [t.team for t in excluded_teams]
excluded_teams = [t for t in excluded_teams if not Outround.objects.filter(
type_of_round=type_of_round,
num_teams=num_teams,
gov_team=t
).exists()]
excluded_teams = [t for t in excluded_teams if not Outround.objects.filter(
type_of_round=type_of_round,
num_teams=num_teams,
opp_team=t
).exists()]
excluded_judges = Judge.objects.exclude(
judges_outrounds__num_teams=num_teams,
judges_outrounds__type_of_round=type_of_round,
).exclude(
judges_outrounds__type_of_round=other_round_type,
judges_outrounds__num_teams=other_round_num
).filter(
checkin__round_number=0
)
non_checkins = Judge.objects.exclude(
judges_outrounds__num_teams=num_teams,
judges_outrounds__type_of_round=type_of_round
).exclude(
judges_outrounds__type_of_round=other_round_type,
judges_outrounds__num_teams=other_round_num
).exclude(
checkin__round_number=0
)
available_rooms = Room.objects.exclude(
rooms_outrounds__num_teams=num_teams,
rooms_outrounds__type_of_round=type_of_round
).exclude(
rooms_outrounds__num_teams=other_round_num,
rooms_outrounds__type_of_round=other_round_type
)
checked_in_rooms = [r.room for r in RoomCheckIn.objects.filter(round_number=0)]
available_rooms = [r for r in available_rooms if r in checked_in_rooms]
size = max(list(
map(
len,
[excluded_teams, excluded_judges, non_checkins, available_rooms]
)))
warning = 5
excluded_people = list(
zip(*[
x + [""] * (size - len(x)) for x in [
list(excluded_teams),
list(excluded_judges),
list(non_checkins),
list(available_rooms)
]
]))
return render(request,
"outrounds/pairing_base.html",
locals())
def alternative_judges(request, round_id, judge_id=None):
round_obj = Outround.objects.get(id=int(round_id))
round_gov, round_opp = round_obj.gov_team, round_obj.opp_team
try:
current_judge_id = int(judge_id)
current_judge_obj = Judge.objects.get(id=current_judge_id)
current_judge_name = current_judge_obj.name
current_judge_rank = current_judge_obj.rank
except TypeError:
current_judge_id, current_judge_obj, current_judge_rank = "", "", ""
current_judge_name = "No judge"
var_to_nov = TabSettings.get("var_to_nov", 2)
var_to_nov = offset_to_quotient(var_to_nov)
other_round_num = round_obj.num_teams / var_to_nov
if round_obj.type_of_round == BreakingTeam.NOVICE:
other_round_num = round_obj.num_teams * var_to_nov
other_round_type = BreakingTeam.NOVICE \
if round_obj.type_of_round == BreakingTeam.VARSITY \
else BreakingTeam.VARSITY
excluded_judges = Judge.objects.exclude(
judges_outrounds__num_teams=round_obj.num_teams,
judges_outrounds__type_of_round=round_obj.type_of_round
).exclude(
judges_outrounds__num_teams=other_round_num,
judges_outrounds__type_of_round=other_round_type
).filter(
checkin__round_number=0
)
query = Q(
judges_outrounds__num_teams=round_obj.num_teams,
judges_outrounds__type_of_round=round_obj.type_of_round
)
query = query | Q(
judges_outrounds__num_teams=other_round_num,
judges_outrounds__type_of_round=other_round_type
)
included_judges = Judge.objects.filter(query) \
.filter(checkin__round_number=0) \
.distinct()
def can_judge(judge, team1, team2):
query = Q(judge=judge, team=team1) | Q(judge=judge, team=team2)
return not Scratch.objects.filter(query).exists()
excluded_judges = [(j.name, j.id, float(j.rank))
for j in excluded_judges if can_judge(j, round_gov, round_opp)]
included_judges = [(j.name, j.id, float(j.rank))
for j in included_judges if can_judge(j, round_gov, round_opp)]
included_judges = sorted(included_judges, key=lambda x: -x[2])
excluded_judges = sorted(excluded_judges, key=lambda x: -x[2])
return render(request, "pairing/judge_dropdown.html", locals())
def alternative_teams(request, round_id, current_team_id, position):
round_obj = Outround.objects.get(pk=round_id)
current_team = Team.objects.get(pk=current_team_id)
breaking_teams_by_type = [t.team.id
for t in BreakingTeam.objects.filter(
type_of_team=current_team.breaking_team.type_of_team
)]
excluded_teams = Team.objects.filter(
id__in=breaking_teams_by_type
).exclude(
gov_team_outround__num_teams=round_obj.num_teams
).exclude(
opp_team_outround__num_teams=round_obj.num_teams
).exclude(pk=current_team_id)
included_teams = Team.objects.filter(
id__in=breaking_teams_by_type
).exclude(
pk__in=excluded_teams
)
return render(request, "pairing/team_dropdown.html", locals())
@permission_required("tab.tab_settings.can_change", login_url="/403/")
def assign_team(request, round_id, position, team_id):
try:
round_obj = Outround.objects.get(id=int(round_id))
team_obj = Team.objects.get(id=int(team_id))
if position.lower() == "gov":
round_obj.gov_team = team_obj
elif position.lower() == "opp":
round_obj.opp_team = team_obj
else:
raise ValueError("Got invalid position: " + position)
round_obj.save()
data = {
"success": True,
"team": {
"id": team_obj.id,
"name": team_obj.name
},
}
except Exception:
emit_current_exception()
data = {"success": False}
return JsonResponse(data)
@permission_required("tab.tab_settings.can_change", login_url="/403/")
def assign_judge(request, round_id, judge_id, remove_id=None):
try:
round_obj = Outround.objects.get(id=int(round_id))
judge_obj = Judge.objects.get(id=int(judge_id))
round_obj.judges.add(judge_obj)
if remove_id is not None:
remove_obj = Judge.objects.get(id=int(remove_id))
round_obj.judges.remove(remove_obj)
if remove_obj == round_obj.chair:
round_obj.chair = round_obj.judges.order_by("-rank").first()
elif not round_obj.chair:
round_obj.chair = judge_obj
round_obj.save()
data = {
"success": True,
"chair_id": round_obj.chair.id,
"round_id": round_obj.id,
"judge_name": judge_obj.name,
"judge_rank": float(judge_obj.rank),
"judge_id": judge_obj.id
}
except Exception:
emit_current_exception()
data = {"success": False}
return JsonResponse(data)
def enter_result(request,
round_id,
form_class=OutroundResultEntryForm):
round_obj = Outround.objects.get(id=round_id)
redirect_to = reverse("outround_pairing_view",
kwargs={
"num_teams": round_obj.num_teams,
"type_of_round": round_obj.type_of_round
})
if request.method == "POST":
form = form_class(request.POST, round_instance=round_obj)
if form.is_valid():
try:
form.save()
except ValueError:
return redirect_and_flash_error(
request, "Invalid round result, could not remedy.")
return redirect_and_flash_success(request,
"Result entered successfully",
path=redirect_to)
else:
form_kwargs = {"round_instance": round_obj}
form = form_class(**form_kwargs)
return render(
request, "outrounds/ballot.html", {
"form": form,
"title": "Entering Ballot for {}".format(round_obj),
"gov_team": round_obj.gov_team,
"opp_team": round_obj.opp_team,
})
def pretty_pair(request, type_of_round=BreakingTeam.VARSITY, printable=False):
gov_opp_display = TabSettings.get("gov_opp_display", 0)
round_number = 256
if type_of_round == BreakingTeam.VARSITY:
round_number = TabSettings.get("var_teams_visible", 256)
else:
round_number = TabSettings.get("nov_teams_visible", 256)
round_pairing = Outround.objects.filter(
num_teams__gte=round_number,
type_of_round=type_of_round
)
unique_values = round_pairing.values_list("num_teams")
unique_values = list(set([value[0] for value in unique_values]))
unique_values.sort(key=lambda v: v, reverse=True)
outround_pairings = []
for value in unique_values:
lost_outrounds = [t.loser.id for t in Outround.objects.all() if t.loser]
excluded_teams = BreakingTeam.objects.filter(
type_of_team=type_of_round
).exclude(
team__id__in=lost_outrounds
)
excluded_teams = [t.team for t in excluded_teams]
excluded_teams = [t for t in excluded_teams if not Outround.objects.filter(
type_of_round=type_of_round,
num_teams=value,
gov_team=t
).exists()]
excluded_teams = [t for t in excluded_teams if not Outround.objects.filter(
type_of_round=type_of_round,
num_teams=value,
opp_team=t
).exists()]
outround_pairings.append({
"label": "[%s] Ro%s" % ("N" if type_of_round else "V", value),
"rounds": Outround.objects.filter(num_teams=value,
type_of_round=type_of_round),
"excluded": excluded_teams
})
label = "%s Outrounds Pairings" % ("Novice" if type_of_round else "Varsity",)
round_pairing = list(round_pairing)
random.seed(0xBEEF)
random.shuffle(round_pairing)
round_pairing.sort(key=lambda r: r.gov_team.name)
paired_teams = [team.gov_team for team in round_pairing
] + [team.opp_team for team in round_pairing]
team_count = len(paired_teams)
pairing_exists = True
printable = printable
sidelock = TabSettings.get("sidelock", 0)
choice = TabSettings.get("choice", 0)
return render(request, "outrounds/pretty_pairing.html", locals())
def pretty_pair_print(request, type_of_round=BreakingTeam.VARSITY):
return pretty_pair(request, type_of_round, True)
def toggle_pairing_released(request, type_of_round, num_teams):
old = 256
if type_of_round == BreakingTeam.VARSITY:
old = TabSettings.get("var_teams_visible", 256)
if old == num_teams:
TabSettings.set("var_teams_visible", num_teams * 2)
else:
TabSettings.set("var_teams_visible", num_teams)
else:
old = TabSettings.get("nov_teams_visible", 256)
if old == num_teams:
TabSettings.set("nov_teams_visible", num_teams * 2)
else:
TabSettings.set("nov_teams_visible", num_teams)
data = {"success": True, "pairing_released": not old == num_teams}
return JsonResponse(data)
def update_choice(request, outround_id):
outround = get_object_or_404(Outround, pk=outround_id)
outround.choice += 1
if outround.choice == 3:
outround.choice = 0
outround.save()
data = {"success": True,
"data": "%s choice" % (
outround.get_choice_display(),
)}
return JsonResponse(data)
def forum_view(request, type_of_round):
outrounds = Outround.objects.exclude(
victor=Outround.UNKNOWN
).filter(
type_of_round=type_of_round
)
rounds = outrounds.values_list("num_teams")
rounds = [r[0] for r in rounds]
rounds = list(set(rounds))
rounds.sort(key=lambda r: r, reverse=True)
results = []
for _round in rounds:
to_add = {}
to_display = outrounds.filter(num_teams=_round)
to_add["label"] = "[%s] Ro%s" % ("N" if type_of_round else "V", _round)
to_add["results"] = []
for outround in to_display:
to_add["results"] += [
"""[%s] %s (%s, %s) from %s%s (%s) drops to
[%s] %s (%s, %s) from %s%s (%s)""" % (
outround.loser.breaking_team.seed,
outround.loser.display,
outround.loser.debaters.first().name,
outround.loser.debaters.last().name,
outround.loser.school.name,
" / " + outround.loser.hybrid_school.name \
if outround.loser.hybrid_school else "",
"GOV" if outround.loser == outround.gov_team else "OPP",
outround.winner.breaking_team.seed,
outround.winner.display,
outround.winner.debaters.first().name,
outround.winner.debaters.last().name,
outround.winner.school.name,
" / " + outround.winner.hybrid_school.name \
if outround.winner.hybrid_school else "",
"GOV" if outround.winner == outround.gov_team else "OPP",
)
]
results.append(to_add)
return render(request,
"outrounds/forum_result.html",
locals())
| true | true |
f7115f0749cb5df9d54526ef059c6eea994ea859 | 44,980 | py | Python | appdaemon/app_management.py | chadmccune/appdaemon | 73b49575a72fabf66a5186fe0c603fd041a22ee5 | [
"Apache-2.0"
] | null | null | null | appdaemon/app_management.py | chadmccune/appdaemon | 73b49575a72fabf66a5186fe0c603fd041a22ee5 | [
"Apache-2.0"
] | null | null | null | appdaemon/app_management.py | chadmccune/appdaemon | 73b49575a72fabf66a5186fe0c603fd041a22ee5 | [
"Apache-2.0"
] | null | null | null | import sys
import traceback
import uuid
import os
import importlib
import yaml
import subprocess
import cProfile
import io
import pstats
import logging
import asyncio
import appdaemon.utils as utils
from appdaemon.appdaemon import AppDaemon
class AppManagement:
def __init__(self, ad: AppDaemon, config):
self.AD = ad
self.logger = ad.logging.get_child("_app_management")
self.error = ad.logging.get_error()
self.diag = ad.logging.get_diag()
self.monitored_files = {}
self.filter_files = {}
self.modules = {}
self.objects = {}
self.check_app_updates_profile_stats = None
# Initialize config file tracking
self.app_config_file_modified = 0
self.app_config_files = {}
self.module_dirs = []
self.app_config_file_modified = 0
self.app_config = {}
self.global_module_dependencies = {}
self.app_config_file = config
self.apps_initialized = False
# first declare sensors
self.active_apps_sensor = "sensor.active_apps"
self.inactive_apps_sensor = "sensor.inactive_apps"
self.total_apps_sensor = "sensor.total_apps"
# Add Path for adbase
sys.path.insert(0, os.path.dirname(__file__))
#
# Register App Services
#
self.AD.services.register_service("appdaemon", "app", "start", self.manage_services)
self.AD.services.register_service("appdaemon", "app", "stop", self.manage_services)
self.AD.services.register_service("appdaemon", "app", "restart", self.manage_services)
self.AD.services.register_service("appdaemon", "app", "reload", self.manage_services)
self.active_apps = []
self.inactive_apps = []
self.non_apps = ["global_modules", "sequence"]
async def set_state(self, name, **kwargs):
# not a fully qualified entity name
if name.find(".") == -1:
entity_id = "app.{}".format(name)
else:
entity_id = name
await self.AD.state.set_state("_app_management", "admin", entity_id, _silent=True, **kwargs)
async def get_state(self, name, **kwargs):
# not a fully qualified entity name
if name.find(".") == -1:
entity_id = "app.{}".format(name)
else:
entity_id = name
return await self.AD.state.get_state("_app_management", "admin", entity_id, **kwargs)
async def add_entity(self, name, state, attributes):
# not a fully qualified entity name
if name.find(".") == -1:
entity_id = "app.{}".format(name)
else:
entity_id = name
await self.AD.state.add_entity("admin", entity_id, state, attributes)
async def remove_entity(self, name):
await self.AD.state.remove_entity("admin", "app.{}".format(name))
async def init_admin_stats(self):
# create sensors
await self.add_entity(self.active_apps_sensor, 0, {"friendly_name": "Active Apps"})
await self.add_entity(self.inactive_apps_sensor, 0, {"friendly_name": "Inactive Apps"})
await self.add_entity(self.total_apps_sensor, 0, {"friendly_name": "Total Apps"})
    async def terminate(self):
        """Shut down app management, terminating all apps if any were loaded."""
        self.logger.debug("terminate() called for app_management")
        if self.apps_initialized is True:
            await self.check_app_updates(mode="term")
async def dump_objects(self):
self.diag.info("--------------------------------------------------")
self.diag.info("Objects")
self.diag.info("--------------------------------------------------")
for object_ in self.objects.keys():
self.diag.info("%s: %s", object_, self.objects[object_])
self.diag.info("--------------------------------------------------")
async def get_app(self, name):
if name in self.objects:
return self.objects[name]["object"]
else:
return None
def get_app_info(self, name):
if name in self.objects:
return self.objects[name]
else:
return None
async def get_app_instance(self, name, id):
if name in self.objects and self.objects[name]["id"] == id:
return self.AD.app_management.objects[name]["object"]
else:
return None
    async def initialize_app(self, name):
        """Run the app's initialize() entry point and mark it active.

        If the object or its initialize attribute is missing the app is
        counted inactive and skipped.  initialize() may be a plain function
        (run on a worker thread) or a coroutine (awaited directly).  On
        success the app state becomes "idle" and an app_initialized admin
        event fires; on error the state becomes "initialize_error".
        """
        if name in self.objects:
            init = getattr(self.objects[name]["object"], "initialize", None)
            if init is None:
                self.logger.warning("Unable to find initialize() function in module %s - skipped", name)
                await self.increase_inactive_apps(name)
                return
        else:
            self.logger.warning("Unable to find module %s - initialize() skipped", name)
            await self.increase_inactive_apps(name)
            return
        # Call its initialize function
        try:
            if asyncio.iscoroutinefunction(init):
                await init()
            else:
                await utils.run_in_executor(self, init)
            await self.set_state(name, state="idle")
            await self.increase_active_apps(name)
            event_data = {"event_type": "app_initialized", "data": {"app": name}}
            await self.AD.events.process_event("admin", event_data)
        except TypeError:
            # Treated as a bad initialize() signature and reported as such.
            # NOTE(review): a TypeError raised *inside* initialize() lands
            # here too — confirm that is acceptable.
            self.AD.threading.report_callback_sig(name, "initialize", init, {})
        except Exception:
            error_logger = logging.getLogger("Error.{}".format(name))
            error_logger.warning("-" * 60)
            error_logger.warning("Unexpected error running initialize() for %s", name)
            error_logger.warning("-" * 60)
            error_logger.warning(traceback.format_exc())
            error_logger.warning("-" * 60)
            if self.AD.logging.separate_error_log() is True:
                self.logger.warning("Logged an error to %s", self.AD.logging.get_filename("error_log"))
            await self.set_state(name, state="initialize_error")
            await self.increase_inactive_apps(name)
    async def terminate_app(self, name):
        """Call the app's terminate() hook (if any) and tear down its state.

        Removes the object record, clears callbacks/futures/schedules,
        updates the admin sensors, fires app_terminated, and notifies the
        HTTP subsystem.  Errors raised by terminate() are logged but never
        abort the teardown.
        """
        term = None
        if name in self.objects and hasattr(self.objects[name]["object"], "terminate"):
            self.logger.info("Calling terminate() for {}".format(name))
            # Call terminate directly rather than via worker thread
            # so we know terminate has completed before we move on
            term = self.objects[name]["object"].terminate
        if term is not None:
            try:
                if asyncio.iscoroutinefunction(term):
                    await term()
                else:
                    await utils.run_in_executor(self, term)
            except TypeError:
                self.AD.threading.report_callback_sig(name, "terminate", term, {})
            except BaseException:
                # BaseException: even KeyboardInterrupt-style failures in a
                # user hook must not stop the teardown below.
                error_logger = logging.getLogger("Error.{}".format(name))
                error_logger.warning("-" * 60)
                error_logger.warning("Unexpected error running terminate() for %s", name)
                error_logger.warning("-" * 60)
                error_logger.warning(traceback.format_exc())
                error_logger.warning("-" * 60)
                if self.AD.logging.separate_error_log() is True:
                    self.logger.warning(
                        "Logged an error to %s", self.AD.logging.get_filename("error_log"),
                    )
        if name in self.objects:
            del self.objects[name]
        if name in self.global_module_dependencies:
            del self.global_module_dependencies[name]
        await self.increase_inactive_apps(name)
        await self.AD.callbacks.clear_callbacks(name)
        self.AD.futures.cancel_futures(name)
        await self.AD.sched.terminate_app(name)
        await self.set_state(name, state="terminated")
        await self.set_state(name, instancecallbacks=0)
        event_data = {"event_type": "app_terminated", "data": {"app": name}}
        await self.AD.events.process_event("admin", event_data)
        if self.AD.http is not None:
            await self.AD.http.terminate_app(name)
async def start_app(self, app):
await self.init_object(app)
if "disable" in self.app_config[app] and self.app_config[app]["disable"] is True:
pass
else:
await self.initialize_app(app)
    async def stop_app(self, app):
        """Terminate *app*, logging (but swallowing) any teardown error."""
        try:
            self.logger.info("Terminating %s", app)
            await self.terminate_app(app)
        except Exception:
            error_logger = logging.getLogger("Error.{}".format(app))
            error_logger.warning("-" * 60)
            error_logger.warning("Unexpected error terminating app: %s:", app)
            error_logger.warning("-" * 60)
            error_logger.warning(traceback.format_exc())
            error_logger.warning("-" * 60)
            if self.AD.logging.separate_error_log() is True:
                self.logger.warning("Logged an error to %s", self.AD.logging.get_filename("error_log"))
    async def restart_app(self, app):
        """Stop then start *app*, picking up the current code and config."""
        await self.stop_app(app)
        await self.start_app(app)
def get_app_debug_level(self, app):
if app in self.objects:
return self.AD.logging.get_level_from_int(self.objects[app]["object"].logger.getEffectiveLevel())
else:
return "None"
    async def init_object(self, name):
        """Instantiate the app class for *name* (without calling initialize()).

        Validates pin_thread against the configured thread count, imports
        the app's module, resolves the configured class, and records the
        instance plus pinning info and a fresh uuid in self.objects.  Any
        failure marks the app inactive.
        """
        app_args = self.app_config[name]
        self.logger.info(
            "Initializing app %s using class %s from module %s", name, app_args["class"], app_args["module"],
        )
        # Only proceed if the module is one of the monitored source files.
        if self.get_file_from_module(app_args["module"]) is not None:
            if "pin_thread" in app_args:
                if app_args["pin_thread"] < 0 or app_args["pin_thread"] >= self.AD.threading.total_threads:
                    self.logger.warning(
                        "pin_thread out of range ({}) in app definition for {} - app will be discarded".format(
                            app_args["pin_thread"], name
                        )
                    )
                    return
                else:
                    pin = app_args["pin_thread"]
            else:
                # -1 means "no explicit pin"; the threading layer decides.
                pin = -1
            modname = await utils.run_in_executor(self, __import__, app_args["module"])
            app_class = getattr(modname, app_args["class"], None)
            if app_class is None:
                self.logger.warning(
                    "Unable to find class %s in module %s - '%s' is not initialized",
                    app_args["class"],
                    app_args["module"],
                    name,
                )
                await self.increase_inactive_apps(name)
            else:
                self.objects[name] = {
                    "type": "app",
                    "object": app_class(
                        self.AD, name, self.AD.logging, app_args, self.AD.config, self.app_config, self.AD.global_vars,
                    ),
                    "id": uuid.uuid4().hex,
                    "pin_app": self.AD.threading.app_should_be_pinned(name),
                    "pin_thread": pin,
                }
        else:
            # NOTE(review): "module module" typo is in the emitted message;
            # left untouched here because it is runtime output.
            self.logger.warning(
                "Unable to find module module %s - '%s' is not initialized", app_args["module"], name,
            )
            await self.increase_inactive_apps(name)
def init_plugin_object(self, name, object):
self.objects[name] = {
"type": "plugin",
"object": object,
"id": uuid.uuid4().hex,
"pin_app": False,
"pin_thread": -1,
}
async def read_config(self): # noqa: C901
new_config = None
if await utils.run_in_executor(self, os.path.isfile, self.app_config_file):
self.logger.warning(
"apps.yaml in the Config directory is deprecated. Please move apps.yaml to the apps directory."
)
new_config = utils.run_in_executor(self.read_config_file, self.app_config_file)
else:
for root, subdirs, files in os.walk(self.AD.app_dir):
subdirs[:] = [d for d in subdirs if d not in self.AD.exclude_dirs]
if root[-11:] != "__pycache__":
for file in files:
if file[-5:] == ".yaml" and file[0] != ".":
self.logger.debug("Reading %s", os.path.join(root, file))
config = await utils.run_in_executor(self, self.read_config_file, os.path.join(root, file))
valid_apps = {}
if type(config).__name__ == "dict":
for app in config:
if config[app] is not None:
if app == "global_modules":
#
# Check the parameter format for string or list
#
if isinstance(config[app], str):
valid_apps[app] = [config[app]]
elif isinstance(config[app], list):
valid_apps[app] = config[app]
else:
if self.AD.invalid_yaml_warnings:
self.logger.warning(
"global_modules should be a list or a string in File '%s' - ignoring",
file,
)
elif app == "sequence":
#
# We don't care what it looks like just pass it through
#
valid_apps[app] = config[app]
elif (
isinstance(config[app], dict)
and "class" in config[app]
and "module" in config[app]
):
valid_apps[app] = config[app]
else:
if self.AD.invalid_yaml_warnings:
self.logger.warning(
"App '%s' missing 'class' or 'module' entry - ignoring", app,
)
else:
if self.AD.invalid_yaml_warnings:
self.logger.warning(
"File '%s' invalid structure - ignoring", os.path.join(root, file),
)
if new_config is None:
new_config = {}
for app in valid_apps:
if app == "global_modules":
if app in new_config:
new_config[app].extend(valid_apps[app])
continue
if app == "sequence":
if app in new_config:
new_config[app] = {
**new_config[app],
**valid_apps[app],
}
continue
if app in new_config:
self.logger.warning(
"File '%s' duplicate app: %s - ignoring", os.path.join(root, file), app,
)
else:
new_config[app] = valid_apps[app]
await self.AD.sequences.add_sequences(new_config.get("sequence", {}))
return new_config
# Run in executor
def check_later_app_configs(self, last_latest):
if os.path.isfile(self.app_config_file):
ts = os.path.getmtime(self.app_config_file)
return {
"latest": ts,
"files": [{"name": self.app_config_file, "ts": os.path.getmtime(self.app_config_file)}],
}
else:
later_files = {}
app_config_files = []
later_files["files"] = []
later_files["latest"] = last_latest
later_files["deleted"] = []
for root, subdirs, files in os.walk(self.AD.app_dir):
subdirs[:] = [d for d in subdirs if d not in self.AD.exclude_dirs]
if root[-11:] != "__pycache__":
for file in files:
if file[-5:] == ".yaml":
path = os.path.join(root, file)
app_config_files.append(path)
ts = os.path.getmtime(path)
if ts > last_latest:
later_files["files"].append(path)
if ts > later_files["latest"]:
later_files["latest"] = ts
for file in self.app_config_files:
if file not in app_config_files:
later_files["deleted"].append(file)
if self.app_config_files != {}:
for file in app_config_files:
if file not in self.app_config_files:
later_files["files"].append(file)
self.app_config_files = app_config_files
return later_files
    # Run in executor
    def read_config_file(self, file):
        """Parse a single YAML config file; return its content or None on error.

        YAML syntax errors are logged with parser position info; any other
        failure is logged with a traceback.  Both error paths yield None.
        """
        new_config = None
        try:
            with open(file, "r") as yamlfd:
                config_file_contents = yamlfd.read()
            try:
                # SafeLoader: config files must not construct arbitrary objects.
                new_config = yaml.load(config_file_contents, Loader=yaml.SafeLoader)
            except yaml.YAMLError as exc:
                self.logger.warning("Error loading configuration")
                if hasattr(exc, "problem_mark"):
                    if exc.context is not None:
                        self.logger.warning("parser says")
                        self.logger.warning(str(exc.problem_mark))
                        self.logger.warning(str(exc.problem) + " " + str(exc.context))
                    else:
                        self.logger.warning("parser says")
                        self.logger.warning(str(exc.problem_mark))
                        self.logger.warning(str(exc.problem))
            return new_config
        except Exception:
            self.logger.warning("-" * 60)
            self.logger.warning("Unexpected error loading config file: %s", file)
            self.logger.warning("-" * 60)
            self.logger.warning(traceback.format_exc())
            self.logger.warning("-" * 60)
            # Falls through and implicitly returns None.
    # noinspection PyBroadException
    async def check_config(self, silent=False, add_threads=True):  # noqa: C901
        """Re-read app configuration and compute which apps changed.

        Returns a dict with "init" (apps to (re)start), "term" (apps to
        stop), "total" and "active" counts; returns None when the new
        config could not be read or an unexpected error occurred.  Also
        maintains the admin count sensors and, when auto-pinning, grows the
        thread pool to match the active app count.
        """
        terminate_apps = {}
        initialize_apps = {}
        total_apps = len(self.app_config)
        try:
            latest = await utils.run_in_executor(self, self.check_later_app_configs, self.app_config_file_modified)
            self.app_config_file_modified = latest["latest"]
            if latest["files"] or latest["deleted"]:
                if silent is False:
                    self.logger.info("Reading config")
                new_config = await self.read_config()
                if new_config is None:
                    if silent is False:
                        self.logger.warning("New config not applied")
                    return
                for file in latest["deleted"]:
                    if silent is False:
                        self.logger.info("%s deleted", file)
                for file in latest["files"]:
                    if silent is False:
                        self.logger.info("%s added or modified", file)
                # Check for changes
                for name in self.app_config:
                    if name in self.non_apps:
                        continue
                    if name in new_config:
                        if self.app_config[name] != new_config[name]:
                            # Something changed, clear and reload
                            if silent is False:
                                self.logger.info("App '%s' changed", name)
                            terminate_apps[name] = 1
                            initialize_apps[name] = 1
                    else:
                        # Section has been deleted, clear it out
                        if silent is False:
                            self.logger.info("App '{}' deleted".format(name))
                        #
                        # Since the entry has been deleted we can't sensibly determine dependencies
                        # So just immediately terminate it
                        #
                        await self.terminate_app(name)
                        await self.remove_entity(name)
                for name in new_config:
                    if name in self.non_apps:
                        continue
                    if name not in self.app_config:
                        #
                        # New section added!
                        #
                        if "class" in new_config[name] and "module" in new_config[name]:
                            self.logger.info("App '%s' added", name)
                            initialize_apps[name] = 1
                            await self.add_entity(
                                name, "loaded", {"totalcallbacks": 0, "instancecallbacks": 0, "args": new_config[name]},
                            )
                        elif name in self.non_apps:
                            # NOTE(review): unreachable — non_apps names were
                            # already skipped by the continue above.
                            pass
                        else:
                            if self.AD.invalid_yaml_warnings:
                                if silent is False:
                                    self.logger.warning(
                                        "App '%s' missing 'class' or 'module' entry - ignoring", name,
                                    )
                self.app_config = new_config
                total_apps = len(self.app_config)
                for name in self.non_apps:
                    if name in self.app_config:
                        total_apps -= 1  # remove one
                # if silent is False:
                self.logger.info("Found %s total apps", total_apps)
                await self.set_state(self.total_apps_sensor, state=total_apps)
                active_apps = self.get_active_app_count()
                inactive_apps = total_apps - active_apps
                if inactive_apps > 0:
                    self.logger.info("Found %s active apps", active_apps)
                    self.logger.info("Found %s inactive apps", inactive_apps)
            # Now we know if we have any new apps we can create new threads if pinning
            active_apps = self.get_active_app_count()
            if add_threads is True and self.AD.threading.auto_pin is True:
                if active_apps > self.AD.threading.thread_count:
                    for i in range(active_apps - self.AD.threading.thread_count):
                        await self.AD.threading.add_thread(False, True)
            return {
                "init": initialize_apps,
                "term": terminate_apps,
                "total": total_apps,
                "active": active_apps,
            }
        except Exception:
            self.logger.warning("-" * 60)
            self.logger.warning("Unexpected error:")
            self.logger.warning("-" * 60)
            self.logger.warning(traceback.format_exc())
            self.logger.warning("-" * 60)
def get_active_app_count(self):
c = 0
for name in self.app_config:
if "disable" in self.app_config[name] and self.app_config[name]["disable"] is True:
pass
elif name in self.non_apps:
pass
else:
c += 1
return c
def get_app_from_file(self, file):
module = self.get_module_from_path(file)
for app in self.app_config:
if "module" in self.app_config[app] and self.app_config[app]["module"] == module:
return app
return None
# noinspection PyBroadException
# Run in executor
def read_app(self, file, reload=False):
name = os.path.basename(file)
module_name = os.path.splitext(name)[0]
# Import the App
if reload:
self.logger.info("Reloading Module: %s", file)
file, ext = os.path.splitext(name)
#
# Reload
#
try:
importlib.reload(self.modules[module_name])
except KeyError:
if name not in sys.modules:
# Probably failed to compile on initial load
# so we need to re-import not reload
self.read_app(file)
else:
# A real KeyError!
raise
else:
app = self.get_app_from_file(file)
if app is not None:
self.logger.info("Loading App Module: %s", file)
if module_name not in self.modules:
self.modules[module_name] = importlib.import_module(module_name)
else:
# We previously imported it so we need to reload to pick up any potential changes
importlib.reload(self.modules[module_name])
elif "global_modules" in self.app_config and module_name in self.app_config["global_modules"]:
self.logger.info("Loading Global Module: %s", file)
self.modules[module_name] = importlib.import_module(module_name)
else:
if self.AD.missing_app_warnings:
self.logger.warning("No app description found for: %s - ignoring", file)
@staticmethod
def get_module_from_path(path):
name = os.path.basename(path)
module_name = os.path.splitext(name)[0]
return module_name
def get_file_from_module(self, mod):
for file in self.monitored_files:
module_name = self.get_module_from_path(file)
if module_name == mod:
return file
return None
    # Run in executor
    def process_filters(self):
        """Run any configured source filters over the apps directory.

        Each filter maps input_ext -> output_ext via a command line; new or
        modified matching files are transformed by spawning the command with
        $1 (input) and $2 (output) substituted.
        """
        if "filters" in self.AD.config:
            for filter in self.AD.config["filters"]:
                for root, subdirs, files in os.walk(self.AD.app_dir, topdown=True):
                    # print(root, subdirs, files)
                    #
                    # Prune dir list
                    #
                    subdirs[:] = [d for d in subdirs if d not in self.AD.exclude_dirs]
                    ext = filter["input_ext"]
                    extlen = len(ext) * -1
                    for file in files:
                        run = False
                        if file[extlen:] == ext:
                            infile = os.path.join(root, file)
                            modified = os.path.getmtime(infile)
                            if infile in self.filter_files:
                                if self.filter_files[infile] < modified:
                                    run = True
                            else:
                                self.logger.info("Found new filter file %s", infile)
                                run = True
                            if run is True:
                                self.logger.info("Running filter on %s", infile)
                                self.filter_files[infile] = modified
                                # Run the filter
                                outfile = utils.rreplace(infile, ext, filter["output_ext"], 1)
                                command_line = filter["command_line"].replace("$1", infile)
                                command_line = command_line.replace("$2", outfile)
                                try:
                                    # NOTE(review): shell=True on a config-supplied
                                    # command line — acceptable only because filters
                                    # come from the operator's own configuration.
                                    subprocess.Popen(command_line, shell=True)
                                except Exception:
                                    self.logger.warning("-" * 60)
                                    self.logger.warning("Unexpected running filter on: %s:", infile)
                                    self.logger.warning("-" * 60)
                                    self.logger.warning(traceback.format_exc())
                                    self.logger.warning("-" * 60)
@staticmethod
def file_in_modules(file, modules):
for mod in modules:
if mod["name"] == file:
return True
return False
@staticmethod
def check_file(file):
fh = open(file)
fh.close()
    # @_timeit
    async def check_app_updates(self, plugin=None, mode="normal"):  # noqa: C901
        """Scan for app source/config changes and apply them.

        Runs filters, re-reads config, (re)imports changed modules, then
        terminates and (re)initializes affected apps in dependency order.
        *plugin* restarts apps tied to that plugin ("__ALL__" for every
        app); *mode* is "normal", "init" (honour configured priorities) or
        "term" (shutdown — everything is treated as deleted).
        """
        if self.AD.apps is False:
            return
        # Lets add some profiling
        pr = None
        if self.AD.check_app_updates_profile is True:
            pr = cProfile.Profile()
            pr.enable()
        # Process filters
        await utils.run_in_executor(self, self.process_filters)
        # Get list of apps we need to terminate and/or initialize
        # NOTE(review): apps is None if check_config hit an unexpected
        # error; the deleted-module and plugin loops below index it
        # unguarded — TODO confirm.
        apps = await self.check_config()
        found_files = []
        modules = []
        for root, subdirs, files in await utils.run_in_executor(self, os.walk, self.AD.app_dir, topdown=True):
            # print(root, subdirs, files)
            #
            # Prune dir list
            #
            subdirs[:] = [d for d in subdirs if d not in self.AD.exclude_dirs]
            if root[-11:] != "__pycache__":
                if root not in self.module_dirs:
                    self.logger.info("Adding %s to module import path", root)
                    sys.path.insert(0, root)
                    self.module_dirs.append(root)
            for file in files:
                if file[-3:] == ".py":
                    found_files.append(os.path.join(root, file))
        for file in found_files:
            if file == os.path.join(self.AD.app_dir, "__init__.py"):
                continue
            try:
                # check we can actually open the file
                await utils.run_in_executor(self, self.check_file, file)
                modified = await utils.run_in_executor(self, os.path.getmtime, file)
                if file in self.monitored_files:
                    if self.monitored_files[file] < modified:
                        modules.append({"name": file, "reload": True})
                        self.monitored_files[file] = modified
                else:
                    self.logger.debug("Found module %s", file)
                    modules.append({"name": file, "reload": False})
                    self.monitored_files[file] = modified
            except IOError as err:
                self.logger.warning("Unable to read app %s: %s - skipping", file, err)
        # Check for deleted modules and add them to the terminate list
        deleted_modules = []
        for file in self.monitored_files:
            if file not in found_files or mode == "term":
                deleted_modules.append(file)
                self.logger.info("Removing module %s", file)
        for file in deleted_modules:
            del self.monitored_files[file]
            for app in self.apps_per_module(self.get_module_from_path(file)):
                apps["term"][app] = 1
        # Add any apps we need to reload because of file changes
        for module in modules:
            for app in self.apps_per_module(self.get_module_from_path(module["name"])):
                if module["reload"]:
                    apps["term"][app] = 1
                    apps["init"][app] = 1
            if "global_modules" in self.app_config:
                for gm in utils.single_or_list(self.app_config["global_modules"]):
                    if gm == self.get_module_from_path(module["name"]):
                        for app in self.apps_per_global_module(gm):
                            if module["reload"]:
                                apps["term"][app] = 1
                                apps["init"][app] = 1
        if plugin is not None:
            self.logger.info("Processing restart for %s", plugin)
            # This is a restart of one of the plugins so check which apps need to be restarted
            for app in self.app_config:
                reload = False
                if app in self.non_apps:
                    continue
                if "plugin" in self.app_config[app]:
                    for this_plugin in utils.single_or_list(self.app_config[app]["plugin"]):
                        if this_plugin == plugin:
                            # We got a match so do the reload
                            reload = True
                            break
                        elif plugin == "__ALL__":
                            reload = True
                            break
                else:
                    # No plugin dependency specified, reload to error on the side of caution
                    reload = True
                if reload is True:
                    apps["term"][app] = 1
                    apps["init"][app] = 1
        # Terminate apps
        if apps is not None and apps["term"]:
            prio_apps = self.get_app_deps_and_prios(apps["term"], mode)
            # Dependents first: terminate in reverse priority order.
            for app in sorted(prio_apps, key=prio_apps.get, reverse=True):
                await self.stop_app(app)
        # Load/reload modules
        for mod in modules:
            try:
                await utils.run_in_executor(self, self.read_app, mod["name"], mod["reload"])
            except Exception:
                self.error.warning("-" * 60)
                self.error.warning("Unexpected error loading module: %s:", mod["name"])
                self.error.warning("-" * 60)
                self.error.warning(traceback.format_exc())
                self.error.warning("-" * 60)
                if self.AD.logging.separate_error_log() is True:
                    self.logger.warning("Unexpected error loading module: %s:", mod["name"])
                self.logger.warning("Removing associated apps:")
                module = self.get_module_from_path(mod["name"])
                for app in self.app_config:
                    if "module" in self.app_config[app] and self.app_config[app]["module"] == module:
                        if apps["init"] and app in apps["init"]:
                            del apps["init"][app]
                            self.logger.warning("%s", app)
                            await self.set_state(app, state="compile_error")
        if apps is not None and apps["init"]:
            prio_apps = self.get_app_deps_and_prios(apps["init"], mode)
            # Load Apps
            for app in sorted(prio_apps, key=prio_apps.get):
                try:
                    if "disable" in self.app_config[app] and self.app_config[app]["disable"] is True:
                        self.logger.info("%s is disabled", app)
                        await self.set_state(app, state="disabled")
                        await self.increase_inactive_apps(app)
                    else:
                        await self.init_object(app)
                except Exception:
                    error_logger = logging.getLogger("Error.{}".format(app))
                    error_logger.warning("-" * 60)
                    error_logger.warning("Unexpected error initializing app: %s:", app)
                    error_logger.warning("-" * 60)
                    error_logger.warning(traceback.format_exc())
                    error_logger.warning("-" * 60)
                    if self.AD.logging.separate_error_log() is True:
                        self.logger.warning(
                            "Logged an error to %s", self.AD.logging.get_filename("error_log"),
                        )
            await self.AD.threading.calculate_pin_threads()
            # Call initialize() for apps
            for app in sorted(prio_apps, key=prio_apps.get):
                if "disable" in self.app_config[app] and self.app_config[app]["disable"] is True:
                    pass
                else:
                    await self.initialize_app(app)
        if self.AD.check_app_updates_profile is True:
            pr.disable()
            s = io.StringIO()
            sortby = "cumulative"
            ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
            ps.print_stats()
            self.check_app_updates_profile_stats = s.getvalue()
        self.apps_initialized = True
    def get_app_deps_and_prios(self, applist, mode):
        """Return {app: priority} for *applist* plus their dependents.

        Apps that participate in dependencies receive ascending priorities
        just above 50 from a topological sort; standalone apps keep 50 (or
        their configured "priority" when mode == "init").  Lower priority
        loads first; callers reverse-sort for termination.
        """
        # Build a list of modules and their dependencies
        deplist = []
        for app in applist:
            if app not in deplist:
                deplist.append(app)
                self.get_dependent_apps(app, deplist)
        # Need to give the topological sort a full list of apps or it will fail
        full_list = list(self.app_config.keys())
        deps = []
        for app in full_list:
            dependees = []
            if "dependencies" in self.app_config[app]:
                for dep in utils.single_or_list(self.app_config[app]["dependencies"]):
                    if dep in self.app_config:
                        dependees.append(dep)
                    else:
                        self.logger.warning("Unable to find app %s in dependencies for %s", dep, app)
                        self.logger.warning("Ignoring app %s", app)
            deps.append((app, dependees))
        prio_apps = {}
        prio = float(50.1)
        try:
            for app in self.topological_sort(deps):
                if "dependencies" in self.app_config[app] or self.app_has_dependents(app):
                    prio_apps[app] = prio
                    prio += float(0.0001)
                else:
                    if mode == "init" and "priority" in self.app_config[app]:
                        prio_apps[app] = float(self.app_config[app]["priority"])
                    else:
                        prio_apps[app] = float(50)
        except ValueError:
            # Cycle detected; topological_sort already logged the details.
            pass
        # now we remove the ones we aren't interested in
        final_apps = {}
        for app in prio_apps:
            if app in deplist:
                final_apps[app] = prio_apps[app]
        return final_apps
def app_has_dependents(self, name):
for app in self.app_config:
if "dependencies" in self.app_config[app]:
for dep in utils.single_or_list(self.app_config[app]["dependencies"]):
if dep == name:
return True
return False
def get_dependent_apps(self, dependee, deps):
for app in self.app_config:
if "dependencies" in self.app_config[app]:
for dep in utils.single_or_list(self.app_config[app]["dependencies"]):
# print("app= {} dep = {}, dependee = {} deps = {}".format(app, dep, dependee, deps))
if dep == dependee and app not in deps:
deps.append(app)
new_deps = self.get_dependent_apps(app, deps)
if new_deps is not None:
deps.append(new_deps)
def topological_sort(self, source):
pending = [(name, set(deps)) for name, deps in source] # copy deps so we can modify set in-place
emitted = []
while pending:
next_pending = []
next_emitted = []
for entry in pending:
name, deps = entry
deps.difference_update(emitted) # remove deps we emitted last pass
if deps: # still has deps? recheck during next pass
next_pending.append(entry)
else: # no more deps? time to emit
yield name
emitted.append(name) # <-- not required, but helps preserve original ordering
next_emitted.append(name) # remember what we emitted for difference_update() in next pass
if not next_emitted:
# all entries have unmet deps, we have cyclic redundancies
# since we already know all deps are correct
self.logger.warning("Cyclic or missing app dependencies detected")
for pend in next_pending:
deps = ""
for dep in pend[1]:
deps += "{} ".format(dep)
self.logger.warning("%s depends on %s", pend[0], deps)
raise ValueError("cyclic dependency detected")
pending = next_pending
emitted = next_emitted
def apps_per_module(self, module):
apps = []
for app in self.app_config:
if app not in self.non_apps and self.app_config[app]["module"] == module:
apps.append(app)
return apps
    def apps_per_global_module(self, module):
        """Return apps that depend on global module *module*.

        Considers both static global_dependencies from the config and
        dependencies registered at runtime via register_module_dependency().
        NOTE(review): an app present in both sources appears twice in the
        returned list; the caller uses the names as dict keys so the
        duplicates are harmless.
        """
        apps = []
        for app in self.app_config:
            if "global_dependencies" in self.app_config[app]:
                for gm in utils.single_or_list(self.app_config[app]["global_dependencies"]):
                    if gm == module:
                        apps.append(app)
        for app, gms in self.global_module_dependencies.items():
            for gm in gms:
                if gm == module:
                    apps.append(app)
        return apps
    async def register_module_dependency(self, name, *modules):
        """Record that app *name* depends on the given global module(s).

        Each item may be a module name string or an imported module object.
        Only modules listed under global_modules in the config are
        accepted; anything else logs a warning.
        """
        for module in modules:
            module_name = None
            if isinstance(module, str):
                module_name = module
            elif isinstance(module, object) and module.__class__.__name__ == "module":
                # NOTE(review): isinstance(module, object) is always True;
                # the class-name check is what actually detects module objects.
                module_name = module.__name__
            if (
                module_name is not None
                and "global_modules" in self.app_config
                and module_name in self.app_config["global_modules"]
            ):
                if name not in self.global_module_dependencies:
                    self.global_module_dependencies[name] = []
                if module_name not in self.global_module_dependencies[name]:
                    self.global_module_dependencies[name].append(module_name)
            else:
                self.logger.warning(
                    "Module %s not in global_modules in register_module_dependency() for %s", module_name, name,
                )
async def manage_services(self, namespace, domain, service, kwargs):
app = None
if "app" in kwargs:
app = kwargs["app"]
elif service == "reload":
pass
else:
self.logger.warning("App not specified when calling '%s' service. Specify App", service)
return None
if service != "reload" and app not in self.app_config:
self.logger.warning("Specified App '%s' is not a valid App", app)
return None
if service == "start":
await self.start_app(app)
elif service == "stop":
await self.stop_app(app)
elif service == "restart":
await self.restart_app(app)
elif service == "reload":
await self.check_app_updates(mode="init")
async def increase_active_apps(self, name):
if name not in self.active_apps:
self.active_apps.append(name)
if name in self.inactive_apps:
self.inactive_apps.remove(name)
active_apps = len(self.active_apps)
inactive_apps = len(self.inactive_apps)
await self.set_state(self.active_apps_sensor, state=active_apps)
await self.set_state(self.inactive_apps_sensor, state=inactive_apps)
async def increase_inactive_apps(self, name):
if name not in self.inactive_apps:
self.inactive_apps.append(name)
if name in self.active_apps:
self.active_apps.remove(name)
inactive_apps = len(self.inactive_apps)
active_apps = len(self.active_apps)
await self.set_state(self.active_apps_sensor, state=active_apps)
await self.set_state(self.inactive_apps_sensor, state=inactive_apps)
| 40.595668 | 126 | 0.507915 | import sys
import traceback
import uuid
import os
import importlib
import yaml
import subprocess
import cProfile
import io
import pstats
import logging
import asyncio
import appdaemon.utils as utils
from appdaemon.appdaemon import AppDaemon
class AppManagement:
    """Manages the lifecycle of user apps for AppDaemon.

    Responsibilities:
      - read YAML app configuration from the app directory (or the legacy
        top-level apps.yaml),
      - import/reload the Python modules that implement apps,
      - start, stop and restart apps in dependency/priority order,
      - maintain admin sensors tracking total/active/inactive app counts.
    """

    def __init__(self, ad: AppDaemon, config):
        """Initialize bookkeeping and register app-management services.

        :param ad: the owning AppDaemon instance.
        :param config: path to the (legacy) top-level apps.yaml file.
        """
        self.AD = ad
        self.logger = ad.logging.get_child("_app_management")
        self.error = ad.logging.get_error()
        self.diag = ad.logging.get_diag()
        self.monitored_files = {}  # python file path -> last seen mtime
        self.filter_files = {}  # filter input file -> mtime at last filter run
        self.modules = {}  # module name -> imported module object
        self.objects = {}  # app name -> {type, object, id, pin_app, pin_thread}
        self.check_app_updates_profile_stats = None
        self.app_config_file_modified = 0  # newest yaml mtime seen so far
        self.app_config_files = {}
        self.module_dirs = []
        self.app_config = {}
        self.global_module_dependencies = {}  # app name -> [global module names]
        self.app_config_file = config
        self.apps_initialized = False
        self.active_apps_sensor = "sensor.active_apps"
        self.inactive_apps_sensor = "sensor.inactive_apps"
        self.total_apps_sensor = "sensor.total_apps"

        # Make this package importable from app modules.
        sys.path.insert(0, os.path.dirname(__file__))

        # Register the app lifecycle services; all are routed through
        # manage_services() which dispatches on the service name.
        self.AD.services.register_service("appdaemon", "app", "start", self.manage_services)
        self.AD.services.register_service("appdaemon", "app", "stop", self.manage_services)
        self.AD.services.register_service("appdaemon", "app", "restart", self.manage_services)
        self.AD.services.register_service("appdaemon", "app", "reload", self.manage_services)

        self.active_apps = []
        self.inactive_apps = []
        # Config keys that are not apps and must be skipped by app loops.
        self.non_apps = ["global_modules", "sequence"]

    async def set_state(self, name, **kwargs):
        """Set admin state for an app; bare names are prefixed with 'app.'."""
        if name.find(".") == -1:
            entity_id = "app.{}".format(name)
        else:
            entity_id = name

        await self.AD.state.set_state("_app_management", "admin", entity_id, _silent=True, **kwargs)

    async def get_state(self, name, **kwargs):
        """Get admin state for an app; bare names are prefixed with 'app.'."""
        if name.find(".") == -1:
            entity_id = "app.{}".format(name)
        else:
            entity_id = name

        return await self.AD.state.get_state("_app_management", "admin", entity_id, **kwargs)

    async def add_entity(self, name, state, attributes):
        """Create an admin entity for an app; bare names are prefixed with 'app.'."""
        if name.find(".") == -1:
            entity_id = "app.{}".format(name)
        else:
            entity_id = name

        await self.AD.state.add_entity("admin", entity_id, state, attributes)

    async def remove_entity(self, name):
        """Remove the admin entity for the named app."""
        await self.AD.state.remove_entity("admin", "app.{}".format(name))

    async def init_admin_stats(self):
        """Create the active/inactive/total app counter sensors."""
        await self.add_entity(self.active_apps_sensor, 0, {"friendly_name": "Active Apps"})
        await self.add_entity(self.inactive_apps_sensor, 0, {"friendly_name": "Inactive Apps"})
        await self.add_entity(self.total_apps_sensor, 0, {"friendly_name": "Total Apps"})

    async def terminate(self):
        """Shut down app management, terminating all running apps."""
        self.logger.debug("terminate() called for app_management")
        if self.apps_initialized is True:
            await self.check_app_updates(mode="term")

    async def dump_objects(self):
        """Write the current app object table to the diagnostic log."""
        self.diag.info("--------------------------------------------------")
        self.diag.info("Objects")
        self.diag.info("--------------------------------------------------")
        for object_ in self.objects.keys():
            self.diag.info("%s: %s", object_, self.objects[object_])
        self.diag.info("--------------------------------------------------")

    async def get_app(self, name):
        """Return the live app object for *name*, or None if not loaded."""
        if name in self.objects:
            return self.objects[name]["object"]
        else:
            return None

    def get_app_info(self, name):
        """Return the full bookkeeping record for *name*, or None."""
        if name in self.objects:
            return self.objects[name]
        else:
            return None

    async def get_app_instance(self, name, id):
        """Return the app object only if its instance id matches *id*."""
        if name in self.objects and self.objects[name]["id"] == id:
            return self.AD.app_management.objects[name]["object"]
        else:
            return None

    async def initialize_app(self, name):
        """Call the app's initialize() entry point and mark it active.

        Apps without an initialize() function, or whose initialize() raises,
        are counted as inactive.
        """
        if name in self.objects:
            init = getattr(self.objects[name]["object"], "initialize", None)
            if init is None:
                self.logger.warning("Unable to find initialize() function in module %s - skipped", name)
                await self.increase_inactive_apps(name)
                return
        else:
            self.logger.warning("Unable to find module %s - initialize() skipped", name)
            await self.increase_inactive_apps(name)
            return

        # Call its initialize function; run sync initializers in the executor.
        try:
            if asyncio.iscoroutinefunction(init):
                await init()
            else:
                await utils.run_in_executor(self, init)
            await self.set_state(name, state="idle")
            await self.increase_active_apps(name)

            event_data = {"event_type": "app_initialized", "data": {"app": name}}
            await self.AD.events.process_event("admin", event_data)

        except TypeError:
            self.AD.threading.report_callback_sig(name, "initialize", init, {})
        except Exception:
            error_logger = logging.getLogger("Error.{}".format(name))
            error_logger.warning("-" * 60)
            error_logger.warning("Unexpected error running initialize() for %s", name)
            error_logger.warning("-" * 60)
            error_logger.warning(traceback.format_exc())
            error_logger.warning("-" * 60)
            if self.AD.logging.separate_error_log() is True:
                self.logger.warning("Logged an error to %s", self.AD.logging.get_filename("error_log"))
            await self.set_state(name, state="initialize_error")
            await self.increase_inactive_apps(name)

    async def terminate_app(self, name):
        """Call the app's terminate() (if any) and tear down its state.

        Clears callbacks, futures and scheduler entries for the app, marks it
        terminated, and fires an app_terminated admin event.
        """
        term = None
        if name in self.objects and hasattr(self.objects[name]["object"], "terminate"):
            self.logger.info("Calling terminate() for {}".format(name))
            term = self.objects[name]["object"].terminate

        if term is not None:
            try:
                if asyncio.iscoroutinefunction(term):
                    await term()
                else:
                    await utils.run_in_executor(self, term)
            except TypeError:
                self.AD.threading.report_callback_sig(name, "terminate", term, {})
            except BaseException:
                error_logger = logging.getLogger("Error.{}".format(name))
                error_logger.warning("-" * 60)
                error_logger.warning("Unexpected error running terminate() for %s", name)
                error_logger.warning("-" * 60)
                error_logger.warning(traceback.format_exc())
                error_logger.warning("-" * 60)
                if self.AD.logging.separate_error_log() is True:
                    self.logger.warning(
                        "Logged an error to %s", self.AD.logging.get_filename("error_log"),
                    )

        if name in self.objects:
            del self.objects[name]

        if name in self.global_module_dependencies:
            del self.global_module_dependencies[name]

        await self.increase_inactive_apps(name)

        await self.AD.callbacks.clear_callbacks(name)

        self.AD.futures.cancel_futures(name)

        await self.AD.sched.terminate_app(name)

        await self.set_state(name, state="terminated")
        await self.set_state(name, instancecallbacks=0)

        event_data = {"event_type": "app_terminated", "data": {"app": name}}

        await self.AD.events.process_event("admin", event_data)

        if self.AD.http is not None:
            await self.AD.http.terminate_app(name)

    async def start_app(self, app):
        """Instantiate the app and, unless disabled, run its initialize()."""
        await self.init_object(app)

        if "disable" in self.app_config[app] and self.app_config[app]["disable"] is True:
            pass
        else:
            await self.initialize_app(app)

    async def stop_app(self, app):
        """Terminate an app, logging (not raising) any unexpected error."""
        try:
            self.logger.info("Terminating %s", app)
            await self.terminate_app(app)
        except Exception:
            error_logger = logging.getLogger("Error.{}".format(app))
            error_logger.warning("-" * 60)
            error_logger.warning("Unexpected error terminating app: %s:", app)
            error_logger.warning("-" * 60)
            error_logger.warning(traceback.format_exc())
            error_logger.warning("-" * 60)
            if self.AD.logging.separate_error_log() is True:
                self.logger.warning("Logged an error to %s", self.AD.logging.get_filename("error_log"))

    async def restart_app(self, app):
        """Stop then start the named app."""
        await self.stop_app(app)
        await self.start_app(app)

    def get_app_debug_level(self, app):
        """Return the effective log level name for an app, or "None"."""
        if app in self.objects:
            return self.AD.logging.get_level_from_int(self.objects[app]["object"].logger.getEffectiveLevel())
        else:
            return "None"

    async def init_object(self, name):
        """Import the app's class and create its object record.

        Validates pin_thread and records the new instance in self.objects;
        apps whose module or class cannot be found are counted inactive.
        """
        app_args = self.app_config[name]
        self.logger.info(
            "Initializing app %s using class %s from module %s", name, app_args["class"], app_args["module"],
        )

        if self.get_file_from_module(app_args["module"]) is not None:

            if "pin_thread" in app_args:
                if app_args["pin_thread"] < 0 or app_args["pin_thread"] >= self.AD.threading.total_threads:
                    self.logger.warning(
                        "pin_thread out of range ({}) in app definition for {} - app will be discarded".format(
                            app_args["pin_thread"], name
                        )
                    )
                    return
                else:
                    pin = app_args["pin_thread"]
            else:
                pin = -1

            modname = await utils.run_in_executor(self, __import__, app_args["module"])
            app_class = getattr(modname, app_args["class"], None)
            if app_class is None:
                self.logger.warning(
                    "Unable to find class %s in module %s - '%s' is not initialized",
                    app_args["class"],
                    app_args["module"],
                    name,
                )
                await self.increase_inactive_apps(name)
            else:
                self.objects[name] = {
                    "type": "app",
                    "object": app_class(
                        self.AD, name, self.AD.logging, app_args, self.AD.config, self.app_config, self.AD.global_vars,
                    ),
                    "id": uuid.uuid4().hex,
                    "pin_app": self.AD.threading.app_should_be_pinned(name),
                    "pin_thread": pin,
                }

        else:
            # Fixed duplicated word in the original message ("module module").
            self.logger.warning(
                "Unable to find module %s - '%s' is not initialized", app_args["module"], name,
            )
            await self.increase_inactive_apps(name)

    def init_plugin_object(self, name, object):
        """Record a plugin object; plugins are never pinned to a thread."""
        self.objects[name] = {
            "type": "plugin",
            "object": object,
            "id": uuid.uuid4().hex,
            "pin_app": False,
            "pin_thread": -1,
        }

    async def read_config(self):
        """Read and merge all app YAML files, returning the combined config.

        Supports the deprecated single apps.yaml in the config directory, or
        (normally) every .yaml file under the app directory. Invalid entries
        are warned about and skipped; duplicate apps are ignored.
        """
        new_config = None

        if await utils.run_in_executor(self, os.path.isfile, self.app_config_file):
            self.logger.warning(
                "apps.yaml in the Config directory is deprecated. Please move apps.yaml to the apps directory."
            )
            # BUGFIX: the original called utils.run_in_executor() without
            # awaiting it and without passing `self` as the first argument,
            # so new_config was a coroutine object rather than the parsed
            # YAML. Match the call convention used everywhere else.
            new_config = await utils.run_in_executor(self, self.read_config_file, self.app_config_file)
        else:
            for root, subdirs, files in os.walk(self.AD.app_dir):
                subdirs[:] = [d for d in subdirs if d not in self.AD.exclude_dirs]
                if root[-11:] != "__pycache__":
                    for file in files:
                        if file[-5:] == ".yaml" and file[0] != ".":
                            self.logger.debug("Reading %s", os.path.join(root, file))
                            config = await utils.run_in_executor(self, self.read_config_file, os.path.join(root, file))
                            valid_apps = {}
                            if type(config).__name__ == "dict":
                                for app in config:
                                    if config[app] is not None:
                                        if app == "global_modules":
                                            # May be a single name or a list of names.
                                            if isinstance(config[app], str):
                                                valid_apps[app] = [config[app]]
                                            elif isinstance(config[app], list):
                                                valid_apps[app] = config[app]
                                            else:
                                                if self.AD.invalid_yaml_warnings:
                                                    self.logger.warning(
                                                        "global_modules should be a list or a string in File '%s' - ignoring",
                                                        file,
                                                    )
                                        elif app == "sequence":
                                            # Sequences are passed through as-is.
                                            valid_apps[app] = config[app]
                                        elif (
                                            isinstance(config[app], dict)
                                            and "class" in config[app]
                                            and "module" in config[app]
                                        ):
                                            valid_apps[app] = config[app]
                                        else:
                                            if self.AD.invalid_yaml_warnings:
                                                self.logger.warning(
                                                    "App '%s' missing 'class' or 'module' entry - ignoring", app,
                                                )
                            else:
                                if self.AD.invalid_yaml_warnings:
                                    self.logger.warning(
                                        "File '%s' invalid structure - ignoring", os.path.join(root, file),
                                    )

                            if new_config is None:
                                new_config = {}
                            for app in valid_apps:
                                if app == "global_modules":
                                    # Merge global module lists across files.
                                    if app in new_config:
                                        new_config[app].extend(valid_apps[app])
                                        continue
                                if app == "sequence":
                                    # Merge sequence dicts across files.
                                    if app in new_config:
                                        new_config[app] = {
                                            **new_config[app],
                                            **valid_apps[app],
                                        }
                                        continue

                                if app in new_config:
                                    self.logger.warning(
                                        "File '%s' duplicate app: %s - ignoring", os.path.join(root, file), app,
                                    )
                                else:
                                    new_config[app] = valid_apps[app]

        await self.AD.sequences.add_sequences(new_config.get("sequence", {}))

        return new_config

    # Run in executor
    def check_later_app_configs(self, last_latest):
        """Return yaml files changed since *last_latest*, plus deletions.

        Result keys: "latest" (newest mtime seen), "files" (changed/added
        paths), "deleted" (paths that disappeared since the last scan).
        """
        if os.path.isfile(self.app_config_file):
            ts = os.path.getmtime(self.app_config_file)
            # BUGFIX: include "deleted" here too - check_config() reads
            # latest["deleted"] unconditionally, which raised KeyError on
            # the legacy apps.yaml path.
            return {
                "latest": ts,
                "files": [{"name": self.app_config_file, "ts": os.path.getmtime(self.app_config_file)}],
                "deleted": [],
            }
        else:
            later_files = {}
            app_config_files = []
            later_files["files"] = []
            later_files["latest"] = last_latest
            later_files["deleted"] = []
            for root, subdirs, files in os.walk(self.AD.app_dir):
                subdirs[:] = [d for d in subdirs if d not in self.AD.exclude_dirs]
                if root[-11:] != "__pycache__":
                    for file in files:
                        if file[-5:] == ".yaml":
                            path = os.path.join(root, file)
                            app_config_files.append(path)
                            ts = os.path.getmtime(path)
                            if ts > last_latest:
                                later_files["files"].append(path)
                            if ts > later_files["latest"]:
                                later_files["latest"] = ts

            for file in self.app_config_files:
                if file not in app_config_files:
                    later_files["deleted"].append(file)

            if self.app_config_files != {}:
                for file in app_config_files:
                    if file not in self.app_config_files:
                        later_files["files"].append(file)

            self.app_config_files = app_config_files

            return later_files

    # Run in executor
    def read_config_file(self, file):
        """Parse a single YAML file; returns None if it cannot be parsed."""
        new_config = None
        try:
            with open(file, "r") as yamlfd:
                config_file_contents = yamlfd.read()

            try:
                new_config = yaml.load(config_file_contents, Loader=yaml.SafeLoader)

            except yaml.YAMLError as exc:
                self.logger.warning("Error loading configuration")
                if hasattr(exc, "problem_mark"):
                    if exc.context is not None:
                        self.logger.warning("parser says")
                        self.logger.warning(str(exc.problem_mark))
                        self.logger.warning(str(exc.problem) + " " + str(exc.context))
                    else:
                        self.logger.warning("parser says")
                        self.logger.warning(str(exc.problem_mark))
                        self.logger.warning(str(exc.problem))

            return new_config

        except Exception:
            self.logger.warning("-" * 60)
            self.logger.warning("Unexpected error loading config file: %s", file)
            self.logger.warning("-" * 60)
            self.logger.warning(traceback.format_exc())
            self.logger.warning("-" * 60)

    # noinspection PyBroadException
    async def check_config(self, silent=False, add_threads=True):  # noqa: C901
        """Re-read app config and compute which apps to terminate/initialize.

        Returns {"init": {...}, "term": {...}, "total": n, "active": n}, or
        None on unexpected error. Deleted apps are terminated immediately.
        """
        terminate_apps = {}
        initialize_apps = {}
        total_apps = len(self.app_config)

        try:
            latest = await utils.run_in_executor(self, self.check_later_app_configs, self.app_config_file_modified)
            self.app_config_file_modified = latest["latest"]

            if latest["files"] or latest["deleted"]:
                if silent is False:
                    self.logger.info("Reading config")
                new_config = await self.read_config()
                if new_config is None:
                    if silent is False:
                        self.logger.warning("New config not applied")
                    return

                for file in latest["deleted"]:
                    if silent is False:
                        self.logger.info("%s deleted", file)

                for file in latest["files"]:
                    if silent is False:
                        self.logger.info("%s added or modified", file)

                # Check for changes

                for name in self.app_config:
                    if name in self.non_apps:
                        continue

                    if name in new_config:
                        if self.app_config[name] != new_config[name]:
                            # Something changed, clear and reload

                            if silent is False:
                                self.logger.info("App '%s' changed", name)
                            terminate_apps[name] = 1
                            initialize_apps[name] = 1
                    else:
                        # Section has been deleted, clear it out

                        if silent is False:
                            self.logger.info("App '{}' deleted".format(name))
                        #
                        # Since the entry has been deleted we can't sensibly determine dependencies
                        # So just immediately terminate it
                        #
                        await self.terminate_app(name)
                        await self.remove_entity(name)

                for name in new_config:
                    if name in self.non_apps:
                        continue

                    if name not in self.app_config:
                        #
                        # New section added!
                        #
                        if "class" in new_config[name] and "module" in new_config[name]:
                            self.logger.info("App '%s' added", name)
                            initialize_apps[name] = 1
                            await self.add_entity(
                                name, "loaded", {"totalcallbacks": 0, "instancecallbacks": 0, "args": new_config[name]},
                            )
                        elif name in self.non_apps:
                            pass
                        else:
                            if self.AD.invalid_yaml_warnings:
                                if silent is False:
                                    self.logger.warning(
                                        "App '%s' missing 'class' or 'module' entry - ignoring", name,
                                    )

                self.app_config = new_config
                total_apps = len(self.app_config)

                for name in self.non_apps:
                    if name in self.app_config:
                        total_apps -= 1  # remove non-app entries from the count

                self.logger.info("Found %s total apps", total_apps)

                await self.set_state(self.total_apps_sensor, state=total_apps)

                active_apps = self.get_active_app_count()
                inactive_apps = total_apps - active_apps
                if inactive_apps > 0:
                    self.logger.info("Found %s active apps", active_apps)
                    self.logger.info("Found %s inactive apps", inactive_apps)

            active_apps = self.get_active_app_count()

            if add_threads is True and self.AD.threading.auto_pin is True:
                if active_apps > self.AD.threading.thread_count:
                    for i in range(active_apps - self.AD.threading.thread_count):
                        await self.AD.threading.add_thread(False, True)

            return {
                "init": initialize_apps,
                "term": terminate_apps,
                "total": total_apps,
                "active": active_apps,
            }
        except Exception:
            self.logger.warning("-" * 60)
            self.logger.warning("Unexpected error:")
            self.logger.warning("-" * 60)
            self.logger.warning(traceback.format_exc())
            self.logger.warning("-" * 60)

    def get_active_app_count(self):
        """Count configured apps that are neither disabled nor non-app keys."""
        c = 0
        for name in self.app_config:
            if "disable" in self.app_config[name] and self.app_config[name]["disable"] is True:
                pass
            elif name in self.non_apps:
                pass
            else:
                c += 1
        return c

    def get_app_from_file(self, file):
        """Return the first app whose configured module matches *file*."""
        module = self.get_module_from_path(file)
        for app in self.app_config:
            if "module" in self.app_config[app] and self.app_config[app]["module"] == module:
                return app
        return None

    # noinspection PyBroadException
    def read_app(self, file, reload=False):
        """Import (or reload) the Python module behind an app file.

        Modules with no configured app and not listed in global_modules are
        skipped with a warning.
        """
        name = os.path.basename(file)
        module_name = os.path.splitext(name)[0]
        if reload:
            self.logger.info("Reloading Module: %s", file)

            file, ext = os.path.splitext(name)

            try:
                importlib.reload(self.modules[module_name])
            except KeyError:
                if name not in sys.modules:
                    # Not laoded, so load it the normal way.
                    # NOTE(review): this recurses with the bare module name,
                    # not the original path - verify against upstream intent.
                    self.read_app(file)
                else:
                    raise
        else:
            app = self.get_app_from_file(file)
            if app is not None:
                self.logger.info("Loading App Module: %s", file)
                if module_name not in self.modules:
                    self.modules[module_name] = importlib.import_module(module_name)
                else:
                    # Module already loaded, so reload to pick up changes.
                    importlib.reload(self.modules[module_name])
            elif "global_modules" in self.app_config and module_name in self.app_config["global_modules"]:
                self.logger.info("Loading Global Module: %s", file)
                self.modules[module_name] = importlib.import_module(module_name)
            else:
                if self.AD.missing_app_warnings:
                    self.logger.warning("No app description found for: %s - ignoring", file)

    @staticmethod
    def get_module_from_path(path):
        """Return the module name (basename without extension) of *path*."""
        name = os.path.basename(path)
        module_name = os.path.splitext(name)[0]
        return module_name

    def get_file_from_module(self, mod):
        """Return the monitored file path implementing module *mod*, or None."""
        for file in self.monitored_files:
            module_name = self.get_module_from_path(file)
            if module_name == mod:
                return file
        return None

    # Run in executor
    def process_filters(self):
        """Run configured file filters over newly modified input files."""
        if "filters" in self.AD.config:
            for filter in self.AD.config["filters"]:

                for root, subdirs, files in os.walk(self.AD.app_dir, topdown=True):
                    # print(root, subdirs, files)
                    #
                    # Prune dir list
                    #
                    subdirs[:] = [d for d in subdirs if d not in self.AD.exclude_dirs]

                    ext = filter["input_ext"]
                    extlen = len(ext) * -1

                    for file in files:
                        run = False
                        if file[extlen:] == ext:
                            infile = os.path.join(root, file)
                            modified = os.path.getmtime(infile)
                            if infile in self.filter_files:
                                if self.filter_files[infile] < modified:
                                    run = True
                            else:
                                self.logger.info("Found new filter file %s", infile)
                                run = True

                            if run is True:
                                self.logger.info("Running filter on %s", infile)
                                self.filter_files[infile] = modified

                                # Run the filter
                                outfile = utils.rreplace(infile, ext, filter["output_ext"], 1)
                                command_line = filter["command_line"].replace("$1", infile)
                                command_line = command_line.replace("$2", outfile)
                                try:
                                    subprocess.Popen(command_line, shell=True)
                                except Exception:
                                    self.logger.warning("-" * 60)
                                    self.logger.warning("Unexpected running filter on: %s:", infile)
                                    self.logger.warning("-" * 60)
                                    self.logger.warning(traceback.format_exc())
                                    self.logger.warning("-" * 60)

    @staticmethod
    def file_in_modules(file, modules):
        """Return True if *file* appears as a "name" in *modules* records."""
        for mod in modules:
            if mod["name"] == file:
                return True
        return False

    @staticmethod
    def check_file(file):
        """Raise IOError if *file* cannot be opened for reading."""
        # Use a context manager so the handle is closed even if open() is
        # interrupted between open and close.
        with open(file):
            pass

    # noinspection PyBroadException
    async def check_app_updates(self, plugin=None, mode="normal"):  # noqa: C901
        """Scan for changed modules/config and restart affected apps.

        :param plugin: if set, also restart apps bound to that plugin
                       ("__ALL__" restarts everything).
        :param mode: "normal", "init" (honor priorities) or "term"
                     (terminate everything).
        """
        if self.AD.apps is False:
            return

        # Lets add some profiling
        pr = None
        if self.AD.check_app_updates_profile is True:
            pr = cProfile.Profile()
            pr.enable()

        # Process filters
        await utils.run_in_executor(self, self.process_filters)

        # Get list of apps we need to terminate and/or initialize
        apps = await self.check_config()

        found_files = []
        modules = []
        for root, subdirs, files in await utils.run_in_executor(self, os.walk, self.AD.app_dir, topdown=True):
            # print(root, subdirs, files)
            #
            # Prune dir list
            #
            subdirs[:] = [d for d in subdirs if d not in self.AD.exclude_dirs]

            if root[-11:] != "__pycache__":
                if root not in self.module_dirs:
                    self.logger.info("Adding %s to module import path", root)
                    sys.path.insert(0, root)
                    self.module_dirs.append(root)

            for file in files:
                if file[-3:] == ".py":
                    found_files.append(os.path.join(root, file))

        for file in found_files:
            if file == os.path.join(self.AD.app_dir, "__init__.py"):
                continue
            try:

                # check we can actually open the file
                await utils.run_in_executor(self, self.check_file, file)

                modified = await utils.run_in_executor(self, os.path.getmtime, file)

                if file in self.monitored_files:
                    if self.monitored_files[file] < modified:
                        modules.append({"name": file, "reload": True})
                        self.monitored_files[file] = modified
                else:
                    self.logger.debug("Found module %s", file)
                    modules.append({"name": file, "reload": False})
                    self.monitored_files[file] = modified
            except IOError as err:
                self.logger.warning("Unable to read app %s: %s - skipping", file, err)

        # Check for deleted modules and add them to the terminate list
        deleted_modules = []
        for file in self.monitored_files:
            if file not in found_files or mode == "term":
                deleted_modules.append(file)
                self.logger.info("Removing module %s", file)

        for file in deleted_modules:
            del self.monitored_files[file]
            for app in self.apps_per_module(self.get_module_from_path(file)):
                apps["term"][app] = 1

        # Add any apps we need to reload because of file changes
        for module in modules:
            for app in self.apps_per_module(self.get_module_from_path(module["name"])):
                if module["reload"]:
                    apps["term"][app] = 1
                apps["init"][app] = 1

            if "global_modules" in self.app_config:
                for gm in utils.single_or_list(self.app_config["global_modules"]):
                    if gm == self.get_module_from_path(module["name"]):
                        for app in self.apps_per_global_module(gm):
                            if module["reload"]:
                                apps["term"][app] = 1
                            apps["init"][app] = 1

        if plugin is not None:
            self.logger.info("Processing restart for %s", plugin)
            # This is a restart of one of the plugins so check which apps
            # are affected
            for app in self.app_config:
                reload = False
                if app in self.non_apps:
                    continue

                if "plugin" in self.app_config[app]:
                    for this_plugin in utils.single_or_list(self.app_config[app]["plugin"]):
                        if this_plugin == plugin:
                            # We got a match so do the reload
                            reload = True
                            break
                        elif plugin == "__ALL__":
                            reload = True
                            break
                else:
                    # No plugin dependency specified, reload to err on the
                    # side of caution
                    reload = True

                if reload is True:
                    apps["term"][app] = 1
                    apps["init"][app] = 1

        # Terminate apps in reverse dependency order
        if apps is not None and apps["term"]:

            prio_apps = self.get_app_deps_and_prios(apps["term"], mode)

            for app in sorted(prio_apps, key=prio_apps.get, reverse=True):
                await self.stop_app(app)

        # Load/reload modules
        for mod in modules:
            try:
                await utils.run_in_executor(self, self.read_app, mod["name"], mod["reload"])
            except Exception:
                self.error.warning("-" * 60)
                self.error.warning("Unexpected error loading module: %s:", mod["name"])
                self.error.warning("-" * 60)
                self.error.warning(traceback.format_exc())
                self.error.warning("-" * 60)
                if self.AD.logging.separate_error_log() is True:
                    self.logger.warning("Unexpected error loading module: %s:", mod["name"])

                self.logger.warning("Removing associated apps:")
                module = self.get_module_from_path(mod["name"])
                for app in self.app_config:
                    if "module" in self.app_config[app] and self.app_config[app]["module"] == module:
                        if apps["init"] and app in apps["init"]:
                            del apps["init"][app]
                            self.logger.warning("%s", app)
                            await self.set_state(app, state="compile_error")

        if apps is not None and apps["init"]:

            prio_apps = self.get_app_deps_and_prios(apps["init"], mode)

            # Instantiate objects in dependency/priority order
            for app in sorted(prio_apps, key=prio_apps.get):
                try:
                    if "disable" in self.app_config[app] and self.app_config[app]["disable"] is True:
                        self.logger.info("%s is disabled", app)
                        await self.set_state(app, state="disabled")
                        await self.increase_inactive_apps(app)
                    else:
                        await self.init_object(app)
                except Exception:
                    error_logger = logging.getLogger("Error.{}".format(app))
                    error_logger.warning("-" * 60)
                    error_logger.warning("Unexpected error initializing app: %s:", app)
                    error_logger.warning("-" * 60)
                    error_logger.warning(traceback.format_exc())
                    error_logger.warning("-" * 60)
                    if self.AD.logging.separate_error_log() is True:
                        self.logger.warning(
                            "Logged an error to %s", self.AD.logging.get_filename("error_log"),
                        )

            await self.AD.threading.calculate_pin_threads()

            # Call initialize() in the same order
            for app in sorted(prio_apps, key=prio_apps.get):
                if "disable" in self.app_config[app] and self.app_config[app]["disable"] is True:
                    pass
                else:
                    await self.initialize_app(app)

        if self.AD.check_app_updates_profile is True:
            pr.disable()

        s = io.StringIO()
        sortby = "cumulative"
        ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
        ps.print_stats()
        self.check_app_updates_profile_stats = s.getvalue()

        self.apps_initialized = True

    def get_app_deps_and_prios(self, applist, mode):
        """Return {app: priority} for *applist* plus any dependent apps.

        Apps participating in dependencies get synthetic priorities just
        above 50 in topological order; standalone apps keep priority 50 (or
        their configured "priority" in init mode).
        """
        # Build a list of modules and their dependencies
        deplist = []
        for app in applist:
            if app not in deplist:
                deplist.append(app)
            self.get_dependent_apps(app, deplist)

        # Need to give the topological sort a full list of apps or it will
        # fail
        full_list = list(self.app_config.keys())

        deps = []

        for app in full_list:
            dependees = []
            if "dependencies" in self.app_config[app]:
                for dep in utils.single_or_list(self.app_config[app]["dependencies"]):
                    if dep in self.app_config:
                        dependees.append(dep)
                    else:
                        self.logger.warning("Unable to find app %s in dependencies for %s", dep, app)
                        self.logger.warning("Ignoring app %s", app)
            deps.append((app, dependees))

        prio_apps = {}
        prio = float(50.1)
        try:
            for app in self.topological_sort(deps):
                if "dependencies" in self.app_config[app] or self.app_has_dependents(app):
                    prio_apps[app] = prio
                    prio += float(0.0001)
                else:
                    if mode == "init" and "priority" in self.app_config[app]:
                        prio_apps[app] = float(self.app_config[app]["priority"])
                    else:
                        prio_apps[app] = float(50)
        except ValueError:
            # Cyclic dependency - already logged by topological_sort()
            pass

        # now we remove the ones we aren't interested in

        final_apps = {}
        for app in prio_apps:
            if app in deplist:
                final_apps[app] = prio_apps[app]

        return final_apps

    def app_has_dependents(self, name):
        """Return True if any configured app depends on *name*."""
        for app in self.app_config:
            if "dependencies" in self.app_config[app]:
                for dep in utils.single_or_list(self.app_config[app]["dependencies"]):
                    if dep == name:
                        return True
        return False

    def get_dependent_apps(self, dependee, deps):
        """Recursively append to *deps* every app that depends on *dependee*."""
        for app in self.app_config:
            if "dependencies" in self.app_config[app]:
                for dep in utils.single_or_list(self.app_config[app]["dependencies"]):
                    # print("app= {} dep = {}, dependee = {} deps = {}".format(app, dep, dependee, deps))
                    if dep == dependee and app not in deps:
                        deps.append(app)
                        new_deps = self.get_dependent_apps(app, deps)
                        # NOTE(review): the recursive call always returns
                        # None, so this branch is currently dead code.
                        if new_deps is not None:
                            deps.append(new_deps)

    def topological_sort(self, source):
        """Yield app names in dependency order; raise ValueError on cycles.

        :param source: iterable of (name, deps) pairs.
        """
        pending = [(name, set(deps)) for name, deps in source]  # copy deps so we can modify set in-place
        emitted = []
        while pending:
            next_pending = []
            next_emitted = []
            for entry in pending:
                name, deps = entry
                deps.difference_update(emitted)  # remove deps we emitted last pass
                if deps:  # still has deps? recheck during next pass
                    next_pending.append(entry)
                else:  # no more deps? time to emit
                    yield name
                    emitted.append(name)  # <-- not required, but helps preserve original ordering
                    next_emitted.append(name)  # remember what we emitted for difference_update() in next pass
            if not next_emitted:
                # all entries have unmet deps, we have cyclic redundancies
                # since we already know all deps are correct
                self.logger.warning("Cyclic or missing app dependencies detected")
                for pend in next_pending:
                    deps = ""
                    for dep in pend[1]:
                        deps += "{} ".format(dep)
                    self.logger.warning("%s depends on %s", pend[0], deps)
                raise ValueError("cyclic dependency detected")
            pending = next_pending
            emitted = next_emitted

    def apps_per_module(self, module):
        """Return the apps whose configured module is *module*."""
        apps = []
        for app in self.app_config:
            if app not in self.non_apps and self.app_config[app]["module"] == module:
                apps.append(app)

        return apps

    def apps_per_global_module(self, module):
        """Return apps depending on global module *module* (static or registered)."""
        apps = []
        for app in self.app_config:
            if "global_dependencies" in self.app_config[app]:
                for gm in utils.single_or_list(self.app_config[app]["global_dependencies"]):
                    if gm == module:
                        apps.append(app)

        for app, gms in self.global_module_dependencies.items():
            for gm in gms:
                if gm == module:
                    apps.append(app)

        return apps

    async def register_module_dependency(self, name, *modules):
        """Record that app *name* depends on the given global modules.

        Each module may be given as a name string or as a module object;
        modules not listed in global_modules are rejected with a warning.
        """
        for module in modules:
            module_name = None
            if isinstance(module, str):
                module_name = module
            elif isinstance(module, object) and module.__class__.__name__ == "module":
                module_name = module.__name__

            if (
                module_name is not None
                and "global_modules" in self.app_config
                and module_name in self.app_config["global_modules"]
            ):

                if name not in self.global_module_dependencies:
                    self.global_module_dependencies[name] = []

                if module_name not in self.global_module_dependencies[name]:
                    self.global_module_dependencies[name].append(module_name)
            else:
                self.logger.warning(
                    "Module %s not in global_modules in register_module_dependency() for %s", module_name, name,
                )

    async def manage_services(self, namespace, domain, service, kwargs):
        """Service handler for appdaemon/app start|stop|restart|reload."""
        app = None
        if "app" in kwargs:
            app = kwargs["app"]
        elif service == "reload":
            # reload operates on everything; no app required
            pass
        else:
            self.logger.warning("App not specified when calling '%s' service. Specify App", service)
            return None

        if service != "reload" and app not in self.app_config:
            self.logger.warning("Specified App '%s' is not a valid App", app)
            return None

        if service == "start":
            await self.start_app(app)

        elif service == "stop":
            await self.stop_app(app)

        elif service == "restart":
            await self.restart_app(app)

        elif service == "reload":
            await self.check_app_updates(mode="init")

    async def increase_active_apps(self, name):
        """Move *name* to the active list and refresh the counter sensors."""
        if name not in self.active_apps:
            self.active_apps.append(name)

        if name in self.inactive_apps:
            self.inactive_apps.remove(name)

        active_apps = len(self.active_apps)
        inactive_apps = len(self.inactive_apps)

        await self.set_state(self.active_apps_sensor, state=active_apps)
        await self.set_state(self.inactive_apps_sensor, state=inactive_apps)

    async def increase_inactive_apps(self, name):
        """Move *name* to the inactive list and refresh the counter sensors."""
        if name not in self.inactive_apps:
            self.inactive_apps.append(name)

        if name in self.active_apps:
            self.active_apps.remove(name)

        inactive_apps = len(self.inactive_apps)
        active_apps = len(self.active_apps)

        await self.set_state(self.active_apps_sensor, state=active_apps)
        await self.set_state(self.inactive_apps_sensor, state=inactive_apps)
| true | true |
f7115f40fe58ad8b96583fd0dc9e13f70437a634 | 39,208 | py | Python | venv/Lib/site-packages/statsmodels/tsa/filters/tests/test_filters.py | EkremBayar/bayar | aad1a32044da671d0b4f11908416044753360b39 | [
"MIT"
] | 2 | 2020-11-30T14:04:26.000Z | 2021-11-08T11:29:06.000Z | venv/Lib/site-packages/statsmodels/tsa/filters/tests/test_filters.py | EkremBayar/bayar | aad1a32044da671d0b4f11908416044753360b39 | [
"MIT"
] | 7 | 2020-12-04T04:10:42.000Z | 2021-03-16T00:53:09.000Z | venv/Lib/site-packages/statsmodels/tsa/filters/tests/test_filters.py | EkremBayar/bayar | aad1a32044da671d0b4f11908416044753360b39 | [
"MIT"
] | 1 | 2021-11-16T19:06:53.000Z | 2021-11-16T19:06:53.000Z | from statsmodels.compat.pandas import assert_frame_equal, make_dataframe
from datetime import datetime
import numpy as np
from numpy.testing import (assert_almost_equal, assert_equal, assert_allclose,
assert_raises, assert_)
from numpy import array, column_stack
from statsmodels.tsa.filters._utils import pandas_wrapper
from statsmodels.datasets import macrodata
from pandas import DataFrame, date_range, concat
from statsmodels.tsa.filters.api import (bkfilter, hpfilter, cffilter,
convolution_filter, recursive_filter)
def test_bking1d():
    # Test Baxter King band-pass filter. Results are taken from Stata
    # Expected filtered series (fixed reference values from Stata's bking).
    bking_results = array([
        7.320813, 2.886914, -6.818976, -13.49436,
        -13.27936, -9.405913, -5.691091, -5.133076, -7.273468,
        -9.243364, -8.482916, -4.447764, 2.406559, 10.68433,
        19.46414, 28.09749, 34.11066, 33.48468, 24.64598, 9.952399,
        -4.265528, -12.59471, -13.46714, -9.049501, -3.011248,
        .5655082, 2.897976, 7.406077, 14.67959, 18.651, 13.05891,
        -2.945415, -24.08659, -41.86147, -48.68383, -43.32689,
        -31.66654, -20.38356, -13.76411, -9.978693, -3.7704, 10.27108,
        31.02847, 51.87613, 66.93117, 73.51951, 73.4053, 69.17468,
        59.8543, 38.23899, -.2604809, -49.0107, -91.1128, -112.1574,
        -108.3227, -86.51453, -59.91258, -40.01185, -29.70265,
        -22.76396, -13.08037, 1.913622, 20.44045, 37.32873, 46.79802,
        51.95937, 59.67393, 70.50803, 81.27311, 83.53191, 67.72536,
        33.78039, -6.509092, -37.31579, -46.05207, -29.81496, 1.416417,
        28.31503,
        32.90134, 8.949259, -35.41895, -84.65775, -124.4288, -144.6036,
        -140.2204, -109.2624, -53.6901, 15.07415, 74.44268, 104.0403,
        101.0725, 76.58291, 49.27925, 36.15751, 36.48799, 37.60897,
        27.75998, 4.216643, -23.20579, -39.33292, -36.6134, -20.90161,
        -4.143123, 5.48432, 9.270075, 13.69573, 22.16675, 33.01987,
        41.93186, 47.12222, 48.62164, 47.30701, 40.20537, 22.37898,
        -7.133002, -43.3339, -78.51229, -101.3684, -105.2179,
        -90.97147,
        -68.30824, -48.10113, -35.60709, -31.15775, -31.82346,
        -32.49278, -28.22499, -14.42852, 10.1827, 36.64189, 49.43468,
        38.75517, 6.447761, -33.15883, -62.60446, -72.87829, -66.54629,
        -52.61205, -38.06676, -26.19963, -16.51492, -7.007577,
        .6125674,
        7.866972, 14.8123, 22.52388, 30.65265, 39.47801, 49.05027,
        59.02925,
        72.88999, 95.08865, 125.8983, 154.4283, 160.7638, 130.6092,
        67.84406, -7.070272, -68.08128, -99.39944, -104.911,
        -100.2372, -98.11596, -104.2051, -114.0125, -113.3475,
        -92.98669, -51.91707, -.7313812, 43.22938, 64.62762, 64.07226,
        59.35707, 67.06026, 91.87247, 124.4591, 151.2402, 163.0648,
        154.6432])
    # Filter real investment with a 6-32 period band and K=12 leads/lags;
    # bkfilter drops K observations at each end of the sample.
    X = macrodata.load_pandas().data['realinv'].values
    Y = bkfilter(X, 6, 32, 12)
    # Agreement with Stata to 4 decimal places.
    assert_almost_equal(Y, bking_results, 4)
def test_bking2d():
    # Test Baxter-King band-pass filter with 2d input
    # Regression benchmark: expected cycle values, one row per quarter,
    # column 0 for 'realinv' and column 1 for 'cpi' (the two columns
    # selected below).
    bking_results = array([
        [7.320813, -.0374475], [2.886914, -.0430094],
        [-6.818976, -.053456], [-13.49436, -.0620739], [-13.27936, -.0626929],
        [-9.405913, -.0603022], [-5.691091, -.0630016], [-5.133076, -.0832268],
        [-7.273468, -.1186448], [-9.243364, -.1619868], [-8.482916, -.2116604],
        [-4.447764, -.2670747], [2.406559, -.3209931], [10.68433, -.3583075],
        [19.46414, -.3626742], [28.09749, -.3294618], [34.11066, -.2773388],
        [33.48468, -.2436127], [24.64598, -.2605531], [9.952399, -.3305166],
        [-4.265528, -.4275561], [-12.59471, -.5076068], [-13.46714, -.537573],
        [-9.049501, -.5205845], [-3.011248, -.481673], [.5655082, -.4403994],
        [2.897976, -.4039957], [7.406077, -.3537394], [14.67959, -.2687359],
        [18.651, -.1459743], [13.05891, .0014926], [-2.945415, .1424277],
        [-24.08659, .2451936], [-41.86147, .288541], [-48.68383, .2727282],
        [-43.32689, .1959127], [-31.66654, .0644874], [-20.38356, -.1158372],
        [-13.76411, -.3518627], [-9.978693, -.6557535], [-3.7704, -1.003754],
        [10.27108, -1.341632], [31.02847, -1.614486], [51.87613, -1.779089],
        [66.93117, -1.807459], [73.51951, -1.679688], [73.4053, -1.401012],
        [69.17468, -.9954996], [59.8543, -.511261], [38.23899, -.0146745],
        [-.2604809, .4261311], [-49.0107, .7452514], [-91.1128, .8879492],
        [-112.1574, .8282748], [-108.3227, .5851508], [-86.51453, .2351699],
        [-59.91258, -.1208998], [-40.01185, -.4297895], [-29.70265, -.6821963],
        [-22.76396, -.9234254], [-13.08037, -1.217539], [1.913622, -1.57367],
        [20.44045, -1.927008], [37.32873, -2.229565], [46.79802, -2.463154],
        [51.95937, -2.614697], [59.67393, -2.681357], [70.50803, -2.609654],
        [81.27311, -2.301618], [83.53191, -1.720974], [67.72536, -.9837123],
        [33.78039, -.2261613], [-6.509092, .4546985], [-37.31579, 1.005751],
        [-46.05207, 1.457224], [-29.81496, 1.870815], [1.416417, 2.263313],
        [28.31503, 2.599906], [32.90134, 2.812282], [8.949259, 2.83358],
        [-35.41895, 2.632667], [-84.65775, 2.201077], [-124.4288, 1.598951],
        [-144.6036, .9504762], [-140.2204, .4187932], [-109.2624, .1646726],
        [-53.6901, .2034265], [15.07415, .398165], [74.44268, .5427476],
        [104.0403, .5454975], [101.0725, .4723354], [76.58291, .4626823],
        [49.27925, .5840143], [36.15751, .7187981], [36.48799, .6058422],
        [37.60897, .1221227], [27.75998, -.5891272], [4.216643, -1.249841],
        [-23.20579, -1.594972], [-39.33292, -1.545968], [-36.6134, -1.275494],
        [-20.90161, -1.035783], [-4.143123, -.9971732], [5.48432, -1.154264],
        [9.270075, -1.29987], [13.69573, -1.240559], [22.16675, -.9662656],
        [33.01987, -.6420301], [41.93186, -.4698712], [47.12222, -.4527797],
        [48.62164, -.4407153], [47.30701, -.2416076], [40.20537, .2317583],
        [22.37898, .8710276], [-7.133002, 1.426177], [-43.3339, 1.652785],
        [-78.51229, 1.488021], [-101.3684, 1.072096], [-105.2179, .6496446],
        [-90.97147, .4193682], [-68.30824, .41847], [-48.10113, .5253419],
        [-35.60709, .595076], [-31.15775, .5509905], [-31.82346, .3755519],
        [-32.49278, .1297979], [-28.22499, -.0916165], [-14.42852, -.2531037],
        [10.1827, -.3220784], [36.64189, -.2660561], [49.43468, -.1358522],
        [38.75517, -.0279508], [6.447761, .0168735], [-33.15883, .0315687],
        [-62.60446, .0819507], [-72.87829, .2274033], [-66.54629, .4641401],
        [-52.61205, .7211093], [-38.06676, .907773], [-26.19963, .9387103],
        [-16.51492, .7940786], [-7.007577, .5026631], [.6125674, .1224996],
        [7.866972, -.2714422], [14.8123, -.6273921], [22.52388, -.9124271],
        [30.65265, -1.108861], [39.47801, -1.199206], [49.05027, -1.19908],
        [59.02925, -1.139046], [72.88999, -.9775021], [95.08865, -.6592603],
        [125.8983, -.1609712], [154.4283, .4796201], [160.7638, 1.100565],
        [130.6092, 1.447148], [67.84406, 1.359608], [-7.070272, .8931825],
        [-68.08128, .2619787], [-99.39944, -.252208], [-104.911, -.4703874],
        [-100.2372, -.4430657], [-98.11596, -.390683], [-104.2051, -.5647846],
        [-114.0125, -.9397582], [-113.3475, -1.341633], [-92.98669, -1.567337],
        [-51.91707, -1.504943], [-.7313812, -1.30576], [43.22938, -1.17151],
        [64.62762, -1.136151], [64.07226, -1.050555], [59.35707, -.7308369],
        [67.06026, -.1766731], [91.87247, .3898467], [124.4591, .8135461],
        [151.2402, .9644226], [163.0648, .6865934], [154.6432, .0115685]])
    mdata = macrodata.load_pandas()
    # two macro series filtered jointly; each column filtered independently
    X = mdata.data[['realinv', 'cpi']].values.astype(float)
    # bkfilter(X, 6, 32, 12): presumably the 6-32 period pass band with a
    # 12-observation truncation lag -- confirm against the bkfilter docstring
    Y = bkfilter(X, 6, 32, 12)
    # benchmark values are stored to limited precision -> compare 4 decimals
    assert_almost_equal(Y, bking_results, 4)
def test_hpfilter():
    # Test Hodrick-Prescott Filter. Results taken from Stata.
    # Regression benchmark, one row per quarter:
    # column 0 = cycle, column 1 = trend (hpfilter returns this pair,
    # which column_stack below joins into the same layout).
    hpfilt_res = array([
        [3.951191484487844718e+01, 2.670837085155121713e+03],
        [8.008853245681075350e+01, 2.698712467543189177e+03],
        [4.887545512195401898e+01, 2.726612544878045810e+03],
        [3.059193256079834100e+01, 2.754612067439201837e+03],
        [6.488266733421960453e+01, 2.782816332665780465e+03],
        [2.304024204546703913e+01, 2.811349757954532834e+03],
        [-1.355312369487364776e+00, 2.840377312369487299e+03],
        [-6.746236512580753697e+01, 2.870078365125807522e+03],
        [-8.136743836853429457e+01, 2.900631438368534418e+03],
        [-6.016789026443257171e+01, 2.932172890264432681e+03],
        [-4.636922433138215638e+01, 2.964788224331382025e+03],
        [-2.069533915570400495e+01, 2.998525339155703932e+03],
        [-2.162152558595607843e+00, 3.033403152558595593e+03],
        [-4.718647774311648391e+00, 3.069427647774311481e+03],
        [-1.355645669169007306e+01, 3.106603456691690099e+03],
        [-4.436926204475639679e+01, 3.144932262044756499e+03],
        [-4.332027378211660107e+01, 3.184407273782116590e+03],
        [-4.454697106352068658e+01, 3.224993971063520803e+03],
        [-2.629875787765286077e+01, 3.266630757877652741e+03],
        [-4.426119635629265758e+01, 3.309228196356292756e+03],
        [-1.443441190762496262e+01, 3.352680411907625057e+03],
        [-2.026686669186437939e+01, 3.396853866691864368e+03],
        [-1.913700136208899494e+01, 3.441606001362089046e+03],
        [-5.482458977940950717e+01, 3.486781589779409387e+03],
        [-1.596244517937793717e+01, 3.532213445179378141e+03],
        [-1.374011542874541192e+01, 3.577700115428745448e+03],
        [1.325482813403914406e+01, 3.623030171865960710e+03],
        [5.603040174253828809e+01, 3.667983598257461836e+03],
        [1.030743373627105939e+02, 3.712348662637289181e+03],
        [7.217534795943993231e+01, 3.755948652040559864e+03],
        [5.462972503693208637e+01, 3.798671274963067845e+03],
        [4.407065050666142270e+01, 3.840449349493338559e+03],
        [3.749016270204992907e+01, 3.881249837297949853e+03],
        [-1.511244199923112319e+00, 3.921067244199923152e+03],
        [-9.093507374079763395e+00, 3.959919507374079785e+03],
        [-1.685361946760258434e+01, 3.997823619467602384e+03],
        [2.822211031434289907e+01, 4.034790889685657021e+03],
        [6.117590627896424849e+01, 4.070822093721035344e+03],
        [5.433135391434370831e+01, 4.105935646085656117e+03],
        [3.810480376716623141e+01, 4.140188196232833434e+03],
        [7.042964928802848590e+01, 4.173670350711971878e+03],
        [4.996346842507591646e+01, 4.206496531574924120e+03],
        [4.455282059571254649e+01, 4.238825179404287155e+03],
        [-7.584961950576143863e+00, 4.270845961950576566e+03],
        [-4.620339247697120300e+01, 4.302776392476971523e+03],
        [-7.054024364552969928e+01, 4.334829243645529459e+03],
        [-6.492941099801464588e+01, 4.367188410998014660e+03],
        [-1.433567024239555394e+02, 4.399993702423955256e+03],
        [-5.932834493089012540e+01, 4.433344344930889747e+03],
        [-6.842096758743628016e+01, 4.467249967587436004e+03],
        [-6.774011924654860195e+01, 4.501683119246548813e+03],
        [-9.030958565658056614e+01, 4.536573585656580690e+03],
        [-4.603981499136807543e+01, 4.571808814991368308e+03],
        [2.588118806672991923e+01, 4.607219811933269739e+03],
        [3.489419371912299539e+01, 4.642608806280876706e+03],
        [7.675179642495095322e+01, 4.677794203575049323e+03],
        [1.635497817724171910e+02, 4.712616218227582976e+03],
        [1.856079654765617306e+02, 4.746963034523438182e+03],
        [1.254269446392718237e+02, 4.780825055360728584e+03],
        [1.387413113837174024e+02, 4.814308688616282780e+03],
        [6.201826599282230745e+01, 4.847598734007177882e+03],
        [4.122129542972197669e+01, 4.880966704570278125e+03],
        [-4.120287475842360436e+01, 4.914722874758424041e+03],
        [-9.486328233441963675e+01, 4.949203282334419782e+03],
        [-1.894232132641573116e+02, 4.984718213264157384e+03],
        [-1.895766639620087517e+02, 5.021518663962008759e+03],
        [-1.464092413342650616e+02, 5.059737241334265491e+03],
        [-1.218770668721217589e+02, 5.099388066872122181e+03],
        [-4.973075629078175552e+01, 5.140393756290781312e+03],
        [-5.365375213897277717e+01, 5.182600752138972894e+03],
        [-7.175241524251214287e+01, 5.225824415242512259e+03],
        [-7.834757283225462743e+01, 5.269846572832254424e+03],
        [-6.264220687943907251e+01, 5.314404206879438789e+03],
        [-3.054332122210325906e+00, 5.359185332122210639e+03],
        [4.808218808024685131e+01, 5.403838811919753425e+03],
        [2.781399326736391231e+00, 5.448011600673263274e+03],
        [-2.197570415173231595e+01, 5.491380704151732061e+03],
        [1.509441335012807031e+02, 5.533624866498719712e+03],
        [1.658909029574851957e+02, 5.574409097042514986e+03],
        [2.027292548049981633e+02, 5.613492745195001589e+03],
        [1.752101578176061594e+02, 5.650738842182393455e+03],
        [1.452808749847536092e+02, 5.686137125015246056e+03],
        [1.535481629475025329e+02, 5.719786837052497503e+03],
        [1.376169777998875361e+02, 5.751878022200112355e+03],
        [1.257703080340770612e+02, 5.782696691965922582e+03],
        [-2.524186846895645431e+01, 5.812614868468956047e+03],
        [-6.546618027042404719e+01, 5.842083180270424236e+03],
        [1.192352023580315290e+01, 5.871536479764196883e+03],
        [1.043482970188742911e+02, 5.901368702981125352e+03],
        [2.581376184768396342e+01, 5.931981238152316109e+03],
        [6.634330880534071184e+01, 5.963840691194659485e+03],
        [-4.236780162594641297e+01, 5.997429801625946311e+03],
        [-1.759397735321817891e+02, 6.033272773532181418e+03],
        [-1.827933311233055065e+02, 6.071867331123305121e+03],
        [-2.472312362505917918e+02, 6.113601236250591683e+03],
        [-2.877470049336488955e+02, 6.158748004933649099e+03],
        [-2.634066336693540507e+02, 6.207426633669354487e+03],
        [-1.819572770763625158e+02, 6.259576277076362203e+03],
        [-1.175034606274621183e+02, 6.314971460627461965e+03],
        [-4.769898649718379602e+01, 6.373272986497183410e+03],
        [1.419578280287896632e+01, 6.434068217197121157e+03],
        [6.267929662760798237e+01, 6.496914703372392069e+03],
        [6.196413196753746888e+01, 6.561378868032462378e+03],
        [5.019769125317907310e+01, 6.627066308746821051e+03],
        [4.665364933213822951e+01, 6.693621350667861407e+03],
        [3.662430749527266016e+01, 6.760719692504727391e+03],
        [7.545680850246480986e+01, 6.828066191497535328e+03],
        [6.052940492147536133e+01, 6.895388595078524304e+03],
        [6.029518881462354329e+01, 6.962461811185376064e+03],
        [2.187042136652689805e+01, 7.029098578633473153e+03],
        [2.380067926824722235e+01, 7.095149320731752596e+03],
        [-7.119129802169481991e+00, 7.160478129802169860e+03],
        [-3.194497359120850888e+01, 7.224963973591208742e+03],
        [-1.897137038934124575e+01, 7.288481370389341464e+03],
        [-1.832687287845146784e+01, 7.350884872878451461e+03],
        [4.600482336597542599e+01, 7.412017176634024509e+03],
        [2.489047706403016491e+01, 7.471709522935970199e+03],
        [6.305909392127250612e+01, 7.529821906078727807e+03],
        [4.585212309498183458e+01, 7.586229876905018500e+03],
        [9.314260180878318351e+01, 7.640848398191216802e+03],
        [1.129819097095369216e+02, 7.693621090290463144e+03],
        [1.204662123176703972e+02, 7.744549787682329224e+03],
        [1.336860614601246198e+02, 7.793706938539875409e+03],
        [1.034567175813735957e+02, 7.841240282418626521e+03],
        [1.403118873372050075e+02, 7.887381112662795204e+03],
        [1.271726169351004501e+02, 7.932425383064899506e+03],
        [8.271925765282139764e+01, 7.976756742347178260e+03],
        [-3.197432211752584408e+01, 8.020838322117525422e+03],
        [-1.150209535194062482e+02, 8.065184953519406008e+03],
        [-1.064694837456772802e+02, 8.110291483745677397e+03],
        [-1.190428718925368230e+02, 8.156580871892536379e+03],
        [-1.353635336292991269e+02, 8.204409533629299403e+03],
        [-9.644348283027102298e+01, 8.254059482830271008e+03],
        [-6.143413116116607853e+01, 8.305728131161165948e+03],
        [-3.019161311097923317e+01, 8.359552613110980019e+03],
        [1.384333163552582846e+00, 8.415631666836447039e+03],
        [-4.156016073666614830e+01, 8.474045160736666730e+03],
        [-4.843882841860977351e+01, 8.534873828418609264e+03],
        [-6.706442838867042155e+01, 8.598172428388670596e+03],
        [-2.019644488579979225e+01, 8.663965444885800025e+03],
        [-4.316446881084630149e+00, 8.732235446881084499e+03],
        [4.435061943264736328e+01, 8.802952380567352520e+03],
        [2.820550564155564643e+01, 8.876083494358445023e+03],
        [5.155624419490777655e+01, 8.951623755805092514e+03],
        [-4.318760899315748247e+00, 9.029585760899315574e+03],
        [-6.534632828542271454e+01, 9.110014328285422380e+03],
        [-7.226757738268497633e+01, 9.192951577382684263e+03],
        [-9.412378615444868046e+01, 9.278398786154448317e+03],
        [-1.191240653288368776e+02, 9.366312065328836979e+03],
        [-4.953669826751865912e+01, 9.456588698267518339e+03],
        [-6.017251579067487910e+01, 9.549051515790675694e+03],
        [-5.103438828313483100e+01, 9.643492388283135369e+03],
        [-7.343057830678117170e+01, 9.739665578306781754e+03],
        [-2.774245193054957781e+01, 9.837293451930549054e+03],
        [-3.380481112519191811e+00, 9.936052481112519672e+03],
        [-2.672779877794346248e+01, 1.003560179877794326e+04],
        [-3.217342505148371856e+01, 1.013559842505148299e+04],
        [-4.140567518359966925e+01, 1.023568267518359971e+04],
        [-6.687756033938057953e+00, 1.033547475603393832e+04],
        [7.300600408459467872e+01, 1.043456899591540605e+04],
        [6.862345670680042531e+01, 1.053255554329319966e+04],
        [5.497882461487461114e+01, 1.062907017538512628e+04],
        [9.612244093055960548e+01, 1.072379155906944106e+04],
        [1.978212770103891671e+02, 1.081643272298961165e+04],
        [1.362772276848754700e+02, 1.090676677231512440e+04],
        [2.637635494867263333e+02, 1.099469045051327339e+04],
        [1.876813256815166824e+02, 1.108018567431848351e+04],
        [1.711447873158413131e+02, 1.116339921268415856e+04],
        [5.257586460826678376e+01, 1.124459513539173349e+04],
        [4.710652228531762375e+01, 1.132414447771468258e+04],
        [-6.237613484241046535e+01, 1.140245113484241119e+04],
        [-9.982044354035315337e+01, 1.147994844354035376e+04],
        [-7.916275548997509759e+01, 1.155703075548997549e+04],
        [-9.526003459472303803e+01, 1.163403003459472347e+04],
        [-1.147987680369169539e+02, 1.171122876803691724e+04],
        [-1.900259054765901965e+02, 1.178884990547659072e+04],
        [-2.212256473439556430e+02, 1.186704464734395515e+04],
        [-2.071394278781845060e+02, 1.194584542787818464e+04],
        [-8.968541528904825100e+01, 1.202514641528904758e+04],
        [-6.189531564415665343e+01, 1.210471231564415575e+04],
        [-5.662878162551714922e+01, 1.218425178162551674e+04],
        [-4.961678134413705266e+01, 1.226343478134413635e+04],
        [-3.836288992144181975e+01, 1.234189588992144127e+04],
        [-8.956671991456460091e+00, 1.241923867199145570e+04],
        [3.907028461866866564e+01, 1.249504271538133071e+04],
        [1.865299000184495526e+01, 1.256888200999815490e+04],
        [4.279803532226833340e+01, 1.264035496467773191e+04],
        [3.962735362631610769e+01, 1.270907164637368442e+04],
        [1.412691291877854383e+02, 1.277466887081221466e+04],
        [1.256537791844366438e+02, 1.283680822081556289e+04],
        [7.067642758858892194e+01, 1.289523957241141034e+04],
        [1.108876647603192396e+02, 1.294979133523968085e+04],
        [9.956490829291760747e+01, 1.300033609170708223e+04],
        [1.571612709880937473e+02, 1.304681572901190702e+04],
        [2.318746375812715996e+02, 1.308923436241872878e+04],
        [2.635546670125277160e+02, 1.312769433298747208e+04],
        [2.044220965739259555e+02, 1.316244290342607383e+04],
        [2.213739418903714977e+02, 1.319389205810962812e+04],
        [1.020184547767112235e+02, 1.322258154522328914e+04],
        [-1.072694716663390864e+02, 1.324918947166633916e+04],
        [-3.490477058718843182e+02, 1.327445770587188417e+04],
        [-3.975570728533530200e+02, 1.329906107285335383e+04],
        [-3.331152428080622485e+02, 1.332345624280806260e+04]])
    dta = macrodata.load_pandas().data['realgdp'].values
    # smoothing parameter 1600 is the conventional choice for quarterly data;
    # hpfilter returns (cycle, trend), stacked here into two columns
    res = column_stack((hpfilter(dta, 1600)))
    assert_almost_equal(res, hpfilt_res, 6)
def test_cfitz_filter():
    # Test Christiano-Fitzgerald Filter. Results taken from R.
    # NOTE: The Stata mata code and the matlab code it's based on are wrong.
    # Regression benchmark for the cycle component, one row per observation,
    # columns matching the ('tbilrate', 'infl') pair selected below.
    cfilt_res = array([
        [0.712599537179426, 0.439563468233128],
        [1.06824041304411, 0.352886666575907],
        [1.19422467791128, 0.257297004260607],
        [0.970845473140327, 0.114504692143872],
        [0.467026976628563, -0.070734782329146],
        [-0.089153511514031, -0.238609685132605],
        [-0.452339254128573, -0.32376584042956],
        [-0.513231214461187, -0.314288554228112],
        [-0.352372578720063, -0.258815055101336],
        [-0.160282602521333, -0.215076844089567],
        [-0.0918782593827686, -0.194120745417214],
        [-0.168083823205437, -0.158327420072693],
        [-0.291595204965808, -0.0742727139742986],
        [-0.348638756841307, 0.037008291163602],
        [-0.304328040874631, 0.108196527328748],
        [-0.215933150969686, 0.0869231107437175],
        [-0.165632621390694, -0.0130556619786275],
        [-0.182326839507151, -0.126570926191824],
        [-0.223737786804725, -0.205535321806185],
        [-0.228939291453403, -0.269110078201836],
        [-0.185518327227038, -0.375976507132174],
        [-0.143900152461529, -0.53760115656157],
        [-0.162749541550174, -0.660065018626038],
        [-0.236263634756884, -0.588542352053736],
        [-0.275785854309211, -0.236867929421996],
        [-0.173666515108109, 0.303436335579219],
        [0.0963135720251639, 0.779772338801993],
        [0.427070069032285, 0.929108075350647],
        [0.629034743259998, 0.658330841002647],
        [0.557941248993624, 0.118500049361018],
        [0.227866624051603, -0.385048321099911],
        [-0.179878859883227, -0.582223992561493],
        [-0.428263000051965, -0.394053702908091],
        [-0.381640684645912, 0.0445437406977307],
        [-0.0942745548364887, 0.493997792757968],
        [0.238132391504895, 0.764519811304315],
        [0.431293754256291, 0.814755206427316],
        [0.455010435813661, 0.745567043101108],
        [0.452800768971269, 0.709401694610443],
        [0.615754619329312, 0.798293251119636],
        [1.00256335412457, 0.975856845059388],
        [1.44841039351691, 1.09097252730799],
        [1.64651971120370, 0.967823457118036],
        [1.35534532901802, 0.522397724737059],
        [0.580492790312048, -0.16941343361609],
        [-0.410746188031773, -0.90760401289056],
        [-1.26148406066881, -1.49592867122591],
        [-1.75784179124566, -1.87404167409849],
        [-1.94478553960064, -2.14586210891112],
        [-2.03751202708559, -2.465855239868],
        [-2.20376059354166, -2.86294187189049],
        [-2.39722338315852, -3.15004697654831],
        [-2.38032366161537, -3.01390466643222],
        [-1.91798022532025, -2.23395210271226],
        [-0.982318490353716, -0.861346053067472],
        [0.199047030343412, 0.790266582335616],
        [1.28582776574786, 2.33731327460104],
        [2.03565905376430, 3.54085486821911],
        [2.41201557412526, 4.36519456268955],
        [2.52011070482927, 4.84810517685452],
        [2.45618479815452, 4.92906708807477],
        [2.22272146945388, 4.42591058990048],
        [1.78307567169034, 3.20962906108388],
        [1.18234431860844, 1.42568060336985],
        [0.590069172333348, -0.461896808688991],
        [0.19662302949837, -1.89020992539465],
        [0.048307034171166, -2.53490571941987],
        [-0.0141956981899000, -2.50020338531674],
        [-0.230505187108187, -2.20625973569823],
        [-0.700947410386801, -2.06643697511048],
        [-1.27085123163060, -2.21536883679783],
        [-1.64082547897928, -2.49016921117735],
        [-1.62286182971254, -2.63948740221362],
        [-1.31609762181362, -2.54685250637904],
        [-1.03085567704873, -2.27157435428923],
        [-1.01100120380112, -1.90404507430561],
        [-1.19823958399826, -1.4123209792214],
        [-1.26398933608383, -0.654000086153317],
        [-0.904710628949692, 0.447960016248203],
        [-0.151340093679588, 1.73970411237156],
        [0.592926881165989, 2.85741581650685],
        [0.851660587507523, 3.4410446351716],
        [0.480324393352127, 3.36870271362297],
        [-0.165153230782417, 2.82003806696544],
        [-0.459235919375844, 2.12858991660866],
        [0.0271158842479935, 1.55840980891556],
        [1.18759188180671, 1.17980298478623],
        [2.43238266962309, 0.904011534980672],
        [3.08277213720132, 0.595286911949837],
        [2.79953663720953, 0.148014782859571],
        [1.73694442845833, -0.496297332023011],
        [0.357638079951977, -1.33108149877570],
        [-0.891418825216945, -2.22650083183366],
        [-1.77646467793627, -2.89359299718574],
        [-2.24614790863088, -2.97921619243347],
        [-2.29048879096607, -2.30003092779280],
        [-1.87929656465888, -1.05298381273274],
        [-1.04510101454788, 0.215837488618531],
        [0.00413338508394524, 0.937866257924888],
        [0.906870625251025, 0.92664365343019],
        [1.33869057593416, 0.518564571494679],
        [1.22659678454440, 0.288096869652890],
        [0.79380139656044, 0.541053084632774],
        [0.38029431865832, 1.01905199983437],
        [0.183929413600038, 1.10529586616777],
        [0.140045425897033, 0.393618564826736],
        [0.0337313182352219, -0.86431819007665],
        [-0.269208622829813, -1.85638085246792],
        [-0.687276639992166, -1.82275359004533],
        [-1.00161592325614, -0.692695765071617],
        [-1.06320089194036, 0.803577361347341],
        [-0.927152307196776, 1.67366338751788],
        [-0.786802101366614, 1.42564362251793],
        [-0.772970884572502, 0.426446388877964],
        [-0.81275662801789, -0.437721213831647],
        [-0.686831250382476, -0.504255468075149],
        [-0.237936463020255, 0.148656301898438],
        [0.459631879129522, 0.832925905720478],
        [1.12717379822508, 0.889455302576383],
        [1.48640453200855, 0.268042676202216],
        [1.46515245776211, -0.446505038539178],
        [1.22993484959115, -0.563868578181134],
        [1.0272100765927, 0.0996849952196907],
        [0.979191212438404, 1.05053652824665],
        [1.00733490030391, 1.51658415000556],
        [0.932192535457706, 1.06262774912638],
        [0.643374300839414, -0.0865180803476065],
        [0.186885168954461, -1.24799408923277],
        [-0.290842337365465, -1.80035611156538],
        [-0.669446735516495, -1.58847333561510],
        [-0.928915624595538, -0.932116966867929],
        [-1.11758635926997, -0.307879396807850],
        [-1.26832454569756, -0.00856199983957032],
        [-1.35755577149251, -0.0303537516690989],
        [-1.34244112665546, -0.196807620887435],
        [-1.22227976023299, -0.342062643495923],
        [-1.04601473486818, -0.390474392372016],
        [-0.85158508717846, -0.322164402093596],
        [-0.605033439160543, -0.126930141915954],
        [-0.218304303942818, 0.179551077808122],
        [0.352173017779006, 0.512327303000081],
        [1.01389600097229, 0.733397490572755],
        [1.55149778750607, 0.748740387440165],
        [1.75499674757591, 0.601759717901009],
        [1.56636057468633, 0.457705308377562],
        [1.12239792537274, 0.470849913286519],
        [0.655802600286141, 0.646142040378738],
        [0.335285115340180, 0.824103600255079],
        [0.173454596506888, 0.808068498175582],
        [0.0666753011315252, 0.521488214487996],
        [-0.0842367474816212, 0.0583493276173476],
        [-0.285604762631464, -0.405958418332253],
        [-0.465735422869919, -0.747800086512926],
        [-0.563586691231348, -0.94982272350799],
        [-0.598110322024572, -1.04736894794361],
        [-0.65216025756061, -1.04858365218822],
        [-0.789663117801624, -0.924145633093637],
        [-0.984704045337959, -0.670740724179446],
        [-1.12449565589348, -0.359476803003931],
        [-1.07878318723543, -0.092290938944355],
        [-0.775555435407062, 0.102132527529259],
        [-0.231610677329856, 0.314409560305622],
        [0.463192794235131, 0.663523546243286],
        [1.17416973448423, 1.13156902460931],
        [1.74112278814906, 1.48967153067024],
        [2.00320855757084, 1.42571085941843],
        [1.8529912317336, 0.802460519079555],
        [1.30747261947211, -0.169219078629572],
        [0.540237070403222, -1.01621539672694],
        [-0.177136817092375, -1.3130784867977],
        [-0.611981468823591, -0.982477824460773],
        [-0.700240028737747, -0.344919609255406],
        [-0.572396497740112, 0.125083535035390],
        [-0.450934466600975, 0.142553112732280],
        [-0.494020014254326, -0.211429053871656],
        [-0.701707589094918, -0.599602868825992],
        [-0.94721339346157, -0.710669870591623],
        [-1.09297139748946, -0.47846194092245],
        [-1.08850658866583, -0.082258450179988],
        [-0.976082880696692, 0.235758921309309],
        [-0.81885695346771, 0.365298185204303],
        [-0.63165529525553, 0.384725179378064],
        [-0.37983149226421, 0.460240196164378],
        [-0.0375551354277652, 0.68580913832794],
        [0.361996927427804, 0.984470835955107],
        [0.739920615366072, 1.13195975020298],
        [1.03583478061534, 0.88812510421667],
        [1.25614938962160, 0.172561520611839],
        [1.45295030231799, -0.804979390544485],
        [1.64887158748426, -1.55662011197859],
        [1.78022721495313, -1.52921975346218],
        [1.71945683859668, -0.462240366424548],
        [1.36728880239190, 1.31213774341268],
        [0.740173894315912, 2.88362740582926],
        [-0.0205364331835904, 3.20319080963167],
        [-0.725643970956428, 1.75222466531151],
        [-1.23900506689782, -0.998432917440275],
        [-1.52651897508678, -3.72752870885448],
        [-1.62857516631435, -5.00551707196292],
        [-1.59657420180451, -4.18499132634584],
        [-1.45489013276495, -1.81759097305637],
        [-1.21309542313047, 0.722029457352468]])
    # first observation dropped -- presumably because it is unusable for one
    # of the series; confirm against the benchmark's sample definition
    dta = macrodata.load_pandas().data[['tbilrate', 'infl']].values[1:]
    cyc, trend = cffilter(dta)
    assert_almost_equal(cyc, cfilt_res, 8)
    # do 1d
    # filtering a single column must reproduce that column of the 2d run
    cyc, trend = cffilter(dta[:, 1])
    assert_almost_equal(cyc, cfilt_res[:, 1], 8)
def test_bking_pandas():
    """Baxter-King filter: pandas input matches ndarray output and keeps
    a (trimmed) index plus ``_cycle``-suffixed names."""
    frame = macrodata.load_pandas().data
    frame.index = date_range(start='1959-01-01', end='2009-10-01', freq='Q')

    # 1d: Series in -> Series out
    series_cycle = bkfilter(frame['infl'])
    array_cycle = bkfilter(frame['infl'].values)
    assert_equal(series_cycle.values, array_cycle)
    assert_equal(series_cycle.index[0], datetime(1962, 3, 31))
    assert_equal(series_cycle.index[-1], datetime(2006, 9, 30))
    assert_equal(series_cycle.name, 'infl_cycle')

    # 2d: DataFrame in -> DataFrame out
    frame_cycle = bkfilter(frame[['infl', 'unemp']])
    array_cycle = bkfilter(frame[['infl', 'unemp']].values)
    assert_equal(frame_cycle.values, array_cycle)
    assert_equal(frame_cycle.index[0], datetime(1962, 3, 31))
    assert_equal(frame_cycle.index[-1], datetime(2006, 9, 30))
    assert_equal(frame_cycle.columns.values, ['infl_cycle', 'unemp_cycle'])
def test_cfitz_pandas():
    """Christiano-Fitzgerald filter: pandas input matches ndarray output and
    keeps the full index plus ``_cycle``-suffixed names."""
    frame = macrodata.load_pandas().data
    frame.index = date_range(start='1959-01-01', end='2009-10-01', freq='Q')

    # 1d: Series in -> Series out
    cyc_series, _ = cffilter(frame['infl'])
    cyc_array, _ = cffilter(frame['infl'].values)
    assert_allclose(cyc_series.values, cyc_array, rtol=1e-14)
    assert_equal(cyc_series.index[0], datetime(1959, 3, 31))
    assert_equal(cyc_series.index[-1], datetime(2009, 9, 30))
    assert_equal(cyc_series.name, 'infl_cycle')

    # 2d: DataFrame in -> DataFrame out
    cyc_frame, _ = cffilter(frame[['infl', 'unemp']])
    cyc_array, _ = cffilter(frame[['infl', 'unemp']].values)
    assert_allclose(cyc_frame.values, cyc_array, rtol=1e-14)
    assert_equal(cyc_frame.index[0], datetime(1959, 3, 31))
    assert_equal(cyc_frame.index[-1], datetime(2009, 9, 30))
    assert_equal(cyc_frame.columns.values, ['infl_cycle', 'unemp_cycle'])
def test_hpfilter_pandas():
    """Hodrick-Prescott filter: pandas input matches ndarray output and
    keeps the full index plus a ``_cycle``-suffixed name."""
    frame = macrodata.load_pandas().data
    frame.index = date_range(start='1959-01-01', end='2009-10-01', freq='Q')
    cyc_series, _ = hpfilter(frame['realgdp'])
    cyc_array, _ = hpfilter(frame['realgdp'].values)
    assert_equal(cyc_series.values, cyc_array)
    assert_equal(cyc_series.index[0], datetime(1959, 3, 31))
    assert_equal(cyc_series.index[-1], datetime(2009, 9, 30))
    assert_equal(cyc_series.name, 'realgdp_cycle')
class TestFilters(object):
    """Regression tests for convolution_filter / recursive_filter on plain
    ndarrays, series with a missing value, and pandas input."""

    @classmethod
    def setup_class(cls):
        # even-length quarterly series starting 1951Q1
        values = [-50, 175, 149, 214, 247, 237, 225, 329, 729, 809,
                  530, 489, 540, 457, 195, 176, 337, 239, 128, 102,
                  232, 429, 3, 98, 43, -141, -77, -13, 125, 361, -45, 184]
        cls.data = DataFrame(values, date_range(start='1/1/1951',
                                                periods=len(values),
                                                freq='Q'))
        # same series with one observation knocked out
        values[9] = np.nan
        cls.datana = DataFrame(values, date_range(start='1/1/1951',
                                                  periods=len(values),
                                                  freq='Q'))
        from .results import filter_results
        cls.expected = filter_results

    def test_convolution(self):
        # complete series: two-sided then one-sided filter
        series = self.data.values.squeeze()
        np.testing.assert_almost_equal(
            convolution_filter(series, [.75, .25]), self.expected.conv2)
        np.testing.assert_almost_equal(
            convolution_filter(series, [.75, .25], nsides=1),
            self.expected.conv1)
        # series containing a NaN
        series = self.datana.values.squeeze()
        np.testing.assert_almost_equal(
            convolution_filter(series, [.75, .25]), self.expected.conv2_na)
        np.testing.assert_almost_equal(
            convolution_filter(series, [.75, .25], nsides=1),
            self.expected.conv1_na)

    def test_convolution2d(self):
        # a single column reproduces the 1d result
        arr = self.data.values
        expected = self.expected.conv2
        np.testing.assert_almost_equal(
            convolution_filter(arr, [[.75], [.25]]), expected[:, None])
        # two identical columns are filtered independently
        np.testing.assert_almost_equal(
            convolution_filter(np.c_[arr, arr], [[.75, .75], [.25, .25]]),
            np.c_[expected, expected])
        np.testing.assert_almost_equal(
            convolution_filter(arr, [[.75], [.25]], nsides=1),
            self.expected.conv1[:, None])
        # same single-column checks with a NaN present
        arr = self.datana.values
        np.testing.assert_almost_equal(
            convolution_filter(arr, [[.75], [.25]]),
            self.expected.conv2_na[:, None])
        np.testing.assert_almost_equal(
            convolution_filter(arr, [[.75], [.25]], nsides=1),
            self.expected.conv1_na[:, None])

    def test_recursive(self):
        series = self.data.values.squeeze()
        np.testing.assert_almost_equal(
            recursive_filter(series, [.75, .25]), self.expected.recurse)
        np.testing.assert_almost_equal(
            recursive_filter(series, [.75, .25], init=[150, 100]),
            self.expected.recurse_init)
        series = self.datana.values.squeeze()
        np.testing.assert_almost_equal(
            recursive_filter(series, [.75, .25]), self.expected.recurse_na)
        np.testing.assert_almost_equal(
            recursive_filter(series, [.75, .25], init=[150, 100]),
            self.expected.recurse_init_na)
        # init of the wrong length for the coefficients must raise
        assert_raises(ValueError, recursive_filter, series,
                      [.75, .25, .5], [150, 100])

    def test_pandas(self):
        # pandas input keeps the original quarterly index
        first = datetime(1951, 3, 31)
        last = datetime(1958, 12, 31)
        col = self.data[0]
        res = convolution_filter(col, [.75, .25])
        assert_(res.index[0] == first)
        assert_(res.index[-1] == last)
        res = convolution_filter(col, [.75, .25], nsides=1)
        assert_(res.index[0] == first)
        # the one-sided filter also retains the full index
        assert_(res.index[-1] == last)
        res = recursive_filter(col, [.75, .25])
        assert_(res.index[0] == first)
        assert_(res.index[-1] == last)
        res = recursive_filter(self.datana, [.75, .25])
        assert_(res.index[0] == first)
        assert_(res.index[-1] == last)

    def test_pandas2d(self):
        # two-column DataFrame input keeps the original index too
        first = datetime(1951, 3, 31)
        last = datetime(1958, 12, 31)
        two_cols = concat((self.data[0], self.data[0]), axis=1)
        res = convolution_filter(two_cols, [[.75, .75], [.25, .25]])
        assert_(res.index[0] == first)
        assert_(res.index[-1] == last)

    def test_odd_length_filter(self):
        first = datetime(1951, 3, 31)
        last = datetime(1958, 12, 31)
        col = self.data[0]

        res = convolution_filter(col, [.75, .5, .3, .2, .1])
        np.testing.assert_almost_equal(res.values.squeeze(),
                                       self.expected.conv2_odd)
        np.testing.assert_(res.index[0] == first)
        np.testing.assert_(res.index[-1] == last)

        res = convolution_filter(col, [.75, .5, .3, .2, .1], nsides=1)
        np.testing.assert_almost_equal(res.values.squeeze(),
                                       self.expected.conv1_odd)
        np.testing.assert_(res.index[0] == first)
        np.testing.assert_(res.index[-1] == last)

        # these coefficients give an unstable recursion, so values blow up;
        # the R reference values carry limited precision -> 4 decimals
        res = recursive_filter(col, [.75, .5, .3, .2, .1], init=[150, 100,
                                                                 125, 135,
                                                                 145])
        np.testing.assert_almost_equal(res.values.squeeze(),
                                       self.expected.recurse_odd, 4)
        np.testing.assert_(res.index[0] == first)
        np.testing.assert_(res.index[-1] == last)
def dummy_func(x):
    # Identity pass-through used as a target for pandas_wrapper below.
    return x
def dummy_func_array(x):
    # Return the underlying ndarray of a pandas object (drops the index).
    return x.values
def dummy_func_pandas_columns(x):
    # Same body as dummy_func_array: return the raw ndarray of the input.
    return x.values
def dummy_func_pandas_series(x):
    # Select column 'A' from a DataFrame-like input.
    return x['A']
def test_pandas_freq_decorator():
    # pandas_wrapper should hand back functions whose output carries the
    # input frame's index/columns (or the explicitly supplied names).
    x = make_dataframe()
    # in x, get a function back that returns an x with the same columns
    func = pandas_wrapper(dummy_func)
    np.testing.assert_equal(func(x.values), x)
    # an ndarray-returning function wrapped over a frame gives the frame back
    func = pandas_wrapper(dummy_func_array)
    assert_frame_equal(func(x), x)
    # names= relabels the output columns
    expected = x.rename(columns=dict(zip('ABCD', 'EFGH')))
    func = pandas_wrapper(dummy_func_array, names=list('EFGH'))
    assert_frame_equal(func(x), expected)
| 50.590968 | 79 | 0.641221 | from statsmodels.compat.pandas import assert_frame_equal, make_dataframe
from datetime import datetime
import numpy as np
from numpy.testing import (assert_almost_equal, assert_equal, assert_allclose,
assert_raises, assert_)
from numpy import array, column_stack
from statsmodels.tsa.filters._utils import pandas_wrapper
from statsmodels.datasets import macrodata
from pandas import DataFrame, date_range, concat
from statsmodels.tsa.filters.api import (bkfilter, hpfilter, cffilter,
convolution_filter, recursive_filter)
def test_bking1d():
bking_results = array([
7.320813, 2.886914, -6.818976, -13.49436,
-13.27936, -9.405913, -5.691091, -5.133076, -7.273468,
-9.243364, -8.482916, -4.447764, 2.406559, 10.68433,
19.46414, 28.09749, 34.11066, 33.48468, 24.64598, 9.952399,
-4.265528, -12.59471, -13.46714, -9.049501, -3.011248,
.5655082, 2.897976, 7.406077, 14.67959, 18.651, 13.05891,
-2.945415, -24.08659, -41.86147, -48.68383, -43.32689,
-31.66654, -20.38356, -13.76411, -9.978693, -3.7704, 10.27108,
31.02847, 51.87613, 66.93117, 73.51951, 73.4053, 69.17468,
59.8543, 38.23899, -.2604809, -49.0107, -91.1128, -112.1574,
-108.3227, -86.51453, -59.91258, -40.01185, -29.70265,
-22.76396, -13.08037, 1.913622, 20.44045, 37.32873, 46.79802,
51.95937, 59.67393, 70.50803, 81.27311, 83.53191, 67.72536,
33.78039, -6.509092, -37.31579, -46.05207, -29.81496, 1.416417,
28.31503,
32.90134, 8.949259, -35.41895, -84.65775, -124.4288, -144.6036,
-140.2204, -109.2624, -53.6901, 15.07415, 74.44268, 104.0403,
101.0725, 76.58291, 49.27925, 36.15751, 36.48799, 37.60897,
27.75998, 4.216643, -23.20579, -39.33292, -36.6134, -20.90161,
-4.143123, 5.48432, 9.270075, 13.69573, 22.16675, 33.01987,
41.93186, 47.12222, 48.62164, 47.30701, 40.20537, 22.37898,
-7.133002, -43.3339, -78.51229, -101.3684, -105.2179,
-90.97147,
-68.30824, -48.10113, -35.60709, -31.15775, -31.82346,
-32.49278, -28.22499, -14.42852, 10.1827, 36.64189, 49.43468,
38.75517, 6.447761, -33.15883, -62.60446, -72.87829, -66.54629,
-52.61205, -38.06676, -26.19963, -16.51492, -7.007577,
.6125674,
7.866972, 14.8123, 22.52388, 30.65265, 39.47801, 49.05027,
59.02925,
72.88999, 95.08865, 125.8983, 154.4283, 160.7638, 130.6092,
67.84406, -7.070272, -68.08128, -99.39944, -104.911,
-100.2372, -98.11596, -104.2051, -114.0125, -113.3475,
-92.98669, -51.91707, -.7313812, 43.22938, 64.62762, 64.07226,
59.35707, 67.06026, 91.87247, 124.4591, 151.2402, 163.0648,
154.6432])
X = macrodata.load_pandas().data['realinv'].values
Y = bkfilter(X, 6, 32, 12)
assert_almost_equal(Y, bking_results, 4)
def test_bking2d():
bking_results = array([
[7.320813, -.0374475], [2.886914, -.0430094],
[-6.818976, -.053456], [-13.49436, -.0620739], [-13.27936, -.0626929],
[-9.405913, -.0603022], [-5.691091, -.0630016], [-5.133076, -.0832268],
[-7.273468, -.1186448], [-9.243364, -.1619868], [-8.482916, -.2116604],
[-4.447764, -.2670747], [2.406559, -.3209931], [10.68433, -.3583075],
[19.46414, -.3626742], [28.09749, -.3294618], [34.11066, -.2773388],
[33.48468, -.2436127], [24.64598, -.2605531], [9.952399, -.3305166],
[-4.265528, -.4275561], [-12.59471, -.5076068], [-13.46714, -.537573],
[-9.049501, -.5205845], [-3.011248, -.481673], [.5655082, -.4403994],
[2.897976, -.4039957], [7.406077, -.3537394], [14.67959, -.2687359],
[18.651, -.1459743], [13.05891, .0014926], [-2.945415, .1424277],
[-24.08659, .2451936], [-41.86147, .288541], [-48.68383, .2727282],
[-43.32689, .1959127], [-31.66654, .0644874], [-20.38356, -.1158372],
[-13.76411, -.3518627], [-9.978693, -.6557535], [-3.7704, -1.003754],
[10.27108, -1.341632], [31.02847, -1.614486], [51.87613, -1.779089],
[66.93117, -1.807459], [73.51951, -1.679688], [73.4053, -1.401012],
[69.17468, -.9954996], [59.8543, -.511261], [38.23899, -.0146745],
[-.2604809, .4261311], [-49.0107, .7452514], [-91.1128, .8879492],
[-112.1574, .8282748], [-108.3227, .5851508], [-86.51453, .2351699],
[-59.91258, -.1208998], [-40.01185, -.4297895], [-29.70265, -.6821963],
[-22.76396, -.9234254], [-13.08037, -1.217539], [1.913622, -1.57367],
[20.44045, -1.927008], [37.32873, -2.229565], [46.79802, -2.463154],
[51.95937, -2.614697], [59.67393, -2.681357], [70.50803, -2.609654],
[81.27311, -2.301618], [83.53191, -1.720974], [67.72536, -.9837123],
[33.78039, -.2261613], [-6.509092, .4546985], [-37.31579, 1.005751],
[-46.05207, 1.457224], [-29.81496, 1.870815], [1.416417, 2.263313],
[28.31503, 2.599906], [32.90134, 2.812282], [8.949259, 2.83358],
[-35.41895, 2.632667], [-84.65775, 2.201077], [-124.4288, 1.598951],
[-144.6036, .9504762], [-140.2204, .4187932], [-109.2624, .1646726],
[-53.6901, .2034265], [15.07415, .398165], [74.44268, .5427476],
[104.0403, .5454975], [101.0725, .4723354], [76.58291, .4626823],
[49.27925, .5840143], [36.15751, .7187981], [36.48799, .6058422],
[37.60897, .1221227], [27.75998, -.5891272], [4.216643, -1.249841],
[-23.20579, -1.594972], [-39.33292, -1.545968], [-36.6134, -1.275494],
[-20.90161, -1.035783], [-4.143123, -.9971732], [5.48432, -1.154264],
[9.270075, -1.29987], [13.69573, -1.240559], [22.16675, -.9662656],
[33.01987, -.6420301], [41.93186, -.4698712], [47.12222, -.4527797],
[48.62164, -.4407153], [47.30701, -.2416076], [40.20537, .2317583],
[22.37898, .8710276], [-7.133002, 1.426177], [-43.3339, 1.652785],
[-78.51229, 1.488021], [-101.3684, 1.072096], [-105.2179, .6496446],
[-90.97147, .4193682], [-68.30824, .41847], [-48.10113, .5253419],
[-35.60709, .595076], [-31.15775, .5509905], [-31.82346, .3755519],
[-32.49278, .1297979], [-28.22499, -.0916165], [-14.42852, -.2531037],
[10.1827, -.3220784], [36.64189, -.2660561], [49.43468, -.1358522],
[38.75517, -.0279508], [6.447761, .0168735], [-33.15883, .0315687],
[-62.60446, .0819507], [-72.87829, .2274033], [-66.54629, .4641401],
[-52.61205, .7211093], [-38.06676, .907773], [-26.19963, .9387103],
[-16.51492, .7940786], [-7.007577, .5026631], [.6125674, .1224996],
[7.866972, -.2714422], [14.8123, -.6273921], [22.52388, -.9124271],
[30.65265, -1.108861], [39.47801, -1.199206], [49.05027, -1.19908],
[59.02925, -1.139046], [72.88999, -.9775021], [95.08865, -.6592603],
[125.8983, -.1609712], [154.4283, .4796201], [160.7638, 1.100565],
[130.6092, 1.447148], [67.84406, 1.359608], [-7.070272, .8931825],
[-68.08128, .2619787], [-99.39944, -.252208], [-104.911, -.4703874],
[-100.2372, -.4430657], [-98.11596, -.390683], [-104.2051, -.5647846],
[-114.0125, -.9397582], [-113.3475, -1.341633], [-92.98669, -1.567337],
[-51.91707, -1.504943], [-.7313812, -1.30576], [43.22938, -1.17151],
[64.62762, -1.136151], [64.07226, -1.050555], [59.35707, -.7308369],
[67.06026, -.1766731], [91.87247, .3898467], [124.4591, .8135461],
[151.2402, .9644226], [163.0648, .6865934], [154.6432, .0115685]])
mdata = macrodata.load_pandas()
X = mdata.data[['realinv', 'cpi']].values.astype(float)
Y = bkfilter(X, 6, 32, 12)
assert_almost_equal(Y, bking_results, 4)
def test_hpfilter():
hpfilt_res = array([
[3.951191484487844718e+01, 2.670837085155121713e+03],
[8.008853245681075350e+01, 2.698712467543189177e+03],
[4.887545512195401898e+01, 2.726612544878045810e+03],
[3.059193256079834100e+01, 2.754612067439201837e+03],
[6.488266733421960453e+01, 2.782816332665780465e+03],
[2.304024204546703913e+01, 2.811349757954532834e+03],
[-1.355312369487364776e+00, 2.840377312369487299e+03],
[-6.746236512580753697e+01, 2.870078365125807522e+03],
[-8.136743836853429457e+01, 2.900631438368534418e+03],
[-6.016789026443257171e+01, 2.932172890264432681e+03],
[-4.636922433138215638e+01, 2.964788224331382025e+03],
[-2.069533915570400495e+01, 2.998525339155703932e+03],
[-2.162152558595607843e+00, 3.033403152558595593e+03],
[-4.718647774311648391e+00, 3.069427647774311481e+03],
[-1.355645669169007306e+01, 3.106603456691690099e+03],
[-4.436926204475639679e+01, 3.144932262044756499e+03],
[-4.332027378211660107e+01, 3.184407273782116590e+03],
[-4.454697106352068658e+01, 3.224993971063520803e+03],
[-2.629875787765286077e+01, 3.266630757877652741e+03],
[-4.426119635629265758e+01, 3.309228196356292756e+03],
[-1.443441190762496262e+01, 3.352680411907625057e+03],
[-2.026686669186437939e+01, 3.396853866691864368e+03],
[-1.913700136208899494e+01, 3.441606001362089046e+03],
[-5.482458977940950717e+01, 3.486781589779409387e+03],
[-1.596244517937793717e+01, 3.532213445179378141e+03],
[-1.374011542874541192e+01, 3.577700115428745448e+03],
[1.325482813403914406e+01, 3.623030171865960710e+03],
[5.603040174253828809e+01, 3.667983598257461836e+03],
[1.030743373627105939e+02, 3.712348662637289181e+03],
[7.217534795943993231e+01, 3.755948652040559864e+03],
[5.462972503693208637e+01, 3.798671274963067845e+03],
[4.407065050666142270e+01, 3.840449349493338559e+03],
[3.749016270204992907e+01, 3.881249837297949853e+03],
[-1.511244199923112319e+00, 3.921067244199923152e+03],
[-9.093507374079763395e+00, 3.959919507374079785e+03],
[-1.685361946760258434e+01, 3.997823619467602384e+03],
[2.822211031434289907e+01, 4.034790889685657021e+03],
[6.117590627896424849e+01, 4.070822093721035344e+03],
[5.433135391434370831e+01, 4.105935646085656117e+03],
[3.810480376716623141e+01, 4.140188196232833434e+03],
[7.042964928802848590e+01, 4.173670350711971878e+03],
[4.996346842507591646e+01, 4.206496531574924120e+03],
[4.455282059571254649e+01, 4.238825179404287155e+03],
[-7.584961950576143863e+00, 4.270845961950576566e+03],
[-4.620339247697120300e+01, 4.302776392476971523e+03],
[-7.054024364552969928e+01, 4.334829243645529459e+03],
[-6.492941099801464588e+01, 4.367188410998014660e+03],
[-1.433567024239555394e+02, 4.399993702423955256e+03],
[-5.932834493089012540e+01, 4.433344344930889747e+03],
[-6.842096758743628016e+01, 4.467249967587436004e+03],
[-6.774011924654860195e+01, 4.501683119246548813e+03],
[-9.030958565658056614e+01, 4.536573585656580690e+03],
[-4.603981499136807543e+01, 4.571808814991368308e+03],
[2.588118806672991923e+01, 4.607219811933269739e+03],
[3.489419371912299539e+01, 4.642608806280876706e+03],
[7.675179642495095322e+01, 4.677794203575049323e+03],
[1.635497817724171910e+02, 4.712616218227582976e+03],
[1.856079654765617306e+02, 4.746963034523438182e+03],
[1.254269446392718237e+02, 4.780825055360728584e+03],
[1.387413113837174024e+02, 4.814308688616282780e+03],
[6.201826599282230745e+01, 4.847598734007177882e+03],
[4.122129542972197669e+01, 4.880966704570278125e+03],
[-4.120287475842360436e+01, 4.914722874758424041e+03],
[-9.486328233441963675e+01, 4.949203282334419782e+03],
[-1.894232132641573116e+02, 4.984718213264157384e+03],
[-1.895766639620087517e+02, 5.021518663962008759e+03],
[-1.464092413342650616e+02, 5.059737241334265491e+03],
[-1.218770668721217589e+02, 5.099388066872122181e+03],
[-4.973075629078175552e+01, 5.140393756290781312e+03],
[-5.365375213897277717e+01, 5.182600752138972894e+03],
[-7.175241524251214287e+01, 5.225824415242512259e+03],
[-7.834757283225462743e+01, 5.269846572832254424e+03],
[-6.264220687943907251e+01, 5.314404206879438789e+03],
[-3.054332122210325906e+00, 5.359185332122210639e+03],
[4.808218808024685131e+01, 5.403838811919753425e+03],
[2.781399326736391231e+00, 5.448011600673263274e+03],
[-2.197570415173231595e+01, 5.491380704151732061e+03],
[1.509441335012807031e+02, 5.533624866498719712e+03],
[1.658909029574851957e+02, 5.574409097042514986e+03],
[2.027292548049981633e+02, 5.613492745195001589e+03],
[1.752101578176061594e+02, 5.650738842182393455e+03],
[1.452808749847536092e+02, 5.686137125015246056e+03],
[1.535481629475025329e+02, 5.719786837052497503e+03],
[1.376169777998875361e+02, 5.751878022200112355e+03],
[1.257703080340770612e+02, 5.782696691965922582e+03],
[-2.524186846895645431e+01, 5.812614868468956047e+03],
[-6.546618027042404719e+01, 5.842083180270424236e+03],
[1.192352023580315290e+01, 5.871536479764196883e+03],
[1.043482970188742911e+02, 5.901368702981125352e+03],
[2.581376184768396342e+01, 5.931981238152316109e+03],
[6.634330880534071184e+01, 5.963840691194659485e+03],
[-4.236780162594641297e+01, 5.997429801625946311e+03],
[-1.759397735321817891e+02, 6.033272773532181418e+03],
[-1.827933311233055065e+02, 6.071867331123305121e+03],
[-2.472312362505917918e+02, 6.113601236250591683e+03],
[-2.877470049336488955e+02, 6.158748004933649099e+03],
[-2.634066336693540507e+02, 6.207426633669354487e+03],
[-1.819572770763625158e+02, 6.259576277076362203e+03],
[-1.175034606274621183e+02, 6.314971460627461965e+03],
[-4.769898649718379602e+01, 6.373272986497183410e+03],
[1.419578280287896632e+01, 6.434068217197121157e+03],
[6.267929662760798237e+01, 6.496914703372392069e+03],
[6.196413196753746888e+01, 6.561378868032462378e+03],
[5.019769125317907310e+01, 6.627066308746821051e+03],
[4.665364933213822951e+01, 6.693621350667861407e+03],
[3.662430749527266016e+01, 6.760719692504727391e+03],
[7.545680850246480986e+01, 6.828066191497535328e+03],
[6.052940492147536133e+01, 6.895388595078524304e+03],
[6.029518881462354329e+01, 6.962461811185376064e+03],
[2.187042136652689805e+01, 7.029098578633473153e+03],
[2.380067926824722235e+01, 7.095149320731752596e+03],
[-7.119129802169481991e+00, 7.160478129802169860e+03],
[-3.194497359120850888e+01, 7.224963973591208742e+03],
[-1.897137038934124575e+01, 7.288481370389341464e+03],
[-1.832687287845146784e+01, 7.350884872878451461e+03],
[4.600482336597542599e+01, 7.412017176634024509e+03],
[2.489047706403016491e+01, 7.471709522935970199e+03],
[6.305909392127250612e+01, 7.529821906078727807e+03],
[4.585212309498183458e+01, 7.586229876905018500e+03],
[9.314260180878318351e+01, 7.640848398191216802e+03],
[1.129819097095369216e+02, 7.693621090290463144e+03],
[1.204662123176703972e+02, 7.744549787682329224e+03],
[1.336860614601246198e+02, 7.793706938539875409e+03],
[1.034567175813735957e+02, 7.841240282418626521e+03],
[1.403118873372050075e+02, 7.887381112662795204e+03],
[1.271726169351004501e+02, 7.932425383064899506e+03],
[8.271925765282139764e+01, 7.976756742347178260e+03],
[-3.197432211752584408e+01, 8.020838322117525422e+03],
[-1.150209535194062482e+02, 8.065184953519406008e+03],
[-1.064694837456772802e+02, 8.110291483745677397e+03],
[-1.190428718925368230e+02, 8.156580871892536379e+03],
[-1.353635336292991269e+02, 8.204409533629299403e+03],
[-9.644348283027102298e+01, 8.254059482830271008e+03],
[-6.143413116116607853e+01, 8.305728131161165948e+03],
[-3.019161311097923317e+01, 8.359552613110980019e+03],
[1.384333163552582846e+00, 8.415631666836447039e+03],
[-4.156016073666614830e+01, 8.474045160736666730e+03],
[-4.843882841860977351e+01, 8.534873828418609264e+03],
[-6.706442838867042155e+01, 8.598172428388670596e+03],
[-2.019644488579979225e+01, 8.663965444885800025e+03],
[-4.316446881084630149e+00, 8.732235446881084499e+03],
[4.435061943264736328e+01, 8.802952380567352520e+03],
[2.820550564155564643e+01, 8.876083494358445023e+03],
[5.155624419490777655e+01, 8.951623755805092514e+03],
[-4.318760899315748247e+00, 9.029585760899315574e+03],
[-6.534632828542271454e+01, 9.110014328285422380e+03],
[-7.226757738268497633e+01, 9.192951577382684263e+03],
[-9.412378615444868046e+01, 9.278398786154448317e+03],
[-1.191240653288368776e+02, 9.366312065328836979e+03],
[-4.953669826751865912e+01, 9.456588698267518339e+03],
[-6.017251579067487910e+01, 9.549051515790675694e+03],
[-5.103438828313483100e+01, 9.643492388283135369e+03],
[-7.343057830678117170e+01, 9.739665578306781754e+03],
[-2.774245193054957781e+01, 9.837293451930549054e+03],
[-3.380481112519191811e+00, 9.936052481112519672e+03],
[-2.672779877794346248e+01, 1.003560179877794326e+04],
[-3.217342505148371856e+01, 1.013559842505148299e+04],
[-4.140567518359966925e+01, 1.023568267518359971e+04],
[-6.687756033938057953e+00, 1.033547475603393832e+04],
[7.300600408459467872e+01, 1.043456899591540605e+04],
[6.862345670680042531e+01, 1.053255554329319966e+04],
[5.497882461487461114e+01, 1.062907017538512628e+04],
[9.612244093055960548e+01, 1.072379155906944106e+04],
[1.978212770103891671e+02, 1.081643272298961165e+04],
[1.362772276848754700e+02, 1.090676677231512440e+04],
[2.637635494867263333e+02, 1.099469045051327339e+04],
[1.876813256815166824e+02, 1.108018567431848351e+04],
[1.711447873158413131e+02, 1.116339921268415856e+04],
[5.257586460826678376e+01, 1.124459513539173349e+04],
[4.710652228531762375e+01, 1.132414447771468258e+04],
[-6.237613484241046535e+01, 1.140245113484241119e+04],
[-9.982044354035315337e+01, 1.147994844354035376e+04],
[-7.916275548997509759e+01, 1.155703075548997549e+04],
[-9.526003459472303803e+01, 1.163403003459472347e+04],
[-1.147987680369169539e+02, 1.171122876803691724e+04],
[-1.900259054765901965e+02, 1.178884990547659072e+04],
[-2.212256473439556430e+02, 1.186704464734395515e+04],
[-2.071394278781845060e+02, 1.194584542787818464e+04],
[-8.968541528904825100e+01, 1.202514641528904758e+04],
[-6.189531564415665343e+01, 1.210471231564415575e+04],
[-5.662878162551714922e+01, 1.218425178162551674e+04],
[-4.961678134413705266e+01, 1.226343478134413635e+04],
[-3.836288992144181975e+01, 1.234189588992144127e+04],
[-8.956671991456460091e+00, 1.241923867199145570e+04],
[3.907028461866866564e+01, 1.249504271538133071e+04],
[1.865299000184495526e+01, 1.256888200999815490e+04],
[4.279803532226833340e+01, 1.264035496467773191e+04],
[3.962735362631610769e+01, 1.270907164637368442e+04],
[1.412691291877854383e+02, 1.277466887081221466e+04],
[1.256537791844366438e+02, 1.283680822081556289e+04],
[7.067642758858892194e+01, 1.289523957241141034e+04],
[1.108876647603192396e+02, 1.294979133523968085e+04],
[9.956490829291760747e+01, 1.300033609170708223e+04],
[1.571612709880937473e+02, 1.304681572901190702e+04],
[2.318746375812715996e+02, 1.308923436241872878e+04],
[2.635546670125277160e+02, 1.312769433298747208e+04],
[2.044220965739259555e+02, 1.316244290342607383e+04],
[2.213739418903714977e+02, 1.319389205810962812e+04],
[1.020184547767112235e+02, 1.322258154522328914e+04],
[-1.072694716663390864e+02, 1.324918947166633916e+04],
[-3.490477058718843182e+02, 1.327445770587188417e+04],
[-3.975570728533530200e+02, 1.329906107285335383e+04],
[-3.331152428080622485e+02, 1.332345624280806260e+04]])
dta = macrodata.load_pandas().data['realgdp'].values
res = column_stack((hpfilter(dta, 1600)))
assert_almost_equal(res, hpfilt_res, 6)
def test_cfitz_filter():
cfilt_res = array([
[0.712599537179426, 0.439563468233128],
[1.06824041304411, 0.352886666575907],
[1.19422467791128, 0.257297004260607],
[0.970845473140327, 0.114504692143872],
[0.467026976628563, -0.070734782329146],
[-0.089153511514031, -0.238609685132605],
[-0.452339254128573, -0.32376584042956],
[-0.513231214461187, -0.314288554228112],
[-0.352372578720063, -0.258815055101336],
[-0.160282602521333, -0.215076844089567],
[-0.0918782593827686, -0.194120745417214],
[-0.168083823205437, -0.158327420072693],
[-0.291595204965808, -0.0742727139742986],
[-0.348638756841307, 0.037008291163602],
[-0.304328040874631, 0.108196527328748],
[-0.215933150969686, 0.0869231107437175],
[-0.165632621390694, -0.0130556619786275],
[-0.182326839507151, -0.126570926191824],
[-0.223737786804725, -0.205535321806185],
[-0.228939291453403, -0.269110078201836],
[-0.185518327227038, -0.375976507132174],
[-0.143900152461529, -0.53760115656157],
[-0.162749541550174, -0.660065018626038],
[-0.236263634756884, -0.588542352053736],
[-0.275785854309211, -0.236867929421996],
[-0.173666515108109, 0.303436335579219],
[0.0963135720251639, 0.779772338801993],
[0.427070069032285, 0.929108075350647],
[0.629034743259998, 0.658330841002647],
[0.557941248993624, 0.118500049361018],
[0.227866624051603, -0.385048321099911],
[-0.179878859883227, -0.582223992561493],
[-0.428263000051965, -0.394053702908091],
[-0.381640684645912, 0.0445437406977307],
[-0.0942745548364887, 0.493997792757968],
[0.238132391504895, 0.764519811304315],
[0.431293754256291, 0.814755206427316],
[0.455010435813661, 0.745567043101108],
[0.452800768971269, 0.709401694610443],
[0.615754619329312, 0.798293251119636],
[1.00256335412457, 0.975856845059388],
[1.44841039351691, 1.09097252730799],
[1.64651971120370, 0.967823457118036],
[1.35534532901802, 0.522397724737059],
[0.580492790312048, -0.16941343361609],
[-0.410746188031773, -0.90760401289056],
[-1.26148406066881, -1.49592867122591],
[-1.75784179124566, -1.87404167409849],
[-1.94478553960064, -2.14586210891112],
[-2.03751202708559, -2.465855239868],
[-2.20376059354166, -2.86294187189049],
[-2.39722338315852, -3.15004697654831],
[-2.38032366161537, -3.01390466643222],
[-1.91798022532025, -2.23395210271226],
[-0.982318490353716, -0.861346053067472],
[0.199047030343412, 0.790266582335616],
[1.28582776574786, 2.33731327460104],
[2.03565905376430, 3.54085486821911],
[2.41201557412526, 4.36519456268955],
[2.52011070482927, 4.84810517685452],
[2.45618479815452, 4.92906708807477],
[2.22272146945388, 4.42591058990048],
[1.78307567169034, 3.20962906108388],
[1.18234431860844, 1.42568060336985],
[0.590069172333348, -0.461896808688991],
[0.19662302949837, -1.89020992539465],
[0.048307034171166, -2.53490571941987],
[-0.0141956981899000, -2.50020338531674],
[-0.230505187108187, -2.20625973569823],
[-0.700947410386801, -2.06643697511048],
[-1.27085123163060, -2.21536883679783],
[-1.64082547897928, -2.49016921117735],
[-1.62286182971254, -2.63948740221362],
[-1.31609762181362, -2.54685250637904],
[-1.03085567704873, -2.27157435428923],
[-1.01100120380112, -1.90404507430561],
[-1.19823958399826, -1.4123209792214],
[-1.26398933608383, -0.654000086153317],
[-0.904710628949692, 0.447960016248203],
[-0.151340093679588, 1.73970411237156],
[0.592926881165989, 2.85741581650685],
[0.851660587507523, 3.4410446351716],
[0.480324393352127, 3.36870271362297],
[-0.165153230782417, 2.82003806696544],
[-0.459235919375844, 2.12858991660866],
[0.0271158842479935, 1.55840980891556],
[1.18759188180671, 1.17980298478623],
[2.43238266962309, 0.904011534980672],
[3.08277213720132, 0.595286911949837],
[2.79953663720953, 0.148014782859571],
[1.73694442845833, -0.496297332023011],
[0.357638079951977, -1.33108149877570],
[-0.891418825216945, -2.22650083183366],
[-1.77646467793627, -2.89359299718574],
[-2.24614790863088, -2.97921619243347],
[-2.29048879096607, -2.30003092779280],
[-1.87929656465888, -1.05298381273274],
[-1.04510101454788, 0.215837488618531],
[0.00413338508394524, 0.937866257924888],
[0.906870625251025, 0.92664365343019],
[1.33869057593416, 0.518564571494679],
[1.22659678454440, 0.288096869652890],
[0.79380139656044, 0.541053084632774],
[0.38029431865832, 1.01905199983437],
[0.183929413600038, 1.10529586616777],
[0.140045425897033, 0.393618564826736],
[0.0337313182352219, -0.86431819007665],
[-0.269208622829813, -1.85638085246792],
[-0.687276639992166, -1.82275359004533],
[-1.00161592325614, -0.692695765071617],
[-1.06320089194036, 0.803577361347341],
[-0.927152307196776, 1.67366338751788],
[-0.786802101366614, 1.42564362251793],
[-0.772970884572502, 0.426446388877964],
[-0.81275662801789, -0.437721213831647],
[-0.686831250382476, -0.504255468075149],
[-0.237936463020255, 0.148656301898438],
[0.459631879129522, 0.832925905720478],
[1.12717379822508, 0.889455302576383],
[1.48640453200855, 0.268042676202216],
[1.46515245776211, -0.446505038539178],
[1.22993484959115, -0.563868578181134],
[1.0272100765927, 0.0996849952196907],
[0.979191212438404, 1.05053652824665],
[1.00733490030391, 1.51658415000556],
[0.932192535457706, 1.06262774912638],
[0.643374300839414, -0.0865180803476065],
[0.186885168954461, -1.24799408923277],
[-0.290842337365465, -1.80035611156538],
[-0.669446735516495, -1.58847333561510],
[-0.928915624595538, -0.932116966867929],
[-1.11758635926997, -0.307879396807850],
[-1.26832454569756, -0.00856199983957032],
[-1.35755577149251, -0.0303537516690989],
[-1.34244112665546, -0.196807620887435],
[-1.22227976023299, -0.342062643495923],
[-1.04601473486818, -0.390474392372016],
[-0.85158508717846, -0.322164402093596],
[-0.605033439160543, -0.126930141915954],
[-0.218304303942818, 0.179551077808122],
[0.352173017779006, 0.512327303000081],
[1.01389600097229, 0.733397490572755],
[1.55149778750607, 0.748740387440165],
[1.75499674757591, 0.601759717901009],
[1.56636057468633, 0.457705308377562],
[1.12239792537274, 0.470849913286519],
[0.655802600286141, 0.646142040378738],
[0.335285115340180, 0.824103600255079],
[0.173454596506888, 0.808068498175582],
[0.0666753011315252, 0.521488214487996],
[-0.0842367474816212, 0.0583493276173476],
[-0.285604762631464, -0.405958418332253],
[-0.465735422869919, -0.747800086512926],
[-0.563586691231348, -0.94982272350799],
[-0.598110322024572, -1.04736894794361],
[-0.65216025756061, -1.04858365218822],
[-0.789663117801624, -0.924145633093637],
[-0.984704045337959, -0.670740724179446],
[-1.12449565589348, -0.359476803003931],
[-1.07878318723543, -0.092290938944355],
[-0.775555435407062, 0.102132527529259],
[-0.231610677329856, 0.314409560305622],
[0.463192794235131, 0.663523546243286],
[1.17416973448423, 1.13156902460931],
[1.74112278814906, 1.48967153067024],
[2.00320855757084, 1.42571085941843],
[1.8529912317336, 0.802460519079555],
[1.30747261947211, -0.169219078629572],
[0.540237070403222, -1.01621539672694],
[-0.177136817092375, -1.3130784867977],
[-0.611981468823591, -0.982477824460773],
[-0.700240028737747, -0.344919609255406],
[-0.572396497740112, 0.125083535035390],
[-0.450934466600975, 0.142553112732280],
[-0.494020014254326, -0.211429053871656],
[-0.701707589094918, -0.599602868825992],
[-0.94721339346157, -0.710669870591623],
[-1.09297139748946, -0.47846194092245],
[-1.08850658866583, -0.082258450179988],
[-0.976082880696692, 0.235758921309309],
[-0.81885695346771, 0.365298185204303],
[-0.63165529525553, 0.384725179378064],
[-0.37983149226421, 0.460240196164378],
[-0.0375551354277652, 0.68580913832794],
[0.361996927427804, 0.984470835955107],
[0.739920615366072, 1.13195975020298],
[1.03583478061534, 0.88812510421667],
[1.25614938962160, 0.172561520611839],
[1.45295030231799, -0.804979390544485],
[1.64887158748426, -1.55662011197859],
[1.78022721495313, -1.52921975346218],
[1.71945683859668, -0.462240366424548],
[1.36728880239190, 1.31213774341268],
[0.740173894315912, 2.88362740582926],
[-0.0205364331835904, 3.20319080963167],
[-0.725643970956428, 1.75222466531151],
[-1.23900506689782, -0.998432917440275],
[-1.52651897508678, -3.72752870885448],
[-1.62857516631435, -5.00551707196292],
[-1.59657420180451, -4.18499132634584],
[-1.45489013276495, -1.81759097305637],
[-1.21309542313047, 0.722029457352468]])
dta = macrodata.load_pandas().data[['tbilrate', 'infl']].values[1:]
cyc, trend = cffilter(dta)
assert_almost_equal(cyc, cfilt_res, 8)
# do 1d
cyc, trend = cffilter(dta[:, 1])
assert_almost_equal(cyc, cfilt_res[:, 1], 8)
def test_bking_pandas():
# 1d
dta = macrodata.load_pandas().data
index = date_range(start='1959-01-01', end='2009-10-01', freq='Q')
dta.index = index
filtered = bkfilter(dta["infl"])
nd_filtered = bkfilter(dta['infl'].values)
assert_equal(filtered.values, nd_filtered)
assert_equal(filtered.index[0], datetime(1962, 3, 31))
assert_equal(filtered.index[-1], datetime(2006, 9, 30))
assert_equal(filtered.name, "infl_cycle")
# 2d
filtered = bkfilter(dta[["infl", "unemp"]])
nd_filtered = bkfilter(dta[['infl', 'unemp']].values)
assert_equal(filtered.values, nd_filtered)
assert_equal(filtered.index[0], datetime(1962, 3, 31))
assert_equal(filtered.index[-1], datetime(2006, 9, 30))
assert_equal(filtered.columns.values, ["infl_cycle", "unemp_cycle"])
def test_cfitz_pandas():
# 1d
dta = macrodata.load_pandas().data
index = date_range(start='1959-01-01', end='2009-10-01', freq='Q')
dta.index = index
cycle, trend = cffilter(dta["infl"])
ndcycle, ndtrend = cffilter(dta['infl'].values)
assert_allclose(cycle.values, ndcycle, rtol=1e-14)
assert_equal(cycle.index[0], datetime(1959, 3, 31))
assert_equal(cycle.index[-1], datetime(2009, 9, 30))
assert_equal(cycle.name, "infl_cycle")
# 2d
cycle, trend = cffilter(dta[["infl", "unemp"]])
ndcycle, ndtrend = cffilter(dta[['infl', 'unemp']].values)
assert_allclose(cycle.values, ndcycle, rtol=1e-14)
assert_equal(cycle.index[0], datetime(1959, 3, 31))
assert_equal(cycle.index[-1], datetime(2009, 9, 30))
assert_equal(cycle.columns.values, ["infl_cycle", "unemp_cycle"])
def test_hpfilter_pandas():
dta = macrodata.load_pandas().data
index = date_range(start='1959-01-01', end='2009-10-01', freq='Q')
dta.index = index
cycle, trend = hpfilter(dta["realgdp"])
ndcycle, ndtrend = hpfilter(dta['realgdp'].values)
assert_equal(cycle.values, ndcycle)
assert_equal(cycle.index[0], datetime(1959, 3, 31))
assert_equal(cycle.index[-1], datetime(2009, 9, 30))
assert_equal(cycle.name, "realgdp_cycle")
class TestFilters(object):
@classmethod
def setup_class(cls):
# even
data = [-50, 175, 149, 214, 247, 237, 225, 329, 729, 809,
530, 489, 540, 457, 195, 176, 337, 239, 128, 102,
232, 429, 3, 98, 43, -141, -77, -13, 125, 361, -45, 184]
cls.data = DataFrame(data, date_range(start='1/1/1951',
periods=len(data),
freq='Q'))
data[9] = np.nan
cls.datana = DataFrame(data, date_range(start='1/1/1951',
periods=len(data),
freq='Q'))
from .results import filter_results
cls.expected = filter_results
def test_convolution(self):
x = self.data.values.squeeze()
res = convolution_filter(x, [.75, .25])
expected = self.expected.conv2
np.testing.assert_almost_equal(res, expected)
res = convolution_filter(x, [.75, .25], nsides=1)
expected = self.expected.conv1
np.testing.assert_almost_equal(res, expected)
x = self.datana.values.squeeze()
res = convolution_filter(x, [.75, .25])
expected = self.expected.conv2_na
np.testing.assert_almost_equal(res, expected)
res = convolution_filter(x, [.75, .25], nsides=1)
expected = self.expected.conv1_na
np.testing.assert_almost_equal(res, expected)
def test_convolution2d(self):
x = self.data.values
res = convolution_filter(x, [[.75], [.25]])
expected = self.expected.conv2
np.testing.assert_almost_equal(res, expected[:, None])
res = convolution_filter(np.c_[x, x], [[.75, .75], [.25, .25]])
np.testing.assert_almost_equal(res, np.c_[expected, expected])
res = convolution_filter(x, [[.75], [.25]], nsides=1)
expected = self.expected.conv1
np.testing.assert_almost_equal(res, expected[:, None])
x = self.datana.values
res = convolution_filter(x, [[.75], [.25]])
expected = self.expected.conv2_na
np.testing.assert_almost_equal(res, expected[:, None])
res = convolution_filter(x, [[.75], [.25]], nsides=1)
expected = self.expected.conv1_na
np.testing.assert_almost_equal(res, expected[:, None])
def test_recursive(self):
x = self.data.values.squeeze()
res = recursive_filter(x, [.75, .25])
expected = self.expected.recurse
np.testing.assert_almost_equal(res, expected)
res = recursive_filter(x, [.75, .25], init=[150, 100])
expected = self.expected.recurse_init
np.testing.assert_almost_equal(res, expected)
x = self.datana.values.squeeze()
res = recursive_filter(x, [.75, .25])
expected = self.expected.recurse_na
np.testing.assert_almost_equal(res, expected)
res = recursive_filter(x, [.75, .25], init=[150, 100])
expected = self.expected.recurse_init_na
np.testing.assert_almost_equal(res, expected)
assert_raises(ValueError, recursive_filter, x,
[.75, .25, .5], [150, 100])
def test_pandas(self):
start = datetime(1951, 3, 31)
end = datetime(1958, 12, 31)
x = self.data[0]
res = convolution_filter(x, [.75, .25])
assert_(res.index[0] == start)
assert_(res.index[-1] == end)
res = convolution_filter(x, [.75, .25], nsides=1)
assert_(res.index[0] == start)
# with no nan-padding q1 if not
assert_(res.index[-1] == end)
res = recursive_filter(x, [.75, .25])
assert_(res.index[0] == start)
assert_(res.index[-1] == end)
x = self.datana
res = recursive_filter(x, [.75, .25])
assert_(res.index[0] == start)
assert_(res.index[-1] == end)
def test_pandas2d(self):
start = datetime(1951, 3, 31)
end = datetime(1958, 12, 31)
x = concat((self.data[0], self.data[0]), axis=1)
res = convolution_filter(x, [[.75, .75], [.25, .25]])
assert_(res.index[0] == start)
assert_(res.index[-1] == end)
def test_odd_length_filter(self):
start = datetime(1951, 3, 31)
end = datetime(1958, 12, 31)
x = self.data[0]
res = convolution_filter(x, [.75, .5, .3, .2, .1])
expected = self.expected.conv2_odd
np.testing.assert_almost_equal(res.values.squeeze(), expected)
np.testing.assert_(res.index[0] == start)
np.testing.assert_(res.index[-1] == end)
res = convolution_filter(x, [.75, .5, .3, .2, .1], nsides=1)
expected = self.expected.conv1_odd
np.testing.assert_almost_equal(res.values.squeeze(), expected)
np.testing.assert_(res.index[0] == start)
np.testing.assert_(res.index[-1] == end)
# with no NAs
# not a stable filter
res = recursive_filter(x, [.75, .5, .3, .2, .1], init=[150, 100,
125, 135,
145])
expected = self.expected.recurse_odd
# only have 12 characters in R and this blows up and gets big
np.testing.assert_almost_equal(res.values.squeeze(), expected, 4)
np.testing.assert_(res.index[0] == start)
np.testing.assert_(res.index[-1] == end)
def dummy_func(x):
    # identity passthrough: fixture exercised by pandas_wrapper below
    return x
def dummy_func_array(x):
    # returns the raw ndarray backing a pandas object (fixture)
    return x.values
def dummy_func_pandas_columns(x):
    # same as dummy_func_array; kept separate to test column handling
    return x.values
def dummy_func_pandas_series(x):
    # selects a single column -> Series (fixture)
    return x['A']
def test_pandas_freq_decorator():
    """pandas_wrapper re-attaches index/columns to array-returning funcs."""
    x = make_dataframe()
    # wrapping the identity: an ndarray input should come back compared
    # equal to the original frame's data
    func = pandas_wrapper(dummy_func)
    np.testing.assert_equal(func(x.values), x)
    # array-returning function on a frame input: wrapper restores the frame
    func = pandas_wrapper(dummy_func_array)
    assert_frame_equal(func(x), x)
    # explicit names= relabels the restored columns
    expected = x.rename(columns=dict(zip('ABCD', 'EFGH')))
    func = pandas_wrapper(dummy_func_array, names=list('EFGH'))
    assert_frame_equal(func(x), expected)
| true | true |
f7116008166549e265c58b52b8fbdb0e9ec43e52 | 2,549 | py | Python | templates/template.py | ss005/PyRival | ce94312d429f368b724cdd8d3192935e34b7ba66 | [
"Apache-2.0"
] | null | null | null | templates/template.py | ss005/PyRival | ce94312d429f368b724cdd8d3192935e34b7ba66 | [
"Apache-2.0"
] | null | null | null | templates/template.py | ss005/PyRival | ce94312d429f368b724cdd8d3192935e34b7ba66 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
from __future__ import division, print_function
import os
import sys
from io import BytesIO, IOBase
if sys.version_info[0] < 3:
from __builtin__ import xrange as range
from future_builtins import ascii, filter, hex, map, oct, zip
def main():
    # Solution entry point; per-problem logic goes here.
    pass
# region fastio
BUFSIZE = 8192
class FastIO(IOBase):
    """Raw-fd buffered stream for fast competitive-programming I/O.

    Reads pull large chunks from the underlying file descriptor into an
    in-memory BytesIO; writes accumulate in the same buffer and are pushed
    to the fd only on flush().
    """

    newlines = 0  # count of complete, not-yet-consumed lines in the buffer

    def __init__(self, file):
        self._file = file
        self._fd = file.fileno()
        self.buffer = BytesIO()
        # writable unless the file was opened read-only
        self.writable = "x" in file.mode or "r" not in file.mode
        self.write = self.buffer.write if self.writable else None

    def read(self):
        """Read everything remaining on the fd and return it as bytes."""
        while True:
            b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))
            if not b:
                break
            ptr = self.buffer.tell()
            # append at the end, then restore the read cursor
            self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)
        self.newlines = 0
        return self.buffer.read()

    def readline(self):
        """Return the next line (trailing newline included, if present)."""
        while self.newlines == 0:
            b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))
            # (not b) treats EOF as a final line terminator so the loop exits
            self.newlines = b.count(b"\n") + (not b)
            ptr = self.buffer.tell()
            self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)
        self.newlines -= 1
        return self.buffer.readline()

    def flush(self):
        """Push buffered output to the fd (no-op for read-only wrappers)."""
        if self.writable:
            os.write(self._fd, self.buffer.getvalue())
            self.buffer.truncate(0), self.buffer.seek(0)
class IOWrapper(IOBase):
    """Text facade over FastIO for Python 3's str-based streams.

    Encodes/decodes as ascii at the boundary; all buffering is delegated
    to the wrapped FastIO instance.
    """

    def __init__(self, file):
        self.buffer = FastIO(file)
        self.flush = self.buffer.flush
        self.writable = self.buffer.writable
        self.write = lambda s: self.buffer.write(s.encode("ascii"))
        self.read = lambda: self.buffer.read().decode("ascii")
        self.readline = lambda: self.buffer.readline().decode("ascii")
def print(*args, **kwargs):
    """Print *args to a stream (sys.stdout by default), mimicking builtin print.

    Supports the sep, end, file and flush keyword arguments; any other
    keywords are silently ignored, as in the original implementation.
    """
    sep, file = kwargs.pop("sep", " "), kwargs.pop("file", sys.stdout)
    # join once instead of interleaving per-argument separator writes
    file.write(sep.join(str(arg) for arg in args))
    file.write(kwargs.pop("end", "\n"))
    if kwargs.pop("flush", False):
        file.flush()
# Swap the standard streams for the fast wrappers; Python 2's streams are
# byte-based so FastIO is used directly, Python 3 needs the text facade.
if sys.version_info[0] < 3:
    sys.stdin, sys.stdout = FastIO(sys.stdin), FastIO(sys.stdout)
else:
    sys.stdin, sys.stdout = IOWrapper(sys.stdin), IOWrapper(sys.stdout)
# input() built on the fast stdin; strips the platform line terminator
input = lambda: sys.stdin.readline().rstrip("\r\n")

# endregion

if __name__ == "__main__":
    main()
| 27.706522 | 79 | 0.604943 |
from __future__ import division, print_function
import os
import sys
from io import BytesIO, IOBase
if sys.version_info[0] < 3:
from __builtin__ import xrange as range
from future_builtins import ascii, filter, hex, map, oct, zip
def main():
pass
BUFSIZE = 8192
class FastIO(IOBase):
newlines = 0
def __init__(self, file):
self._file = file
self._fd = file.fileno()
self.buffer = BytesIO()
self.writable = "x" in file.mode or "r" not in file.mode
self.write = self.buffer.write if self.writable else None
def read(self):
while True:
b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))
if not b:
break
ptr = self.buffer.tell()
self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)
self.newlines = 0
return self.buffer.read()
def readline(self):
while self.newlines == 0:
b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))
self.newlines = b.count(b"\n") + (not b)
ptr = self.buffer.tell()
self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)
self.newlines -= 1
return self.buffer.readline()
def flush(self):
if self.writable:
os.write(self._fd, self.buffer.getvalue())
self.buffer.truncate(0), self.buffer.seek(0)
class IOWrapper(IOBase):
def __init__(self, file):
self.buffer = FastIO(file)
self.flush = self.buffer.flush
self.writable = self.buffer.writable
self.write = lambda s: self.buffer.write(s.encode("ascii"))
self.read = lambda: self.buffer.read().decode("ascii")
self.readline = lambda: self.buffer.readline().decode("ascii")
def print(*args, **kwargs):
    """Print *args to a stream (sys.stdout by default), mimicking builtin print.

    Supports sep, end, file and flush keywords; other keywords are ignored.
    """
    sep, file = kwargs.pop("sep", " "), kwargs.pop("file", sys.stdout)
    file.write(sep.join(str(arg) for arg in args))
    file.write(kwargs.pop("end", "\n"))
    if kwargs.pop("flush", False):
        file.flush()
if sys.version_info[0] < 3:
sys.stdin, sys.stdout = FastIO(sys.stdin), FastIO(sys.stdout)
else:
sys.stdin, sys.stdout = IOWrapper(sys.stdin), IOWrapper(sys.stdout)
input = lambda: sys.stdin.readline().rstrip("\r\n")
if __name__ == "__main__":
main()
| true | true |
f71161ca17d38c1dd04659c07978db4f1cf365c2 | 7,909 | py | Python | xmlrpc_client.py | y11en/tknk_scanner | c6c1b2b9142a3df4d86a1d44d677896d2623ac1e | [
"MIT"
] | 78 | 2018-09-29T19:07:54.000Z | 2022-03-27T20:21:08.000Z | xmlrpc_client.py | y11en/tknk_scanner | c6c1b2b9142a3df4d86a1d44d677896d2623ac1e | [
"MIT"
] | 11 | 2019-06-08T03:20:43.000Z | 2022-01-22T04:15:22.000Z | xmlrpc_client.py | y11en/tknk_scanner | c6c1b2b9142a3df4d86a1d44d677896d2623ac1e | [
"MIT"
] | 23 | 2018-10-01T07:00:49.000Z | 2021-06-10T07:07:19.000Z | #!/usr/bin/env python3
import xmlrpc.client
import os, sys, shutil, json, subprocess, time, yara, hashlib, datetime, requests, magic, redis, socket, pefile
from pathlib import Path
from pymongo import MongoClient
from rq import get_current_job, Queue
from read_avclass_report import run_avclass
from redis import Redis
with open("tknk.conf", 'r') as f:
tknk_conf = json.load(f)
VM_NAME=tknk_conf['vm_name']
VM_URL=tknk_conf['vm_url']
def download():
    """Fetch dump.zip from the analysis VM over XML-RPC.

    Returns True on success and False on failure, consistent with dump().

    Bug fixed: the original returned sys.exc_info() (a truthy 3-tuple) on
    failure and only caught xmlrpc.client.Fault, so connection errors
    propagated and truthiness tests on the return value saw "success".
    """
    proxy = xmlrpc.client.ServerProxy(VM_URL)
    with open("dump.zip", "wb") as handle:
        try:
            handle.write(proxy.download_file().data)
            return True
        except Exception:
            # covers Fault as well as socket/connection errors
            print(sys.exc_info())
            return False
def upload(filename):
    """Push a local file to the analysis VM via XML-RPC.

    Only the basename (text after the last '/') is sent as the remote name.
    """
    proxy = xmlrpc.client.ServerProxy(VM_URL)
    with open(filename, "rb") as handle:
        payload = xmlrpc.client.Binary(handle.read())
    remote_name = filename.rsplit("/", 1)[1] if "/" in filename else filename
    print("upload..." + remote_name)
    proxy.upload_file(payload, remote_name)
def dump(config):
    """Ask the analysis VM to run the dump described by *config*.

    Returns True on success, False on any RPC failure.

    Fixed: the original used a bare ``except:``, which also swallows
    SystemExit/KeyboardInterrupt; narrowed to Exception.
    """
    proxy = xmlrpc.client.ServerProxy(VM_URL)
    try:
        proxy.dump(config)
        return True
    except Exception:
        return False
def vm_down():
    """Force-stop the analysis VM ('virsh destroy') and print the exit code."""
    print(subprocess.call(['virsh', "destroy", VM_NAME]))
def current_job_init(r):
    """Reset the stored current-job id once the RQ queue is empty."""
    q = Queue(connection=Redis())  # connect to the default RQ queue
    queued_job_ids = q.job_ids  # list of job IDs currently queued
    if len(queued_job_ids) == 0:
        # NOTE(review): redis stores this value as the string "None", not a
        # null; readers appear to compare against that -- confirm before
        # changing to a delete().
        r.set('current_job_id', None)
    return
def size_fmt(num, suffix='B'):
    """Render a byte count as a human-readable decimal (power-of-1000) string.

    e.g. size_fmt(1500) -> '1.5KB'.  Beyond 'Z' the (historical) 'Yi'
    prefix is used, matching the original implementation.
    """
    magnitude = num
    for prefix in ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z'):
        if abs(magnitude) < 1000.0:
            return "%3.1f%s%s" % (magnitude, prefix, suffix)
        magnitude /= 1000.0
    return "%.1f%s%s" % (magnitude, 'Yi', suffix)
def analyze(uid):
    """Run one sandbox-analysis job identified by *uid* (an RQ worker task).

    Reads the job config from redis, reverts the KVM guest to its current
    snapshot, uploads the dump tooling plus the target sample, triggers the
    dump over XML-RPC, yara-scans the returned memory dumps, writes the
    report under result/<uid>/ and updates the MongoDB record.
    Exits the worker process (os._exit) on most terminal paths.
    """
    # db connect
    client = MongoClient('localhost', 27017)
    db = client.scan_database
    collection = db.scan_collection
    # redis connect
    pool = redis.ConnectionPool(host='localhost', port=6379, db=0)
    r = redis.StrictRedis(connection_pool=pool)
    # publish this RQ job as the one currently running
    job=get_current_job()
    r.set('current_job_id', job.id)
    # config read & write
    # SECURITY NOTE(review): eval() on data pulled from redis executes
    # arbitrary code if the store is ever attacker-influenced; json or
    # ast.literal_eval would be safer.
    config = eval(r.get(uid).decode('utf-8'))
    pe = pefile.PE(config['path'])
    config['entrypoint'] = pe.OPTIONAL_HEADER.AddressOfEntryPoint
    # make report skeleton; filled in as the stages below complete
    result = {"result":{"detail":"", "is_success":False},
              "run_time":str(config['time']),
              "mode":config['mode'],
              "timestamp":str(datetime.datetime.today().isoformat()),
              "scans":[],
              "UUID":uid,
              "magic":magic.from_file(config['path']),
              "virus_total":0,
              "avclass":{"flag":None, "data":[]}
              }
    with open(config['path'],'rb')as f:
        d = f.read()
    file_md5 = str(hashlib.md5(d).hexdigest())
    file_sha1 = str(hashlib.sha1(d).hexdigest())
    file_sha256 = str(hashlib.sha256(d).hexdigest())
    # avclass family labeling, only when a VirusTotal key is configured
    if tknk_conf['virus_total'] == 1:
        result['virus_total'] = 1
        result['avclass'] = run_avclass(tknk_conf['vt_key'], file_sha256)
    # Detect It Easy static scan of the original sample
    cmd=["die/diec.sh", config['path']]
    p = subprocess.run(cmd, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
    result['die'] = p.stdout.decode("utf8").split("\n")
    if result['die'] != []:
        # drop the empty element following the final newline
        result['die'].pop()
    # static yara scan of the original sample
    rules = yara.compile('index.yar')
    matches = rules.match(config['path'])
    result['target_scan']=({"md5":file_md5, "sha1":file_sha1, "sha256":file_sha256, "detect_rule":list(map(str,matches)), "file_name":config['target_file'], "size":size_fmt(os.path.getsize(config['path']))})
    # revert the guest to its snapshot so every job starts from a clean state
    cmd=['virsh', 'snapshot-revert', VM_NAME, '--current']
    p = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output = p.stderr.decode('utf-8')
    print(output)
    if "busy" in output:
        # hypervisor still holds the domain: record the failure and bail out
        print("failed to initialize KVM: Device or resource busy")
        result["result"]["is_success"] = False
        result["result"]["detail"] = "failed to initialize KVM: Device or resource busy"
        collection.update({u'UUID':uid},result)
        current_job_init(r)
        os._exit(0)
    elif "Domain" in output:
        # no snapshot to revert to: record the failure and bail out
        print("Domain snapshot not found: the domain does not have a current snapshot")
        result["result"]["is_success"] = False
        result["result"]["detail"] = "Domain snapshot not found: the domain does not have a current snapshot"
        collection.update({u'UUID':uid},result)
        current_job_init(r)
        os._exit(0)
    # wait (up to ~60s) for the guest to report "running"
    c=0
    while(1):
        vm_state = subprocess.check_output(["virsh", "domstate", VM_NAME])
        time.sleep(1)
        c+=1
        if "running" in str(vm_state.decode('utf-8')):
            break
        if c == 60:
            # guest never came up; give up silently (no report update)
            current_job_init(r)
            os._exit(0)
    # pick the toolset to push into the guest for the selected dump mode
    if config['mode'] == "hollows_hunter":
        tools = ["tools/hollows_hunter.exe", "tools/pe-sieve.dll", "tools/mouse_emu.pyw"]
    elif config['mode'] == "procdump":
        tools = ["tools/procdump.exe", "tools/mouse_emu.pyw"]
    elif config['mode'] == "scylla":
        tools = ["tools/Scylla.dll", "tools/mouse_emu.pyw"]
    elif config['mode'] == "diff":
        tools = ["tools/procdump.exe", "tools/mouse_emu.pyw"]
    for tool_name in tools:
        upload(tool_name)
    upload("target/" + config['target_file'])
    # trigger the in-guest dump, then try to pull dump.zip back
    ret = dump(config)
    if ret == False:
        print("Connection error\n")
        is_success = False
        result["result"]["detail"] = "Connection error"
    else:
        ret = download()
        if ret == True:
            print("dump finish")
            is_success = True
        else:
            is_success = False
            if result["mode"] == "procdump":
                result["result"]["detail"] = "Process does not exist"
            else:
                result["result"]["detail"] = "Dump file does not exist"
    vm_down()
    if is_success == False:
        # even with no dump, the static yara scan may already have hits
        for scan in result["scans"]:
            if scan["detect_rule"] != []:
                result["result"]["is_success"] = True
                result["result"]["detail"] = "Detected with yara rule!"
                break
        os.mkdir("result/" + str(uid))
        with open("result/"+ str(uid) + "/" +file_sha256+'.json', 'w') as outfile:
            json.dump(result, outfile, indent=4)
        shutil.copyfile(config['path'], "result/"+str(uid)+"/"+config['target_file'])
        print (json.dumps(result, indent=4))
        collection.update({u'UUID':uid},result)
        current_job_init(r)
        os._exit(0)
    elif is_success == True:
        # clear any stale archive from a previous run before unpacking
        p = Path("result/dump.zip")
        if p.exists():
            p.unlink()
            print("remove")
        shutil.move("dump.zip", "result/")
        subprocess.run(['unzip', "dump.zip"], cwd="result")
        p = Path("result/dump/")
        # yara-scan every extracted executable / dll / memory dump
        for f in p.glob("**/*"):
            if (".exe" == f.suffix) or (".dll" == f.suffix) or (".dmp" == f.suffix):
                size = os.path.getsize(str(f))
                matches = rules.match(str(f.resolve()))
                result['scans'].append({"detect_rule":list(map(str,matches)), "file_name":f.name, "size":size_fmt(size)})
        for scan in result["scans"]:
            if scan["detect_rule"] != []:
                result["result"]["is_success"] = True
                result["result"]["detail"] = "Detected with yara rule!"
                break
        print (json.dumps(result, indent=4))
        with open("result/dump/"+file_sha256+'.json', 'w') as outfile:
            json.dump(result, outfile, indent=4)
        shutil.copyfile(config['path'], "result/dump/"+config['target_file'])
        # move the unpacked directory to its final per-job location
        os.rename("result/dump/", "result/"+str(uid))
        os.remove("result/dump.zip")
        collection.update({u'UUID':uid},result)
        current_job_init(r)
    return
| 32.547325 | 207 | 0.580857 |
import xmlrpc.client
import os, sys, shutil, json, subprocess, time, yara, hashlib, datetime, requests, magic, redis, socket, pefile
from pathlib import Path
from pymongo import MongoClient
from rq import get_current_job, Queue
from read_avclass_report import run_avclass
from redis import Redis
with open("tknk.conf", 'r') as f:
tknk_conf = json.load(f)
VM_NAME=tknk_conf['vm_name']
VM_URL=tknk_conf['vm_url']
def download():
    """Fetch dump.zip from the analysis VM over XML-RPC.

    Returns True on success and False on failure, consistent with dump().
    Fixed: the original returned sys.exc_info() (truthy) on failure and
    only caught xmlrpc.client.Fault, missing connection errors.
    """
    proxy = xmlrpc.client.ServerProxy(VM_URL)
    with open("dump.zip", "wb") as handle:
        try:
            handle.write(proxy.download_file().data)
            return True
        except Exception:
            print(sys.exc_info())
            return False
def upload(filename):
    """Push a local file to the analysis VM via XML-RPC (basename as remote name)."""
    proxy = xmlrpc.client.ServerProxy(VM_URL)
    with open(filename, "rb") as handle:
        payload = xmlrpc.client.Binary(handle.read())
    remote_name = filename.rsplit("/", 1)[1] if "/" in filename else filename
    print("upload..." + remote_name)
    proxy.upload_file(payload, remote_name)
def dump(config):
    """Trigger the in-guest dump over XML-RPC; True on success, False on failure.

    Fixed: bare ``except:`` narrowed to Exception so SystemExit and
    KeyboardInterrupt are no longer swallowed.
    """
    proxy = xmlrpc.client.ServerProxy(VM_URL)
    try:
        proxy.dump(config)
        return True
    except Exception:
        return False
def vm_down():
print(subprocess.call(['virsh', "destroy", VM_NAME]))
def current_job_init(r):
q = Queue(connection=Redis())
queued_job_ids = q.job_ids
if len(queued_job_ids) == 0:
r.set('current_job_id', None)
return
def size_fmt(num, suffix='B'):
    """Render a byte count as a human-readable decimal (power-of-1000) string."""
    magnitude = num
    for prefix in ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z'):
        if abs(magnitude) < 1000.0:
            return "%3.1f%s%s" % (magnitude, prefix, suffix)
        magnitude /= 1000.0
    return "%.1f%s%s" % (magnitude, 'Yi', suffix)
def analyze(uid):
client = MongoClient('localhost', 27017)
db = client.scan_database
collection = db.scan_collection
pool = redis.ConnectionPool(host='localhost', port=6379, db=0)
r = redis.StrictRedis(connection_pool=pool)
job=get_current_job()
r.set('current_job_id', job.id)
config = eval(r.get(uid).decode('utf-8'))
pe = pefile.PE(config['path'])
config['entrypoint'] = pe.OPTIONAL_HEADER.AddressOfEntryPoint
result = {"result":{"detail":"", "is_success":False},
"run_time":str(config['time']),
"mode":config['mode'],
"timestamp":str(datetime.datetime.today().isoformat()),
"scans":[],
"UUID":uid,
"magic":magic.from_file(config['path']),
"virus_total":0,
"avclass":{"flag":None, "data":[]}
}
with open(config['path'],'rb')as f:
d = f.read()
file_md5 = str(hashlib.md5(d).hexdigest())
file_sha1 = str(hashlib.sha1(d).hexdigest())
file_sha256 = str(hashlib.sha256(d).hexdigest())
if tknk_conf['virus_total'] == 1:
result['virus_total'] = 1
result['avclass'] = run_avclass(tknk_conf['vt_key'], file_sha256)
cmd=["die/diec.sh", config['path']]
p = subprocess.run(cmd, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
result['die'] = p.stdout.decode("utf8").split("\n")
if result['die'] != []:
result['die'].pop()
rules = yara.compile('index.yar')
matches = rules.match(config['path'])
result['target_scan']=({"md5":file_md5, "sha1":file_sha1, "sha256":file_sha256, "detect_rule":list(map(str,matches)), "file_name":config['target_file'], "size":size_fmt(os.path.getsize(config['path']))})
cmd=['virsh', 'snapshot-revert', VM_NAME, '--current']
p = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = p.stderr.decode('utf-8')
print(output)
if "busy" in output:
print("failed to initialize KVM: Device or resource busy")
result["result"]["is_success"] = False
result["result"]["detail"] = "failed to initialize KVM: Device or resource busy"
collection.update({u'UUID':uid},result)
current_job_init(r)
os._exit(0)
elif "Domain" in output:
print("Domain snapshot not found: the domain does not have a current snapshot")
result["result"]["is_success"] = False
result["result"]["detail"] = "Domain snapshot not found: the domain does not have a current snapshot"
collection.update({u'UUID':uid},result)
current_job_init(r)
os._exit(0)
c=0
while(1):
vm_state = subprocess.check_output(["virsh", "domstate", VM_NAME])
time.sleep(1)
c+=1
if "running" in str(vm_state.decode('utf-8')):
break
if c == 60:
current_job_init(r)
os._exit(0)
if config['mode'] == "hollows_hunter":
tools = ["tools/hollows_hunter.exe", "tools/pe-sieve.dll", "tools/mouse_emu.pyw"]
elif config['mode'] == "procdump":
tools = ["tools/procdump.exe", "tools/mouse_emu.pyw"]
elif config['mode'] == "scylla":
tools = ["tools/Scylla.dll", "tools/mouse_emu.pyw"]
elif config['mode'] == "diff":
tools = ["tools/procdump.exe", "tools/mouse_emu.pyw"]
for tool_name in tools:
upload(tool_name)
upload("target/" + config['target_file'])
ret = dump(config)
if ret == False:
print("Connection error\n")
is_success = False
result["result"]["detail"] = "Connection error"
else:
ret = download()
if ret == True:
print("dump finish")
is_success = True
else:
is_success = False
if result["mode"] == "procdump":
result["result"]["detail"] = "Process does not exist"
else:
result["result"]["detail"] = "Dump file does not exist"
vm_down()
if is_success == False:
for scan in result["scans"]:
if scan["detect_rule"] != []:
result["result"]["is_success"] = True
result["result"]["detail"] = "Detected with yara rule!"
break
os.mkdir("result/" + str(uid))
with open("result/"+ str(uid) + "/" +file_sha256+'.json', 'w') as outfile:
json.dump(result, outfile, indent=4)
shutil.copyfile(config['path'], "result/"+str(uid)+"/"+config['target_file'])
print (json.dumps(result, indent=4))
collection.update({u'UUID':uid},result)
current_job_init(r)
os._exit(0)
elif is_success == True:
p = Path("result/dump.zip")
if p.exists():
p.unlink()
print("remove")
shutil.move("dump.zip", "result/")
subprocess.run(['unzip', "dump.zip"], cwd="result")
p = Path("result/dump/")
for f in p.glob("**/*"):
if (".exe" == f.suffix) or (".dll" == f.suffix) or (".dmp" == f.suffix):
size = os.path.getsize(str(f))
matches = rules.match(str(f.resolve()))
result['scans'].append({"detect_rule":list(map(str,matches)), "file_name":f.name, "size":size_fmt(size)})
for scan in result["scans"]:
if scan["detect_rule"] != []:
result["result"]["is_success"] = True
result["result"]["detail"] = "Detected with yara rule!"
break
print (json.dumps(result, indent=4))
with open("result/dump/"+file_sha256+'.json', 'w') as outfile:
json.dump(result, outfile, indent=4)
shutil.copyfile(config['path'], "result/dump/"+config['target_file'])
os.rename("result/dump/", "result/"+str(uid))
os.remove("result/dump.zip")
collection.update({u'UUID':uid},result)
current_job_init(r)
return
| true | true |
f71161ef83923d1440360d9a5499009046ba7494 | 1,127 | py | Python | userbot/modules/aeshtetic.py | Wiki28/WikixCilik | a7e8d684e34174001af3e69d1f00de4e98243abe | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/modules/aeshtetic.py | Wiki28/WikixCilik | a7e8d684e34174001af3e69d1f00de4e98243abe | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/modules/aeshtetic.py | Wiki28/WikixCilik | a7e8d684e34174001af3e69d1f00de4e98243abe | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
# Ported for Lord-Userbot By liualvinas/Alvin
from telethon import events
from userbot import CMD_HANDLER as cmd
from userbot import CMD_HELP
from userbot.utils import edit_or_reply, cilik_cmd
# Fullwidth-translatable ASCII code points ('!' through '~').
PRINTABLE_ASCII = range(0x21, 0x7F)


def aesthetify(string):
    """Yield each character of *string* mapped to its "aesthetic" form.

    Printable ASCII shifts into the fullwidth block (U+FF01-U+FF5E),
    spaces become ideographic space (U+3000); everything else passes
    through unchanged.
    """
    for ch in string:
        code = ord(ch)
        if code in PRINTABLE_ASCII:
            code += 0xFF00 - 0x20
        elif ch == " ":
            code = 0x3000
        yield chr(code)
@cilik_cmd(pattern="ae(?: |$)(.*)")
async def _(event):
    """Handler for .ae <text>: replies with the fullwidth-converted text."""
    if event.fwd_from:
        # ignore forwarded messages
        return
    text = event.pattern_match.group(1)
    text = "".join(aesthetify(text))
    # parse_mode=None: send the converted text verbatim, no markdown
    await edit_or_reply(event, text=text, parse_mode=None, link_preview=False)
    raise events.StopPropagation
# Register this plugin's help text with the bot's help command.
CMD_HELP.update(
    {
        "aeshtetic": f"➢ **Plugin : **`aeshtetic`\
\n\n ┌✪ **Command :** `{cmd}ae <teks>`\
\n └✪ **Function : **Mengubah font teks Menjadi aeshtetic.\
"
    }
)
| 25.044444 | 78 | 0.640639 |
from telethon import events
from userbot import CMD_HANDLER as cmd
from userbot import CMD_HELP
from userbot.utils import edit_or_reply, cilik_cmd
# Fullwidth-translatable ASCII code points ('!' through '~').
PRINTABLE_ASCII = range(0x21, 0x7F)


def aesthetify(string):
    """Yield each character mapped to fullwidth; spaces become U+3000."""
    for ch in string:
        code = ord(ch)
        if code in PRINTABLE_ASCII:
            code += 0xFF00 - 0x20
        elif ch == " ":
            code = 0x3000
        yield chr(code)
@cilik_cmd(pattern="ae(?: |$)(.*)")
async def _(event):
if event.fwd_from:
return
text = event.pattern_match.group(1)
text = "".join(aesthetify(text))
await edit_or_reply(event, text=text, parse_mode=None, link_preview=False)
raise events.StopPropagation
CMD_HELP.update(
{
"aeshtetic": f"➢ **Plugin : **`aeshtetic`\
\n\n ┌✪ **Command :** `{cmd}ae <teks>`\
\n └✪ **Function : **Mengubah font teks Menjadi aeshtetic.\
"
}
)
| true | true |
f71162543479e13ba7a83e8e598676ad16885311 | 6,609 | py | Python | gamification/core/models.py | stephenrjones/django-gamification | d22882f148375102ec351cb2bc75275083468d73 | [
"Unlicense",
"MIT"
] | 15 | 2015-02-21T09:28:55.000Z | 2021-07-31T17:17:06.000Z | gamification/core/models.py | stephenrjones/django-gamification | d22882f148375102ec351cb2bc75275083468d73 | [
"Unlicense",
"MIT"
] | null | null | null | gamification/core/models.py | stephenrjones/django-gamification | d22882f148375102ec351cb2bc75275083468d73 | [
"Unlicense",
"MIT"
] | 1 | 2017-01-22T09:12:44.000Z | 2017-01-22T09:12:44.000Z | # -*- coding: utf-8 -*-
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, as long as
# any reuse or further development of the software attributes the
# National Geospatial-Intelligence Agency (NGA) authorship as follows:
# 'This software (django-gamification)
# is provided to the public as a courtesy of the National
# Geospatial-Intelligence Agency.
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import json
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.utils.datastructures import SortedDict
from django.db.models.signals import post_save
from django.db import models
from gamification.badges.models import ProjectBadge, ProjectBadgeToUser
from jsonfield import JSONField
TRUE_FALSE = [(0, 'False'), (1, 'True')]
class ProjectBase(models.Model):
    """
    Abstract base model with the bookkeeping fields shared by project models.
    """
    active = models.BooleanField(default=True, help_text='If checked, this project will be listed in the active list.')
    created_at = models.DateTimeField(auto_now_add=True)
    name = models.CharField(max_length=200, help_text='Name of the project.')
    description = models.TextField(help_text='Details of this project that will be listed on the viewing page.')
    updated_at = models.DateTimeField(auto_now=True)
    url = models.TextField(help_text='Project Information URL', null=True)

    def __unicode__(self):
        return self.name

    class Meta:
        abstract = True
        ordering = ('-created_at',)  # newest first
class Team(models.Model):
    """A named group of users that can be attached to projects."""
    name = models.CharField(max_length=50)
    description = models.TextField(null=True, blank=True)
    members = models.ManyToManyField(User, null=True, blank=True)
    order = models.IntegerField(default=0, null=True, blank=True, help_text='Optionally specify the order teams should appear. Lower numbers appear sooner. By default, teams appear in the order they were created.')
    date_created = models.DateTimeField(auto_now_add=True)
    background_color = models.CharField(max_length=50, null=True, blank=True, help_text='Optional - Color to use for background of all team badges')
    icon = models.ImageField(upload_to='badge_images', null=True, blank=True, help_text='Optional - Image to show next to team names')

    def __str__(self):
        # e.g. "Red Team (4)" -- team name plus member count
        return "%s (%s)" % (self.name, str(len(self.members.all())))

    class Meta:
        # NOTE(review): '-order' sorts higher numbers first, which appears to
        # contradict the field's help_text ("lower numbers appear sooner") --
        # confirm intended direction before changing.
        ordering = ['-order', '-date_created', 'id']
class Project(ProjectBase):
    """
    Top-level organizational object: a gamification project with teams,
    supervisors, theming, and badge/point activity hanging off it.
    """
    THEMES = (
        ("", "None"),
        ("camping", "Camping"),
        ("camping2", "Camping Theme 2"),
        ("map", "Geospatial"),
    )
    private = models.BooleanField(default=False, help_text='If checked, hide this project from the list of projects and public badge APIs.')
    supervisors = models.ManyToManyField(User, blank=True, null=True, related_name="supervisors", help_text='Anyone other than site administrators that can add badges and update the site')
    teams = models.ManyToManyField(Team, blank=True, null=True)
    viewing_pass_phrase = models.CharField(max_length=200, null=True, blank=True, help_text='Phrase that must be entered to view this page.')
    project_closing_date = models.DateTimeField(null=True, blank=True, help_text='Date that project "closes" with countdown shown on project page. Badges can still be added after this.')
    visual_theme = models.CharField(max_length=20, default="none", choices=THEMES, help_text='Visual Theme used to style the project page')
    background_image = models.ImageField(upload_to='badge_images', null=True, blank=True, help_text='Optional - Override theme background with this image')
    properties = JSONField(null=True, blank=True, help_text='JSON key/value pairs associated with this object, e.g. {"badges_mode":"blue"}')
    query_token = models.CharField(max_length=200, null=True, blank=True, help_text='Token that must be entered by any server requesting data - not implemented yet.')
    allowed_api_hosts = models.TextField(null=True, blank=True, help_text='Comma-separated list of hosts (IPs or Hostnames) that can access this project via data requests - not implemented yet')

    @property
    def user_count(self):
        # distinct users holding at least one badge in this project
        return User.objects.filter(projectbadgetouser__projectbadge__project=self).distinct().count()

    @property
    def badge_count(self):
        # total badge awards within this project (not de-duplicated per user)
        return ProjectBadgeToUser.objects.filter(projectbadge__project=self).count()

    def get_absolute_url(self):
        return reverse('project-list', args=[self.name])
class Points(models.Model):
    """A point award granted to a user for a specific project badge."""
    user = models.ForeignKey(User)
    projectbadge = models.ForeignKey(ProjectBadge)
    value = models.IntegerField(default=0)
    date_awarded = models.DateTimeField('date awarded',auto_now=True)
    description = models.CharField(max_length=200)

    def get_absolute_url(self):
        return reverse('points-list', args=[self.id])

    class Meta:
        verbose_name_plural = "Points"  # avoid Django's default "Pointss"
class UserProfile(models.Model):
    """Per-user extension of the auth User model (currently just a badge score).

    Pattern from
    http://stackoverflow.com/questions/44109/extending-the-user-model-with-custom-fields-in-django
    """
    defaultScore = 1  # score assigned to freshly created profiles
    user = models.OneToOneField(User)
    score = models.IntegerField(default=defaultScore)

    def __str__(self):
        return "%s's profile" % self.user
def create_user_profile(sender, instance, created, **kwargs):
    """post_save hook: ensure every newly created User gets a UserProfile."""
    if created:
        # get_or_create keeps this idempotent if a profile already exists
        UserProfile.objects.get_or_create(user=instance)
# Create a profile row whenever a new User is saved.
post_save.connect(create_user_profile, sender=User)

import sys
# Importing meta_badges registers badge handlers; skipped during
# syncdb/migrate because the badge tables may not exist yet.
if not 'syncdb' in sys.argv[1:2] and not 'migrate' in sys.argv[1:2]:
    from meta_badges import *
# is provided to the public as a courtesy of the National
# Geospatial-Intelligence Agency.
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import json
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.utils.datastructures import SortedDict
from django.db.models.signals import post_save
from django.db import models
from gamification.badges.models import ProjectBadge, ProjectBadgeToUser
from jsonfield import JSONField
TRUE_FALSE = [(0, 'False'), (1, 'True')]
class ProjectBase(models.Model):
active = models.BooleanField(default=True, help_text='If checked, this project will be listed in the active list.')
created_at = models.DateTimeField(auto_now_add=True)
name = models.CharField(max_length=200, help_text='Name of the project.')
description = models.TextField(help_text='Details of this project that will be listed on the viewing page.')
updated_at = models.DateTimeField(auto_now=True)
url = models.TextField(help_text='Project Information URL', null=True)
def __unicode__(self):
return self.name
class Meta:
abstract = True
ordering = ('-created_at',)
class Team(models.Model):
name = models.CharField(max_length=50)
description = models.TextField(null=True, blank=True)
members = models.ManyToManyField(User, null=True, blank=True)
order = models.IntegerField(default=0, null=True, blank=True, help_text='Optionally specify the order teams should appear. Lower numbers appear sooner. By default, teams appear in the order they were created.')
date_created = models.DateTimeField(auto_now_add=True)
background_color = models.CharField(max_length=50, null=True, blank=True, help_text='Optional - Color to use for background of all team badges')
icon = models.ImageField(upload_to='badge_images', null=True, blank=True, help_text='Optional - Image to show next to team names')
def __str__(self):
return "%s (%s)" % (self.name, str(len(self.members.all())))
class Meta:
ordering = ['-order', '-date_created', 'id']
class Project(ProjectBase):
    """A badge project: a page gathering badges, teams and users.

    Adds access control (``private``, ``viewing_pass_phrase``), theming and
    per-project API settings on top of :class:`ProjectBase`.
    """

    # Visual themes selectable for the project page.
    THEMES = (
        ("", "None"),
        ("camping", "Camping"),
        ("camping2", "Camping Theme 2"),
        ("map", "Geospatial"),
    )
    private = models.BooleanField(default=False, help_text='If checked, hide this project from the list of projects and public badge APIs.')
    # NOTE: ``null=True`` was dropped from the two ManyToManyFields below —
    # it has no effect on M2M relations (they live in a join table) and
    # triggers Django system check fields.W340. ``blank=True`` keeps them
    # optional in forms.
    supervisors = models.ManyToManyField(User, blank=True, related_name="supervisors", help_text='Anyone other than site administrators that can add badges and update the site')
    teams = models.ManyToManyField(Team, blank=True)
    viewing_pass_phrase = models.CharField(max_length=200, null=True, blank=True, help_text='Phrase that must be entered to view this page.')
    project_closing_date = models.DateTimeField(null=True, blank=True, help_text='Date that project "closes" with countdown shown on project page. Badges can still be added after this.')
    # NOTE(review): the default "none" is not one of THEMES (the no-theme
    # choice is "") — looks like a latent mismatch; left unchanged because
    # existing rows may rely on it. TODO confirm against the template logic.
    visual_theme = models.CharField(max_length=20, default="none", choices=THEMES, help_text='Visual Theme used to style the project page')
    background_image = models.ImageField(upload_to='badge_images', null=True, blank=True, help_text='Optional - Override theme background with this image')
    properties = JSONField(null=True, blank=True, help_text='JSON key/value pairs associated with this object, e.g. {"badges_mode":"blue"}')
    query_token = models.CharField(max_length=200, null=True, blank=True, help_text='Token that must be entered by any server requesting data - not implemented yet.')
    allowed_api_hosts = models.TextField(null=True, blank=True, help_text='Comma-separated list of hosts (IPs or Hostnames) that can access this project via data requests - not implemented yet')

    @property
    def user_count(self):
        """Number of distinct users holding at least one badge in this project."""
        return User.objects.filter(projectbadgetouser__projectbadge__project=self).distinct().count()

    @property
    def badge_count(self):
        """Total number of badge awards made within this project."""
        return ProjectBadgeToUser.objects.filter(projectbadge__project=self).count()

    def get_absolute_url(self):
        """Canonical URL of the project page (looked up by project name)."""
        return reverse('project-list', args=[self.name])
class Points(models.Model):
    """A single points award tied to a user and a project badge."""

    # ``on_delete=models.CASCADE`` is Django's historical implicit default
    # for ForeignKey; spelling it out keeps the model valid on Django >= 2.0
    # (where the argument is mandatory) without changing behavior.
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    projectbadge = models.ForeignKey(ProjectBadge, on_delete=models.CASCADE)
    value = models.IntegerField(default=0)
    # auto_now=True: timestamp refreshes on every save, not only on creation.
    date_awarded = models.DateTimeField('date awarded', auto_now=True)
    description = models.CharField(max_length=200)

    def get_absolute_url(self):
        """Canonical URL of this points entry."""
        return reverse('points-list', args=[self.id])

    class Meta:
        # Avoid the default auto-pluralization "Pointss" in the admin.
        verbose_name_plural = "Points"
class UserProfile(models.Model):
    """One-to-one companion record for a User, holding their score."""

    # Starting score for newly created profiles.
    defaultScore = 1
    # Explicit on_delete matches the historical CASCADE default and keeps
    # the model valid on Django >= 2.0 where the argument is required.
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    score = models.IntegerField(default=defaultScore)

    def __str__(self):
        return "%s's profile" % self.user
def create_user_profile(sender, instance, created, **kwargs):
    """post_save signal handler ensuring every User has a UserProfile.

    ``get_or_create`` makes the handler idempotent, so a re-saved User
    never gets a duplicate profile.
    """
    if created:
        # The original unpacked into ``profile, created``, shadowing the
        # ``created`` signal argument; neither value is used, so drop it.
        UserProfile.objects.get_or_create(user=instance)
# Automatically create a profile whenever a new User is saved.
post_save.connect(create_user_profile, sender=User)

import sys

# Skip loading badge definitions during schema-management commands, when
# the database tables they touch may not exist yet.
if 'syncdb' not in sys.argv[1:2] and 'migrate' not in sys.argv[1:2]:
    from meta_badges import *
f71163d1f61c4563c96be498ede707c910a8f26d | 441 | py | Python | virtual/lib/python3.6/site-packages/pylint/test/functional/exception_is_binary_op.py | drewheathens/The-Moringa-Tribune | 98ee4d63c9df6f1f7497fc6876960a822d914500 | [
"MIT"
] | 463 | 2015-01-15T08:17:42.000Z | 2022-03-28T15:10:20.000Z | virtual/lib/python3.6/site-packages/pylint/test/functional/exception_is_binary_op.py | drewheathens/The-Moringa-Tribune | 98ee4d63c9df6f1f7497fc6876960a822d914500 | [
"MIT"
] | 52 | 2015-01-06T02:43:59.000Z | 2022-03-14T11:15:21.000Z | virtual/lib/python3.6/site-packages/pylint/test/functional/exception_is_binary_op.py | drewheathens/The-Moringa-Tribune | 98ee4d63c9df6f1f7497fc6876960a822d914500 | [
"MIT"
] | 249 | 2015-01-07T22:49:49.000Z | 2022-03-18T02:32:06.000Z | """Warn about binary operations used as exceptions."""
from __future__ import print_function  # py2/py3-compatible print in this fixture

try:  # pylint functional-test fixture: each `except A or/and B` below must raise binary-op-exception; line numbers are significant, so no lines may be added
    pass
except Exception or BaseException:  # [binary-op-exception]
    print("caught1")
except Exception and BaseException:  # [binary-op-exception]
    print("caught2")
except Exception or BaseException:  # [binary-op-exception]
    print("caught3")
except (Exception or BaseException) as exc:  # [binary-op-exception]
    print("caught4")
| 33.923077 | 68 | 0.734694 | from __future__ import print_function
try:
pass
except Exception or BaseException:
print("caught1")
except Exception and BaseException:
print("caught2")
except Exception or BaseException:
print("caught3")
except (Exception or BaseException) as exc:
print("caught4")
| true | true |
f71163e8df50a1a54f85265542eac2fe7669ebfe | 1,601 | py | Python | djangox/lib/python3.8/site-packages/allauth/socialaccount/providers/__init__.py | DemarcusL/django_wiki_lab | 3b7cf18af7e0f89c94d10eb953ca018a150a2f55 | [
"MIT"
] | 6,342 | 2015-01-01T07:40:30.000Z | 2022-03-31T04:18:30.000Z | djangox/lib/python3.8/site-packages/allauth/socialaccount/providers/__init__.py | DemarcusL/django_wiki_lab | 3b7cf18af7e0f89c94d10eb953ca018a150a2f55 | [
"MIT"
] | 2,198 | 2015-01-02T15:17:45.000Z | 2022-03-28T10:20:43.000Z | djangox/lib/python3.8/site-packages/allauth/socialaccount/providers/__init__.py | DemarcusL/django_wiki_lab | 3b7cf18af7e0f89c94d10eb953ca018a150a2f55 | [
"MIT"
] | 2,928 | 2015-01-01T10:44:13.000Z | 2022-03-31T03:20:16.000Z | import importlib
from collections import OrderedDict
from django.conf import settings
class ProviderRegistry(object):
    """Registry of social-account provider classes, keyed by provider id.

    Providers self-register when their app's ``provider`` module is
    imported; :meth:`load` triggers those imports lazily, once.
    """

    def __init__(self):
        self.provider_map = OrderedDict()
        self.loaded = False

    def get_list(self, request=None):
        """Instantiate every registered provider for ``request``."""
        self.load()
        providers = []
        for provider_cls in self.provider_map.values():
            providers.append(provider_cls(request))
        return providers

    def register(self, cls):
        """Record a provider class under its ``id``."""
        self.provider_map[cls.id] = cls

    def by_id(self, id, request=None):
        """Look up a provider class by id and return an instance of it."""
        self.load()
        provider_cls = self.provider_map[id]
        return provider_cls(request=request)

    def as_choices(self):
        """Yield ``(id, name)`` pairs, e.g. for a form's choices."""
        self.load()
        for provider_cls in self.provider_map.values():
            yield (provider_cls.id, provider_cls.name)

    def load(self):
        # TODO: Providers register with the provider registry when
        # loaded. Here, we build the URLs for all registered providers. So, we
        # really need to be sure all providers did register, which is why we're
        # forcefully importing the `provider` modules here. The overall
        # mechanism is way to magical and depends on the import order et al, so
        # all of this really needs to be revisited.
        if self.loaded:
            return
        for app in settings.INSTALLED_APPS:
            try:
                provider_module = importlib.import_module(app + ".provider")
            except ImportError:
                continue
            for cls in getattr(provider_module, "provider_classes", []):
                self.register(cls)
        self.loaded = True
registry = ProviderRegistry()
| 33.354167 | 85 | 0.621487 | import importlib
from collections import OrderedDict
from django.conf import settings
class ProviderRegistry(object):
def __init__(self):
self.provider_map = OrderedDict()
self.loaded = False
def get_list(self, request=None):
self.load()
return [provider_cls(request) for provider_cls in self.provider_map.values()]
def register(self, cls):
self.provider_map[cls.id] = cls
def by_id(self, id, request=None):
self.load()
return self.provider_map[id](request=request)
def as_choices(self):
self.load()
for provider_cls in self.provider_map.values():
yield (provider_cls.id, provider_cls.name)
def load(self):
# forcefully importing the `provider` modules here. The overall
# mechanism is way to magical and depends on the import order et al, so
# all of this really needs to be revisited.
if not self.loaded:
for app in settings.INSTALLED_APPS:
try:
provider_module = importlib.import_module(app + ".provider")
except ImportError:
pass
else:
for cls in getattr(provider_module, "provider_classes", []):
self.register(cls)
self.loaded = True
registry = ProviderRegistry()
| true | true |
f711648b9fb21bbdd671889d8ae822a03e116ae0 | 11,176 | py | Python | pyrats/halos.py | HugoPfister/Pyrats | fc2cab0d1e14b8dd19b3eba361d47f053187ab47 | [
"MIT"
] | null | null | null | pyrats/halos.py | HugoPfister/Pyrats | fc2cab0d1e14b8dd19b3eba361d47f053187ab47 | [
"MIT"
] | null | null | null | pyrats/halos.py | HugoPfister/Pyrats | fc2cab0d1e14b8dd19b3eba361d47f053187ab47 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Module to deal with halos, to be used with HaloMaker.
This module is heavily inspired by the set of IDL routines originally
found in the Ramses Analysis ToolSuite (RATS).
TODO: Some more documentation
"""
import numpy as np
import pandas as pd
import yt
from yt.utilities.logger import ytLogger as mylog
import yt.utilities.fortran_utils as fpu
from yt.funcs import get_pbar
import os
import pandas as pd
class HaloList(object):
    """Catalog of halos read from a HaloMaker ``tree_bricks`` file.

    The catalog is exposed as a pandas DataFrame (``self.halos``) indexed by
    halo ID, and is cached on disk as an HDF file after the first read.
    """

    def __init__(self, ds, folder='.', contam=False):
        """
        PandaList with halos and their properties

        Parameters
        ----------
        ds : yt dataset the halos belong to
        folder : root folder containing the ``Halos/<iout>`` directory
        contam : whether the tree_bricks file carries a contamination record
        """
        self.folder = folder
        # Output number parsed from the dataset name (e.g. 'info_00042').
        self.iout = int(str(ds).split('_')[1])
        # Use the HDF cache when present; otherwise parse the raw binary
        # tree_bricks file and build the cache for next time.
        if os.path.exists(
                '{s.folder}/Halos/{s.iout}/tree_bricks{s.iout:03d}.hdf'.format(
                    s=self)):
            self.halos = pd.read_hdf(
                '{s.folder}/Halos/{s.iout}/tree_bricks{s.iout:03d}.hdf'.format(
                    s=self))
        else:
            self.halos = self._read_halos(data_set=ds, with_contam_option=contam)
            if self.halos.index.size > 0:
                self.halos.to_hdf(
                    '{s.folder}/Halos/{s.iout}/tree_bricks{s.iout:03d}.hdf'.format(
                        s=self), 'hdf')
        self.ds = ds
        # Placeholder columns; presumably filled later by sink/galaxy
        # matching code elsewhere in the package — TODO confirm.
        self.halos['bhid'] = -1 ; self.halos['galID'] = -1
        self.halos['mgal'] = 0 ; self.halos['msink'] = 0
        # read purity of halos
        self.halos['pollution'] = 0
        contam_file_path = '{s.folder}/Halos/{s.iout}/contam_halos{s.iout:03d}'.format(
            s=self)
        if os.path.exists(contam_file_path):
            p = np.loadtxt(contam_file_path)
            if len(p) > 0:
                p = p.T
                # Row layout after transpose: p[0]=halo ID, pollution=p[1]/p[2]
                # (presumably contaminated over total counts — TODO confirm).
                self.halos.loc[p[0], 'pollution'] = p[1]/p[2]

    def get_halo(self, hid, fname=None):
        """Return a human-readable summary of halo ``hid``.

        If ``fname`` is given, the summary is also written to that file.
        """
        halo = self.halos.loc[hid]
        scale_mpc = float(self.ds.length_unit.in_units('Mpc'))
        halostr = ("Halo {hid:.0f} (level {h.level:.0f}):\n"
                   "\tContains {h.nbpart:.0f} particles and {h.nbsub:.0f} subhalo(s)\n"
                   "\tCenter:\t\t ({h.x}, {h.y}, {h.z}) box units\n"
                   "\tVelocity:\t ({h.vx}, {h.vy}, {h.vz}) km/s\n"
                   "\tL:\t\t ({h.Lx}, {h.Ly}, {h.Lz}) ToCheck\n"
                   "\tMass:\t\t {h.m:.3e} Msun\n"
                   "\tMvir:\t\t {h.mvir:.3e} Msun\n"
                   "\tRadius:\t\t {h.r:.3e} Mpc ({rcodeunits:.3e} box units)\n"
                   "\tRvir:\t\t {h.rvir:.3e} Mpc ({rvcodeunits:.3e} box units)\n"
                   "\tTvir:\t\t {h.tvir:.3e} K".format(hid=hid,
                                                      h=halo,
                                                      rcodeunits=halo.r / scale_mpc,
                                                      rvcodeunits=halo.rvir / scale_mpc))
        if fname is not None:
            with open(fname, 'w') as f:
                f.write(halostr)
        return halostr

    def get_halo_sphere(self, hid, rvir_factor=5):
        """Return a yt sphere of ``rvir_factor * rvir`` around halo ``hid``.

        Spheres are memoized per ``(hid, rvir_factor)`` in ``_halo_spheres``.
        """
        halo_spheres = getattr(self, '_halo_spheres', {})
        if (hid, rvir_factor) in halo_spheres:
            return halo_spheres[hid, rvir_factor]
        tmp = self.halos.loc[hid, ['x', 'y', 'z', 'rvir', 'vx', 'vy', 'vz']]\
            .values
        center = self.ds.arr(tmp[:3], 'code_length')
        radius = self.ds.arr(tmp[3] * rvir_factor, 'Mpc')
        vel = self.ds.arr(tmp[4:7], 'km/s')
        # Get a sphere centered on the halo, carrying the halo's bulk
        # velocity so velocity fields are halo-relative.
        sphere = self.ds.sphere(center, radius)
        sphere.set_field_parameter('bulk_velocity', vel)
        halo_spheres[(hid, rvir_factor)] = sphere
        self._halo_spheres = halo_spheres
        return sphere

    def plot_halo(self, hid, rvir_factor=5, field=('deposit', 'all_density'), folder='./',
                  weight_field=('index', 'ones'), cmap='viridis', slice=False,
                  axis='z', **kwargs):
        '''Plot a given halo.

        Parameters
        ----------
        * hid, integer
          The halo id to plot
        * rvir_factor, float, default=5
          Size of the region to plot in unit of Rvir
        * field, tuple
          The yt field to plot
        * folder, string
          The folder where to save the data
        * weight_field, tuple
          The field to weight the projection by.
        * cmap, string
          The colormap to use
        * slice, boolean
          If true, do a slice plot instead of a projection plot
          (NOTE(review): ``slice`` is currently unused below — the
          method always makes a ProjectionPlot.)
        * axis, 'x', 'y' or 'z'
          The axis to project onto
        '''
        # Extra keyword arguments are not forwarded anywhere; warn loudly.
        for k, v in kwargs.items():
            print('%s: %s not supported' % (k, v))
        if hid not in self.halos.index:
            mylog.error('%s not found.' % hid)
            return
        # Get position
        tmp = np.array(self.halos.loc[hid, ['x', 'y', 'z', 'rvir']])
        center = self.ds.arr(tmp[:3], 'code_length')
        radius = self.ds.arr(tmp[3] * rvir_factor, 'Mpc')
        # Get a sphere centered on the halo
        sphere = self.ds.sphere(center, radius)
        # Make a projection plot
        p = yt.ProjectionPlot(self.ds, axis, field, data_source=sphere,
                              weight_field=weight_field)
        p.set_cmap(field=field, cmap=cmap)
        p.annotate_timestamp(corner='upper_left', time=True, redshift=True)
        p.annotate_scale(corner='upper_right')
        # TODO: annotate halos
        # TODO: better name
        p.save(folder)

    # Accessors
    def __getitem__(self, item):
        """Column access for known column names, otherwise row access."""
        if str(item) in self.halos:
            return self.halos[item]
        else:
            # NOTE(review): DataFrame.ix was removed in pandas >= 1.0;
            # this branch needs .loc/.iloc on modern pandas — TODO confirm
            # the pandas version this package pins.
            return self.halos.ix[item]

    # def __getattr__(self, name):
    #     return self.halos.__getattr__(name)  # self.halos[name]

    def __len__(self):
        """Number of halos (and subhalos) in the catalog."""
        return len(self.halos)

    def __iter__(self):
        """Iterate as pandas ``(index, row)`` pairs."""
        return self.halos.iterrows()

    # Printing functions
    def __str__(self):
        return self.halos.__str__()

    # Convenience functions
    def _read_halos(self, data_set, with_contam_option=False):
        """Parse the raw HaloMaker tree_bricks file into a DataFrame.

        Side effects: stores the catalog globals (``nhalos``, ``nsubs``,
        ``aexp``, ``age``, ``massp``) on ``self`` and records the per-halo
        byte offsets (``self.offsets``) used later by ``get_halo_parts``.
        The Fortran record order below must match HaloMaker's writer exactly.
        """
        halo_keys = ('ID', 'nbpart', 'level', 'min_part_id',
                     'host', 'hostsub', 'nbsub', 'nextsub',
                     'x', 'y', 'z', 'vx', 'vy', 'vz', 'Lx', 'Ly', 'Lz',
                     'a', 'b', 'c', 'ek', 'ep', 'et', 'rho0', 'r_c',
                     'spin', 'm', 'r', 'mvir', 'rvir', 'tvir', 'cvel')
        filename = '{s.folder}/Halos/{s.iout}/tree_bricks{s.iout:03d}'.format(
            s=self)
        # Empty placeholder so a missing file still yields an empty frame.
        data = np.empty(shape=(0, len(halo_keys)), dtype=object)
        yt.funcs.mylog.debug('Reading halo catalog %s (ds=%s)' % (filename, data_set))
        offsets = {}
        if os.path.exists(filename):
            with open(filename, 'rb') as f:
                # File header.
                [npart] = fpu.read_vector(f, 'i')
                [massp] = fpu.read_vector(f, 'f')
                [aexp] = fpu.read_vector(f, 'f')
                [omega_t] = fpu.read_vector(f, 'f')
                [age] = fpu.read_vector(f, 'f')
                [nhalos, nsubs] = fpu.read_vector(f, 'i')
                # Save the age/aexp, the mass of the particle,
                # as well as the number of (sub)halos
                self.nhalos = nhalos
                self.nsubs = nsubs
                self.aexp = aexp
                self.age = age
                self.massp = massp
                data = np.empty(shape=(nhalos + nsubs, len(halo_keys)), dtype=object)
                mylog.info('Brick: halos : %s' % nhalos)
                mylog.info('Brick: sub halos : %s' % nsubs)
                mylog.info('Brick: aexp : %s' % aexp)
                #pbar = get_pbar('', nhalos+nsubs)
                for ihalo in range(nhalos + nsubs):
                    # Remember where this halo's record starts so
                    # get_halo_parts() can seek straight back to it.
                    pos = f.tell()
                    [nbpart] = fpu.read_vector(f, 'i')  # Number of particles
                    listp = fpu.read_vector(f, 'i')  # List of the particles IDs
                    [ID] = fpu.read_vector(f, 'i')  # Halo ID
                    fpu.skip(f, 1)  # Skip timestep
                    [level, host, hostsub, nbsub, nextsub] = fpu.read_vector(f, 'i')
                    [m] = fpu.read_vector(f, 'f')  # Total mass
                    [x, y, z] = fpu.read_vector(f, 'f')  # Center
                    [vx, vy, vz] = fpu.read_vector(f, 'f')  # Velocity
                    [Lx, Ly, Lz] = fpu.read_vector(f, 'f')  # Angular momentum
                    [r, a, b, c] = fpu.read_vector(f, 'f')  # Shape (ellipticity)
                    [ek, ep, et] = fpu.read_vector(f, 'f')  # Energetics
                    [spin] = fpu.read_vector(f, 'f')  # Total angular momentum
                    [rvir, mvir, tvir, cvel] = fpu.read_vector(f, 'f')  # Virial parameters
                    [rho0, r_c] = fpu.read_vector(f, 'f')  # NFW params
                    if with_contam_option:
                        [contam] = fpu.read_vector(f, 'i')  # Contamination
                    # Add the halo to the list
                    # halos.loc[ihalo] = [ID, nbpart, level, listp.min(),
                    #                     host, hostsub, nbsub, nextsub,
                    #                     x, y, z, vx, vy, vz, Lx, Ly, Lz,
                    #                     a, b, c, ek, ep, et, rho0, r_c,
                    #                     spin, m, r, mvir, rvir, tvir, cvel]
                    data[ihalo] = [ID, nbpart, level, listp.min(),
                                   host, hostsub, nbsub, nextsub,
                                   x, y, z, vx, vy, vz, Lx, Ly, Lz,
                                   a, b, c, ek, ep, et, rho0, r_c,
                                   spin, m, r, mvir, rvir, tvir, cvel]
                    #pbar.update()
                    offsets[ID] = pos
            print('')
        # Cast each column to its proper dtype (the staging array is object).
        types = {}
        for k in ('ID', 'nbpart', 'level', 'min_part_id',
                  'host', 'hostsub', 'nbsub', 'nextsub'):
            types[k] = np.int64
        for k in ('x', 'y', 'z', 'vx', 'vy', 'vz', 'Lx', 'Ly', 'Lz',
                  'a', 'b', 'c', 'ek', 'ep', 'et', 'rho0', 'r_c',
                  'spin', 'm', 'r', 'mvir', 'rvir', 'tvir', 'cvel'):
            types[k] = np.float64
        dd = {k: data[:, i].astype(types[k])
              for i, k in enumerate(halo_keys)}
        halos = pd.DataFrame(dd)
        # Get properties in the right units
        # Masses (presumably stored by HaloMaker in 1e11 Msun — TODO confirm)
        halos.m *= 1e11
        halos.mvir *= 1e11
        # Positions and distances: cm -> Mpc (3.08e24 cm/Mpc), then shift
        # box-centered coordinates into [0, 1] box units.
        scale_mpc = float(data_set.length_unit.in_units('cm') / 3.08e24)
        halos.x = halos.x / scale_mpc + .5
        halos.y = halos.y / scale_mpc + .5
        halos.z = halos.z / scale_mpc + .5
        self.offsets = offsets
        return halos.set_index('ID')

    def get_halo_parts(self, hid):
        """Return the particle ID list of halo ``hid``.

        Seeks directly to the halo's record (offset saved by _read_halos),
        skips the particle-count record and reads the ID vector.
        """
        filename = '{s.folder}/Halos/{s.iout}/tree_bricks{s.iout:03d}'.format(
            s=self)
        with open(filename, 'br') as fd:
            fd.seek(self.offsets[hid])
            fpu.skip(fd, 1)
            listp = fpu.read_vector(fd, 'i')
        return listp
| 39.631206 | 98 | 0.480494 |
import numpy as np
import pandas as pd
import yt
from yt.utilities.logger import ytLogger as mylog
import yt.utilities.fortran_utils as fpu
from yt.funcs import get_pbar
import os
import pandas as pd
class HaloList(object):
def __init__(self, ds, folder='.', contam=False):
self.folder = folder
self.iout = int(str(ds).split('_')[1])
if os.path.exists(
'{s.folder}/Halos/{s.iout}/tree_bricks{s.iout:03d}.hdf'.format(
s=self)):
self.halos = pd.read_hdf(
'{s.folder}/Halos/{s.iout}/tree_bricks{s.iout:03d}.hdf'.format(
s=self))
else:
self.halos = self._read_halos(data_set=ds, with_contam_option=contam)
if self.halos.index.size > 0:
self.halos.to_hdf(
'{s.folder}/Halos/{s.iout}/tree_bricks{s.iout:03d}.hdf'.format(
s=self), 'hdf')
self.ds = ds
self.halos['bhid'] = -1 ; self.halos['galID'] = -1
self.halos['mgal'] = 0 ; self.halos['msink'] = 0
self.halos['pollution'] = 0
contam_file_path = '{s.folder}/Halos/{s.iout}/contam_halos{s.iout:03d}'.format(
s=self)
if os.path.exists(contam_file_path):
p = np.loadtxt(contam_file_path)
if len(p) > 0:
p = p.T
self.halos.loc[p[0], 'pollution'] = p[1]/p[2]
def get_halo(self, hid, fname=None):
halo = self.halos.loc[hid]
scale_mpc = float(self.ds.length_unit.in_units('Mpc'))
halostr = ("Halo {hid:.0f} (level {h.level:.0f}):\n"
"\tContains {h.nbpart:.0f} particles and {h.nbsub:.0f} subhalo(s)\n"
"\tCenter:\t\t ({h.x}, {h.y}, {h.z}) box units\n"
"\tVelocity:\t ({h.vx}, {h.vy}, {h.vz}) km/s\n"
"\tL:\t\t ({h.Lx}, {h.Ly}, {h.Lz}) ToCheck\n"
"\tMass:\t\t {h.m:.3e} Msun\n"
"\tMvir:\t\t {h.mvir:.3e} Msun\n"
"\tRadius:\t\t {h.r:.3e} Mpc ({rcodeunits:.3e} box units)\n"
"\tRvir:\t\t {h.rvir:.3e} Mpc ({rvcodeunits:.3e} box units)\n"
"\tTvir:\t\t {h.tvir:.3e} K".format(hid=hid,
h=halo,
rcodeunits=halo.r / scale_mpc,
rvcodeunits=halo.rvir / scale_mpc))
if fname is not None:
with open(fname, 'w') as f:
f.write(halostr)
return halostr
def get_halo_sphere(self, hid, rvir_factor=5):
halo_spheres = getattr(self, '_halo_spheres', {})
if (hid, rvir_factor) in halo_spheres:
return halo_spheres[hid, rvir_factor]
tmp = self.halos.loc[hid, ['x', 'y', 'z', 'rvir', 'vx', 'vy', 'vz']]\
.values
center = self.ds.arr(tmp[:3], 'code_length')
radius = self.ds.arr(tmp[3] * rvir_factor, 'Mpc')
vel = self.ds.arr(tmp[4:7], 'km/s')
sphere = self.ds.sphere(center, radius)
sphere.set_field_parameter('bulk_velocity', vel)
halo_spheres[(hid, rvir_factor)] = sphere
self._halo_spheres = halo_spheres
return sphere
def plot_halo(self, hid, rvir_factor=5, field=('deposit', 'all_density'), folder='./',
weight_field=('index', 'ones'), cmap='viridis', slice=False,
axis='z', **kwargs):
for k, v in kwargs.items():
print('%s: %s not supported' % (k, v))
if hid not in self.halos.index:
mylog.error('%s not found.' % hid)
return
tmp = np.array(self.halos.loc[hid, ['x', 'y', 'z', 'rvir']])
center = self.ds.arr(tmp[:3], 'code_length')
radius = self.ds.arr(tmp[3] * rvir_factor, 'Mpc')
sphere = self.ds.sphere(center, radius)
p = yt.ProjectionPlot(self.ds, axis, field, data_source=sphere,
weight_field=weight_field)
p.set_cmap(field=field, cmap=cmap)
p.annotate_timestamp(corner='upper_left', time=True, redshift=True)
p.annotate_scale(corner='upper_right')
p.save(folder)
def __getitem__(self, item):
if str(item) in self.halos:
return self.halos[item]
else:
return self.halos.ix[item]
self):
return len(self.halos)
def __iter__(self):
return self.halos.iterrows()
def __str__(self):
return self.halos.__str__()
def _read_halos(self, data_set, with_contam_option=False):
halo_keys = ('ID', 'nbpart', 'level', 'min_part_id',
'host', 'hostsub', 'nbsub', 'nextsub',
'x', 'y', 'z', 'vx', 'vy', 'vz', 'Lx', 'Ly', 'Lz',
'a', 'b', 'c', 'ek', 'ep', 'et', 'rho0', 'r_c',
'spin', 'm', 'r', 'mvir', 'rvir', 'tvir', 'cvel')
filename = '{s.folder}/Halos/{s.iout}/tree_bricks{s.iout:03d}'.format(
s=self)
data = np.empty(shape=(0, len(halo_keys)), dtype=object)
yt.funcs.mylog.debug('Reading halo catalog %s (ds=%s)' % (filename, data_set))
offsets = {}
if os.path.exists(filename):
with open(filename, 'rb') as f:
[npart] = fpu.read_vector(f, 'i')
[massp] = fpu.read_vector(f, 'f')
[aexp] = fpu.read_vector(f, 'f')
[omega_t] = fpu.read_vector(f, 'f')
[age] = fpu.read_vector(f, 'f')
[nhalos, nsubs] = fpu.read_vector(f, 'i')
self.nhalos = nhalos
self.nsubs = nsubs
self.aexp = aexp
self.age = age
self.massp = massp
data = np.empty(shape=(nhalos + nsubs, len(halo_keys)), dtype=object)
mylog.info('Brick: halos : %s' % nhalos)
mylog.info('Brick: sub halos : %s' % nsubs)
mylog.info('Brick: aexp : %s' % aexp)
for ihalo in range(nhalos + nsubs):
pos = f.tell()
[nbpart] = fpu.read_vector(f, 'i')
listp = fpu.read_vector(f, 'i')
[ID] = fpu.read_vector(f, 'i')
fpu.skip(f, 1)
[level, host, hostsub, nbsub, nextsub] = fpu.read_vector(f, 'i')
[m] = fpu.read_vector(f, 'f')
[x, y, z] = fpu.read_vector(f, 'f')
[vx, vy, vz] = fpu.read_vector(f, 'f')
[Lx, Ly, Lz] = fpu.read_vector(f, 'f')
[r, a, b, c] = fpu.read_vector(f, 'f')
[ek, ep, et] = fpu.read_vector(f, 'f')
[spin] = fpu.read_vector(f, 'f')
[rvir, mvir, tvir, cvel] = fpu.read_vector(f, 'f')
[rho0, r_c] = fpu.read_vector(f, 'f')
if with_contam_option:
[contam] = fpu.read_vector(f, 'i')
data[ihalo] = [ID, nbpart, level, listp.min(),
host, hostsub, nbsub, nextsub,
x, y, z, vx, vy, vz, Lx, Ly, Lz,
a, b, c, ek, ep, et, rho0, r_c,
spin, m, r, mvir, rvir, tvir, cvel]
offsets[ID] = pos
print('')
types = {}
for k in ('ID', 'nbpart', 'level', 'min_part_id',
'host', 'hostsub', 'nbsub', 'nextsub'):
types[k] = np.int64
for k in ('x', 'y', 'z', 'vx', 'vy', 'vz', 'Lx', 'Ly', 'Lz',
'a', 'b', 'c', 'ek', 'ep', 'et', 'rho0', 'r_c',
'spin', 'm', 'r', 'mvir', 'rvir', 'tvir', 'cvel'):
types[k] = np.float64
dd = {k: data[:, i].astype(types[k])
for i, k in enumerate(halo_keys)}
halos = pd.DataFrame(dd)
halos.m *= 1e11
halos.mvir *= 1e11
scale_mpc = float(data_set.length_unit.in_units('cm') / 3.08e24)
halos.x = halos.x / scale_mpc + .5
halos.y = halos.y / scale_mpc + .5
halos.z = halos.z / scale_mpc + .5
self.offsets = offsets
return halos.set_index('ID')
def get_halo_parts(self, hid):
filename = '{s.folder}/Halos/{s.iout}/tree_bricks{s.iout:03d}'.format(
s=self)
with open(filename, 'br') as fd:
fd.seek(self.offsets[hid])
fpu.skip(fd, 1)
listp = fpu.read_vector(fd, 'i')
return listp
| true | true |
f71166891b5da8b0c4158d35a906f11005268be1 | 6,324 | py | Python | server/devices/views.py | vahidzee/pi-surveillance | 63996d8abc998d0a777d588231ecbc6d484b6451 | [
"MIT"
] | null | null | null | server/devices/views.py | vahidzee/pi-surveillance | 63996d8abc998d0a777d588231ecbc6d484b6451 | [
"MIT"
] | null | null | null | server/devices/views.py | vahidzee/pi-surveillance | 63996d8abc998d0a777d588231ecbc6d484b6451 | [
"MIT"
] | null | null | null | from PIL import Image
from django.conf import settings
from . import forms, recognition
from . import utils
from . import models
from django.shortcuts import render, redirect
from django.contrib import admin
from django.core.mail import send_mail
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.utils.decorators import method_decorator
import json
def signup(request):
    """Render the signup page; create the account and redirect on a valid POST."""
    is_post = request.method == 'POST'
    form = forms.UserCreationForm(request.POST) if is_post else forms.UserCreationForm()
    if is_post and form.is_valid():
        form.save()
        return redirect('../admin/')
    # GET, or POST with validation errors: re-render with the (bound) form.
    context = {'form': form, 'site_header': admin.site.site_header, 'site_title': admin.site.site_title}
    return render(request, 'admin/logon.html', context)
@method_decorator(csrf_exempt, name='dispatch')
def hello(request) -> JsonResponse:
    """Hello API endpoint: a client exchanges its ``device_id`` for an access token.

    Unknown devices are registered in a "waiting to be claimed" state; a
    token is only issued once a user has claimed the device. Requesting a
    new token invalidates the previously issued one.
    """
    data = json.loads(request.body)
    try:
        device_id = data['device_id']
        if (device := models.Device.objects.filter(id=device_id)).count():
            device = device[0]
        else:
            # registering newly connected device (waiting for user to claim)
            device = models.Device(id=data['device_id'])
            device.save()
        if not device.user:
            return JsonResponse(data=utils.base_response(ok=False, message='Device is yet to be claimed by a user'))
        tokens = models.AccessToken.objects.filter(device=device)
        if tokens.count():
            # request for new token -> invalidate old token
            # NOTE(review): only the latest token is invalidated here —
            # presumably older ones are already invalid; TODO confirm.
            last_token = tokens.latest('time')
            last_token.valid = False
            last_token.save()
        # create new access token, bound to the requesting client's IP
        token = models.AccessToken(
            device=device, ip=utils.get_client_ip(request))
        token.save()
        return JsonResponse(data=utils.base_response(response=dict(token=token.token)))
    except KeyError:
        return JsonResponse(data=utils.base_response(ok=False, message='No `device_id` specified'))
def authenticate_device(funct):
    """Decorator for device API views: validates the access token first.

    Parses the request (form POST with optional ``image`` upload, or a JSON
    body), looks up the ``token``, and calls the wrapped view with
    ``data``, ``file`` and ``auth_res`` (user + device) keyword arguments.
    Returns an error JsonResponse instead when the token is missing,
    invalid, or expired.
    """
    @method_decorator(csrf_exempt, name='dispatch')
    def view_wrapper(request, *args, **kwargs):
        # Form-encoded POST (possibly multipart with an image) vs raw JSON.
        if request.POST:
            data = dict(request.POST)
            file = request.FILES.get('image', None)
        else:
            data = json.loads(request.body)
            file = None
        try:
            token = data['token']
            # Form values arrive as lists; unwrap the first element.
            if isinstance(token, list):
                token = token[0]
            access_token = models.AccessToken.objects.get(token=token)
            if not access_token.is_valid(request):
                return JsonResponse(data=utils.base_response(message='This token is no longer valid.', ok=False))
            auth_res = dict(user=access_token.device.user,
                            device=access_token.device)
        except KeyError:
            return JsonResponse(data=utils.base_response(message='No `token` was specified.', ok=False))
        # NOTE(review): including ``Exception`` makes this catch-all — any
        # unrelated failure is reported as an invalid token; presumably
        # intentional best-effort handling, TODO confirm. A
        # ``functools.wraps(funct)`` on view_wrapper would also preserve
        # the wrapped view's metadata.
        except (models.models.ObjectDoesNotExist, Exception):
            return JsonResponse(data=utils.base_response(message='Invalid `token` was specified.', ok=False))
        return funct(request, *args, data=data, file=file, auth_res=auth_res, **kwargs)
    return view_wrapper
@authenticate_device
def fetch(request, data: dict = None, file=None, auth_res=None):
    """Return the user's known face embeddings and the device's inside-count."""
    user = auth_res['user']
    faces = []
    for face in models.Face.objects.filter(user=user):
        faces.append(dict(embedding=face.embedding, face_id=face.id))
    payload = dict(faces=faces, in_count=auth_res['device'].inside_count())
    return JsonResponse(data=utils.base_response(response=payload))
@authenticate_device
def introduce(request, data: dict = None, file=None, auth_res=None):
    """Register an uploaded face (image + embedding) for the authenticated user.

    If recognition matches an already-known face, its id is returned;
    otherwise a new Face record is created from the upload.
    """
    try:
        embedding = data['embedding']
        # Form values arrive as single-element lists; the payload itself
        # is a JSON-encoded embedding vector.
        embedding = json.loads(embedding if not isinstance(
            embedding, list) else embedding[0])
        image = Image.open(file).convert('RGB')
        face = recognition.find_face(
            auth_res['user'], image=image, embedding=embedding)
        # NOTE(review): find_face appears to return a bool when no known
        # face matches — TODO confirm against recognition module.
        if isinstance(face, bool):
            face = models.Face.save_pil(
                user=auth_res['user'], image=image, embedding=embedding)
        return JsonResponse(data=utils.base_response(response=dict(face_id=face.id)))
    except KeyError:
        return JsonResponse(data=utils.base_response(message='Embedding was not mentioned', ok=False))
def mail_message(log):
    """Build the notification e-mail body for an entry/exit log event.

    Falls back to the numeric ids when the device or face has no name;
    ``kind`` "E" means an entry, anything else an exit.
    """
    # ``or`` replaces the redundant f'{x if x else y}' wrappers: falsy
    # (empty) names fall back to ids, and the final f-string does the
    # string conversion exactly as before.
    device = log.device.name or log.device.id
    face = log.face.name or log.face.id
    kind = "enter" if log.kind == "E" else "exit"
    num_in = log.device.inside_count()
    return f'Your device "{device}", saw "{face}" {kind}.\nThere are currently {num_in} people' \
           f' inside this property.'
@authenticate_device
def log(request, data: dict = None, file=None, auth_res=None):
    """Record an entry/exit event for a known face on the authenticated device.

    Optionally stores the uploaded snapshot and, when a GMAIL sender is
    configured, e-mails the device owner a notification. Responds with the
    updated inside-count and the face's display name.
    """
    try:
        # Form values arrive as single-element lists; unwrap them.
        face_id = data['face_id'] if not isinstance(
            data['face_id'], list) else data['face_id'][0]
        face = models.Face.objects.get(id=face_id)
        kind = data['kind'] if not isinstance(
            data['kind'], list) else data['kind'][0]
        device = auth_res['device']
        image = Image.open(file).convert('RGB') if file is not None else None
        log = models.Log.save_pil(
            face=face, device=device, kind=kind, image=image)
        # E-mail notification is best-effort (fail_silently) and only sent
        # when a sender address is configured.
        if settings.GMAIL:
            send_mail(subject='Surveillance Log',
                      message=mail_message(log),
                      from_email=settings.GMAIL,
                      recipient_list=[device.user.email],
                      fail_silently=True)
        return JsonResponse(data=utils.base_response(
            ok=True, message='Logged successfully', response=dict(
                in_count=log.device.inside_count(), name='Unknown' if not log.face.name else log.face.name)
        ))
    except KeyError:
        return JsonResponse(
            data=utils.base_response(message='Both `face_id` and `kind` are expected to be specified', ok=False))
    except (models.models.ObjectDoesNotExist,):
        return JsonResponse(data=utils.base_response(message='Invalid `face_id` is specified', ok=False))
| 42.16 | 116 | 0.638046 | from PIL import Image
from django.conf import settings
from . import forms, recognition
from . import utils
from . import models
from django.shortcuts import render, redirect
from django.contrib import admin
from django.core.mail import send_mail
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.utils.decorators import method_decorator
import json
def signup(request):
if request.method == 'POST':
form = forms.UserCreationForm(request.POST)
if form.is_valid():
form.save()
return redirect('../admin/')
else:
form = forms.UserCreationForm()
return render(request, 'admin/logon.html',
{'form': form, 'site_header': admin.site.site_header, 'site_title': admin.site.site_title})
@method_decorator(csrf_exempt, name='dispatch')
def hello(request) -> JsonResponse:
data = json.loads(request.body)
try:
device_id = data['device_id']
if (device := models.Device.objects.filter(id=device_id)).count():
device = device[0]
else:
device = models.Device(id=data['device_id'])
device.save()
if not device.user:
return JsonResponse(data=utils.base_response(ok=False, message='Device is yet to be claimed by a user'))
tokens = models.AccessToken.objects.filter(device=device)
if tokens.count():
last_token = tokens.latest('time')
last_token.valid = False
last_token.save()
token = models.AccessToken(
device=device, ip=utils.get_client_ip(request))
token.save()
return JsonResponse(data=utils.base_response(response=dict(token=token.token)))
except KeyError:
return JsonResponse(data=utils.base_response(ok=False, message='No `device_id` specified'))
def authenticate_device(funct):
@method_decorator(csrf_exempt, name='dispatch')
def view_wrapper(request, *args, **kwargs):
if request.POST:
data = dict(request.POST)
file = request.FILES.get('image', None)
else:
data = json.loads(request.body)
file = None
try:
token = data['token']
if isinstance(token, list):
token = token[0]
access_token = models.AccessToken.objects.get(token=token)
if not access_token.is_valid(request):
return JsonResponse(data=utils.base_response(message='This token is no longer valid.', ok=False))
auth_res = dict(user=access_token.device.user,
device=access_token.device)
except KeyError:
return JsonResponse(data=utils.base_response(message='No `token` was specified.', ok=False))
except (models.models.ObjectDoesNotExist, Exception):
return JsonResponse(data=utils.base_response(message='Invalid `token` was specified.', ok=False))
return funct(request, *args, data=data, file=file, auth_res=auth_res, **kwargs)
return view_wrapper
@authenticate_device
def fetch(request, data: dict = None, file=None, auth_res=None):
return JsonResponse(
data=utils.base_response(
response=dict(faces=[
dict(embedding=face.embedding, face_id=face.id) for face in
models.Face.objects.filter(user=auth_res['user'])
],
in_count=auth_res['device'].inside_count(),
)
)
)
@authenticate_device
def introduce(request, data: dict = None, file=None, auth_res=None):
try:
embedding = data['embedding']
embedding = json.loads(embedding if not isinstance(
embedding, list) else embedding[0])
image = Image.open(file).convert('RGB')
face = recognition.find_face(
auth_res['user'], image=image, embedding=embedding)
if isinstance(face, bool):
face = models.Face.save_pil(
user=auth_res['user'], image=image, embedding=embedding)
return JsonResponse(data=utils.base_response(response=dict(face_id=face.id)))
except KeyError:
return JsonResponse(data=utils.base_response(message='Embedding was not mentioned', ok=False))
def mail_message(log):
device = f'{log.device.name if log.device.name else log.device.id}'
face = f'{log.face.name if log.face.name else log.face.id}'
kind = f'{"enter" if log.kind == "E" else "exit"}'
num_in = log.device.inside_count()
return f'Your device "{device}", saw "{face}" {kind}.\nThere are currently {num_in} people' \
f' inside this property.'
@authenticate_device
def log(request, data: dict = None, file=None, auth_res=None):
try:
face_id = data['face_id'] if not isinstance(
data['face_id'], list) else data['face_id'][0]
face = models.Face.objects.get(id=face_id)
kind = data['kind'] if not isinstance(
data['kind'], list) else data['kind'][0]
device = auth_res['device']
image = Image.open(file).convert('RGB') if file is not None else None
log = models.Log.save_pil(
face=face, device=device, kind=kind, image=image)
if settings.GMAIL:
send_mail(subject='Surveillance Log',
message=mail_message(log),
from_email=settings.GMAIL,
recipient_list=[device.user.email],
fail_silently=True)
return JsonResponse(data=utils.base_response(
ok=True, message='Logged successfully', response=dict(
in_count=log.device.inside_count(), name='Unknown' if not log.face.name else log.face.name)
))
except KeyError:
return JsonResponse(
data=utils.base_response(message='Both `face_id` and `kind` are expected to be specified', ok=False))
except (models.models.ObjectDoesNotExist,):
return JsonResponse(data=utils.base_response(message='Invalid `face_id` is specified', ok=False))
| true | true |
f711678dac36327210c7a3f075c080ff99a8e948 | 1,499 | py | Python | setup.py | candh/totp-cli | 191c46caab3b1a6863336189521b460d45e1223c | [
"MIT"
] | 1 | 2022-01-01T15:53:09.000Z | 2022-01-01T15:53:09.000Z | setup.py | candh/totp-cli | 191c46caab3b1a6863336189521b460d45e1223c | [
"MIT"
] | null | null | null | setup.py | candh/totp-cli | 191c46caab3b1a6863336189521b460d45e1223c | [
"MIT"
] | null | null | null | from setuptools import setup
import sys
import os
# Refuse to run under Python 2 — the package targets Python 3 only.
if sys.version_info.major < 3:
    raise Exception("python3 is required to run this script")

# Reset the bundled credential store before building so no stale secrets
# are shipped.  os.remove keeps the original reset-then-recreate behavior;
# the `with` block truncates/creates the file AND closes the handle —
# the previous bare open() leaked an open file descriptor.
if os.path.exists('totpauth/database/info.json'):
    os.remove('totpauth/database/info.json')
with open('totpauth/database/info.json', 'w'):
    pass
setup(
    # --- distribution metadata shown on PyPI ---
    name='totp-cli',
    version='1.0',
    description='A CLI tool to generate Time-Based One Time Passwords (TOTP)',
    author='Haider Ali Khichi',
    author_email='khichihaider@gmail.com',
    license='MIT',
    url='https://github.com/candh/totp-cli',
    keywords='totp otp 2fa cli tools two factor authentication google authenticator',
    # Runtime dependencies: terminal colours, tiny JSON DB, OS keyring, OTP math.
    install_requires=['termcolor', 'tinydb', 'keyring', 'pyotp'],
    packages=['totpauth'],
    # Installs a `totp` console command dispatching to totpauth.totp:main.
    entry_points = {
        'console_scripts': [
            'totp=totpauth.totp:main'
        ]
    },
    # Ship the (freshly emptied) credential store file inside the package.
    package_data = {
        'totpauth': ['database/info.json']
    },
    # Trove classifiers; informational only, no effect on installation.
    classifiers = [
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: End Users/Desktop',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Topic :: Security',
        'Topic :: Security :: Cryptography'
    ]
) | 31.229167 | 83 | 0.667779 | from setuptools import setup
import sys
import os
# Abort early on Python 2 interpreters; this script is Python-3 only.
if sys.version_info.major < 3:
    raise Exception("python3 is required to run this script")
# Reset the bundled credential store before packaging.  The context
# manager truncates/creates the file and closes the handle — the original
# bare open() left the descriptor leaked until interpreter exit.
if os.path.exists('totpauth/database/info.json'):
    os.remove('totpauth/database/info.json')
with open('totpauth/database/info.json', 'w'):
    pass
setup(
    # Package identity and PyPI metadata.
    name='totp-cli',
    version='1.0',
    description='A CLI tool to generate Time-Based One Time Passwords (TOTP)',
    author='Haider Ali Khichi',
    author_email='khichihaider@gmail.com',
    license='MIT',
    url='https://github.com/candh/totp-cli',
    keywords='totp otp 2fa cli tools two factor authentication google authenticator',
    # Third-party runtime requirements installed alongside the package.
    install_requires=['termcolor', 'tinydb', 'keyring', 'pyotp'],
    packages=['totpauth'],
    # Exposes the `totp` command on the user's PATH.
    entry_points = {
        'console_scripts': [
            'totp=totpauth.totp:main'
        ]
    },
    # Non-Python data file bundled with the totpauth package.
    package_data = {
        'totpauth': ['database/info.json']
    },
    # PyPI trove classifiers (informational only).
    classifiers = [
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: End Users/Desktop',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Topic :: Security',
        'Topic :: Security :: Cryptography'
    ]
) | true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.