hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cb35a1cba287ae52c6dc23ab1d656bee043fa23c | 1,928 | py | Python | selfdrive/hardware/pc/hardware.py | moreno7798/openpilot-1 | 1a41ce3db293729a0491f757092f0c02ded66ff5 | [
"MIT"
] | 45 | 2020-09-10T02:43:34.000Z | 2022-03-23T12:12:48.000Z | selfdrive/hardware/pc/hardware.py | moreno7798/openpilot-1 | 1a41ce3db293729a0491f757092f0c02ded66ff5 | [
"MIT"
] | 4 | 2021-07-17T19:33:13.000Z | 2021-12-26T02:51:07.000Z | selfdrive/hardware/pc/hardware.py | moreno7798/openpilot-1 | 1a41ce3db293729a0491f757092f0c02ded66ff5 | [
"MIT"
] | 130 | 2020-08-19T04:20:02.000Z | 2022-03-24T23:05:22.000Z | import random
from cereal import log
from selfdrive.hardware.base import HardwareBase, ThermalConfig
NetworkType = log.DeviceState.NetworkType
NetworkStrength = log.DeviceState.NetworkStrength
class Pc(HardwareBase):
  """HardwareBase implementation for running openpilot on a plain PC.

  There is no comma-device hardware here, so most queries return fixed
  placeholder values and power/thermal operations are no-ops.
  """

  def get_os_version(self):
    # No device-specific OS build on a PC.
    return None

  def get_device_type(self):
    return "pc"

  def get_sound_card_online(self):
    # Assume audio always works on a PC.
    return True

  def reboot(self, reason=None):
    # No-op: just log the request instead of rebooting the host machine.
    print("REBOOT!")

  def uninstall(self):
    print("uninstall")

  def get_imei(self, slot):
    # Fake 15-digit IMEI derived from a random 32-bit integer.
    return "%015d" % random.randint(0, 1 << 32)

  def get_serial(self):
    # Fixed placeholder serial number.
    return "cccccccc"

  def get_subscriber_info(self):
    return ""

  def get_network_info(self):
    return None

  def get_network_type(self):
    # Assume the PC is connected via wifi.
    return NetworkType.wifi

  def get_sim_info(self):
    # No SIM hardware present on a PC.
    return {
      'sim_id': '',
      'mcc_mnc': None,
      'network_type': ["Unknown"],
      'sim_state': ["ABSENT"],
      'data_connected': False
    }

  def get_network_strength(self, network_type):
    return NetworkStrength.unknown

  # Battery values are faked: always full, charging, drawing no power.
  def get_battery_capacity(self):
    return 100

  def get_battery_status(self):
    return ""

  def get_battery_current(self):
    return 0

  def get_battery_voltage(self):
    return 0

  def get_battery_charging(self):
    return True

  def set_battery_charging(self, on):
    pass

  def get_usb_present(self):
    return False

  def get_current_power_draw(self):
    return 0

  def shutdown(self):
    # No-op: just log the request instead of shutting down the host.
    print("SHUTDOWN!")

  def get_thermal_config(self):
    # No real sensors: every zone reads None with a scale factor of 1.
    return ThermalConfig(cpu=((None,), 1), gpu=((None,), 1), mem=(None, 1), bat=(None, 1), ambient=(None, 1))

  def set_screen_brightness(self, percentage):
    pass

  def set_power_save(self, powersave_enabled):
    pass

  def get_gpu_usage_percent(self):
    return 0

  def get_modem_version(self):
    return None

  def initialize_hardware(self):
    pass

  def get_networks(self):
    return None

  def get_ip_address(self):
    return ""
| 18.718447 | 109 | 0.686722 |
8e206d574e858802cfcb0dc0e2d5028962331cd6 | 5,072 | py | Python | test/functional/preciousblock.py | arcana-coin/arcana-core | b5d6d71968d1f19c42dc3f351aff17800da5af36 | [
"MIT"
] | null | null | null | test/functional/preciousblock.py | arcana-coin/arcana-core | b5d6d71968d1f19c42dc3f351aff17800da5af36 | [
"MIT"
] | null | null | null | test/functional/preciousblock.py | arcana-coin/arcana-core | b5d6d71968d1f19c42dc3f351aff17800da5af36 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bytcoyn Core Developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the preciousblock RPC."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
connect_nodes_bi,
sync_chain,
sync_blocks,
)
def unidirectional_node_sync_via_rpc(node_src, node_dest):
    """Copy node_src's best chain to node_dest using only RPC calls.

    Walks backwards from node_src's tip until a block already known to
    node_dest is found, then submits the missing blocks in height order.

    :param node_src: node whose chain is copied from.
    :param node_dest: node the blocks are submitted to.
    """
    blocks_to_copy = []
    blockhash = node_src.getbestblockhash()
    while True:
        try:
            assert(len(node_dest.getblock(blockhash, False)) > 0)
            break
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit and made the walk uninterruptible.
        # Any RPC failure (unknown block) means node_dest lacks this block.
        except Exception:
            blocks_to_copy.append(blockhash)
            blockhash = node_src.getblockheader(blockhash, True)['previousblockhash']
    blocks_to_copy.reverse()
    for blockhash in blocks_to_copy:
        blockdata = node_src.getblock(blockhash, False)
        # 'inconclusive' is returned when the block is valid but not made
        # the new tip; both outcomes are acceptable here.
        assert(node_dest.submitblock(blockdata) in (None, 'inconclusive'))
def node_sync_via_rpc(nodes):
    """Exchange blocks between every ordered pair of nodes via RPC only."""
    for source in nodes:
        for destination in nodes:
            if source is not destination:
                unidirectional_node_sync_via_rpc(source, destination)
class PreciousTest(BitcoinTestFramework):
    """Exercise the preciousblock RPC.

    preciousblock should let a node prefer one of two equal-work chain
    tips without causing a normal reorg, and the preference must be
    overridden once one chain gains more work.
    """

    def set_test_params(self):
        # Fresh chain so block heights asserted below are deterministic.
        self.setup_clean_chain = True
        self.num_nodes = 3

    def setup_network(self):
        # Deliberately start the nodes unconnected: each one first mines
        # its own competing chain.
        self.setup_nodes()

    def run_test(self):
        self.log.info("Ensure submitblock can in principle reorg to a competing chain")
        self.nodes[0].generate(1)
        assert_equal(self.nodes[0].getblockcount(), 1)
        hashZ = self.nodes[1].generate(2)[-1]
        assert_equal(self.nodes[1].getblockcount(), 2)
        node_sync_via_rpc(self.nodes[0:3])
        assert_equal(self.nodes[0].getbestblockhash(), hashZ)
        self.log.info("Mine blocks A-B-C on Node 0")
        hashC = self.nodes[0].generate(3)[-1]
        assert_equal(self.nodes[0].getblockcount(), 5)
        self.log.info("Mine competing blocks E-F-G on Node 1")
        hashG = self.nodes[1].generate(3)[-1]
        assert_equal(self.nodes[1].getblockcount(), 5)
        assert(hashC != hashG)
        self.log.info("Connect nodes and check no reorg occurs")
        # Submit competing blocks via RPC so any reorg should occur before we proceed (no way to wait on inaction for p2p sync)
        node_sync_via_rpc(self.nodes[0:2])
        connect_nodes_bi(self.nodes,0,1)
        # Equal work: each node must keep its own tip.
        assert_equal(self.nodes[0].getbestblockhash(), hashC)
        assert_equal(self.nodes[1].getbestblockhash(), hashG)
        self.log.info("Make Node0 prefer block G")
        self.nodes[0].preciousblock(hashG)
        assert_equal(self.nodes[0].getbestblockhash(), hashG)
        self.log.info("Make Node0 prefer block C again")
        self.nodes[0].preciousblock(hashC)
        assert_equal(self.nodes[0].getbestblockhash(), hashC)
        self.log.info("Make Node1 prefer block C")
        self.nodes[1].preciousblock(hashC)
        sync_chain(self.nodes[0:2]) # wait because node 1 may not have downloaded hashC
        assert_equal(self.nodes[1].getbestblockhash(), hashC)
        self.log.info("Make Node1 prefer block G again")
        self.nodes[1].preciousblock(hashG)
        assert_equal(self.nodes[1].getbestblockhash(), hashG)
        self.log.info("Make Node0 prefer block G again")
        self.nodes[0].preciousblock(hashG)
        assert_equal(self.nodes[0].getbestblockhash(), hashG)
        self.log.info("Make Node1 prefer block C again")
        self.nodes[1].preciousblock(hashC)
        assert_equal(self.nodes[1].getbestblockhash(), hashC)
        self.log.info("Mine another block (E-F-G-)H on Node 0 and reorg Node 1")
        self.nodes[0].generate(1)
        assert_equal(self.nodes[0].getblockcount(), 6)
        sync_blocks(self.nodes[0:2])
        hashH = self.nodes[0].getbestblockhash()
        assert_equal(self.nodes[1].getbestblockhash(), hashH)
        self.log.info("Node1 should not be able to prefer block C anymore")
        # preciousblock cannot override a strictly-more-work chain.
        self.nodes[1].preciousblock(hashC)
        assert_equal(self.nodes[1].getbestblockhash(), hashH)
        self.log.info("Mine competing blocks I-J-K-L on Node 2")
        self.nodes[2].generate(4)
        assert_equal(self.nodes[2].getblockcount(), 6)
        hashL = self.nodes[2].getbestblockhash()
        self.log.info("Connect nodes and check no reorg occurs")
        node_sync_via_rpc(self.nodes[1:3])
        connect_nodes_bi(self.nodes,1,2)
        connect_nodes_bi(self.nodes,0,2)
        assert_equal(self.nodes[0].getbestblockhash(), hashH)
        assert_equal(self.nodes[1].getbestblockhash(), hashH)
        assert_equal(self.nodes[2].getbestblockhash(), hashL)
        self.log.info("Make Node1 prefer block L")
        self.nodes[1].preciousblock(hashL)
        assert_equal(self.nodes[1].getbestblockhash(), hashL)
        self.log.info("Make Node2 prefer block H")
        self.nodes[2].preciousblock(hashH)
        assert_equal(self.nodes[2].getbestblockhash(), hashH)
if __name__ == '__main__':
    # Standard functional-test entry point.
    PreciousTest().main()
| 44.104348 | 127 | 0.668967 |
048f9d46df40e9266ff8fb6b4192b5114c34c3e0 | 2,715 | py | Python | sources/app/capture_range_profile.py | Steve1an/pymmw | 2bd3bc98c92abedc72922d3fad11e38b569f8418 | [
"MIT"
] | 2 | 2020-06-19T20:40:30.000Z | 2021-02-06T04:58:51.000Z | sources/app/capture_range_profile.py | Steve1an/pymmw | 2bd3bc98c92abedc72922d3fad11e38b569f8418 | [
"MIT"
] | null | null | null | sources/app/capture_range_profile.py | Steve1an/pymmw | 2bd3bc98c92abedc72922d3fad11e38b569f8418 | [
"MIT"
] | 2 | 2020-06-20T17:25:43.000Z | 2021-02-06T04:59:15.000Z | #
# Copyright (c) 2019, Manfred Constapel
# This file is licensed under the terms of the MIT license.
#
#
# range and noise profile - capture
#
import os, sys, time
# Walk up from this file's directory until a directory containing 'lib' is
# found, so the shared capture helpers import regardless of the working
# directory the script was launched from.
try:
    __base__ = os.path.dirname(os.path.abspath(__file__))
    while 'lib' not in [d for d in os.listdir(__base__) if os.path.isdir(os.path.join(__base__, d))]: __base__ = os.path.join(__base__, '..')
    if __base__ not in sys.path: sys.path.append(__base__)
    from lib.capture import *
except ImportError as e:
    # Exit code 3: unable to locate/import the capture library.
    print(e, file=sys.stderr, flush=True)
    sys.exit(3)
# ------------------------------------------------
def update(data):
    """Handle one decoded frame: log range/noise profiles with object counts.

    Builds the range axis (meters) from whichever profile is present, then,
    if a frame header is included, appends one line per range bin to the
    open log file.

    Reads module globals set in ``__main__``: ``range_max``, ``range_bias``
    and the log file handle ``fh`` (only used when 'header' is present).

    :param data: dict with optional keys 'range', 'noise', 'objects',
        'header'.
    """
    def _axis(num_bins):
        # Distance of each FFT bin, corrected by the range bias.
        # (Renamed from 'bin', which shadowed the builtin.)
        bin_width = range_max / num_bins
        return [i * bin_width - range_bias for i in range(num_bins)]

    x, r, n, o = None, None, None, None

    if 'range' in data:
        r = data['range']
        x = _axis(len(r))

    if 'noise' in data:
        n = data['noise']
        if x is None:
            x = _axis(len(n))

    if x is None:  # neither profile present - nothing to log
        return

    if 'objects' in data:
        # Histogram of detected objects per range bin.
        o = [0] * len(x)
        for p in data['objects']:
            ri, _ = p['index']
            if ri < len(o):
                o[ri] += 1

    if 'header' in data:
        if 'time' not in data['header']: return
        if 'number' not in data['header']: return
        clk, cnt = data['header']['time'], data['header']['number']
        # Missing profiles are logged as NaN so columns stay aligned.
        if r is None: r = [float('nan')] * len(x)
        if n is None: n = [float('nan')] * len(x)
        if o is None: o = [0] * len(x)
        for i in range(len(x)):
            s = '{} {:.4f} {:.4f} {:.4f} {}'.format(i, x[i], r[i], n[i], o[i])
            # First line of each frame also records frame number, device
            # clock and host wall-clock time.
            if i == 0: s += ' {} {} {:.3f}'.format(cnt, clk, time.time())
            fh.write(s + '\n')
        fh.flush()
        os.fsync(fh.fileno())
# ------------------------------------------------
if __name__ == "__main__":
    if len(sys.argv[1:]) != 2:
        print('Usage: {} {}'.format(sys.argv[0].split(os.sep)[-1], '<range_maximum> <range_bias>'))
        sys.exit(1)
    fh, fp = None, 'log'
    try:
        range_max = float(sys.argv[1])   # maximum unambiguous range (meters)
        range_bias = float(sys.argv[2])  # range bias subtracted from the axis
        this_name = os.path.basename(sys.argv[0])
        # NOTE(review): the prefix stripped here is 'capture ' (with a space)
        # while the filename uses 'capture_'; this only works because both
        # have the same length -- confirm intent.
        this_name = this_name[len('capture '):-len('.py')]
        if not os.path.exists(fp): os.makedirs(fp)
        # One timestamped log file per run, e.g. log/range_profile_1590000000.log
        fh = open('{}/{}_{}.log'.format(fp, this_name, int(time.time())), 'w')
        start_capture(update)
    except Exception as e:
        # Exit code 2: bad arguments or capture failure.
        print(e, file=sys.stderr, flush=True)
        sys.exit(2)
| 27.704082 | 141 | 0.471455 |
503869233fbb6f547b87e33ca89401d958ae49f4 | 381 | py | Python | zinnia_threaded_comments/migrations/0004_auto_20180903_2330.py | dantezhu/zinnia-threaded-comments | fce5e76f5785bcca368fd80f8b24379e1b2420ee | [
"BSD-3-Clause"
] | null | null | null | zinnia_threaded_comments/migrations/0004_auto_20180903_2330.py | dantezhu/zinnia-threaded-comments | fce5e76f5785bcca368fd80f8b24379e1b2420ee | [
"BSD-3-Clause"
] | null | null | null | zinnia_threaded_comments/migrations/0004_auto_20180903_2330.py | dantezhu/zinnia-threaded-comments | fce5e76f5785bcca368fd80f8b24379e1b2420ee | [
"BSD-3-Clause"
] | null | null | null | # Generated by Django 2.0.7 on 2018-09-03 23:30
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: order threaded comments by their MPTT tree position."""

    dependencies = [
        ('zinnia-threaded-comments', '0003_auto_20180903_0240'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='threadedcomment',
            # 'tree_id' then 'lft' (MPTT fields) yields depth-first ordering
            # of each comment thread.
            options={'ordering': ['tree_id', 'lft']},
        ),
    ]
| 21.166667 | 64 | 0.611549 |
027b73892db963b796d0ed09de3056e363460eb5 | 37,688 | py | Python | testtools/testcase.py | jml/testtools | cd286b23f23c2d79b60e1bb3686d7a17507595b8 | [
"MIT"
] | null | null | null | testtools/testcase.py | jml/testtools | cd286b23f23c2d79b60e1bb3686d7a17507595b8 | [
"MIT"
] | null | null | null | testtools/testcase.py | jml/testtools | cd286b23f23c2d79b60e1bb3686d7a17507595b8 | [
"MIT"
] | null | null | null | # Copyright (c) 2008-2011 testtools developers. See LICENSE for details.
"""Test case related stuff."""
__metaclass__ = type
__all__ = [
'attr',
'clone_test_with_new_id',
'ExpectedException',
'gather_details',
'run_test_with',
'skip',
'skipIf',
'skipUnless',
'TestCase',
]
import copy
import functools
import itertools
import sys
import warnings
from extras import (
safe_hasattr,
try_import,
try_imports,
)
# To let setup.py work, make this a conditional import.
unittest = try_imports(['unittest2', 'unittest'])
from testtools import (
content,
)
from testtools.compat import (
advance_iterator,
reraise,
)
from testtools.matchers import (
Annotate,
Contains,
MatchesAll,
MatchesException,
MismatchError,
Is,
IsInstance,
Not,
Raises,
)
from testtools.matchers._basic import _FlippedEquals
from testtools.monkey import patch
from testtools.runtest import RunTest
from testtools.testresult import (
ExtendedToOriginalDecorator,
TestResult,
)
wraps = try_import('functools.wraps')
class TestSkipped(Exception):
    """Raised within TestCase.run() when a test is skipped."""
# Prefer unittest/unittest2's SkipTest when importable so skips raised by
# either framework are treated identically by testtools.
TestSkipped = try_import('unittest.case.SkipTest', TestSkipped)
TestSkipped = try_import('unittest2.case.SkipTest', TestSkipped)
class _UnexpectedSuccess(Exception):
    """An unexpected success was raised.

    Note that this exception is private plumbing in testtools' testcase
    module.
    """
# Use the stdlib/unittest2 implementation when importable so testtools
# interoperates with tests that use those frameworks directly.
_UnexpectedSuccess = try_import(
    'unittest.case._UnexpectedSuccess', _UnexpectedSuccess)
_UnexpectedSuccess = try_import(
    'unittest2.case._UnexpectedSuccess', _UnexpectedSuccess)
class _ExpectedFailure(Exception):
    """An expected failure occured.

    Note that this exception is private plumbing in testtools' testcase
    module.
    """
# Use the stdlib/unittest2 implementation when importable so testtools
# interoperates with tests that use those frameworks directly.
_ExpectedFailure = try_import(
    'unittest.case._ExpectedFailure', _ExpectedFailure)
_ExpectedFailure = try_import(
    'unittest2.case._ExpectedFailure', _ExpectedFailure)
# Backport of unittest's pre-3.4 expectedFailure decorator, kept so the
# sub-test/@expectedFailure machinery stays compatible. Not public API.
def _expectedFailure(func):
    """Wrap *func* so an exception becomes _ExpectedFailure and a clean
    return becomes _UnexpectedSuccess."""
    @functools.wraps(func)
    def decorated(*args, **kwargs):
        try:
            func(*args, **kwargs)
        except Exception:
            raise _ExpectedFailure(sys.exc_info())
        else:
            raise _UnexpectedSuccess
    return decorated
def run_test_with(test_runner, **kwargs):
    """Mark a test as being executed by a specific ``RunTest``.

    e.g.::

        @run_test_with(CustomRunner, timeout=42)
        def test_foo(self):
            self.assertTrue(True)

    The decorator stores a factory on the decorated function;
    `TestCase.__init__` consults that attribute when choosing a ``RunTest``
    factory. When stacking decorators, keep this one outermost, or use
    decorators that copy the wrapped function's attributes
    (``functools.wraps``, ``twisted.python.util.mergeFunctionMetadata``).

    :param test_runner: A ``RunTest`` factory taking a test case and,
        optionally, ``handlers`` and ``last_resort`` keyword arguments.
    :param kwargs: Extra keyword arguments forwarded to ``test_runner``.
    :return: A decorator marking a test as needing a special runner.
    """
    def mark(function):
        # All the decorator does is annotate the function; the runner is
        # actually constructed later, inside TestCase.
        def make_runner(case, handlers=None, last_resort=None):
            try:
                return test_runner(
                    case, handlers=handlers, last_resort=last_resort,
                    **kwargs)
            except TypeError:
                # Backwards compat: older factories predate last_resort.
                return test_runner(case, handlers=handlers, **kwargs)
        function._run_test_with = make_runner
        return function
    return mark
def _copy_content(content_object):
    """Return a non-volatile copy of ``content_object``.

    The bytes are drained eagerly, so the copy remains readable even after
    the original backing store (e.g. a temp-file log) goes away.

    :param content_object: A `content.Content` instance.
    :return: A `content.Content` with the same mime-type and a snapshot of
        the bytes.
    """
    snapshot = list(content_object.iter_bytes())
    return content.Content(content_object.content_type, lambda: snapshot)
def gather_details(source_dict, target_dict):
    """Merge the details from ``source_dict`` into ``target_dict``.

    Every detail in ``source_dict`` is evaluated (its bytes copied), so do
    not call this before the details are ready to be read. Name clashes are
    resolved by appending an increasing numeric suffix.

    :param source_dict: A dictionary of details to be gathered.
    :param target_dict: A dictionary into which details will be gathered.
    """
    for original_name, detail in source_dict.items():
        candidate = original_name
        disambiguator = itertools.count(1)
        while candidate in target_dict:
            candidate = '%s-%d' % (original_name, advance_iterator(disambiguator))
        target_dict[candidate] = _copy_content(detail)
class TestCase(unittest.TestCase):
    """Extensions to the basic TestCase.

    :ivar exception_handlers: Exceptions to catch from setUp, runTest and
        tearDown. This list is able to be modified at any time and consists of
        (exception_class, handler(case, result, exception_value)) pairs.
    :ivar force_failure: Force testtools.RunTest to fail the test after the
        test has completed.
    :cvar run_tests_with: A factory to make the ``RunTest`` to run tests with.
        Defaults to ``RunTest``. The factory is expected to take a test case
        and an optional list of exception handlers.
    """

    # Exception raised to signal a skip; interchangeable with unittest's
    # SkipTest when that is importable (see TestSkipped earlier in this
    # module).
    skipException = TestSkipped
    run_tests_with = RunTest
    def __init__(self, *args, **kwargs):
        """Construct a TestCase.

        :param testMethod: The name of the method to run.
        :keyword runTest: Optional class to use to execute the test. If not
            supplied ``RunTest`` is used. The instance to be used is created
            when run() is invoked, so will be fresh each time. Overrides
            ``TestCase.run_tests_with`` if given.
        """
        runTest = kwargs.pop('runTest', None)
        super(TestCase, self).__init__(*args, **kwargs)
        self._reset()
        test_method = self._get_test_method()
        # Runner priority: explicit runTest argument, then a @run_test_with
        # annotation on the test method, then the class-level default.
        if runTest is None:
            runTest = getattr(
                test_method, '_run_test_with', self.run_tests_with)
        self.__RunTest = runTest
        # Honour unittest's @expectedFailure marker by wrapping the method.
        if getattr(test_method, '__unittest_expecting_failure__', False):
            setattr(self, self._testMethodName, _expectedFailure(test_method))
        # Used internally for onException processing - used to gather extra
        # data from exceptions.
        self.__exception_handlers = []
        # Passed to RunTest to map exceptions to result actions
        self.exception_handlers = [
            (self.skipException, self._report_skip),
            (self.failureException, self._report_failure),
            (_ExpectedFailure, self._report_expected_failure),
            (_UnexpectedSuccess, self._report_unexpected_success),
            (Exception, self._report_error),
            ]
    def _reset(self):
        """Reset the test case as if it had never been run.

        Called from __init__ and again at the start of run(), so the same
        instance can be executed more than once.
        """
        self._cleanups = []
        self._unique_id_gen = itertools.count(1)
        # Generators to ensure unique traceback ids. Maps traceback label to
        # iterators.
        self._traceback_id_gens = {}
        self.__setup_called = False
        self.__teardown_called = False
        # __details is lazy-initialized so that a constructed-but-not-run
        # TestCase is safe to use with clone_test_with_new_id.
        self.__details = None
def __eq__(self, other):
eq = getattr(unittest.TestCase, '__eq__', None)
if eq is not None and not unittest.TestCase.__eq__(self, other):
return False
return self.__dict__ == other.__dict__
    def __repr__(self):
        # The memory address makes otherwise-identical cases distinguishable,
        # which makes testing testtools itself easier.
        return "<%s id=0x%0x>" % (self.id(), id(self))
    def addDetail(self, name, content_object):
        """Add a detail to be reported with this test's outcome.

        For more details see pydoc testtools.TestResult.

        :param name: The name to give this detail.
        :param content_object: The content object for this detail. See
            testtools.content for more detail.
        """
        # Details storage is created lazily (see _reset).
        if self.__details is None:
            self.__details = {}
        self.__details[name] = content_object
    def getDetails(self):
        """Get the details dict that will be reported with this test's outcome.

        For more details see pydoc testtools.TestResult.
        """
        # Created lazily so a constructed-but-never-run case stays cheap.
        if self.__details is None:
            self.__details = {}
        return self.__details
    def patch(self, obj, attribute, value):
        """Monkey-patch 'obj.attribute' to 'value' while the test is running.

        If 'obj' has no attribute, then the monkey-patch will still go ahead,
        and the attribute will be deleted instead of restored to its original
        value.

        :param obj: The object to patch. Can be anything.
        :param attribute: The attribute on 'obj' to patch.
        :param value: The value to set 'obj.attribute' to.
        """
        # monkey.patch applies the change and returns the undo callable,
        # which is registered to run at cleanup time.
        self.addCleanup(patch(obj, attribute, value))
    def shortDescription(self):
        """Return the test id rather than unittest's docstring first line."""
        return self.id()
    def skipTest(self, reason):
        """Cause this test to be skipped.

        This raises self.skipException(reason). skipException is raised
        to permit a skip to be triggered at any point (during setUp or the
        testMethod itself). The run() method catches skipException and
        translates that into a call to the result objects addSkip method.

        :param reason: The reason why the test is being skipped. This must
            support being cast into a unicode string for reporting.
        """
        raise self.skipException(reason)
    def skip(self, reason):
        """DEPRECATED: Use skipTest instead."""
        # Kept only for backwards compatibility; warn and delegate.
        warnings.warn(
            'Only valid in 1.8.1 and earlier. Use skipTest instead.',
            DeprecationWarning, stacklevel=2)
        self.skipTest(reason)
def _formatTypes(self, classOrIterable):
"""Format a class or a bunch of classes for display in an error."""
className = getattr(classOrIterable, '__name__', None)
if className is None:
className = ', '.join(klass.__name__ for klass in classOrIterable)
return className
    def addCleanup(self, function, *arguments, **keywordArguments):
        """Add a cleanup function to be called after tearDown.

        Functions added with addCleanup will be called in reverse order of
        adding after tearDown, or after setUp if setUp raises an exception.

        If a function added with addCleanup raises an exception, the error
        will be recorded as a test error, and the next cleanup will then be
        run.

        Cleanup functions are always called before a test finishes running,
        even if setUp is aborted by an exception.
        """
        self._cleanups.append((function, arguments, keywordArguments))
    def addOnException(self, handler):
        """Add a handler to be called when an exception occurs in test code.

        This handler cannot affect what result methods are called, and is
        called before any outcome is called on the result object. An example
        use for it is to add some diagnostic state to the test details dict
        which is expensive to calculate and not interesting for reporting in
        the success case.

        Handlers are called before the outcome (such as addFailure) that
        the exception has caused.

        Handlers are called in first-added, first-called order, and if they
        raise an exception, that will propagate out of the test running
        machinery, halting test processing. As a result, do not call code that
        may unreasonably fail.
        """
        self.__exception_handlers.append(handler)
    def _add_reason(self, reason):
        # Record a skip/expected-failure reason as a 'reason' text detail.
        self.addDetail('reason', content.text_content(reason))
def assertEqual(self, expected, observed, message=''):
"""Assert that 'expected' is equal to 'observed'.
:param expected: The expected value.
:param observed: The observed value.
:param message: An optional message to include in the error.
"""
matcher = _FlippedEquals(expected)
self.assertThat(observed, matcher, message)
failUnlessEqual = assertEquals = assertEqual
def assertIn(self, needle, haystack, message=''):
"""Assert that needle is in haystack."""
self.assertThat(haystack, Contains(needle), message)
def assertIsNone(self, observed, message=''):
"""Assert that 'observed' is equal to None.
:param observed: The observed value.
:param message: An optional message describing the error.
"""
matcher = Is(None)
self.assertThat(observed, matcher, message)
def assertIsNotNone(self, observed, message=''):
"""Assert that 'observed' is not equal to None.
:param observed: The observed value.
:param message: An optional message describing the error.
"""
matcher = Not(Is(None))
self.assertThat(observed, matcher, message)
def assertIs(self, expected, observed, message=''):
"""Assert that 'expected' is 'observed'.
:param expected: The expected value.
:param observed: The observed value.
:param message: An optional message describing the error.
"""
matcher = Is(expected)
self.assertThat(observed, matcher, message)
def assertIsNot(self, expected, observed, message=''):
"""Assert that 'expected' is not 'observed'."""
matcher = Not(Is(expected))
self.assertThat(observed, matcher, message)
def assertNotIn(self, needle, haystack, message=''):
"""Assert that needle is not in haystack."""
matcher = Not(Contains(needle))
self.assertThat(haystack, matcher, message)
def assertIsInstance(self, obj, klass, msg=None):
if isinstance(klass, tuple):
matcher = IsInstance(*klass)
else:
matcher = IsInstance(klass)
self.assertThat(obj, matcher, msg)
    def assertRaises(self, excClass, callableObj, *args, **kwargs):
        """Fail unless an exception of class excClass is thrown
        by callableObj when invoked with arguments args and keyword
        arguments kwargs. If a different type of exception is
        thrown, it will not be caught, and the test case will be
        deemed to have suffered an error, exactly as for an
        unexpected exception.

        :return: The raised exception instance, for further inspection.
        """
        class ReRaiseOtherTypes(object):
            # Re-raises any exception that is not an excClass subclass so
            # unrelated errors propagate unchanged.
            def match(self, matchee):
                if not issubclass(matchee[0], excClass):
                    reraise(*matchee)
        class CaptureMatchee(object):
            # Records the raised exception instance so it can be returned.
            def match(self, matchee):
                self.matchee = matchee[1]
        capture = CaptureMatchee()
        matcher = Raises(
            MatchesAll(ReRaiseOtherTypes(),
                MatchesException(excClass), capture))
        our_callable = Nullary(callableObj, *args, **kwargs)
        self.assertThat(our_callable, matcher)
        return capture.matchee
    failUnlessRaises = assertRaises
def assertThat(self, matchee, matcher, message='', verbose=False):
"""Assert that matchee is matched by matcher.
:param matchee: An object to match with matcher.
:param matcher: An object meeting the testtools.Matcher protocol.
:raises MismatchError: When matcher does not match thing.
"""
mismatch_error = self._matchHelper(matchee, matcher, message, verbose)
if mismatch_error is not None:
raise mismatch_error
def addDetailUniqueName(self, name, content_object):
"""Add a detail to the test, but ensure it's name is unique.
This method checks whether ``name`` conflicts with a detail that has
already been added to the test. If it does, it will modify ``name`` to
avoid the conflict.
For more details see pydoc testtools.TestResult.
:param name: The name to give this detail.
:param content_object: The content object for this detail. See
testtools.content for more detail.
"""
existing_details = self.getDetails()
full_name = name
suffix = 1
while full_name in existing_details:
full_name = "%s-%d" % (name, suffix)
suffix += 1
self.addDetail(full_name, content_object)
    def expectThat(self, matchee, matcher, message='', verbose=False):
        """Check that matchee is matched by matcher, but delay the assertion failure.

        This method behaves similarly to ``assertThat``, except that a failed
        match does not exit the test immediately. The rest of the test code
        will continue to run, and the test will be marked as failing after the
        test has finished.

        :param matchee: An object to match with matcher.
        :param matcher: An object meeting the testtools.Matcher protocol.
        :param message: If specified, show this message with any failed match.
        """
        mismatch_error = self._matchHelper(matchee, matcher, message, verbose)
        if mismatch_error is not None:
            # Record where the expectation failed; force_failure is read by
            # RunTest after the test body completes.
            self.addDetailUniqueName(
                "Failed expectation",
                content.StacktraceContent(
                    postfix_content="MismatchError: " + str(mismatch_error)
                )
            )
            self.force_failure = True
def _matchHelper(self, matchee, matcher, message, verbose):
matcher = Annotate.if_message(message, matcher)
mismatch = matcher.match(matchee)
if not mismatch:
return
for (name, value) in mismatch.get_details().items():
self.addDetailUniqueName(name, value)
return MismatchError(matchee, matcher, mismatch, verbose)
    def defaultTestResult(self):
        """Return a fresh testtools.TestResult (not unittest's default)."""
        return TestResult()
    def expectFailure(self, reason, predicate, *args, **kwargs):
        """Check that a test fails in a particular way.

        If the test fails in the expected way, a KnownFailure is caused. If it
        succeeds an UnexpectedSuccess is caused.

        The expected use of expectFailure is as a barrier at the point in a
        test where the test would fail. For example:
        >>> def test_foo(self):
        >>> self.expectFailure("1 should be 0", self.assertNotEqual, 1, 0)
        >>> self.assertEqual(1, 0)

        If in the future 1 were to equal 0, the expectFailure call can simply
        be removed. This separation preserves the original intent of the test
        while it is in the expectFailure mode.
        """
        # TODO: implement with matchers.
        self._add_reason(reason)
        try:
            predicate(*args, **kwargs)
        except self.failureException:
            # GZ 2010-08-12: Don't know how to avoid exc_info cycle as the new
            # unittest _ExpectedFailure wants old traceback
            exc_info = sys.exc_info()
            try:
                self._report_traceback(exc_info)
                raise _ExpectedFailure(exc_info)
            finally:
                # Break the frame/traceback reference cycle explicitly.
                del exc_info
        else:
            raise _UnexpectedSuccess(reason)
    def getUniqueInteger(self):
        """Get an integer unique to this test.

        Returns an integer that is guaranteed to be unique to this instance.
        Use this when you need an arbitrary integer in your test, or as a
        helper for custom anonymous factory methods.
        """
        # Backed by the itertools.count started in _reset.
        return advance_iterator(self._unique_id_gen)
def getUniqueString(self, prefix=None):
"""Get a string unique to this test.
Returns a string that is guaranteed to be unique to this instance. Use
this when you need an arbitrary string in your test, or as a helper
for custom anonymous factory methods.
:param prefix: The prefix of the string. If not provided, defaults
to the id of the tests.
:return: A bytestring of '<prefix>-<unique_int>'.
"""
if prefix is None:
prefix = self.id()
return '%s-%d' % (prefix, self.getUniqueInteger())
    def onException(self, exc_info, tb_label='traceback'):
        """Called when an exception propagates from test code.

        :seealso addOnException:
        """
        # Skips and expected-failure/unexpected-success control-flow
        # exceptions are not errors, so no traceback detail is recorded
        # for them.
        if exc_info[0] not in [
            self.skipException, _UnexpectedSuccess, _ExpectedFailure]:
            self._report_traceback(exc_info, tb_label=tb_label)
        for handler in self.__exception_handlers:
            handler(exc_info)
    @staticmethod
    def _report_error(self, result, err):
        # RunTest exception handler: 'self' is the test case passed in
        # explicitly (hence @staticmethod), 'err' the exception value.
        result.addError(self, details=self.getDetails())
    @staticmethod
    def _report_expected_failure(self, result, err):
        # RunTest exception handler for _ExpectedFailure (see __init__).
        result.addExpectedFailure(self, details=self.getDetails())
    @staticmethod
    def _report_failure(self, result, err):
        # RunTest exception handler for self.failureException.
        result.addFailure(self, details=self.getDetails())
@staticmethod
def _report_skip(self, result, err):
if err.args:
reason = err.args[0]
else:
reason = "no reason given."
self._add_reason(reason)
result.addSkip(self, details=self.getDetails())
    def _report_traceback(self, exc_info, tb_label='traceback'):
        # Detail names must be unique: the first traceback for a label is
        # stored under the label itself, later ones under 'label-1',
        # 'label-2', ... using a per-label counter kept across calls.
        id_gen = self._traceback_id_gens.setdefault(
            tb_label, itertools.count(0))
        while True:
            tb_id = advance_iterator(id_gen)
            if tb_id:
                tb_label = '%s-%d' % (tb_label, tb_id)
            if tb_label not in self.getDetails():
                break
        self.addDetail(tb_label, content.TracebackContent(
            exc_info, self, capture_locals=getattr(
                self, '__testtools_tb_locals__', False)))
    @staticmethod
    def _report_unexpected_success(self, result, err):
        # RunTest exception handler for _UnexpectedSuccess (see __init__).
        result.addUnexpectedSuccess(self, details=self.getDetails())
    def run(self, result=None):
        # Reset state first so the same instance can be run more than once.
        self._reset()
        try:
            run_test = self.__RunTest(
                self, self.exception_handlers, last_resort=self._report_error)
        except TypeError:
            # Backwards compat: if we can't call the constructor
            # with last_resort, try without that.
            run_test = self.__RunTest(self, self.exception_handlers)
        return run_test.run(result)
    def _run_setup(self, result):
        """Run the setUp function for this test.

        :param result: A testtools.TestResult to report activity to.
        :raises ValueError: If the base class setUp is not called, a
            ValueError is raised.
        """
        ret = self.setUp()
        # setUp() sets __setup_called; if it is still false, a subclass
        # overrode setUp without upcalling.
        if not self.__setup_called:
            raise ValueError(
                "In File: %s\n"
                "TestCase.setUp was not called. Have you upcalled all the "
                "way up the hierarchy from your setUp? e.g. Call "
                "super(%s, self).setUp() from your setUp()."
                % (sys.modules[self.__class__.__module__].__file__,
                   self.__class__.__name__))
        return ret
    def _run_teardown(self, result):
        """Run the tearDown function for this test.

        :param result: A testtools.TestResult to report activity to.
        :raises ValueError: If the base class tearDown is not called, a
            ValueError is raised.
        """
        ret = self.tearDown()
        # tearDown() sets __teardown_called; if it is still false, a
        # subclass overrode tearDown without upcalling.
        if not self.__teardown_called:
            raise ValueError(
                "In File: %s\n"
                "TestCase.tearDown was not called. Have you upcalled all the "
                "way up the hierarchy from your tearDown? e.g. Call "
                "super(%s, self).tearDown() from your tearDown()."
                % (sys.modules[self.__class__.__module__].__file__,
                   self.__class__.__name__))
        return ret
def _get_test_method(self):
method_name = getattr(self, '_testMethodName')
return getattr(self, method_name)
def _run_test_method(self, result):
"""Run the test method for this test.
:param result: A testtools.TestResult to report activity to.
:return: None.
"""
return self._get_test_method()()
    def useFixture(self, fixture):
        """Use fixture in a test case.

        The fixture will be setUp, and self.addCleanup(fixture.cleanUp) called.

        :param fixture: The fixture to use.
        :return: The fixture, after setting it up and scheduling a cleanup for
            it.
        """
        try:
            fixture.setUp()
        except:
            # setUp failed: capture its details for the report before
            # re-raising, so the failure is diagnosable.
            exc_info = sys.exc_info()
            try:
                gather_details(fixture.getDetails(), self.getDetails())
            except:
                # Report the setUp exception, then raise the error during
                # gather_details.
                self._report_traceback(exc_info)
                raise
            else:
                # Gather_details worked, so raise the exception setUp
                # encountered.
                reraise(*exc_info)
        else:
            self.addCleanup(fixture.cleanUp)
            # Also snapshot the fixture's details at cleanup time so they
            # are included in this test's details.
            self.addCleanup(
                gather_details, fixture.getDetails(), self.getDetails())
            return fixture
    def setUp(self):
        """Prepare the fixture; must be upcalled exactly once per run."""
        super(TestCase, self).setUp()
        # Guard against double invocation; _run_setup checks this flag to
        # verify that subclasses upcalled.
        if self.__setup_called:
            raise ValueError(
                "In File: %s\n"
                "TestCase.setUp was already called. Do not explicitly call "
                "setUp from your tests. In your own setUp, use super to call "
                "the base setUp."
                % (sys.modules[self.__class__.__module__].__file__,))
        self.__setup_called = True
    def tearDown(self):
        """Tear down the fixture; must be upcalled exactly once per run."""
        super(TestCase, self).tearDown()
        # Guard against double invocation; _run_teardown checks this flag
        # to verify that subclasses upcalled.
        if self.__teardown_called:
            raise ValueError(
                "In File: %s\n"
                "TestCase.tearDown was already called. Do not explicitly call "
                "tearDown from your tests. In your own tearDown, use super to "
                "call the base tearDown."
                % (sys.modules[self.__class__.__module__].__file__,))
        self.__teardown_called = True
class PlaceHolder(object):
    """A placeholder test.

    `PlaceHolder` implements much of the same interface as TestCase and is
    particularly suitable for being added to TestResults.
    """

    # Placeholders never run real assertions, so no failure exception type.
    failureException = None

    def __init__(self, test_id, short_description=None, details=None,
                 outcome='addSuccess', error=None, tags=None, timestamps=(None, None)):
        """Construct a `PlaceHolder`.

        :param test_id: The id of the placeholder test.
        :param short_description: The short description of the place holder
            test. If not provided, the id will be used instead.
        :param details: Outcome details as accepted by addSuccess etc.
        :param outcome: The outcome to call. Defaults to 'addSuccess'.
        :param error: Optional exc_info tuple; if given it is stored as the
            'traceback' detail (overriding any existing one).
        :param tags: Tags to report for the test.
        :param timestamps: A two-tuple of timestamps for the test start and
            finish. Each timestamp may be None to indicate it is not known.
        """
        self._test_id = test_id
        self._short_description = short_description
        self._details = details or {}
        self._outcome = outcome
        if error is not None:
            self._details['traceback'] = content.TracebackContent(error, self)
        tags = tags or frozenset()
        self._tags = frozenset(tags)
        self._timestamps = timestamps

    def __call__(self, result=None):
        # Calling a placeholder is equivalent to running it.
        return self.run(result=result)

    def __repr__(self):
        internal = [self._outcome, self._test_id, self._details]
        if self._short_description is not None:
            internal.append(self._short_description)
        return "<%s.%s(%s)>" % (
            self.__class__.__module__,
            self.__class__.__name__,
            ", ".join(map(repr, internal)))

    def __str__(self):
        return self.id()

    def countTestCases(self):
        # A placeholder always stands for exactly one test.
        return 1

    def debug(self):
        # Nothing to debug: a placeholder has no executable body.
        pass

    def id(self):
        return self._test_id

    def _result(self, result):
        # Normalise the result: default to a fresh TestResult, and wrap
        # plain unittest results so the extended API is available.
        if result is None:
            return TestResult()
        else:
            return ExtendedToOriginalDecorator(result)

    def run(self, result=None):
        """Report this placeholder's canned outcome to `result`."""
        result = self._result(result)
        if self._timestamps[0] is not None:
            result.time(self._timestamps[0])
        result.tags(self._tags, set())
        result.startTest(self)
        if self._timestamps[1] is not None:
            result.time(self._timestamps[1])
        # Dispatch to the configured outcome method (e.g. addSuccess).
        outcome = getattr(result, self._outcome)
        outcome(self, details=self._details)
        result.stopTest(self)
        result.tags(set(), self._tags)

    def shortDescription(self):
        if self._short_description is None:
            return self.id()
        else:
            return self._short_description
def ErrorHolder(test_id, error, short_description=None, details=None):
    """Construct an `ErrorHolder`.

    An ErrorHolder is simply a PlaceHolder whose canned outcome is
    ``addError``, carrying `error` as its traceback detail.

    :param test_id: The id of the test.
    :param error: The exc info tuple that will be used as the test's error.
        This is inserted into the details as 'traceback' - any existing key
        will be overridden.
    :param short_description: An optional short description of the test.
    :param details: Outcome details as accepted by addSuccess etc.
    """
    return PlaceHolder(
        test_id,
        short_description=short_description,
        details=details,
        outcome='addError',
        error=error,
    )
def _clone_test_id_callback(test, callback):
"""Copy a `TestCase`, and make it call callback for its id().
This is only expected to be used on tests that have been constructed but
not executed.
:param test: A TestCase instance.
:param callback: A callable that takes no parameters and returns a string.
:return: A copy.copy of the test with id=callback.
"""
newTest = copy.copy(test)
newTest.id = callback
return newTest
def clone_test_with_new_id(test, new_id):
    """Copy a `TestCase`, and give the copied test a new id.

    This is only expected to be used on tests that have been constructed but
    not executed.
    """
    def _fixed_id():
        return new_id
    return _clone_test_id_callback(test, _fixed_id)
def attr(*args):
    """Decorator for adding attributes to WithAttributes.

    :param args: The name of attributes to add.
    :return: A callable that when applied to a WithAttributes will
        alter its id to enumerate the added attributes.
    """
    def decorate(fn):
        # Lazily create the attribute set on first decoration so that
        # stacked @attr(...) decorators accumulate into one set.
        if not safe_hasattr(fn, '__testtools_attrs'):
            fn.__testtools_attrs = set()
        fn.__testtools_attrs.update(args)
        return fn
    return decorate
class WithAttributes(object):
    """A mix-in class for modifying test id by attributes.

    e.g.
    >>> class MyTest(WithAttributes, TestCase):
    ...    @attr('foo')
    ...    def test_bar(self):
    ...        pass
    >>> MyTest('test_bar').id()
    testtools.testcase.MyTest/test_bar[foo]
    """

    def id(self):
        """Return the normal id, suffixed with '[attr1,attr2,...]' if any."""
        orig = super(WithAttributes, self).id()
        # Depends on testtools.TestCase._get_test_method, be nice to support
        # plain unittest.
        fn = self._get_test_method()
        attributes = getattr(fn, '__testtools_attrs', None)
        if not attributes:
            return orig
        # Sorted for a deterministic id regardless of decoration order.
        return orig + '[' + ','.join(sorted(attributes)) + ']'
def skip(reason):
    """A decorator to skip unit tests.

    This is just syntactic sugar so users don't have to change any of their
    unit tests in order to migrate to python 2.7, which provides the
    @unittest.skip decorator.
    """
    def decorator(test_item):
        # This attribute signals to RunTest._run_core that the entire test
        # must be skipped - including setUp and tearDown. This makes us
        # compatible with testtools.skip* functions, which set the same
        # attributes.
        test_item.__unittest_skip__ = True
        test_item.__unittest_skip_why__ = reason
        if wraps is not None:
            @wraps(test_item)
            def skip_wrapper(*args, **kwargs):
                raise TestCase.skipException(reason)
        else:
            # Fallback when functools.wraps is unavailable.
            # NOTE(review): this wrapper takes the test case as its single
            # argument and calls its skip() method -- presumably only
            # reachable on very old Pythons; confirm before relying on it.
            def skip_wrapper(test_item):
                test_item.skip(reason)
        return skip_wrapper
    return decorator
def skipIf(condition, reason):
    """A decorator to skip a test if the condition is true."""
    if not condition:
        # Condition is false: decorate with the identity function.
        def _id(obj):
            return obj
        return _id
    return skip(reason)
def skipUnless(condition, reason):
    """A decorator to skip a test unless the condition is true."""
    if condition:
        # Condition holds: decorate with the identity function.
        def _id(obj):
            return obj
        return _id
    return skip(reason)
class ExpectedException:
    """A context manager to handle expected exceptions.

      def test_foo(self):
          with ExpectedException(ValueError, 'fo.*'):
              raise ValueError('foo')

    will pass. If the raised exception has a type other than the specified
    type, it will be re-raised. If it has a 'str()' that does not match the
    given regular expression, an AssertionError will be raised. If no
    exception is raised, an AssertionError will be raised.
    """

    def __init__(self, exc_type, value_re=None, msg=None):
        """Construct an `ExpectedException`.

        :param exc_type: The type of exception to expect.
        :param value_re: A regular expression to match against the
            'str()' of the raised exception.
        :param msg: An optional message explaining the failure.
        """
        self.exc_type = exc_type
        self.value_re = value_re
        self.msg = msg

    def __enter__(self):
        # Nothing to set up; all the work happens in __exit__.
        pass

    def __exit__(self, exc_type, exc_value, traceback):
        if exc_type is None:
            # No exception at all: that is a failure of the expectation.
            error_msg = '%s not raised.' % self.exc_type.__name__
            if self.msg:
                error_msg = error_msg + ' : ' + self.msg
            raise AssertionError(error_msg)
        if exc_type != self.exc_type:
            # Exact type match only: a different (even derived) exception
            # type is propagated unchanged by returning False.
            return False
        if self.value_re:
            matcher = MatchesException(self.exc_type, self.value_re)
            if self.msg:
                matcher = Annotate(self.msg, matcher)
            mismatch = matcher.match((exc_type, exc_value, traceback))
            if mismatch:
                raise AssertionError(mismatch.describe())
        # Returning True suppresses the (expected) exception.
        return True
class Nullary(object):
    """Turn a callable into a nullary callable.

    The advantage of this over ``lambda: f(*args, **kwargs)`` is that it
    preserves the ``repr()`` of ``f``.
    """

    def __init__(self, callable_object, *args, **kwargs):
        # Capture the callable together with its fixed arguments.
        self._callable_object = callable_object
        self._args = args
        self._kwargs = kwargs

    def __call__(self):
        # Apply the captured arguments when finally invoked.
        target = self._callable_object
        return target(*self._args, **self._kwargs)

    def __repr__(self):
        # Delegate so tracebacks and logs show the wrapped callable.
        return repr(self._callable_object)
class DecorateTestCaseResult(object):
    """Decorate a TestCase and permit customisation of the result for runs."""

    def __init__(self, case, callout, before_run=None, after_run=None):
        """Construct a DecorateTestCaseResult.

        :param case: The case to decorate.
        :param callout: A callback to call when run/__call__/debug is called.
            Must take a result parameter and return a result object to be used.
            For instance: lambda result: result.
        :param before_run: If set, call this with the decorated result before
            calling into the decorated run/__call__ method.
        :param after_run: If set, call this with the decorated result after
            calling into the decorated run/__call__ method.
        """
        self.decorated = case
        self.callout = callout
        self.before_run = before_run
        self.after_run = after_run

    def _run(self, result, run_method):
        # Let the callout substitute/augment the result object first.
        result = self.callout(result)
        if self.before_run:
            self.before_run(result)
        try:
            return run_method(result)
        finally:
            # after_run fires even when the run raises.
            if self.after_run:
                self.after_run(result)

    def run(self, result=None):
        # NOTE(review): the return value of _run is discarded here and in
        # __call__ -- confirm callers do not rely on a result being returned.
        self._run(result, self.decorated.run)

    def __call__(self, result=None):
        self._run(result, self.decorated)

    def __getattr__(self, name):
        # Everything not defined here is transparently proxied to the case.
        return getattr(self.decorated, name)

    def __delattr__(self, name):
        delattr(self.decorated, name)

    def __setattr__(self, name, value):
        # Our own four attributes live on this wrapper; everything else is
        # forwarded to the decorated case.
        if name in ('decorated', 'callout', 'before_run', 'after_run'):
            self.__dict__[name] = value
            return
        setattr(self.decorated, name, value)
# Signal that this is part of the testing framework, and that code from this
# should not normally appear in tracebacks.
__unittest = True
| 36.030593 | 85 | 0.639195 |
679a860befd09ad3df4385eb8f082e5c6560686d | 4,499 | py | Python | detectron/lib/python3.6/site-packages/torch/distributed/rpc/backend_registry.py | JustinBear99/Mask_RCNN | d43eaf7c6ebf29d4d6da796a0f7ff5561e21acff | [
"Apache-2.0"
] | null | null | null | detectron/lib/python3.6/site-packages/torch/distributed/rpc/backend_registry.py | JustinBear99/Mask_RCNN | d43eaf7c6ebf29d4d6da796a0f7ff5561e21acff | [
"Apache-2.0"
] | null | null | null | detectron/lib/python3.6/site-packages/torch/distributed/rpc/backend_registry.py | JustinBear99/Mask_RCNN | d43eaf7c6ebf29d4d6da796a0f7ff5561e21acff | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import datetime
import enum
import torch.distributed as dist
import torch.distributed.distributed_c10d as dc10d
from . import constants as rpc_constants
BackendValue = collections.namedtuple(
"BackendValue", ["construct_rpc_backend_options_handler", "init_backend_handler"]
)
def _backend_type_repr(self):
return "BackendType." + self.name
# Create an enum type, `BackendType`, with empty members.
BackendType = enum.Enum(value="BackendType", names={})
BackendType.__repr__ = _backend_type_repr
def register_backend(
    backend_name, construct_rpc_backend_options_handler, init_backend_handler
):
    """Registers a new RPC backend.

    Arguments:
        backend_name (str): backend string to identify the handler.
        construct_rpc_backend_options_handler (function):
            Handler that is invoked when
            rpc_backend.construct_rpc_backend_options(**dict) is called.
        init_backend_handler (function): Handler that is invoked when the
            `_init_rpc_backend()` function is called with a backend.
            This returns the agent.
    """
    global BackendType
    if backend_name in BackendType.__members__.keys():
        raise RuntimeError("RPC backend {}: already registered".format(backend_name))
    # Enum types cannot be extended in place, so create a new enum type,
    # `BackendType`, with extended members and rebind the module global.
    existing_enum_dict = {member.name: member.value for member in BackendType}
    extended_enum_dict = dict(
        {
            backend_name: BackendValue(
                construct_rpc_backend_options_handler=construct_rpc_backend_options_handler,
                init_backend_handler=init_backend_handler,
            )
        },
        **existing_enum_dict
    )
    BackendType = enum.Enum(value="BackendType", names=extended_enum_dict)
    # Re-attach the custom repr, which is lost when the enum is rebuilt.
    BackendType.__repr__ = _backend_type_repr
    return BackendType[backend_name]
def construct_rpc_backend_options(
    backend,
    rpc_timeout=rpc_constants.DEFAULT_RPC_TIMEOUT,
    init_method=rpc_constants.DEFAULT_INIT_METHOD,
    **kwargs
):
    """Build backend-specific RPC options via the backend's registered handler.

    :raises RuntimeError: if ``rpc_timeout`` is not a datetime.timedelta.
    """
    if not isinstance(rpc_timeout, datetime.timedelta):
        raise RuntimeError("`rpc_timeout` must be a `datetime.timedelta`.")
    # Delegate to the handler stored in the BackendValue for this backend.
    return backend.value.construct_rpc_backend_options_handler(
        rpc_timeout, init_method, **kwargs
    )
def init_backend(backend, *args, **kwargs):
    """Initialize the given RPC backend by delegating to its registered handler."""
    handler = backend.value.init_backend_handler
    return handler(*args, **kwargs)
def _process_group_construct_rpc_backend_options_handler(
    rpc_timeout,
    init_method,
    num_send_recv_threads=rpc_constants.DEFAULT_NUM_SEND_RECV_THREADS,
    **kwargs
):
    """Options-construction handler for the PROCESS_GROUP backend."""
    # Imported lazily to avoid a circular import with the rpc package.
    from . import ProcessGroupRpcBackendOptions

    return ProcessGroupRpcBackendOptions(
        rpc_timeout=rpc_timeout,
        init_method=init_method,
        num_send_recv_threads=num_send_recv_threads
    )
def _process_group_init_backend_handler(
    store, name, rank, world_size, rpc_backend_options
):
    """Init handler for the PROCESS_GROUP backend: set up Gloo and the agent."""
    # Imported lazily to avoid a circular import with the rpc package.
    from . import ProcessGroupAgent

    # Initialize ProcessGroup.
    if dist.is_initialized():
        raise RuntimeError(
            "Default process group must not be initialized before init_rpc."
        )

    process_group_timeout = rpc_constants.DEFAULT_PROCESS_GROUP_TIMEOUT

    dist.init_process_group(
        backend=dist.Backend.GLOO,
        store=store,
        rank=rank,
        world_size=world_size,
        timeout=process_group_timeout,
    )

    try:
        group = dc10d._get_default_group()
        assert group is not None, "Failed to initialize default ProcessGroup."

        # Cross-check caller-supplied rank/world_size (-1 means "unspecified")
        # against what the process group actually resolved to.
        if (rank != -1) and (rank != group.rank()):
            raise RuntimeError(
                "rank argument {} doesn't match pg rank {}".format(rank, group.rank())
            )
        if (world_size != -1) and (world_size != group.size()):
            raise RuntimeError(
                "world_size argument {} doesn't match pg size {}".format(
                    world_size, group.size()
                )
            )
        # TODO: add try-except and destroy _agent in all processes if any fails.
        return ProcessGroupAgent(
            name,
            group,
            rpc_backend_options.num_send_recv_threads,
            rpc_backend_options.rpc_timeout,
        )
    except Exception as ex:
        # Tear down the freshly created process group before propagating.
        dist.destroy_process_group()
        raise ex
register_backend(
"PROCESS_GROUP",
_process_group_construct_rpc_backend_options_handler,
_process_group_init_backend_handler,
)
| 31.027586 | 92 | 0.696377 |
8edaf8d948f9d6ce5ca8745fd0bc7d37137ae1c3 | 1,406 | py | Python | src/demo_predictor.py | bagustris/nkululeko | 87a4918b37e2a8599b81c4752c6750fc8adaa079 | [
"MIT"
] | null | null | null | src/demo_predictor.py | bagustris/nkululeko | 87a4918b37e2a8599b81c4752c6750fc8adaa079 | [
"MIT"
] | null | null | null | src/demo_predictor.py | bagustris/nkululeko | 87a4918b37e2a8599b81c4752c6750fc8adaa079 | [
"MIT"
] | null | null | null | import glob_conf
from util import Util
import numpy as np
class Demo_predictor():
    """Interactive demo loop: record audio, extract features, print label scores."""

    def __init__(self, model, feature_extractor, label_encoder):
        """Constructor setting up name and configuration"""
        self.model = model
        self.feature_extractor = feature_extractor
        self.label_encoder = label_encoder
        self.sr = 16000  # sample rate in Hz used for recording and playback
        self.target = glob_conf.config['DATA']['target']
        self.util = Util()

    def run_demo(self):
        """Loop forever: record 3 seconds, predict, and print per-label scores."""
        while True:
            signal = self.record_audio(3)
            # self.play_audio(signal)
            features = self.feature_extractor.extract_sample(signal, self.sr)
            features = features.to_numpy()
            result_dict = self.model.predict_sample(features)
            # Map the model's encoded class keys back to readable labels.
            dict_2 = {}
            for k in result_dict.keys():
                ak = np.array(int(k)).reshape(1)
                lab = self.label_encoder.inverse_transform(ak)[0]
                dict_2[lab] = f'{result_dict[k]:.3f}'
            print(dict_2)

    def record_audio(self, seconds):
        """Record `seconds` of mono audio; return it as a (1, n) array."""
        # BUG FIX: `sounddevice` was previously imported only inside
        # run_demo(), so calling this method raised NameError on `sd`.
        # A method-local import keeps the dependency lazy but available.
        import sounddevice as sd
        print("recording ...")
        y = sd.rec(int(seconds * self.sr), samplerate=self.sr, channels=1)
        sd.wait()
        y = y.T
        return y

    def play_audio(self, signal):
        """Play a recorded signal back through the default output (blocking)."""
        import sounddevice as sd  # see record_audio for why this is local
        print("playback ...")
        sd.play(signal.T, self.sr)
        status = sd.wait()
a0748ccfc2f7977bfe24c9a51cc5e11e72b0d502 | 752 | py | Python | adv/veronica.py.3.py | betairylia/dl | 4eb06837b13decc569066a43b58e205f3b716ff8 | [
"Apache-2.0"
] | null | null | null | adv/veronica.py.3.py | betairylia/dl | 4eb06837b13decc569066a43b58e205f3b716ff8 | [
"Apache-2.0"
] | null | null | null | adv/veronica.py.3.py | betairylia/dl | 4eb06837b13decc569066a43b58e205f3b716ff8 | [
"Apache-2.0"
] | null | null | null | import adv_test
import adv
from slot.d import *
def module():
    """Return the adventurer class exported by this module."""
    return Veronica
class Veronica(adv.Adv):
    # Free-form note shown with sim output.
    comment = ''
    # Ability 3 descriptor consumed by the framework ('prep' pin, 100%).
    a3 = ('prep','100%')
    conf = {}

    def pre(this):
        # Choose the crisis-modifier factor for s1 based on current HP.
        # NOTE(review): hp>=80% yields the *smaller* factor (0.2*0.2 vs
        # 0.3*0.3) -- confirm against the intended skill data.
        if this.condition('hp=80%'):
            this.s1boost = 1.25*0.2*0.2
        else:
            this.s1boost = 1.25*0.3*0.3

    def init(this):
        # Permanent team buff active from the start of the run.
        adv.Teambuff('last',2.28,1).on()

    def s1_proc(this, e):
        # Extra crisis damage on s1 use, scaled by the factor chosen in pre().
        if this.s1boost:
            this.dmg_make('o_s1_crisis', this.s1boost*10.84)
if __name__ == '__main__':
conf = {}
conf['acl'] = """
`s1, seq=5 and cancel or fsc
`s2, seq=5 and cancel
`s1, pin == 'prep'
`fs, seq=5 and s1.charged >= 2500
"""
adv_test.test(module(), conf, verbose=0)
| 19.282051 | 60 | 0.521277 |
697d506be361f29069421f53651fd36550d5c9f0 | 2,123 | py | Python | app/database/__init__.py | nyxgear/PSD-e-service-pronto-soccorso | 92eb0586c2cfb12a844a106b71911c80e8e3e57b | [
"MIT"
] | null | null | null | app/database/__init__.py | nyxgear/PSD-e-service-pronto-soccorso | 92eb0586c2cfb12a844a106b71911c80e8e3e57b | [
"MIT"
] | null | null | null | app/database/__init__.py | nyxgear/PSD-e-service-pronto-soccorso | 92eb0586c2cfb12a844a106b71911c80e8e3e57b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from .user import User
from .pronto_soccorso import ProntoSoccorso
from .pratica_assistenza import PraticaAssistenza
from .richiesta_soccorso import RichiestaSoccorso
def get(entity_class, attribute_name, attribute_value):
"""
Retrieve an instantiated entity from the database
:param entity_class: Class of the entity e.g. db.EntityClass
:param attribute_name: The attribute name on which to query
:param attribute_value: The attribute value to look for
:return:
"""
l = get_list(entity_class, attribute_name, attribute_value)
if len(l) > 0:
return l[0]
return None
def get_list(entity_class, attribute_name=None, attribute_value=None):
"""
Retrieve a list of instantiated entities from the database
:param entity_class: Class of the entity e.g. db.EntityClass
:param attribute_name: The attribute name on which to query
:param attribute_value: The attribute value to look for
:return:
"""
res = []
for x in entity_class._table:
if attribute_name is None and attribute_value is None:
res.append(entity_class(x))
else:
value = x.get(attribute_name)
if str(value) == str(attribute_value):
res.append(entity_class(x))
return res
def save(entity):
"""
Save the entity to the database
:param entity:
:return:
"""
e_id = entity.e_d.get('id')
if e_id:
print('::DB:: Updating {}<{}>'.format(entity.__class__.__name__, e_id))
# the entity already has an id
# search for the entity in the table and update it
for d in entity._table:
if d['id'] == e_id:
d.update(entity.to_dict())
else:
# it's a new entity, let's create a new id
last_entity = entity._table[-1]
new_id = last_entity.get('id') + 1
print('::DB:: Inserting {}<{}>'.format(entity.__class__.__name__, new_id))
entity.e_d['id'] = new_id
entity._table.append(entity.e_d)
| 30.328571 | 83 | 0.623175 |
9fb9db1cc35075c4ca7ca9851af7d272409b822f | 3,595 | py | Python | imposm/writer.py | olt/imposm | 796cc9e5aad6b61b0837a71875c8a2be84f1f92e | [
"Apache-2.0"
] | 36 | 2015-02-04T14:40:40.000Z | 2018-04-13T16:08:42.000Z | imposm/writer.py | Outdooractive/imposm | adb07dbea332307c5900fa355482df1e3073da62 | [
"Apache-2.0"
] | 10 | 2015-01-05T08:30:03.000Z | 2017-03-06T07:37:42.000Z | imposm/writer.py | Outdooractive/imposm | adb07dbea332307c5900fa355482df1e3073da62 | [
"Apache-2.0"
] | 7 | 2015-03-16T07:55:27.000Z | 2017-08-24T14:02:01.000Z | # Copyright 2011 Omniscale (http://omniscale.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from multiprocessing import Process, JoinableQueue
from imposm.dbimporter import NodeProcessTuple, WayProcessTuple, RelationProcessTuple
from imposm.dbimporter import NodeProcessDict, WayProcessDict, RelationProcessDict
from imposm.util import create_pool, shutdown_pool
import_processes = {
'tuple': {
'node': NodeProcessTuple,
'way': WayProcessTuple,
'relation': RelationProcessTuple,
},
'dict': {
'node': NodeProcessDict,
'way': WayProcessDict,
'relation': RelationProcessDict,
}
}
class ImposmWriter(object):
    """Writes cached OSM nodes/ways/relations into the database in parallel."""

    def __init__(self, mapping, db, cache, pool_size=2, logger=None, dry_run=False):
        self.mapping = mapping
        self.db = db
        # NOTE(review): `mapper` aliases `mapping`; the import workers read
        # `self.mapper` -- confirm whether `self.mapping` is still needed.
        self.mapper = mapping
        self.cache = cache
        self.pool_size = pool_size
        self.logger = logger
        self.dry_run = dry_run

    def _write_elem(self, proc, elem_cache, log, pool_size, proc_args=[]):
        # Feed tagged elements from the cache to a pool of importer
        # processes in batches of 128 via a bounded JoinableQueue.
        queue = JoinableQueue(16)
        importer = lambda: proc(queue, self.db, self.mapper, self.cache, self.dry_run, *proc_args)
        pool = create_pool(importer, pool_size)
        data = []
        for i, elem in enumerate(elem_cache):
            # Elements without tags are not mapped to any table; skip them.
            if elem.tags:
                data.append(elem)
            if len(data) >= 128:
                queue.put(data)
                log.log(i)
                data = []
        # Flush the final (possibly empty) partial batch.
        queue.put(data)
        shutdown_pool(pool, queue)
        log.stop()
        self.cache.close_all()

    def relations(self):
        self.cache.remove_inserted_way_cache()
        cache = self.cache.relations_cache()
        log = self.logger('relations', len(cache))
        # Side process that records which ways were consumed by relations,
        # so ways() can skip them later.
        inserted_way_queue = JoinableQueue()
        way_marker = WayMarkerProcess(inserted_way_queue, self.cache, self.logger)
        way_marker.start()

        self._write_elem(import_processes[self.db.insert_data_format]['relation'],
            cache, log, self.pool_size, [inserted_way_queue])
        # None is the shutdown sentinel for the marker process.
        inserted_way_queue.put(None)
        way_marker.join()

    def ways(self):
        cache = self.cache.ways_cache()
        log = self.logger('ways', len(cache))
        self._write_elem(import_processes[self.db.insert_data_format]['way'],
            cache, log, self.pool_size)
        self.cache.remove_inserted_way_cache()

    def nodes(self):
        cache = self.cache.nodes_cache()
        log = self.logger('nodes', len(cache))
        self._write_elem(import_processes[self.db.insert_data_format]['node'],
            cache, log, self.pool_size)
class WayMarkerProcess(Process):
    """Daemon process that records way ids consumed during relation import."""

    def __init__(self, queue, cache, logger):
        Process.__init__(self)
        self.daemon = True
        self.queue = queue
        self.cache = cache
        self.logger = logger

    def run(self):
        inserted_ways = self.cache.inserted_ways_cache('w')
        # Drain the queue until the None shutdown sentinel arrives.
        while True:
            osmid = self.queue.get()
            if osmid is None:
                break
            inserted_ways.put(osmid)

        inserted_ways.close()
| 32.681818 | 98 | 0.64395 |
258b60379aaa1cbd111d718299e84818d63411ec | 1,148 | py | Python | sa/profiles/Vitesse/VSC/get_ipv6_neighbor.py | xUndero/noc | 9fb34627721149fcf7064860bd63887e38849131 | [
"BSD-3-Clause"
] | 1 | 2019-09-20T09:36:48.000Z | 2019-09-20T09:36:48.000Z | sa/profiles/Vitesse/VSC/get_ipv6_neighbor.py | ewwwcha/noc | aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb | [
"BSD-3-Clause"
] | null | null | null | sa/profiles/Vitesse/VSC/get_ipv6_neighbor.py | ewwwcha/noc | aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Vitesse.VSC.get_ipv6_neighbor
# ---------------------------------------------------------------------
# Copyright (C) 2007-2018 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import re
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetipv6neighbor import IGetIPv6Neighbor
class Script(BaseScript):
name = "Vitesse.VSC.get_ipv6_neighbor"
interface = IGetIPv6Neighbor
rx_line = re.compile(
r"^(?P<ip>[0-9a-fA-F:\.]+) via (?P<interface>\S+):\s+" r"(?P<mac>\S+)\s+(?P<state>\S+)\s*$"
)
s_map = {
"INCMP": "incomplete",
"Permanent/REACHABLE": "reachable",
"STALE": "stale",
"DELAY": "delay",
"PROBE": "probe",
}
def execute(self, vrf=None):
# Get states
cmd = "show ipv6 neighbor"
r = self.cli(cmd, list_re=self.rx_line)
# Remap states
for n in r:
n["state"] = self.s_map[n["state"]]
return r
| 28 | 99 | 0.476481 |
ce9d88a4d547cc370ba3e96e37562a3a983972d5 | 2,880 | py | Python | server/website/website/set_default_knobs.py | mjain2/ottertune | 011e896bf89df831fb1189b1ab4c9a7d7dca420a | [
"Apache-2.0"
] | 1 | 2019-08-16T19:35:35.000Z | 2019-08-16T19:35:35.000Z | server/website/website/set_default_knobs.py | mjain2/ottertune | 011e896bf89df831fb1189b1ab4c9a7d7dca420a | [
"Apache-2.0"
] | null | null | null | server/website/website/set_default_knobs.py | mjain2/ottertune | 011e896bf89df831fb1189b1ab4c9a7d7dca420a | [
"Apache-2.0"
] | null | null | null | #
# OtterTune - set_default_knobs.py
#
# Copyright (c) 2017-18, Carnegie Mellon University Database Group
#
from .models import KnobCatalog, SessionKnob
from .types import DBMSType
def turn_knobs_off(session, knob_names):
    """Mark the named catalog knobs as non-tunable for `session`.

    Each knob is pinned to its catalog min/max with tunable=False.

    :param session: Session whose DBMS the knobs belong to.
    :param knob_names: iterable of knob names, e.g. "global.wal_buffers".
    """
    for knob_name in knob_names:
        knob = KnobCatalog.objects.filter(dbms=session.dbms, name=knob_name).first()
        if knob is None:
            # .first() returns None for unknown knob names; skip instead of
            # crashing on `knob.minval` below.
            continue
        SessionKnob.objects.create(session=session,
                                   knob=knob,
                                   minval=knob.minval,
                                   maxval=knob.maxval,
                                   tunable=False)
def set_knob_tuning_range(session, knob_name, minval, maxval):
    """Mark a catalog knob as tunable for `session` within [minval, maxval].

    :param session: Session whose DBMS the knob belongs to.
    :param knob_name: catalog knob name, e.g. "global.shared_buffers".
    :param minval: lower bound of the tuning range.
    :param maxval: upper bound of the tuning range.
    """
    knob = KnobCatalog.objects.filter(dbms=session.dbms, name=knob_name).first()
    if knob is None:
        # .first() returns None for unknown knob names; skip instead of
        # crashing on the create() call (consistent with turn_knobs_off).
        return
    SessionKnob.objects.create(session=session,
                               knob=knob,
                               minval=minval,
                               maxval=maxval,
                               tunable=True)
def set_default_knobs(session):
    """Seed `session` with curated tunable ranges for its DBMS.

    Only PostgreSQL 9.6 currently has curated defaults; other DBMS types
    and versions are left untouched.
    """
    if session.dbms.type == DBMSType.POSTGRES and session.dbms.version == '9.6':
        # Knobs excluded from tuning for this DBMS version.
        turn_knobs_off(session, ["global.backend_flush_after", "global.bgwriter_delay",
                                 "global.bgwriter_flush_after", "global.bgwriter_lru_multiplier",
                                 "global.checkpoint_flush_after", "global.commit_delay",
                                 "global.commit_siblings", "global.deadlock_timeout",
                                 "global.effective_io_concurrency", "global.from_collapse_limit",
                                 "global.join_collapse_limit", "global.maintenance_work_mem",
                                 "global.max_worker_processes",
                                 "global.min_parallel_relation_size", "global.min_wal_size",
                                 "global.seq_page_cost", "global.wal_buffers",
                                 "global.wal_sync_method", "global.wal_writer_delay",
                                 "global.wal_writer_flush_after"])

        # Knobs opened for tuning, with hand-picked (min, max) ranges;
        # byte-sized values are expressed in bytes, times in milliseconds.
        set_knob_tuning_range(session, "global.checkpoint_completion_target", 0.1, 0.9)
        set_knob_tuning_range(session, "global.checkpoint_timeout", 60000, 1800000)
        set_knob_tuning_range(session, "global.default_statistics_target", 100, 2048)
        set_knob_tuning_range(session, "global.effective_cache_size", 4294967296, 17179869184)
        set_knob_tuning_range(session, "global.max_parallel_workers_per_gather", 0, 8)
        set_knob_tuning_range(session, "global.max_wal_size", 268435456, 17179869184)
        set_knob_tuning_range(session, "global.random_page_cost", 1, 10)
        set_knob_tuning_range(session, "global.shared_buffers", 134217728, 12884901888)
        set_knob_tuning_range(session, "global.temp_buffers", 8388608, 1073741824)
        set_knob_tuning_range(session, "global.work_mem", 4194304, 1073741824)
0561b55e5c88d4ebd9b051ea7299144967a11df6 | 2,814 | py | Python | tests/conftest.py | chusloj/taxdata | 52c02c5f457145413983762280f8c1552b5ac740 | [
"CC0-1.0"
] | null | null | null | tests/conftest.py | chusloj/taxdata | 52c02c5f457145413983762280f8c1552b5ac740 | [
"CC0-1.0"
] | null | null | null | tests/conftest.py | chusloj/taxdata | 52c02c5f457145413983762280f8c1552b5ac740 | [
"CC0-1.0"
] | null | null | null | import os
import json
import pytest
import pandas as pd
from pathlib import Path
# TODO: revise the following constants when using new or revised CPS/PUF data
CPS_START_YEAR = 2014
PUF_START_YEAR = 2011
PUF_COUNT = 248591
LAST_YEAR = 2029
@pytest.fixture(scope="session")
def test_path():
    # Absolute path of the directory containing this conftest.
    return Path(__file__).resolve().parent
@pytest.fixture(scope="session")
def cps_path(test_path):
    # Path to the generated CPS microdata file.
    return Path(test_path, "..", "cps_data", "pycps", "cps.csv.gz")
@pytest.fixture(scope="session")
def growfactors(test_path):
    # Stage-1 growth factors, indexed by calendar YEAR.
    gf_path = Path(test_path, "..", "puf_stage1", "growfactors.csv")
    return pd.read_csv(gf_path, index_col="YEAR")
@pytest.fixture(scope="session")
def metadata(test_path):
    # Records metadata (variable availability/ranges) as a dict.
    with Path(test_path, "records_metadata.json").open("r") as mdf:
        return json.load(mdf)
@pytest.fixture(scope="session")
def cps_benefit_metadata(test_path):
    # CPS benefit-variable metadata as a dict.
    with Path(test_path, "cps_benefits_metadata.json").open("r") as mdf:
        return json.load(mdf)
@pytest.fixture(scope="session")
def cps(cps_path):
    """CPS microdata as a DataFrame."""
    # Dropped the unused `test_path` argument; `cps_path` already locates
    # the file.
    return pd.read_csv(cps_path)
@pytest.fixture(scope="session")
def cps_count(cps):
    """Number of records in the CPS file."""
    # Reuse the session-scoped `cps` fixture instead of re-reading the
    # CSV from disk a second time (the unused `test_path` arg is dropped).
    return cps.shape[0]
@pytest.fixture(scope="session")
def cps_start_year():
    # First data year of the CPS file (see module constant).
    return CPS_START_YEAR
@pytest.fixture(scope="session")
def puf_path(test_path):
    # Path to the PUF microdata file (may not exist in every checkout).
    return Path(test_path, "..", "puf_data", "puf.csv")
@pytest.fixture(scope="session")
def puf(puf_path):
    """PUF microdata as a DataFrame, or None when puf.csv is absent."""
    # Use the pathlib API on the Path fixture instead of os.path.isfile.
    if puf_path.is_file():
        return pd.read_csv(puf_path)
    return None
@pytest.fixture(scope="session")
def puf_count(puf_path):
    """Expected PUF record count, validated against the file when present.

    :raises ValueError: if the on-disk record count differs from PUF_COUNT.
    """
    # Use the pathlib API on the Path fixture instead of os.path.isfile.
    if puf_path.is_file():
        puf_df = pd.read_csv(puf_path)
        count = puf_df.shape[0]
        if count != PUF_COUNT:
            msg = "puf.shape[0] = {} not equal to PUF_COUNT = {}"
            raise ValueError(msg.format(count, PUF_COUNT))
    else:
        # Without the file, fall back to the known constant.
        count = PUF_COUNT
    return count
@pytest.fixture(scope="session")
def puf_start_year():
    # First data year of the PUF file (see module constant).
    return PUF_START_YEAR
@pytest.fixture(scope="session")
def last_year():
    # Final projection year (see module constant).
    return LAST_YEAR
@pytest.fixture(scope="session")
def cps_weights(test_path):
    # Stage-2 CPS weights as a DataFrame.
    cpsw_path = Path(test_path, "..", "cps_stage2", "cps_weights.csv.gz")
    return pd.read_csv(cpsw_path)
@pytest.fixture(scope="session")
def puf_weights(test_path):
    # Stage-2 PUF weights as a DataFrame.
    pufw_path = Path(test_path, "..", "puf_stage2", "puf_weights.csv.gz")
    return pd.read_csv(pufw_path)
@pytest.fixture(scope="session")
def cps_ratios(test_path):
    # Stage-3 CPS ratios are intentionally disabled; the fixture returns
    # None so dependent tests can detect their absence.
    # cpsr_path = os.path.join(test_path, '../cps_stage3/cps_ratios.csv')
    # return pd.read_csv(cpsr_path, index_col=0)
    return None
@pytest.fixture(scope="session")
def puf_ratios(test_path):
    # Stage-3 PUF ratios, indexed by the first CSV column.
    pufr_path = Path(test_path, "..", "puf_stage3", "puf_ratios.csv")
    return pd.read_csv(pufr_path, index_col=0)
| 23.847458 | 77 | 0.697939 |
24cc6bbe9c2ba4244015f3a01d338c4f2ba0c2dd | 663 | py | Python | manage.py | sd2001/FuzzyUrls | eb07258ab0fd3be2675521513c00e787f85f3729 | [
"MIT"
] | 6 | 2021-03-01T13:44:01.000Z | 2021-06-13T18:00:52.000Z | manage.py | sd2001/FuzzyUrls | eb07258ab0fd3be2675521513c00e787f85f3729 | [
"MIT"
] | null | null | null | manage.py | sd2001/FuzzyUrls | eb07258ab0fd3be2675521513c00e787f85f3729 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django's command-line utility with the arguments in ``sys.argv``."""
    # Point Django at the project settings before anything imports them.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Urlshort.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as err:
        # Re-raise with a friendlier hint, keeping the original as the cause.
        hint = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(hint) from err
    execute_from_command_line(sys.argv)
# Standard script entry point: run the Django CLI when executed directly.
if __name__ == '__main__':
    main()
| 30.136364 | 73 | 0.680241 |
06545d644ce38ea78f7648f63247f1fd330fd953 | 2,771 | py | Python | koans/about_string_manipulation.py | gtl-keyur-lakhlani/koans-playground | 210b6cb7b34bea0669f4109664dbcbe935f63d74 | [
"MIT"
] | null | null | null | koans/about_string_manipulation.py | gtl-keyur-lakhlani/koans-playground | 210b6cb7b34bea0669f4109664dbcbe935f63d74 | [
"MIT"
] | null | null | null | koans/about_string_manipulation.py | gtl-keyur-lakhlani/koans-playground | 210b6cb7b34bea0669f4109664dbcbe935f63d74 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutStringManipulation(Koan):
    """Koan exercises covering str.format, slicing, splitting, joining and case methods."""
    def test_use_format_to_interpolate_variables(self):
        value1 = 'one'
        value2 = 2
        string = "The values are {0} and {1}".format(value1, value2)
        self.assertEqual("The values are one and 2", string)
    def test_formatted_values_can_be_shown_in_any_order_or_be_repeated(self):
        value1 = 'doh'
        value2 = 'DOH'
        string = "The values are {1}, {0}, {0} and {1}!".format(value1, value2)
        self.assertEqual("The values are DOH, doh, doh and DOH!", string)
    def test_any_python_expression_may_be_interpolated(self):
        import math # import a standard python module with math functions
        decimal_places = 4
        # {0:.{1}f} formats argument 0 with a precision taken from argument 1.
        string = "The square root of 5 is {0:.{1}f}".format(math.sqrt(5),
                                                            decimal_places)
        self.assertEqual("The square root of 5 is 2.2361", string)
    def test_you_can_get_a_substring_from_a_string(self):
        string = "Bacon, lettuce and tomato"
        self.assertEqual("let", string[7:10])
    def test_you_can_get_a_single_character_from_a_string(self):
        string = "Bacon, lettuce and tomato"
        self.assertEqual("a", string[1])
    def test_single_characters_can_be_represented_by_integers(self):
        self.assertEqual(97, ord('a'))
        self.assertEqual(True, ord('b') == (ord('a') + 1))
    def test_strings_can_be_split(self):
        string = "Sausage Egg Cheese"
        words = string.split()
        self.assertListEqual(['Sausage', 'Egg', 'Cheese'], words)
    def test_strings_can_be_split_with_different_patterns(self):
        import re #import python regular expression library
        string = "the,rain;in,spain"
        pattern = re.compile(',|;')
        words = pattern.split(string)
        self.assertListEqual(['the', 'rain', 'in', 'spain'], words)
        # Pattern is a Python regular expression pattern which matches ',' or ';'
    def test_raw_strings_do_not_interpret_escape_characters(self):
        string = r'\n'
        self.assertNotEqual('\n', string)
        self.assertEqual('\\n', string)
        self.assertEqual(2, len(string))
        # Useful in regular expressions, file paths, URLs, etc.
    def test_strings_can_be_joined(self):
        words = ["Now", "is", "the", "time"]
        self.assertEqual("Now is the time", ' '.join(words))
    def test_strings_can_change_case(self):
        self.assertEqual("Guido", 'guido'.capitalize())
        self.assertEqual("GUIDO", 'guido'.upper())
        self.assertEqual("timbot", 'TimBot'.lower())
        self.assertEqual("Guido Van Rossum", 'guido van rossum'.title())
        self.assertEqual("tOtAlLy AwEsOmE", 'ToTaLlY aWeSoMe'.swapcase())
46e04ef9dc3d1268d1b46b11984d60998ed7d10a | 47 | py | Python | pytype/__version__.py | softagram/pytype | 9e720fbb08f517e27c6902e278b32af00e84d80e | [
"Apache-2.0"
] | null | null | null | pytype/__version__.py | softagram/pytype | 9e720fbb08f517e27c6902e278b32af00e84d80e | [
"Apache-2.0"
] | null | null | null | pytype/__version__.py | softagram/pytype | 9e720fbb08f517e27c6902e278b32af00e84d80e | [
"Apache-2.0"
] | null | null | null | # pylint: skip-file
__version__ = '2018.11.06'
| 15.666667 | 26 | 0.702128 |
c486cfb4bfbcd572d5370fff2b9c0dbd2c0c657e | 5,561 | py | Python | construct/debug.py | nrrpinto/construct | cfc980c6edfbe33c56015b736f59fb3155b51317 | [
"MIT"
] | 629 | 2015-01-06T03:01:56.000Z | 2022-03-23T13:13:26.000Z | construct/debug.py | nrrpinto/construct | cfc980c6edfbe33c56015b736f59fb3155b51317 | [
"MIT"
] | 897 | 2015-02-28T15:46:06.000Z | 2022-03-30T08:19:13.000Z | construct/debug.py | nrrpinto/construct | cfc980c6edfbe33c56015b736f59fb3155b51317 | [
"MIT"
] | 151 | 2015-01-08T16:36:24.000Z | 2022-03-10T16:59:49.000Z | from construct import *
from construct.lib import *
import sys, traceback, pdb, inspect
class Probe(Construct):
    r"""
    Debugging aid that prints the current context to stdout during parsing,
    building, or sizeof computation, optionally narrowed to a single context
    entry, and optionally peeking (without consuming) upcoming stream bytes.

    :param into: optional, None by default, or context lambda selecting one
        context entry to print instead of the whole context
    :param lookahead: optional, integer, number of bytes to peek from the stream

    Example::

        >>> d = Struct(
        ...     "count" / Byte,
        ...     "items" / Byte[this.count],
        ...     Probe(lookahead=32),
        ... )
        >>> d.parse(b"\x05abcde\x01\x02\x03")
    """

    def __init__(self, into=None, lookahead=None):
        super().__init__()
        # A Probe consumes no input and requires no value when building.
        self.flagbuildnone = True
        self.into = into
        self.lookahead = lookahead

    def _parse(self, stream, context, path):
        self.printout(stream, context, path)

    def _build(self, obj, stream, context, path):
        self.printout(stream, context, path)

    def _sizeof(self, context, path):
        # No stream exists during sizeof; a Probe always occupies zero bytes.
        self.printout(None, context, path)
        return 0

    def _emitparse(self, code):
        if self.into:
            return f"print({self.into})"
        return "print(this)"

    def _emitbuild(self, code):
        if self.into:
            return f"print({self.into})"
        return "print(this)"

    def printout(self, stream, context, path):
        """Dump the separator-delimited report: path, stream peek, context."""
        print("-" * 50)
        print("Probe, path is %s, into is %r" % (path, self.into, ))
        if self.lookahead and stream is not None:
            # Peek without consuming: remember position, read, then seek back.
            mark = stream.tell()
            peeked = stream.read(self.lookahead)
            stream.seek(mark)
            if peeked:
                print("Stream peek: (hexlified) %s..." % (hexlify(peeked), ))
            else:
                print("Stream peek: EOF reached")
        if context is not None:
            if not self.into:
                print(context)
            else:
                try:
                    print(self.into(context))
                except Exception:
                    print("Failed to compute %r on the context %r" % (self.into, context, ))
        print("-" * 50)
class Debugger(Subconstruct):
    r"""
    PDB-based debugger. When an exception occurs in the subcon, a debugger will appear and allow you to debug the error (and even fix it on-the-fly).
    :param subcon: Construct instance, subcon to debug
    Example::
        >>> Debugger(Byte[3]).build([])
        --------------------------------------------------
        Debugging exception of <Array: None>
        path is (building)
          File "/media/arkadiusz/MAIN/GitHub/construct/construct/debug.py", line 192, in _build
            return self.subcon._build(obj, stream, context, path)
          File "/media/arkadiusz/MAIN/GitHub/construct/construct/core.py", line 2149, in _build
            raise RangeError("expected %d elements, found %d" % (count, len(obj)))
        construct.core.RangeError: expected 3 elements, found 0
        > /media/arkadiusz/MAIN/GitHub/construct/construct/core.py(2149)_build()
        -> raise RangeError("expected %d elements, found %d" % (count, len(obj)))
        (Pdb) q
        --------------------------------------------------
    """
    def _parse(self, stream, context, path):
        try:
            return self.subcon._parse(stream, context, path)
        except Exception:
            # Sentinel: if the user assigns self.retval inside pdb, that value
            # is returned instead of re-raising the original exception.
            self.retval = NotImplemented
            self.handle_exc(path, msg="(you can set self.retval, which will be returned from method)")
            if self.retval is NotImplemented:
                raise
            else:
                return self.retval
    def _build(self, obj, stream, context, path):
        try:
            return self.subcon._build(obj, stream, context, path)
        except Exception:
            # NOTE(review): unlike _parse, the exception is not re-raised after
            # the pdb session here, so _build returns None — confirm intended.
            self.handle_exc(path)
    def _sizeof(self, context, path):
        try:
            return self.subcon._sizeof(context, path)
        except Exception:
            # Same swallow-after-debug behavior as _build.
            self.handle_exc(path)
    def _emitparse(self, code):
        # Compiled form delegates straight to the subcon (no debugging hooks).
        return self.subcon._compileparse(code)
    def _emitbuild(self, code):
        return self.subcon._compilebuild(code)
    def handle_exc(self, path, msg=None):
        """Print the traceback of the active exception and drop into pdb post-mortem."""
        print("--------------------------------------------------")
        print("Debugging exception of %r" % (self.subcon, ))
        print("path is %s" % (path, ))
        # Skip the first traceback line ("Traceback (most recent call last):").
        print("".join(traceback.format_exception(*sys.exc_info())[1:]))
        if msg:
            print(msg)
        pdb.post_mortem(sys.exc_info()[2])
        print("--------------------------------------------------")
| 34.540373 | 211 | 0.5107 |
df80aa54001c7d0bee7ee535d71832d8ed9ec2dd | 3,167 | py | Python | userbot/modules/spam.py | Adityar267/Man-Userbot | b3b99312f7f399044815aac1a96c81dd20ced335 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/modules/spam.py | Adityar267/Man-Userbot | b3b99312f7f399044815aac1a96c81dd20ced335 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/modules/spam.py | Adityar267/Man-Userbot | b3b99312f7f399044815aac1a96c81dd20ced335 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
import asyncio
from asyncio import sleep
from userbot import BOTLOG_CHATID
from userbot import CMD_HANDLER as cmd
from userbot import CMD_HELP
from userbot.utils import man_cmd
@man_cmd(pattern="cspam (.+)")
async def tmeme(e):
cspam = str(e.pattern_match.group(1))
message = cspam.replace(" ", "")
await e.delete()
for letter in message:
await e.respond(letter)
if BOTLOG_CHATID:
await e.client.send_message(
BOTLOG_CHATID, "#CSPAM\n" "TSpam was executed successfully"
)
@man_cmd(pattern="wspam (.+)")
async def t_meme(e):
wspam = str(e.pattern_match.group(1))
message = wspam.split()
await e.delete()
for word in message:
await e.respond(word)
if BOTLOG_CHATID:
await e.client.send_message(
BOTLOG_CHATID, "#WSPAM\n" "WSpam was executed successfully"
)
@man_cmd(pattern="spam (\d+) (.+)")
async def spammer(e):
counter = int(e.pattern_match.group(1))
spam_message = str(e.pattern_match.group(2))
await e.delete()
await asyncio.wait([e.respond(spam_message) for i in range(counter)])
if BOTLOG_CHATID:
await e.client.send_message(
BOTLOG_CHATID, "#SPAM\n" "Spam was executed successfully"
)
@man_cmd(pattern="picspam (\d+) (.+)")
async def tiny_pic_spam(e):
counter = int(e.pattern_match.group(1))
link = str(e.pattern_match.group(2))
await e.delete()
for _ in range(1, counter):
await e.client.send_file(e.chat_id, link)
if BOTLOG_CHATID:
await e.client.send_message(
BOTLOG_CHATID, "#PICSPAM\n" "PicSpam was executed successfully"
)
@man_cmd(pattern="delayspam (.*)")
async def spammer(e):
spamDelay = float(e.pattern_match.group(1).split(" ", 2)[0])
counter = int(e.pattern_match.group(1).split(" ", 2)[1])
spam_message = str(e.pattern_match.group(1).split(" ", 2)[2])
await e.delete()
for _ in range(1, counter):
await e.respond(spam_message)
await sleep(spamDelay)
if BOTLOG_CHATID:
await e.client.send_message(
BOTLOG_CHATID, "#DelaySPAM\n" "DelaySpam was executed successfully"
)
CMD_HELP.update(
{
"spam": f"**Plugin : **`spam`\
\n\n • **Syntax :** `{cmd}spam` <jumlah spam> <text>\
\n • **Function : **Membanjiri teks dalam obrolan!! \
\n\n • **Syntax :** `{cmd}cspam` <text>\
\n • **Function : **Spam surat teks dengan huruf. \
\n\n • **Syntax :** `{cmd}wspam` <text>\
\n • **Function : **Spam kata teks demi kata. \
\n\n • **Syntax :** `{cmd}picspam` <jumlah spam> <link image/gif>\
\n • **Function : **Spam Foto Seolah-olah spam teks tidak cukup !! \
\n\n • **Syntax :** `{cmd}delayspam` <detik> <jumlah spam> <text>\
\n • **Function : **Spam surat teks dengan huruf. \
\n\n • **NOTE : Spam dengan Risiko Anda sendiri**\
"
}
)
| 32.316327 | 79 | 0.614462 |
38aa894c10cda1feb523eafe29ecf625bac81a73 | 25,240 | py | Python | tests/unit/sagemaker/model/test_model.py | guoqiaoli1992/sagemaker-python-sdk | 24bdec115a364f39b1ba405e3047c5dda600db66 | [
"Apache-2.0"
] | null | null | null | tests/unit/sagemaker/model/test_model.py | guoqiaoli1992/sagemaker-python-sdk | 24bdec115a364f39b1ba405e3047c5dda600db66 | [
"Apache-2.0"
] | null | null | null | tests/unit/sagemaker/model/test_model.py | guoqiaoli1992/sagemaker-python-sdk | 24bdec115a364f39b1ba405e3047c5dda600db66 | [
"Apache-2.0"
] | null | null | null | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
from unittest.mock import MagicMock
import pytest
from mock import Mock, patch
import sagemaker
from sagemaker.model import FrameworkModel, Model
from sagemaker.huggingface.model import HuggingFaceModel
from sagemaker.jumpstart.constants import JUMPSTART_BUCKET_NAME_SET, JUMPSTART_RESOURCE_BASE_NAME
from sagemaker.jumpstart.enums import JumpStartTag
from sagemaker.mxnet.model import MXNetModel
from sagemaker.pytorch.model import PyTorchModel
from sagemaker.sklearn.model import SKLearnModel
from sagemaker.tensorflow.model import TensorFlowModel
from sagemaker.xgboost.model import XGBoostModel
from sagemaker.workflow.properties import Properties
MODEL_DATA = "s3://bucket/model.tar.gz"
MODEL_IMAGE = "mi"
TIMESTAMP = "2017-10-10-14-14-15"
MODEL_NAME = "{}-{}".format(MODEL_IMAGE, TIMESTAMP)
INSTANCE_COUNT = 2
INSTANCE_TYPE = "ml.c4.4xlarge"
ROLE = "some-role"
REGION = "us-west-2"
BUCKET_NAME = "some-bucket-name"
GIT_REPO = "https://github.com/aws/sagemaker-python-sdk.git"
BRANCH = "test-branch-git-config"
COMMIT = "ae15c9d7d5b97ea95ea451e4662ee43da3401d73"
ENTRY_POINT_INFERENCE = "inference.py"
SCRIPT_URI = "s3://codebucket/someprefix/sourcedir.tar.gz"
IMAGE_URI = "763104351884.dkr.ecr.us-west-2.amazonaws.com/pytorch-inference:1.9.0-gpu-py38"
class DummyFrameworkModel(FrameworkModel):
    """Minimal FrameworkModel subclass used as a stand-in in these tests."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
@pytest.fixture()
def sagemaker_session():
    """Return a MagicMock standing in for sagemaker.session.Session."""
    boto_mock = Mock(name="boto_session", region_name=REGION)
    sms = MagicMock(
        name="sagemaker_session",
        boto_session=boto_mock,
        boto_region_name=REGION,
        config=None,
        local_mode=False,
        s3_client=None,
        s3_resource=None,
    )
    # default_bucket() must return a concrete name, not an auto-mock.
    sms.default_bucket = Mock(name="default_bucket", return_value=BUCKET_NAME)
    return sms
@patch("shutil.rmtree", MagicMock())
@patch("tarfile.open", MagicMock())
@patch("os.listdir", MagicMock(return_value=[ENTRY_POINT_INFERENCE]))
def test_prepare_container_def_with_model_src_s3_returns_correct_url(sagemaker_session):
model = Model(
entry_point=ENTRY_POINT_INFERENCE,
role=ROLE,
sagemaker_session=sagemaker_session,
source_dir=SCRIPT_URI,
image_uri=MODEL_IMAGE,
model_data=Properties("Steps.MyStep"),
)
container_def = model.prepare_container_def(INSTANCE_TYPE, "ml.eia.medium")
assert container_def["Environment"]["SAGEMAKER_SUBMIT_DIRECTORY"] == SCRIPT_URI
def test_prepare_container_def_with_model_data():
model = Model(MODEL_IMAGE)
container_def = model.prepare_container_def(INSTANCE_TYPE, "ml.eia.medium")
expected = {"Image": MODEL_IMAGE, "Environment": {}}
assert expected == container_def
def test_prepare_container_def_with_model_data_and_env():
env = {"FOO": "BAR"}
model = Model(MODEL_IMAGE, MODEL_DATA, env=env)
expected = {"Image": MODEL_IMAGE, "Environment": env, "ModelDataUrl": MODEL_DATA}
container_def = model.prepare_container_def(INSTANCE_TYPE, "ml.eia.medium")
assert expected == container_def
container_def = model.prepare_container_def()
assert expected == container_def
def test_prepare_container_def_with_image_config():
image_config = {"RepositoryAccessMode": "Vpc"}
model = Model(MODEL_IMAGE, image_config=image_config)
expected = {
"Image": MODEL_IMAGE,
"ImageConfig": {"RepositoryAccessMode": "Vpc"},
"Environment": {},
}
container_def = model.prepare_container_def()
assert expected == container_def
def test_model_enable_network_isolation():
model = Model(MODEL_IMAGE, MODEL_DATA)
assert model.enable_network_isolation() is False
model = Model(MODEL_IMAGE, MODEL_DATA, enable_network_isolation=True)
assert model.enable_network_isolation()
@patch("sagemaker.model.Model.prepare_container_def")
def test_create_sagemaker_model(prepare_container_def, sagemaker_session):
container_def = {"Image": MODEL_IMAGE, "Environment": {}, "ModelDataUrl": MODEL_DATA}
prepare_container_def.return_value = container_def
model = Model(MODEL_DATA, MODEL_IMAGE, name=MODEL_NAME, sagemaker_session=sagemaker_session)
model._create_sagemaker_model()
prepare_container_def.assert_called_with(
None, accelerator_type=None, serverless_inference_config=None
)
sagemaker_session.create_model.assert_called_with(
MODEL_NAME, None, container_def, vpc_config=None, enable_network_isolation=False, tags=None
)
@patch("sagemaker.model.Model.prepare_container_def")
def test_create_sagemaker_model_instance_type(prepare_container_def, sagemaker_session):
model = Model(MODEL_DATA, MODEL_IMAGE, name=MODEL_NAME, sagemaker_session=sagemaker_session)
model._create_sagemaker_model(INSTANCE_TYPE)
prepare_container_def.assert_called_with(
INSTANCE_TYPE, accelerator_type=None, serverless_inference_config=None
)
@patch("sagemaker.model.Model.prepare_container_def")
def test_create_sagemaker_model_accelerator_type(prepare_container_def, sagemaker_session):
model = Model(MODEL_IMAGE, MODEL_DATA, name=MODEL_NAME, sagemaker_session=sagemaker_session)
accelerator_type = "ml.eia.medium"
model._create_sagemaker_model(INSTANCE_TYPE, accelerator_type=accelerator_type)
prepare_container_def.assert_called_with(
INSTANCE_TYPE, accelerator_type=accelerator_type, serverless_inference_config=None
)
@patch("sagemaker.model.Model.prepare_container_def")
def test_create_sagemaker_model_tags(prepare_container_def, sagemaker_session):
container_def = {"Image": MODEL_IMAGE, "Environment": {}, "ModelDataUrl": MODEL_DATA}
prepare_container_def.return_value = container_def
model = Model(MODEL_IMAGE, MODEL_DATA, name=MODEL_NAME, sagemaker_session=sagemaker_session)
tags = {"Key": "foo", "Value": "bar"}
model._create_sagemaker_model(INSTANCE_TYPE, tags=tags)
sagemaker_session.create_model.assert_called_with(
MODEL_NAME, None, container_def, vpc_config=None, enable_network_isolation=False, tags=tags
)
@patch("sagemaker.model.Model.prepare_container_def")
@patch("sagemaker.utils.name_from_base")
@patch("sagemaker.utils.base_name_from_image")
def test_create_sagemaker_model_optional_model_params(
base_name_from_image, name_from_base, prepare_container_def, sagemaker_session
):
container_def = {"Image": MODEL_IMAGE, "Environment": {}, "ModelDataUrl": MODEL_DATA}
prepare_container_def.return_value = container_def
vpc_config = {"Subnets": ["123"], "SecurityGroupIds": ["456", "789"]}
model = Model(
MODEL_IMAGE,
MODEL_DATA,
name=MODEL_NAME,
role=ROLE,
vpc_config=vpc_config,
enable_network_isolation=True,
sagemaker_session=sagemaker_session,
)
model._create_sagemaker_model(INSTANCE_TYPE)
base_name_from_image.assert_not_called()
name_from_base.assert_not_called()
sagemaker_session.create_model.assert_called_with(
MODEL_NAME,
ROLE,
container_def,
vpc_config=vpc_config,
enable_network_isolation=True,
tags=None,
)
@patch("sagemaker.model.Model.prepare_container_def")
@patch("sagemaker.utils.name_from_base", return_value=MODEL_NAME)
@patch("sagemaker.utils.base_name_from_image")
def test_create_sagemaker_model_generates_model_name(
base_name_from_image, name_from_base, prepare_container_def, sagemaker_session
):
container_def = {"Image": MODEL_IMAGE, "Environment": {}, "ModelDataUrl": MODEL_DATA}
prepare_container_def.return_value = container_def
model = Model(
MODEL_IMAGE,
MODEL_DATA,
sagemaker_session=sagemaker_session,
)
model._create_sagemaker_model(INSTANCE_TYPE)
base_name_from_image.assert_called_with(MODEL_IMAGE)
name_from_base.assert_called_with(base_name_from_image.return_value)
sagemaker_session.create_model.assert_called_with(
MODEL_NAME,
None,
container_def,
vpc_config=None,
enable_network_isolation=False,
tags=None,
)
@patch("sagemaker.model.Model.prepare_container_def")
@patch("sagemaker.utils.name_from_base", return_value=MODEL_NAME)
@patch("sagemaker.utils.base_name_from_image")
def test_create_sagemaker_model_generates_model_name_each_time(
base_name_from_image, name_from_base, prepare_container_def, sagemaker_session
):
container_def = {"Image": MODEL_IMAGE, "Environment": {}, "ModelDataUrl": MODEL_DATA}
prepare_container_def.return_value = container_def
model = Model(
MODEL_IMAGE,
MODEL_DATA,
sagemaker_session=sagemaker_session,
)
model._create_sagemaker_model(INSTANCE_TYPE)
model._create_sagemaker_model(INSTANCE_TYPE)
base_name_from_image.assert_called_once_with(MODEL_IMAGE)
name_from_base.assert_called_with(base_name_from_image.return_value)
assert 2 == name_from_base.call_count
@patch("sagemaker.session.Session")
@patch("sagemaker.local.LocalSession")
def test_create_sagemaker_model_creates_correct_session(local_session, session):
model = Model(MODEL_IMAGE, MODEL_DATA)
model._create_sagemaker_model("local")
assert model.sagemaker_session == local_session.return_value
model = Model(MODEL_IMAGE, MODEL_DATA)
model._create_sagemaker_model("ml.m5.xlarge")
assert model.sagemaker_session == session.return_value
@patch("sagemaker.model.Model._create_sagemaker_model")
def test_model_create_transformer(create_sagemaker_model, sagemaker_session):
model_name = "auto-generated-model"
model = Model(MODEL_IMAGE, MODEL_DATA, name=model_name, sagemaker_session=sagemaker_session)
instance_type = "ml.m4.xlarge"
transformer = model.transformer(instance_count=1, instance_type=instance_type)
create_sagemaker_model.assert_called_with(instance_type, tags=None)
assert isinstance(transformer, sagemaker.transformer.Transformer)
assert transformer.model_name == model_name
assert transformer.instance_type == instance_type
assert transformer.instance_count == 1
assert transformer.sagemaker_session == sagemaker_session
assert transformer.base_transform_job_name == model_name
assert transformer.strategy is None
assert transformer.env is None
assert transformer.output_path is None
assert transformer.output_kms_key is None
assert transformer.accept is None
assert transformer.assemble_with is None
assert transformer.volume_kms_key is None
assert transformer.max_concurrent_transforms is None
assert transformer.max_payload is None
assert transformer.tags is None
@patch("sagemaker.model.Model._create_sagemaker_model")
def test_model_create_transformer_optional_params(create_sagemaker_model, sagemaker_session):
model = Model(MODEL_IMAGE, MODEL_DATA, sagemaker_session=sagemaker_session)
instance_type = "ml.m4.xlarge"
strategy = "MultiRecord"
assemble_with = "Line"
output_path = "s3://bucket/path"
kms_key = "key"
accept = "text/csv"
env = {"test": True}
max_concurrent_transforms = 1
max_payload = 6
tags = [{"Key": "k", "Value": "v"}]
transformer = model.transformer(
instance_count=1,
instance_type=instance_type,
strategy=strategy,
assemble_with=assemble_with,
output_path=output_path,
output_kms_key=kms_key,
accept=accept,
env=env,
max_concurrent_transforms=max_concurrent_transforms,
max_payload=max_payload,
tags=tags,
volume_kms_key=kms_key,
)
create_sagemaker_model.assert_called_with(instance_type, tags=tags)
assert isinstance(transformer, sagemaker.transformer.Transformer)
assert transformer.strategy == strategy
assert transformer.assemble_with == assemble_with
assert transformer.output_path == output_path
assert transformer.output_kms_key == kms_key
assert transformer.accept == accept
assert transformer.max_concurrent_transforms == max_concurrent_transforms
assert transformer.max_payload == max_payload
assert transformer.env == env
assert transformer.tags == tags
assert transformer.volume_kms_key == kms_key
@patch("sagemaker.model.Model._create_sagemaker_model", Mock())
def test_model_create_transformer_network_isolation(sagemaker_session):
model = Model(
MODEL_IMAGE, MODEL_DATA, sagemaker_session=sagemaker_session, enable_network_isolation=True
)
transformer = model.transformer(1, "ml.m4.xlarge", env={"should_be": "overwritten"})
assert transformer.env is None
@patch("sagemaker.model.Model._create_sagemaker_model", Mock())
def test_model_create_transformer_base_name(sagemaker_session):
model = Model(MODEL_IMAGE, MODEL_DATA, sagemaker_session=sagemaker_session)
base_name = "foo"
model._base_name = base_name
transformer = model.transformer(1, "ml.m4.xlarge")
assert base_name == transformer.base_transform_job_name
@patch("sagemaker.session.Session")
@patch("sagemaker.local.LocalSession")
def test_transformer_creates_correct_session(local_session, session):
model = Model(MODEL_IMAGE, MODEL_DATA, sagemaker_session=None)
transformer = model.transformer(instance_count=1, instance_type="local")
assert model.sagemaker_session == local_session.return_value
assert transformer.sagemaker_session == local_session.return_value
model = Model(MODEL_IMAGE, MODEL_DATA, sagemaker_session=None)
transformer = model.transformer(instance_count=1, instance_type="ml.m5.xlarge")
assert model.sagemaker_session == session.return_value
assert transformer.sagemaker_session == session.return_value
def test_delete_model(sagemaker_session):
    """delete_model() forwards the model name to the session."""
    model = Model(MODEL_IMAGE, MODEL_DATA, name=MODEL_NAME, sagemaker_session=sagemaker_session)
    model.delete_model()
    sagemaker_session.delete_model.assert_called_with(model.name)
def test_delete_model_no_name(sagemaker_session):
    """delete_model() on an unnamed (never-created) model raises ValueError."""
    model = Model(MODEL_IMAGE, MODEL_DATA, sagemaker_session=sagemaker_session)
    with pytest.raises(
        ValueError, match="The SageMaker model must be created first before attempting to delete."
    ):
        model.delete_model()
    sagemaker_session.delete_model.assert_not_called()
@patch("time.strftime", MagicMock(return_value=TIMESTAMP))
@patch("sagemaker.utils.repack_model")
def test_script_mode_model_same_calls_as_framework(repack_model, sagemaker_session):
t = Model(
entry_point=ENTRY_POINT_INFERENCE,
role=ROLE,
sagemaker_session=sagemaker_session,
source_dir=SCRIPT_URI,
image_uri=IMAGE_URI,
model_data=MODEL_DATA,
)
t.deploy(instance_type=INSTANCE_TYPE, initial_instance_count=INSTANCE_COUNT)
assert len(sagemaker_session.create_model.call_args_list) == 1
assert len(sagemaker_session.endpoint_from_production_variants.call_args_list) == 1
assert len(repack_model.call_args_list) == 1
generic_model_create_model_args = sagemaker_session.create_model.call_args_list
generic_model_endpoint_from_production_variants_args = (
sagemaker_session.endpoint_from_production_variants.call_args_list
)
generic_model_repack_model_args = repack_model.call_args_list
sagemaker_session.create_model.reset_mock()
sagemaker_session.endpoint_from_production_variants.reset_mock()
repack_model.reset_mock()
t = DummyFrameworkModel(
entry_point=ENTRY_POINT_INFERENCE,
role=ROLE,
sagemaker_session=sagemaker_session,
source_dir=SCRIPT_URI,
image_uri=IMAGE_URI,
model_data=MODEL_DATA,
)
t.deploy(instance_type=INSTANCE_TYPE, initial_instance_count=INSTANCE_COUNT)
assert generic_model_create_model_args == sagemaker_session.create_model.call_args_list
assert (
generic_model_endpoint_from_production_variants_args
== sagemaker_session.endpoint_from_production_variants.call_args_list
)
assert generic_model_repack_model_args == repack_model.call_args_list
@patch("sagemaker.git_utils.git_clone_repo")
@patch("sagemaker.model.fw_utils.tar_and_upload_dir")
def test_git_support_succeed_model_class(tar_and_upload_dir, git_clone_repo, sagemaker_session):
git_clone_repo.side_effect = lambda gitconfig, entrypoint, sourcedir, dependency: {
"entry_point": "entry_point",
"source_dir": "/tmp/repo_dir/source_dir",
"dependencies": ["/tmp/repo_dir/foo", "/tmp/repo_dir/bar"],
}
entry_point = "entry_point"
source_dir = "source_dir"
dependencies = ["foo", "bar"]
git_config = {"repo": GIT_REPO, "branch": BRANCH, "commit": COMMIT}
model = Model(
sagemaker_session=sagemaker_session,
entry_point=entry_point,
source_dir=source_dir,
dependencies=dependencies,
git_config=git_config,
image_uri=IMAGE_URI,
)
model.prepare_container_def(instance_type=INSTANCE_TYPE)
git_clone_repo.assert_called_with(git_config, entry_point, source_dir, dependencies)
assert model.entry_point == "entry_point"
assert model.source_dir == "/tmp/repo_dir/source_dir"
assert model.dependencies == ["/tmp/repo_dir/foo", "/tmp/repo_dir/bar"]
@patch("sagemaker.utils.repack_model")
def test_script_mode_model_tags_jumpstart_models(repack_model, sagemaker_session):
jumpstart_source_dir = f"s3://{list(JUMPSTART_BUCKET_NAME_SET)[0]}/source_dirs/source.tar.gz"
t = Model(
entry_point=ENTRY_POINT_INFERENCE,
role=ROLE,
sagemaker_session=sagemaker_session,
source_dir=jumpstart_source_dir,
image_uri=IMAGE_URI,
model_data=MODEL_DATA,
)
t.deploy(instance_type=INSTANCE_TYPE, initial_instance_count=INSTANCE_COUNT)
assert sagemaker_session.create_model.call_args_list[0][1]["tags"] == [
{
"Key": JumpStartTag.INFERENCE_SCRIPT_URI.value,
"Value": jumpstart_source_dir,
},
]
assert sagemaker_session.endpoint_from_production_variants.call_args_list[0][1]["tags"] == [
{
"Key": JumpStartTag.INFERENCE_SCRIPT_URI.value,
"Value": jumpstart_source_dir,
},
]
non_jumpstart_source_dir = "s3://blah/blah/blah"
t = Model(
entry_point=ENTRY_POINT_INFERENCE,
role=ROLE,
sagemaker_session=sagemaker_session,
source_dir=non_jumpstart_source_dir,
image_uri=IMAGE_URI,
model_data=MODEL_DATA,
)
t.deploy(instance_type=INSTANCE_TYPE, initial_instance_count=INSTANCE_COUNT)
assert {
"Key": JumpStartTag.INFERENCE_SCRIPT_URI.value,
"Value": non_jumpstart_source_dir,
} not in sagemaker_session.create_model.call_args_list[0][1]["tags"]
assert {
"Key": JumpStartTag.INFERENCE_SCRIPT_URI.value,
"Value": non_jumpstart_source_dir,
} not in sagemaker_session.create_model.call_args_list[0][1]["tags"]
@patch("sagemaker.utils.repack_model")
@patch("sagemaker.fw_utils.tar_and_upload_dir")
def test_all_framework_models_add_jumpstart_tags(
repack_model, tar_and_uload_dir, sagemaker_session
):
framework_model_classes_to_kwargs = {
PyTorchModel: {"framework_version": "1.5.0", "py_version": "py3"},
TensorFlowModel: {
"framework_version": "2.3",
},
HuggingFaceModel: {
"pytorch_version": "1.7.1",
"py_version": "py36",
"transformers_version": "4.6.1",
},
MXNetModel: {"framework_version": "1.7.0", "py_version": "py3"},
SKLearnModel: {
"framework_version": "0.23-1",
},
XGBoostModel: {
"framework_version": "1.3-1",
},
}
jumpstart_model_dir = f"s3://{list(JUMPSTART_BUCKET_NAME_SET)[0]}/model_dirs/model.tar.gz"
for framework_model_class, kwargs in framework_model_classes_to_kwargs.items():
framework_model_class(
entry_point=ENTRY_POINT_INFERENCE,
role=ROLE,
sagemaker_session=sagemaker_session,
model_data=jumpstart_model_dir,
**kwargs,
).deploy(instance_type="ml.m2.xlarge", initial_instance_count=INSTANCE_COUNT)
assert {
"Key": JumpStartTag.INFERENCE_MODEL_URI.value,
"Value": jumpstart_model_dir,
} in sagemaker_session.create_model.call_args_list[0][1]["tags"]
assert {
"Key": JumpStartTag.INFERENCE_MODEL_URI.value,
"Value": jumpstart_model_dir,
} in sagemaker_session.endpoint_from_production_variants.call_args_list[0][1]["tags"]
sagemaker_session.create_model.reset_mock()
sagemaker_session.endpoint_from_production_variants.reset_mock()
@patch("sagemaker.utils.repack_model")
def test_script_mode_model_uses_jumpstart_base_name(repack_model, sagemaker_session):
    """A Model whose source_dir lives in a JumpStart bucket must use the
    JumpStart resource base name for both the model and the endpoint;
    an arbitrary S3 source_dir must not."""
    jumpstart_source_dir = f"s3://{list(JUMPSTART_BUCKET_NAME_SET)[0]}/source_dirs/source.tar.gz"
    t = Model(
        entry_point=ENTRY_POINT_INFERENCE,
        role=ROLE,
        sagemaker_session=sagemaker_session,
        source_dir=jumpstart_source_dir,
        image_uri=IMAGE_URI,
        model_data=MODEL_DATA,
    )
    t.deploy(instance_type=INSTANCE_TYPE, initial_instance_count=INSTANCE_COUNT)

    assert sagemaker_session.create_model.call_args_list[0][0][0].startswith(
        JUMPSTART_RESOURCE_BASE_NAME
    )
    # Bug fix: `call_args_list[0]` is a mock `call` object, which has no
    # `.startswith`; the endpoint name is passed as the `name` kwarg
    # (mirrors the negative assertion below).
    assert sagemaker_session.endpoint_from_production_variants.call_args_list[0][1][
        "name"
    ].startswith(JUMPSTART_RESOURCE_BASE_NAME)

    sagemaker_session.create_model.reset_mock()
    sagemaker_session.endpoint_from_production_variants.reset_mock()

    non_jumpstart_source_dir = "s3://blah/blah/blah"
    t = Model(
        entry_point=ENTRY_POINT_INFERENCE,
        role=ROLE,
        sagemaker_session=sagemaker_session,
        source_dir=non_jumpstart_source_dir,
        image_uri=IMAGE_URI,
        model_data=MODEL_DATA,
    )
    t.deploy(instance_type=INSTANCE_TYPE, initial_instance_count=INSTANCE_COUNT)

    assert not sagemaker_session.create_model.call_args_list[0][0][0].startswith(
        JUMPSTART_RESOURCE_BASE_NAME
    )
    assert not sagemaker_session.endpoint_from_production_variants.call_args_list[0][1][
        "name"
    ].startswith(JUMPSTART_RESOURCE_BASE_NAME)
@patch("sagemaker.utils.repack_model")
@patch("sagemaker.fw_utils.tar_and_upload_dir")
def test_all_framework_models_add_jumpstart_base_name(
    repack_model, tar_and_upload_dir, sagemaker_session
):
    """Each framework Model deployed with model_data in a JumpStart bucket
    must name both the created model and the created endpoint with the
    JumpStart resource base name."""
    framework_model_classes_to_kwargs = {
        PyTorchModel: {"framework_version": "1.5.0", "py_version": "py3"},
        TensorFlowModel: {"framework_version": "2.3"},
        HuggingFaceModel: {
            "pytorch_version": "1.7.1",
            "py_version": "py36",
            "transformers_version": "4.6.1",
        },
        MXNetModel: {"framework_version": "1.7.0", "py_version": "py3"},
        SKLearnModel: {"framework_version": "0.23-1"},
        XGBoostModel: {"framework_version": "1.3-1"},
    }
    jumpstart_model_dir = f"s3://{list(JUMPSTART_BUCKET_NAME_SET)[0]}/model_dirs/model.tar.gz"
    for framework_model_class, kwargs in framework_model_classes_to_kwargs.items():
        framework_model_class(
            entry_point=ENTRY_POINT_INFERENCE,
            role=ROLE,
            sagemaker_session=sagemaker_session,
            model_data=jumpstart_model_dir,
            **kwargs,
        ).deploy(instance_type="ml.m2.xlarge", initial_instance_count=INSTANCE_COUNT)

        assert sagemaker_session.create_model.call_args_list[0][0][0].startswith(
            JUMPSTART_RESOURCE_BASE_NAME
        )
        # Bug fix: `call_args_list[0]` is a mock `call` object with no
        # `.startswith`; the endpoint name is the `name` keyword argument.
        assert sagemaker_session.endpoint_from_production_variants.call_args_list[0][1][
            "name"
        ].startswith(JUMPSTART_RESOURCE_BASE_NAME)

        # reset between framework classes so call_args_list[0] is per-model
        sagemaker_session.create_model.reset_mock()
        sagemaker_session.endpoint_from_production_variants.reset_mock()
@patch("sagemaker.utils.repack_model")
def test_script_mode_model_uses_proper_sagemaker_submit_dir(repack_model, sagemaker_session):
    """deploy() must set SAGEMAKER_SUBMIT_DIRECTORY to /opt/ml/model/code
    in the container definition passed to create_model."""
    source_dir = "s3://blah/blah/blah"
    model = Model(
        entry_point=ENTRY_POINT_INFERENCE,
        role=ROLE,
        sagemaker_session=sagemaker_session,
        source_dir=source_dir,
        image_uri=IMAGE_URI,
        model_data=MODEL_DATA,
    )
    model.deploy(instance_type=INSTANCE_TYPE, initial_instance_count=INSTANCE_COUNT)

    container_def = sagemaker_session.create_model.call_args_list[0][0][2]
    submit_dir = container_def["Environment"]["SAGEMAKER_SUBMIT_DIRECTORY"]
    assert submit_dir == "/opt/ml/model/code"
| 36.57971 | 99 | 0.738788 |
8fa7aa269e589e207b6bbfe3db619156f4a6be6f | 4,394 | py | Python | src/clld/db/models/_mixins.py | blurks/clld | d9900f88af726eb6a4d2668f517d5af23bcc6f9d | [
"MIT"
] | 32 | 2015-02-22T02:09:29.000Z | 2022-02-18T14:40:16.000Z | src/clld/db/models/_mixins.py | blurks/clld | d9900f88af726eb6a4d2668f517d5af23bcc6f9d | [
"MIT"
] | 199 | 2015-01-05T11:58:38.000Z | 2022-02-22T14:34:52.000Z | src/clld/db/models/_mixins.py | blurks/clld | d9900f88af726eb6a4d2668f517d5af23bcc6f9d | [
"MIT"
] | 18 | 2015-01-23T13:00:47.000Z | 2022-02-21T16:32:36.000Z | import pathlib
from sqlalchemy import Column, Integer, String, Unicode, ForeignKey
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declared_attr
# Explicit public API of this module (what `from ... import *` exposes
# and what documentation tools should pick up).
__all__ = (
'IdNameDescriptionMixin',
'FilesMixin', 'HasFilesMixin',
'DataMixin', 'HasDataMixin',
)
class IdNameDescriptionMixin(object):
    """Columns shared by 'visible' objects, i.e. anything that is displayed.

    In particular all :doc:`resources` fall into this category.

    .. note::

        Only one of ``description`` or ``markup_description`` should be
        supplied, since these are used mutually exclusively.
    """

    # A ``str`` identifier usable for sorting and as part of a URL path;
    # thus limited to URL-safe characters and must not contain '.' or '/',
    # since those may trip up route matching.
    id = Column(String, unique=True)

    # A human readable 'identifier' of the object.
    name = Column(Unicode)

    # A plain-text description of the object.
    description = Column(Unicode)

    # A description of the object containing HTML markup.
    markup_description = Column(Unicode)
# ----------------------------------------------------------------------------
# We augment mapper classes for basic objects using mixins to add the ability
# to store arbitrary key-value pairs and files associated with an object.
# ----------------------------------------------------------------------------
class FilesMixin(IdNameDescriptionMixin):
    """Mixin providing a way to associate files with instances of another model class.

    .. note::

        The file itself is not stored in the database but must be created
        in the filesystem, e.g. using the :meth:`create` method.
    """

    @classmethod
    def owner_class(cls):
        # "<Owner>_files" -> "<Owner>"
        return cls.__name__.split('_')[0]

    # Ordinal controlling the sort order of files attached to one db object.
    ord = Column(Integer, default=1)

    # Mime-type of the file content.
    mime_type = Column(String)

    @declared_attr
    def object_pk(cls):
        return Column(Integer, ForeignKey('%s.pk' % cls.owner_class().lower()))

    @property
    def relpath(self):
        """OS file path of the file relative to the application's file-system dir."""
        owner = self.owner_class().lower()
        return pathlib.Path(owner) / str(self.object.id) / str(self.id)

    def create(self, dir_, content):
        """Write ``content`` to a file using ``dir_`` as file-system directory.

        :return: File-system path of the file that was created.
        """
        target = pathlib.Path(dir_).joinpath(self.relpath)
        if not target.parent.exists():
            target.parent.mkdir(parents=True)
        if isinstance(content, str):
            content = content.encode('utf8')
        with open(target.as_posix(), 'wb') as fp:
            fp.write(content)
        return str(target)
class HasFilesMixin(object):
    """Mixin for model classes which may have associated files."""

    @property
    def files(self):
        """return ``dict`` of associated files keyed by ``id``."""
        return {f.id: f for f in self._files}

    @declared_attr
    def _files(cls):
        return relationship(cls.__name__ + '_files', backref='object')
class DataMixin(object):
    """Provide a simple way to attach key-value pairs to a model class given by name."""

    @classmethod
    def owner_class(cls):
        # "<Owner>_data" -> "<Owner>"
        return cls.__name__.split('_')[0]

    key = Column(Unicode)
    value = Column(Unicode)
    ord = Column(Integer, default=1)

    @declared_attr
    def object_pk(cls):
        return Column(
            Integer, ForeignKey('{0}.pk'.format(cls.owner_class().lower())))
class HasDataMixin(object):
    """Adds a convenience method to retrieve the key-value pairs from data as dict.

    .. note::

        It is the responsibility of the programmer to make sure conversion to
        a ``dict`` makes sense, i.e. the keys in data are actually unique,
        thus usable as dictionary keys.
    """

    def datadict(self):
        """return ``dict`` of associated key-value pairs."""
        return {d.key: d.value for d in self.data}

    @declared_attr
    def data(cls):
        return relationship(
            cls.__name__ + '_data', order_by=cls.__name__ + '_data.ord')
e392de1862b8fd5995c2ce469b8b8e5d7a00657b | 52 | py | Python | gen_doc/repos/__init__.py | Shchusia/gen_doc | 216b561fc973c5565a78348c5b0b3db58f84442f | [
"Unlicense"
] | null | null | null | gen_doc/repos/__init__.py | Shchusia/gen_doc | 216b561fc973c5565a78348c5b0b3db58f84442f | [
"Unlicense"
] | null | null | null | gen_doc/repos/__init__.py | Shchusia/gen_doc | 216b561fc973c5565a78348c5b0b3db58f84442f | [
"Unlicense"
] | null | null | null | """
importing
"""
from .repos import Repository
| 10.4 | 30 | 0.653846 |
781e16b3bc2d3eb92b5e885a87846f826e443d63 | 2,332 | py | Python | src/core/models/backbones/simplecnn.py | lucasdavid/tf-experiment | 0c6e6b52c91f498dd0cb5a13beafadfeab627429 | [
"Apache-2.0"
] | null | null | null | src/core/models/backbones/simplecnn.py | lucasdavid/tf-experiment | 0c6e6b52c91f498dd0cb5a13beafadfeab627429 | [
"Apache-2.0"
] | null | null | null | src/core/models/backbones/simplecnn.py | lucasdavid/tf-experiment | 0c6e6b52c91f498dd0cb5a13beafadfeab627429 | [
"Apache-2.0"
] | null | null | null | from typing import Optional
import tensorflow as tf
from tensorflow.keras import layers
def conv_block(
    x: tf.Tensor,
    filters: int,
    kernel_size=(3, 3),
    strides=(1, 1),
    batch_norm: bool = True,
    activation='relu',
    dropout: float = 0,
    padding: str = 'valid',
    name: str = None,
):
    """Dropout -> Conv2D -> (BatchNorm) -> Activation.

    All layers are namespaced under ``name`` (e.g. ``{name}/conv``).
    """
    if dropout:
        x = layers.Dropout(dropout, name=f'{name}/drop')(x)
    x = layers.Conv2D(
        filters,
        kernel_size,
        strides=strides,
        use_bias=not batch_norm,  # bias is redundant when BN follows
        padding=padding,
        name=f'{name}/conv',
    )(x)
    if batch_norm:
        x = layers.BatchNormalization(name=f'{name}/bn')(x)
    return layers.Activation(activation, name=f'{name}/{activation}')(x)
def conv_stack(x, filters, name, block_activation, dropout, residual: bool = False):
    """One downsampling stage: two conv blocks (the second strided by 2).

    When ``residual`` is true, the stage input is resized to the strided
    output's spatial shape and concatenated with it.

    Bug fix: the original returned the *unmodified* input ``x`` when
    ``residual=False``, silently discarding both conv blocks; the stage
    output ``y`` is now returned in that case.
    """
    y = conv_block(x, filters, name=f'g{name}/b1', activation=block_activation, dropout=dropout)
    y = conv_block(y, filters, name=f'g{name}/b2', activation=block_activation, strides=2)

    if not residual:
        return y

    shape = tf.shape(y)
    height, width = shape[1], shape[2]
    z = tf.image.resize(x, (height, width), name=f'g{name}/resize')
    return layers.concatenate([z, y], name=f'g{name}/add')
def SimpleCNN(
    input_tensor: tf.Tensor,
    weights: Optional[str] = None,
    pooling: Optional[str] = None,
    stacks: int = 3,
    dropout: float = 0,
    block_activation: str = 'relu',
    include_top: bool = True,
    classes: int = 1000,
    activation: str = 'softmax',
):
    """Build a small residual CNN (`scnn{stacks}`) from conv_stack stages.

    ``weights`` must be None (no pretrained checkpoints exist); ``pooling``
    may be None, 'avg' or 'max'; ``include_top`` appends a Dense classifier.
    """
    if weights:
        raise ValueError('This model does not have any checkpoints. You must set `weights=None`.')

    x = conv_block(input_tensor, 32, padding='same', name='stack0/conv',
                   activation=block_activation)
    for idx in range(stacks):
        # stage i uses 32*i filters and always the residual concat path
        x = conv_stack(x, 32 * (idx + 1), f'stack{idx+1}', block_activation,
                       dropout, residual=True)

    if pooling:
        if pooling == 'avg':
            x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
        elif pooling == 'max':
            x = layers.GlobalMaxPool2D(name='max_pool')(x)
        else:
            raise ValueError(f'Illegal value {pooling} for parameter `pooling`. Expected `avg` or `max`.')

    if include_top:
        x = layers.Dense(classes, name='predictions', activation=activation)(x)

    return tf.keras.Model(input_tensor, x, name=f'scnn{stacks}')
| 30.285714 | 100 | 0.655232 |
83ab6188ff9cf51338e8421e67d9e8a4b94463a4 | 1,857 | py | Python | cpp_ann/script_generator_example.py | marsgr6/ann | 64c051263fd1f80d8f596b217fe733e0e1b4c2fa | [
"MIT"
] | 1 | 2021-05-25T23:43:44.000Z | 2021-05-25T23:43:44.000Z | cpp_ann/script_generator_example.py | marsgr6/ann | 64c051263fd1f80d8f596b217fe733e0e1b4c2fa | [
"MIT"
] | null | null | null | cpp_ann/script_generator_example.py | marsgr6/ann | 64c051263fd1f80d8f596b217fe733e0e1b4c2fa | [
"MIT"
] | null | null | null | """
This code generates a unix bash script
for running the C++ generated program
with the selected set of parameters for
p: patterns (integer)
K: connectivity (integer)
nns: number of modules (list of integers)
topos: topologies (list from ['r', 'c', 'x'])
r: Ring, c: Cross, 'x': X topologies
ws: omega values (list of floats)
Example:
python script_generator_example.py > run_example.sh
Then make run_example.sh executable:
chmod +x run_example.sh
Execute:
./run_example.sh
A list of files for each execution of the C++ program will be found in
the . directory.
"""
p = 150 # patterns
K = 240
nns = [10, 15] # 1, 10 and 15 ensemble modules
topos = ['r', 'c'] # r: ring topology, c: cross topology
ws = [0.5, 1] # omega: random connection ratio
advance = 0
# Total number of times that the cpp program will be executed
# according to the combination of paramters
# p // nn: pattern subsetting assigned to modules
# len(topos): number of topologies to check
# len(ws): number of values of random connection ratios to check
# Above combination is checked for different values of modules: nns
runs = sum([p // nn * len(topos) * len(ws) for nn in nns])
path = "patterns/"
# Iterates for each list above
for nn in nns:
for topo in topos:
for w in ws:
for i in range(p // nn):
advance += 1
percentage = advance / runs * 100
print(r"echo -e '\r" + "%0.0f" % percentage + "%'")
print("./sparsenet 89420 " +
str(K // nn) + " " +
str(w) +
" 0.2258 1 r 0.656 0.7 0.0 100 1 " +
str((i + 1) * nn) +
" 6 100 263 340 " + path + " " + path + " " +
topo + " " + str(i + 1) + " " + str(nn))
| 32.017241 | 73 | 0.572429 |
fca6296e9edec56f94da241eee42ef36a3eff803 | 32 | py | Python | slackbot_settings.py | KagenoMoheji/MySlackbot | bac61d29b773ea75b152f96b605114bf8073d498 | [
"MIT"
] | null | null | null | slackbot_settings.py | KagenoMoheji/MySlackbot | bac61d29b773ea75b152f96b605114bf8073d498 | [
"MIT"
] | null | null | null | slackbot_settings.py | KagenoMoheji/MySlackbot | bac61d29b773ea75b152f96b605114bf8073d498 | [
"MIT"
] | null | null | null | PLUGINS = [
"plugins",
]
| 8 | 15 | 0.4375 |
a61f101d79d323a4bb77b06e995be138a3574b9f | 7,748 | py | Python | ppdet/modeling/losses/iou_loss.py | sjtubinlong/PaddleDetection | 2bd069f4efd57861ef0004b212525fdafa4f0178 | [
"Apache-2.0"
] | null | null | null | ppdet/modeling/losses/iou_loss.py | sjtubinlong/PaddleDetection | 2bd069f4efd57861ef0004b212525fdafa4f0178 | [
"Apache-2.0"
] | null | null | null | ppdet/modeling/losses/iou_loss.py | sjtubinlong/PaddleDetection | 2bd069f4efd57861ef0004b212525fdafa4f0178 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.initializer import NumpyArrayInitializer
from paddle import fluid
from ppdet.core.workspace import register, serializable
__all__ = ['IouLoss']
@register
@serializable
class IouLoss(object):
    """
    iou loss, see https://arxiv.org/abs/1908.03851

        loss = (1.0 - iou * iou) * loss_weight

    Args:
        loss_weight (float): iou loss weight, default is 2.5
        max_height (int): max height of input to support random shape input
        max_width (int): max width of input to support random shape input
    """

    def __init__(self, loss_weight=2.5, max_height=608, max_width=608):
        self._loss_weight = loss_weight
        self._MAX_HI = max_height
        self._MAX_WI = max_width

    def __call__(self,
                 x,
                 y,
                 w,
                 h,
                 tx,
                 ty,
                 tw,
                 th,
                 anchors,
                 downsample_ratio,
                 batch_size,
                 ioup=None,
                 eps=1.e-10):
        '''
        Args:
            x | y | w | h ([Variables]): the output of yolov3 for encoded x|y|w|h
            tx |ty |tw |th ([Variables]): the target of yolov3 for encoded x|y|w|h
            anchors ([float]): list of anchors for current output layer
            downsample_ratio (float): the downsample ratio for current output layer
            batch_size (int): training batch size
            eps (float): the decimal to prevent the denominator equal zero
        '''
        iouk = self._iou(x, y, w, h, tx, ty, tw, th, anchors,
                         downsample_ratio, batch_size, ioup, eps)
        # squared-IoU penalty scaled by the configured weight
        return (1. - iouk * iouk) * self._loss_weight

    def _iou(self,
             x,
             y,
             w,
             h,
             tx,
             ty,
             tw,
             th,
             anchors,
             downsample_ratio,
             batch_size,
             ioup=None,
             eps=1.e-10):
        """Elementwise IoU between decoded predictions and decoded targets."""
        x1, y1, x2, y2 = self._bbox_transform(
            x, y, w, h, anchors, downsample_ratio, batch_size, False)
        x1g, y1g, x2g, y2g = self._bbox_transform(
            tx, ty, tw, th, anchors, downsample_ratio, batch_size, True)

        # clamp predicted corners so that x2 >= x1 and y2 >= y1
        x2 = fluid.layers.elementwise_max(x1, x2)
        y2 = fluid.layers.elementwise_max(y1, y2)

        # intersection rectangle
        xkis1 = fluid.layers.elementwise_max(x1, x1g)
        ykis1 = fluid.layers.elementwise_max(y1, y1g)
        xkis2 = fluid.layers.elementwise_min(x2, x2g)
        ykis2 = fluid.layers.elementwise_min(y2, y2g)

        intsctk = (xkis2 - xkis1) * (ykis2 - ykis1)
        # zero out the area where the boxes do not actually overlap
        intsctk = intsctk * fluid.layers.greater_than(
            xkis2, xkis1) * fluid.layers.greater_than(ykis2, ykis1)
        unionk = (x2 - x1) * (y2 - y1) + (x2g - x1g) * (y2g - y1g
                                                        ) - intsctk + eps
        return intsctk / unionk

    def _bbox_transform(self, dcx, dcy, dw, dh, anchors, downsample_ratio,
                        batch_size, is_gt):
        """Decode raw offsets (or targets when is_gt) into normalized corners."""
        grid_x = int(self._MAX_WI / downsample_ratio)
        grid_y = int(self._MAX_HI / downsample_ratio)
        an_num = len(anchors) // 2

        shape_fmp = fluid.layers.shape(dcx)
        shape_fmp.stop_gradient = True

        # generate the grid_w x grid_h center of feature map
        idx_i = np.array([[i for i in range(grid_x)]])
        idx_j = np.array([[j for j in range(grid_y)]]).transpose()
        gi_np = np.repeat(idx_i, grid_y, axis=0)
        gi_np = np.reshape(gi_np, newshape=[1, 1, grid_y, grid_x])
        gi_np = np.tile(gi_np, reps=[batch_size, an_num, 1, 1])
        gj_np = np.repeat(idx_j, grid_x, axis=1)
        gj_np = np.reshape(gj_np, newshape=[1, 1, grid_y, grid_x])
        gj_np = np.tile(gj_np, reps=[batch_size, an_num, 1, 1])

        # crop the max-size grids down to the actual feature-map shape
        gi_max = self._create_tensor_from_numpy(gi_np.astype(np.float32))
        gi = fluid.layers.crop(x=gi_max, shape=dcx)
        gi.stop_gradient = True
        gj_max = self._create_tensor_from_numpy(gj_np.astype(np.float32))
        gj = fluid.layers.crop(x=gj_max, shape=dcx)
        gj.stop_gradient = True

        grid_x_act = fluid.layers.cast(shape_fmp[3], dtype="float32")
        grid_x_act.stop_gradient = True
        grid_y_act = fluid.layers.cast(shape_fmp[2], dtype="float32")
        grid_y_act.stop_gradient = True

        if is_gt:
            cx = fluid.layers.elementwise_add(dcx, gi) / grid_x_act
            cx.gradient = True  # NOTE(review): likely meant stop_gradient; kept as-is
            cy = fluid.layers.elementwise_add(dcy, gj) / grid_y_act
            cy.gradient = True  # NOTE(review): see above
        else:
            dcx_sig = fluid.layers.sigmoid(dcx)
            cx = fluid.layers.elementwise_add(dcx_sig, gi) / grid_x_act
            dcy_sig = fluid.layers.sigmoid(dcy)
            cy = fluid.layers.elementwise_add(dcy_sig, gj) / grid_y_act

        # anchors are stored as interleaved (w, h) pairs
        anchor_w_ = [anchors[i] for i in range(0, len(anchors)) if i % 2 == 0]
        anchor_w_np = np.array(anchor_w_)
        anchor_w_np = np.reshape(anchor_w_np, newshape=[1, an_num, 1, 1])
        anchor_w_np = np.tile(anchor_w_np, reps=[batch_size, 1, grid_y, grid_x])
        anchor_w_max = self._create_tensor_from_numpy(
            anchor_w_np.astype(np.float32))
        anchor_w = fluid.layers.crop(x=anchor_w_max, shape=dcx)
        anchor_w.stop_gradient = True

        anchor_h_ = [anchors[i] for i in range(0, len(anchors)) if i % 2 == 1]
        anchor_h_np = np.array(anchor_h_)
        anchor_h_np = np.reshape(anchor_h_np, newshape=[1, an_num, 1, 1])
        anchor_h_np = np.tile(anchor_h_np, reps=[batch_size, 1, grid_y, grid_x])
        anchor_h_max = self._create_tensor_from_numpy(
            anchor_h_np.astype(np.float32))
        anchor_h = fluid.layers.crop(x=anchor_h_max, shape=dcx)
        anchor_h.stop_gradient = True

        # w = anchor_w * e^tw / grid_w, h = anchor_h * e^th / grid_h
        exp_dw = fluid.layers.exp(dw)
        exp_dh = fluid.layers.exp(dh)
        pw = fluid.layers.elementwise_mul(exp_dw, anchor_w) / \
            (grid_x_act * downsample_ratio)
        ph = fluid.layers.elementwise_mul(exp_dh, anchor_h) / \
            (grid_y_act * downsample_ratio)
        if is_gt:
            exp_dw.stop_gradient = True
            exp_dh.stop_gradient = True
            pw.stop_gradient = True
            ph.stop_gradient = True

        x1 = cx - 0.5 * pw
        y1 = cy - 0.5 * ph
        x2 = cx + 0.5 * pw
        y2 = cy + 0.5 * ph
        if is_gt:
            x1.stop_gradient = True
            y1.stop_gradient = True
            x2.stop_gradient = True
            y2.stop_gradient = True
        return x1, y1, x2, y2

    def _create_tensor_from_numpy(self, numpy_array):
        """Wrap a constant numpy array as a non-trainable fluid parameter."""
        paddle_array = fluid.layers.create_parameter(
            attr=ParamAttr(),
            shape=numpy_array.shape,
            dtype=numpy_array.dtype,
            default_initializer=NumpyArrayInitializer(numpy_array))
        paddle_array.stop_gradient = True
        return paddle_array
efbb887ed97cc4ab02303d3fbae515780448b1de | 533 | py | Python | seq2seq/generate_data.py | NikIshmametev/tf_examples | 1596c6c6b2596665f112ee454cff57ab8d610b48 | [
"Apache-2.0"
] | 159 | 2015-11-26T20:09:17.000Z | 2021-11-12T11:36:56.000Z | seq2seq/generate_data.py | NikIshmametev/tf_examples | 1596c6c6b2596665f112ee454cff57ab8d610b48 | [
"Apache-2.0"
] | 8 | 2016-04-29T09:11:57.000Z | 2018-08-30T14:00:17.000Z | seq2seq/generate_data.py | NikIshmametev/tf_examples | 1596c6c6b2596665f112ee454cff57ab8d610b48 | [
"Apache-2.0"
] | 108 | 2015-11-26T01:32:16.000Z | 2020-04-06T22:46:07.000Z | import random
examples = 10000
symbols = 100
length = 10
with open('vocab', 'w') as f:
f.write("<S>\n</S>\n<UNK>\n")
for i in range(100):
f.write("%d\n" % i)
with open('input', 'w') as fin:
with open('output', 'w') as fout:
for i in range(examples):
inp = [random.randint(0, symbols) + 3 for _ in range(length)]
out = [(x + 5) % 100 + 3 for x in inp]
fin.write(' '.join([str(x) for x in inp]) + '\n')
fout.write(' '.join([str(x) for x in out]) + '\n')
| 25.380952 | 73 | 0.50469 |
6428aadd0b2c745b05fcc3885bcc9839d7edb008 | 4,771 | py | Python | opensfm/synthetic_data/synthetic_scene.py | juhan/OpenSfM | 8dd4fe3bf303f974a56857b973feaef7e7c2662f | [
"BSD-2-Clause"
] | null | null | null | opensfm/synthetic_data/synthetic_scene.py | juhan/OpenSfM | 8dd4fe3bf303f974a56857b973feaef7e7c2662f | [
"BSD-2-Clause"
] | 3 | 2021-09-08T01:41:55.000Z | 2022-03-12T00:16:29.000Z | opensfm/synthetic_data/synthetic_scene.py | juhan/OpenSfM | 8dd4fe3bf303f974a56857b973feaef7e7c2662f | [
"BSD-2-Clause"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import functools
from opensfm import types
import opensfm.synthetic_data.synthetic_metrics as sm
import opensfm.synthetic_data.synthetic_generator as sg
def get_camera(type, id, focal, k1, k2):
    """Construct a 2000x1600 synthetic camera of model 'perspective' or 'fisheye'."""
    camera = None
    if type == 'perspective':
        camera = types.PerspectiveCamera()
    elif type == 'fisheye':
        camera = types.FisheyeCamera()
    # NOTE: an unrecognized `type` leaves camera as None, so the attribute
    # assignment below raises AttributeError (original behavior preserved).
    camera.id = id
    camera.focal = focal
    camera.k1 = k1
    camera.k2 = k2
    camera.height = 1600
    camera.width = 2000
    return camera
def get_scene_generator(type, length):
    """Return a partial producing a camera-path generator of the given shape.

    'ellipse' uses a 4:1 major/minor axis ratio; unknown types yield None.
    """
    if type == 'ellipse':
        return functools.partial(sg.ellipse_generator, length, length / 4)
    if type == 'line':
        return functools.partial(sg.line_generator, length)
    if type == 'curve':
        return functools.partial(sg.weird_curve, length)
    return None
class SyntheticScene(object):
    """Builder for a synthetic 'street' scene: wall/floor points along a
    generator curve plus one or more noisy camera sequences, from which an
    OpenSfM reconstruction and synthetic track data can be derived."""

    def __init__(self, generator):
        self.generator = generator
        self.wall_points = None
        self.floor_points = None
        self.width = None
        self.shot_positions = []
        self.shot_rotations = []
        self.cameras = []

    def add_street(self, points_count, height, width):
        """Generate wall and floor point clouds along the generator curve."""
        self.wall_points, self.floor_points = sg.generate_street(
            sg.samples_generator_random_count(
                int(points_count // 3)), self.generator,
            height, width)
        self.width = width
        return self

    def perturb_walls(self, walls_pertubation):
        """Add noise to the wall points in place."""
        sg.perturb_points(self.wall_points, walls_pertubation)
        return self

    def perturb_floor(self, floor_pertubation):
        """Add noise to the floor points in place."""
        sg.perturb_points(self.floor_points, floor_pertubation)
        return self

    def add_camera_sequence(self, camera, start, length, height, interval,
                            position_noise=None, rotation_noise=None,
                            gps_noise=None):
        """Append a camera trajectory sampled along the generator curve.

        NOTE(review): `gps_noise` is accepted but unused here (GPS noise is
        applied in get_scene_exifs); kept for interface compatibility.
        """
        default_noise_interval = 0.25 * interval
        positions, rotations = sg.generate_cameras(
            sg.samples_generator_interval(start, length, interval,
                                          default_noise_interval),
            self.generator, height)
        sg.perturb_points(positions, position_noise)
        sg.perturb_rotations(rotations, rotation_noise)
        self.shot_rotations.append(rotations)
        self.shot_positions.append(positions)
        self.cameras.append(camera)
        return self

    def get_reconstruction(self, rotation_noise=0.0,
                           position_noise=0.0,
                           camera_noise=0.0):
        """Build a reconstruction, optionally perturbing shots and cameras."""
        floor_color = [120, 90, 10]
        wall_color = [10, 90, 130]

        positions = self.shot_positions
        if position_noise != 0.0:
            for p in positions:
                sg.perturb_points(p, position_noise)

        rotations = self.shot_rotations
        # Bug fix: this was gated on `position_noise != 0.0`, so rotation
        # noise was only applied when position noise happened to be set.
        if rotation_noise != 0.0:
            for r in rotations:
                sg.perturb_rotations(r, rotation_noise)

        cameras = self.cameras
        if camera_noise != 0.0:
            for c in cameras:
                c.focal *= (1 + camera_noise)

        return sg.create_reconstruction(
            [self.floor_points, self.wall_points],
            [floor_color, wall_color],
            cameras, positions, rotations)

    def get_scene_exifs(self, gps_noise):
        """Synthesize EXIF metadata with noisy GPS for the reconstruction."""
        return sg.generate_exifs(self.get_reconstruction(), gps_noise)

    def get_tracks_data(self, maximum_depth, noise):
        """Synthesize feature-track data from the reconstruction."""
        return sg.generate_track_data(self.get_reconstruction(),
                                      maximum_depth, noise)
def compare(reference, reconstruction):
    """Compute summary error statistics of a reconstruction vs. a reference."""
    position = sm.position_errors(reference, reconstruction)
    gps = sm.gps_errors(reconstruction)
    rotation = sm.rotation_errors(reference, reconstruction)
    points = sm.points_errors(reference, reconstruction)
    completeness = sm.completeness_errors(reference, reconstruction)

    def _avg_norm(errors):
        # norm of the per-axis mean error vector
        return np.linalg.norm(np.average(errors, axis=0))

    def _std_norm(errors):
        # norm of the per-axis standard deviation vector
        return np.linalg.norm(np.std(errors, axis=0))

    return {
        'position_average': _avg_norm(position),
        'position_std': _std_norm(position),
        'gps_average': _avg_norm(gps),
        'gps_std': _std_norm(gps),
        'rotation_average': np.average(rotation),
        'rotation_std': np.std(rotation),
        'points_average': _avg_norm(points),
        'points_std': _std_norm(points),
        'ratio_cameras': completeness[0],
        'ratio_points': completeness[1],
    }
| 35.340741 | 74 | 0.63362 |
f483b42d00c30155b78cc64b68fb2fe1218853b3 | 120 | py | Python | create_pyproj/__main__.py | byung-u/create_pyproj | 10e3046cdff7bcab45ef8779cb70d149a16e4cf6 | [
"MIT"
] | null | null | null | create_pyproj/__main__.py | byung-u/create_pyproj | 10e3046cdff7bcab45ef8779cb70d149a16e4cf6 | [
"MIT"
] | null | null | null | create_pyproj/__main__.py | byung-u/create_pyproj | 10e3046cdff7bcab45ef8779cb70d149a16e4cf6 | [
"MIT"
] | null | null | null | """ create_pyproj create python project template command line tool."""
from create_pyproj.main import main
main(None)
| 20 | 70 | 0.783333 |
196f6c09954f5a741185ecd5c83aa2aa092fb064 | 8,104 | py | Python | xldlib/xlpy/tools/frozen.py | Alexhuszagh/XLDiscoverer | 60937b1f7f2e23af4219eb26519d6b83fb4232d6 | [
"Apache-2.0",
"MIT"
] | null | null | null | xldlib/xlpy/tools/frozen.py | Alexhuszagh/XLDiscoverer | 60937b1f7f2e23af4219eb26519d6b83fb4232d6 | [
"Apache-2.0",
"MIT"
] | null | null | null | xldlib/xlpy/tools/frozen.py | Alexhuszagh/XLDiscoverer | 60937b1f7f2e23af4219eb26519d6b83fb4232d6 | [
"Apache-2.0",
"MIT"
] | null | null | null | '''
Xlpy/Tools/frozen
_________________
Tools for freezing common data structures to give them a hash
function.
:copyright: (c) 2015 The Regents of the University of California.
:license: GNU GPL, see licenses/GNU GPLv3.txt for more details.
'''
# load modules
from collections import Counter, namedtuple
from xldlib.definitions import ZIP
from xldlib.general import sequence
from xldlib.qt.objects import base
from xldlib.utils import logger, modtools
# DATA
# ----
# Prefix characters used when freezing modification strings: 'd' is paired
# with the certain mods and 'u' with the uncertain ones (see the positional
# ZIP pairing in ModificationFreezer.getmodifications).
CHARS = [
'd',
'u'
]
# OBJECTS
# -------
@sequence.serializable("XLIdentifier")
class XLIdentifier(namedtuple("XLIdentifier", "crosslinker number")):
'''Hashable (crosslinker, number) identifier pair; registered with the
serialization framework via ``@sequence.serializable``.'''
# MODIFICATIONS
# -------------
@logger.init('xlpy', 'DEBUG')
class ModificationFreezer(base.BaseObject):
    '''
    Converts the nested modification dictionary (nested dicts and lists)
    into hashable, frozen representations.
    '''

    def __init__(self, row, isobaric, ignoremodposition=False):
        super(ModificationFreezer, self).__init__()
        self.engine = row.engines['matched']
        self.isobaric = isobaric
        self.ignoremodposition = ignoremodposition

    def __call__(self, modification):
        '''Returns a frozen set representation of the mods'''
        tofreeze = list(self.getmodifications(modification))
        tofreeze.append(int(modification['neutralloss']))
        if self.ignoremodposition:
            # positions ignored -> identity is the multiset of mod names
            return frozenset(Counter(tofreeze).items())
        return frozenset(tofreeze)

    #     GETTERS

    def getmodifications(self, modification):
        '''Yields unique identifiers for the certain and uncertain mods'''
        certain = modification['certain']
        uncertain = self.getuncertain(modification)
        for char, dicts in ZIP(CHARS, [[certain], uncertain]):
            for modificationdict in dicts:
                for name, positions in modificationdict.items():
                    # isobaric names are not standard modifications: skip
                    if name not in self.isobaric:
                        yield self.getmodstring(name, positions, char)

    def getuncertain(self, modification):
        '''Returns the uncertain list, or a single-element head of it'''
        if self.ignoremodposition:
            # slice of the first item works for the empty case too
            return modification['uncertain'][0:1]
        return modification['uncertain']

    def getmodstring(self, name, positions, char):
        '''
        Returns a string for the mod/position pair, joining the positions
        as CSV-separated values.
            mod_string('Carbamidomethyl', [6, 12], 'u')
                ->'uCarbamidomethyl@6,12'
        '''
        if self.ignoremodposition:
            return name
        ordered = sorted(positions, key=self.engine.key)
        joined = ','.join(str(i) for i in ordered)
        return '{0}{1}@{2}'.format(char, name, joined)
# CROSSLINKS
# ----------
# Columns fetched from a matched-data row when freezing a link.
KEYS = (
    'modifications',
    'peptide',
    'id'
)


@logger.init('xlpy', 'DEBUG')
class SingleFreezer(base.BaseObject):
    '''Freezes a single link to render it unique based on identification'''

    def __init__(self, row, addindexes=False):
        super(SingleFreezer, self).__init__()
        self.row = row
        self.addindexes = addindexes

        source = self.app.discovererthread
        isobaric = source.parameters.isobaric.todict()
        self.modfreezer = ModificationFreezer(row, isobaric)

    def __call__(self, index, spreadsheet):
        '''Freeze the crosslinker attributes for set additions and lookups'''
        frozen = {
            self.getmatched(index),
            XLIdentifier(None, None),  # single links carry no crosslinker
            tuple(sorted(spreadsheet['XL'])),
        }
        if self.addindexes:
            frozen.add(tuple(sorted(index)))
        return frozenset(frozen)

    #    GETTERS

    def getmatched(self, index, keys=KEYS):
        '''Returns the unique matched data for the index'''
        row = self.row.data.getrow(index, keys, asdict=True)
        modification = self.modfreezer(row['modifications'])
        return frozenset((modification, row['peptide'], row['id']))
@logger.init('xlpy', 'DEBUG')
class CrosslinkFreezer(base.BaseObject):
    '''Freezes a crosslink to render it unique based on identification'''

    def __init__(self, row, crosslinker, addindexes=False):
        super(CrosslinkFreezer, self).__init__()
        self.row = row
        self.crosslinker = crosslinker
        self.addindexes = addindexes

        source = self.app.discovererthread
        isobaric = source.parameters.isobaric[crosslinker.id]
        self.modfreezer = ModificationFreezer(row, isobaric)
        self.fragment = modtools.FragmentPositions(row, isobaric)

    def __call__(self, indexes, ends):
        '''Freeze the crosslinker attributes for set additions and lookups'''
        frozen = {
            tuple(sorted(self.getmatched(indexes))),
            XLIdentifier(self.crosslinker.id, ends.number),
            tuple(sorted(self.fragment(indexes))),
        }
        if self.addindexes:
            frozen.add(tuple(sorted(indexes)))
        return frozenset(frozen)

    #    GETTERS

    def getmatched(self, indexes, keys=KEYS):
        '''Yields the unique matched data for each index'''
        for index in indexes:
            row = self.row.data.getrow(index, keys, asdict=True)
            modification = self.modfreezer(row['modifications'])
            yield frozenset((modification, row['peptide'], row['id']))
@logger.init('xlpy', 'DEBUG')
class LabeledCrosslinkFreezer(base.BaseObject):
    '''Freezes a crosslink to render it unique based on identification'''

    def __init__(self, row, isobaric):
        super(LabeledCrosslinkFreezer, self).__init__()
        self.row = row
        self.isobaric = isobaric

    @logger.call('xlpy', 'debug')
    def __call__(self, crosslink_index, states):
        '''Freezes the crosslinked-peptide attributes from the labels'''
        crosslink = self.__get_crosslink(crosslink_index)
        spreadsheet = self.__get_spreadsheet(crosslink_index)

        isobaric = self.isobaric[crosslink.crosslinker]
        modfreezer = ModificationFreezer(self.row, isobaric)

        matched = self.getmatched(
            crosslink.index, states[0].modifications, modfreezer)
        frozen = {
            tuple(sorted(matched)),
            XLIdentifier(states[0].crosslinker, crosslink.ends.number),
            tuple(sorted(spreadsheet['XL'])),
        }
        return frozenset(frozen)

    #  CLASS METHODS

    @classmethod
    def fromrow(cls, row):
        '''Initializes the class from a source thread'''
        source = cls.app.discovererthread
        return cls(row, source.parameters.isobaric)

    #    GETTERS

    def getmatched(self, indexes, modifications, modfreezer):
        '''Yields the unique matched data for each (modification, index) pair'''
        for modification, index in ZIP(modifications, indexes):
            frozenmod = modfreezer(modification)
            peptide, uniprotid = self.row.data.getrow(index, ('peptide', 'id'))
            yield frozenset((frozenmod, peptide, uniprotid))

    def __get_crosslink(self, crosslink_index):
        '''
        Translates an index pointing at an unlabeled crosslink into the
        crosslink record of the matched data.
        '''
        return self.row.data['crosslinks'][crosslink_index]

    def __get_spreadsheet(self, crosslink_index):
        '''
        Translates an index pointing at an unlabeled crosslink into the
        corresponding spreadsheet record.
        '''
        return self.row.data['spreadsheet']['crosslinks'][crosslink_index]
| 29.904059 | 79 | 0.649062 |
142da1072f362e416f051960b46ded374cc692a4 | 4,027 | py | Python | pdft/tests/formic/formic.py | ymshi449/pdft | 5839229a4389da95319ceb05269abc635a466878 | [
"BSD-3-Clause"
] | null | null | null | pdft/tests/formic/formic.py | ymshi449/pdft | 5839229a4389da95319ceb05269abc635a466878 | [
"BSD-3-Clause"
] | null | null | null | pdft/tests/formic/formic.py | ymshi449/pdft | 5839229a4389da95319ceb05269abc635a466878 | [
"BSD-3-Clause"
] | 2 | 2020-03-24T21:06:36.000Z | 2021-04-22T19:34:39.000Z | import numpy as np
import psi4
import pdft
import matplotlib.pyplot as plt
import libcubeprop
psi4.core.set_output_file("formic.psi4")
functional = 'svwn'
basis = 'cc-pvdz'
svdc = -4
reguc = -4
title = "formic Newton svdc%i reguc %i" %(svdc, reguc) + basis + functional
print(title)
Full_Molec = psi4.geometry("""
nocom
noreorient
C 0.0000000 0.1929272 -1.9035340
O 0.0000000 1.1595219 -1.1616236
O 0.0000000 -1.0680669 -1.5349870
H 0.0000000 0.2949802 -2.9949776
H 0.0000000 -1.1409414 -0.5399614
C 0.0000000 -0.1929272 1.9035340
O 0.0000000 -1.1595219 1.1616236
O 0.0000000 1.0680669 1.5349870
H 0.0000000 -0.2949802 2.9949776
H 0.0000000 1.1409414 0.5399614
units bohr
symmetry c1
""")
Monomer_1 = psi4.geometry("""
nocom
noreorient
@C 0.0000000 0.1929272 -1.9035340
@O 0.0000000 1.1595219 -1.1616236
@O 0.0000000 -1.0680669 -1.5349870
@H 0.0000000 0.2949802 -2.9949776
@H 0.0000000 -1.1409414 -0.5399614
C 0.0000000 -0.1929272 1.9035340
O 0.0000000 -1.1595219 1.1616236
O 0.0000000 1.0680669 1.5349870
H 0.0000000 -0.2949802 2.9949776
H 0.0000000 1.1409414 0.5399614
units bohr
symmetry c1
""")
Monomer_2 = psi4.geometry("""
nocom
noreorient
C 0.0000000 0.1929272 -1.9035340
O 0.0000000 1.1595219 -1.1616236
O 0.0000000 -1.0680669 -1.5349870
H 0.0000000 0.2949802 -2.9949776
H 0.0000000 -1.1409414 -0.5399614
@C 0.0000000 -0.1929272 1.9035340
@O 0.0000000 -1.1595219 1.1616236
@O 0.0000000 1.0680669 1.5349870
@H 0.0000000 -0.2949802 2.9949776
@H 0.0000000 1.1409414 0.5399614
units bohr
symmetry c1
""")
Full_Molec.set_name("Large")
#Psi4 Options:
psi4.set_options({
# 'DFT_SPHERICAL_POINTS': 434,
# 'DFT_RADIAL_POINTS': 99,
'REFERENCE' : 'UKS'})
#Make fragment calculations:
f1 = pdft.U_Molecule(Monomer_2, basis, functional)
f2 = pdft.U_Molecule(Monomer_1, basis, functional)
mol = pdft.U_Molecule(Full_Molec, basis, functional)
#Start a pdft systemm, and perform calculation to find vp
pdfter = pdft.U_Embedding([f1, f2], mol)
# pdfter.find_vp_response(maxiter=25, beta=0.1, svd_rcond=1e-4)
pdfter.find_vp_response_1basis(49)
# pdfter.find_vp_scipy_1basis(maxiter=42, regularization_constant=1e-4, opt_method="trust-ncg")
#%% 2 basis 2D plot
# vp_psi4 = psi4.core.Matrix.from_array(pdfter.vp[0])
# L = [4.0, 4.0, 4.0]
# D = [0.05, 0.2, 0.2]
# # Plot file
# O, N = libcubeprop.build_grid(mol.wfn, L, D)
# block, points, nxyz, npoints = libcubeprop.populate_grid(mol.wfn, O, N, D)
# vp_cube = libcubeprop.compute_density(mol.wfn, O, N, D, npoints, points, nxyz, block, vp_psi4)
# f, ax = plt.subplots(1, 1, figsize=(16, 12), dpi=160)
# p = ax.imshow(vp_cube[81, :, :], interpolation="bicubic")
# ax.set_title("vp svd_rond=1e-5" + basis + functional)
# f.colorbar(p, ax=ax)
# f.show()
#%% 1 basis 2D plot
L = [2.0, 4.0, 4.0]
D = [0.05, 0.2, 0.2]
# Plot file
O, N = libcubeprop.build_grid(mol.wfn, L, D)
block, points, nxyz, npoints = libcubeprop.populate_grid(mol.wfn, O, N, D)
vp_cube = libcubeprop.compute_density_1basis(mol.wfn, O, N, D, npoints, points, nxyz, block, pdfter.vp[0])
f, ax = plt.subplots(1, 1, figsize=(16, 12), dpi=160)
p = ax.imshow(vp_cube[40, :, :], interpolation="bicubic", cmap="Spectral")
atoms = libcubeprop.get_atoms(mol.wfn, D, O)
ax.scatter(atoms[:,3], atoms[:,2])
ax.set_title("vp" + title)
f.colorbar(p, ax=ax)
f.show()
f.savefig("vp" + title)
dD = psi4.core.Matrix.from_array(pdfter.fragments_Da + pdfter.fragments_Db - mol.Da.np - mol.Db.np)
dn_cube = libcubeprop.compute_density(mol.wfn, O, N, D, npoints, points, nxyz, block, dD)
f, ax = plt.subplots(1, 1, figsize=(16, 12), dpi=160)
p = ax.imshow(dn_cube[40, :, :], interpolation="bicubic", cmap="Spectral")
atoms = libcubeprop.get_atoms(mol.wfn, D, O)
ax.scatter(atoms[:,3], atoms[:,2])
ax.set_title("dn" + title)
f.colorbar(p, ax=ax)
f.show()
f.savefig("dn" + title) | 32.216 | 106 | 0.667991 |
8c996e2fdc60b4f02c62bbbe1327c4969b4500a9 | 10,554 | py | Python | odoo-13.0/addons/crm_iap_lead/models/crm_iap_lead_mining_request.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | null | null | null | odoo-13.0/addons/crm_iap_lead/models/crm_iap_lead_mining_request.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | null | null | null | odoo-13.0/addons/crm_iap_lead/models/crm_iap_lead_mining_request.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
from odoo import api, fields, models, _
from odoo.exceptions import ValidationError
from odoo.addons.iap import jsonrpc, InsufficientCreditError
_logger = logging.getLogger(__name__)
# Fallback IAP endpoint, used when the 'reveal.endpoint' system parameter is
# not set (see _perform_request).
DEFAULT_ENDPOINT = 'https://iap-services.odoo.com'
# Hard caps enforced by the onchange handlers on a mining request.
MAX_LEAD = 200
MAX_CONTACT = 5
# IAP credit cost per generated company / per extracted contact
# (used by _compute_tooltip to estimate the total request cost).
CREDIT_PER_COMPANY = 1
CREDIT_PER_CONTACT = 1
class CRMLeadMiningRequest(models.Model):
    """A request to the IAP lead-mining service.

    Stores the search criteria (target type, country/state/industry and
    company-size filters, optional contact filters), sends them to the IAP
    endpoint on submission, and materializes the response as crm.lead
    records linked back to this request via lead_mining_request_id.
    """
    _name = 'crm.iap.lead.mining.request'
    _description = 'CRM Lead Mining Request'

    def _default_lead_type(self):
        # Companies that do not use the "lead" stage get opportunities directly.
        if self.env.user.has_group('crm.group_use_lead'):
            return 'lead'
        else:
            return 'opportunity'

    # 'name' stays _('New') until the sequence is assigned in action_submit().
    name = fields.Char(string='Request Number', required=True, readonly=True, default=lambda self: _('New'), copy=False)
    state = fields.Selection([('draft', 'Draft'), ('done', 'Done'), ('error', 'Error')], string='Status', required=True, default='draft')

    # Request Data
    lead_number = fields.Integer(string='Number of Leads', required=True, default=10)
    search_type = fields.Selection([('companies', 'Companies'), ('people', 'Companies and their Contacts')], string='Target', required=True, default='companies')
    error = fields.Text(string='Error', readonly=True)

    # Lead / Opportunity Data
    lead_type = fields.Selection([('lead', 'Lead'), ('opportunity', 'Opportunity')], string='Type', required=True, default=_default_lead_type)
    team_id = fields.Many2one('crm.team', string='Sales Team', domain="[('use_opportunities', '=', True)]")
    user_id = fields.Many2one('res.users', string='Salesperson')
    tag_ids = fields.Many2many('crm.lead.tag', string='Tags')
    lead_ids = fields.One2many('crm.lead', 'lead_mining_request_id', string='Generated Lead / Opportunity')
    leads_count = fields.Integer(compute='_compute_leads_count', string='Number of Generated Leads')

    # Company Criteria Filter
    filter_on_size = fields.Boolean(string='Filter on Size', default=False)
    company_size_min = fields.Integer(string='Size', default=1)
    company_size_max = fields.Integer(default=1000)
    country_ids = fields.Many2many('res.country', string='Countries')
    state_ids = fields.Many2many('res.country.state', string='States')
    industry_ids = fields.Many2many('crm.iap.lead.industry', string='Industries')

    # Contact Generation Filter
    contact_number = fields.Integer(string='Number of Contacts', default=1)
    contact_filter_type = fields.Selection([('role', 'Role'), ('seniority', 'Seniority')], string='Filter on', default='role')
    preferred_role_id = fields.Many2one('crm.iap.lead.role', string='Preferred Role')
    role_ids = fields.Many2many('crm.iap.lead.role', string='Other Roles')
    seniority_id = fields.Many2one('crm.iap.lead.seniority', string='Seniority')

    # Fields for the blue tooltip
    lead_credits = fields.Char(compute='_compute_tooltip', readonly=True)
    lead_contacts_credits = fields.Char(compute='_compute_tooltip', readonly=True)
    lead_total_credits = fields.Char(compute='_compute_tooltip', readonly=True)

    # NOTE(review): decorated with @api.onchange although the three fields above
    # declare it as their compute method -- this matches the upstream module;
    # confirm before changing it to @api.depends.
    @api.onchange('lead_number', 'contact_number')
    def _compute_tooltip(self):
        for record in self:
            company_credits = CREDIT_PER_COMPANY * record.lead_number
            contact_credits = CREDIT_PER_CONTACT * record.contact_number
            total_contact_credits = contact_credits * record.lead_number
            record.lead_contacts_credits = _("Up to %d additional credits will be consumed to identify %d contacts per company.") % (contact_credits*company_credits, record.contact_number)
            record.lead_credits = _('%d credits will be consumed to find %d companies.') % (company_credits, record.lead_number)
            record.lead_total_credits = _("This makes a total of %d credits for this request.") % (total_contact_credits + company_credits)

    @api.depends('lead_ids')
    def _compute_leads_count(self):
        # Simple counter over the one2many of generated leads.
        for req in self:
            req.leads_count = len(req.lead_ids)

    @api.onchange('lead_number')
    def _onchange_lead_number(self):
        # Clamp the requested lead count to [1, MAX_LEAD].
        if self.lead_number <= 0:
            self.lead_number = 1
        elif self.lead_number > MAX_LEAD:
            self.lead_number = MAX_LEAD

    @api.onchange('contact_number')
    def _onchange_contact_number(self):
        # Clamp the requested contact count to [1, MAX_CONTACT].
        if self.contact_number <= 0:
            self.contact_number = 1
        elif self.contact_number > MAX_CONTACT:
            self.contact_number = MAX_CONTACT

    @api.onchange('country_ids')
    def _onchange_country_ids(self):
        # The selectable states depend on the countries; reset them on change.
        self.state_ids = []

    @api.onchange('company_size_min')
    def _onchange_company_size_min(self):
        # Keep 1 <= company_size_min <= company_size_max.
        if self.company_size_min <= 0:
            self.company_size_min = 1
        elif self.company_size_min > self.company_size_max:
            self.company_size_min = self.company_size_max

    @api.onchange('company_size_max')
    def _onchange_company_size_max(self):
        # Keep company_size_max >= company_size_min.
        if self.company_size_max < self.company_size_min:
            self.company_size_max = self.company_size_min

    def _prepare_iap_payload(self):
        """
        This will prepare the data to send to the server
        """
        self.ensure_one()
        payload = {'lead_number': self.lead_number,
                   'search_type': self.search_type,
                   'countries': self.country_ids.mapped('code')}
        if self.state_ids:
            payload['states'] = self.state_ids.mapped('code')
        if self.filter_on_size:
            payload.update({'company_size_min': self.company_size_min,
                            'company_size_max': self.company_size_max})
        if self.industry_ids:
            payload['industry_ids'] = self.industry_ids.mapped('reveal_id')
        if self.search_type == 'people':
            # Contact filters are only meaningful when contacts are requested.
            payload.update({'contact_number': self.contact_number,
                            'contact_filter_type': self.contact_filter_type})
            if self.contact_filter_type == 'role':
                payload.update({'preferred_role': self.preferred_role_id.reveal_id,
                                'other_roles': self.role_ids.mapped('reveal_id')})
            elif self.contact_filter_type == 'seniority':
                payload['seniority'] = self.seniority_id.reveal_id
        return payload

    def _perform_request(self):
        """
        This will perform the request and create the corresponding leads.
        The user will be notified if he hasn't enough credits.
        """
        server_payload = self._prepare_iap_payload()
        reveal_account = self.env['iap.account'].get('reveal')
        dbuuid = self.env['ir.config_parameter'].sudo().get_param('database.uuid')
        # The endpoint can be overridden through the 'reveal.endpoint' parameter.
        endpoint = self.env['ir.config_parameter'].sudo().get_param('reveal.endpoint', DEFAULT_ENDPOINT) + '/iap/clearbit/1/lead_mining_request'
        params = {
            'account_token': reveal_account.account_token,
            'dbuuid': dbuuid,
            'data': server_payload
        }
        try:
            response = jsonrpc(endpoint, params=params, timeout=300)
            return response['data']
        except InsufficientCreditError as e:
            self.error = 'Insufficient credits. Recharge your account and retry.'
            self.state = 'error'
            # Persist the error state before re-raising: the rollback triggered
            # by the exception would otherwise discard it.
            self._cr.commit()
            raise e

    def _create_leads_from_response(self, result):
        """ This method will get the response from the service and create the leads accordingly """
        self.ensure_one()
        lead_vals = []
        messages_to_post = {}
        for data in result:
            lead_vals.append(self._lead_vals_from_response(data))
            messages_to_post[data['company_data']['clearbit_id']] = self.env['crm.iap.lead.helpers'].format_data_for_message_post(data['company_data'], data.get('people_data'))
        leads = self.env['crm.lead'].create(lead_vals)
        # Attach the formatted company/contact details as a note on each lead,
        # matched back to its payload through the clearbit/reveal id.
        for lead in leads:
            if messages_to_post.get(lead.reveal_id):
                lead.message_post_with_view('crm_iap_lead.lead_message_template', values=messages_to_post[lead.reveal_id], subtype_id=self.env.ref('mail.mt_note').id)

    # Methods responsible for format response data into valid odoo lead data
    @api.model
    def _lead_vals_from_response(self, data):
        # Build the crm.lead create() values for one payload entry and link
        # the lead back to this mining request.
        self.ensure_one()
        company_data = data.get('company_data')
        people_data = data.get('people_data')
        lead_vals = self.env['crm.iap.lead.helpers'].lead_vals_from_response(self.lead_type, self.team_id.id, self.tag_ids.ids, self.user_id.id, company_data, people_data)
        lead_vals['lead_mining_request_id'] = self.id
        return lead_vals

    @api.model
    def get_empty_list_help(self, help):
        # NOTE(review): the incoming 'help' argument is ignored and fully
        # replaced -- this matches the upstream module.
        help_title = _('Create a Lead Mining Request')
        sub_title = _('Generate new leads based on their country, industry, size, etc.')
        return '<p class="o_view_nocontent_smiling_face">%s</p><p class="oe_view_nocontent_alias">%s</p>' % (help_title, sub_title)

    def action_draft(self):
        # Reset the request so it can be edited and submitted again.
        self.ensure_one()
        self.name = _('New')
        self.state = 'draft'

    def action_submit(self):
        # Assign the sequence number lazily (on first submission), call the
        # IAP service, create the leads, then open the relevant list view.
        self.ensure_one()
        if self.name == _('New'):
            self.name = self.env['ir.sequence'].next_by_code('crm.iap.lead.mining.request') or _('New')
        results = self._perform_request()
        if results:
            self._create_leads_from_response(results)
            self.state = 'done'
        if self.lead_type == 'lead':
            return self.action_get_lead_action()
        elif self.lead_type == 'opportunity':
            return self.action_get_opportunity_action()

    def action_get_lead_action(self):
        """Window action restricted to the leads generated by this request."""
        self.ensure_one()
        action = self.env.ref('crm.crm_lead_all_leads').read()[0]
        action['domain'] = [('id', 'in', self.lead_ids.ids), ('type', '=', 'lead')]
        action['help'] = _("""<p class="o_view_nocontent_empty_folder">
            No leads found
        </p><p>
            No leads could be generated according to your search criteria
        </p>""")
        return action

    def action_get_opportunity_action(self):
        """Window action restricted to the opportunities generated by this request."""
        self.ensure_one()
        action = self.env.ref('crm.crm_lead_opportunities').read()[0]
        action['domain'] = [('id', 'in', self.lead_ids.ids), ('type', '=', 'opportunity')]
        action['help'] = _("""<p class="o_view_nocontent_empty_folder">
            No opportunities found
        </p><p>
            No opportunities could be generated according to your search criteria
        </p>""")
        return action
| 46.289474 | 188 | 0.659939 |
d0b44e2536a7f99c4b95113bf18c83b3224f156e | 21,761 | py | Python | lib/googlecloudsdk/third_party/apis/clouduseraccounts/beta/clouduseraccounts_beta_client.py | bopopescu/SDK | e6d9aaee2456f706d1d86e8ec2a41d146e33550d | [
"Apache-2.0"
] | null | null | null | lib/googlecloudsdk/third_party/apis/clouduseraccounts/beta/clouduseraccounts_beta_client.py | bopopescu/SDK | e6d9aaee2456f706d1d86e8ec2a41d146e33550d | [
"Apache-2.0"
] | null | null | null | lib/googlecloudsdk/third_party/apis/clouduseraccounts/beta/clouduseraccounts_beta_client.py | bopopescu/SDK | e6d9aaee2456f706d1d86e8ec2a41d146e33550d | [
"Apache-2.0"
] | 1 | 2020-07-25T12:23:41.000Z | 2020-07-25T12:23:41.000Z | """Generated client library for clouduseraccounts version beta."""
# NOTE: This file is autogenerated and should not be edited by hand.
from googlecloudsdk.third_party.apitools.base.py import base_api
from googlecloudsdk.third_party.apis.clouduseraccounts.beta import clouduseraccounts_beta_messages as messages
class ClouduseraccountsBeta(base_api.BaseApiClient):
  """Generated client library for service clouduseraccounts version beta."""

  # Client-level constants consumed by apitools' BaseApiClient
  # (OAuth scopes, client id/secret, API version and base URL).
  MESSAGES_MODULE = messages

  _PACKAGE = u'clouduseraccounts'
  _SCOPES = [u'https://www.googleapis.com/auth/cloud-platform', u'https://www.googleapis.com/auth/cloud-platform.read-only', u'https://www.googleapis.com/auth/cloud.useraccounts', u'https://www.googleapis.com/auth/cloud.useraccounts.readonly']
  _VERSION = u'beta'
  _CLIENT_ID = '1042881264118.apps.googleusercontent.com'
  _CLIENT_SECRET = 'x_Tw5K8nnjoRAqULM9PFAC2b'
  _USER_AGENT = 'x_Tw5K8nnjoRAqULM9PFAC2b'
  _CLIENT_CLASS_NAME = u'ClouduseraccountsBeta'
  _URL_VERSION = u'beta'
  _API_KEY = None

  def __init__(self, url='', credentials=None,
               get_credentials=True, http=None, model=None,
               log_request=False, log_response=False,
               credentials_args=None, default_global_params=None,
               additional_http_headers=None):
    """Create a new clouduseraccounts handle."""
    url = url or u'https://www.googleapis.com/clouduseraccounts/beta/'
    super(ClouduseraccountsBeta, self).__init__(
        url, credentials=credentials,
        get_credentials=get_credentials, http=http, model=model,
        log_request=log_request, log_response=log_response,
        credentials_args=credentials_args,
        default_global_params=default_global_params,
        additional_http_headers=additional_http_headers)
    # One service wrapper per API collection.
    self.globalAccountsOperations = self.GlobalAccountsOperationsService(self)
    self.groups = self.GroupsService(self)
    self.linux = self.LinuxService(self)
    self.users = self.UsersService(self)
  class GlobalAccountsOperationsService(base_api.BaseApiService):
    """Service class for the globalAccountsOperations resource."""

    _NAME = u'globalAccountsOperations'

    def __init__(self, client):
      super(ClouduseraccountsBeta.GlobalAccountsOperationsService, self).__init__(client)
      # Declarative wire description of each RPC (HTTP verb, URL template,
      # request/response message types); interpreted by base_api._RunMethod.
      self._method_configs = {
          'Delete': base_api.ApiMethodInfo(
              http_method=u'DELETE',
              method_id=u'clouduseraccounts.globalAccountsOperations.delete',
              ordered_params=[u'project', u'operation'],
              path_params=[u'operation', u'project'],
              query_params=[],
              relative_path=u'projects/{project}/global/operations/{operation}',
              request_field='',
              request_type_name=u'ClouduseraccountsGlobalAccountsOperationsDeleteRequest',
              response_type_name=u'ClouduseraccountsGlobalAccountsOperationsDeleteResponse',
              supports_download=False,
          ),
          'Get': base_api.ApiMethodInfo(
              http_method=u'GET',
              method_id=u'clouduseraccounts.globalAccountsOperations.get',
              ordered_params=[u'project', u'operation'],
              path_params=[u'operation', u'project'],
              query_params=[],
              relative_path=u'projects/{project}/global/operations/{operation}',
              request_field='',
              request_type_name=u'ClouduseraccountsGlobalAccountsOperationsGetRequest',
              response_type_name=u'Operation',
              supports_download=False,
          ),
          'List': base_api.ApiMethodInfo(
              http_method=u'GET',
              method_id=u'clouduseraccounts.globalAccountsOperations.list',
              ordered_params=[u'project'],
              path_params=[u'project'],
              query_params=[u'filter', u'maxResults', u'orderBy', u'pageToken'],
              relative_path=u'projects/{project}/global/operations',
              request_field='',
              request_type_name=u'ClouduseraccountsGlobalAccountsOperationsListRequest',
              response_type_name=u'OperationList',
              supports_download=False,
          ),
          }
      # This service exposes no media-upload methods.
      self._upload_configs = {
          }

    def Delete(self, request, global_params=None):
      """Deletes the specified operation resource.

      Args:
        request: (ClouduseraccountsGlobalAccountsOperationsDeleteRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (ClouduseraccountsGlobalAccountsOperationsDeleteResponse) The response message.
      """
      config = self.GetMethodConfig('Delete')
      return self._RunMethod(
          config, request, global_params=global_params)

    def Get(self, request, global_params=None):
      """Retrieves the specified operation resource.

      Args:
        request: (ClouduseraccountsGlobalAccountsOperationsGetRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Operation) The response message.
      """
      config = self.GetMethodConfig('Get')
      return self._RunMethod(
          config, request, global_params=global_params)

    def List(self, request, global_params=None):
      """Retrieves the list of operation resources contained within the specified project.

      Args:
        request: (ClouduseraccountsGlobalAccountsOperationsListRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (OperationList) The response message.
      """
      config = self.GetMethodConfig('List')
      return self._RunMethod(
          config, request, global_params=global_params)
  class GroupsService(base_api.BaseApiService):
    """Service class for the groups resource."""

    _NAME = u'groups'

    def __init__(self, client):
      super(ClouduseraccountsBeta.GroupsService, self).__init__(client)
      # Declarative wire description of each RPC (HTTP verb, URL template,
      # request/response message types); interpreted by base_api._RunMethod.
      self._method_configs = {
          'AddMember': base_api.ApiMethodInfo(
              http_method=u'POST',
              method_id=u'clouduseraccounts.groups.addMember',
              ordered_params=[u'project', u'groupName'],
              path_params=[u'groupName', u'project'],
              query_params=[],
              relative_path=u'projects/{project}/global/groups/{groupName}/addMember',
              request_field=u'groupsAddMemberRequest',
              request_type_name=u'ClouduseraccountsGroupsAddMemberRequest',
              response_type_name=u'Operation',
              supports_download=False,
          ),
          'Delete': base_api.ApiMethodInfo(
              http_method=u'DELETE',
              method_id=u'clouduseraccounts.groups.delete',
              ordered_params=[u'project', u'groupName'],
              path_params=[u'groupName', u'project'],
              query_params=[],
              relative_path=u'projects/{project}/global/groups/{groupName}',
              request_field='',
              request_type_name=u'ClouduseraccountsGroupsDeleteRequest',
              response_type_name=u'Operation',
              supports_download=False,
          ),
          'Get': base_api.ApiMethodInfo(
              http_method=u'GET',
              method_id=u'clouduseraccounts.groups.get',
              ordered_params=[u'project', u'groupName'],
              path_params=[u'groupName', u'project'],
              query_params=[],
              relative_path=u'projects/{project}/global/groups/{groupName}',
              request_field='',
              request_type_name=u'ClouduseraccountsGroupsGetRequest',
              response_type_name=u'Group',
              supports_download=False,
          ),
          'Insert': base_api.ApiMethodInfo(
              http_method=u'POST',
              method_id=u'clouduseraccounts.groups.insert',
              ordered_params=[u'project'],
              path_params=[u'project'],
              query_params=[],
              relative_path=u'projects/{project}/global/groups',
              request_field=u'group',
              request_type_name=u'ClouduseraccountsGroupsInsertRequest',
              response_type_name=u'Operation',
              supports_download=False,
          ),
          'List': base_api.ApiMethodInfo(
              http_method=u'GET',
              method_id=u'clouduseraccounts.groups.list',
              ordered_params=[u'project'],
              path_params=[u'project'],
              query_params=[u'filter', u'maxResults', u'orderBy', u'pageToken'],
              relative_path=u'projects/{project}/global/groups',
              request_field='',
              request_type_name=u'ClouduseraccountsGroupsListRequest',
              response_type_name=u'GroupList',
              supports_download=False,
          ),
          'RemoveMember': base_api.ApiMethodInfo(
              http_method=u'POST',
              method_id=u'clouduseraccounts.groups.removeMember',
              ordered_params=[u'project', u'groupName'],
              path_params=[u'groupName', u'project'],
              query_params=[],
              relative_path=u'projects/{project}/global/groups/{groupName}/removeMember',
              request_field=u'groupsRemoveMemberRequest',
              request_type_name=u'ClouduseraccountsGroupsRemoveMemberRequest',
              response_type_name=u'Operation',
              supports_download=False,
          ),
          }
      # This service exposes no media-upload methods.
      self._upload_configs = {
          }

    def AddMember(self, request, global_params=None):
      """Adds users to the specified group.

      Args:
        request: (ClouduseraccountsGroupsAddMemberRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Operation) The response message.
      """
      config = self.GetMethodConfig('AddMember')
      return self._RunMethod(
          config, request, global_params=global_params)

    def Delete(self, request, global_params=None):
      """Deletes the specified Group resource.

      Args:
        request: (ClouduseraccountsGroupsDeleteRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Operation) The response message.
      """
      config = self.GetMethodConfig('Delete')
      return self._RunMethod(
          config, request, global_params=global_params)

    def Get(self, request, global_params=None):
      """Returns the specified Group resource.

      Args:
        request: (ClouduseraccountsGroupsGetRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Group) The response message.
      """
      config = self.GetMethodConfig('Get')
      return self._RunMethod(
          config, request, global_params=global_params)

    def Insert(self, request, global_params=None):
      """Creates a Group resource in the specified project using the data included in the request.

      Args:
        request: (ClouduseraccountsGroupsInsertRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Operation) The response message.
      """
      config = self.GetMethodConfig('Insert')
      return self._RunMethod(
          config, request, global_params=global_params)

    def List(self, request, global_params=None):
      """Retrieves the list of groups contained within the specified project.

      Args:
        request: (ClouduseraccountsGroupsListRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (GroupList) The response message.
      """
      config = self.GetMethodConfig('List')
      return self._RunMethod(
          config, request, global_params=global_params)

    def RemoveMember(self, request, global_params=None):
      """Removes users from the specified group.

      Args:
        request: (ClouduseraccountsGroupsRemoveMemberRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Operation) The response message.
      """
      config = self.GetMethodConfig('RemoveMember')
      return self._RunMethod(
          config, request, global_params=global_params)
  class LinuxService(base_api.BaseApiService):
    """Service class for the linux resource."""

    _NAME = u'linux'

    def __init__(self, client):
      super(ClouduseraccountsBeta.LinuxService, self).__init__(client)
      # Declarative wire description of each RPC (HTTP verb, URL template,
      # request/response message types); interpreted by base_api._RunMethod.
      self._method_configs = {
          'GetAuthorizedKeysView': base_api.ApiMethodInfo(
              http_method=u'POST',
              method_id=u'clouduseraccounts.linux.getAuthorizedKeysView',
              ordered_params=[u'project', u'zone', u'user', u'instance'],
              path_params=[u'project', u'user', u'zone'],
              query_params=[u'instance', u'login'],
              relative_path=u'projects/{project}/zones/{zone}/authorizedKeysView/{user}',
              request_field='',
              request_type_name=u'ClouduseraccountsLinuxGetAuthorizedKeysViewRequest',
              response_type_name=u'LinuxGetAuthorizedKeysViewResponse',
              supports_download=False,
          ),
          'GetLinuxAccountViews': base_api.ApiMethodInfo(
              http_method=u'POST',
              method_id=u'clouduseraccounts.linux.getLinuxAccountViews',
              ordered_params=[u'project', u'zone', u'instance'],
              path_params=[u'project', u'zone'],
              query_params=[u'filter', u'instance', u'maxResults', u'orderBy', u'pageToken'],
              relative_path=u'projects/{project}/zones/{zone}/linuxAccountViews',
              request_field='',
              request_type_name=u'ClouduseraccountsLinuxGetLinuxAccountViewsRequest',
              response_type_name=u'LinuxGetLinuxAccountViewsResponse',
              supports_download=False,
          ),
          }
      # This service exposes no media-upload methods.
      self._upload_configs = {
          }

    def GetAuthorizedKeysView(self, request, global_params=None):
      """Returns a list of authorized public keys for a specific user account.

      Args:
        request: (ClouduseraccountsLinuxGetAuthorizedKeysViewRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (LinuxGetAuthorizedKeysViewResponse) The response message.
      """
      config = self.GetMethodConfig('GetAuthorizedKeysView')
      return self._RunMethod(
          config, request, global_params=global_params)

    def GetLinuxAccountViews(self, request, global_params=None):
      """Retrieves a list of user accounts for an instance within a specific project.

      Args:
        request: (ClouduseraccountsLinuxGetLinuxAccountViewsRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (LinuxGetLinuxAccountViewsResponse) The response message.
      """
      config = self.GetMethodConfig('GetLinuxAccountViews')
      return self._RunMethod(
          config, request, global_params=global_params)
class UsersService(base_api.BaseApiService):
"""Service class for the users resource."""
_NAME = u'users'
def __init__(self, client):
super(ClouduseraccountsBeta.UsersService, self).__init__(client)
self._method_configs = {
'AddPublicKey': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'clouduseraccounts.users.addPublicKey',
ordered_params=[u'project', u'user'],
path_params=[u'project', u'user'],
query_params=[],
relative_path=u'projects/{project}/global/users/{user}/addPublicKey',
request_field=u'publicKey',
request_type_name=u'ClouduseraccountsUsersAddPublicKeyRequest',
response_type_name=u'Operation',
supports_download=False,
),
'Delete': base_api.ApiMethodInfo(
http_method=u'DELETE',
method_id=u'clouduseraccounts.users.delete',
ordered_params=[u'project', u'user'],
path_params=[u'project', u'user'],
query_params=[],
relative_path=u'projects/{project}/global/users/{user}',
request_field='',
request_type_name=u'ClouduseraccountsUsersDeleteRequest',
response_type_name=u'Operation',
supports_download=False,
),
'Get': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'clouduseraccounts.users.get',
ordered_params=[u'project', u'user'],
path_params=[u'project', u'user'],
query_params=[],
relative_path=u'projects/{project}/global/users/{user}',
request_field='',
request_type_name=u'ClouduseraccountsUsersGetRequest',
response_type_name=u'User',
supports_download=False,
),
'Insert': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'clouduseraccounts.users.insert',
ordered_params=[u'project'],
path_params=[u'project'],
query_params=[],
relative_path=u'projects/{project}/global/users',
request_field=u'user',
request_type_name=u'ClouduseraccountsUsersInsertRequest',
response_type_name=u'Operation',
supports_download=False,
),
'List': base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'clouduseraccounts.users.list',
ordered_params=[u'project'],
path_params=[u'project'],
query_params=[u'filter', u'maxResults', u'orderBy', u'pageToken'],
relative_path=u'projects/{project}/global/users',
request_field='',
request_type_name=u'ClouduseraccountsUsersListRequest',
response_type_name=u'UserList',
supports_download=False,
),
'RemovePublicKey': base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'clouduseraccounts.users.removePublicKey',
ordered_params=[u'project', u'user', u'fingerprint'],
path_params=[u'project', u'user'],
query_params=[u'fingerprint'],
relative_path=u'projects/{project}/global/users/{user}/removePublicKey',
request_field='',
request_type_name=u'ClouduseraccountsUsersRemovePublicKeyRequest',
response_type_name=u'Operation',
supports_download=False,
),
}
self._upload_configs = {
}
def AddPublicKey(self, request, global_params=None):
"""Adds a public key to the specified User resource with the data included in the request.
Args:
request: (ClouduseraccountsUsersAddPublicKeyRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('AddPublicKey')
return self._RunMethod(
config, request, global_params=global_params)
def Delete(self, request, global_params=None):
"""Deletes the specified User resource.
Args:
request: (ClouduseraccountsUsersDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
def Get(self, request, global_params=None):
    """Returns the specified User resource.

    Args:
      request: (ClouduseraccountsUsersGetRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (User) The response message.
    """
    # Resolve the method descriptor once and delegate to the shared runner.
    return self._RunMethod(
        self.GetMethodConfig('Get'), request, global_params=global_params)
def Insert(self, request, global_params=None):
    """Creates a User resource in the specified project using the data included in the request.

    Args:
      request: (ClouduseraccountsUsersInsertRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (Operation) The response message.
    """
    # Resolve the method descriptor once and delegate to the shared runner.
    return self._RunMethod(
        self.GetMethodConfig('Insert'), request, global_params=global_params)
def List(self, request, global_params=None):
    """Retrieves a list of users contained within the specified project.

    Args:
      request: (ClouduseraccountsUsersListRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (UserList) The response message.
    """
    # Resolve the method descriptor once and delegate to the shared runner.
    return self._RunMethod(
        self.GetMethodConfig('List'), request, global_params=global_params)
def RemovePublicKey(self, request, global_params=None):
    """Removes the specified public key from the user.

    Args:
      request: (ClouduseraccountsUsersRemovePublicKeyRequest) input message
      global_params: (StandardQueryParameters, default: None) global arguments

    Returns:
      (Operation) The response message.
    """
    # Resolve the method descriptor once and delegate to the shared runner.
    return self._RunMethod(
        self.GetMethodConfig('RemovePublicKey'), request, global_params=global_params)
| 42.090909 | 243 | 0.649373 |
dd994ec9abab8f6574e4b170199b4aded8d88246 | 120 | py | Python | src/alexa_event_handler/__init__.py | parveenchahal/AlexaSkill_PCSongs | 2368ff6b15fe76996a2943d11a9275573c6daa7f | [
"MIT"
] | null | null | null | src/alexa_event_handler/__init__.py | parveenchahal/AlexaSkill_PCSongs | 2368ff6b15fe76996a2943d11a9275573c6daa7f | [
"MIT"
] | null | null | null | src/alexa_event_handler/__init__.py | parveenchahal/AlexaSkill_PCSongs | 2368ff6b15fe76996a2943d11a9275573c6daa7f | [
"MIT"
] | null | null | null | from ._alexa_abstract_event_handler import AbstractAlexaEventHandler
from ._alexa_event_handler import AlexaEventHandler | 60 | 68 | 0.925 |
e87eebb94a91ea2f2aff57d0161bccf2ff992694 | 58,692 | py | Python | koku/api/report/test/gcp/test_gcp_query_handler.py | bsquizz/koku | 386dd6ca4a4fd1b50790a929acc81d2dc245a91c | [
"Apache-2.0"
] | 157 | 2018-04-30T16:27:53.000Z | 2022-03-31T08:17:21.000Z | koku/api/report/test/gcp/test_gcp_query_handler.py | bsquizz/koku | 386dd6ca4a4fd1b50790a929acc81d2dc245a91c | [
"Apache-2.0"
] | 3,250 | 2018-04-26T14:14:25.000Z | 2022-03-31T23:49:15.000Z | koku/api/report/test/gcp/test_gcp_query_handler.py | bsquizz/koku | 386dd6ca4a4fd1b50790a929acc81d2dc245a91c | [
"Apache-2.0"
] | 65 | 2018-05-10T14:11:50.000Z | 2022-03-18T19:22:58.000Z | #
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""Test GCP Report Queries."""
import logging
from datetime import timedelta
from decimal import Decimal
from decimal import ROUND_HALF_UP
from unittest.mock import patch
from unittest.mock import PropertyMock
from dateutil.relativedelta import relativedelta
from django.db.models import F
from django.db.models import Sum
from django.urls import reverse
from rest_framework.exceptions import ValidationError
from tenant_schemas.utils import tenant_context
from api.iam.test.iam_test_case import IamTestCase
from api.query_filter import QueryFilter
from api.report.gcp.query_handler import GCPReportQueryHandler
from api.report.gcp.view import GCPCostView
from api.report.gcp.view import GCPInstanceTypeView
from api.report.gcp.view import GCPStorageView
from api.utils import DateHelper
from api.utils import materialized_view_month_start
from reporting.models import GCPCostEntryBill
from reporting.models import GCPCostEntryLineItemDailySummary
from reporting.models import GCPCostSummaryByAccountP
from reporting.models import GCPCostSummaryByProjectP
from reporting.models import GCPCostSummaryByServiceP
from reporting.models import GCPCostSummaryP
from reporting.models import GCPTagsSummary
LOG = logging.getLogger(__name__)
class GCPReportQueryHandlerTest(IamTestCase):
"""Tests for the GCP report query handler."""
def setUp(self):
    """Set up the customer view tests."""
    super().setUp()
    self.dh = DateHelper()
    dh = self.dh

    def invoice_months(start, end):
        # The monthly filters for GCP only use the invoice month; see
        # https://github.com/project-koku/koku/pull/3098 for details.
        return dh.gcp_find_invoice_months_in_date_range(start, end)

    self.this_month_filter = {"invoice_month__in": invoice_months(dh.this_month_start, dh.this_month_end)}
    self.ten_day_filter = {
        "usage_start__gte": dh.n_days_ago(dh.today, 9),
        "invoice_month__in": invoice_months(dh.n_days_ago(dh.today, 9), dh.today),
    }
    self.thirty_day_filter = {
        "usage_start__gte": dh.n_days_ago(dh.today, 29),
        "invoice_month__in": invoice_months(dh.n_days_ago(dh.today, 29), dh.today),
    }
    self.last_month_filter = {"invoice_month__in": invoice_months(dh.last_month_start, dh.last_month_end)}
    with tenant_context(self.tenant):
        rows = GCPCostEntryLineItemDailySummary.objects.values("service_alias").distinct()
        self.services = [row.get("service_alias") for row in rows]
def get_totals_by_time_scope(self, aggregates, filters=None):
    """Return the total aggregates for a time period (defaults to the last ten days)."""
    effective_filters = self.ten_day_filter if filters is None else filters
    with tenant_context(self.tenant):
        queryset = GCPCostEntryLineItemDailySummary.objects.filter(**effective_filters)
        return queryset.aggregate(**aggregates)
def get_totals_costs_by_time_scope(self, aggregates, filters=None):
    """Return the total cost aggregates for a time period, with Nones coerced to zero."""
    effective_filters = self.this_month_filter if filters is None else filters
    with tenant_context(self.tenant):
        totals = GCPCostEntryLineItemDailySummary.objects.filter(**effective_filters).aggregate(**aggregates)
    # Aggregates over an empty queryset come back as None; normalize to Decimal(0).
    return {key: value if value is not None else Decimal(0) for key, value in totals.items()}
def test_execute_sum_query_costs(self):
    """Test that the sum query runs properly for the costs endpoint."""
    query_params = self.mocked_query_params("?", GCPCostView)
    handler = GCPReportQueryHandler(query_params)
    aggregates = handler._mapper.report_type_map.get("aggregates")
    expected = self.get_totals_costs_by_time_scope(aggregates, self.ten_day_filter)
    report = handler.execute_query()
    self.assertIsNotNone(report.get("data"))
    totals = report.get("total")
    self.assertIsNotNone(totals)
    # Index the expected value directly so a missing key fails loudly instead
    # of comparing two empty .get() defaults as equal.
    self.assertEqual(totals.get("cost", {}).get("total").get("value"), expected["cost_total"])
def test_execute_take_defaults(self):
    """Test execute_query with all default query parameters."""
    query_params = self.mocked_query_params("?", GCPCostView)
    handler = GCPReportQueryHandler(query_params)
    report = handler.execute_query()
    self.assertIsNotNone(report.get("data"))
    totals = report.get("total")
    self.assertIsNotNone(totals)
    self.assertIsNotNone(totals.get("cost"))
def test_execute_query_current_month_daily(self):
    """Test execute_query for current month on daily breakdown."""
    url = "?filter[time_scope_units]=month&filter[time_scope_value]=-1&filter[resolution]=daily"
    query_params = self.mocked_query_params(url, GCPCostView)
    handler = GCPReportQueryHandler(query_params)
    report = handler.execute_query()
    self.assertIsNotNone(report.get("data"))
    totals = report.get("total")
    self.assertIsNotNone(totals)
    aggregates = handler._mapper.report_type_map.get("aggregates")
    expected = self.get_totals_costs_by_time_scope(aggregates, self.this_month_filter)
    self.assertIsNotNone(totals.get("cost"))
    self.assertEqual(totals.get("cost", {}).get("total").get("value"), expected["cost_total"])
def test_execute_query_current_month_monthly(self):
    """Test execute_query for current month on monthly breakdown.

    The URL previously requested ``resolution=daily``, which made this test
    an exact duplicate of test_execute_query_current_month_daily; request a
    monthly resolution as the test name and docstring intend.
    """
    url = "?filter[time_scope_units]=month&filter[time_scope_value]=-1&filter[resolution]=monthly"
    query_params = self.mocked_query_params(url, GCPCostView)
    handler = GCPReportQueryHandler(query_params)
    query_output = handler.execute_query()
    self.assertIsNotNone(query_output.get("data"))
    self.assertIsNotNone(query_output.get("total"))
    total = query_output.get("total")
    aggregates = handler._mapper.report_type_map.get("aggregates")
    # Totals are independent of resolution, so the expected value is unchanged.
    current_totals = self.get_totals_costs_by_time_scope(aggregates, self.this_month_filter)
    self.assertIsNotNone(total.get("cost"))
    self.assertEqual(total.get("cost", {}).get("total").get("value"), current_totals["cost_total"])
def test_execute_query_current_month_by_service(self):
    """Test execute_query for current month on monthly breakdown by service."""
    with tenant_context(self.tenant):
        service_rows = GCPCostEntryLineItemDailySummary.objects.values_list("service_alias").distinct()
        valid_services = [row[0] for row in service_rows]
    url = "?filter[time_scope_units]=month&filter[time_scope_value]=-1&filter[resolution]=monthly&group_by[service]=*"  # noqa: E501
    query_params = self.mocked_query_params(url, GCPCostView)
    handler = GCPReportQueryHandler(query_params)
    report = handler.execute_query()
    results = report.get("data")
    totals = report.get("total")
    self.assertIsNotNone(results)
    self.assertIsNotNone(totals)
    aggregates = handler._mapper.report_type_map.get("aggregates")
    expected = self.get_totals_costs_by_time_scope(aggregates, self.this_month_filter)
    self.assertIsNotNone(totals.get("cost"))
    self.assertEqual(totals.get("cost", {}).get("total").get("value"), expected["cost_total"])
    expected_month = self.dh.this_month_start.strftime("%Y-%m")
    for row in results:
        self.assertEqual(row.get("date"), expected_month)
        services = row.get("services")
        self.assertIsInstance(services, list)
        for entry in services:
            self.assertIn(entry.get("service"), valid_services)
            self.assertIsInstance(entry.get("values"), list)
def test_query_group_by_partial_filtered_service(self):
    """Test execute_query monthly breakdown grouped by a single service."""
    # This might change as we add more gcp generators to nise
    with tenant_context(self.tenant):
        service_rows = GCPCostEntryLineItemDailySummary.objects.values_list("service_alias").distinct()
        valid_services = [row[0] for row in service_rows]
        service = valid_services[0]
    url = f"?filter[time_scope_units]=month&filter[time_scope_value]=-1&filter[resolution]=monthly&group_by[service]={service}"  # noqa: E501
    query_params = self.mocked_query_params(url, GCPCostView)
    handler = GCPReportQueryHandler(query_params)
    report = handler.execute_query()
    results = report.get("data")
    totals = report.get("total")
    self.assertIsNotNone(results)
    self.assertIsNotNone(totals)
    aggregates = handler._mapper.report_type_map.get("aggregates")
    # Mirror the handler's report-type filters on top of the service filter.
    filters = dict(self.this_month_filter)
    filters["service_alias__icontains"] = service
    for filt in handler._mapper.report_type_map.get("filter"):
        if not filt:
            continue
        query_filter = QueryFilter(**filt)
        filters[query_filter.composed_query_string()] = query_filter.parameter
    expected = self.get_totals_costs_by_time_scope(aggregates, filters)
    self.assertIsNotNone(totals.get("cost"))
    self.assertEqual(totals.get("cost", {}).get("total").get("value"), expected["cost_total"])
    expected_month = self.dh.this_month_start.strftime("%Y-%m")
    for row in results:
        self.assertEqual(row.get("date"), expected_month)
        services = row.get("services")
        self.assertIsInstance(services, list)
        for entry in services:
            self.assertIn(entry.get("service"), valid_services)
            self.assertIsInstance(entry.get("values"), list)
def test_execute_query_by_filtered_service(self):
    """Test execute_query monthly breakdown with a wildcard service filter."""
    url = "?filter[time_scope_units]=month&filter[time_scope_value]=-1&filter[resolution]=monthly&filter[service]=*"  # noqa: E501
    query_params = self.mocked_query_params(url, GCPCostView)
    handler = GCPReportQueryHandler(query_params)
    report = handler.execute_query()
    results = report.get("data")
    totals = report.get("total")
    self.assertIsNotNone(results)
    self.assertIsNotNone(totals)
    aggregates = handler._mapper.report_type_map.get("aggregates")
    expected = self.get_totals_costs_by_time_scope(aggregates, self.this_month_filter)
    self.assertIsNotNone(totals.get("cost"))
    self.assertEqual(totals.get("cost", {}).get("total").get("value"), expected["cost_total"])
    for row in results:
        self.assertIsInstance(row.get("values"), list)
def test_query_by_partial_filtered_service(self):
    """Test execute_query monthly breakdown filtered by one service id."""
    with tenant_context(self.tenant):
        id_rows = GCPCostEntryLineItemDailySummary.objects.values_list("service_id").distinct()
        valid_services = [row[0] for row in id_rows]
        service = valid_services[0]
    url = f"?filter[time_scope_units]=month&filter[time_scope_value]=-1&filter[resolution]=monthly&filter[service]={service}"  # noqa: E501
    query_params = self.mocked_query_params(url, GCPCostView)
    handler = GCPReportQueryHandler(query_params)
    report = handler.execute_query()
    results = report.get("data")
    totals = report.get("total")
    self.assertIsNotNone(results)
    self.assertIsNotNone(totals)
    aggregates = handler._mapper.report_type_map.get("aggregates")
    # Mirror the handler's report-type filters on top of the service filter.
    filters = dict(self.this_month_filter)
    filters["service_id__icontains"] = service
    for filt in handler._mapper.report_type_map.get("filter"):
        if not filt:
            continue
        query_filter = QueryFilter(**filt)
        filters[query_filter.composed_query_string()] = query_filter.parameter
    expected = self.get_totals_costs_by_time_scope(aggregates, filters)
    self.assertIsNotNone(totals.get("cost"))
    self.assertEqual(totals.get("cost", {}).get("total").get("value"), expected["cost_total"])
    for row in results:
        self.assertIsInstance(row.get("values"), list)
def test_execute_query_current_month_by_account(self):
    """Test execute_query for current month on monthly breakdown by account."""
    url = "?filter[time_scope_units]=month&filter[time_scope_value]=-1&filter[resolution]=monthly&group_by[account]=*"  # noqa: E501
    query_params = self.mocked_query_params(url, GCPCostView)
    handler = GCPReportQueryHandler(query_params)
    report = handler.execute_query()
    results = report.get("data")
    totals = report.get("total")
    self.assertIsNotNone(results)
    self.assertIsNotNone(totals)
    aggregates = handler._mapper.report_type_map.get("aggregates")
    expected = self.get_totals_costs_by_time_scope(aggregates, self.this_month_filter)
    self.assertIsNotNone(totals.get("cost"))
    self.assertEqual(totals.get("cost", {}).get("total").get("value"), expected["cost_total"])
    expected_month = self.dh.this_month_start.strftime("%Y-%m")
    for row in results:
        self.assertEqual(row.get("date"), expected_month)
        accounts = row.get("accounts")
        self.assertIsInstance(accounts, list)
        for entry in accounts:
            self.assertIsNotNone(entry.get("account"))
            self.assertIsInstance(entry.get("values"), list)
def test_execute_query_by_account_by_service(self):
    """Test execute_query for current month breakdown by account by service."""
    url = "?filter[time_scope_units]=month&filter[time_scope_value]=-1&filter[resolution]=monthly&group_by[account]=*&group_by[service]=*"  # noqa: E501
    query_params = self.mocked_query_params(url, GCPCostView)
    handler = GCPReportQueryHandler(query_params)
    report = handler.execute_query()
    results = report.get("data")
    totals = report.get("total")
    self.assertIsNotNone(results)
    self.assertIsNotNone(totals)
    aggregates = handler._mapper.report_type_map.get("aggregates")
    expected = self.get_totals_costs_by_time_scope(aggregates, self.this_month_filter)
    self.assertIsNotNone(totals.get("cost"))
    self.assertEqual(totals.get("cost", {}).get("total").get("value"), expected["cost_total"])
    expected_month = DateHelper().this_month_start.strftime("%Y-%m")
    for row in results:
        self.assertEqual(row.get("date"), expected_month)
        accounts = row.get("accounts")
        self.assertIsInstance(accounts, list)
        for entry in accounts:
            self.assertIsNotNone(entry.get("account"))
            # Each account is broken down further by service.
            self.assertIsInstance(entry.get("services"), list)
def test_execute_query_curr_month_by_service_w_limit(self):
    """Test execute_query for current month on monthly breakdown by service with limit."""
    # This might change as we add more gcp generators to nise
    limit = 1
    url = f"?filter[time_scope_units]=month&filter[time_scope_value]=-1&filter[resolution]=monthly&filter[limit]={limit}&group_by[service]=*"  # noqa: E501
    query_params = self.mocked_query_params(url, GCPCostView)
    handler = GCPReportQueryHandler(query_params)
    report = handler.execute_query()
    results = report.get("data")
    totals = report.get("total")
    self.assertIsNotNone(results)
    self.assertIsNotNone(totals)
    aggregates = handler._mapper.report_type_map.get("aggregates")
    expected = self.get_totals_costs_by_time_scope(aggregates, self.this_month_filter)
    self.assertIsNotNone(totals.get("cost"))
    self.assertEqual(totals.get("cost", {}).get("total").get("value"), expected["cost_total"])
    expected_month = DateHelper().this_month_start.strftime("%Y-%m")
    for row in results:
        self.assertEqual(row.get("date"), expected_month)
        services = row.get("services")
        self.assertIsInstance(services, list)
        # The limit collapses the remainder into an "Other" bucket.
        self.assertEqual(len(services), limit + 1)
        saw_other = False
        for entry in services:
            name = entry.get("service")
            self.assertIsInstance(name, str)
            if "Other" in name:
                saw_other = True
            self.assertIsInstance(entry.get("values"), list)
        self.assertTrue(saw_other)
def test_execute_query_curr_month_by_account_w_order(self):
    """Test execute_query for current month on monthly breakdown by account with asc order."""
    url = "?filter[time_scope_units]=month&filter[time_scope_value]=-1&filter[resolution]=monthly&order_by[cost]=asc&group_by[account]=*"  # noqa: E501
    query_params = self.mocked_query_params(url, GCPCostView)
    handler = GCPReportQueryHandler(query_params)
    report = handler.execute_query()
    results = report.get("data")
    totals = report.get("total")
    self.assertIsNotNone(results)
    self.assertIsNotNone(totals)
    aggregates = handler._mapper.report_type_map.get("aggregates")
    expected = self.get_totals_costs_by_time_scope(aggregates, self.this_month_filter)
    self.assertIsNotNone(totals.get("cost"))
    self.assertEqual(totals.get("cost", {}).get("total").get("value"), expected["cost_total"])
    expected_month = self.dh.this_month_start.strftime("%Y-%m")
    for row in results:
        self.assertEqual(row.get("date"), expected_month)
        accounts = row.get("accounts")
        self.assertIsInstance(accounts, list)
        self.assertEqual(len(accounts), 1)
        previous_cost = 0
        for entry in accounts:
            self.assertIsInstance(entry.get("account"), str)
            self.assertIsInstance(entry.get("values"), list)
            cost_value = entry.get("values")[0].get("cost", {}).get("total", {}).get("value")
            self.assertIsNotNone(cost_value)
            # Ascending order: each cost must exceed the one before it.
            self.assertLess(previous_cost, cost_value)
            previous_cost = cost_value
def test_execute_query_curr_month_by_account_w_order_by_account(self):
    """Test execute_query for the previous month, grouped and ordered by account asc."""
    url = "?filter[time_scope_units]=month&filter[time_scope_value]=-2&filter[resolution]=monthly&order_by[account]=asc&group_by[account]=*"  # noqa: E501
    query_params = self.mocked_query_params(url, GCPCostView)
    handler = GCPReportQueryHandler(query_params)
    report = handler.execute_query()
    results = report.get("data")
    totals = report.get("total")
    self.assertIsNotNone(results)
    self.assertIsNotNone(totals)
    aggregates = handler._mapper.report_type_map.get("aggregates")
    expected = self.get_totals_costs_by_time_scope(aggregates, self.last_month_filter)
    self.assertIsNotNone(totals.get("cost"))
    self.assertEqual(totals.get("cost", {}).get("total").get("value"), expected["cost_total"])
    expected_month = self.dh.last_month_start.strftime("%Y-%m")
    for row in results:
        self.assertEqual(row.get("date"), expected_month)
        accounts = row.get("accounts")
        self.assertIsInstance(accounts, list)
        self.assertEqual(len(accounts), 1)
        previous_account = "0"
        for entry in accounts:
            self.assertIsInstance(entry.get("account"), str)
            self.assertIsInstance(entry.get("values"), list)
            account_value = entry.get("values")[0].get("account")
            self.assertIsNotNone(account_value)
            # The "1 Other" overflow bucket is exempt from the ordering check.
            if account_value == "1 Other":
                continue
            self.assertLess(previous_account, account_value)
            previous_account = account_value
def test_execute_query_curr_month_by_project(self):
    """Test execute_query for current month on monthly breakdown by project."""
    url = "?filter[time_scope_units]=month&filter[time_scope_value]=-1&filter[resolution]=monthly&group_by[gcp_project]=*"  # noqa: E501
    with tenant_context(self.tenant):
        project_count = (
            GCPCostEntryLineItemDailySummary.objects.filter(usage_start__gte=self.dh.this_month_start)
            .values_list("project_id")
            .distinct()
            .count()
        )
    query_params = self.mocked_query_params(url, GCPCostView)
    handler = GCPReportQueryHandler(query_params)
    report = handler.execute_query()
    results = report.get("data")
    totals = report.get("total")
    self.assertIsNotNone(results)
    self.assertIsNotNone(totals)
    aggregates = handler._mapper.report_type_map.get("aggregates")
    expected = self.get_totals_costs_by_time_scope(aggregates, self.this_month_filter)
    self.assertIsNotNone(totals.get("cost"))
    self.assertEqual(totals.get("cost", {}).get("total").get("value"), expected["cost_total"])
    expected_month = DateHelper().this_month_start.strftime("%Y-%m")
    for row in results:
        self.assertEqual(row.get("date"), expected_month)
        projects = row.get("gcp_projects")
        self.assertIsInstance(projects, list)
        # Every distinct project with current-month usage appears once.
        self.assertEqual(len(projects), project_count)
        for entry in projects:
            self.assertIsInstance(entry.get("gcp_project"), str)
            self.assertIsInstance(entry.get("values"), list)
            self.assertIsNotNone(entry.get("values")[0].get("cost"))
def test_execute_query_curr_month_by_filtered_project(self):
    """Test execute_query for current month on monthly breakdown by filtered project."""
    with tenant_context(self.tenant):
        project = (
            GCPCostEntryLineItemDailySummary.objects.filter(usage_start__gte=self.dh.this_month_start)
            .values("project_id")[0]
            .get("project_id")
        )
    url = f"?filter[time_scope_units]=month&filter[time_scope_value]=-1&filter[resolution]=monthly&group_by[gcp_project]={project}"  # noqa: E501
    query_params = self.mocked_query_params(url, GCPCostView)
    handler = GCPReportQueryHandler(query_params)
    report = handler.execute_query()
    results = report.get("data")
    totals = report.get("total")
    self.assertIsNotNone(results)
    self.assertIsNotNone(totals)
    aggregates = handler._mapper.report_type_map.get("aggregates")
    expected = self.get_totals_costs_by_time_scope(aggregates, {**self.this_month_filter, "project_id": project})
    self.assertIsNotNone(totals.get("cost"))
    self.assertEqual(totals.get("cost", {}).get("total").get("value"), expected["cost_total"])
    expected_month = DateHelper().this_month_start.strftime("%Y-%m")
    for row in results:
        self.assertEqual(row.get("date"), expected_month)
        projects = row.get("gcp_projects")
        self.assertIsInstance(projects, list)
        for entry in projects:
            self.assertIsInstance(entry.get("gcp_project"), str)
            self.assertIsInstance(entry.get("values"), list)
            self.assertIsNotNone(entry.get("values")[0].get("cost"))
def test_execute_query_current_month_filter_account(self):
    """Test execute_query for current month on monthly filtered by account.

    Previously the whole ``values()`` row dict (not the account id string)
    was interpolated into the URL and passed as the ORM filter value, so
    both sides matched nothing and the assertion compared 0 == 0 vacuously.
    """
    with tenant_context(self.tenant):
        # Extract the scalar account id from the first matching row.
        account = (
            GCPCostEntryLineItemDailySummary.objects.filter(usage_start__gte=self.dh.this_month_start)
            .values("account_id")[0]
            .get("account_id")
        )
    url = f"?filter[time_scope_units]=month&filter[time_scope_value]=-1&filter[resolution]=monthly&filter[account]={account}"  # noqa: E501
    query_params = self.mocked_query_params(url, GCPCostView)
    handler = GCPReportQueryHandler(query_params)
    query_output = handler.execute_query()
    data = query_output.get("data")
    self.assertIsNotNone(data)
    self.assertIsNotNone(query_output.get("total"))
    total = query_output.get("total")
    aggregates = handler._mapper.report_type_map.get("aggregates")
    filters = {**self.this_month_filter, "account_id": account}
    current_totals = self.get_totals_costs_by_time_scope(aggregates, filters)
    self.assertIsNotNone(total.get("cost"))
    self.assertEqual(total.get("cost", {}).get("total").get("value"), current_totals["cost_total"])
    cmonth_str = self.dh.this_month_start.strftime("%Y-%m")
    for data_item in data:
        month_val = data_item.get("date")
        month_data = data_item.get("values")
        self.assertEqual(month_val, cmonth_str)
        self.assertIsInstance(month_data, list)
def test_execute_query_current_month_filter_service(self):
    """Test execute_query for current month on monthly filtered by service."""
    with tenant_context(self.tenant):
        service = GCPCostEntryLineItemDailySummary.objects.values("service_id")[0].get("service_id")
    url = f"?filter[time_scope_units]=month&filter[time_scope_value]=-1&filter[resolution]=monthly&filter[service]={service}"  # noqa: E501
    query_params = self.mocked_query_params(url, GCPCostView)
    handler = GCPReportQueryHandler(query_params)
    report = handler.execute_query()
    results = report.get("data")
    totals = report.get("total")
    self.assertIsNotNone(results)
    self.assertIsNotNone(totals)
    aggregates = handler._mapper.report_type_map.get("aggregates")
    # Mirror the handler's report-type filters on top of the service filter.
    filters = dict(self.this_month_filter)
    filters["service_id__icontains"] = service
    for filt in handler._mapper.report_type_map.get("filter"):
        if not filt:
            continue
        query_filter = QueryFilter(**filt)
        filters[query_filter.composed_query_string()] = query_filter.parameter
    expected = self.get_totals_costs_by_time_scope(aggregates, filters)
    self.assertIsNotNone(totals.get("cost"))
    self.assertEqual(totals.get("cost", {}).get("total").get("value"), expected["cost_total"])
    expected_month = self.dh.this_month_start.strftime("%Y-%m")
    for row in results:
        self.assertEqual(row.get("date"), expected_month)
        self.assertIsInstance(row.get("values"), list)
def test_execute_query_current_month_filter_region(self):
    """Test execute_query for current month on monthly filtered by region."""
    url = "?filter[time_scope_units]=month&filter[time_scope_value]=-1&filter[resolution]=monthly&filter[region]=*"  # noqa: E501
    query_params = self.mocked_query_params(url, GCPCostView)
    handler = GCPReportQueryHandler(query_params)
    report = handler.execute_query()
    results = report.get("data")
    totals = report.get("total")
    self.assertIsNotNone(results)
    self.assertIsNotNone(totals)
    aggregates = handler._mapper.report_type_map.get("aggregates")
    # A wildcard region filter does not restrict the expected totals.
    expected = self.get_totals_costs_by_time_scope(aggregates, {**self.this_month_filter})
    self.assertIsNotNone(totals.get("cost"))
    self.assertEqual(totals.get("cost", {}).get("total").get("value"), expected["cost_total"])
    expected_month = DateHelper().this_month_start.strftime("%Y-%m")
    for row in results:
        self.assertEqual(row.get("date"), expected_month)
        self.assertIsInstance(row.get("values"), list)
@patch("api.query_params.QueryParameters.accept_type", new_callable=PropertyMock)
def test_execute_query_current_month_filter_region_csv(self, mock_accept):
"""Test execute_query on monthly filtered by region for csv."""
mock_accept.return_value = "text/csv"
url = (
"?filter[time_scope_units]=month&filter[time_scope_value]=-1&filter[resolution]=monthly&filter[region]=*"
) # noqa: E501
query_params = self.mocked_query_params(url, GCPCostView)
handler = GCPReportQueryHandler(query_params)
query_output = handler.execute_query()
data = query_output.get("data")
self.assertIsNotNone(data)
self.assertIsNotNone(query_output.get("total"))
total = query_output.get("total")
aggregates = handler._mapper.report_type_map.get("aggregates")
filters = {**self.this_month_filter}
current_totals = self.get_totals_costs_by_time_scope(aggregates, filters)
self.assertIsNotNone(total.get("cost"))
self.assertEqual(total.get("cost", {}).get("total").get("value"), current_totals["cost_total"])
cmonth_str = DateHelper().this_month_start.strftime("%Y-%m")
self.assertEqual(len(data), 1)
for data_item in data:
month_val = data_item.get("date")
self.assertEqual(month_val, cmonth_str)
@patch("api.query_params.QueryParameters.accept_type", new_callable=PropertyMock)
def test_execute_query_curr_month_by_account_w_limit_csv(self, mock_accept):
"""Test execute_query for current month on monthly by account with limt as csv."""
mock_accept.return_value = "text/csv"
url = "?filter[time_scope_units]=month&filter[time_scope_value]=-1&filter[resolution]=monthly&filter[limit]=2&group_by[account]=*" # noqa: E501
query_params = self.mocked_query_params(url, GCPCostView)
handler = GCPReportQueryHandler(query_params)
query_output = handler.execute_query()
data = query_output.get("data")
self.assertIsNotNone(data)
self.assertIsNotNone(query_output.get("total"))
total = query_output.get("total")
aggregates = handler._mapper.report_type_map.get("aggregates")
current_totals = self.get_totals_costs_by_time_scope(aggregates, self.this_month_filter)
self.assertIsNotNone(total.get("cost"))
self.assertEqual(total.get("cost", {}).get("total").get("value"), current_totals["cost_total"])
cmonth_str = self.dh.this_month_start.strftime("%Y-%m")
self.assertEqual(len(data), 1)
for data_item in data:
month = data_item.get("date")
self.assertEqual(month, cmonth_str)
def test_execute_query_w_delta(self):
    """Test grouped-by delta values against sums computed straight from the DB.

    Fixes two issues in the original:
    - ``prev.get("value", Decimal(0))`` never used its default because
      ``aggregate()`` always returns the key (possibly ``None``), so a month
      with no prior data raised ``TypeError`` from ``Decimal(None)``.
    - The same four-line aggregation was repeated four times; it is now a
      local helper.
    """
    path = reverse("reports-gcp-costs")
    url = "?filter[time_scope_units]=month&filter[time_scope_value]=-1&filter[resolution]=monthly&group_by[account]=*&delta=cost"  # noqa: E501
    query_params = self.mocked_query_params(url, GCPCostView, path)
    handler = GCPReportQueryHandler(query_params)
    # test the calculations
    query_output = handler.execute_query()
    data = query_output.get("data")
    self.assertIsNotNone(data)
    accounts = data[0].get("accounts", [{}])
    current_invoice_month = self.dh.gcp_find_invoice_months_in_date_range(
        self.dh.this_month_start, self.dh.this_month_end
    )
    last_invoice_month = self.dh.gcp_find_invoice_months_in_date_range(
        self.dh.last_month_start, self.dh.last_month_end
    )

    def cost_sum(invoice_months, start, end, account=None):
        """Sum unblended + markup cost from the DB; empty querysets sum to zero."""
        filters = {"invoice_month__in": invoice_months, "usage_start__gte": start, "usage_start__lte": end}
        if account is not None:
            filters["account_id"] = account
        with tenant_context(self.tenant):
            agg = GCPCostEntryLineItemDailySummary.objects.filter(**filters).aggregate(
                value=Sum(F("unblended_cost") + F("markup_cost"))
            )
        # Sum() over an empty queryset yields None; the "value" key is always
        # present, so a dict.get default would never apply -- use `or` instead.
        return Decimal(agg.get("value") or 0)

    for account in accounts:
        current_total = cost_sum(
            current_invoice_month, self.dh.this_month_start, self.dh.today, account=account.get("account")
        )
        prev_total = cost_sum(
            last_invoice_month,
            self.dh.last_month_start,
            self.dh.today - relativedelta(months=1),
            account=account.get("account"),
        )
        expected_delta_value = Decimal(current_total - prev_total)
        expected_delta_percent = Decimal((current_total - prev_total) / prev_total * 100)
        values = account.get("values", [{}])[0]
        self.assertIn("delta_value", values)
        self.assertIn("delta_percent", values)
        self.assertEqual(values.get("delta_value"), expected_delta_value)
        self.assertEqual(values.get("delta_percent"), expected_delta_percent)

    # Repeat the comparison for the overall (un-grouped) delta.
    current_total = cost_sum(current_invoice_month, self.dh.this_month_start, self.dh.today)
    prev_total = cost_sum(last_invoice_month, self.dh.last_month_start, self.dh.today - relativedelta(months=1))
    expected_delta_value = Decimal(current_total - prev_total)
    expected_delta_percent = Decimal((current_total - prev_total) / prev_total * 100)
    delta = query_output.get("delta")
    self.assertIsNotNone(delta.get("value"))
    self.assertIsNotNone(delta.get("percent"))
    self.assertEqual(delta.get("value"), expected_delta_value)
    self.assertEqual(delta.get("percent"), expected_delta_percent)
def test_execute_query_w_delta_no_previous_data(self):
    """Test deltas with no previous data."""
    path = reverse("reports-gcp-costs")
    params = self.mocked_query_params("?filter[time_scope_value]=-2&delta=cost", GCPCostView, path)
    output = GCPReportQueryHandler(params).execute_query()
    self.assertIsNotNone(output.get("data"))
    # Without a prior period, the delta value equals the whole current total
    # and the delta percent cannot be computed.
    total_cost = output.get("total", {}).get("cost", {}).get("total")
    self.assertIsNotNone(total_cost)
    delta = output.get("delta")
    self.assertIsNotNone(delta.get("value"))
    self.assertIsNone(delta.get("percent"))
    self.assertEqual(delta.get("value"), total_cost.get("value"))
    self.assertEqual(delta.get("percent"), None)
def test_execute_query_orderby_delta(self):
    """Test execute_query with ordering by delta ascending."""
    url = "?filter[time_scope_units]=month&filter[time_scope_value]=-1&filter[resolution]=monthly&order_by[delta]=asc&group_by[account]=*&delta=cost"  # noqa: E501
    path = reverse("reports-gcp-costs")
    query_params = self.mocked_query_params(url, GCPCostView, path)
    handler = GCPReportQueryHandler(query_params)
    query_output = handler.execute_query()
    data = query_output.get("data")
    self.assertIsNotNone(data)
    cmonth_str = self.dh.this_month_start.strftime("%Y-%m")
    # Every monthly bucket must be for the current month and each account
    # entry must carry a computed delta_value.
    for data_item in data:
        month_val = data_item.get("date")
        month_data = data_item.get("accounts")
        self.assertEqual(month_val, cmonth_str)
        self.assertIsInstance(month_data, list)
        for month_item in month_data:
            self.assertIsInstance(month_item.get("account"), str)
            self.assertIsInstance(month_item.get("values"), list)
            self.assertIsInstance(month_item.get("values")[0].get("delta_value"), Decimal)
def test_calculate_total(self):
    """Test that calculated totals return correctly."""
    url = "?filter[time_scope_units]=month&filter[time_scope_value]=-2&filter[resolution]=monthly"
    query_params = self.mocked_query_params(url, GCPCostView)
    handler = GCPReportQueryHandler(query_params)
    expected_units = "USD"
    with tenant_context(self.tenant):
        result = handler.calculate_total(**{"cost_units": expected_units})
    # Recompute the expected cost total for last month straight from the DB
    # and compare value and units against the handler's result.
    aggregates = handler._mapper.report_type_map.get("aggregates")
    current_totals = self.get_totals_costs_by_time_scope(aggregates, self.last_month_filter)
    cost_total = result.get("cost", {}).get("total")
    self.assertIsNotNone(cost_total)
    self.assertEqual(cost_total.get("value"), current_totals["cost_total"])
    self.assertEqual(cost_total.get("units"), expected_units)
def test_percent_delta(self):
    """Test _percent_delta() utility method."""
    params = self.mocked_query_params("?", GCPCostView)
    handler = GCPReportQueryHandler(params)
    # Going from 5 to 10 is a 100% increase.
    self.assertEqual(handler._percent_delta(10, 5), 100)
def test_rank_list_by_account(self):
    """Test rank list limit with account alias."""
    url = "?filter[time_scope_units]=month&filter[time_scope_value]=-1&filter[resolution]=monthly&filter[limit]=2&group_by[account]=*"  # noqa: E501
    query_params = self.mocked_query_params(url, GCPCostView)
    handler = GCPReportQueryHandler(query_params)
    data_list = [
        {"account": "1", "total": 5, "rank": 1},
        {"account": "2", "total": 4, "rank": 2},
        {"account": "3", "total": 3, "rank": 3},
        {"account": "4", "total": 2, "rank": 4},
    ]
    # With limit=2, ranks 1-2 survive unchanged and everything below is
    # rolled up into a synthetic "Others" bucket (totals summed: 3 + 2 = 5).
    expected = [
        {"account": "1", "total": 5, "rank": 1},
        {"account": "2", "total": 4, "rank": 2},
        {
            "account": "Others",
            "account_alias": "Others",
            "total": 5,
            "rank": 3,
            "cost_total": 0,
            "infra_total": 0,
            "sup_total": 0,
        },
    ]
    ranked_list = handler._ranked_list(data_list)
    self.assertEqual(ranked_list, expected)
def test_rank_list_by_service_alias(self):
    """Test rank list limit with service_alias grouping."""
    # This might change as we add more gcp generators to nise
    url = "?filter[time_scope_units]=month&filter[time_scope_value]=-1&filter[resolution]=monthly&filter[limit]=2&group_by[service]=*"  # noqa: E501
    query_params = self.mocked_query_params(url, GCPCostView)
    handler = GCPReportQueryHandler(query_params)
    data_list = [
        {"service_alias": "1", "total": 5, "rank": 1},
        {"service_alias": "2", "total": 4, "rank": 2},
        {"service_alias": "3", "total": 3, "rank": 3},
        {"service_alias": "4", "total": 2, "rank": 4},
    ]
    # Ranks 3 and 4 collapse into an "Others" roll-up (total 3 + 2 = 5).
    # NOTE(review): "service_alias": "1" in the Others row looks like an
    # artifact of how _ranked_list constructs the roll-up entry — confirm it
    # is intended rather than "Others".
    expected = [
        {"service_alias": "1", "total": 5, "rank": 1},
        {"service_alias": "2", "total": 4, "rank": 2},
        {
            "service": "Others",
            "service_alias": "1",
            "total": 5,
            "rank": 3,
            "cost_total": 0,
            "infra_total": 0,
            "sup_total": 0,
        },
    ]
    ranked_list = handler._ranked_list(data_list)
    self.assertEqual(ranked_list, expected)
def test_rank_list_with_offset(self):
    """Test rank list limit and offset with account alias."""
    url = "?filter[time_scope_units]=month&filter[time_scope_value]=-1&filter[resolution]=monthly&filter[limit]=1&filter[offset]=1&group_by[account]=*"  # noqa: E501
    query_params = self.mocked_query_params(url, GCPCostView)
    handler = GCPReportQueryHandler(query_params)
    # Four ranked accounts; limit=1 with offset=1 keeps only the rank-2 row.
    ranked = handler._ranked_list(
        [
            {"account": "1", "total": 5, "rank": 1},
            {"account": "2", "total": 4, "rank": 2},
            {"account": "3", "total": 3, "rank": 3},
            {"account": "4", "total": 2, "rank": 4},
        ]
    )
    self.assertEqual(ranked, [{"account": "2", "total": 4, "rank": 2}])
def test_query_costs_with_totals(self):
    """Test execute_query() - costs with totals.

    Query for costs grouped by account, validating that cost totals are
    present and strictly positive.
    """
    url = "?filter[time_scope_units]=month&filter[time_scope_value]=-1&filter[resolution]=monthly&group_by[account]=*"  # noqa: E501
    handler = GCPReportQueryHandler(self.mocked_query_params(url, GCPCostView))
    data = handler.execute_query().get("data")
    self.assertIsNotNone(data)
    for data_item in data:
        for account in data_item.get("accounts"):
            values = account.get("values")
            self.assertIsNotNone(values)
            self.assertGreater(len(values), 0)
            for entry in values:
                total_value = entry.get("cost", {}).get("total", {}).get("value")
                self.assertIsInstance(total_value, Decimal)
                self.assertGreater(total_value, Decimal(0))
def test_order_by(self):
    """Test that order_by returns properly sorted data."""
    url = "?filter[time_scope_units]=month&filter[time_scope_value]=-1&filter[resolution]=monthly"
    query_params = self.mocked_query_params(url, GCPCostView)
    handler = GCPReportQueryHandler(query_params)
    unordered_data = [
        {"date": self.dh.today, "delta_percent": 8, "total": 6.2, "rank": 2},
        {"date": self.dh.yesterday, "delta_percent": 4, "total": 2.2, "rank": 1},
        {"date": self.dh.today, "delta_percent": 7, "total": 8.2, "rank": 1},
        {"date": self.dh.yesterday, "delta_percent": 4, "total": 2.2, "rank": 2},
    ]
    # Ascending sort on (date, rank).
    order_fields = ["date", "rank"]
    expected = [
        {"date": self.dh.yesterday, "delta_percent": 4, "total": 2.2, "rank": 1},
        {"date": self.dh.yesterday, "delta_percent": 4, "total": 2.2, "rank": 2},
        {"date": self.dh.today, "delta_percent": 7, "total": 8.2, "rank": 1},
        {"date": self.dh.today, "delta_percent": 8, "total": 6.2, "rank": 2},
    ]
    ordered_data = handler.order_by(unordered_data, order_fields)
    self.assertEqual(ordered_data, expected)
    # "-delta" sorts the delta_percent field descending within each date.
    order_fields = ["date", "-delta"]
    expected = [
        {"date": self.dh.yesterday, "delta_percent": 4, "total": 2.2, "rank": 1},
        {"date": self.dh.yesterday, "delta_percent": 4, "total": 2.2, "rank": 2},
        {"date": self.dh.today, "delta_percent": 8, "total": 6.2, "rank": 2},
        {"date": self.dh.today, "delta_percent": 7, "total": 8.2, "rank": 1},
    ]
    ordered_data = handler.order_by(unordered_data, order_fields)
    self.assertEqual(ordered_data, expected)
def test_query_table(self):
    """Test that the correct view is assigned by query table property."""
    # (url, view, expected summary table) triples; more specific group-bys
    # should resolve to the matching materialized summary table.
    # NOTE(review): several filter[service] values (Cosmos DB, ExpressRoute,
    # Traffic Manager, ...) look Azure-specific; presumably copied from the
    # Azure tests — confirm they are intentional here.
    test_cases = [
        ("?", GCPCostView, GCPCostSummaryP),
        ("?group_by[account]=*", GCPCostView, GCPCostSummaryByAccountP),
        ("?group_by[gcp_project]=*", GCPCostView, GCPCostSummaryByProjectP),
        ("?group_by[gcp_project]=*&group_by[account]=*", GCPCostView, GCPCostSummaryByProjectP),
        ("?group_by[service]=*", GCPCostView, GCPCostSummaryByServiceP),
        ("?group_by[service]=*&group_by[account]=*", GCPCostView, GCPCostSummaryByServiceP),
        (
            "?filter[service]=Database,Cosmos%20DB,Cache%20for%20Redis&group_by[account]=*",
            GCPCostView,
            GCPCostSummaryByServiceP,
        ),
        (
            "?filter[service]=Virtual%20Network,VPN,DNS,Traffic%20Manager,ExpressRoute,Load%20Balancer,Application%20Gateway",  # noqa: E501
            GCPCostView,
            GCPCostSummaryByServiceP,
        ),
        (
            "?filter[service]=Virtual%20Network,VPN,DNS,Traffic%20Manager,ExpressRoute,Load%20Balancer,Application%20Gateway&group_by[account]=*",  # noqa: E501
            GCPCostView,
            GCPCostSummaryByServiceP,
        ),
    ]
    for test_case in test_cases:
        with self.subTest(test_case=test_case):
            url, view, table = test_case
            query_params = self.mocked_query_params(url, view)
            handler = GCPReportQueryHandler(query_params)
            self.assertEqual(handler.query_table, table)
def test_source_uuid_mapping(self):  # noqa: C901
    """Test source_uuid is mapped to the correct source."""
    # Every source_uuid on the daily-summary rows must correspond to a
    # bill's provider_id.
    with tenant_context(self.tenant):
        gcp_uuids = GCPCostEntryLineItemDailySummary.objects.distinct().values_list("source_uuid", flat=True)
        expected_source_uuids = GCPCostEntryBill.objects.distinct().values_list("provider_id", flat=True)
        for gcp_uuid in gcp_uuids:
            self.assertIn(gcp_uuid, expected_source_uuids)
    endpoints = [GCPCostView]
    source_uuid_list = []
    for endpoint in endpoints:
        urls = ["?"]
        if endpoint == GCPCostView:
            # BUG FIX: "group_by[region]=*" was missing its leading "?", so it
            # was not parsed as a query string like its siblings and the
            # region grouping was never actually exercised.
            urls.extend(
                ["?group_by[account]=*", "?group_by[gcp_project]=*", "?group_by[region]=*", "?group_by[service]=*"]
            )
        for url in urls:
            query_params = self.mocked_query_params(url, endpoint)
            handler = GCPReportQueryHandler(query_params)
            query_output = handler.execute_query()
            for dictionary in query_output.get("data"):
                for value in dictionary.values():
                    if isinstance(value, list):
                        for item in value:
                            # Each group-by bucket carries its rows under "values".
                            if isinstance(item, dict) and "values" in item:
                                first_row = item["values"][0]
                                source_uuid_list.extend(first_row.get("source_uuid"))
    # assertNotEqual: assertNotEquals is a deprecated alias.
    self.assertNotEqual(source_uuid_list, [])
    for source_uuid in source_uuid_list:
        self.assertIn(source_uuid, expected_source_uuids)
def test_execute_query_annotate(self):
    """Test that an account-filtered query annotates cost units correctly."""
    with tenant_context(self.tenant):
        # BUG FIX: .values("account_id")[0] yields a dict ({"account_id": ...})
        # which was interpolated into the URL and used as an ORM filter value.
        # Extract the scalar account id instead.
        account = GCPCostEntryLineItemDailySummary.objects.filter(
            usage_start__gte=self.dh.this_month_start
        ).values_list("account_id", flat=True)[0]
    url = f"?filter[time_scope_units]=month&filter[time_scope_value]=-1&filter[resolution]=monthly&filter[account]={account}"  # noqa: E501
    query_params = self.mocked_query_params(url, GCPCostView)
    handler = GCPReportQueryHandler(query_params)
    query_output = handler.execute_query()
    data = query_output.get("data")
    self.assertIsNotNone(data)
    self.assertIsNotNone(query_output.get("total"))
    total = query_output.get("total")
    # Recompute the expected totals for the same month/account from the DB.
    aggregates = handler._mapper.report_type_map.get("aggregates")
    filters = {**self.this_month_filter, "account_id": account}
    current_totals = self.get_totals_costs_by_time_scope(aggregates, filters)
    self.assertIsNotNone(total.get("cost"))
    self.assertEqual(total.get("cost", {}).get("total").get("value"), current_totals["cost_total"])
    # Every monthly bucket must be for the current month.
    cmonth_str = self.dh.this_month_start.strftime("%Y-%m")
    for data_item in data:
        month_val = data_item.get("date")
        self.assertEqual(month_val, cmonth_str)
def test_execute_sum_query_instance_type(self):
    """Test that the instance-type sum query matches DB-computed totals."""
    url = "?"
    query_params = self.mocked_query_params(url, GCPInstanceTypeView)
    handler = GCPReportQueryHandler(query_params)
    aggregates = handler._mapper.report_type_map.get("aggregates")
    # BUG FIX: copy the fixture filter before mutating it; the original
    # updated self.ten_day_filter in place, which leaks the report-type
    # filters into any later test that reuses that attribute (if it is a
    # shared dict rather than a fresh property value).
    filters = dict(self.ten_day_filter)
    for filt in handler._mapper.report_type_map.get("filter"):
        qf = QueryFilter(**filt)
        filters.update({qf.composed_query_string(): qf.parameter})
    current_totals = self.get_totals_costs_by_time_scope(aggregates, filters)
    expected_cost_total = current_totals.get("cost_total")
    self.assertIsNotNone(expected_cost_total)
    query_output = handler.execute_query()
    self.assertIsNotNone(query_output.get("data"))
    self.assertIsNotNone(query_output.get("total"))
    total = query_output.get("total")
    self.assertIsNotNone(total.get("usage", {}).get("value"))
    self.assertEqual(total.get("usage", {}).get("value"), current_totals.get("usage"))
    result_cost_total = total.get("cost", {}).get("total", {}).get("value")
    self.assertIsNotNone(result_cost_total)
    self.assertEqual(result_cost_total, expected_cost_total)
def test_query_instance_types_with_totals(self):
    """Test execute_query() - instance types with totals.

    Query for instance_types, validating that cost totals are present.
    """
    url = "?filter[time_scope_units]=month&filter[time_scope_value]=-1&filter[resolution]=monthly&group_by[instance_type]=*"  # noqa: E501
    query_params = self.mocked_query_params(url, GCPInstanceTypeView)
    handler = GCPReportQueryHandler(query_params)
    query_output = handler.execute_query()
    data = query_output.get("data")
    self.assertIsNotNone(data)
    for data_item in data:
        instance_types = data_item.get("instance_types")
        for it in instance_types:
            self.assertIsNotNone(it.get("values"))
            self.assertGreater(len(it.get("values")), 0)
            for value in it.get("values"):
                cost_value = value.get("cost", {}).get("total", {}).get("value")
                self.assertIsNotNone(cost_value)
                self.assertIsInstance(cost_value, Decimal)
                # Quantize before the >= 0 check to tolerate tiny rounding noise.
                self.assertGreaterEqual(cost_value.quantize(Decimal(".0001"), ROUND_HALF_UP), Decimal(0))
                self.assertIsInstance(value.get("usage", {}), dict)
                self.assertGreaterEqual(
                    value.get("usage", {}).get("value", {}).quantize(Decimal(".0001"), ROUND_HALF_UP), Decimal(0)
                )
def test_execute_query_annotate_instance_types(self):
    """Test that an account-filtered instance-type query annotates cost units."""
    with tenant_context(self.tenant):
        # BUG FIX: extract the scalar account id; .values("account_id")[0] is
        # a dict and was previously embedded into the URL / ORM filter as-is.
        account = GCPCostEntryLineItemDailySummary.objects.filter(
            usage_start__gte=self.dh.this_month_start
        ).values_list("account_id", flat=True)[0]
    url = f"?filter[time_scope_units]=month&filter[time_scope_value]=-1&filter[resolution]=monthly&filter[account]={account}"  # noqa: E501
    query_params = self.mocked_query_params(url, GCPInstanceTypeView)
    handler = GCPReportQueryHandler(query_params)
    query_output = handler.execute_query()
    data = query_output.get("data")
    self.assertIsNotNone(data)
    self.assertIsNotNone(query_output.get("total"))
    total = query_output.get("total")
    # Recompute the expected totals for the same month/account from the DB.
    aggregates = handler._mapper.report_type_map.get("aggregates")
    filters = {**self.this_month_filter, "account_id": account}
    current_totals = self.get_totals_costs_by_time_scope(aggregates, filters)
    expected_cost_total = current_totals.get("cost_total")
    self.assertIsNotNone(expected_cost_total)
    result_cost_total = total.get("cost", {}).get("total", {}).get("value")
    self.assertIsNotNone(result_cost_total)
    self.assertEqual(result_cost_total, expected_cost_total)
    # Every monthly bucket must be for the current month.
    cmonth_str = self.dh.this_month_start.strftime("%Y-%m")
    for data_item in data:
        month_val = data_item.get("date")
        self.assertEqual(month_val, cmonth_str)
def test_execute_query_group_by_tag(self):
    """Test execute_query for current month on monthly breakdown by service."""
    # Pick any existing tag key/value pair to group by.
    with tenant_context(self.tenant):
        tag_object = GCPTagsSummary.objects.first()
        key = tag_object.key
        value = tag_object.values[0]
    url = f"?filter[time_scope_units]=month&filter[time_scope_value]=-1&filter[resolution]=monthly&group_by[tag:{key}]={value}"  # noqa: E501
    handler = GCPReportQueryHandler(self.mocked_query_params(url, GCPCostView))
    output = handler.execute_query()
    self.assertIsNotNone(output.get("data"))
    self.assertIsNotNone(output.get("total"))
def test_query_storage_with_totals(self):
    """Test execute_query() - storage with totals.

    Query for storage, validating that cost totals are present.
    """
    url = "?filter[time_scope_units]=month&filter[time_scope_value]=-1&filter[resolution]=monthly&group_by[service]=*"  # noqa: E501
    query_params = self.mocked_query_params(url, GCPStorageView)
    handler = GCPReportQueryHandler(query_params)
    query_output = handler.execute_query()
    data = query_output.get("data")
    self.assertIsNotNone(data)
    service_checked = False
    for data_item in data:
        services = data_item.get("services")
        self.assertIsNotNone(services)
        for srv in services:
            # Only the "Cloud Storage" service rows are validated here.
            if srv.get("service") == "Cloud Storage":
                self.assertIsNotNone(srv.get("values"))
                self.assertGreater(len(srv.get("values")), 0)
                for value in srv.get("values"):
                    cost_total = value.get("cost", {}).get("total", {}).get("value")
                    self.assertIsInstance(cost_total, Decimal)
                    self.assertNotEqual(cost_total, Decimal(0))
                    self.assertIsInstance(value.get("usage", {}).get("value"), Decimal)
                    service_checked = True
    # Guards against the test silently passing when no Cloud Storage rows exist.
    self.assertTrue(service_checked)
def test_gcp_date_order_by_cost_desc(self):
    """Test that order of every other date matches the order of the `order_by` date."""
    yesterday = self.dh.yesterday.date()
    url = f"?order_by[cost]=desc&order_by[date]={yesterday}&group_by[service]=*"
    query_params = self.mocked_query_params(url, GCPCostView)
    handler = GCPReportQueryHandler(query_params)
    query_output = handler.execute_query()
    data = query_output.get("data")
    svc_annotations = handler.annotations.get("service")
    cost_annotation = handler.report_annotations.get("cost_total")
    with tenant_context(self.tenant):
        # Expected ordering: yesterday's services sorted by cost descending.
        expected = list(
            GCPCostSummaryByServiceP.objects.filter(usage_start=str(yesterday))
            .annotate(service=svc_annotations)
            .values("service")
            .annotate(cost=cost_annotation)
            .order_by("-cost")
        )
    correctlst = [service.get("service") for service in expected]
    for element in data:
        lst = [service.get("service") for service in element.get("services", [])]
        # NOTE(review): when either list is empty the comparison is skipped
        # silently, so the test can pass without asserting anything — confirm
        # the fixtures always produce service rows for these dates.
        if lst and correctlst:
            self.assertEqual(correctlst, lst)
def test_gcp_date_incorrect_date(self):
    """An unparseable order_by[date] value must be rejected."""
    bad_date = "200BC"
    url = f"?order_by[cost]=desc&order_by[date]={bad_date}&group_by[service]=*"
    with self.assertRaises(ValidationError):
        self.mocked_query_params(url, GCPCostView)
def test_gcp_out_of_range_under_date(self):
    """A date before the materialized-view window must be rejected."""
    too_early = materialized_view_month_start() - timedelta(days=1)
    url = f"?order_by[cost]=desc&order_by[date]={too_early}&group_by[service]=*"
    with self.assertRaises(ValidationError):
        self.mocked_query_params(url, GCPCostView)
def test_gcp_out_of_range_over_date(self):
    """A date in the future must be rejected."""
    too_late = DateHelper().today.date() + timedelta(days=1)
    url = f"?order_by[cost]=desc&order_by[date]={too_late}&group_by[service]=*"
    with self.assertRaises(ValidationError):
        self.mocked_query_params(url, GCPCostView)
| 51.893899 | 169 | 0.65031 |
995fd22d1fe22d7cacb697cde3fd21a9cc87111a | 201 | py | Python | demo/session_test/urls.py | physili/django_test | 09aa61f36e5d32f98af11057ea206dde8d082ac7 | [
"MIT"
] | 1 | 2020-04-25T04:50:30.000Z | 2020-04-25T04:50:30.000Z | demo/session_test/urls.py | physili/django_test | 09aa61f36e5d32f98af11057ea206dde8d082ac7 | [
"MIT"
] | null | null | null | demo/session_test/urls.py | physili/django_test | 09aa61f36e5d32f98af11057ea206dde8d082ac7 | [
"MIT"
] | null | null | null | from django.conf.urls import re_path
from . import views
# URL namespace used by reverse()/{% url %} lookups, e.g. "session:...".
app_name = 'session'

urlpatterns = [
    # Endpoints exercising Django session set/get behaviour in views.py.
    re_path(r'^set_session/$', views.set_session),
    re_path(r'^get_session/$', views.get_session),
]
| 18.272727 | 50 | 0.706468 |
61476d3675c6bbc745d2c03cd4c182992a3dc41f | 3,395 | py | Python | debian/update-control.py | balabit-deps/balabit-os-6-boost1.58 | 88692145c0b665bb4998f81b99c7dbfec442fac8 | [
"BSL-1.0"
] | null | null | null | debian/update-control.py | balabit-deps/balabit-os-6-boost1.58 | 88692145c0b665bb4998f81b99c7dbfec442fac8 | [
"BSL-1.0"
] | null | null | null | debian/update-control.py | balabit-deps/balabit-os-6-boost1.58 | 88692145c0b665bb4998f81b99c7dbfec442fac8 | [
"BSL-1.0"
] | null | null | null | #! /usr/bin/env python
#
from deb822 import Deb822
import re
gOldVersion = None
gNewVersion = None
class BoostVersion:
    """A Boost version number ('major.minor.revision') with helpers for
    finding and rewriting version strings inside package names/fields."""

    def __init__(self, version):
        self.Major, self.Minor, self.Revision = version.split('.')
        # 'major.minor' as used in Debian package names, e.g. '1.58'.
        self.PackageVersion = '%s.%s' % (self.Major, self.Minor)
        # Full 'major.minor.revision' as used in shared object names.
        self.SharedObjectVersion = version

    def containsPackageVersion(self, string):
        '''Return true if 'string' contains the Package version string.'''
        return bool(re.search(self.PackageVersion, string))

    def containsSharedObjectVersion(self, string):
        '''Return true if 'string' contains the Shared Object version string.'''
        return bool(re.search(self.SharedObjectVersion, string))

    def stripVersion(self, string):
        '''Remove PackageVersion or SharedObjectVersion if contained in 'string'.'''
        return self.replaceVersion(string, '')

    def replaceVersion(self, string, replacement):
        '''Replace either PackageVersion or SharedObjectVersion if contained
        in 'string', with 'replacement'.'''
        # Substitute the longer (shared-object) form first so the package
        # version substitution cannot clobber part of it.
        for pattern in (self.SharedObjectVersion, self.PackageVersion):
            string = re.sub(pattern, replacement, string)
        return string
def replaceVersion(string, ver1, ver2):
    '''Search 'string' for a BoostVersion ver1. If
    SharedObjectVersion or PackageVersion of ver1 is found, replace by
    corresponding ver2 version string. Return the updated string.'''
    # Longer (shared-object) form first so the package-version pass cannot
    # clobber part of a full version string.
    replacements = (
        (ver1.SharedObjectVersion, ver2.SharedObjectVersion),
        (ver1.PackageVersion, ver2.PackageVersion),
    )
    for old_pattern, new_text in replacements:
        string = re.sub(old_pattern, new_text, string)
    return string
def updateVersionedValue(paragraph, key):
    '''Rewrite the versioned field 'key' of 'paragraph' from gOldVersion to
    gNewVersion in place. Returns (old value, new value), or None when the
    field is absent.'''
    if key not in paragraph:
        return
    previous = paragraph[key]
    paragraph[key] = replaceVersion(previous, gOldVersion, gNewVersion)
    return (previous, paragraph[key])
def conflictsWithPrevious(paragraph):
    '''Return True when the paragraph's Conflicts field already names a
    differently-versioned variant of this package.'''
    if 'Conflicts' not in paragraph:
        return False
    # Turn the package name into a pattern with every digit wildcarded, so
    # any other version of the same package matches.
    name_pattern = re.sub(r'\d', r'\\d', paragraph['Package'])
    return re.search(name_pattern, paragraph['Conflicts']) is not None
def updateConflicts(paragraph, oldPkgName):
    '''Ensure -dev packages (and packages already conflicting with a prior
    version) conflict with their old versioned name 'oldPkgName'.'''
    newPkgName = paragraph['Package']
    is_dev = newPkgName.endswith("-dev") and not newPkgName.endswith("-all-dev")
    if not (is_dev or conflictsWithPrevious(paragraph)):
        return
    if 'Conflicts' in paragraph:
        # Append only when the old name is not already listed.
        if oldPkgName not in paragraph['Conflicts']:
            paragraph['Conflicts'] += ', ' + oldPkgName
    else:
        paragraph['Conflicts'] = oldPkgName
def processSourceParagraph(p):
    # The source paragraph only needs its Source: field re-versioned.
    updateVersionedValue(p, 'Source')
def processPackageParagraph(p):
    # Rename the package itself, then rewrite inter-package relationships.
    # NOTE(review): updateVersionedValue returns None when the key is absent;
    # this unpack assumes every binary paragraph carries a Package: field.
    (oldPkgName, newPkgName) = updateVersionedValue(p, 'Package')
    updateVersionedValue(p, 'Depends')
    updateVersionedValue(p, 'Recommends')
    updateVersionedValue(p, 'Suggests')
    updateConflicts(p, oldPkgName)
def printParagraph(p):
    '''Write every field of paragraph 'p' to stdout in control-file form.'''
    for key in p.keys():
        print("%s: %s" % (key, p[key]))
def processControl():
    # Stream paragraphs out of control.in; the first paragraph is the source
    # paragraph, all following ones are binary package paragraphs, each
    # separated on output by a blank line.
    firstParagraph = True
    for paragraph in Deb822.iter_paragraphs(open('control.in')):
        if firstParagraph:
            processSourceParagraph(paragraph)
            printParagraph(paragraph)
            firstParagraph = False
        else:
            processPackageParagraph(paragraph)
            print
            printParagraph(paragraph)
# Versions being migrated from/to; consumed as module globals by the
# update helpers above.
gOldVersion = BoostVersion('1.57.0')
gNewVersion = BoostVersion('1.58.0')

processControl()
| 36.902174 | 125 | 0.695729 |
9c0be583ec64e8c2385fbb18087790c809a22410 | 2,951 | py | Python | tensorflow_datasets/testing/mocking_test.py | robbjr/datasets | fbb2af9d0e88f8e2ae884e9764fbeff2ee487813 | [
"Apache-2.0"
] | 1 | 2019-09-20T22:08:24.000Z | 2019-09-20T22:08:24.000Z | tensorflow_datasets/testing/mocking_test.py | robbjr/datasets | fbb2af9d0e88f8e2ae884e9764fbeff2ee487813 | [
"Apache-2.0"
] | null | null | null | tensorflow_datasets/testing/mocking_test.py | robbjr/datasets | fbb2af9d0e88f8e2ae884e9764fbeff2ee487813 | [
"Apache-2.0"
] | 1 | 2019-12-14T00:32:08.000Z | 2019-12-14T00:32:08.000Z | # coding=utf-8
# Copyright 2019 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensorflow_datasets.testing.mocking."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow_datasets.core import dataset_utils
from tensorflow_datasets.core import registered
from tensorflow_datasets.testing import mocking
from tensorflow_datasets.testing import test_case
from tensorflow_datasets.testing import test_utils
# Import for registration
from tensorflow_datasets.image import imagenet # pylint: disable=unused-import,g-bad-import-order
from tensorflow_datasets.text import lm1b # pylint: disable=unused-import,g-bad-import-order
from tensorflow_datasets.image import mnist # pylint: disable=unused-import,g-bad-import-order
# TF1-compat eager mode so dataset elements can be inspected directly below.
tf.compat.v1.enable_eager_execution()
class MockingTest(test_case.TestCase):
  """Tests for the tfds testing.mock_data() helper."""

  def test_mocking_imagenet(self):
    # Under mock_data(), load() must not touch disk/network yet still yield
    # examples matching the dataset's declared feature spec.
    with mocking.mock_data():
      ds = registered.load('imagenet2012', split='train')
      for ex in ds.take(10):
        self.assertEqual(
            sorted(ex.keys()), ['file_name', 'image', 'label'])
        ex['image'].shape.assert_is_compatible_with((None, None, 3))

  def test_mocking_lm1b(self):
    with mocking.mock_data():
      ds = registered.load('lm1b/bytes', split='train')
      for ex in ds.take(10):
        # The bytes-encoded text feature is a variable-length int64 vector.
        self.assertEqual(ex['text'].dtype, tf.int64)
        ex['text'].shape.assert_is_compatible_with((None,))

  def test_custom_as_dataset(self):
    def _as_dataset(self, *args, **kwargs):  # pylint: disable=unused-argument
      # Replacement for DatasetBuilder._as_dataset that yields two fixed
      # sentences with the builder's declared feature types/shapes.
      return tf.data.Dataset.from_generator(
          lambda: ({  # pylint: disable=g-long-lambda
              'text': t,
          } for t in ['some sentence', 'some other sentence']),
          output_types=self.info.features.dtype,
          output_shapes=self.info.features.shape,
      )

    with mocking.mock_data(as_dataset_fn=_as_dataset):
      ds = registered.load('lm1b', split='train')
      out = [ex['text'] for ex in dataset_utils.as_numpy(ds)]
      self.assertEqual(out, [b'some sentence', b'some other sentence'])

  def test_max_values(self):
    # num_examples bounds the mocked split size; generated label values stay
    # within the feature's declared range.
    with mocking.mock_data(num_examples=50):
      ds = registered.load('mnist', split='train')
      for ex in ds.take(50):
        self.assertLessEqual(tf.math.reduce_max(ex['label']).numpy(), 10)
if __name__ == '__main__':
  # Run the test suite when this module is executed directly.
  test_utils.test_main()
| 37.35443 | 98 | 0.723822 |
ab659ceeac00b8ebb1ed546b8e54e81a981c04c6 | 9,192 | py | Python | visualDet3D/networks/backbones/resnet.py | jovialio/visualDet3D | 5a54e4547576e4eb23d768746b2bdb2b4f25753f | [
"Apache-2.0"
] | 250 | 2021-02-02T02:23:18.000Z | 2022-03-31T11:00:10.000Z | visualDet3D/networks/backbones/resnet.py | jovialio/visualDet3D | 5a54e4547576e4eb23d768746b2bdb2b4f25753f | [
"Apache-2.0"
] | 56 | 2021-02-03T08:32:11.000Z | 2022-03-30T01:41:46.000Z | visualDet3D/networks/backbones/resnet.py | jovialio/visualDet3D | 5a54e4547576e4eb23d768746b2bdb2b4f25753f | [
"Apache-2.0"
] | 45 | 2021-02-25T02:01:15.000Z | 2022-03-03T10:32:58.000Z | from typing import Tuple, List, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import math
import torch.utils.model_zoo as model_zoo
from visualDet3D.networks.utils.registry import BACKBONE_DICT
def conv3x3(in_planes, out_planes, stride=1, dilation=1):
    """3x3 convolution; padding equals dilation so the spatial size is
    preserved at stride 1. No bias (a BatchNorm always follows)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        dilation=dilation,
        bias=False,
    )
# Torchvision-hosted ImageNet-pretrained checkpoints, keyed by architecture.
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
class BasicBlock(nn.Module):
    """Two-3x3-conv residual block (ResNet-18/34 style).

    NOTE: the stage dilation is applied to the second conv only; the first
    conv carries the stride.
    """

    # Output channels = planes * expansion.
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes, dilation=dilation)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Project the identity path when the main path changes shape.
        identity = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        return self.relu(out + identity)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (ResNet-50+ style).

    The middle 3x3 conv carries both the stride and the dilation; the final
    1x1 conv expands the channels by ``expansion``.
    """

    # Output channels = planes * expansion.
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=dilation, bias=False, dilation=dilation)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Project the identity path when the main path changes shape.
        identity = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        return self.relu(out + identity)
class ResNet(nn.Module):
planes = [64, 128, 256, 512]
def __init__(self, block:Union[BasicBlock, Bottleneck],
layers:Tuple[int, ...],
num_stages:int=4,
strides:Tuple[int, ...]=(1, 2, 2, 2),
dilations:Tuple[int, ...]=(1, 1, 1, 1),
out_indices:Tuple[int, ...]=(-1, 0, 1, 2, 3),
frozen_stages:int=-1,
norm_eval:bool=True,
):
self.inplanes = 64
super(ResNet, self).__init__()
self.num_stages = num_stages
assert num_stages >= 1 and num_stages <= 4
self.strides = strides
self.dilations = dilations
self.out_indices = out_indices
self.frozen_stages = frozen_stages
self.norm_eval = norm_eval
assert max(out_indices) < num_stages
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
for i in range(num_stages):
setattr(self, f"layer{i+1}", self._make_layer(block, self.planes[i], layers[i], stride=self.strides[i], dilation=self.dilations[i]))
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
#prior = 0.01
self.train()
def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, dilation=dilation))
return nn.Sequential(*layers)
def train(self, mode=True):
super(ResNet, self).train(mode)
if mode:
self.freeze_stages()
if self.norm_eval:
self.freeze_bn()
def freeze_stages(self):
if self.frozen_stages >= 0:
self.conv1.eval()
self.bn1.eval()
for param in self.conv1.parameters():
param.requires_grad = False
for param in self.bn1.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
m = getattr(self, f'layer{i}')
m.eval()
for param in m.parameters():
param.requires_grad = False
def freeze_bn(self):
'''Freeze BatchNorm layers.'''
for layer in self.modules():
if isinstance(layer, nn.modules.batchnorm._BatchNorm): # Will freeze both batchnorm and sync batchnorm
layer.eval()
def forward(self, img_batch):
outs = []
x = self.conv1(img_batch)
x = self.bn1(x)
x = self.relu(x)
if -1 in self.out_indices:
outs.append(x)
x = self.maxpool(x)
for i in range(self.num_stages):
layer = getattr(self, f"layer{i+1}")
x = layer(x)
if i in self.out_indices:
outs.append(x)
return outs
def resnet18(pretrained=True, **kwargs):
    """Build a ResNet-18 backbone.

    Args:
        pretrained (bool): If True, load ImageNet weights from the model zoo
            (non-matching keys are skipped via strict=False).
    """
    model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
    if pretrained:
        state_dict = model_zoo.load_url(model_urls['resnet18'], model_dir='.')
        model.load_state_dict(state_dict, strict=False)
    return model
def resnet34(pretrained=True, **kwargs):
    """Build a ResNet-34 backbone.

    Args:
        pretrained (bool): If True, load ImageNet weights from the model zoo
            (non-matching keys are skipped via strict=False).
    """
    model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
    if pretrained:
        state_dict = model_zoo.load_url(model_urls['resnet34'], model_dir='.')
        model.load_state_dict(state_dict, strict=False)
    return model
def resnet50(pretrained=True, **kwargs):
    """Construct a ResNet-50 backbone (Bottleneck blocks).

    Args:
        pretrained (bool): If True, load ImageNet-pretrained weights
            (non-strict, downloaded into the current directory).
    """
    model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
    if not pretrained:
        return model
    state_dict = model_zoo.load_url(model_urls['resnet50'], model_dir='.')
    model.load_state_dict(state_dict, strict=False)
    return model
def resnet101(pretrained=True, **kwargs):
    """Construct a ResNet-101 backbone (Bottleneck blocks).

    Args:
        pretrained (bool): If True, load ImageNet-pretrained weights
            (non-strict, downloaded into the current directory).
    """
    model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
    if not pretrained:
        return model
    state_dict = model_zoo.load_url(model_urls['resnet101'], model_dir='.')
    model.load_state_dict(state_dict, strict=False)
    return model
def resnet152(pretrained=True, **kwargs):
    """Construct a ResNet-152 backbone (Bottleneck blocks).

    Args:
        pretrained (bool): If True, load ImageNet-pretrained weights
            (non-strict, downloaded into the current directory).
    """
    model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
    if not pretrained:
        return model
    state_dict = model_zoo.load_url(model_urls['resnet152'], model_dir='.')
    model.load_state_dict(state_dict, strict=False)
    return model
@BACKBONE_DICT.register_module
def resnet(depth, **kwargs):
    """Build a ResNet backbone of the requested depth.

    Args:
        depth (int): one of 18, 34, 50, 101, 152.

    Raises:
        ValueError: if ``depth`` is not a supported variant.
    """
    constructors = {
        18: resnet18,
        34: resnet34,
        50: resnet50,
        101: resnet101,
        152: resnet152,
    }
    if depth not in constructors:
        raise ValueError(
            'Unsupported model depth, must be one of 18, 34, 50, 101, 152')
    return constructors[depth](**kwargs)
if __name__ == '__main__':
    # Smoke test: run a randomly-initialised (non-pretrained) ResNet-18
    # forward pass on the GPU with a dummy 2x3x224x224 batch.
    model = resnet18(False).cuda()
    model.eval()
    image = torch.rand(2, 3, 224, 224).cuda()
    output = model(image)
| 33.064748 | 144 | 0.592472 |
660c6971533c6de48ad255a7091b1bb30a429edf | 1,446 | py | Python | examples/tornadio2go_sample/wsgi.py | rudeb0t/tornadio2go | ad618717c6b185fee533928d378c086e273685a9 | [
"BSD-3-Clause"
] | 1 | 2015-02-13T06:36:03.000Z | 2015-02-13T06:36:03.000Z | examples/tornadio2go_sample/wsgi.py | rudeb0t/tornadio2go | ad618717c6b185fee533928d378c086e273685a9 | [
"BSD-3-Clause"
] | 1 | 2016-03-24T16:00:38.000Z | 2016-03-24T16:00:38.000Z | examples/tornadio2go_sample/wsgi.py | rudeb0t/tornadio2go | ad618717c6b185fee533928d378c086e273685a9 | [
"BSD-3-Clause"
] | null | null | null | """
WSGI config for tornadio_stress project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "tornadio_stress.settings"
# NOTE: setdefault must run before get_wsgi_application() is imported/called,
# since Django reads the settings module at application construction time.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tornadio_stress.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
0dd13eec4e1896568680bfa9338440b3177c16ac | 2,451 | py | Python | presto-docs/src/main/sphinx/conf.py | zx-hub/prestoSQL | 5c0e8d4db716d2906e9b81c7e4a5c14c37fd420c | [
"Apache-2.0"
] | null | null | null | presto-docs/src/main/sphinx/conf.py | zx-hub/prestoSQL | 5c0e8d4db716d2906e9b81c7e4a5c14c37fd420c | [
"Apache-2.0"
] | 3 | 2021-08-09T21:00:30.000Z | 2022-02-16T01:13:19.000Z | presto-docs/src/main/sphinx/conf.py | zx-hub/prestoSQL | 5c0e8d4db716d2906e9b81c7e4a5c14c37fd420c | [
"Apache-2.0"
] | 1 | 2020-09-02T14:08:01.000Z | 2020-09-02T14:08:01.000Z | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Presto documentation build configuration file
#
# This file is execfile()d with the current directory set to its containing dir.
#
import os
import sys
import xml.dom.minidom
# Never write .pyc files while building the docs. Guarded because the
# attribute assignment is best-effort on exotic interpreters.
try:
    sys.dont_write_bytecode = True
except Exception:  # narrowed from bare `except:` so SystemExit/KeyboardInterrupt propagate
    pass

# Make the local Sphinx extensions ('backquote', 'download', 'issue') importable.
sys.path.insert(0, os.path.abspath('ext'))
def child_node(node, name):
    """Return the first child element of *node* whose tag is *name*, or None."""
    for candidate in node.childNodes:
        is_element = candidate.nodeType == candidate.ELEMENT_NODE
        if is_element and candidate.tagName == name:
            return candidate
    return None
def node_text(node):
    """Return the character data of *node*'s first child (its text content)."""
    first_child = node.childNodes[0]
    return first_child.data
def maven_version(pom):
    """Read the project version from a Maven POM file.

    Uses the <version> element of <project> when present, otherwise
    falls back to the <parent>'s <version>.
    """
    document = xml.dom.minidom.parse(pom)
    project = document.childNodes[0]
    own_version = child_node(project, 'version')
    if own_version:
        return node_text(own_version)
    # No direct version: inherit it from the parent POM declaration.
    parent = child_node(project, 'parent')
    return node_text(child_node(parent, 'version'))
def get_version():
    """Return PRESTO_VERSION from the environment, else the root pom's version."""
    env_version = os.environ.get('PRESTO_VERSION', '').strip()
    if env_version:
        return env_version
    return maven_version('../../../pom.xml')
# -- General configuration -----------------------------------------------------
needs_sphinx = '1.1'
# Local extensions loaded from the 'ext' directory added to sys.path above.
extensions = ['backquote', 'download', 'issue']
templates_path = ['templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'Presto'
# Version is taken from the environment or the Maven POM (see get_version).
version = get_version()
release = version
exclude_patterns = ['_build']
highlight_language = 'sql'
default_role = 'backquote'
# Substitution available at the end of every .rst page, e.g. |presto_server_release|.
rst_epilog = """
.. |presto_server_release| replace:: ``presto-server-{release}``
""".replace('{release}', release)
# -- Options for HTML output ---------------------------------------------------
html_theme_path = ['themes']
html_theme = 'presto'
html_title = '%s %s Documentation' % (project, release)
html_logo = 'images/presto.svg'
html_add_permalinks = '#'
html_show_copyright = False
html_show_sphinx = False
html_sidebars = {
    "**": ['logo-text.html', 'globaltoc.html', 'localtoc.html', 'searchbox.html']
}
html_theme_options = {
    'base_url': '/',
}
e906ffbd7bf7fea0a5f38aa62a19208b5d424b09 | 1,004 | py | Python | kubernetes/test/test_v1_subject_access_review.py | iguazio/python | c2684bb479d44a49a2010ec4ede5ffa7b17349dd | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v1_subject_access_review.py | iguazio/python | c2684bb479d44a49a2010ec4ede5ffa7b17349dd | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v1_subject_access_review.py | iguazio/python | c2684bb479d44a49a2010ec4ede5ffa7b17349dd | [
"Apache-2.0"
] | 1 | 2019-01-10T11:13:52.000Z | 2019-01-10T11:13:52.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_subject_access_review import V1SubjectAccessReview
class TestV1SubjectAccessReview(unittest.TestCase):
""" V1SubjectAccessReview unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1SubjectAccessReview(self):
"""
Test V1SubjectAccessReview
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.v1_subject_access_review.V1SubjectAccessReview()
pass
if __name__ == '__main__':
unittest.main()
| 22.311111 | 105 | 0.721116 |
127bcd3beba33382bfaa229b716dc82354d24294 | 37,532 | py | Python | posthog/api/test/test_action_people.py | msnitish/posthog | cb86113f568e72eedcb64b5fd00c313d21e72f90 | [
"MIT"
] | null | null | null | posthog/api/test/test_action_people.py | msnitish/posthog | cb86113f568e72eedcb64b5fd00c313d21e72f90 | [
"MIT"
] | null | null | null | posthog/api/test/test_action_people.py | msnitish/posthog | cb86113f568e72eedcb64b5fd00c313d21e72f90 | [
"MIT"
] | null | null | null | import json
from uuid import uuid4
from freezegun import freeze_time
from ee.clickhouse.models.event import create_event
from ee.clickhouse.models.session_recording_event import create_session_recording_event
from ee.clickhouse.util import ClickhouseTestMixin, snapshot_clickhouse_queries
from posthog.constants import ENTITY_ID, ENTITY_MATH, ENTITY_TYPE, TRENDS_CUMULATIVE
from posthog.models import Action, ActionStep, Cohort, Organization, Person
from posthog.test.base import APIBaseTest
def _create_action(**kwargs):
    """Create an Action plus one ActionStep that matches the event of the same name."""
    team = kwargs.pop("team")
    name = kwargs.pop("name")
    new_action = Action.objects.create(team=team, name=name)
    ActionStep.objects.create(action=new_action, event=name)
    return new_action
def _create_cohort(**kwargs):
    """Create and return a Cohort for the given team, name and groups."""
    team = kwargs.pop("team")
    name = kwargs.pop("name")
    groups = kwargs.pop("groups")
    return Cohort.objects.create(team=team, name=name, groups=groups)
def _create_person(**kwargs):
    """Persist a Person and return an unsaved instance keyed by its UUID string."""
    created = Person.objects.create(**kwargs)
    return Person(id=str(created.uuid))
def _create_event(uuid=None, **kwargs):
    """Insert a ClickHouse event, generating a fresh UUID when none is supplied."""
    kwargs["event_uuid"] = uuid or uuid4()
    create_event(**kwargs)
def _create_session_recording_event(team_id, distinct_id, session_id, timestamp, window_id="", has_full_snapshot=True):
    """Insert a single session-recording snapshot event into ClickHouse."""
    snapshot_payload = {"has_full_snapshot": has_full_snapshot}
    create_session_recording_event(
        uuid=uuid4(),
        team_id=team_id,
        distinct_id=distinct_id,
        timestamp=timestamp,
        session_id=session_id,
        window_id=window_id,
        snapshot_data=snapshot_payload,
    )
class TestActionPeople(ClickhouseTestMixin, APIBaseTest):
def _create_events(self, use_time=False):
    # Fixture: one person ("blabla"/"anonymous_id") with "sign up" events on
    # three freeze dates (date-only, or with times when use_time=True), plus a
    # second org/team whose events must never leak into this team's results.
    _create_action(team=self.team, name="no events")
    sign_up_action = _create_action(team=self.team, name="sign up")
    person = _create_person(team_id=self.team.pk, distinct_ids=["blabla", "anonymous_id"])
    secondTeam = Organization.objects.bootstrap(None, team_fields={"api_token": "token456"})[2]
    freeze_without_time = ["2019-12-24", "2020-01-01", "2020-01-02"]
    freeze_with_time = [
        "2019-12-24 03:45:34",
        "2020-01-01 00:06:34",
        "2020-01-02 16:34:34",
    ]
    freeze_args = freeze_without_time
    if use_time:
        freeze_args = freeze_with_time
    with freeze_time(freeze_args[0]):
        _create_event(
            team=self.team, event="sign up", distinct_id="blabla", properties={"$some_property": "value"},
        )
    with freeze_time(freeze_args[1]):
        _create_event(
            team=self.team, event="sign up", distinct_id="blabla", properties={"$some_property": "value"},
        )
        _create_event(team=self.team, event="sign up", distinct_id="anonymous_id")
        _create_event(team=self.team, event="sign up", distinct_id="blabla")
    with freeze_time(freeze_args[2]):
        _create_event(
            team=self.team,
            event="sign up",
            distinct_id="blabla",
            properties={"$some_property": "other_value", "$some_numerical_prop": 80,},
        )
        _create_event(team=self.team, event="no events", distinct_id="blabla")
        # second team should have no effect
        _create_event(
            team=secondTeam, event="sign up", distinct_id="blabla", properties={"$some_property": "other_value"},
        )
    return sign_up_action, person
def test_people_cumulative(self):
    # 80 distinct persons sign up across three dates; the cumulative-graph
    # people endpoint must report all 80 for both an absolute range and a
    # relative (-30d) range evaluated "now" (frozen at 2020-01-31).
    with freeze_time("2020-01-01 00:06:34"):
        for i in range(20):
            _create_person(team_id=self.team.pk, distinct_ids=[f"blabla_{i}"])
            _create_event(
                team=self.team, event="sign up", distinct_id=f"blabla_{i}", properties={"$some_property": "value"},
            )
    with freeze_time("2020-01-05 00:06:34"):
        for i in range(20, 40):
            _create_person(team_id=self.team.pk, distinct_ids=[f"blabla_{i}"])
            _create_event(
                team=self.team, event="sign up", distinct_id=f"blabla_{i}", properties={"$some_property": "value"},
            )
    with freeze_time("2020-01-15 00:06:34"):
        for i in range(40, 80):
            _create_person(team_id=self.team.pk, distinct_ids=[f"blabla_{i}"])
            _create_event(
                team=self.team, event="sign up", distinct_id=f"blabla_{i}", properties={"$some_property": "value"},
            )
    event_response = self.client.get(
        f"/api/projects/{self.team.id}/actions/people/",
        data={
            "date_from": "2020-01-01",
            "date_to": "2020-01-31",
            "interval": "day",
            ENTITY_TYPE: "events",
            ENTITY_ID: "sign up",
            "display": "ActionsLineGraphCumulative",
        },
    ).json()
    self.assertEqual(event_response["results"][0]["count"], 80)
    with freeze_time("2020-01-31 00:06:34"):
        event_response = self.client.get(
            f"/api/projects/{self.team.id}/actions/people/",
            data={
                "date_from": "-30d",
                "date_to": "2020-01-31",
                "interval": "day",
                ENTITY_TYPE: "events",
                ENTITY_ID: "sign up",
                "display": "ActionsLineGraphCumulative",
            },
        ).json()
        self.assertEqual(event_response["results"][0]["count"], 80)
def _create_breakdown_events(self):
    # Fixture: 25 "sign up" events with numeric breakdown values 0..24, all
    # on the same day and from the same distinct_id.
    freeze_without_time = ["2020-01-02"]
    _create_action(team=self.team, name="sign up")
    with freeze_time(freeze_without_time[0]):
        for i in range(25):
            _create_event(
                team=self.team, event="sign up", distinct_id="blabla", properties={"$some_property": i},
            )
def test_people_endpoint_paginated(self):
    # With 150 matching persons, the first page holds 100 and the `next`
    # cursor URL returns the remaining 50.
    for index in range(0, 150):
        _create_person(team_id=self.team.pk, distinct_ids=["person" + str(index)])
        _create_event(
            team=self.team, event="sign up", distinct_id="person" + str(index), timestamp="2020-01-04T12:00:00Z",
        )
    event_response = self.client.get(
        f"/api/projects/{self.team.id}/actions/people/",
        data={"date_from": "2020-01-04", "date_to": "2020-01-04", ENTITY_TYPE: "events", ENTITY_ID: "sign up",},
    ).json()
    self.assertEqual(len(event_response["results"][0]["people"]), 100)
    event_response_next = self.client.get(event_response["next"]).json()
    self.assertEqual(len(event_response_next["results"][0]["people"]), 50)
def _create_people_interval_events(self):
    # Fixture covering each interval granularity: a solo hour (person1), a
    # shared hour bucket (person2+3), a shared minute (person4+5), and
    # week/month buckets (person6, person7, plus a November event for person1).
    person1 = _create_person(team_id=self.team.pk, distinct_ids=["person1"])
    person2 = _create_person(team_id=self.team.pk, distinct_ids=["person2"])
    person3 = _create_person(team_id=self.team.pk, distinct_ids=["person3"])
    person4 = _create_person(team_id=self.team.pk, distinct_ids=["person4"])
    person5 = _create_person(team_id=self.team.pk, distinct_ids=["person5"])
    person6 = _create_person(team_id=self.team.pk, distinct_ids=["person6"])
    person7 = _create_person(team_id=self.team.pk, distinct_ids=["person7"])
    # solo
    _create_event(
        team=self.team, event="sign up", distinct_id="person1", timestamp="2020-01-04T14:10:00Z",
    )
    # group by hour
    _create_event(
        team=self.team, event="sign up", distinct_id="person2", timestamp="2020-01-04T16:30:00Z",
    )
    _create_event(
        team=self.team, event="sign up", distinct_id="person3", timestamp="2020-01-04T16:50:00Z",
    )
    # group by min
    _create_event(
        team=self.team, event="sign up", distinct_id="person4", timestamp="2020-01-04T19:20:00Z",
    )
    _create_event(
        team=self.team, event="sign up", distinct_id="person5", timestamp="2020-01-04T19:20:00Z",
    )
    # group by week and month
    _create_event(
        team=self.team, event="sign up", distinct_id="person6", timestamp="2019-11-05T16:30:00Z",
    )
    _create_event(
        team=self.team, event="sign up", distinct_id="person7", timestamp="2019-11-07T16:50:00Z",
    )
    _create_event(
        team=self.team, event="sign up", distinct_id="person1", timestamp="2019-11-27T16:50:00Z",
    )
    return person1, person2, person3, person4, person5, person6, person7
def test_hour_interval(self):
    # Hour buckets: 14:00 should contain only person1; 16:00 should group
    # person2 and person3. Action- and event-based queries must agree.
    sign_up_action, person = self._create_events()
    person1, person2, person3, person4, person5, person6, person7 = self._create_people_interval_events()
    person = _create_person(team_id=self.team.pk, distinct_ids=["outside_range"])
    _create_event(
        team=self.team, event="sign up", distinct_id="outside_range", timestamp="2020-01-04T13:50:00Z",
    )
    _create_event(
        team=self.team, event="sign up", distinct_id="outside_range", timestamp="2020-01-04T15:50:00Z",
    )
    # check solo hour
    action_response = self.client.get(
        f"/api/projects/{self.team.id}/actions/people/",
        data={
            "interval": "hour",
            "date_from": "2020-01-04 14:00:00",
            "date_to": "2020-01-04 14:00:00",
            ENTITY_TYPE: "actions",
            ENTITY_ID: sign_up_action.id,
        },
    ).json()
    event_response = self.client.get(
        f"/api/projects/{self.team.id}/actions/people/",
        data={
            "interval": "hour",
            "date_from": "2020-01-04 14:00:00",
            "date_to": "2020-01-04 14:00:00",
            ENTITY_TYPE: "events",
            ENTITY_ID: "sign up",
        },
    ).json()
    self.assertEqual(str(action_response["results"][0]["people"][0]["id"]), str(person1.pk))
    self.assertEqual(len(action_response["results"][0]["people"]), 1)
    self.assertEntityResponseEqual(action_response["results"], event_response["results"], remove=[])
    # check grouped hour
    hour_grouped_action_response = self.client.get(
        f"/api/projects/{self.team.id}/actions/people/",
        data={
            "interval": "hour",
            "date_from": "2020-01-04 16:00:00",
            "date_to": "2020-01-04 16:00:00",
            ENTITY_TYPE: "actions",
            ENTITY_ID: sign_up_action.id,
        },
    ).json()
    hour_grouped_grevent_response = self.client.get(
        f"/api/projects/{self.team.id}/actions/people/",
        data={
            "interval": "hour",
            "date_from": "2020-01-04 16:00:00",
            "date_to": "2020-01-04 16:00:00",
            ENTITY_TYPE: "events",
            ENTITY_ID: "sign up",
        },
    ).json()
    all_people_ids = [str(person["id"]) for person in hour_grouped_action_response["results"][0]["people"]]
    self.assertListEqual(sorted(all_people_ids), sorted([str(person2.pk), str(person3.pk)]))
    self.assertEqual(len(all_people_ids), 2)
    self.assertEntityResponseEqual(
        hour_grouped_action_response["results"], hour_grouped_grevent_response["results"], remove=[],
    )
def test_day_interval(self):
    # Day interval: only person1's 2020-01-04 event falls inside the range;
    # events on the 3rd and 5th are excluded.
    sign_up_action, person = self._create_events()
    person1 = _create_person(team_id=self.team.pk, distinct_ids=["person1"])
    _create_person(team_id=self.team.pk, distinct_ids=["person2"])
    _create_event(
        team=self.team, event="sign up", distinct_id="person1", timestamp="2020-01-04T12:00:00Z",
    )
    _create_event(
        team=self.team, event="sign up", distinct_id="person2", timestamp="2020-01-05T12:00:00Z",
    )
    person = _create_person(team_id=self.team.pk, distinct_ids=["outside_range"])
    _create_event(
        team=self.team, event="sign up", distinct_id="outside_range", timestamp="2020-01-03T13:50:00Z",
    )
    _create_event(
        team=self.team, event="sign up", distinct_id="outside_range", timestamp="2020-01-05T15:50:00Z",
    )
    # test people
    action_response = self.client.get(
        f"/api/projects/{self.team.id}/actions/people/",
        data={
            "date_from": "2020-01-04",
            "date_to": "2020-01-04",
            ENTITY_TYPE: "actions",
            "interval": "day",
            ENTITY_ID: sign_up_action.id,
        },
    ).json()
    event_response = self.client.get(
        f"/api/projects/{self.team.id}/actions/people/",
        data={
            "date_from": "2020-01-04",
            "date_to": "2020-01-04",
            ENTITY_TYPE: "events",
            ENTITY_ID: "sign up",
            "interval": "day",
        },
    ).json()
    self.assertEqual(len(action_response["results"][0]["people"]), 1)
    self.assertEqual(str(action_response["results"][0]["people"][0]["id"]), str(person1.pk))
    self.assertEntityResponseEqual(action_response["results"], event_response["results"], remove=[])
def test_day_interval_cumulative(self):
    # Cumulative display: the 2020-01-03..04 window accumulates person1 (3rd)
    # and person2 (4th); events on the 2nd and 5th stay out.
    sign_up_action, person = self._create_events()
    person1 = _create_person(team_id=self.team.pk, distinct_ids=["person1"])
    person2 = _create_person(team_id=self.team.pk, distinct_ids=["person2"])
    _create_event(
        team=self.team, event="sign up", distinct_id="person1", timestamp="2020-01-03T12:00:00Z",
    )
    _create_event(
        team=self.team, event="sign up", distinct_id="person2", timestamp="2020-01-04T20:00:00Z",
    )
    outside_range_person = _create_person(team_id=self.team.pk, distinct_ids=["outside_range"])
    _create_event(
        team=self.team, event="sign up", distinct_id="outside_range", timestamp="2020-01-02T13:50:00Z",
    )
    _create_event(
        team=self.team, event="sign up", distinct_id="outside_range", timestamp="2020-01-05T15:50:00Z",
    )
    # test people
    action_response = self.client.get(
        f"/api/projects/{self.team.id}/actions/people/",
        data={
            "date_from": "2020-01-03",
            "date_to": "2020-01-04",
            ENTITY_TYPE: "actions",
            "interval": "day",
            ENTITY_ID: sign_up_action.id,
            "display": TRENDS_CUMULATIVE,
        },
    ).json()
    event_response = self.client.get(
        f"/api/projects/{self.team.id}/actions/people/",
        data={
            "date_from": "2020-01-03",
            "date_to": "2020-01-04",
            ENTITY_TYPE: "events",
            ENTITY_ID: "sign up",
            "interval": "day",
            "display": TRENDS_CUMULATIVE,
        },
    ).json()
    self.assertEqual(len(action_response["results"][0]["people"]), 2)
    self.assertEqual(
        sorted(p["id"] for p in action_response["results"][0]["people"]), sorted([person1.pk, person2.pk])
    )
    self.assertEntityResponseEqual(action_response["results"], event_response["results"], remove=[])
def test_week_interval(self):
    # Week bucket containing 2019-11-01 should group person6 and person7;
    # events far outside (2019-10-26 / 2020-11-11) are excluded.
    sign_up_action, person = self._create_events()
    person1, person2, person3, person4, person5, person6, person7 = self._create_people_interval_events()
    person = _create_person(team_id=self.team.pk, distinct_ids=["outside_range"])
    _create_event(
        team=self.team, event="sign up", distinct_id="outside_range", timestamp="2019-10-26T13:50:00Z",
    )
    _create_event(
        team=self.team, event="sign up", distinct_id="outside_range", timestamp="2020-11-11T15:50:00Z",
    )
    # check grouped week
    week_grouped_action_response = self.client.get(
        f"/api/projects/{self.team.id}/actions/people/",
        data={
            "interval": "week",
            "date_from": "2019-11-01",
            "date_to": "2019-11-01",
            ENTITY_TYPE: "actions",
            ENTITY_ID: sign_up_action.id,
        },
    ).json()
    week_grouped_grevent_response = self.client.get(
        f"/api/projects/{self.team.id}/actions/people/",
        data={
            "interval": "week",
            "date_from": "2019-11-01",
            "date_to": "2019-11-01",
            ENTITY_TYPE: "events",
            ENTITY_ID: "sign up",
        },
    ).json()
    self.maxDiff = None
    all_people_ids = [str(person["id"]) for person in week_grouped_action_response["results"][0]["people"]]
    self.assertEqual(len(all_people_ids), 2)
    self.assertListEqual(sorted(all_people_ids), sorted([str(person6.pk), str(person7.pk)]))
    self.assertEntityResponseEqual(
        week_grouped_action_response["results"], week_grouped_grevent_response["results"], remove=[],
    )
def test_month_interval(self):
    # Month bucket for November 2019 groups person6, person7 and person1
    # (who also has a 2019-11-27 event); December/October events are excluded.
    sign_up_action, person = self._create_events()
    person1, person2, person3, person4, person5, person6, person7 = self._create_people_interval_events()
    person = _create_person(team_id=self.team.pk, distinct_ids=["outside_range"])
    _create_event(
        team=self.team, event="sign up", distinct_id="outside_range", timestamp="2019-12-01T13:50:00Z",
    )
    _create_event(
        team=self.team, event="sign up", distinct_id="outside_range", timestamp="2020-10-10T15:50:00Z",
    )
    # check grouped month
    month_group_action_response = self.client.get(
        f"/api/projects/{self.team.id}/actions/people/",
        data={
            "interval": "month",
            "date_from": "2019-11-01",
            "date_to": "2019-11-01",
            ENTITY_TYPE: "actions",
            ENTITY_ID: sign_up_action.id,
        },
    ).json()
    month_group_grevent_response = self.client.get(
        f"/api/projects/{self.team.id}/actions/people/",
        data={
            "interval": "month",
            "date_from": "2019-11-01",
            "date_to": "2019-11-01",
            ENTITY_TYPE: "events",
            ENTITY_ID: "sign up",
        },
    ).json()
    all_people_ids = [str(person["id"]) for person in month_group_action_response["results"][0]["people"]]
    self.assertEqual(len(all_people_ids), 3)
    self.assertListEqual(sorted(all_people_ids), sorted([str(person6.pk), str(person7.pk), str(person1.pk)]))
    self.assertEntityResponseEqual(
        month_group_action_response["results"], month_group_grevent_response["results"], remove=[],
    )
def test_interval_rounding(self):
    # TODO: empty placeholder — interval-rounding behaviour is not yet covered.
    pass
def _create_multiple_people(self):
    # Fixture: four persons with "watched movie" events — person1 (1 event,
    # prop1), person2 (3 events incl. two on the same day, prop1), person3
    # (3 events on consecutive days, prop2), person4 (1 event, prop3).
    person1 = _create_person(team_id=self.team.pk, distinct_ids=["person1"], properties={"name": "person1"})
    _create_event(
        team=self.team,
        event="watched movie",
        distinct_id="person1",
        timestamp="2020-01-01T12:00:00Z",
        properties={"event_prop": "prop1"},
    )
    person2 = _create_person(team_id=self.team.pk, distinct_ids=["person2"], properties={"name": "person2"})
    _create_event(
        team=self.team,
        event="watched movie",
        distinct_id="person2",
        timestamp="2020-01-01T12:00:00Z",
        properties={"event_prop": "prop1"},
    )
    _create_event(
        team=self.team,
        event="watched movie",
        distinct_id="person2",
        timestamp="2020-01-02T12:00:00Z",
        properties={"event_prop": "prop1"},
    )
    # same day
    _create_event(
        team=self.team,
        event="watched movie",
        distinct_id="person2",
        timestamp="2020-01-02T12:00:00Z",
        properties={"event_prop": "prop1"},
    )
    person3 = _create_person(team_id=self.team.pk, distinct_ids=["person3"], properties={"name": "person3"})
    _create_event(
        team=self.team,
        event="watched movie",
        distinct_id="person3",
        timestamp="2020-01-01T12:00:00Z",
        properties={"event_prop": "prop2"},
    )
    _create_event(
        team=self.team,
        event="watched movie",
        distinct_id="person3",
        timestamp="2020-01-02T12:00:00Z",
        properties={"event_prop": "prop2"},
    )
    _create_event(
        team=self.team,
        event="watched movie",
        distinct_id="person3",
        timestamp="2020-01-03T12:00:00Z",
        properties={"event_prop": "prop2"},
    )
    person4 = _create_person(team_id=self.team.pk, distinct_ids=["person4"], properties={"name": "person4"})
    _create_event(
        team=self.team,
        event="watched movie",
        distinct_id="person4",
        timestamp="2020-01-05T12:00:00Z",
        properties={"event_prop": "prop3"},
    )
    return (person1, person2, person3, person4)
def test_people_csv(self):
    # CSV export of DAU people: expect a header row plus one row per person.
    person1, _, _, _ = self._create_multiple_people()
    people = self.client.get(
        f"/api/projects/{self.team.id}/actions/people.csv",
        data={
            "date_from": "2020-01-01",
            "date_to": "2020-01-07",
            ENTITY_TYPE: "events",
            ENTITY_ID: "watched movie",
            "display": "ActionsLineGraphCumulative",
            "entity_math": "dau",
            "events": json.dumps([{"id": "watched movie", "type": "events", "math": "dau"}]),
        },
    )
    resp = people.content.decode("utf-8").split("\r\n")
    resp = sorted(resp)
    self.assertEqual(len(resp), 6)  # header, 4 people, empty line
    self.assertEqual(resp[1], "Distinct ID,Internal ID,Email,Name,Properties")
    self.assertEqual(resp[2].split(",")[0], "person1")
def test_breakdown_by_cohort_people_endpoint(self):
    # Cohort breakdown: a specific cohort value returns only its members,
    # while breakdown_value "all" returns every matching person.
    person1, _, _, _ = self._create_multiple_people()
    cohort = _create_cohort(name="cohort1", team=self.team, groups=[{"properties": {"name": "person1"}}])
    _create_cohort(name="cohort2", team=self.team, groups=[{"properties": {"name": "person2"}}])
    _create_cohort(
        name="cohort3",
        team=self.team,
        groups=[{"properties": {"name": "person1"}}, {"properties": {"name": "person2"}},],
    )
    _create_action(name="watched movie", team=self.team)
    people = self.client.get(
        f"/api/projects/{self.team.id}/actions/people/",
        data={
            "date_from": "2020-01-01",
            "date_to": "2020-01-07",
            "display": TRENDS_CUMULATIVE,  # ensure date range is used as is
            ENTITY_TYPE: "events",
            ENTITY_ID: "watched movie",
            "breakdown_type": "cohort",
            "breakdown_value": cohort.pk,
            "breakdown": [cohort.pk],  # this shouldn't do anything
        },
    ).json()
    self.assertEqual(len(people["results"][0]["people"]), 1)
    ordered_people = sorted(people["results"][0]["people"], key=lambda i: i["id"])
    self.assertEqual(ordered_people[0]["id"], person1.pk)
    # all people
    people = self.client.get(
        f"/api/projects/{self.team.id}/actions/people/",
        data={
            "date_from": "2020-01-01",
            "date_to": "2020-01-07",
            "display": TRENDS_CUMULATIVE,  # ensure date range is used as is
            ENTITY_TYPE: "events",
            ENTITY_ID: "watched movie",
            "breakdown_type": "cohort",
            "breakdown_value": "all",
            "breakdown": [cohort.pk],
        },
    ).json()
    self.assertEqual(len(people["results"][0]["people"]), 4)
    ordered_people = sorted(people["results"][0]["people"], key=lambda i: i["id"])
    self.assertEqual(ordered_people[0]["id"], person1.pk)
def test_breakdown_by_person_property_people_endpoint(self):
    # Person-property breakdown: filtering and breaking down on name=person3
    # returns exactly that person.
    person1, person2, person3, person4 = self._create_multiple_people()
    action = _create_action(name="watched movie", team=self.team)
    people = self.client.get(
        f"/api/projects/{self.team.id}/actions/people/",
        data={
            "date_from": "2020-01-01",
            "date_to": "2020-01-07",
            ENTITY_TYPE: "events",
            ENTITY_ID: "watched movie",
            "properties": json.dumps([{"key": "name", "value": "person3", "type": "person"}]),
            "breakdown_type": "person",
            "breakdown_value": "person3",
            "breakdown": "name",
        },
    ).json()
    self.assertEqual(len(people["results"][0]["people"]), 1)
    self.assertEqual(people["results"][0]["people"][0]["id"], person3.pk)
def test_breakdown_by_event_property_people_endpoint(self):
    # Event-property breakdown: value "prop1" matches person1's and person2's
    # events only.
    person1, person2, person3, person4 = self._create_multiple_people()
    action = _create_action(name="watched movie", team=self.team)
    people = self.client.get(
        f"/api/projects/{self.team.id}/actions/people/",
        data={
            "date_from": "2020-01-01",
            "date_to": "2020-01-07",
            ENTITY_TYPE: "events",
            ENTITY_ID: "watched movie",
            "properties": json.dumps([{"key": "event_prop", "value": "prop1", "type": "event"}]),
            "breakdown_type": "event",
            "breakdown_value": "prop1",
            "breakdown": "event_prop",
        },
    ).json()
    self.assertEqual(len(people["results"][0]["people"]), 2)
    ordered_people = sorted(p["id"] for p in people["results"][0]["people"])
    self.assertEqual(ordered_people, sorted([person1.pk, person2.pk]))
def test_filtering_by_person_properties(self):
    # A person-property filter (name=person2) restricts the returned people
    # to that single person.
    person1, person2, person3, person4 = self._create_multiple_people()
    people = self.client.get(
        f"/api/projects/{self.team.id}/actions/people/",
        data={
            "date_from": "2020-01-01",
            "date_to": "2020-01-07",
            ENTITY_TYPE: "events",
            ENTITY_ID: "watched movie",
            "properties": json.dumps([{"key": "name", "value": "person2", "type": "person"}]),
        },
    ).json()
    self.assertEqual(len(people["results"][0]["people"]), 1)
    self.assertEqual(people["results"][0]["people"][0]["id"], person2.pk)
def test_active_user_weekly_people(self):
    # weekly_active math: both p1 and p2 were active within the 7-day window
    # ending on the queried day (2020-01-10), so both are returned.
    p1 = _create_person(team_id=self.team.pk, distinct_ids=["p1"], properties={"name": "p1"})
    _create_event(
        team=self.team,
        event="$pageview",
        distinct_id="p1",
        timestamp="2020-01-09T12:00:00Z",
        properties={"key": "val"},
    )
    _create_event(
        team=self.team,
        event="$pageview",
        distinct_id="p1",
        timestamp="2020-01-10T12:00:00Z",
        properties={"key": "val"},
    )
    _create_event(
        team=self.team,
        event="$pageview",
        distinct_id="p1",
        timestamp="2020-01-11T12:00:00Z",
        properties={"key": "val"},
    )
    p2 = _create_person(team_id=self.team.pk, distinct_ids=["p2"], properties={"name": "p2"})
    _create_event(
        team=self.team,
        event="$pageview",
        distinct_id="p2",
        timestamp="2020-01-09T12:00:00Z",
        properties={"key": "val"},
    )
    _create_event(
        team=self.team,
        event="$pageview",
        distinct_id="p2",
        timestamp="2020-01-11T12:00:00Z",
        properties={"key": "val"},
    )
    people = self.client.get(
        f"/api/projects/{self.team.id}/actions/people/",
        data={
            "date_from": "2020-01-10",
            "date_to": "2020-01-10",
            ENTITY_TYPE: "events",
            ENTITY_ID: "$pageview",
            ENTITY_MATH: "weekly_active",
        },
    ).json()
    self.assertEqual(len(people["results"][0]["people"]), 2)
def test_breakdown_by_person_property_nones_people_endpoint(self):
    # Person-property breakdown with missing values: p2 has no "name", so the
    # empty breakdown_value "" must match p2 while "p1" matches only p1.
    p1 = _create_person(team_id=self.team.pk, distinct_ids=["p1"], properties={"name": "p1"})
    _create_event(
        team=self.team,
        event="$pageview",
        distinct_id="p1",
        timestamp="2020-01-09T12:00:00Z",
        properties={"key": "val"},
    )
    _create_event(
        team=self.team,
        event="$pageview",
        distinct_id="p1",
        timestamp="2020-01-10T12:00:00Z",
        properties={"key": "val"},
    )
    _create_event(
        team=self.team,
        event="$pageview",
        distinct_id="p1",
        timestamp="2020-01-11T12:00:00Z",
        properties={"key": "val"},
    )
    p2 = _create_person(team_id=self.team.pk, distinct_ids=["p2"], properties={})
    _create_event(
        team=self.team,
        event="$pageview",
        distinct_id="p2",
        timestamp="2020-01-09T12:00:00Z",
        properties={"key": "val"},
    )
    _create_event(
        team=self.team,
        event="$pageview",
        distinct_id="p2",
        timestamp="2020-01-10T12:00:00Z",
        properties={"key": "val"},
    )
    _create_event(
        team=self.team,
        event="$pageview",
        distinct_id="p2",
        timestamp="2020-01-11T12:00:00Z",
        properties={"key": "val"},
    )
    people = self.client.get(
        f"/api/projects/{self.team.id}/actions/people/",
        data={
            "date_from": "2020-01-10",
            "date_to": "2020-01-10",
            ENTITY_TYPE: "events",
            ENTITY_ID: "$pageview",
            "breakdown_type": "person",
            "breakdown_value": "p1",
            "breakdown": "name",
        },
    ).json()
    self.assertEqual(len(people["results"][0]["people"]), 1)
    people = self.client.get(
        f"/api/projects/{self.team.id}/actions/people/",
        data={
            "date_from": "2020-01-10",
            "date_to": "2020-01-10",
            ENTITY_TYPE: "events",
            ENTITY_ID: "$pageview",
            "breakdown_type": "person",
            "breakdown_value": "",
            "breakdown": "name",
        },
    ).json()
    self.assertEqual(len(people["results"][0]["people"]), 1)
def test_breakdown_by_event_property_none_people_endpoint(self):
    """Event-property breakdown: actors with ``key == "val"`` events vs. actors
    whose event is missing the property entirely."""
    # p1 emits three pageviews, all carrying the breakdown property.
    p1 = _create_person(team_id=self.team.pk, distinct_ids=["p1"], properties={"name": "p1"})
    for ts in ("2020-01-09T12:00:00Z", "2020-01-10T12:00:00Z", "2020-01-11T12:00:00Z"):
        _create_event(
            team=self.team,
            event="$pageview",
            distinct_id="p1",
            timestamp=ts,
            properties={"key": "val"},
        )
    # p2 emits one event *without* the property and one with it.
    p2 = _create_person(team_id=self.team.pk, distinct_ids=["p2"], properties={"name": "p2"})
    _create_event(
        team=self.team, event="$pageview", distinct_id="p2", timestamp="2020-01-09T12:00:00Z", properties={},
    )
    _create_event(
        team=self.team,
        event="$pageview",
        distinct_id="p2",
        timestamp="2020-01-11T12:00:00Z",
        properties={"key": "val"},
    )

    def fetch_people(date_from, breakdown_value):
        return self.client.get(
            f"/api/projects/{self.team.id}/actions/people/",
            data={
                "date_from": date_from,
                "date_to": "2020-01-12",
                ENTITY_TYPE: "events",
                ENTITY_ID: "$pageview",
                "display": TRENDS_CUMULATIVE,  # ensure that the date range is used as is
                "breakdown_type": "event",
                "breakdown_value": breakdown_value,
                "breakdown": "key",
            },
        ).json()

    # Both actors emitted at least one event where key == "val".
    # (first request deliberately uses the non-zero-padded "2020-01-8")
    self.assertEqual(len(fetch_people("2020-01-8", "val")["results"][0]["people"]), 2)
    # Only p2 emitted an event with the property absent (the "none" bucket).
    self.assertEqual(len(fetch_people("2020-01-08", "")["results"][0]["people"]), 1)
@snapshot_clickhouse_queries
def test_trends_people_endpoint_includes_recordings(self):
    """Actors returned with ``include_recordings`` carry their matched session recordings."""
    _create_person(team_id=self.team.pk, distinct_ids=["p1"], properties={})
    # A pageview with no session metadata — should not yield a matched recording.
    _create_event(
        team=self.team, event="$pageview", distinct_id="p1", timestamp="2020-01-09T14:00:00Z",
    )
    # A pageview tied to session s1 / window w1, plus the recording for that session.
    event_uuid = "693402ed-590e-4737-ba26-93ebf18121bd"
    _create_event(
        uuid=event_uuid,
        team=self.team,
        event="$pageview",
        distinct_id="p1",
        timestamp="2020-01-09T12:00:00Z",
        properties={"$session_id": "s1", "$window_id": "w1"},
    )
    _create_session_recording_event(
        self.team.pk, "u1", "s1", timestamp="2020-01-09T12:00:00Z",
    )

    query = {
        "date_from": "2020-01-08",
        "date_to": "2020-01-12",
        ENTITY_TYPE: "events",
        ENTITY_ID: "$pageview",
        "display": TRENDS_CUMULATIVE,
        "breakdown_type": "event",
        "breakdown_value": "",
        "breakdown": "key",
        "include_recordings": "true",
    }
    people = self.client.get(
        f"/api/projects/{self.team.id}/actions/people/", data=query
    ).json()

    expected_recordings = [
        {
            "session_id": "s1",
            "events": [
                {
                    "window_id": "w1",
                    "timestamp": "2020-01-09T12:00:00Z",
                    "uuid": event_uuid,
                }
            ],
        },
    ]
    self.assertEqual(
        people["results"][0]["people"][0]["matched_recordings"], expected_recordings
    )
def _test_interval(self, date_from, interval, timestamps):
    """Seed one person + one "watched movie" event per timestamp, then assert
    that only the second and third timestamps land inside the queried bucket.

    ``timestamps`` is expected to hold four values: one just before the
    bucket, two inside it, and one just after.
    """
    for i, when in enumerate(timestamps):
        distinct_id = f"person{i}"
        _create_person(team_id=self.team.pk, distinct_ids=[distinct_id])
        _create_event(
            team=self.team,
            event="watched movie",
            distinct_id=distinct_id,
            timestamp=when,
            properties={"event_prop": f"prop{i}"},
        )
    response = self.client.get(
        f"/api/projects/{self.team.id}/actions/people/",
        data={"interval": interval, "date_from": date_from, ENTITY_TYPE: "events", ENTITY_ID: "watched movie"},
    )
    found_ids = [person["distinct_ids"][0] for person in response.json()["results"][0]["people"]]
    self.assertCountEqual(found_ids, ["person1", "person2"])
def test_interval_month(self):
    # Samples straddle August 2021: the first and last fall outside the month bucket.
    samples = [
        "2021-07-31T23:45:00Z",
        "2021-08-01T00:12:00Z",
        "2021-08-31T22:40:00Z",
        "2021-09-01T00:00:10Z",
    ]
    self._test_interval(date_from="2021-08-01T00:00:00Z", interval="month", timestamps=samples)
def test_interval_week(self):
    # Samples straddle the week of 2021-09-05: first and last fall outside the bucket.
    samples = [
        "2021-09-04T23:45:00Z",
        "2021-09-05T00:12:00Z",
        "2021-09-11T22:40:00Z",
        "2021-09-12T00:00:10Z",
    ]
    self._test_interval(date_from="2021-09-05T00:00:00Z", interval="week", timestamps=samples)
def test_interval_day(self):
    # Samples straddle 2021-09-05: first and last fall outside the day bucket.
    samples = [
        "2021-09-04T23:45:00Z",
        "2021-09-05T00:12:00Z",
        "2021-09-05T22:40:00Z",
        "2021-09-06T00:00:10Z",
    ]
    self._test_interval(date_from="2021-09-05T00:00:00Z", interval="day", timestamps=samples)
def test_interval_hour(self):
    # Samples straddle the 16:00 hour: first and last fall outside the hour bucket.
    samples = [
        "2021-09-05T15:45:00Z",
        "2021-09-05T16:01:12Z",
        "2021-09-05T16:58:00Z",
        "2021-09-05T17:00:10Z",
    ]
    self._test_interval(date_from="2021-09-05T16:00:00Z", interval="hour", timestamps=samples)
| 39.055151 | 119 | 0.542231 |
38ab25f92b278ea145b9034301542ba5f96df441 | 11,288 | py | Python | venv/Lib/site-packages/gensim/models/wrappers/wordrank.py | ishatserka/MachineLearningAndDataAnalysisCoursera | e82e772df2f4aec162cb34ac6127df10d14a625a | [
"MIT"
] | 1 | 2019-01-22T04:59:02.000Z | 2019-01-22T04:59:02.000Z | venv/Lib/site-packages/gensim/models/wrappers/wordrank.py | ishatserka/MachineLearningAndDataAnalysisCoursera | e82e772df2f4aec162cb34ac6127df10d14a625a | [
"MIT"
] | null | null | null | venv/Lib/site-packages/gensim/models/wrappers/wordrank.py | ishatserka/MachineLearningAndDataAnalysisCoursera | e82e772df2f4aec162cb34ac6127df10d14a625a | [
"MIT"
] | null | null | null | # Copyright (C) 2017 Parul Sethi <parul1sethi@gmail.com>
# Copyright (C) 2017 Radim Rehurek <me@radimrehurek.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Python wrapper around word representation learning from Wordrank.
The wrapped model can NOT be updated with new documents for online training -- use gensim's
`Word2Vec` for that.
Example:
>>> model = gensim.models.wrappers.Wordrank.train('/Users/dummy/wordrank', corpus_file='text8', out_name='wr_model')
>>> print model[word] # prints vector for given words
.. [1] https://bitbucket.org/shihaoji/wordrank/
.. [2] https://arxiv.org/pdf/1506.02761v3.pdf
Note that the wrapper might not work in a docker container for large datasets due to memory limits (caused by MPI).
"""
from __future__ import division
import logging
import os
import sys
import copy
import multiprocessing
import numpy as np
from gensim import utils
from gensim.models.keyedvectors import KeyedVectors
from gensim.scripts.glove2word2vec import glove2word2vec
from six import string_types
from smart_open import smart_open
from shutil import copyfile, rmtree
logger = logging.getLogger(__name__)
class Wordrank(KeyedVectors):
    """
    Class for word vector training using Wordrank. Communication between Wordrank and Python
    takes place by working with data files on disk and calling the Wordrank binary and glove's
    helper binaries (for preparing training data) with subprocess module.
    """

    # NOTE(review): the ``train`` parameters ``iter`` and ``np`` shadow the ``iter``
    # builtin and the module-level numpy alias within the method body; kept as-is
    # for API compatibility with the upstream wrapper.
    @classmethod
    def train(cls, wr_path, corpus_file, out_name, size=100, window=15, symmetric=1, min_count=5, max_vocab_size=0,
              sgd_num=100, lrate=0.001, period=10, iter=90, epsilon=0.75, dump_period=10, reg=0, alpha=100,
              beta=99, loss='hinge', memory=4.0, np=1, cleanup_files=False, sorted_vocab=1, ensemble=0):
        """
        The word and context embedding files are generated by wordrank binary and are saved in "out_name" directory
        which is created inside wordrank directory. The vocab and cooccurence files are generated using glove code
        available inside the wordrank directory. These files are used by the wordrank binary for training.

        `wr_path` is the absolute path to the Wordrank directory.
        `corpus_file` is the filename of the text file to be used for training the Wordrank model.
        Expects file to contain space-separated tokens in a single line
        `out_name` is name of the directory which will be created (in wordrank folder) to save embeddings and training data.
        It will contain following contents:
            Word Embeddings saved after every dump_period and stored in a file model_word_current iter.txt
            Context Embeddings saved after every dump_period and stored in a file model_context_current iter.txt
            A meta directory which contain: 'vocab.txt' - vocab words, 'wiki.toy' - word-word coccurence values, 'meta' - vocab and coccurence lengths
        `size` is the dimensionality of the feature vectors.
        `window` is the number of context words to the left (and to the right, if symmetric = 1).
        `symmetric` if 0, only use left context words, else use left and right both.
        `min_count` = ignore all words with total frequency lower than this.
        `max_vocab_size` upper bound on vocabulary size, i.e. keep the <int> most frequent words. Default is 0 for no limit.
        `sgd_num` number of SGD taken for each data point.
        `lrate` is the learning rate (too high diverges, give Nan).
        `period` is the period of xi variable updates
        `iter` = number of iterations (epochs) over the corpus.
        `epsilon` is the power scaling value for weighting function.
        `dump_period` is the period after which embeddings should be dumped.
        `reg` is the value of regularization parameter.
        `alpha` is the alpha parameter of gamma distribution.
        `beta` is the beta parameter of gamma distribution.
        `loss` = name of the loss (logistic, hinge).
        `memory` = soft limit for memory consumption, in GB.
        `np` number of copies to execute. (mpirun option)
        `cleanup_files` if True, delete directory and files used by this wrapper, setting to False can be useful for debugging
        `sorted_vocab` = if 1 (default), sort the vocabulary by descending frequency before assigning word indexes.
        `ensemble` = 0 (default), use ensemble of word and context vectors
        """

        # prepare training data (cooccurrence matrix and vocab)
        # All intermediate artifacts live under <wr_path>/<out_name>/meta.
        model_dir = os.path.join(wr_path, out_name)
        meta_dir = os.path.join(model_dir, 'meta')
        os.makedirs(meta_dir)
        logger.info("Dumped data will be stored in '%s'", model_dir)
        copyfile(corpus_file, os.path.join(meta_dir, corpus_file.split('/')[-1]))

        vocab_file = os.path.join(meta_dir, 'vocab.txt')
        temp_vocab_file = os.path.join(meta_dir, 'tempvocab.txt')
        cooccurrence_file = os.path.join(meta_dir, 'cooccurrence')
        cooccurrence_shuf_file = os.path.join(meta_dir, 'wiki.toy')
        meta_file = os.path.join(meta_dir, 'meta')

        # Command lines for glove's helper binaries shipped inside the wordrank checkout.
        cmd_vocab_count = [os.path.join(wr_path, 'glove', 'vocab_count'), '-min-count', str(min_count), '-max-vocab', str(max_vocab_size)]
        cmd_cooccurence_count = [os.path.join(wr_path, 'glove', 'cooccur'), '-memory', str(memory), '-vocab-file', temp_vocab_file, '-window-size', str(window), '-symmetric', str(symmetric)]
        cmd_shuffle_cooccurences = [os.path.join(wr_path, 'glove', 'shuffle'), '-memory', str(memory)]
        cmd_del_vocab_freq = ['cut', '-d', " ", '-f', '1', temp_vocab_file]

        commands = [cmd_vocab_count, cmd_cooccurence_count, cmd_shuffle_cooccurences]
        input_fnames = [os.path.join(meta_dir, os.path.split(corpus_file)[-1]), os.path.join(meta_dir, os.path.split(corpus_file)[-1]), cooccurrence_file]
        output_fnames = [temp_vocab_file, cooccurrence_file, cooccurrence_shuf_file]

        # Pipe each stage's input file to the binary's stdin and capture its
        # stdout into the next stage's file: corpus -> vocab -> cooccurrence -> shuffled.
        logger.info("Prepare training data (%s) using glove code", ", ".join(input_fnames))
        for command, input_fname, output_fname in zip(commands, input_fnames, output_fnames):
            with smart_open(input_fname, 'rb') as r:
                with smart_open(output_fname, 'wb') as w:
                    utils.check_output(w, args=command, stdin=r)

        # Strip the frequency column from the temp vocab; wordrank wants words only.
        logger.info("Deleting frequencies from vocab file")
        with smart_open(vocab_file, 'wb') as w:
            utils.check_output(w, args=cmd_del_vocab_freq)

        with smart_open(vocab_file, 'rb') as f:
            numwords = sum(1 for line in f)
        with smart_open(cooccurrence_shuf_file, 'rb') as f:
            numlines = sum(1 for line in f)
        with smart_open(meta_file, 'wb') as f:
            # The 'meta' file lists the sizes and filenames of the vocab and
            # cooccurrence inputs, in the layout the wordrank binary expects.
            meta_info = "{0} {1}\n{2} {3}\n{4} {5}".format(numwords, numwords, numlines, cooccurrence_shuf_file.split('/')[-1], numwords, vocab_file.split('/')[-1])
            f.write(meta_info.encode('utf-8'))

        # wordrank only writes embeddings at multiples of dump_period;
        # NOTE(review): presumably the +1 makes the binary emit the dump for the
        # final multiple before stopping — confirm against the wordrank docs.
        if iter % dump_period == 0:
            iter += 1
        else:
            logger.warning(
                'Resultant embedding will be from %d iterations rather than the input %d iterations, '
                'as wordrank dumps the embedding only at dump_period intervals. '
                'Input an appropriate combination of parameters (iter, dump_period) such that '
                '"iter mod dump_period" is zero.', iter - (iter % dump_period), iter
            )

        # Flags passed to the wordrank binary (as --key value pairs below).
        wr_args = {
            'path': meta_dir,
            'nthread': multiprocessing.cpu_count(),
            'sgd_num': sgd_num,
            'lrate': lrate,
            'period': period,
            'iter': iter,
            'epsilon': epsilon,
            'dump_prefix': 'model',
            'dump_period': dump_period,
            'dim': size,
            'reg': reg,
            'alpha': alpha,
            'beta': beta,
            'loss': loss
        }

        # run wordrank executable with wr_args, under mpirun with `np` copies
        cmd = ['mpirun', '-np']
        cmd.append(str(np))
        cmd.append(os.path.join(wr_path, 'wordrank'))
        for option, value in wr_args.items():
            cmd.append('--%s' % option)
            cmd.append(str(value))
        logger.info("Running wordrank binary")
        output = utils.check_output(args=cmd)

        # use embeddings from max. iteration's dump
        # NOTE(review): the dumps are written to the current working directory;
        # this assumes the process cwd has not changed since the binary ran.
        max_iter_dump = iter - (iter % dump_period)
        os.rename('model_word_%d.txt' % max_iter_dump, os.path.join(model_dir, 'wordrank.words'))
        os.rename('model_context_%d.txt' % max_iter_dump, os.path.join(model_dir, 'wordrank.contexts'))
        model = cls.load_wordrank_model(os.path.join(model_dir, 'wordrank.words'), vocab_file, os.path.join(model_dir, 'wordrank.contexts'), sorted_vocab, ensemble)

        if cleanup_files:
            rmtree(model_dir)
        return model

    @classmethod
    def load_wordrank_model(cls, model_file, vocab_file=None, context_file=None, sorted_vocab=1, ensemble=1):
        """Load a trained model: convert the glove-format dump to word2vec format,
        optionally sum in the context embeddings (`ensemble`) and re-sort the
        vocabulary by frequency (`sorted_vocab`). Returns the loaded model."""
        glove2word2vec(model_file, model_file+'.w2vformat')
        model = cls.load_word2vec_format('%s.w2vformat' % model_file)
        if ensemble and context_file:
            model.ensemble_embedding(model_file, context_file)
        if sorted_vocab and vocab_file:
            model.sort_embeddings(vocab_file)
        return model

    def sort_embeddings(self, vocab_file):
        """Sort embeddings according to word frequency."""
        counts = {}
        vocab_size = len(self.vocab)
        # Snapshot the current vectors/vocab before reordering in place.
        prev_syn0 = copy.deepcopy(self.syn0)
        prev_vocab = copy.deepcopy(self.vocab)
        self.index2word = []

        # sort embeddings using frequency sorted vocab file in wordrank
        # (line order in vocab_file is descending frequency, so the synthetic
        # count `vocab_size - index` decreases down the file)
        with utils.smart_open(vocab_file) as fin:
            for index, line in enumerate(fin):
                word, count = utils.to_unicode(line).strip(), vocab_size - index
                # store word with it's count in a dict
                counts[word] = int(count)
                # build new index2word with frequency sorted words
                self.index2word.append(word)
        assert len(self.index2word) == vocab_size, 'mismatch between vocab sizes'

        # Re-place each vector at its new frequency-sorted index.
        for word_id, word in enumerate(self.index2word):
            self.syn0[word_id] = prev_syn0[prev_vocab[word].index]
            self.vocab[word].index = word_id
            self.vocab[word].count = counts[word]

    def ensemble_embedding(self, word_embedding, context_embedding):
        """Replace syn0 with the sum of context and word embeddings."""
        glove2word2vec(context_embedding, context_embedding+'.w2vformat')
        w_emb = KeyedVectors.load_word2vec_format('%s.w2vformat' % word_embedding)
        c_emb = KeyedVectors.load_word2vec_format('%s.w2vformat' % context_embedding)
        # compare vocab words using keys of dict vocab
        assert set(w_emb.vocab) == set(c_emb.vocab), 'Vocabs are not same for both embeddings'

        # sort context embedding to have words in same order as word embedding
        prev_c_emb = copy.deepcopy(c_emb.syn0)
        for word_id, word in enumerate(w_emb.index2word):
            c_emb.syn0[word_id] = prev_c_emb[c_emb.vocab[word].index]
        # add vectors of the two embeddings
        new_emb = w_emb.syn0 + c_emb.syn0
        self.syn0 = new_emb
        return new_emb
| 49.726872 | 190 | 0.664334 |
830e02a26921c74bd1fd54e1b25f109dbfde723a | 932 | py | Python | a10_neutron_lbaas/db/models/__init__.py | hthompson6/a10-neutron-lbaas | f1639758cd3abcc6c86c8e6b64dcb0397c359621 | [
"Apache-2.0"
] | 10 | 2015-09-15T05:16:15.000Z | 2020-03-18T02:34:39.000Z | a10_neutron_lbaas/db/models/__init__.py | hthompson6/a10-neutron-lbaas | f1639758cd3abcc6c86c8e6b64dcb0397c359621 | [
"Apache-2.0"
] | 334 | 2015-02-11T23:45:00.000Z | 2020-02-28T08:58:51.000Z | a10_neutron_lbaas/db/models/__init__.py | hthompson6/a10-neutron-lbaas | f1639758cd3abcc6c86c8e6b64dcb0397c359621 | [
"Apache-2.0"
] | 24 | 2015-01-13T21:14:45.000Z | 2021-06-02T17:22:14.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# flake8: noqa
# You should really import the models you want directly; this is a placeholder
# to allow existing code to run, while it's converted.
from a10_neutron_lbaas.db.models.a10_device_instance import A10DeviceInstance
from a10_neutron_lbaas.db.models.a10_slb import A10SLB
from a10_neutron_lbaas.db.models.a10_tenant_binding import A10TenantBinding
| 46.6 | 78 | 0.770386 |
e4d9f9670262f38bee01c15c3109b78ba2aaf305 | 907 | py | Python | 14-Introduction to Data Visualization with Seaborn/Chapter_2/07-Plotting subgroups in line plots.py | Pegasus-01/Data-manipulation-and-merging-with-pandas | 5346678d25820d9fe352bd70294484ecd96fccf7 | [
"Apache-2.0"
] | 1 | 2020-10-18T16:42:28.000Z | 2020-10-18T16:42:28.000Z | 14-Introduction to Data Visualization with Seaborn/Chapter_2/07-Plotting subgroups in line plots.py | Pegasus-01/Data-manipulation-and-merging-with-pandas | 5346678d25820d9fe352bd70294484ecd96fccf7 | [
"Apache-2.0"
] | null | null | null | 14-Introduction to Data Visualization with Seaborn/Chapter_2/07-Plotting subgroups in line plots.py | Pegasus-01/Data-manipulation-and-merging-with-pandas | 5346678d25820d9fe352bd70294484ecd96fccf7 | [
"Apache-2.0"
] | null | null | null | #part1
# Import Matplotlib and Seaborn
import matplotlib.pyplot as plt
import seaborn as sns
# Create line plot of model year vs. horsepower
sns.relplot(x="model_year",y="horsepower",data=mpg,kind="line",ci=None)
# Show plot
plt.show()
#part2
# Import Matplotlib and Seaborn
import matplotlib.pyplot as plt
import seaborn as sns
# Change to create subgroups for country of origin
sns.relplot(x="model_year", y="horsepower",
data=mpg, kind="line",
ci=None,style="origin",hue="origin")
# Show plot
plt.show()
#part3
# Import Matplotlib and Seaborn
import matplotlib.pyplot as plt
import seaborn as sns
# Add markers and make each line have the same style
sns.relplot(x="model_year", y="horsepower",
data=mpg, kind="line",
ci=None, style="origin",
hue="origin",markers=True,dashes=False)
# Show plot
plt.show() | 25.914286 | 72 | 0.675854 |
e50213f14e1aa53d50332091a4af223cc285ad5c | 14,942 | py | Python | quarkchain/p2p/p2p_manager.py | tim-yoshi/pyquarkchain | 1847542c166a180b5ffc3c6e917751be85fa15a6 | [
"MIT"
] | 3 | 2019-03-14T17:08:07.000Z | 2019-10-02T11:13:53.000Z | quarkchain/p2p/p2p_manager.py | tim-yoshi/pyquarkchain | 1847542c166a180b5ffc3c6e917751be85fa15a6 | [
"MIT"
] | null | null | null | quarkchain/p2p/p2p_manager.py | tim-yoshi/pyquarkchain | 1847542c166a180b5ffc3c6e917751be85fa15a6 | [
"MIT"
] | 1 | 2019-05-04T22:57:29.000Z | 2019-05-04T22:57:29.000Z | import ipaddress
import socket
from cryptography.hazmat.primitives.constant_time import bytes_eq
from eth_keys import keys
from typing import Tuple, Dict
from quarkchain.utils import Logger
from quarkchain.cluster.simple_network import Peer, AbstractNetwork
from quarkchain.protocol import AbstractConnection, Connection, ConnectionState
from quarkchain.cluster.p2p_commands import CommandOp
from quarkchain.p2p import ecies
from quarkchain.p2p.cancel_token.token import CancelToken
from quarkchain.p2p.kademlia import Node
from quarkchain.p2p.p2p_server import BaseServer
from quarkchain.p2p.peer import BasePeer, BasePeerContext, BasePeerPool, BasePeerFactory
from quarkchain.p2p.protocol import Command, _DecodedMsgType, NULL_BYTE, Protocol
from .constants import HEADER_LEN, MAC_LEN
from quarkchain.p2p.exceptions import (
DecryptionError,
PeerConnectionLost,
HandshakeFailure,
)
from quarkchain.p2p.utils import sxor
class QuarkProtocol(Protocol):
    """Devp2p sub-protocol descriptor advertised during the RLPx handshake."""

    # Protocol name exchanged in the p2p hello message.
    name = "quarkchain"
    # TODO use self.env.quark_chain_config.P2P_PROTOCOL_VERSION
    version = 1
def encode_bytes(data: bytes) -> Tuple[bytes, bytes]:
    """Frame *data* for the wire, returning ``(header, body)``.

    A modified version of the rlpx framed protocol: the header is the body
    length as a 4-byte big-endian integer, NULL-padded to ``HEADER_LEN``
    bytes; the body is *data* unchanged.

    Raises:
        ValueError: if the body length does not fit in 4 bytes.
    """
    body_length = len(data)
    if body_length.bit_length() > 32:
        raise ValueError("Frame size has to fit in a 4-byte integer")
    length_prefix = body_length.to_bytes(4, byteorder="big")
    return length_prefix.ljust(HEADER_LEN, NULL_BYTE), data
class QuarkPeer(BasePeer):
    """
    keep secure handshake of BasePeer, but override all wire protocols.
    specifically, _run() does not call read_msg() which interpret bytes as RLPx;
    instead, we use QuarkChain-specific wire protocol
    """

    env = None  # shared Env; assigned once by P2PManager.__init__
    _supported_sub_protocols = [QuarkProtocol]
    sub_proto = None  # : QuarkProtocol
    peer_idle_timeout = None  # do not timeout for connected peers

    async def send_sub_proto_handshake(self) -> None:
        # No devp2p sub-protocol handshake: the QuarkChain HELLO exchange below replaces it.
        pass

    async def process_sub_proto_handshake(
        self, cmd: Command, msg: _DecodedMsgType
    ) -> None:
        # See send_sub_proto_handshake: intentionally a no-op.
        pass

    async def do_sub_proto_handshake(self) -> None:
        """ overrides BasePeer.do_sub_proto_handshake()
        Runs the QuarkChain-level HELLO exchange via a SecurePeer wrapper.
        """
        self.secure_peer = SecurePeer(self)
        Logger.info("starting peer hello exchange")
        start_state = await self.secure_peer.start()
        if start_state:
            # returns None if successful
            raise HandshakeFailure(
                "hello message exchange failed: {}".format(start_state)
            )

    def decrypt_raw_bytes(self, data: bytes, size: int) -> bytes:
        """
        same as decrypt_body() but no roundup

        `data` must contain at least `size` ciphertext bytes followed by a
        MAC_LEN frame MAC; verifies the MAC against the rolling ingress MAC
        before decrypting. Raises DecryptionError on MAC mismatch.
        """
        if len(data) < size + MAC_LEN:
            raise ValueError(
                "Insufficient body length; Got {}, wanted {} + {}".format(
                    len(data), size, MAC_LEN
                )
            )
        frame_ciphertext = data[:size]
        frame_mac = data[size : size + MAC_LEN]

        # Fold the ciphertext into the ingress MAC, then derive the expected
        # frame MAC the same way the sender derived it (egress side).
        self.ingress_mac.update(frame_ciphertext)
        fmac_seed = self.ingress_mac.digest()[:MAC_LEN]

        self.ingress_mac.update(sxor(self.mac_enc(fmac_seed), fmac_seed))
        expected_frame_mac = self.ingress_mac.digest()[:MAC_LEN]
        # bytes_eq is constant-time to avoid leaking MAC bytes via timing.
        if not bytes_eq(expected_frame_mac, frame_mac):
            raise DecryptionError(
                "Invalid frame mac: expected {}, got {}".format(
                    expected_frame_mac, frame_mac
                )
            )
        return self.aes_dec.update(frame_ciphertext)[:size]

    def send_raw_bytes(self, data: bytes) -> None:
        """Frame `data` (see encode_bytes), encrypt it, and queue it on the writer."""
        header, body = encode_bytes(data)
        self.writer.write(self.encrypt(header, body))

    async def read_raw_bytes(self, timeout) -> bytes:
        """Read and decrypt one framed message: 16-byte header (+MAC), then body (+MAC)."""
        header_data = await self.read(HEADER_LEN + MAC_LEN, timeout=timeout)
        header = self.decrypt_header(header_data)
        # First 4 header bytes carry the body length (big-endian).
        frame_size = int.from_bytes(header[:4], byteorder="big")
        if frame_size > self.env.quark_chain_config.P2P_COMMAND_SIZE_LIMIT:
            raise RuntimeError("{} command package exceed limit".format(self))
        frame_data = await self.read(frame_size + MAC_LEN, timeout=timeout)
        msg = self.decrypt_raw_bytes(frame_data, frame_size)
        return msg

    async def _run(self) -> None:
        """
        overrides BasePeer._run()
        forwards decrypted messages to QuarkChain Peer
        """
        self.run_child_service(self.boot_manager)

        # Kick off root-chain sync against the tip the peer advertised in HELLO.
        self.secure_peer.add_sync_task()
        if self.secure_peer.state == ConnectionState.CONNECTING:
            self.secure_peer.state = ConnectionState.ACTIVE
            self.secure_peer.active_future.set_result(None)
        try:
            # Main receive loop: decrypt each frame and dispatch it as a task
            # so slow handlers do not block reading the next frame.
            while self.is_operational:
                metadata, raw_data = await self.secure_peer.read_metadata_and_raw_data()
                self.run_task(
                    self.secure_peer.secure_handle_metadata_and_raw_data(
                        metadata, raw_data
                    )
                )
        except (PeerConnectionLost, TimeoutError) as err:
            self.logger.debug(
                "%s stopped responding (%r), disconnecting", self.remote, err
            )
        except DecryptionError as err:
            self.logger.warning(
                "Unable to decrypt message from %s, disconnecting: %r", self.remote, err
            )
        except Exception as e:
            self.logger.error("Unknown exception from %s, message: %r", self.remote, e)
            Logger.error_exception()
        # Always fail outstanding RPCs and tear down cluster connections on exit.
        self.secure_peer.abort_in_flight_rpcs()
        self.secure_peer.close()
class SecurePeer(Peer):
    """
    keep all wire-level functions (especially for proxy/forwarding-related transports),
    but delegate all StreamReader/Writer operations to QuarkPeer

    ** overrides **
    Peer.start
    Peer.close
    Peer.handle_get_peer_list_request
    Connection.read_metadata_and_raw_data
    Connection.write_raw_data
    ** unchanged **
    Peer.close_with_error
    Peer.get_cluster_peer_id
    Peer.get_connection_to_forward
    Peer.handle_error
    Peer.handle_get_root_block_header_list_request
    Peer.handle_get_root_block_list_request
    Peer.handle_new_minor_block_header_list
    Peer.handle_new_transaction_list
    Peer.send_hello
    Peer.send_transaction
    Peer.send_updated_tip
    AbstractConnection.__handle_request
    AbstractConnection.__handle_rpc_request
    AbstractConnection.__parse_command
    AbstractConnection.__write_rpc_response
    AbstractConnection.read_command
    AbstractConnection.validate_and_update_peer_rpc_id
    AbstractConnection.write_command
    AbstractConnection.write_raw_command
    AbstractConnection.write_rpc_request
    P2PConnection.get_metadata_to_forward
    P2PConnection.validate_connection
    ProxyConnection.handle_metadata_and_raw_data
    ** unused **
    Peer.close_dead_peer
    AbstractConnection.__get_next_connection_id
    AbstractConnection.__internal_handle_metadata_and_raw_data
    Connection.__read_fully
    AbstractConnection.active_and_loop_forever
    ProxyConnection.close_connection
    AbstractConnection.is_active
    AbstractConnection.is_closed
    AbstractConnection.loop_once
    AbstractConnection.wait_until_active
    AbstractConnection.wait_until_closed
    """

    # Singletons will be set as class variable (assigned by P2PManager.__init__)
    env = None
    network = None
    master_server = None

    def __init__(self, quark_peer: QuarkPeer):
        # Derive the 64-bit cluster peer id from the devp2p node id.
        cluster_peer_id = quark_peer.remote.id % 2 ** 64
        super().__init__(
            env=self.env,
            reader=None,   # no direct stream access; all I/O goes through quark_peer
            writer=None,
            network=self.network,
            master_server=self.master_server,
            cluster_peer_id=cluster_peer_id,
            name=repr(quark_peer),
        )
        self.quark_peer = quark_peer

    async def start(self) -> str:
        """ Override Peer.start()
        exchange hello command, establish cluster connections in master

        Returns None on success, or an error string on failure.
        """
        self.send_hello()

        op, cmd, rpc_id = await self.read_command()
        if op is None:
            assert self.state == ConnectionState.CLOSED
            Logger.info("Failed to read command, peer may have closed connection")
            return "Failed to read command"

        if op != CommandOp.HELLO:
            return self.close_with_error("Hello must be the first command")

        if cmd.version != self.env.quark_chain_config.P2P_PROTOCOL_VERSION:
            return self.close_with_error("incompatible protocol version")

        if cmd.network_id != self.env.quark_chain_config.NETWORK_ID:
            return self.close_with_error("incompatible network id")

        self.id = cmd.peer_id
        self.chain_mask_list = cmd.chain_mask_list
        # ip is from peer.remote, there may be 2 cases:
        #  1. dialed-out: ip is from discovery service;
        #  2. dialed-in: ip is from writer.get_extra_info("peername")
        self.ip = ipaddress.ip_address(self.quark_peer.remote.address.ip)
        # port is what peer claim to be using
        self.port = cmd.peer_port

        Logger.info(
            "Got HELLO from peer {} ({}:{})".format(self.quark_peer, self.ip, self.port)
        )

        self.best_root_block_header_observed = cmd.root_block_header

        await self.master_server.create_peer_cluster_connections(self.cluster_peer_id)
        Logger.info(
            "Established virtual shard connections with {} cluster_peer_id={} id={}".format(
                self.quark_peer, self.cluster_peer_id, self.id.hex()
            )
        )

    def add_sync_task(self):
        """Hand the peer's advertised root tip to the master for chain sync."""
        self.master_server.handle_new_root_block_header(
            self.best_root_block_header_observed, self
        )

    def abort_in_flight_rpcs(self):
        """Fail every outstanding RPC future so awaiting callers do not hang."""
        for rpc_id, future in self.rpc_future_map.items():
            future.set_exception(RuntimeError("{}: connection abort".format(self.name)))
        AbstractConnection.aborted_rpc_count += len(self.rpc_future_map)
        self.rpc_future_map.clear()

    def write_raw_data(self, metadata, raw_data):
        """ Override Connection.write_raw_data()
        Serializes metadata + payload into one encrypted frame via the QuarkPeer.
        """
        # NOTE QuarkChain serialization returns bytearray
        self.quark_peer.send_raw_bytes(bytes(metadata.serialize() + raw_data))

    async def read_metadata_and_raw_data(self):
        """ Override Connection.read_metadata_and_raw_data()
        Reads one frame and splits it into (metadata, payload bytes).
        """
        data = await self.quark_peer.read_raw_bytes(timeout=None)
        # The metadata is a fixed-size prefix of every frame.
        metadata_bytes = data[: self.metadata_class.get_byte_size()]
        metadata = self.metadata_class.deserialize(metadata_bytes)
        return metadata, data[self.metadata_class.get_byte_size() :]

    def close(self):
        # Only tear down the virtual shard connections if the handshake completed.
        if self.state == ConnectionState.ACTIVE:
            Logger.info(
                "destroying proxy slave connections for {}".format(
                    self.quark_peer.remote
                )
            )
            self.master_server.destroy_peer_cluster_connections(self.cluster_peer_id)
        # Skip Connection.close() (no reader/writer here); use AbstractConnection's.
        super(Connection, self).close()
        self.quark_peer.close()

    async def handle_get_peer_list_request(self, request):
        """ shall not handle this request for a real p2p network
        (peer discovery is done by the devp2p discovery service instead)
        """
        pass

    async def secure_handle_metadata_and_raw_data(self, metadata, raw_data):
        """ same as __internal_handle_metadata_and_raw_data but callable
        Dispatches one inbound frame; any handler error closes the connection.
        """
        try:
            await self.handle_metadata_and_raw_data(metadata, raw_data)
        except Exception as e:
            Logger.log_exception()
            self.close_with_error(
                "{}: error processing request: {}".format(self.name, e)
            )
class QuarkContext(BasePeerContext):
    """Per-connection context passed to each peer; only tags the network name."""

    quarkchain = "quarkchain"  # : str
class QuarkPeerFactory(BasePeerFactory):
    """Factory that makes the peer pool produce QuarkPeer instances."""

    peer_class = QuarkPeer
    sub_proto = None  # placeholder; real value is per-peer
    context = None  # : QuarkContext
class QuarkPeerPool(BasePeerPool):
    """Peer pool wired to QuarkPeerFactory so all peers speak the QuarkChain protocol."""

    peer_factory_class = QuarkPeerFactory
    context = None  # : QuarkContext
class QuarkServer(BaseServer):
    """
    a server using QuarkPeerPool
    """

    def _make_peer_pool(self):
        # Pool shares the server's key, listen port and cancel token.
        return QuarkPeerPool(
            privkey=self.privkey,
            context=QuarkContext(),
            listen_port=self.port,
            token=self.cancel_token,
        )

    def _make_syncer(self):
        # QuarkChain runs its own sync (master server); no devp2p syncer needed.
        return
class P2PManager(AbstractNetwork):
    """
    a network based on QuarkServer, need the following members for peer conn to work:
    network.self_id
    network.ip
    network.port
    """

    def __init__(self, env, master_server, loop):
        """Wire the devp2p server to the cluster master.

        `env` carries cluster/quarkchain config, `master_server` handles chain
        logic, `loop` is the asyncio event loop the server runs on.
        """
        self.loop = loop
        self.env = env
        self.master_server = master_server
        master_server.network = self  # cannot say this is a good design

        self.cancel_token = CancelToken("p2pserver")
        # Boot/preferred node lists come as comma-separated enode URIs.
        if env.cluster_config.P2P.BOOT_NODES:
            bootstrap_nodes = env.cluster_config.P2P.BOOT_NODES.split(",")
        else:
            bootstrap_nodes = []
        # Use the configured node key if present; otherwise generate a fresh one
        # (the node identity changes across restarts in that case).
        if env.cluster_config.P2P.PRIV_KEY:
            privkey = keys.PrivateKey(bytes.fromhex(env.cluster_config.P2P.PRIV_KEY))
        else:
            privkey = ecies.generate_privkey()

        if env.cluster_config.P2P.PREFERRED_NODES:
            preferred_nodes = env.cluster_config.P2P.PREFERRED_NODES.split(",")
        else:
            preferred_nodes = []

        self.server = QuarkServer(
            privkey=privkey,
            port=env.cluster_config.P2P_PORT,
            network_id=env.quark_chain_config.NETWORK_ID,
            bootstrap_nodes=tuple([Node.from_uri(enode) for enode in bootstrap_nodes]),
            preferred_nodes=[Node.from_uri(enode) for enode in preferred_nodes],
            token=self.cancel_token,
            max_peers=env.cluster_config.P2P.MAX_PEERS,
            upnp=env.cluster_config.P2P.UPNP,
            allow_dial_in_ratio=env.cluster_config.P2P.ALLOW_DIAL_IN_RATIO,
        )

        # Inject the shared singletons into the peer classes (class attributes).
        QuarkPeer.env = env
        SecurePeer.env = env
        SecurePeer.network = self
        SecurePeer.master_server = master_server

        # used in HelloCommand.peer_id which is hash256
        self.self_id = privkey.public_key.to_bytes()[:32]
        self.ip = ipaddress.ip_address(socket.gethostbyname(socket.gethostname()))
        self.port = env.cluster_config.P2P_PORT

    def start(self) -> None:
        """Schedule the devp2p server on the event loop (non-blocking)."""
        self.loop.create_task(self.server.run())

    def iterate_peers(self):
        """Return the SecurePeer view of every currently connected node."""
        return [p.secure_peer for p in self.server.peer_pool.connected_nodes.values()]

    @property
    def active_peer_pool(self) -> Dict[bytes, Peer]:
        """ for jrpc and stat reporting
        """
        return {
            p.secure_peer.id: p.secure_peer
            for p in self.server.peer_pool.connected_nodes.values()
        }

    def get_peer_by_cluster_peer_id(self, cluster_peer_id):
        """Look up a connected peer by its 64-bit cluster peer id; None if absent."""
        quark_peer = self.server.peer_pool.cluster_peer_map.get(cluster_peer_id)
        if quark_peer:
            return quark_peer.secure_peer
        return None
| 35.918269 | 92 | 0.667514 |
54b63940d99fc706d0566199aae2bff199f32c4e | 709 | py | Python | anonymizePhone.py | sen-den/DataAnonymiser | 1cda7f40d1300ebe4ca1acc83761bdb3a94be1f3 | [
"MIT"
] | null | null | null | anonymizePhone.py | sen-den/DataAnonymiser | 1cda7f40d1300ebe4ca1acc83761bdb3a94be1f3 | [
"MIT"
] | null | null | null | anonymizePhone.py | sen-den/DataAnonymiser | 1cda7f40d1300ebe4ca1acc83761bdb3a94be1f3 | [
"MIT"
] | null | null | null | import re
from config import *
def anonymizePhone(text):
    """Mask the trailing digits of international phone numbers in *text*.

    Phone numbers are matched in the form ``+<digits> ddd ddd ddd`` (a
    ``+`` prefix followed by three space-separated 3-digit groups).  The
    last ``cfg["Phone"]["DigitsToHide"]`` digits of every match are
    replaced with ``cfg["Phone"]["MaskingSign"]``, preserving the
    space-separated grouping.
    """
    subsLength = cfg["Phone"]["DigitsToHide"]
    subsSign = cfg["Phone"]["MaskingSign"]
    # The leading mask group may be underfull: 0-2 chars before the
    # left-most preserved space separator.
    firstGroupLength = subsLength % 3
    subsFullGroupsCount = subsLength // 3
    # Each full 3-char group also swallows one separating space.
    subsCharCount = subsLength + subsFullGroupsCount
    # {0-2} substitute chars followed by the space-separated full groups.
    subs = subsSign * firstGroupLength + (" " + subsSign * 3) * subsFullGroupsCount
    # BUGFIX: with DigitsToHide == 0, subsCharCount is 0 and
    # ``match.group(1)[:-0]`` evaluates to the EMPTY string, wiping the
    # whole phone number instead of leaving it unmasked.  Skip masking
    # entirely in that case.
    if subsCharCount:
        # Replace each phone with its truncated head plus the mask tail.
        text = re.sub(r'(\+[\d]+(\ [\d]{3}){3})',
                      lambda match: match.group(1)[:-subsCharCount] + subs,
                      text)
| 26.259259 | 76 | 0.712271 |
b9c43eb21e2ac6c21c9b051f252e39ef13f0b53a | 2,945 | py | Python | official/vision/beta/projects/yolo/train.py | e10101/models | 5c3e08b7697f0035b8731607277dc4e47e18317c | [
"Apache-2.0"
] | 2 | 2017-10-26T06:23:51.000Z | 2020-09-11T21:09:41.000Z | official/vision/beta/projects/yolo/train.py | e10101/models | 5c3e08b7697f0035b8731607277dc4e47e18317c | [
"Apache-2.0"
] | 2 | 2018-06-18T17:08:12.000Z | 2021-04-12T05:39:04.000Z | official/vision/beta/projects/yolo/train.py | e10101/models | 5c3e08b7697f0035b8731607277dc4e47e18317c | [
"Apache-2.0"
] | 2 | 2020-04-11T19:31:17.000Z | 2021-04-07T12:53:28.000Z | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""TensorFlow Model Garden Vision training driver."""
from absl import app
from absl import flags
import gin
from official.common import distribute_utils
from official.common import flags as tfm_flags
from official.core import task_factory
from official.core import train_lib
from official.core import train_utils
from official.modeling import performance
from official.vision.beta.projects.yolo.common import registry_imports # pylint: disable=unused-import
FLAGS = flags.FLAGS
'''
python3 -m official.vision.beta.projects.yolo.train --mode=train_and_eval --experiment=darknet_classification --model_dir=training_dir --config_file=official/vision/beta/projects/yolo/configs/experiments/darknet53_tfds.yaml
'''
def main(_):
  """Driver for darknet/YOLO experiments in TF Model Garden.

  Parses gin bindings and the experiment configuration from absl FLAGS,
  optionally serializes the config, sets the mixed-precision policy and
  distribution strategy, builds the task, and runs the experiment.
  """
  gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_params)
  print(FLAGS.experiment)
  params = train_utils.parse_configuration(FLAGS)
  model_dir = FLAGS.model_dir
  if 'train' in FLAGS.mode:
    # Pure eval modes do not output yaml files. Otherwise continuous eval job
    # may race against the train job for writing the same file.
    train_utils.serialize_config(params, model_dir)
  # Sets mixed_precision policy. Using 'mixed_float16' or 'mixed_bfloat16'
  # can have significant impact on model speeds by utilizing float16 in case of
  # GPUs, and bfloat16 in the case of TPUs. loss_scale takes effect only when
  # dtype is float16
  if params.runtime.mixed_precision_dtype:
    performance.set_mixed_precision_policy(params.runtime.mixed_precision_dtype,
                                           params.runtime.loss_scale,
                                           use_experimental_api=True)
  distribution_strategy = distribute_utils.get_distribution_strategy(
      distribution_strategy=params.runtime.distribution_strategy,
      all_reduce_alg=params.runtime.all_reduce_alg,
      num_gpus=params.runtime.num_gpus,
      tpu_address=params.runtime.tpu)
  # Task construction must happen under the strategy scope so variables
  # are created on the right devices.
  with distribution_strategy.scope():
    task = task_factory.get_task(params.task, logging_dir=model_dir)
  train_lib.run_experiment(
      distribution_strategy=distribution_strategy,
      task=task,
      mode=FLAGS.mode,
      params=params,
      model_dir=model_dir)
  train_utils.save_gin_config(FLAGS.mode, model_dir)
if __name__ == '__main__':
tfm_flags.define_flags()
app.run(main)
| 38.75 | 223 | 0.763328 |
fd4d1c822245146f3c6fe0495e82a148ab0e8009 | 8,875 | py | Python | blackmamba/lib/rope/base/pyobjects.py | oz90210/blackmamba | 65c82c8e99028d6fbb57098ce82d0a394df215a0 | [
"MIT"
] | 239 | 2018-04-20T06:58:32.000Z | 2022-03-22T18:06:08.000Z | blackmamba/lib/rope/base/pyobjects.py | oz90210/blackmamba | 65c82c8e99028d6fbb57098ce82d0a394df215a0 | [
"MIT"
] | 39 | 2017-08-20T15:10:36.000Z | 2020-03-31T18:45:57.000Z | blackmamba/lib/rope/base/pyobjects.py | oz90210/blackmamba | 65c82c8e99028d6fbb57098ce82d0a394df215a0 | [
"MIT"
] | 99 | 2018-07-20T09:16:13.000Z | 2022-03-20T11:58:56.000Z | from rope.base.fscommands import _decode_data
from rope.base import ast, exceptions, utils
class PyObject(object):
def __init__(self, type_):
if type_ is None:
type_ = self
self.type = type_
def get_attributes(self):
if self.type is self:
return {}
return self.type.get_attributes()
def get_attribute(self, name):
if name not in self.get_attributes():
raise exceptions.AttributeNotFoundError(
'Attribute %s not found' % name)
return self.get_attributes()[name]
def get_type(self):
return self.type
def __getitem__(self, key):
"""The same as ``get_attribute(key)``"""
return self.get_attribute(key)
def __contains__(self, key):
"""The same as ``key in self.get_attributes()``"""
return key in self.get_attributes()
def __eq__(self, obj):
"""Check the equality of two `PyObject`\s
Currently it is assumed that instances (the direct instances
of `PyObject`, not the instances of its subclasses) are equal
if their types are equal. For every other object like
defineds or builtins rope assumes objects are reference
objects and their identities should match.
"""
if self.__class__ != obj.__class__:
return False
if type(self) == PyObject:
if self is not self.type:
return self.type == obj.type
else:
return self.type is obj.type
return self is obj
def __ne__(self, obj):
return not self.__eq__(obj)
def __hash__(self):
"""See docs for `__eq__()` method"""
if type(self) == PyObject and self != self.type:
return hash(self.type) + 1
else:
return super(PyObject, self).__hash__()
def __iter__(self):
"""The same as ``iter(self.get_attributes())``"""
return iter(self.get_attributes())
_types = None
_unknown = None
@staticmethod
def _get_base_type(name):
if PyObject._types is None:
PyObject._types = {}
base_type = PyObject(None)
PyObject._types['Type'] = base_type
PyObject._types['Module'] = PyObject(base_type)
PyObject._types['Function'] = PyObject(base_type)
PyObject._types['Unknown'] = PyObject(base_type)
return PyObject._types[name]
def get_base_type(name):
"""Return the base type with name `name`.
The base types are 'Type', 'Function', 'Module' and 'Unknown'. It
was used to check the type of a `PyObject` but currently its use
is discouraged. Use classes defined in this module instead.
For example instead of
``pyobject.get_type() == get_base_type('Function')`` use
``isinstance(pyobject, AbstractFunction)``.
You can use `AbstractClass` for classes, `AbstractFunction` for
functions, and `AbstractModule` for modules. You can also use
`PyFunction` and `PyClass` for testing if an object is
defined somewhere and rope can access its source. These classes
provide more methods.
"""
return PyObject._get_base_type(name)
def get_unknown():
"""Return a pyobject whose type is unknown
Note that two unknown objects are equal. So for example you can
write::
if pyname.get_object() == get_unknown():
print('cannot determine what this pyname holds')
Rope could have used `None` for indicating unknown objects but
we had to check that in many places. So actually this method
returns a null object.
"""
if PyObject._unknown is None:
PyObject._unknown = PyObject(get_base_type('Unknown'))
return PyObject._unknown
class AbstractClass(PyObject):
def __init__(self):
super(AbstractClass, self).__init__(get_base_type('Type'))
def get_name(self):
pass
def get_doc(self):
pass
def get_superclasses(self):
return []
class AbstractFunction(PyObject):
def __init__(self):
super(AbstractFunction, self).__init__(get_base_type('Function'))
def get_name(self):
pass
def get_doc(self):
pass
def get_param_names(self, special_args=True):
return []
def get_returned_object(self, args):
return get_unknown()
class AbstractModule(PyObject):
def __init__(self, doc=None):
super(AbstractModule, self).__init__(get_base_type('Module'))
def get_doc(self):
pass
def get_resource(self):
pass
class PyDefinedObject(object):
"""Python defined names that rope can access their sources"""
def __init__(self, pycore, ast_node, parent):
self.pycore = pycore
self.ast_node = ast_node
self.scope = None
self.parent = parent
self.structural_attributes = None
self.concluded_attributes = self.get_module()._get_concluded_data()
self.attributes = self.get_module()._get_concluded_data()
self.defineds = None
visitor_class = None
@utils.prevent_recursion(lambda: {})
def _get_structural_attributes(self):
if self.structural_attributes is None:
self.structural_attributes = self._create_structural_attributes()
return self.structural_attributes
@utils.prevent_recursion(lambda: {})
def _get_concluded_attributes(self):
if self.concluded_attributes.get() is None:
self._get_structural_attributes()
self.concluded_attributes.set(self._create_concluded_attributes())
return self.concluded_attributes.get()
def get_attributes(self):
if self.attributes.get() is None:
result = dict(self._get_concluded_attributes())
result.update(self._get_structural_attributes())
self.attributes.set(result)
return self.attributes.get()
def get_attribute(self, name):
if name in self._get_structural_attributes():
return self._get_structural_attributes()[name]
if name in self._get_concluded_attributes():
return self._get_concluded_attributes()[name]
raise exceptions.AttributeNotFoundError('Attribute %s not found' %
name)
def get_scope(self):
if self.scope is None:
self.scope = self._create_scope()
return self.scope
def get_module(self):
current_object = self
while current_object.parent is not None:
current_object = current_object.parent
return current_object
def get_doc(self):
if len(self.get_ast().body) > 0:
expr = self.get_ast().body[0]
if isinstance(expr, ast.Expr) and \
isinstance(expr.value, ast.Str):
docstring = expr.value.s
coding = self.get_module().coding
return _decode_data(docstring, coding)
def _get_defined_objects(self):
if self.defineds is None:
self._get_structural_attributes()
return self.defineds
def _create_structural_attributes(self):
if self.visitor_class is None:
return {}
new_visitor = self.visitor_class(self.pycore, self)
for child in ast.get_child_nodes(self.ast_node):
ast.walk(child, new_visitor)
self.defineds = new_visitor.defineds
return new_visitor.names
def _create_concluded_attributes(self):
return {}
def get_ast(self):
return self.ast_node
def _create_scope(self):
pass
class PyFunction(PyDefinedObject, AbstractFunction):
"""Only a placeholder"""
class PyClass(PyDefinedObject, AbstractClass):
"""Only a placeholder"""
class _ConcludedData(object):
def __init__(self):
self.data_ = None
def set(self, data):
self.data_ = data
def get(self):
return self.data_
data = property(get, set)
def _invalidate(self):
self.data = None
def __str__(self):
return '<' + str(self.data) + '>'
class _PyModule(PyDefinedObject, AbstractModule):
    """Shared base for `PyModule`/`PyPackage`.

    Tracks the resource the module was parsed from, and hands out
    `_ConcludedData` cells so every cached conclusion can be invalidated
    together when the module changes.
    """
    def __init__(self, pycore, ast_node, resource):
        self.resource = resource
        # Every cell created by _get_concluded_data, kept for bulk reset.
        self.concluded_data = []
        AbstractModule.__init__(self)
        PyDefinedObject.__init__(self, pycore, ast_node, None)
    def _get_concluded_data(self):
        """Create, register and return a fresh `_ConcludedData` cell."""
        new_data = _ConcludedData()
        self.concluded_data.append(new_data)
        return new_data
    def _forget_concluded_data(self):
        """Invalidate every cell handed out by `_get_concluded_data`."""
        for data in self.concluded_data:
            data._invalidate()
    def get_resource(self):
        return self.resource
class PyModule(_PyModule):
"""Only a placeholder"""
class PyPackage(_PyModule):
"""Only a placeholder"""
class IsBeingInferredError(exceptions.RopeError):
pass
| 28.445513 | 78 | 0.637972 |
fd0811117d45f558cab9ad336b7f2c495aafcff5 | 2,100 | py | Python | datasette/publish/now.py | kevboh/datasette | b7257a21bf3dfa7353980f343c83a616da44daa7 | [
"Apache-2.0"
] | 1 | 2018-06-01T22:15:11.000Z | 2018-06-01T22:15:11.000Z | datasette/publish/now.py | kevboh/datasette | b7257a21bf3dfa7353980f343c83a616da44daa7 | [
"Apache-2.0"
] | null | null | null | datasette/publish/now.py | kevboh/datasette | b7257a21bf3dfa7353980f343c83a616da44daa7 | [
"Apache-2.0"
] | null | null | null | from datasette import hookimpl
import click
from subprocess import call
from .common import (
add_common_publish_arguments_and_options,
fail_if_publish_binary_not_installed,
)
from ..utils import temporary_docker_directory
@hookimpl
def publish_subcommand(publish):
@publish.command()
@add_common_publish_arguments_and_options
@click.option(
"-n",
"--name",
default="datasette",
help="Application name to use when deploying",
)
@click.option("--force", is_flag=True, help="Pass --force option to now")
@click.option("--token", help="Auth token to use for deploy (Now only)")
@click.option("--spatialite", is_flag=True, help="Enable SpatialLite extension")
def now(
files,
metadata,
extra_options,
branch,
template_dir,
plugins_dir,
static,
install,
version_note,
title,
license,
license_url,
source,
source_url,
name,
force,
token,
spatialite,
):
fail_if_publish_binary_not_installed("now", "Zeit Now", "https://zeit.co/now")
if extra_options:
extra_options += " "
else:
extra_options = ""
extra_options += "--config force_https_urls:on"
with temporary_docker_directory(
files,
name,
metadata,
extra_options,
branch,
template_dir,
plugins_dir,
static,
install,
spatialite,
version_note,
{
"title": title,
"license": license,
"license_url": license_url,
"source": source,
"source_url": source_url,
},
):
args = []
if force:
args.append("--force")
if token:
args.append("--token={}".format(token))
if args:
call(["now"] + args)
else:
call("now")
| 25.925926 | 86 | 0.522381 |
578a503ae922d16e9270578ce922f601de5f3ee5 | 4,937 | py | Python | palm/aggregated_kinetic_model.py | milapour/palm | 53cfce67f6621795ca419a79bd91c9ecf02cc93f | [
"BSD-2-Clause"
] | 2 | 2015-03-25T13:02:32.000Z | 2016-12-12T21:00:27.000Z | palm/aggregated_kinetic_model.py | milapour/palm | 53cfce67f6621795ca419a79bd91c9ecf02cc93f | [
"BSD-2-Clause"
] | null | null | null | palm/aggregated_kinetic_model.py | milapour/palm | 53cfce67f6621795ca419a79bd91c9ecf02cc93f | [
"BSD-2-Clause"
] | null | null | null | import numpy
from collections import defaultdict
from palm.base.model import Model
from palm.state_collection import StateIDCollection
from palm.route_collection import RouteIDCollection
from palm.rate_fcn import rate_from_rate_id
from palm.rate_matrix import make_rate_matrix_from_state_ids
from palm.probability_vector import make_prob_vec_from_state_ids
class AggregatedKineticModel(Model):
"""
An AggregatedKineticModel consists of states and routes.
The routes are transitions between states. The model is
aggregated in the sense that each state belongs to one
of several discrete aggregated classes (e.g. 'dark' or 'bright').
Note that the term `class` in "aggregated class" does not refer
to the python concept of a class; it's a different meaning.
Parameters
----------
state_enumerator : callable f()
Generates a StateCollection for the model.
route_mapper : callable f(state_collection)
Generates a RouteCollection for the model.
parameter_set : ParameterSet
fermi_activation : bool, optional
Whether the activation rates vary with time.
Attributes
----------
state_collection : StateCollection
state_groups : pandas.DataFrame
state_id_collection : StateIDCollection
state_ids_by_class_dict : dict
Lists of state ids, indexed by class name.
state_class_by_id_dict : dict
Aggregated class of each state, indexed by state id.
route_collection : RouteCollection
"""
def __init__(self, state_enumerator, route_mapper, parameter_set,
fermi_activation=False):
super(AggregatedKineticModel, self).__init__()
self.state_enumerator = state_enumerator
self.route_mapper = route_mapper
self.parameter_set = parameter_set
self.fermi_activation = fermi_activation
r = self.state_enumerator()
self.state_collection, self.initial_state_id, self.final_state_id = r
self.state_groups = self.state_collection.sort('observation_class')
self.state_id_collection = self.state_collection.get_state_ids()
self.state_ids_by_class_dict = {}
self.state_class_by_id_dict = {}
for obs_class, id_list in self.state_groups.groups.iteritems():
this_state_id_collection = StateIDCollection()
this_state_id_collection.add_state_id_list(id_list)
self.state_ids_by_class_dict[obs_class] = this_state_id_collection
for this_id in id_list:
self.state_class_by_id_dict[this_id] = obs_class
self.route_collection = self.route_mapper(self.state_collection)
def get_parameter(self, parameter_name):
return self.parameter_set.get_parameter(parameter_name)
def get_num_states(self, class_name=None):
if class_name:
return len(self.state_ids_by_class_dict[class_name])
else:
return len(self.state_id_collection)
def get_num_routes(self):
return len(self.route_collection)
def build_rate_matrix(self, time=0.):
"""
Returns
-------
rate_matrix : RateMatrix
"""
rate_matrix = self._build_rate_matrix_from_routes(
self.state_id_collection, self.route_collection,
time)
return rate_matrix
def get_submatrix(self, rate_matrix, start_class, end_class):
"""
Returns
-------
submatrix : RateMatrix
"""
start_id_collection = self.state_ids_by_class_dict[start_class]
end_id_collection = self.state_ids_by_class_dict[end_class]
submatrix = rate_matrix.get_submatrix(
start_id_collection, end_id_collection)
return submatrix
def _build_rate_matrix_from_routes(self, state_id_collection, routes, time):
"""
Parameters
----------
state_id_collection : StateIDCollection
routes : RouteCollection
time : float
Cumulative time since start of trajectory,
needed to compute time-dependent rates.
Returns
-------
rate_matrix : RateMatrix
"""
rate_matrix = make_rate_matrix_from_state_ids(
index_id_collection=state_id_collection,
column_id_collection=state_id_collection)
for r_id, r in routes.iter_routes():
start_id = r['start_state']
end_id = r['end_state']
rate_id = r['rate_id']
multiplicity = r['multiplicity']
this_rate = multiplicity * rate_from_rate_id(
rate_id, time, self.parameter_set,
self.fermi_activation)
rate_matrix.set_rate(start_id, end_id, this_rate)
rate_matrix.balance_transition_rates()
return rate_matrix
| 38.874016 | 80 | 0.664979 |
07a61896f34f32e04636b2ea74b08c9d3ea9abaf | 2,901 | py | Python | erpx_namlifa/hooks.py | nick9822/erpx_namlifa | e092073c3348bccec4c76f367dba6086f27ca447 | [
"MIT"
] | null | null | null | erpx_namlifa/hooks.py | nick9822/erpx_namlifa | e092073c3348bccec4c76f367dba6086f27ca447 | [
"MIT"
] | 3 | 2020-04-06T21:10:18.000Z | 2022-03-25T23:46:57.000Z | erpx_namlifa/hooks.py | nick9822/erpx_namlifa | e092073c3348bccec4c76f367dba6086f27ca447 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from . import __version__ as app_version
app_name = "erpx_namlifa"
app_title = "ERPX for Namlifa"
app_publisher = "ERPX"
app_description = "Human Resource Module for Namlifa powered by ERPX"
app_icon = "octicon octicon-file-directory"
app_color = "grey"
app_email = "dev.erpx@gmail.com"
app_license = "MIT"
# Includes in <head>
# ------------------
# include js, css files in header of desk.html
# app_include_css = "/assets/erpx_namlifa/css/erpx_namlifa.css"
# app_include_js = "/assets/erpx_namlifa/js/erpx_namlifa.js"
# include js, css files in header of web template
# web_include_css = "/assets/erpx_namlifa/css/erpx_namlifa.css"
# web_include_js = "/assets/erpx_namlifa/js/erpx_namlifa.js"
# include js in page
# page_js = {"page" : "public/js/file.js"}
# include js in doctype views
# doctype_js = {"doctype" : "public/js/doctype.js"}
# doctype_list_js = {"doctype" : "public/js/doctype_list.js"}
# doctype_tree_js = {"doctype" : "public/js/doctype_tree.js"}
# doctype_calendar_js = {"doctype" : "public/js/doctype_calendar.js"}
# Home Pages
# ----------
# application home page (will override Website Settings)
# home_page = "login"
# website user home page (by Role)
# role_home_page = {
# "Role": "home_page"
# }
# Website user home page (by function)
# get_website_user_home_page = "erpx_namlifa.utils.get_home_page"
# Generators
# ----------
# automatically create page for each record of this doctype
# website_generators = ["Web Page"]
# Installation
# ------------
# before_install = "erpx_namlifa.install.before_install"
# after_install = "erpx_namlifa.install.after_install"
# Desk Notifications
# ------------------
# See frappe.core.notifications.get_notification_config
# notification_config = "erpx_namlifa.notifications.get_notification_config"
# Permissions
# -----------
# Permissions evaluated in scripted ways
# permission_query_conditions = {
# "Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
# }
#
# has_permission = {
# "Event": "frappe.desk.doctype.event.event.has_permission",
# }
# Document Events
# ---------------
# Hook on document methods and events
# doc_events = {
# "*": {
# "on_update": "method",
# "on_cancel": "method",
# "on_trash": "method"
# }
# }
# Scheduled Tasks
# ---------------
# scheduler_events = {
# "all": [
# "erpx_namlifa.tasks.all"
# ],
# "daily": [
# "erpx_namlifa.tasks.daily"
# ],
# "hourly": [
# "erpx_namlifa.tasks.hourly"
# ],
# "weekly": [
# "erpx_namlifa.tasks.weekly"
# ]
# "monthly": [
# "erpx_namlifa.tasks.monthly"
# ]
# }
# Testing
# -------
# before_tests = "erpx_namlifa.install.before_tests"
# Overriding Whitelisted Methods
# ------------------------------
#
# override_whitelisted_methods = {
# "frappe.desk.doctype.event.event.get_events": "erpx_namlifa.event.get_events"
# }
| 23.585366 | 80 | 0.677008 |
4bb7d28492434b9b6cf0e6cfe1a106cc5913463c | 27 | py | Python | theme-2/3456.py | sviridovcomp/python-course | 030a0adc1b30e95d9335ab0bb1213dccda335b77 | [
"MIT"
] | 1 | 2021-06-03T13:08:10.000Z | 2021-06-03T13:08:10.000Z | theme-2/3456.py | sviridovcomp/python-course | 030a0adc1b30e95d9335ab0bb1213dccda335b77 | [
"MIT"
] | null | null | null | theme-2/3456.py | sviridovcomp/python-course | 030a0adc1b30e95d9335ab0bb1213dccda335b77 | [
"MIT"
] | null | null | null | print(f"Hello, {input()}!") | 27 | 27 | 0.592593 |
57a7bfd3700125ba478166813dc4cd9b6a748f76 | 4,253 | py | Python | api/tacticalrmm/agents/permissions.py | jpjllorente/tacticalrmm | 506055a8159a22e0d439a00e49a8dbe59e020474 | [
"MIT"
] | null | null | null | api/tacticalrmm/agents/permissions.py | jpjllorente/tacticalrmm | 506055a8159a22e0d439a00e49a8dbe59e020474 | [
"MIT"
] | null | null | null | api/tacticalrmm/agents/permissions.py | jpjllorente/tacticalrmm | 506055a8159a22e0d439a00e49a8dbe59e020474 | [
"MIT"
] | null | null | null | from rest_framework import permissions
from tacticalrmm.permissions import _has_perm, _has_perm_on_agent
class AgentPerms(permissions.BasePermission):
    """Role checks for agent endpoints: list (GET), uninstall (DELETE),
    and edit (all other methods), with per-agent scoping whenever an
    ``agent_id`` appears in the URL kwargs."""

    def has_permission(self, r, view) -> bool:
        agent_scoped = "agent_id" in view.kwargs

        if r.method == "GET":
            allowed = _has_perm(r, "can_list_agents")
            if agent_scoped:
                # Short-circuits: per-agent check only runs if the role
                # permission already passed.
                allowed = allowed and _has_perm_on_agent(
                    r.user, view.kwargs["agent_id"]
                )
            return allowed

        if r.method == "DELETE":
            return _has_perm(r, "can_uninstall_agents") and _has_perm_on_agent(
                r.user, view.kwargs["agent_id"]
            )

        # POST/PUT/PATCH -> edit. The bulk maintenance endpoint is not
        # scoped to a single agent, so only the role permission applies.
        if r.path == "/agents/maintenance/bulk/":
            return _has_perm(r, "can_edit_agent")
        return _has_perm(r, "can_edit_agent") and _has_perm_on_agent(
            r.user, view.kwargs["agent_id"]
        )
class RecoverAgentPerms(permissions.BasePermission):
def has_permission(self, r, view) -> bool:
if "agent_id" not in view.kwargs.keys():
return _has_perm(r, "can_recover_agents")
return _has_perm(r, "can_recover_agents") and _has_perm_on_agent(
r.user, view.kwargs["agent_id"]
)
class MeshPerms(permissions.BasePermission):
def has_permission(self, r, view) -> bool:
return _has_perm(r, "can_use_mesh") and _has_perm_on_agent(
r.user, view.kwargs["agent_id"]
)
class UpdateAgentPerms(permissions.BasePermission):
def has_permission(self, r, view) -> bool:
return _has_perm(r, "can_update_agents")
class PingAgentPerms(permissions.BasePermission):
def has_permission(self, r, view) -> bool:
return _has_perm(r, "can_ping_agents") and _has_perm_on_agent(
r.user, view.kwargs["agent_id"]
)
class ManageProcPerms(permissions.BasePermission):
def has_permission(self, r, view) -> bool:
return _has_perm(r, "can_manage_procs") and _has_perm_on_agent(
r.user, view.kwargs["agent_id"]
)
class EvtLogPerms(permissions.BasePermission):
def has_permission(self, r, view) -> bool:
return _has_perm(r, "can_view_eventlogs") and _has_perm_on_agent(
r.user, view.kwargs["agent_id"]
)
class SendCMDPerms(permissions.BasePermission):
def has_permission(self, r, view) -> bool:
return _has_perm(r, "can_send_cmd") and _has_perm_on_agent(
r.user, view.kwargs["agent_id"]
)
class RebootAgentPerms(permissions.BasePermission):
def has_permission(self, r, view) -> bool:
return _has_perm(r, "can_reboot_agents") and _has_perm_on_agent(
r.user, view.kwargs["agent_id"]
)
class InstallAgentPerms(permissions.BasePermission):
def has_permission(self, r, view) -> bool:
return _has_perm(r, "can_install_agents")
class RunScriptPerms(permissions.BasePermission):
def has_permission(self, r, view) -> bool:
return _has_perm(r, "can_run_scripts") and _has_perm_on_agent(
r.user, view.kwargs["agent_id"]
)
class AgentNotesPerms(permissions.BasePermission):
    """Reads require ``can_list_notes`` (plus per-agent access when the
    URL is agent-scoped); every other method requires ``can_manage_notes``."""

    def has_permission(self, r, view) -> bool:
        # Writes (POST/PUT/PATCH/DELETE) share one management permission.
        if r.method != "GET":
            return _has_perm(r, "can_manage_notes")

        can_list = _has_perm(r, "can_list_notes")
        if "agent_id" not in view.kwargs:
            # GET /agents/notes/ -- not scoped to a single agent.
            return can_list
        # GET /agents/<agent_id>/notes -- also needs per-agent access.
        return can_list and _has_perm_on_agent(r.user, view.kwargs["agent_id"])
class RunBulkPerms(permissions.BasePermission):
def has_permission(self, r, view) -> bool:
return _has_perm(r, "can_run_bulk")
class AgentHistoryPerms(permissions.BasePermission):
def has_permission(self, r, view) -> bool:
if "agent_id" in view.kwargs.keys():
return _has_perm(r, "can_list_agent_history") and _has_perm_on_agent(
r.user, view.kwargs["agent_id"]
)
else:
return _has_perm(r, "can_list_agent_history")
| 33.488189 | 81 | 0.631084 |
c1cec07e5f980c72f5f78a3e26bf8e8d3ffdaf5a | 99 | py | Python | cogs/owner/__init__.py | vbe0201/code-inspector | da3db9cdaad17614006adf85fb688931d41fc9f5 | [
"MIT"
] | 3 | 2019-03-22T07:57:41.000Z | 2019-10-08T08:40:04.000Z | cogs/owner/__init__.py | itsVale/code-inspector | da3db9cdaad17614006adf85fb688931d41fc9f5 | [
"MIT"
] | 107 | 2019-02-10T14:03:16.000Z | 2019-06-16T14:11:13.000Z | cogs/owner/__init__.py | vbe0201/code-inspector | da3db9cdaad17614006adf85fb688931d41fc9f5 | [
"MIT"
] | 4 | 2019-06-14T18:15:03.000Z | 2021-05-23T13:53:21.000Z | # -*- coding: utf-8 -*-
"""
Commands from this category are restricted to the bot owner only.
"""
| 16.5 | 65 | 0.646465 |
a7d82dd6ddfee965e54149253e0c217f7c7b9ec0 | 390 | py | Python | performance/profile/montecarlo_profile.py | ThiagoVieira/playsomeeuchre | 6ffbc5096f889efa9bcb575ec4ec73b3a6973b6c | [
"MIT"
] | null | null | null | performance/profile/montecarlo_profile.py | ThiagoVieira/playsomeeuchre | 6ffbc5096f889efa9bcb575ec4ec73b3a6973b6c | [
"MIT"
] | null | null | null | performance/profile/montecarlo_profile.py | ThiagoVieira/playsomeeuchre | 6ffbc5096f889efa9bcb575ec4ec73b3a6973b6c | [
"MIT"
] | null | null | null | import cProfile
from euchre.data_model import CardDeck, PlayerCards, Suite
from euchre.game_simulations import GameTreeSimulation
# Profile one Monte Carlo game-tree simulation; a full min-max run takes
# about 70 seconds, so run this script only when profiling.
cards = CardDeck().deal_cards()
# Four players, each constructed from index i and their dealt hand.
players = [PlayerCards(i, i, cards[i]) for i in range(4)]
# NOTE(review): positional args look like (players, cards, leader, trump)
# with player 0 leading and diamonds as trump -- confirm signature.
mc_sim = GameTreeSimulation(players, cards, 0, Suite.DIAMOND)
cProfile.run('mc_sim.simulate()')
| 26 | 67 | 0.774359 |
3d43eac4e947acf4ed6651bbf82dbbaf0db5ca90 | 2,412 | py | Python | server/routes/static.py | shifucun/website | e2997109facc105935e34f55288d3b4f1205dedc | [
"Apache-2.0"
] | 11 | 2020-07-18T17:04:26.000Z | 2022-03-23T08:44:09.000Z | server/routes/static.py | shifucun/website | e2997109facc105935e34f55288d3b4f1205dedc | [
"Apache-2.0"
] | 747 | 2020-06-22T16:56:45.000Z | 2022-03-31T19:04:55.000Z | server/routes/static.py | shifucun/website | e2997109facc105935e34f55288d3b4f1205dedc | [
"Apache-2.0"
] | 33 | 2019-09-25T21:26:12.000Z | 2022-03-23T08:27:33.000Z | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data Commons static content routes."""
from datetime import date
from flask import Blueprint, render_template, current_app, g
from lib.gcs import list_blobs
import babel.dates as babel_dates
import routes.api.place as place_api
_SA_FEED_BUCKET = 'datacommons-frog-feed'
_MAX_BLOBS = 1
bp = Blueprint('static', __name__)
@bp.route('/')
def homepage():
    """Serve the site homepage.

    Deployment flags take priority: PRIVATE instances get the private
    page, SUSTAINABILITY instances get the sustainability homepage;
    otherwise render the default homepage with a locale-formatted
    blog-post date.
    """
    if current_app.config['PRIVATE']:
        return render_template('static/private.html')
    if current_app.config['SUSTAINABILITY']:
        return render_template('sustainability/homepage.html')
    # Fixed blog-post date, formatted for the request's locale.
    blog_date = babel_dates.format_date(date(2021, 7, 26),
                                        format='long',
                                        locale=g.locale)
    return render_template('static/homepage.html', blog_date=blog_date)
@bp.route('/about')
def about():
return render_template('static/about.html')
@bp.route('/faq')
def faq():
    """Render the FAQ page with today's date and year for citations."""
    today = date.today()
    return render_template('static/faq.html',
                           current_date=today.strftime('%-d %b %Y'),
                           current_year=today.strftime('%Y'))
@bp.route('/disclaimers')
def disclaimers():
return render_template('static/disclaimers.html')
@bp.route('/datasets')
def datasets():
return render_template('static/datasets.html')
@bp.route('/feedback')
def feedback():
return render_template('static/feedback.html')
@bp.route('/special_announcement')
def special_announcement():
recent_blobs = list_blobs(_SA_FEED_BUCKET, _MAX_BLOBS)
return render_template('static/special_announcement.html',
recent_blobs=recent_blobs)
@bp.route('/special_announcement/faq')
def special_announcement_faq():
return render_template('static/special_announcement_faq.html')
| 30.531646 | 74 | 0.69859 |
cdacb3d6183239049b50f74a8f6926c03411cf7e | 3,466 | py | Python | mwparserfromhell/definitions.py | wikimedia/operations-debs-python-mwparserfromhell | 8039469b268372a63389e2b43825fa3b3463608a | [
"MIT"
] | null | null | null | mwparserfromhell/definitions.py | wikimedia/operations-debs-python-mwparserfromhell | 8039469b268372a63389e2b43825fa3b3463608a | [
"MIT"
] | null | null | null | mwparserfromhell/definitions.py | wikimedia/operations-debs-python-mwparserfromhell | 8039469b268372a63389e2b43825fa3b3463608a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2015 Ben Kurtovic <ben.kurtovic@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Contains data about certain markup, like HTML tags and external links."""
from __future__ import unicode_literals
__all__ = ["get_html_tag", "is_parsable", "is_visible", "is_single",
"is_single_only", "is_scheme"]
# Maps URI scheme -> whether it must be followed by "//" when used in an
# external link (consumed by is_scheme() below).
URI_SCHEMES = {
    # [mediawiki/core.git]/includes/DefaultSettings.php @ 374a0ad943
    "http": True, "https": True, "ftp": True, "ftps": True, "ssh": True,
    "sftp": True, "irc": True, "ircs": True, "xmpp": False, "sip": False,
    "sips": False, "gopher": True, "telnet": True, "nntp": True,
    "worldwind": True, "mailto": False, "tel": False, "sms": False,
    "news": False, "svn": True, "git": True, "mms": True, "bitcoin": False,
    "magnet": False, "urn": False, "geo": False
}
# Tag names whose contents are NOT parsed as wikicode (see is_parsable()).
PARSER_BLACKLIST = [
    # enwiki extensions @ 2013-06-28
    "categorytree", "gallery", "hiero", "imagemap", "inputbox", "math",
    "nowiki", "pre", "score", "section", "source", "syntaxhighlight",
    "templatedata", "timeline"
]
# Tag names that produce no visible text output (see is_visible()).
INVISIBLE_TAGS = [
    # enwiki extensions @ 2013-06-28
    "categorytree", "gallery", "imagemap", "inputbox", "math", "score",
    "section", "templatedata", "timeline"
]
# [mediawiki/core.git]/includes/Sanitizer.php @ 87a0aef762
# Tags that MUST be self-closing vs. tags that MAY omit a close tag.
SINGLE_ONLY = ["br", "hr", "meta", "link", "img"]
SINGLE = SINGLE_ONLY + ["li", "dt", "dd", "th", "td", "tr"]
# Wiki list/definition markers -> the HTML tag they translate to.
MARKUP_TO_HTML = {
    "#": "li",
    "*": "li",
    ";": "dt",
    ":": "dd"
}
def get_html_tag(markup):
    """Return the HTML tag associated with the given wiki-markup.

    *markup* is a list/definition-list marker ("#", "*", ";", or ":");
    any other value raises KeyError.
    """
    return MARKUP_TO_HTML[markup]
def is_parsable(tag):
    """Return whether the contents of *tag* should be handed to the parser.

    The comparison is case-insensitive against PARSER_BLACKLIST.
    """
    lowered = tag.lower()
    return lowered not in PARSER_BLACKLIST
def is_visible(tag):
    """Return whether the given *tag* produces visible text.

    The comparison is case-insensitive against INVISIBLE_TAGS.
    """
    lowered = tag.lower()
    return lowered not in INVISIBLE_TAGS
def is_single(tag):
    """Return whether the given *tag* may appear without a close tag.

    The comparison is case-insensitive against SINGLE.
    """
    lowered = tag.lower()
    return lowered in SINGLE
def is_single_only(tag):
    """Return whether the given *tag* must appear without a close tag.

    The comparison is case-insensitive against SINGLE_ONLY.
    """
    lowered = tag.lower()
    return lowered in SINGLE_ONLY
def is_scheme(scheme, slashes=True):
    """Return whether *scheme* is valid for external links.

    With *slashes* True the scheme is valid if it is known at all; with
    *slashes* False only schemes flagged as not requiring "//" qualify.
    """
    lowered = scheme.lower()
    if lowered not in URI_SCHEMES:
        return False
    return True if slashes else not URI_SCHEMES[lowered]
| 38.511111 | 79 | 0.684939 |
98b0ac1ec6b6774e959913d0361ccce7ccf7672d | 44,152 | py | Python | impacket/dcerpc/v5/scmr.py | stobor28/impacket | e7461f6dbf448084b00230cc6fcd192f9c2baa08 | [
"Apache-1.1"
] | 7 | 2020-07-10T22:52:54.000Z | 2021-11-09T16:05:07.000Z | impacket/dcerpc/v5/scmr.py | stobor28/impacket | e7461f6dbf448084b00230cc6fcd192f9c2baa08 | [
"Apache-1.1"
] | 5 | 2019-10-21T19:59:01.000Z | 2020-05-27T21:42:04.000Z | impacket/dcerpc/v5/scmr.py | stobor28/impacket | e7461f6dbf448084b00230cc6fcd192f9c2baa08 | [
"Apache-1.1"
] | 2 | 2018-07-10T15:38:01.000Z | 2018-08-11T02:57:48.000Z | # SECUREAUTH LABS. Copyright 2018 SecureAuth Corporation. All rights reserved.
#
# This software is provided under under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Author: Alberto Solino (@agsolino)
#
# Description:
# [MS-SCMR] Interface implementation
#
# Best way to learn how to use these calls is to grab the protocol standard
# so you understand what the call does, and then read the test case located
# at https://github.com/SecureAuthCorp/impacket/tree/master/tests/SMB_RPC
#
# Some calls have helper functions, which makes it even easier to use.
# They are located at the end of this file.
# Helper functions start with "h"<name of the call>.
# There are test cases for them too.
#
from impacket import system_errors
from impacket.dcerpc.v5.dtypes import NULL, DWORD, LPWSTR, ULONG, BOOL, LPBYTE, ULONGLONG, PGUID, USHORT, LPDWORD, WSTR, \
GUID, PBOOL, WIDESTR
from impacket.dcerpc.v5.ndr import NDRCALL, NDR, NDRSTRUCT, NDRPOINTER, NDRPOINTERNULL, NDRUniConformantArray, NDRUNION
from impacket.dcerpc.v5.rpcrt import DCERPCException
from impacket.uuid import uuidtup_to_bin
MSRPC_UUID_SCMR = uuidtup_to_bin(('367ABB81-9844-35F1-AD32-98F038001003', '2.0'))
class DCERPCSessionError(DCERPCException):
    """DCERPC exception whose string form resolves SCMR (Win32) error codes."""

    def __init__(self, error_string=None, error_code=None, packet=None):
        DCERPCException.__init__(self, error_string, error_code, packet)

    def __str__(self):
        code = self.error_code
        messages = system_errors.ERROR_MESSAGES
        if code not in messages:
            return 'SCMR SessionError: unknown error code: 0x%x' % code
        # Known code: render both the short and the verbose description.
        return 'SCMR SessionError: code: 0x%x - %s - %s' % (
            code, messages[code][0], messages[code][1])
################################################################################
# CONSTANTS
################################################################################
# Access codes
SERVICE_ALL_ACCESS = 0X000F01FF
SERVICE_CHANGE_CONFIG = 0X00000002
SERVICE_ENUMERATE_DEPENDENTS = 0X00000008
SERVICE_INTERROGATE = 0X00000080
SERVICE_PAUSE_CONTINUE = 0X00000040
SERVICE_QUERY_CONFIG = 0X00000001
SERVICE_QUERY_STATUS = 0X00000004
SERVICE_START = 0X00000010
SERVICE_STOP = 0X00000020
SERVICE_USER_DEFINED_CTRL = 0X00000100
SERVICE_SET_STATUS = 0X00008000
# Specific Access for SCM
SC_MANAGER_LOCK = 0x00000008
SC_MANAGER_CREATE_SERVICE = 0x00000002
SC_MANAGER_ENUMERATE_SERVICE = 0x00000004
SC_MANAGER_CONNECT = 0x00000001
SC_MANAGER_QUERY_LOCK_STATUS = 0x00000010
SC_MANAGER_MODIFY_BOOT_CONFIG = 0x00000020
# Service Types
SERVICE_KERNEL_DRIVER = 0x00000001
SERVICE_FILE_SYSTEM_DRIVER = 0x00000002
SERVICE_WIN32_OWN_PROCESS = 0x00000010
SERVICE_WIN32_SHARE_PROCESS = 0x00000020
SERVICE_INTERACTIVE_PROCESS = 0x00000100
SERVICE_NO_CHANGE = 0xffffffff
# Start Types
SERVICE_BOOT_START = 0x00000000
SERVICE_SYSTEM_START = 0x00000001
SERVICE_AUTO_START = 0x00000002
SERVICE_DEMAND_START = 0x00000003
SERVICE_DISABLED = 0x00000004
SERVICE_NO_CHANGE = 0xffffffff
# Error Control
SERVICE_ERROR_IGNORE = 0x00000000
SERVICE_ERROR_NORMAL = 0x00000001
SERVICE_ERROR_SEVERE = 0x00000002
SERVICE_ERROR_CRITICAL = 0x00000003
SERVICE_NO_CHANGE = 0xffffffff
# Service Control Codes
SERVICE_CONTROL_CONTINUE = 0x00000003
SERVICE_CONTROL_INTERROGATE = 0x00000004
SERVICE_CONTROL_PARAMCHANGE = 0x00000006
SERVICE_CONTROL_PAUSE = 0x00000002
SERVICE_CONTROL_STOP = 0x00000001
SERVICE_CONTROL_NETBINDADD = 0x00000007
SERVICE_CONTROL_NETBINDREMOVE = 0x00000008
SERVICE_CONTROL_NETBINDENABLE = 0x00000009
SERVICE_CONTROL_NETBINDDISABLE= 0x0000000A
# Service State
SERVICE_ACTIVE = 0x00000001
SERVICE_INACTIVE = 0x00000002
SERVICE_STATE_ALL = 0x00000003
# Current State
SERVICE_CONTINUE_PENDING = 0x00000005
SERVICE_PAUSE_PENDING = 0x00000006
SERVICE_PAUSED = 0x00000007
SERVICE_RUNNING = 0x00000004
SERVICE_START_PENDING = 0x00000002
SERVICE_STOP_PENDING = 0x00000003
SERVICE_STOPPED = 0x00000001
# Controls Accepted (dwControlsAccepted bit flags of SERVICE_STATUS)
SERVICE_ACCEPT_PARAMCHANGE           = 0x00000008
SERVICE_ACCEPT_PAUSE_CONTINUE        = 0x00000002
SERVICE_ACCEPT_SHUTDOWN              = 0x00000004
SERVICE_ACCEPT_STOP                  = 0x00000001
SERVICE_ACCEPT_HARDWAREPROFILECHANGE = 0x00000020
SERVICE_ACCEPT_POWEREVENT            = 0x00000040
SERVICE_ACCEPT_SESSIONCHANGE         = 0x00000080
SERVICE_ACCEPT_PRESHUTDOWN           = 0x00000100
SERVICE_ACCEPT_TIMECHANGE            = 0x00000200
# Bug fix: the constant below was originally (mis)named ERVICE_ACCEPT_TRIGGEREVENT.
# Provide the correctly spelled name and keep the misspelling as an alias so any
# existing callers keep working.
SERVICE_ACCEPT_TRIGGEREVENT          = 0x00000400
ERVICE_ACCEPT_TRIGGEREVENT           = SERVICE_ACCEPT_TRIGGEREVENT  # deprecated alias
# Security Information
DACL_SECURITY_INFORMATION = 0x4
GROUP_SECURITY_INFORMATION = 0x2
OWNER_SECURITY_INFORMATION = 0x1
SACL_SECURITY_INFORMATION = 0x8
# Service Config2 Info Levels
SERVICE_CONFIG_DESCRIPTION = 0x00000001
SERVICE_CONFIG_FAILURE_ACTIONS = 0x00000002
SERVICE_CONFIG_DELAYED_AUTO_START_INFO = 0x00000003
SERVICE_CONFIG_FAILURE_ACTIONS_FLAG = 0x00000004
SERVICE_CONFIG_SERVICE_SID_INFO = 0x00000005
SERVICE_CONFIG_REQUIRED_PRIVILEGES_INFO = 0x00000006
SERVICE_CONFIG_PRESHUTDOWN_INFO = 0x00000007
SERVICE_CONFIG_PREFERRED_NODE = 0x00000009
SERVICE_CONFIG_RUNLEVEL_INFO = 0x0000000A
# SC_ACTIONS Types
SC_ACTION_NONE = 0
SC_ACTION_RESTART = 1
SC_ACTION_REBOOT = 2
SC_ACTION_RUN_COMMAND = 3
# SERVICE_SID_INFO types
SERVICE_SID_TYPE_NONE = 0x00000000
SERVICE_SID_TYPE_RESTRICTED = 0x00000003
SERVICE_SID_TYPE_UNRESTRICTED = 0x00000001
# SC_STATUS_TYPE types
SC_STATUS_PROCESS_INFO = 0
# Notify Mask
SERVICE_NOTIFY_CREATED = 0x00000080
SERVICE_NOTIFY_CONTINUE_PENDING = 0x00000010
SERVICE_NOTIFY_DELETE_PENDING = 0x00000200
SERVICE_NOTIFY_DELETED = 0x00000100
SERVICE_NOTIFY_PAUSE_PENDING = 0x00000020
SERVICE_NOTIFY_PAUSED = 0x00000040
SERVICE_NOTIFY_RUNNING = 0x00000008
SERVICE_NOTIFY_START_PENDING = 0x00000002
SERVICE_NOTIFY_STOP_PENDING = 0x00000004
SERVICE_NOTIFY_STOPPED = 0x00000001
# SERVICE_CONTROL_STATUS_REASON_IN_PARAMSW Reasons
SERVICE_STOP_CUSTOM = 0x20000000
SERVICE_STOP_PLANNED = 0x40000000
SERVICE_STOP_UNPLANNED = 0x10000000
# SERVICE_TRIGGER triggers
SERVICE_TRIGGER_TYPE_DEVICE_INTERFACE_ARRIVAL = 0x00000001
SERVICE_TRIGGER_TYPE_IP_ADDRESS_AVAILABILITY = 0x00000002
SERVICE_TRIGGER_TYPE_DOMAIN_JOIN = 0x00000003
SERVICE_TRIGGER_TYPE_FIREWALL_PORT_EVENT = 0x00000004
SERVICE_TRIGGER_TYPE_GROUP_POLICY = 0x00000005
SERVICE_TRIGGER_TYPE_CUSTOM = 0x00000020
# SERVICE_TRIGGER actions
SERVICE_TRIGGER_ACTION_SERVICE_START = 0x00000001
SERVICE_TRIGGER_ACTION_SERVICE_STOP = 0x00000002
# SERVICE_TRIGGER subTypes
DOMAIN_JOIN_GUID = '1ce20aba-9851-4421-9430-1ddeb766e809'
DOMAIN_LEAVE_GUID = 'ddaf516e-58c2-4866-9574-c3b615d42ea1'
FIREWALL_PORT_OPEN_GUID = 'b7569e07-8421-4ee0-ad10-86915afdad09'
FIREWALL_PORT_CLOSE_GUID = 'a144ed38-8e12-4de4-9d96-e64740b1a524'
MACHINE_POLICY_PRESENT_GUID = '659FCAE6-5BDB-4DA9-B1FF-CA2A178D46E0'
NETWORK_MANAGER_FIRST_IP_ADDRESS_ARRIVAL_GUID = '4f27f2de-14e2-430b-a549-7cd48cbc8245'
NETWORK_MANAGER_LAST_IP_ADDRESS_REMOVAL_GUID = 'cc4ba62a-162e-4648-847a-b6bdf993e335'
USER_POLICY_PRESENT_GUID = '54FB46C8-F089-464C-B1FD-59D1B62C3B50'
# SERVICE_TRIGGER_SPECIFIC_DATA_ITEM dataTypes
SERVICE_TRIGGER_DATA_TYPE_BINARY = 0x00000001
SERVICE_TRIGGER_DATA_TYPE_STRING = 0x00000002
################################################################################
# STRUCTURES
################################################################################
class SC_RPC_HANDLE(NDRSTRUCT):
    """Opaque 20-byte RPC context handle for SCM/service objects (MS-SCMR)."""
    structure = (
        ('Data','20s=""'),
    )
    def getAlignment(self):
        # Raw byte blob: no NDR alignment requirement.
        return 1
SC_NOTIFY_RPC_HANDLE = SC_RPC_HANDLE
class SERVICE_STATUS(NDRSTRUCT):
structure = (
('dwServiceType',DWORD),
('dwCurrentState',DWORD),
('dwControlsAccepted',DWORD),
('dwWin32ExitCode',DWORD),
('dwServiceSpecificExitCode',DWORD),
('dwCheckPoint',DWORD),
('dwWaitHint',DWORD),
)
class QUERY_SERVICE_CONFIGW(NDRSTRUCT):
structure = (
('dwServiceType',DWORD),
('dwStartType',DWORD),
('dwErrorControl',DWORD),
('lpBinaryPathName', LPWSTR),
('lpLoadOrderGroup',LPWSTR),
('dwTagId',DWORD),
('lpDependencies',LPWSTR),
('lpServiceStartName',LPWSTR),
('lpDisplayName',LPWSTR),
)
class SC_RPC_LOCK(NDRSTRUCT):
structure = (
('Data','20s=""'),
)
def getAlignment(self):
return 1
class LPSERVICE_STATUS(NDRPOINTER):
referent = (
('Data',SERVICE_STATUS),
)
SECURITY_INFORMATION = ULONG
BOUNDED_DWORD_256K = DWORD
class LPBOUNDED_DWORD_256K(NDRPOINTER):
referent = (
('Data', BOUNDED_DWORD_256K),
)
SVCCTL_HANDLEW = LPWSTR
class ENUM_SERVICE_STATUSW(NDRSTRUCT):
structure = (
('lpServiceName',LPWSTR),
('lpDisplayName',LPWSTR),
('ServiceStatus',SERVICE_STATUS),
)
class LPQUERY_SERVICE_CONFIGW(NDRPOINTER):
referent = (
('Data', QUERY_SERVICE_CONFIGW),
)
BOUNDED_DWORD_8K = DWORD
BOUNDED_DWORD_4K = DWORD
class STRING_PTRSW(NDRSTRUCT):
structure = (
('Data',NDRUniConformantArray),
)
def __init__(self, data = None, isNDR64 = False):
NDR.__init__(self,None,isNDR64)
self.fields['Data'].item = LPWSTR
if data is not None:
self.fromString(data)
class UNIQUE_STRING_PTRSW(NDRPOINTER):
referent = (
('Data', STRING_PTRSW),
)
class QUERY_SERVICE_LOCK_STATUSW(NDRSTRUCT):
structure = (
('fIsLocked',DWORD),
('lpLockOwner',LPWSTR),
('dwLockDuration',DWORD),
)
class SERVICE_DESCRIPTION_WOW64(NDRSTRUCT):
structure = (
('dwDescriptionOffset', DWORD),
)
class SERVICE_DESCRIPTIONW(NDRSTRUCT):
structure = (
('lpDescription', LPWSTR),
)
class LPSERVICE_DESCRIPTIONW(NDRPOINTER):
referent = (
('Data', SERVICE_DESCRIPTIONW),
)
class SERVICE_FAILURE_ACTIONS_WOW64(NDRSTRUCT):
structure = (
('dwResetPeriod', DWORD),
('dwRebootMsgOffset', DWORD),
('dwCommandOffset', DWORD),
('cActions', DWORD),
('dwsaActionsOffset', DWORD),
)
class SC_ACTION(NDRSTRUCT):
structure = (
('Type', DWORD),
('Delay', DWORD) ,
)
class SC_ACTIONS(NDRSTRUCT):
structure = (
('Data', NDRUniConformantArray),
)
def __init__(self, data = None, isNDR64 = False):
NDR.__init__(self,None,isNDR64)
self.fields['Data'].item = SC_ACTION
if data is not None:
self.fromString(data)
class SERVICE_FAILURE_ACTIONSW(NDRSTRUCT):
structure = (
('dwResetPeriod', DWORD),
('lpRebootMsg', LPWSTR) ,
('lpCommand', LPWSTR) ,
('cActions', DWORD) ,
('lpsaActions', SC_ACTIONS) ,
)
class LPSERVICE_FAILURE_ACTIONSW(NDRPOINTER):
referent = (
('Data', SERVICE_FAILURE_ACTIONSW),
)
class SERVICE_FAILURE_ACTIONS_FLAG(NDRSTRUCT):
structure = (
('fFailureActionsOnNonCrashFailures', BOOL),
)
class LPSERVICE_FAILURE_ACTIONS_FLAG(NDRPOINTER):
referent = (
('Data', SERVICE_FAILURE_ACTIONS_FLAG),
)
class SERVICE_DELAYED_AUTO_START_INFO(NDRSTRUCT):
structure = (
('fDelayedAutostart', BOOL),
)
class LPSERVICE_DELAYED_AUTO_START_INFO(NDRPOINTER):
referent = (
('Data', SERVICE_DELAYED_AUTO_START_INFO),
)
class SERVICE_SID_INFO(NDRSTRUCT):
structure = (
('dwServiceSidType', DWORD),
)
class LPSERVICE_SID_INFO(NDRPOINTER):
referent = (
('Data', SERVICE_SID_INFO),
)
class SERVICE_RPC_REQUIRED_PRIVILEGES_INFO(NDRSTRUCT):
    """Required-privileges config info (MS-SCMR): a privilege-name buffer
    plus its byte count, kept in sync automatically at marshalling time."""
    structure = (
        ('cbRequiredPrivileges',DWORD),
        ('pRequiredPrivileges',LPBYTE),
    )
    def getData(self, soFar = 0):
        # Keep the byte count in sync with the buffer. Guard against a 0/NULL
        # pointer for consistency with the other getData() overrides in this
        # module (the original would raise TypeError on len(0)).
        if self['pRequiredPrivileges'] != 0:
            self['cbRequiredPrivileges'] = len(self['pRequiredPrivileges'])
        # Bug fix: forward the caller's offset instead of hard-coding soFar=0,
        # which miscomputes NDR padding when this struct is embedded in a
        # larger marshalled stream.
        return NDR.getData(self, soFar=soFar)
class LPSERVICE_RPC_REQUIRED_PRIVILEGES_INFO(NDRPOINTER):
referent = (
('Data', SERVICE_RPC_REQUIRED_PRIVILEGES_INFO),
)
class SERVICE_REQUIRED_PRIVILEGES_INFO_WOW64(NDRSTRUCT):
structure = (
('dwRequiredPrivilegesOffset', DWORD),
)
class SERVICE_PRESHUTDOWN_INFO(NDRSTRUCT):
structure = (
('dwPreshutdownTimeout', DWORD),
)
class LPSERVICE_PRESHUTDOWN_INFO(NDRPOINTER):
referent = (
('Data', SERVICE_PRESHUTDOWN_INFO),
)
class SERVICE_STATUS_PROCESS(NDRSTRUCT):
structure = (
('dwServiceType', DWORD),
('dwCurrentState', DWORD),
('dwControlsAccepted', DWORD),
('dwWin32ExitCode', DWORD),
('dwServiceSpecificExitCode', DWORD),
('dwCheckPoint', DWORD),
('dwWaitHint', DWORD),
('dwProcessId', DWORD),
('dwServiceFlags', DWORD),
)
class UCHAR_16(NDRSTRUCT):
structure = (
('Data', '16s=""'),
)
def getAlignment(self):
return 1
class SERVICE_NOTIFY_STATUS_CHANGE_PARAMS_1(NDRSTRUCT):
structure = (
('ullThreadId',ULONGLONG),
('dwNotifyMask',DWORD),
('CallbackAddressArray',UCHAR_16),
('CallbackParamAddressArray',UCHAR_16),
('ServiceStatus', SERVICE_STATUS_PROCESS),
('dwNotificationStatus',DWORD),
('dwSequence',DWORD),
)
class SERVICE_NOTIFY_STATUS_CHANGE_PARAMS_2(NDRSTRUCT):
structure = (
('ullThreadId',ULONGLONG),
('dwNotifyMask',DWORD),
('CallbackAddressArray',UCHAR_16),
('CallbackParamAddressArray',UCHAR_16),
('ServiceStatus',SERVICE_STATUS_PROCESS),
('dwNotificationStatus',DWORD),
('dwSequence',DWORD),
('dwNotificationTriggered',DWORD),
('pszServiceNames',LPWSTR),
)
class PSERVICE_NOTIFY_STATUS_CHANGE_PARAMS_1(NDRPOINTER):
referent = (
('Data', SERVICE_NOTIFY_STATUS_CHANGE_PARAMS_1),
)
class PSERVICE_NOTIFY_STATUS_CHANGE_PARAMS_2(NDRPOINTER):
referent = (
('Data', SERVICE_NOTIFY_STATUS_CHANGE_PARAMS_2),
)
class SC_RPC_NOTIFY_PARAMS(NDRUNION):
union = {
1: ('pStatusChangeParam1', PSERVICE_NOTIFY_STATUS_CHANGE_PARAMS_1),
2: ('pStatusChangeParams', PSERVICE_NOTIFY_STATUS_CHANGE_PARAMS_2),
}
class SC_RPC_NOTIFY_PARAMS_ARRAY(NDRUniConformantArray):
item = SC_RPC_NOTIFY_PARAMS
class PSC_RPC_NOTIFY_PARAMS_LIST(NDRSTRUCT):
structure = (
('cElements',BOUNDED_DWORD_4K),
('NotifyParamsArray', SC_RPC_NOTIFY_PARAMS_ARRAY),
)
class SERVICE_CONTROL_STATUS_REASON_IN_PARAMSW(NDRSTRUCT):
structure = (
('dwReason', DWORD),
('pszComment', LPWSTR),
)
class SERVICE_TRIGGER_SPECIFIC_DATA_ITEM(NDRSTRUCT):
    """One data item attached to a SERVICE_TRIGGER (MS-SCMR)."""
    structure = (
        ('dwDataType',DWORD ),
        ('cbData',DWORD),
        ('pData', LPBYTE),
    )
    def getData(self, soFar = 0):
        # Keep cbData in sync with the actual buffer before marshalling;
        # a 0/NULL pointer means "no data", so the count is left untouched.
        if self['pData'] != 0:
            self['cbData'] = len(self['pData'])
        return NDR.getData(self, soFar)
class SERVICE_TRIGGER_SPECIFIC_DATA_ITEM_ARRAY(NDRUniConformantArray):
item = SERVICE_TRIGGER_SPECIFIC_DATA_ITEM
class PSERVICE_TRIGGER_SPECIFIC_DATA_ITEM(NDRPOINTER):
referent = (
('Data', SERVICE_TRIGGER_SPECIFIC_DATA_ITEM_ARRAY),
)
class SERVICE_TRIGGER(NDRSTRUCT):
    """A single service trigger: type, action, subtype GUID and data items
    (MS-SCMR)."""
    structure = (
        ('dwTriggerType', DWORD),
        ('dwAction', DWORD),
        ('pTriggerSubtype', PGUID),
        ('cDataItems', DWORD),
        ('pDataItems', PSERVICE_TRIGGER_SPECIFIC_DATA_ITEM),
    )
    def getData(self, soFar = 0):
        # Recompute cDataItems from the item array before marshalling;
        # a 0/NULL pointer means "no items", so the count is left untouched.
        if self['pDataItems'] != 0:
            self['cDataItems'] = len(self['pDataItems'])
        return NDR.getData(self, soFar)
class SERVICE_TRIGGER_ARRAY(NDRUniConformantArray):
item = SERVICE_TRIGGER
class PSERVICE_TRIGGER(NDRPOINTER):
referent = (
('Data', SERVICE_TRIGGER_ARRAY),
)
class SERVICE_CONTROL_STATUS_REASON_OUT_PARAMS(NDRSTRUCT):
structure = (
('ServiceStatus', SERVICE_STATUS_PROCESS),
)
class SERVICE_TRIGGER_INFO(NDRSTRUCT):
    """Collection of SERVICE_TRIGGER entries for a service (MS-SCMR)."""
    structure = (
        ('cTriggers', DWORD),
        ('pTriggers', PSERVICE_TRIGGER),
        ('pReserved', NDRPOINTERNULL ),
    )
    def getData(self, soFar = 0):
        # Recompute cTriggers from the trigger array before marshalling;
        # a 0/NULL pointer means "no triggers", so the count is left untouched.
        if self['pTriggers'] != 0:
            self['cTriggers'] = len(self['pTriggers'])
        return NDR.getData(self, soFar)
class PSERVICE_TRIGGER_INFO(NDRPOINTER):
referent = (
('Data', SERVICE_TRIGGER_INFO),
)
class SERVICE_PREFERRED_NODE_INFO(NDRSTRUCT):
structure = (
('usPreferredNode', USHORT),
('fDelete', BOOL),
)
class LPSERVICE_PREFERRED_NODE_INFO(NDRPOINTER):
referent = (
('Data', SERVICE_PREFERRED_NODE_INFO),
)
class SERVICE_RUNLEVEL_INFO(NDRSTRUCT):
structure = (
('eLowestRunLevel', DWORD),
)
class PSERVICE_RUNLEVEL_INFO(NDRPOINTER):
referent = (
('Data', SERVICE_RUNLEVEL_INFO),
)
class SERVICE_MANAGEDACCOUNT_INFO(NDRSTRUCT):
structure = (
('fIsManagedAccount', DWORD),
)
class PSERVICE_MANAGEDACCOUNT_INFO(NDRPOINTER):
referent = (
('Data', SERVICE_MANAGEDACCOUNT_INFO),
)
class SC_RPC_CONFIG_INFOW_UNION(NDRUNION):
commonHdr = (
('tag', ULONG),
)
union = {
1: ('psd', LPSERVICE_DESCRIPTIONW),
2: ('psfa',LPSERVICE_FAILURE_ACTIONSW ),
3: ('psda',LPSERVICE_DELAYED_AUTO_START_INFO),
4: ('psfaf',LPSERVICE_FAILURE_ACTIONS_FLAG),
5: ('pssid',LPSERVICE_SID_INFO),
6: ('psrp',LPSERVICE_RPC_REQUIRED_PRIVILEGES_INFO),
7: ('psps',LPSERVICE_PRESHUTDOWN_INFO),
8: ('psti',PSERVICE_TRIGGER_INFO),
9: ('pspn',LPSERVICE_PREFERRED_NODE_INFO),
10: ('psri',PSERVICE_RUNLEVEL_INFO),
11: ('psma',PSERVICE_MANAGEDACCOUNT_INFO),
}
class SC_RPC_CONFIG_INFOW(NDRSTRUCT):
structure = (
('dwInfoLevel', DWORD),
('Union', SC_RPC_CONFIG_INFOW_UNION),
)
################################################################################
# RPC CALLS
################################################################################
class RCloseServiceHandle(NDRCALL):
opnum = 0
structure = (
('hSCObject',SC_RPC_HANDLE),
)
class RCloseServiceHandleResponse(NDRCALL):
structure = (
('hSCObject',SC_RPC_HANDLE),
('ErrorCode', DWORD),
)
class RControlService(NDRCALL):
opnum = 1
structure = (
('hService',SC_RPC_HANDLE),
('dwControl',DWORD),
)
class RControlServiceResponse(NDRCALL):
structure = (
('lpServiceStatus',SERVICE_STATUS),
('ErrorCode', DWORD),
)
class RDeleteService(NDRCALL):
opnum = 2
structure = (
('hService',SC_RPC_HANDLE),
)
class RDeleteServiceResponse(NDRCALL):
structure = (
('ErrorCode', DWORD),
)
class RLockServiceDatabase(NDRCALL):
opnum = 3
structure = (
('hSCManager',SC_RPC_HANDLE),
)
class RLockServiceDatabaseResponse(NDRCALL):
structure = (
('lpLock',SC_RPC_LOCK),
('ErrorCode', DWORD),
)
class RQueryServiceObjectSecurity(NDRCALL):
opnum = 4
structure = (
('hService',SC_RPC_HANDLE),
('dwSecurityInformation',SECURITY_INFORMATION),
('cbBufSize',DWORD),
)
class RQueryServiceObjectSecurityResponse(NDRCALL):
structure = (
('lpSecurityDescriptor',LPBYTE),
('pcbBytesNeeded',BOUNDED_DWORD_256K),
('ErrorCode', DWORD),
)
class RSetServiceObjectSecurity(NDRCALL):
opnum = 5
structure = (
('hService',SC_RPC_HANDLE),
('dwSecurityInformation',SECURITY_INFORMATION),
('lpSecurityDescriptor',LPBYTE),
('cbBufSize',DWORD),
)
class RSetServiceObjectSecurityResponse(NDRCALL):
structure = (
('ErrorCode', DWORD),
)
class RQueryServiceStatus(NDRCALL):
opnum = 6
structure = (
('hService',SC_RPC_HANDLE),
)
class RQueryServiceStatusResponse(NDRCALL):
structure = (
('lpServiceStatus',SERVICE_STATUS),
('ErrorCode', DWORD),
)
class RSetServiceStatus(NDRCALL):
opnum = 7
structure = (
('hServiceStatus',SC_RPC_HANDLE),
('lpServiceStatus',SERVICE_STATUS),
)
class RSetServiceStatusResponse(NDRCALL):
structure = (
('ErrorCode', DWORD),
)
class RUnlockServiceDatabase(NDRCALL):
opnum = 8
structure = (
('Lock',SC_RPC_LOCK),
)
class RUnlockServiceDatabaseResponse(NDRCALL):
structure = (
('Lock',SC_RPC_LOCK),
('ErrorCode', DWORD),
)
class RNotifyBootConfigStatus(NDRCALL):
opnum = 9
structure = (
('lpMachineName',SVCCTL_HANDLEW),
('BootAcceptable',DWORD),
)
class RNotifyBootConfigStatusResponse(NDRCALL):
structure = (
('ErrorCode', DWORD),
)
class RChangeServiceConfigW(NDRCALL):
opnum = 11
structure = (
('hService',SC_RPC_HANDLE),
('dwServiceType',DWORD),
('dwStartType',DWORD),
('dwErrorControl',DWORD),
('lpBinaryPathName',LPWSTR),
('lpLoadOrderGroup',LPWSTR),
('lpdwTagId',LPDWORD),
('lpDependencies',LPBYTE),
('dwDependSize',DWORD),
('lpServiceStartName',LPWSTR),
('lpPassword',LPBYTE),
('dwPwSize',DWORD),
('lpDisplayName',LPWSTR),
)
class RChangeServiceConfigWResponse(NDRCALL):
structure = (
('lpdwTagId',LPDWORD),
('ErrorCode', DWORD),
)
class RCreateServiceW(NDRCALL):
opnum = 12
structure = (
('hSCManager',SC_RPC_HANDLE),
('lpServiceName',WSTR),
('lpDisplayName',LPWSTR),
('dwDesiredAccess',DWORD),
('dwServiceType',DWORD),
('dwStartType',DWORD),
('dwErrorControl',DWORD),
('lpBinaryPathName',WSTR),
('lpLoadOrderGroup',LPWSTR),
('lpdwTagId',LPDWORD),
('lpDependencies',LPBYTE),
('dwDependSize',DWORD),
('lpServiceStartName',LPWSTR),
('lpPassword',LPBYTE),
('dwPwSize',DWORD),
)
class RCreateServiceWResponse(NDRCALL):
structure = (
('lpdwTagId',LPWSTR),
('lpServiceHandle',SC_RPC_HANDLE),
('ErrorCode', DWORD),
)
class REnumDependentServicesW(NDRCALL):
opnum = 13
structure = (
('hService',SC_RPC_HANDLE),
('dwServiceState',DWORD),
('cbBufSize',DWORD),
)
class REnumDependentServicesWResponse(NDRCALL):
structure = (
('lpServices',NDRUniConformantArray),
('pcbBytesNeeded',BOUNDED_DWORD_256K),
('lpServicesReturned',BOUNDED_DWORD_256K),
('ErrorCode', DWORD),
)
class REnumServicesStatusW(NDRCALL):
opnum = 14
structure = (
('hSCManager',SC_RPC_HANDLE),
('dwServiceType',DWORD),
('dwServiceState',DWORD),
('cbBufSize',DWORD),
('lpResumeIndex',LPBOUNDED_DWORD_256K),
)
class REnumServicesStatusWResponse(NDRCALL):
structure = (
('lpBuffer',NDRUniConformantArray),
('pcbBytesNeeded',BOUNDED_DWORD_256K),
('lpServicesReturned',BOUNDED_DWORD_256K),
('lpResumeIndex',LPBOUNDED_DWORD_256K),
('ErrorCode', DWORD),
)
class ROpenSCManagerW(NDRCALL):
opnum = 15
structure = (
('lpMachineName',SVCCTL_HANDLEW),
('lpDatabaseName',LPWSTR),
('dwDesiredAccess',DWORD),
)
class ROpenSCManagerWResponse(NDRCALL):
structure = (
('lpScHandle',SC_RPC_HANDLE),
('ErrorCode', DWORD),
)
class ROpenServiceW(NDRCALL):
opnum = 16
structure = (
('hSCManager',SC_RPC_HANDLE),
('lpServiceName',WSTR),
('dwDesiredAccess',DWORD),
)
class ROpenServiceWResponse(NDRCALL):
structure = (
('lpServiceHandle',SC_RPC_HANDLE),
('ErrorCode', DWORD),
)
class RQueryServiceConfigW(NDRCALL):
opnum = 17
structure = (
('hService',SC_RPC_HANDLE),
('cbBufSize',DWORD),
)
class RQueryServiceConfigWResponse(NDRCALL):
structure = (
('lpServiceConfig',QUERY_SERVICE_CONFIGW),
('pcbBytesNeeded',BOUNDED_DWORD_8K),
('ErrorCode', DWORD),
)
class RQueryServiceLockStatusW(NDRCALL):
opnum = 18
structure = (
('hSCManager',SC_RPC_HANDLE),
('cbBufSize',DWORD),
)
class RQueryServiceLockStatusWResponse(NDRCALL):
structure = (
('lpLockStatus',QUERY_SERVICE_LOCK_STATUSW),
('pcbBytesNeeded',BOUNDED_DWORD_4K),
('ErrorCode', DWORD),
)
class RStartServiceW(NDRCALL):
opnum = 19
structure = (
('hService',SC_RPC_HANDLE),
('argc',DWORD),
('argv',UNIQUE_STRING_PTRSW),
)
class RStartServiceWResponse(NDRCALL):
structure = (
('ErrorCode', DWORD),
)
class RGetServiceDisplayNameW(NDRCALL):
opnum = 20
structure = (
('hSCManager',SC_RPC_HANDLE),
('lpServiceName',WSTR),
('lpcchBuffer',DWORD),
)
class RGetServiceDisplayNameWResponse(NDRCALL):
structure = (
('lpDisplayName',WSTR),
('lpcchBuffer',DWORD),
('ErrorCode', DWORD),
)
class RGetServiceKeyNameW(NDRCALL):
opnum = 21
structure = (
('hSCManager',SC_RPC_HANDLE),
('lpDisplayName',WSTR),
('lpcchBuffer',DWORD),
)
class RGetServiceKeyNameWResponse(NDRCALL):
structure = (
('lpDisplayName',WSTR),
('lpcchBuffer',DWORD),
('ErrorCode', DWORD),
)
class REnumServiceGroupW(NDRCALL):
opnum = 35
structure = (
('hSCManager',SC_RPC_HANDLE),
('dwServiceType',DWORD),
('dwServiceState',DWORD),
('cbBufSize',DWORD),
('lpResumeIndex',LPBOUNDED_DWORD_256K),
('pszGroupName',LPWSTR),
)
class REnumServiceGroupWResponse(NDRCALL):
structure = (
('lpBuffer',LPBYTE),
('pcbBytesNeeded',BOUNDED_DWORD_256K),
('lpServicesReturned',BOUNDED_DWORD_256K),
('lpResumeIndex',BOUNDED_DWORD_256K),
('ErrorCode', DWORD),
)
class RChangeServiceConfig2W(NDRCALL):
opnum = 37
structure = (
('hService',SC_RPC_HANDLE),
('Info',SC_RPC_CONFIG_INFOW),
)
class RChangeServiceConfig2WResponse(NDRCALL):
structure = (
('ErrorCode', DWORD),
)
class RQueryServiceConfig2W(NDRCALL):
opnum = 39
structure = (
('hService',SC_RPC_HANDLE),
('dwInfoLevel',DWORD),
('cbBufSize',DWORD),
)
class RQueryServiceConfig2WResponse(NDRCALL):
structure = (
('lpBuffer',NDRUniConformantArray),
('pcbBytesNeeded',BOUNDED_DWORD_8K),
('ErrorCode', DWORD),
)
class RQueryServiceStatusEx(NDRCALL):
opnum = 40
structure = (
('hService',SC_RPC_HANDLE),
('InfoLevel',DWORD),
('cbBufSize',DWORD),
)
class RQueryServiceStatusExResponse(NDRCALL):
structure = (
('lpBuffer',NDRUniConformantArray),
('pcbBytesNeeded',BOUNDED_DWORD_8K),
('ErrorCode', DWORD),
)
class REnumServicesStatusExW(NDRCALL):
opnum = 42
structure = (
('hSCManager',SC_RPC_HANDLE),
('InfoLevel',DWORD),
('dwServiceType',DWORD),
('dwServiceState',DWORD),
('cbBufSize',DWORD),
('lpResumeIndex',LPBOUNDED_DWORD_256K),
('pszGroupName',LPWSTR),
)
class REnumServicesStatusExWResponse(NDRCALL):
structure = (
('lpBuffer',NDRUniConformantArray),
('pcbBytesNeeded',BOUNDED_DWORD_256K),
('lpServicesReturned',BOUNDED_DWORD_256K),
('lpResumeIndex',BOUNDED_DWORD_256K),
('ErrorCode', DWORD),
)
class RCreateServiceWOW64W(NDRCALL):
opnum = 45
structure = (
('hSCManager',SC_RPC_HANDLE),
('lpServiceName',WSTR),
('lpDisplayName',LPWSTR),
('dwDesiredAccess',DWORD),
('dwServiceType',DWORD),
('dwStartType',DWORD),
('dwErrorControl',DWORD),
('lpBinaryPathName',WSTR),
('lpLoadOrderGroup',LPWSTR),
('lpdwTagId',LPDWORD),
('lpDependencies',LPBYTE),
('dwDependSize',DWORD),
('lpServiceStartName',LPWSTR),
('lpPassword',LPBYTE),
('dwPwSize',DWORD),
)
class RCreateServiceWOW64WResponse(NDRCALL):
structure = (
('lpdwTagId',LPWSTR),
('lpServiceHandle',SC_RPC_HANDLE),
('ErrorCode', DWORD),
)
# Still not working, for some reason something changes in the way the pointer inside SC_RPC_NOTIFY_PARAMS is marshalled here
class RNotifyServiceStatusChange(NDRCALL):
opnum = 47
structure = (
('hService',SC_RPC_HANDLE),
('NotifyParams',SC_RPC_NOTIFY_PARAMS),
('pClientProcessGuid',GUID),
)
class RNotifyServiceStatusChangeResponse(NDRCALL):
structure = (
('pSCMProcessGuid',GUID),
('pfCreateRemoteQueue',PBOOL),
('phNotify',SC_NOTIFY_RPC_HANDLE),
('ErrorCode', DWORD),
)
# Not working until the previous call (RNotifyServiceStatusChange) is fixed
class RGetNotifyResults(NDRCALL):
opnum = 48
structure = (
('hNotify',SC_NOTIFY_RPC_HANDLE),
)
class RGetNotifyResultsResponse(NDRCALL):
structure = (
('ppNotifyParams',PSC_RPC_NOTIFY_PARAMS_LIST),
('ErrorCode', DWORD),
)
# Not working until the previous calls (RNotifyServiceStatusChange / RGetNotifyResults) are fixed
class RCloseNotifyHandle(NDRCALL):
opnum = 49
structure = (
('phNotify',SC_NOTIFY_RPC_HANDLE),
)
class RCloseNotifyHandleResponse(NDRCALL):
structure = (
('phNotify',SC_NOTIFY_RPC_HANDLE),
('pfApcFired',PBOOL),
('ErrorCode', DWORD),
)
# Not working, returning bad_stub_data
class RControlServiceExW(NDRCALL):
opnum = 51
structure = (
('hService',SC_RPC_HANDLE),
('dwControl',DWORD),
('dwInfoLevel',DWORD),
('pControlInParams',SERVICE_CONTROL_STATUS_REASON_IN_PARAMSW),
)
class RControlServiceExWResponse(NDRCALL):
structure = (
('pControlOutParams',SERVICE_CONTROL_STATUS_REASON_OUT_PARAMS),
('ErrorCode', DWORD),
)
class RQueryServiceConfigEx(NDRCALL):
opnum = 56
structure = (
('hService',SC_RPC_HANDLE),
('dwInfoLevel',DWORD),
)
class RQueryServiceConfigExResponse(NDRCALL):
structure = (
('pInfo',SC_RPC_CONFIG_INFOW),
('ErrorCode', DWORD),
)
################################################################################
# OPNUMs and their corresponding structures
################################################################################
OPNUMS = {
0 : (RCloseServiceHandle, RCloseServiceHandleResponse),
1 : (RControlService, RControlServiceResponse),
2 : (RDeleteService, RDeleteServiceResponse),
3 : (RLockServiceDatabase, RLockServiceDatabaseResponse),
4 : (RQueryServiceObjectSecurity, RQueryServiceObjectSecurityResponse),
5 : (RSetServiceObjectSecurity, RSetServiceObjectSecurityResponse),
6 : (RQueryServiceStatus, RQueryServiceStatusResponse),
7 : (RSetServiceStatus, RSetServiceStatusResponse),
8 : (RUnlockServiceDatabase, RUnlockServiceDatabaseResponse),
9 : (RNotifyBootConfigStatus, RNotifyBootConfigStatusResponse),
11 : (RChangeServiceConfigW, RChangeServiceConfigWResponse),
12 : (RCreateServiceW, RCreateServiceWResponse),
13 : (REnumDependentServicesW, REnumDependentServicesWResponse),
14 : (REnumServicesStatusW, REnumServicesStatusWResponse),
15 : (ROpenSCManagerW, ROpenSCManagerWResponse),
16 : (ROpenServiceW, ROpenServiceWResponse),
17 : (RQueryServiceConfigW, RQueryServiceConfigWResponse),
18 : (RQueryServiceLockStatusW, RQueryServiceLockStatusWResponse),
19 : (RStartServiceW, RStartServiceWResponse),
20 : (RGetServiceDisplayNameW, RGetServiceDisplayNameWResponse),
21 : (RGetServiceKeyNameW, RGetServiceKeyNameWResponse),
35 : (REnumServiceGroupW, REnumServiceGroupWResponse),
37 : (RChangeServiceConfig2W, RChangeServiceConfig2WResponse),
39 : (RQueryServiceConfig2W, RQueryServiceConfig2WResponse),
40 : (RQueryServiceStatusEx, RQueryServiceStatusExResponse),
42 : (REnumServicesStatusExW, REnumServicesStatusExWResponse),
45 : (RCreateServiceWOW64W, RCreateServiceWOW64WResponse),
47 : (RNotifyServiceStatusChange, RNotifyServiceStatusChangeResponse),
48 : (RGetNotifyResults, RGetNotifyResultsResponse),
49 : (RCloseNotifyHandle, RCloseNotifyHandleResponse),
51 : (RControlServiceExW, RControlServiceExWResponse),
56 : (RQueryServiceConfigEx, RQueryServiceConfigExResponse),
}
################################################################################
# HELPER FUNCTIONS
################################################################################
def checkNullString(string):
    """Return *string* with a guaranteed trailing NUL terminator.

    The NULL sentinel is passed through unchanged; strings that already end
    in '\\x00' are returned as-is.
    """
    if string == NULL:
        return string
    if string[-1:] == '\x00':
        return string
    return string + '\x00'
def hRCloseServiceHandle(dce, hSCObject):
    """Close an SCM or service context handle (RCloseServiceHandle, opnum 0)."""
    req = RCloseServiceHandle()
    req['hSCObject'] = hSCObject
    return dce.request(req)
def hRControlService(dce, hService, dwControl):
    """Send a control code (SERVICE_CONTROL_*) to a service
    (RControlService, opnum 1)."""
    req = RControlService()
    req['hService'] = hService
    req['dwControl'] = dwControl
    return dce.request(req)
def hRDeleteService(dce, hService):
    """Mark the service identified by *hService* for deletion
    (RDeleteService, opnum 2)."""
    req = RDeleteService()
    req['hService'] = hService
    return dce.request(req)
def hRLockServiceDatabase(dce, hSCManager):
    """Acquire the SCM database lock (RLockServiceDatabase, opnum 3)."""
    req = RLockServiceDatabase()
    req['hSCManager'] = hSCManager
    return dce.request(req)
def hRQueryServiceObjectSecurity(dce, hService, dwSecurityInformation, cbBufSize ):
    """Read the security descriptor of a service object
    (RQueryServiceObjectSecurity, opnum 4)."""
    req = RQueryServiceObjectSecurity()
    req['hService'] = hService
    req['dwSecurityInformation'] = dwSecurityInformation
    req['cbBufSize'] = cbBufSize
    return dce.request(req)
def hRSetServiceObjectSecurity(dce, hService, dwSecurityInformation, lpSecurityDescriptor, cbBufSize ):
    """Set the security descriptor of a service object
    (RSetServiceObjectSecurity, opnum 5).

    Bug fix: lpSecurityDescriptor was accepted but never copied into the
    request, so the descriptor was never actually transmitted to the server.
    """
    request = RSetServiceObjectSecurity()
    request['hService'] = hService
    request['dwSecurityInformation'] = dwSecurityInformation
    request['lpSecurityDescriptor'] = lpSecurityDescriptor
    request['cbBufSize'] = cbBufSize
    return dce.request(request)
def hRQueryServiceStatus(dce, hService ):
    """Build and send an RQueryServiceStatus request for hService."""
    request = RQueryServiceStatus()
    request['hService'] = hService
    return dce.request(request)

def hRSetServiceStatus(dce, hServiceStatus, lpServiceStatus ):
    """Build and send an RSetServiceStatus request, reporting lpServiceStatus
    for the status handle hServiceStatus."""
    request = RSetServiceStatus()
    request['hServiceStatus'] = hServiceStatus
    request['lpServiceStatus'] = lpServiceStatus
    return dce.request(request)

def hRUnlockServiceDatabase(dce, Lock ):
    """Build and send an RUnlockServiceDatabase request, releasing a lock
    obtained via hRLockServiceDatabase()."""
    request = RUnlockServiceDatabase()
    request['Lock'] = Lock
    return dce.request(request)

def hRNotifyBootConfigStatus(dce, lpMachineName, BootAcceptable ):
    """Build and send an RNotifyBootConfigStatus request.

    Note: lpMachineName is passed through as-is (not NUL-terminated here),
    so the caller is responsible for the terminator.
    """
    request = RNotifyBootConfigStatus()
    request['lpMachineName'] = lpMachineName
    request['BootAcceptable'] = BootAcceptable
    return dce.request(request)
def hRChangeServiceConfigW(dce, hService, dwServiceType=SERVICE_NO_CHANGE, dwStartType=SERVICE_NO_CHANGE, dwErrorControl=SERVICE_NO_CHANGE, lpBinaryPathName=NULL, lpLoadOrderGroup=NULL, lpdwTagId=NULL, lpDependencies=NULL, dwDependSize=0, lpServiceStartName=NULL, lpPassword=NULL, dwPwSize=0, lpDisplayName=NULL):
    """Build and send an RChangeServiceConfigW request for hService.

    All configuration arguments default to SERVICE_NO_CHANGE / NULL / 0, so a
    caller only supplies the fields it actually wants to modify.  Plain string
    arguments are NUL-terminated via checkNullString() before being sent.
    """
    changeServiceConfig = RChangeServiceConfigW()
    changeServiceConfig['hService'] = hService
    changeServiceConfig['dwServiceType'] = dwServiceType
    changeServiceConfig['dwStartType'] = dwStartType
    changeServiceConfig['dwErrorControl'] = dwErrorControl
    changeServiceConfig['lpBinaryPathName'] = checkNullString(lpBinaryPathName)
    changeServiceConfig['lpLoadOrderGroup'] = checkNullString(lpLoadOrderGroup)
    changeServiceConfig['lpdwTagId'] = lpdwTagId
    changeServiceConfig['lpDependencies'] = lpDependencies
    # Strings MUST be NULL terminated for lpDependencies
    changeServiceConfig['dwDependSize'] = dwDependSize
    changeServiceConfig['lpServiceStartName'] = checkNullString(lpServiceStartName)
    # NOTE(review): lpPassword is deliberately not run through
    # checkNullString() -- confirm callers pass the expected buffer/size pair.
    changeServiceConfig['lpPassword'] = lpPassword
    changeServiceConfig['dwPwSize'] = dwPwSize
    changeServiceConfig['lpDisplayName'] = checkNullString(lpDisplayName)
    return dce.request(changeServiceConfig)
def hRCreateServiceW(dce, hSCManager, lpServiceName, lpDisplayName, dwDesiredAccess=SERVICE_ALL_ACCESS, dwServiceType=SERVICE_WIN32_OWN_PROCESS, dwStartType=SERVICE_AUTO_START, dwErrorControl=SERVICE_ERROR_IGNORE, lpBinaryPathName=NULL, lpLoadOrderGroup=NULL, lpdwTagId=NULL, lpDependencies=NULL, dwDependSize=0, lpServiceStartName=NULL, lpPassword=NULL, dwPwSize=0):
    """Build and send an RCreateServiceW request, registering a new service.

    Defaults create an own-process, auto-start Win32 service with full access
    requested on the returned handle.  Plain string arguments are
    NUL-terminated via checkNullString() before being sent.
    """
    createService = RCreateServiceW()
    createService['hSCManager'] = hSCManager
    createService['lpServiceName'] = checkNullString(lpServiceName)
    createService['lpDisplayName'] = checkNullString(lpDisplayName)
    createService['dwDesiredAccess'] = dwDesiredAccess
    createService['dwServiceType'] = dwServiceType
    createService['dwStartType'] = dwStartType
    createService['dwErrorControl'] = dwErrorControl
    createService['lpBinaryPathName'] = checkNullString(lpBinaryPathName)
    createService['lpLoadOrderGroup'] = checkNullString(lpLoadOrderGroup)
    createService['lpdwTagId'] = lpdwTagId
    # Strings MUST be NULL terminated for lpDependencies
    createService['lpDependencies'] = lpDependencies
    createService['dwDependSize'] = dwDependSize
    createService['lpServiceStartName'] = checkNullString(lpServiceStartName)
    # NOTE(review): lpPassword is passed through untouched -- confirm callers
    # pass the expected buffer/size pair.
    createService['lpPassword'] = lpPassword
    createService['dwPwSize'] = dwPwSize
    return dce.request(createService)
def hREnumDependentServicesW(dce, hService, dwServiceState, cbBufSize ):
    """Build and send an REnumDependentServicesW request, listing the services
    that depend on hService and match dwServiceState."""
    enumDependentServices = REnumDependentServicesW()
    enumDependentServices['hService'] = hService
    enumDependentServices['dwServiceState'] = dwServiceState
    enumDependentServices['cbBufSize'] = cbBufSize
    return dce.request(enumDependentServices)
def hREnumServicesStatusW(dce, hSCManager, dwServiceType=SERVICE_WIN32_OWN_PROCESS|SERVICE_KERNEL_DRIVER|SERVICE_FILE_SYSTEM_DRIVER|SERVICE_WIN32_SHARE_PROCESS|SERVICE_INTERACTIVE_PROCESS, dwServiceState=SERVICE_STATE_ALL):
    """Enumerate the services registered with the SCM.

    Issues REnumServicesStatusW twice when needed: a first probe with
    cbBufSize=0 so the server answers ERROR_MORE_DATA together with the
    required size, then the real call with that size.  The raw LPBYTE buffer
    returned by the server is re-parsed locally into structured records.

    :return: list of records exposing 'lpServiceName', 'lpDisplayName' and
        'ServiceStatus'
    """
    class ENUM_SERVICE_STATUSW2(NDRSTRUCT):
        # This is a little trick, since the original structure is slightly different
        # but instead of parsing the LPBYTE buffer at hand, we just do it with the aid
        # of the NDR library, although the pointers are swapped from the original specification.
        # Why is this? Well.. since we're getting an LPBYTE back, it's just a copy of the remote's memory
        # where the pointers are actually POINTING to the data.
        # Sadly, the pointers are not aligned based on the services records, so we gotta do this
        # It should be easier in C of course.
        class STR(NDRPOINTER):
            referent = (
                ('Data', WIDESTR),
            )
        structure = (
            ('lpServiceName', STR),
            ('lpDisplayName', STR),
            ('ServiceStatus', SERVICE_STATUS),
        )

    enumServicesStatus = REnumServicesStatusW()
    enumServicesStatus['hSCManager'] = hSCManager
    enumServicesStatus['dwServiceType'] = dwServiceType
    enumServicesStatus['dwServiceState'] = dwServiceState
    # Probe call: an empty buffer makes the server report the needed size.
    enumServicesStatus['cbBufSize'] = 0
    enumServicesStatus['lpResumeIndex'] = NULL
    try:
        resp = dce.request(enumServicesStatus)
    except DCERPCSessionError as e:
        if e.get_error_code() == system_errors.ERROR_MORE_DATA:
            resp = e.get_packet()
            enumServicesStatus['cbBufSize'] = resp['pcbBytesNeeded']
            resp = dce.request(enumServicesStatus)
        else:
            raise

    # Now we're supposed to have all services returned. Now we gotta parse them
    enumArray = NDRUniConformantArray()
    enumArray.item = ENUM_SERVICE_STATUSW2
    enumArray.setArraySize(resp['lpServicesReturned'])

    data = b''.join(resp['lpBuffer'])
    enumArray.fromString(data)
    # Drop the first 4 bytes so the ReferentID-based offsets below line up
    # (presumably the conformant array's count field -- matches the -4 below).
    data = data[4:]
    # Since the pointers here are pointing to the actual data, we have to reparse
    # the referents
    for record in enumArray['Data']:
        offset = record.fields['lpDisplayName'].fields['ReferentID']-4
        name = WIDESTR(data[offset:])
        record['lpDisplayName'] = name['Data']
        offset = record.fields['lpServiceName'].fields['ReferentID']-4
        name = WIDESTR(data[offset:])
        record['lpServiceName'] = name['Data']
    return enumArray['Data']
def hROpenSCManagerW(dce, lpMachineName='DUMMY\x00', lpDatabaseName='ServicesActive\x00', dwDesiredAccess=SERVICE_START | SERVICE_STOP | SERVICE_CHANGE_CONFIG | SERVICE_QUERY_CONFIG | SERVICE_QUERY_STATUS | SERVICE_ENUMERATE_DEPENDENTS | SC_MANAGER_ENUMERATE_SERVICE):
    """Build and send an ROpenSCManagerW request, opening a handle to the
    Service Control Manager database on the target host."""
    openSCManager = ROpenSCManagerW()
    openSCManager['lpMachineName'] = checkNullString(lpMachineName)
    openSCManager['lpDatabaseName'] = checkNullString(lpDatabaseName)
    openSCManager['dwDesiredAccess'] = dwDesiredAccess
    return dce.request(openSCManager)

def hROpenServiceW(dce, hSCManager, lpServiceName, dwDesiredAccess= SERVICE_ALL_ACCESS):
    """Build and send an ROpenServiceW request, opening a handle to the named
    service through an already-open SCM handle."""
    openService = ROpenServiceW()
    openService['hSCManager'] = hSCManager
    openService['lpServiceName'] = checkNullString(lpServiceName)
    openService['dwDesiredAccess'] = dwDesiredAccess
    return dce.request(openService)
def hRQueryServiceConfigW(dce, hService):
    """Query a service's configuration, auto-sizing the response buffer.

    A first call with cbBufSize=0 is expected to fail with
    ERROR_INSUFFICIENT_BUFFER; the required size is then read from the
    error packet's 'pcbBytesNeeded' and the call is retried.
    """
    queryService = RQueryServiceConfigW()
    queryService['hService'] = hService
    queryService['cbBufSize'] = 0
    try:
        resp = dce.request(queryService)
    except DCERPCSessionError as e:
        if e.get_error_code() == system_errors.ERROR_INSUFFICIENT_BUFFER:
            resp = e.get_packet()
            queryService['cbBufSize'] = resp['pcbBytesNeeded']
            resp = dce.request(queryService)
        else:
            raise

    return resp
def hRQueryServiceLockStatusW(dce, hSCManager, cbBufSize ):
    """Build and send an RQueryServiceLockStatusW request for the SCM lock."""
    queryServiceLock = RQueryServiceLockStatusW()
    queryServiceLock['hSCManager'] = hSCManager
    queryServiceLock['cbBufSize'] = cbBufSize
    return dce.request(queryServiceLock)

def hRStartServiceW(dce, hService, argc=0, argv=NULL ):
    """Build and send an RStartServiceW request.

    When argc is 0 the argument vector is sent as NULL; otherwise each string
    in argv is NUL-terminated and appended as an LPWSTR entry.
    """
    startService = RStartServiceW()
    startService['hService'] = hService
    startService['argc'] = argc
    if argc == 0:
        startService['argv'] = NULL
    else:
        for item in argv:
            itemn = LPWSTR()
            itemn['Data'] = checkNullString(item)
            startService['argv'].append(itemn)
    return dce.request(startService)

def hRGetServiceDisplayNameW(dce, hSCManager, lpServiceName, lpcchBuffer ):
    """Build and send an RGetServiceDisplayNameW request (service key name ->
    display name lookup)."""
    getServiceDisplay = RGetServiceDisplayNameW()
    getServiceDisplay['hSCManager'] = hSCManager
    getServiceDisplay['lpServiceName'] = checkNullString(lpServiceName)
    getServiceDisplay['lpcchBuffer'] = lpcchBuffer
    return dce.request(getServiceDisplay)

def hRGetServiceKeyNameW(dce, hSCManager, lpDisplayName, lpcchBuffer ):
    """Build and send an RGetServiceKeyNameW request (display name ->
    service key name lookup)."""
    getServiceKeyName = RGetServiceKeyNameW()
    getServiceKeyName['hSCManager'] = hSCManager
    getServiceKeyName['lpDisplayName'] = checkNullString(lpDisplayName)
    getServiceKeyName['lpcchBuffer'] = lpcchBuffer
    return dce.request(getServiceKeyName)

def hREnumServiceGroupW(dce, hSCManager, dwServiceType, dwServiceState, cbBufSize, lpResumeIndex = NULL, pszGroupName = NULL ):
    """Build and send an REnumServiceGroupW request, enumerating services that
    belong to the given load-order group."""
    enumServiceGroup = REnumServiceGroupW()
    enumServiceGroup['hSCManager'] = hSCManager
    enumServiceGroup['dwServiceType'] = dwServiceType
    enumServiceGroup['dwServiceState'] = dwServiceState
    enumServiceGroup['cbBufSize'] = cbBufSize
    enumServiceGroup['lpResumeIndex'] = lpResumeIndex
    enumServiceGroup['pszGroupName'] = pszGroupName
    return dce.request(enumServiceGroup)
| 31.8557 | 367 | 0.673265 |
b06c38ede59ccbd400075b5fca547eb666c5f310 | 3,705 | py | Python | game/Window.py | alkief/PurpleRain | c197f4f8d7654f667f715f667df870d97b50a8f7 | [
"MIT"
] | null | null | null | game/Window.py | alkief/PurpleRain | c197f4f8d7654f667f715f667df870d97b50a8f7 | [
"MIT"
] | null | null | null | game/Window.py | alkief/PurpleRain | c197f4f8d7654f667f715f667df870d97b50a8f7 | [
"MIT"
] | null | null | null | from tkinter import Canvas, Frame, Button, Label, BOTH
import tkinter as tk
import threading
from . import Const
class Window(tk.Frame):
    """Main game window: a Tk root holding a single black Canvas that the game
    renders onto via draw().

    The game loop polls is_alive; quit() clears it before tearing the window
    down so the loop can exit cleanly.
    """

    def __init__(self):
        self.master = tk.Tk()
        # Route window-manager close events through quit() so is_alive is
        # cleared before the Tk root is destroyed.
        self.master.protocol('WM_DELETE_WINDOW', self.quit)
        # NOTE(review): Frame.__init__ returns None, so self.root is always
        # None; kept as-is since nothing in this class reads it.
        self.root = tk.Frame.__init__(self, self.master, width=300, height=300)
        self.pack(fill=BOTH, expand=True)
        self.canvas = tk.Canvas(self, width=300, height=300, highlightthickness=0, bg='black')
        self.canvas.pack()
        self.canvas.focus_set()  # Set window focus
        self.master.title('PurpRain')  # Set the window title
        self.pack(fill=BOTH, expand=True)
        self.canvas.pack(fill=BOTH, expand=True)
        self.update()
        self.update_idletasks()
        # Flag polled by the game loop; cleared by quit().
        self.is_alive = True

    def draw(self, transition):
        """Apply a batch of game transitions to the canvas, then refresh.

        Errors while drawing (e.g. the window was destroyed mid-frame) are
        swallowed so a closing window does not crash the game loop.
        """
        try:
            # SEE: game.Transition for information on Transition structure
            for t in transition:
                # For drawing the initial player rectangle
                if t.type == Const.DRAW_HERO:
                    self.canvas.create_rectangle(t.bbox[0], t.bbox[1], t.bbox[2], t.bbox[3],
                                                 fill='white', tags='hero')
                # Draw optimal square position
                elif t.type == Const.DRAW_OPTIMAL:
                    self.canvas.create_rectangle(t.bbox[0], t.bbox[1], t.bbox[2], t.bbox[3],
                                                 fill='green', tags='optimal')
                # Draw initial raindrops
                elif t.type == Const.DRAW_RAIN:
                    self.canvas.create_line(t.x, t.y, t.x, t.y + t.length,
                                            fill='purple', tags=(t.rain_id, 'rain'))
                # Move item with a given unique tag by specified distance
                elif t.type == Const.MOVE_ENTITY:
                    entity = self.canvas.find_withtag(t.tag)
                    self.canvas.move(entity, t.dx, t.dy)
                # Draw the initial score label
                elif t.type == Const.DRAW_SCORE:
                    self.canvas.create_text(10, 10, anchor='nw', text='0', tags='score', fill='white')
                # Update the score label
                elif t.type == Const.UPDATE_SCORE:
                    score_label = self.canvas.find_withtag('score')
                    self.canvas.itemconfig(score_label, text=str(t.new_score))
            self.update()
        # Fix: was a bare "except:" which also traps SystemExit and
        # KeyboardInterrupt; Exception keeps the best-effort behaviour
        # without hiding interpreter shutdown requests.
        except Exception:
            pass

    # Display the main menu with 'Start' and 'Options' buttons
    # @Param start_game{function}: calls logic from engine to trigger game start
    def menu(self, start_game):
        self.clear()
        # Display the 'Start' button
        startBtn = Button(self, text='Start', command=start_game)
        startBtn.config(width='10', activebackground='#33B5E5')
        self.canvas.create_window((self.width()/2) - 100, self.height()/2, window=startBtn)
        # Display the 'Options' button
        optionBtn = Button(self, text='Options', command=None)
        optionBtn.config(width='10', activebackground='#33B5E5')
        self.canvas.create_window((self.width()/2) + 100, self.height()/2, window=optionBtn)

    def width(self):
        """Current window width in pixels; falls back to 1 if Tk is gone."""
        width = 1
        try:
            width = self.master.winfo_width()
        except Exception:  # fix: narrowed from bare except
            pass
        return width

    def height(self):
        """Current window height in pixels; falls back to 1 if Tk is gone."""
        height = 1
        try:
            height = self.master.winfo_height()
        except Exception:  # fix: narrowed from bare except
            pass
        return height

    def clear(self):
        """Remove every item from the canvas, ignoring a dead window."""
        try:
            self.canvas.delete('all')
        except Exception:  # fix: narrowed from bare except
            pass

    def quit(self):
        """Mark the game loop as finished and destroy the Tk root."""
        self.is_alive = False
        self.master.destroy()
| 39 | 106 | 0.551687 |
589f47f2afc52626a2d1fe24c1add261f8fd10c1 | 1,102 | py | Python | venv/lib/python3.8/site-packages/azureml/_restclient/models/metric_definition_dto.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | [
"Unlicense",
"MIT"
] | null | null | null | venv/lib/python3.8/site-packages/azureml/_restclient/models/metric_definition_dto.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | [
"Unlicense",
"MIT"
] | null | null | null | venv/lib/python3.8/site-packages/azureml/_restclient/models/metric_definition_dto.py | amcclead7336/Enterprise_Data_Science_Final | ccdc0aa08d4726bf82d71c11a1cc0c63eb301a28 | [
"Unlicense",
"MIT"
] | 2 | 2021-05-23T16:46:31.000Z | 2021-05-26T23:51:09.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator 2.3.33.0
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class MetricDefinitionDto(Model):
    """MetricDefinitionDto.

    Auto-generated msrest data model (see the AutoRest header above); it
    carries data only, with no behaviour of its own.

    :param metric_key:
    :type metric_key: ~_restclient.models.DerivedMetricKeyDto
    :param columns:
    :type columns: dict[str, str]
    """

    # Maps each Python attribute to its wire (JSON) key and msrest type,
    # consumed by msrest during (de)serialization.
    _attribute_map = {
        'metric_key': {'key': 'metricKey', 'type': 'DerivedMetricKeyDto'},
        'columns': {'key': 'columns', 'type': '{str}'},
    }

    def __init__(self, metric_key=None, columns=None):
        super(MetricDefinitionDto, self).__init__()
        self.metric_key = metric_key
        self.columns = columns
| 33.393939 | 77 | 0.573503 |
2f542bfa2cbf7a7544c460c16831a0e1d3dce9c7 | 6,416 | py | Python | micro_dl/networks/unet_stack_stack.py | leanhphuong201/microDL | 6ba9ec242d38a0ec32183cb4a9974b27a8722e30 | [
"MIT"
] | 21 | 2018-11-07T15:34:37.000Z | 2022-03-18T14:51:05.000Z | micro_dl/networks/unet_stack_stack.py | leanhphuong201/microDL | 6ba9ec242d38a0ec32183cb4a9974b27a8722e30 | [
"MIT"
] | 72 | 2018-10-01T21:58:47.000Z | 2022-03-28T20:26:38.000Z | micro_dl/networks/unet_stack_stack.py | leanhphuong201/microDL | 6ba9ec242d38a0ec32183cb4a9974b27a8722e30 | [
"MIT"
] | 7 | 2018-11-20T19:30:12.000Z | 2022-02-23T06:56:14.000Z | """Unet for 3D volumes with anisotropic shape"""
import numpy as np
import tensorflow as tf
from keras.layers import Activation, Conv3D, Input
from micro_dl.networks.base_unet import BaseUNet
from micro_dl.networks.conv_blocks import conv_block, residual_conv_block
from micro_dl.utils.network_utils import get_keras_layer
class UNetStackToStack(BaseUNet):
    """Unet for anisotropic stacks.

    Downsampling halves z only while more than one slice remains; once the
    stack collapses to a single slice, filters and pooling/upsampling become
    effectively 2D (size 1 along z).
    """

    def __init__(self, network_config, predict=False):
        """Init

        :param dict network_config: dict with all network associated parameters
        :param bool predict: when True, skip the power-of-two depth check
            (training requires depth to be a power of 2 so repeated /2 and x2
            resampling stays exact)
        """
        assert 'depth' in network_config and network_config['depth'] > 1, \
            'depth is missing in network_config'
        num_slices = network_config['depth']
        # Bug fix: the two literals were previously concatenated without a
        # separating space, producing '...networkupsamples...'.
        msg = 'Depth of the input has to be in powers of 2 as this network' \
              ' upsamples and downsamples in factors of 2'
        if not predict:
            # log2(depth) must be an integer, i.e. depth is a power of two.
            if np.mod(np.log2(num_slices), 1) > 0:
                raise ValueError(msg)
        assert ('padding' not in network_config or
                network_config['padding'] == 'same'), \
            'Due to anisotropic filter shape only padding=same allowed here'
        super().__init__(network_config)
        num_down_blocks = len(network_config['num_filters_per_block'])
        self.num_down_blocks = num_down_blocks

    def _get_filter_shape(self, filter_size, input_layer):
        """Get the filter shape depending on z dimension of input layer.

        Once the stack is reduced to a single z-slice, convolution filters and
        down/up-sampling must not touch z anymore (size 1 along that axis).

        :param filter_size: int or (y, x) tuple/list of filter sizes
        :param keras.layer input_layer: as named
        :return: tuple (filter_shape, down_up_sample_shape)
        """
        if self.config['data_format'] == 'channels_first':
            z_dim = 2
        else:
            z_dim = 1

        num_slices = input_layer.shape.as_list()[z_dim]
        if num_slices == 1:
            if isinstance(filter_size, (tuple, list)):
                filter_shape = (1, filter_size[0], filter_size[1])
            else:
                # assuming it is an int
                filter_shape = (1, filter_size, filter_size)
            down_up_sample_shape = (1, 2, 2)
        else:
            filter_shape = filter_size
            down_up_sample_shape = (2, 2, 2)
        return filter_shape, down_up_sample_shape

    def _downsampling_block(self,
                            input_layer,
                            block_idx,
                            filter_shape=None,
                            downsample_shape=None):
        """Downsampling blocks of U-net

        :param keras.layer input_layer: must be the output of Input layer
        :param int block_idx: as named
        :param tuple filter_shape: filter size is an int for most cases.
         filter_shape enables passing anisotropic filter shapes
        :param tuple downsample_shape: pooling window shape
        :return keras.layer layer: output layer of bridge/middle block
         skip_layers_list: list of all skip layers
        """
        if self.config['residual']:
            layer = residual_conv_block(layer=input_layer,
                                        network_config=self.config,
                                        block_idx=block_idx)
        else:
            layer = conv_block(layer=input_layer,
                               network_config=self.config,
                               block_idx=block_idx)
        skip_layers = layer
        # The deepest (bridge) block is not followed by pooling.
        if block_idx < self.num_down_blocks - 1:
            pool_object = get_keras_layer(type=self.config['pooling_type'],
                                          num_dims=self.config['num_dims'])
            layer = pool_object(pool_size=downsample_shape,
                                data_format=self.config['data_format'])(layer)
        return layer, skip_layers

    def build_net(self):
        """Assemble the network"""
        with tf.name_scope('input'):
            input_layer = inputs = Input(shape=self._get_input_shape)

        # ---------- Downsampling + middle blocks ---------
        init_filter_size = self.config['filter_size']
        skip_layers_list = []
        for block_idx in range(self.num_down_blocks):
            block_name = 'down_block_{}'.format(block_idx + 1)
            # Filter/pool shapes depend on how many z-slices are left.
            filter_shape, downsample_shape = self._get_filter_shape(
                self.config['filter_size'],
                input_layer
            )
            with tf.name_scope(block_name):
                layer, cur_skip_layers = self._downsampling_block(
                    input_layer=input_layer,
                    block_idx=block_idx,
                    filter_shape=filter_shape,
                    downsample_shape=downsample_shape
                )
            skip_layers_list.append(cur_skip_layers)
            input_layer = layer

        # ------------- Upsampling / decoding blocks -------------
        for block_idx in reversed(range(self.num_down_blocks - 1)):
            cur_skip_layers = skip_layers_list[block_idx]
            block_name = 'up_block_{}'.format(block_idx)
            # Upsample shape is derived from the matching skip connection so
            # z is restored in the same steps it was reduced.
            filter_shape, upsample_shape = self._get_filter_shape(
                init_filter_size, cur_skip_layers
            )
            with tf.name_scope(block_name):
                layer = super()._upsampling_block(
                    input_layers=input_layer,
                    skip_layers=cur_skip_layers,
                    block_idx=block_idx,
                    filter_shape=filter_shape,
                    upsampling_shape=upsample_shape
                )
            input_layer = layer

        # ------------ output block ------------------------
        final_activation = self.config['final_activation']
        with tf.name_scope('output'):
            # 1x1x1 convolution maps the feature channels down to one output.
            layer = Conv3D(filters=1,
                           kernel_size=(1, 1, 1),
                           padding='same',
                           kernel_initializer='he_normal',
                           data_format=self.config['data_format'])(input_layer)
            outputs = Activation(final_activation)(layer)
        return inputs, outputs

    @property
    def _get_input_shape(self):
        """Return shape of input"""
        if self.config['data_format'] == 'channels_first':
            shape = (self.config['num_input_channels'],
                     self.config['depth'],
                     self.config['height'],
                     self.config['width'])
        else:
            shape = (self.config['depth'],
                     self.config['height'],
                     self.config['width'],
                     self.config['num_input_channels'])
        return shape
| 39.361963 | 79 | 0.564059 |
b5c9671c587f6fb61fc54607f19c442f4a3ddb70 | 1,412 | py | Python | test/compute/test_rackspace.py | ggreer/libcloud | a391ccdc0d068d37cb906a703f1494af50d83c8f | [
"Apache-2.0"
] | 1 | 2021-06-14T11:11:39.000Z | 2021-06-14T11:11:39.000Z | test/compute/test_rackspace.py | ggreer/libcloud | a391ccdc0d068d37cb906a703f1494af50d83c8f | [
"Apache-2.0"
] | null | null | null | test/compute/test_rackspace.py | ggreer/libcloud | a391ccdc0d068d37cb906a703f1494af50d83c8f | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from libcloud.compute.drivers.rackspace import RackspaceNodeDriver
from test.compute.test_openstack import OpenStack_1_0_Tests
from test.secrets import RACKSPACE_PARAMS
class RackspaceTests(OpenStack_1_0_Tests):
    """First-gen Rackspace driver tests, reusing the OpenStack 1.0 suite with
    the Rackspace driver substituted in."""

    should_list_locations = True
    should_have_pricing = True

    driver_klass = RackspaceNodeDriver
    driver_type = RackspaceNodeDriver
    driver_args = RACKSPACE_PARAMS

    def test_list_sizes_pricing(self):
        """Every advertised node size must carry a positive price."""
        for node_size in self.driver.list_sizes():
            self.assertTrue(node_size.price > 0)


if __name__ == '__main__':
    sys.exit(unittest.main())
| 35.3 | 74 | 0.768414 |
4ca20a76e904aef7a4bb902af8fb391676d71a67 | 502 | py | Python | weborchestra/wsgi.py | loictessier/weborchestra | 80a7d8e3d94b84df65e8ecc52ea8b5cd200204e0 | [
"MIT"
] | null | null | null | weborchestra/wsgi.py | loictessier/weborchestra | 80a7d8e3d94b84df65e8ecc52ea8b5cd200204e0 | [
"MIT"
] | 4 | 2020-12-15T11:13:59.000Z | 2021-06-10T20:28:54.000Z | weborchestra/wsgi.py | loictessier/weborchestra | 80a7d8e3d94b84df65e8ecc52ea8b5cd200204e0 | [
"MIT"
] | null | null | null | """
WSGI config for weborchestra project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
import dotenv
# Load environment variables from the .env file in the project root before
# Django reads its configuration.
dotenv.read_dotenv(os.path.join(os.path.dirname(os.path.dirname(__file__)), '.env'))

# Fall back to the project's settings module when the variable is not already
# set by the environment.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'weborchestra.settings')

# Module-level WSGI callable picked up by WSGI servers.
application = get_wsgi_application()
| 23.904762 | 84 | 0.782869 |
a41bf867f8b992a7ab000c496cf606f8b947de2a | 4,278 | py | Python | src/constraint.py | phyzhangyang/EasyScan_Hep | 7076a200fb5011ca2f9909ed7a688c2f2512f272 | [
"Apache-2.0"
] | 6 | 2017-04-12T06:22:01.000Z | 2021-10-18T00:22:39.000Z | src/constraint.py | phyzhangyang/EasyScan_Hep | 7076a200fb5011ca2f9909ed7a688c2f2512f272 | [
"Apache-2.0"
] | 40 | 2017-12-27T02:39:54.000Z | 2019-05-07T08:23:26.000Z | src/constraint.py | phyzhangyang/EasyScan_Hep | 7076a200fb5011ca2f9909ed7a688c2f2512f272 | [
"Apache-2.0"
] | 2 | 2018-05-09T00:41:01.000Z | 2019-12-01T14:19:51.000Z | ####################################################################
# Class CONSTRAINT: control CONSTRAINT #
####################################################################
# Internal modules
import auxfun as af
class CONSTRAINT:
    """Collects experimental constraints (Gaussian and free-form chi2 terms)
    parsed from the scan configuration and evaluates the total chi2 of a
    parameter point.

    Chi2 maps each constraint name (plus the running total under 'Chi2') to
    its latest value; entries start out as af.NaN until getChisq() runs.
    """

    def __init__(self):
        self._Gaussian = []
        #self._Limit=[]
        self._FreeFormChi2 = []
        self.Chi2 = {}
        self.Chi2['Chi2'] = af.NaN

    def setGaussian(self, var):
        """Parse 'Gaussian' constraint items from the config string.

        Each item is (ID, Mean, Deviation[, Type[, Name]]).  Type defaults to
        'symm' and Name to 'Gaussian_<ID>'; items are stored internally as
        5-element lists [ID, Mean, Deviation, Type, Name].
        """
        var = af.string2nestlist(var)
        af.Info('Gaussian Constraint:')
        for ii in var:
            if len(ii) in [3]:
                # Only (ID, Mean, Deviation) given: fill in default Type/Name.
                jj = ii + ['symm', 'Gaussian_%s' % ii[0]]
                self._Gaussian.append(jj)
                self.Chi2[jj[4]] = af.NaN
                af.Info('  varID= %s\tMean= %e\tDeviation= %e\tType= %s\tName= %s' % (jj[0], jj[1], jj[2], jj[3], jj[4]))
            elif len(ii) in [4, 5]:
                if not ii[3].lower() in ['symm', 'lower', 'upper']:
                    af.ErrorStop('For the "Gaussian" constraint on "%s", the "Type" can only be "symm", "upper" or "lower", not "%s".' % (ii[0], ii[3]))
                ## new 20180428 liang
                if len(ii) == 4:
                    jj = ii + ['Gaussian_%s' % ii[0]]
                else:
                    jj = ii
                self._Gaussian.append(jj)
                self.Chi2[jj[4]] = af.NaN
                af.Info('  varID= %s\tMean= %e\tDeviation= %e\tType= %s\tName= %s' % (jj[0], jj[1], jj[2], jj[3], jj[4]))
            else:
                af.ErrorStop('The "Gaussian" constraint on "%s" need 4 or 5 items( ID, Mean, Deviation, Type [, Name] ).' % (ii[0]))
        ## new 20180428 liang
        self.Chi2 = af.sortDic(self.Chi2)

    ## marked 20180430 liang
    # def setLimit(self,var):
    #    var = af.string2nestlist(var)
    #    af.Info('Upper/Lower limit:')
    #    for ii in var:
    #        if len(ii) == 4:
    #            self._Limit.append(ii)
    #            af.Info('  varID(X)= %s\tvarID(Y)= %s\tConstraintFile= %s\tType= %s'%(ii[0],ii[1],ii[2],ii[3]))
    #            ## add check the ConstraintFile exist
    #            ## add check the ConstraintFile has two columns and >1 lines
    #            ## very useful, you can simply let a>b
    #        else:
    #            af.ErrorStop( 'The "Limit" constraint on "(%s,%s)" need 4 items( X ID, Y ID, ConstraintFile, Type ).'%(ii[0],ii[1]) )

    ## new 20180430 liang
    def setFreeFormChi2(self, var):
        """Parse 'FreeFormChi2' items: (VarID[, Name]); the variable itself is
        taken directly as a chi2 contribution in getChisq()."""
        var = af.string2nestlist(var)
        af.Info('FreeFormChi2:')
        for ii in var:
            if len(ii) in [1, 2]:
                if len(ii) == 1:
                    jj = ii + ['FreeFormChi2_%s' % ii[0]]
                else:
                    jj = ii
                self._FreeFormChi2.append(jj)
                af.Info('  varID= %s\tName= %s' % (jj[0], jj[1]))
                self.Chi2[jj[1]] = af.NaN
            else:
                af.ErrorStop('The "FreeFormChi2" constraint on "%s" need 1 item or 2 items( VarID [, Name] ).' % (ii[0]))
        self.Chi2 = af.sortDic(self.Chi2)

    def getChisq(self, par):
        """Return the total chi2 of the point described by *par*.

        Non-numeric Gaussian inputs short-circuit to af.log_zero (point
        rejected).  Side effect: each constraint's contribution and the total
        are stored in self.Chi2.
        """
        chisq = 0.0
        ## add for "math ..." in [constrain]
        af.parseMath(par)
        for ii in self._Gaussian:
            if not af.is_number(par[ii[0]]):
                return af.log_zero
            if ii[3].lower() == 'symm':
                ichisq = (ii[1] - par[ii[0]])**2 / ii[2]**2
            elif ii[3].lower() == 'upper':
                # One-sided: no penalty while the value stays below the bound.
                if par[ii[0]] <= ii[1]:
                    ichisq = 0
                else:
                    ichisq = (ii[1] - par[ii[0]])**2 / ii[2]**2
            elif ii[3].lower() == 'lower':
                # One-sided: no penalty while the value stays above the bound.
                if par[ii[0]] >= ii[1]:
                    ichisq = 0
                else:
                    ichisq = (ii[1] - par[ii[0]])**2 / ii[2]**2
            ## new 20180428 liang
            self.Chi2[ii[4]] = ichisq
            chisq += ichisq

        ## marked 20180430 liang
        #for ii in self._Limit:
        #    af.ErrorStop('Line limit constraint is not ready.')

        for ii in self._FreeFormChi2:
            # The variable value itself is the chi2 contribution.
            ichisq = par[ii[0]]
            self.Chi2[ii[1]] = ichisq
            chisq += ichisq

        ## new 20180428 liang
        self.Chi2['Chi2'] = chisq
        return chisq
| 35.65 | 151 | 0.443198 |
c37da10c99475e5b471e4aec7210998f8392226b | 1,971 | py | Python | ambari-server/src/main/resources/stacks/ADH/1.0/services/HBASE/package/scripts/hbase_thrift.py | Arenadata/ambari | 4628267441121779113d98936dcdf5d9be60553c | [
"Apache-2.0"
] | 5 | 2017-07-20T11:15:10.000Z | 2020-04-16T15:42:55.000Z | ambari-server/src/main/resources/stacks/ADH/1.0/services/HBASE/package/scripts/hbase_thrift.py | Arenadata/ambari | 4628267441121779113d98936dcdf5d9be60553c | [
"Apache-2.0"
] | 3 | 2017-08-04T14:02:17.000Z | 2018-06-06T14:47:25.000Z | ambari-server/src/main/resources/stacks/ADH/1.0/services/HBASE/package/scripts/hbase_thrift.py | Arenadata/ambari | 4628267441121779113d98936dcdf5d9be60553c | [
"Apache-2.0"
] | 12 | 2017-05-17T09:48:01.000Z | 2021-08-05T19:01:25.000Z | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management.core.resources.system import Execute
from resource_management.core.resources.system import File
from resource_management.libraries.functions import check_process_status, format
def hbase_thrift_server(action = 'start'): # 'start', 'stop', 'status'
  """Manage the HBase Thrift server daemon via its init-style script.

  :param action: one of 'start', 'stop' or 'status'
  """
  pid_file = format("{pid_dir}/hbase-{hbase_user}-thrift.pid")
  # Shell test that succeeds only while the recorded PID is still alive.
  no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps -p `cat {pid_file}` >/dev/null 2>&1")

  if action == "status":
    check_process_status(pid_file)
  else:
    env = {'JAVA_HOME': format("{java64_home}"), 'HBASE_CONF_DIR': format("{hbase_conf_dir}")}
    if action == 'start':
      Execute(format("{thrift_daemon_script} {action} thrift -p 9090 --infoport 9080"),
              user=format("{hbase_user}"),
              environment=env)
    elif action == 'stop':
      Execute(format("{thrift_daemon_script} {action} thrift"),
              timeout = 30,
              # If the graceful stop times out, force-kill the recorded PID.
              on_timeout = format("! ( {no_op_test} ) || {sudo} -H -E kill -9 `cat {pid_file}`"),
              user=format("{hbase_user}"),
              environment=env
      )
      # Remove the stale pid file once the daemon is down.
      File(pid_file,
           action = "delete"
      )
| 41.0625 | 99 | 0.682395 |
ff9f336ddc3b15b6d887e572fc3fa9e6c77483ba | 1,576 | py | Python | 24. Exam Prep/exam_02apr/tests/test_trap_card.py | elenaborisova/Python-OOP | 584882c08f84045b12322917f0716c7c7bd9befc | [
"MIT"
] | 1 | 2021-03-27T16:56:30.000Z | 2021-03-27T16:56:30.000Z | 24. Exam Prep/exam_02apr/tests/test_trap_card.py | elenaborisova/Python-OOP | 584882c08f84045b12322917f0716c7c7bd9befc | [
"MIT"
] | null | null | null | 24. Exam Prep/exam_02apr/tests/test_trap_card.py | elenaborisova/Python-OOP | 584882c08f84045b12322917f0716c7c7bd9befc | [
"MIT"
] | 1 | 2021-03-15T14:50:39.000Z | 2021-03-15T14:50:39.000Z | import unittest
from exam_02apr.project.card.trap_card import TrapCard
class TrapCardTests(unittest.TestCase):
    """Unit tests covering TrapCard construction and point validation."""

    def test_trapCardInit_whenValidName_shouldSetIt(self):
        card = TrapCard('test')
        self.assertEqual('test', card.name)

    def test_trapCardInit_whenEmptyStringName_shouldRaise(self):
        with self.assertRaises(ValueError) as context:
            TrapCard('')
        self.assertIsNotNone(context.exception)
        self.assertEqual(str(context.exception), "Card's name cannot be an empty string.")

    def test_trapCardInit_shouldSetDamagePointsAndHealthPointsInInitialization(self):
        card = TrapCard('test')
        self.assertEqual(120, card.damage_points)
        self.assertEqual(5, card.health_points)

    def test_trapCardInit_whenDamagePointsNegative_shouldRaise(self):
        card = TrapCard('test')
        with self.assertRaises(ValueError) as context:
            card.damage_points = -50
        self.assertIsNotNone(context.exception)
        self.assertEqual(str(context.exception), "Card's damage points cannot be less than zero.")

    def test_trapCardInit_whenHealthPointsNegative_shouldRaise(self):
        card = TrapCard('test')
        with self.assertRaises(ValueError) as context:
            card.health_points = -50
        self.assertIsNotNone(context.exception)
        self.assertEqual(str(context.exception), "Card's HP cannot be less than zero.")
| 33.531915 | 98 | 0.70368 |
5405c93721473b6002eeb80501680be7da3f30c0 | 1,089 | py | Python | manage.py | RodrigoSantosRodrigues/Biology | 8032238957f23ad50e9150f991c13dfdc77f11f1 | [
"MIT"
] | null | null | null | manage.py | RodrigoSantosRodrigues/Biology | 8032238957f23ad50e9150f991c13dfdc77f11f1 | [
"MIT"
] | null | null | null | manage.py | RodrigoSantosRodrigues/Biology | 8032238957f23ad50e9150f991c13dfdc77f11f1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# /manage.py
"""
...Web Flask com autorização JWT (JSON Web Token authorization)
------------------------------------------------------------------------
Migrations
------------------------------------------------------------------------
URLs: https://codeburst.io/jwt-authorization-in-flask-c63c1acf4eeb
https://medium.com/@dushan14/create-a-web-application-with-python-flask-postgresql-and-deploy-on-heroku-243d548335cc
https://github.com/oleg-agapov/flask-jwt-auth
https://www.codementor.io/olawalealadeusi896/restful-api-with-python-flask-framework-and-postgres-db-part-1-kbrwbygx5
Migrar alterações de modelos para o banco de dados.
"""
import os
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from src.app import create_app, db
# Read the active environment name from FLASK_ENV and build the Flask
# application for it.
env_name = os.getenv('FLASK_ENV')
app = create_app(env_name)

# Wire Flask-Migrate into Flask-Script so schema migrations can be driven
# from the command line: python manage.py db <command>
migrate = Migrate(app=app, db=db)
manager = Manager(app=app)
manager.add_command('db', MigrateCommand)

if __name__ == '__main__':
    manager.run()
| 32.029412 | 127 | 0.614325 |
45603213e9d3073fbeb0f3e3b8bc1f3bff3daedb | 615 | py | Python | tests/run_tests.py | fluxility/drf-haystack | 6ff951b9d3fcba0704f891c964bf09374438d530 | [
"MIT"
] | 201 | 2015-02-14T08:17:35.000Z | 2019-07-10T04:19:04.000Z | tests/run_tests.py | fluxility/drf-haystack | 6ff951b9d3fcba0704f891c964bf09374438d530 | [
"MIT"
] | 138 | 2015-02-17T09:28:33.000Z | 2019-07-30T10:29:52.000Z | tests/run_tests.py | fluxility/drf-haystack | 6ff951b9d3fcba0704f891c964bf09374438d530 | [
"MIT"
] | 60 | 2015-04-01T14:51:18.000Z | 2019-05-12T15:31:52.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import os
import sys
import nose
def start(argv=None):
    """Run the drf-haystack test suite under nose with coverage enabled.

    Args:
        argv: Optional nose argument list. When None, a default
            coverage-enabled invocation is used; otherwise the list is
            forwarded verbatim to ``nose.run_exit``.
    """
    import atexit

    # BUG FIX: ``sys.exitfunc`` was a Python 2-only hook; assigning it on
    # Python 3 is a silent no-op, so the shutdown notice never printed.
    # atexit.register works on both interpreter lines.
    atexit.register(lambda: sys.stderr.write("Shutting down...\n"))
    if argv is None:
        argv = [
            "nosetests",
            "--verbose",
            "--with-coverage",
            "--cover-erase",
            "--cover-branches",
            "--cover-package=drf_haystack",
        ]
    # Discover and run tests from this file's directory; run_exit calls
    # sys.exit with the result, so this function does not return normally.
    nose.run_exit(argv=argv, defaultTest=os.path.abspath(os.path.dirname(__file__)))
# Script entry point: forwards the full command line (argv[0] included,
# which nose treats as the program name) to the runner.
if __name__ == "__main__":
    start(sys.argv)
| 20.5 | 84 | 0.590244 |
6b4d00c3a0ec24dd9556ec5259e6af324ddb60f3 | 4,636 | py | Python | SC101_Projects/SC101_Assignment4/babynames.py | TobyCCC/MystanCodeProJects | fac747e681875777b5d40bac3bcfebe204ca44f8 | [
"MIT"
] | null | null | null | SC101_Projects/SC101_Assignment4/babynames.py | TobyCCC/MystanCodeProJects | fac747e681875777b5d40bac3bcfebe204ca44f8 | [
"MIT"
] | null | null | null | SC101_Projects/SC101_Assignment4/babynames.py | TobyCCC/MystanCodeProJects | fac747e681875777b5d40bac3bcfebe204ca44f8 | [
"MIT"
] | null | null | null | """
SC101 Baby Names Project
Adapted from Nick Parlante's Baby Names assignment by
Jerry Liao.
YOUR DESCRIPTION HERE
"""
import sys
def add_data_for_name(name_data, year, rank, name):
    """
    Record one (year, rank) observation for *name* in the name_data dict.

    Input:
        name_data (dict): dict holding baby name data
        year (str): the year of the data entry to add
        rank (str): the rank of the data entry to add
        name (str): the name of the data entry to add
    Output:
        Mutates name_data in place; returns nothing. If the same name/year
        pair is seen more than once, only the best (numerically lowest)
        rank is kept.
    """
    years_to_rank = name_data.setdefault(name, {})
    existing = years_to_rank.get(year)
    # Keep the entry only if it is the first for this year or beats the
    # previously stored rank (ranks are stored as strings, compared as ints).
    if existing is None or int(rank) < int(existing):
        years_to_rank[year] = rank
def add_file(name_data, filename):
    """
    Populate name_data with the baby-name entries found in *filename*.

    Input:
        name_data (dict): dict holding baby name data
        filename (str): name of the file holding baby name data
    Output:
        Mutates name_data in place; returns nothing. A line with a single
        field is treated as the year header; every other line is
        "rank,boy_name,girl_name".
    """
    with open(filename, 'r') as source:
        for raw_line in source:
            fields = [field.strip() for field in raw_line.split(',')]
            if len(fields) == 1:
                # Header line: the lone field is the year for the rows below.
                year = fields[0]
            else:
                rank = fields[0]
                boy_name = fields[1]
                girl_name = fields[2]
                add_data_for_name(name_data, year, rank, boy_name)
                add_data_for_name(name_data, year, rank, girl_name)
def read_files(filenames):
    """
    Merge the data from every file in *filenames* into one name_data dict.

    Input:
        filenames (List[str]): a list of filenames containing baby name data
    Returns:
        dict: all baby name data keyed by name, then by year.
    """
    merged = {}
    for path in filenames:
        add_file(merged, path)
    return merged
def search_names(name_data, target):
    """
    Return all names in name_data containing *target*, case-insensitively.

    Input:
        name_data (dict): a dict containing baby name data organized by name
        target (str): a string to look for in the names within name_data
    Returns:
        List[str]: the matching names, in the dict's iteration order.
    """
    needle = target.lower()
    return [name for name in name_data if needle in name.lower()]
def print_names(name_data):
    """
    (provided, DO NOT MODIFY)
    Given a name_data dict, print out all its data, one name per line.
    The names are printed in alphabetical order,
    with the corresponding years data displayed in increasing order.
    Input:
        name_data (dict): a dict containing baby name data organized by name
    Returns:
        This function does not return anything
    """
    # sorted() on the items orders output alphabetically by name; the inner
    # sort orders the (year, rank) pairs lexicographically, which is
    # chronological for equal-width year strings.
    for key, value in sorted(name_data.items()):
        print(key, sorted(value.items()))
def main():
    # (provided, DO NOT MODIFY)
    """Entry point: parse an optional -search flag, load the data files,
    then either print matching names or dump everything."""
    args = sys.argv[1:]
    # Two command line forms
    # 1. file1 file2 file3 ..
    # 2. -search target file1 file2 file3 ..
    # Assume no search, so list of filenames to read
    # is the args list
    filenames = args
    # Check if we are doing search, set target variable
    target = ''
    if len(args) >= 2 and args[0] == '-search':
        target = args[1]
        filenames = args[2:]  # Update filenames to skip first 2
    # Read in all the filenames: baby-1990.txt, baby-2000.txt, ...
    names = read_files(filenames)
    # Either we do a search or just print everything.
    if len(target) > 0:
        search_results = search_names(names, target)
        for name in search_results:
            print(name)
    else:
        print_names(names)
| 30.5 | 99 | 0.632657 |
d23be00962272ac0d53e31a5518cc388f3aafeed | 292 | py | Python | pdf_download/pdf_download/items.py | concongo/simple-scrapy-pdf-scrapper | 541559d89bdc849dd048bfae6d5a3b972e0d8edc | [
"Apache-2.0"
] | null | null | null | pdf_download/pdf_download/items.py | concongo/simple-scrapy-pdf-scrapper | 541559d89bdc849dd048bfae6d5a3b972e0d8edc | [
"Apache-2.0"
] | null | null | null | pdf_download/pdf_download/items.py | concongo/simple-scrapy-pdf-scrapper | 541559d89bdc849dd048bfae6d5a3b972e0d8edc | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class PdfDownloadItem(scrapy.Item):
    """Scrapy item for the pdf_download spider; no fields declared yet."""
    # define the fields for your item here like:
    # name = scrapy.Field()
    pass
| 19.466667 | 53 | 0.691781 |
99c90248366af6ae7aefe9cdd1eaa09156268b46 | 8,299 | py | Python | src/design.py | KimNikita/satellite_images_processing | 4b79eddbee3977fa6c227392e42745f50355ae90 | [
"Apache-2.0"
] | 1 | 2021-10-05T14:53:49.000Z | 2021-10-05T14:53:49.000Z | src/design.py | KimNikita/satellite_images_processing | 4b79eddbee3977fa6c227392e42745f50355ae90 | [
"Apache-2.0"
] | 5 | 2021-11-06T17:47:53.000Z | 2021-12-26T21:16:02.000Z | src/design.py | KimNikita/satellite_images_processing | 4b79eddbee3977fa6c227392e42745f50355ae90 | [
"Apache-2.0"
] | 3 | 2021-10-05T13:56:09.000Z | 2021-11-05T16:10:02.000Z | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:/Users/Никита/Desktop/GUI/satellite.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """Generated (pyuic5) layout for the satellite-image-processing window.

    Per the header warning, this file is regenerated from satellite.ui and
    hand edits will be lost -- code below is left untouched on purpose.
    Layout: a vertical splitter holding (top) a horizontal splitter with the
    map + map-properties column and the results column, and (bottom) the
    action-button row.
    """

    def setupUi(self, MainWindow):
        """Build the widget tree and hand ownership to *MainWindow*."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.setWindowModality(QtCore.Qt.NonModal)
        MainWindow.resize(789, 550)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
        MainWindow.setSizePolicy(sizePolicy)
        MainWindow.setAutoFillBackground(False)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
        self.gridLayout.setObjectName("gridLayout")
        # Splitters: everything | (map+properties | results) arrangement.
        self.splitter_all = QtWidgets.QSplitter(self.centralwidget)
        self.splitter_all.setOrientation(QtCore.Qt.Vertical)
        self.splitter_all.setObjectName("splitter_all")
        self.splitter_map_prop_res = QtWidgets.QSplitter(self.splitter_all)
        self.splitter_map_prop_res.setOrientation(QtCore.Qt.Horizontal)
        self.splitter_map_prop_res.setObjectName("splitter_map_prop_res")
        self.splitter_map_prop = QtWidgets.QSplitter(self.splitter_map_prop_res)
        self.splitter_map_prop.setOrientation(QtCore.Qt.Vertical)
        self.splitter_map_prop.setObjectName("splitter_map_prop")
        # "Map" group box.
        self.gridLayoutWidget = QtWidgets.QWidget(self.splitter_map_prop)
        self.gridLayoutWidget.setObjectName("gridLayoutWidget")
        self.grid_map = QtWidgets.QGridLayout(self.gridLayoutWidget)
        self.grid_map.setContentsMargins(0, 0, 0, 0)
        self.grid_map.setObjectName("grid_map")
        self.groupBox_map = QtWidgets.QGroupBox(self.gridLayoutWidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.groupBox_map.sizePolicy().hasHeightForWidth())
        self.groupBox_map.setSizePolicy(sizePolicy)
        self.groupBox_map.setMinimumSize(QtCore.QSize(530, 400))
        self.groupBox_map.setSizeIncrement(QtCore.QSize(470, 452))
        self.groupBox_map.setAlignment(QtCore.Qt.AlignCenter)
        self.groupBox_map.setObjectName("groupBox_map")
        self.gridLayout_2 = QtWidgets.QGridLayout(self.groupBox_map)
        self.gridLayout_2.setObjectName("gridLayout_2")
        self.grid_map.addWidget(self.groupBox_map, 0, 0, 1, 1)
        # "Map Properties" group box with mode/clear buttons.
        self.gridLayoutWidget_2 = QtWidgets.QWidget(self.splitter_map_prop)
        self.gridLayoutWidget_2.setObjectName("gridLayoutWidget_2")
        self.grid_properties = QtWidgets.QGridLayout(self.gridLayoutWidget_2)
        self.grid_properties.setContentsMargins(0, 0, 0, 0)
        self.grid_properties.setObjectName("grid_properties")
        self.groupBox_properties = QtWidgets.QGroupBox(self.gridLayoutWidget_2)
        self.groupBox_properties.setMinimumSize(QtCore.QSize(257, 56))
        self.groupBox_properties.setBaseSize(QtCore.QSize(470, 70))
        self.groupBox_properties.setAlignment(QtCore.Qt.AlignCenter)
        self.groupBox_properties.setObjectName("groupBox_properties")
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.groupBox_properties)
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        self.button_mode = QtWidgets.QPushButton(self.groupBox_properties)
        self.button_mode.setObjectName("button_mode")
        self.horizontalLayout_2.addWidget(self.button_mode)
        self.button_clear = QtWidgets.QPushButton(self.groupBox_properties)
        self.button_clear.setObjectName("button_clear")
        self.horizontalLayout_2.addWidget(self.button_clear)
        self.grid_properties.addWidget(self.groupBox_properties, 0, 0, 1, 1)
        # "Results of models" group box with two white image labels.
        self.gridLayoutWidget_3 = QtWidgets.QWidget(self.splitter_map_prop_res)
        self.gridLayoutWidget_3.setObjectName("gridLayoutWidget_3")
        self.grid_results = QtWidgets.QGridLayout(self.gridLayoutWidget_3)
        self.grid_results.setContentsMargins(0, 0, 0, 0)
        self.grid_results.setObjectName("grid_results")
        self.groupBox_results = QtWidgets.QGroupBox(self.gridLayoutWidget_3)
        self.groupBox_results.setMinimumSize(QtCore.QSize(220, 450))
        self.groupBox_results.setBaseSize(QtCore.QSize(310, 530))
        self.groupBox_results.setAlignment(QtCore.Qt.AlignCenter)
        self.groupBox_results.setObjectName("groupBox_results")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.groupBox_results)
        self.verticalLayout.setObjectName("verticalLayout")
        self.label_fire = QtWidgets.QLabel(self.groupBox_results)
        self.label_fire.setMinimumSize(QtCore.QSize(200, 200))
        self.label_fire.setStyleSheet("background-color: rgb(255, 255, 255);")
        self.label_fire.setText("")
        self.label_fire.setObjectName("label_fire")
        self.verticalLayout.addWidget(self.label_fire)
        self.label_cloud = QtWidgets.QLabel(self.groupBox_results)
        self.label_cloud.setMinimumSize(QtCore.QSize(200, 200))
        self.label_cloud.setStyleSheet("background-color: rgb(255, 255, 255);")
        self.label_cloud.setText("")
        self.label_cloud.setObjectName("label_cloud")
        self.verticalLayout.addWidget(self.label_cloud)
        self.grid_results.addWidget(self.groupBox_results, 0, 0, 1, 1)
        # "Buttons" group box along the bottom.
        self.gridLayoutWidget_4 = QtWidgets.QWidget(self.splitter_all)
        self.gridLayoutWidget_4.setObjectName("gridLayoutWidget_4")
        self.grid_buttons = QtWidgets.QGridLayout(self.gridLayoutWidget_4)
        self.grid_buttons.setContentsMargins(0, 0, 0, 0)
        self.grid_buttons.setObjectName("grid_buttons")
        self.groupBox_buttons = QtWidgets.QGroupBox(self.gridLayoutWidget_4)
        self.groupBox_buttons.setMinimumSize(QtCore.QSize(382, 56))
        self.groupBox_buttons.setBaseSize(QtCore.QSize(788, 94))
        self.groupBox_buttons.setAlignment(QtCore.Qt.AlignCenter)
        self.groupBox_buttons.setObjectName("groupBox_buttons")
        self.horizontalLayout = QtWidgets.QHBoxLayout(self.groupBox_buttons)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.button_analyze = QtWidgets.QPushButton(self.groupBox_buttons)
        self.button_analyze.setObjectName("button_analyze")
        self.horizontalLayout.addWidget(self.button_analyze)
        self.button_save = QtWidgets.QPushButton(self.groupBox_buttons)
        self.button_save.setObjectName("button_save")
        self.horizontalLayout.addWidget(self.button_save)
        self.button_exit = QtWidgets.QPushButton(self.groupBox_buttons)
        self.button_exit.setObjectName("button_exit")
        self.horizontalLayout.addWidget(self.button_exit)
        self.grid_buttons.addWidget(self.groupBox_buttons, 1, 0, 1, 1)
        self.gridLayout.addWidget(self.splitter_all, 0, 0, 1, 1)
        MainWindow.setCentralWidget(self.centralwidget)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Install the (translatable) user-visible strings on every widget."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "Satellite images processing"))
        self.groupBox_map.setTitle(_translate("MainWindow", " Map "))
        self.groupBox_properties.setTitle(_translate("MainWindow", "Map Properties"))
        self.button_mode.setText(_translate("MainWindow", "Switch to offline mode"))
        self.button_clear.setText(_translate("MainWindow", "Clear marker"))
        self.groupBox_results.setTitle(_translate("MainWindow", "Results of models"))
        self.groupBox_buttons.setTitle(_translate("MainWindow", "Buttons"))
        self.button_analyze.setText(_translate("MainWindow", "Analyze"))
        self.button_save.setText(_translate("MainWindow", "Save"))
        self.button_exit.setText(_translate("MainWindow", "Exit"))
| 60.137681 | 108 | 0.742861 |
66053d252bc808a98c5b9285e0d01e880c2124dc | 3,260 | py | Python | backend/backend/settings.py | ExZos/Mound | 5d1e9ab1149ce7892f0f2d303f22db7d4af0b46e | [
"MIT"
] | null | null | null | backend/backend/settings.py | ExZos/Mound | 5d1e9ab1149ce7892f0f2d303f22db7d4af0b46e | [
"MIT"
] | 3 | 2021-06-09T18:09:07.000Z | 2021-09-30T14:34:52.000Z | backend/backend/settings.py | ExZos/Mound | 5d1e9ab1149ce7892f0f2d303f22db7d4af0b46e | [
"MIT"
] | null | null | null | """
Django settings for backend project.
Generated by 'django-admin startproject' using Django 3.0.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control -- load it from an
# environment variable before any production deployment.
SECRET_KEY = 'cze4*9i3z#y)vef7v8cm@!w9)l(q5k(4n(w6q5ie5$z#8^#@ey'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'corsheaders',
    'rest_framework',
    'app.apps.AppConfig'
]
MIDDLEWARE = [
    # corsheaders middleware is listed first so it sees every response;
    # see django-cors-headers docs for the required ordering (confirm on upgrade).
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'backend.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'backend.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
# NOTE(review): 'EST' is a fixed-offset zone without DST; if Eastern local
# time is intended, 'America/New_York' is usually correct -- confirm.
TIME_ZONE = 'EST'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
# Origins allowed to make cross-site requests -- presumably the frontend
# dev server; confirm against the client's actual origin.
CORS_ORIGIN_WHITELIST = (
    'http://localhost:3000',
)
| 25.271318 | 91 | 0.694479 |
7b50fac51b476d2a1399d8a93d6fa5be00c47dc1 | 12,795 | py | Python | pirates/chat/PiratesTalkGlobals.py | itsyaboyrocket/pirates | 6ca1e7d571c670b0d976f65e608235707b5737e3 | [
"BSD-3-Clause"
] | 3 | 2021-02-25T06:38:13.000Z | 2022-03-22T07:00:15.000Z | pirates/chat/PiratesTalkGlobals.py | itsyaboyrocket/pirates | 6ca1e7d571c670b0d976f65e608235707b5737e3 | [
"BSD-3-Clause"
] | null | null | null | pirates/chat/PiratesTalkGlobals.py | itsyaboyrocket/pirates | 6ca1e7d571c670b0d976f65e608235707b5737e3 | [
"BSD-3-Clause"
] | 1 | 2021-02-25T06:38:17.000Z | 2021-02-25T06:38:17.000Z | # uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.chat.PiratesTalkGlobals
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from otp.chat.TalkGlobals import *
from pirates.piratesgui import GuiPanel, PiratesGuiGlobals
# Global Panda3D text-properties registry; every named style below is
# registered on it (and the handle is deleted once registration is done).
tpMgr = TextPropertiesManager.getGlobalPtr()
def MC(color, mult):
    """Scale every channel of *color* (alpha included) by *mult*, clamping
    each result to at most 1.0, and return the result as a Vec4."""
    r = min(color[0] * mult, 1.0)
    g = min(color[1] * mult, 1.0)
    b = min(color[2] * mult, 1.0)
    a = min(color[3] * mult, 1.0)
    return Vec4(r, g, b, a)
def fRange(variable, bottom, top):
    """Clamp *variable* into the closed interval [bottom, top]."""
    if variable < bottom:
        return bottom
    if variable > top:
        return top
    return variable
def TP(light, power, mult=1.0, add=0.0):
    """Return a Vec4 whose RGB channels are ((c * mult) + add) ** power,
    each clamped to [0, 1]; the alpha channel is passed through unchanged."""
    rgb = [fRange((light[i] * mult + add) ** power, 0.0, 1.0) for i in range(3)]
    return Vec4(rgb[0], rgb[1], rgb[2], light[3])
# Despite the *Mult names, textMult/headMult are passed as TP()'s `power`
# exponent when deriving the HEAD/BODY color variants below; the *Pow pair
# plays the same role for the "light" color families.
textMult = 0.35
headMult = 1.0
lightTextPow = 0.5
lightHeadPow = 1.0
# --- Inline text-style registration --------------------------------------
# Each 'CP*' entry starts from a stock style (e.g. 'yellow', 'gold'),
# overrides its color and/or slant, and re-registers the result under a new
# name so chat strings can switch styles inline by name.
# NOTE(review): it is not visible here whether getProperties() returns a
# copy or a live reference; if a reference, repeated lookups of the same
# base style would alias each other -- confirm against the Panda3D
# TextPropertiesManager documentation.
tpSuper = TextProperties()
tpSuper.setGlyphShift(0.25)
tpSuper.setGlyphScale(2.0)
tpMgr.setProperties('Super', tpSuper)
CPYellow = tpMgr.getProperties('yellow')
CPYellow.setTextColor(*PiratesGuiGlobals.TextFG13)
tpMgr.setProperties('CPYellow', CPYellow)
CPCopperLt = tpMgr.getProperties('gold')
CPCopperLt.setTextColor(*PiratesGuiGlobals.TextLT17)
tpMgr.setProperties('CPCopperLt', CPCopperLt)
CPGoldSlant = tpMgr.getProperties('gold')
CPGoldSlant.setSlant(0.2)
tpMgr.setProperties('CPGoldSlant', CPGoldSlant)
CPGoldSlantLt = tpMgr.getProperties('gold')
CPGoldSlantLt.setSlant(0.2)
tpMgr.setProperties('CPGoldSlantLt', CPGoldSlantLt)
CPGreenSlant = tpMgr.getProperties('green')
CPGreenSlant.setTextColor(*PiratesGuiGlobals.TextFG19)
CPGreenSlant.setSlant(0.2)
tpMgr.setProperties('CPGreenSlant', CPGreenSlant)
CPGreen = tpMgr.getProperties('green')
CPGreen.setTextColor(*PiratesGuiGlobals.TextFG19)
tpMgr.setProperties('CPGreen', CPGreen)
CPPurpleSlant = tpMgr.getProperties('blue')
CPPurpleSlant.setTextColor(*PiratesGuiGlobals.TextFG20)
CPPurpleSlant.setSlant(0.2)
tpMgr.setProperties('CPPurpleSlant', CPPurpleSlant)
CPPurple = tpMgr.getProperties('blue')
CPPurple.setTextColor(*PiratesGuiGlobals.TextFG20)
tpMgr.setProperties('CPPurple', CPPurple)
CPRedSlant = tpMgr.getProperties('red')
CPRedSlant.setTextColor(*PiratesGuiGlobals.TextFG6)
CPRedSlant.setSlant(0.2)
tpMgr.setProperties('CPRedSlant', CPRedSlant)
CPRed = tpMgr.getProperties('red')
CPRed.setTextColor(*PiratesGuiGlobals.TextFG6)
tpMgr.setProperties('CPRed', CPRed)
CPWhite = tpMgr.getProperties('white')
tpMgr.setProperties('CPWhite', CPWhite)
CPLtBlueLt = tpMgr.getProperties('blue')
CPLtBlueLt.setTextColor(*PiratesGuiGlobals.TextLT5)
tpMgr.setProperties('CPLtBlueLt', CPLtBlueLt)
CPPurpleLt = tpMgr.getProperties('purple')
CPPurpleLt.setTextColor(*PiratesGuiGlobals.TextLT12)
tpMgr.setProperties('CPPurpleLt', CPPurpleLt)
CPOrange = tpMgr.getProperties('orange')
CPOrange.setTextColor(*PiratesGuiGlobals.TextFG11)
tpMgr.setProperties('CPOrange', CPOrange)
CPOrangeSlant = tpMgr.getProperties('orange')
CPOrangeSlant.setTextColor(*PiratesGuiGlobals.TextFG11)
CPOrangeSlant.setSlant(0.2)
tpMgr.setProperties('CPOrangeSlant', CPOrangeSlant)
CPMaroon = tpMgr.getProperties('maroon')
CPMaroon.setTextColor(*PiratesGuiGlobals.TextFG15)
tpMgr.setProperties('CPMaroon', CPMaroon)
CPLtGoldLt = tpMgr.getProperties('lightGold')
CPLtGoldLt.setTextColor(*PiratesGuiGlobals.TextLT14)
tpMgr.setProperties('CPLtGoldLt', CPLtGoldLt)
# HEAD/BODY/OVER variants per chat channel: TP() derives a bright header
# tone and a dimmed body tone; 'OVER' is presumably the mouse-over state
# -- confirm against the chat renderer that consumes these names.
# (Note the local variables are reused across registrations; only the
# registered names matter from here on.)
CPYellowHEAD = tpMgr.getProperties('yellow')
CPYellowHEAD.setTextColor(TP(PiratesGuiGlobals.TextFG21, headMult))
tpMgr.setProperties('CPYellowHEAD', CPYellowHEAD)
CPYellowHEAD = tpMgr.getProperties('yellow')
CPYellowHEAD.setTextColor(TP(PiratesGuiGlobals.TextFG21, textMult))
tpMgr.setProperties('CPYellowBODY', CPYellowHEAD)
CPYellowOVER = tpMgr.getProperties('yellow')
CPYellowOVER.setTextColor(*PiratesGuiGlobals.TextFG21)
tpMgr.setProperties('CPYellowOVER', CPYellowOVER)
CPOrangeHEAD = tpMgr.getProperties('orange')
CPOrangeHEAD.setTextColor(TP(PiratesGuiGlobals.TextFG11, headMult))
tpMgr.setProperties('CPOrangeHEAD', CPOrangeHEAD)
CPOrangeHEAD = tpMgr.getProperties('orange')
CPOrangeHEAD.setTextColor(TP(PiratesGuiGlobals.TextLT11, 1.0, 1.0, 0.0))
tpMgr.setProperties('CPOrangeBODY', CPOrangeHEAD)
CPOrangeOVER = tpMgr.getProperties('orange')
CPOrangeOVER.setTextColor(*PiratesGuiGlobals.TextOV11)
tpMgr.setProperties('CPOrangeOVER', CPOrangeOVER)
CPOrangeSlantHEAD = tpMgr.getProperties('orange')
CPOrangeSlantHEAD.setTextColor(TP(PiratesGuiGlobals.TextFG11, headMult))
CPOrangeSlantHEAD.setSlant(0.2)
tpMgr.setProperties('CPOrangeSlantHEAD', CPOrangeSlantHEAD)
CPOrangeSlantHEAD = tpMgr.getProperties('orange')
CPOrangeSlantHEAD.setTextColor(TP(PiratesGuiGlobals.TextFG11, textMult))
CPOrangeSlantHEAD.setSlant(0.2)
tpMgr.setProperties('CPOrangeSlantBODY', CPOrangeSlantHEAD)
CPGreyHEAD = tpMgr.getProperties('grey')
CPGreyHEAD.setTextColor(TP(PiratesGuiGlobals.TextFG3, headMult, 0.875))
tpMgr.setProperties('CPGreyHEAD', CPGreyHEAD)
CPGreyHEAD = tpMgr.getProperties('grey')
CPGreyHEAD.setTextColor(TP(PiratesGuiGlobals.TextFG3, textMult))
tpMgr.setProperties('CPGreyBODY', CPGreyHEAD)
CPGreyOVER = tpMgr.getProperties('grey')
CPGreyOVER.setTextColor(TP(PiratesGuiGlobals.TextFG3, 1.0, 0.8))
tpMgr.setProperties('CPGreyOVER', CPGreyOVER)
CPOrangeSlantOVER = tpMgr.getProperties('orange')
CPOrangeSlantOVER.setTextColor(*PiratesGuiGlobals.TextOV11)
CPOrangeSlantOVER.setSlant(0.2)
tpMgr.setProperties('CPOrangeSlantOVER', CPOrangeSlantOVER)
CPCopperHEAD = tpMgr.getProperties('gold')
CPCopperHEAD.setTextColor(TP(PiratesGuiGlobals.TextFG17, headMult))
tpMgr.setProperties('CPCopperHEAD', CPCopperHEAD)
CPCopperHEAD = tpMgr.getProperties('gold')
CPCopperHEAD.setTextColor(TP(PiratesGuiGlobals.TextFG17, textMult))
tpMgr.setProperties('CPCopperBODY', CPCopperHEAD)
CPCopperOVER = tpMgr.getProperties('gold')
CPCopperOVER.setTextColor(*PiratesGuiGlobals.TextOV17)
tpMgr.setProperties('CPCopperOVER', CPCopperOVER)
CPLtBlueHEAD = tpMgr.getProperties('blue')
CPLtBlueHEAD.setTextColor(TP(PiratesGuiGlobals.TextFG5, lightHeadPow))
tpMgr.setProperties('CPLtBlueHEAD', CPLtBlueHEAD)
CPLtBlueHEAD = tpMgr.getProperties('blue')
CPLtBlueHEAD.setTextColor(TP(PiratesGuiGlobals.TextFG5, lightTextPow))
tpMgr.setProperties('CPLtBlueBODY', CPLtBlueHEAD)
CPLtBlueOVER = tpMgr.getProperties('blue')
CPLtBlueOVER.setTextColor(TP(PiratesGuiGlobals.TextFG5, 1.0, 0.8))
tpMgr.setProperties('CPLtBlueOVER', CPLtBlueOVER)
CPPurpleHEAD = tpMgr.getProperties('purple')
CPPurpleHEAD.setTextColor(TP(PiratesGuiGlobals.TextFG22, lightHeadPow))
tpMgr.setProperties('CPPurpleHEAD', CPPurpleHEAD)
CPPurpleHEAD = tpMgr.getProperties('purple')
CPPurpleHEAD.setTextColor(TP(PiratesGuiGlobals.TextFG22, lightTextPow))
tpMgr.setProperties('CPPurpleBODY', CPPurpleHEAD)
CPPurpleOVER = tpMgr.getProperties('purple')
CPPurpleOVER.setTextColor(TP(PiratesGuiGlobals.TextFG22, 1.0, 0.8))
tpMgr.setProperties('CPPurpleOVER', CPPurpleOVER)
CPLtGoldHEAD = tpMgr.getProperties('lightGold')
CPLtGoldHEAD.setTextColor(TP(PiratesGuiGlobals.TextFG14, headMult))
tpMgr.setProperties('CPLtGoldHEAD', CPLtGoldHEAD)
CPLtGoldHEAD = tpMgr.getProperties('lightGold')
CPLtGoldHEAD.setTextColor(TP(PiratesGuiGlobals.TextFG14, textMult))
tpMgr.setProperties('CPLtGoldBODY', CPLtGoldHEAD)
CPLtGoldOVER = tpMgr.getProperties('lightGold')
CPLtGoldOVER.setTextColor(*PiratesGuiGlobals.TextFG14)
tpMgr.setProperties('CPLtGoldOVER', CPLtGoldOVER)
CPOliveHEAD = tpMgr.getProperties('green')
CPOliveHEAD.setTextColor(TP(PiratesGuiGlobals.TextFG18, lightHeadPow))
tpMgr.setProperties('CPOliveHEAD', CPOliveHEAD)
CPOliveHEAD = tpMgr.getProperties('green')
CPOliveHEAD.setTextColor(TP(PiratesGuiGlobals.TextFG18, lightTextPow))
tpMgr.setProperties('CPOliveBODY', CPOliveHEAD)
CPOliveOVER = tpMgr.getProperties('green')
CPOliveOVER.setTextColor(*PiratesGuiGlobals.TextOV18)
tpMgr.setProperties('CPOliveOVER', CPOliveOVER)
# Extra message-category key used alongside the TALK_*/INFO_* constants
# imported from TalkGlobals.
# NOTE(review): value 100 is assumed not to collide with any TalkGlobals
# constant -- confirm against otp.chat.TalkGlobals.
CANNON_DEFENSE = 100
# The manager handle was only needed during registration above.
del tpMgr
# Per-message-type triples of registered text-property names. The exact
# meaning of each slot (speaker / body / suffix?) is not visible here --
# confirm against the chat widget that consumes these tables.
MESSAGE_COLOR_TABLE = {}
MESSAGE_COLOR_TABLE[TALK_OPEN] = ('CPOrangeHEAD', 'CPGreyHEAD', 'CPOrangeHEAD')
MESSAGE_COLOR_TABLE[TALK_WHISPER] = ('CPOrangeSlantHEAD', 'CPOrangeHEAD', 'CPOrangeSlantHEAD')
MESSAGE_COLOR_TABLE[TALK_ACCOUNT] = ('CPOrangeSlantHEAD', 'CPOrangeHEAD', 'CPOrangeSlantHEAD')
MESSAGE_COLOR_TABLE[TALK_GM] = ('CPPurpleHEAD', 'CPYellowHEAD', 'CPPurpleHEAD')
MESSAGE_COLOR_TABLE[TALK_GUILD] = ('CPLtBlueHEAD', 'CPLtBlueHEAD', 'CPLtBlueHEAD')
MESSAGE_COLOR_TABLE[TALK_PARTY] = ('CPOliveHEAD', 'CPOliveHEAD', 'CPOliveHEAD')
MESSAGE_COLOR_TABLE[TALK_PVP] = ('CPLtGoldHEAD', 'CPLtGoldHEAD', 'CPLtGoldHEAD')
MESSAGE_COLOR_TABLE[AVATAR_THOUGHT] = ('CPOrangeHEAD', 'CPGreyHEAD', 'CPOrangeHEAD')
MESSAGE_COLOR_TABLE[UPDATE_GUILD] = ('CPLtBlueHEAD', 'CPLtBlueHEAD', 'CPLtBlueHEAD')
MESSAGE_COLOR_TABLE[UPDATE_FRIEND] = ('CPOrangeHEAD', 'CPOrangeHEAD', 'CPOrangeHEAD')
MESSAGE_COLOR_TABLE[UPDATE_PARTY] = ('CPOliveHEAD', 'CPOliveHEAD', 'CPOliveHEAD')
MESSAGE_COLOR_TABLE[UPDATE_PVP] = ('CPLtGoldHEAD', 'CPLtGoldHEAD', 'CPLtGoldHEAD')
MESSAGE_COLOR_TABLE[INFO_SYSTEM] = ('CPRed', 'CPRed', 'CPRed')
MESSAGE_COLOR_TABLE[INFO_GAME] = ('CPGreen', 'CPGreen', 'CPGreen')
MESSAGE_COLOR_TABLE[INFO_DEV] = ('CPGreen', 'CPGreen', 'CPGreen')
MESSAGE_COLOR_TABLE[INFO_AVATAR_UNAVAILABLE] = ('CPOrangeHEAD', 'CPOrangeHEAD', 'CPOrangeHEAD')
MESSAGE_COLOR_TABLE[INFO_OPEN] = ('CPOrangeHEAD', 'CPGreyHEAD', 'CPOrangeHEAD')
MESSAGE_COLOR_TABLE[CANNON_DEFENSE] = ('CPLtBlueHEAD', 'CPLtBlueHEAD', 'CPLtBlueHEAD')
MESSAGE_COLOR_TABLE[INFO_GUILD] = ('CPLtBlueHEAD', 'CPLtBlueHEAD', 'CPLtBlueHEAD')
# Same layout as MESSAGE_COLOR_TABLE but with the 'OVER' style variants
# (presumably the mouse-over state -- confirm against the consumer).
MESSAGE_OVER_TABLE = {}
MESSAGE_OVER_TABLE[TALK_OPEN] = ('CPOrangeOVER', 'CPGreyOVER', 'CPOrangeOVER')
MESSAGE_OVER_TABLE[TALK_WHISPER] = ('CPOrangeSlantOVER', 'CPOrangeOVER', 'CPOrangeSlantOVER')
MESSAGE_OVER_TABLE[TALK_ACCOUNT] = ('CPOrangeSlantOVER', 'CPOrangeOVER', 'CPOrangeSlantOVER')
MESSAGE_OVER_TABLE[TALK_GM] = ('CPPurpleOVER', 'CPYellowOVER', 'CPPurpleOVER')
MESSAGE_OVER_TABLE[TALK_GUILD] = ('CPLtBlueOVER', 'CPLtBlueOVER', 'CPLtBlueOVER')
MESSAGE_OVER_TABLE[TALK_PARTY] = ('CPOliveOVER', 'CPOliveOVER', 'CPOliveOVER')
MESSAGE_OVER_TABLE[TALK_PVP] = ('CPLtGoldOVER', 'CPLtGoldOVER', 'CPLtGoldOVER')
MESSAGE_OVER_TABLE[AVATAR_THOUGHT] = ('CPOrangeOVER', 'CPGreyOVER', 'CPOrangeOVER')
MESSAGE_OVER_TABLE[UPDATE_GUILD] = ('CPLtBlueOVER', 'CPLtBlueOVER', 'CPLtBlueOVER')
MESSAGE_OVER_TABLE[UPDATE_FRIEND] = ('CPOrangeOVER', 'CPOrangeOVER', 'CPOrangeOVER')
MESSAGE_OVER_TABLE[UPDATE_PARTY] = ('CPOliveOVER', 'CPOliveOVER', 'CPOliveOVER')
MESSAGE_OVER_TABLE[UPDATE_PVP] = ('CPLtGoldOVER', 'CPLtGoldOVER', 'CPLtGoldOVER')
MESSAGE_OVER_TABLE[INFO_SYSTEM] = ('CPRed', 'CPRed', 'CPRed')
MESSAGE_OVER_TABLE[INFO_GAME] = ('CPGreen', 'CPGreen', 'CPGreen')
MESSAGE_OVER_TABLE[INFO_DEV] = ('CPGreen', 'CPGreen', 'CPGreen')
MESSAGE_OVER_TABLE[INFO_AVATAR_UNAVAILABLE] = ('CPOrangeOVER', 'CPOrangeOVER', 'CPOrangeOVER')
# NOTE(review): INFO_OPEN mixes HEAD and OVER variants, unlike every other
# row in this table -- possibly intentional, possibly a copy/paste slip.
MESSAGE_OVER_TABLE[INFO_OPEN] = ('CPOrangeHEAD', 'CPGreyOVER', 'CPOrangeHEAD')
MESSAGE_OVER_TABLE[CANNON_DEFENSE] = ('CPLtBlueOVER', 'CPLtBlueOVER', 'CPLtBlueOVER')
MESSAGE_OVER_TABLE[INFO_GUILD] = ('CPLtBlueOVER', 'CPLtBlueOVER', 'CPLtBlueOVER')
# Per-message-type (head, body, head) style-name triples used when the text
# itself carries the channel styling.
MESSAGE_STYLE_TABLE = {}
MESSAGE_STYLE_TABLE[TALK_OPEN] = ('CPWhite', 'CPGreyBODY', 'CPOrangeHEAD')
MESSAGE_STYLE_TABLE[TALK_WHISPER] = ('CPWhite', 'CPOrangeBODY', 'CPOrangeHEAD')
MESSAGE_STYLE_TABLE[TALK_ACCOUNT] = ('CPWhite', 'CPOrangeBODY', 'CPOrangeHEAD')
MESSAGE_STYLE_TABLE[TALK_GM] = ('CPWhite', 'CPYellowBODY', 'CPPurpleHEAD')
MESSAGE_STYLE_TABLE[TALK_GUILD] = ('CPWhite', 'CPLtBlueBODY', 'CPLtBlueHEAD')
MESSAGE_STYLE_TABLE[TALK_PARTY] = ('CPWhite', 'CPOliveBODY', 'CPOliveHEAD')
MESSAGE_STYLE_TABLE[TALK_PVP] = ('CPWhite', 'CPLtGoldBODY', 'CPLtGoldHEAD')
MESSAGE_STYLE_TABLE[AVATAR_THOUGHT] = ('CPWhite', 'CPGreyBODY', 'CPOrangeHEAD')
MESSAGE_STYLE_TABLE[UPDATE_GUILD] = ('CPLtBlueHEAD', 'CPLtBlueBODY', 'CPLtBlueHEAD')
MESSAGE_STYLE_TABLE[UPDATE_FRIEND] = ('CPOrangeHEAD', 'CPOrangeBODY', 'CPOrangeHEAD')
MESSAGE_STYLE_TABLE[UPDATE_PARTY] = ('CPOliveHEAD', 'CPOliveBODY', 'CPOliveHEAD')
# BUG FIX: was 'CPGLtGoldBODY' -- that name is never registered above; the
# intended style is clearly 'CPLtGoldBODY' (cf. the TALK_PVP row).
MESSAGE_STYLE_TABLE[UPDATE_PVP] = ('CPLtGoldHEAD', 'CPLtGoldBODY', 'CPLtGoldHEAD')
MESSAGE_STYLE_TABLE[INFO_SYSTEM] = ('CPRed', 'CPRed', 'CPRed')
MESSAGE_STYLE_TABLE[INFO_GAME] = ('CPGreen', 'CPGreen', 'CPGreen')
MESSAGE_STYLE_TABLE[INFO_DEV] = ('CPGreen', 'CPGreen', 'CPGreen')
MESSAGE_STYLE_TABLE[INFO_AVATAR_UNAVAILABLE] = ('CPOrange', 'CPOrangeBODY', 'CPOrange')
MESSAGE_STYLE_TABLE[INFO_OPEN] = ('CPOrangeHEAD', 'CPGreyBODY', 'CPOrangeHEAD')
MESSAGE_STYLE_TABLE[CANNON_DEFENSE] = ('CPWhite', 'CPLtBlueBODY', 'CPLtBlueHEAD')
# BUG FIX: was 'CPLtBlue' -- never registered (only CPLtBlueLt/HEAD/BODY/OVER
# exist); use 'CPLtBlueHEAD' to match the UPDATE_GUILD row's pattern.
MESSAGE_STYLE_TABLE[INFO_GUILD] = ('CPLtBlueHEAD', 'CPLtBlueBODY', 'CPLtBlueHEAD')
dadec9b1738073061f4e16ab94b9eec838066008 | 683 | py | Python | practice/2008/qualification/A-Saving_the_universe/a.py | victorWeiFreelancer/CodeJam | edb8f921860a35985823cb3dbd3ebec8a8f3c12f | [
"MIT"
] | null | null | null | practice/2008/qualification/A-Saving_the_universe/a.py | victorWeiFreelancer/CodeJam | edb8f921860a35985823cb3dbd3ebec8a8f3c12f | [
"MIT"
] | null | null | null | practice/2008/qualification/A-Saving_the_universe/a.py | victorWeiFreelancer/CodeJam | edb8f921860a35985823cb3dbd3ebec8a8f3c12f | [
"MIT"
] | null | null | null | """
run by
python3 a.py < test.in > test.out
"""
import sys
# Suppress .pyc generation while solving (keeps the contest dir clean).
sys.dont_write_bytecode = True
from TestCase import TestCase
def main():
    """Read all test cases from stdin and solve each one.

    Input format per case: engine count, then one engine name per line,
    then query count, then one queried engine name per line.
    """
    case_count = int(input())
    cases = []
    for case_no in range(1, case_count + 1):
        engine_count = int(input())
        # Map each engine name to its 0-based position, reading one per line.
        engines = {input().rstrip('\n'): idx for idx in range(engine_count)}
        query_count = int(input())
        queries = [engines[input().rstrip('\n')] for _ in range(query_count)]
        case = TestCase(case_no, engine_count, engines, query_count, queries)
        cases.append(case)
        case.findMinimalSwitch()
if __name__ == '__main__':
main() | 24.392857 | 84 | 0.594436 |
e57db0a805442e31a60b40de81cf9a122dd34b45 | 1,832 | py | Python | ProVulnScan.py | Sarthak044/ProVulnScanner | d0bfd54376e23650e8262cdd1157bcb10825966c | [
"Apache-2.0"
] | null | null | null | ProVulnScan.py | Sarthak044/ProVulnScanner | d0bfd54376e23650e8262cdd1157bcb10825966c | [
"Apache-2.0"
] | null | null | null | ProVulnScan.py | Sarthak044/ProVulnScanner | d0bfd54376e23650e8262cdd1157bcb10825966c | [
"Apache-2.0"
] | null | null | null | import urllib.parse as urlparse
from bs4 import BeautifulSoup
import scannerclass
#Banner
print('''
*@@@***@@m *@@@@* *@@@* *@@@ m@***@m@
@@ *@@m *@@ m@ @@ m@@ *@
@@ m@@ *@@@m@@@ m@@*@@m @@m m@ *@@@ *@@@ @@ *@@@@@@@@m *@@@m m@@*@@ m@*@@m *@@@@@@@@m
@@@@@@@ @@* ** @@* *@@ @@m @* @@ @@ !@ @@ @@ *@@@@@m@@* @@ @@ @@ @@ @@
@@ @! @@ @@ *!@ !* !@ @@ !@ @! @@ *@@@! m@@@!@ @! @@
@! @! @@ !@ !@@m !@ @! !@ @! !@ @@ @@@!m m@! !@ @! !@
@! !! !@ !! !! !* !@ !! !! !! !! ! *@!!! !!!!:! !! !!
!! !: !!! !!! !!:: !! !! :! !! !! !! !!!:! !!! :! !! !!
:!:!: : ::: : : : : : :: !: :!: : : : : ::: :!: ::!: : :! : : : :!: : !: : ::: :!: :
By:
Sarthak kul.
''')
# Ask the user which site to scan; seed URLs to skip (e.g. logout links) here.
target_url = input("[+] Enter the Target URL ~~> ")
links_to_ignore = [""] #enter links to ignore
# Login payload posted before crawling so authenticated pages are reachable.
# NOTE(review): hard-coded demo credentials and an empty login URL below -
# fill in the real login endpoint and credentials before running.
data_dict = {"username": "admin", "password": "password", "Login": "submit"}
vuln = scannerclass.Scanner(target_url, links_to_ignore)
vuln.session.post("", data = data_dict)
# Discover all reachable pages, then run the vulnerability checks on them.
vuln.crawl()
vuln.run_scanner()
| 55.515152 | 123 | 0.181223 |
ea1ffc56124bba60b454596be9e7148d2f3b5f25 | 2,481 | py | Python | safefileshare/file/forms.py | pdyba/safefileshare | db693bf623ad61495575501790795ac63037514b | [
"MIT"
] | null | null | null | safefileshare/file/forms.py | pdyba/safefileshare | db693bf623ad61495575501790795ac63037514b | [
"MIT"
] | null | null | null | safefileshare/file/forms.py | pdyba/safefileshare | db693bf623ad61495575501790795ac63037514b | [
"MIT"
] | null | null | null | from django import forms
from django.contrib.auth.forms import ReadOnlyPasswordHashField
from django.contrib.auth.hashers import make_password
from django.utils.translation import gettext_lazy as _
from safefileshare.file.models import SafeSecret
class UploadFileForm(forms.Form):
    """Upload form for a new secret: a link OR a file (exactly one) plus a passphrase."""

    secret_link = forms.URLField(required=False)
    secret_file = forms.FileField(required=False)
    secret_password = forms.CharField(
        max_length=64, label="Your passphrase", widget=forms.PasswordInput()
    )

    def is_valid(self):
        """Valid only when exactly one of secret_link / secret_file was supplied.

        NOTE(review): when both or neither are given this returns False without
        adding any error to the form, so the template shows no feedback message.
        """
        valid = super().is_valid()
        form_data = self.cleaned_data
        check = [form_data.get("secret_link"), form_data.get("secret_file")]
        # Reject "both provided" and "neither provided" - exactly one is required.
        if all(check) or not any(check):
            return False
        return valid
class GetSecretForm(forms.Form):
    """Prompt for the passphrase used to unlock an existing secret."""

    password = forms.CharField(
        max_length=64, label="Passphrase", widget=forms.PasswordInput()
    )
class SetPasswordForm(forms.ModelForm):
    """
    A form that lets an admin set a file's password without entering the old one.
    """

    # Displayed read-only; raw passwords are never stored or shown.
    password_hash = ReadOnlyPasswordHashField(
        label=_("Password"), help_text=_("Raw passwords are not stored")
    )

    class Meta:
        model = SafeSecret
        fields = ["password_hash"]
        # NOTE(review): "username" is not in this form's fields list, so this
        # mapping appears unused - confirm against the model before removing.
        field_classes = {"username": forms.CharField}

    error_messages = {"password_mismatch": _("The two password fields didn't match.")}
    new_password1 = forms.CharField(
        label=_("New password"), widget=forms.PasswordInput, strip=False
    )
    new_password2 = forms.CharField(
        label=_("New password confirmation"), strip=False, widget=forms.PasswordInput
    )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Keep a handle on the model instance being edited. Requires "instance"
        # to be passed as a keyword argument (the usual ModelForm convention).
        self.secret = kwargs.get("instance")

    def clean_new_password2(self):
        """Validate that both new-password entries match; return the confirmed value."""
        password1 = self.cleaned_data.get("new_password1")
        password2 = self.cleaned_data.get("new_password2")
        if password1 and password2:
            if password1 != password2:
                raise forms.ValidationError(
                    self.error_messages["password_mismatch"], code="password_mismatch"
                )
        return password2

    def save(self, commit=True):
        """Hash the new password onto the secret and optionally persist it."""
        password = self.cleaned_data.get("new_password1")
        self.secret.password_hash = make_password(password)
        if commit:
            self.secret.save()
        return self.secret

    def save_m2m(self):
        """Intentional no-op; the save(commit=False) flow fails without this hook."""
        pass
| 32.220779 | 86 | 0.664248 |
3314d7882038eeeae615d37a84d1baa386521fa5 | 2,182 | py | Python | src/clinic/parser.py | kvoss/optiwait | a941d4508209cac686a4d13348de378e0a582951 | [
"BSD-3-Clause"
] | 1 | 2019-02-14T19:23:26.000Z | 2019-02-14T19:23:26.000Z | src/clinic/parser.py | kvoss/optiwait | a941d4508209cac686a4d13348de378e0a582951 | [
"BSD-3-Clause"
] | null | null | null | src/clinic/parser.py | kvoss/optiwait | a941d4508209cac686a4d13348de378e0a582951 | [
"BSD-3-Clause"
] | null | null | null | import re
import unittest
# from dateutil.parser import parse
import logging
logger = logging.getLogger(__name__)

__all__ = ['Parser']

PATTERN = re.compile(r'(\d+)\s*min')
PATTERNH = re.compile(r'(\d+)\s*(hour|hr)')
PATTERN1H = re.compile(r'\b(one|an)\s+(hour|hr)\b')
# An explicitly "empty" waiting room means no wait at all (see test_empty_room).
PATTERN_EMPTY = re.compile(r'\bempty\b')


class Parser(object):
    """Extract an estimated wait time, in minutes, from a clinic's tweet text."""

    def __init__(self, txt):
        # Lower-case once so all the patterns can be written in lower case.
        self.txt = txt.lower()

    def get_minutes(self):
        """Return the advertised wait in minutes.

        Precedence: an "empty waiting room" announcement yields 0; otherwise
        the first "<N> min" match wins (for ranges like "60 to 90 mins" the
        regex scan lands on the upper bound), then "<N> hour/hr" * 60, then a
        textual "one/an hour". Anything else falls back to a 66-minute default.
        """
        DEFAULT_WAIT = 66
        if PATTERN_EMPTY.search(self.txt):
            # Fixes test_empty_room: previously this fell through to the
            # 66-minute default instead of reporting no wait.
            return 0
        sre = PATTERN.search(self.txt)
        sreh = PATTERNH.search(self.txt)
        sre1h = PATTERN1H.search(self.txt)
        if sre:
            # Use group(1), the captured digits; the old .groups(1) call only
            # worked by accident (it returned a tuple whose first element
            # happened to be the same capture).
            mins = int(sre.group(1))
        elif sreh:
            mins = int(sreh.group(1)) * 60
        elif sre1h:
            mins = 60
        else:
            mins = DEFAULT_WAIT
            logger.debug('default wait: ' + self.txt)
        return mins
class TestParser(unittest.TestCase):
    """
    I take tests from https://twitter.com/mediclinicon8th
    """

    def setUp(self):
        # No shared fixtures; each test constructs its own Parser.
        pass

    def test_45min(self):
        txt = '45min wait at 10:51 am.'
        self.assertEqual( Parser(txt).get_minutes(), 45)

    def test_60_to_90(self):
        # For a range the expectation is the upper bound (90), which is where
        # the "<N> min" regex scan ends up landing.
        txt = "wait time at 12:47 pm is 60 to 90 mins"
        self.assertEqual( Parser(txt).get_minutes(), 90)

    def test_an_hour(self):
        txt = "It's at least an hour wait at 10:45am"
        self.assertEqual( Parser(txt).get_minutes(), 60)

    def test_one_hr(self):
        txt = 'still one hour wait'
        self.assertEqual( Parser(txt).get_minutes(), 60)

    def test_2hr(self):
        txt = 'still 2hr wait'
        self.assertEqual( Parser(txt).get_minutes(), 120)

    def test_45dash60_min(self):
        txt = 'Wait time is about 45-60 minutes.'
        self.assertEqual( Parser(txt).get_minutes(), 60)

    def test_fractional(self):
        # Only the leading whole "1 hour" is recognised; fractions are ignored.
        txt = 'Our wait time is 1 hour to 1 1/2 hours @ 5:47 pm.'
        self.assertEqual( Parser(txt).get_minutes(), 60)

    def test_empty_room(self):
        # Requires the parser to map an explicitly "empty" waiting room to
        # zero minutes rather than the generic default.
        txt = 'We have an empty waiting room at 1:05pm!!'
        self.assertEqual( Parser(txt).get_minutes(), 0)


if __name__ == '__main__':
    unittest.main()
| 24.244444 | 65 | 0.589368 |
685069d90b62cb4efd4f02ec6001f64805fd682c | 24,427 | py | Python | lyrebird/mock/dm.py | echoyang7/lyrebird | 903fef4bd51ee6430447374b6d432a4b339f7392 | [
"MIT"
] | null | null | null | lyrebird/mock/dm.py | echoyang7/lyrebird | 903fef4bd51ee6430447374b6d432a4b339f7392 | [
"MIT"
] | null | null | null | lyrebird/mock/dm.py | echoyang7/lyrebird | 903fef4bd51ee6430447374b6d432a4b339f7392 | [
"MIT"
] | null | null | null | import os
import re
import uuid
import json
import codecs
import shutil
from pathlib import Path
from urllib.parse import urlparse
from lyrebird.log import get_logger
from jinja2 import Template
from lyrebird.application import config
from lyrebird.mock.handlers import snapshot_helper
PROP_FILE_NAME = '.lyrebird_prop'
logger = get_logger()
class DataManager:
def __init__(self):
self.root_path: Path = None
self.root = {
"id": str(uuid.uuid4()),
"name": "$",
"type": "group",
"parent_id": None,
"children": []
}
self.id_map = {}
self.activated_data = {}
self.activated_group = {}
self.secondary_activated_data = {}
self.LEVEL_SECONDARY_ACTIVATED = 2
self.clipboard = None
self.save_to_group_id = None
self.tmp_group = {'id': 'tmp_group', 'type': 'group', 'name': 'tmp-group', 'children': []}
self.snapshot_helper = snapshot_helper.SnapshotHelper()
def set_root(self, root_path):
"""
Set a new mock data root dir
-----
DataManager will reload all data from this new root dir.
And all activited data will be removed from DataManager.
"""
_root_path = Path(root_path).expanduser()
if not _root_path.is_dir():
raise RootPathIsNotDir(root_path)
_root_path.mkdir(parents=True, exist_ok=True)
self.root_path = _root_path
self.reload()
def reload(self):
if not self.root_path:
raise RootNotSet
_root_prop_path = self.root_path / PROP_FILE_NAME
if not _root_prop_path.exists():
self._save_prop()
with codecs.open(_root_prop_path) as f:
_root_prop = json.load(f)
self.root = _root_prop
self.id_map = {}
self._read_node(self.root)
self._sort_children_by_name()
def _read_node(self, node):
if 'id' in node:
self.id_map[node['id']] = node
if 'children' in node:
for child in node['children']:
self._read_node(child)
def get(self, _id):
"""
Get mock group or data by id
"""
if not self.root:
raise RootNotSet
node = self.id_map.get(_id)
if not node:
raise IDNotFound(_id)
if node.get('type') == 'group' or node.get('type') == None:
return node
elif node.get('type') == 'data':
return self._load_data(_id)
# -----
# Mock operations
# -----
def activate(self, _id):
"""
Activite data by id
"""
if not self.root:
raise RootNotSet
_node = self.id_map.get(_id)
if _node:
self._activate(_node)
else:
raise DataNotFound(f'ID:{_id}')
self._activate_super_node(_node, level_lefted=self.LEVEL_SECONDARY_ACTIVATED)
self.activated_group[_id] = _node
def _activate_super_node(self, node, level_lefted=1):
if level_lefted <= 0:
return
if not node.get('super_id'):
return
_secondary_search_node_id = node.get('super_id')
_secondary_search_node = self.id_map.get(_secondary_search_node_id)
if not _secondary_search_node:
raise DataNotFound(f'Secondary search node ID: {_secondary_search_node_id}')
self._activate(_secondary_search_node, secondary_search=True)
self._activate_super_node(_secondary_search_node, level_lefted=level_lefted-1)
def _activate(self, node, secondary_search=False):
if node.get('type', '') == 'data':
_mock_data = self._load_data(node['id'])
if _mock_data:
if not secondary_search:
self.activated_data[node['id']] = _mock_data
else:
self.secondary_activated_data[node['id']] = _mock_data
elif node.get('type', '') == 'group':
if 'children' in node:
for child in node['children']:
self._activate(child, secondary_search=secondary_search)
def _load_data(self, data_id):
_data_file = self.root_path / data_id
if not _data_file.exists():
raise DataNotFound(f'Data file {_data_file}')
with codecs.open(_data_file) as f:
return json.load(f)
def deactivate(self):
"""
Clear activated data
"""
self.activated_data = {}
self.activated_group = {}
self.secondary_activated_data = {}
def get_matched_data(self, flow):
"""
Find matched mock data from activated data
"""
_matched_data = []
for _data_id in self.activated_data:
_data = self.activated_data[_data_id]
if self._is_match_rule(flow, _data.get('rule')):
_matched_data.append(_data)
if len(_matched_data) <= 0:
for _data_id in self.secondary_activated_data:
_data = self.secondary_activated_data[_data_id]
if self._is_match_rule(flow, _data.get('rule')):
_matched_data.append(_data)
# TODO render mock data before response, support more functions
params = {
'ip': config.get('ip'),
'port': config.get('mock.port')
}
for response_data in _matched_data:
if 'response' not in response_data:
continue
if 'data' not in response_data['response']:
continue
if not response_data['response']['data']:
continue
resp_data_template = Template(response_data['response']['data'])
response_data['response']['data'] = resp_data_template.render(params)
return _matched_data
def _is_match_rule(self, flow, rules):
if not rules:
return False
for rule_key in rules:
pattern = rules[rule_key]
target = self._get_rule_target(rule_key, flow)
if not target or not re.search(pattern, target):
return False
return True
def _get_rule_target(self, rule_key, flow):
prop_keys = rule_key.split('.')
result = flow
for prop_key in prop_keys:
if not isinstance(result, dict):
return
result = result.get(prop_key)
if not result:
return None
return result
# -----
# Data tree operations
# -----
def _get_request_path(self, request):
path = request.get('path')
if not path:
if not request.get('url'):
return ''
parsed_url = urlparse(request['url'])
path = parsed_url.path
return path
def add_data(self, parent_id, raw_data):
if not isinstance(raw_data, dict):
raise DataObjectSouldBeADict
if parent_id == 'tmp_group':
parent_node = self.tmp_group
else:
parent_node = self.id_map.get(parent_id)
if not parent_node:
raise IDNotFound(parent_id)
if parent_node['type'] == 'data':
raise DataObjectCannotContainAnyOtherObject
data = dict(raw_data)
data_id = str(uuid.uuid4())
data['id'] = data_id
if 'request' in data:
# TODO remove it with inspector frontend
data['request'] = dict(raw_data['request'])
_data_name = self._get_request_path(data['request'])
_data_rule = {
'request.url': f'(?=.*{self._get_request_path(data["request"])})'
}
if 'data' in data['request']:
data['request']['data'] = self._flow_data_2_str(data['request']['data'])
else:
_data_name = data.get('name')
_data_rule = {'request.url': '(?=.*YOUR-REQUEST-PATH)(?=.*PARAMS)'}
if 'response' in data:
# TODO remove it with inspector frontend
data['response'] = dict(raw_data['response'])
if 'data' in data['response']:
data['response']['data'] = self._flow_data_2_str(data['response']['data'])
# proxy_response will not be saved
if 'proxy_response' in data:
del data['proxy_response']
data['name'] = _data_name
data['rule'] = _data_rule
data_node = {}
data_node['id'] = data_id
data_node['name'] = _data_name
data_node['type'] = 'data'
data_node['parent_id'] = parent_id
data_path = self.root_path / data_id
with codecs.open(data_path, 'w') as f:
# Save data file
json.dump(data, f, ensure_ascii=False)
logger.debug(f'*** Write file {data_path}')
# Update parent node
# New data added in the head of child list
parent_node['children'].insert(0, data_node)
logger.debug(f'*** Add to node {data_node}')
# Update ID mapping
self.id_map[data_id] = data_node
self._save_prop()
return data_id
def _flow_data_2_str(self, data):
if isinstance(data, str):
return data
return json.dumps(data, ensure_ascii=False)
def add_group(self, parent_id, name):
if parent_id == None:
parent_node = self.root
else:
parent_node = self.id_map.get(parent_id)
if not parent_node:
raise IDNotFound(parent_id)
if parent_node.get('type') == 'data':
raise DataObjectCannotContainAnyOtherObject
# Add group
group_id = str(uuid.uuid4())
new_group = {
'id': group_id,
'name': name,
'type': 'group',
'parent_id': parent_id,
'children': [],
'super_id': None
}
# New group added in the head of child list
if 'children' not in parent_node:
parent_node['children'] = []
parent_node['children'].insert(0, new_group)
# Register ID
self.id_map[group_id] = new_group
# Save prop
self._save_prop()
return group_id
def delete(self, _id):
target_node = self.id_map.get(_id)
if not target_node:
raise IDNotFound(_id)
parent_id = target_node.get('parent_id')
# Remove refs
if parent_id:
parent = self.id_map.get(parent_id)
parent['children'].remove(target_node)
else:
self.root['children'].remove(target_node)
self._delete(_id)
# Save prop
self._save_prop()
def _delete(self, _id):
target_node = self.id_map.get(_id)
if not target_node:
raise IDNotFound(_id)
if 'children' in target_node and len(target_node['children']) > 0:
for child in target_node['children']:
self._delete(child['id'])
# Remove from activated_group
if _id in self.activated_group:
self.activated_group.pop(_id)
# Delete from ID mapping
self.id_map.pop(_id)
# Delete from file system
if target_node['type'] == 'data':
data_file_path = self.root_path / _id
os.remove(data_file_path)
def cut(self, _id):
_node = self.id_map.get(_id)
if not _node:
raise IDNotFound(_id)
self.clipboard = {
'type': 'cut',
'id': _id,
'node': _node
}
def copy(self, _id):
_node = self.id_map.get(_id)
if not _node:
raise IDNotFound(_id)
self.clipboard = {
'type': 'copy',
'id': _id,
'node': _node
}
def import_(self, node):
self.clipboard = {
'type': 'import',
'id': node["id"],
'node': node
}
def paste(self, parent_id, **kwargs):
if not self.clipboard:
raise NoneClipbord
_parent_node = self.id_map.get(parent_id)
_node = self.clipboard['node']
if not _parent_node:
raise IDNotFound(parent_id)
if not _parent_node.get('children'):
_parent_node['children'] = []
if self.clipboard['type'] == 'cut':
_origin_parent = self.id_map.get(_node['parent_id'])
_origin_parent['children'].remove(_node)
_parent_node['children'].insert(0, _node)
_node['parent_id'] = parent_id
elif self.clipboard['type'] == 'copy':
self._copy_node(_parent_node, _node, **kwargs)
elif self.clipboard['type'] == 'import':
self._copy_node(_parent_node, _node, **kwargs)
self._save_prop()
def _copy_node(self, parent_node, node, **kwargs):
new_node = {}
new_node.update(node)
new_node['id'] = str(uuid.uuid4())
new_node['parent_id'] = parent_node['id']
# Add to target node
if not parent_node.get('children'):
parent_node['children'] = []
parent_node['children'].insert(0, new_node)
# Register ID
self.id_map[new_node['id']] = new_node
if new_node['type'] == 'group':
new_node['children'] = []
for child in node['children']:
self._copy_node(new_node, child, **kwargs)
elif new_node['type'] == 'data':
self._copy_file(new_node, node, **kwargs)
def _copy_file(self, target_data_node, data_node, **kwargs):
_id = data_node['id']
origin_file_path = self.root_path / _id
if kwargs.get('custom_input_file_path'):
origin_file_path = f'{kwargs.get("custom_input_file_path")}/{_id}'
new_file_id = target_data_node['id']
new_file_path = self.root_path / new_file_id
with codecs.open(origin_file_path, 'r') as inputfile, codecs.open(new_file_path, 'w') as outputfile:
origin_text = inputfile.read()
prop = json.loads(origin_text)
prop['id'] = new_file_id
new_prop_text = json.dumps(prop, ensure_ascii=False)
outputfile.write(new_prop_text)
def _save_prop(self):
self._sort_children_by_name()
prop_str = PropWriter().parse(self.root)
# Save prop
prop_file = self.root_path / PROP_FILE_NAME
with codecs.open(prop_file, 'w') as f:
f.write(prop_str)
# Reactive mock data
_activated_group = self.activated_group
self.deactivate()
for _group_id in _activated_group:
self.activate(_group_id)
def _sort_children_by_name(self):
for node_id in self.id_map:
node = self.id_map[node_id]
if 'children' not in node:
continue
node['children'] = sorted(node['children'], key=lambda sub_node: sub_node['name'])
# -----
# Conflict checker
# -----
def check_conflict(self, _id):
node = self.id_map.get(_id)
if not node:
raise IDNotFound(_id)
data_array = []
def _read_data(node):
if node.get('type') == 'data':
_data_file = self.root_path / node['id']
with codecs.open(_data_file, 'r') as f:
_data = json.load(f)
_data['parent_id'] = node['parent_id']
data_array.append(_data)
elif node.get('type') == 'group':
for child in node['children']:
_read_data(child)
_read_data(node)
return self.check_conflict_data(data_array)
def activated_data_check_conflict(self):
data_array = list(self.activated_data.values())
return self.check_conflict_data(data_array)
def check_conflict_data(self, data_array):
conflict_rules = []
for _data in data_array:
_rule = _data['rule']
_hit_data = []
for _test_data in data_array:
if self._is_match_rule(_test_data, _rule):
_target_node = {
'id': _test_data['id'],
'name': _test_data['name'],
'url': _test_data['request']['url'],
'abs_parent_path': self._get_abs_parent_path(_test_data)
}
_hit_data.append(_target_node)
if len(_hit_data) > 1:
_src_node = {
'id': _data['id'],
'name': _data['name'],
'rule': _data['rule'],
'abs_parent_path': self._get_abs_parent_path(_data)
}
conflict_rules.append(
{
'data': _src_node,
'conflict_data': _hit_data
}
)
return conflict_rules
def _get_abs_parent_path(self, node, path='/'):
parent_node = self._get_node_parent(node)
if parent_node is None:
return path
current_path = '/' + node['name'] + path
return self._get_abs_parent_path(parent_node, path=current_path)
def _get_abs_parent_obj(self, node, parent_obj=None):
if parent_obj is None:
parent_obj = []
if 'id' not in node:
return parent_obj
node_info = self.id_map.get(node['id'])
if not node_info:
return parent_obj
parent_obj.insert(0, {
'id': node_info['id'],
'name': node_info['name'],
'type': node_info['type'],
'parent_id': node_info['parent_id']
})
parent_node = self._get_node_parent(node)
if parent_node is None:
return parent_obj
return self._get_abs_parent_obj(parent_node, parent_obj=parent_obj)
def _get_node_parent(self, node):
if 'parent_id' not in node:
return None
parent_node = self.id_map.get(node['parent_id'])
if not parent_node:
return None
return parent_node
# -----
# Record API
# -----
def save_data(self, data):
if len(self.activated_group) > 0:
# TODO use self.save_to_group_id
target_group_id = list(self.activated_group.keys())[0]
self.add_data(target_group_id, data)
else:
self.add_data('tmp_group', data)
# -----
# Editor
# -----
def update_group(self, _id, data):
ignore_keys = ['id', 'parent_id', 'type', 'children']
node = self.id_map.get(_id)
if not node:
raise IDNotFound(_id)
update_data = {k: data[k] for k in data if k not in ignore_keys}
node.update(update_data)
delete_keys = [k for k in node if k not in data and k not in ignore_keys]
for key in delete_keys:
node.pop(key)
self._save_prop()
def update_data(self, _id, data):
node = self.id_map.get(_id)
if not node:
raise IDNotFound(_id)
node['name'] = data['name']
data_file = self.root_path / _id
if not data_file.exists():
raise DataNotFound(_id)
with codecs.open(data_file, 'w') as f:
data_str = json.dumps(data, ensure_ascii=False)
f.write(data_str)
self._save_prop()
# -----
# Snapshot
# -----
def _write_prop_to_custom_path(self, outfile_path, node):
prop_str = PropWriter().parse(node)
prop_file = outfile_path / PROP_FILE_NAME
with codecs.open(prop_file, "w") as f:
f.write(prop_str)
def _write_file_to_custom_path(self, outfile_path, file_content):
with codecs.open(outfile_path / file_content['id'], "w") as f:
f.write(json.dumps(file_content, ensure_ascii=False))
def import_snapshot(self, parent_id):
snapshot_path = self.snapshot_helper.get_snapshot_path()
self.snapshot_helper.save_compressed_file(snapshot_path)
self.snapshot_helper.decompress_snapshot(f"{snapshot_path}.lb", f"{snapshot_path}-decompressed")
if not Path(f"{snapshot_path}-decompressed/{PROP_FILE_NAME}").exists():
raise LyrebirdPropNotExists
with codecs.open(f"{snapshot_path}-decompressed/{PROP_FILE_NAME}") as f:
_prop = json.load(f)
self.import_(node=_prop)
self.paste(parent_id=parent_id, custom_input_file_path=f"{snapshot_path}-decompressed")
def export_snapshot_from_event(self, event_json):
snapshot_path = self.snapshot_helper.get_snapshot_path()
if not event_json.get("snapshot") or not event_json.get("events"):
raise SnapshotEventNotInCorrectFormat
_prop = event_json.get("snapshot")
self._write_prop_to_custom_path(snapshot_path, _prop)
for mock_data in event_json.get("events"):
self._write_file_to_custom_path(snapshot_path, mock_data)
self.snapshot_helper.compress_snapshot(snapshot_path, snapshot_path)
return f"{snapshot_path}.lb"
def export_snapshot_from_dm(self, node_id):
snapshot_path = self.snapshot_helper.get_snapshot_path()
_prop = self.id_map.get(node_id)
self._write_prop_to_custom_path(snapshot_path, _prop)
data_id_map = {}
self.snapshot_helper.get_data_id_map(_prop, data_id_map)
for mock_data_id in data_id_map:
shutil.copy(self.root_path / mock_data_id, snapshot_path / mock_data_id)
self.snapshot_helper.compress_snapshot(snapshot_path, snapshot_path)
return f"{snapshot_path}.lb"
# -----------------
# Exceptions
# -----------------
class RootNotSet(Exception):
pass
class RootPathNotExists(Exception):
pass
class RootPathIsNotDir(Exception):
pass
class LyrebirdPropNotExists(Exception):
pass
class DataNotFound(Exception):
pass
class DataObjectCannotContainAnyOtherObject(Exception):
pass
class DataObjectSouldBeADict(Exception):
pass
class IDNotFound(Exception):
pass
class NoneClipbord(Exception):
pass
class DumpPropError(Exception):
pass
class NonePropFile(Exception):
pass
class TooMuchPropFile(Exception):
pass
class NodeExist(Exception):
pass
class SnapshotEventNotInCorrectFormat(Exception):
pass
class PropWriter:
def __init__(self):
self.indent = 0
self.parsers = {
'dict': self.dict_parser,
'list': self.list_parser,
'int': self.int_parser,
'str': self.str_parser,
'bool': self.bool_parser,
'NoneType': self.none_parser
}
def parse(self, prop):
prop_type = type(prop)
parser = self.parsers.get(prop_type.__name__)
if not parser:
raise DumpPropError(f'Not support type {prop_type}')
return parser(prop)
def dict_parser(self, val):
dict_str = '{'
children = None
for k, v in val.items():
if k == 'children':
children = v
continue
dict_str += f'"{k}":{self.parse(v)},'
if children:
dict_str += self.children_parser(children)
if dict_str.endswith(','):
dict_str = dict_str[:-1]
dict_str += '}'
return dict_str
def list_parser(self, val):
list_str = '['
for item in val:
item_str = self.parse(item)
list_str += item_str + ','
if list_str.endswith(','):
list_str = list_str[:-1]
list_str += ']'
return list_str
def int_parser(self, val):
return f'{val}'
def str_parser(self, val):
return json.dumps(val, ensure_ascii=False)
def bool_parser(self, val):
return json.dumps(val)
def none_parser(self, val):
return "null"
def children_parser(self, val):
self.indent += 1
children_str = '"children":['
for child in val:
child_str = self.parse(child)
children_str += '\n' + ' '*self.indent + child_str + ','
if children_str.endswith(','):
children_str = children_str[:-1]
children_str += ']'
self.indent -= 1
return children_str
| 32.569333 | 108 | 0.570598 |
ffd799753017a7a5791d769d9be74a366e8b94bb | 7,526 | py | Python | old/Main.py | gumbald/photobooth | 930527e3f9232a7bbf637d445f4e5ce77d5cb0ec | [
"MIT"
] | null | null | null | old/Main.py | gumbald/photobooth | 930527e3f9232a7bbf637d445f4e5ce77d5cb0ec | [
"MIT"
] | null | null | null | old/Main.py | gumbald/photobooth | 930527e3f9232a7bbf637d445f4e5ce77d5cb0ec | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#import RPi.GPIO as GPIO
#from PIL import Image
import subprocess
from time import gmtime, strftime, sleep
import logging
import RPi.GPIO as GPIO
#from ImageManipulation import convert_photo_to_monochrome, crop_image_to_centre, create_image_montage, rotate_image
import InOut
import Defines as r
def flash_others():
GPIO.output(r.PIN_LED_PHOTO_RED, True)
GPIO.output(r.PIN_LED_PHOTO_YEL, True)
sleep(1)
GPIO.output(r.PIN_LED_PHOTO_RED, False)
GPIO.output(r.PIN_LED_PHOTO_YEL, False)
def actuate_camera_shutter():
'''
Actuates the camera and downloads the image onto the Raspberry Pi
:return: the filepath of the photo taken
'''
image_name = "photobooth_" + strftime("%Y-%m-%d_%H%M%S", gmtime()) + ".jpg"
image_filepath = r.FOLDER_PHOTOS_ORIGINAL + image_name
gpout = ""
try:
gpout = subprocess.check_output("gphoto2 --capture-image-and-download --filename " + image_filepath, stderr=subprocess.STDOUT, shell=True)
# CalledProcessError is raised when the camera is turned off (or battery dies?)
if "ERROR" in gpout:
print gpout
logging.error(gpout)
raise IOError("Not able to take photo as the command failed for photo " + image_filepath)
except subprocess.CalledProcessError as e:
logging.error("Unable to take photo, likely due to camera not responding - check batteries")
logging.error(e)
raise
except Exception as e:
logging.error("Unable to take photo as the command failed for photo " + image_filepath)
logging.error(e)
raise
else:
return image_filepath
def convert_image(original_image_filepath):
''' Transforms an image into the format of the image to be included in the montage and saves it to the folder
:param: The filepath of the image to be converted
:return: The filepath of the converted image
'''
original_image = Image.open(original_image_filepath, 'r')
rotated_image = rotate_image(original_image)
cropped_image = crop_image_to_centre(rotated_image)
monochrome_image = convert_photo_to_monochrome(cropped_image)
image_name = "photobooth_" + strftime("%Y-%m-%d_%H%M%S", gmtime()) + ".jpg"
converted_image_filepath = r.FOLDER_PHOTOS_CONVERTED + image_name
monochrome_image.save(converted_image_filepath, 'JPEG')
# Append the filepath to the textfile directory to allow the slideshow to capture it
text_file = open(r.SLIDESHOW_IMAGE_DIRECTORY, "a")
text_file.write(converted_image_filepath + "\n")
text_file.close()
return converted_image_filepath
def take_photo(gpio_pin_number):
'''
:param gpio_pin_number: The Pi's GPIO pin number to signal to the user that the photo is being taken
:return: The filepath of the new image
'''
photo_location = ""
InOut.led_take_photo(gpio_pin_number)
try:
photo_location = actuate_camera_shutter()
except subprocess.CalledProcessError:
InOut.script_event_indicator()
print "Unable to take photo as camera is not turned on or battery is dead"
logging.error("Unable to take photo as camera is not turned on or battery is dead")
raise Exception("Camera is not responding, battery is dead or camera is not turned on")
else:
return photo_location
def photobooth_main():
'''
Takes 4 photos in a series, tweets each one and then saves the individual files plus a vertical montage onto the pi
:return: True/False on whether the process was successful
'''
process_success = False
photo_file_locations_original = []
photo_file_locations_converted = []
# Take photo 1
photo_location = take_photo(r.PIN_LED_PHOTO_YEL)
photo_file_locations_original.append(photo_location)
print photo_file_locations_original
try:
# Turn on the "waiting" LED
InOut.turn_off_all_leds([r.PIN_LED_PHOTO_RED, r.PIN_LED_PHOTO_YEL, r.PIN_LED_PHOTO_GRE])
GPIO.output(r.PIN_LED_PHOTO_RED, True)
# Convert each of the captured photos into a standardised format
#for photo in photo_file_locations_original:
# mono_photo_filepath = convert_image(photo)
# photo_file_locations_converted.append(mono_photo_filepath)
sleep(2)
# Create a montage of the captured photos and save them down
#montage_image_name = "photobooth_" + strftime("%Y-%m-%d_%H%M%S", gmtime()) + ".jpg"
#montage_filepath = r.FOLDER_PHOTOS_MONTAGE + montage_image_name
#montage = create_image_montage(photo_file_locations_converted)
#montage.save(montage_filepath, 'JPEG')
#Turn off the "waiting" light
GPIO.output(r.PIN_LED_PHOTO_RED,False)
process_success = True
except Exception, e:
raise Exception(e)
finally:
# Turn off all photo LEDs and reset the 'wait' LED
InOut.turn_off_all_leds([r.PIN_LED_PHOTO_RED, r.PIN_LED_PHOTO_YEL, r.PIN_LED_PHOTO_GRE])
# Upload the montage into a public space where people can view the photos
return process_success
# MAIN ENTRY POINT OF PROGRAM
# Switch logic taken from http://razzpisampler.oreilly.com/ch07.html
# Set up logging for the process
logging.basicConfig(filename="Photobooth_Log.txt",
level=logging.DEBUG,
format = '%(levelname)s: %(asctime)s %(message)s',
datefmt='%m/%d/%Y %I:%M:%S')
logging.info("Photobooth main.py started")
# Set up the various GPIO pins on the Raspberry Pi
# Broad numbering convention for naming the pins
GPIO.setmode(GPIO.BCM)
# Output LEDs used for the photos taken
GPIO.setup(r.PIN_LED_PHOTO_RED, GPIO.OUT)
GPIO.setup(r.PIN_LED_PHOTO_YEL, GPIO.OUT)
GPIO.setup(r.PIN_LED_PHOTO_GRE, GPIO.OUT)
# LEDs used to indicate status to the user
#GPIO.setup(r.PIN_LED_PHOTO_READY, GPIO.OUT)
#GPIO.setup(r.PIN_LED_PHOTO_WAIT, GPIO.OUT)
# Indicate to the user that the script has started sucessfully by flashing all LEDs
InOut.script_event_indicator()
# Setup the input pin
# Sets the default of the pin as 'high'
# Pressing the switch drops the pin to 0v
GPIO.setup(r.PIN_SWITCH_IN, GPIO.IN, pull_up_down=GPIO.PUD_UP)
try: # Wrap in try loop in order to include KeyboardInterupt exception catch
while True: # Constantly cycles throuch script, waiting for the trigger event
# Activate the "ready" LED
GPIO.output(r.PIN_LED_PHOTO_GRE,True)
input_state = GPIO.input(r.PIN_SWITCH_IN)
if input_state == False:
# Deactivate the "ready" LED
GPIO.output(r.PIN_LED_PHOTO_GRE, False)
try:
photobooth_main()
#flash_others()
except Exception, e:
logging.error("photobooth_main.py failed to run correctly")
logging.error(e)
# TODO: Include status information at the point the script failed
InOut.script_event_indicator()
# Send error report somehow
else:
logging.debug("Photo montage created successfully")
finally:
sleep(0.2) #Used to prevent 'switch bounce'
except KeyboardInterrupt:
print "User ended process with KeyboardInterupt"
InOut.turn_off_all_leds([r.PIN_LED_PHOTO_RED, r.PIN_LED_PHOTO_YEL, r.PIN_LED_PHOTO_GRE])
logging.debug("Process interupted by keyboard interupt")
GPIO.cleanup()
| 33.300885 | 146 | 0.694659 |
9a0e04291dde8a4e9501704f77104a659166687b | 2,232 | py | Python | vyper/ast/utils.py | upgradvisor/vyper | 642884ea938a25793c1b2fac866e8458e63a7b49 | [
"Apache-2.0"
] | 1,471 | 2017-12-25T05:47:57.000Z | 2019-11-19T07:47:53.000Z | vyper/ast/utils.py | upgradvisor/vyper | 642884ea938a25793c1b2fac866e8458e63a7b49 | [
"Apache-2.0"
] | 915 | 2019-11-21T05:48:16.000Z | 2022-03-31T23:51:03.000Z | vyper/ast/utils.py | upgradvisor/vyper | 642884ea938a25793c1b2fac866e8458e63a7b49 | [
"Apache-2.0"
] | 321 | 2017-12-25T16:37:21.000Z | 2019-11-15T17:44:06.000Z | import ast as python_ast
from typing import Dict, List, Optional, Union
from vyper.ast import nodes as vy_ast
from vyper.ast.annotation import annotate_python_ast
from vyper.ast.pre_parser import pre_parse
from vyper.exceptions import CompilerPanic, ParserException, SyntaxException
def parse_to_ast(
    source_code: str, source_id: int = 0, contract_name: Optional[str] = None
) -> vy_ast.Module:
    """
    Parse a Vyper source string into basic (untyped, unoptimized) Vyper AST nodes.

    Parameters
    ----------
    source_code : str
        The Vyper source code to parse.
    source_id : int, optional
        Source id to use in the `src` member of each node.
    contract_name : str, optional
        Contract name, forwarded to the AST annotation pass.

    Returns
    -------
    vy_ast.Module
        Untyped, unoptimized Vyper AST nodes.
    """
    # Null bytes are rejected outright: they cannot appear in valid source.
    if "\x00" in source_code:
        raise ParserException("No null bytes (\\x00) allowed in the source code.")
    class_types, reformatted_code = pre_parse(source_code)
    try:
        parsed = python_ast.parse(reformatted_code)
    except SyntaxError as err:
        # TODO: Ensure 1-to-1 match of source_code:reformatted_code SyntaxErrors
        raise SyntaxException(str(err), source_code, err.lineno, err.offset) from err
    annotate_python_ast(parsed, source_code, class_types, source_id, contract_name)
    # Hand the annotated python AST to the Vyper node factory.
    return vy_ast.get_node(parsed)  # type: ignore
def ast_to_dict(ast_struct: Union[vy_ast.VyperNode, List]) -> Union[Dict, List]:
    """
    Convert a Vyper AST node, or list of nodes, into dictionaries suitable
    for presenting to the user.
    """
    if isinstance(ast_struct, list):
        return [node.to_dict() for node in ast_struct]
    if isinstance(ast_struct, vy_ast.VyperNode):
        return ast_struct.to_dict()
    raise CompilerPanic(f'Unknown Vyper AST node provided: "{type(ast_struct)}".')
def dict_to_ast(ast_struct: Union[Dict, List]) -> Union[vy_ast.VyperNode, List]:
    """
    Convert an AST dict, or list of dicts, back into Vyper AST node objects.
    """
    if isinstance(ast_struct, list):
        return [vy_ast.get_node(entry) for entry in ast_struct]
    if isinstance(ast_struct, dict):
        return vy_ast.get_node(ast_struct)
    raise CompilerPanic(f'Unknown ast_struct provided: "{type(ast_struct)}".')
| 34.875 | 86 | 0.694892 |
3ce4ae8f34146986c57b87f3419f5cc5089fb81e | 1,788 | py | Python | synth/common/utils.py | DevicePilot/synth | c2b2345322e86c8dfac101ff375421e7dcd5f1b2 | [
"MIT"
] | 2 | 2021-03-15T18:02:46.000Z | 2021-12-09T20:54:25.000Z | synth/common/utils.py | DevicePilot/synth | c2b2345322e86c8dfac101ff375421e7dcd5f1b2 | [
"MIT"
] | 2 | 2017-04-18T08:41:49.000Z | 2017-10-04T15:13:06.000Z | synth/common/utils.py | DevicePilot/synth | c2b2345322e86c8dfac101ff375421e7dcd5f1b2 | [
"MIT"
] | 3 | 2018-02-06T17:07:06.000Z | 2020-01-27T17:05:11.000Z | #!/usr/bin/env python
#
# UTILS
#
# Copyright (c) 2017 DevicePilot Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import hashlib
def hashIt(n, limit):
    """Given a number or a string, return a non-obviously-correlated number in [0, limit).

    For ints a deterministic digit-scramble is used; for anything else the
    built-in hash() is used (note: NOT stable across interpreter runs for
    strings — use consistent_hash() for run-to-run stability).

    Bug fix: negative ints previously crashed, because reversing the string
    of a negative number produced e.g. "321-" which int() rejects. The digit
    reversal is now done on the absolute value, preserving the sign.
    """
    # `type(n) is int` (not isinstance) deliberately excludes bool, matching
    # the original behaviour where True/False fall through to the hash() path.
    if type(n) is int:
        x = n * 19079  # Prime
        # Reverse the decimal digits of |x|, then restore the sign.
        reversed_digits = int(str(abs(x))[::-1])
        x = -reversed_digits if x < 0 else reversed_digits
        n = n ^ x
        n = n * 7919  # Prime
        # Python's % with a positive modulus always yields a value in [0, limit).
        return n % limit
    else:
        return abs(hash(n)) % limit
def consistent_hash(s):
    """Map a string (or None, treated as "") to a float in [0, 1), consistently.

    Python's built-in hash() is salted per interpreter run, so SHA-256 is
    used instead to guarantee the same value across runs.
    """
    text = s if s is not None else ""
    digest = hashlib.sha256(text.encode('utf-8')).hexdigest()
    return int(digest, 16) % 10**8 / 10**8.0
| 39.733333 | 125 | 0.696868 |
c3c3cfdf9a94ed3aac92f6ea1d8329e0a0079e74 | 487 | py | Python | tests/test_high_vre_capacity_expansion.py | sriharisundar/high_vre_capacity_expansion | 8dd9beaee72a82c6057a6191694c70837f66e905 | [
"MIT"
] | null | null | null | tests/test_high_vre_capacity_expansion.py | sriharisundar/high_vre_capacity_expansion | 8dd9beaee72a82c6057a6191694c70837f66e905 | [
"MIT"
] | null | null | null | tests/test_high_vre_capacity_expansion.py | sriharisundar/high_vre_capacity_expansion | 8dd9beaee72a82c6057a6191694c70837f66e905 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Tests for `high_vre_capacity_expansion` package."""
import unittest
from high_vre_capacity_expansion import high_vre_capacity_expansion
class TestHigh_vre_capacity_expansion(unittest.TestCase):
    """Tests for `high_vre_capacity_expansion` package."""
    # NOTE(review): cookiecutter scaffold — all methods below are placeholders
    # with no fixtures or assertions yet.

    def setUp(self):
        """Set up test fixtures, if any."""

    def tearDown(self):
        """Tear down test fixtures, if any."""

    def test_000_something(self):
        """Test something."""
        # TODO: replace this placeholder with a real assertion.
| 22.136364 | 67 | 0.700205 |
34cc25f88fc084beae51b60bdb968df9497521c6 | 14,918 | py | Python | gscreen.py | pjamessteven/timetables | 9fe4ef53c6aef404e25abba766bec1380efe2818 | [
"MIT"
] | null | null | null | gscreen.py | pjamessteven/timetables | 9fe4ef53c6aef404e25abba766bec1380efe2818 | [
"MIT"
] | null | null | null | gscreen.py | pjamessteven/timetables | 9fe4ef53c6aef404e25abba766bec1380efe2818 | [
"MIT"
] | null | null | null | import os
import numpy as np
from PIL import Image, ImageQt, ImageDraw, ImageFont
import time
import datetime
import logging
logger = logging.Logger(name="main")
logger.setLevel(logging.DEBUG)
import achievements
class Score:
    """The game is scored using passenger kilometres. 1 passenger kilometre means 1 passenger was transported 1 km.
    2 passenger km mean either 1 passenger was transported 2 km, or 2 passengers transported 1 km, etc.
    The goal of the game is to reach 100,000,000 passenger kilometres before 2050, within the allocated budget."""

    def __init__(self):
        self.current_pkm = 0.0  # pkm accumulated during the current game year
        self.highscores = []  # placeholder: should be persisted with player name; maybe online scores?
        self.year = 2020
        self.previous_pkms = [0.0]  # per-year history; index = year - 2020

    def increase(self, amount):
        """Add earned passenger-kilometres to this year's running total."""
        self.current_pkm = self.current_pkm + amount

    def get_score(self):
        """Current year's pkm, rounded to a whole number."""
        return np.round(self.current_pkm, 0)

    def get_lastyear_score(self):
        """Previous year's pkm total, rounded to a whole number."""
        return np.round(self.previous_pkms[self.year - 2020], 0)

    def update_time(self, time):
        """Roll the score over when the game year advances (one year at a time)."""
        if time.year == self.year:
            return
        assert time.year == self.year + 1  # the year must not increase by more than 1 at a time
        self.year = time.year
        self.previous_pkms.append(self.current_pkm)
        self.current_pkm = 0.0
class Wallet:
    """Tracks the player's money plus an in-memory transaction log.

    An overdraft facility is modelled by ``overdraft`` (0 by default):
    withdrawals succeed while ``money + overdraft`` covers the amount.
    ``overdraft_interest_rate`` is currently unused (reserved for future
    interest charges).
    """

    def __init__(self, starting_amount=5e7):
        self.file = None  # set by set_save_dir(); stays None until then
        self.money = starting_amount
        self.overdraft = 0
        self.overdraft_interest_rate = 0.1  # NOTE: not applied anywhere yet
        self.account_records = []  # each entry: [date, kind, amount, details]

    def get_balance(self):
        """Return the current balance rounded to 2 decimal places (cents)."""
        return np.round(self.money, 2)

    def addsubtract(self, amount, datestr, details="No transaction details recorded"):
        """Apply a signed transaction; positive deposits, negative withdraws.

        Returns True on success, False if a withdrawal would exceed the
        balance plus overdraft. A zero amount is a successful no-op.
        """
        if len(self.account_records) > 20:  # stop using too much memory
            self.save_records()
        if amount > 0:
            return self.deposit(amount, details, datestr)
        elif amount < 0:
            # withdraw() takes abs() of the amount, so the sign is corrected there
            return self.withdraw(amount, details, datestr)
        else:
            return True  # no money to add so no transaction

    def withdraw(self, amount, details, date):
        """Withdraw |amount|; returns True if balance + overdraft covers it."""
        amount = np.abs(amount)  # to withdraw a negative amount, use a deposit!
        if self.money + self.overdraft >= amount:
            self.money -= amount
            self.account_records.append([date, "withdrawal", amount, details])
            return True  # withdrawal successful
        return False  # withdrawal failed

    def deposit(self, amount, details, date):
        """Deposit |amount|; always succeeds and returns True."""
        amount = np.abs(amount)  # to deposit a negative amount, use a withdrawal!
        self.money += amount
        self.account_records.append([date, "deposit", amount, details])
        return True

    def get_records(self):
        """Return previously-saved records (from disk) plus in-memory ones.

        Bug fix: previously raised TypeError when set_save_dir() had never
        been called (os.path.exists(None)); the file is now simply skipped.
        """
        file_records = []
        if self.file is not None and os.path.exists(self.file):
            with open(self.file, "r") as f:
                for record in f.readlines():
                    file_records.append(record.strip(" \n").split(","))
        return file_records + self.account_records

    def set_save_dir(self, save_dir):
        """Point the wallet at its on-disk record file inside save_dir."""
        self.file = os.path.join(save_dir, "monies.csv")

    def save_records(self):
        """Flush the in-memory record buffer.

        NOTE: persisting to disk is currently disabled (handed off to a save
        manager in the future); records are serialised then dropped:
            with open(self.file, "a") as f: f.write(txt)
        """
        txt = ""
        for entry in self.account_records:
            txt += f"{entry[0]}, {entry[1]}, {entry[2]}, {entry[3]}\n"
        del self.account_records
        self.account_records = []
class ServiceColours:
    """Hands out palette indices to routes and remembers which route owns which slot."""

    def __init__(self):
        self.current_number = 0  # next palette slot to hand out
        # Fixed palette of RGB tuples; index i belongs to the i-th registered route.
        self.colours = [(36,160,167),
                        (8,194,157),
                        (78,208,122),
                        (238,222,120),
                        (238, 120, 120),
                        (13,73,70),
                        (34,108,81),
                        (255,145,77),
                        (170,50,50),
                        (109,109,109),
                        (150, 45, 62),
                        (151, 156, 156),
                        (52, 136, 153),
                        (136, 247, 226),
                        (68, 212, 146),
                        (245, 235, 103),
                        (255, 161, 92),
                        (250, 35, 62)
                        ]
        self.routes = []  # routes in slot order; removed routes become "DEFUNCT"

    def get_colour_number(self, route):
        """Register route and return the palette index assigned to it."""
        self.routes.append(route)
        assigned = self.current_number
        self.current_number = assigned + 1
        return assigned

    def remove(self, route):
        """Mark the given route (matched by identity) as defunct, keeping its slot."""
        for index, existing in enumerate(self.routes):
            if existing is route:
                self.routes[index] = "DEFUNCT"
        print(self.routes)
def get_sort_key(t):
    """Sort key for towns: orders by population (used by Map to sort its town list)."""
    population = t.population
    return population
class Map:
    """Owns the in-game clock and renders the game map (towns, track, routes,
    HUD text) to a PIL image. Holds references to the Wallet/Score/colours so
    the HUD can display live values.
    """

    def __init__(self, width, height, towns, wallet, score, colours):
        # Base map layer (towns + track); rebuilt only when flagged dirty.
        self.map_image = None
        self.map_image_needs_update = True
        # Composite image handed to the UI each frame.
        self.img = Image.new("RGBA", (width, height), color=(255, 255, 255, 255))
        self.game_time = datetime.datetime.strptime("01012020 0000", "%d%m%Y %H%M")
        self.tick_rate = 0.99
        self.wallet = wallet  # we need a pointer to the wallet to know what acc balance to display
        self.score = score
        self.colours = colours
        self.time_increment = datetime.timedelta(days=1)  # game time advanced per tick
        # Parallel lists: "TownA-TownB" ids and the route colour drawn for each.
        self.connection_ids = []
        self.connection_colours = []
        # Towns whose names get a label (deduplicated by proximity on first draw).
        self.town_draw_name = []
        self.town_draw_name_complete = False
        self.achievement_display_text = ""
        self.achievement_display_counter = 0  # ticks remaining to show the banner
        self.legend_key = []
        self.legend_colour = []
        '''These represent the bounding latitude and longitude of the area of the world we want to
        draw. Needs to be altered for each country.
        todo: externalise bounding boxes to a file which is loaded so versions can be made for other countries'''
        self.bounding_box_aotearoa = [166, 178.6, -47, -34]
        self.bounding_box_te_waipounamu = [166, 175, -47, -40.4]
        self.bounding_box_te_ika = [173, 178.6, -41.5, -34]
        self.percent_connected = 0.0
        self.current_bounding_box = self.bounding_box_aotearoa
        self.towns = towns
        self.report_mode = False
        self.redraw()
        # State for the temporary year-skip time increment (see skip_year()).
        self.about_to_skip_year = False
        self.previous_time_increment = None
        self.win_conditions = achievements.Conditions()
        self.towns.sort(key=get_sort_key)

    def skip_year(self):
        """Arrange for the next tick to jump straight to 1 Jan of next year.

        The normal increment is stashed and restored by update_time() once
        the jump has happened. Returns the year being skipped to.
        """
        new_year = self.game_time.year + 1
        self.previous_time_increment = self.time_increment
        self.time_increment = datetime.datetime.strptime(f"01-01-{new_year} 00:00", "%d-%m-%Y %H:%M") - self.game_time
        self.about_to_skip_year = True
        return new_year

    def update_connection_ids(self):
        """ This creates connection ids (of form `Dunedin-Mosgiel` etc) used to identify which colours to
        use when drawing links between towns."""
        self.connection_ids = []
        self.connection_colours = []
        for (col, route) in zip(self.colours.colours, self.colours.routes):
            if route != "DEFUNCT":
                startNode = route.stations[0]
                endNode = route.stations[len(route.stations) - 1]
                path = startNode.getNodesOnPath(endNode)
                # Each adjacent pair of towns on the path gets this route's colour.
                for i in range(1, len(path)):
                    self.connection_ids.append(path[i - 1].get_name() + '-' + path[i].get_name())
                    self.connection_colours.append(col)
        print(self.connection_colours)  # NOTE(review): debug output left in
        self.map_image_needs_update = True
        self.redraw()

    def load_wallet_score(self, wallet, score):
        """This is for reloading the wallet and score after unpickling them"""
        self.wallet = wallet
        self.score = score

    def change_bounding_box(self, id):
        """Switch the visible region: 'north', 'south', or 'nz' (whole country)."""
        if id == "north":
            self.current_bounding_box = self.bounding_box_te_ika
        elif id == "south":
            self.current_bounding_box = self.bounding_box_te_waipounamu
        elif id == "nz":
            self.current_bounding_box = self.bounding_box_aotearoa
        self.map_image_needs_update = True

    def convert_latlgn_to_xy(self, latlgn):
        """Project a (lat, lon) pair to pixel (x, y) within the current bounding box.

        Simple linear (equirectangular) mapping; y is flipped because image
        coordinates grow downwards while latitude grows northwards.
        """
        deltaX = self.current_bounding_box[1] - self.current_bounding_box[0]
        deltaY = self.current_bounding_box[3] - self.current_bounding_box[2]
        x = self.img.width * (latlgn[1] - self.current_bounding_box[0]) / deltaX
        y = self.img.height * (1 - (latlgn[0] - self.current_bounding_box[2]) / deltaY)
        return x, y

    def update_percent_connected(self, fraction):
        # Stored as a percentage (0-100) for HUD display.
        self.percent_connected = fraction * 100

    def get_increment(self):
        return self.time_increment

    def get_time(self):
        return self.game_time

    def change_speed(self, state):
        """Toggle fast-forward: True = ~50 game-years/hour of wall time, False = 1 day/tick."""
        if state:
            self.time_increment = datetime.timedelta(days=(18250 / (3600 * self.tick_rate)))
        else:
            self.time_increment = datetime.timedelta(days=1)

    def get_connection_colour(self, connection_id):
        """Return the list of route colours serving this town-pair id (either
        direction), or None if no route uses the connection."""
        connection_reversed = connection_id.split("-")[1] + "-" + connection_id.split("-")[0]
        colours_for_this_connection = []
        for (conn, col) in zip(self.connection_ids, self.connection_colours):
            if connection_id == conn:
                colours_for_this_connection.append(col)
            if connection_reversed == conn:
                colours_for_this_connection.append(col)
        if len(colours_for_this_connection) == 0:
            return None
        else:
            return colours_for_this_connection

    def add_legend(self, key, colour):
        """Add a legend entry once; duplicates are ignored."""
        if not key in self.legend_key:
            self.legend_key.append(key)
            self.legend_colour.append(colour)

    def redraw(self):
        """Rebuild the base map layer if dirty, then composite the HUD on top."""
        t = self.game_time.strftime("%d/%m/%Y, %H:%M")
        money = "${:,}".format(self.wallet.get_balance())
        score = "{:,} pkm this year, {:,} pkm last year".format(self.score.get_score(), self.score.get_lastyear_score())
        percent_connected = "{:.1f} % towns serviced".format(self.percent_connected)
        font = ImageFont.truetype(os.path.join('assets', 'fonts', 'Raleway-Medium.ttf'), 12)
        font2 = ImageFont.truetype(os.path.join('assets', 'fonts', 'Arvo-Bold.ttf'), 14)
        if self.map_image_needs_update:
            self.map_image_needs_update = False
            self.map_image = Image.new("RGBA", (self.img.width, self.img.height), color=(255, 255, 255, 0))
            draw_map = ImageDraw.Draw(self.map_image)
            for town in self.towns:
                x, y = self.convert_latlgn_to_xy(town.get_latlgn())
                draw_map.ellipse((x - 2, y - 2, x + 2, y + 2), fill=(200, 200, 200), outline=(0, 0, 0))
                '''If there are nearby towns, we only draw the text for the most populous town.'''
                if not self.town_draw_name_complete:
                    should_add = True
                    for town2 in self.town_draw_name:
                        x2, y2 = self.convert_latlgn_to_xy(town2.get_latlgn())
                        w, h = font.getsize(town.get_name())
                        if x-w-10 < x2 < x+w+10 and y-h/2-10 < y2 < y+h/2+10:
                            should_add = False
                    if should_add:
                        self.town_draw_name.append(town)
                for conn in town.connections:
                    connection_id = town.get_name() + '-' + conn[0].get_name()
                    x2, y2 = self.convert_latlgn_to_xy(conn[0].get_latlgn())
                    line_colour = self.get_connection_colour(connection_id)
                    if line_colour is None:
                        # Track exists but no route runs on it: draw in grey.
                        draw_map.line((x, y, x2, y2), fill=(200,200,200), width=1)
                        self.add_legend("Unused track", (200, 200, 200))
                    else:
                        """So we should get the normal vector and draw a colour line at intervals spaced along the normal vector
                        (one parallel line per route sharing this connection)."""
                        connection_vector = np.array([x-x2, y-y2])
                        normal_vector = np.array([1, -connection_vector[0]/connection_vector[1]])
                        normal_vector /= np.linalg.norm(normal_vector)  # normalise the vector
                        normal_vector *= 3  # we want 3 pixels between each coloured connection line.
                        for i, colour in enumerate(line_colour):
                            draw_map.line((x + i*normal_vector[0], y + i*normal_vector[1], x2 + i*normal_vector[0],
                                           y2 + i*normal_vector[1]), fill=colour, width=2)
            for town in self.town_draw_name:
                x, y = self.convert_latlgn_to_xy(town.get_latlgn())
                w, h = font.getsize(town.get_name())
                draw_map.text((x, y-h/2), town.get_name(), font=font, fill=(0, 0, 0, 255))
            for i, (key, colour) in enumerate(zip(self.legend_key, self.legend_colour)):
                draw_map.text((100,300+(i*30)), key, font=font, fill=(0, 0, 0, 255))
                draw_map.rectangle((90,300+(i*30) - 2, 98, 300+(i*30) + 2), colour)
            self.town_draw_name_complete = True  # it will only generate the list (expensive) on first run
        self.img = Image.new("RGBA", (self.img.width, self.img.height), color=(255, 255, 255, 0))
        Image.Image.paste(self.img, self.map_image)
        draw = ImageDraw.Draw(self.img)
        draw.text((100,50), t, font=font2, fill=(0, 0, 0, 255))
        draw.text((300,50), money, font=font2, fill=(0, 0, 0, 255))
        draw.text((100, 100), score, font=font2, fill=(0,0,0,255))
        draw.text((100, 150), percent_connected, font=font2, fill=(0, 0, 0, 255))
        if self.achievement_display_counter > 0:
            draw.text((int(self.img.width/2)-100, self.img.height - 100), self.achievement_display_text, font=font2, fill=(0, 0, 0, 255))

    def display_new_achievement(self, achieves):
        """Show a banner listing newly-completed achievements for a few ticks."""
        if len(achieves) > 0:
            self.achievement_display_text = ""
            for ach in achieves:
                self.achievement_display_text += "Achievement Completed: " + ach + "\n"
            self.redraw()
            self.achievement_display_counter = 5  # ticks the banner stays visible

    def update_time(self, tick_rate):
        """Advance the game clock one increment, refresh the display, roll the
        score over at year boundaries, and return win-condition check results."""
        self.game_time += self.time_increment
        if self.about_to_skip_year:
            # One-shot jump performed; restore the normal increment.
            self.about_to_skip_year = False
            self.time_increment = self.previous_time_increment
        self.redraw()
        self.score.update_time(self.game_time)
        if self.achievement_display_counter > 0:
            self.achievement_display_counter -= 1
        return self.win_conditions.do_checks(self.game_time, self.percent_connected, self.score)

    def get_image_qt(self):
        """Return the current composite frame as a Qt image for the UI."""
        return ImageQt.toqimage(self.img)
| 44.267062 | 137 | 0.588149 |
6dc6a5a0af6cf1f8d6b412a4fdc8c696842fead4 | 18,886 | py | Python | tools/bdconfig/lib/google_hadoop/bdconfig_lib/config_commands_test.py | zulily/bigdata-interop | 69906bbe2e5fcde9583f63a8f2c93bde3edfd512 | [
"Apache-2.0"
] | null | null | null | tools/bdconfig/lib/google_hadoop/bdconfig_lib/config_commands_test.py | zulily/bigdata-interop | 69906bbe2e5fcde9583f63a8f2c93bde3edfd512 | [
"Apache-2.0"
] | null | null | null | tools/bdconfig/lib/google_hadoop/bdconfig_lib/config_commands_test.py | zulily/bigdata-interop | 69906bbe2e5fcde9583f63a8f2c93bde3edfd512 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for config_commands.py."""
import path_initializer
path_initializer.InitializeSysPath()
import copy
import os
import gflags as flags
from google.apputils import basetest
import unittest
from bdconfig_lib import config_commands
from bdconfig_lib import xml_configuration
FLAGS = flags.FLAGS
class ConfigCommandsTestBase(unittest.TestCase):
  """Shared fixture for bdconfig command tests.

  Creates a fake hadoop conf directory under FLAGS.test_tmpdir with empty
  *-site.xml files, a stub hadoop-env.sh and a fake jarfile, plus a deep
  copy of FLAGS that each derived class points its command instance at.
  Derived classes set self._test_cmd and override _SetDefaultValidFlags().
  """

  def setUp(self):
    # We will pretend FLAGS.test_tmpdir is the hadoop installation dir.
    self._conf_dir = os.path.join(FLAGS.test_tmpdir, 'conf')
    if not os.path.exists(self._conf_dir):
      os.makedirs(self._conf_dir)
    self._core_site_filename = os.path.join(self._conf_dir, 'core-site.xml')
    self._mapred_site_filename = os.path.join(self._conf_dir, 'mapred-site.xml')
    self._hdfs_site_filename = os.path.join(self._conf_dir, 'hdfs-site.xml')
    self._hadoop_env_filename = os.path.join(self._conf_dir, 'hadoop-env.sh')
    self._fake_jarfile = os.path.join(self._conf_dir, 'fake-jarfile.jar')
    self._CreateXmlFile(self._core_site_filename)
    self._CreateXmlFile(self._mapred_site_filename)
    self._CreateXmlFile(self._hdfs_site_filename)
    # hadoop-env.sh starts with a single commented-out classpath line.
    with open(self._hadoop_env_filename, 'w') as f:
      f.write(
          '#export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:' + self._fake_jarfile)
    with open(self._fake_jarfile, 'w') as f:
      f.write('Fake jarfile.')
    self._flag_values_copy = copy.deepcopy(FLAGS)
    # Derived classes define self._test_cmd for their particular appcommand.
    self._test_cmd = None

  def testValidationFailsAfterRegistrationBeforeSettingValues(self):
    """Check flag validation fails after appcommand has registered its flags."""
    if self._test_cmd:
      # Empty flags right after setup/registration should fail validator.
      with self.assertRaises(flags.IllegalFlagValue):
        self._flag_values_copy._AssertAllValidators()

  def testDryRun(self):
    """Test --dry_run prevents modifications from being committed."""
    if not self._test_cmd:
      return
    self._SetDefaultValidFlags()
    self._flag_values_copy.dry_run = True
    self._test_cmd.Run(None)
    # Nothing added: both site files still empty, env file untouched.
    conf = xml_configuration.Configuration.FromFile(self._core_site_filename)
    self.assertEqual(0, conf.GetNumProperties())
    conf = xml_configuration.Configuration.FromFile(self._mapred_site_filename)
    self.assertEqual(0, conf.GetNumProperties())
    with open(self._hadoop_env_filename, 'r') as f:
      self.assertEqual(1, len(f.readlines()))

  def tearDown(self):
    # Remove the fixture files so each test starts from a clean conf dir.
    os.remove(self._core_site_filename)
    os.remove(self._mapred_site_filename)
    os.remove(self._hdfs_site_filename)
    os.remove(self._hadoop_env_filename)

  def _SetDefaultValidFlags(self):
    """Concrete classes should override to provide set of valid flags."""
    pass

  def _CreateXmlFile(self, filename):
    """Helper for creating empty XML file.

    Args:
      filename: The fully-qualified file name to create.
    """
    with open(filename, 'w') as f:
      f.write('<?xml version="1.0"?>')
      f.write('<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>')
      f.write('<configuration/>')

  def _ValidateFlagFailures(self, failure_modes):
    """Helper for iterating over a list of expected illegal flag values.

    Args:
      failure_modes: List of flag/value pairs which should cause an error.
    """
    # Validators fire on flag assignment (__setattr__), hence the direct set.
    for failure_arg in failure_modes:
      with self.assertRaises(flags.IllegalFlagValue):
        print 'Validating expected failure setting %s = %s' % (
            failure_arg[0], failure_arg[1])
        self._flag_values_copy.__setattr__(failure_arg[0], failure_arg[1])
class ConfigureHadoopTest(ConfigCommandsTestBase):
  """Tests for the configure_hadoop appcommand."""

  def setUp(self):
    super(ConfigureHadoopTest, self).setUp()
    # Before instantiating the command instance, there are no required flags.
    self._flag_values_copy._AssertAllValidators()
    self._test_cmd = config_commands.ConfigureHadoop(
        'configure_hadoop', self._flag_values_copy)
    # Point the command instance at our own flags reference for runtime values.
    self._test_cmd.SetFlagsInstance(self._flag_values_copy)

  def testFlagValidation(self):
    """Test basic flag failures for configure_hadoop."""
    # Validators get invoked on __setattr__ (overload for '=').
    failure_modes = [
        ('hadoop_conf_dir', '/unreadable/path'),
        ('hadoop_conf_dir', None),
        ('java_home', '/unreadable/path'),
        ('java_home', None),
        ('hadoop_tmp_dir', '/unreadable/path'),
    ]
    self._ValidateFlagFailures(failure_modes)

  def testNormalOperation(self):
    """Test configure_hadoop values wire through to correct xml keys."""
    self._SetDefaultValidFlags()
    self._flag_values_copy.dry_run = False
    self._test_cmd.Run(None)
    # mapred-site.xml receives the six mapred.* properties.
    conf = xml_configuration.Configuration.FromFile(self._mapred_site_filename)
    self.assertEqual(6, conf.GetNumProperties())
    self.assertEquals('foo-host:9101', conf.GetPropertyValue(
        'mapred.job.tracker'))
    self.assertEquals('foo opts', conf.GetPropertyValue(
        'mapred.child.java.opts'))
    self.assertEquals('1', conf.GetPropertyValue(
        'mapred.map.tasks'))
    self.assertEquals('2', conf.GetPropertyValue(
        'mapred.reduce.tasks'))
    self.assertEquals('3', conf.GetPropertyValue(
        'mapred.tasktracker.map.tasks.maximum'))
    self.assertEquals('4', conf.GetPropertyValue(
        'mapred.tasktracker.reduce.tasks.maximum'))
    # core-site.xml receives only hadoop.tmp.dir.
    conf = xml_configuration.Configuration.FromFile(self._core_site_filename)
    self.assertEqual(1, conf.GetNumProperties())
    self.assertEquals(FLAGS.test_tmpdir, conf.GetPropertyValue(
        'hadoop.tmp.dir'))
    # One line appended to hadoop-env.sh (JAVA_HOME export).
    with open(self._hadoop_env_filename, 'r') as f:
      self.assertEqual(2, len(f.readlines()))

  def _SetDefaultValidFlags(self):
    # Good set of flags.
    self._flag_values_copy.hadoop_conf_dir = self._conf_dir
    self._flag_values_copy.java_home = FLAGS.test_tmpdir
    self._flag_values_copy.job_tracker_uri = 'foo-host:9101'
    self._flag_values_copy.java_opts = 'foo opts'
    self._flag_values_copy.default_num_maps = 1
    self._flag_values_copy.default_num_reduces = 2
    self._flag_values_copy.map_slots = 3
    self._flag_values_copy.reduce_slots = 4
    self._flag_values_copy.hadoop_tmp_dir = FLAGS.test_tmpdir
    self._flag_values_copy._AssertAllValidators()
class ConfigureGhfsTest(ConfigCommandsTestBase):
  """Tests for the configure_ghfs appcommand (GCS connector setup)."""

  def setUp(self):
    super(ConfigureGhfsTest, self).setUp()
    self._flag_values_copy._AssertAllValidators()
    self._test_cmd = config_commands.ConfigureGhfs(
        'configure_ghfs', self._flag_values_copy)
    # Registering the command adds required flags, so validation now fails.
    self.assertRaises(
        flags.IllegalFlagValue, self._flag_values_copy._AssertAllValidators)
    self._test_cmd.SetFlagsInstance(self._flag_values_copy)

  def testFlagValidation(self):
    """Test basic flag failures for configure_ghfs."""
    # Validators.
    failure_modes = [
        ('hadoop_conf_dir', '/unreadable/path'),
        ('hadoop_conf_dir', None),
        ('ghfs_jar_path', '/unreadable/path'),
        ('ghfs_jar_path', self._conf_dir),  # a directory is not a jarfile
        ('ghfs_jar_path', None),
        ('system_bucket', None),
        ('enable_service_account_auth', None),
        ('project_id', None),
    ]
    self._ValidateFlagFailures(failure_modes)

  def testNormalOperation(self):
    """Test configure_ghfs values wire through to correct xml keys."""
    self._SetDefaultValidFlags()
    self._flag_values_copy.dry_run = False
    self._test_cmd.Run(None)
    conf = xml_configuration.Configuration.FromFile(self._core_site_filename)
    self.assertEqual(6, conf.GetNumProperties())
    self.assertEquals('foo-bucket', conf.GetPropertyValue(
        'fs.gs.system.bucket'))
    self.assertIsNotNone(conf.GetPropertyValue(
        'fs.gs.impl'))
    self.assertEquals('true', conf.GetPropertyValue(
        'fs.gs.auth.service.account.enable'))
    self.assertEquals('google.com:myproject', conf.GetPropertyValue(
        'fs.gs.project.id'))
    self.assertEquals('1', conf.GetPropertyValue(
        'fs.gs.io.buffersize'))
    self.assertIsNotNone(conf.GetPropertyValue(
        'fs.gs.working.dir'))
    # One classpath line appended to hadoop-env.sh for the connector jar.
    with open(self._hadoop_env_filename, 'r') as f:
      self.assertEqual(2, len(f.readlines()))

  def testReconfiguringDoesntReappendLineToEnvFile(self):
    """Calling configure_ghfs twice shouldn't re-add lines to hadoop-env.sh."""
    self._SetDefaultValidFlags()
    self._flag_values_copy.dry_run = False
    self._test_cmd.Run(None)
    self._test_cmd.Run(None)
    with open(self._hadoop_env_filename, 'r') as f:
      self.assertEqual(2, len(f.readlines()))

  def testServiceAccountRequiresClientIdAndSecret(self):
    """Setting enable_service_account_auth adds extra validators."""
    self._SetDefaultValidFlags()
    self._flag_values_copy.dry_run = False
    # If we set enable_service_account_auth to False, we now require client_id
    # and client_secret.
    self._flag_values_copy.enable_service_account_auth = False
    self.assertRaises(flags.IllegalFlagValue, self._test_cmd.Run, None)
    self.assertRaises(
        flags.IllegalFlagValue, self._flag_values_copy.__setattr__,
        'client_id', None)
    self._flag_values_copy.client_id = 'my-client-id'
    # client_id alone is still not enough.
    self.assertRaises(flags.IllegalFlagValue, self._test_cmd.Run, None)
    self.assertRaises(
        flags.IllegalFlagValue, self._flag_values_copy.__setattr__,
        'client_secret', None)
    self._flag_values_copy.client_secret = 'my-client-secret'
    self._test_cmd.Run(None)
    # With both credentials set, two extra auth properties are written.
    conf = xml_configuration.Configuration.FromFile(self._core_site_filename)
    self.assertEqual(8, conf.GetNumProperties())
    self.assertEquals('false', conf.GetPropertyValue(
        'fs.gs.auth.service.account.enable'))
    self.assertEquals('my-client-id', conf.GetPropertyValue(
        'fs.gs.auth.client.id'))
    self.assertEquals('my-client-secret', conf.GetPropertyValue(
        'fs.gs.auth.client.secret'))

  def _SetDefaultValidFlags(self):
    self._flag_values_copy.hadoop_conf_dir = self._conf_dir
    self._flag_values_copy.ghfs_jar_path = self._fake_jarfile
    self._flag_values_copy.system_bucket = 'foo-bucket'
    self._flag_values_copy.ghfs_buffer_size = 1
    self._flag_values_copy.enable_service_account_auth = True
    self._flag_values_copy.project_id = 'google.com:myproject'
    # As long as enable_service_account_auth is True, we don't need client_id
    # and client_secret.
    self._flag_values_copy.client_id = None
    self._flag_values_copy.client_secret = None
    self._flag_values_copy._AssertAllValidators()
class ConfigureHdfsTest(ConfigCommandsTestBase):
  """Tests for the configure_hdfs appcommand."""

  def setUp(self):
    super(ConfigureHdfsTest, self).setUp()
    self._flag_values_copy._AssertAllValidators()
    self._test_cmd = config_commands.ConfigureHdfs(
        'configure_hdfs', self._flag_values_copy)
    # Registering the command adds required flags, so validation now fails.
    self.assertRaises(
        flags.IllegalFlagValue, self._flag_values_copy._AssertAllValidators)
    self._test_cmd.SetFlagsInstance(self._flag_values_copy)

  def testFlagValidation(self):
    """Test basic flag failures for configure_hdfs."""
    # Validators.
    failure_modes = [
        ('hadoop_conf_dir', '/unreadable/path'),
        ('hadoop_conf_dir', None),
        ('hdfs_data_dirs', None),
        ('hdfs_data_dirs', FLAGS.test_tmpdir + ' /unreadable.path'),
        ('hdfs_name_dir', None),
        ('hdfs_name_dir', 'conf'),  # relative path is rejected
        ('namenode_uri', None),
        ('namenode_uri', 'badscheme://localhost:8020/'),
        # Missing trailing slash.
        ('namenode_uri', 'hdfs://localhost:8020'),
    ]
    self._ValidateFlagFailures(failure_modes)

  def testNormalOperation(self):
    """Test configure_hdfs values wire through to correct xml keys."""
    self._SetDefaultValidFlags()
    self._flag_values_copy.dry_run = False
    self._test_cmd.Run(None)
    conf = xml_configuration.Configuration.FromFile(self._hdfs_site_filename)
    self.assertEqual(3, conf.GetNumProperties())
    # Multiple data dirs are serialized as a comma-joined list.
    self.assertEqual(
        ','.join(self._flag_values_copy.hdfs_data_dirs),
        conf.GetPropertyValue('dfs.data.dir'))
    self.assertEqual(
        self._flag_values_copy.hdfs_name_dir,
        conf.GetPropertyValue('dfs.name.dir'))
    self.assertEqual(
        self._flag_values_copy.namenode_uri,
        conf.GetPropertyValue('dfs.namenode.rpc-address'))

  def _SetDefaultValidFlags(self):
    self._flag_values_copy.hadoop_conf_dir = self._conf_dir
    self._flag_values_copy.hdfs_data_dirs = [FLAGS.test_tmpdir, self._conf_dir]
    self._flag_values_copy.hdfs_name_dir = '/tmp'
    self._flag_values_copy.namenode_uri = 'hdfs://localhost:8020/'
    self._flag_values_copy._AssertAllValidators()
class SetDefaultFileSystemTest(ConfigCommandsTestBase):
  """Tests for the set_default_fs config command.

  Exercises flag validation and verifies that the chosen default filesystem
  is wired into the expected XML configuration keys, including the fallback
  paths (fs.gs.system.bucket for gs, dfs.namenode.rpc-address for hdfs).
  """

  def setUp(self):
    super(SetDefaultFileSystemTest, self).setUp()
    # Sanity check: the copied flag values start out valid.
    self._flag_values_copy._AssertAllValidators()
    self._test_cmd = config_commands.SetDefaultFileSystem(
        'set_default_fs', self._flag_values_copy)
    # Constructing the command registers additional validators that the
    # still-default flag values do not yet satisfy.
    self.assertRaises(
        flags.IllegalFlagValue, self._flag_values_copy._AssertAllValidators)
    self._test_cmd.SetFlagsInstance(self._flag_values_copy)

  def testFlagValidation(self):
    """Test basic flag failures for set_default_fs."""
    # Validators.
    failure_modes = [
        ('hadoop_conf_dir', '/unreadable/path'),
        ('hadoop_conf_dir', None),
        ('default_fs', None),
    ]
    self._ValidateFlagFailures(failure_modes)

  def testNormalOperationWithSystemBucketFallback(self):
    """Test set_default_fs values wire through to correct xml keys."""
    self._SetDefaultValidFlags()
    self._flag_values_copy.dry_run = False
    self._flag_values_copy.default_bucket = None
    system_bucket = 'system-bucket'
    # Seed core-site.xml with fs.gs.system.bucket so the command can fall
    # back to it when --default_bucket is unset.
    conf = xml_configuration.Configuration.FromFile(self._core_site_filename)
    conf.Update({'fs.gs.system.bucket': system_bucket}, {})
    conf.WriteToFile(self._core_site_filename)
    self._test_cmd.Run(None)
    conf = xml_configuration.Configuration.FromFile(self._core_site_filename)
    self.assertEqual(2, conf.GetNumProperties())
    self.assertEqual(
        'gs://' + system_bucket + '/',
        conf.GetPropertyValue('fs.default.name'))

  def testSystemBucketFallbackFailure(self):
    """Without default_bucket, attempts and fails to fetch system_bucket."""
    self._SetDefaultValidFlags()
    self._flag_values_copy.dry_run = False
    self._flag_values_copy.default_bucket = None
    # Default_bucket fails to be set using fs.gs.system.bucket without the
    # corresponding value already present in the xml file.
    self.assertRaises(KeyError, self._test_cmd.Run, None)

  def testNormalOperationWithDefaultBucket(self):
    """With default_bucket provided, no fallback, check normal wiring."""
    self._SetDefaultValidFlags()
    self._flag_values_copy.dry_run = False
    self._flag_values_copy.default_bucket = 'some-other-bucket'
    system_bucket = 'system-bucket'
    # fs.gs.system.bucket is present but must be ignored in favor of the
    # explicit --default_bucket value.
    conf = xml_configuration.Configuration.FromFile(self._core_site_filename)
    conf.Update({'fs.gs.system.bucket': system_bucket}, {})
    conf.WriteToFile(self._core_site_filename)
    self._test_cmd.Run(None)
    conf = xml_configuration.Configuration.FromFile(self._core_site_filename)
    self.assertEqual(2, conf.GetNumProperties())
    self.assertEqual(
        'gs://' + self._flag_values_copy.default_bucket + '/',
        conf.GetPropertyValue('fs.default.name'))

  def testHdfsSpecificValidators(self):
    """If default_fs is hdfs, then we add extra validators."""
    self._SetDefaultValidFlags()
    self._flag_values_copy.dry_run = False
    # Using hdfs will invoke new validators.
    self._flag_values_copy.default_fs = 'hdfs'
    # namenode_uri fails to be set using hdfs-site.xml
    self.assertRaises(KeyError, self._test_cmd.Run, None)
    # The URI scheme must be hdfs://.
    self.assertRaises(
        flags.IllegalFlagValue, self._flag_values_copy.__setattr__,
        'namenode_uri', 'badscheme://localhost:8020/')
    # Missing trailing slash.
    self.assertRaises(
        flags.IllegalFlagValue, self._flag_values_copy.__setattr__,
        'namenode_uri', 'hdfs://localhost:8020')

  def testHdfsFallbackToNamenodeRpcAddressFailure(self):
    """With hdfs, absence of fs.default.name fetches rpc-address and fails."""
    self._SetDefaultValidFlags()
    self._flag_values_copy.dry_run = False
    self._flag_values_copy.default_fs = 'hdfs'
    # A bad scheme in the fallback key must fail validation.
    conf = xml_configuration.Configuration.FromFile(self._hdfs_site_filename)
    conf.Update({'dfs.namenode.rpc-address': 'badscheme://localhost:8020/'}, {})
    conf.WriteToFile(self._hdfs_site_filename)
    self.assertRaises(flags.IllegalFlagValue, self._test_cmd.Run, None)
    # A missing trailing slash must fail as well.
    conf.Update({'dfs.namenode.rpc-address': 'hdfs://localhost:8020'}, {})
    conf.WriteToFile(self._hdfs_site_filename)
    self.assertRaises(flags.IllegalFlagValue, self._test_cmd.Run, None)

  def testHdfsFallbackToNamenodeRpcAddressSuccess(self):
    """With hdfs, absence of fs.default.name successfully uses rpc-address."""
    self._SetDefaultValidFlags()
    self._flag_values_copy.dry_run = False
    self._flag_values_copy.default_fs = 'hdfs'
    self._flag_values_copy.namenode_uri = None
    namenode_uri = 'hdfs://localhost:8020/'
    conf = xml_configuration.Configuration.FromFile(self._hdfs_site_filename)
    conf.Update({'dfs.namenode.rpc-address': namenode_uri}, {})
    conf.WriteToFile(self._hdfs_site_filename)
    self._test_cmd.Run(None)
    conf = xml_configuration.Configuration.FromFile(self._core_site_filename)
    self.assertEqual(1, conf.GetNumProperties())
    self.assertEqual(
        namenode_uri, conf.GetPropertyValue('fs.default.name'))
    # An explicit --namenode_uri takes precedence over the rpc-address fallback.
    self._flag_values_copy.namenode_uri = 'hdfs://some-other-host:8020/'
    self._test_cmd.Run(None)
    conf = xml_configuration.Configuration.FromFile(self._core_site_filename)
    self.assertEqual(1, conf.GetNumProperties())
    self.assertEqual(
        self._flag_values_copy.namenode_uri,
        conf.GetPropertyValue('fs.default.name'))

  def _SetDefaultValidFlags(self):
    """Set a minimal validator-satisfying flag combination (gs backend)."""
    self._flag_values_copy.hadoop_conf_dir = self._conf_dir
    self._flag_values_copy.default_fs = 'gs'
    self._flag_values_copy.default_bucket = 'foo-bucket'
    self._flag_values_copy.namenode_uri = None
    self._flag_values_copy._AssertAllValidators()
if __name__ == '__main__':
unittest.main()
| 37.472222 | 80 | 0.73017 |
8adf7b655be894325d02eba779c4d1cf2445d08d | 5,686 | py | Python | tests/regressiontests/model_fields/models.py | yarko/django | 90b6240c8753ece3e52cafc37e1088b0646b843f | [
"BSD-3-Clause"
] | 1 | 2015-05-14T11:23:36.000Z | 2015-05-14T11:23:36.000Z | tests/regressiontests/model_fields/models.py | yarko/django | 90b6240c8753ece3e52cafc37e1088b0646b843f | [
"BSD-3-Clause"
] | null | null | null | tests/regressiontests/model_fields/models.py | yarko/django | 90b6240c8753ece3e52cafc37e1088b0646b843f | [
"BSD-3-Clause"
] | null | null | null | import os
import tempfile
# Try to import PIL in either of the two ways it can end up installed.
# Checking for the existence of Image is enough for CPython, but for PyPy,
# you need to check for the underlying modules.
try:
from PIL import Image, _imaging
except ImportError:
try:
import Image, _imaging
except ImportError:
Image = None
from django.core.files.storage import FileSystemStorage
from django.db import models
from django.db.models.fields.files import ImageFieldFile, ImageField
class Foo(models.Model):
    """Simple model exercising CharField and DecimalField."""
    a = models.CharField(max_length=10)
    d = models.DecimalField(max_digits=5, decimal_places=3)
def get_foo():
    """Return the Foo row with pk 1; used as a callable FK default below."""
    return Foo.objects.get(id=1)
class Bar(models.Model):
    """Model whose ForeignKey default is the callable get_foo."""
    b = models.CharField(max_length=10)
    a = models.ForeignKey(Foo, default=get_foo)
class Whiz(models.Model):
    """Model with optgroup-style nested choices plus a flat option."""
    # Choices may be nested: a (group-label, sub-choices) pair renders as an
    # option group, and a plain (value, label) pair can sit alongside them.
    CHOICES = (
        ('Group 1', (
            (1,'First'),
            (2,'Second'),
        )
        ),
        ('Group 2', (
            (3,'Third'),
            (4,'Fourth'),
        )
        ),
        (0,'Other'),
    )
    c = models.IntegerField(choices=CHOICES, null=True)
class BigD(models.Model):
    """DecimalField with very high precision (38 digits / 30 places)."""
    d = models.DecimalField(max_digits=38, decimal_places=30)
class BigS(models.Model):
    """SlugField longer than the default max_length."""
    s = models.SlugField(max_length=255)
class BigInt(models.Model):
    """BigIntegerField in both required and nullable flavors."""
    value = models.BigIntegerField()
    null_value = models.BigIntegerField(null = True, blank = True)
class Post(models.Model):
    """Plain CharField/TextField model."""
    title = models.CharField(max_length=100)
    body = models.TextField()
class NullBooleanModel(models.Model):
    """Model exposing a NullBooleanField (True/False/None)."""
    nbfield = models.NullBooleanField()
class BooleanModel(models.Model):
    """Model pairing a BooleanField with an unrelated CharField."""
    bfield = models.BooleanField()
    string = models.CharField(max_length=10, default='abc')
class RenamedField(models.Model):
    # The `name` kwarg overrides the attribute name, so the DB column/field
    # is "fieldname" even though the attribute is declared as `modelname`.
    modelname = models.IntegerField(name="fieldname", choices=((1,'One'),))
###############################################################################
# FileField
class Document(models.Model):
    """Minimal FileField model (upload_to is irrelevant for these tests)."""
    myfile = models.FileField(upload_to='unused')
###############################################################################
# ImageField
# If PIL available, do these tests.
if Image:
    # The ImageField tests below are only defined when PIL is importable.
    class TestImageFieldFile(ImageFieldFile):
        """
        Custom Field File class that records whether or not the underlying file
        was opened.
        """
        def __init__(self, *args, **kwargs):
            self.was_opened = False
            super(TestImageFieldFile, self).__init__(*args,**kwargs)
        def open(self):
            self.was_opened = True
            super(TestImageFieldFile, self).open()

    class TestImageField(ImageField):
        # Swap in the recording file class so tests can assert on opens.
        attr_class = TestImageFieldFile

    # Set up a temp directory for file storage.
    temp_storage_dir = tempfile.mkdtemp()
    temp_storage = FileSystemStorage(temp_storage_dir)
    temp_upload_to_dir = os.path.join(temp_storage.location, 'tests')

    class Person(models.Model):
        """
        Model that defines an ImageField with no dimension fields.
        """
        name = models.CharField(max_length=50)
        mugshot = TestImageField(storage=temp_storage, upload_to='tests')

    class PersonWithHeight(models.Model):
        """
        Model that defines an ImageField with only one dimension field.
        """
        name = models.CharField(max_length=50)
        mugshot = TestImageField(storage=temp_storage, upload_to='tests',
                                 height_field='mugshot_height')
        mugshot_height = models.PositiveSmallIntegerField()

    class PersonWithHeightAndWidth(models.Model):
        """
        Model that defines height and width fields after the ImageField.
        """
        name = models.CharField(max_length=50)
        mugshot = TestImageField(storage=temp_storage, upload_to='tests',
                                 height_field='mugshot_height',
                                 width_field='mugshot_width')
        mugshot_height = models.PositiveSmallIntegerField()
        mugshot_width = models.PositiveSmallIntegerField()

    class PersonDimensionsFirst(models.Model):
        """
        Model that defines height and width fields before the ImageField.
        """
        name = models.CharField(max_length=50)
        mugshot_height = models.PositiveSmallIntegerField()
        mugshot_width = models.PositiveSmallIntegerField()
        mugshot = TestImageField(storage=temp_storage, upload_to='tests',
                                 height_field='mugshot_height',
                                 width_field='mugshot_width')

    class PersonTwoImages(models.Model):
        """
        Model that:

        * Defines two ImageFields
        * Defines the height/width fields before the ImageFields
        * Has a nullable ImageField
        """
        name = models.CharField(max_length=50)
        mugshot_height = models.PositiveSmallIntegerField()
        mugshot_width = models.PositiveSmallIntegerField()
        mugshot = TestImageField(storage=temp_storage, upload_to='tests',
                                 height_field='mugshot_height',
                                 width_field='mugshot_width')
        headshot_height = models.PositiveSmallIntegerField(
            blank=True, null=True)
        headshot_width = models.PositiveSmallIntegerField(
            blank=True, null=True)
        headshot = TestImageField(blank=True, null=True,
                                  storage=temp_storage, upload_to='tests',
                                  height_field='headshot_height',
                                  width_field='headshot_width')
###############################################################################
| 34.670732 | 79 | 0.607281 |
9cfb46ed6b05cd0d4c7a16a81985cf1b4466d0db | 7,079 | py | Python | apps/combine-api/tests/run/test_run_get_simulators.py | freiburgermsu/biosimulations | 1c4f604f67c0924b58e1a3a45378c86bab7ace5b | [
"MIT"
] | 20 | 2021-09-05T02:47:07.000Z | 2022-01-25T10:46:47.000Z | apps/combine-api/tests/run/test_run_get_simulators.py | freiburgermsu/biosimulations | 1c4f604f67c0924b58e1a3a45378c86bab7ace5b | [
"MIT"
] | 1,884 | 2020-08-23T17:40:26.000Z | 2021-09-01T16:29:20.000Z | apps/combine-api/tests/run/test_run_get_simulators.py | freiburgermsu/biosimulations | 1c4f604f67c0924b58e1a3a45378c86bab7ace5b | [
"MIT"
] | 2 | 2019-11-04T15:08:05.000Z | 2020-01-02T21:17:51.000Z | from biosimulators_utils.config import get_config
from src import app
from src.handlers.run.utils import get_simulator_api, get_simulators, exec_in_subprocess
from unittest import mock
import os
import parameterized
import pytest
import requests
import shutil
import tempfile
import unittest
class GetSimulatorsTestCase(unittest.TestCase):
    """Tests the /run/simulators endpoint's response shaping."""

    def test(self):
        endpoint = '/run/simulators'
        with app.app.app.test_client() as client:
            # Mock the registry query so the handler is exercised
            # independently of the live BioSimulators API.
            simulators = [
                {
                    'id': 'copasi',
                    'name': "COPASI",
                    'api': {
                        'module': 'biosimulators_copasi',
                        'package': 'biosimulators_copasi',
                    },
                },
                {
                    'id': 'gillespy2',
                    'name': 'GillesPy2',
                    'api': {
                        'module': 'biosimulators_gillespy2',
                        'package': 'biosimulators_gillespy2',
                    },
                },
            ]
            with mock.patch('src.handlers.run.utils.get_simulators', return_value=simulators):
                response = client.get(endpoint)
        self.assertEqual(response.status_code, 200, response.json)
        simulators = response.json

        # The endpoint is expected to echo the mocked entries enriched with
        # `_type`, version information, and a specs URL.
        id = 'copasi'
        name = 'COPASI'
        sim = next(simulator for simulator in simulators if simulator['id'] == id)
        api_name = 'biosimulators_copasi'
        self.assertEqual(sim, {
            '_type': 'Simulator',
            'id': id,
            'name': name,
            'version': sim['version'],
            'api': {
                '_type': 'SimulatorApi',
                'module': api_name,
                'package': api_name,
                'version': sim['api']['version'],
            },
            'specs': 'https://api.biosimulators.org/simulators/{}/{}'.format(id, sim['version'])
        })
def _parse_csv_env(name):
    """Read env var *name*: None when unset, [] when empty, else comma-split."""
    raw = os.environ.get(name, None)
    if raw is None:
        return None
    return raw.split(',') if raw else []


# Optional allow/deny lists of simulator ids, taken from the environment.
SIMULATORS = _parse_csv_env('SIMULATORS')
SKIPPED_SIMULATORS = _parse_csv_env('SKIPPED_SIMULATORS')

EXAMPLES_BASE_URL = 'https://github.com/biosimulators/Biosimulators_test_suite/raw/deploy/examples'
TIMEOUT = 5 * 60  # maximum execution time per test in seconds
class SimulatorsHaveValidApisTestCase(unittest.TestCase):
    """Checks every registered simulator's Python API exposes the required
    attributes and can execute its example COMBINE archive."""

    def setUp(self):
        self.tmp_dirname = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.tmp_dirname)

    # One test per simulator, filtered by the optional SIMULATORS /
    # SKIPPED_SIMULATORS environment lists.
    @parameterized.parameterized.expand(
        (simulator['id'], simulator)
        for simulator in get_simulators()
        if (
            (SIMULATORS is None or simulator['id'] in SIMULATORS)
            and (SKIPPED_SIMULATORS is None or simulator['id'] not in SKIPPED_SIMULATORS)
        )
    )
    @pytest.mark.timeout(TIMEOUT * 1.25)
    def test(self, id, simulator):
        # Run in a subprocess so a hung simulator API cannot stall the suite.
        exec_in_subprocess(self._test, simulator['api']['module'], simulator['exampleCombineArchive'], self.tmp_dirname,
                           timeout=TIMEOUT)

    @staticmethod
    def _test(simulator_module, example_combine_archive, tmp_dirname):
        """Validate one simulator API module.

        Raises NotImplementedError/ValueError when a required attribute is
        missing or malformed; also downloads and executes the simulator's
        example archive as a smoke test.
        """
        api = get_simulator_api(simulator_module, False)

        # __version__: must exist and be a non-empty string.
        if not hasattr(api, '__version__'):
            raise NotImplementedError('API must have a `__version__` attribute whose value is a non-empty string (e.g., 1.0.1)')
        if not isinstance(api.__version__, str):
            raise ValueError('API must have a `__version__` attribute whose value is a non-empty string (e.g., 1.0.1), not `{}`'.format(
                api.__version__.__class__.__name__))
        if api.__version__ == '':
            raise ValueError('API must have a `__version__` attribute whose value is a non-empty string (e.g., 1.0.1), not `{}`'.format(
                api.__version__))

        # get_simulator_version: callable returning a non-empty string.
        if not hasattr(api, 'get_simulator_version'):
            raise NotImplementedError('API must have a `get_simulator_version` callable that returns a non-empty string (e.g., 1.0.1)')
        if not callable(api.get_simulator_version):
            raise ValueError('`get_simulator_version` must be a callable that returns a non-empty string (e.g., 1.0.1), not `{}`'.format(
                api.get_simulator_version.__class__.__name__))
        simulator_version = api.get_simulator_version()
        if not isinstance(simulator_version, str):
            raise ValueError('`get_simulator_version` must return a non-empty string (e.g., 1.0.1), not `{}`'.format(
                simulator_version.__class__.__name__))
        if simulator_version == '':
            raise ValueError('`get_simulator_version` must return a non-empty string (e.g., 1.0.1), not `{}`'.format(
                simulator_version))

        # exec_sedml_docs_in_combine_archive: must be callable.
        if not hasattr(api, 'exec_sedml_docs_in_combine_archive'):
            raise NotImplementedError('API must have a `exec_sedml_docs_in_combine_archive` callable')
        if not callable(api.exec_sedml_docs_in_combine_archive):
            raise ValueError('`exec_sedml_docs_in_combine_archive` must be a callable, not `{}`'.format(
                api.exec_sedml_docs_in_combine_archive.__class__.__name__))

        # Download the simulator's example archive and execute it end-to-end.
        response = requests.get(EXAMPLES_BASE_URL + '/' + example_combine_archive)
        response.raise_for_status()
        archive_filename = os.path.join(tmp_dirname, 'archive.omex')
        with open(archive_filename, 'wb') as file:
            file.write(response.content)
        out_dir = os.path.join(tmp_dirname, 'out')
        config = get_config()
        config.COLLECT_COMBINE_ARCHIVE_RESULTS = True
        config.COLLECT_SED_DOCUMENT_RESULTS = True
        config.DEBUG = True
        results, log = api.exec_sedml_docs_in_combine_archive(archive_filename, out_dir, config=config)

        # exec_sed_doc: must be callable.
        if not hasattr(api, 'exec_sed_doc'):
            raise NotImplementedError('API must have a `exec_sed_doc` callable')
        if not callable(api.exec_sed_doc):
            raise ValueError('`exec_sed_doc` must be a callable, not `{}`'.format(
                api.exec_sed_doc.__class__.__name__))

        # exec_sed_task: must be callable.
        if not hasattr(api, 'exec_sed_task'):
            raise NotImplementedError('API must have a `exec_sed_task` callable')
        if not callable(api.exec_sed_task):
            raise ValueError('`exec_sed_task` must be a callable, not `{}`'.format(
                api.exec_sed_task.__class__.__name__))

        # preprocess_sed_task: must be callable.
        if not hasattr(api, 'preprocess_sed_task'):
            raise NotImplementedError('API must have a `preprocess_sed_task` callable')
        if not callable(api.preprocess_sed_task):
            raise ValueError('`preprocess_sed_task` must be a callable, not `{}`'.format(
                api.preprocess_sed_task.__class__.__name__))
| 42.136905 | 137 | 0.621133 |
905bbf464035ca2e0857a5ab915be30ce2047757 | 2,660 | py | Python | src/scratch/test/spritesheetloader.py | vidalmatheus/MK-Project | 6646020c59367ba0424d73a5861e13bbc0daac1f | [
"MIT"
] | 1 | 2019-12-25T10:25:30.000Z | 2019-12-25T10:25:30.000Z | src/scratch/test/spritesheetloader.py | vidalmatheus/MK-Project | 6646020c59367ba0424d73a5861e13bbc0daac1f | [
"MIT"
] | 1 | 2019-12-25T10:27:15.000Z | 2019-12-25T10:27:15.000Z | src/scratch/test/spritesheetloader.py | vidalmatheus/MK-Project | 6646020c59367ba0424d73a5861e13bbc0daac1f | [
"MIT"
] | 1 | 2019-12-25T10:50:05.000Z | 2019-12-25T10:50:05.000Z | import pygame,os
from pygame.locals import *
class SpritesheetLoader:
def __init__(self,file,sprite_width,sprite_height, fullsheet=False):
self.sheet = pygame.image.load(os.path.join(file))
self.sprite_width = sprite_width
self.sprite_height = sprite_height
self.sprite_list=self.makeSpritelist()
if not fullsheet:
self.removeBlanks(file)
def getSpriteList(self):
return self.sprite_list
def getSpriteLines(self,*args):
for arg in args:
assert(isinstance(arg, int)) # Se for um indice de array
yield self.sprite_list[arg] # Retorna a animação e pega a próxima
def makeSprite(self,line=0,column=0):
sprite = pygame.Surface((self.sprite_width, self.sprite_height)).convert_alpha()
sprite.fill((0,0,0,0))
sprite.blit(self.sheet, (-(column*self.sprite_width),-(line*self.sprite_height)))
return sprite
def makeSpritelist(self):
size = self.sheet.get_size()
sprite_list=[]
for i in range(int(size[1]/self.sprite_height)):
sprite_line=[]
for j in range(int(size[0]/self.sprite_width)):
sprite_line.append(self.makeSprite(i,j))
sprite_list.append(sprite_line)
return sprite_list
def testBlankSprite(self,sprite):
for i in range(self.sprite_width):
for j in range(self.sprite_height):
if sprite.get_at((i,j))!=(0,0,0,0):
return False
return True
def removeBlanks(self, file):
try:
with open(file.replace('.png', '.txt'), encoding='utf-8') as txtfile:
i=0
for line in txtfile:
length = int(line)
while length < len(self.sprite_list[i]):
self.sprite_list[i].pop()
i+=1
except:
print('creating...')
for sprite_line in self.sprite_list:
j=0
while j < len(sprite_line):
if self.testBlankSprite(sprite_line[j]):
sprite_line[j] = None
j+=1
self.write(file)
def write(self,file):
txtfile = open(file.replace('.psd', '.txt'), mode='w', encoding='utf-8') #Estava png
for sprite_line in self.sprite_list:
i=0
for sprite in sprite_line:
if sprite == None:
break
else: i+=1
txtfile.write(str(i))
txtfile.write('\n')
| 35.945946 | 92 | 0.540226 |
272ee7da3a265e0326543a51e68c450bffc1682c | 164,980 | py | Python | modin/pandas/test/test_series.py | modin-project/modin | f1f3aabd0ecd75086dfe7fb70be6a040958e0045 | [
"ECL-2.0",
"Apache-2.0"
] | 7,258 | 2018-06-21T21:39:15.000Z | 2022-03-31T23:09:20.000Z | modin/pandas/test/test_series.py | modin-project/modin | f1f3aabd0ecd75086dfe7fb70be6a040958e0045 | [
"ECL-2.0",
"Apache-2.0"
] | 4,125 | 2018-06-22T18:04:48.000Z | 2022-03-31T17:13:19.000Z | modin/pandas/test/test_series.py | modin-project/modin | f1f3aabd0ecd75086dfe7fb70be6a040958e0045 | [
"ECL-2.0",
"Apache-2.0"
] | 547 | 2018-06-21T23:23:00.000Z | 2022-03-27T09:04:56.000Z | # Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import pytest
import numpy as np
import json
import pandas
import matplotlib
import modin.pandas as pd
from numpy.testing import assert_array_equal
from pandas.core.base import SpecificationError
from modin.utils import get_current_backend
import sys
from modin.utils import to_pandas
from .utils import (
random_state,
RAND_LOW,
RAND_HIGH,
df_equals,
arg_keys,
name_contains,
test_data,
test_data_values,
test_data_keys,
test_data_with_duplicates_values,
test_data_with_duplicates_keys,
test_string_data_values,
test_string_data_keys,
test_string_list_data_values,
test_string_list_data_keys,
string_sep_values,
string_sep_keys,
string_na_rep_values,
string_na_rep_keys,
numeric_dfs,
no_numeric_dfs,
agg_func_keys,
agg_func_values,
agg_func_except_keys,
agg_func_except_values,
numeric_agg_funcs,
quantiles_keys,
quantiles_values,
axis_keys,
axis_values,
bool_arg_keys,
bool_arg_values,
int_arg_keys,
int_arg_values,
encoding_types,
categories_equals,
eval_general,
test_data_small_values,
test_data_small_keys,
test_data_categorical_values,
test_data_categorical_keys,
generate_multiindex,
test_data_diff_dtype,
df_equals_with_non_stable_indices,
)
from modin.config import NPartitions
NPartitions.put(4)
# Force matplotlib to not use any Xwindows backend.
matplotlib.use("Agg")
def get_rop(op):
if op.startswith("__") and op.endswith("__"):
return "__r" + op[2:]
else:
return None
def inter_df_math_helper(modin_series, pandas_series, op):
inter_df_math_helper_one_side(modin_series, pandas_series, op)
rop = get_rop(op)
if rop:
inter_df_math_helper_one_side(modin_series, pandas_series, rop)
def inter_df_math_helper_one_side(modin_series, pandas_series, op):
try:
pandas_attr = getattr(pandas_series, op)
except Exception as e:
with pytest.raises(type(e)):
_ = getattr(modin_series, op)
return
modin_attr = getattr(modin_series, op)
try:
pandas_result = pandas_attr(4)
except Exception as e:
with pytest.raises(type(e)):
repr(modin_attr(4)) # repr to force materialization
else:
modin_result = modin_attr(4)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_attr(4.0)
except Exception as e:
with pytest.raises(type(e)):
repr(modin_attr(4.0)) # repr to force materialization
else:
modin_result = modin_attr(4.0)
df_equals(modin_result, pandas_result)
# These operations don't support non-scalar `other` or have a strange behavior in
# the testing environment
if op in [
"__divmod__",
"divmod",
"rdivmod",
"floordiv",
"__floordiv__",
"rfloordiv",
"__rfloordiv__",
"mod",
"__mod__",
"rmod",
"__rmod__",
]:
return
try:
pandas_result = pandas_attr(pandas_series)
except Exception as e:
with pytest.raises(type(e)):
repr(modin_attr(modin_series)) # repr to force materialization
else:
modin_result = modin_attr(modin_series)
df_equals(modin_result, pandas_result)
list_test = random_state.randint(RAND_LOW, RAND_HIGH, size=(modin_series.shape[0]))
try:
pandas_result = pandas_attr(list_test)
except Exception as e:
with pytest.raises(type(e)):
repr(modin_attr(list_test)) # repr to force materialization
else:
modin_result = modin_attr(list_test)
df_equals(modin_result, pandas_result)
series_test_modin = pd.Series(list_test, index=modin_series.index)
series_test_pandas = pandas.Series(list_test, index=pandas_series.index)
try:
pandas_result = pandas_attr(series_test_pandas)
except Exception as e:
with pytest.raises(type(e)):
repr(modin_attr(series_test_modin)) # repr to force materialization
else:
modin_result = modin_attr(series_test_modin)
df_equals(modin_result, pandas_result)
# Level test
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_series.index]
)
modin_df_multi_level = modin_series.copy()
modin_df_multi_level.index = new_idx
try:
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, level=1)
except TypeError:
# Some operations don't support multilevel `level` parameter
pass
def create_test_series(vals, sort=False, **kwargs):
    """Build a matching (modin, pandas) Series pair from *vals*.

    A dict input is reduced to the values stored under its first key so
    both series receive identical data. With ``sort=True`` both series
    are value-sorted and their indices reset.
    """
    data = vals[next(iter(vals.keys()))] if isinstance(vals, dict) else vals
    modin_series = pd.Series(data, **kwargs)
    pandas_series = pandas.Series(data, **kwargs)
    if sort:
        modin_series = modin_series.sort_values().reset_index(drop=True)
        pandas_series = pandas_series.sort_values().reset_index(drop=True)
    return modin_series, pandas_series
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_to_frame(data):
    """Series.to_frame with an explicit column name matches pandas."""
    md_series, pd_series = create_test_series(data)
    df_equals(md_series.to_frame(name="miao"), pd_series.to_frame(name="miao"))
def test_accessing_index_element_as_property():
    """Index labels are reachable as attributes; missing labels raise."""
    s = pd.Series([10, 20, 30], index=["a", "b", "c"])
    assert s.b == 20
    # A label absent from the index must raise AttributeError specifically;
    # the previous bare `Exception` would also pass on unrelated failures.
    with pytest.raises(AttributeError):
        _ = s.d
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_callable_key_in_getitem(data):
modin_series, pandas_series = create_test_series(data)
df_equals(
modin_series[lambda s: s.index % 2 == 0],
pandas_series[lambda s: s.index % 2 == 0],
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_T(data):
modin_series, pandas_series = create_test_series(data)
df_equals(modin_series.T, pandas_series.T)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___abs__(data):
modin_series, pandas_series = create_test_series(data)
df_equals(modin_series.__abs__(), pandas_series.__abs__())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___add__(data):
modin_series, pandas_series = create_test_series(data)
inter_df_math_helper(modin_series, pandas_series, "__add__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___and__(data):
modin_series, pandas_series = create_test_series(data)
inter_df_math_helper(modin_series, pandas_series, "__and__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___array__(data):
modin_series, pandas_series = create_test_series(data)
modin_result = modin_series.__array__()
assert_array_equal(modin_result, pandas_series.__array__())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___bool__(data):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.__bool__()
except Exception as e:
with pytest.raises(type(e)):
modin_series.__bool__()
else:
modin_result = modin_series.__bool__()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___contains__(request, data):
modin_series, pandas_series = create_test_series(data)
result = False
key = "Not Exist"
assert result == modin_series.__contains__(key)
assert result == (key in modin_series)
if "empty_data" not in request.node.name:
result = True
key = pandas_series.keys()[0]
assert result == modin_series.__contains__(key)
assert result == (key in modin_series)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___copy__(data):
modin_series, pandas_series = create_test_series(data)
df_equals(modin_series.copy(), modin_series)
df_equals(modin_series.copy(), pandas_series.copy())
df_equals(modin_series.copy(), pandas_series)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___deepcopy__(data):
modin_series, pandas_series = create_test_series(data)
df_equals(modin_series.__deepcopy__(), modin_series)
df_equals(modin_series.__deepcopy__(), pandas_series.__deepcopy__())
df_equals(modin_series.__deepcopy__(), pandas_series)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___delitem__(data):
modin_series, pandas_series = create_test_series(data)
del modin_series[modin_series.index[0]]
del pandas_series[pandas_series.index[0]]
df_equals(modin_series, pandas_series)
del modin_series[modin_series.index[-1]]
del pandas_series[pandas_series.index[-1]]
df_equals(modin_series, pandas_series)
del modin_series[modin_series.index[0]]
del pandas_series[pandas_series.index[0]]
df_equals(modin_series, pandas_series)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_divmod(data):
modin_series, pandas_series = create_test_series(data)
inter_df_math_helper(modin_series, pandas_series, "divmod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rdivmod(data):
modin_series, pandas_series = create_test_series(data)
inter_df_math_helper(modin_series, pandas_series, "rdivmod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___eq__(data):
modin_series, pandas_series = create_test_series(data)
inter_df_math_helper(modin_series, pandas_series, "__eq__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___floordiv__(data):
modin_series, pandas_series = create_test_series(data)
inter_df_math_helper(modin_series, pandas_series, "__floordiv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___ge__(data):
modin_series, pandas_series = create_test_series(data)
inter_df_math_helper(modin_series, pandas_series, "__ge__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___getitem__(data):
    """Positional lookup, label lookup, slicing and masking match pandas."""
    modin_series, pandas_series = create_test_series(data)
    df_equals(modin_series[0], pandas_series[0])
    # Lookup by the last index label.
    df_equals(
        modin_series[modin_series.index[-1]], pandas_series[pandas_series.index[-1]]
    )
    modin_series = pd.Series(list(range(1000)))
    pandas_series = pandas.Series(list(range(1000)))
    df_equals(modin_series[:30], pandas_series[:30])
    # Boolean-mask selection.
    df_equals(modin_series[modin_series > 500], pandas_series[pandas_series > 500])

    # Test empty series
    df_equals(pd.Series([])[:30], pandas.Series([])[:30])
def test___getitem__1383():
# see #1383 for more details
data = ["", "a", "b", "c", "a"]
modin_series = pd.Series(data)
pandas_series = pandas.Series(data)
df_equals(modin_series[3:7], pandas_series[3:7])
@pytest.mark.parametrize("start", [-7, -5, -3, 0, None, 3, 5, 7])
@pytest.mark.parametrize("stop", [-7, -5, -3, 0, None, 3, 5, 7])
def test___getitem_edge_cases(start, stop):
data = ["", "a", "b", "c", "a"]
modin_series = pd.Series(data)
pandas_series = pandas.Series(data)
df_equals(modin_series[start:stop], pandas_series[start:stop])
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___gt__(data):
modin_series, pandas_series = create_test_series(data)
inter_df_math_helper(modin_series, pandas_series, "__gt__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___int__(data):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = int(pandas_series[0])
except Exception as e:
with pytest.raises(type(e)):
int(modin_series[0])
else:
assert int(modin_series[0]) == pandas_result
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___invert__(data):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.__invert__()
except Exception as e:
with pytest.raises(type(e)):
repr(modin_series.__invert__())
else:
df_equals(modin_series.__invert__(), pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___iter__(data):
modin_series, pandas_series = create_test_series(data)
for m, p in zip(modin_series.__iter__(), pandas_series.__iter__()):
np.testing.assert_equal(m, p)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___le__(data):
modin_series, pandas_series = create_test_series(data)
inter_df_math_helper(modin_series, pandas_series, "__le__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___len__(data):
modin_series, pandas_series = create_test_series(data)
assert len(modin_series) == len(pandas_series)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___long__(data):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series[0].__long__()
except Exception as e:
with pytest.raises(type(e)):
modin_series[0].__long__()
else:
assert modin_series[0].__long__() == pandas_result
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___lt__(data):
modin_series, pandas_series = create_test_series(data)
inter_df_math_helper(modin_series, pandas_series, "__lt__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___mod__(data):
modin_series, pandas_series = create_test_series(data)
inter_df_math_helper(modin_series, pandas_series, "__mod__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___mul__(data):
modin_series, pandas_series = create_test_series(data)
inter_df_math_helper(modin_series, pandas_series, "__mul__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___ne__(data):
modin_series, pandas_series = create_test_series(data)
inter_df_math_helper(modin_series, pandas_series, "__ne__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___neg__(request, data):
    """Unary negation must match pandas, including raised errors.

    NOTE(review): the ``request`` fixture is accepted but unused here.
    """
    modin_series, pandas_series = create_test_series(data)
    try:
        pandas_result = pandas_series.__neg__()
    except Exception as e:
        with pytest.raises(type(e)):
            # repr() forces lazy modin evaluation so the error actually surfaces.
            repr(modin_series.__neg__())
    else:
        df_equals(modin_series.__neg__(), pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___or__(data):
    """Series.__or__ parity between modin and pandas."""
    md_series, pd_series = create_test_series(data)
    inter_df_math_helper(md_series, pd_series, "__or__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___pow__(data):
    """Series.__pow__ parity between modin and pandas."""
    md_series, pd_series = create_test_series(data)
    inter_df_math_helper(md_series, pd_series, "__pow__")
@pytest.mark.parametrize("name", ["Dates", None])
@pytest.mark.parametrize(
    "dt_index", [True, False], ids=["dt_index_true", "dt_index_false"]
)
@pytest.mark.parametrize(
    "data",
    [*test_data_values, "empty"],
    ids=[*test_data_keys, "empty"],
)
def test___repr__(name, dt_index, data):
    """repr() of a modin Series must match pandas, across names, datetime indexes, and empty data."""
    if data == "empty":
        modin_series, pandas_series = pd.Series(), pandas.Series()
    else:
        modin_series, pandas_series = create_test_series(data)
    pandas_series.name = modin_series.name = name
    if dt_index:
        # Replace the default index with a minute-frequency DatetimeIndex of the same length.
        index = pandas.date_range(
            "1/1/2000", periods=len(pandas_series.index), freq="T"
        )
        pandas_series.index = modin_series.index = index

    if get_current_backend() == "BaseOnPython" and data == "empty":
        # TODO: Remove this when default `dtype` of empty Series will be `object` in pandas (see #3142).
        assert modin_series.dtype == np.object
        assert pandas_series.dtype == np.float64
        df_equals(modin_series.index, pandas_series.index)
    else:
        assert repr(modin_series) == repr(pandas_series)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___round__(data):
    """Built-in round() applied to a modin Series must match pandas."""
    md_series, pd_series = create_test_series(data)
    df_equals(round(md_series), round(pd_series))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___setitem__(data):
    """Assigning 0 to every existing key must leave modin and pandas Series equal."""
    md_series, pd_series = create_test_series(data)
    for key in md_series.keys():
        md_series[key] = 0
        pd_series[key] = 0
    df_equals(md_series, pd_series)
@pytest.mark.parametrize(
    "key",
    [
        pytest.param(lambda idx: slice(1, 3), id="location_based_slice"),
        pytest.param(lambda idx: slice(idx[1], idx[-1]), id="index_based_slice"),
        pytest.param(lambda idx: [idx[0], idx[2], idx[-1]], id="list_of_labels"),
        pytest.param(
            lambda idx: [True if i % 2 else False for i in range(len(idx))],
            id="boolean_mask",
        ),
    ],
)
@pytest.mark.parametrize(
    "index",
    [
        pytest.param(
            lambda idx_len: [chr(x) for x in range(ord("a"), ord("a") + idx_len)],
            id="str_index",
        ),
        pytest.param(lambda idx_len: list(range(1, idx_len + 1)), id="int_index"),
    ],
)
def test___setitem___non_hashable(key, index):
    """Setting via slices, label lists, and boolean masks must match pandas."""
    values = np.arange(5)
    idx = index(len(values))
    selector = key(idx)
    md_sr, pd_sr = create_test_series(values, index=idx)
    md_sr[selector] = 10
    pd_sr[selector] = 10
    df_equals(md_sr, pd_sr)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___sizeof__(data):
    """__sizeof__ currently defaults to pandas, so only the warning is asserted."""
    md_series, _ = create_test_series(data)
    with pytest.warns(UserWarning):
        md_series.__sizeof__()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___str__(data):
    """str() of a modin Series must match pandas."""
    md_series, pd_series = create_test_series(data)
    assert str(md_series) == str(pd_series)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___sub__(data):
    """Series.__sub__ parity between modin and pandas."""
    md_series, pd_series = create_test_series(data)
    inter_df_math_helper(md_series, pd_series, "__sub__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___truediv__(data):
    """Series.__truediv__ parity between modin and pandas."""
    md_series, pd_series = create_test_series(data)
    inter_df_math_helper(md_series, pd_series, "__truediv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___xor__(data):
    """Series.__xor__ parity between modin and pandas."""
    md_series, pd_series = create_test_series(data)
    inter_df_math_helper(md_series, pd_series, "__xor__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_abs(data):
    """Series.abs parity between modin and pandas."""
    md_series, pd_series = create_test_series(data)
    df_equals(md_series.abs(), pd_series.abs())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add(data):
    """Series.add parity between modin and pandas."""
    md_series, pd_series = create_test_series(data)
    inter_df_math_helper(md_series, pd_series, "add")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add_prefix(data):
    """Series.add_prefix parity between modin and pandas."""
    md_series, pd_series = create_test_series(data)
    md_result = md_series.add_prefix("PREFIX_ADD_")
    pd_result = pd_series.add_prefix("PREFIX_ADD_")
    df_equals(md_result, pd_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add_suffix(data):
    """Series.add_suffix parity between modin and pandas."""
    md_series, pd_series = create_test_series(data)
    md_result = md_series.add_suffix("SUFFIX_ADD_")
    pd_result = pd_series.add_suffix("SUFFIX_ADD_")
    df_equals(md_result, pd_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_agg(data, func):
    """Series.agg with assorted aggregators must match pandas."""
    md_series, pd_series = create_test_series(data)
    eval_general(md_series, pd_series, lambda series: series.agg(func))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("func", agg_func_except_values, ids=agg_func_except_keys)
def test_agg_except(data, func):
    """Aggregators rejected by pandas must raise SpecificationError in modin too."""
    # SpecificationError is arisen because we treat a Series as a DataFrame.
    # See details in pandas issue 36036.
    md_series, pd_series = create_test_series(data)
    with pytest.raises(SpecificationError):
        eval_general(md_series, pd_series, lambda series: series.agg(func))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_agg_numeric(request, data, func):
    """Numeric aggregators over numeric data (filtered by test id) must match pandas."""
    is_numeric_case = name_contains(
        request.node.name, numeric_agg_funcs
    ) and name_contains(request.node.name, numeric_dfs)
    if is_numeric_case:
        md_series, pd_series = create_test_series(data)
        eval_general(md_series, pd_series, lambda series: series.agg(func, 0))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("func", agg_func_except_values, ids=agg_func_except_keys)
def test_agg_numeric_except(request, data, func):
    """Rejected numeric aggregators must raise SpecificationError, as in pandas."""
    is_numeric_case = name_contains(
        request.node.name, numeric_agg_funcs
    ) and name_contains(request.node.name, numeric_dfs)
    if is_numeric_case:
        # SpecificationError is arisen because we treat a Series as a DataFrame.
        # See details in pandas issue 36036.
        md_series, pd_series = create_test_series(data)
        with pytest.raises(SpecificationError):
            eval_general(md_series, pd_series, lambda series: series.agg(func, 0))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_aggregate(data, func):
    """Series.aggregate (alias of agg) must match pandas."""
    md_series, pd_series = create_test_series(data)
    eval_general(md_series, pd_series, lambda series: series.aggregate(func, 0))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("func", agg_func_except_values, ids=agg_func_except_keys)
def test_aggregate_except(data, func):
    """Rejected aggregators passed to Series.aggregate must raise SpecificationError."""
    # SpecificationError is arisen because we treat a Series as a DataFrame.
    # See details in pandas issues 36036.
    md_series, pd_series = create_test_series(data)
    with pytest.raises(SpecificationError):
        eval_general(md_series, pd_series, lambda series: series.aggregate(func, 0))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_aggregate_numeric(request, data, func):
    """Numeric aggregation via agg must match pandas for numeric test cases."""
    is_numeric_case = name_contains(
        request.node.name, numeric_agg_funcs
    ) and name_contains(request.node.name, numeric_dfs)
    if is_numeric_case:
        md_series, pd_series = create_test_series(data)
        eval_general(md_series, pd_series, lambda series: series.agg(func, 0))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("func", agg_func_except_values, ids=agg_func_except_keys)
def test_aggregate_numeric_except(request, data, func):
    """Rejected numeric aggregation via agg must raise SpecificationError."""
    is_numeric_case = name_contains(
        request.node.name, numeric_agg_funcs
    ) and name_contains(request.node.name, numeric_dfs)
    if is_numeric_case:
        # SpecificationError is arisen because we treat a Series as a DataFrame.
        # See details in pandas issues 36036.
        md_series, pd_series = create_test_series(data)
        with pytest.raises(SpecificationError):
            eval_general(md_series, pd_series, lambda series: series.agg(func, 0))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_aggregate_error_checking(data):
    """Check aggregate's error/warning behavior for valid, defaulted, and bogus funcs."""
    modin_series, pandas_series = create_test_series(data)

    # "ndim" resolves to the attribute, so both report 1 for a Series.
    assert pandas_series.aggregate("ndim") == 1
    assert modin_series.aggregate("ndim") == 1

    def user_warning_checker(series, fn):
        # Only the modin Series is expected to warn (default-to-pandas path).
        if isinstance(series, pd.Series):
            with pytest.warns(UserWarning):
                return fn(series)
        return fn(series)

    eval_general(
        modin_series,
        pandas_series,
        lambda series: user_warning_checker(
            series, fn=lambda series: series.aggregate("cumproduct")
        ),
    )
    # A nonexistent aggregator must fail identically in modin and pandas.
    eval_general(
        modin_series, pandas_series, lambda series: series.aggregate("NOT_EXISTS")
    )
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_align(data):
    """align defaults to pandas, so only the UserWarning is asserted."""
    md_series, _ = create_test_series(data)  # noqa: F841
    with pytest.warns(UserWarning):
        md_series.align(md_series)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
    "skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_all(data, skipna):
    """Series.all parity between modin and pandas across skipna values."""
    md_series, pd_series = create_test_series(data)
    df_equals(md_series.all(skipna=skipna), pd_series.all(skipna=skipna))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
    "skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_any(data, skipna):
    """Series.any parity between modin and pandas across skipna values."""
    md_series, pd_series = create_test_series(data)
    df_equals(md_series.any(skipna=skipna), pd_series.any(skipna=skipna))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_append(data):
    """Series.append parity for dict, scalar-wrapped, list, and Series inputs.

    Each case follows the same pattern: run pandas first; if pandas raises,
    assert modin raises the same exception type, otherwise compare results.
    """
    modin_series, pandas_series = create_test_series(data)

    # Case 1: appending a dict, with and without ignore_index.
    data_to_append = {"append_a": 2, "append_b": 1000}

    ignore_idx_values = [True, False]

    for ignore in ignore_idx_values:
        try:
            pandas_result = pandas_series.append(data_to_append, ignore_index=ignore)
        except Exception as e:
            with pytest.raises(type(e)):
                modin_series.append(data_to_append, ignore_index=ignore)
        else:
            modin_result = modin_series.append(data_to_append, ignore_index=ignore)
            df_equals(modin_result, pandas_result)

    # Case 2: appending the last element (a scalar) directly.
    try:
        pandas_result = pandas_series.append(pandas_series.iloc[-1])
    except Exception as e:
        with pytest.raises(type(e)):
            modin_series.append(modin_series.iloc[-1])
    else:
        modin_result = modin_series.append(modin_series.iloc[-1])
        df_equals(modin_result, pandas_result)

    # Case 3: appending the last element wrapped in a list.
    try:
        pandas_result = pandas_series.append([pandas_series.iloc[-1]])
    except Exception as e:
        with pytest.raises(type(e)):
            modin_series.append([modin_series.iloc[-1]])
    else:
        modin_result = modin_series.append([modin_series.iloc[-1]])
        df_equals(modin_result, pandas_result)

    # Case 4: appending a list of Series / a single Series, toggling
    # verify_integrity (True must raise on the duplicated index).
    verify_integrity_values = [True, False]

    for verify_integrity in verify_integrity_values:
        try:
            pandas_result = pandas_series.append(
                [pandas_series, pandas_series], verify_integrity=verify_integrity
            )
        except Exception as e:
            with pytest.raises(type(e)):
                modin_series.append(
                    [modin_series, modin_series], verify_integrity=verify_integrity
                )
        else:
            modin_result = modin_series.append(
                [modin_series, modin_series], verify_integrity=verify_integrity
            )
            df_equals(modin_result, pandas_result)

        try:
            pandas_result = pandas_series.append(
                pandas_series, verify_integrity=verify_integrity
            )
        except Exception as e:
            with pytest.raises(type(e)):
                modin_series.append(modin_series, verify_integrity=verify_integrity)
        else:
            modin_result = modin_series.append(
                modin_series, verify_integrity=verify_integrity
            )
            df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_apply(data, func):
    """Series.apply with assorted callables must match pandas."""
    md_series, pd_series = create_test_series(data)
    eval_general(md_series, pd_series, lambda series: series.apply(func))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("func", agg_func_except_values, ids=agg_func_except_keys)
def test_apply_except(data, func):
    """Callables rejected by pandas.apply must raise SpecificationError in modin too."""
    # SpecificationError is arisen because we treat a Series as a DataFrame.
    # See details in pandas issues 36036.
    md_series, pd_series = create_test_series(data)
    with pytest.raises(SpecificationError):
        eval_general(md_series, pd_series, lambda series: series.apply(func))
def test_apply_external_lib():
    """Series.apply with a third-party callable (json.loads) must match pandas."""
    json_string = """
    {
        "researcher": {
            "name": "Ford Prefect",
            "species": "Betelgeusian",
            "relatives": [
                {
                    "name": "Zaphod Beeblebrox",
                    "species": "Betelgeusian"
                }
            ]
        }
    }
    """
    modin_result = pd.DataFrame.from_dict({"a": [json_string]}).a.apply(json.loads)
    pandas_result = pandas.DataFrame.from_dict({"a": [json_string]}).a.apply(json.loads)
    df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_apply_numeric(request, data, func):
    """Series.apply over numeric data (filtered by test id) must match pandas."""
    if name_contains(request.node.name, numeric_dfs):
        md_series, pd_series = create_test_series(data)
        eval_general(md_series, pd_series, lambda series: series.apply(func))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("func", agg_func_except_values, ids=agg_func_except_keys)
def test_apply_numeric_except(request, data, func):
    """Rejected callables over numeric data must raise SpecificationError."""
    if name_contains(request.node.name, numeric_dfs):
        # SpecificationError is arisen because we treat a Series as a DataFrame.
        # See details in pandas issues 36036.
        md_series, pd_series = create_test_series(data)
        with pytest.raises(SpecificationError):
            eval_general(md_series, pd_series, lambda series: series.apply(func))
@pytest.mark.parametrize("axis", [None, 0, 1])
@pytest.mark.parametrize("level", [None, -1, 0, 1])
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("func", ["count", "all", "kurt", "array", "searchsorted"])
def test_apply_text_func(level, data, func):
    """apply with string function names over a MultiIndex Series must match pandas.

    ``level`` / ``axis`` are forwarded as kwargs only when truthy (note: this
    skips level=0 and axis=0, which are falsy).
    """
    func_kwargs = {}
    if level:
        func_kwargs.update({"level": level})
    if axis:
        func_kwargs.update({"axis": axis})
    rows_number = len(next(iter(data.values())))  # length of the first data column
    level_0 = np.random.choice([0, 1, 2], rows_number)
    level_1 = np.random.choice([3, 4, 5], rows_number)
    index = pd.MultiIndex.from_arrays([level_0, level_1])

    modin_series, pandas_series = create_test_series(data)
    # Give both series the same random two-level index.
    modin_series.index = index
    pandas_series.index = index

    eval_general(modin_series, pandas_series, lambda df: df.apply(func), **func_kwargs)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("skipna", [True, False])
def test_argmax(data, skipna):
    """Series.argmax parity between modin and pandas."""
    md_series, pd_series = create_test_series(data)
    df_equals(md_series.argmax(skipna=skipna), pd_series.argmax(skipna=skipna))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("skipna", [True, False])
def test_argmin(data, skipna):
    """Series.argmin parity between modin and pandas."""
    md_series, pd_series = create_test_series(data)
    df_equals(md_series.argmin(skipna=skipna), pd_series.argmin(skipna=skipna))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_argsort(data):
    """argsort warns (defaults to pandas) but must still produce pandas' result."""
    md_series, pd_series = create_test_series(data)
    with pytest.warns(UserWarning):
        md_result = md_series.argsort()
    df_equals(md_result, pd_series.argsort())
def test_asfreq():
    """asfreq defaults to pandas, so only the UserWarning is asserted."""
    idx = pd.date_range("1/1/2000", periods=4, freq="T")
    ser = pd.Series([0.0, None, 2.0, 3.0], index=idx)
    # We are only testing that this defaults to pandas, so we will just check for
    # the warning
    with pytest.warns(UserWarning):
        ser.asfreq(freq="30S")
@pytest.mark.parametrize(
    "where",
    [
        20,
        30,
        [10, 40],
        [20, 30],
        [20],
        25,
        [25, 45],
        [25, 30],
        pandas.Index([20, 30]),
        pandas.Index([10]),
    ],
)
def test_asof(where):
    """Series.asof parity for scalar/list/Index lookups, with and without NaN."""
    index = [10, 20, 30, 40]
    for values in ([1, 2, np.nan, 4], [1, 2, 7, 4]):
        md_series = pd.Series(values, index=index)
        pd_series = pandas.Series(values, index=index)
        df_equals(md_series.asof(where), pd_series.asof(where))
@pytest.mark.parametrize(
    "where",
    [
        20,
        30,
        [10.5, 40.5],
        [10],
        pandas.Index([20, 30]),
        pandas.Index([10.5]),
    ],
)
def test_asof_large(where):
    """Series.asof parity on a large float column with a range index."""
    values = test_data["float_nan_data"]["col1"]
    index = list(range(len(values)))
    md_series = pd.Series(values, index=index)
    pd_series = pandas.Series(values, index=index)
    df_equals(md_series.asof(where), pd_series.asof(where))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_astype(data):
    """Series.astype parity for str, int64, and float64 targets, including errors."""
    md_series, pd_series = create_test_series(data)
    for target_dtype in (str, np.int64, np.float64):
        try:
            expected = pd_series.astype(target_dtype)
        except Exception as err:
            with pytest.raises(type(err)):
                repr(md_series.astype(target_dtype))  # repr to force materialization
        else:
            df_equals(md_series.astype(target_dtype), expected)
def test_astype_categorical():
    """astype("category") must match pandas for both string and integer Series.

    BUG FIX: previously the integer series was created but ``astype`` was never
    re-run, so the second pair of assertions re-checked the stale results from
    the string series.
    """
    modin_df = pd.Series(["A", "A", "B", "B", "A"])
    pandas_df = pandas.Series(["A", "A", "B", "B", "A"])

    modin_result = modin_df.astype("category")
    pandas_result = pandas_df.astype("category")
    df_equals(modin_result, pandas_result)
    assert modin_result.dtype == pandas_result.dtype

    modin_df = pd.Series([1, 1, 2, 1, 2, 2, 3, 1, 2, 1, 2])
    pandas_df = pandas.Series([1, 1, 2, 1, 2, 2, 3, 1, 2, 1, 2])
    # Recompute the conversion for the integer series before comparing.
    modin_result = modin_df.astype("category")
    pandas_result = pandas_df.astype("category")
    df_equals(modin_result, pandas_result)
    assert modin_result.dtype == pandas_result.dtype
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_at(data):
    """Series.at scalar access by first and last index label must match pandas.

    FIX: the second comparison previously used plain ``pandas_series[...]``
    indexing on the pandas side instead of ``.at``, so it was not exercising
    the same accessor on both libraries.
    """
    modin_series, pandas_series = create_test_series(data)
    df_equals(
        modin_series.at[modin_series.index[0]], pandas_series.at[pandas_series.index[0]]
    )
    df_equals(
        modin_series.at[modin_series.index[-1]],
        pandas_series.at[pandas_series.index[-1]],
    )
def test_at_time():
    """Series.at_time parity on a 12-hour-frequency DatetimeIndex."""
    idx = pd.date_range("2008-01-01", periods=1000, freq="12H")
    md_series = pd.Series(list(range(1000)), index=idx)
    pd_series = pandas.Series(list(range(1000)), index=idx)
    for time_str in ("12:00", "3:00"):
        df_equals(md_series.at_time(time_str), pd_series.at_time(time_str))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("lag", [1, 2, 3])
def test_autocorr(data, lag):
    """Series.autocorr parity between modin and pandas for several lags."""
    md_series, pd_series = create_test_series(data)
    df_equals(md_series.autocorr(lag=lag), pd_series.autocorr(lag=lag))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_axes(data):
    """Series.axes must hold an equal index and have the same length as pandas'."""
    md_series, pd_series = create_test_series(data)
    assert md_series.axes[0].equals(pd_series.axes[0])
    assert len(md_series.axes) == len(pd_series.axes)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_attrs(data):
    """Series.attrs parity between modin and pandas."""
    md_series, pd_series = create_test_series(data)
    eval_general(md_series, pd_series, lambda series: series.attrs)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_array(data):
    """Series.array parity between modin and pandas."""
    md_series, pd_series = create_test_series(data)
    eval_general(md_series, pd_series, lambda series: series.array)
@pytest.mark.xfail(reason="Using pandas Series.")
def test_between():
    """Placeholder: expected to fail while `between` defaults to pandas.

    NOTE(review): ``create_test_series()`` is called without data here; whether
    that is valid depends on the helper's defaults — confirm if un-xfailing.
    """
    modin_series = create_test_series()

    with pytest.raises(NotImplementedError):
        modin_series.between(None, None)
def test_between_time():
    """Series.between_time parity, including the include_start=False variant."""
    idx = pd.date_range("2008-01-01", periods=1000, freq="12H")
    md_series = pd.Series(list(range(1000)), index=idx)
    pd_series = pandas.Series(list(range(1000)), index=idx)
    df_equals(
        md_series.between_time("12:00", "17:00"),
        pd_series.between_time("12:00", "17:00"),
    )
    df_equals(
        md_series.between_time("3:00", "8:00"),
        pd_series.between_time("3:00", "8:00"),
    )
    df_equals(
        md_series.between_time("3:00", "8:00", False),
        pd_series.between_time("3:00", "8:00", False),
    )
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_bfill(data):
    """Series.bfill parity, both as a new object and in place."""
    md_series, pd_series = create_test_series(data)
    df_equals(md_series.bfill(), pd_series.bfill())
    # inplace variant
    md_copy = md_series.copy()
    pd_copy = pd_series.copy()
    md_copy.bfill(inplace=True)
    pd_copy.bfill(inplace=True)
    df_equals(md_copy, pd_copy)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_bool(data):
    """bool()/.bool() of a multi-element Series must raise ValueError, as in pandas."""
    md_series, _ = create_test_series(data)

    with pytest.raises(ValueError):
        md_series.bool()
    with pytest.raises(ValueError):
        md_series.__bool__()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_clip(request, data):
    """Series.clip parity with upper-only and lower+upper scalar bounds."""
    md_series, pd_series = create_test_series(data)

    if name_contains(request.node.name, numeric_dfs):
        # Draw two random bounds and sort them so lower <= upper.
        lower, upper = np.sort(random_state.random_integers(RAND_LOW, RAND_HIGH, 2))

        # Upper bound only.
        df_equals(md_series.clip(None, upper), pd_series.clip(None, upper))

        # Both bounds.
        df_equals(md_series.clip(lower, upper), pd_series.clip(lower, upper))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_combine(data):
    """Smoke test: Series.combine with an element-wise min-like callable."""
    md_series, _ = create_test_series(data)  # noqa: F841
    other = md_series % (max(md_series) // 2)
    md_series.combine(other, lambda left, right: left if left < right else right)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_combine_first(data):
    """Series.combine_first parity between modin and pandas."""
    md_series, pd_series = create_test_series(data)
    md_other = md_series % (max(md_series) // 2)
    pd_other = pd_series % (max(pd_series) // 2)
    df_equals(md_series.combine_first(md_other), pd_series.combine_first(pd_other))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_compress(data):
    """Series.compress must raise the same exception type as pandas (or succeed)."""
    md_series, pd_series = create_test_series(data)  # noqa: F841
    try:
        pd_series.compress(pd_series > 30)
    except Exception as err:
        with pytest.raises(type(err)):
            md_series.compress(md_series > 30)
    else:
        md_series.compress(md_series > 30)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_constructor(data):
    """Constructing from raw data and from an existing Series must match pandas."""
    md_series, pd_series = create_test_series(data)
    df_equals(md_series, pd_series)
    df_equals(pd.Series(md_series), pandas.Series(pd_series))
def test_constructor_columns_and_index():
    """Constructor parity with explicit index/name, re-wrapping, renaming, and reindexing."""
    modin_series = pd.Series([1, 1, 10], index=[1, 2, 3], name="health")
    pandas_series = pandas.Series([1, 1, 10], index=[1, 2, 3], name="health")
    df_equals(modin_series, pandas_series)
    # Re-wrapping an existing Series must be a no-op copy.
    df_equals(pd.Series(modin_series), pandas.Series(pandas_series))
    # Re-wrapping with a new name overrides the original name.
    df_equals(
        pd.Series(modin_series, name="max_speed"),
        pandas.Series(pandas_series, name="max_speed"),
    )
    # Re-wrapping with a subset index selects those labels.
    df_equals(
        pd.Series(modin_series, index=[1, 2]),
        pandas.Series(pandas_series, index=[1, 2]),
    )
    # Reindexing onto labels absent from the original is not implemented in modin.
    with pytest.raises(NotImplementedError):
        pd.Series(modin_series, index=[1, 2, 99999])
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_copy(data):
    """Series.copy must equal the original and the pandas equivalents."""
    md_series, pd_series = create_test_series(data)
    df_equals(md_series, md_series.copy())
    df_equals(md_series.copy(), pd_series)
    df_equals(md_series.copy(), pd_series.copy())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_corr(data):
    """Series.corr with itself must match pandas."""
    md_series, pd_series = create_test_series(data)
    df_equals(md_series.corr(md_series), pd_series.corr(pd_series))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_count(data):
    """Series.count parity between modin and pandas."""
    md_series, pd_series = create_test_series(data)
    df_equals(md_series.count(), pd_series.count())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_cov(data):
    """Series.cov with itself must match pandas."""
    md_series, pd_series = create_test_series(data)
    df_equals(md_series.cov(md_series), pd_series.cov(pd_series))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
    "skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cummax(data, skipna):
    """Series.cummax parity (results and raised errors) across skipna values."""
    md_series, pd_series = create_test_series(data)
    try:
        expected = pd_series.cummax(skipna=skipna)
    except Exception as err:
        with pytest.raises(type(err)):
            md_series.cummax(skipna=skipna)
    else:
        df_equals(md_series.cummax(skipna=skipna), expected)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
    "skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cummin(data, skipna):
    """Series.cummin parity (results and raised errors) across skipna values."""
    md_series, pd_series = create_test_series(data)
    try:
        expected = pd_series.cummin(skipna=skipna)
    except Exception as err:
        with pytest.raises(type(err)):
            md_series.cummin(skipna=skipna)
    else:
        df_equals(md_series.cummin(skipna=skipna), expected)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
    "skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cumprod(data, skipna):
    """Series.cumprod parity (results and raised errors) across skipna values."""
    md_series, pd_series = create_test_series(data)
    try:
        expected = pd_series.cumprod(skipna=skipna)
    except Exception as err:
        with pytest.raises(type(err)):
            md_series.cumprod(skipna=skipna)
    else:
        df_equals(md_series.cumprod(skipna=skipna), expected)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
    "skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cumsum(data, skipna):
    """Series.cumsum parity (results and raised errors) across skipna values."""
    md_series, pd_series = create_test_series(data)
    try:
        expected = pd_series.cumsum(skipna=skipna)
    except Exception as err:
        with pytest.raises(type(err)):
            md_series.cumsum(skipna=skipna)
    else:
        df_equals(md_series.cumsum(skipna=skipna), expected)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_describe(data):
    """Series.describe parity: defaults, custom percentiles, include/exclude filters."""
    modin_series, pandas_series = create_test_series(data)
    df_equals(modin_series.describe(), pandas_series.describe())
    percentiles = [0.10, 0.11, 0.44, 0.78, 0.99]
    df_equals(
        modin_series.describe(percentiles=percentiles),
        pandas_series.describe(percentiles=percentiles),
    )

    # exclude as a list — pandas may raise for a Series; mirror the error type.
    try:
        pandas_result = pandas_series.describe(exclude=[np.float64])
    except Exception as e:
        with pytest.raises(type(e)):
            modin_series.describe(exclude=[np.float64])
    else:
        modin_result = modin_series.describe(exclude=[np.float64])
        df_equals(modin_result, pandas_result)

    # exclude as a bare dtype (not wrapped in a list).
    try:
        pandas_result = pandas_series.describe(exclude=np.float64)
    except Exception as e:
        with pytest.raises(type(e)):
            modin_series.describe(exclude=np.float64)
    else:
        modin_result = modin_series.describe(exclude=np.float64)
        df_equals(modin_result, pandas_result)

    # include restricted to non-numeric dtypes.
    try:
        pandas_result = pandas_series.describe(
            include=[np.timedelta64, np.datetime64, np.object, np.bool]
        )
    except Exception as e:
        with pytest.raises(type(e)):
            modin_series.describe(
                include=[np.timedelta64, np.datetime64, np.object, np.bool]
            )
    else:
        modin_result = modin_series.describe(
            include=[np.timedelta64, np.datetime64, np.object, np.bool]
        )
        df_equals(modin_result, pandas_result)

    # include given as the series' own dtype string, numeric dtypes, and "all".
    modin_result = modin_series.describe(include=str(modin_series.dtypes))
    pandas_result = pandas_series.describe(include=str(pandas_series.dtypes))
    df_equals(modin_result, pandas_result)

    modin_result = modin_series.describe(include=[np.number])
    pandas_result = pandas_series.describe(include=[np.number])
    df_equals(modin_result, pandas_result)

    df_equals(
        modin_series.describe(include="all"), pandas_series.describe(include="all")
    )
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
    "periods", int_arg_values, ids=arg_keys("periods", int_arg_keys)
)
def test_diff(data, periods):
    """Series.diff parity over several periods, directly and via .T (a no-op on Series)."""
    modin_series, pandas_series = create_test_series(data)

    try:
        pandas_result = pandas_series.diff(periods=periods)
    except Exception as e:
        with pytest.raises(type(e)):
            modin_series.diff(periods=periods)
    else:
        modin_result = modin_series.diff(periods=periods)
        df_equals(modin_result, pandas_result)

    # Same check routed through the transpose accessor.
    try:
        pandas_result = pandas_series.T.diff(periods=periods)
    except Exception as e:
        with pytest.raises(type(e)):
            modin_series.T.diff(periods=periods)
    else:
        modin_result = modin_series.T.diff(periods=periods)
        df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_div(data):
    """Series.div parity between modin and pandas."""
    md_series, pd_series = create_test_series(data)
    inter_df_math_helper(md_series, pd_series, "div")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_divide(data):
    """Series.divide parity between modin and pandas."""
    md_series, pd_series = create_test_series(data)
    inter_df_math_helper(md_series, pd_series, "divide")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dot(data):
    """Series.dot parity for 1D/2D arrays, DataFrames, Series, and error cases."""
    modin_series, pandas_series = create_test_series(data)
    ind_len = len(modin_series)

    # Test 1D array input
    arr = np.arange(ind_len)
    modin_result = modin_series.dot(arr)
    pandas_result = pandas_series.dot(arr)
    df_equals(modin_result, pandas_result)

    # Test 2D array input
    arr = np.arange(ind_len * 2).reshape(ind_len, 2)
    modin_result = modin_series.dot(arr)
    pandas_result = pandas_series.dot(arr)
    assert_array_equal(modin_result, pandas_result)

    # Test bad dimensions
    with pytest.raises(ValueError):
        modin_result = modin_series.dot(np.arange(ind_len + 10))

    # Test dataframe input
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    modin_result = modin_series.dot(modin_df)
    pandas_result = pandas_series.dot(pandas_df)
    df_equals(modin_result, pandas_result)

    # Test series input
    modin_series_2 = pd.Series(np.arange(ind_len), index=modin_series.index)
    pandas_series_2 = pandas.Series(np.arange(ind_len), index=pandas_series.index)
    modin_result = modin_series.dot(modin_series_2)
    pandas_result = pandas_series.dot(pandas_series_2)
    df_equals(modin_result, pandas_result)

    # Test when input series index doesn't line up with columns
    with pytest.raises(ValueError):
        modin_result = modin_series.dot(
            pd.Series(
                np.arange(ind_len), index=["a" for _ in range(len(modin_series.index))]
            )
        )

    # Test case when left series has size (1 x 1)
    # and right dataframe has size (1 x n)
    modin_result = pd.Series([1]).dot(pd.DataFrame(modin_series).T)
    pandas_result = pandas.Series([1]).dot(pandas.DataFrame(pandas_series).T)
    df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_matmul(data):
    """Series.__matmul__ (@) parity — mirrors test_dot using operator syntax."""
    modin_series, pandas_series = create_test_series(data)  # noqa: F841
    ind_len = len(modin_series)

    # Test 1D array input
    arr = np.arange(ind_len)
    modin_result = modin_series @ arr
    pandas_result = pandas_series @ arr
    df_equals(modin_result, pandas_result)

    # Test 2D array input
    arr = np.arange(ind_len * 2).reshape(ind_len, 2)
    modin_result = modin_series @ arr
    pandas_result = pandas_series @ arr
    assert_array_equal(modin_result, pandas_result)

    # Test bad dimensions
    with pytest.raises(ValueError):
        modin_result = modin_series @ np.arange(ind_len + 10)

    # Test dataframe input
    modin_df = pd.DataFrame(data)
    pandas_df = pandas.DataFrame(data)
    modin_result = modin_series @ modin_df
    pandas_result = pandas_series @ pandas_df
    df_equals(modin_result, pandas_result)

    # Test series input
    modin_series_2 = pd.Series(np.arange(ind_len), index=modin_series.index)
    pandas_series_2 = pandas.Series(np.arange(ind_len), index=pandas_series.index)
    modin_result = modin_series @ modin_series_2
    pandas_result = pandas_series @ pandas_series_2
    df_equals(modin_result, pandas_result)

    # Test when input series index doesn't line up with columns
    with pytest.raises(ValueError):
        modin_result = modin_series @ pd.Series(
            np.arange(ind_len), index=["a" for _ in range(len(modin_series.index))]
        )
@pytest.mark.xfail(reason="Using pandas Series.")
def test_drop():
    """Placeholder: expected to fail while `drop` defaults to pandas.

    NOTE(review): ``create_test_series()`` is called without data here; confirm
    the helper's defaults before un-xfailing.
    """
    modin_series = create_test_series()

    with pytest.raises(NotImplementedError):
        modin_series.drop(None, None, None, None)
@pytest.mark.parametrize(
    "data", test_data_with_duplicates_values, ids=test_data_with_duplicates_keys
)
@pytest.mark.parametrize(
    "keep", ["last", "first", False], ids=["last", "first", "False"]
)
@pytest.mark.parametrize("inplace", [True, False], ids=["True", "False"])
def test_drop_duplicates(data, keep, inplace):
    """``Series.drop_duplicates`` matches pandas for all keep/inplace combos."""
    md_ser, pd_ser = create_test_series(data)
    df_equals(
        md_ser.drop_duplicates(keep=keep, inplace=inplace),
        pd_ser.drop_duplicates(keep=keep, inplace=inplace),
    )
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("how", ["any", "all"], ids=["any", "all"])
def test_dropna(data, how):
    """``Series.dropna`` matches pandas; ``how=None`` with no thresh raises."""
    md_ser, pd_ser = create_test_series(data)
    with pytest.raises(TypeError):
        md_ser.dropna(how=None, thresh=None)
    df_equals(md_ser.dropna(how=how), pd_ser.dropna(how=how))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_inplace(data):
    """``dropna(inplace=True)`` matches pandas; ``thresh`` raises TypeError."""
    md_ser, pd_ser = create_test_series(data)
    expected = pd_ser.dropna()
    md_ser.dropna(inplace=True)
    df_equals(md_ser, expected)
    md_ser, pd_ser = create_test_series(data)
    with pytest.raises(TypeError):
        md_ser.dropna(thresh=2, inplace=True)
    md_ser, pd_ser = create_test_series(data)
    pd_ser.dropna(how="any", inplace=True)
    md_ser.dropna(how="any", inplace=True)
    df_equals(md_ser, pd_ser)
def test_dtype_empty():
    """Empty ``Series`` dtype should match pandas (object on BaseOnPython).

    Uses the builtin ``object`` instead of ``np.object``: that alias was
    deprecated in NumPy 1.20 and removed in 1.24, and always resolved to
    the builtin, so behavior is identical.
    """
    modin_series, pandas_series = pd.Series(), pandas.Series()
    if get_current_backend() == "BaseOnPython":
        # TODO: Remove this when default `dtype` of empty Series will be `object` in pandas (see #3142).
        assert modin_series.dtype == object
        assert pandas_series.dtype == np.float64
    else:
        assert modin_series.dtype == pandas_series.dtype
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dtype(data):
    """``Series.dtype`` and its ``dtypes`` alias both match pandas."""
    md_ser, pd_ser = create_test_series(data)
    df_equals(md_ser.dtype, md_ser.dtypes)
    df_equals(md_ser.dtype, pd_ser.dtype)
    df_equals(md_ser.dtype, pd_ser.dtypes)
def test_dt():
    """Exhaustively compare the ``.dt`` accessor between modin and pandas.

    Covers datetime attributes and methods (tz-aware), timedelta
    attributes and methods, and period attributes, each over a fixed,
    deterministic range so results are reproducible.
    """
    # --- datetime accessors on a timezone-aware date range ---
    data = pd.date_range("2016-12-31", periods=128, freq="D", tz="Europe/Berlin")
    modin_series = pd.Series(data)
    pandas_series = pandas.Series(data)
    df_equals(modin_series.dt.date, pandas_series.dt.date)
    df_equals(modin_series.dt.time, pandas_series.dt.time)
    df_equals(modin_series.dt.timetz, pandas_series.dt.timetz)
    df_equals(modin_series.dt.year, pandas_series.dt.year)
    df_equals(modin_series.dt.month, pandas_series.dt.month)
    df_equals(modin_series.dt.day, pandas_series.dt.day)
    df_equals(modin_series.dt.hour, pandas_series.dt.hour)
    df_equals(modin_series.dt.minute, pandas_series.dt.minute)
    df_equals(modin_series.dt.second, pandas_series.dt.second)
    df_equals(modin_series.dt.microsecond, pandas_series.dt.microsecond)
    df_equals(modin_series.dt.nanosecond, pandas_series.dt.nanosecond)
    df_equals(modin_series.dt.week, pandas_series.dt.week)
    df_equals(modin_series.dt.weekofyear, pandas_series.dt.weekofyear)
    df_equals(modin_series.dt.dayofweek, pandas_series.dt.dayofweek)
    df_equals(modin_series.dt.weekday, pandas_series.dt.weekday)
    df_equals(modin_series.dt.dayofyear, pandas_series.dt.dayofyear)
    df_equals(modin_series.dt.quarter, pandas_series.dt.quarter)
    df_equals(modin_series.dt.is_month_start, pandas_series.dt.is_month_start)
    df_equals(modin_series.dt.is_month_end, pandas_series.dt.is_month_end)
    df_equals(modin_series.dt.is_quarter_start, pandas_series.dt.is_quarter_start)
    df_equals(modin_series.dt.is_quarter_end, pandas_series.dt.is_quarter_end)
    df_equals(modin_series.dt.is_year_start, pandas_series.dt.is_year_start)
    df_equals(modin_series.dt.is_year_end, pandas_series.dt.is_year_end)
    df_equals(modin_series.dt.is_leap_year, pandas_series.dt.is_leap_year)
    df_equals(modin_series.dt.daysinmonth, pandas_series.dt.daysinmonth)
    df_equals(modin_series.dt.days_in_month, pandas_series.dt.days_in_month)
    assert modin_series.dt.tz == pandas_series.dt.tz
    assert modin_series.dt.freq == pandas_series.dt.freq
    df_equals(modin_series.dt.to_period("W"), pandas_series.dt.to_period("W"))
    assert_array_equal(
        modin_series.dt.to_pydatetime(), pandas_series.dt.to_pydatetime()
    )
    df_equals(
        modin_series.dt.tz_localize(None),
        pandas_series.dt.tz_localize(None),
    )
    df_equals(
        modin_series.dt.tz_convert(tz="Europe/Berlin"),
        pandas_series.dt.tz_convert(tz="Europe/Berlin"),
    )
    df_equals(modin_series.dt.normalize(), pandas_series.dt.normalize())
    df_equals(
        modin_series.dt.strftime("%B %d, %Y, %r"),
        pandas_series.dt.strftime("%B %d, %Y, %r"),
    )
    df_equals(modin_series.dt.round("H"), pandas_series.dt.round("H"))
    df_equals(modin_series.dt.floor("H"), pandas_series.dt.floor("H"))
    df_equals(modin_series.dt.ceil("H"), pandas_series.dt.ceil("H"))
    df_equals(modin_series.dt.month_name(), pandas_series.dt.month_name())
    df_equals(modin_series.dt.day_name(), pandas_series.dt.day_name())
    # --- timedelta accessors ---
    modin_series = pd.Series(pd.to_timedelta(np.arange(128), unit="d"))
    pandas_series = pandas.Series(pandas.to_timedelta(np.arange(128), unit="d"))
    assert_array_equal(
        modin_series.dt.to_pytimedelta(), pandas_series.dt.to_pytimedelta()
    )
    df_equals(modin_series.dt.total_seconds(), pandas_series.dt.total_seconds())
    df_equals(modin_series.dt.days, pandas_series.dt.days)
    df_equals(modin_series.dt.seconds, pandas_series.dt.seconds)
    df_equals(modin_series.dt.microseconds, pandas_series.dt.microseconds)
    df_equals(modin_series.dt.nanoseconds, pandas_series.dt.nanoseconds)
    df_equals(modin_series.dt.components, pandas_series.dt.components)
    # --- period accessors (datetime index converted to periods) ---
    data_per = pd.date_range("1/1/2012", periods=128, freq="M")
    pandas_series = pandas.Series(data_per, index=data_per).dt.to_period()
    modin_series = pd.Series(data_per, index=data_per).dt.to_period()
    df_equals(modin_series.dt.qyear, pandas_series.dt.qyear)
    df_equals(modin_series.dt.start_time, pandas_series.dt.start_time)
    df_equals(modin_series.dt.end_time, pandas_series.dt.end_time)
    df_equals(modin_series.dt.to_timestamp(), pandas_series.dt.to_timestamp())
@pytest.mark.parametrize(
    "data", test_data_with_duplicates_values, ids=test_data_with_duplicates_keys
)
@pytest.mark.parametrize(
    "keep", ["last", "first", False], ids=["last", "first", "False"]
)
def test_duplicated(data, keep):
    """``Series.duplicated`` matches pandas for every keep option."""
    md_ser, pd_ser = create_test_series(data)
    df_equals(md_ser.duplicated(keep=keep), pd_ser.duplicated(keep=keep))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_empty(data):
    """The ``Series.empty`` flag matches pandas."""
    md_ser, pd_ser = create_test_series(data)
    assert md_ser.empty == pd_ser.empty
def test_empty_series():
    """A freshly constructed Series with no data reports empty."""
    assert pd.Series().empty
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_eq(data):
    """``Series.eq`` matches pandas (via the shared comparison helper)."""
    md_ser, pd_ser = create_test_series(data)
    inter_df_math_helper(md_ser, pd_ser, "eq")
def test_equals():
    """``Series.equals`` and ``df_equals`` agree on equal/unequal pairs."""
    values = [2.9, 3, 3, 3]
    ser_a = pd.Series(values)
    ser_b = pd.Series(values)
    assert ser_a.equals(ser_b)
    assert ser_a.equals(pd.Series(ser_a))
    df_equals(ser_a, ser_b)
    df_equals(ser_a, pd.Series(ser_a))
    # Different data and a different index: no longer equal.
    ser_c = pd.Series([2, 3, 5, 1], index=list("abcd"))
    assert not ser_a.equals(ser_c)
    with pytest.raises(AssertionError):
        df_equals(ser_c, ser_a)
    with pytest.raises(AssertionError):
        df_equals(ser_c, ser_b)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ewm(data):
    """``Series.ewm`` defaults to pandas and should emit a UserWarning."""
    md_ser = create_test_series(data)[0]
    with pytest.warns(UserWarning):
        md_ser.ewm(halflife=6)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_expanding(data):
    """``Series.expanding`` defaults to pandas and should emit a UserWarning."""
    md_ser = create_test_series(data)[0]
    with pytest.warns(UserWarning):
        md_ser.expanding()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_factorize(data):
    """``Series.factorize`` defaults to pandas and should emit a UserWarning."""
    md_ser = create_test_series(data)[0]
    with pytest.warns(UserWarning):
        md_ser.factorize()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ffill(data):
    """``Series.ffill`` matches pandas, both out-of-place and inplace."""
    md_ser, pd_ser = create_test_series(data)
    df_equals(md_ser.ffill(), pd_ser.ffill())
    # inplace variant, on copies so the originals stay intact
    md_copy, pd_copy = md_ser.copy(), pd_ser.copy()
    md_copy.ffill(inplace=True)
    pd_copy.ffill(inplace=True)
    df_equals(md_copy, pd_copy)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("reindex", [None, 2, -2])
@pytest.mark.parametrize("limit", [None, 1, 2, 0.5, -1, -2, 1.5])
def test_fillna(data, reindex, limit):
    """``Series.fillna`` vs pandas: scalar/method/series/dict fills with limits.

    ``reindex`` optionally truncates the series and reindexes it back to
    the full index, creating NaN rows to fill; ``limit`` may be a float
    (fraction of length) or negative (offset from the end), and is
    normalized to an absolute count below.
    """
    modin_series, pandas_series = create_test_series(data)
    index = pandas_series.index
    # Shuffled self-index series used both directly and as a dict of fills.
    pandas_replace_series = index.to_series().sample(frac=1)
    modin_replace_series = pd.Series(pandas_replace_series)
    replace_dict = pandas_replace_series.to_dict()
    if reindex is not None:
        if reindex > 0:
            pandas_series = pandas_series[:reindex].reindex(index)
            modin_series = pd.Series(pandas_series)
        else:
            pandas_series = pandas_series[reindex:].reindex(index)
            # Because of bug #3178 modin Series has to be created from pandas
            # Series instead of performing the same slice and reindex operations.
            modin_series = pd.Series(pandas_series)
    # Normalize limit: fractions become absolute counts, negatives wrap.
    if isinstance(limit, float):
        limit = int(len(modin_series) * limit)
    if limit is not None and limit < 0:
        limit = len(modin_series) + limit
    df_equals(modin_series.fillna(0, limit=limit), pandas_series.fillna(0, limit=limit))
    df_equals(
        modin_series.fillna(method="bfill", limit=limit),
        pandas_series.fillna(method="bfill", limit=limit),
    )
    df_equals(
        modin_series.fillna(method="ffill", limit=limit),
        pandas_series.fillna(method="ffill", limit=limit),
    )
    df_equals(
        modin_series.fillna(modin_replace_series, limit=limit),
        pandas_series.fillna(pandas_replace_series, limit=limit),
    )
    df_equals(
        modin_series.fillna(replace_dict, limit=limit),
        pandas_series.fillna(replace_dict, limit=limit),
    )
@pytest.mark.xfail(reason="Using pandas Series.")
def test_filter():
    """``Series.filter`` with all-None arguments is expected unimplemented."""
    md_ser = create_test_series()
    with pytest.raises(NotImplementedError):
        md_ser.filter(None, None, None)
def test_first():
    """``Series.first`` over a date index matches pandas."""
    idx = pd.date_range("2010-04-09", periods=400, freq="2D")
    md_ser = pd.Series(list(range(400)), index=idx)
    pd_ser = pandas.Series(list(range(400)), index=idx)
    df_equals(md_ser.first("3D"), pd_ser.first("3D"))
    df_equals(md_ser.first("20D"), pd_ser.first("20D"))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_first_valid_index(data):
    """``Series.first_valid_index`` matches pandas."""
    md_ser, pd_ser = create_test_series(data)
    df_equals(md_ser.first_valid_index(), pd_ser.first_valid_index())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_floordiv(data):
    """``Series.floordiv`` matches pandas (via the shared math helper)."""
    md_ser, pd_ser = create_test_series(data)
    inter_df_math_helper(md_ser, pd_ser, "floordiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ge(data):
    """``Series.ge`` matches pandas (via the shared comparison helper)."""
    md_ser, pd_ser = create_test_series(data)
    inter_df_math_helper(md_ser, pd_ser, "ge")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_get(data):
    """``Series.get`` matches pandas for present keys and for defaults."""
    md_ser, pd_ser = create_test_series(data)
    for key in md_ser.keys():
        df_equals(md_ser.get(key), pd_ser.get(key))
    df_equals(
        md_ser.get("NO_EXIST", "DEFAULT"),
        pd_ser.get("NO_EXIST", "DEFAULT"),
    )
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_gt(data):
    """``Series.gt`` matches pandas (via the shared comparison helper)."""
    md_ser, pd_ser = create_test_series(data)
    inter_df_math_helper(md_ser, pd_ser, "gt")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_hasnans(data):
    """``Series.hasnans`` matches pandas."""
    md_ser, pd_ser = create_test_series(data)
    assert md_ser.hasnans == pd_ser.hasnans
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("n", int_arg_values, ids=arg_keys("n", int_arg_keys))
def test_head(data, n):
    """``Series.head`` matches pandas for various ``n``, incl. full length."""
    md_ser, pd_ser = create_test_series(data)
    df_equals(md_ser.head(n), pd_ser.head(n))
    df_equals(md_ser.head(len(md_ser)), pd_ser.head(len(pd_ser)))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_hist(data):
    """``Series.hist`` defaults to pandas and should emit a UserWarning."""
    md_ser = create_test_series(data)[0]
    with pytest.warns(UserWarning):
        md_ser.hist(None)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iat(data):
    """Scalar positional access via ``iat[0]`` matches pandas."""
    md_ser, pd_ser = create_test_series(data)
    df_equals(md_ser.iat[0], pd_ser.iat[0])
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
    "skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_idxmax(data, skipna):
    """``Series.idxmax`` matches pandas, also through the transpose."""
    md_ser, pd_ser = create_test_series(data)
    df_equals(md_ser.idxmax(skipna=skipna), pd_ser.idxmax(skipna=skipna))
    df_equals(md_ser.T.idxmax(skipna=skipna), pd_ser.T.idxmax(skipna=skipna))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
    "skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_idxmin(data, skipna):
    """``Series.idxmin`` matches pandas, also through the transpose."""
    md_ser, pd_ser = create_test_series(data)
    df_equals(md_ser.idxmin(skipna=skipna), pd_ser.idxmin(skipna=skipna))
    df_equals(md_ser.T.idxmin(skipna=skipna), pd_ser.T.idxmin(skipna=skipna))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iloc(request, data):
    """Positional indexing via ``iloc``: scalar reads, slices, list reads,
    in-place writes, and out-of-bounds errors, compared against pandas."""
    modin_series, pandas_series = create_test_series(data)
    if not name_contains(request.node.name, ["empty_data"]):
        # Scalar
        np.testing.assert_equal(modin_series.iloc[0], pandas_series.iloc[0])
        # Series
        df_equals(modin_series.iloc[1:], pandas_series.iloc[1:])
        df_equals(modin_series.iloc[1:2], pandas_series.iloc[1:2])
        df_equals(modin_series.iloc[[1, 2]], pandas_series.iloc[[1, 2]])
        # Write Item
        modin_series.iloc[[1, 2]] = 42
        pandas_series.iloc[[1, 2]] = 42
        df_equals(modin_series, pandas_series)
        # A second positional axis is invalid for a 1D Series.
        with pytest.raises(IndexError):
            modin_series.iloc[1:, 1]
    else:
        # Any positional access on an empty series is out of bounds.
        with pytest.raises(IndexError):
            modin_series.iloc[0]
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_index(data):
    """``Series.index`` get/set matches pandas; wrong-length set raises."""
    md_ser, pd_ser = create_test_series(data)
    df_equals(md_ser.index, pd_ser.index)
    with pytest.raises(ValueError):
        md_ser.index = list(md_ser.index) + [999]
    md_ser.index = md_ser.index.map(str)
    pd_ser.index = pd_ser.index.map(str)
    df_equals(md_ser.index, pd_ser.index)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_interpolate(data):
    """``Series.interpolate`` defaults to pandas and should warn."""
    md_ser = create_test_series(data)[0]
    with pytest.warns(UserWarning):
        md_ser.interpolate()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_is_monotonic(data):
    """``Series.is_monotonic`` matches pandas."""
    md_ser, pd_ser = create_test_series(data)
    assert md_ser.is_monotonic == pd_ser.is_monotonic
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_is_monotonic_decreasing(data):
    """``Series.is_monotonic_decreasing`` matches pandas."""
    md_ser, pd_ser = create_test_series(data)
    assert md_ser.is_monotonic_decreasing == pd_ser.is_monotonic_decreasing
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_is_monotonic_increasing(data):
    """``Series.is_monotonic_increasing`` matches pandas."""
    md_ser, pd_ser = create_test_series(data)
    assert md_ser.is_monotonic_increasing == pd_ser.is_monotonic_increasing
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_is_unique(data):
    """``Series.is_unique`` matches pandas."""
    md_ser, pd_ser = create_test_series(data)
    assert md_ser.is_unique == pd_ser.is_unique
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_isin(data):
    """``Series.isin`` matches pandas for a small list of candidates."""
    md_ser, pd_ser = create_test_series(data)
    values = [1, 2, 3, 4]
    df_equals(md_ser.isin(values), pd_ser.isin(values))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_isnull(data):
    """``Series.isnull`` matches pandas."""
    md_ser, pd_ser = create_test_series(data)
    df_equals(md_ser.isnull(), pd_ser.isnull())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_items(data):
    """``Series.items`` yields the same (index, value) pairs as pandas."""
    md_ser, pd_ser = create_test_series(data)
    for (md_idx, md_val), (pd_idx, pd_val) in zip(md_ser.items(), pd_ser.items()):
        df_equals(md_val, pd_val)
        assert pd_idx == md_idx
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iteritems(data):
    """``Series.iteritems`` yields the same (index, value) pairs as pandas."""
    md_ser, pd_ser = create_test_series(data)
    for (md_idx, md_val), (pd_idx, pd_val) in zip(
        md_ser.iteritems(), pd_ser.iteritems()
    ):
        df_equals(md_val, pd_val)
        assert pd_idx == md_idx
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_keys(data):
    """``Series.keys`` matches pandas."""
    md_ser, pd_ser = create_test_series(data)
    df_equals(md_ser.keys(), pd_ser.keys())
def test_kurtosis_alias():
    """``Series.kurt`` must be the very same function object as ``kurtosis``."""
    # It's optimization. If failed, Series.kurt should be tested explicitly
    # in tests: `test_kurt_kurtosis`, `test_kurt_kurtosis_level`.
    assert pd.Series.kurt == pd.Series.kurtosis
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize("skipna", bool_arg_values, ids=bool_arg_keys)
def test_kurtosis(axis, skipna):
    """``Series.kurtosis`` matches pandas for both axes and skipna options."""
    eval_general(
        *create_test_series(test_data["float_nan_data"]),
        lambda ser: ser.kurtosis(axis=axis, skipna=skipna),
    )
@pytest.mark.parametrize("axis", ["rows", "columns"])
@pytest.mark.parametrize("numeric_only", [True, False, None])
def test_kurtosis_numeric_only(axis, numeric_only):
    """``Series.kurtosis`` honors ``numeric_only`` like pandas (incl. errors)."""
    eval_general(
        *create_test_series(test_data_diff_dtype),
        lambda ser: ser.kurtosis(axis=axis, numeric_only=numeric_only),
    )
@pytest.mark.parametrize("level", [-1, 0, 1])
def test_kurtosis_level(level):
    """Compare ``kurtosis(axis=1, level=...)`` behavior — including raised
    exceptions — between modin and pandas via ``eval_general``."""
    data = test_data["int_data"]
    modin_s, pandas_s = create_test_series(data)
    index = generate_multiindex(len(data.keys()))
    # NOTE(review): assigning ``columns`` on a Series only sets an ad-hoc
    # attribute; presumably ``index`` was intended — confirm before changing,
    # since eval_general also passes when both sides raise identically.
    modin_s.columns = index
    pandas_s.columns = index
    eval_general(
        modin_s,
        pandas_s,
        lambda s: s.kurtosis(axis=1, level=level),
    )
def test_last():
    """``Series.last`` over a date index matches pandas."""
    md_idx = pd.date_range("2010-04-09", periods=400, freq="2D")
    pd_idx = pandas.date_range("2010-04-09", periods=400, freq="2D")
    md_ser = pd.Series(list(range(400)), index=md_idx)
    pd_ser = pandas.Series(list(range(400)), index=pd_idx)
    df_equals(md_ser.last("3D"), pd_ser.last("3D"))
    df_equals(md_ser.last("20D"), pd_ser.last("20D"))
@pytest.mark.parametrize("func", ["all", "any", "mad", "count"])
def test_index_order(func):
    """Level-0 reductions keep index order consistent with pandas (#1708, #1869)."""
    md_ser, pd_ser = create_test_series(test_data["float_nan_data"])
    n_rows = len(md_ser.index)
    lvl0 = np.random.choice(list(range(10)), n_rows)
    lvl1 = np.random.choice(list(range(10)), n_rows)
    multi_index = pandas.MultiIndex.from_arrays([lvl0, lvl1])
    md_ser.index = multi_index
    pd_ser.index = multi_index
    df_equals(
        getattr(md_ser, func)(level=0).index,
        getattr(pd_ser, func)(level=0).index,
    )
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_last_valid_index(data):
    """``Series.last_valid_index`` matches pandas."""
    md_ser, pd_ser = create_test_series(data)
    assert md_ser.last_valid_index() == pd_ser.last_valid_index()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_le(data):
    """``Series.le`` matches pandas (via the shared comparison helper)."""
    md_ser, pd_ser = create_test_series(data)
    inter_df_math_helper(md_ser, pd_ser, "le")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_loc(data):
    """Label indexing via ``loc``: per-label scalar and open-slice lookups,
    boolean-mask selection, and MultiIndex partial slicing (regression for
    #1988), compared against pandas."""
    modin_series, pandas_series = create_test_series(data)
    for v in modin_series.index:
        df_equals(modin_series.loc[v], pandas_series.loc[v])
        df_equals(modin_series.loc[v:], pandas_series.loc[v:])
    # Boolean mask selecting every third row.
    indices = [True if i % 3 == 0 else False for i in range(len(modin_series.index))]
    modin_result = modin_series.loc[indices]
    pandas_result = pandas_series.loc[indices]
    df_equals(modin_result, pandas_result)
    # From issue #1988
    # NOTE: the ``data`` parameter is shadowed below by the MultiIndex fixture.
    index = pd.MultiIndex.from_product([np.arange(10), np.arange(10)], names=["f", "s"])
    data = np.arange(100)
    modin_series = pd.Series(data, index=index).sort_index()
    pandas_series = pandas.Series(data, index=index).sort_index()
    modin_result = modin_series.loc[
        (slice(None), 1),
    ]
    pandas_result = pandas_series.loc[
        (slice(None), 1),
    ]
    df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_lt(data):
    """``Series.lt`` matches pandas (via the shared comparison helper)."""
    md_ser, pd_ser = create_test_series(data)
    inter_df_math_helper(md_ser, pd_ser, "lt")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", [None, 0])
@pytest.mark.parametrize("skipna", [None, True, False])
@pytest.mark.parametrize("level", [0, -1, None])
def test_mad(level, data, axis, skipna):
    """``Series.mad`` matches pandas across axis/skipna/level combinations."""
    md_ser, pd_ser = create_test_series(data)
    df_equals(
        md_ser.mad(axis=axis, skipna=skipna, level=level),
        pd_ser.mad(axis=axis, skipna=skipna, level=level),
    )
@pytest.mark.parametrize("na_values", ["ignore", None], ids=["na_ignore", "na_none"])
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_map(data, na_values):
    """``Series.map`` with callables, dicts and list-returning functions."""
    md_ser, pd_ser = create_test_series(data)
    df_equals(
        md_ser.map(str, na_action=na_values),
        pd_ser.map(str, na_action=na_values),
    )
    mapping = {i: str(i) for i in range(100)}
    df_equals(
        md_ser.map(mapping, na_action=na_values),
        pd_ser.map(mapping, na_action=na_values),
    )
    # Functions returning list objects
    md_lists = md_ser.map(lambda s: [s, s, s])
    pd_lists = pd_ser.map(lambda s: [s, s, s])
    df_equals(md_lists, pd_lists)
    # Index into those list objects
    df_equals(md_lists.map(lambda l: l[0]), pd_lists.map(lambda l: l[0]))
def test_mask():
    """``Series.mask`` defaults to pandas (warns); tolerate a ValueError."""
    ser = pd.Series(np.arange(10))
    cond = ser % 3 == 0
    with pytest.warns(UserWarning):
        try:
            ser.mask(~cond, -ser)
        except ValueError:
            pass
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
    "skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_max(data, skipna):
    """``Series.max`` matches pandas for every skipna option."""
    md_ser, pd_ser = create_test_series(data)
    df_equals(md_ser.max(skipna=skipna), pd_ser.max(skipna=skipna))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
    "skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_mean(data, skipna):
    """``Series.mean`` matches pandas for every skipna option."""
    md_ser, pd_ser = create_test_series(data)
    df_equals(md_ser.mean(skipna=skipna), pd_ser.mean(skipna=skipna))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
    "skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_median(data, skipna):
    """``Series.median`` matches pandas for every skipna option."""
    md_ser, pd_ser = create_test_series(data)
    df_equals(md_ser.median(skipna=skipna), pd_ser.median(skipna=skipna))
@pytest.mark.parametrize(
    "method", ["median", "skew", "std", "sum", "var", "prod", "sem"]
)
def test_median_skew_std_sum_var_prod_sem_1953(method):
    """Level-based reductions on a MultiIndex Series (regression for #1953)."""
    values = [3] * 9
    index_arrays = [
        ["1", "1", "1", "2", "2", "2", "3", "3", "3"],
        ["1", "2", "3", "4", "5", "6", "7", "8", "9"],
    ]
    md_ser = pd.Series(values, index=index_arrays)
    pd_ser = pandas.Series(values, index=index_arrays)
    eval_general(md_ser, pd_ser, lambda s: getattr(s, method)(level=0))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("index", [True, False], ids=["True", "False"])
def test_memory_usage(data, index):
    """``Series.memory_usage`` matches pandas with and without the index."""
    md_ser, pd_ser = create_test_series(data)
    df_equals(md_ser.memory_usage(index=index), pd_ser.memory_usage(index=index))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
    "skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_min(data, skipna):
    """``Series.min`` matches pandas for every skipna option."""
    md_ser, pd_ser = create_test_series(data)
    df_equals(md_ser.min(skipna=skipna), pd_ser.min(skipna=skipna))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mod(data):
    """``Series.mod`` matches pandas (via the shared math helper)."""
    md_ser, pd_ser = create_test_series(data)
    inter_df_math_helper(md_ser, pd_ser, "mod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mode(data):
    """``Series.mode`` matches pandas."""
    md_ser, pd_ser = create_test_series(data)
    df_equals(md_ser.mode(), pd_ser.mode())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mul(data):
    """``Series.mul`` matches pandas (via the shared math helper)."""
    md_ser, pd_ser = create_test_series(data)
    inter_df_math_helper(md_ser, pd_ser, "mul")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_multiply(data):
    """``Series.multiply`` matches pandas (via the shared math helper)."""
    md_ser, pd_ser = create_test_series(data)
    inter_df_math_helper(md_ser, pd_ser, "multiply")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_name(data):
    """``Series.name`` get/set matches pandas and updates the query compiler."""
    md_ser, pd_ser = create_test_series(data)
    assert md_ser.name == pd_ser.name
    md_ser.name = pd_ser.name = "New_name"
    assert md_ser.name == pd_ser.name
    assert md_ser._query_compiler.columns == ["New_name"]
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_nbytes(data):
    """``Series.nbytes`` matches pandas."""
    md_ser, pd_ser = create_test_series(data)
    assert md_ser.nbytes == pd_ser.nbytes
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ndim(data):
    """A Series is always one-dimensional."""
    md_ser = create_test_series(data)[0]
    assert md_ser.ndim == 1
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ne(data):
    """``Series.ne`` matches pandas (via the shared comparison helper)."""
    md_ser, pd_ser = create_test_series(data)
    inter_df_math_helper(md_ser, pd_ser, "ne")
@pytest.mark.xfail(reason="Using pandas Series.")
def test_nlargest():
    """``Series.nlargest(None)`` is expected to be unimplemented."""
    md_ser = create_test_series()
    with pytest.raises(NotImplementedError):
        md_ser.nlargest(None)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_notnull(data):
    """``Series.notnull`` matches pandas."""
    md_ser, pd_ser = create_test_series(data)
    df_equals(md_ser.notnull(), pd_ser.notnull())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_nsmallest(data):
    """``Series.nsmallest`` matches pandas for several n/keep combinations."""
    md_ser, pd_ser = create_test_series(data)
    for n, keep in ((5, "first"), (10, "first"), (10, "last")):
        df_equals(
            md_ser.nsmallest(n=n, keep=keep),
            pd_ser.nsmallest(n=n, keep=keep),
        )
    df_equals(md_ser.nsmallest(keep="all"), pd_ser.nsmallest(keep="all"))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("dropna", [True, False], ids=["True", "False"])
def test_nunique(data, dropna):
    """``Series.nunique`` matches pandas with and without dropping NaNs."""
    md_ser, pd_ser = create_test_series(data)
    df_equals(md_ser.nunique(dropna=dropna), pd_ser.nunique(dropna=dropna))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pct_change(data):
    """``Series.pct_change`` defaults to pandas and should warn."""
    md_ser = create_test_series(data)[0]
    with pytest.warns(UserWarning):
        md_ser.pct_change()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pipe(data):
    """``Series.pipe`` chains behave like directly nested calls and match pandas."""
    modin_series, pandas_series = create_test_series(data)
    n = len(modin_series.index)
    # Row positions are taken modulo the length so they are always valid.
    a, b, c = 2 % n, 0, 3 % n
    def h(x):
        # Drop missing values.
        return x.dropna()
    def g(x, arg1=0):
        # Double the series ``arg1`` times.
        for _ in range(arg1):
            x = x.append(x)
        return x
    def f(x, arg2=0, arg3=0):
        # Drop two rows selected by position.
        return x.drop(x.index[[arg2, arg3]])
    df_equals(
        f(g(h(modin_series), arg1=a), arg2=b, arg3=c),
        (modin_series.pipe(h).pipe(g, arg1=a).pipe(f, arg2=b, arg3=c)),
    )
    df_equals(
        (modin_series.pipe(h).pipe(g, arg1=a).pipe(f, arg2=b, arg3=c)),
        (pandas_series.pipe(h).pipe(g, arg1=a).pipe(f, arg2=b, arg3=c)),
    )
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_plot(request, data):
    """Compare modin and pandas plot line data (both x and y) for numeric data.

    Fixes a copy-paste bug: the non-masked ydata branch previously compared
    ``get_xdata()`` against ``get_xdata()``, so y-values were never checked.
    """
    modin_series, pandas_series = create_test_series(data)
    if name_contains(request.node.name, numeric_dfs):
        # We have to test this way because equality in plots means same object.
        zipped_plot_lines = zip(modin_series.plot().lines, pandas_series.plot().lines)
        for left, right in zipped_plot_lines:
            if isinstance(left.get_xdata(), np.ma.core.MaskedArray) and isinstance(
                right.get_xdata(), np.ma.core.MaskedArray
            ):
                assert all((left.get_xdata() == right.get_xdata()).data)
            else:
                assert np.array_equal(left.get_xdata(), right.get_xdata())
            if isinstance(left.get_ydata(), np.ma.core.MaskedArray) and isinstance(
                right.get_ydata(), np.ma.core.MaskedArray
            ):
                assert all((left.get_ydata() == right.get_ydata()).data)
            else:
                # BUG FIX: compare ydata here (previously compared xdata twice).
                assert np.array_equal(left.get_ydata(), right.get_ydata())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pop(data):
    """Popping every key matches pandas and leaves identical remainders."""
    md_ser, pd_ser = create_test_series(data)
    for key in md_ser.keys():
        df_equals(md_ser.pop(key), pd_ser.pop(key))
        df_equals(md_ser, pd_ser)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pow(data):
    """``Series.pow`` matches pandas (via the shared math helper)."""
    md_ser, pd_ser = create_test_series(data)
    inter_df_math_helper(md_ser, pd_ser, "pow")
def test_product_alias():
    """``Series.prod`` and ``Series.product`` must be the same function object."""
    assert pd.Series.prod == pd.Series.product
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize(
    "skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_prod(axis, skipna):
    """``Series.prod`` matches pandas for both axes and skipna options."""
    eval_general(
        *create_test_series(test_data["float_nan_data"]),
        lambda ser: ser.prod(axis=axis, skipna=skipna),
    )
@pytest.mark.parametrize(
    "numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
@pytest.mark.parametrize(
    "min_count", int_arg_values, ids=arg_keys("min_count", int_arg_keys)
)
def test_prod_specific(min_count, numeric_only):
    """``Series.prod`` honors ``min_count``/``numeric_only`` like pandas."""
    eval_general(
        *create_test_series(test_data_diff_dtype),
        lambda ser: ser.prod(min_count=min_count, numeric_only=numeric_only),
    )
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("q", quantiles_values, ids=quantiles_keys)
def test_quantile(request, data, q):
    """``Series.quantile`` matches pandas for numeric data."""
    md_ser, pd_ser = create_test_series(data)
    if not name_contains(request.node.name, no_numeric_dfs):
        df_equals(md_ser.quantile(q), pd_ser.quantile(q))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_radd(data):
    """``Series.radd`` matches pandas (via the shared math helper)."""
    md_ser, pd_ser = create_test_series(data)
    inter_df_math_helper(md_ser, pd_ser, "radd")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
    "na_option", ["keep", "top", "bottom"], ids=["keep", "top", "bottom"]
)
def test_rank(data, na_option):
    """``Series.rank`` matches pandas, including raising the same error type."""
    md_ser, pd_ser = create_test_series(data)
    try:
        expected = pd_ser.rank(na_option=na_option)
    except Exception as err:
        with pytest.raises(type(err)):
            md_ser.rank(na_option=na_option)
    else:
        df_equals(md_ser.rank(na_option=na_option), expected)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("order", [None, "C", "F", "A", "K"])
def test_ravel(data, order):
    """``Series.ravel`` matches pandas for every order flag."""
    md_ser, pd_ser = create_test_series(data)
    np.testing.assert_equal(md_ser.ravel(order=order), pd_ser.ravel(order=order))
@pytest.mark.parametrize(
    "data",
    [
        pandas.Categorical(np.arange(1000), ordered=True),
        pandas.Categorical(np.arange(1000), ordered=False),
        pandas.Categorical(np.arange(1000), categories=np.arange(500), ordered=True),
        pandas.Categorical(np.arange(1000), categories=np.arange(500), ordered=False),
    ],
)
@pytest.mark.parametrize("order", [None, "C", "F", "A", "K"])
def test_ravel_category(data, order):
    """Ravelling a large categorical Series must preserve category metadata."""
    modin_series, pandas_series = create_test_series(data)
    modin_raveled = modin_series.ravel(order=order)
    pandas_raveled = pandas_series.ravel(order=order)
    categories_equals(modin_raveled, pandas_raveled)
@pytest.mark.parametrize(
    "data",
    [
        pandas.Categorical(np.arange(10), ordered=True),
        pandas.Categorical(np.arange(10), ordered=False),
        pandas.Categorical(np.arange(10), categories=np.arange(5), ordered=True),
        pandas.Categorical(np.arange(10), categories=np.arange(5), ordered=False),
    ],
)
@pytest.mark.parametrize("order", [None, "C", "F", "A", "K"])
def test_ravel_simple_category(data, order):
    """Ravelling a small categorical Series must preserve category metadata."""
    modin_series, pandas_series = create_test_series(data)
    modin_raveled = modin_series.ravel(order=order)
    pandas_raveled = pandas_series.ravel(order=order)
    categories_equals(modin_raveled, pandas_raveled)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rdiv(data):
    """Exercise `Series.rdiv` against pandas via the shared binary-op helper."""
    inter_df_math_helper(*create_test_series(data), "rdiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_reindex(data):
    """`Series.reindex` must match pandas for new labels, reorders, and MultiIndex."""
    modin_series, pandas_series = create_test_series(data)
    # Appending a label absent from the index: fill_value supplies its value.
    pandas_result = pandas_series.reindex(
        list(pandas_series.index) + ["_A_NEW_ROW"], fill_value=0
    )
    modin_result = modin_series.reindex(
        list(modin_series.index) + ["_A_NEW_ROW"], fill_value=0
    )
    df_equals(pandas_result, modin_result)

    # Per-column reindex of a small frame: reorders, out-of-range labels (NaN),
    # and the explicit index= keyword form.
    frame_data = {
        "col1": [0, 1, 2, 3],
        "col2": [4, 5, 6, 7],
        "col3": [8, 9, 10, 11],
        "col4": [12, 13, 14, 15],
        "col5": [0, 0, 0, 0],
    }
    pandas_df = pandas.DataFrame(frame_data)
    modin_df = pd.DataFrame(frame_data)
    for col in pandas_df.columns:
        modin_series = modin_df[col]
        pandas_series = pandas_df[col]
        df_equals(
            modin_series.reindex([0, 3, 2, 1]), pandas_series.reindex([0, 3, 2, 1])
        )
        df_equals(modin_series.reindex([0, 6, 2]), pandas_series.reindex([0, 6, 2]))
        df_equals(
            modin_series.reindex(index=[0, 1, 5]),
            pandas_series.reindex(index=[0, 1, 5]),
        )

    # MultiIndex
    modin_series, pandas_series = create_test_series(data)
    # Assign the same generated MultiIndex object to both series.
    modin_series.index, pandas_series.index = [
        generate_multiindex(len(pandas_series))
    ] * 2
    pandas_result = pandas_series.reindex(list(reversed(pandas_series.index)))
    modin_result = modin_series.reindex(list(reversed(modin_series.index)))
    df_equals(pandas_result, modin_result)
def test_reindex_like():
    """`Series.reindex_like` is not implemented natively; it should emit a
    default-to-pandas UserWarning when called."""
    df1 = pd.DataFrame(
        [
            [24.3, 75.7, "high"],
            [31, 87.8, "high"],
            [22, 71.6, "medium"],
            [35, 95, "medium"],
        ],
        columns=["temp_celsius", "temp_fahrenheit", "windspeed"],
        index=pd.date_range(start="2014-02-12", end="2014-02-15", freq="D"),
    )
    df2 = pd.DataFrame(
        [[28, "low"], [30, "low"], [35.1, "medium"]],
        columns=["temp_celsius", "windspeed"],
        index=pd.DatetimeIndex(["2014-02-12", "2014-02-13", "2014-02-15"]),
    )
    series1 = df1["windspeed"]
    series2 = df2["windspeed"]
    # Only the warning is checked, not the reindexed result.
    with pytest.warns(UserWarning):
        series2.reindex_like(series1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rename(data):
    """`Series.rename` must match pandas for scalar, in-place, and callable forms."""
    modin_series, pandas_series = create_test_series(data)
    new_name = "NEW_NAME"
    # Out-of-place rename with a scalar name.
    df_equals(modin_series.rename(new_name), pandas_series.rename(new_name))
    # In-place rename performed on copies so the originals stay untouched.
    modin_copy = modin_series.copy()
    pandas_copy = pandas_series.copy()
    modin_copy.rename(new_name, inplace=True)
    pandas_copy.rename(new_name, inplace=True)
    df_equals(modin_copy, pandas_copy)
    # Rename with a callable applied to every index label.
    modin_mapped = modin_series.rename("{}__".format)
    pandas_mapped = pandas_series.rename("{}__".format)
    df_equals(modin_mapped, pandas_mapped)
def test_reorder_levels():
    """`Series.reorder_levels` must match pandas on a three-level MultiIndex."""
    data = np.random.randint(1, 100, 12)
    # Build the (Number, Letter, Color) label product once and reuse it for
    # both the Modin and the pandas index.
    label_tuples = [
        (num, letter, color)
        for num in range(1, 3)
        for letter in ["a", "b", "c"]
        for color in ["Red", "Green"]
    ]
    level_names = ["Number", "Letter", "Color"]
    modin_series = pd.Series(
        data,
        index=pd.MultiIndex.from_tuples(label_tuples, names=level_names),
    )
    pandas_series = pandas.Series(
        data,
        index=pandas.MultiIndex.from_tuples(label_tuples, names=level_names),
    )
    new_order = ["Letter", "Color", "Number"]
    modin_result = modin_series.reorder_levels(new_order)
    pandas_result = pandas_series.reorder_levels(new_order)
    df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
    "repeats", [0, 2, 3, 4], ids=["repeats_{}".format(i) for i in [0, 2, 3, 4]]
)
def test_repeat(data, repeats):
    """`Series.repeat` with a scalar count must match pandas."""
    modin_series = pd.Series(data)
    pandas_series = pandas.Series(data)
    eval_general(modin_series, pandas_series, lambda df: df.repeat(repeats))
@pytest.mark.parametrize("data", [np.arange(256)])
@pytest.mark.parametrize(
    "repeats",
    [
        [0],
        [2],
        [3],
        [4],
        np.arange(256),
        [0] * 64 + [2] * 64 + [3] * 32 + [4] * 32 + [5] * 64,
        [2] * 257,
        [2] * 128,
    ],
)
def test_repeat_lists(data, repeats):
    """`Series.repeat` with per-element counts (incl. mismatched lengths) must
    match pandas, raising identically when the counts don't align."""
    modin_series = pd.Series(data)
    pandas_series = pandas.Series(data)
    eval_general(modin_series, pandas_series, lambda df: df.repeat(repeats))
def test_replace():
    """`Series.replace` must match pandas for scalar and bfill-list replacement."""
    values = [0, 1, 2, 3, 4]
    modin_series = pd.Series(values)
    pandas_series = pandas.Series(values)
    # Scalar-to-scalar replacement.
    df_equals(modin_series.replace(0, 5), pandas_series.replace(0, 5))
    # List replacement filled from the following values (bfill).
    df_equals(
        modin_series.replace([1, 2], method="bfill"),
        pandas_series.replace([1, 2], method="bfill"),
    )
@pytest.mark.parametrize("closed", ["left", "right"])
@pytest.mark.parametrize("label", ["right", "left"])
@pytest.mark.parametrize("level", [None, 1])
def test_resample(closed, label, level):
    """Broad parity sweep of the Series resampler against pandas.

    Builds an hourly series, resamples to 5-minute bins (upsampling), and
    compares essentially every resampler method between Modin and pandas.
    When ``level`` is not None the series gets a MultiIndex and resampling
    targets the datetime level.
    """
    rule = "5T"
    freq = "H"
    base = 2
    index = pandas.date_range("1/1/2000", periods=12, freq=freq)
    pandas_series = pandas.Series(range(12), index=index)
    modin_series = pd.Series(range(12), index=index)
    if level is not None:
        # Replace the flat DatetimeIndex with a 2-level MultiIndex whose
        # second level (level=1) is the datetime level being resampled.
        index = pandas.MultiIndex.from_product(
            [["a", "b", "c"], pandas.date_range("31/12/2000", periods=4, freq=freq)]
        )
        pandas_series.index = index
        modin_series.index = index
    pandas_resampler = pandas_series.resample(
        rule, closed=closed, label=label, base=base, level=level
    )
    modin_resampler = modin_series.resample(
        rule, closed=closed, label=label, base=base, level=level
    )
    # Aggregations.
    df_equals(modin_resampler.count(), pandas_resampler.count())
    df_equals(modin_resampler.var(0), pandas_resampler.var(0))
    df_equals(modin_resampler.sum(), pandas_resampler.sum())
    df_equals(modin_resampler.std(), pandas_resampler.std())
    df_equals(modin_resampler.sem(), pandas_resampler.sem())
    df_equals(modin_resampler.size(), pandas_resampler.size())
    df_equals(modin_resampler.prod(), pandas_resampler.prod())
    df_equals(modin_resampler.ohlc(), pandas_resampler.ohlc())
    df_equals(modin_resampler.min(), pandas_resampler.min())
    df_equals(modin_resampler.median(), pandas_resampler.median())
    df_equals(modin_resampler.mean(), pandas_resampler.mean())
    df_equals(modin_resampler.max(), pandas_resampler.max())
    df_equals(modin_resampler.last(), pandas_resampler.last())
    df_equals(modin_resampler.first(), pandas_resampler.first())
    df_equals(modin_resampler.nunique(), pandas_resampler.nunique())
    # Functional interfaces: pipe / transform / aggregate / apply.
    df_equals(
        modin_resampler.pipe(lambda x: x.max() - x.min()),
        pandas_resampler.pipe(lambda x: x.max() - x.min()),
    )
    df_equals(
        modin_resampler.transform(lambda x: (x - x.mean()) / x.std()),
        pandas_resampler.transform(lambda x: (x - x.mean()) / x.std()),
    )
    df_equals(
        modin_resampler.aggregate("max"),
        pandas_resampler.aggregate("max"),
    )
    df_equals(
        modin_resampler.apply("sum"),
        pandas_resampler.apply("sum"),
    )
    # Group access and metadata.
    df_equals(
        modin_resampler.get_group(name=list(modin_resampler.groups)[0]),
        pandas_resampler.get_group(name=list(pandas_resampler.groups)[0]),
    )
    assert pandas_resampler.indices == modin_resampler.indices
    assert pandas_resampler.groups == modin_resampler.groups
    df_equals(modin_resampler.quantile(), pandas_resampler.quantile())
    # Upsampling from level= or on= selection is not supported
    if level is None:
        # Fill / interpolation methods only make sense for the flat index.
        df_equals(
            modin_resampler.interpolate(),
            pandas_resampler.interpolate(),
        )
        df_equals(modin_resampler.asfreq(), pandas_resampler.asfreq())
        df_equals(
            modin_resampler.fillna(method="nearest"),
            pandas_resampler.fillna(method="nearest"),
        )
        df_equals(modin_resampler.pad(), pandas_resampler.pad())
        df_equals(modin_resampler.nearest(), pandas_resampler.nearest())
        df_equals(modin_resampler.bfill(), pandas_resampler.bfill())
        df_equals(modin_resampler.backfill(), pandas_resampler.backfill())
        df_equals(modin_resampler.ffill(), pandas_resampler.ffill())
        df_equals(
            modin_resampler.apply(["sum", "mean", "max"]),
            pandas_resampler.apply(["sum", "mean", "max"]),
        )
        df_equals(
            modin_resampler.aggregate(["sum", "mean", "max"]),
            pandas_resampler.aggregate(["sum", "mean", "max"]),
        )
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("drop", [True, False], ids=["True", "False"])
@pytest.mark.parametrize("name", [None, "Custom name"])
@pytest.mark.parametrize("inplace", [True, False])
def test_reset_index(data, drop, name, inplace):
    """`Series.reset_index` must match pandas for drop/name/inplace combinations."""
    eval_general(
        *create_test_series(data),
        lambda df, *args, **kwargs: df.reset_index(*args, **kwargs),
        drop=drop,
        name=name,
        inplace=inplace,
        # NOTE(review): __inplace__ presumably tells eval_general to compare
        # the mutated objects rather than the (None) return value — confirm
        # against eval_general's definition.
        __inplace__=inplace,
    )
@pytest.mark.xfail(reason="Using pandas Series.")
def test_reshape():
    """`Series.reshape` is expected to raise NotImplementedError.

    Bug fix: ``create_test_series()`` returns a ``(modin, pandas)`` tuple;
    the original assigned the whole tuple to ``modin_series``, so
    ``.reshape`` hit the tuple (AttributeError) instead of exercising the
    Modin Series — the xfail masked that. Unpack the tuple so the intended
    call is actually made.
    """
    modin_series, _ = create_test_series()
    with pytest.raises(NotImplementedError):
        modin_series.reshape(None)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rfloordiv(data):
    """Exercise `Series.rfloordiv` against pandas via the shared helper."""
    inter_df_math_helper(*create_test_series(data), "rfloordiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rmod(data):
    """Exercise `Series.rmod` against pandas via the shared helper."""
    inter_df_math_helper(*create_test_series(data), "rmod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rmul(data):
    """Exercise `Series.rmul` against pandas via the shared helper."""
    inter_df_math_helper(*create_test_series(data), "rmul")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_round(data):
    """`Series.round` with default precision must match pandas."""
    modin_series, pandas_series = create_test_series(data)
    modin_rounded = modin_series.round()
    pandas_rounded = pandas_series.round()
    df_equals(modin_rounded, pandas_rounded)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rpow(data):
    """Exercise `Series.rpow` against pandas via the shared helper."""
    inter_df_math_helper(*create_test_series(data), "rpow")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rsub(data):
    """Exercise `Series.rsub` against pandas via the shared helper."""
    inter_df_math_helper(*create_test_series(data), "rsub")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rtruediv(data):
    """Exercise `Series.rtruediv` against pandas via the shared helper."""
    inter_df_math_helper(*create_test_series(data), "rtruediv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_sample(data):
    """`Series.sample` must match pandas given the same fixed random_state.

    Covers frac-based and n-based sampling (raising identically on bad
    input), the empty-sample warning path, and rejection of negative n.
    """
    modin_series, pandas_series = create_test_series(data)
    # frac-based sampling.
    try:
        pandas_result = pandas_series.sample(frac=0.5, random_state=21019)
    except Exception as e:
        with pytest.raises(type(e)):
            modin_series.sample(frac=0.5, random_state=21019)
    else:
        modin_result = modin_series.sample(frac=0.5, random_state=21019)
        df_equals(pandas_result, modin_result)
    # n-based sampling (may raise if n exceeds the series length).
    try:
        pandas_result = pandas_series.sample(n=12, random_state=21019)
    except Exception as e:
        with pytest.raises(type(e)):
            modin_series.sample(n=12, random_state=21019)
    else:
        modin_result = modin_series.sample(n=12, random_state=21019)
        df_equals(pandas_result, modin_result)
    # n=0 emits a UserWarning but still returns an empty sample.
    with pytest.warns(UserWarning):
        df_equals(
            modin_series.sample(n=0, random_state=21019),
            pandas_series.sample(n=0, random_state=21019),
        )
    # A negative sample size is always invalid.
    with pytest.raises(ValueError):
        modin_series.sample(n=-3)
@pytest.mark.parametrize("single_value_data", [True, False])
@pytest.mark.parametrize("use_multiindex", [True, False])
@pytest.mark.parametrize("sorter", [True, None])
@pytest.mark.parametrize("values_number", [1, 2, 5])
@pytest.mark.parametrize("side", ["left", "right"])
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_searchsorted(
    data, side, values_number, sorter, use_multiindex, single_value_data
):
    """`Series.searchsorted` must match pandas across sides, sorters,
    MultiIndexes, scalar/array search values, and in/out-of-range values."""
    # single_value_data reduces the fixture to one scalar element.
    data = data if not single_value_data else data[next(iter(data.keys()))][0]
    if not sorter:
        # No explicit sorter: the series itself must be pre-sorted.
        modin_series, pandas_series = create_test_series(vals=data, sort=True)
    else:
        # Unsorted series plus an argsort-derived sorter array.
        modin_series, pandas_series = create_test_series(vals=data)
        sorter = np.argsort(list(modin_series))
    if use_multiindex:
        rows_number = len(modin_series.index)
        level_0_series = random_state.choice([0, 1], rows_number)
        level_1_series = random_state.choice([2, 3], rows_number)
        index_series = pd.MultiIndex.from_arrays(
            [level_0_series, level_1_series], names=["first", "second"]
        )
        modin_series.index = index_series
        pandas_series.index = index_series
    min_sample = modin_series.min(skipna=True)
    max_sample = modin_series.max(skipna=True)
    if single_value_data:
        values = [data]
    else:
        # Search values inside, above, and below the data range, plus scalars.
        values = []
        values.append(pandas_series.sample(n=values_number, random_state=random_state))
        values.append(
            random_state.uniform(low=min_sample, high=max_sample, size=values_number)
        )
        values.append(
            random_state.uniform(
                low=max_sample, high=2 * max_sample, size=values_number
            )
        )
        values.append(
            random_state.uniform(
                low=min_sample - max_sample, high=min_sample, size=values_number
            )
        )
        pure_float = random_state.uniform(float(min_sample), float(max_sample))
        pure_int = int(pure_float)
        values.append(pure_float)
        values.append(pure_int)
    # Element-wise comparison of the insertion indices for every value set.
    test_cases = [
        modin_series.searchsorted(value=value, side=side, sorter=sorter)
        == pandas_series.searchsorted(value=value, side=side, sorter=sorter)
    for value in values
    ]
    # Array results collapse to a single bool via .all(); scalars pass through.
    test_cases = [
        case.all() if not isinstance(case, bool) else case for case in test_cases
    ]
    for case in test_cases:
        assert case
@pytest.mark.parametrize(
    "skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize("ddof", int_arg_values, ids=arg_keys("ddof", int_arg_keys))
def test_sem_float_nan_only(skipna, ddof):
    """`Series.sem` on NaN-bearing float data must match pandas."""
    modin_series, pandas_series = create_test_series(test_data["float_nan_data"])
    eval_general(
        modin_series,
        pandas_series,
        lambda df: df.sem(skipna=skipna, ddof=ddof),
    )
@pytest.mark.parametrize("ddof", int_arg_values, ids=arg_keys("ddof", int_arg_keys))
def test_sem_int_only(ddof):
    """`Series.sem` on integer data must match pandas for each ddof."""
    modin_series, pandas_series = create_test_series(test_data["int_data"])
    eval_general(
        modin_series,
        pandas_series,
        lambda df: df.sem(ddof=ddof),
    )
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_set_axis(data):
    """Smoke-test `Series.set_axis` with freshly generated string labels."""
    modin_series, _ = create_test_series(data)  # noqa: F841
    new_labels = ["{}_{}".format(i, i + 1) for i in modin_series.index]
    modin_series.set_axis(labels=new_labels)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_shape(data):
    """`Series.shape` must equal the pandas shape for the same data."""
    modin_series, pandas_series = create_test_series(data)
    modin_shape = modin_series.shape
    pandas_shape = pandas_series.shape
    assert modin_shape == pandas_shape
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_size(data):
    """`Series.size` must equal the pandas size for the same data."""
    modin_series, pandas_series = create_test_series(data)
    modin_size = modin_series.size
    pandas_size = pandas_series.size
    assert modin_size == pandas_size
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
    "skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_skew(data, skipna):
    """`Series.skew` must match pandas with and without NaN skipping."""
    modin_series, pandas_series = create_test_series(data)
    modin_skew = modin_series.skew(skipna=skipna)
    pandas_skew = pandas_series.skew(skipna=skipna)
    df_equals(modin_skew, pandas_skew)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("index", ["default", "ndarray", "has_duplicates"])
@pytest.mark.parametrize("periods", [0, 1, -1, 10, -10, 1000000000, -1000000000])
def test_shift_slice_shift(data, index, periods):
    """`Series.shift`/`slice_shift` must match pandas for positive, negative,
    zero, and larger-than-length shifts, with default, ndarray-offset, and
    duplicate-label indexes."""
    modin_series, pandas_series = create_test_series(data)
    if index == "ndarray":
        data_column_length = len(data[next(iter(data))])
        # Offset integer index starting at 2 instead of 0.
        modin_series.index = pandas_series.index = np.arange(2, data_column_length + 2)
    elif index == "has_duplicates":
        # Replace the last three labels with 0, 1, 2 to create duplicates.
        modin_series.index = pandas_series.index = list(modin_series.index[:-3]) + [
            0,
            1,
            2,
        ]
    df_equals(
        modin_series.shift(periods=periods),
        pandas_series.shift(periods=periods),
    )
    df_equals(
        modin_series.shift(periods=periods, fill_value=777),
        pandas_series.shift(periods=periods, fill_value=777),
    )
    # A Series has no axis 1 — both sides must raise identically.
    eval_general(modin_series, pandas_series, lambda df: df.shift(axis=1))
    df_equals(
        modin_series.slice_shift(periods=periods),
        pandas_series.slice_shift(periods=periods),
    )
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
    "ascending", bool_arg_values, ids=arg_keys("ascending", bool_arg_keys)
)
@pytest.mark.parametrize(
    "sort_remaining", bool_arg_values, ids=arg_keys("sort_remaining", bool_arg_keys)
)
@pytest.mark.parametrize("na_position", ["first", "last"], ids=["first", "last"])
def test_sort_index(data, ascending, sort_remaining, na_position):
    """`Series.sort_index` must match pandas both out-of-place and in-place."""
    modin_series, pandas_series = create_test_series(data)
    # Out-of-place sort.
    eval_general(
        modin_series,
        pandas_series,
        lambda df: df.sort_index(
            ascending=ascending,
            sort_remaining=sort_remaining,
            na_position=na_position,
        ),
    )
    # In-place sort on copies; __inplace__ makes eval_general compare the
    # mutated objects rather than the return value (presumably — see
    # eval_general's definition).
    eval_general(
        modin_series.copy(),
        pandas_series.copy(),
        lambda df: df.sort_index(
            ascending=ascending,
            sort_remaining=sort_remaining,
            na_position=na_position,
            inplace=True,
        ),
        __inplace__=True,
    )
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("ascending", [True, False], ids=["True", "False"])
@pytest.mark.parametrize("na_position", ["first", "last"], ids=["first", "last"])
def test_sort_values(data, ascending, na_position):
    """`Series.sort_values` must match pandas, comparing raw values only for
    descending sorts (see the note below about index instability)."""
    modin_series, pandas_series = create_test_series(data)
    modin_result = modin_series.sort_values(
        ascending=ascending, na_position=na_position
    )
    pandas_result = pandas_series.sort_values(
        ascending=ascending, na_position=na_position
    )
    # Note: For `ascending=False` only
    # For some reason, the indexing of Series and DataFrame differ in the underlying
    # algorithm. The order of values is the same, but the index values are shuffled.
    # Since we use `DataFrame.sort_values` even for Series, the index can be different
    # between `pandas.Series.sort_values`. For this reason, we check that the values are
    # identical instead of the index as well.
    if ascending:
        df_equals(modin_result, pandas_result)
    else:
        np.testing.assert_equal(modin_result.values, pandas_result.values)

    # Same checks for the in-place variant, run on copies.
    modin_series_cp = modin_series.copy()
    pandas_series_cp = pandas_series.copy()
    modin_series_cp.sort_values(
        ascending=ascending, na_position=na_position, inplace=True
    )
    pandas_series_cp.sort_values(
        ascending=ascending, na_position=na_position, inplace=True
    )
    # See above about `ascending=False`
    if ascending:
        df_equals(modin_series_cp, pandas_series_cp)
    else:
        np.testing.assert_equal(modin_series_cp.values, pandas_series_cp.values)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_squeeze(data):
    """`Series.squeeze` must match pandas; axis=1 is invalid for a Series."""
    modin_series, pandas_series = create_test_series(data)
    # axis=None and axis=0 are both valid for a 1-D Series.
    for axis in (None, 0):
        df_equals(modin_series.squeeze(axis), pandas_series.squeeze(axis))
    # A Series has no axis 1.
    with pytest.raises(ValueError):
        modin_series.squeeze(1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
    "skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize("ddof", int_arg_values, ids=arg_keys("ddof", int_arg_keys))
def test_std(request, data, skipna, ddof):
    """`Series.std` must match pandas; if pandas raises (e.g. non-numeric
    data), Modin must raise the same exception type."""
    modin_series, pandas_series = create_test_series(data)
    try:
        pandas_result = pandas_series.std(skipna=skipna, ddof=ddof)
    except Exception as e:
        with pytest.raises(type(e)):
            modin_series.std(skipna=skipna, ddof=ddof)
    else:
        modin_result = modin_series.std(skipna=skipna, ddof=ddof)
        df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_sub(data):
    """Exercise `Series.sub` against pandas via the shared helper."""
    inter_df_math_helper(*create_test_series(data), "sub")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_subtract(data):
    """Exercise the `Series.subtract` alias against pandas via the helper."""
    inter_df_math_helper(*create_test_series(data), "subtract")
@pytest.mark.parametrize(
    "data",
    test_data_values + test_data_small_values,
    ids=test_data_keys + test_data_small_keys,
)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
    "skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
    "numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
@pytest.mark.parametrize(
    "min_count", int_arg_values, ids=arg_keys("min_count", int_arg_keys)
)
def test_sum(data, axis, skipna, numeric_only, min_count):
    """`Series.sum` must match pandas across axis/skipna/numeric_only/min_count."""
    modin_series, pandas_series = create_test_series(data)
    eval_general(
        modin_series,
        pandas_series,
        lambda df, *args, **kwargs: df.sum(*args, **kwargs),
        axis=axis,
        skipna=skipna,
        numeric_only=numeric_only,
        min_count=min_count,
    )
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis1", [0, 1, "columns", "index"])
@pytest.mark.parametrize("axis2", [0, 1, "columns", "index"])
def test_swapaxes(data, axis1, axis2):
    """`Series.swapaxes` must match pandas; invalid axis names/numbers for a
    Series must raise the same exception type on both sides."""
    modin_series, pandas_series = create_test_series(data)
    try:
        pandas_result = pandas_series.swapaxes(axis1, axis2)
    except Exception as e:
        with pytest.raises(type(e)):
            modin_series.swapaxes(axis1, axis2)
    else:
        modin_result = modin_series.swapaxes(axis1, axis2)
        df_equals(modin_result, pandas_result)
def test_swaplevel():
    """`Series.swaplevel` must match pandas for named, default, and numeric levels."""
    data = np.random.randint(1, 100, 12)
    # Build the (Number, Letter, Color) label product once and share it.
    label_tuples = [
        (num, letter, color)
        for num in range(1, 3)
        for letter in ["a", "b", "c"]
        for color in ["Red", "Green"]
    ]
    level_names = ["Number", "Letter", "Color"]
    modin_s = pd.Series(
        data,
        index=pd.MultiIndex.from_tuples(label_tuples, names=level_names),
    )
    pandas_s = pandas.Series(
        data,
        index=pandas.MultiIndex.from_tuples(label_tuples, names=level_names),
    )
    # Swap by level names.
    df_equals(
        modin_s.swaplevel("Number", "Color"), pandas_s.swaplevel("Number", "Color")
    )
    # Default swap (last two levels).
    df_equals(modin_s.swaplevel(), pandas_s.swaplevel())
    # Swap by level positions.
    df_equals(modin_s.swaplevel(1, 0), pandas_s.swaplevel(1, 0))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("n", int_arg_values, ids=arg_keys("n", int_arg_keys))
def test_tail(data, n):
    """`Series.tail` must match pandas for arbitrary n and for the full length."""
    modin_series, pandas_series = create_test_series(data)
    df_equals(modin_series.tail(n), pandas_series.tail(n))
    # Requesting the entire series back.
    df_equals(
        modin_series.tail(len(modin_series)), pandas_series.tail(len(pandas_series))
    )
def test_take():
    """`Series.take` must match pandas; axis=1 must raise identically.

    Consistency fix: the original try/except had no ``else`` branch, so if
    pandas ever stopped raising for ``axis=1`` the Modin result would go
    entirely unchecked. This now follows the try/except/else pattern used
    throughout this module (e.g. test_rank, test_std).
    """
    modin_s = pd.Series(["falcon", "parrot", "lion", "cat"], index=[0, 2, 3, 1])
    pandas_s = pandas.Series(["falcon", "parrot", "lion", "cat"], index=[0, 2, 3, 1])
    a = modin_s.take([0, 3])
    df_equals(a, pandas_s.take([0, 3]))
    try:
        pandas_result = pandas_s.take([2], axis=1)
    except Exception as e:
        # pandas rejects axis=1 on a Series; Modin must raise the same type.
        with pytest.raises(type(e)):
            modin_s.take([2], axis=1)
    else:
        df_equals(modin_s.take([2], axis=1), pandas_result)
@pytest.mark.parametrize(
    "ignore_index", bool_arg_values, ids=arg_keys("ignore_index", bool_arg_keys)
)
def test_explode(ignore_index):
    """`Series.explode` must expand list items and empty lists like pandas."""
    # Some items in this test data are lists that explode() should expand.
    data = [[1, 2, 3], "foo", [], [3, 4]]
    modin_series, pandas_series = create_test_series(data)
    modin_exploded = modin_series.explode(ignore_index=ignore_index)
    pandas_exploded = pandas_series.explode(ignore_index=ignore_index)
    df_equals(modin_exploded, pandas_exploded)
def test_to_period():
    """`Series.to_period` should warn that it defaults to pandas."""
    idx = pd.date_range("1/1/2012", periods=5, freq="M")
    values = np.random.randint(0, 100, size=(len(idx)))
    series = pd.Series(values, index=idx)
    with pytest.warns(UserWarning):
        series.to_period()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_to_numpy(data):
    """`Series.to_numpy` must match pandas.

    Fix: the test is named after ``to_numpy`` but previously compared
    ``.values``, duplicating test_values and leaving ``to_numpy`` untested.
    """
    modin_series, pandas_series = create_test_series(data)
    assert_array_equal(modin_series.to_numpy(), pandas_series.to_numpy())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_to_string(request, data):
    """`Series.to_string` must produce the same text as pandas."""
    modin_series, pandas_series = create_test_series(data)
    eval_general(modin_series, pandas_series, lambda df: df.to_string())
def test_to_timestamp():
    """`to_period().to_timestamp()` should warn about defaulting to pandas."""
    idx = pd.date_range("1/1/2012", periods=5, freq="M")
    values = np.random.randint(0, 100, size=(len(idx)))
    series = pd.Series(values, index=idx)
    with pytest.warns(UserWarning):
        series.to_period().to_timestamp()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_to_xarray(data):
    """`Series.to_xarray` should warn that it defaults to pandas."""
    modin_series, _ = create_test_series(data)  # noqa: F841
    with pytest.warns(UserWarning):
        modin_series.to_xarray()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_tolist(data):
    """`Series.tolist` should warn that it defaults to pandas."""
    modin_series, _ = create_test_series(data)  # noqa: F841
    with pytest.warns(UserWarning):
        modin_series.tolist()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_transform(data, func):
    """`Series.transform` must match pandas for every aggregation function."""
    modin_series, pandas_series = create_test_series(data)
    eval_general(modin_series, pandas_series, lambda df: df.transform(func))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("func", agg_func_except_values, ids=agg_func_except_keys)
def test_transform_except(data, func):
    """`Series.transform` must raise like pandas for functions expected to fail."""
    modin_series, pandas_series = create_test_series(data)
    eval_general(modin_series, pandas_series, lambda df: df.transform(func))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_transpose(data):
    """`Series.transpose` is a no-op: it must equal the original and pandas."""
    modin_series, pandas_series = create_test_series(data)
    transposed = modin_series.transpose()
    df_equals(transposed, modin_series)
    df_equals(transposed, pandas_series.transpose())
    df_equals(transposed, pandas_series)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_truediv(data):
    """Exercise `Series.truediv` against pandas via the shared helper."""
    inter_df_math_helper(*create_test_series(data), "truediv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_truncate(data):
    """`Series.truncate` must match pandas for wide, narrow, and unbounded windows.

    Bug fix: the first window computed ``len(modin_series - 3)`` — the length
    of an element-wise subtraction, i.e. always the full length — instead of
    the intended ``len(modin_series) - 3``.
    """
    modin_series, pandas_series = create_test_series(data)
    # Window covering all but the last few positions.
    before = 1
    after = len(modin_series) - 3
    df_equals(
        modin_series.truncate(before, after), pandas_series.truncate(before, after)
    )
    # Small fixed window.
    before = 1
    after = 3
    df_equals(
        modin_series.truncate(before, after), pandas_series.truncate(before, after)
    )
    # No bounds: the whole series comes back.
    before = None
    after = None
    df_equals(
        modin_series.truncate(before, after), pandas_series.truncate(before, after)
    )
def test_tshift():
    """`Series.tshift` over a monthly DatetimeIndex must match pandas."""
    idx = pd.date_range("1/1/2012", periods=5, freq="M")
    values = np.random.randint(0, 100, size=len(idx))
    modin_series = pd.Series(values, index=idx)
    pandas_series = pandas.Series(values, index=idx)
    df_equals(modin_series.tshift(4), pandas_series.tshift(4))
def test_tz_convert():
    """`Series.tz_convert` must match pandas for a flat DatetimeIndex and for
    a MultiIndex level-targeted conversion."""
    modin_idx = pd.date_range(
        "1/1/2012", periods=400, freq="2D", tz="America/Los_Angeles"
    )
    pandas_idx = pandas.date_range(
        "1/1/2012", periods=400, freq="2D", tz="America/Los_Angeles"
    )
    data = np.random.randint(0, 100, size=len(modin_idx))
    modin_series = pd.Series(data, index=modin_idx)
    pandas_series = pandas.Series(data, index=pandas_idx)
    # Convert the whole tz-aware index to UTC.
    modin_result = modin_series.tz_convert("UTC", axis=0)
    pandas_result = pandas_series.tz_convert("UTC", axis=0)
    df_equals(modin_result, pandas_result)

    # Convert only level 0 of a 2-level MultiIndex.
    modin_multi = pd.MultiIndex.from_arrays([modin_idx, range(len(modin_idx))])
    pandas_multi = pandas.MultiIndex.from_arrays([pandas_idx, range(len(modin_idx))])
    modin_series = pd.Series(data, index=modin_multi)
    pandas_series = pandas.Series(data, index=pandas_multi)
    df_equals(
        modin_series.tz_convert("UTC", axis=0, level=0),
        pandas_series.tz_convert("UTC", axis=0, level=0),
    )
def test_tz_localize():
    """`Series.tz_localize` must match pandas for named zones and UTC."""
    idx = pd.date_range("1/1/2012", periods=400, freq="2D")
    values = np.random.randint(0, 100, size=len(idx))
    modin_series = pd.Series(values, index=idx)
    pandas_series = pandas.Series(values, index=idx)
    for timezone in ("America/Los_Angeles", "UTC"):
        df_equals(
            modin_series.tz_localize(timezone),
            pandas_series.tz_localize(timezone),
        )
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_unique(data):
    """`Series.unique` must match pandas in values and shape across dtypes.

    Bug fix: in the unordered-categorical case the modin/pandas constructors
    were swapped (``modin_result`` was built from ``pandas.Series`` and
    ``pandas_result`` from ``pd.Series``), so that case compared pandas
    against itself and never exercised Modin.
    """
    modin_series, pandas_series = create_test_series(data)
    modin_result = modin_series.unique()
    pandas_result = pandas_series.unique()
    assert_array_equal(modin_result, pandas_result)
    assert modin_result.shape == pandas_result.shape

    # Duplicated ints with a name.
    modin_result = pd.Series([2, 1, 3, 3], name="A").unique()
    pandas_result = pandas.Series([2, 1, 3, 3], name="A").unique()
    assert_array_equal(modin_result, pandas_result)
    assert modin_result.shape == pandas_result.shape

    # Repeated naive timestamps.
    modin_result = pd.Series([pd.Timestamp("2016-01-01") for _ in range(3)]).unique()
    pandas_result = pandas.Series(
        [pd.Timestamp("2016-01-01") for _ in range(3)]
    ).unique()
    assert_array_equal(modin_result, pandas_result)
    assert modin_result.shape == pandas_result.shape

    # Repeated tz-aware timestamps.
    modin_result = pd.Series(
        [pd.Timestamp("2016-01-01", tz="US/Eastern") for _ in range(3)]
    ).unique()
    pandas_result = pandas.Series(
        [pd.Timestamp("2016-01-01", tz="US/Eastern") for _ in range(3)]
    ).unique()
    assert_array_equal(modin_result, pandas_result)
    assert modin_result.shape == pandas_result.shape

    # Unordered categorical (constructors were previously swapped here).
    modin_result = pd.Series(pd.Categorical(list("baabc"))).unique()
    pandas_result = pandas.Series(pd.Categorical(list("baabc"))).unique()
    assert_array_equal(modin_result, pandas_result)
    assert modin_result.shape == pandas_result.shape

    # Ordered categorical with explicit categories.
    modin_result = pd.Series(
        pd.Categorical(list("baabc"), categories=list("abc"), ordered=True)
    ).unique()
    pandas_result = pandas.Series(
        pd.Categorical(list("baabc"), categories=list("abc"), ordered=True)
    ).unique()
    assert_array_equal(modin_result, pandas_result)
    assert modin_result.shape == pandas_result.shape
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_unstack(data):
    """`Series.unstack` over a 4-level tree-like MultiIndex must match pandas
    for default, single-level, and multi-level unstacking."""
    # This first pair is only used to size the generated MultiIndex below.
    modin_series, pandas_series = create_test_series(data)
    index = generate_multiindex(len(pandas_series), nlevels=4, is_tree_like=True)

    modin_series = pd.Series(data[next(iter(data.keys()))], index=index)
    pandas_series = pandas.Series(data[next(iter(data.keys()))], index=index)

    df_equals(modin_series.unstack(), pandas_series.unstack())
    df_equals(modin_series.unstack(level=0), pandas_series.unstack(level=0))
    df_equals(modin_series.unstack(level=[0, 1]), pandas_series.unstack(level=[0, 1]))
    df_equals(
        modin_series.unstack(level=[0, 1, 2]), pandas_series.unstack(level=[0, 1, 2])
    )
@pytest.mark.parametrize(
    "data, other_data",
    [([1, 2, 3], [4, 5, 6]), ([1, 2, 3], [4, 5, 6, 7, 8]), ([1, 2, 3], [4, np.nan, 6])],
)
def test_update(data, other_data):
    """`Series.update` must mutate identically to pandas, incl. NaN skipping."""
    modin_series = pd.Series(data)
    pandas_series = pandas.Series(data)
    modin_series.update(pd.Series(other_data))
    pandas_series.update(pandas.Series(other_data))
    df_equals(modin_series, pandas_series)
@pytest.mark.parametrize("sort", bool_arg_values, ids=bool_arg_keys)
@pytest.mark.parametrize("normalize", bool_arg_values, ids=bool_arg_keys)
@pytest.mark.parametrize("bins", [3, None])
@pytest.mark.parametrize("dropna", bool_arg_values, ids=bool_arg_keys)
@pytest.mark.parametrize("ascending", bool_arg_values, ids=bool_arg_keys)
def test_value_counts(sort, normalize, bins, dropna, ascending):
    """`Series.value_counts` must match pandas across sort/normalize/bins/
    dropna/ascending, with index-order-tolerant comparison when sorting."""

    def sort_sensitive_comparator(df1, df2):
        # We sort indices for Modin and pandas result because of issue #1650
        return (
            df_equals_with_non_stable_indices(df1, df2)
            if sort
            else df_equals(df1.sort_index(), df2.sort_index())
        )

    eval_general(
        *create_test_series(test_data_values[0]),
        lambda df: df.value_counts(
            sort=sort,
            bins=bins,
            normalize=normalize,
            dropna=dropna,
            ascending=ascending,
        ),
        comparator=sort_sensitive_comparator,
        # Modin's `sort_values` does not validate `ascending` type and so
        # does not raise an exception when it isn't a bool, when pandas do so,
        # visit modin-issue#3388 for more info.
        check_exception_type=None if sort and ascending is None else True,
    )

    # from issue #2365
    arr = np.random.rand(2 ** 6)
    arr[::10] = np.nan
    eval_general(
        *create_test_series(arr),
        lambda df: df.value_counts(
            sort=sort,
            bins=bins,
            normalize=normalize,
            dropna=dropna,
            ascending=ascending,
        ),
        comparator=sort_sensitive_comparator,
        # Modin's `sort_values` does not validate `ascending` type and so
        # does not raise an exception when it isn't a bool, when pandas do so,
        # visit modin-issue#3388 for more info.
        check_exception_type=None if sort and ascending is None else True,
    )
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_values(data):
    """`Series.values` must match pandas element-wise."""
    modin_series, pandas_series = create_test_series(data)
    modin_values = modin_series.values
    pandas_values = pandas_series.values
    np.testing.assert_equal(modin_values, pandas_values)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
    "skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize("ddof", int_arg_values, ids=arg_keys("ddof", int_arg_keys))
def test_var(data, skipna, ddof):
    """`Series.var` must match pandas; on failure Modin must raise the same type.

    Consistency fix: the original caught any pandas exception but then demanded
    a hard-coded ``TypeError`` from Modin. Like test_std and the other
    reduction tests, Modin must now raise the same exception type pandas did.
    """
    modin_series, pandas_series = create_test_series(data)
    try:
        pandas_result = pandas_series.var(skipna=skipna, ddof=ddof)
    except Exception as e:
        with pytest.raises(type(e)):
            modin_series.var(skipna=skipna, ddof=ddof)
    else:
        modin_result = modin_series.var(skipna=skipna, ddof=ddof)
        df_equals(modin_result, pandas_result)
def test_view():
    """``Series.view`` reinterprets the underlying buffer; compare with pandas."""
    # (values, source dtype, target dtype) triples exercised identically
    # through modin and pandas.
    cases = [
        ([-2, -1, 0, 1, 2], "int8", "uint8"),
        ([-20, -10, 0, 10, 20], "int32", "float32"),
        ([-200, -100, 0, 100, 200], "int64", "float64"),
    ]
    for values, src_dtype, dst_dtype in cases:
        modin_result = pd.Series(values, dtype=src_dtype).view(dtype=dst_dtype)
        pandas_result = pandas.Series(values, dtype=src_dtype).view(dtype=dst_dtype)
        df_equals(modin_result, pandas_result)
def test_where():
    """Check ``Series.where`` with a cond series, an `other` series, and a scalar."""
    values = random_state.randn(100)
    pandas_series = pandas.Series(values)
    modin_series = pd.Series(values)
    pandas_cond = pandas_series % 5 < 2
    modin_cond = modin_series % 5 < 2
    # Replacement taken from the negated series itself.
    assert all(
        to_pandas(modin_series.where(modin_cond, -modin_series))
        == pandas_series.where(pandas_cond, -pandas_series)
    )
    # Replacement taken from an unrelated series, aligned on axis 0.
    other = pandas.Series(random_state.randn(100))
    assert all(
        to_pandas(modin_series.where(modin_cond, other, axis=0))
        == pandas_series.where(pandas_cond, other, axis=0)
    )
    # Scalar replacement.
    assert all(
        to_pandas(modin_series.where(modin_series < 2, True))
        == pandas_series.where(pandas_series < 2, True)
    )
@pytest.mark.parametrize("data", test_string_data_values, ids=test_string_data_keys)
@pytest.mark.parametrize(
"key",
[0, slice(0, len(test_string_data_values) / 2)],
ids=["single_key", "slice_key"],
)
def test_str___getitem__(data, key):
modin_series, pandas_series = create_test_series(data)
modin_result = modin_series.str[key]
pandas_result = pandas_series.str[key]
df_equals(modin_result, pandas_result)
# Test str operations
def test_str_cat():
    """``str.cat`` is not implemented natively; it must warn about defaulting to pandas."""
    data = ["abC|DeF,Hik", "gSaf,qWer|Gre", "asd3,4sad|", np.NaN]
    modin_series, _ = create_test_series(data)
    # We are only testing that this defaults to pandas, so we will just check
    # for the warning — both with `others` supplied and without.
    for others in (data, None):
        with pytest.warns(UserWarning):
            modin_series.str.cat(others)
@pytest.mark.parametrize("data", test_string_data_values, ids=test_string_data_keys)
@pytest.mark.parametrize("pat", string_sep_values, ids=string_sep_keys)
@pytest.mark.parametrize("n", int_arg_values, ids=int_arg_keys)
@pytest.mark.parametrize("expand", bool_arg_values, ids=bool_arg_keys)
def test_str_split(data, pat, n, expand):
# Empty pattern not supported on Python 3.7+
if sys.version_info[0] == 3 and sys.version_info[1] >= 7 and pat == "":
return
modin_series, pandas_series = create_test_series(data)
if n >= -1:
if expand and pat:
with pytest.warns(UserWarning):
# We are only testing that this defaults to pandas, so we will just check for
# the warning
modin_series.str.split(pat, n=n, expand=expand)
elif not expand:
try:
pandas_result = pandas_series.str.split(pat, n=n, expand=expand)
except Exception as e:
with pytest.raises(type(e)):
modin_series.str.split(pat, n=n, expand=expand)
else:
modin_result = modin_series.str.split(pat, n=n, expand=expand)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_string_data_values, ids=test_string_data_keys)
@pytest.mark.parametrize("pat", string_sep_values, ids=string_sep_keys)
@pytest.mark.parametrize("n", int_arg_values, ids=int_arg_keys)
@pytest.mark.parametrize("expand", bool_arg_values, ids=bool_arg_keys)
def test_str_rsplit(data, pat, n, expand):
modin_series, pandas_series = create_test_series(data)
if n >= -1:
if expand and pat:
with pytest.warns(UserWarning):
# We are only testing that this defaults to pandas, so we will just check for
# the warning
modin_series.str.rsplit(pat, n=n, expand=expand)
elif not expand:
try:
pandas_result = pandas_series.str.rsplit(pat, n=n, expand=expand)
except Exception as e:
with pytest.raises(type(e)):
modin_series.str.rsplit(pat, n=n, expand=expand)
else:
modin_result = modin_series.str.rsplit(pat, n=n, expand=expand)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_string_data_values, ids=test_string_data_keys)
@pytest.mark.parametrize("i", int_arg_values, ids=int_arg_keys)
def test_str_get(data, i):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.str.get(i)
except Exception as e:
with pytest.raises(type(e)):
modin_series.str.get(i)
else:
modin_result = modin_series.str.get(i)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize(
    "data", test_string_list_data_values, ids=test_string_list_data_keys
)
@pytest.mark.parametrize("sep", string_sep_values, ids=string_sep_keys)
def test_str_join(data, sep):
    """Compare ``str.join`` between modin and pandas (mirroring exceptions)."""
    # Shared `eval_general` helper replaces the try/except mirror boilerplate.
    eval_general(*create_test_series(data), lambda series: series.str.join(sep))
@pytest.mark.parametrize(
    "data", test_string_list_data_values, ids=test_string_list_data_keys
)
@pytest.mark.parametrize("sep", string_sep_values, ids=string_sep_keys)
def test_str_get_dummies(data, sep):
    """``str.get_dummies`` defaults to pandas; only check the UserWarning."""
    modin_series, _ = create_test_series(data)
    if not sep:
        return
    # We are only testing that this defaults to pandas, so we will just check
    # for the warning.
    with pytest.warns(UserWarning):
        modin_series.str.get_dummies(sep)
@pytest.mark.parametrize("data", test_string_data_values, ids=test_string_data_keys)
@pytest.mark.parametrize("pat", string_sep_values, ids=string_sep_keys)
@pytest.mark.parametrize("case", bool_arg_values, ids=bool_arg_keys)
@pytest.mark.parametrize("na", string_na_rep_values, ids=string_na_rep_keys)
def test_str_contains(data, pat, case, na):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.str.contains(pat, case=case, na=na, regex=False)
except Exception as e:
with pytest.raises(type(e)):
modin_series.str.contains(pat, case=case, na=na, regex=False)
else:
modin_result = modin_series.str.contains(pat, case=case, na=na, regex=False)
df_equals(modin_result, pandas_result)
# Test regex
pat = ",|b"
try:
pandas_result = pandas_series.str.contains(pat, case=case, na=na, regex=True)
except Exception as e:
with pytest.raises(type(e)):
modin_series.str.contains(pat, case=case, na=na, regex=True)
else:
modin_result = modin_series.str.contains(pat, case=case, na=na, regex=True)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_string_data_values, ids=test_string_data_keys)
@pytest.mark.parametrize("pat", string_sep_values, ids=string_sep_keys)
@pytest.mark.parametrize("repl", string_sep_values, ids=string_sep_keys)
@pytest.mark.parametrize("n", int_arg_values, ids=int_arg_keys)
@pytest.mark.parametrize("case", bool_arg_values, ids=bool_arg_keys)
def test_str_replace(data, pat, repl, n, case):
eval_general(
*create_test_series(data),
lambda series: series.str.replace(pat, repl, n=n, case=case, regex=False),
)
# Test regex
eval_general(
*create_test_series(data),
lambda series: series.str.replace(
pat=",|b", repl=repl, n=n, case=case, regex=True
),
)
@pytest.mark.parametrize("data", test_string_data_values, ids=test_string_data_keys)
@pytest.mark.parametrize("repeats", int_arg_values, ids=int_arg_keys)
def test_str_repeat(data, repeats):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.str.repeat(repeats)
except Exception as e:
with pytest.raises(type(e)):
modin_series.str.repeat(repeats)
else:
modin_result = modin_series.str.repeat(repeats)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_string_data_values, ids=test_string_data_keys)
@pytest.mark.parametrize("width", int_arg_values, ids=int_arg_keys)
@pytest.mark.parametrize(
"side", ["left", "right", "both"], ids=["left", "right", "both"]
)
@pytest.mark.parametrize("fillchar", string_sep_values, ids=string_sep_keys)
def test_str_pad(data, width, side, fillchar):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.str.pad(width, side=side, fillchar=fillchar)
except Exception as e:
with pytest.raises(type(e)):
modin_series.str.pad(width, side=side, fillchar=fillchar)
else:
modin_result = modin_series.str.pad(width, side=side, fillchar=fillchar)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_string_data_values, ids=test_string_data_keys)
@pytest.mark.parametrize("width", int_arg_values, ids=int_arg_keys)
@pytest.mark.parametrize("fillchar", string_sep_values, ids=string_sep_keys)
def test_str_center(data, width, fillchar):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.str.center(width, fillchar=fillchar)
except Exception as e:
with pytest.raises(type(e)):
modin_series.str.center(width, fillchar=fillchar)
else:
modin_result = modin_series.str.center(width, fillchar=fillchar)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_string_data_values, ids=test_string_data_keys)
@pytest.mark.parametrize("width", int_arg_values, ids=int_arg_keys)
@pytest.mark.parametrize("fillchar", string_sep_values, ids=string_sep_keys)
def test_str_ljust(data, width, fillchar):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.str.ljust(width, fillchar=fillchar)
except Exception as e:
with pytest.raises(type(e)):
modin_series.str.ljust(width, fillchar=fillchar)
else:
modin_result = modin_series.str.ljust(width, fillchar=fillchar)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_string_data_values, ids=test_string_data_keys)
@pytest.mark.parametrize("width", int_arg_values, ids=int_arg_keys)
@pytest.mark.parametrize("fillchar", string_sep_values, ids=string_sep_keys)
def test_str_rjust(data, width, fillchar):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.str.rjust(width, fillchar=fillchar)
except Exception as e:
with pytest.raises(type(e)):
modin_series.str.rjust(width, fillchar=fillchar)
else:
modin_result = modin_series.str.rjust(width, fillchar=fillchar)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_string_data_values, ids=test_string_data_keys)
@pytest.mark.parametrize("width", int_arg_values, ids=int_arg_keys)
def test_str_zfill(data, width):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.str.zfill(width)
except Exception as e:
with pytest.raises(type(e)):
modin_series.str.zfill(width)
else:
modin_result = modin_series.str.zfill(width)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_string_data_values, ids=test_string_data_keys)
@pytest.mark.parametrize("width", int_arg_values, ids=int_arg_keys)
def test_str_wrap(data, width):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.str.wrap(width)
except Exception as e:
with pytest.raises(type(e)):
modin_series.str.wrap(width)
else:
modin_result = modin_series.str.wrap(width)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_string_data_values, ids=test_string_data_keys)
@pytest.mark.parametrize("start", int_arg_values, ids=int_arg_keys)
@pytest.mark.parametrize("stop", int_arg_values, ids=int_arg_keys)
@pytest.mark.parametrize("step", int_arg_values, ids=int_arg_keys)
def test_str_slice(data, start, stop, step):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.str.slice(start=start, stop=stop, step=step)
except Exception as e:
with pytest.raises(type(e)):
modin_series.str.slice(start=start, stop=stop, step=step)
else:
modin_result = modin_series.str.slice(start=start, stop=stop, step=step)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_string_data_values, ids=test_string_data_keys)
@pytest.mark.parametrize("start", int_arg_values, ids=int_arg_keys)
@pytest.mark.parametrize("stop", int_arg_values, ids=int_arg_keys)
@pytest.mark.parametrize("repl", string_sep_values, ids=string_sep_keys)
def test_str_slice_replace(data, start, stop, repl):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.str.slice_replace(
start=start, stop=stop, repl=repl
)
except Exception as e:
with pytest.raises(type(e)):
modin_series.str.slice_replace(start=start, stop=stop, repl=repl)
else:
modin_result = modin_series.str.slice_replace(start=start, stop=stop, repl=repl)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_string_data_values, ids=test_string_data_keys)
@pytest.mark.parametrize("pat", string_sep_values, ids=string_sep_keys)
def test_str_count(data, pat):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.str.count(pat)
except Exception as e:
with pytest.raises(type(e)):
modin_series.str.count(pat)
else:
modin_result = modin_series.str.count(pat)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_string_data_values, ids=test_string_data_keys)
@pytest.mark.parametrize("pat", string_sep_values, ids=string_sep_keys)
@pytest.mark.parametrize("na", string_na_rep_values, ids=string_na_rep_keys)
def test_str_startswith(data, pat, na):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.str.startswith(pat, na=na)
except Exception as e:
with pytest.raises(type(e)):
modin_series.str.startswith(pat, na=na)
else:
modin_result = modin_series.str.startswith(pat, na=na)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_string_data_values, ids=test_string_data_keys)
@pytest.mark.parametrize("pat", string_sep_values, ids=string_sep_keys)
@pytest.mark.parametrize("na", string_na_rep_values, ids=string_na_rep_keys)
def test_str_endswith(data, pat, na):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.str.endswith(pat, na=na)
except Exception as e:
with pytest.raises(type(e)):
modin_series.str.endswith(pat, na=na)
else:
modin_result = modin_series.str.endswith(pat, na=na)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_string_data_values, ids=test_string_data_keys)
@pytest.mark.parametrize("pat", string_sep_values, ids=string_sep_keys)
def test_str_findall(data, pat):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.str.findall(pat)
except Exception as e:
with pytest.raises(type(e)):
modin_series.str.findall(pat)
else:
modin_result = modin_series.str.findall(pat)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_string_data_values, ids=test_string_data_keys)
@pytest.mark.parametrize("pat", string_sep_values, ids=string_sep_keys)
@pytest.mark.parametrize("case", bool_arg_values, ids=bool_arg_keys)
@pytest.mark.parametrize("na", string_na_rep_values, ids=string_na_rep_keys)
def test_str_match(data, pat, case, na):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.str.match(pat, case=case, na=na)
except Exception as e:
with pytest.raises(type(e)):
modin_series.str.match(pat, case=case, na=na)
else:
modin_result = modin_series.str.match(pat, case=case, na=na)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_string_data_values, ids=test_string_data_keys)
@pytest.mark.parametrize("expand", bool_arg_values, ids=bool_arg_keys)
def test_str_extract(data, expand):
modin_series, pandas_series = create_test_series(data)
if expand is not None:
with pytest.warns(UserWarning):
# We are only testing that this defaults to pandas, so we will just check for
# the warning
modin_series.str.extract(r"([ab])(\d)", expand=expand)
@pytest.mark.parametrize("data", test_string_data_values, ids=test_string_data_keys)
def test_str_extractall(data):
modin_series, pandas_series = create_test_series(data)
with pytest.warns(UserWarning):
# We are only testing that this defaults to pandas, so we will just check for
# the warning
modin_series.str.extractall(r"([ab])(\d)")
@pytest.mark.parametrize("data", test_string_data_values, ids=test_string_data_keys)
def test_str_len(data):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.str.len()
except Exception as e:
with pytest.raises(type(e)):
modin_series.str.len()
else:
modin_result = modin_series.str.len()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_string_data_values, ids=test_string_data_keys)
@pytest.mark.parametrize("to_strip", string_sep_values, ids=string_sep_keys)
def test_str_strip(data, to_strip):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.str.strip(to_strip=to_strip)
except Exception as e:
with pytest.raises(type(e)):
modin_series.str.strip(to_strip=to_strip)
else:
modin_result = modin_series.str.strip(to_strip=to_strip)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_string_data_values, ids=test_string_data_keys)
@pytest.mark.parametrize("to_strip", string_sep_values, ids=string_sep_keys)
def test_str_rstrip(data, to_strip):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.str.rstrip(to_strip=to_strip)
except Exception as e:
with pytest.raises(type(e)):
modin_series.str.rstrip(to_strip=to_strip)
else:
modin_result = modin_series.str.rstrip(to_strip=to_strip)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_string_data_values, ids=test_string_data_keys)
@pytest.mark.parametrize("to_strip", string_sep_values, ids=string_sep_keys)
def test_str_lstrip(data, to_strip):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.str.lstrip(to_strip=to_strip)
except Exception as e:
with pytest.raises(type(e)):
modin_series.str.lstrip(to_strip=to_strip)
else:
modin_result = modin_series.str.lstrip(to_strip=to_strip)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_string_data_values, ids=test_string_data_keys)
@pytest.mark.parametrize("sep", string_sep_values, ids=string_sep_keys)
@pytest.mark.parametrize("expand", bool_arg_values, ids=bool_arg_keys)
def test_str_partition(data, sep, expand):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.str.partition(sep, expand=expand)
except Exception as e:
with pytest.raises(type(e)):
modin_series.str.partition(sep, expand=expand)
else:
modin_result = modin_series.str.partition(sep, expand=expand)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_string_data_values, ids=test_string_data_keys)
@pytest.mark.parametrize("sep", string_sep_values, ids=string_sep_keys)
@pytest.mark.parametrize("expand", bool_arg_values, ids=bool_arg_keys)
def test_str_rpartition(data, sep, expand):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.str.rpartition(sep, expand=expand)
except Exception as e:
with pytest.raises(type(e)):
modin_series.str.rpartition(sep, expand=expand)
else:
modin_result = modin_series.str.rpartition(sep, expand=expand)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_string_data_values, ids=test_string_data_keys)
def test_str_lower(data):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.str.lower()
except Exception as e:
with pytest.raises(type(e)):
modin_series.str.lower()
else:
modin_result = modin_series.str.lower()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_string_data_values, ids=test_string_data_keys)
def test_str_upper(data):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.str.upper()
except Exception as e:
with pytest.raises(type(e)):
modin_series.str.upper()
else:
modin_result = modin_series.str.upper()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_string_data_values, ids=test_string_data_keys)
def test_str_title(data):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.str.title()
except Exception as e:
with pytest.raises(type(e)):
modin_series.str.title()
else:
modin_result = modin_series.str.title()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_string_data_values, ids=test_string_data_keys)
@pytest.mark.parametrize("sub", string_sep_values, ids=string_sep_keys)
@pytest.mark.parametrize("start", int_arg_values, ids=int_arg_keys)
@pytest.mark.parametrize("end", int_arg_values, ids=int_arg_keys)
def test_str_find(data, sub, start, end):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.str.find(sub, start=start, end=end)
except Exception as e:
with pytest.raises(type(e)):
modin_series.str.find(sub, start=start, end=end)
else:
modin_result = modin_series.str.find(sub, start=start, end=end)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_string_data_values, ids=test_string_data_keys)
@pytest.mark.parametrize("sub", string_sep_values, ids=string_sep_keys)
@pytest.mark.parametrize("start", int_arg_values, ids=int_arg_keys)
@pytest.mark.parametrize("end", int_arg_values, ids=int_arg_keys)
def test_str_rfind(data, sub, start, end):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.str.rfind(sub, start=start, end=end)
except Exception as e:
with pytest.raises(type(e)):
modin_series.str.rfind(sub, start=start, end=end)
else:
modin_result = modin_series.str.rfind(sub, start=start, end=end)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_string_data_values, ids=test_string_data_keys)
@pytest.mark.parametrize("sub", string_sep_values, ids=string_sep_keys)
@pytest.mark.parametrize("start", int_arg_values, ids=int_arg_keys)
@pytest.mark.parametrize("end", int_arg_values, ids=int_arg_keys)
def test_str_index(data, sub, start, end):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.str.index(sub, start=start, end=end)
except ValueError:
# pytest does not get the RayGetErrors
assert True
except Exception as e:
with pytest.raises(type(e)):
modin_series.str.index(sub, start=start, end=end)
else:
modin_result = modin_series.str.index(sub, start=start, end=end)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_string_data_values, ids=test_string_data_keys)
@pytest.mark.parametrize("sub", string_sep_values, ids=string_sep_keys)
@pytest.mark.parametrize("start", int_arg_values, ids=int_arg_keys)
@pytest.mark.parametrize("end", int_arg_values, ids=int_arg_keys)
def test_str_rindex(data, sub, start, end):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.str.rindex(sub, start=start, end=end)
except ValueError:
# pytest does not get the RayGetErrors
assert True
except Exception as e:
with pytest.raises(type(e)):
modin_series.str.rindex(sub, start=start, end=end)
else:
modin_result = modin_series.str.rindex(sub, start=start, end=end)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_string_data_values, ids=test_string_data_keys)
def test_str_capitalize(data):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.str.capitalize()
except Exception as e:
with pytest.raises(type(e)):
modin_series.str.capitalize()
else:
modin_result = modin_series.str.capitalize()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_string_data_values, ids=test_string_data_keys)
def test_str_swapcase(data):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.str.swapcase()
except Exception as e:
with pytest.raises(type(e)):
modin_series.str.swapcase()
else:
modin_result = modin_series.str.swapcase()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_string_data_values, ids=test_string_data_keys)
@pytest.mark.parametrize(
"form", ["NFC", "NFKC", "NFD", "NFKD"], ids=["NFC", "NFKC", "NFD", "NFKD"]
)
def test_str_normalize(data, form):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.str.normalize(form)
except Exception as e:
with pytest.raises(type(e)):
modin_series.str.normalize(form)
else:
modin_result = modin_series.str.normalize(form)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_string_data_values, ids=test_string_data_keys)
@pytest.mark.parametrize("pat", string_sep_values, ids=string_sep_keys)
def test_str_translate(data, pat):
modin_series, pandas_series = create_test_series(data)
# Test none table
try:
pandas_result = pandas_series.str.translate(None)
except Exception as e:
with pytest.raises(type(e)):
modin_series.str.translate(None)
else:
modin_result = modin_series.str.translate(None)
df_equals(modin_result, pandas_result)
# Translation dictionary
table = {pat: "DDD"}
try:
pandas_result = pandas_series.str.translate(table)
except Exception as e:
with pytest.raises(type(e)):
modin_series.str.translate(table)
else:
modin_result = modin_series.str.translate(table)
df_equals(modin_result, pandas_result)
# Translation table with maketrans (python3 only)
if pat is not None:
table = str.maketrans(pat, "d" * len(pat))
try:
pandas_result = pandas_series.str.translate(table)
except Exception as e:
with pytest.raises(type(e)):
modin_series.str.translate(table)
else:
modin_result = modin_series.str.translate(table)
df_equals(modin_result, pandas_result)
# Test delete chars
deletechars = "|"
try:
pandas_result = pandas_series.str.translate(table, deletechars)
except Exception as e:
with pytest.raises(type(e)):
modin_series.str.translate(table, deletechars)
else:
modin_result = modin_series.str.translate(table, deletechars)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_string_data_values, ids=test_string_data_keys)
def test_str_isalnum(data):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.str.isalnum()
except Exception as e:
with pytest.raises(type(e)):
modin_series.str.isalnum()
else:
modin_result = modin_series.str.isalnum()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_string_data_values, ids=test_string_data_keys)
def test_str_isalpha(data):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.str.isalpha()
except Exception as e:
with pytest.raises(type(e)):
modin_series.str.isalpha()
else:
modin_result = modin_series.str.isalpha()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_string_data_values, ids=test_string_data_keys)
def test_str_isdigit(data):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.str.isdigit()
except Exception as e:
with pytest.raises(type(e)):
modin_series.str.isdigit()
else:
modin_result = modin_series.str.isdigit()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_string_data_values, ids=test_string_data_keys)
def test_str_isspace(data):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.str.isspace()
except Exception as e:
with pytest.raises(type(e)):
modin_series.str.isspace()
else:
modin_result = modin_series.str.isspace()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_string_data_values, ids=test_string_data_keys)
def test_str_islower(data):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.str.islower()
except Exception as e:
with pytest.raises(type(e)):
modin_series.str.islower()
else:
modin_result = modin_series.str.islower()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_string_data_values, ids=test_string_data_keys)
def test_str_isupper(data):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.str.isupper()
except Exception as e:
with pytest.raises(type(e)):
modin_series.str.isupper()
else:
modin_result = modin_series.str.isupper()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_string_data_values, ids=test_string_data_keys)
def test_str_istitle(data):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.str.istitle()
except Exception as e:
with pytest.raises(type(e)):
modin_series.str.istitle()
else:
modin_result = modin_series.str.istitle()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_string_data_values, ids=test_string_data_keys)
def test_str_isnumeric(data):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.str.isnumeric()
except Exception as e:
with pytest.raises(type(e)):
modin_series.str.isnumeric()
else:
modin_result = modin_series.str.isnumeric()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_string_data_values, ids=test_string_data_keys)
def test_str_isdecimal(data):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.str.isdecimal()
except Exception as e:
with pytest.raises(type(e)):
modin_series.str.isdecimal()
else:
modin_result = modin_series.str.isdecimal()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_string_data_values, ids=test_string_data_keys)
def test_casefold(data):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.str.casefold()
except Exception as e:
with pytest.raises(type(e)):
modin_series.str.casefold()
else:
modin_result = modin_series.str.casefold()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("encoding_type", encoding_types)
@pytest.mark.parametrize("data", test_string_data_values, ids=test_string_data_keys)
def test_encode(data, encoding_type):
modin_series, pandas_series = create_test_series(data)
try:
pandas_result = pandas_series.str.encode(encoding=encoding_type)
except Exception as e:
with pytest.raises(type(e)):
modin_series.str.encode(encoding=encoding_type)
else:
modin_result = modin_series.str.encode(encoding=encoding_type)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize(
    "is_sparse_data", [True, False], ids=["is_sparse", "is_not_sparse"]
)
def test_hasattr_sparse(is_sparse_data):
    """``hasattr(series, "sparse")`` must agree between modin and pandas."""
    if is_sparse_data:
        source = pandas.arrays.SparseArray(test_data["float_nan_data"].values())
    else:
        source = test_data["float_nan_data"]
    modin_df, pandas_df = create_test_series(source)
    eval_general(modin_df, pandas_df, lambda df: hasattr(df, "sparse"))
@pytest.mark.parametrize(
    "data", test_data_categorical_values, ids=test_data_categorical_keys
)
def test_cat_categories(data):
    """Read and overwrite ``cat.categories``; results must match pandas."""
    md_series, pd_series = create_test_series(data.copy())
    df_equals(md_series.cat.categories, pd_series.cat.categories)
    new_categories = list("qwert")
    pd_series.cat.categories = new_categories
    md_series.cat.categories = new_categories
    df_equals(md_series, pd_series)
@pytest.mark.parametrize(
    "data", test_data_categorical_values, ids=test_data_categorical_keys
)
def test_cat_ordered(data):
    """The ``cat.ordered`` flag must match pandas."""
    md_series, pd_series = create_test_series(data.copy())
    md_ordered = md_series.cat.ordered
    pd_ordered = pd_series.cat.ordered
    assert md_ordered == pd_ordered
@pytest.mark.parametrize(
"data", test_data_categorical_values, ids=test_data_categorical_keys
)
def test_cat_codes(data):
modin_series, pandas_series = create_test_series(data.copy())
pandas_result = pandas_series.cat.codes
modin_result = modin_series.cat.codes
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize(
    "data", test_data_categorical_values, ids=test_data_categorical_keys
)
@pytest.mark.parametrize("inplace", [True, False])
def test_cat_rename_categories(data, inplace):
    # Verify both the return value (None when inplace=True) and the
    # mutated/unmutated source series.
    md_series, pd_series = create_test_series(data.copy())
    pd_result = pd_series.cat.rename_categories(list("qwert"), inplace=inplace)
    md_result = md_series.cat.rename_categories(list("qwert"), inplace=inplace)
    df_equals(md_series, pd_series)
    df_equals(md_result, pd_result)


@pytest.mark.parametrize(
    "data", test_data_categorical_values, ids=test_data_categorical_keys
)
@pytest.mark.parametrize("ordered", bool_arg_values, ids=bool_arg_keys)
@pytest.mark.parametrize("inplace", [True, False])
def test_cat_reorder_categories(data, ordered, inplace):
    # Reorder the existing categories (and optionally mark them ordered),
    # comparing result and source series between implementations.
    md_series, pd_series = create_test_series(data.copy())
    new_order = list("tades")
    pd_result = pd_series.cat.reorder_categories(
        new_order, ordered=ordered, inplace=inplace
    )
    md_result = md_series.cat.reorder_categories(
        new_order, ordered=ordered, inplace=inplace
    )
    df_equals(md_series, pd_series)
    df_equals(md_result, pd_result)


@pytest.mark.parametrize(
    "data", test_data_categorical_values, ids=test_data_categorical_keys
)
@pytest.mark.parametrize("inplace", [True, False])
def test_cat_add_categories(data, inplace):
    # Append two brand-new categories and compare result + source series.
    md_series, pd_series = create_test_series(data.copy())
    pd_result = pd_series.cat.add_categories(list("qw"), inplace=inplace)
    md_result = md_series.cat.add_categories(list("qw"), inplace=inplace)
    df_equals(md_series, pd_series)
    df_equals(md_result, pd_result)


@pytest.mark.parametrize(
    "data", test_data_categorical_values, ids=test_data_categorical_keys
)
@pytest.mark.parametrize("inplace", [True, False])
def test_cat_remove_categories(data, inplace):
    # Drop two existing categories and compare result + source series.
    md_series, pd_series = create_test_series(data.copy())
    pd_result = pd_series.cat.remove_categories(list("at"), inplace=inplace)
    md_result = md_series.cat.remove_categories(list("at"), inplace=inplace)
    df_equals(md_series, pd_series)
    df_equals(md_result, pd_result)
@pytest.mark.parametrize(
    "data", test_data_categorical_values, ids=test_data_categorical_keys
)
@pytest.mark.parametrize("inplace", [True, False])
def test_cat_remove_unused_categories(data, inplace):
    # Knock out one value so at least one category becomes unused, then
    # verify the pruning matches between implementations.
    md_series, pd_series = create_test_series(data.copy())
    pd_series[1] = np.nan
    md_series[1] = np.nan
    pd_result = pd_series.cat.remove_unused_categories(inplace=inplace)
    md_result = md_series.cat.remove_unused_categories(inplace=inplace)
    df_equals(md_series, pd_series)
    df_equals(md_result, pd_result)


@pytest.mark.parametrize(
    "data", test_data_categorical_values, ids=test_data_categorical_keys
)
@pytest.mark.parametrize("ordered", bool_arg_values, ids=bool_arg_keys)
@pytest.mark.parametrize("rename", [True, False])
@pytest.mark.parametrize("inplace", [True, False])
def test_cat_set_categories(data, ordered, rename, inplace):
    # Replace the whole category set, covering the ordered/rename/inplace
    # parameter cube.
    md_series, pd_series = create_test_series(data.copy())
    pd_result = pd_series.cat.set_categories(
        list("qwert"), ordered=ordered, rename=rename, inplace=inplace
    )
    md_result = md_series.cat.set_categories(
        list("qwert"), ordered=ordered, rename=rename, inplace=inplace
    )
    df_equals(md_series, pd_series)
    df_equals(md_result, pd_result)


@pytest.mark.parametrize(
    "data", test_data_categorical_values, ids=test_data_categorical_keys
)
@pytest.mark.parametrize("inplace", [True, False])
def test_cat_as_ordered(data, inplace):
    # Flip the categorical to ordered; compare result + source series.
    md_series, pd_series = create_test_series(data.copy())
    pd_result = pd_series.cat.as_ordered(inplace=inplace)
    md_result = md_series.cat.as_ordered(inplace=inplace)
    df_equals(md_series, pd_series)
    df_equals(md_result, pd_result)


@pytest.mark.parametrize(
    "data", test_data_categorical_values, ids=test_data_categorical_keys
)
@pytest.mark.parametrize("inplace", [True, False])
def test_cat_as_unordered(data, inplace):
    # Flip the categorical to unordered; compare result + source series.
    md_series, pd_series = create_test_series(data.copy())
    pd_result = pd_series.cat.as_unordered(inplace=inplace)
    md_result = md_series.cat.as_unordered(inplace=inplace)
    df_equals(md_series, pd_series)
    df_equals(md_result, pd_result)
def test_peculiar_callback():
    # ``apply`` must hand the raw tuple to the callback without unpacking
    # it; the callback raises on anything that is not a tuple, so a clean
    # run proves the value arrived intact.
    def require_tuple(val):
        if not isinstance(val, tuple):
            raise BaseException("Urgh...")
        return val

    pandas_series = pandas.DataFrame({"col": [(0, 1)]})["col"].apply(require_tuple)
    modin_series = pd.DataFrame({"col": [(0, 1)]})["col"].apply(require_tuple)
    df_equals(modin_series, pandas_series)
| 36.1957 | 104 | 0.707468 |
6fdf50bee87ab7e57d204714584e585e1f803c6a | 1,448 | py | Python | setup/azure_resources/setup_key_vault_secrets.py | areed1192/trading-system | 6c0c1444b9d249c548c7989db2a83cf2f848ef6a | [
"MIT"
] | 10 | 2021-08-01T09:18:31.000Z | 2022-01-25T18:55:39.000Z | setup/azure_resources/setup_key_vault_secrets.py | areed1192/trading-system | 6c0c1444b9d249c548c7989db2a83cf2f848ef6a | [
"MIT"
] | null | null | null | setup/azure_resources/setup_key_vault_secrets.py | areed1192/trading-system | 6c0c1444b9d249c548c7989db2a83cf2f848ef6a | [
"MIT"
] | null | null | null | from configparser import ConfigParser
from tradesys.client import TradingSystem
from azure.keyvault.secrets import SecretClient
# Initialize the parser and read the project configuration file.
config = ConfigParser()
config.read('config/config.ini')

# Grab the credentials/secrets that will be uploaded to the Key Vault.
iex_api_key = config.get('iex', 'api-key')
sql_connection_string = config.get('sql', 'connection-string')
blob_storage_connection_string = config.get(
    'blob_storage',
    'connection-string'
)

# Initialize our Trading System and grab its `KeyVaultManagementClient`.
trading_system_client = TradingSystem()
vault_mgmt_client = trading_system_client.vault_mgmt_client

# Step 1: Grab our `AzureKeyVault` resource.
key_vault = vault_mgmt_client.vaults.get(
    resource_group_name='azure-data-migration',
    vault_name='azure-migration-vault'
)

# Step 2: Define a new `SecretClient` so we can upload secrets to the vault.
secret_client = SecretClient(
    vault_url=key_vault.properties.vault_uri,
    credential=trading_system_client.credentials_client.azure_credentials
)

# Step 3: Set our IEX API Key.
secret_client.set_secret(
    name='iex-api-key',
    value=iex_api_key
)

# Step 4: Set our SQL Connection String.
secret_client.set_secret(
    name='sql-database-connection-string',
    value=sql_connection_string
)

# Step 5: Set our Azure Blob Connection String.
secret_client.set_secret(
    name='azure-blob-connection-string',
    value=blob_storage_connection_string
)
| 26.814815 | 73 | 0.776934 |
7cf79c01fb9c5e4df53d0efe240c1a95cfde8722 | 3,235 | py | Python | tensorflow_datasets/audio/gtzan/gtzan.py | shashwat9kumar/datasets | 99b055408025f8e934fcbb0fc054488aa087ebfb | [
"Apache-2.0"
] | 1 | 2019-07-19T15:01:45.000Z | 2019-07-19T15:01:45.000Z | tensorflow_datasets/audio/gtzan/gtzan.py | shashwat9kumar/datasets | 99b055408025f8e934fcbb0fc054488aa087ebfb | [
"Apache-2.0"
] | null | null | null | tensorflow_datasets/audio/gtzan/gtzan.py | shashwat9kumar/datasets | 99b055408025f8e934fcbb0fc054488aa087ebfb | [
"Apache-2.0"
] | 1 | 2021-08-02T22:12:40.000Z | 2021-08-02T22:12:40.000Z | # coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GZTAN dataset."""
import os
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
# BibTeX citation for the paper that introduced the GTZAN collection.
_CITATION = """
@misc{tzanetakis_essl_cook_2001,
author = "Tzanetakis, George and Essl, Georg and Cook, Perry",
title = "Automatic Musical Genre Classification Of Audio Signals",
url = "http://ismir2001.ismir.net/pdf/tzanetakis.pdf",
publisher = "The International Society for Music Information Retrieval",
year = "2001"
}
"""

# Human-readable dataset summary shown in the TFDS catalog.
_DESCRIPTION = """
The dataset consists of 1000 audio tracks each 30 seconds long.
It contains 10 genres, each represented by 100 tracks.
The tracks are all 22050Hz Mono 16-bit audio files in .wav format.
The genres are:
* blues
* classical
* country
* disco
* hiphop
* jazz
* metal
* pop
* reggae
* rock
"""

# Archive of the raw .wav files and the project homepage.
_DOWNLOAD_URL = "http://opihi.cs.uvic.ca/sound/genres.tar.gz"
_HOMEPAGE_URL = "http://marsyas.info/index.html"

# The 10 genre labels; order defines the integer encoding of ClassLabel.
_CLASS_LABELS = [
    "blues", "classical", "country", "disco", "hiphop", "jazz", "metal", "pop",
    "reggae", "rock"
]
class GTZAN(tfds.core.GeneratorBasedBuilder):
  """GTZAN Dataset."""

  VERSION = tfds.core.Version("1.0.0")

  def _info(self):
    # Dataset metadata: each example is a 22050 Hz mono .wav clip plus its
    # genre label and the original file name.
    return tfds.core.DatasetInfo(
        builder=self,
        description=_DESCRIPTION,
        features=tfds.features.FeaturesDict({
            "audio": tfds.features.Audio(file_format="wav", sample_rate=22050),
            "label": tfds.features.ClassLabel(names=_CLASS_LABELS),
            "audio/filename": tfds.features.Text(),
        }),
        supervised_keys=("audio", "label"),
        homepage=_HOMEPAGE_URL,
        citation=_CITATION,
    )

  def _split_generators(self, dl_manager):
    """Returns SplitGenerators."""
    dl_paths = dl_manager.download_and_extract({"genres": _DOWNLOAD_URL})
    # The archive extracts into a top-level "genres" directory.
    path = os.path.join(dl_paths["genres"], "genres")
    # There is no predefined train/val/test split for this dataset.
    return [
        tfds.core.SplitGenerator(
            name=tfds.Split.TRAIN, gen_kwargs={"path": path}),
    ]

  def _generate_examples(self, path):
    """Yields examples.

    Args:
      path: Path of the downloaded and extracted directory

    Yields:
      Next examples
    """
    # Walk every genre sub-directory under the extracted root.
    for root, _, file_name in tf.io.gfile.walk(path):
      for fname in file_name:
        if fname.endswith(".wav"):  # select only .wav files
          # Each .wav file has name in the format of <genre>.<number>.wav,
          # so the file name doubles as a unique example key.
          label = fname.split(".")[0]
          key = fname
          example = {
              "audio": os.path.join(root, fname),
              "label": label,
              "audio/filename": fname,
          }
          yield key, example
| 28.883929 | 79 | 0.664606 |
235beefe6f5b4ef5410d20d9e06b194b484ed548 | 24,140 | py | Python | sdk/python/pulumi_azure_native/operationalinsights/v20210601/outputs.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/operationalinsights/v20210601/outputs.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/operationalinsights/v20210601/outputs.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
# Response types re-exported as this generated module's public API surface.
__all__ = [
    'AssociatedWorkspaceResponse',
    'CapacityReservationPropertiesResponse',
    'ClusterSkuResponse',
    'IdentityResponse',
    'KeyVaultPropertiesResponse',
    'PrivateLinkScopedResourceResponse',
    'UserIdentityPropertiesResponse',
    'WorkspaceCappingResponse',
    'WorkspaceFeaturesResponse',
    'WorkspaceSkuResponse',
]
@pulumi.output_type
class AssociatedWorkspaceResponse(dict):
    """
    The list of Log Analytics workspaces associated with the cluster.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map a camelCase wire key to its snake_case property name so raw
        # dict access can be steered toward the typed property getters.
        suggest = None
        if key == "associateDate":
            suggest = "associate_date"
        elif key == "resourceId":
            suggest = "resource_id"
        elif key == "workspaceId":
            suggest = "workspace_id"
        elif key == "workspaceName":
            suggest = "workspace_name"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in AssociatedWorkspaceResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        # Warn (when applicable) before delegating to plain dict access.
        AssociatedWorkspaceResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        # Warn (when applicable) before delegating to plain dict access.
        AssociatedWorkspaceResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 associate_date: str,
                 resource_id: str,
                 workspace_id: str,
                 workspace_name: str):
        """
        The list of Log Analytics workspaces associated with the cluster.
        :param str associate_date: The time of workspace association.
        :param str resource_id: The ResourceId id the assigned workspace.
        :param str workspace_id: The id of the assigned workspace.
        :param str workspace_name: The name id the assigned workspace.
        """
        pulumi.set(__self__, "associate_date", associate_date)
        pulumi.set(__self__, "resource_id", resource_id)
        pulumi.set(__self__, "workspace_id", workspace_id)
        pulumi.set(__self__, "workspace_name", workspace_name)

    @property
    @pulumi.getter(name="associateDate")
    def associate_date(self) -> str:
        """
        The time of workspace association.
        """
        return pulumi.get(self, "associate_date")

    @property
    @pulumi.getter(name="resourceId")
    def resource_id(self) -> str:
        """
        The ResourceId id the assigned workspace.
        """
        return pulumi.get(self, "resource_id")

    @property
    @pulumi.getter(name="workspaceId")
    def workspace_id(self) -> str:
        """
        The id of the assigned workspace.
        """
        return pulumi.get(self, "workspace_id")

    @property
    @pulumi.getter(name="workspaceName")
    def workspace_name(self) -> str:
        """
        The name id the assigned workspace.
        """
        return pulumi.get(self, "workspace_name")
@pulumi.output_type
class CapacityReservationPropertiesResponse(dict):
    """
    The Capacity Reservation properties.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map a camelCase wire key to its snake_case property name so raw
        # dict access can be steered toward the typed property getters.
        suggest = None
        if key == "lastSkuUpdate":
            suggest = "last_sku_update"
        elif key == "minCapacity":
            suggest = "min_capacity"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in CapacityReservationPropertiesResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        # Warn (when applicable) before delegating to plain dict access.
        CapacityReservationPropertiesResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        # Warn (when applicable) before delegating to plain dict access.
        CapacityReservationPropertiesResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 last_sku_update: str,
                 min_capacity: float):
        """
        The Capacity Reservation properties.
        :param str last_sku_update: The last time Sku was updated.
        :param float min_capacity: Minimum CapacityReservation value in GB.
        """
        pulumi.set(__self__, "last_sku_update", last_sku_update)
        pulumi.set(__self__, "min_capacity", min_capacity)

    @property
    @pulumi.getter(name="lastSkuUpdate")
    def last_sku_update(self) -> str:
        """
        The last time Sku was updated.
        """
        return pulumi.get(self, "last_sku_update")

    @property
    @pulumi.getter(name="minCapacity")
    def min_capacity(self) -> float:
        """
        Minimum CapacityReservation value in GB.
        """
        return pulumi.get(self, "min_capacity")
@pulumi.output_type
class ClusterSkuResponse(dict):
    """
    The cluster sku definition.
    """
    # No __key_warning shim here: both wire keys ("capacity", "name") are
    # already valid snake_case property names.
    def __init__(__self__, *,
                 capacity: Optional[float] = None,
                 name: Optional[str] = None):
        """
        The cluster sku definition.
        :param float capacity: The capacity value
        :param str name: The name of the SKU.
        """
        if capacity is not None:
            pulumi.set(__self__, "capacity", capacity)
        if name is not None:
            pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def capacity(self) -> Optional[float]:
        """
        The capacity value
        """
        return pulumi.get(self, "capacity")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        The name of the SKU.
        """
        return pulumi.get(self, "name")
@pulumi.output_type
class IdentityResponse(dict):
    """
    Identity for the resource.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map a camelCase wire key to its snake_case property name so raw
        # dict access can be steered toward the typed property getters.
        suggest = None
        if key == "principalId":
            suggest = "principal_id"
        elif key == "tenantId":
            suggest = "tenant_id"
        elif key == "userAssignedIdentities":
            suggest = "user_assigned_identities"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in IdentityResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        # Warn (when applicable) before delegating to plain dict access.
        IdentityResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        # Warn (when applicable) before delegating to plain dict access.
        IdentityResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 principal_id: str,
                 tenant_id: str,
                 type: str,
                 user_assigned_identities: Optional[Mapping[str, 'outputs.UserIdentityPropertiesResponse']] = None):
        """
        Identity for the resource.
        :param str principal_id: The principal ID of resource identity.
        :param str tenant_id: The tenant ID of resource.
        :param str type: Type of managed service identity.
        :param Mapping[str, 'UserIdentityPropertiesResponse'] user_assigned_identities: The list of user identities associated with the resource. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
        """
        pulumi.set(__self__, "principal_id", principal_id)
        pulumi.set(__self__, "tenant_id", tenant_id)
        pulumi.set(__self__, "type", type)
        if user_assigned_identities is not None:
            pulumi.set(__self__, "user_assigned_identities", user_assigned_identities)

    @property
    @pulumi.getter(name="principalId")
    def principal_id(self) -> str:
        """
        The principal ID of resource identity.
        """
        return pulumi.get(self, "principal_id")

    @property
    @pulumi.getter(name="tenantId")
    def tenant_id(self) -> str:
        """
        The tenant ID of resource.
        """
        return pulumi.get(self, "tenant_id")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Type of managed service identity.
        """
        return pulumi.get(self, "type")

    @property
    @pulumi.getter(name="userAssignedIdentities")
    def user_assigned_identities(self) -> Optional[Mapping[str, 'outputs.UserIdentityPropertiesResponse']]:
        """
        The list of user identities associated with the resource. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
        """
        return pulumi.get(self, "user_assigned_identities")
@pulumi.output_type
class KeyVaultPropertiesResponse(dict):
    """
    The key vault properties.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map a camelCase wire key to its snake_case property name so raw
        # dict access can be steered toward the typed property getters.
        suggest = None
        if key == "keyName":
            suggest = "key_name"
        elif key == "keyRsaSize":
            suggest = "key_rsa_size"
        elif key == "keyVaultUri":
            suggest = "key_vault_uri"
        elif key == "keyVersion":
            suggest = "key_version"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in KeyVaultPropertiesResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        # Warn (when applicable) before delegating to plain dict access.
        KeyVaultPropertiesResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        # Warn (when applicable) before delegating to plain dict access.
        KeyVaultPropertiesResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 key_name: Optional[str] = None,
                 key_rsa_size: Optional[int] = None,
                 key_vault_uri: Optional[str] = None,
                 key_version: Optional[str] = None):
        """
        The key vault properties.
        :param str key_name: The name of the key associated with the Log Analytics cluster.
        :param int key_rsa_size: Selected key minimum required size.
        :param str key_vault_uri: The Key Vault uri which holds they key associated with the Log Analytics cluster.
        :param str key_version: The version of the key associated with the Log Analytics cluster.
        """
        if key_name is not None:
            pulumi.set(__self__, "key_name", key_name)
        if key_rsa_size is not None:
            pulumi.set(__self__, "key_rsa_size", key_rsa_size)
        if key_vault_uri is not None:
            pulumi.set(__self__, "key_vault_uri", key_vault_uri)
        if key_version is not None:
            pulumi.set(__self__, "key_version", key_version)

    @property
    @pulumi.getter(name="keyName")
    def key_name(self) -> Optional[str]:
        """
        The name of the key associated with the Log Analytics cluster.
        """
        return pulumi.get(self, "key_name")

    @property
    @pulumi.getter(name="keyRsaSize")
    def key_rsa_size(self) -> Optional[int]:
        """
        Selected key minimum required size.
        """
        return pulumi.get(self, "key_rsa_size")

    @property
    @pulumi.getter(name="keyVaultUri")
    def key_vault_uri(self) -> Optional[str]:
        """
        The Key Vault uri which holds they key associated with the Log Analytics cluster.
        """
        return pulumi.get(self, "key_vault_uri")

    @property
    @pulumi.getter(name="keyVersion")
    def key_version(self) -> Optional[str]:
        """
        The version of the key associated with the Log Analytics cluster.
        """
        return pulumi.get(self, "key_version")
@pulumi.output_type
class PrivateLinkScopedResourceResponse(dict):
    """
    The private link scope resource reference.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map a camelCase wire key to its snake_case property name so raw
        # dict access can be steered toward the typed property getters.
        suggest = None
        if key == "resourceId":
            suggest = "resource_id"
        elif key == "scopeId":
            suggest = "scope_id"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in PrivateLinkScopedResourceResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        # Warn (when applicable) before delegating to plain dict access.
        PrivateLinkScopedResourceResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        # Warn (when applicable) before delegating to plain dict access.
        PrivateLinkScopedResourceResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 resource_id: Optional[str] = None,
                 scope_id: Optional[str] = None):
        """
        The private link scope resource reference.
        :param str resource_id: The full resource Id of the private link scope resource.
        :param str scope_id: The private link scope unique Identifier.
        """
        if resource_id is not None:
            pulumi.set(__self__, "resource_id", resource_id)
        if scope_id is not None:
            pulumi.set(__self__, "scope_id", scope_id)

    @property
    @pulumi.getter(name="resourceId")
    def resource_id(self) -> Optional[str]:
        """
        The full resource Id of the private link scope resource.
        """
        return pulumi.get(self, "resource_id")

    @property
    @pulumi.getter(name="scopeId")
    def scope_id(self) -> Optional[str]:
        """
        The private link scope unique Identifier.
        """
        return pulumi.get(self, "scope_id")
@pulumi.output_type
class UserIdentityPropertiesResponse(dict):
    """
    User assigned identity properties.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map a camelCase wire key to its snake_case property name so raw
        # dict access can be steered toward the typed property getters.
        suggest = None
        if key == "clientId":
            suggest = "client_id"
        elif key == "principalId":
            suggest = "principal_id"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in UserIdentityPropertiesResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        # Warn (when applicable) before delegating to plain dict access.
        UserIdentityPropertiesResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        # Warn (when applicable) before delegating to plain dict access.
        UserIdentityPropertiesResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 client_id: str,
                 principal_id: str):
        """
        User assigned identity properties.
        :param str client_id: The client id of user assigned identity.
        :param str principal_id: The principal id of user assigned identity.
        """
        pulumi.set(__self__, "client_id", client_id)
        pulumi.set(__self__, "principal_id", principal_id)

    @property
    @pulumi.getter(name="clientId")
    def client_id(self) -> str:
        """
        The client id of user assigned identity.
        """
        return pulumi.get(self, "client_id")

    @property
    @pulumi.getter(name="principalId")
    def principal_id(self) -> str:
        """
        The principal id of user assigned identity.
        """
        return pulumi.get(self, "principal_id")
@pulumi.output_type
class WorkspaceCappingResponse(dict):
    """
    The daily volume cap for ingestion.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map a camelCase wire key to its snake_case property name so raw
        # dict access can be steered toward the typed property getters.
        suggest = None
        if key == "dataIngestionStatus":
            suggest = "data_ingestion_status"
        elif key == "quotaNextResetTime":
            suggest = "quota_next_reset_time"
        elif key == "dailyQuotaGb":
            suggest = "daily_quota_gb"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in WorkspaceCappingResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        # Warn (when applicable) before delegating to plain dict access.
        WorkspaceCappingResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        # Warn (when applicable) before delegating to plain dict access.
        WorkspaceCappingResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 data_ingestion_status: str,
                 quota_next_reset_time: str,
                 daily_quota_gb: Optional[float] = None):
        """
        The daily volume cap for ingestion.
        :param str data_ingestion_status: The status of data ingestion for this workspace.
        :param str quota_next_reset_time: The time when the quota will be rest.
        :param float daily_quota_gb: The workspace daily quota for ingestion.
        """
        pulumi.set(__self__, "data_ingestion_status", data_ingestion_status)
        pulumi.set(__self__, "quota_next_reset_time", quota_next_reset_time)
        if daily_quota_gb is not None:
            pulumi.set(__self__, "daily_quota_gb", daily_quota_gb)

    @property
    @pulumi.getter(name="dataIngestionStatus")
    def data_ingestion_status(self) -> str:
        """
        The status of data ingestion for this workspace.
        """
        return pulumi.get(self, "data_ingestion_status")

    @property
    @pulumi.getter(name="quotaNextResetTime")
    def quota_next_reset_time(self) -> str:
        """
        The time when the quota will be rest.
        """
        return pulumi.get(self, "quota_next_reset_time")

    @property
    @pulumi.getter(name="dailyQuotaGb")
    def daily_quota_gb(self) -> Optional[float]:
        """
        The workspace daily quota for ingestion.
        """
        return pulumi.get(self, "daily_quota_gb")
@pulumi.output_type
class WorkspaceFeaturesResponse(dict):
    """
    Workspace features.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map a camelCase wire key to its snake_case property name so raw
        # dict access can be steered toward the typed property getters.
        suggest = None
        if key == "clusterResourceId":
            suggest = "cluster_resource_id"
        elif key == "disableLocalAuth":
            suggest = "disable_local_auth"
        elif key == "enableDataExport":
            suggest = "enable_data_export"
        elif key == "enableLogAccessUsingOnlyResourcePermissions":
            suggest = "enable_log_access_using_only_resource_permissions"
        elif key == "immediatePurgeDataOn30Days":
            suggest = "immediate_purge_data_on30_days"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in WorkspaceFeaturesResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        # Warn (when applicable) before delegating to plain dict access.
        WorkspaceFeaturesResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        # Warn (when applicable) before delegating to plain dict access.
        WorkspaceFeaturesResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 cluster_resource_id: Optional[str] = None,
                 disable_local_auth: Optional[bool] = None,
                 enable_data_export: Optional[bool] = None,
                 enable_log_access_using_only_resource_permissions: Optional[bool] = None,
                 immediate_purge_data_on30_days: Optional[bool] = None):
        """
        Workspace features.
        :param str cluster_resource_id: Dedicated LA cluster resourceId that is linked to the workspaces.
        :param bool disable_local_auth: Disable Non-AAD based Auth.
        :param bool enable_data_export: Flag that indicate if data should be exported.
        :param bool enable_log_access_using_only_resource_permissions: Flag that indicate which permission to use - resource or workspace or both.
        :param bool immediate_purge_data_on30_days: Flag that describes if we want to remove the data after 30 days.
        """
        if cluster_resource_id is not None:
            pulumi.set(__self__, "cluster_resource_id", cluster_resource_id)
        if disable_local_auth is not None:
            pulumi.set(__self__, "disable_local_auth", disable_local_auth)
        if enable_data_export is not None:
            pulumi.set(__self__, "enable_data_export", enable_data_export)
        if enable_log_access_using_only_resource_permissions is not None:
            pulumi.set(__self__, "enable_log_access_using_only_resource_permissions", enable_log_access_using_only_resource_permissions)
        if immediate_purge_data_on30_days is not None:
            pulumi.set(__self__, "immediate_purge_data_on30_days", immediate_purge_data_on30_days)

    @property
    @pulumi.getter(name="clusterResourceId")
    def cluster_resource_id(self) -> Optional[str]:
        """
        Dedicated LA cluster resourceId that is linked to the workspaces.
        """
        return pulumi.get(self, "cluster_resource_id")

    @property
    @pulumi.getter(name="disableLocalAuth")
    def disable_local_auth(self) -> Optional[bool]:
        """
        Disable Non-AAD based Auth.
        """
        return pulumi.get(self, "disable_local_auth")

    @property
    @pulumi.getter(name="enableDataExport")
    def enable_data_export(self) -> Optional[bool]:
        """
        Flag that indicate if data should be exported.
        """
        return pulumi.get(self, "enable_data_export")

    @property
    @pulumi.getter(name="enableLogAccessUsingOnlyResourcePermissions")
    def enable_log_access_using_only_resource_permissions(self) -> Optional[bool]:
        """
        Flag that indicate which permission to use - resource or workspace or both.
        """
        return pulumi.get(self, "enable_log_access_using_only_resource_permissions")

    @property
    @pulumi.getter(name="immediatePurgeDataOn30Days")
    def immediate_purge_data_on30_days(self) -> Optional[bool]:
        """
        Flag that describes if we want to remove the data after 30 days.
        """
        return pulumi.get(self, "immediate_purge_data_on30_days")
@pulumi.output_type
class WorkspaceSkuResponse(dict):
    """
    The SKU (tier) of a workspace.
    """
    @staticmethod
    def __key_warning(key: str):
        # Map a camelCase wire key to its snake_case property name so raw
        # dict access can be steered toward the typed property getters.
        suggest = None
        if key == "lastSkuUpdate":
            suggest = "last_sku_update"
        elif key == "capacityReservationLevel":
            suggest = "capacity_reservation_level"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in WorkspaceSkuResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        # Warn (when applicable) before delegating to plain dict access.
        WorkspaceSkuResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        # Warn (when applicable) before delegating to plain dict access.
        WorkspaceSkuResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 last_sku_update: str,
                 name: str,
                 capacity_reservation_level: Optional[int] = None):
        """
        The SKU (tier) of a workspace.
        :param str last_sku_update: The last time when the sku was updated.
        :param str name: The name of the SKU.
        :param int capacity_reservation_level: The capacity reservation level in GB for this workspace, when CapacityReservation sku is selected.
        """
        pulumi.set(__self__, "last_sku_update", last_sku_update)
        pulumi.set(__self__, "name", name)
        if capacity_reservation_level is not None:
            pulumi.set(__self__, "capacity_reservation_level", capacity_reservation_level)

    @property
    @pulumi.getter(name="lastSkuUpdate")
    def last_sku_update(self) -> str:
        """
        The last time when the sku was updated.
        """
        return pulumi.get(self, "last_sku_update")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the SKU.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="capacityReservationLevel")
    def capacity_reservation_level(self) -> Optional[int]:
        """
        The capacity reservation level in GB for this workspace, when CapacityReservation sku is selected.
        """
        return pulumi.get(self, "capacity_reservation_level")
| 35.395894 | 371 | 0.637283 |
3a05d0cd8143986a1a7dcdd723e4f5b4a11bacfa | 1,675 | py | Python | accesslink-API/accesslink/accesslink.py | mendelson/polar-data-analysis | 04c7b8615d88e3966e8a71c4353ad23c61ff022d | [
"MIT"
] | 115 | 2017-10-26T16:59:51.000Z | 2022-03-29T13:56:48.000Z | accesslink-API/accesslink/accesslink.py | mendelson/polar-data-analysis | 04c7b8615d88e3966e8a71c4353ad23c61ff022d | [
"MIT"
] | 14 | 2018-01-08T10:02:05.000Z | 2022-02-17T16:05:01.000Z | accesslink-API/accesslink/accesslink.py | mendelson/polar-data-analysis | 04c7b8615d88e3966e8a71c4353ad23c61ff022d | [
"MIT"
] | 61 | 2017-10-27T10:38:17.000Z | 2022-03-11T20:03:52.000Z | #!/usr/bin/env python
from . import endpoints
from .oauth2 import OAuth2Client
# Polar AccessLink OAuth2 endpoints and API base URL (v3).
AUTHORIZATION_URL = "https://flow.polar.com/oauth2/authorization"
ACCESS_TOKEN_URL = "https://polarremote.com/v2/oauth2/token"
ACCESSLINK_URL = "https://www.polaraccesslink.com/v3"
class AccessLink(object):
    """Wrapper class for Polar Open AccessLink API v3"""

    def __init__(self, client_id, client_secret, redirect_url=None):
        """Build the OAuth2 session and one helper per API resource group.

        :param client_id: OAuth2 client id issued by Polar (required).
        :param client_secret: OAuth2 client secret (required).
        :param redirect_url: optional OAuth2 redirect URL.
        :raises ValueError: if either credential is missing/empty.
        """
        if not (client_id and client_secret):
            raise ValueError("Client id and secret must be provided.")

        self.oauth = OAuth2Client(
            url=ACCESSLINK_URL,
            authorization_url=AUTHORIZATION_URL,
            access_token_url=ACCESS_TOKEN_URL,
            redirect_url=redirect_url,
            client_id=client_id,
            client_secret=client_secret,
        )

        # Endpoint helpers all share the same OAuth2 session.
        self.users = endpoints.Users(oauth=self.oauth)
        self.pull_notifications = endpoints.PullNotifications(oauth=self.oauth)
        self.training_data = endpoints.TrainingData(oauth=self.oauth)
        self.physical_info = endpoints.PhysicalInfo(oauth=self.oauth)
        self.daily_activity = endpoints.DailyActivity(oauth=self.oauth)

    @property
    def authorization_url(self):
        """Get the authorization url for the client"""
        return self.oauth.get_authorization_url()

    def get_access_token(self, authorization_code):
        """Request access token for a user.

        :param authorization_code: authorization code received from authorization endpoint.
        """
        return self.oauth.get_access_token(authorization_code)
| 39.880952 | 91 | 0.666269 |
3eaa203339032dee365a3b5148cdda06bcfee180 | 5,573 | py | Python | tempest/common/ssh.py | citrix-openstack-build/tempest | 385f0b116e8f02d24338e0f11f4ae3ccf2edd661 | [
"Apache-2.0"
] | null | null | null | tempest/common/ssh.py | citrix-openstack-build/tempest | 385f0b116e8f02d24338e0f11f4ae3ccf2edd661 | [
"Apache-2.0"
] | null | null | null | tempest/common/ssh.py | citrix-openstack-build/tempest | 385f0b116e8f02d24338e0f11f4ae3ccf2edd661 | [
"Apache-2.0"
] | null | null | null | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import cStringIO
import select
import socket
import time
import warnings
from tempest import exceptions
# Silence any warnings emitted while importing paramiko so they do not
# leak into test output.
with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    import paramiko
class Client(object):
    """Minimal SSH client built on paramiko.

    Retries the initial connection with a backoff until ``timeout``
    seconds have elapsed, and raises tempest ``exceptions`` types on
    failure.  Written for Python 2 (uses ``basestring``/``cStringIO``).
    """

    def __init__(self, host, username, password=None, timeout=300, pkey=None,
                 channel_timeout=10, look_for_keys=False, key_filename=None):
        """Store connection parameters.

        :param host: hostname or IP address to connect to.
        :param username: user to authenticate as.
        :param password: password, when password authentication is used.
        :param timeout: overall connect/wait budget in seconds.
        :param pkey: private key object, or the key material as a string.
        :param channel_timeout: per-iteration poll wait in exec_command
            (passed straight to ``select.poll().poll()``).
        :param look_for_keys: forwarded to ``paramiko.SSHClient.connect``.
        :param key_filename: forwarded to ``paramiko.SSHClient.connect``.
        """
        self.host = host
        self.username = username
        self.password = password
        # Accept raw key material as a string and wrap it in an RSAKey.
        if isinstance(pkey, basestring):
            pkey = paramiko.RSAKey.from_private_key(
                cStringIO.StringIO(str(pkey)))
        self.pkey = pkey
        self.look_for_keys = look_for_keys
        self.key_filename = key_filename
        self.timeout = int(timeout)
        self.channel_timeout = float(channel_timeout)
        self.buf_size = 1024  # bytes read per recv() call in exec_command

    def _get_ssh_connection(self, sleep=1.5, backoff=1.01):
        """Returns an ssh connection to the specified host.

        Retries on socket/auth errors, sleeping ``sleep`` seconds between
        attempts and multiplying the sleep by ``backoff`` each time, until
        ``self.timeout`` is exhausted.
        """
        _timeout = True
        bsleep = sleep
        ssh = paramiko.SSHClient()
        # Accept unknown host keys automatically (test environment).
        ssh.set_missing_host_key_policy(
            paramiko.AutoAddPolicy())
        _start_time = time.time()

        while not self._is_timed_out(_start_time):
            try:
                ssh.connect(self.host, username=self.username,
                            password=self.password,
                            look_for_keys=self.look_for_keys,
                            key_filename=self.key_filename,
                            timeout=self.timeout, pkey=self.pkey)
                _timeout = False
                break
            except (socket.error,
                    paramiko.AuthenticationException):
                time.sleep(bsleep)
                bsleep *= backoff
                continue
        if _timeout:
            raise exceptions.SSHTimeout(host=self.host,
                                        user=self.username,
                                        password=self.password)
        return ssh

    def _is_timed_out(self, start_time):
        # Rearranged comparison: equivalent to
        # (time.time() - start_time) > self.timeout.
        return (time.time() - self.timeout) > start_time

    def connect_until_closed(self):
        """Connect to the server and wait until connection is lost.

        Polls the transport every 5 seconds; returns when the connection
        drops, the timeout elapses, or a connection error occurs
        (EOF/auth/socket errors are deliberately swallowed).
        """
        try:
            ssh = self._get_ssh_connection()
            _transport = ssh.get_transport()
            _start_time = time.time()
            _timed_out = self._is_timed_out(_start_time)
            while _transport.is_active() and not _timed_out:
                time.sleep(5)
                _timed_out = self._is_timed_out(_start_time)
            ssh.close()
        except (EOFError, paramiko.AuthenticationException, socket.error):
            return

    def exec_command(self, cmd):
        """
        Execute the specified command on the server.

        Note that this method is reading whole command outputs to memory, thus
        shouldn't be used for large outputs.

        :returns: data read from standard output of the command.
        :raises: SSHExecCommandFailed if command returns nonzero
                 status. The exception contains command status stderr content.
        """
        ssh = self._get_ssh_connection()
        transport = ssh.get_transport()
        channel = transport.open_session()
        channel.fileno()  # Register event pipe
        channel.exec_command(cmd)
        # Close stdin so the remote command sees EOF on its input.
        channel.shutdown_write()
        out_data = []
        err_data = []
        poll = select.poll()
        poll.register(channel, select.POLLIN)
        start_time = time.time()

        while True:
            # NOTE(review): poll.poll() takes its timeout in milliseconds,
            # so channel_timeout is interpreted as ms here -- confirm intent.
            ready = poll.poll(self.channel_timeout)
            if not any(ready):
                # Nothing readable yet: keep waiting until the overall
                # timeout budget is spent, then give up.
                if not self._is_timed_out(start_time):
                    continue
                raise exceptions.TimeoutException(
                    "Command: '{0}' executed on host '{1}'.".format(
                        cmd, self.host))
            if not ready[0]:  # If there is nothing to read.
                continue
            out_chunk = err_chunk = None
            if channel.recv_ready():
                out_chunk = channel.recv(self.buf_size)
                out_data += out_chunk,  # trailing comma: append one chunk
            if channel.recv_stderr_ready():
                err_chunk = channel.recv_stderr(self.buf_size)
                err_data += err_chunk,
            # Stop only once the channel is closed AND both streams drained.
            if channel.closed and not err_chunk and not out_chunk:
                break
        exit_status = channel.recv_exit_status()
        if 0 != exit_status:
            raise exceptions.SSHExecCommandFailed(
                command=cmd, exit_status=exit_status,
                strerror=''.join(err_data))
        return ''.join(out_data)

    def test_connection_auth(self):
        """Returns true if ssh can connect to server.

        Only authentication failures yield False; other connection errors
        propagate (e.g. SSHTimeout from the retry loop).
        """
        try:
            connection = self._get_ssh_connection()
            connection.close()
        except paramiko.AuthenticationException:
            return False

        return True
| 36.188312 | 78 | 0.59555 |
5350093435bb8a564661e6265d08f22407c86841 | 1,296 | py | Python | src/gen_per_key.py | jasonhavenD/triplesKB | e591adcb60d2611eef361f8ec412ace817c683eb | [
"Apache-2.0"
] | 14 | 2019-05-13T09:43:56.000Z | 2022-01-23T02:18:28.000Z | src/gen_per_key.py | jasonhavenD/triplesKB | e591adcb60d2611eef361f8ec412ace817c683eb | [
"Apache-2.0"
] | 1 | 2020-10-26T03:34:09.000Z | 2020-10-26T06:17:10.000Z | src/gen_per_key.py | jasonhavenD/triplesKB | e591adcb60d2611eef361f8ec412ace817c683eb | [
"Apache-2.0"
] | 7 | 2019-05-27T01:51:24.000Z | 2021-07-12T02:42:12.000Z | # encoding = utf-8
import re
import loguru
import json
from collections import defaultdict
def main(per_dict, field_dict, des):
    """Build a person->field relation file from two JSON dictionaries.

    :param per_dict: path to a JSON file mapping person id -> person record
        (each record carries a ``tags`` entry).
    :param field_dict: path to a JSON file mapping field key -> record with
        ``id``, ``chinese`` and ``english`` names.
    :param des: destination path for the ``##``-separated relation file.
    """
    # Use context managers so the input files are closed deterministically
    # (the original json.load(open(...)) leaked the file handles).
    with open(per_dict, 'r') as f_pers:
        pers = json.load(f_pers)
    with open(field_dict, 'r') as f_fields:
        fields = json.load(f_fields)

    # Lookup table from both the Chinese and the English field name
    # (as stored, typically lower-case) to the field id.
    fields_table = {}
    for v in fields.values():
        field_id, zh, en = v['id'], v['chinese'], v['english']
        fields_table[zh] = field_id
        fields_table[en] = field_id

    with open(des, 'w') as f:
        f.write('%s##%s##%s\n' % ('person', 'field', 'is'))
        for per_id, per_val in pers.items():
            # One output row per (person, field) pair; third column is
            # always 1 (relation present).
            for field_key in find_fields_for_per(per_val, fields_table):
                f.write('%s##%s##%s\n' % (per_id, field_key, 1))
def find_fields_for_per(per_val, fields_table):
    """Map a person's tags to field ids.

    :param per_val: person record; its ``tags`` entry appears to be either
        ``''`` (no tags) or a list of tag strings -- any falsy value is
        treated as "no tags".
    :param fields_table: mapping from field name -> field id.
    :returns: list of field ids for the tags that resolve to a non-empty
        id; unknown tags and empty ids are skipped, order and duplicates
        are preserved.
    """
    tags = per_val['tags']
    result = []
    if not tags:
        return result
    for tag in tags:
        # dict.get replaces the original per-item try/except KeyError;
        # the '' default also filters entries whose id is empty.
        field_id = fields_table.get(tag.lower(), '')
        if field_id != '':
            result.append(field_id)
    return result
# Script entry point: default input/output paths are relative to the
# current working directory.
if __name__ == "__main__":
    main('persons_dict.json', 'fields_dict.json', 'per_key_rel.csv')
| 28.173913 | 82 | 0.534722 |
a9fa2626f69f55b4bd173da0b7d528a5a7ee1293 | 2,340 | py | Python | shb-sanet/exp/11-30_16-21_SHHB_SANet_0.0001_[flip+crop]/code/config.py | mar-edw-kon/CCAugmentation-Experiments-Env | ea86a775a3088b851feb42e0fdd8bd6c4f07d4c7 | [
"MIT"
] | 1 | 2020-11-25T01:23:00.000Z | 2020-11-25T01:23:00.000Z | shb-sanet/exp/11-30_16-21_SHHB_SANet_0.0001_[flip+crop]/code/config.py | mar-edw-kon/CCAugmentation-Experiments-Env | ea86a775a3088b851feb42e0fdd8bd6c4f07d4c7 | [
"MIT"
] | null | null | null | shb-sanet/exp/11-30_16-21_SHHB_SANet_0.0001_[flip+crop]/code/config.py | mar-edw-kon/CCAugmentation-Experiments-Env | ea86a775a3088b851feb42e0fdd8bd6c4f07d4c7 | [
"MIT"
] | null | null | null | import os
from easydict import EasyDict as edict
import time
import torch
# init: __C is the single EasyDict holding every training/eval setting;
# other modules import it as ``cfg``.
__C = edict()
cfg = __C

#------------------------------TRAIN------------------------
__C.SEED = 3035  # random seed, for reproduction
__C.DATASET = 'SHHB'  # dataset selection: GCC, SHHA, SHHB, UCF50, QNRF, WE

if __C.DATASET == 'UCF50':  # only for UCF50: pick the cross-validation fold
    from datasets.UCF50.setting import cfg_data
    __C.VAL_INDEX = cfg_data.VAL_INDEX

if __C.DATASET == 'GCC':  # only for GCC: pick the validation split mode
    from datasets.GCC.setting import cfg_data
    __C.VAL_MODE = cfg_data.VAL_MODE

__C.NET = 'SANet'  # net selection: MCNN, VGG, VGG_DECODER, Res50, CSRNet, SANet

__C.PRE_GCC = False  # use the pretrained model on GCC dataset
__C.PRE_GCC_MODEL = ''  # path to model

__C.RESUME = False  # continue training
__C.RESUME_PATH = './exp/11-26_17-34_SHHB_SANet_0.0001/latest_state.pth'  # checkpoint used when RESUME is True

__C.GPU_ID = [0]  # single gpu: [0], [1] ...; multi gpus: [0,1]

# learning rate settings
__C.LR = 1e-4  # learning rate
__C.LR_DECAY = 0.995  # decay rate
__C.LR_DECAY_START = -1  # when training epoch is more than it, the learning rate will begin to decay
__C.NUM_EPOCH_LR_DECAY = 1  # decay frequency
__C.MAX_EPOCH = 500

# multi-task learning weights; no use for single models, such as MCNN, VGG, VGG_DECODER, Res50, CSRNet, and so on
__C.LAMBDA_1 = 1e-3

# print frequency (iterations between log lines)
__C.PRINT_FREQ = 30

# Experiment name: timestamp + dataset + net + learning rate
# (+ fold/mode suffix for UCF50/GCC).
now = time.strftime("%m-%d_%H-%M", time.localtime())

__C.EXP_NAME = now \
    + '_' + __C.DATASET \
    + '_' + __C.NET \
    + '_' + str(__C.LR)

if __C.DATASET == 'UCF50':
    __C.EXP_NAME += '_' + str(__C.VAL_INDEX)

if __C.DATASET == 'GCC':
    __C.EXP_NAME += '_' + __C.VAL_MODE

__C.EXP_PATH = './exp'  # the path of logs, checkpoints, and current codes

#------------------------------VAL------------------------
__C.VAL_DENSE_START = 200
__C.VAL_FREQ = 10  # Before __C.VAL_DENSE_START epochs, the freq is set as __C.VAL_FREQ

#------------------------------VIS------------------------
__C.VISIBLE_NUM_IMGS = 1  # must be 1 for training images with different sizes

#================================================================================
#================================================================================
#================================================================================ | 31.2 | 113 | 0.551709 |
b6e0e44507ace462620a359b07cd5033fd53ecb0 | 391 | py | Python | chatBot/asgi.py | Anik-Bardhan/ChatBot | e503a795d695512cee59b62fa535a19501ae220c | [
"MIT"
] | null | null | null | chatBot/asgi.py | Anik-Bardhan/ChatBot | e503a795d695512cee59b62fa535a19501ae220c | [
"MIT"
] | null | null | null | chatBot/asgi.py | Anik-Bardhan/ChatBot | e503a795d695512cee59b62fa535a19501ae220c | [
"MIT"
] | null | null | null | """
ASGI config for chatBot project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'chatBot.settings')
application = get_asgi_application()
| 23 | 78 | 0.785166 |
29ffb852e04c37a2b16286cd30719037b0ff39f4 | 4,853 | py | Python | tests/algorithms/test_objective_functions.py | mihailescum/poisson_learning | d282e527ea001f3762db0c795f3bca9c9a0807f4 | [
"MIT"
] | 1 | 2022-01-13T12:31:39.000Z | 2022-01-13T12:31:39.000Z | tests/algorithms/test_objective_functions.py | mihailescum/poisson_learning | d282e527ea001f3762db0c795f3bca9c9a0807f4 | [
"MIT"
] | null | null | null | tests/algorithms/test_objective_functions.py | mihailescum/poisson_learning | d282e527ea001f3762db0c795f3bca9c9a0807f4 | [
"MIT"
] | null | null | null | import numpy as np
import scipy.sparse as spsparse
import pytest
import numpy.testing as npt
from poissonlearning.objective_functions import (
objective_p_laplace,
objective_p_laplace_gradient,
objective_weighted_mean,
objective_weighted_mean_gradient,
)
# Cases share the same symmetric sparse weight matrix W; they vary u
# (zero vector vs. a mixed-sign vector) and the exponent p (2 and 4).
@pytest.mark.parametrize(
    "u, W, b, p",
    [
        (
            np.array([0.0, 0.0, 0.0, 0.0]),
            spsparse.csr_matrix(
                np.array(
                    [
                        [0.0, 0.5, 0.0, 0.0],
                        [0.5, 0.0, 1.0, 0.0],
                        [0.0, 1.0, 0.0, 1.0],
                        [0.0, 0.0, 0.5, 0.0],
                    ]
                )
            ),
            np.array([1.0, 0.0, 0.0, 1.0]),
            2,
        ),
        (
            np.array([-2.0, 0.0, 1.0, -1.0]),
            spsparse.csr_matrix(
                np.array(
                    [
                        [0.0, 0.5, 0.0, 0.0],
                        [0.5, 0.0, 1.0, 0.0],
                        [0.0, 1.0, 0.0, 1.0],
                        [0.0, 0.0, 0.5, 0.0],
                    ]
                )
            ),
            np.array([1.0, 0.0, 0.0, 1.0]),
            2,
        ),
        (
            np.array([-2.0, 0.0, 1.0, -1.0]),
            spsparse.csr_matrix(
                np.array(
                    [
                        [0.0, 0.5, 0.0, 0.0],
                        [0.5, 0.0, 1.0, 0.0],
                        [0.0, 1.0, 0.0, 1.0],
                        [0.0, 0.0, 0.5, 0.0],
                    ]
                )
            ),
            np.array([1.0, 0.0, 0.0, 1.0]),
            4,
        ),
    ],
)
def test_objective_p_laplace(u, W, b, p):
    """Compare objective_p_laplace against a brute-force double loop."""
    n = u.size
    # Reference value: sum_ij W_ij |u_i - u_j|^p / (2p)  -  sum_i b_i u_i
    expected = 0.0
    for i in range(n):
        for j in range(n):
            expected += 0.5 / p * W[i, j] * np.abs(u[i] - u[j]) ** p
        expected -= b[i] * u[i]

    output = objective_p_laplace(u, W, b, p)
    npt.assert_allclose(expected, output)
# Same weight matrix as above; the last case also varies b and uses p=4.
@pytest.mark.parametrize(
    "u, W, b, p",
    [
        (
            np.array([0.0, 0.0, 0.0, 0.0]),
            spsparse.csr_matrix(
                np.array(
                    [
                        [0.0, 0.5, 0.0, 0.0],
                        [0.5, 0.0, 1.0, 0.0],
                        [0.0, 1.0, 0.0, 1.0],
                        [0.0, 0.0, 0.5, 0.0],
                    ]
                )
            ),
            np.array([1.0, 0.0, 0.0, 1.0]),
            2,
        ),
        (
            np.array([-2.0, 0.0, 1.0, -1.0]),
            spsparse.csr_matrix(
                np.array(
                    [
                        [0.0, 0.5, 0.0, 0.0],
                        [0.5, 0.0, 1.0, 0.0],
                        [0.0, 1.0, 0.0, 1.0],
                        [0.0, 0.0, 0.5, 0.0],
                    ]
                )
            ),
            np.array([1.0, 0.0, 0.0, 1.0]),
            2,
        ),
        (
            np.array([-2.0, 0.0, 1.0, -1.0]),
            spsparse.csr_matrix(
                np.array(
                    [
                        [0.0, 0.5, 0.0, 0.0],
                        [0.5, 0.0, 1.0, 0.0],
                        [0.0, 1.0, 0.0, 1.0],
                        [0.0, 0.0, 0.5, 0.0],
                    ]
                )
            ),
            np.array([1.0, 0.0, 0.0, 0.5]),
            4,
        ),
    ],
)
def test_objective_p_laplace_gradient(u, W, b, p):
    """Compare objective_p_laplace_gradient against a brute-force loop."""
    n = u.size
    # Reference gradient: grad_i = sum_j W_ij |u_i - u_j|^(p-2) (u_i - u_j) - b_i
    expected = np.zeros(n)
    for i in range(n):
        for j in range(n):
            expected[i] += W[i, j] * np.abs(u[i] - u[j]) ** (p - 2) * (u[i] - u[j])
        expected[i] -= b[i]

    output = objective_p_laplace_gradient(u, W, b, p)
    npt.assert_allclose(expected, output)
@pytest.mark.parametrize(
    "u, D, expected",
    [
        # All-zero u: the weighted sum vanishes.
        (np.zeros(4), np.array([1.0, 2.0, 2.0, 1.0]), 0.0),
        # All-ones u: the objective equals the sum of the weights.
        (np.ones(4), np.array([1.0, 2.0, 2.0, 1.0]), 6.0),
        # Mixed signs: 1*1 + (-1)*2 + 0 + 0 = -1.
        (np.array([1.0, -1.0, 0.0, 0.0]), np.array([1.0, 2.0, 2.0, 1.0]), -1.0),
    ],
)
def test_objective_weighted_mean(u, D, expected):
    """objective_weighted_mean must return the D-weighted sum of u."""
    result = objective_weighted_mean(u, D)
    npt.assert_allclose(expected, result)
@pytest.mark.parametrize(
    "u, D, expected",
    [
        # The gradient of the weighted sum w.r.t. u is D itself,
        # independent of u, so every case expects D back.
        (
            np.zeros(4),
            np.array([1.0, 2.0, 2.0, 1.0]),
            np.array([1.0, 2.0, 2.0, 1.0]),
        ),
        (
            np.ones(4),
            np.array([1.0, 2.0, 2.0, 1.0]),
            np.array([1.0, 2.0, 2.0, 1.0]),
        ),
        (
            np.array([1.0, -1.0, 0.0, 0.0]),
            np.array([1.0, 2.0, 2.0, 1.0]),
            np.array([1.0, 2.0, 2.0, 1.0]),
        ),
    ],
)
def test_objective_weighted_mean_gradient(u, D, expected):
    """The gradient must equal the weight vector D for any u."""
    result = objective_weighted_mean_gradient(u, D)
    npt.assert_allclose(expected, result)
| 27.418079 | 83 | 0.34659 |
1e6fbbc019923e967992e7632ff90d4d14be4ee8 | 4,491 | py | Python | examples/cluster/plot_kmeans_digits.py | NickVeld/scikit-learn-proj | 9694a5641a7abbec96c93817aed88ce827dbacd3 | [
"BSD-3-Clause"
] | 1 | 2021-11-26T12:22:13.000Z | 2021-11-26T12:22:13.000Z | examples/cluster/plot_kmeans_digits.py | NickVeld/scikit-learn-proj | 9694a5641a7abbec96c93817aed88ce827dbacd3 | [
"BSD-3-Clause"
] | null | null | null | examples/cluster/plot_kmeans_digits.py | NickVeld/scikit-learn-proj | 9694a5641a7abbec96c93817aed88ce827dbacd3 | [
"BSD-3-Clause"
] | null | null | null | """
===========================================================
A demo of K-Means clustering on the handwritten digits data
===========================================================
In this example we compare the various initialization strategies for
K-means in terms of runtime and quality of the results.
As the ground truth is known here, we also apply different cluster
quality metrics to judge the goodness of fit of the cluster labels to the
ground truth.
Cluster quality metrics evaluated (see :ref:`clustering_evaluation` for
definitions and discussions of the metrics):
=========== ========================================================
Shorthand full name
=========== ========================================================
homo homogeneity score
compl completeness score
v-meas V measure
ARI adjusted Rand index
AMI adjusted mutual information
silhouette silhouette coefficient
=========== ========================================================
"""
print(__doc__)

from time import time

import numpy as np
import matplotlib.pyplot as plt

from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale

np.random.seed(42)  # fixed seed so runs are reproducible

# Load the handwritten-digits dataset and standardize each feature.
digits = load_digits()
data = scale(digits.data)

n_samples, n_features = data.shape
n_digits = len(np.unique(digits.target))  # number of distinct target classes
labels = digits.target

# Number of samples used when estimating the silhouette coefficient.
sample_size = 300

print("n_digits: %d, \t n_samples %d, \t n_features %d"
      % (n_digits, n_samples, n_features))

# Header for the benchmark table printed by bench_k_means.
print(82 * '_')
print('init\t\ttime\tinertia\thomo\tcompl\tv-meas\tARI\tAMI\tsilhouette')
def bench_k_means(estimator, name, data):
    """Fit *estimator* on *data* and print one benchmark table row.

    The row contains the init name, fit time, inertia and the cluster
    quality metrics computed against the module-level ground-truth
    ``labels`` (silhouette is estimated on ``sample_size`` samples).
    """
    start = time()
    estimator.fit(data)
    elapsed = time() - start

    fitted = estimator.labels_
    scores = (
        metrics.homogeneity_score(labels, fitted),
        metrics.completeness_score(labels, fitted),
        metrics.v_measure_score(labels, fitted),
        metrics.adjusted_rand_score(labels, fitted),
        metrics.adjusted_mutual_info_score(labels, fitted),
        metrics.silhouette_score(data, fitted,
                                 metric='euclidean',
                                 sample_size=sample_size),
    )
    print('%-9s\t%.2fs\t%i\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f'
          % ((name, elapsed, estimator.inertia_) + scores))
# Benchmark the three initialization strategies on the full feature space.
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
              name="k-means++", data=data)

bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
              name="random", data=data)

# in this case the seeding of the centers is deterministic, hence we run the
# kmeans algorithm only once with n_init=1
pca = PCA(n_components=n_digits).fit(data)
bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),
              name="PCA-based",
              data=data)
print(82 * '_')

###############################################################################
# Visualize the results on PCA-reduced data

reduced_data = PCA(n_components=2).fit_transform(data)
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)

# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02  # point in the mesh [x_min, x_max]x[y_min, y_max].

# Plot the decision boundary. For that, we will assign a color to each
# point of a mesh spanning the reduced data (with a 1-unit margin).
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))

# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])

# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
           extent=(xx.min(), xx.max(), yy.min(), yy.max()),
           cmap=plt.cm.Paired,
           aspect='auto', origin='lower')

# Overlay the (PCA-reduced) data points on the decision regions.
plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
            marker='x', s=169, linewidths=3,
            color='w', zorder=10)
plt.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
          'Centroids are marked with white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
| 35.362205 | 79 | 0.623914 |
cf6a97ece9025fe2a0f92440e48b582c8e44eaaf | 152 | py | Python | x86/Chapter1/Chapter1-printf.py | dunker4o/gray-hat-code | 309a2a746acdf79832c792b3c0b8d9d3f59b9cf0 | [
"Unlicense"
] | null | null | null | x86/Chapter1/Chapter1-printf.py | dunker4o/gray-hat-code | 309a2a746acdf79832c792b3c0b8d9d3f59b9cf0 | [
"Unlicense"
] | null | null | null | x86/Chapter1/Chapter1-printf.py | dunker4o/gray-hat-code | 309a2a746acdf79832c792b3c0b8d9d3f59b9cf0 | [
"Unlicense"
] | null | null | null | from ctypes import *
msvcrt = cdll.msvcrt
message_string = "Hello, Gray Hat Python!\n"
msvcrt.printf("A message has been received: %s", message_string) | 30.4 | 64 | 0.756579 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.