import numpy as np
import os
import subprocess
import sys
from setuptools import find_packages, setup
from setuptools.command.build_ext import build_ext
from Cython.Build import cythonize
is_posix = (os.name == "posix")
if is_posix:
os_name = subprocess.check_output("uname").decode("utf8")
if "Darwin" in os_name:
os.environ["CFLAGS"] = "-stdlib=libc++ -std=c++11"
else:
os.environ["CFLAGS"] = "-std=c++11"
if os.environ.get('WITHOUT_CYTHON_OPTIMIZATIONS'):
os.environ["CFLAGS"] += " -O0"
# Avoid a gcc warning below:
# cc1plus: warning: command line option ‘-Wstrict-prototypes’ is valid
# for C/ObjC but not for C++
class BuildExt(build_ext):
def build_extensions(self):
if os.name != "nt" and '-Wstrict-prototypes' in self.compiler.compiler_so:
self.compiler.compiler_so.remove('-Wstrict-prototypes')
super().build_extensions()
def main():
cpu_count = os.cpu_count() or 8
version = "20211201"
packages = find_packages(include=["hummingbot", "hummingbot.*"])
package_data = {
"hummingbot": [
"core/cpp/*",
"VERSION",
"templates/*TEMPLATE.yml"
],
}
install_requires = [
"0x-contract-addresses",
"0x-contract-wrappers",
"0x-order-utils",
"aioconsole",
"aiohttp",
"aiokafka",
"appdirs",
"appnope"
"sync-timeout",
"cachetools",
"certifi",
"cryptography",
"cython",
"cytoolz",
"diff-cover",
"dydx-python",
"dydx-v3-python",
"eth-abi",
"eth-account",
"eth-bloom",
"eth-keyfile",
"eth-typing",
"eth-utils",
"ethsnarks-loopring",
"flake8",
"hexbytes",
"importlib-metadata",
"mypy-extensions",
"numpy",
"pandas",
"pip",
"pre-commit",
"prompt-toolkit",
"psutil",
"pyjwt",
"pyperclip",
"python-binance==0.7.5",
"python-dateutil"
"python-telegram-bot",
"requests",
"rsa",
"ruamel-yaml",
"scipy",
"signalr-client-aio",
"simplejson",
"six",
"sqlalchemy",
"tzlocal",
"ujson",
"web3",
"websockets",
"yarl",
]
cython_kwargs = {
"language": "c++",
"language_level": 3,
}
cython_sources = ["hummingbot/**/*.pyx"]
if os.path.exists('test'):
cython_sources.append("test/**/*.pyx")
if os.environ.get('WITHOUT_CYTHON_OPTIMIZATIONS'):
compiler_directives = {
"optimize.use_switch": False,
"optimize.unpack_method_calls": False,
}
else:
compiler_directives = {}
if is_posix:
cython_kwargs["nthreads"] = cpu_count
if "DEV_MODE" in os.environ:
version += ".dev1"
package_data[""] = [
"*.pxd", "*.pyx", "*.h"
]
package_data["hummingbot"].append("core/cpp/*.cpp")
if len(sys.argv) > 1 and sys.argv[1] == "build_ext" and is_posix:
sys.argv.append(f"--parallel={cpu_count}")
setup(name="hummingbot",
version=version,
description="Hummingbot",
url="https://github.com/CoinAlpha/hummingbot",
author="CoinAlpha, Inc.",
author_email="dev@hummingbot.io",
license="Apache 2.0",
packages=packages,
package_data=package_data,
install_requires=install_requires,
ext_modules=cythonize(cython_sources, compiler_directives=compiler_directives, **cython_kwargs),
include_dirs=[
np.get_include()
],
scripts=[
"bin/hummingbot.py",
"bin/hummingbot_quickstart.py"
],
cmdclass={'build_ext': BuildExt},
)
if __name__ == "__main__":
main()
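# A hedged usage note (not part of the original script): with this setup module the
# Cython extensions are typically compiled in place before running the bot, e.g.:
#
#     python setup.py build_ext --inplace
#
# Exporting WITHOUT_CYTHON_OPTIMIZATIONS=1 beforehand yields an -O0 build with the
# switch/method-call optimizations disabled, as handled above.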
|
from simplesoccer.mini_env_states import SoccerStates
class EvalPolicy:
def compute_actions(self, states: SoccerStates):
raise NotImplementedError()
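# A minimal illustrative sketch (hypothetical, not part of simplesoccer): a concrete
# policy only needs to override compute_actions(). The class name, the discrete action
# space, and the use of len(states) as a batch size are all assumptions for the example.
#
#     import numpy as np
#
#     class UniformRandomPolicy(EvalPolicy):
#         NUM_ACTIONS = 8  # assumed size of the discrete action space
#
#         def compute_actions(self, states: SoccerStates):
#             # One random action index per environment in the batch.
#             return np.random.randint(self.NUM_ACTIONS, size=len(states))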
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from .. import models
class IntModelOperations(object):
"""IntModelOperations operations.
You should not instantiate this class directly; instead, create a Client instance, which will create it for you and attach it as an attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def get_null(
self, custom_headers=None, raw=False, **operation_config):
"""Get null Int value.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: int or ClientRawResponse if raw=true
:rtype: int or ~msrest.pipeline.ClientRawResponse
:raises: :class:`ErrorException<bodyinteger.models.ErrorException>`
"""
# Construct URL
url = self.get_null.metadata['url']
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('int', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_null.metadata = {'url': '/int/null'}
def get_invalid(
self, custom_headers=None, raw=False, **operation_config):
"""Get invalid Int value.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: int or ClientRawResponse if raw=true
:rtype: int or ~msrest.pipeline.ClientRawResponse
:raises: :class:`ErrorException<bodyinteger.models.ErrorException>`
"""
# Construct URL
url = self.get_invalid.metadata['url']
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('int', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_invalid.metadata = {'url': '/int/invalid'}
def get_overflow_int32(
self, custom_headers=None, raw=False, **operation_config):
"""Get overflow Int32 value.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: int or ClientRawResponse if raw=true
:rtype: int or ~msrest.pipeline.ClientRawResponse
:raises: :class:`ErrorException<bodyinteger.models.ErrorException>`
"""
# Construct URL
url = self.get_overflow_int32.metadata['url']
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('int', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_overflow_int32.metadata = {'url': '/int/overflowint32'}
def get_underflow_int32(
self, custom_headers=None, raw=False, **operation_config):
"""Get underflow Int32 value.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: int or ClientRawResponse if raw=true
:rtype: int or ~msrest.pipeline.ClientRawResponse
:raises: :class:`ErrorException<bodyinteger.models.ErrorException>`
"""
# Construct URL
url = self.get_underflow_int32.metadata['url']
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('int', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_underflow_int32.metadata = {'url': '/int/underflowint32'}
def get_overflow_int64(
self, custom_headers=None, raw=False, **operation_config):
"""Get overflow Int64 value.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: long or ClientRawResponse if raw=true
:rtype: long or ~msrest.pipeline.ClientRawResponse
:raises: :class:`ErrorException<bodyinteger.models.ErrorException>`
"""
# Construct URL
url = self.get_overflow_int64.metadata['url']
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('long', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_overflow_int64.metadata = {'url': '/int/overflowint64'}
def get_underflow_int64(
self, custom_headers=None, raw=False, **operation_config):
"""Get underflow Int64 value.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: long or ClientRawResponse if raw=true
:rtype: long or ~msrest.pipeline.ClientRawResponse
:raises: :class:`ErrorException<bodyinteger.models.ErrorException>`
"""
# Construct URL
url = self.get_underflow_int64.metadata['url']
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('long', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_underflow_int64.metadata = {'url': '/int/underflowint64'}
def put_max32(
self, int_body, custom_headers=None, raw=False, **operation_config):
"""Put max int32 value.
:param int_body:
:type int_body: int
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises: :class:`ErrorException<bodyinteger.models.ErrorException>`
"""
# Construct URL
url = self.put_max32.metadata['url']
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(int_body, 'int')
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
put_max32.metadata = {'url': '/int/max/32'}
def put_max64(
self, int_body, custom_headers=None, raw=False, **operation_config):
"""Put max int64 value.
:param int_body:
:type int_body: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises: :class:`ErrorException<bodyinteger.models.ErrorException>`
"""
# Construct URL
url = self.put_max64.metadata['url']
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(int_body, 'long')
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
put_max64.metadata = {'url': '/int/max/64'}
def put_min32(
self, int_body, custom_headers=None, raw=False, **operation_config):
"""Put min int32 value.
:param int_body:
:type int_body: int
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises: :class:`ErrorException<bodyinteger.models.ErrorException>`
"""
# Construct URL
url = self.put_min32.metadata['url']
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(int_body, 'int')
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
put_min32.metadata = {'url': '/int/min/32'}
def put_min64(
self, int_body, custom_headers=None, raw=False, **operation_config):
"""Put min int64 value.
:param int_body:
:type int_body: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises: :class:`ErrorException<bodyinteger.models.ErrorException>`
"""
# Construct URL
url = self.put_min64.metadata['url']
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(int_body, 'long')
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
put_min64.metadata = {'url': '/int/min/64'}
def get_unix_time(
self, custom_headers=None, raw=False, **operation_config):
"""Get datetime encoded as Unix time value.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: datetime or ClientRawResponse if raw=true
:rtype: datetime or ~msrest.pipeline.ClientRawResponse
:raises: :class:`ErrorException<bodyinteger.models.ErrorException>`
"""
# Construct URL
url = self.get_unix_time.metadata['url']
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('unix-time', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_unix_time.metadata = {'url': '/int/unixtime'}
def put_unix_time_date(
self, int_body, custom_headers=None, raw=False, **operation_config):
"""Put datetime encoded as Unix time.
:param int_body:
:type int_body: datetime
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises: :class:`ErrorException<bodyinteger.models.ErrorException>`
"""
# Construct URL
url = self.put_unix_time_date.metadata['url']
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(int_body, 'unix-time')
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
put_unix_time_date.metadata = {'url': '/int/unixtime'}
def get_invalid_unix_time(
self, custom_headers=None, raw=False, **operation_config):
"""Get invalid Unix time value.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: datetime or ClientRawResponse if raw=true
:rtype: datetime or ~msrest.pipeline.ClientRawResponse
:raises: :class:`ErrorException<bodyinteger.models.ErrorException>`
"""
# Construct URL
url = self.get_invalid_unix_time.metadata['url']
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('unix-time', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_invalid_unix_time.metadata = {'url': '/int/invalidunixtime'}
def get_null_unix_time(
self, custom_headers=None, raw=False, **operation_config):
"""Get null Unix time value.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: datetime or ClientRawResponse if raw=true
:rtype: datetime or ~msrest.pipeline.ClientRawResponse
:raises: :class:`ErrorException<bodyinteger.models.ErrorException>`
"""
# Construct URL
url = self.get_null_unix_time.metadata['url']
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('unix-time', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get_null_unix_time.metadata = {'url': '/int/nullunixtime'}
|
#-----------------------------------------------------------------------------
# Copyright (c) 2005-2020, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
from PyInstaller.utils.hooks import collect_data_files, collect_submodules
# Pendulum checks for locale modules via os.path.exists before import.
# If the include_py_files option is turned off, this check fails and pendulum
# raises a ValueError.
datas = collect_data_files("pendulum.locales", include_py_files=True)
hiddenimports = collect_submodules("pendulum.locales")
|
#!/usr/bin/env python3
# Copyright (c) 2018-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test blinkhash-wallet."""
import hashlib
import os
import stat
import subprocess
import textwrap
from collections import OrderedDict
from test_framework.test_framework import BlinkhashTestFramework
from test_framework.util import assert_equal
BUFFER_SIZE = 16 * 1024
class ToolWalletTest(BlinkhashTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.rpc_timeout = 120
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
self.skip_if_no_wallet_tool()
def blinkhash_wallet_process(self, *args):
binary = self.config["environment"]["BUILDDIR"] + '/src/blinkhash-wallet' + self.config["environment"]["EXEEXT"]
default_args = ['-datadir={}'.format(self.nodes[0].datadir), '-chain=%s' % self.chain]
if self.options.descriptors and 'create' in args:
default_args.append('-descriptors')
return subprocess.Popen([binary] + default_args + list(args), stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
def assert_raises_tool_error(self, error, *args):
p = self.blinkhash_wallet_process(*args)
stdout, stderr = p.communicate()
assert_equal(p.poll(), 1)
assert_equal(stdout, '')
assert_equal(stderr.strip(), error)
def assert_tool_output(self, output, *args):
p = self.blinkhash_wallet_process(*args)
stdout, stderr = p.communicate()
assert_equal(stderr, '')
assert_equal(stdout, output)
assert_equal(p.poll(), 0)
def wallet_shasum(self):
h = hashlib.sha1()
mv = memoryview(bytearray(BUFFER_SIZE))
with open(self.wallet_path, 'rb', buffering=0) as f:
for n in iter(lambda: f.readinto(mv), 0):
h.update(mv[:n])
return h.hexdigest()
def wallet_timestamp(self):
return os.path.getmtime(self.wallet_path)
def wallet_permissions(self):
return oct(os.lstat(self.wallet_path).st_mode)[-3:]
def log_wallet_timestamp_comparison(self, old, new):
result = 'unchanged' if new == old else 'increased!'
self.log.debug('Wallet file timestamp {}'.format(result))
def get_expected_info_output(self, name="", transactions=0, keypool=2, address=0):
wallet_name = self.default_wallet_name if name == "" else name
output_types = 3 # p2pkh, p2sh, segwit
if self.options.descriptors:
return textwrap.dedent('''\
Wallet info
===========
Name: %s
Format: sqlite
Descriptors: yes
Encrypted: no
HD (hd seed available): yes
Keypool Size: %d
Transactions: %d
Address Book: %d
''' % (wallet_name, keypool * output_types, transactions, address))
else:
return textwrap.dedent('''\
Wallet info
===========
Name: %s
Format: bdb
Descriptors: no
Encrypted: no
HD (hd seed available): yes
Keypool Size: %d
Transactions: %d
Address Book: %d
''' % (wallet_name, keypool, transactions, address * output_types))
def read_dump(self, filename):
dump = OrderedDict()
with open(filename, "r", encoding="utf8") as f:
for row in f:
row = row.strip()
key, value = row.split(',')
dump[key] = value
return dump
def assert_is_sqlite(self, filename):
with open(filename, 'rb') as f:
file_magic = f.read(16)
assert file_magic == b'SQLite format 3\x00'
def assert_is_bdb(self, filename):
with open(filename, 'rb') as f:
f.seek(12, 0)
file_magic = f.read(4)
assert file_magic == b'\x00\x05\x31\x62' or file_magic == b'\x62\x31\x05\x00'
def write_dump(self, dump, filename, magic=None, skip_checksum=False):
if magic is None:
magic = "BLINKHASH_CORE_WALLET_DUMP"
with open(filename, "w", encoding="utf8") as f:
row = ",".join([magic, dump[magic]]) + "\n"
f.write(row)
for k, v in dump.items():
if k == magic or k == "checksum":
continue
row = ",".join([k, v]) + "\n"
f.write(row)
if not skip_checksum:
row = ",".join(["checksum", dump["checksum"]]) + "\n"
f.write(row)
def assert_dump(self, expected, received):
e = expected.copy()
r = received.copy()
# BDB will add a "version" record that is not present in sqlite
# In that case, we should ignore this record in both
# But because this also affects the checksum, we also need to drop that.
v_key = "0776657273696f6e" # Version key
if v_key in e and v_key not in r:
del e[v_key]
del e["checksum"]
del r["checksum"]
if v_key not in e and v_key in r:
del r[v_key]
del e["checksum"]
del r["checksum"]
assert_equal(len(e), len(r))
for k, v in e.items():
assert_equal(v, r[k])
def do_tool_createfromdump(self, wallet_name, dumpfile, file_format=None):
dumppath = os.path.join(self.nodes[0].datadir, dumpfile)
rt_dumppath = os.path.join(self.nodes[0].datadir, "rt-{}.dump".format(wallet_name))
dump_data = self.read_dump(dumppath)
args = ["-wallet={}".format(wallet_name),
"-dumpfile={}".format(dumppath)]
if file_format is not None:
args.append("-format={}".format(file_format))
args.append("createfromdump")
load_output = ""
if file_format is not None and file_format != dump_data["format"]:
load_output += "Warning: Dumpfile wallet format \"{}\" does not match command line specified format \"{}\".\n".format(dump_data["format"], file_format)
self.assert_tool_output(load_output, *args)
assert os.path.isdir(os.path.join(self.nodes[0].datadir, "regtest/wallets", wallet_name))
self.assert_tool_output("The dumpfile may contain private keys. To ensure the safety of your Blinkhash, do not share the dumpfile.\n", '-wallet={}'.format(wallet_name), '-dumpfile={}'.format(rt_dumppath), 'dump')
rt_dump_data = self.read_dump(rt_dumppath)
wallet_dat = os.path.join(self.nodes[0].datadir, "regtest/wallets/", wallet_name, "wallet.dat")
if rt_dump_data["format"] == "bdb":
self.assert_is_bdb(wallet_dat)
else:
self.assert_is_sqlite(wallet_dat)
def test_invalid_tool_commands_and_args(self):
self.log.info('Testing that various invalid commands raise with specific error messages')
self.assert_raises_tool_error("Error parsing command line arguments: Invalid command 'foo'", 'foo')
# `blinkhash-wallet help` raises an error. Use `blinkhash-wallet -help`.
self.assert_raises_tool_error("Error parsing command line arguments: Invalid command 'help'", 'help')
self.assert_raises_tool_error('Error: Additional arguments provided (create). Methods do not take arguments. Please refer to `-help`.', 'info', 'create')
self.assert_raises_tool_error('Error parsing command line arguments: Invalid parameter -foo', '-foo')
self.assert_raises_tool_error('No method provided. Run `blinkhash-wallet -help` for valid methods.')
self.assert_raises_tool_error('Wallet name must be provided when creating a new wallet.', 'create')
locked_dir = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets")
error = 'Error initializing wallet database environment "{}"!'.format(locked_dir)
if self.options.descriptors:
error = "SQLiteDatabase: Unable to obtain an exclusive lock on the database, is it being used by another blinkhashd?"
self.assert_raises_tool_error(
error,
'-wallet=' + self.default_wallet_name,
'info',
)
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "nonexistent.dat")
self.assert_raises_tool_error("Failed to load database path '{}'. Path does not exist.".format(path), '-wallet=nonexistent.dat', 'info')
def test_tool_wallet_info(self):
# Stop the node to close the wallet to call the info command.
self.stop_node(0)
self.log.info('Calling wallet tool info, testing output')
#
# TODO: Wallet tool info should work with wallet file permissions set to
# read-only without raising:
# "Error loading wallet.dat. Is wallet being used by another process?"
# The following lines should be uncommented and the tests still succeed:
#
# self.log.debug('Setting wallet file permissions to 400 (read-only)')
# os.chmod(self.wallet_path, stat.S_IRUSR)
# assert self.wallet_permissions() in ['400', '666'] # Sanity check. 666 because Appveyor.
# shasum_before = self.wallet_shasum()
timestamp_before = self.wallet_timestamp()
self.log.debug('Wallet file timestamp before calling info: {}'.format(timestamp_before))
out = self.get_expected_info_output(address=1)
self.assert_tool_output(out, '-wallet=' + self.default_wallet_name, 'info')
timestamp_after = self.wallet_timestamp()
self.log.debug('Wallet file timestamp after calling info: {}'.format(timestamp_after))
self.log_wallet_timestamp_comparison(timestamp_before, timestamp_after)
self.log.debug('Setting wallet file permissions back to 600 (read/write)')
os.chmod(self.wallet_path, stat.S_IRUSR | stat.S_IWUSR)
assert self.wallet_permissions() in ['600', '666'] # Sanity check. 666 because Appveyor.
#
# TODO: Wallet tool info should not write to the wallet file.
# The following lines should be uncommented and the tests still succeed:
#
# assert_equal(timestamp_before, timestamp_after)
# shasum_after = self.wallet_shasum()
# assert_equal(shasum_before, shasum_after)
# self.log.debug('Wallet file shasum unchanged\n')
def test_tool_wallet_info_after_transaction(self):
"""
Mutate the wallet with a transaction to verify that the info command
output changes accordingly.
"""
self.start_node(0)
self.log.info('Generating transaction to mutate wallet')
self.nodes[0].generate(1)
self.stop_node(0)
self.log.info('Calling wallet tool info after generating a transaction, testing output')
shasum_before = self.wallet_shasum()
timestamp_before = self.wallet_timestamp()
self.log.debug('Wallet file timestamp before calling info: {}'.format(timestamp_before))
out = self.get_expected_info_output(transactions=1, address=1)
self.assert_tool_output(out, '-wallet=' + self.default_wallet_name, 'info')
shasum_after = self.wallet_shasum()
timestamp_after = self.wallet_timestamp()
self.log.debug('Wallet file timestamp after calling info: {}'.format(timestamp_after))
self.log_wallet_timestamp_comparison(timestamp_before, timestamp_after)
#
# TODO: Wallet tool info should not write to the wallet file.
# This assertion should be uncommented and succeed:
# assert_equal(timestamp_before, timestamp_after)
assert_equal(shasum_before, shasum_after)
self.log.debug('Wallet file shasum unchanged\n')
def test_tool_wallet_create_on_existing_wallet(self):
self.log.info('Calling wallet tool create on an existing wallet, testing output')
shasum_before = self.wallet_shasum()
timestamp_before = self.wallet_timestamp()
self.log.debug('Wallet file timestamp before calling create: {}'.format(timestamp_before))
out = "Topping up keypool...\n" + self.get_expected_info_output(name="foo", keypool=2000)
self.assert_tool_output(out, '-wallet=foo', 'create')
shasum_after = self.wallet_shasum()
timestamp_after = self.wallet_timestamp()
self.log.debug('Wallet file timestamp after calling create: {}'.format(timestamp_after))
self.log_wallet_timestamp_comparison(timestamp_before, timestamp_after)
assert_equal(timestamp_before, timestamp_after)
assert_equal(shasum_before, shasum_after)
self.log.debug('Wallet file shasum unchanged\n')
def test_getwalletinfo_on_different_wallet(self):
self.log.info('Starting node with arg -wallet=foo')
self.start_node(0, ['-nowallet', '-wallet=foo'])
self.log.info('Calling getwalletinfo on a different wallet ("foo"), testing output')
shasum_before = self.wallet_shasum()
timestamp_before = self.wallet_timestamp()
self.log.debug('Wallet file timestamp before calling getwalletinfo: {}'.format(timestamp_before))
out = self.nodes[0].getwalletinfo()
self.stop_node(0)
shasum_after = self.wallet_shasum()
timestamp_after = self.wallet_timestamp()
self.log.debug('Wallet file timestamp after calling getwalletinfo: {}'.format(timestamp_after))
assert_equal(0, out['txcount'])
if not self.options.descriptors:
assert_equal(1000, out['keypoolsize'])
assert_equal(1000, out['keypoolsize_hd_internal'])
assert_equal(True, 'hdseedid' in out)
else:
assert_equal(3000, out['keypoolsize'])
assert_equal(3000, out['keypoolsize_hd_internal'])
self.log_wallet_timestamp_comparison(timestamp_before, timestamp_after)
assert_equal(timestamp_before, timestamp_after)
assert_equal(shasum_after, shasum_before)
self.log.debug('Wallet file shasum unchanged\n')
def test_salvage(self):
# TODO: Check salvage actually salvages and doesn't break things. https://github.com/bitcoin/bitcoin/issues/7463
self.log.info('Check salvage')
self.start_node(0)
self.nodes[0].createwallet("salvage")
self.stop_node(0)
self.assert_tool_output('', '-wallet=salvage', 'salvage')
def test_dump_createfromdump(self):
self.start_node(0)
self.nodes[0].createwallet("todump")
file_format = self.nodes[0].get_wallet_rpc("todump").getwalletinfo()["format"]
self.nodes[0].createwallet("todump2")
self.stop_node(0)
self.log.info('Checking dump arguments')
self.assert_raises_tool_error('No dump file provided. To use dump, -dumpfile=<filename> must be provided.', '-wallet=todump', 'dump')
self.log.info('Checking basic dump')
wallet_dump = os.path.join(self.nodes[0].datadir, "wallet.dump")
self.assert_tool_output('The dumpfile may contain private keys. To ensure the safety of your Blinkhash, do not share the dumpfile.\n', '-wallet=todump', '-dumpfile={}'.format(wallet_dump), 'dump')
dump_data = self.read_dump(wallet_dump)
orig_dump = dump_data.copy()
# Check the dump magic
assert_equal(dump_data['BLINKHASH_CORE_WALLET_DUMP'], '1')
# Check the file format
assert_equal(dump_data["format"], file_format)
self.log.info('Checking that a dumpfile cannot be overwritten')
self.assert_raises_tool_error('File {} already exists. If you are sure this is what you want, move it out of the way first.'.format(wallet_dump), '-wallet=todump2', '-dumpfile={}'.format(wallet_dump), 'dump')
self.log.info('Checking createfromdump arguments')
self.assert_raises_tool_error('No dump file provided. To use createfromdump, -dumpfile=<filename> must be provided.', '-wallet=todump', 'createfromdump')
non_exist_dump = os.path.join(self.nodes[0].datadir, "wallet.nodump")
self.assert_raises_tool_error('Unknown wallet file format "notaformat" provided. Please provide one of "bdb" or "sqlite".', '-wallet=todump', '-format=notaformat', '-dumpfile={}'.format(wallet_dump), 'createfromdump')
self.assert_raises_tool_error('Dump file {} does not exist.'.format(non_exist_dump), '-wallet=todump', '-dumpfile={}'.format(non_exist_dump), 'createfromdump')
wallet_path = os.path.join(self.nodes[0].datadir, 'regtest/wallets/todump2')
self.assert_raises_tool_error('Failed to create database path \'{}\'. Database already exists.'.format(wallet_path), '-wallet=todump2', '-dumpfile={}'.format(wallet_dump), 'createfromdump')
self.assert_raises_tool_error("The -descriptors option can only be used with the 'create' command.", '-descriptors', '-wallet=todump2', '-dumpfile={}'.format(wallet_dump), 'createfromdump')
self.log.info('Checking createfromdump')
self.do_tool_createfromdump("load", "wallet.dump")
if self.is_bdb_compiled():
self.do_tool_createfromdump("load-bdb", "wallet.dump", "bdb")
if self.is_sqlite_compiled():
self.do_tool_createfromdump("load-sqlite", "wallet.dump", "sqlite")
self.log.info('Checking createfromdump handling of magic and versions')
bad_ver_wallet_dump = os.path.join(self.nodes[0].datadir, "wallet-bad_ver1.dump")
dump_data["BLINKHASH_CORE_WALLET_DUMP"] = "0"
self.write_dump(dump_data, bad_ver_wallet_dump)
self.assert_raises_tool_error('Error: Dumpfile version is not supported. This version of blinkhash-wallet only supports version 1 dumpfiles. Got dumpfile with version 0', '-wallet=badload', '-dumpfile={}'.format(bad_ver_wallet_dump), 'createfromdump')
assert not os.path.isdir(os.path.join(self.nodes[0].datadir, "regtest/wallets", "badload"))
bad_ver_wallet_dump = os.path.join(self.nodes[0].datadir, "wallet-bad_ver2.dump")
dump_data["BLINKHASH_CORE_WALLET_DUMP"] = "2"
self.write_dump(dump_data, bad_ver_wallet_dump)
self.assert_raises_tool_error('Error: Dumpfile version is not supported. This version of blinkhash-wallet only supports version 1 dumpfiles. Got dumpfile with version 2', '-wallet=badload', '-dumpfile={}'.format(bad_ver_wallet_dump), 'createfromdump')
assert not os.path.isdir(os.path.join(self.nodes[0].datadir, "regtest/wallets", "badload"))
bad_magic_wallet_dump = os.path.join(self.nodes[0].datadir, "wallet-bad_magic.dump")
del dump_data["BLINKHASH_CORE_WALLET_DUMP"]
dump_data["not_the_right_magic"] = "1"
self.write_dump(dump_data, bad_magic_wallet_dump, "not_the_right_magic")
self.assert_raises_tool_error('Error: Dumpfile identifier record is incorrect. Got "not_the_right_magic", expected "BLINKHASH_CORE_WALLET_DUMP".', '-wallet=badload', '-dumpfile={}'.format(bad_magic_wallet_dump), 'createfromdump')
assert not os.path.isdir(os.path.join(self.nodes[0].datadir, "regtest/wallets", "badload"))
self.log.info('Checking createfromdump handling of checksums')
bad_sum_wallet_dump = os.path.join(self.nodes[0].datadir, "wallet-bad_sum1.dump")
dump_data = orig_dump.copy()
checksum = dump_data["checksum"]
dump_data["checksum"] = "1" * 64
self.write_dump(dump_data, bad_sum_wallet_dump)
self.assert_raises_tool_error('Error: Dumpfile checksum does not match. Computed {}, expected {}'.format(checksum, "1" * 64), '-wallet=bad', '-dumpfile={}'.format(bad_sum_wallet_dump), 'createfromdump')
assert not os.path.isdir(os.path.join(self.nodes[0].datadir, "regtest/wallets", "badload"))
bad_sum_wallet_dump = os.path.join(self.nodes[0].datadir, "wallet-bad_sum2.dump")
del dump_data["checksum"]
self.write_dump(dump_data, bad_sum_wallet_dump, skip_checksum=True)
self.assert_raises_tool_error('Error: Missing checksum', '-wallet=badload', '-dumpfile={}'.format(bad_sum_wallet_dump), 'createfromdump')
assert not os.path.isdir(os.path.join(self.nodes[0].datadir, "regtest/wallets", "badload"))
bad_sum_wallet_dump = os.path.join(self.nodes[0].datadir, "wallet-bad_sum3.dump")
dump_data["checksum"] = "2" * 10
self.write_dump(dump_data, bad_sum_wallet_dump)
self.assert_raises_tool_error('Error: Dumpfile checksum does not match. Computed {}, expected {}{}'.format(checksum, "2" * 10, "0" * 54), '-wallet=badload', '-dumpfile={}'.format(bad_sum_wallet_dump), 'createfromdump')
assert not os.path.isdir(os.path.join(self.nodes[0].datadir, "regtest/wallets", "badload"))
def run_test(self):
self.wallet_path = os.path.join(self.nodes[0].datadir, self.chain, 'wallets', self.default_wallet_name, self.wallet_data_filename)
self.test_invalid_tool_commands_and_args()
# Warning: The following tests are order-dependent.
self.test_tool_wallet_info()
self.test_tool_wallet_info_after_transaction()
self.test_tool_wallet_create_on_existing_wallet()
self.test_getwalletinfo_on_different_wallet()
if not self.options.descriptors:
# Salvage is a legacy wallet only thing
self.test_salvage()
self.test_dump_createfromdump()
if __name__ == '__main__':
ToolWalletTest().main()
|
# -*- coding: utf-8 -*-
"""
Python Markdown
A Python implementation of John Gruber's Markdown.
Documentation: https://python-markdown.github.io/
GitHub: https://github.com/Python-Markdown/markdown/
PyPI: https://pypi.org/project/Markdown/
Started by Manfred Stienstra (http://www.dwerg.net/).
Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
Currently maintained by Waylan Limberg (https://github.com/waylan),
Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).
Copyright 2007-2018 The Python Markdown Project (v. 1.7 and later)
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
Copyright 2004 Manfred Stienstra (the original version)
License: BSD (see LICENSE.md for details).
INLINE PATTERNS
=============================================================================
Inline patterns such as *emphasis* are handled by means of auxiliary
objects, one per pattern. Pattern objects must be instances of classes
that extend markdown.Pattern. Each pattern object uses a single regular
expression and must support the following methods:
pattern.getCompiledRegExp() # returns a regular expression
pattern.handleMatch(m) # takes a match object and returns
# an ElementTree element or just plain text
All of python markdown's built-in patterns subclass from Pattern,
but you can add additional patterns that don't.
Also note that all the regular expressions used by the inline patterns must
capture the whole block. For this reason, they all start with
'^(.*?)' and end with '(.*)$'. For the built-in expressions,
Pattern takes care of adding the '^(.*?)' and '(.*)$'.
Finally, the order in which regular expressions are applied is very
important - e.g. if we first replace http://.../ links with <a> tags
and _then_ try to replace inline html, we would end up with a mess.
So, we apply the expressions in the following order:
* escape and backticks have to go before everything else, so
that we can preempt any markdown patterns by escaping them.
* then we handle auto-links (must be done before inline html)
* then we handle inline HTML. At this point we will simply
replace all inline HTML strings with a placeholder and add
the actual HTML to a hash.
* then inline images (must be done before links)
* then bracketed links, first regular then reference-style
* finally we apply strong and emphasis
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import util
from collections import namedtuple
import re
try: # pragma: no cover
from html import entities
except ImportError: # pragma: no cover
import htmlentitydefs as entities
def build_inlinepatterns(md, **kwargs):
""" Build the default set of inline patterns for Markdown. """
inlinePatterns = util.Registry()
inlinePatterns.register(BacktickInlineProcessor(BACKTICK_RE), 'backtick', 190)
inlinePatterns.register(EscapeInlineProcessor(ESCAPE_RE, md), 'escape', 180)
inlinePatterns.register(ReferenceInlineProcessor(REFERENCE_RE, md), 'reference', 170)
inlinePatterns.register(LinkInlineProcessor(LINK_RE, md), 'link', 160)
inlinePatterns.register(ImageInlineProcessor(IMAGE_LINK_RE, md), 'image_link', 150)
inlinePatterns.register(
ImageReferenceInlineProcessor(IMAGE_REFERENCE_RE, md), 'image_reference', 140
)
inlinePatterns.register(
ShortReferenceInlineProcessor(REFERENCE_RE, md), 'short_reference', 130
)
inlinePatterns.register(AutolinkInlineProcessor(AUTOLINK_RE, md), 'autolink', 120)
inlinePatterns.register(AutomailInlineProcessor(AUTOMAIL_RE, md), 'automail', 110)
inlinePatterns.register(SubstituteTagInlineProcessor(LINE_BREAK_RE, 'br'), 'linebreak', 100)
inlinePatterns.register(HtmlInlineProcessor(HTML_RE, md), 'html', 90)
inlinePatterns.register(HtmlInlineProcessor(ENTITY_RE, md), 'entity', 80)
inlinePatterns.register(SimpleTextInlineProcessor(NOT_STRONG_RE), 'not_strong', 70)
inlinePatterns.register(AsteriskProcessor(r'\*'), 'em_strong', 60)
inlinePatterns.register(UnderscoreProcessor(r'_'), 'em_strong2', 50)
return inlinePatterns
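# A hedged usage sketch (illustrative only, not part of this module): an extension can
# register its own processor in the registry built above. The "del" pattern, extension
# name, and priority value below are assumptions made for the example.
#
#     from markdown.extensions import Extension
#
#     DEL_RE = r'(--)(.*?)--'  # --deleted text--
#
#     class DelExtension(Extension):
#         def extendMarkdown(self, md):
#             # group(2) of DEL_RE becomes the text of the generated <del> element
#             md.inlinePatterns.register(SimpleTagInlineProcessor(DEL_RE, 'del'), 'del', 75)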
"""
The actual regular expressions for patterns
-----------------------------------------------------------------------------
"""
NOIMG = r'(?<!\!)'
# `e=f()` or ``e=f("`")``
BACKTICK_RE = r'(?:(?<!\\)((?:\\{2})+)(?=`+)|(?<!\\)(`+)(.+?)(?<!`)\2(?!`))'
# \<
ESCAPE_RE = r'\\(.)'
# *emphasis*
EMPHASIS_RE = r'(\*)([^\*]+)\1'
# **strong**
STRONG_RE = r'(\*{2})(.+?)\1'
# __smart__strong__
SMART_STRONG_RE = r'(?<!\w)(_{2})(?!_)(.+?)(?<!_)\1(?!\w)'
# _smart_emphasis_
SMART_EMPHASIS_RE = r'(?<!\w)(_)(?!_)(.+?)(?<!_)\1(?!\w)'
# __strong _em__
SMART_STRONG_EM_RE = r'(?<!\w)(\_)\1(?!\1)(.+?)(?<!\w)\1(?!\1)(.+?)\1{3}(?!\w)'
# ***strongem*** or ***em*strong**
EM_STRONG_RE = r'(\*)\1{2}(.+?)\1(.*?)\1{2}'
# ___strongem___ or ___em_strong__
EM_STRONG2_RE = r'(_)\1{2}(.+?)\1(.*?)\1{2}'
# ***strong**em*
STRONG_EM_RE = r'(\*)\1{2}(.+?)\1{2}(.*?)\1'
# ___strong__em_
STRONG_EM2_RE = r'(_)\1{2}(.+?)\1{2}(.*?)\1'
# **strong*em***
STRONG_EM3_RE = r'(\*)\1(?!\1)(.+?)\1(?!\1)(.+?)\1{3}'
# [text](url) or [text](<url>) or [text](url "title")
LINK_RE = NOIMG + r'\['
# ![alttxt](http://x.com/) or ![alttxt](<http://x.com/>)
IMAGE_LINK_RE = r'\!\['
# [Google][3]
REFERENCE_RE = LINK_RE
# ![alt text][2]
IMAGE_REFERENCE_RE = IMAGE_LINK_RE
# stand-alone * or _
NOT_STRONG_RE = r'((^|\s)(\*|_)(\s|$))'
# <http://www.123.com>
AUTOLINK_RE = r'<((?:[Ff]|[Hh][Tt])[Tt][Pp][Ss]?://[^<>]*)>'
# <me@example.com>
AUTOMAIL_RE = r'<([^<> !]*@[^@<> ]*)>'
# <...>
HTML_RE = r'(<([a-zA-Z/][^<>]*|!--(?:(?!<!--|-->).)*--)>)'
# "&" (decimal) or "&" (hex) or "&" (named)
ENTITY_RE = r'(&(?:\#[0-9]+|\#x[0-9a-fA-F]+|[a-zA-Z0-9]+);)'
# two spaces at end of line
LINE_BREAK_RE = r' \n'
def dequote(string):
"""Remove quotes from around a string."""
if ((string.startswith('"') and string.endswith('"')) or
(string.startswith("'") and string.endswith("'"))):
return string[1:-1]
else:
return string
class EmStrongItem(namedtuple('EmStrongItem', ['pattern', 'builder', 'tags'])):
"""Emphasis/strong pattern item."""
"""
The pattern classes
-----------------------------------------------------------------------------
"""
class Pattern(object): # pragma: no cover
"""Base class that inline patterns subclass. """
ANCESTOR_EXCLUDES = tuple()
def __init__(self, pattern, md=None):
"""
Create an instance of an inline pattern.
Keyword arguments:
* pattern: A regular expression that matches a pattern
"""
self.pattern = pattern
self.compiled_re = re.compile(r"^(.*?)%s(.*)$" % pattern,
re.DOTALL | re.UNICODE)
self.md = md
@property
@util.deprecated("Use 'md' instead.")
def markdown(self):
# TODO: remove this later
return self.md
def getCompiledRegExp(self):
""" Return a compiled regular expression. """
return self.compiled_re
def handleMatch(self, m):
"""Return a ElementTree element from the given match.
Subclasses should override this method.
Keyword arguments:
* m: A re match object containing a match of the pattern.
"""
pass # pragma: no cover
def type(self):
""" Return class name, to define pattern type """
return self.__class__.__name__
def unescape(self, text):
""" Return unescaped text given text with an inline placeholder. """
try:
stash = self.md.treeprocessors['inline'].stashed_nodes
except KeyError: # pragma: no cover
return text
def get_stash(m):
id = m.group(1)
if id in stash:
value = stash.get(id)
if isinstance(value, util.string_type):
return value
else:
# An etree Element - return text content only
return ''.join(value.itertext())
return util.INLINE_PLACEHOLDER_RE.sub(get_stash, text)
class InlineProcessor(Pattern):
"""
Base class that inline patterns subclass.
This is the newer style inline processor that uses a more
efficient and flexible search approach.
"""
def __init__(self, pattern, md=None):
"""
Create an instance of an inline pattern.
Keyword arguments:
* pattern: A regular expression that matches a pattern
"""
self.pattern = pattern
self.compiled_re = re.compile(pattern, re.DOTALL | re.UNICODE)
# API for Markdown to pass safe_mode into the instance
self.safe_mode = False
self.md = md
def handleMatch(self, m, data):
"""Return a ElementTree element from the given match and the
start and end index of the matched text.
If `start` and/or `end` are returned as `None`, it will be
assumed that the processor did not find a valid region of text.
Subclasses should override this method.
Keyword arguments:
* m: A re match object containing a match of the pattern.
* data: The buffer currently under analysis
Returns:
* el: The ElementTree element, text or None.
* start: The start of the region that has been matched or None.
* end: The end of the region that has been matched or None.
"""
pass # pragma: no cover
class SimpleTextPattern(Pattern): # pragma: no cover
""" Return a simple text of group(2) of a Pattern. """
def handleMatch(self, m):
return m.group(2)
class SimpleTextInlineProcessor(InlineProcessor):
""" Return a simple text of group(1) of a Pattern. """
def handleMatch(self, m, data):
return m.group(1), m.start(0), m.end(0)
class EscapeInlineProcessor(InlineProcessor):
""" Return an escaped character. """
def handleMatch(self, m, data):
char = m.group(1)
if char in self.md.ESCAPED_CHARS:
return '%s%s%s' % (util.STX, ord(char), util.ETX), m.start(0), m.end(0)
else:
return None, m.start(0), m.end(0)
class SimpleTagPattern(Pattern): # pragma: no cover
"""
Return element of type `tag` with a text attribute of group(3)
of a Pattern.
"""
def __init__(self, pattern, tag):
Pattern.__init__(self, pattern)
self.tag = tag
def handleMatch(self, m):
el = util.etree.Element(self.tag)
el.text = m.group(3)
return el
class SimpleTagInlineProcessor(InlineProcessor):
"""
Return element of type `tag` with a text attribute of group(2)
of a Pattern.
"""
def __init__(self, pattern, tag):
InlineProcessor.__init__(self, pattern)
self.tag = tag
def handleMatch(self, m, data): # pragma: no cover
el = util.etree.Element(self.tag)
el.text = m.group(2)
return el, m.start(0), m.end(0)
class SubstituteTagPattern(SimpleTagPattern): # pragma: no cover
""" Return an element of type `tag` with no children. """
def handleMatch(self, m):
return util.etree.Element(self.tag)
class SubstituteTagInlineProcessor(SimpleTagInlineProcessor):
""" Return an element of type `tag` with no children. """
def handleMatch(self, m, data):
return util.etree.Element(self.tag), m.start(0), m.end(0)
class BacktickInlineProcessor(InlineProcessor):
""" Return a `<code>` element containing the matching text. """
def __init__(self, pattern):
InlineProcessor.__init__(self, pattern)
self.ESCAPED_BSLASH = '%s%s%s' % (util.STX, ord('\\'), util.ETX)
self.tag = 'code'
def handleMatch(self, m, data):
if m.group(3):
el = util.etree.Element(self.tag)
el.text = util.AtomicString(util.code_escape(m.group(3).strip()))
return el, m.start(0), m.end(0)
else:
return m.group(1).replace('\\\\', self.ESCAPED_BSLASH), m.start(0), m.end(0)
class DoubleTagPattern(SimpleTagPattern): # pragma: no cover
"""Return a ElementTree element nested in tag2 nested in tag1.
Useful for strong emphasis etc.
"""
def handleMatch(self, m):
tag1, tag2 = self.tag.split(",")
el1 = util.etree.Element(tag1)
el2 = util.etree.SubElement(el1, tag2)
el2.text = m.group(3)
if len(m.groups()) == 5:
el2.tail = m.group(4)
return el1
class DoubleTagInlineProcessor(SimpleTagInlineProcessor):
"""Return a ElementTree element nested in tag2 nested in tag1.
Useful for strong emphasis etc.
"""
def handleMatch(self, m, data): # pragma: no cover
tag1, tag2 = self.tag.split(",")
el1 = util.etree.Element(tag1)
el2 = util.etree.SubElement(el1, tag2)
el2.text = m.group(2)
if len(m.groups()) == 3:
el2.tail = m.group(3)
return el1, m.start(0), m.end(0)
class HtmlInlineProcessor(InlineProcessor):
""" Store raw inline html and return a placeholder. """
def handleMatch(self, m, data):
rawhtml = self.unescape(m.group(1))
place_holder = self.md.htmlStash.store(rawhtml)
return place_holder, m.start(0), m.end(0)
def unescape(self, text):
""" Return unescaped text given text with an inline placeholder. """
try:
stash = self.md.treeprocessors['inline'].stashed_nodes
except KeyError: # pragma: no cover
return text
def get_stash(m):
id = m.group(1)
value = stash.get(id)
if value is not None:
try:
return self.md.serializer(value)
except Exception:
return r'\%s' % value
return util.INLINE_PLACEHOLDER_RE.sub(get_stash, text)
class AsteriskProcessor(InlineProcessor):
"""Emphasis processor for handling strong and em matches inside asterisks."""
PATTERNS = [
EmStrongItem(re.compile(EM_STRONG_RE, re.DOTALL | re.UNICODE), 'double', 'strong,em'),
EmStrongItem(re.compile(STRONG_EM_RE, re.DOTALL | re.UNICODE), 'double', 'em,strong'),
EmStrongItem(re.compile(STRONG_EM3_RE, re.DOTALL | re.UNICODE), 'double2', 'strong,em'),
EmStrongItem(re.compile(STRONG_RE, re.DOTALL | re.UNICODE), 'single', 'strong'),
EmStrongItem(re.compile(EMPHASIS_RE, re.DOTALL | re.UNICODE), 'single', 'em')
]
def build_single(self, m, tag, idx):
"""Return single tag."""
el1 = util.etree.Element(tag)
text = m.group(2)
self.parse_sub_patterns(text, el1, None, idx)
return el1
def build_double(self, m, tags, idx):
"""Return double tag."""
tag1, tag2 = tags.split(",")
el1 = util.etree.Element(tag1)
el2 = util.etree.Element(tag2)
text = m.group(2)
self.parse_sub_patterns(text, el2, None, idx)
el1.append(el2)
if len(m.groups()) == 3:
text = m.group(3)
self.parse_sub_patterns(text, el1, el2, idx)
return el1
def build_double2(self, m, tags, idx):
"""Return double tags (variant 2): `<strong>text <em>text</em></strong>`."""
tag1, tag2 = tags.split(",")
el1 = util.etree.Element(tag1)
el2 = util.etree.Element(tag2)
text = m.group(2)
self.parse_sub_patterns(text, el1, None, idx)
text = m.group(3)
el1.append(el2)
self.parse_sub_patterns(text, el2, None, idx)
return el1
def parse_sub_patterns(self, data, parent, last, idx):
"""
Parses sub patterns.
`data` (`str`):
text to evaluate.
`parent` (`etree.Element`):
Parent to attach text and sub elements to.
`last` (`etree.Element`):
Last appended child to parent. Can also be None if parent has no children.
`idx` (`int`):
Current pattern index that was used to evaluate the parent.
"""
offset = 0
pos = 0
length = len(data)
while pos < length:
# Find the start of potential emphasis or strong tokens
if self.compiled_re.match(data, pos):
matched = False
# See if we can match an emphasis/strong pattern
for index, item in enumerate(self.PATTERNS):
# Only evaluate patterns that are after what was used on the parent
if index <= idx:
continue
m = item.pattern.match(data, pos)
if m:
# Append child nodes to parent
# Text nodes should be appended to the last
# child if present, and if not, it should
# be added as the parent's text node.
text = data[offset:m.start(0)]
if text:
if last is not None:
last.tail = text
else:
parent.text = text
el = self.build_element(m, item.builder, item.tags, index)
parent.append(el)
last = el
# Move our position past the matched hunk
offset = pos = m.end(0)
matched = True
if not matched:
# We matched nothing, move on to the next character
pos += 1
else:
# Increment position as no potential emphasis start was found.
pos += 1
# Append any leftover text as a text node.
text = data[offset:]
if text:
if last is not None:
last.tail = text
else:
parent.text = text
def build_element(self, m, builder, tags, index):
"""Element builder."""
if builder == 'double2':
return self.build_double2(m, tags, index)
elif builder == 'double':
return self.build_double(m, tags, index)
else:
return self.build_single(m, tags, index)
def handleMatch(self, m, data):
"""Parse patterns."""
el = None
start = None
end = None
for index, item in enumerate(self.PATTERNS):
m1 = item.pattern.match(data, m.start(0))
if m1:
start = m1.start(0)
end = m1.end(0)
el = self.build_element(m1, item.builder, item.tags, index)
break
return el, start, end
class UnderscoreProcessor(AsteriskProcessor):
"""Emphasis processor for handling strong and em matches inside underscores."""
PATTERNS = [
EmStrongItem(re.compile(EM_STRONG2_RE, re.DOTALL | re.UNICODE), 'double', 'strong,em'),
EmStrongItem(re.compile(STRONG_EM2_RE, re.DOTALL | re.UNICODE), 'double', 'em,strong'),
EmStrongItem(re.compile(SMART_STRONG_EM_RE, re.DOTALL | re.UNICODE), 'double2', 'strong,em'),
EmStrongItem(re.compile(SMART_STRONG_RE, re.DOTALL | re.UNICODE), 'single', 'strong'),
EmStrongItem(re.compile(SMART_EMPHASIS_RE, re.DOTALL | re.UNICODE), 'single', 'em')
]
class LinkInlineProcessor(InlineProcessor):
""" Return a link element from the given match. """
RE_LINK = re.compile(r'''\(\s*(?:(<[^<>]*>)\s*(?:('[^']*'|"[^"]*")\s*)?\))?''', re.DOTALL | re.UNICODE)
RE_TITLE_CLEAN = re.compile(r'\s')
def handleMatch(self, m, data):
text, index, handled = self.getText(data, m.end(0))
if not handled:
return None, None, None
href, title, index, handled = self.getLink(data, index)
if not handled:
return None, None, None
el = util.etree.Element("a")
el.text = text
el.set("href", href)
if title is not None:
el.set("title", title)
return el, m.start(0), index
def getLink(self, data, index):
"""Parse data between `()` of `[Text]()` allowing recursive `()`. """
href = ''
title = None
handled = False
m = self.RE_LINK.match(data, pos=index)
if m and m.group(1):
# Matches [Text](<link> "title")
href = m.group(1)[1:-1].strip()
if m.group(2):
title = m.group(2)[1:-1]
index = m.end(0)
handled = True
elif m:
# Track bracket nesting and index in string
bracket_count = 1
backtrack_count = 1
start_index = m.end()
index = start_index
last_bracket = -1
# Primary (first found) quote tracking.
quote = None
start_quote = -1
exit_quote = -1
ignore_matches = False
# Secondary (second found) quote tracking.
alt_quote = None
start_alt_quote = -1
exit_alt_quote = -1
# Track last character
last = ''
for pos in util.iterrange(index, len(data)):
c = data[pos]
if c == '(':
# Count nested (
# Don't increment the bracket count if we are sure we're in a title.
if not ignore_matches:
bracket_count += 1
elif backtrack_count > 0:
backtrack_count -= 1
elif c == ')':
# Match nested ) to (
# Don't decrement if we are sure we are in a title that is unclosed.
if ((exit_quote != -1 and quote == last) or (exit_alt_quote != -1 and alt_quote == last)):
bracket_count = 0
elif not ignore_matches:
bracket_count -= 1
elif backtrack_count > 0:
backtrack_count -= 1
                    # We've found our backup end location if the title doesn't resolve.
if backtrack_count == 0:
last_bracket = index + 1
elif c in ("'", '"'):
# Quote has started
if not quote:
# We'll assume we are now in a title.
# Brackets are quoted, so no need to match them (except for the final one).
ignore_matches = True
backtrack_count = bracket_count
bracket_count = 1
start_quote = index + 1
quote = c
# Secondary quote (in case the first doesn't resolve): [text](link'"title")
elif c != quote and not alt_quote:
start_alt_quote = index + 1
alt_quote = c
# Update primary quote match
elif c == quote:
exit_quote = index + 1
# Update secondary quote match
elif alt_quote and c == alt_quote:
exit_alt_quote = index + 1
index += 1
# Link is closed, so let's break out of the loop
if bracket_count == 0:
# Get the title if we closed a title string right before link closed
if exit_quote >= 0 and quote == last:
href = data[start_index:start_quote - 1]
title = ''.join(data[start_quote:exit_quote - 1])
elif exit_alt_quote >= 0 and alt_quote == last:
href = data[start_index:start_alt_quote - 1]
title = ''.join(data[start_alt_quote:exit_alt_quote - 1])
else:
href = data[start_index:index - 1]
break
if c != ' ':
last = c
# We have a scenario: [test](link"notitle)
# When we enter a string, we stop tracking bracket resolution in the main counter,
# but we do keep a backup counter up until we discover where we might resolve all brackets
# if the title string fails to resolve.
if bracket_count != 0 and backtrack_count == 0:
href = data[start_index:last_bracket - 1]
index = last_bracket
bracket_count = 0
handled = bracket_count == 0
if title is not None:
title = self.RE_TITLE_CLEAN.sub(' ', dequote(self.unescape(title.strip())))
href = self.unescape(href).strip()
return href, title, index, handled
def getText(self, data, index):
"""Parse the content between `[]` of the start of an image or link
resolving nested square brackets.
"""
bracket_count = 1
text = []
for pos in util.iterrange(index, len(data)):
c = data[pos]
if c == ']':
bracket_count -= 1
elif c == '[':
bracket_count += 1
index += 1
if bracket_count == 0:
break
text.append(c)
return ''.join(text), index, bracket_count == 0
class ImageInlineProcessor(LinkInlineProcessor):
""" Return a img element from the given match. """
def handleMatch(self, m, data):
text, index, handled = self.getText(data, m.end(0))
if not handled:
return None, None, None
src, title, index, handled = self.getLink(data, index)
if not handled:
return None, None, None
el = util.etree.Element("img")
el.set("src", src)
if title is not None:
el.set("title", title)
el.set('alt', self.unescape(text))
return el, m.start(0), index
class ReferenceInlineProcessor(LinkInlineProcessor):
""" Match to a stored reference and return link element. """
NEWLINE_CLEANUP_RE = re.compile(r'\s+', re.MULTILINE)
RE_LINK = re.compile(r'\s?\[([^\]]*)\]', re.DOTALL | re.UNICODE)
def handleMatch(self, m, data):
text, index, handled = self.getText(data, m.end(0))
if not handled:
return None, None, None
id, end, handled = self.evalId(data, index, text)
if not handled:
return None, None, None
# Clean up linebreaks in id
id = self.NEWLINE_CLEANUP_RE.sub(' ', id)
if id not in self.md.references: # ignore undefined refs
return None, m.start(0), end
href, title = self.md.references[id]
return self.makeTag(href, title, text), m.start(0), end
def evalId(self, data, index, text):
"""
Evaluate the id portion of [ref][id].
If [ref][] use [ref].
"""
m = self.RE_LINK.match(data, pos=index)
if not m:
return None, index, False
else:
id = m.group(1).lower()
end = m.end(0)
if not id:
id = text.lower()
return id, end, True
def makeTag(self, href, title, text):
el = util.etree.Element('a')
el.set('href', href)
if title:
el.set('title', title)
el.text = text
return el
class ShortReferenceInlineProcessor(ReferenceInlineProcessor):
"""Shorte form of reference: [google]. """
def evalId(self, data, index, text):
"""Evaluate the id from of [ref] """
return text.lower(), index, True
class ImageReferenceInlineProcessor(ReferenceInlineProcessor):
""" Match to a stored reference and return img element. """
def makeTag(self, href, title, text):
el = util.etree.Element("img")
el.set("src", href)
if title:
el.set("title", title)
el.set("alt", self.unescape(text))
return el
class AutolinkInlineProcessor(InlineProcessor):
""" Return a link Element given an autolink (`<http://example/com>`). """
def handleMatch(self, m, data):
el = util.etree.Element("a")
el.set('href', self.unescape(m.group(1)))
el.text = util.AtomicString(m.group(1))
return el, m.start(0), m.end(0)
class AutomailInlineProcessor(InlineProcessor):
"""
Return a mailto link Element given an automail link (`<foo@example.com>`).
"""
def handleMatch(self, m, data):
el = util.etree.Element('a')
email = self.unescape(m.group(1))
if email.startswith("mailto:"):
email = email[len("mailto:"):]
def codepoint2name(code):
"""Return entity definition by code, or the code if not defined."""
entity = entities.codepoint2name.get(code)
if entity:
return "%s%s;" % (util.AMP_SUBSTITUTE, entity)
else:
return "%s#%d;" % (util.AMP_SUBSTITUTE, code)
letters = [codepoint2name(ord(letter)) for letter in email]
el.text = util.AtomicString(''.join(letters))
mailto = "mailto:" + email
mailto = "".join([util.AMP_SUBSTITUTE + '#%d;' %
ord(letter) for letter in mailto])
el.set('href', mailto)
return el, m.start(0), m.end(0)
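if __name__ == '__main__':  # pragma: no cover
    # Illustrative sketch (an addition, assuming the `markdown` package is installed):
    # the processors above are what turn runs of asterisks into nested <strong>/<em>
    # elements and autolinks into <a> elements during conversion.
    import markdown
    print(markdown.markdown('***both*** **strong** *em* <https://example.com/>'))
    # Expected output (roughly):
    # <p><strong><em>both</em></strong> <strong>strong</strong> <em>em</em>
    # <a href="https://example.com/">https://example.com/</a></p>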
|
def countdown(n):
    for i in range(n):
        print("%d..." % (n - i))
    print("Happy New Years!")
|
# -*- coding: utf-8 -*-
# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Python modules manipulation utility functions.
:type PY_SOURCE_EXTS: tuple(str)
:var PY_SOURCE_EXTS: list of possible python source file extensions
:type STD_LIB_DIR: str
:var STD_LIB_DIR: directory where standard modules are located
:type BUILTIN_MODULES: dict
:var BUILTIN_MODULES: dictionary with builtin module names as key
"""
__docformat__ = "restructuredtext en"
import sys
import os
from os.path import (splitext, join, abspath, isdir, dirname, exists,
basename, expanduser, normcase, realpath)
from imp import find_module, load_module, C_BUILTIN, PY_COMPILED, PKG_DIRECTORY
from distutils.sysconfig import get_config_var, get_python_lib, get_python_version
from distutils.errors import DistutilsPlatformError
from six import PY3
from six.moves import map, range
try:
import zipimport
except ImportError:
zipimport = None
ZIPFILE = object()
from logilab.common import STD_BLACKLIST, _handle_blacklist
from logilab.common.deprecation import deprecated
# Notes about STD_LIB_DIR
# Consider arch-specific installation for STD_LIB_DIR definition
# :mod:`distutils.sysconfig` contains too many hardcoded values to rely on
#
# :see: `Problems with /usr/lib64 builds <http://bugs.python.org/issue1294959>`_
# :see: `FHS <http://www.pathname.com/fhs/pub/fhs-2.3.html#LIBLTQUALGTALTERNATEFORMATESSENTIAL>`_
if sys.platform.startswith('win'):
PY_SOURCE_EXTS = ('py', 'pyw')
PY_COMPILED_EXTS = ('dll', 'pyd')
else:
PY_SOURCE_EXTS = ('py',)
PY_COMPILED_EXTS = ('so',)
try:
STD_LIB_DIR = get_python_lib(standard_lib=True)
# get_python_lib(standard_lib=1) is not available on pypy, set STD_LIB_DIR to
# non-valid path, see https://bugs.pypy.org/issue1164
except DistutilsPlatformError:
STD_LIB_DIR = '//'
EXT_LIB_DIR = get_python_lib()
BUILTIN_MODULES = dict.fromkeys(sys.builtin_module_names, True)
class NoSourceFile(Exception):
"""exception raised when we are not able to get a python
source file for a precompiled file
"""
class LazyObject(object):
def __init__(self, module, obj):
self.module = module
self.obj = obj
self._imported = None
def _getobj(self):
if self._imported is None:
self._imported = getattr(load_module_from_name(self.module),
self.obj)
return self._imported
def __getattribute__(self, attr):
try:
return super(LazyObject, self).__getattribute__(attr)
except AttributeError as ex:
return getattr(self._getobj(), attr)
def __call__(self, *args, **kwargs):
return self._getobj()(*args, **kwargs)
def load_module_from_name(dotted_name, path=None, use_sys=True):
"""Load a Python module from its name.
:type dotted_name: str
:param dotted_name: python name of a module or package
:type path: list or None
:param path:
optional list of path where the module or package should be
searched (use sys.path if nothing or None is given)
:type use_sys: bool
:param use_sys:
boolean indicating whether the sys.modules dictionary should be
used or not
:raise ImportError: if the module or package is not found
:rtype: module
:return: the loaded module
"""
return load_module_from_modpath(dotted_name.split('.'), path, use_sys)
def load_module_from_modpath(parts, path=None, use_sys=True):
"""Load a python module from its splitted name.
:type parts: list(str) or tuple(str)
:param parts:
python name of a module or package splitted on '.'
:type path: list or None
:param path:
optional list of path where the module or package should be
searched (use sys.path if nothing or None is given)
:type use_sys: bool
:param use_sys:
boolean indicating whether the sys.modules dictionary should be used or not
:raise ImportError: if the module or package is not found
:rtype: module
:return: the loaded module
"""
if use_sys:
try:
return sys.modules['.'.join(parts)]
except KeyError:
pass
modpath = []
prevmodule = None
for part in parts:
modpath.append(part)
curname = '.'.join(modpath)
module = None
if len(modpath) != len(parts):
# even with use_sys=False, should try to get outer packages from sys.modules
module = sys.modules.get(curname)
elif use_sys:
# because it may have been indirectly loaded through a parent
module = sys.modules.get(curname)
if module is None:
mp_file, mp_filename, mp_desc = find_module(part, path)
try:
module = load_module(curname, mp_file, mp_filename, mp_desc)
finally:
if mp_file is not None:
mp_file.close()
if prevmodule:
setattr(prevmodule, part, module)
_file = getattr(module, '__file__', '')
prevmodule = module
if not _file and _is_namespace(curname):
continue
if not _file and len(modpath) != len(parts):
raise ImportError('no module in %s' % '.'.join(parts[len(modpath):]) )
path = [dirname( _file )]
return module
def load_module_from_file(filepath, path=None, use_sys=True, extrapath=None):
"""Load a Python module from it's path.
:type filepath: str
:param filepath: path to the python module or package
:type path: list or None
:param path:
optional list of path where the module or package should be
searched (use sys.path if nothing or None is given)
:type use_sys: bool
:param use_sys:
boolean indicating whether the sys.modules dictionary should be
used or not
:raise ImportError: if the module or package is not found
:rtype: module
:return: the loaded module
"""
modpath = modpath_from_file(filepath, extrapath)
return load_module_from_modpath(modpath, path, use_sys)
def _check_init(path, mod_path):
"""check there are some __init__.py all along the way"""
modpath = []
for part in mod_path:
modpath.append(part)
path = join(path, part)
if not _is_namespace('.'.join(modpath)) and not _has_init(path):
return False
return True
def _canonicalize_path(path):
return realpath(expanduser(path))
def _path_from_filename(filename):
if PY3:
return filename
else:
if filename.endswith(".pyc"):
return filename[:-1]
return filename
@deprecated('you should avoid using modpath_from_file()')
def modpath_from_file(filename, extrapath=None):
"""DEPRECATED: doens't play well with symlinks and sys.meta_path
Given a file path return the corresponding splitted module's name
(i.e name of a module or package splitted on '.')
:type filename: str
:param filename: file's path for which we want the module's name
:type extrapath: dict
:param extrapath:
optional extra search path, with path as key and package name for the path
      as value. This is usually useful to handle packages split across multiple
      directories using the __path__ trick.
:raise ImportError:
if the corresponding module's name has not been found
:rtype: list(str)
:return: the corresponding splitted module's name
"""
filename = _path_from_filename(filename)
filename = _canonicalize_path(filename)
base = os.path.splitext(filename)[0]
if extrapath is not None:
for path_ in map(_canonicalize_path, extrapath):
path = abspath(path_)
if path and normcase(base[:len(path)]) == normcase(path):
submodpath = [pkg for pkg in base[len(path):].split(os.sep)
if pkg]
if _check_init(path, submodpath[:-1]):
return extrapath[path_].split('.') + submodpath
for path in map(_canonicalize_path, sys.path):
if path and normcase(base).startswith(path):
modpath = [pkg for pkg in base[len(path):].split(os.sep) if pkg]
if _check_init(path, modpath[:-1]):
return modpath
raise ImportError('Unable to find module for %s in %s' % (
filename, ', \n'.join(sys.path)))
def file_from_modpath(modpath, path=None, context_file=None):
"""given a mod path (i.e. splitted module / package name), return the
corresponding file, giving priority to source file over precompiled
file if it exists
:type modpath: list or tuple
:param modpath:
      split module name (i.e. name of a module or package split
      on '.')
(this means explicit relative imports that start with dots have
empty strings in this list!)
:type path: list or None
:param path:
optional list of path where the module or package should be
searched (use sys.path if nothing or None is given)
:type context_file: str or None
:param context_file:
context file to consider, necessary if the identifier has been
introduced using a relative import unresolvable in the actual
context (i.e. modutils)
:raise ImportError: if there is no such module in the directory
:rtype: str or None
:return:
the path to the module's file or None if it's an integrated
builtin module such as 'sys'
"""
if context_file is not None:
context = dirname(context_file)
else:
context = context_file
if modpath[0] == 'xml':
# handle _xmlplus
try:
return _file_from_modpath(['_xmlplus'] + modpath[1:], path, context)
except ImportError:
return _file_from_modpath(modpath, path, context)
elif modpath == ['os', 'path']:
# FIXME: currently ignoring search_path...
return os.path.__file__
return _file_from_modpath(modpath, path, context)
def get_module_part(dotted_name, context_file=None):
"""given a dotted name return the module part of the name :
>>> get_module_part('logilab.common.modutils.get_module_part')
'logilab.common.modutils'
:type dotted_name: str
:param dotted_name: full name of the identifier we are interested in
:type context_file: str or None
:param context_file:
context file to consider, necessary if the identifier has been
introduced using a relative import unresolvable in the actual
context (i.e. modutils)
:raise ImportError: if there is no such module in the directory
:rtype: str or None
:return:
the module part of the name or None if we have not been able at
all to import the given name
XXX: deprecated, since it doesn't handle package precedence over module
(see #10066)
"""
# os.path trick
if dotted_name.startswith('os.path'):
return 'os.path'
parts = dotted_name.split('.')
if context_file is not None:
# first check for builtin module which won't be considered latter
# in that case (path != None)
if parts[0] in BUILTIN_MODULES:
if len(parts) > 2:
raise ImportError(dotted_name)
return parts[0]
# don't use += or insert, we want a new list to be created !
path = None
starti = 0
if parts[0] == '':
assert context_file is not None, \
'explicit relative import, but no context_file?'
path = [] # prevent resolving the import non-relatively
starti = 1
while parts[starti] == '': # for all further dots: change context
starti += 1
context_file = dirname(context_file)
for i in range(starti, len(parts)):
try:
file_from_modpath(parts[starti:i+1],
path=path, context_file=context_file)
except ImportError:
if not i >= max(1, len(parts) - 2):
raise
return '.'.join(parts[:i])
return dotted_name
def get_modules(package, src_directory, blacklist=STD_BLACKLIST):
"""given a package directory return a list of all available python
modules in the package and its subpackages
:type package: str
:param package: the python name for the package
:type src_directory: str
:param src_directory:
path of the directory corresponding to the package
:type blacklist: list or tuple
:param blacklist:
optional list of files or directory to ignore, default to
the value of `logilab.common.STD_BLACKLIST`
:rtype: list
:return:
the list of all available python modules in the package and its
subpackages
"""
modules = []
for directory, dirnames, filenames in os.walk(src_directory):
_handle_blacklist(blacklist, dirnames, filenames)
# check for __init__.py
        if '__init__.py' not in filenames:
dirnames[:] = ()
continue
if directory != src_directory:
dir_package = directory[len(src_directory):].replace(os.sep, '.')
modules.append(package + dir_package)
for filename in filenames:
if _is_python_file(filename) and filename != '__init__.py':
src = join(directory, filename)
module = package + src[len(src_directory):-3]
modules.append(module.replace(os.sep, '.'))
return modules
def get_module_files(src_directory, blacklist=STD_BLACKLIST):
"""given a package directory return a list of all available python
module's files in the package and its subpackages
:type src_directory: str
:param src_directory:
path of the directory corresponding to the package
:type blacklist: list or tuple
:param blacklist:
optional list of files or directory to ignore, default to the value of
`logilab.common.STD_BLACKLIST`
:rtype: list
:return:
the list of all available python module's files in the package and
its subpackages
"""
files = []
for directory, dirnames, filenames in os.walk(src_directory):
_handle_blacklist(blacklist, dirnames, filenames)
# check for __init__.py
        if '__init__.py' not in filenames:
dirnames[:] = ()
continue
for filename in filenames:
if _is_python_file(filename):
src = join(directory, filename)
files.append(src)
return files
def get_source_file(filename, include_no_ext=False):
"""given a python module's file name return the matching source file
    name (the filename will be returned identically if it's already an
absolute path to a python source file...)
:type filename: str
:param filename: python module's file name
:raise NoSourceFile: if no source file exists on the file system
:rtype: str
:return: the absolute path of the source file if it exists
"""
base, orig_ext = splitext(abspath(filename))
for ext in PY_SOURCE_EXTS:
source_path = '%s.%s' % (base, ext)
if exists(source_path):
return source_path
if include_no_ext and not orig_ext and exists(base):
return base
raise NoSourceFile(filename)
def cleanup_sys_modules(directories):
"""remove submodules of `directories` from `sys.modules`"""
cleaned = []
for modname, module in list(sys.modules.items()):
modfile = getattr(module, '__file__', None)
if modfile:
for directory in directories:
if modfile.startswith(directory):
cleaned.append(modname)
del sys.modules[modname]
break
return cleaned
def clean_sys_modules(names):
"""remove submodules starting with name from `names` from `sys.modules`"""
cleaned = set()
for modname in list(sys.modules):
for name in names:
if modname.startswith(name):
del sys.modules[modname]
cleaned.add(modname)
break
return cleaned
def is_python_source(filename):
"""
    :rtype: bool
    :return: True if the filename is a python source file
"""
return splitext(filename)[1][1:] in PY_SOURCE_EXTS
def is_standard_module(modname, std_path=(STD_LIB_DIR,)):
"""try to guess if a module is a standard python module (by default,
see `std_path` parameter's description)
:type modname: str
:param modname: name of the module we are interested in
:type std_path: list(str) or tuple(str)
:param std_path: list of path considered as standard
:rtype: bool
:return:
true if the module:
- is located on the path listed in one of the directory in `std_path`
- is a built-in module
Note: this function is known to return wrong values when inside virtualenv.
See https://www.logilab.org/ticket/294756.
"""
modname = modname.split('.')[0]
try:
filename = file_from_modpath([modname])
except ImportError as ex:
# import failed, i'm probably not so wrong by supposing it's
# not standard...
return False
# modules which are not living in a file are considered standard
# (sys and __builtin__ for instance)
if filename is None:
# we assume there are no namespaces in stdlib
return not _is_namespace(modname)
filename = abspath(filename)
if filename.startswith(EXT_LIB_DIR):
return False
for path in std_path:
if filename.startswith(abspath(path)):
return True
return False
def is_relative(modname, from_file):
"""return true if the given module name is relative to the given
file name
:type modname: str
:param modname: name of the module we are interested in
:type from_file: str
:param from_file:
path of the module from which modname has been imported
:rtype: bool
:return:
true if the module has been imported relatively to `from_file`
"""
if not isdir(from_file):
from_file = dirname(from_file)
if from_file in sys.path:
return False
try:
find_module(modname.split('.')[0], [from_file])
return True
except ImportError:
return False
# internal only functions #####################################################
def _file_from_modpath(modpath, path=None, context=None):
"""given a mod path (i.e. splitted module / package name), return the
corresponding file
this function is used internally, see `file_from_modpath`'s
documentation for more information
"""
assert len(modpath) > 0
if context is not None:
try:
mtype, mp_filename = _module_file(modpath, [context])
except ImportError:
mtype, mp_filename = _module_file(modpath, path)
else:
mtype, mp_filename = _module_file(modpath, path)
if mtype == PY_COMPILED:
try:
return get_source_file(mp_filename)
except NoSourceFile:
return mp_filename
elif mtype == C_BUILTIN:
# integrated builtin module
return None
elif mtype == PKG_DIRECTORY:
mp_filename = _has_init(mp_filename)
return mp_filename
def _search_zip(modpath, pic):
for filepath, importer in pic.items():
if importer is not None:
if importer.find_module(modpath[0]):
if not importer.find_module('/'.join(modpath)):
raise ImportError('No module named %s in %s/%s' % (
'.'.join(modpath[1:]), filepath, modpath))
return ZIPFILE, abspath(filepath) + '/' + '/'.join(modpath), filepath
raise ImportError('No module named %s' % '.'.join(modpath))
try:
import pkg_resources
except ImportError:
pkg_resources = None
def _is_namespace(modname):
return (pkg_resources is not None
and modname in pkg_resources._namespace_packages)
def _module_file(modpath, path=None):
"""get a module type / file path
:type modpath: list or tuple
:param modpath:
      split module name (i.e. name of a module or package split
      on '.'), with leading empty strings for explicit relative import
:type path: list or None
:param path:
optional list of path where the module or package should be
searched (use sys.path if nothing or None is given)
:rtype: tuple(int, str)
:return: the module type flag and the file path for a module
"""
# egg support compat
try:
pic = sys.path_importer_cache
_path = (path is None and sys.path or path)
for __path in _path:
            if __path not in pic:
try:
pic[__path] = zipimport.zipimporter(__path)
except zipimport.ZipImportError:
pic[__path] = None
checkeggs = True
except AttributeError:
checkeggs = False
# pkg_resources support (aka setuptools namespace packages)
if (_is_namespace(modpath[0]) and modpath[0] in sys.modules):
# setuptools has added into sys.modules a module object with proper
# __path__, get back information from there
module = sys.modules[modpath.pop(0)]
# use list() to protect against _NamespacePath instance we get with python 3, which
# find_module later doesn't like
path = list(module.__path__)
if not modpath:
return C_BUILTIN, None
imported = []
while modpath:
modname = modpath[0]
        # beware of changes in the find_module implementation w.r.t. builtin modules
#
# Python 2.6.6 (r266:84292, Sep 11 2012, 08:34:23)
# >>> imp.find_module('posix')
# (None, 'posix', ('', '', 6))
#
# Python 3.3.1 (default, Apr 26 2013, 12:08:46)
# >>> imp.find_module('posix')
# (None, None, ('', '', 6))
try:
_, mp_filename, mp_desc = find_module(modname, path)
except ImportError:
if checkeggs:
return _search_zip(modpath, pic)[:2]
raise
else:
if checkeggs and mp_filename:
fullabspath = [abspath(x) for x in _path]
try:
pathindex = fullabspath.index(dirname(abspath(mp_filename)))
emtype, emp_filename, zippath = _search_zip(modpath, pic)
if pathindex > _path.index(zippath):
# an egg takes priority
return emtype, emp_filename
except ValueError:
# XXX not in _path
pass
except ImportError:
pass
checkeggs = False
imported.append(modpath.pop(0))
mtype = mp_desc[2]
if modpath:
if mtype != PKG_DIRECTORY:
raise ImportError('No module %s in %s' % ('.'.join(modpath),
'.'.join(imported)))
# XXX guess if package is using pkgutil.extend_path by looking for
# those keywords in the first four Kbytes
try:
with open(join(mp_filename, '__init__.py')) as stream:
data = stream.read(4096)
except IOError:
path = [mp_filename]
else:
if 'pkgutil' in data and 'extend_path' in data:
# extend_path is called, search sys.path for module/packages
# of this name see pkgutil.extend_path documentation
path = [join(p, *imported) for p in sys.path
if isdir(join(p, *imported))]
else:
path = [mp_filename]
return mtype, mp_filename
def _is_python_file(filename):
"""return true if the given filename should be considered as a python file
.pyc and .pyo are ignored
"""
for ext in ('.py', '.so', '.pyd', '.pyw'):
if filename.endswith(ext):
return True
return False
def _has_init(directory):
"""if the given directory has a valid __init__ file, return its path,
else return None
"""
mod_or_pack = join(directory, '__init__')
for ext in PY_SOURCE_EXTS + ('pyc', 'pyo'):
if exists(mod_or_pack + '.' + ext):
return mod_or_pack + '.' + ext
return None
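if __name__ == '__main__':
    # Minimal usage sketch (illustrative addition, not part of the original module):
    # resolve the module part of a dotted name and load a module by name.
    print(get_module_part('os.path.join'))   # -> 'os.path'
    print(load_module_from_name('os.path'))  # -> the already-imported os.path module
    print(is_standard_module('os'))          # -> True in a typical installation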
|
def extractLingTranslatesSometimes(item):
"""
# 'Ling Translates Sometimes'
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
return False
|
from flask import g
from wtforms.validators import ValidationError
from mod_auth.forms import unique_username, valid_password
from mod_auth.models import User
from tests.base import BaseTestCase
class Field:
def __init__(self, data):
self.data = data
class TestForm(BaseTestCase):
def test_unique_username(self):
"""
Test that username is always unique.
"""
user = User(name="thealphadollar")
g.db.add(user)
g.db.commit()
user_field = Field("thealphadollar")
with self.assertRaises(ValidationError):
unique_username(None, user_field)
def test_empty_invalid_password(self):
"""
Test validation fail for zero length password.
"""
pass_field = Field("")
with self.assertRaises(ValidationError):
valid_password(None, pass_field)
def test_less_than_min_length_invalid_password(self):
"""
Test validation fail for password of length less than min length.
"""
pass_field = Field("".join(['x' * (int(self.app.config['MIN_PWD_LEN']) - 1)]))
with self.assertRaises(ValidationError):
valid_password(None, pass_field)
def test_more_than_max_length_invalid_password(self):
"""
Test validation fail for password of length more than max length.
"""
pass_field = Field("".join(['x' * (int(self.app.config['MAX_PWD_LEN']) + 1)]))
with self.assertRaises(ValidationError):
valid_password(None, pass_field)
def test_valid_password(self):
"""
Test validation pass for valid password.
"""
pass_field = Field("".join(['x' * (int(self.app.config['MAX_PWD_LEN']))]))
valid_password(None, pass_field)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of the SPORCO package. Details of the copyright
# and user license can be found in the 'LICENSE.txt' file distributed
# with the package.
"""
Parallel CSC with a Spatial Mask
================================
This example compares the use of :class:`.parcbpdn.ParConvBPDN` with :class:`.cbpdn.ConvBPDNMaskDcpl` for convolutional sparse coding with a spatial mask :cite:`wohlberg-2016-boundary`. The example problem is inpainting of randomly distributed corruption of a greyscale image.
"""
from __future__ import print_function
from builtins import input
from builtins import range
import pyfftw # See https://github.com/pyFFTW/pyFFTW/issues/40
import numpy as np
from sporco.admm import tvl2
from sporco.admm import cbpdn
from sporco.admm import parcbpdn
from sporco import util
from sporco import metric
from sporco import plot
"""
Load a reference image.
"""
img = util.ExampleImages().image('monarch.png', zoom=0.25, scaled=True,
gray=True, idxexp=np.s_[:, 160:672])
"""
Create random mask and apply to reference image to obtain test image. (The call to ``numpy.random.seed`` ensures that the pseudo-random noise is reproducible.)
"""
t = 0.5
np.random.seed(12345)
msk = np.random.randn(*(img.shape))
msk[np.abs(msk) > t] = 1
msk[np.abs(msk) < t] = 0
imgw = msk * img
"""
Define pad and crop functions.
"""
pn = 8
spad = lambda x: np.pad(x, pn, mode='symmetric')
zpad = lambda x: np.pad(x, pn, mode='constant')
crop = lambda x: x[pn:-pn, pn:-pn]
"""
Construct padded mask and test image.
"""
mskp = zpad(msk)
imgwp = spad(imgw)
"""
$\ell_2$-TV denoising with a spatial mask as a non-linear lowpass filter. The highpass component is the difference between the test image and the lowpass component, multiplied by the mask for faster convergence of the convolutional sparse coding (see :cite:`wohlberg-2017-convolutional3`).
"""
lmbda = 0.05
opt = tvl2.TVL2Denoise.Options({'Verbose': False, 'MaxMainIter': 200,
'DFidWeight': mskp, 'gEvalY': False,
'AutoRho': {'Enabled': True}})
b = tvl2.TVL2Denoise(imgwp, lmbda, opt)
sl = b.solve()
sh = mskp * (imgwp - sl)
"""
Load dictionary.
"""
D = util.convdicts()['G:12x12x216']
plot.imview(util.tiledict(D), fgsz=(7, 7))
lmbda = 2e-2
"""
The ``RelStopTol`` options were chosen so that the two methods stop with similar functional values.
"""
"""
Initialise and run serial CSC solver using masked decoupling :cite:`heide-2015-fast`.
"""
opt = cbpdn.ConvBPDNMaskDcpl.Options({'Verbose': True, 'MaxMainIter': 200,
'HighMemSolve': True, 'RelStopTol': 5e-2,
'AuxVarObj': False, 'RelaxParam': 1.8,
'rho': 5e1*lmbda + 1e-1, 'AutoRho':
{'Enabled': False, 'StdResiduals': False}})
b = cbpdn.ConvBPDNMaskDcpl(D, sh, lmbda, mskp, opt=opt)
X = b.solve()
"""
Initialise and run parallel CSC solver using an ADMM dictionary partition :cite:`skau-2018-fast`.
"""
opt_par = parcbpdn.ParConvBPDN.Options({'Verbose': True, 'MaxMainIter': 200,
'HighMemSolve': True, 'RelStopTol': 1e-2,
'AuxVarObj': False, 'RelaxParam': 1.8,
'rho': 5e1*lmbda + 1e-1, 'alpha': 1.5,
'AutoRho': {'Enabled': False,
'StdResiduals': False}})
b_par = parcbpdn.ParConvBPDN(D, sh, lmbda, mskp, opt=opt_par)
X_par = b_par.solve()
"""
Report runtimes of different methods of solving the same problem.
"""
print("ConvBPDNMaskDcpl solve time: %.2fs" % b.timer.elapsed('solve_wo_rsdl'))
print("ParConvBPDN solve time: %.2fs" % b_par.timer.elapsed('solve_wo_rsdl'))
print("ParConvBPDN was %.2f times faster than ConvBPDNMaskDcpl\n" %
(b.timer.elapsed('solve_wo_rsdl')/b_par.timer.elapsed('solve_wo_rsdl')))
"""
Reconstruct images from sparse representations.
"""
imgr = crop(sl + b.reconstruct().squeeze())
imgr_par = crop(sl + b_par.reconstruct().squeeze())
"""
Report performances of different methods of solving the same problem.
"""
print("Corrupted image PSNR: %5.2f dB" % metric.psnr(img, imgw))
print("Serial Reconstruction PSNR: %5.2f dB" % metric.psnr(img, imgr))
print("Parallel Reconstruction PSNR: %5.2f dB\n" % metric.psnr(img, imgr_par))
"""
Display reference, test, and reconstructed images
"""
fig = plot.figure(figsize=(14, 14))
plot.subplot(2, 2, 1)
plot.imview(img, fig=fig, title='Reference Image')
plot.subplot(2, 2, 2)
plot.imview(imgw, fig=fig, title=('Corrupted Image PSNR: %5.2f dB' %
metric.psnr(img, imgw)))
plot.subplot(2, 2, 3)
plot.imview(imgr, fig=fig, title=('Serial reconstruction PSNR: %5.2f dB' %
metric.psnr(img, imgr)))
plot.subplot(2, 2, 4)
plot.imview(imgr_par, fig=fig, title=('Parallel reconstruction PSNR: %5.2f dB' %
metric.psnr(img, imgr_par)))
fig.show()
"""
Display lowpass component and sparse representation
"""
fig = plot.figure(figsize=(21, 7))
plot.subplot(1, 3, 1)
plot.imview(sl, fig=fig, cmap=plot.cm.Blues, title='Lowpass component')
plot.subplot(1, 3, 2)
plot.imview(np.squeeze(np.sum(abs(X), axis=b.cri.axisM)), fig=fig,
cmap=plot.cm.Blues, title='Serial sparse representation')
plot.subplot(1, 3, 3)
plot.imview(np.squeeze(np.sum(abs(X_par), axis=b.cri.axisM)), fig=fig,
cmap=plot.cm.Blues, title='Parallel sparse representation')
fig.show()
# Wait for enter on keyboard
input()
|
class A1:
def mymethod1(self):
print("It is an instance method of A1 class")
class B1(A1):
def mymethod1(self):
print("It is an instance method of B1 class")
class C1(B1):
def mymethod1(self):
print("It is an instance method of C1 class")
class D1(C1):
def mymethod1(self):
print("It is an instance method of D1 class")
class E1(D1):
def mymethod1(self):
        C1.mymethod1(self)  # calling the C1 class instance method explicitly
myobj = E1()
myobj.mymethod1()
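# Illustrative addition (not in the original snippet): using super() instead of the
# explicit C1.mymethod1(self) call dispatches along the MRO, so the next class
# after E2 (namely D1) is invoked first.
class E2(D1):
    def mymethod1(self):
        super().mymethod1()  # resolves to D1.mymethod1, the next class in the MRO
myobj2 = E2()
myobj2.mymethod1()
print([cls.__name__ for cls in E2.__mro__])  # ['E2', 'D1', 'C1', 'B1', 'A1', 'object']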
|
""" Leetcode 1446 - Consecutive Characters
https://leetcode.com/problems/consecutive-characters/
1. MINE Straight-Forward: Time: O(N) Space: O(1) (N is len_of_s)
2. Python-Method: Time: O(N) Space: O(N) (N is len_of_s)
"""
class Solution1:
""" 1. MINE Straight-Forward """
def max_power(self, s: str) -> int:
max_power = 1
count = 1
for i in range(1, len(s)):
if s[i] == s[i-1]:
count += 1
max_power = max(max_power, count)
else:
count = 1
return max_power
class Solution2:
""" 2. Python-Method """
def max_power(self, s):
from itertools import groupby
return max(len(list(y)) for _, y in groupby(s))
if __name__ == '__main__':
s = "leetcode"
res = Solution2().max_power(s)
print(res)
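# Sanity check (illustrative addition): both approaches agree on the example input,
# where the longest run in "leetcode" is "ee" with power 2.
assert Solution1().max_power("leetcode") == Solution2().max_power("leetcode") == 2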
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from .v2017_07_01.models import *
from .v2022_05_02_preview.models import *
|
import inspect
from PYB11Generator import *
from FieldBase import FieldBase
from Field import Field
#-------------------------------------------------------------------------------
# Add numeric operations to a Field
#-------------------------------------------------------------------------------
@PYB11template("Dimension", "Value")
@PYB11pycppname("Field")
class ArithmeticField(FieldBase):
PYB11typedefs = """
typedef Field<%(Dimension)s, %(Value)s> FieldType;
"""
def __add__(self):
return
def __sub__(self):
return
def __iadd__(self):
return
def __isub__(self):
return
@PYB11pyname("__add__")
def __add__V(self, rhs="%(Value)s()"):
return
@PYB11pyname("__sub__")
def __sub__V(self, rhs="%(Value)s()"):
return
@PYB11pyname("__iadd__")
def __iadd__V(self, rhs="%(Value)s()"):
return
@PYB11pyname("__isub__")
def __isub__V(self, rhs="%(Value)s()"):
return
def __imul__(self, rhs="double()"):
return
def __idiv__(self, rhs="double()"):
return
@PYB11const
def sumElements(self):
"Return the sum of the elements in the Field."
return
@PYB11const
def localSumElements(self):
"Return the sum of the elements in the Field local to each processor."
return
#...........................................................................
# Comparators
def __gt__(self):
return
def __lt__(self):
return
def __ge__(self):
return "bool"
def __le__(self):
return "bool"
def __gt__(self, rhs="%(Value)s()"):
"Greater than comparision with a %(Value)s"
return "bool"
def __lt__(self, rhs="%(Value)s()"):
"Less than comparision with a %(Value)s"
return "bool"
def __ge__(self, rhs="%(Value)s()"):
"Greater than or equal comparision with a %(Value)s"
return "bool"
def __le__(self, rhs="%(Value)s()"):
"Less than or equal comparision with a %(Value)s"
return "bool"
def applyMin(self):
"Enforce a floor on the values of the Field."
return
def applyMax(self):
"Enforce a ceiling on the values of the Field."
return
@PYB11const
def min(self):
"Return the mimimum value in the Field."
return
@PYB11const
def max(self):
"Return the maximum value in the Field."
return
@PYB11const
def localMin(self):
"Return the mimimum value in the Field local to each processor."
return
@PYB11const
def localMax(self):
"Return the maximum value in the Field local to each processor."
return
#-------------------------------------------------------------------------------
# Inject base field methods
#-------------------------------------------------------------------------------
PYB11inject(Field, ArithmeticField)
|
#
# compressedColumn.py
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import fdb
import fdb.tuple
import struct
_unpackedPrefix = 'unpacked'
_packedPrefix = 'packed'
fdb.api_version(16)
class _MergedData:
def __init__(self):
self.results = []
self.finishedPack = False
self.finishedUnpack = False
self.packedIndex = 0
pass
class Column:
def __init__(self, columnName):
self.columnName = columnName
self.packFetchCount = 10
self.targetChunkSize = 5000
self.maxChunkSize = 10000
# self.mergeChunkSize = 2500
def _getSubKeyTuple(self, key):
return fdb.tuple.unpack(key)[2:]
def _getSubKey(self, key):
return fdb.tuple.unpack(key)[2]
def _isPackedKey(self, key):
return str(key).startswith(fdb.tuple.pack((self.columnName, _packedPrefix)))
# This results in slight inefficiencies when the key being searched for comes before the first packed segment with strictRange=False
def _getPackedData(self, key, packedRange, requireKey=True, strictRange=True):
found = False
keyRange = None
packedKeyRange = None
packedData = None
for k, v in packedRange:
# print 'Searching ' + k + ' for ' + key
if self._isPackedKey(k):
if found:
endRange = self._getSubKeyTuple(k)
keyRange = (keyRange[0], endRange[0])
break
keyRange = self._getSubKeyTuple(k)
packedKeyRange = keyRange
# print str(keyRange)
if (not requireKey or key >= keyRange[0]) and key <= keyRange[1]:
if strictRange:
packedData = _PackedData(v)
break
else:
found = True
keyRange = (keyRange[0], keyRange[1] + chr(0))
packedData = _PackedData(v)
elif found:
keyRange = (keyRange[0], '\xff')
if strictRange:
return [keyRange, packedData]
else:
return [packedKeyRange, keyRange, packedData]
def _getPackedRange(self, tr, key):
return tr.get_range(fdb.KeySelector.last_less_than(fdb.tuple.pack((self.columnName, _packedPrefix, key + chr(0)))),
fdb.tuple.pack((self.columnName, _packedPrefix + chr(0))), 2)
def _getUnpackedData(self, tr, key):
return tr[fdb.tuple.pack((self.columnName, _unpackedPrefix, key))]
def _getUnpackedRange(self, tr, keyBegin, keyEnd, limit):
return tr.get_range(fdb.tuple.pack((self.columnName, _unpackedPrefix, keyBegin)),
fdb.tuple.pack((self.columnName, _unpackedPrefix, keyEnd)), limit)
def _mergeResults(self, packed, unpacked, totalUnpacked, packedIndex=0, minPackedKey='', maxKey=None):
data = _MergedData()
if packed is None:
# print 'No merge necessary'
data.finishedUnpack = True
data.finishedPack = True
data.packedIndex = 0
if maxKey is None:
data.results = [fdb.KeyValue(self._getSubKey(k), v) for k, v in unpacked]
else:
for k, v in unpacked:
if k < maxKey:
data.results.append(fdb.KeyValue(self._getSubKey(k), v))
else:
data.finishedUnpack = False
break
else:
# print 'Merging packed'
unpackedCount = 0
for k, v in unpacked:
subKey = self._getSubKey(k)
# print 'Unpacked: ' + subKey
if maxKey is not None and subKey >= maxKey:
# print 'subKey >= maxKey %s, %s' % (subKey, maxKey)
break
exactMatch = False
while packedIndex < len(packed.rows) \
and packed.rows[packedIndex].key <= subKey \
and (maxKey is None or packed.rows[packedIndex].key < maxKey):
exactMatch = packed.rows[packedIndex].key == subKey
if packed.rows[packedIndex].key < subKey and packed.rows[packedIndex].key >= minPackedKey:
data.results.append(packed.rows[packedIndex])
packedIndex += 1
if maxKey is None and packedIndex == len(packed.rows):
if exactMatch:
data.results.append(fdb.KeyValue(self._getSubKey(k), v))
# print 'packedIndex == len(packed.rows)'
break
data.results.append(fdb.KeyValue(self._getSubKey(k), v))
unpackedCount += 1
# print "Packed index: %d, Unpacked: %d, total: %d" % (packedIndex, unpackedCount, totalUnpacked)
if unpackedCount < totalUnpacked:
while packedIndex < len(packed.rows) and (maxKey is None or packed.rows[packedIndex].key < maxKey):
if packed.rows[packedIndex].key >= minPackedKey:
data.results.append(packed.rows[packedIndex])
packedIndex += 1
data.finishedPack = packedIndex == len(packed.rows)
data.finishedUnpack = unpackedCount == totalUnpacked
data.packedIndex = packedIndex
# print str(data.results)
# print 'Num Results: %d' % len(data.results)
return data
@fdb.transactional
def setRow(self, tr, rowName, value):
tr[fdb.tuple.pack((self.columnName, _unpackedPrefix, rowName))] = value
@fdb.transactional
def getRow(self, tr, rowName):
unpacked = self._getUnpackedData(tr, rowName)
packedRange = self._getPackedRange(tr, rowName)
if unpacked.present():
return unpacked
else:
packedData = self._getPackedData(rowName, packedRange)[1]
if packedData is None:
return None
return packedData.getRow(rowName)
return None
@fdb.transactional
def delete(self, tr):
tr.clear_range_startswith(self.columnName)
def getColumnStream(self, db, startRow=''):
return _ColumnStream(db, self, startRow)
# This function is not fully transactional. Each compressed block will be created in a transaction
def pack(self, db, startRow='', endRow='\xff'):
currentRow = startRow
numFetched = self.packFetchCount
while numFetched == self.packFetchCount:
# print 'outer: \'' + repr(currentRow) + '\''
try:
tr = db.create_transaction()
packedIndex = 0
packedData = None
lastRow = currentRow
newPack = _PackedData()
oldRows = []
while True:
# print 'inner: \'' + repr(currentRow) + '\''
unpacked = list(self._getUnpackedRange(tr, lastRow, endRow, self.packFetchCount))
unpackedCount = len(unpacked)
if len(unpacked) == 0:
break
if packedData is None:
subKey = self._getSubKey(unpacked[0].key)
packedRange = self._getPackedRange(tr, subKey)
[packedKeyRange, keyRange, packedData] = self._getPackedData(subKey, packedRange, False, False)
if packedKeyRange is not None:
# print 'Deleting old rows'
oldRows.append(fdb.tuple.pack((self.columnName, _packedPrefix, packedKeyRange[0], packedKeyRange[1])))
maxKey = None
if keyRange is not None:
maxKey = keyRange[1]
merged = self._mergeResults(packedData, unpacked, self.packFetchCount, packedIndex, lastRow, maxKey)
for row in merged.results:
oldRows.append(fdb.tuple.pack((self.columnName, _unpackedPrefix, row.key)))
newPack.addRow(row)
lastRow = row.key
# print 'Set lastRow = \'' + repr(lastRow) + '\''
lastRow = lastRow + chr(0)
if (maxKey is not None and merged.finishedPack) or (maxKey is None and newPack.bytes > self.targetChunkSize):
break
# print 'Deleting rows'
for row in oldRows:
# print 'Deleting row ' + repr(row)
del tr[row]
for k, v in newPack.getPackedKeyValues(self, self.targetChunkSize, self.maxChunkSize):
tr[k] = v
tr.commit().wait()
currentRow = lastRow
numFetched = unpackedCount
except fdb.FDBError as e:
if e.code == 1007: # transaction_too_old
pass
# FIXME: Unpack the overlapping packed block and try again
tr.on_error(e.code).wait()
class _ColumnStream:
def __init__(self, db, column, startKey):
self.column = column
self.db = db
self.currentKey = startKey
self.results = []
self.resultsIndex = 0
self.firstRead = True
self.fetchCount = 5
self.packedData = None
self.packedIndex = 0
def __iter__(self):
return self
def next(self):
value = self._readNextRow(self.db)
if value is None:
raise StopIteration
else:
return value
def _readNextRow(self, db):
# print 'Reading next row'
if self.resultsIndex >= len(self.results):
# print 'Fetching rows'
self._fetchRows(db)
if self.resultsIndex >= len(self.results):
# print 'Finished iterating: (%d/%d)' % (self.resultsIndex, len(self.results))
return None
else:
self.currentKey = self.results[self.resultsIndex].key
value = self.results[self.resultsIndex].value
self.resultsIndex += 1
# print 'Returning value (%s, %s)' % (self.currentKey, value)
return (self.currentKey, value)
@fdb.transactional
def _fetchRows(self, tr):
if self.firstRead:
# print 'First fetch'
startKey = self.currentKey
else:
# print 'Subsequent fetch %s' % self.currentKey
startKey = self.currentKey + chr(0)
# print 'Using start key %s' % startKey
# Read next packed and unpacked entries
# FIXME: Should we read unpacked after getting the result of the packed data? If we do, then we can more accurately limit the number
# of results that we get back
unpacked = self.column._getUnpackedRange(tr, startKey, '\xff', self.fetchCount)
if self.packedData is None:
packedRange = self.column._getPackedRange(tr, startKey)
[keyRange, self.packedData] = self.column._getPackedData(startKey, packedRange, False)
merged = self.column._mergeResults(self.packedData, unpacked, self.fetchCount, self.packedIndex, startKey)
if merged.finishedPack:
# print 'reset packed'
self.packedData = None
self.packedIndex = 0
else:
# print 'more packed %d' % merged.packedIndex
self.packedIndex = merged.packedIndex
self.results = merged.results
# print 'Getting range %s - %s (%d)' % (startKey, fdb.tuple.pack((self.column.columnName + chr(0))), self.fetchCount)
self.resultsIndex = 0
self.firstRead = False
class _PackedData:
def __init__(self, packedValue=None):
self.rows = []
self.bytes = 0
if packedValue is not None:
self._unpack(packedValue)
def addRow(self, row):
# print 'adding row %s' % row.key
self.rows.append(row)
self.bytes += len(row.key) + len(row.value) + 12
def getRow(self, rowName):
for row in self.rows:
if row.key == rowName:
return row.value
return None
def getPackedKeyValues(self, column, targetSize, maxSize):
rowIndex = 0
currentByte = 0
results = []
while currentByte < self.bytes:
headerItems = []
size = targetSize
if self.bytes - currentByte < maxSize:
size = maxSize
startRowIndex = rowIndex
startKey = None
endKey = None
bodyLength = 0
packBytes = 0
while rowIndex < len(self.rows) and packBytes < size:
row = self.rows[rowIndex]
headerItems.append(struct.pack('iii', len(row.key), len(row.value), bodyLength) + row.key)
bodyLength += len(row.value)
packBytes += len(row.key) + len(row.value) + 12
rowIndex += 1
if startKey is None:
startKey = row.key
endKey = row.key
header = ''.join(headerItems)
body = ''.join(row.value for row in self.rows[startRowIndex:rowIndex])
results.append(fdb.KeyValue(fdb.tuple.pack((column.columnName, _packedPrefix, startKey, endKey)),
struct.pack('i', len(header)) + header + body))
currentByte += packBytes
return results
    def _unpack(self, packed):
        # Layout: 4-byte header length, then per-row (key_len, value_len, value_offset) + key, then the concatenated values.
        self.bytes = len(packed)
        headerLength = struct.unpack('i', packed[0:4])[0]
        header = packed[4:4 + headerLength]
        body = packed[4 + headerLength:]
        index = 0
        while index < headerLength:
            # print 'header length: %d, %d' % (len(self.header), index)
            (keyLength, valueLength, valueOffset) = struct.unpack('iii', header[index:index + 12])
            key = header[index + 12:index + 12 + keyLength]
            index = index + 12 + keyLength
            value = body[valueOffset:valueOffset + valueLength]
            self.rows.append(fdb.KeyValue(key, value))
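if __name__ == '__main__':
    # Standalone sketch (illustrative addition, no FoundationDB cluster needed) of the
    # packed-value layout built by _PackedData.getPackedKeyValues and parsed by _unpack:
    # a 4-byte header length, then one (key_len, value_len, value_offset) triple followed
    # by the key for each row, then all values concatenated.
    key, value = b'row1', b'hello'
    header = struct.pack('iii', len(key), len(value), 0) + key
    packed = struct.pack('i', len(header)) + header + value
    header_len = struct.unpack('i', packed[0:4])[0]
    key_len, value_len, value_offset = struct.unpack('iii', packed[4:16])
    assert packed[16:16 + key_len] == key
    body = packed[4 + header_len:]
    assert body[value_offset:value_offset + value_len] == value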
|
import numpy as np
from ._base import BaseGrid2D, BaseGrid3D, BaseTraveltime
from ._fteik import ray2d, ray3d
from ._interp import vinterp2d, vinterp3d
class Grid2D(BaseGrid2D):
def __init__(self, *args, **kwargs):
"""
2D grid class.
Parameters
----------
grid : array_like
Grid array.
gridsize : array_like
Grid size (dz, dx).
origin : array_like
Grid origin coordinates.
"""
super().__init__(*args, **kwargs)
class Grid3D(BaseGrid3D):
def __init__(self, *args, **kwargs):
"""
3D grid class.
Parameters
----------
grid : array_like
Grid array.
gridsize : array_like
Grid size (dz, dx, dy).
origin : array_like
Grid origin coordinates.
"""
super().__init__(*args, **kwargs)
class TraveltimeGrid2D(BaseGrid2D, BaseTraveltime):
def __init__(self, grid, gridsize, origin, source, gradient, vzero):
"""
2D traveltime grid class.
Parameters
----------
grid : array_like
Traveltime grid array.
gridsize : array_like
Grid size (dz, dx).
origin : array_like
Grid origin coordinates.
source : array_like
Source coordinates.
gradient : array_like
Gradient grid.
vzero : scalar
Slowness at the source.
"""
super().__init__(
grid=grid,
gridsize=gridsize,
origin=np.asarray(origin, dtype=np.float64),
source=np.asarray(source, dtype=np.float64),
gradient=(
np.asarray(gradient, dtype=np.float64) if gradient is not None else None
),
vzero=vzero,
)
def __call__(self, points, fill_value=np.nan):
"""
Bilinear apparent velocity interpolation.
Parameters
----------
points : array_like
Query point coordinates or list of point coordinates.
fill_value : scalar, optional, default nan
Returned value for out-of-bound query points.
Returns
-------
scalar or :class:`np.ndarray`
Interpolated traveltime(s).
"""
return vinterp2d(
self.zaxis,
self.xaxis,
self._grid,
np.asarray(points, dtype=np.float64),
self._source,
self._vzero,
fill_value,
)
def raytrace(self, points, stepsize=None, max_step=None, honor_grid=False):
"""
2D a posteriori ray-tracing.
Parameters
----------
points : array_like
Query point coordinates or list of point coordinates.
stepsize : scalar or None, optional, default None
Unit length of ray. `stepsize` is ignored if `honor_grid` is `True`.
max_step : scalar or None, optional, default None
Maximum number of steps.
honor_grid : bool, optional, default False
If `True`, coordinates of raypaths are calculated with respect to traveltime grid discretization.
Returns
-------
:class:`np.ndarray` or list of :class:`np.ndarray`
Raypath(s).
"""
gradient = self.gradient
if honor_grid or not stepsize:
stepsize = np.min(self._gridsize)
if not max_step:
nz, nx = self.shape
dz, dx = self._gridsize
max_dist = 2.0 * ((nz * dz) ** 2 + (nx * dx) ** 2) ** 0.5
max_step = int(max_dist / stepsize)
return ray2d(
self.zaxis,
self.xaxis,
gradient[0].grid,
gradient[1].grid,
np.asarray(points, dtype=np.float64),
self._source,
stepsize,
max_step,
honor_grid,
)
@property
def gradient(self):
"""Return Z and X gradient grids as a list of :class:`fteikpy.Grid2D`."""
if self._gradient is None:
raise ValueError(
"no gradient grid, use option `return_gradient` to return gradient grids"
)
return [
Grid2D(self._gradient[:, :, i], self._gridsize, self._origin)
for i in range(2)
]
class TraveltimeGrid3D(BaseGrid3D, BaseTraveltime):
def __init__(self, grid, gridsize, origin, source, gradient, vzero):
"""
3D traveltime grid class.
Parameters
----------
grid : array_like
Traveltime grid array.
gridsize : array_like
Grid size (dz, dx, dy).
origin : array_like
Grid origin coordinates.
source : array_like
Source coordinates.
gradient : array_like
Gradient grid.
vzero : scalar
Slowness at the source.
"""
super().__init__(
grid=grid,
gridsize=gridsize,
origin=np.asarray(origin, dtype=np.float64),
source=np.asarray(source, dtype=np.float64),
gradient=(
np.asarray(gradient, dtype=np.float64) if gradient is not None else None
),
vzero=vzero,
)
def __call__(self, points, fill_value=np.nan):
"""
Trilinear apparent velocity interpolation.
Parameters
----------
points : array_like
Query point coordinates or list of point coordinates.
fill_value : scalar, optional, default nan
Returned value for out-of-bound query points.
Returns
-------
scalar or :class:`np.ndarray`
Interpolated traveltime(s).
"""
return vinterp3d(
self.zaxis,
self.xaxis,
self.yaxis,
self._grid,
np.asarray(points, dtype=np.float64),
self._source,
self._vzero,
fill_value,
)
def raytrace(self, points, stepsize=None, max_step=None, honor_grid=False):
"""
3D a posteriori ray-tracing.
Parameters
----------
points : array_like
Query point coordinates or list of point coordinates.
stepsize : scalar or None, optional, default None
Unit length of ray. `stepsize` is ignored if `honor_grid` is `True`.
max_step : scalar or None, optional, default None
Maximum number of steps.
honor_grid : bool, optional, default False
If `True`, coordinates of raypaths are calculated with respect to traveltime grid discretization.
Returns
-------
:class:`np.ndarray` or list of :class:`np.ndarray`
Raypath(s).
"""
gradient = self.gradient
if honor_grid or not stepsize:
stepsize = np.min(self._gridsize)
if not max_step:
nz, nx, ny = self.shape
dz, dx, dy = self._gridsize
max_dist = 2.0 * ((nz * dz) ** 2 + (nx * dx) ** 2 + (ny * dy) ** 2) ** 0.5
max_step = int(max_dist / stepsize)
return ray3d(
self.zaxis,
self.xaxis,
self.yaxis,
gradient[0].grid,
gradient[1].grid,
gradient[2].grid,
np.asarray(points, dtype=np.float64),
self._source,
stepsize,
max_step,
honor_grid,
)
@property
def gradient(self):
"""Return Z, X and Y gradient grids as a list of :class:`fteikpy.Grid3D`."""
if self._gradient is None:
raise ValueError(
"no gradient grid, use option `return_gradient` to return gradient grids"
)
return [
Grid3D(self._gradient[:, :, :, i], self._gridsize, self._origin)
for i in range(3)
]
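# A minimal usage sketch (illustrative, not part of the original module). It assumes an
# fteikpy-style `Eikonal2D` solver that returns a `TraveltimeGrid2D` like the class above;
# the import path, solver signature and values below are assumptions, not the verified API.
if __name__ == "__main__":
    import numpy as np
    from fteikpy import Eikonal2D  # assumed solver entry point

    velocity = np.full((8, 10), 3.0)                      # constant 3 km/s model
    eikonal = Eikonal2D(velocity, gridsize=(1.0, 1.0))
    tt = eikonal.solve([0.0, 0.0], return_gradient=True)  # assumed to return a TraveltimeGrid2D

    print(tt([[4.0, 5.0], [2.0, 3.0]]))                   # interpolated traveltimes
    print(tt.raytrace([4.0, 5.0])[:3])                    # first samples of the raypath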
|
import numpy as np
def decimal_to_binlist(decimal, digits): # ex) 6,3 --> [1,1,0]
bin_str = "{:0{digits}b}".format(decimal, digits=digits)
return [int(s) for s in list(bin_str)]
def binlist_to_decimal(bin_list): # ex) [0,1,1] --> 3
return int("".join([str(i) for i in bin_list]), 2)
def make_hamming_matrix(r):
# parity check matrix (H)
A = []
for x in range(1,2**r):
bin_list = decimal_to_binlist(x, r)
if sum(bin_list) == 1: continue
A.append(bin_list)
A = np.array(A)
I_H = np.eye(r, dtype=int)
H = np.concatenate([A, I_H])
    # represent each row of the H matrix as an integer (used by the error correction algorithm)
H_int = [binlist_to_decimal(row) for row in H]
# generator matrix (G)
I_G = np.eye(2**r-r-1, dtype=int)
G = np.concatenate([I_G, A], 1)
return G, H, H_int
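# Illustrative sanity check (not part of the original script): every codeword x @ G has a
# zero syndrome, because G @ H = [I | A] @ [[A], [I]] = A + A = 0 (mod 2).
def check_hamming_matrix(r):
    G, H, _ = make_hamming_matrix(r)
    assert not ((G @ H) % 2).any(), "G and H are not orthogonal over GF(2)"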
def generate_data(k, N): # random k-bits data
for _ in range(N):
yield np.random.randint(2, size=k)
def add_noise(d_in): # flip one randomly selected bit
idx = np.random.randint(len(d_in))
err = np.array([1 if i == idx else 0 for i in range(len(d_in))])
d_out = (d_in + err) % 2
return d_out
def correct_error(d_in, H, H_int):  # H is passed in explicitly so the function does not rely on a module-level global
d_out = d_in.copy()
p = (d_out @ H) % 2
x = binlist_to_decimal(p)
err_idx = H_int.index(x)
d_out[err_idx] = (d_out[err_idx] + 1) % 2 # bit flip (recover)
return d_out
if __name__ == '__main__':
r = 3
n = 2**r - 1
k = 2**r - r - 1
N = 10
G, H, H_int = make_hamming_matrix(r)
print("* input(random) -> encode -> add noise(random 1-bit flip) -> correct -> decode:")
err_count = 0
for x in generate_data(k, N):
y = (x @ G)%2
y_error = add_noise(y)
        y_correct = correct_error(y_error, H, H_int)
x_correct = y_correct[0:k] # decode (= extract 1st to k-th elements)
print("{0:} -> {1:} -> {2:} -> {3:} -> {4:}".format(x,y,y_error,y_correct,x_correct))
        if sum((x + x_correct) % 2) != 0: # any differing bit means the block was not recovered
err_count += 1
err_rate = err_count / N
print("* error rate = {0:} (count:{1:} / total:{2:})".format(err_rate, err_count, N))
|
from __future__ import division
import os
import time
from shutil import copyfile
from glob import glob
import tensorflow as tf
import numpy as np
import config
from collections import namedtuple
from module import *
from utils import *
from ops import *
from metrics import *
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
class cyclegan(object):
def __init__(self, sess, args):
self.sess = sess
self.batch_size = args.batch_size
self.image_size = args.fine_size # cropped size
self.time_step = args.time_step
self.pitch_range = args.pitch_range
self.input_c_dim = args.input_nc # number of input image channels
self.output_c_dim = args.output_nc # number of output image channels
self.L1_lambda = args.L1_lambda
self.gamma = args.gamma
self.sigma_d = args.sigma_d
self.dataset_dir = args.dataset_dir
self.dataset_A_dir = args.dataset_A_dir
self.dataset_B_dir = args.dataset_B_dir
self.sample_dir = args.sample_dir
self.model = args.model
self.discriminator = discriminator
self.generator = generator_resnet
self.criterionGAN = mae_criterion
OPTIONS = namedtuple('OPTIONS', 'batch_size '
'image_size '
'gf_dim '
'df_dim '
'output_c_dim '
'is_training')
self.options = OPTIONS._make((args.batch_size,
args.fine_size,
args.ngf,
args.ndf,
args.output_nc,
args.phase == 'train'))
self._build_model()
self.saver = tf.train.Saver(max_to_keep=30)
self.now_datetime = get_now_datetime()
self.pool = ImagePool(args.max_size)
def _build_model(self):
# define some placeholders
self.real_data = tf.placeholder(tf.float32, [self.batch_size, self.time_step, self.pitch_range,
self.input_c_dim + self.output_c_dim], name='real_A_and_B')
if self.model != 'base':
self.real_mixed = tf.placeholder(tf.float32, [self.batch_size, self.time_step, self.pitch_range,
self.input_c_dim], name='real_A_and_B_mixed')
self.real_A = self.real_data[:, :, :, :self.input_c_dim]
self.real_B = self.real_data[:, :, :, self.input_c_dim:self.input_c_dim + self.output_c_dim]
self.gaussian_noise = tf.placeholder(tf.float32, [self.batch_size, self.time_step, self.pitch_range,
self.input_c_dim], name='gaussian_noise')
# Generator: A - B - A
self.fake_B = self.generator(self.real_A, self.options, False, name="generatorA2B")
self.fake_A_ = self.generator(self.fake_B, self.options, False, name="generatorB2A")
# Generator: B - A - B
self.fake_A = self.generator(self.real_B, self.options, True, name="generatorB2A")
self.fake_B_ = self.generator(self.fake_A, self.options, True, name="generatorA2B")
# to binary
self.real_A_binary = to_binary(self.real_A, 0.5)
self.real_B_binary = to_binary(self.real_B, 0.5)
self.fake_A_binary = to_binary(self.fake_A, 0.5)
self.fake_B_binary = to_binary(self.fake_B, 0.5)
self.fake_A__binary = to_binary(self.fake_A_, 0.5)
self.fake_B__binary = to_binary(self.fake_B_, 0.5)
# Discriminator: Fake
self.DB_fake = self.discriminator(self.fake_B + self.gaussian_noise, self.options,
reuse=False, name="discriminatorB")
self.DA_fake = self.discriminator(self.fake_A + self.gaussian_noise, self.options,
reuse=False, name="discriminatorA")
# Discriminator: Real
self.DA_real = self.discriminator(self.real_A + self.gaussian_noise, self.options, reuse=True,
name="discriminatorA")
self.DB_real = self.discriminator(self.real_B + self.gaussian_noise, self.options, reuse=True,
name="discriminatorB")
self.fake_A_sample = tf.placeholder(tf.float32, [self.batch_size, self.time_step, self.pitch_range,
self.input_c_dim], name='fake_A_sample')
self.fake_B_sample = tf.placeholder(tf.float32, [self.batch_size, self.time_step, self.pitch_range,
self.input_c_dim], name='fake_B_sample')
self.DA_fake_sample = self.discriminator(self.fake_A_sample + self.gaussian_noise,
self.options, reuse=True, name="discriminatorA")
self.DB_fake_sample = self.discriminator(self.fake_B_sample + self.gaussian_noise,
self.options, reuse=True, name="discriminatorB")
if self.model != 'base':
# Discriminator: All
self.DA_real_all = self.discriminator(self.real_mixed + self.gaussian_noise, self.options, reuse=False,
name="discriminatorA_all")
self.DA_fake_sample_all = self.discriminator(self.fake_A_sample + self.gaussian_noise,
self.options, reuse=True, name="discriminatorA_all")
self.DB_real_all = self.discriminator(self.real_mixed + self.gaussian_noise, self.options, reuse=False,
name="discriminatorB_all")
self.DB_fake_sample_all = self.discriminator(self.fake_B_sample + self.gaussian_noise,
self.options, reuse=True, name="discriminatorB_all")
# Generator loss
self.cycle_loss = self.L1_lambda * abs_criterion(self.real_A, self.fake_A_) \
+ self.L1_lambda * abs_criterion(self.real_B, self.fake_B_)
self.g_loss_a2b = self.criterionGAN(self.DB_fake, tf.ones_like(self.DB_fake)) + self.cycle_loss
self.g_loss_b2a = self.criterionGAN(self.DA_fake, tf.ones_like(self.DA_fake)) + self.cycle_loss
        self.g_loss = self.g_loss_a2b + self.g_loss_b2a - self.cycle_loss  # cycle_loss is included in both terms above, so subtract one copy
# Discriminator loss
self.db_loss_real = self.criterionGAN(self.DB_real, tf.ones_like(self.DB_real))
self.db_loss_fake = self.criterionGAN(self.DB_fake_sample, tf.zeros_like(self.DB_fake_sample))
self.db_loss = (self.db_loss_real + self.db_loss_fake) / 2
self.da_loss_real = self.criterionGAN(self.DA_real, tf.ones_like(self.DA_real))
self.da_loss_fake = self.criterionGAN(self.DA_fake_sample, tf.zeros_like(self.DA_fake_sample))
self.da_loss = (self.da_loss_real + self.da_loss_fake) / 2
self.d_loss = self.da_loss + self.db_loss
if self.model != 'base':
self.db_all_loss_real = self.criterionGAN(self.DB_real_all, tf.ones_like(self.DB_real_all))
self.db_all_loss_fake = self.criterionGAN(self.DB_fake_sample_all, tf.zeros_like(self.DB_fake_sample_all))
self.db_all_loss = (self.db_all_loss_real + self.db_all_loss_fake) / 2
self.da_all_loss_real = self.criterionGAN(self.DA_real_all, tf.ones_like(self.DA_real_all))
self.da_all_loss_fake = self.criterionGAN(self.DA_fake_sample_all, tf.zeros_like(self.DA_fake_sample_all))
self.da_all_loss = (self.da_all_loss_real + self.da_all_loss_fake) / 2
self.d_all_loss = self.da_all_loss + self.db_all_loss
self.D_loss = self.d_loss + self.gamma * self.d_all_loss
# Define all summaries
self.g_loss_a2b_sum = tf.summary.scalar("g_loss_a2b", self.g_loss_a2b)
self.g_loss_b2a_sum = tf.summary.scalar("g_loss_b2a", self.g_loss_b2a)
self.g_loss_sum = tf.summary.scalar("g_loss", self.g_loss)
self.cycle_loss_sum = tf.summary.scalar("cycle_loss", self.cycle_loss)
self.g_sum = tf.summary.merge([self.g_loss_a2b_sum, self.g_loss_b2a_sum, self.g_loss_sum, self.cycle_loss_sum])
self.db_loss_sum = tf.summary.scalar("db_loss", self.db_loss)
self.da_loss_sum = tf.summary.scalar("da_loss", self.da_loss)
self.d_loss_sum = tf.summary.scalar("d_loss", self.d_loss)
self.db_loss_real_sum = tf.summary.scalar("db_loss_real", self.db_loss_real)
self.db_loss_fake_sum = tf.summary.scalar("db_loss_fake", self.db_loss_fake)
self.da_loss_real_sum = tf.summary.scalar("da_loss_real", self.da_loss_real)
self.da_loss_fake_sum = tf.summary.scalar("da_loss_fake", self.da_loss_fake)
if self.model != 'base':
self.d_all_loss_sum = tf.summary.scalar("d_all_loss", self.d_all_loss)
            self.D_loss_sum = tf.summary.scalar("D_loss", self.D_loss)
self.d_sum = tf.summary.merge([self.da_loss_sum, self.da_loss_real_sum, self.da_loss_fake_sum,
self.db_loss_sum, self.db_loss_real_sum, self.db_loss_fake_sum,
self.d_loss_sum, self.d_all_loss_sum, self.D_loss_sum])
else:
self.d_sum = tf.summary.merge([self.da_loss_sum, self.da_loss_real_sum, self.da_loss_fake_sum,
self.db_loss_sum, self.db_loss_real_sum, self.db_loss_fake_sum,
self.d_loss_sum])
# Test
self.test_A = tf.placeholder(tf.float32, [None, self.time_step, self.pitch_range,
self.input_c_dim], name='test_A')
self.test_B = tf.placeholder(tf.float32, [None, self.time_step, self.pitch_range,
self.output_c_dim], name='test_B')
# A - B - A
self.testB = self.generator(self.test_A, self.options, True, name="generatorA2B")
self.testA_ = self.generator(self.testB, self.options, True, name='generatorB2A')
# B - A - B
self.testA = self.generator(self.test_B, self.options, True, name="generatorB2A")
self.testB_ = self.generator(self.testA, self.options, True, name='generatorA2B')
# to binary
self.test_A_binary = to_binary(self.test_A, 0.5)
self.test_B_binary = to_binary(self.test_B, 0.5)
self.testA_binary = to_binary(self.testA, 0.5)
self.testB_binary = to_binary(self.testB, 0.5)
self.testA__binary = to_binary(self.testA_, 0.5)
self.testB__binary = to_binary(self.testB_, 0.5)
t_vars = tf.trainable_variables()
self.d_vars = [var for var in t_vars if 'discriminator' in var.name]
self.g_vars = [var for var in t_vars if 'generator' in var.name]
        # Uncomment to print all trainable variable names when debugging:
        # for var in t_vars:
        #     print(var.name)
def train(self, args):
# Learning rate
self.lr = tf.placeholder(tf.float32, None, name='learning_rate')
# Discriminator and Generator Optimizer
if self.model == 'base':
self.d_optim = tf.train.AdamOptimizer(self.lr, beta1=args.beta1).minimize(self.d_loss, var_list=self.d_vars)
else:
self.d_optim = tf.train.AdamOptimizer(self.lr, beta1=args.beta1).minimize(self.D_loss, var_list=self.d_vars)
self.g_optim = tf.train.AdamOptimizer(self.lr, beta1=args.beta1).minimize(self.g_loss, var_list=self.g_vars)
init_op = tf.global_variables_initializer()
self.sess.run(init_op)
# define the path which stores the log file, format is "{A}2{B}_{date}_{model}_{sigma}".
log_dir = './logs/{}2{}_{}_{}_{}'.format(self.dataset_A_dir, self.dataset_B_dir, self.now_datetime,
self.model, self.sigma_d)
# log_dir = './logs/{}2{}_{}_{}_{}'.format(self.dataset_A_dir, self.dataset_B_dir, '2018-06-10',
# self.model, self.sigma_d)
self.writer = tf.summary.FileWriter(log_dir, self.sess.graph)
# Data from domain A and B, and mixed dataset for partial and full models.
        dA_filepath = './{}/train/*.*'.format(self.dataset_A_dir)
        dB_filepath = './{}/train/*.*'.format(self.dataset_B_dir)
dataA = glob(dA_filepath)
dataB = glob(dB_filepath)
if self.model == 'partial':
data_mixed = dataA + dataB
if self.model == 'full':
data_mixed = glob('./Dataset (use directly)/JCP_mixed/*.*')
# TODO(1641367382@qq.com):need to replace constant value with a new variable
counter = 1
start_time = time.time()
if args.continue_train:
if self.load(args.checkpoint_dir):
print(" [*] Load SUCCESS")
else:
print(" [!] Load failed...")
for epoch in range(args.epoch):
# Shuffle training data
np.random.shuffle(dataA)
np.random.shuffle(dataB)
if self.model != 'base': np.random.shuffle(data_mixed)
# Get the proper number of batches
batch_idxs = min(min(len(dataA), len(dataB)), args.train_size) // self.batch_size
print(batch_idxs)
# learning rate starts to decay when reaching the threshold
lr = args.lr if epoch < args.epoch_step else args.lr * (args.epoch-epoch) / (args.epoch-args.epoch_step)
for idx in range(0, batch_idxs):
# To feed real_data
batch_files = list(zip(dataA[idx * self.batch_size:(idx + 1) * self.batch_size],
dataB[idx * self.batch_size:(idx + 1) * self.batch_size]))
batch_images = [load_npy_data(batch_file) for batch_file in batch_files]
batch_images = np.array(batch_images).astype(np.float32)
# To feed gaussian noise
gaussian_noise = np.abs(np.random.normal(0, self.sigma_d, [self.batch_size, self.time_step,
self.pitch_range, self.input_c_dim]))
if self.model == 'base':
# Update G network and record fake outputs
fake_A, fake_B, _, summary_str, g_loss_a2b, g_loss_b2a, cycle_loss, g_loss = self.sess.run([self.fake_A,
self.fake_B, self.g_optim, self.g_sum, self.g_loss_a2b, self.g_loss_b2a, self.cycle_loss,
self.g_loss], feed_dict={self.real_data: batch_images, self.gaussian_noise: gaussian_noise,
self.lr: lr})
# Update D network
_, summary_str, da_loss, db_loss, d_loss = self.sess.run([
self.d_optim, self.d_sum, self.da_loss, self.db_loss, self.d_loss],
feed_dict={self.real_data: batch_images, self.fake_A_sample: fake_A, self.fake_B_sample: fake_B,
self.lr: lr, self.gaussian_noise: gaussian_noise})
print('=================================================================')
print(("Epoch: [%2d] [%4d/%4d] time: %4.4f, d_loss: %6.2f, G_loss: %6.2f" %
(epoch, idx, batch_idxs, time.time() - start_time, d_loss, g_loss)))
print(("++++++++++G_loss_A2B: %6.2f G_loss_B2A: %6.2f Cycle_loss: %6.2f DA_loss: %6.2f DB_loss: %6.2f" %
(g_loss_a2b, g_loss_b2a, cycle_loss, da_loss, db_loss)))
else:
# To feed real_mixed
batch_files_mixed = data_mixed[idx * self.batch_size:(idx + 1) * self.batch_size]
batch_images_mixed = [np.load(batch_file) * 1. for batch_file in batch_files_mixed]
batch_images_mixed = np.array(batch_images_mixed).astype(np.float32)
# Update G network and record fake outputs
fake_A, fake_B, _, summary_str, g_loss_a2b, g_loss_b2a, cycle_loss, g_loss = self.sess.run([
self.fake_A,self.fake_B, self.g_optim, self.g_sum, self.g_loss_a2b, self.g_loss_b2a,
self.cycle_loss, self.g_loss], feed_dict={self.real_data: batch_images,
self.gaussian_noise: gaussian_noise, self.lr: lr,
self.real_mixed: batch_images_mixed})
self.writer.add_summary(summary_str, counter)
[fake_A, fake_B] = self.pool([fake_A, fake_B])
# Update D network
_, summary_str, da_loss, db_loss, d_loss, da_all_loss, db_all_loss, d_all_loss, D_loss = self.sess.run([
self.d_optim, self.d_sum, self.da_loss, self.db_loss, self.d_loss, self.da_all_loss,
self.db_all_loss, self.d_all_loss, self.D_loss],
feed_dict={self.real_data: batch_images, self.fake_A_sample: fake_A, self.fake_B_sample: fake_B,
self.lr: lr, self.gaussian_noise: gaussian_noise, self.real_mixed: batch_images_mixed})
self.writer.add_summary(summary_str, counter)
print('=================================================================')
print(("Epoch: [%2d] [%4d/%4d] time: %4.4f D_loss: %6.2f, d_loss: %6.2f, d_all_loss: %6.2f, "
"G_loss: %6.2f" %
(epoch, idx, batch_idxs, time.time() - start_time, D_loss, d_loss, d_all_loss, g_loss)))
print(("++++++++++G_loss_A2B: %6.2f G_loss_B2A: %6.2f Cycle_loss: %6.2f DA_loss: %6.2f DB_loss: %6.2f, "
"DA_all_loss: %6.2f DB_all_loss: %6.2f" %
(g_loss_a2b, g_loss_b2a, cycle_loss, da_loss, db_loss, da_all_loss, db_all_loss)))
counter += 1
if np.mod(counter, args.print_freq) == 1:
sample_dir = os.path.join(self.sample_dir, '{}2{}_{}_{}_{}'.format(self.dataset_A_dir,
self.dataset_B_dir,
self.now_datetime,
self.model,
self.sigma_d))
# sample_dir = os.path.join(self.sample_dir, '{}2{}_{}_{}_{}'.format(self.dataset_A_dir,
# self.dataset_B_dir,
# '2018-06-10',
# self.model,
# self.sigma_d))
if not os.path.exists(sample_dir):
os.makedirs(sample_dir)
self.sample_model(sample_dir, epoch, idx)
if np.mod(counter, batch_idxs) == 1:
self.save(args.checkpoint_dir, counter)
def save(self, checkpoint_dir, step):
model_name = "cyclegan.model"
model_dir = "{}2{}_{}_{}_{}".format(self.dataset_A_dir, self.dataset_B_dir, self.now_datetime, self.model,
self.sigma_d)
# model_dir = "{}2{}_{}_{}_{}".format(self.dataset_A_dir, self.dataset_B_dir, '2018-06-14', self.model,
# self.sigma_d)
checkpoint_dir = os.path.join(checkpoint_dir, model_dir)
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
self.saver.save(self.sess, os.path.join(checkpoint_dir, model_name), global_step=step)
def load(self, checkpoint_dir):
print(" [*] Reading checkpoint...")
model_dir = "{}2{}_{}_{}_{}".format(self.dataset_A_dir, self.dataset_B_dir, self.now_datetime, self.model,
self.sigma_d)
# model_dir = "{}2{}_{}_{}_{}".format(self.dataset_A_dir, self.dataset_B_dir, '2018-06-14', self.model,
# self.sigma_d)
checkpoint_dir = os.path.join(checkpoint_dir, model_dir)
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
# self.saver.restore(self.sess, os.path.join(checkpoint_dir, 'cyclegan.model-7011'))
return True
else:
return False
def sample_model(self, sample_dir, epoch, idx):
print('Processing sample......')
        # Sample data from the two domains A and B (drawn from the training set), sorted in ascending order
dataA = glob('./{}/train/*.*'.format(self.dataset_A_dir))
dataB = glob('./{}/train/*.*'.format(self.dataset_B_dir))
dataA.sort(key=lambda x: int(os.path.splitext(os.path.basename(x))[0].split('_')[-1]))
dataB.sort(key=lambda x: int(os.path.splitext(os.path.basename(x))[0].split('_')[-1]))
batch_files = list(zip(dataA[:self.batch_size], dataB[:self.batch_size]))
sample_images = [load_npy_data(batch_file) for batch_file in batch_files]
sample_images = np.array(sample_images).astype(np.float32)
real_A_binary, fake_A_binary, fake_B_binary = self.sess.run([self.real_A_binary, self.fake_A_binary,
self.fake_B_binary],
feed_dict={self.real_data: sample_images})
real_B_binary, fake_A__binary, fake_B__binary = self.sess.run([self.real_B_binary, self.fake_A__binary,
self.fake_B__binary],
feed_dict={self.real_data: sample_images})
if not os.path.exists(os.path.join(sample_dir, 'B2A')):
os.makedirs(os.path.join(sample_dir, 'B2A'))
if not os.path.exists(os.path.join(sample_dir, 'A2B')):
os.makedirs(os.path.join(sample_dir, 'A2B'))
save_midis(real_A_binary, './{}/A2B/{:02d}_{:04d}_origin.mid'.format(sample_dir, epoch, idx))
save_midis(fake_B_binary, './{}/A2B/{:02d}_{:04d}_transfer.mid'.format(sample_dir, epoch, idx))
save_midis(fake_A__binary, './{}/A2B/{:02d}_{:04d}_cycle.mid'.format(sample_dir, epoch, idx))
save_midis(real_B_binary, './{}/B2A/{:02d}_{:04d}_origin.mid'.format(sample_dir, epoch, idx))
save_midis(fake_A_binary, './{}/B2A/{:02d}_{:04d}_transfer.mid'.format(sample_dir, epoch, idx))
save_midis(fake_B__binary, './{}/B2A/{:02d}_{:04d}_cycle.mid'.format(sample_dir, epoch, idx))
def test(self, args):
init_op = tf.global_variables_initializer()
self.sess.run(init_op)
if args.which_direction == 'AtoB':
sample_files = glob('./{}/test/*.*'.format(self.dataset_A_dir))
elif args.which_direction == 'BtoA':
sample_files = glob('./{}/test/*.*'.format(self.dataset_B_dir))
else:
raise Exception('--which_direction must be AtoB or BtoA')
sample_files.sort(key=lambda x: int(os.path.splitext(os.path.basename(x))[0].split('_')[-1]))
if self.load(args.checkpoint_dir):
print(" [*] Load SUCCESS")
else:
print(" [!] Load failed...")
if args.which_direction == 'AtoB':
out_origin, out_var, out_var_cycle, in_var = (self.test_A_binary, self.testB_binary, self.testA__binary,
self.test_A)
else:
out_origin, out_var, out_var_cycle, in_var = (self.test_B_binary, self.testA_binary, self.testB__binary,
self.test_B)
test_dir_mid = os.path.join(args.test_dir, '{}2{}_{}_{}_{}/{}/mid'.format(self.dataset_A_dir,
self.dataset_B_dir,
self.now_datetime,
self.model,
self.sigma_d,
args.which_direction))
if not os.path.exists(test_dir_mid):
os.makedirs(test_dir_mid)
test_dir_npy = os.path.join(args.test_dir, '{}2{}_{}_{}_{}/{}/npy'.format(self.dataset_A_dir,
self.dataset_B_dir,
self.now_datetime,
self.model,
self.sigma_d,
args.which_direction))
if not os.path.exists(test_dir_npy):
os.makedirs(test_dir_npy)
for idx in range(len(sample_files)):
print('Processing midi: ', sample_files[idx])
sample_npy = np.load(sample_files[idx]) * 1.
sample_npy_re = sample_npy.reshape(1, sample_npy.shape[0], sample_npy.shape[1], 1)
midi_path_origin = os.path.join(test_dir_mid, '{}_origin.mid'.format(idx + 1))
midi_path_transfer = os.path.join(test_dir_mid, '{}_transfer.mid'.format(idx + 1))
midi_path_cycle = os.path.join(test_dir_mid, '{}_cycle.mid'.format(idx + 1))
origin_midi, fake_midi, fake_midi_cycle = self.sess.run([out_origin, out_var, out_var_cycle],
feed_dict={in_var: sample_npy_re})
save_midis(origin_midi, midi_path_origin)
save_midis(fake_midi, midi_path_transfer)
save_midis(fake_midi_cycle, midi_path_cycle)
npy_path_origin = os.path.join(test_dir_npy, 'origin')
npy_path_transfer = os.path.join(test_dir_npy, 'transfer')
npy_path_cycle = os.path.join(test_dir_npy, 'cycle')
if not os.path.exists(npy_path_origin):
os.makedirs(npy_path_origin)
if not os.path.exists(npy_path_transfer):
os.makedirs(npy_path_transfer)
if not os.path.exists(npy_path_cycle):
os.makedirs(npy_path_cycle)
np.save(os.path.join(npy_path_origin, '{}_origin.npy'.format(idx + 1)), origin_midi)
np.save(os.path.join(npy_path_transfer, '{}_transfer.npy'.format(idx + 1)), fake_midi)
np.save(os.path.join(npy_path_cycle, '{}_cycle.npy'.format(idx + 1)), fake_midi_cycle)
def test_famous(self, args):
init_op = tf.global_variables_initializer()
self.sess.run(init_op)
song = np.load('./datasets/famous_songs/P2C/merged_npy/YMCA.npy')
# TODO(1641367382@qq.com): there is no such a file in the dir
print(song.shape)
if self.load(args.checkpoint_dir):
print(" [*] Load SUCCESS")
else:
print(" [!] Load failed...")
if args.which_direction == 'AtoB':
out_var, in_var = (self.testB_binary, self.test_A)
else:
out_var, in_var = (self.testA_binary, self.test_B)
transfer = self.sess.run(out_var, feed_dict={in_var: song * 1.})
save_midis(transfer, './datasets/famous_songs/P2C/transfer/YMCA.mid', 127)
# TODO(1641367382@qq.com): there is no such a file in the dir
np.save('./datasets/famous_songs/P2C/transfer/YMCA.npy', transfer)
# TODO(1641367382@qq.com): there is no such a file in the dir
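# A minimal launch sketch (illustrative, not part of the original file). It assumes the
# argparse-style options referenced above (batch_size, fine_size, ngf, ...) and fills them
# with placeholder values only; adjust paths and hyperparameters to the actual experiment.
if __name__ == '__main__':
    import argparse
    args = argparse.Namespace(
        batch_size=16, fine_size=128, time_step=64, pitch_range=84,
        input_nc=1, output_nc=1, L1_lambda=10.0, gamma=1.0, sigma_d=0.0,
        dataset_dir='JC', dataset_A_dir='JC_J', dataset_B_dir='JC_C',
        sample_dir='./samples', model='base', ngf=64, ndf=64, phase='train',
        max_size=50, lr=0.0002, beta1=0.5, epoch=30, epoch_step=10,
        train_size=1e8, print_freq=100, continue_train=False,
        checkpoint_dir='./checkpoint')
    with tf.Session() as sess:
        cyclegan(sess, args).train(args)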
|
import sys
from math import log
from itertools import combinations
class CipherDescription:
def __init__(self, state_size):
'''
Create an empty instance of a cipher of given state size
'''
self.state_size = state_size
self.temporaries = set()
self.rounds = 1
self.transition = []
self.sboxes = dict()
self.description = ''
def apply_permutation(self, permutation):
'''
Extend current round transition by a permutation
A permutation must be given as a list of state variables.
'''
self.check_permutation(permutation)
self.transition.append((permutation, 'PERM'))
self.description += permutation[-1] + ' -> '
self.description += ' -> '.join(var for var in permutation) + '\n'
def add_sbox(self, sbox_name, sbox):
'''
Add an S-box definition to the available S-boxes
The S-box must be given as a list of integers. It is then
available under the supplied name.
'''
        if not log(len(sbox), 2).is_integer():
            print("Invalid S-box size: needs to be a power of two.")
            sys.exit(1)
self.sboxes[sbox_name] = sbox
def apply_sbox(self, sbox_name, input_bits, output_bits):
'''
Extend current round transition by application of an S-box
'''
if sbox_name not in self.sboxes:
raise KeyError('{} not in self.sboxes'.format(sbox_name))
if len(self.sboxes[sbox_name]) != 2**len(input_bits):
            print('Size of S-box "{}" incompatible with number of bits '
                  '({})'.format(sbox_name, len(input_bits)))
sys.exit(1)
self.transition.append((sbox_name, input_bits, output_bits, 'SBOX'))
self.description += ', '.join(input_bits)
self.description += ' -> [' + sbox_name + '] -> '
self.description += ', '.join(output_bits) + '\n'
def apply_xor(self, source_1, source_2, target, description=True):
'''
Extend current round transition by application of an XOR
'''
self.check_source(source_1)
self.check_source(source_2)
self.check_target(target)
self.transition.append((target, source_1, source_2, 'XOR'))
if description:
self.description += '{} = {} + {}\n'.format(target, source_1, source_2)
def apply_and(self, source_1, source_2, target, description=True):
'''
Extend current round transition by application of an AND
'''
self.check_source(source_1)
self.check_source(source_2)
self.check_target(target)
self.transition.append((target, source_1, source_2, 'AND'))
if description:
self.description += '{} = {} & {}\n'.format(target, source_1, source_2)
def apply_mov(self, source, target, description=True):
'''
        Extend current round transition by application of a MOV
'''
self.check_source(source)
self.check_target(target)
self.transition.append((target, source, 'MOV'))
if description:
self.description += '{} = {}\n'.format(target, source)
def set_rounds(self, rounds):
'''
Set the number of transitions used in the cipher
'''
self.rounds = rounds
def print_description(self):
'''
Print the description of the round transition
'''
print(self.description[:-1])
def check_source(self, source):
'''
Check that a given source variable is valid and available
'''
if source[0] == 's':
number = int(source[1:])
if number >= self.state_size:
raise ValueError("There are only {} state bits."\
.format(self.state_size))
elif source[0] == 't':
if source not in self.temporaries:
raise ValueError("There is no temporary bit named '{}' "
"at this point.".format(source))
else:
raise ValueError("Only 's' and 't' are allowed in variables.")
def check_target(self, target):
'''
Check that a given target variable is valid and potentially add it
'''
if target[0] == 's':
number = int(target[1:])
if number >= self.state_size:
raise ValueError("There are only {} state bits."\
.format(self.state_size))
elif target[0] == 't' or target[0] == 'b':
self.temporaries.add(target)
else:
raise ValueError("Only 's' and 't' are allowed in variables.")
def check_permutation(self, permutation):
'''
Check that permutation is correctly specified
'''
for var in permutation:
if var[0] != 's':
raise ValueError("Only 's' variables are allowed "
"in permutations.")
number = int(var[1:])
if number >= self.state_size:
raise ValueError("There are only {} state bits."\
.format(self.state_size))
def apply_MC(self,wordsize,MC,Rp,nc,nr):
'''
Apply MixColumns.
Assumes state bits are numbered columnwise and that MC is stored row-wise
'''
#Get bits of the irreducible polynomial
Rpb = list(format(Rp,'0{}b'.format(wordsize)))
#Create a basis using the powers of 2
basis = []
bm = []
for i in range(wordsize):
bm.append([])
for j in range(wordsize):
if i==j:
bm[i].append(1)
else:
bm[i].append(0)
basis.append(bm)
bm = []
for i in range(wordsize):
bm.append([int(Rpb[i])])
for j in range(wordsize-1):
if i==j:
bm[i].append(1)
else:
bm[i].append(0)
basis.append(bm)
for i in range(wordsize-2):
bmtemp = bm
bm = []
for a in range(wordsize):
bm.append([])
for b in range(wordsize):
bm[a].append(0)
for j in range(wordsize):
bm[a][b] ^= bmtemp[a][j]*basis[1][j][b]
basis.append(bm)
BM = []
for i in range(len(MC)*wordsize):
BM.append([])
for j in range(len(MC)*wordsize):
BM[i].append(0)
#Use the basis to create a binary representation of the MC
for i in range(len(MC)):
for j in range(len(MC)):
e = MC[i][j]
for a in range(wordsize):
for b in range(wordsize):
for c in range(wordsize):
BM[i*wordsize+b][j*wordsize+c] ^= (basis[a][b][c]&(e>>a)&1)
#Create temp variables for a column
for i in range(nc*nr*wordsize):
self.apply_mov("s{}".format(i),"t{}".format(i))
#Apply the binary MC
for c in range(nc):
for i in range(wordsize*nr):
k = BM[i].index(1)
self.apply_mov("t{}".format(nr*wordsize*c+k),"s{}".format(nr*wordsize*c+i))
for j in range(k+1,len(BM[i])):
if BM[i][j]==1:
self.apply_xor("s{}".format(nr*wordsize*c+i),'t{}'.format(nr*wordsize*c+j),"s{}".format(nr*wordsize*c+i))
def apply_MC_serial(self,wordsize,Z,Rp):
#NOT FINISHED
#STILL NEEDS TESTING
'''
Apply MixColumns for serial matrices. Z is the last row.
Assumes state bits are numbered columnwise and that MC is stored row-wise
'''
#Get dimension of the matrix
d = len(Z)
#Get bits of the irreducible polynomial
Rpb = list(format(Rp,'0{}b'.format(wordsize)))
#Create a basis using the powers of 2
basis = []
bm = []
for i in range(wordsize):
bm.append([])
for j in range(wordsize):
if i==j:
bm[i].append(1)
else:
bm[i].append(0)
basis.append(bm)
bm = []
for i in range(wordsize):
bm.append([int(Rpb[i])])
for j in range(wordsize-1):
if i==j:
bm[i].append(1)
else:
bm[i].append(0)
basis.append(bm)
for i in range(wordsize-2):
bmtemp = bm
bm = []
for a in range(wordsize):
bm.append([])
for b in range(wordsize):
bm[a].append(0)
for j in range(wordsize):
bm[a][b] ^= bmtemp[a][j]*basis[1][j][b]
basis.append(bm)
BZ = []
for i in range(wordsize):
BZ.append([])
for j in range(d*wordsize):
BZ[i].append(0)
#Use the basis to create a binary representation of the MC
for i in range(d):
e = Z[i]
for a in range(wordsize):
for b in range(wordsize):
for c in range(wordsize):
BZ[b][i*wordsize+c] ^= (basis[a][b][c]&(e>>a)&1)
shuffle = []
for i in range(d):
shuffle.extend([d*i+(j+1)%d for j in range(d)])
for q in range(d):
for c in range(d):
for i in range(wordsize):
k = BZ[i].index(1)
self.apply_mov("s{}".format(d*wordsize*c+k),"t{}".format(wordsize*c+i))
for j in range(k+1,len(BZ[i])):
if BZ[i][j]==1:
self.apply_xor("t{}".format(wordsize*c+i),'s{}'.format(d*wordsize*c+j),"t{}".format(wordsize*c+i))
self.shufflewords(shuffle,wordsize,1)
for i in range(d*wordsize):
            #print((d-1)*wordsize*(i//wordsize+1)+i)
            self.apply_mov("t{}".format(i),"s{}".format((d-1)*wordsize*(i//wordsize+1)+i))
def shufflewords(self,shuffle,wordsize,rev):
'''
Apply shuffle to the words of the state.
If rev == 1
updates word i with word shuffle[i]
else
updates word shuffle[i] with word i
'''
#Decompose cycles
cycles = []
for i in range(len(shuffle)):
cycles.append([i,shuffle[i]])
            while cycles[-1][0] != cycles[-1][-1]:
                cycles[-1].append(shuffle[cycles[-1][-1]])
        #Remove cycles of length 1 (fixed points appear as [i,i]); build new lists instead of
        #removing items from `cycles` while iterating over it
        cycles = [c for c in cycles if len(c) > 2]
        #Remove duplicate cycles (rotations of an already kept cycle), keeping the first occurrence
        unique_cycles = []
        for c in cycles:
            if not any(c[0] in cp for cp in unique_cycles):
                unique_cycles.append(c)
        cycles = unique_cycles
        #Last entry is equal to first so first is removed
        for c in cycles:
            c.pop(0)
#Reverse cycles
        if rev == 1:
            for c in cycles:
                c.reverse()  # in-place; list.reverse() returns None
#Apply the cycles to the bits
bitshuffle = []
for bit in range(wordsize):
for c in cycles:
t = []
for i in c:
t.append("s{}".format(i*wordsize+bit))
bitshuffle.append(t)
for b in bitshuffle:
self.apply_permutation(b)
def add_mod(self,x,y,z,n,toffset):
'''
Calculates z = x+y mod 2^n
        where n is the length of x, y, and z
        x, y are lists of state variables with x[0] and y[0] as the LSB
'''
#Make copies
for i in range(n):
self.apply_mov(x[i],'t{}'.format(toffset+i))
self.apply_mov(y[i],'t{}'.format(toffset+i+n))
#Set z to x
for i in range(n):
self.apply_mov(x[i],z[i])
#Carry bit
self.apply_and('t{}'.format(toffset),'t{}'.format(toffset+n),'t{}'.format(toffset+2*n))
self.apply_xor(z[0],'t{}'.format(toffset+n),z[0])
for i in range(1,n):
self.apply_xor(z[i],'t{}'.format(toffset+n+i),z[i])
self.apply_xor(z[i],'t{}'.format(toffset+2*n),z[i])
#Update carry bit
self.apply_xor('t{}'.format(toffset+i),'t{}'.format(toffset+i+n),'t{}'.format(toffset+2*n+1))
self.apply_and('t{}'.format(toffset+i),'t{}'.format(toffset+i+n),'t{}'.format(toffset+2*n+2))
self.apply_and('t{}'.format(toffset+2*n+1),'t{}'.format(toffset+2*n),'t{}'.format(toffset+2*n))
self.apply_xor('t{}'.format(toffset+2*n+2),'t{}'.format(toffset+2*n),'t{}'.format(toffset+2*n))
def addconstant_mod(self,x,z,n,toffset):
'''
Calculates z = x+k mod 2^n
        where n is the length of x and z
x is a list of state variables with x[0] as the LSB
k is a constant
'''
#Make copies
for i in range(n):
self.apply_mov(x[i],'t{}'.format(toffset+i))
#Carry bit
self.apply_mov(x[0],'t{}'.format(toffset+n))
self.apply_mov(x[0],z[0])
for i in range(1,n):
self.apply_xor('t{}'.format(toffset+i),'t{}'.format(toffset+n),z[i])
#Update carry bit
self.apply_and('t{}'.format(toffset+i),'t{}'.format(toffset+n),'t{}'.format(toffset+n))
self.apply_xor('t{}'.format(toffset+i),'t{}'.format(toffset+n),'t{}'.format(toffset+n))
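# A minimal usage sketch (illustrative, not part of the original module): builds a toy
# 4-bit cipher description with one 2-bit S-box layer, an XOR into a temporary bit and a
# rotation of the state bits, then prints the resulting round description.
if __name__ == '__main__':
    toy = CipherDescription(4)
    toy.add_sbox('SB', [0, 3, 1, 2])                    # 2-bit S-box (size is a power of two)
    toy.apply_sbox('SB', ['s0', 's1'], ['s0', 's1'])
    toy.apply_xor('s2', 's3', 't0')                     # t0 = s2 + s3
    toy.apply_permutation(['s1', 's2', 's3', 's0'])     # rotate the state bits
    toy.set_rounds(2)
    toy.print_description()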
|
import asyncio
import datetime
import io
import os.path
import aiofiles
import falcon
import PIL.Image
class Image:
def __init__(self, config, image_id, size):
self.config = config
self.image_id = image_id
self.size = size
self.modified = datetime.datetime.utcnow()
@property
def path(self):
return os.path.join(self.config.storage_path, self.image_id)
@property
def uri(self):
return f'/images/{self.image_id}.jpeg'
def serialize(self):
return {
'id': self.image_id,
'image': self.uri,
'modified': falcon.dt_to_http(self.modified),
'size': self.size,
}
class Store:
def __init__(self, config):
self.config = config
self._images = {}
def _load_from_bytes(self, data):
return PIL.Image.open(io.BytesIO(data))
def _convert(self, image):
rgb_image = image.convert('RGB')
converted = io.BytesIO()
rgb_image.save(converted, 'JPEG')
return converted.getvalue()
def get(self, image_id):
return self._images.get(image_id)
def list_images(self):
return sorted(self._images.values(), key=lambda item: item.modified)
async def save(self, image_id, data):
loop = asyncio.get_running_loop()
image = await loop.run_in_executor(None, self._load_from_bytes, data)
converted = await loop.run_in_executor(None, self._convert, image)
path = os.path.join(self.config.storage_path, image_id)
async with aiofiles.open(path, 'wb') as output:
await output.write(converted)
stored = Image(self.config, image_id, image.size)
self._images[image_id] = stored
return stored
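# An illustrative wiring sketch (not part of the original module): exposes the Store
# through a Falcon ASGI resource. The config object, route and resource name are
# placeholders; only Image/Store above come from this file.
class ImagesResource:

    def __init__(self, store):
        self._store = store

    async def on_get(self, req, resp):
        # Serve the stored images as a JSON list using Image.serialize() defined above.
        resp.media = [image.serialize() for image in self._store.list_images()]


def create_app(config):
    import falcon.asgi  # assumed to be available alongside the `falcon` import above
    app = falcon.asgi.App()
    app.add_route('/images', ImagesResource(Store(config)))
    return app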
|
# Copyright 2020 BigBitBus
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.db import models
class Todo(models.Model):
title = models.CharField(max_length=200)
description = models.TextField()
def __str__(self):
return self.title
|
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2017, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import time
from selenium.common.exceptions import NoSuchElementException, \
WebDriverException
from selenium.webdriver import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
class PgadminPage:
"""
Helper class for interacting with the page, given a selenium driver
"""
def __init__(self, driver, app_config):
self.driver = driver
self.app_config = app_config
self.timeout = 30
self.app_start_timeout = 60
def reset_layout(self):
self.click_element(self.find_by_partial_link_text("File"))
self.find_by_partial_link_text("Reset Layout").click()
self.click_modal('OK')
self.wait_for_reloading_indicator_to_disappear()
def click_modal(self, button_text):
time.sleep(0.5)
        # Find the active alertify dialog (in case there are multiple alertify dialogs) and click its button
modal_button = self.find_by_xpath(
"//div[contains(@class, 'alertify') and not(contains(@class, 'ajs-hidden'))]//button[.='%s']"
% button_text)
self.click_element(modal_button)
def add_server(self, server_config):
self.find_by_xpath("//*[@class='aciTreeText' and contains(.,'Servers')]").click()
self.driver.find_element_by_link_text("Object").click()
ActionChains(self.driver) \
.move_to_element(self.driver.find_element_by_link_text("Create")) \
.perform()
self.find_by_partial_link_text("Server...").click()
self.fill_input_by_field_name("name", server_config['name'])
self.find_by_partial_link_text("Connection").click()
self.fill_input_by_field_name("host", server_config['host'])
self.fill_input_by_field_name("port", server_config['port'])
self.fill_input_by_field_name("username", server_config['username'])
self.fill_input_by_field_name("password", server_config['db_password'])
self.find_by_xpath("//button[contains(.,'Save')]").click()
self.find_by_xpath("//*[@id='tree']//*[.='" + server_config['name'] + "']")
def open_query_tool(self):
self.driver.find_element_by_link_text("Tools").click()
tools_menu = self.driver.find_element_by_id('mnu_tools')
# Query Tool is first li
query_tool = tools_menu.find_element_by_tag_name('li')
self.enable_menu_item(query_tool, 10)
self.find_by_partial_link_text("Query Tool").click()
self.click_tab('Query -')
def enable_menu_item(self, menu_item, wait_time):
start_time = time.time()
# wait until menu becomes enabled.
while time.time() - start_time < wait_time: # wait_time seconds
# if menu is disabled then it will have
# two classes 'menu-item disabled'.
            # And if the menu is enabled then it will have
# only one class 'menu-item'.
if 'menu-item' == str(menu_item.get_attribute('class')):
break
time.sleep(0.1)
else:
assert False, "'Tools -> Query Tool' menu did not enable."
def close_query_tool(self):
self.driver.switch_to.default_content()
tab = self.find_by_xpath("//*[contains(@class,'wcPanelTab') and contains(.,'" + "Query" + "')]")
ActionChains(self.driver).context_click(tab).perform()
self.find_by_xpath("//li[contains(@class, 'context-menu-item')]/span[contains(text(), 'Remove Panel')]").click()
self.driver.switch_to.frame(self.driver.find_elements_by_tag_name("iframe")[0])
time.sleep(.5)
self.click_element(self.find_by_xpath('//button[contains(@class, "ajs-button") and contains(.,"Don\'t save")]'))
self.driver.switch_to.default_content()
def close_data_grid(self):
        self.driver.switch_to.default_content()
xpath = "//*[@id='dockerContainer']/div/div[3]/div/div[2]/div[1]"
self.click_element(self.find_by_xpath(xpath))
def remove_server(self, server_config):
self.driver.switch_to.default_content()
server_to_remove = self.find_by_xpath("//*[@id='tree']//*[.='" + server_config['name'] + "' and @class='aciTreeItem']")
self.click_element(server_to_remove)
object_menu_item = self.find_by_partial_link_text("Object")
self.click_element(object_menu_item)
delete_menu_item = self.find_by_partial_link_text("Delete/Drop")
self.click_element(delete_menu_item)
self.click_modal('OK')
def select_tree_item(self, tree_item_text):
self.find_by_xpath("//*[@id='tree']//*[.='" + tree_item_text + "' and @class='aciTreeItem']").click()
def toggle_open_tree_item(self, tree_item_text):
self.find_by_xpath("//*[@id='tree']//*[.='" + tree_item_text + "']/../*[@class='aciTreeButton']").click()
def toggle_open_server(self, tree_item_text):
def check_for_password_dialog_or_tree_open(driver):
try:
dialog = driver.find_element_by_id("frmPassword")
except WebDriverException:
dialog = None
try:
database_node = driver.find_element_by_xpath("//*[@id='tree']//*[.='Databases']/../*[@class='aciTreeButton']")
except WebDriverException:
database_node = None
return dialog is not None or database_node is not None
self.toggle_open_tree_item(tree_item_text)
self._wait_for("Waiting for password dialog or tree to open", check_for_password_dialog_or_tree_open)
try:
self.driver.find_element_by_id("frmPassword")
# Enter password here if needed
self.click_modal('OK')
except WebDriverException:
return
def find_by_xpath(self, xpath):
return self.wait_for_element(lambda driver: driver.find_element_by_xpath(xpath))
def find_by_id(self, element_id):
return self.wait_for_element(lambda driver: driver.find_element_by_id(element_id))
def find_by_css_selector(self, css_selector):
return self.wait_for_element(lambda driver: driver.find_element_by_css_selector(css_selector))
def find_by_partial_link_text(self, link_text):
return self._wait_for(
'link with text "{0}"'.format(link_text),
EC.element_to_be_clickable((By.PARTIAL_LINK_TEXT, link_text))
)
def click_element(self, element):
# driver must be here to adhere to the method contract in selenium.webdriver.support.wait.WebDriverWait.until()
def click_succeeded(driver):
try:
element.click()
return True
except WebDriverException:
return False
return self._wait_for("clicking the element not to throw an exception", click_succeeded)
def fill_input_by_field_name(self, field_name, field_content):
field = self.find_by_xpath("//input[@name='" + field_name + "']")
backspaces = [Keys.BACKSPACE] * len(field.get_attribute('value'))
field.click()
field.send_keys(backspaces)
field.send_keys(str(field_content))
self.wait_for_input_field_content(field_name, field_content)
def fill_codemirror_area_with(self, field_content):
def find_codemirror(driver):
try:
driver.switch_to.default_content()
                driver.switch_to.frame(driver.find_element_by_tag_name("iframe"))
element = driver.find_element_by_xpath(
"//pre[contains(@class,'CodeMirror-line')]/../../../*[contains(@class,'CodeMirror-code')]")
if element.is_displayed() and element.is_enabled():
return element
except (NoSuchElementException, WebDriverException):
return False
time.sleep(1)
WebDriverWait(self.driver, timeout=self.timeout, poll_frequency=0.01).\
until(find_codemirror, "Timed out waiting for codemirror to appear").\
click()
time.sleep(1)
action = ActionChains(self.driver)
action.send_keys(field_content)
action.perform()
def click_tab(self, tab_name):
tab = self.find_by_xpath("//*[contains(@class,'wcTabTop')]//*[contains(@class,'wcPanelTab') "
"and contains(.,'" + tab_name + "')]")
self.click_element(tab)
def wait_for_input_field_content(self, field_name, content):
def input_field_has_content(driver):
element = driver.find_element_by_xpath(
"//input[@name='" + field_name + "']")
return str(content) == element.get_attribute('value')
return self._wait_for("field to contain '" + str(content) + "'", input_field_has_content)
def wait_for_element(self, find_method_with_args):
def element_if_it_exists(driver):
try:
element = find_method_with_args(driver)
if element.is_displayed() and element.is_enabled():
return element
except NoSuchElementException:
return False
return self._wait_for("element to exist", element_if_it_exists)
def wait_for_reloading_indicator_to_disappear(self):
def reloading_indicator_has_disappeared(driver):
try:
driver.find_element_by_id("reloading-indicator")
return False
except NoSuchElementException:
return True
self._wait_for("reloading indicator to disappear", reloading_indicator_has_disappeared)
def wait_for_spinner_to_disappear(self):
def spinner_has_disappeared(driver):
try:
driver.find_element_by_id("pg-spinner")
return False
except NoSuchElementException:
return True
self._wait_for("spinner to disappear", spinner_has_disappeared)
def wait_for_query_tool_loading_indicator_to_disappear(self):
def spinner_has_disappeared(driver):
try:
driver.find_element_by_xpath(
"//*[@id='fetching_data' and @class='hide']"
)
return False
except NoSuchElementException:
# wait for loading indicator disappear animation to complete.
time.sleep(0.5)
return True
self._wait_for("spinner to disappear", spinner_has_disappeared)
def wait_for_app(self):
def page_shows_app(driver):
if driver.title == self.app_config.APP_NAME:
return True
else:
driver.refresh()
return False
self._wait_for("app to start", page_shows_app, self.app_start_timeout)
    def _wait_for(self, waiting_for_message, condition_met_function, timeout=None):
if timeout is None:
timeout = self.timeout
return WebDriverWait(self.driver, timeout, 0.01).until(condition_met_function,
"Timed out waiting for " + waiting_for_message)
|
# coding: utf-8
"""
LUSID API
# Introduction This page documents the [LUSID APIs](https://www.lusid.com/api/swagger), which allows authorised clients to query and update their data within the LUSID platform. SDKs to interact with the LUSID APIs are available in the following languages : * [C#](https://github.com/finbourne/lusid-sdk-csharp) * [Java](https://github.com/finbourne/lusid-sdk-java) * [JavaScript](https://github.com/finbourne/lusid-sdk-js) * [Python](https://github.com/finbourne/lusid-sdk-python) # Data Model The LUSID API has a relatively lightweight but extremely powerful data model. One of the goals of LUSID was not to enforce on clients a single rigid data model but rather to provide a flexible foundation onto which clients can map their own data models. The core entities in LUSID provide a minimal structure and set of relationships, and the data model can be extended using Properties. The LUSID data model is exposed through the LUSID APIs. The APIs provide access to both business objects and the meta data used to configure the systems behaviours. The key business entities are: - * **Portfolios** A portfolio is a container for transactions and holdings (a **Transaction Portfolio**) or constituents (a **Reference Portfolio**). * **Derived Portfolios**. Derived Portfolios allow Portfolios to be created based on other Portfolios, by overriding or adding specific items. * **Holdings** A Holding is a quantity of an Instrument or a balance of cash within a Portfolio. Holdings can only be adjusted via Transactions. * **Transactions** A Transaction is an economic event that occurs in a Portfolio, causing its holdings to change. * **Corporate Actions** A corporate action is a market event which occurs to an Instrument and thus applies to all portfolios which holding the instrument. Examples are stock splits or mergers. * **Constituents** A constituent is a record in a Reference Portfolio containing an Instrument and an associated weight. * **Instruments** An instrument represents a currency, tradable instrument or OTC contract that is attached to a transaction and a holding. * **Properties** All major entities allow additional user defined properties to be associated with them. For example, a Portfolio manager may be associated with a portfolio. Meta data includes: - * **Transaction Types** Transactions are booked with a specific transaction type. The types are client defined and are used to map the Transaction to a series of movements which update the portfolio holdings. * **Properties Types** Types of user defined properties used within the system. ## Scope All data in LUSID is segregated at the client level. Entities in LUSID are identifiable by a unique code. Every entity lives within a logical data partition known as a Scope. Scope is an identity namespace allowing two entities with the same unique code to co-exist within individual address spaces. For example, prices for equities from different vendors may be uploaded into different scopes such as `client/vendor1` and `client/vendor2`. A portfolio may then be valued using either of the price sources by referencing the appropriate scope. LUSID Clients cannot access scopes of other clients. ## Instruments LUSID has its own built-in instrument master which you can use to master your own instrument universe. Every instrument must be created with one or more unique market identifiers, such as [FIGI](https://openfigi.com/). For any non-listed instruments (eg OTCs), you can upload an instrument against a custom ID of your choosing. 
In addition, LUSID will allocate each instrument a unique 'LUSID instrument identifier'. The LUSID instrument identifier is what is used when uploading transactions, holdings, prices, etc. The API exposes an `instrument/lookup` endpoint which can be used to lookup these LUSID identifiers using their market identifiers. Cash can be referenced using the ISO currency code prefixed with \"`CCY_`\" e.g. `CCY_GBP` ## Instrument Data Instrument data can be uploaded to the system using the [Instrument Properties](#tag/InstrumentProperties) endpoint. | Field|Type|Description | | ---|---|--- | | Key|propertykey|The key of the property. This takes the format {domain}/{scope}/{code} e.g. 'Instrument/system/Name' or 'Transaction/strategy/quantsignal'. | | Value|string|The value of the property. | | EffectiveFrom|datetimeoffset|The effective datetime from which the property is valid. | ## Transaction Portfolios Portfolios are the top-level entity containers within LUSID, containing transactions, corporate actions and holdings. The transactions build up the portfolio holdings on which valuations, analytics profit & loss and risk can be calculated. Properties can be associated with Portfolios to add in additional data. Portfolio properties can be changed over time, for example to allow a Portfolio Manager to be linked with a Portfolio. Additionally, portfolios can be securitised and held by other portfolios, allowing LUSID to perform \"drill-through\" into underlying fund holdings ### Derived Portfolios LUSID also allows for a portfolio to be composed of another portfolio via derived portfolios. A derived portfolio can contain its own transactions and also inherits any transactions from its parent portfolio. Any changes made to the parent portfolio are automatically reflected in derived portfolio. Derived portfolios in conjunction with scopes are a powerful construct. For example, to do pre-trade what-if analysis, a derived portfolio could be created a new namespace linked to the underlying live (parent) portfolio. Analysis can then be undertaken on the derived portfolio without affecting the live portfolio. ### Transactions A transaction represents an economic activity against a Portfolio. Transactions are processed according to a configuration. This will tell the LUSID engine how to interpret the transaction and correctly update the holdings. LUSID comes with a set of transaction types you can use out of the box, or you can configure your own set(s) of transactions. For more details see the [LUSID Getting Started Guide for transaction configuration.](https://support.lusid.com/configuring-transaction-types) | Field|Type|Description | | ---|---|--- | | TransactionId|string|The unique identifier for the transaction. | | Type|string|The type of the transaction e.g. 'Buy', 'Sell'. The transaction type should have been pre-configured via the System Configuration API endpoint. If it hasn't been pre-configured the transaction will still be updated or inserted however you will be unable to generate the resultant holdings for the portfolio that contains this transaction as LUSID does not know how to process it. | | InstrumentIdentifiers|map|A set of instrument identifiers to use to resolve the transaction to a unique instrument. | | TransactionDate|dateorcutlabel|The date of the transaction. | | SettlementDate|dateorcutlabel|The settlement date of the transaction. | | Units|decimal|The number of units transacted in the associated instrument. 
| | TransactionPrice|transactionprice|The price for each unit of the transacted instrument in the transaction currency. | | TotalConsideration|currencyandamount|The total value of the transaction in the settlement currency. | | ExchangeRate|decimal|The exchange rate between the transaction and settlement currency. For example if the transaction currency is in USD and the settlement currency is in GBP this this the USD/GBP rate. | | TransactionCurrency|currency|The transaction currency. | | Properties|map|Set of unique transaction properties and associated values to store with the transaction. Each property must be from the 'Transaction' domain. | | CounterpartyId|string|The identifier for the counterparty of the transaction. | | Source|string|The source of the transaction. This is used to look up the appropriate transaction group set in the transaction type configuration. | From these fields, the following values can be calculated * **Transaction value in Transaction currency**: TotalConsideration / ExchangeRate * **Transaction value in Portfolio currency**: Transaction value in Transaction currency * TradeToPortfolioRate #### Example Transactions ##### A Common Purchase Example Three example transactions are shown in the table below. They represent a purchase of USD denominated IBM shares within a Sterling denominated portfolio. * The first two transactions are for separate buy and fx trades * Buying 500 IBM shares for $71,480.00 * A spot foreign exchange conversion to fund the IBM purchase. (Buy $71,480.00 for £54,846.60) * The third transaction is an alternate version of the above trades. Buying 500 IBM shares and settling directly in Sterling. | Column | Buy Trade | Fx Trade | Buy Trade with foreign Settlement | | ----- | ----- | ----- | ----- | | TransactionId | FBN00001 | FBN00002 | FBN00003 | | Type | Buy | FxBuy | Buy | | InstrumentIdentifiers | { \"figi\", \"BBG000BLNNH6\" } | { \"CCY\", \"CCY_USD\" } | { \"figi\", \"BBG000BLNNH6\" } | | TransactionDate | 2018-08-02 | 2018-08-02 | 2018-08-02 | | SettlementDate | 2018-08-06 | 2018-08-06 | 2018-08-06 | | Units | 500 | 71480 | 500 | | TransactionPrice | 142.96 | 1 | 142.96 | | TradeCurrency | USD | USD | USD | | ExchangeRate | 1 | 0.7673 | 0.7673 | | TotalConsideration.Amount | 71480.00 | 54846.60 | 54846.60 | | TotalConsideration.Currency | USD | GBP | GBP | | Trade/default/TradeToPortfolioRate* | 0.7673 | 0.7673 | 0.7673 | [* This is a property field] ##### A Forward FX Example LUSID has a flexible transaction modelling system, meaning there are a number of different ways of modelling forward fx trades. The default LUSID transaction types are FwdFxBuy and FwdFxSell. Using these transaction types, LUSID will generate two holdings for each Forward FX trade, one for each currency in the trade. 
An example Forward Fx trade to sell GBP for USD in a JPY-denominated portfolio is shown below: | Column | Forward 'Sell' Trade | Notes | | ----- | ----- | ---- | | TransactionId | FBN00004 | | | Type | FwdFxSell | | | InstrumentIdentifiers | { \"Instrument/default/Currency\", \"GBP\" } | | | TransactionDate | 2018-08-02 | | | SettlementDate | 2019-02-06 | Six month forward | | Units | 10000.00 | Units of GBP | | TransactionPrice | 1 | | | TradeCurrency | GBP | Currency being sold | | ExchangeRate | 1.3142 | Agreed rate between GBP and USD | | TotalConsideration.Amount | 13142.00 | Amount in the settlement currency, USD | | TotalConsideration.Currency | USD | Settlement currency | | Trade/default/TradeToPortfolioRate | 142.88 | Rate between trade currency, GBP and portfolio base currency, JPY | Please note that exactly the same economic behaviour could be modelled using the FwdFxBuy Transaction Type with the amounts and rates reversed. ### Holdings A holding represents a position in an instrument or cash on a given date. | Field|Type|Description | | ---|---|--- | | InstrumentUid|string|The unqiue Lusid Instrument Id (LUID) of the instrument that the holding is in. | | SubHoldingKeys|map|The sub-holding properties which identify the holding. Each property will be from the 'Transaction' domain. These are configured when a transaction portfolio is created. | | Properties|map|The properties which have been requested to be decorated onto the holding. These will be from the 'Instrument' or 'Holding' domain. | | HoldingType|string|The type of the holding e.g. Position, Balance, CashCommitment, Receivable, ForwardFX etc. | | Units|decimal|The total number of units of the holding. | | SettledUnits|decimal|The total number of settled units of the holding. | | Cost|currencyandamount|The total cost of the holding in the transaction currency. | | CostPortfolioCcy|currencyandamount|The total cost of the holding in the portfolio currency. | | Transaction|transaction|The transaction associated with an unsettled holding. | ## Corporate Actions Corporate actions are represented within LUSID in terms of a set of instrument-specific 'transitions'. These transitions are used to specify the participants of the corporate action, and the effect that the corporate action will have on holdings in those participants. ### Corporate Action | Field|Type|Description | | ---|---|--- | | CorporateActionCode|code|The unique identifier of this corporate action | | Description|string| | | AnnouncementDate|datetimeoffset|The announcement date of the corporate action | | ExDate|datetimeoffset|The ex date of the corporate action | | RecordDate|datetimeoffset|The record date of the corporate action | | PaymentDate|datetimeoffset|The payment date of the corporate action | | Transitions|corporateactiontransition[]|The transitions that result from this corporate action | ### Transition | Field|Type|Description | | ---|---|--- | | InputTransition|corporateactiontransitioncomponent|Indicating the basis of the corporate action - which security and how many units | | OutputTransitions|corporateactiontransitioncomponent[]|What will be generated relative to the input transition | ### Example Corporate Action Transitions #### A Dividend Action Transition In this example, for each share of IBM, 0.20 units (or 20 pence) of GBP are generated. 
| Column | Input Transition | Output Transition | | ----- | ----- | ----- | | Instrument Identifiers | { \"figi\" : \"BBG000BLNNH6\" } | { \"ccy\" : \"CCY_GBP\" } | | Units Factor | 1 | 0.20 | | Cost Factor | 1 | 0 | #### A Split Action Transition In this example, for each share of IBM, we end up with 2 units (2 shares) of IBM, with total value unchanged. | Column | Input Transition | Output Transition | | ----- | ----- | ----- | | Instrument Identifiers | { \"figi\" : \"BBG000BLNNH6\" } | { \"figi\" : \"BBG000BLNNH6\" } | | Units Factor | 1 | 2 | | Cost Factor | 1 | 1 | #### A Spinoff Action Transition In this example, for each share of IBM, we end up with 1 unit (1 share) of IBM and 3 units (3 shares) of Celestica, with 85% of the value remaining on the IBM share, and 5% in each Celestica share (15% total). | Column | Input Transition | Output Transition 1 | Output Transition 2 | | ----- | ----- | ----- | ----- | | Instrument Identifiers | { \"figi\" : \"BBG000BLNNH6\" } | { \"figi\" : \"BBG000BLNNH6\" } | { \"figi\" : \"BBG000HBGRF3\" } | | Units Factor | 1 | 1 | 3 | | Cost Factor | 1 | 0.85 | 0.15 | ## Reference Portfolios Reference portfolios are portfolios that contain constituents with weights. They are designed to represent entities such as indices and benchmarks. ### Constituents | Field|Type|Description | | ---|---|--- | | InstrumentIdentifiers|map|Unique instrument identifiers | | InstrumentUid|string|LUSID's internal unique instrument identifier, resolved from the instrument identifiers | | Currency|decimal| | | Weight|decimal| | | FloatingWeight|decimal| | ## Portfolio Groups Portfolio groups allow the construction of a hierarchy from portfolios and groups. Portfolio operations on the group are executed on an aggregated set of portfolios in the hierarchy. For example: * Global Portfolios _(group)_ * APAC _(group)_ * Hong Kong _(portfolio)_ * Japan _(portfolio)_ * Europe _(group)_ * France _(portfolio)_ * Germany _(portfolio)_ * UK _(portfolio)_ In this example **Global Portfolios** is a group that consists of an aggregate of **Hong Kong**, **Japan**, **France**, **Germany** and **UK** portfolios. ## Properties Properties are key-value pairs that can be applied to any entity within a domain (where a domain is `trade`, `portfolio`, `security` etc). Properties must be defined before use with a `PropertyDefinition` and can then subsequently be added to entities. ## Schema A detailed description of the entities used by the API and parameters for endpoints which take a JSON document can be retrieved via the `schema` endpoint. ## Meta data The following headers are returned on all responses from LUSID | Name | Purpose | | --- | --- | | lusid-meta-duration | Duration of the request | | lusid-meta-success | Whether or not LUSID considered the request to be successful | | lusid-meta-requestId | The unique identifier for the request | | lusid-schema-url | Url of the schema for the data being returned | | lusid-property-schema-url | Url of the schema for any properties | # Error Codes | Code|Name|Description | | ---|---|--- | | <a name=\"-10\">-10</a>|Server Configuration Error| | | <a name=\"-1\">-1</a>|Unknown error|An unexpected error was encountered on our side. 
| | <a name=\"102\">102</a>|Version Not Found| | | <a name=\"103\">103</a>|Api Rate Limit Violation| | | <a name=\"104\">104</a>|Instrument Not Found| | | <a name=\"105\">105</a>|Property Not Found| | | <a name=\"106\">106</a>|Portfolio Recursion Depth| | | <a name=\"108\">108</a>|Group Not Found| | | <a name=\"109\">109</a>|Portfolio Not Found| | | <a name=\"110\">110</a>|Property Schema Not Found| | | <a name=\"111\">111</a>|Portfolio Ancestry Not Found| | | <a name=\"112\">112</a>|Portfolio With Id Already Exists| | | <a name=\"113\">113</a>|Orphaned Portfolio| | | <a name=\"119\">119</a>|Missing Base Claims| | | <a name=\"121\">121</a>|Property Not Defined| | | <a name=\"122\">122</a>|Cannot Delete System Property| | | <a name=\"123\">123</a>|Cannot Modify Immutable Property Field| | | <a name=\"124\">124</a>|Property Already Exists| | | <a name=\"125\">125</a>|Invalid Property Life Time| | | <a name=\"126\">126</a>|Property Constraint Style Excludes Properties| | | <a name=\"127\">127</a>|Cannot Modify Default Data Type| | | <a name=\"128\">128</a>|Group Already Exists| | | <a name=\"129\">129</a>|No Such Data Type| | | <a name=\"130\">130</a>|Undefined Value For Data Type| | | <a name=\"131\">131</a>|Unsupported Value Type Defined On Data Type| | | <a name=\"132\">132</a>|Validation Error| | | <a name=\"133\">133</a>|Loop Detected In Group Hierarchy| | | <a name=\"134\">134</a>|Undefined Acceptable Values| | | <a name=\"135\">135</a>|Sub Group Already Exists| | | <a name=\"138\">138</a>|Price Source Not Found| | | <a name=\"139\">139</a>|Analytic Store Not Found| | | <a name=\"141\">141</a>|Analytic Store Already Exists| | | <a name=\"143\">143</a>|Client Instrument Already Exists| | | <a name=\"144\">144</a>|Duplicate In Parameter Set| | | <a name=\"147\">147</a>|Results Not Found| | | <a name=\"148\">148</a>|Order Field Not In Result Set| | | <a name=\"149\">149</a>|Operation Failed| | | <a name=\"150\">150</a>|Elastic Search Error| | | <a name=\"151\">151</a>|Invalid Parameter Value| | | <a name=\"153\">153</a>|Command Processing Failure| | | <a name=\"154\">154</a>|Entity State Construction Failure| | | <a name=\"155\">155</a>|Entity Timeline Does Not Exist| | | <a name=\"156\">156</a>|Concurrency Conflict Failure| | | <a name=\"157\">157</a>|Invalid Request| | | <a name=\"158\">158</a>|Event Publish Unknown| | | <a name=\"159\">159</a>|Event Query Failure| | | <a name=\"160\">160</a>|Blob Did Not Exist| | | <a name=\"162\">162</a>|Sub System Request Failure| | | <a name=\"163\">163</a>|Sub System Configuration Failure| | | <a name=\"165\">165</a>|Failed To Delete| | | <a name=\"166\">166</a>|Upsert Client Instrument Failure| | | <a name=\"167\">167</a>|Illegal As At Interval| | | <a name=\"168\">168</a>|Illegal Bitemporal Query| | | <a name=\"169\">169</a>|Invalid Alternate Id| | | <a name=\"170\">170</a>|Cannot Add Source Portfolio Property Explicitly| | | <a name=\"171\">171</a>|Entity Already Exists In Group| | | <a name=\"173\">173</a>|Entity With Id Already Exists| | | <a name=\"174\">174</a>|Derived Portfolio Details Do Not Exist| | | <a name=\"176\">176</a>|Portfolio With Name Already Exists| | | <a name=\"177\">177</a>|Invalid Transactions| | | <a name=\"178\">178</a>|Reference Portfolio Not Found| | | <a name=\"179\">179</a>|Duplicate Id| | | <a name=\"180\">180</a>|Command Retrieval Failure| | | <a name=\"181\">181</a>|Data Filter Application Failure| | | <a name=\"182\">182</a>|Search Failed| | | <a name=\"183\">183</a>|Movements Engine Configuration Key Failure| | | 
<a name=\"184\">184</a>|Fx Rate Source Not Found| | | <a name=\"185\">185</a>|Accrual Source Not Found| | | <a name=\"186\">186</a>|Access Denied| | | <a name=\"187\">187</a>|Invalid Identity Token| | | <a name=\"188\">188</a>|Invalid Request Headers| | | <a name=\"189\">189</a>|Price Not Found| | | <a name=\"190\">190</a>|Invalid Sub Holding Keys Provided| | | <a name=\"191\">191</a>|Duplicate Sub Holding Keys Provided| | | <a name=\"192\">192</a>|Cut Definition Not Found| | | <a name=\"193\">193</a>|Cut Definition Invalid| | | <a name=\"194\">194</a>|Time Variant Property Deletion Date Unspecified| | | <a name=\"195\">195</a>|Perpetual Property Deletion Date Specified| | | <a name=\"196\">196</a>|Time Variant Property Upsert Date Unspecified| | | <a name=\"197\">197</a>|Perpetual Property Upsert Date Specified| | | <a name=\"200\">200</a>|Invalid Unit For Data Type| | | <a name=\"201\">201</a>|Invalid Type For Data Type| | | <a name=\"202\">202</a>|Invalid Value For Data Type| | | <a name=\"203\">203</a>|Unit Not Defined For Data Type| | | <a name=\"204\">204</a>|Units Not Supported On Data Type| | | <a name=\"205\">205</a>|Cannot Specify Units On Data Type| | | <a name=\"206\">206</a>|Unit Schema Inconsistent With Data Type| | | <a name=\"207\">207</a>|Unit Definition Not Specified| | | <a name=\"208\">208</a>|Duplicate Unit Definitions Specified| | | <a name=\"209\">209</a>|Invalid Units Definition| | | <a name=\"210\">210</a>|Invalid Instrument Identifier Unit| | | <a name=\"211\">211</a>|Holdings Adjustment Does Not Exist| | | <a name=\"212\">212</a>|Could Not Build Excel Url| | | <a name=\"213\">213</a>|Could Not Get Excel Version| | | <a name=\"214\">214</a>|Instrument By Code Not Found| | | <a name=\"215\">215</a>|Entity Schema Does Not Exist| | | <a name=\"216\">216</a>|Feature Not Supported On Portfolio Type| | | <a name=\"217\">217</a>|Quote Not Found| | | <a name=\"218\">218</a>|Invalid Quote Identifier| | | <a name=\"219\">219</a>|Invalid Metric For Data Type| | | <a name=\"220\">220</a>|Invalid Instrument Definition| | | <a name=\"221\">221</a>|Instrument Upsert Failure| | | <a name=\"222\">222</a>|Reference Portfolio Request Not Supported| | | <a name=\"223\">223</a>|Transaction Portfolio Request Not Supported| | | <a name=\"224\">224</a>|Invalid Property Value Assignment| | | <a name=\"230\">230</a>|Transaction Type Not Found| | | <a name=\"231\">231</a>|Transaction Type Duplication| | | <a name=\"232\">232</a>|Portfolio Does Not Exist At Given Date| | | <a name=\"233\">233</a>|Query Parser Failure| | | <a name=\"234\">234</a>|Duplicate Constituent| | | <a name=\"235\">235</a>|Unresolved Instrument Constituent| | | <a name=\"236\">236</a>|Unresolved Instrument In Transition| | | <a name=\"237\">237</a>|Missing Side Definitions| | | <a name=\"299\">299</a>|Invalid Recipe| | | <a name=\"300\">300</a>|Missing Recipe| | | <a name=\"301\">301</a>|Dependencies| | | <a name=\"304\">304</a>|Portfolio Preprocess Failure| | | <a name=\"310\">310</a>|Valuation Engine Failure| | | <a name=\"311\">311</a>|Task Factory Failure| | | <a name=\"312\">312</a>|Task Evaluation Failure| | | <a name=\"313\">313</a>|Task Generation Failure| | | <a name=\"314\">314</a>|Engine Configuration Failure| | | <a name=\"315\">315</a>|Model Specification Failure| | | <a name=\"320\">320</a>|Market Data Key Failure| | | <a name=\"321\">321</a>|Market Resolver Failure| | | <a name=\"322\">322</a>|Market Data Failure| | | <a name=\"330\">330</a>|Curve Failure| | | <a name=\"331\">331</a>|Volatility Surface 
Failure| | | <a name=\"332\">332</a>|Volatility Cube Failure| | | <a name=\"350\">350</a>|Instrument Failure| | | <a name=\"351\">351</a>|Cash Flows Failure| | | <a name=\"352\">352</a>|Reference Data Failure| | | <a name=\"360\">360</a>|Aggregation Failure| | | <a name=\"361\">361</a>|Aggregation Measure Failure| | | <a name=\"370\">370</a>|Result Retrieval Failure| | | <a name=\"371\">371</a>|Result Processing Failure| | | <a name=\"372\">372</a>|Vendor Result Processing Failure| | | <a name=\"373\">373</a>|Vendor Result Mapping Failure| | | <a name=\"374\">374</a>|Vendor Library Unauthorised| | | <a name=\"375\">375</a>|Vendor Connectivity Error| | | <a name=\"376\">376</a>|Vendor Interface Error| | | <a name=\"377\">377</a>|Vendor Pricing Failure| | | <a name=\"378\">378</a>|Vendor Translation Failure| | | <a name=\"379\">379</a>|Vendor Key Mapping Failure| | | <a name=\"380\">380</a>|Vendor Reflection Failure| | | <a name=\"390\">390</a>|Attempt To Upsert Duplicate Quotes| | | <a name=\"391\">391</a>|Corporate Action Source Does Not Exist| | | <a name=\"392\">392</a>|Corporate Action Source Already Exists| | | <a name=\"393\">393</a>|Instrument Identifier Already In Use| | | <a name=\"394\">394</a>|Properties Not Found| | | <a name=\"395\">395</a>|Batch Operation Aborted| | | <a name=\"400\">400</a>|Invalid Iso4217 Currency Code| | | <a name=\"401\">401</a>|Cannot Assign Instrument Identifier To Currency| | | <a name=\"402\">402</a>|Cannot Assign Currency Identifier To Non Currency| | | <a name=\"403\">403</a>|Currency Instrument Cannot Be Deleted| | | <a name=\"404\">404</a>|Currency Instrument Cannot Have Economic Definition| | | <a name=\"405\">405</a>|Currency Instrument Cannot Have Lookthrough Portfolio| | | <a name=\"406\">406</a>|Cannot Create Currency Instrument With Multiple Identifiers| | | <a name=\"407\">407</a>|Specified Currency Is Undefined| | | <a name=\"410\">410</a>|Index Does Not Exist| | | <a name=\"411\">411</a>|Sort Field Does Not Exist| | | <a name=\"413\">413</a>|Negative Pagination Parameters| | | <a name=\"414\">414</a>|Invalid Search Syntax| | | <a name=\"415\">415</a>|Filter Execution Timeout| | | <a name=\"420\">420</a>|Side Definition Inconsistent| | | <a name=\"450\">450</a>|Invalid Quote Access Metadata Rule| | | <a name=\"451\">451</a>|Access Metadata Not Found| | | <a name=\"452\">452</a>|Invalid Access Metadata Identifier| | | <a name=\"460\">460</a>|Standard Resource Not Found| | | <a name=\"461\">461</a>|Standard Resource Conflict| | | <a name=\"462\">462</a>|Calendar Not Found| | | <a name=\"463\">463</a>|Date In A Calendar Not Found| | | <a name=\"464\">464</a>|Invalid Date Source Data| | | <a name=\"465\">465</a>|Invalid Timezone| | | <a name=\"601\">601</a>|Person Identifier Already In Use| | | <a name=\"602\">602</a>|Person Not Found| | | <a name=\"603\">603</a>|Cannot Set Identifier| | | <a name=\"617\">617</a>|Invalid Recipe Specification In Request| | | <a name=\"618\">618</a>|Inline Recipe Deserialisation Failure| | | <a name=\"619\">619</a>|Identifier Types Not Set For Entity| | | <a name=\"620\">620</a>|Cannot Delete All Client Defined Identifiers| | | <a name=\"650\">650</a>|The Order requested was not found.| | | <a name=\"654\">654</a>|The Allocation requested was not found.| | | <a name=\"655\">655</a>|Cannot build the fx forward target with the given holdings.| | | <a name=\"656\">656</a>|Group does not contain expected entities.| | | <a name=\"667\">667</a>|Relation definition already exists| | | <a name=\"673\">673</a>|Missing 
entitlements for entities in Group| | | <a name=\"674\">674</a>|Next Best Action not found| | | <a name=\"676\">676</a>|Relation definition not defined| | | <a name=\"677\">677</a>|Invalid entity identifier for relation| | | <a name=\"681\">681</a>|Sorting by specified field not supported|One or more of the provided fields to order by were either invalid or not supported. | | <a name=\"682\">682</a>|Too many fields to sort by|The number of fields to sort the data by exceeds the number allowed by the endpoint | | <a name=\"684\">684</a>|Sequence Not Found| | | <a name=\"685\">685</a>|Sequence Already Exists| | | <a name=\"686\">686</a>|Non-cycling sequence has been exhausted| | | <a name=\"687\">687</a>|Legal Entity Identifier Already In Use| | | <a name=\"688\">688</a>|Legal Entity Not Found| | | <a name=\"689\">689</a>|The supplied pagination token is invalid| | | <a name=\"690\">690</a>|Property Type Is Not Supported| | | <a name=\"691\">691</a>|Multiple Tax-lots For Currency Type Is Not Supported| | # noqa: E501
The version of the OpenAPI document: 0.11.2220
Contact: info@finbourne.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class PortfolioCashFlow(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
required_map (dict): The key is attribute name
and the value is whether it is 'required' or 'optional'.
"""
openapi_types = {
'group_by_id': 'int',
'sequence_number': 'int',
'effective_date': 'datetime',
'sub_holding_keys': 'dict(str, PerpetualProperty)',
'type': 'str',
'movement_name': 'str',
'cashflow': 'CurrencyAndAmount',
'balance': 'CurrencyAndAmount',
'fx_rate': 'float',
'cashflow_reporting_currency': 'CurrencyAndAmount',
'balance_reporting_currency': 'CurrencyAndAmount',
'translation_gain_loss': 'CurrencyAndAmount',
'cost_basis_reporting_currency': 'CurrencyAndAmount',
'transaction': 'Transaction',
'links': 'list[Link]'
}
attribute_map = {
'group_by_id': 'groupById',
'sequence_number': 'sequenceNumber',
'effective_date': 'effectiveDate',
'sub_holding_keys': 'subHoldingKeys',
'type': 'type',
'movement_name': 'movementName',
'cashflow': 'cashflow',
'balance': 'balance',
'fx_rate': 'fxRate',
'cashflow_reporting_currency': 'cashflowReportingCurrency',
'balance_reporting_currency': 'balanceReportingCurrency',
'translation_gain_loss': 'translationGainLoss',
'cost_basis_reporting_currency': 'costBasisReportingCurrency',
'transaction': 'transaction',
'links': 'links'
}
required_map = {
'group_by_id': 'required',
'sequence_number': 'required',
'effective_date': 'optional',
'sub_holding_keys': 'optional',
'type': 'required',
'movement_name': 'required',
'cashflow': 'required',
'balance': 'required',
'fx_rate': 'required',
'cashflow_reporting_currency': 'required',
'balance_reporting_currency': 'required',
'translation_gain_loss': 'required',
'cost_basis_reporting_currency': 'required',
'transaction': 'optional',
'links': 'optional'
}
def __init__(self, group_by_id=None, sequence_number=None, effective_date=None, sub_holding_keys=None, type=None, movement_name=None, cashflow=None, balance=None, fx_rate=None, cashflow_reporting_currency=None, balance_reporting_currency=None, translation_gain_loss=None, cost_basis_reporting_currency=None, transaction=None, links=None): # noqa: E501
"""
PortfolioCashFlow - a model defined in OpenAPI
:param group_by_id: The groupBy subHoldings and currency. (required)
:type group_by_id: int
:param sequence_number: Sequence number determining the order of the cash flow records. (required)
:type sequence_number: int
:param effective_date: Indicates the date when the cash-flow settles.
:type effective_date: datetime
:param sub_holding_keys: The sub-holding properties which identify the holding. Each property will be from the 'Transaction' domain. These are configured when a transaction portfolio is created.
:type sub_holding_keys: dict[str, lusid.PerpetualProperty]
:param type: Indicates the record type (Closed, Open, Activity). (required)
:type type: str
:param movement_name: Indicates the specific movement of the transaction that generated this cash flow. (required)
:type movement_name: str
:param cashflow: (required)
:type cashflow: lusid.CurrencyAndAmount
:param balance: (required)
:type balance: lusid.CurrencyAndAmount
:param fx_rate: Exchange rate between the currency of this cash flow and the reporting currency. (required)
:type fx_rate: float
:param cashflow_reporting_currency: (required)
:type cashflow_reporting_currency: lusid.CurrencyAndAmount
:param balance_reporting_currency: (required)
:type balance_reporting_currency: lusid.CurrencyAndAmount
:param translation_gain_loss: (required)
:type translation_gain_loss: lusid.CurrencyAndAmount
:param cost_basis_reporting_currency: (required)
:type cost_basis_reporting_currency: lusid.CurrencyAndAmount
:param transaction:
:type transaction: lusid.Transaction
:param links:
:type links: list[lusid.Link]
""" # noqa: E501
self._group_by_id = None
self._sequence_number = None
self._effective_date = None
self._sub_holding_keys = None
self._type = None
self._movement_name = None
self._cashflow = None
self._balance = None
self._fx_rate = None
self._cashflow_reporting_currency = None
self._balance_reporting_currency = None
self._translation_gain_loss = None
self._cost_basis_reporting_currency = None
self._transaction = None
self._links = None
self.discriminator = None
self.group_by_id = group_by_id
self.sequence_number = sequence_number
if effective_date is not None:
self.effective_date = effective_date
self.sub_holding_keys = sub_holding_keys
self.type = type
self.movement_name = movement_name
self.cashflow = cashflow
self.balance = balance
self.fx_rate = fx_rate
self.cashflow_reporting_currency = cashflow_reporting_currency
self.balance_reporting_currency = balance_reporting_currency
self.translation_gain_loss = translation_gain_loss
self.cost_basis_reporting_currency = cost_basis_reporting_currency
if transaction is not None:
self.transaction = transaction
self.links = links
@property
def group_by_id(self):
"""Gets the group_by_id of this PortfolioCashFlow. # noqa: E501
The groupBy subHoldings and currency. # noqa: E501
:return: The group_by_id of this PortfolioCashFlow. # noqa: E501
:rtype: int
"""
return self._group_by_id
@group_by_id.setter
def group_by_id(self, group_by_id):
"""Sets the group_by_id of this PortfolioCashFlow.
The groupBy subHoldings and currency. # noqa: E501
:param group_by_id: The group_by_id of this PortfolioCashFlow. # noqa: E501
:type: int
"""
if group_by_id is None:
raise ValueError("Invalid value for `group_by_id`, must not be `None`") # noqa: E501
self._group_by_id = group_by_id
@property
def sequence_number(self):
"""Gets the sequence_number of this PortfolioCashFlow. # noqa: E501
Sequence number determining the order of the cash flow records. # noqa: E501
:return: The sequence_number of this PortfolioCashFlow. # noqa: E501
:rtype: int
"""
return self._sequence_number
@sequence_number.setter
def sequence_number(self, sequence_number):
"""Sets the sequence_number of this PortfolioCashFlow.
Sequence number determining the order of the cash flow records. # noqa: E501
:param sequence_number: The sequence_number of this PortfolioCashFlow. # noqa: E501
:type: int
"""
if sequence_number is None:
raise ValueError("Invalid value for `sequence_number`, must not be `None`") # noqa: E501
self._sequence_number = sequence_number
@property
def effective_date(self):
"""Gets the effective_date of this PortfolioCashFlow. # noqa: E501
Indicates the date when the cash-flow settles. # noqa: E501
:return: The effective_date of this PortfolioCashFlow. # noqa: E501
:rtype: datetime
"""
return self._effective_date
@effective_date.setter
def effective_date(self, effective_date):
"""Sets the effective_date of this PortfolioCashFlow.
Indicates the date when the cash-flow settles. # noqa: E501
:param effective_date: The effective_date of this PortfolioCashFlow. # noqa: E501
:type: datetime
"""
self._effective_date = effective_date
@property
def sub_holding_keys(self):
"""Gets the sub_holding_keys of this PortfolioCashFlow. # noqa: E501
The sub-holding properties which identify the holding. Each property will be from the 'Transaction' domain. These are configured when a transaction portfolio is created. # noqa: E501
:return: The sub_holding_keys of this PortfolioCashFlow. # noqa: E501
:rtype: dict(str, PerpetualProperty)
"""
return self._sub_holding_keys
@sub_holding_keys.setter
def sub_holding_keys(self, sub_holding_keys):
"""Sets the sub_holding_keys of this PortfolioCashFlow.
The sub-holding properties which identify the holding. Each property will be from the 'Transaction' domain. These are configured when a transaction portfolio is created. # noqa: E501
:param sub_holding_keys: The sub_holding_keys of this PortfolioCashFlow. # noqa: E501
:type: dict(str, PerpetualProperty)
"""
self._sub_holding_keys = sub_holding_keys
@property
def type(self):
"""Gets the type of this PortfolioCashFlow. # noqa: E501
Indicates the record type (Closed, Open, Activity). # noqa: E501
:return: The type of this PortfolioCashFlow. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this PortfolioCashFlow.
Indicates the record type (Closed, Open, Activity). # noqa: E501
:param type: The type of this PortfolioCashFlow. # noqa: E501
:type: str
"""
if type is None:
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
@property
def movement_name(self):
"""Gets the movement_name of this PortfolioCashFlow. # noqa: E501
Indicates the specific movement of the transaction that generated this cash flow. # noqa: E501
:return: The movement_name of this PortfolioCashFlow. # noqa: E501
:rtype: str
"""
return self._movement_name
@movement_name.setter
def movement_name(self, movement_name):
"""Sets the movement_name of this PortfolioCashFlow.
Indicates the specific movement of the transaction that generated this cash flow. # noqa: E501
:param movement_name: The movement_name of this PortfolioCashFlow. # noqa: E501
:type: str
"""
if movement_name is None:
raise ValueError("Invalid value for `movement_name`, must not be `None`") # noqa: E501
self._movement_name = movement_name
@property
def cashflow(self):
"""Gets the cashflow of this PortfolioCashFlow. # noqa: E501
:return: The cashflow of this PortfolioCashFlow. # noqa: E501
:rtype: CurrencyAndAmount
"""
return self._cashflow
@cashflow.setter
def cashflow(self, cashflow):
"""Sets the cashflow of this PortfolioCashFlow.
:param cashflow: The cashflow of this PortfolioCashFlow. # noqa: E501
:type: CurrencyAndAmount
"""
if cashflow is None:
raise ValueError("Invalid value for `cashflow`, must not be `None`") # noqa: E501
self._cashflow = cashflow
@property
def balance(self):
"""Gets the balance of this PortfolioCashFlow. # noqa: E501
:return: The balance of this PortfolioCashFlow. # noqa: E501
:rtype: CurrencyAndAmount
"""
return self._balance
@balance.setter
def balance(self, balance):
"""Sets the balance of this PortfolioCashFlow.
:param balance: The balance of this PortfolioCashFlow. # noqa: E501
:type: CurrencyAndAmount
"""
if balance is None:
raise ValueError("Invalid value for `balance`, must not be `None`") # noqa: E501
self._balance = balance
@property
def fx_rate(self):
"""Gets the fx_rate of this PortfolioCashFlow. # noqa: E501
Exchange rate between the currency of this cash flow and the reporting currency. # noqa: E501
:return: The fx_rate of this PortfolioCashFlow. # noqa: E501
:rtype: float
"""
return self._fx_rate
@fx_rate.setter
def fx_rate(self, fx_rate):
"""Sets the fx_rate of this PortfolioCashFlow.
Exchange rate between the currency of this cash flow and the reporting currency. # noqa: E501
:param fx_rate: The fx_rate of this PortfolioCashFlow. # noqa: E501
:type: float
"""
if fx_rate is None:
raise ValueError("Invalid value for `fx_rate`, must not be `None`") # noqa: E501
self._fx_rate = fx_rate
@property
def cashflow_reporting_currency(self):
"""Gets the cashflow_reporting_currency of this PortfolioCashFlow. # noqa: E501
:return: The cashflow_reporting_currency of this PortfolioCashFlow. # noqa: E501
:rtype: CurrencyAndAmount
"""
return self._cashflow_reporting_currency
@cashflow_reporting_currency.setter
def cashflow_reporting_currency(self, cashflow_reporting_currency):
"""Sets the cashflow_reporting_currency of this PortfolioCashFlow.
:param cashflow_reporting_currency: The cashflow_reporting_currency of this PortfolioCashFlow. # noqa: E501
:type: CurrencyAndAmount
"""
if cashflow_reporting_currency is None:
raise ValueError("Invalid value for `cashflow_reporting_currency`, must not be `None`") # noqa: E501
self._cashflow_reporting_currency = cashflow_reporting_currency
@property
def balance_reporting_currency(self):
"""Gets the balance_reporting_currency of this PortfolioCashFlow. # noqa: E501
:return: The balance_reporting_currency of this PortfolioCashFlow. # noqa: E501
:rtype: CurrencyAndAmount
"""
return self._balance_reporting_currency
@balance_reporting_currency.setter
def balance_reporting_currency(self, balance_reporting_currency):
"""Sets the balance_reporting_currency of this PortfolioCashFlow.
:param balance_reporting_currency: The balance_reporting_currency of this PortfolioCashFlow. # noqa: E501
:type: CurrencyAndAmount
"""
if balance_reporting_currency is None:
raise ValueError("Invalid value for `balance_reporting_currency`, must not be `None`") # noqa: E501
self._balance_reporting_currency = balance_reporting_currency
@property
def translation_gain_loss(self):
"""Gets the translation_gain_loss of this PortfolioCashFlow. # noqa: E501
:return: The translation_gain_loss of this PortfolioCashFlow. # noqa: E501
:rtype: CurrencyAndAmount
"""
return self._translation_gain_loss
@translation_gain_loss.setter
def translation_gain_loss(self, translation_gain_loss):
"""Sets the translation_gain_loss of this PortfolioCashFlow.
:param translation_gain_loss: The translation_gain_loss of this PortfolioCashFlow. # noqa: E501
:type: CurrencyAndAmount
"""
if translation_gain_loss is None:
raise ValueError("Invalid value for `translation_gain_loss`, must not be `None`") # noqa: E501
self._translation_gain_loss = translation_gain_loss
@property
def cost_basis_reporting_currency(self):
"""Gets the cost_basis_reporting_currency of this PortfolioCashFlow. # noqa: E501
:return: The cost_basis_reporting_currency of this PortfolioCashFlow. # noqa: E501
:rtype: CurrencyAndAmount
"""
return self._cost_basis_reporting_currency
@cost_basis_reporting_currency.setter
def cost_basis_reporting_currency(self, cost_basis_reporting_currency):
"""Sets the cost_basis_reporting_currency of this PortfolioCashFlow.
:param cost_basis_reporting_currency: The cost_basis_reporting_currency of this PortfolioCashFlow. # noqa: E501
:type: CurrencyAndAmount
"""
if cost_basis_reporting_currency is None:
raise ValueError("Invalid value for `cost_basis_reporting_currency`, must not be `None`") # noqa: E501
self._cost_basis_reporting_currency = cost_basis_reporting_currency
@property
def transaction(self):
"""Gets the transaction of this PortfolioCashFlow. # noqa: E501
:return: The transaction of this PortfolioCashFlow. # noqa: E501
:rtype: Transaction
"""
return self._transaction
@transaction.setter
def transaction(self, transaction):
"""Sets the transaction of this PortfolioCashFlow.
:param transaction: The transaction of this PortfolioCashFlow. # noqa: E501
:type: Transaction
"""
self._transaction = transaction
@property
def links(self):
"""Gets the links of this PortfolioCashFlow. # noqa: E501
:return: The links of this PortfolioCashFlow. # noqa: E501
:rtype: list[Link]
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this PortfolioCashFlow.
:param links: The links of this PortfolioCashFlow. # noqa: E501
:type: list[Link]
"""
self._links = links
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PortfolioCashFlow):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
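# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the generated SDK). It assumes the
# companion model `lusid.models.currency_and_amount.CurrencyAndAmount`, with
# `amount` and `currency` fields as implied by the field descriptions in the
# module docstring, is importable from this package; all required fields must
# be supplied, otherwise the property setters above raise ValueError.
if __name__ == "__main__":
    from lusid.models.currency_and_amount import CurrencyAndAmount  # assumed path

    def gbp(amount):
        # Helper for this example only.
        return CurrencyAndAmount(amount=amount, currency="GBP")

    cash_flow = PortfolioCashFlow(
        group_by_id=1,
        sequence_number=1,
        type="Activity",
        movement_name="Settle",
        cashflow=gbp(100.0),
        balance=gbp(100.0),
        fx_rate=1.0,
        cashflow_reporting_currency=gbp(100.0),
        balance_reporting_currency=gbp(100.0),
        translation_gain_loss=gbp(0.0),
        cost_basis_reporting_currency=gbp(100.0),
    )
    # Nested models are serialised recursively by to_dict().
    print(cash_flow.to_dict())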
|
from base64 import b64encode
from datetime import date
from onegov.ballot import Ballot
from onegov.ballot import Election
from onegov.ballot import ElectionCompound
from onegov.ballot import Vote
from onegov.election_day import _
from onegov.election_day.utils.d3_renderer import D3Renderer
from unittest.mock import patch, MagicMock
def test_d3_renderer_scripts(election_day_app):
generator = D3Renderer(election_day_app)
assert len(generator.scripts)
def test_d3_renderer_translation(election_day_app):
generator = D3Renderer(election_day_app)
assert generator.translate(_('Election'), 'de_CH') == 'Wahl'
assert generator.translate(_('Election'), 'fr_CH') == 'Election'
assert generator.translate(_('Election'), 'it_CH') == 'Elezione'
assert generator.translate(_('Election'), 'rm_CH') == 'Elecziun'
def test_d3_renderer_get_chart(election_day_app):
d3 = D3Renderer(election_day_app)
with patch('onegov.election_day.utils.d3_renderer.post',
return_value=MagicMock(text='<svg></svg>')) as post:
data = {'key': 'value'}
params = {'p': '1'}
chart = d3.get_chart('bar', 'svg', data, 1000, params)
assert chart.read() == '<svg></svg>'
assert post.call_count == 1
assert post.call_args[0] == ('http://localhost:1337/d3/svg',)
assert post.call_args[1]['json']['main'] == 'barChart'
assert post.call_args[1]['json']['params'] == {
'p': '1',
'viewport_width': 1000,
'data': {'key': 'value'},
'width': 1000
}
chart = d3.get_chart('grouped', 'svg', data, 800, params)
assert chart.read() == '<svg></svg>'
assert post.call_count == 2
assert post.call_args[0] == ('http://localhost:1337/d3/svg',)
assert post.call_args[1]['json']['main'] == 'groupedChart'
assert post.call_args[1]['json']['params'] == {
'p': '1',
'viewport_width': 800,
'data': {'key': 'value'},
'width': 800
}
chart = d3.get_chart('sankey', 'svg', data, 600, params)
assert chart.read() == '<svg></svg>'
assert post.call_count == 3
assert post.call_args[0] == ('http://localhost:1337/d3/svg',)
assert post.call_args[1]['json']['main'] == 'sankeyChart'
assert post.call_args[1]['json']['params'] == {
'p': '1',
'viewport_width': 600,
'data': {'key': 'value'},
'width': 600
}
chart = d3.get_chart('entities-map', 'svg', data, 400, params)
assert chart.read() == '<svg></svg>'
assert post.call_count == 4
assert post.call_args[0] == ('http://localhost:1337/d3/svg',)
assert post.call_args[1]['json']['main'] == 'entitiesMap'
assert post.call_args[1]['json']['params'] == {
'p': '1',
'viewport_width': 400,
'data': {'key': 'value'},
'width': 400
}
chart = d3.get_chart('districts-map', 'svg', data, 400, params)
assert chart.read() == '<svg></svg>'
assert post.call_count == 5
assert post.call_args[0] == ('http://localhost:1337/d3/svg',)
assert post.call_args[1]['json']['main'] == 'districtsMap'
assert post.call_args[1]['json']['params'] == {
'p': '1',
'viewport_width': 400,
'data': {'key': 'value'},
'width': 400
}
chart = d3.get_map('entities', 'svg', data, 2015, 400, params)
assert chart.read() == '<svg></svg>'
assert post.call_count == 6
assert post.call_args[0] == ('http://localhost:1337/d3/svg',)
assert post.call_args[1]['json']['main'] == 'entitiesMap'
assert post.call_args[1]['json']['params']['width'] == 400
assert post.call_args[1]['json']['params']['viewport_width'] == 400
assert post.call_args[1]['json']['params']['p'] == '1'
assert post.call_args[1]['json']['params']['data'] == data
assert post.call_args[1]['json']['params']['mapdata']
assert post.call_args[1]['json']['params']['canton'] == 'zg'
chart = d3.get_map('districts', 'svg', data, 2015, 400, params)
assert chart.read() == '<svg></svg>'
assert post.call_count == 7
assert post.call_args[0] == ('http://localhost:1337/d3/svg',)
assert post.call_args[1]['json']['main'] == 'districtsMap'
assert post.call_args[1]['json']['params']['width'] == 400
assert post.call_args[1]['json']['params']['viewport_width'] == 400
assert post.call_args[1]['json']['params']['p'] == '1'
assert post.call_args[1]['json']['params']['data'] == data
assert post.call_args[1]['json']['params']['mapdata']
assert post.call_args[1]['json']['params']['canton'] == 'zg'
with patch('onegov.election_day.utils.d3_renderer.post',
return_value=MagicMock(text=b64encode('PDF'.encode()))) as post:
data = {'key': 'value'}
        assert d3.get_chart('bar', 'pdf', data).read().decode() == 'PDF'
        assert d3.get_chart('grouped', 'pdf', data).read().decode() == 'PDF'
        assert d3.get_chart('sankey', 'pdf', data).read().decode() == 'PDF'
        assert d3.get_chart('entities-map', 'pdf', data).read().decode() == 'PDF'
        assert d3.get_chart('districts-map', 'pdf', data).read().decode() == 'PDF'
        assert d3.get_map('entities', 'pdf', data, 2015).read().decode() == 'PDF'
        assert d3.get_map('districts', 'pdf', data, 2015).read().decode() == 'PDF'
assert post.call_args[0] == ('http://localhost:1337/d3/pdf',)
def test_d3_renderer_get_charts(election_day_app):
election = Election(
title="Election",
domain='federation',
date=date(2011, 1, 1),
)
compound = ElectionCompound(
title="ElectionCompound",
domain='canton',
date=date(2011, 1, 1),
)
vote = Vote(
title="Vote",
domain='federation',
date=date(2011, 1, 1),
)
vote.ballots.append(Ballot(type='proposal'))
session = election_day_app.session()
session.add(election)
session.add(compound)
session.add(vote)
d3 = D3Renderer(election_day_app)
assert d3.get_lists_chart(election, 'svg') is None
assert d3.get_lists_chart(compound, 'svg') is None
assert d3.get_lists_chart(vote, 'svg') is None
assert d3.get_lists_chart(vote.proposal, 'svg') is None
assert d3.get_candidates_chart(election, 'svg') is None
assert d3.get_candidates_chart(compound, 'svg') is None
assert d3.get_candidates_chart(vote, 'svg') is None
assert d3.get_candidates_chart(vote.proposal, 'svg') is None
assert d3.get_connections_chart(election, 'svg') is None
assert d3.get_connections_chart(compound, 'svg') is None
assert d3.get_connections_chart(vote, 'svg') is None
assert d3.get_connections_chart(vote.proposal, 'svg') is None
assert d3.get_party_strengths_chart(election, 'svg') is None
assert d3.get_party_strengths_chart(compound, 'svg') is None
assert d3.get_party_strengths_chart(vote, 'svg') is None
assert d3.get_party_strengths_chart(vote.proposal, 'svg') is None
assert d3.get_lists_panachage_chart(election, 'svg') is None
assert d3.get_lists_panachage_chart(compound, 'svg') is None
assert d3.get_lists_panachage_chart(vote, 'svg') is None
assert d3.get_lists_panachage_chart(vote.proposal, 'svg') is None
assert d3.get_parties_panachage_chart(election, 'svg') is None
assert d3.get_parties_panachage_chart(compound, 'svg') is None
assert d3.get_parties_panachage_chart(vote, 'svg') is None
assert d3.get_parties_panachage_chart(vote.proposal, 'svg') is None
assert d3.get_entities_map(election, 'svg') is None
assert d3.get_entities_map(compound, 'svg') is None
assert d3.get_entities_map(vote, 'svg') is None
assert d3.get_entities_map(vote.proposal, 'svg') is None
assert d3.get_districts_map(election, 'svg') is None
assert d3.get_districts_map(compound, 'svg') is None
assert d3.get_districts_map(vote, 'svg') is None
assert d3.get_districts_map(vote.proposal, 'svg') is None
|
import os
import json
import html
import math
import re
from collections import OrderedDict
from collections import Counter
from string import punctuation
import numpy as np
import pandas as pd
import torch
from torch.utils.data import TensorDataset, DataLoader
from hparams import hps_data
def read_data(dataset, max_count=hps_data.max_count):
# https://webhose.io/free-datasets/popular-news-articles/
data_path = f'data/{hps_data.data_l[dataset]}'
files = os.listdir(data_path)
data_all = []
if hps_data.use_all_data:
max_count = len(files)
    for file in files[:max_count]:
with open(f'{data_path}/{file}', encoding='utf-8') as json_file:
data = json.load(json_file)
data_all.append(data)
return data_all
def remove_keys(data_all):
for file in data_all:
for thread_key in file['thread'].keys():
file[thread_key] = file['thread'][thread_key]
keys = list(file.keys())
for key in keys:
            if key not in hps_data.keep_keys:
file.pop(key, None)
return data_all
def pprint(file):
print(json.dumps(file, indent=hps_data.indent))
def cal_all_engagements(data_all):
for file in data_all:
engagements = {}
engagements['log_weigh'] = cal_engagement(file)
engagements['log_no_weigh'] = cal_engagement(file, comment_weight=1)
engagements['no_log_weigh'] = cal_engagement(file, take_log=False)
engagements['no_log_no_weight'] = cal_engagement(file, comment_weight=1, take_log=False)
file['engagement_scores'] = engagements
file['engagement_scores']['original'] = file['performance_score']
file.pop('performance_score', None)
file.pop('social', None)
return data_all
def cal_engagement(file, comment_weight=hps_data.comment_weight, take_log=hps_data.take_log):
engagement = 0
for key in file['social'].keys():
for metric in file['social'][key]:
if metric != 'likes':
if metric == 'comments':
engagement += comment_weight * file['social'][key][metric]
else:
engagement += file['social'][key][metric]
    try:
        domain_rank = math.log(file['domain_rank']) if take_log else file['domain_rank']
    except (KeyError, TypeError, ValueError):
        # Fall back to a neutral weighting when the domain rank is missing or unusable.
        domain_rank = 1
engagement *= domain_rank
return engagement
def order_keys(data_all):
data_all_ordered = []
for file in data_all:
if file['sanitized_title'] != 'fail':
new_file = {key : file[key] for key in hps_data.key_order if key in file}
data_all_ordered.append(new_file)
return data_all_ordered
def sanitize_text(text):
text = text.lower()
text = html.unescape(text)
text = re.sub(re.compile('<.*?>'), '', text)
    try:
        # Keep the longer side of a 'title | site name' style split.
        right = text[:text.index("|")]
        left = text[text.index("|"):]
        text = right if len(right) > len(left) else left
    except ValueError:
        pass
    try:
        # Drop a trailing '- publisher' style suffix if it contains a banned token.
        remaining = text[text.rindex("-"):]
        if any(x in remaining for x in hps_data.banned):
            text = text[:text.rindex("-")]
    except ValueError:
        pass
text = ''.join([c for c in text if c not in hps_data.punct])
text = ' '.join(text.split())
test_text = ''.join([c for c in text if c not in hps_data.allowed])
try:
test_text.encode('ascii')
return text
except UnicodeEncodeError:
return 'fail'
def remove_words(text, dictionary):
text = ' '.join([word for word in text.split() if word in dictionary])
return text
def get_titles(data_all):
titles = []
for file in data_all:
title = sanitize_text(file['title'])
file['sanitized_title'] = title
if title != 'fail':
titles.append(title)
return titles
def get_all_text(titles):
all_text = ''
for title in titles:
all_text += title + ' '
return all_text
def get_words(all_text):
return all_text.split()
def get_scores(data_all_ordered, score=hps_data.score):
scores = []
for file in data_all_ordered:
scores.append(file['engagement_scores'][score])
return scores
def tokenize_words(words):
counts = Counter(words)
vocab = sorted(counts, key=counts.get, reverse=True)
tokens = {word:ii for ii, word in enumerate(vocab, 1)}
return tokens
def tokenize_titles(titles, tokens):
title_tokens = []
for title in titles:
title_tokens.append([tokens[word] for word in title.split()])
return title_tokens
def get_title_lengths(title_tokens):
title_lengths = Counter([len(title) for title in title_tokens])
return title_lengths
def remove_shorts(title_tokens, titles, scores, min_len=hps_data.min_len):
    # Iterate over a copy so that removing short titles does not skip elements
    # of the lists being mutated in place.
    for title in title_tokens.copy():
        if len(title) < min_len:
            idx = title_tokens.index(title)
            scores.pop(idx)
            titles.pop(idx)
            title_tokens.pop(idx)
    return title_tokens, titles, scores
def pad_titles(title_tokens, seq_length=hps_data.seq_length):
padded_titles = []
for title in title_tokens:
if len(title) > seq_length:
title = title[:seq_length]
else:
zeros = np.zeros(seq_length - len(title), dtype=int)
title = np.concatenate((zeros, title))
padded_titles.append(title)
return np.array(padded_titles)
def update_hps(hps, **kwargs):
hps.paddings = {k:k//2 for k in hps.kernel_sizes}
hps.linear_in = len(hps.kernel_sizes)*hps.conv_out+hps.hidden_dim*hps.num_layers
hps.linear_in2 = hps.linear_in//2
hps.patience = hps.epochs//5
hps.dataset_s = hps.dataset.replace('-', '_')
hps.count_s = hps.max_count
hps.update(**kwargs)
return hps
def get_mean_std(data):
return np.mean(data), np.std(data)
def scale_data(data, mean, std):
return list((data-mean)/std)
def split_data(data, split_frac=hps_data.split_frac):
data = np.array(data)
split_idx = int(len(data)*split_frac)
train, remaining = data[:split_idx], data[split_idx:]
remaining_idx = int(len(remaining)*.5)
val, test = remaining[:remaining_idx], remaining[remaining_idx:]
return train, val, test
def create_tensor_dataset(data_x, data_y):
return TensorDataset(torch.from_numpy(data_x).to(torch.int64), torch.from_numpy(data_y))
def create_loader(data, shuffle=hps_data.shuffle, batch_size=hps_data.batch_size, drop_last=hps_data.drop_last):
return DataLoader(data, shuffle=shuffle, batch_size=batch_size, drop_last=drop_last)
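# ---------------------------------------------------------------------------
# Hypothetical end-to-end sketch (not part of the original pipeline) showing
# one plausible way the helpers above compose. The dataset key 'news' is an
# assumption, and it is assumed that hps_data.keep_keys retains the 'social'
# and 'performance_score' fields needed by cal_all_engagements.
if __name__ == '__main__':
    data_all = remove_keys(read_data('news'))
    titles = get_titles(data_all)                      # also sets 'sanitized_title'
    data_all = cal_all_engagements(data_all)
    data_all_ordered = order_keys(data_all)            # drops titles that failed sanitising
    scores = get_scores(data_all_ordered)
    tokens = tokenize_words(get_words(get_all_text(titles)))
    title_tokens = tokenize_titles(titles, tokens)
    title_tokens, titles, scores = remove_shorts(title_tokens, titles, scores)
    features = pad_titles(title_tokens)
    mean, std = get_mean_std(scores)
    targets = np.array(scale_data(np.array(scores), mean, std))
    train_x, val_x, test_x = split_data(features)
    train_y, val_y, test_y = split_data(targets)
    train_loader = create_loader(create_tensor_dataset(train_x, train_y))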
|
COPY_SQL = """
COPY {}
FROM '{}'
ACCESS_KEY_ID '{{}}'
SECRET_ACCESS_KEY '{{}}'
IGNOREHEADER 1
DELIMITER ';'
"""
COPY_ALL_RIDES_SQL = COPY_SQL.format(
"staging_rides",
f's3://uber-tracking-expenses-bucket-s3-{{}}/rides/rides_receipts.csv'
)
COPY_ALL_EATS_SQL = COPY_SQL.format(
"staging_eats",
f's3://uber-tracking-expenses-bucket-s3-{{}}/eats/eats_receipts.csv'
)
COPY_ALL_EATS_ITEMS_SQL = COPY_SQL.format(
"staging_eats_items",
f's3://uber-tracking-expenses-bucket-s3-{{}}/eats/items_eats_receipts.csv'
)
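# Note: after the .format() calls above, each COPY statement still contains three
# '{}' placeholders (the bucket suffix from the escaped '{}' in the S3 path, plus
# the ACCESS_KEY_ID and SECRET_ACCESS_KEY from the '{{}}' pairs in COPY_SQL).
# A hypothetical final formatting step at run time, with placeholder values:
#
#   COPY_ALL_RIDES_SQL.format(bucket_suffix, aws_access_key_id, aws_secret_access_key)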
create_staging_eats = ("""
DROP TABLE IF EXISTS staging_eats;
CREATE TABLE staging_eats(
event_id INT IDENTITY(0,1),
subject VARCHAR(255),
userservice VARCHAR(255),
uber_email VARCHAR(255),
date TIMESTAMP,
filename VARCHAR(255),
service VARCHAR(50),
amount_charged DOUBLE PRECISION,
total DOUBLE PRECISION,
subtotal DOUBLE PRECISION,
delivery_fee DOUBLE PRECISION,
service_Fee DOUBLE PRECISION,
change DOUBLE PRECISION,
restaurant VARCHAR(255),
picked_up_from VARCHAR(255),
delivered_to VARCHAR(255),
lat_from DECIMAL(10, 8),
long_from DECIMAL(11, 8),
lat_to DECIMAL(10, 8),
long_to DECIMAL(11, 8),
items INTEGER);
""")
create_staging_rides = ("""
DROP TABLE IF EXISTS staging_rides;
CREATE TABLE staging_rides(
event_id INT IDENTITY(0,1),
subject VARCHAR(255),
userservice VARCHAR(255),
uber_email VARCHAR(255),
date TIMESTAMP,
filename VARCHAR(255),
service VARCHAR(50),
amount_charged DOUBLE PRECISION,
total DOUBLE PRECISION,
subtotal DOUBLE PRECISION,
booking_fee DOUBLE PRECISION,
government_contribution DOUBLE PRECISION,
wait_time DOUBLE PRECISION,
trip_fare DOUBLE PRECISION,
discounts DOUBLE PRECISION,
before_Taxes DOUBLE PRECISION,
balance DOUBLE PRECISION,
time_payment DOUBLE PRECISION,
distance DOUBLE PRECISION,
unsettled_past_uber_trip DOUBLE PRECISION,
distance_service DOUBLE PRECISION,
time_from_service TIMESTAMP,
time_to_service TIMESTAMP,
from_address VARCHAR(255),
to_address VARCHAR(255),
lat_from DECIMAL(10, 8),
long_from DECIMAL(11, 8),
lat_to DECIMAL(10, 8),
long_to DECIMAL(11, 8));
""")
create_staging_eats_items = ("""
DROP TABLE IF EXISTS staging_eats_items;
CREATE TABLE staging_eats_items(
event_id INT IDENTITY(0,1),
item VARCHAR(255),
qty INTEGER,
cost DOUBLE PRECISION,
id INTEGER);
""")
create_fact_rides = ("""
DROP TABLE IF EXISTS fact_rides;
CREATE TABLE fact_rides(
id_ride INTEGER NOT NULL PRIMARY KEY,
id_date timestamp NOT NULL,
id_user INTEGER NOT NULL,
amount_charged DOUBLE PRECISION NOT NULL,
total DOUBLE PRECISION NOT NULL,
subtotal DOUBLE PRECISION NOT NULL,
booking_fee DOUBLE PRECISION NOT NULL,
government_contribution DOUBLE PRECISION NOT NULL,
wait_time DOUBLE PRECISION,
trip_Fare DOUBLE PRECISION,
discounts DOUBLE PRECISION,
before_Taxes DOUBLE PRECISION,
balance DOUBLE PRECISION,
time_payment DOUBLE PRECISION,
distance_payment DOUBLE PRECISION,
unsettled_past_uber_trip DOUBLE PRECISION,
distance_service DOUBLE PRECISION NOT NULL,
time_service DOUBLE PRECISION NOT NULL,
id_time_from_service TIMESTAMP NOT NULL,
id_time_to_service TIMESTAMP NOT NULL,
id_from_location INTEGER NOT NULL,
id_to_location INTEGER NOT NULL)
DISTSTYLE AUTO
SORTKEY(id_date, id_ride);
""")
create_fact_eats = ("""
DROP TABLE IF EXISTS fact_eats;
CREATE TABLE fact_eats(
id_order INTEGER NOT NULL PRIMARY KEY,
id_date timestamp NOT NULL,
id_user INTEGER NOT NULL,
amount_charged DOUBLE PRECISION NOT NULL,
total DOUBLE PRECISION NOT NULL,
subtotal DOUBLE PRECISION NOT NULL,
delivery_fee DOUBLE PRECISION NOT NULL,
service_Fee DOUBLE PRECISION NOT NULL,
change DOUBLE PRECISION NOT NULL,
id_restaurant INTEGER NOT NULL,
id_delivered_to_location INTEGER NOT NULL)
DISTSTYLE AUTO
SORTKEY(id_date, id_order);
""")
create_dim_products = ("""
DROP TABLE IF EXISTS dim_products;
CREATE TABLE dim_products(
id INTEGER NOT NULL PRIMARY KEY identity(1,1),
product_name NVARCHAR(255));
""")
create_dim_products_order = ("""
DROP TABLE IF EXISTS dim_products_order;
CREATE TABLE dim_products_order(
id_product_order INTEGER NOT NULL PRIMARY KEY identity(1,1),
id_order INTEGER NOT NULL,
id_product INTEGER NOT NULL,
qty INTEGER NOT NULL,
cost DOUBLE PRECISION NOT NULL);
""")
create_dim_restaurants = ("""
DROP TABLE IF EXISTS dim_restaurants;
CREATE TABLE dim_restaurants(
id INTEGER NOT NULL PRIMARY KEY identity(1,1),
name NVARCHAR(255) NOT NULL,
id_location INTEGER NOT NULL);
""")
create_dim_users = ("""
DROP TABLE IF EXISTS dim_users;
CREATE TABLE dim_users(
id INTEGER NOT NULL PRIMARY KEY identity(1,1),
email NVARCHAR(1000) NOT NULL);
""")
create_dim_locations = ("""
DROP TABLE IF EXISTS dim_locations;
CREATE TABLE dim_locations(
id INTEGER NOT NULL PRIMARY KEY identity(1,1),
lat DECIMAL(10, 8) NOT NULL,
long DECIMAL(11, 8) NOT NULL,
address NVARCHAR(1000) NOT NULL);
""")
create_dim_times = ("""
DROP TABLE IF EXISTS dim_times;
CREATE TABLE dim_times(
date TIMESTAMP not null distkey sortkey,
hour INTEGER,
day INTEGER,
week INTEGER,
month INTEGER,
year INTEGER,
weekday INTEGER,
is_weekend boolean,
PRIMARY KEY(date));
""")
create_dim_weekday = ("""
DROP TABLE IF EXISTS dim_weekday;
CREATE TABLE dim_weekday(
id INTEGER not null PRIMARY KEY,
weekday_name NVARCHAR(20));
""")
create_dim_month = ("""
DROP TABLE IF EXISTS dim_month;
CREATE TABLE dim_month(
id INTEGER not null PRIMARY KEY,
month_name NVARCHAR(20));
""")
create_dim_year = ("""
DROP TABLE IF EXISTS dim_year;
CREATE TABLE dim_year(
id INTEGER not null PRIMARY KEY);
""")
create_dim_hour = ("""
DROP TABLE IF EXISTS dim_hour;
CREATE TABLE dim_hour(
id INTEGER not null PRIMARY KEY);
""")
load_dim_times = ("""
INSERT INTO dim_times(date, hour, day, week, month, year, weekday, is_weekend)
(SELECT date,
extract(hour from date) as hour,
extract(day from date) as day ,
extract(week from date) as week,
extract(month from date) as month,
extract(year from date) as year,
extract(weekday from date) as weekday,
decode(date_part(dow,date),0,true,6,true,false) as is_weekend
FROM staging_eats
UNION
SELECT date,
extract(hour from date) as hour,
extract(day from date) as day ,
extract(week from date) as week,
extract(month from date) as month,
extract(year from date) as year,
extract(weekday from date) as weekday,
decode(date_part(dow,date),0,true,6,true,false) as is_weekend
FROM staging_rides
UNION
SELECT time_from_service,
extract(hour from time_from_service) as hour,
extract(day from time_from_service) as day ,
extract(week from time_from_service) as week,
extract(month from time_from_service) as month,
extract(year from time_from_service) as year,
extract(weekday from time_from_service) as weekday,
decode(date_part(dow,time_from_service),0,true,6,true,false) as is_weekend
FROM staging_rides
UNION
SELECT time_to_service,
extract(hour from time_to_service) as hour,
extract(day from time_to_service) as day ,
extract(week from time_to_service) as week,
extract(month from time_to_service) as month,
extract(year from time_to_service) as year,
extract(weekday from time_to_service) as weekday,
decode(date_part(dow,time_to_service),0,true,6,true,false) as is_weekend
FROM staging_rides);
""")
load_dim_hour = ("""
INSERT INTO dim_hour(id) values(1),(2),(3),(4),(5),(6),(7),(8),(9),(10),(11),(12),(13),(14),(15),(16),(17),(18),(19),(20),(21),(22),(23),(0);
""")
load_dim_month = ("""
INSERT INTO dim_month values(1, 'January'),
(2, 'February'),
(3, 'March'),
(4, 'April'),
(5, 'May'),
(6, 'June'),
(7, 'July'),
(8, 'August'),
(9, 'September'),
(10, 'October'),
(11, 'November'),
(12, 'December');
""")
load_dim_weekday = ("""
INSERT INTO dim_weekday values(1, 'Monday'),
(2, 'Tuesday'),
(3, 'Wednesday'),
(4, 'Thursday'),
(5, 'Friday'),
(6, 'Saturday'),
(0, 'Sunday');
""")
load_dim_year = ("""
INSERT INTO dim_year(id) values(2013), (2014), (2015), (2016), (2017), (2018), (2019), (2020), (2021);
""")
load_dim_products = ("""
insert into dim_products(product_name)(select distinct item from staging_eats_items);
""")
load_dim_products_order = ("""
insert into dim_products_order(id_order, id_product, qty, cost)
(SELECT se.event_id as id_order,
p.id as id_product,
sei.qty,
sei.cost
FROM staging_eats_items sei,
staging_eats se,
dim_products p
WHERE p.product_name = sei.item and
sei.id = se.items);
""")
load_dim_locations = ("""
INSERT INTO dim_locations(address, lat, long)
(
select distinct delivered_to as address, 0.0 as lat, 0.0 as long
FROM staging_eats
UNION
select distinct picked_up_from as address, 0.0 as lat, 0.0 as long
from staging_eats
UNION
select distinct from_address as address, 0.0 as lat, 0.0 as long
from staging_rides
UNION
select distinct to_address as address, 0.0 as lat, 0.0 as long
from staging_rides
);
""")
load_dim_restaurants = ("""
insert into dim_restaurants(name, id_location)
(select distinct restaurant as name,
(select id from dim_locations where address = picked_up_from) as id_location
from staging_eats);
""")
load_dim_users = ("""
insert into dim_users(email)
(select distinct userservice
from staging_eats
union
select distinct userservice
from staging_rides)
""")
load_fact_eats = ("""
INSERT INTO fact_eats
(SELECT EVENT_ID as id_order,
date as id_date,
(select id from dim_users where email = userservice) as id_user,
amount_charged,
total,
subtotal,
delivery_fee,
nvl2(service_Fee, service_Fee, 0.0) as service_Fee,
nvl2(change, change, 0.0) as change,
(select id from dim_restaurants where name = restaurant) as id_restaurant,
(select id from dim_locations where address = delivered_to) as id_delivered_to_location
FROM staging_eats);
""")
load_fact_rides = ("""
INSERT INTO fact_rides
(
SELECT
event_id as id_ride,
date as id_date,
(select id from dim_users where email = userservice) as id_user,
nvl2(amount_charged, amount_charged, 0.0) as amount_charged,
nvl2(total, total, 0.0) as total,
nvl2(subtotal, subtotal, 0.0) as subtotal,
nvl2(booking_fee, booking_fee, 0.0) as booking_fee,
nvl2(government_contribution, government_contribution, 0.0) as government_contribution,
nvl2(wait_time, wait_time, 0.0) as wait_time,
nvl2(trip_fare, trip_fare, 0.0) as trip_fare,
nvl2(discounts, discounts, 0.0) as discounts,
nvl2(before_Taxes, before_Taxes, 0.0) as before_Taxes,
nvl2(balance, balance, 0.0) as balance,
nvl2(time_payment, time_payment, 0.0) as time_payment,
nvl2(distance, distance, 0.0) as distance_payment,
nvl2(unsettled_past_uber_trip, unsettled_past_uber_trip, 0.0) as unsettled_past_uber_trip,
nvl2(distance_service, distance_service, 0.0) as distance_service,
datediff(minute,time_from_service,time_to_service) as time_service,
time_from_service as id_time_from_service,
time_to_service as id_time_to_service,
(select id from dim_locations where address = from_address) as id_from_location,
(select id from dim_locations where address = to_address) as id_to_location
FROM staging_rides
);
""")
drop_staging = ("""
DROP TABLE IF EXISTS staging_eats;
DROP TABLE IF EXISTS staging_rides;
DROP TABLE IF EXISTS staging_eats_items;
""")
fixing_locations = ("""
update staging_eats
set picked_up_from = 'Laureles 1300, Belenes Nte., 45130 Zapopan, Jal., México'
where restaurant = 'K F C( Terraza Belenes-726)';
update staging_eats
set picked_up_from = 'Av. Juan Gil Preciado, Plaza La Cima, La Cima, 45134 Zapopan, Jal., México'
where restaurant = 'Cuartode Kilo( La Cima)';
update staging_eats
set picked_up_from = 'Av. Juan Gil Preciado 1600 , La Cima, 45130 Zapopan, Jal., Mexico'
where restaurant = 'Los Tarascos( La Cima)';
update staging_eats
set picked_up_from = 'Av Federalistas 1100-3, Colinas del Rey, 45130 Zapopan, Jal., México'
where restaurant = 'Punto Salad';
update staging_eats
set picked_up_from = 'Av Torremolinos 3465 , Colinasdel Rey, 45130 Zapopan, Jal., Mexico'
where restaurant = 'La Desayuneria';
update staging_eats
set picked_up_from = 'Avenida Santa Margarita #3600 Local FS-04, Colonia Residencial, 45136 Zapopan, Jal., México'
where restaurant = 'Popeyes Real Center';
update staging_eats
set picked_up_from = 'Laureles 1300, Belenes Nte., 45130 Zapopan, Jal., México'
where restaurant = 'Pizza Hut Belenes';
update staging_eats
set picked_up_from = 'Av. Juan Gil Preciado 1806, Los Robles, 45134 Zapopan, Jal., México'
where restaurant = 'Starbucks( Plaza Los Robles)';
update staging_eats
set picked_up_from = 'Calz Federalistas 2380, Jardines del Valle, 45138 Zapopan, Jal., México'
where restaurant = 'Pollo Pepe( Jardines)';
update staging_eats
set picked_up_from = 'Av Valdepeñas 2380, Lomas de Zapopan, 45130 Zapopan, Jal.'
where restaurant = 'Pollo Pepe( Lomasde Zapopan)';
update staging_eats
set picked_up_from = 'Av. Base Aerea 465, Nuevo México, 45132 Zapopan, Jal., México'
where restaurant = 'Carlo Cocina Artesanal';
update staging_eats
set picked_up_from = 'Av Valdepeñas 8819, Real de Valdepeñas, 45130 Zapopan, Jal., México'
where restaurant = 'Lascazuelasdelaabuela';
update staging_eats
set picked_up_from = 'Camino Viejo a Tesistan 1579, Santa Margarita1a Secc., 45140 Zapopan, Jal., México'
where restaurant = 'Breakfast México| Restaurante Digital';
update staging_eats
set picked_up_from = 'Av. Acueducto 849, Santa Margarita1a Secc., 45140 Zapopan, Jal., México'
where restaurant = 'Pollo Bronco( S A N T A M A R G A R I T A)';
update staging_eats
set picked_up_from = 'Av. Bosques de San Isidro 780, Local B-6 y B-7, La Grana Fraccionamiento, 45157 Zapopan, Jal.'
where restaurant = 'Mia Mia Pizzería( San Isidro)';
""")
# .....................CONSTRAINTS......................
# alter table dim_products_order ADD CONSTRAINT fk_dim_products_order FOREIGN KEY(id_product) REFERENCES dim_products(id);
# alter table dim_products_order ADD CONSTRAINT fk_dim_products_order_fact_eats FOREIGN KEY(id_order) REFERENCES fact_eats(id_order);
# alter table dim_restaurants ADD CONSTRAINT fk_dim_restaurants_dim_locations FOREIGN KEY(id_location) REFERENCES dim_locations(id);
# alter table fact_eats ADD CONSTRAINT fk_fact_eats_dim_restaurants FOREIGN KEY(id_restaurant) REFERENCES dim_restaurants(id);
# alter table fact_eats ADD CONSTRAINT fk_fact_eats_dim_locations FOREIGN KEY(id_delivered_to_location) REFERENCES dim_locations(id);
# alter table fact_eats ADD CONSTRAINT fk_fact_eats_dim_users FOREIGN KEY(id_user) REFERENCES dim_users(id);
# alter table fact_eats ADD CONSTRAINT fk_fact_eats_dim_times FOREIGN KEY(id_date) REFERENCES dim_times(date);
# alter table fact_rides ADD CONSTRAINT fk_fact_rides_dim_users FOREIGN KEY(id_user) REFERENCES dim_users(id);
# alter table fact_rides ADD CONSTRAINT fk_fact_rides_dim_location1 FOREIGN KEY(id_from_location) REFERENCES dim_locations(id);
# alter table fact_rides ADD CONSTRAINT fk_fact_rides_dim_location2 FOREIGN KEY(id_to_location) REFERENCES dim_locations(id);
# alter table fact_rides ADD CONSTRAINT fk_fact_rides_dim_times FOREIGN KEY(id_date) REFERENCES dim_times(date);
# alter table fact_rides ADD CONSTRAINT fk_fact_rides_dim_times1 FOREIGN KEY(id_time_from_service) REFERENCES dim_times(date);
# alter table fact_rides ADD CONSTRAINT fk_fact_rides_dim_times2 FOREIGN KEY(id_time_to_service) REFERENCES dim_times(date);
# alter table dim_times ADD CONSTRAINT fk_dim_times_dim_year FOREIGN KEY(year) REFERENCES dim_year(id);
# alter table dim_times ADD CONSTRAINT fk_dim_times_dim_month FOREIGN KEY(month) REFERENCES dim_month(id);
# alter table dim_times ADD CONSTRAINT fk_dim_times_dim_weekday FOREIGN KEY(weekday) REFERENCES dim_weekday(id);
# alter table dim_times ADD CONSTRAINT fk_dim_times_dim_hour FOREIGN KEY(hour) REFERENCES dim_hour(id);
|
"""Unit tests for the 3D heisenberg group in vector representation."""
import geomstats.backend as gs
from geomstats.geometry.heisenberg import HeisenbergVectors
from tests.conftest import Parametrizer
from tests.data.heisenberg_data import HeisenbergVectorsTestData
from tests.geometry_test_cases import LieGroupTestCase, VectorSpaceTestCase
class TestHeisenbergVectors(
LieGroupTestCase, VectorSpaceTestCase, metaclass=Parametrizer
):
space = group = HeisenbergVectors
testing_data = HeisenbergVectorsTestData()
def test_dimension(self, expected):
self.assertAllClose(self.space().dim, expected)
def test_jacobian_translation(self, vec, expected):
self.assertAllClose(
self.space().jacobian_translation(gs.array(vec)), gs.array(expected)
)
def test_random_point_belongs(self, n_samples, bound):
space = self.space()
self.assertTrue(gs.all(space.belongs(space.random_point(n_samples, bound))))
def test_is_tangent(self, vector, expected):
group = self.space()
result = group.is_tangent(gs.array(vector))
self.assertAllClose(result, gs.array(expected))
|
import ssl
from pathlib import Path
from typing import Dict
import numpy as np
import torch
from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.structures.bounding_box import BoxList
from common.engine import BaseEngine
from .mask_rcnn_predictor import COCODemo
# cancel ssl certificate verify
ssl._create_default_https_context = ssl._create_unverified_context
class Engine(BaseEngine):
CFG_ROOT = Path(__file__).parent.absolute() / 'third/maskrcnn-benchmark/configs'
def __init__(self, config):
super().__init__(config)
self._load_model(self.config)
def _load_model(self, model_name: str):
self._model_name = model_name
self._config = self._load_cfg()
self._model = COCODemo(
cfg,
min_image_size=800,
confidence_threshold=0.7,
)
def reset_model_version(self, model_name: str):
self._load_model(model_name)
def _load_cfg(self):
model_path = Path(self._model_name).with_suffix('.yaml')
full_path = self.CFG_ROOT / model_path
print('loading configuration from {}'.format(full_path))
cfg.merge_from_file(full_path)
cfg.merge_from_list(["MODEL.DEVICE", "cuda"])
return cfg
@staticmethod
def decode_bbox(predictions: BoxList):
"""
Arguments:
predictions (BoxList): the result of the computation by the model.
It should contain the field `labels`.
Returns:
label, boxes, scores (list, list, list): a tuple containing list of
labels, boxes and scores.
"""
# get label
label_ids = predictions.get_field('labels').tolist()
boxes = predictions.bbox
boxes = boxes.to(torch.int64).tolist()
scores = predictions.get_field('scores').tolist()
if predictions.has_field('mask'):
mask = predictions.get_field('mask').tolist()
else:
mask = None
return label_ids, boxes, scores, mask
def single_predict(self, np_array: np.ndarray, **kwargs) -> Dict[str, list]:
height, width, _ = np_array.shape
predictions = self._model.compute_prediction(np_array)
top_predictions = self._model.select_top_predictions(predictions)
label_ids, boxes, scores, mask = self.decode_bbox(top_predictions)
labels = [self._model.CATEGORIES[i] for i in label_ids]
return {
'labels': labels,
'label_ids': label_ids,
'boxes': boxes,
'scores': scores,
'mask': mask,
'width': width,
'height': height
}
def batch_predict(self, *args, **kwargs):
print('Hello world from batch predict.')
|
import sys
import torch
from torchvision.datasets import FakeData
import train
from arguments import get_arguments
def get_fake_datasets(**kwargs):
return FakeData(), FakeData()
def test_setup_data_loaders(mocker):
# NOTE: Removing any sys.argv to get default arguments
sys.argv = [""]
args = get_arguments()
mocker.patch.object(train, "get_datasets", side_effect=get_fake_datasets)
train_loaders, val_loader = train.setup_data_loaders(args)
assert len(train_loaders) == args.num_stages
for train_loader in train_loaders:
assert isinstance(train_loader, torch.utils.data.DataLoader)
assert isinstance(val_loader, torch.utils.data.DataLoader)
|
#!/usr/bin/env python3
#
# Copyright 2014 Simone Campagna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = "Simone Campagna"
__all__ = [
'lrange',
'irange',
'BASE_STRING',
'StringIO',
'get_input',
'decode',
'iteritems',
]
import sys
if sys.version_info[0] == 2: # pragma: no cover
lrange = range
irange = xrange
from StringIO import StringIO
BASE_STRING = basestring
get_input = raw_input
def decode(t):
return t
def iteritems(dct):
return dct.iteritems()
else: # pragma: no cover
def lrange(*args):
return list(range(*args))
irange = range
from io import StringIO
BASE_STRING = str
get_input = input
def decode(t):
return t.decode('utf-8') if isinstance(t, bytes) else t
def iteritems(dct):
return iter(dct.items())
|
import scrapy
import logging
from scrapy.loader import ItemLoader
from scrapy.http import FormRequest
from scrapy.exceptions import CloseSpider
from datetime import datetime
from fbposts.items import FbPostItem, parse_date
class FacebookSpider(scrapy.Spider):
"""
Parse FB pages (needs credentials)
"""
name = "fb"
def __init__(self, *args, **kwargs):
# turn off annoying logging, set LOG_LEVEL=DEBUG in settings.py to see more logs
logger = logging.getLogger("scrapy.middleware")
logger.setLevel(logging.WARNING)
super().__init__(*args, **kwargs)
# email & pass need to be passed as attributes!
if "email" not in kwargs or "password" not in kwargs:
raise AttributeError(
"You need to provide valid email and password:\n"
'scrapy crawl fb -a email="EMAIL" -a password="PASSWORD"'
)
else:
self.logger.info("Email and password provided, will be used to log in")
# page name parsing (added support for full urls)
if "page" in kwargs:
if self.page.find("/groups/") != -1:
self.group = 1
else:
self.group = 0
if self.page.find("https://www.facebook.com/") != -1:
self.page = self.page[25:]
elif self.page.find("https://mbasic.facebook.com/") != -1:
self.page = self.page[28:]
elif self.page.find("https://m.facebook.com/") != -1:
self.page = self.page[23:]
# parse date
if "date" not in kwargs:
self.date = datetime.today()
self.logger.info(
f"Date attribute not provided, scraping date set to today ({self.date.strftime('%Y-%m-%d')})"
)
else:
self.date = datetime.strptime(kwargs["date"], "%Y-%m-%d")
self.logger.info(
"Date attribute provided, fbcrawl will start crawling at {}".format(
kwargs["date"]
)
)
self.year = self.date.year
# interface language, hard-coded to Portuguese ("pt"); it is used when parsing post dates and pagination links
self.lang = "pt"
# max num of posts to crawl
if "max" not in kwargs:
self.max = int(10e5)
else:
self.max = int(kwargs["max"])
# current year, this variable is needed for proper parse_page recursion
self.k = datetime.now().year
# count number of posts, used to enforce DFS and insert posts orderly in the csv
self.count = 0
self.start_urls = ["https://mbasic.facebook.com"]
def parse(self, response):
"""
Handle login with provided credentials
"""
return FormRequest.from_response(
response,
formxpath='//form[contains(@action, "login")]',
formdata={"email": self.email, "pass": self.password},
callback=self.parse_home,
)
def parse_home(self, response):
"""
This method has multiple purposes:
1) Handle failed logins due to facebook 'save-device' redirection
2) Set language interface, if not already provided
3) Navigate to given page
"""
# handle 'save-device' redirection
if response.xpath("//div/a[contains(@href,'save-device')]"):
self.logger.info('Going through the "save-device" checkpoint')
return FormRequest.from_response(
response,
formdata={"name_action_selected": "dont_save"},
callback=self.parse_home,
)
# navigate to provided page
href = response.urljoin(self.page)
self.logger.info("Scraping facebook page {}".format(href))
return scrapy.Request(url=href, callback=self.parse_page, meta={"index": 1})
def parse_page(self, response):
"""
Parse the given page selecting the posts.
Then ask recursively for another page.
"""
#open page in browser for debug
# from scrapy.utils.response import open_in_browser
# open_in_browser(response)
# select all posts
for post in response.xpath("//article[contains(@data-ft,'top_level_post_id')]"):
many_features = post.xpath("./@data-ft").get()
post_date = parse_date([many_features], {"lang": self.lang})
post_date = (
datetime.strptime(post_date, "%Y-%m-%d %H:%M:%S")
if post_date is not None
else post_date
)
if post_date is None:
post_date = datetime(self.date.year, self.date.month, 1)
# if 'date' argument is reached stop crawling
if post_date < self.date:
raise CloseSpider(
"Reached date: {} - post_date: {}".format(self.date, post_date)
)
new = ItemLoader(item=FbPostItem(), selector=post)
if abs(self.count) + 1 > self.max:
raise CloseSpider(
"Reached max num of post: {}. Crawling finished".format(
abs(self.count)
)
)
self.logger.info(
"Parsing post n = {}, post_date = {}".format(
abs(self.count) + 1, post_date
)
)
new.add_value("date", post_date)
new.add_xpath('post_id','./@data-ft')
new.add_xpath('url', ".//a[contains(@href,'footer')]/@href")
# returns full post-link in a list
post = post.xpath(".//a[contains(@href,'footer')]/@href").extract()
temp_post = response.urljoin(post[0])
self.count -= 1
yield scrapy.Request(
temp_post, self.parse_post, priority=self.count, meta={"item": new}
)
# load following page, try to click on "more"
# after a few pages have been scraped, the "more" link might disappear
# if not present look for the highest year not parsed yet
# click once on the year and go back to clicking "more"
# new_page is different for groups
if self.group == 1:
new_page = response.xpath(
"//div[contains(@id,'stories_container')]/div[2]/a/@href"
).extract()
else:
new_page = response.xpath(
"//div[2]/a[contains(@href,'timestart=') and not(contains(text(),'ent')) and not(contains(text(),number()))]/@href"
).extract()
# this is why lang is needed ^^^^^^^^^^^^^^^^^^^^^^^^^^
if not new_page:
self.logger.info('[!] "more" link not found, will look for a "year" link')
# self.k is the year link that we look for
if response.meta["flag"] == self.k and self.k >= self.year:
xpath = (
"//div/a[contains(@href,'time') and contains(text(),'"
+ str(self.k)
+ "')]/@href"
)
new_page = response.xpath(xpath).extract()
if new_page:
new_page = response.urljoin(new_page[0])
self.k -= 1
self.logger.info(
'Found a link for year "{}", new_page = {}'.format(
self.k, new_page
)
)
yield scrapy.Request(
new_page, callback=self.parse_page, meta={"flag": self.k}
)
else:
while (
not new_page
): # sometimes the years are skipped this handles small year gaps
self.logger.info(
"Link not found for year {}, trying with previous year {}".format(
self.k, self.k - 1
)
)
self.k -= 1
if self.k < self.year:
raise CloseSpider(
"Reached date: {}. Crawling finished".format(self.date)
)
xpath = (
"//div/a[contains(@href,'time') and contains(text(),'"
+ str(self.k)
+ "')]/@href"
)
new_page = response.xpath(xpath).extract()
self.logger.info(
'Found a link for year "{}", new_page = {}'.format(
self.k, new_page
)
)
new_page = response.urljoin(new_page[0])
self.k -= 1
yield scrapy.Request(
new_page, callback=self.parse_page, meta={"flag": self.k}
)
else:
self.logger.info("Crawling has finished with no errors!")
else:
new_page = response.urljoin(new_page[0])
if "flag" in response.meta:
self.logger.info(
'Page scraped, clicking on "more"! new_page = {}'.format(new_page)
)
yield scrapy.Request(
new_page,
callback=self.parse_page,
meta={"flag": response.meta["flag"]},
)
else:
self.logger.info(
'First page scraped, clicking on "more"! new_page = {}'.format(
new_page
)
)
yield scrapy.Request(
new_page, callback=self.parse_page, meta={"flag": self.k}
)
def parse_post(self, response):
new = ItemLoader(
item=FbPostItem(), response=response, parent=response.meta["item"]
)
new.context["lang"] = self.lang
new.add_xpath(
"source",
"//td/div/h3/strong/a/text() | //span/strong/a/text() | //div/div/div/a[contains(@href,'post_id')]/strong/text()",
)
new.add_xpath("image", "//a/img[contains(@src,'content')]/@src")
new.add_xpath(
"text",
"//div[@data-ft]//p//text() | //div[@data-ft]/div[@class]/div[@class]/text()",
)
yield new.load_item()
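# Example invocation (sketch; the page, date and max values below are illustrative only):
#   scrapy crawl fb -a email="EMAIL" -a password="PASSWORD" \
#       -a page="SomePublicPage" -a date="2018-01-01" -a max="100"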
|
import gi
from ghue.controller import Controller
from ghue.device.hue import HueDeviceManager
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, GLib
import phue
from .application import GHueApplication
if __name__ == '__main__':
GLib.set_application_name("Philips Hue")
controller = Controller()
hue_device_manager = HueDeviceManager(bridge=phue.Bridge('philips-hue.local'),
controller=controller)
controller.add_device_manager(hue_device_manager)
app = GHueApplication(controller)
app.run(None)
|
import pymonetdb
import datetime, time
import itertools
import csv
import json
import os
import multiprocessing
from subprocess import call
from common import util
class IDEBenchDriver:
def init(self, options, schema, driver_arg):
pass
def create_connection(self):
connection = pymonetdb.connect(username="monetdb", password="monetdb", hostname="localhost", port=50000, database="demo")
cursor = connection.cursor()
return connection, cursor
def process_request(self, viz_request, options, schema, result_queue):
print("processsing..." + str(viz_request.operation_id))
viz = viz_request.viz
sql_statement = viz.get_computed_filter_as_sql(schema)
connection, cursor = self.create_connection()
viz_request.start_time = util.get_current_ms_time()
cursor.execute(sql_statement)
data = cursor.fetchall()
viz_request.end_time = util.get_current_ms_time()
connection.close()
results = {}
for row in data:
keys = []
for i, bin_desc in enumerate(viz.binning):
if "width" in bin_desc:
bin_width = bin_desc["width"]
keys.append(str(int(row[i])))
else:
keys.append(str(row[i]))
key = ",".join(keys)
results[key] = row[len(viz.binning):]
viz_request.result = results
result_queue.put(viz_request)
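# Illustration (hypothetical viz with two bins over (hour, category)): a result
# row (14, 'food', 37) is keyed by the loop above as results['14,food'] = (37,).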
def workflow_start(self):
# clear cache here
pass
def workflow_end(self):
pass
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import sys
from apache_atlas.utils import next_id
from apache_atlas.utils import non_null
class AtlasBase(dict):
def __init__(self, attrs):
pass
def __getattr__(self, attr):
return self.get(attr)
def __setattr__(self, key, value):
self.__setitem__(key, value)
def __setitem__(self, key, value):
super(AtlasBase, self).__setitem__(key, value)
self.__dict__.update({key: value})
def __delattr__(self, item):
self.__delitem__(item)
def __delitem__(self, key):
super(AtlasBase, self).__delitem__(key)
del self.__dict__[key]
def __repr__(self):
return json.dumps(self)
def type_coerce_attrs(self):
pass
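# Usage sketch: AtlasBase keeps attribute and dict access in sync ('qualifiedName'
# below is an illustrative key, not one required by this class):
#   obj = AtlasBase({})
#   obj.qualifiedName = 'db.table@cluster'
#   assert obj['qualifiedName'] == 'db.table@cluster'
#   assert json.loads(repr(obj))['qualifiedName'] == 'db.table@cluster'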
class AtlasBaseModelObject(AtlasBase):
def __init__(self, members):
AtlasBase.__init__(self, members)
self.guid = members.get('guid')
if self.guid is None:
self.guid = next_id()
class TimeBoundary(AtlasBase):
def __init__(self, attrs=None):
attrs = attrs or {}
AtlasBase.__init__(self, attrs)
self.startTime = attrs.get('startTime')
self.endTime = attrs.get('endTime')
self.timeZone = attrs.get('timeZone')
class Plist(AtlasBase):
def __init__(self, attrs=None):
attrs = attrs or {}
AtlasBase.__init__(self, attrs)
self.list = non_null(attrs.get('list'), [])
self.startIndex = non_null(attrs.get('startIndex'), 0)
self.pageSize = non_null(attrs.get('pageSize'), 0)
self.totalCount = non_null(attrs.get('totalCount'), 0)
self.sortBy = attrs.get('sortBy')
self.sortType = attrs.get('sortType')
class SearchFilter(AtlasBase):
def __init__(self, attrs=None):
attrs = attrs or {}
AtlasBase.__init__(self, attrs)
self.startIndex = non_null(attrs.get('startIndex'), 0)
self.maxsize = non_null(attrs.get('maxsize'), sys.maxsize)
self.getCount = non_null(attrs.get('getCount'), True)
|
# --------------
#Importing header files
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
#Code starts here
data = pd.read_csv(path)
data.Rating.plot(kind='hist')
data = data[data.Rating<=5]
data.Rating.plot(kind='hist')
#Code ends here
# --------------
# code starts here
total_null = data.isnull().sum()
percent_null = total_null/len(data)*100
missing_data = pd.concat([total_null,percent_null],keys=['Total','Percent'],axis=1)
print(missing_data)
data.dropna(inplace=True)
total_null_1 = data.isnull().sum()
percent_null_1 = total_null_1/len(data)*100
missing_data_1 = pd.concat([total_null_1,percent_null_1],keys=['Total','Percent'],axis=1)
print(missing_data_1)
# code ends here
# --------------
#Code starts here
fig = sns.catplot(x='Category', y='Rating', data=data, kind='box', height=10).set_xticklabels(rotation=90)
fig.set(title="Rating vs Category [BoxPlot]")
#Code ends here
# --------------
#Importing header files
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
#Code starts here
print(data.Installs.value_counts())
data.Installs = data['Installs'].apply(lambda x:int(x[:-1].replace(",","")))
le = LabelEncoder()
le.fit(data.Installs)
data.Installs = le.transform(data.Installs)
sns.regplot(x='Installs', y='Rating', data=data).set(title="Rating vs Installs [RegPlot]")
#Code ends here
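# Worked example: the Installs lambda above turns a string such as '10,000+'
# into the integer 10000 before it is label-encoded.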
# --------------
#Code starts here
data.Price = data.Price.apply(lambda x: float(x[1:]) if len(x)>1 else float(x))
sns.regplot(x="Price", y="Rating", data=data).set(title = "Rating vs Price [RegPlot]")
#Code ends here
# --------------
#Code starts here
data.Genres.unique()
data.Genres = data.Genres.apply(lambda x: x.split(";")[0])
gr_mean = data[["Genres", "Rating"]].groupby(by="Genres", as_index=False).mean()
gr_mean.describe()
gr_mean = gr_mean.sort_values(by='Rating')
print(gr_mean.iloc[0])
print(gr_mean.iloc[-1])
#Code ends here
# --------------
#Code starts here
data['Last Updated'].head()
data['Last Updated'] = data['Last Updated'].apply(lambda x: pd.to_datetime(x))
max_date = data['Last Updated'].max()
data['Last Updated Days'] = (max_date - data['Last Updated']).apply(lambda x: x.days)
sns.regplot(x="Last Updated Days", y="Rating", data=data).set(title="Rating vs Last Updated [RegPlot]")
#Code ends here
|
##################################################################################################
# BSD 3-Clause License
#
# Copyright (c) 2020, Jose R. Garcia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##################################################################################################
# File name : wb4_master_seq.py
# Author : Jose R Garcia
# Created : 2020/11/22 10:24:13
# Last modified : 2021/06/26 23:52:32
# Project Name : UVM Python Verification Library
# Module Name : wb4_master_seq, wb4_master_base_sequence
# Description : Wishbone Bus Sequence Item and Sequences.
#
# Additional Comments:
# Create a read or write transaction.
##################################################################################################
from uvm import *
class wb4_master_seq(UVMSequenceItem):
"""
Class: Wishbone Master Sequence Item
Definition: Contains functions, tasks and methods of this
"""
def __init__(self, name="wb4_master_seq"):
super().__init__(name)
self.data_in = 0
self.data_out = 0
self.address = 0
self.select = 0
self.we = 0
self.strobe = 0
self.acknowledge = 0
self.cycle = 0
self.stall = 0
self.data_tag = 0
self.address_tag = 0
self.cycle_tag = 0
self.transmit_delay = 0
def do_copy(self, rhs):
self.data_in = rhs.data_in
self.data_out = rhs.data_out
self.address = rhs.address
self.select = rhs.select
self.we = rhs.we
self.strobe = rhs.strobe
self.cycle = rhs.cycle
self.stall = rhs.stall
self.data_tag = rhs.data_tag
self.address_tag = rhs.address_tag
self.cycle_tag = rhs.cycle_tag
self.acknowledge = rhs.acknowledge
self.transmit_delay = rhs.transmit_delay
def do_clone(self):
new_obj = wb4_master_seq()
new_obj.copy(self)
return new_obj
def compare(self, rhs):
if (self.data_in == rhs.data_in and
self.data_out == rhs.data_out and
self.address == rhs.address and
self.select == rhs.select and
self.we == rhs.we and
self.strobe == rhs.strobe and
self.cycle == rhs.cycle and
self.stall == rhs.stall and
self.data_tag == rhs.data_tag and
self.address_tag == rhs.address_tag and
self.cycle_tag == rhs.cycle_tag and
self.acknowledge == rhs.acknowledge and
self.transmit_delay == rhs.transmit_delay):
# match
return True
else:
return False
def convert2string(self):
return sv.sformatf("\n =================================== \n DATA_i : 0x%0h \n DATA_o : 0x%0h \n ADDR_o : 0x%0h \n SEL_o : 0x%0h \n WE_o : %d \n CYC_o : %d \n STB_o : %d \n ACK_i : %d \n STALL_i : %d \n TDG_i : 0x%0h\n Delay : %d clocks \n =================================== \n ",
self.data_in, self.data_out, self.address, self.select, self.we, self.cycle, self.strobe, self.acknowledge, self.stall, self.data_tag, self.transmit_delay)
uvm_object_utils(wb4_master_seq)
class wb4_master_base_sequence(UVMSequence):
def __init__(self, name="wb4_master_base_sequence"):
super().__init__(name)
self.set_automatic_phase_objection(1)
self.req = wb4_master_seq()
self.rsp = wb4_master_seq()
uvm_object_utils(wb4_master_base_sequence)
class wb4_master_single_read_seq(wb4_master_base_sequence):
"""
Class: Wishbone Read Sequence
Definition: Contains functions, tasks and methods
"""
def __init__(self, name="wb4_master_single_read_seq"):
wb4_master_base_sequence.__init__(self, name)
self.data = 0
self.address = 0
self.stall = 0
self.transmit_delay = 0
self.acknowledge = 1
self.select = 0
self.we = 0
self.strobe = 0
self.cycle = 0
self.data_tag = 0
self.address_tag = 0
self.cycle_tag = 0
async def body(self):
# Build the sequence item
self.req.data_out = 0
self.req.data_in = self.data
self.req.address = self.address
self.req.stall = self.stall
self.req.acknowledge = self.acknowledge
self.req.transmit_delay = self.transmit_delay
self.req.select = self.select
self.req.we = self.we
self.req.strobe = self.strobe
self.req.cycle = self.cycle
self.req.data_tag = self.data_tag
self.req.address_tag = self.address_tag
self.req.cycle_tag = self.cycle_tag
await uvm_do_with(self, self.req) # start_item
uvm_object_utils(wb4_master_single_read_seq)
class wb4_master_single_write_seq(wb4_master_base_sequence):
"""
Class: Wishbone Write Sequence
Definition: Contains functions, tasks and methods
"""
def __init__(self, name="wb4_master_single_write_seq"):
wb4_master_base_sequence.__init__(self, name)
self.data = 0
self.address = 0
self.stall = 0
self.transmit_delay = 0
self.acknowledge = 1
self.select = 0
self.we = 0
self.strobe = 0
self.cycle = 0
self.data_tag = 0
self.address_tag = 0
self.cycle_tag = 0
async def body(self):
# Build the sequence item
self.req.data_out = self.data
self.req.data_in = 0
self.req.address = self.address
self.req.stall = self.stall
self.req.acknowledge = self.acknowledge
self.req.transmit_delay = self.transmit_delay
self.req.select = self.select
self.req.we = self.we
self.req.strobe = self.strobe
self.req.cycle = self.cycle
self.req.data_tag = self.data_tag
self.req.address_tag = self.address_tag
self.req.cycle_tag = self.cycle_tag
await uvm_do_with(self, self.req) # start_item
uvm_object_utils(wb4_master_single_write_seq)
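# Usage sketch (assumes a uvm-python test with a wb4 sequencer handle; the names
# wb4_sqr and the address/data values below are illustrative only):
#   write_seq = wb4_master_single_write_seq("write_seq")
#   write_seq.address = 0x0040
#   write_seq.data = 0xDEADBEEF
#   await write_seq.start(wb4_sqr)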
|
# Generated by Django 4.0.3 on 2022-03-25 16:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('recipes', '0003_recipe_picture'),
]
operations = [
migrations.AddField(
model_name='recipe',
name='slug',
field=models.SlugField(default='a'),
preserve_default=False,
),
]
|
from __future__ import print_function
from __future__ import absolute_import
import warnings
import copy
import time
import numpy as np
import multiprocessing
import threading
try:
import queue
except ImportError:
import Queue as queue
from .topology import Container
from .. import backend as K
from .. import optimizers
from .. import objectives
from .. import metrics as metrics_module
from ..utils.generic_utils import Progbar
from .. import callbacks as cbks
def standardize_input_data(data, names, shapes=None,
check_batch_dim=True,
exception_prefix=''):
'''Users may pass data as a list of arrays, dictionary of arrays,
or as a single array. We normalize this to an ordered list of
arrays (same order as `names`), while checking that the provided
arrays have shapes that match the network's expectations.
'''
if type(data) is dict:
arrays = []
for name in names:
if name not in data:
raise Exception('No data provided for "' +
name + '". Need data for each key in: ' +
str(data.keys()))
arrays.append(data[name])
elif type(data) is list:
if len(data) != len(names):
if len(data) > 0 and hasattr(data[0], 'shape'):
raise Exception('Error when checking ' + exception_prefix +
': the list of Numpy arrays '
'that you are passing to your model '
'is not the size the model expected. '
'Expected to see ' + str(len(names)) +
' arrays but instead got '
'the following list of ' + str(len(data)) +
' arrays: ' + str(data)[:200] +
'...')
else:
if len(names) == 1:
data = [np.asarray(data)]
else:
raise Exception('Error when checking ' + exception_prefix +
': you are passing a list as '
'input to your model, '
'but the model expects '
'a list of ' + str(len(names)) +
' Numpy arrays instead. '
'The list you passed was: ' +
str(data)[:200])
arrays = data
else:
if not hasattr(data, 'shape'):
raise Exception('Error when checking ' + exception_prefix +
': data should be a Numpy array, '
'or list/dict of Numpy arrays. '
'Found: ' + str(data)[:200] + '...')
if len(names) != 1:
# case: model expects multiple inputs but only received
# a single Numpy array
raise Exception('The model expects ' + str(len(names)) +
' input arrays, but only received one array. '
'Found: array with shape ' + str(data.shape))
arrays = [data]
# make arrays at least 2D
for i in range(len(names)):
array = arrays[i]
if len(array.shape) == 1:
array = np.expand_dims(array, 1)
arrays[i] = array
# check shapes compatibility
if shapes:
for i in range(len(names)):
if shapes[i] is None:
continue
array = arrays[i]
if len(array.shape) != len(shapes[i]):
raise Exception('Error when checking ' + exception_prefix +
': expected ' + names[i] +
' to have ' + str(len(shapes[i])) +
' dimensions, but got array with shape ' +
str(array.shape))
for j, (dim, ref_dim) in enumerate(zip(array.shape, shapes[i])):
if not j and not check_batch_dim:
# skip the first axis
continue
if ref_dim:
if ref_dim != dim:
raise Exception('Error when checking ' + exception_prefix +
': expected ' + names[i] +
' to have shape ' + str(shapes[i]) +
' but got array with shape ' +
str(array.shape))
return arrays
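# Example: for a model with input names ['a', 'b'], passing {'b': yb, 'a': xa}
# is normalized to the ordered list [xa, yb]; 1D arrays are also expanded to 2D.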
def standardize_sample_or_class_weights(x_weight, output_names, weight_type):
if x_weight is None or len(x_weight) == 0:
return [None for _ in output_names]
if len(output_names) == 1:
if type(x_weight) is list and len(x_weight) == 1:
return x_weight
if type(x_weight) is dict and output_names[0] in x_weight:
return [x_weight[output_names[0]]]
else:
return [x_weight]
if type(x_weight) is list:
if len(x_weight) != len(output_names):
raise Exception('Provided `' + weight_type + '` was a list of ' +
str(len(x_weight)) +
' elements, but the model has ' +
str(len(output_names)) + ' outputs. '
'You should provide one `' + weight_type + '` '
'array per model output.')
return x_weight
if type(x_weight) is dict:
x_weights = []
for name in output_names:
x_weights.append(x_weight.get(name))
return x_weights
else:
raise Exception('The model has multiple outputs, so `' +
weight_type + '` '
'should be either a list or a dict. '
'Provided `' + weight_type +
'` type not understood: ' +
str(x_weight))
def standardize_class_weights(class_weight, output_names):
return standardize_sample_or_class_weights(class_weight,
output_names,
'class_weight')
def standardize_sample_weights(sample_weight, output_names):
return standardize_sample_or_class_weights(sample_weight,
output_names,
'sample_weight')
def check_array_lengths(X, Y, W):
x_lengths = [x.shape[0] for x in X]
y_lengths = [y.shape[0] for y in Y]
w_lengths = [w.shape[0] for w in W]
set_x = set(x_lengths)
if len(set_x) != 1:
raise Exception('All input arrays (x) should have '
'the same number of samples.')
set_y = set(y_lengths)
if len(set_y) != 1:
raise Exception('All target arrays (y) should have '
'the same number of samples.')
set_w = set(w_lengths)
if len(set_w) != 1:
raise Exception('All sample_weight arrays should have '
'the same number of samples.')
if list(set_x)[0] != list(set_y)[0]:
raise Exception('Input arrays should have '
'the same number of samples as target arrays. Found ' +
str(list(set_x)[0]) + ' input samples and ' +
str(list(set_y)[0]) + ' target samples.')
if list(set_x)[0] != list(set_w)[0]:
raise Exception('Sample_weight arrays should have '
'the same number of samples as input arrays. Found ' +
str(list(set_x)[0]) + ' input samples and ' +
str(list(set_w)[0]) + ' target samples.')
def check_loss_and_target_compatibility(targets, losses, output_shapes):
assert len(targets) == len(losses) == len(output_shapes)
key_losses = {'mean_squared_error',
'binary_crossentropy',
'categorical_crossentropy'}
for y, loss, shape in zip(targets, losses, output_shapes):
if loss.__name__ == 'categorical_crossentropy':
if y.shape[1] == 1:
raise Exception('You are passing a target array of shape ' + str(y.shape) +
' while using as loss `categorical_crossentropy`. '
'`categorical_crossentropy` expects '
'targets to be binary matrices (1s and 0s) '
'of shape (samples, classes). '
'If your targets are integer classes, '
'you can convert them to the expected format via:\n'
'```\n'
'from keras.utils.np_utils import to_categorical\n'
'y_binary = to_categorical(y_int)\n'
'```\n'
'\n'
'Alternatively, you can use the loss function '
'`sparse_categorical_crossentropy` instead, '
'which does expect integer targets.')
if loss.__name__ in key_losses and shape[1] is not None and y.shape[1] != shape[1]:
raise Exception('A target array with shape ' + str(y.shape) +
' was passed for an output of shape ' + str(shape) +
' while using as loss `' + loss.__name__ + '`. '
'This loss expects '
'targets to have the same shape '
'as the output.')
def collect_metrics(metrics, output_names):
if not metrics:
return [[] for _ in output_names]
if type(metrics) is list:
# we then apply all metrics to all outputs.
return [copy.copy(metrics) for _ in output_names]
elif type(metrics) is dict:
nested_metrics = []
for name in output_names:
output_metrics = metrics.get(name, [])
if type(output_metrics) is not list:
output_metrics = [output_metrics]
nested_metrics.append(output_metrics)
return nested_metrics
else:
raise Exception('Type of `metrics` argument not understood. '
'Expected a list or dictionary, found: ' +
str(metrics))
def collect_trainable_weights(layer):
'''Collects all `trainable_weights` attributes,
excluding any sublayers where `trainable` is set to `False`.
'''
trainable = getattr(layer, 'trainable', True)
if not trainable:
return []
weights = []
if layer.__class__.__name__ == 'Sequential':
for sublayer in layer.flattened_layers:
weights += collect_trainable_weights(sublayer)
elif layer.__class__.__name__ == 'Model':
for sublayer in layer.layers:
weights += collect_trainable_weights(sublayer)
elif layer.__class__.__name__ == 'Graph':
for sublayer in layer._graph_nodes.values():
weights += collect_trainable_weights(sublayer)
else:
weights += layer.trainable_weights
# dedupe weights
weights = list(set(weights))
weights.sort(key=lambda x: x.name)
return weights
def batch_shuffle(index_array, batch_size):
'''This shuffles an array in a batch-wise fashion.
Useful for shuffling HDF5 arrays
(where one cannot access arbitrary indices).
'''
batch_count = int(len(index_array) / batch_size)
# to reshape we need to be cleanly divisible by batch size
# we stash extra items and reappend them after shuffling
last_batch = index_array[batch_count * batch_size:]
index_array = index_array[:batch_count * batch_size]
index_array = index_array.reshape((batch_count, batch_size))
np.random.shuffle(index_array)
index_array = index_array.flatten()
return np.append(index_array, last_batch)
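# Worked example: with batch_size=3, batch_shuffle(np.arange(8), 3) shuffles the
# two full batches as whole rows and re-appends the leftover [6, 7]; one possible
# outcome is array([3, 4, 5, 0, 1, 2, 6, 7]).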
def make_batches(size, batch_size):
'''Returns a list of batch indices (tuples of indices).
'''
nb_batch = int(np.ceil(size / float(batch_size)))
return [(i * batch_size, min(size, (i + 1) * batch_size))
for i in range(0, nb_batch)]
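# Worked example: make_batches(10, 4) -> [(0, 4), (4, 8), (8, 10)]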
def slice_X(X, start=None, stop=None):
'''This takes an array-like, or a list of
array-likes, and outputs:
- X[start:stop] if X is an array-like
- [x[start:stop] for x in X] if X is a list
Can also work on list/array of indices: `slice_X(x, indices)`
# Arguments:
start: can be an integer index (start index)
or a list/array of indices
stop: integer (stop index); should be None if
`start` was a list.
'''
if type(X) == list:
if hasattr(start, '__len__'):
# hdf5 datasets only support list objects as indices
if hasattr(start, 'shape'):
start = start.tolist()
return [x[start] for x in X]
else:
return [x[start:stop] for x in X]
else:
if hasattr(start, '__len__'):
if hasattr(start, 'shape'):
start = start.tolist()
return X[start]
else:
return X[start:stop]
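# Examples of both calling conventions described in the docstring (a and b are numpy arrays):
#   slice_X(np.arange(10), 2, 5)  -> array([2, 3, 4])
#   slice_X([a, b], [0, 3])       -> [a[[0, 3]], b[[0, 3]]]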
def weighted_objective(fn):
'''Transforms an objective function `fn(y_true, y_pred)`
into a sample-weighted, cost-masked objective function
`fn(y_true, y_pred, weights, mask)`.
'''
def weighted(y_true, y_pred, weights, mask=None):
# score_array has ndim >= 2
score_array = fn(y_true, y_pred)
if mask is not None:
# Cast the mask to floatX to avoid float64 upcasting in theano
mask = K.cast(mask, K.floatx())
# mask should have the same shape as score_array
score_array *= mask
# the loss per batch should be proportional
# to the number of unmasked samples.
score_array /= K.mean(mask)
# reduce score_array to same ndim as weight array
ndim = K.ndim(score_array)
weight_ndim = K.ndim(weights)
score_array = K.mean(score_array, axis=list(range(weight_ndim, ndim)))
# apply sample weighting
if weights is not None:
score_array *= weights
score_array /= K.mean(K.cast(K.not_equal(weights, 0), K.floatx()))
return K.mean(score_array)
return weighted
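# Minimal sketch of how the wrapper is used (mirrors the compile() path below):
#   weighted_mse = weighted_objective(objectives.mean_squared_error)
#   loss_tensor = weighted_mse(y_true, y_pred, weights=sample_weight_placeholder, mask=None)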
def standardize_weights(y, sample_weight=None, class_weight=None,
sample_weight_mode=None):
'''Performs weight input validation and standardization
to a single sample-wise (or timestep-wise) weight array.
'''
if sample_weight_mode is not None:
if sample_weight_mode != 'temporal':
raise Exception('"sample_weight_mode '
'should be None or "temporal". '
'Found: ' + str(sample_weight_mode))
if len(y.shape) < 3:
raise Exception('Found a sample_weight array for '
'an input with shape ' +
str(y.shape) + '. '
'Timestep-wise sample weighting (use of '
'sample_weight_mode="temporal") is restricted to '
'outputs that are at least 3D, i.e. that have '
'a time dimension.')
if sample_weight is not None and len(sample_weight.shape) != 2:
raise Exception('Found a sample_weight array with shape ' +
str(sample_weight.shape) + '. '
'In order to use timestep-wise sample weighting, '
'you should pass a 2D sample_weight array.')
else:
if sample_weight is not None and len(sample_weight.shape) != 1:
raise Exception('Found a sample_weight array with shape ' +
str(sample_weight.shape) + '. '
'In order to use timestep-wise sample weights, '
'you should specify sample_weight_mode="temporal" '
'in compile(). If you just mean to use '
'sample-wise weights, make sure your '
'sample_weight array is 1D.')
if sample_weight is not None:
assert len(sample_weight.shape) <= len(y.shape)
# TODO: proper error message
assert y.shape[:sample_weight.ndim] == sample_weight.shape
return sample_weight
elif isinstance(class_weight, dict):
if len(y.shape) > 2:
raise Exception('class_weight not supported for '
'3+ dimensional targets.')
if y.shape[1] > 1:
y_classes = y.argmax(axis=1)
elif y.shape[1] == 1:
y_classes = np.reshape(y, y.shape[0])
else:
y_classes = y
weights = np.asarray([class_weight[cls] for cls in y_classes])
return weights
else:
if sample_weight_mode is None:
return np.ones((y.shape[0],), dtype=K.floatx())
else:
return np.ones((y.shape[0], y.shape[1]), dtype=K.floatx())
def generator_queue(generator, max_q_size=10,
wait_time=0.05, nb_worker=1, pickle_safe=False):
'''Builds a queue out of a data generator.
If pickle_safe, use a multiprocessing approach. Else, use threading.
Used in `fit_generator`, `evaluate_generator`, `predict_generator`.
'''
generator_threads = []
if pickle_safe:
q = multiprocessing.Queue(maxsize=max_q_size)
_stop = multiprocessing.Event()
else:
q = queue.Queue()
_stop = threading.Event()
try:
def data_generator_task():
while not _stop.is_set():
try:
if q.qsize() < max_q_size:
try:
generator_output = next(generator)
except ValueError:
continue
q.put(generator_output)
else:
time.sleep(wait_time)
except Exception:
_stop.set()
raise
for i in range(nb_worker):
if pickle_safe:
# Reset random seed else all children processes share the same seed
np.random.seed()
thread = multiprocessing.Process(target=data_generator_task)
else:
thread = threading.Thread(target=data_generator_task)
generator_threads.append(thread)
thread.daemon = True
thread.start()
except:
_stop.set()
if pickle_safe:
# Terminate all daemon processes
for p in generator_threads:
if p.is_alive():
p.terminate()
q.close()
raise
return q, _stop
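# Rough usage sketch (threading mode): consume batches until the stop event fires;
# the caller is responsible for setting the event when it is done.
#   q, stop_event = generator_queue(my_generator, max_q_size=10)
#   while not stop_event.is_set():
#       if not q.empty():
#           batch = q.get()
#           # ...train on batch...
#   stop_event.set()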
class Model(Container):
def compile(self, optimizer, loss, metrics=[], loss_weights=None,
sample_weight_mode=None, **kwargs):
'''Configures the model for training.
# Arguments
optimizer: str (name of optimizer) or optimizer object.
See [optimizers](/optimizers).
loss: str (name of objective function) or objective function.
See [objectives](/objectives).
If the model has multiple outputs, you can use a different loss
on each output by passing a dictionary or a list of objectives.
metrics: list of metrics to be evaluated by the model
during training and testing.
Typically you will use `metrics=['accuracy']`.
To specify different metrics for different outputs of a
multi-output model, you could also pass a dictionary,
such as `metrics={'output_a': 'accuracy'}`.
sample_weight_mode: if you need to do timestep-wise
sample weighting (2D weights), set this to "temporal".
"None" defaults to sample-wise weights (1D).
If the model has multiple outputs, you can use a different
`sample_weight_mode` on each output by passing a
dictionary or a list of modes.
kwargs: when using the Theano backend, these arguments
are passed into K.function. Ignored for Tensorflow backend.
'''
self.optimizer = optimizers.get(optimizer)
self.sample_weight_mode = sample_weight_mode
self.loss = loss
self.loss_weights = loss_weights
# prepare loss weights
if loss_weights is None:
loss_weights_list = [1. for _ in range(len(self.outputs))]
elif type(loss_weights) is dict:
for name in loss_weights:
if name not in self.output_names:
raise Exception('Unknown entry in loss_weights '
'dictionary: "' + name + '". '
'Only expected the following keys: ' +
str(self.output_names))
loss_weights_list = []
for name in self.output_names:
loss_weights_list.append(loss_weights.get(name, 1.))
elif type(loss_weights) is list:
if len(loss_weights) != len(self.outputs):
raise Exception('When passing a list as loss_weights, '
'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed loss_weights=' +
str(loss_weights))
loss_weights_list = loss_weights
else:
raise Exception('Could not interpret loss_weights argument: ' +
str(loss_weights))
# prepare loss functions
if type(loss) is dict:
for name in loss:
if name not in self.output_names:
raise Exception('Unknown entry in loss '
'dictionary: "' + name + '". '
'Only expected the following keys: ' +
str(self.output_names))
loss_functions = []
for name in self.output_names:
if name not in loss:
raise Exception('Output "' + name +
'" missing from loss dictionary')
loss_functions.append(objectives.get(loss[name]))
elif type(loss) is list:
if len(loss) != len(self.outputs):
raise Exception('When passing a list as loss, '
'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed loss=' +
str(loss))
loss_functions = [objectives.get(l) for l in loss]
else:
loss_function = objectives.get(loss)
loss_functions = [loss_function for _ in range(len(self.outputs))]
self.loss_functions = loss_functions
weighted_losses = [weighted_objective(fn) for fn in loss_functions]
# prepare output masks
masks = self.compute_mask(self.inputs, mask=None)
if masks is None:
masks = [None for _ in self.outputs]
if type(masks) is not list:
masks = [masks]
# prepare sample weights
if type(sample_weight_mode) is dict:
for name in sample_weight_mode:
if name not in self.output_names:
raise Exception('Unknown entry in '
'sample_weight_mode dictionary: "' +
name + '". '
'Only expected the following keys: ' +
str(self.output_names))
sample_weights = []
sample_weight_modes = []
for name in self.output_names:
if name not in sample_weight_mode:
raise Exception('Output "' + name +
'" missing from sample_weight_modes '
'dictionary')
if sample_weight_mode.get(name) == 'temporal':
weight = K.placeholder(ndim=2, name=name + '_sample_weights')
sample_weight_modes.append('temporal')
else:
weight = K.placeholder(ndim=1, name=name + '_sample_weights')
sample_weight_modes.append(None)
sample_weights.append(weight)
elif type(sample_weight_mode) is list:
if len(sample_weight_mode) != len(self.outputs):
raise Exception('When passing a list as sample_weight_mode, ' +
'it should have one entry per model output. '
'The model has ' + str(len(self.outputs)) +
' outputs, but you passed sample_weight_mode=' +
str(sample_weight_mode))
sample_weights = []
sample_weight_modes = []
for mode, name in zip(sample_weight_mode, self.output_names):
if mode == 'temporal':
weight = K.placeholder(ndim=2, name=name + '_sample_weights')
sample_weight_modes.append('temporal')
else:
weight = K.placeholder(ndim=1, name=name + '_sample_weights')
sample_weight_modes.append(None)
sample_weights.append(weight)
else:
if sample_weight_mode == 'temporal':
sample_weights = [K.placeholder(ndim=2, name=name + '_sample_weights')
for name in self.output_names]
sample_weight_modes = ['temporal' for name in self.output_names]
else:
sample_weights = [K.placeholder(ndim=1, name=name + '_sample_weights')
for name in self.output_names]
sample_weight_modes = [None for name in self.output_names]
self.sample_weight_modes = sample_weight_modes
# prepare targets of model
self.targets = []
for i in range(len(self.outputs)):
shape = self.internal_output_shapes[i]
name = self.output_names[i]
self.targets.append(K.placeholder(ndim=len(shape), name=name + '_target'))
# prepare metrics
self.metrics = metrics
self.metrics_names = ['loss']
self.metrics_tensors = []
# compute total loss
total_loss = None
for i in range(len(self.outputs)):
y_true = self.targets[i]
y_pred = self.outputs[i]
weighted_loss = weighted_losses[i]
sample_weight = sample_weights[i]
mask = masks[i]
loss_weight = loss_weights_list[i]
output_loss = weighted_loss(y_true, y_pred,
sample_weight, mask)
if len(self.outputs) > 1:
self.metrics_tensors.append(output_loss)
self.metrics_names.append(self.output_names[i] + '_loss')
if total_loss is None:
total_loss = loss_weight * output_loss
else:
total_loss += loss_weight * output_loss
# add regularization penalties to the loss
for r in self.regularizers:
total_loss = r(total_loss)
# list of same size as output_names.
# contains tuples (metrics for output, names of metrics)
nested_metrics = collect_metrics(metrics, self.output_names)
for i in range(len(self.outputs)):
y_true = self.targets[i]
y_pred = self.outputs[i]
output_metrics = nested_metrics[i]
for metric in output_metrics:
if metric == 'accuracy' or metric == 'acc':
# custom handling of accuracy (because of class mode duality)
output_shape = self.internal_output_shapes[i]
if output_shape[-1] == 1 or self.loss_functions[i] == objectives.binary_crossentropy:
# case: binary accuracy
self.metrics_tensors.append(metrics_module.binary_accuracy(y_true, y_pred))
elif self.loss_functions[i] == objectives.sparse_categorical_crossentropy:
# case: categorical accuracy with sparse targets
self.metrics_tensors.append(
metrics_module.sparse_categorical_accuracy(y_true, y_pred))
else:
# case: categorical accuracy with dense targets
self.metrics_tensors.append(metrics_module.categorical_accuracy(y_true, y_pred))
if len(self.output_names) == 1:
self.metrics_names.append('acc')
else:
self.metrics_names.append(self.output_layers[i].name + '_acc')
else:
metric_fn = metrics_module.get(metric)
self.metrics_tensors.append(metric_fn(y_true, y_pred))
if len(self.output_names) == 1:
self.metrics_names.append(metric_fn.__name__)
else:
self.metrics_names.append(self.output_layers[i].name + '_' + metric_fn.__name__)
# prepare gradient updates and state updates
self.optimizer = optimizers.get(optimizer)
self.total_loss = total_loss
self.sample_weights = sample_weights
# functions for train, test and predict will
# be compiled lazily when required.
# This saves time when the user is not using all functions.
self._function_kwargs = kwargs
self.train_function = None
self.test_function = None
self.predict_function = None
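# Usage sketch for a two-output model (the output names 'output_a'/'output_b' are illustrative):
#   model.compile(optimizer='rmsprop',
#                 loss={'output_a': 'categorical_crossentropy', 'output_b': 'mse'},
#                 loss_weights={'output_a': 1., 'output_b': 0.5},
#                 metrics={'output_a': 'accuracy'})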
def _make_train_function(self):
if not hasattr(self, 'train_function'):
raise Exception('You must compile your model before using it.')
if self.train_function is None:
if self.uses_learning_phase and type(K.learning_phase()) is not int:
inputs = self.inputs + self.targets + self.sample_weights + [K.learning_phase()]
else:
inputs = self.inputs + self.targets + self.sample_weights
# get trainable weights
trainable_weights = collect_trainable_weights(self)
training_updates = self.optimizer.get_updates(trainable_weights, self.constraints, self.total_loss)
updates = self.updates + training_updates
# returns loss and metrics. Updates weights at each call.
self.train_function = K.function(inputs,
[self.total_loss] + self.metrics_tensors,
updates=updates,
**self._function_kwargs)
def _make_test_function(self):
if not hasattr(self, 'test_function'):
raise Exception('You must compile your model before using it.')
if self.test_function is None:
if self.uses_learning_phase and type(K.learning_phase()) is not int:
inputs = self.inputs + self.targets + self.sample_weights + [K.learning_phase()]
else:
inputs = self.inputs + self.targets + self.sample_weights
# return loss and metrics, no gradient updates.
# Does update the network states.
self.test_function = K.function(inputs,
[self.total_loss] + self.metrics_tensors,
updates=self.state_updates,
**self._function_kwargs)
def _make_predict_function(self):
if not hasattr(self, 'predict_function'):
self.predict_function = None
if self.predict_function is None:
if self.uses_learning_phase and type(K.learning_phase()) is not int:
inputs = self.inputs + [K.learning_phase()]
else:
inputs = self.inputs
# returns network outputs. Does not update weights.
# Does update the network states.
kwargs = getattr(self, '_function_kwargs', {})
self.predict_function = K.function(inputs,
self.outputs,
updates=self.state_updates,
**kwargs)
def _fit_loop(self, f, ins, out_labels=[], batch_size=32,
nb_epoch=100, verbose=1, callbacks=[],
val_f=None, val_ins=None, shuffle=True,
callback_metrics=[]):
'''Abstract fit function for f(ins).
Assume that f returns a list, labeled by out_labels.
# Arguments
f: Keras function returning a list of tensors
ins: list of tensors to be fed to `f`
out_labels: list of strings, display names of
the outputs of `f`
batch_size: integer batch size
nb_epoch: number of times to iterate over the data
verbose: verbosity mode, 0, 1 or 2
callbacks: list of callbacks to be called during training
val_f: Keras function to call for validation
val_ins: list of tensors to be fed to `val_f`
shuffle: whether to shuffle the data at the beginning of each epoch
callback_metrics: list of strings, the display names of the metrics
passed to the callbacks. They should be the
concatenation of list the display names of the outputs of
`f` and the list of display names of the outputs of `f_val`.
# Returns
`History` object.
'''
do_validation = False
if val_f and val_ins:
do_validation = True
if verbose:
print('Train on %d samples, validate on %d samples' %
(len(ins[0]), len(val_ins[0])))
nb_train_sample = len(ins[0])
index_array = np.arange(nb_train_sample)
self.history = cbks.History()
callbacks = [cbks.BaseLogger()] + callbacks + [self.history]
if verbose:
callbacks += [cbks.ProgbarLogger()]
callbacks = cbks.CallbackList(callbacks)
# it's possible to callback a different model than self
# (used by Sequential models)
if hasattr(self, 'callback_model') and self.callback_model:
callback_model = self.callback_model
else:
callback_model = self
callbacks._set_model(callback_model)
callbacks._set_params({
'batch_size': batch_size,
'nb_epoch': nb_epoch,
'nb_sample': nb_train_sample,
'verbose': verbose,
'do_validation': do_validation,
'metrics': callback_metrics,
})
callbacks.on_train_begin()
callback_model.stop_training = False
self.validation_data = val_ins
for epoch in range(nb_epoch):
callbacks.on_epoch_begin(epoch)
if shuffle == 'batch':
index_array = batch_shuffle(index_array, batch_size)
elif shuffle:
np.random.shuffle(index_array)
batches = make_batches(nb_train_sample, batch_size)
epoch_logs = {}
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
try:
if type(ins[-1]) is float:
# do not slice the training phase flag
ins_batch = slice_X(ins[:-1], batch_ids) + [ins[-1]]
else:
ins_batch = slice_X(ins, batch_ids)
except TypeError:
raise Exception('TypeError while preparing batch. '
'If using HDF5 input data, '
'pass shuffle="batch".')
batch_logs = {}
batch_logs['batch'] = batch_index
batch_logs['size'] = len(batch_ids)
callbacks.on_batch_begin(batch_index, batch_logs)
outs = f(ins_batch)
if type(outs) != list:
outs = [outs]
for l, o in zip(out_labels, outs):
batch_logs[l] = o
callbacks.on_batch_end(batch_index, batch_logs)
if batch_index == len(batches) - 1: # last batch
# validation
if do_validation:
# replace with self._evaluate
val_outs = self._test_loop(val_f, val_ins,
batch_size=batch_size,
verbose=0)
if type(val_outs) != list:
val_outs = [val_outs]
# same labels assumed
for l, o in zip(out_labels, val_outs):
epoch_logs['val_' + l] = o
callbacks.on_epoch_end(epoch, epoch_logs)
if callback_model.stop_training:
break
callbacks.on_train_end()
return self.history
def _predict_loop(self, f, ins, batch_size=32, verbose=0):
'''Abstract method to loop over some data in batches.
# Arguments
f: Keras function returning a list of tensors.
ins: list of tensors to be fed to `f`.
batch_size: integer batch size.
verbose: verbosity mode.
# Returns
Array of predictions (if the model has a single output)
or list of arrays of predictions
(if the model has multiple outputs).
'''
nb_sample = len(ins[0])
outs = []
if verbose == 1:
progbar = Progbar(target=nb_sample)
batches = make_batches(nb_sample, batch_size)
index_array = np.arange(nb_sample)
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
if type(ins[-1]) is float:
# do not slice the training phase flag
ins_batch = slice_X(ins[:-1], batch_ids) + [ins[-1]]
else:
ins_batch = slice_X(ins, batch_ids)
batch_outs = f(ins_batch)
if type(batch_outs) != list:
batch_outs = [batch_outs]
if batch_index == 0:
for batch_out in batch_outs:
shape = (nb_sample,) + batch_out.shape[1:]
outs.append(np.zeros(shape, dtype=K.floatx()))
for i, batch_out in enumerate(batch_outs):
outs[i][batch_start:batch_end] = batch_out
if verbose == 1:
progbar.update(batch_end)
if len(outs) == 1:
return outs[0]
return outs
def _test_loop(self, f, ins, batch_size=32, verbose=0):
'''Abstract method to loop over some data in batches.
# Arguments
f: Keras function returning a list of tensors.
ins: list of tensors to be fed to `f`.
batch_size: integer batch size.
verbose: verbosity mode.
# Returns
Scalar loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
'''
nb_sample = len(ins[0])
outs = []
if verbose == 1:
progbar = Progbar(target=nb_sample)
batches = make_batches(nb_sample, batch_size)
index_array = np.arange(nb_sample)
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
if type(ins[-1]) is float:
# do not slice the training phase flag
ins_batch = slice_X(ins[:-1], batch_ids) + [ins[-1]]
else:
ins_batch = slice_X(ins, batch_ids)
batch_outs = f(ins_batch)
if type(batch_outs) == list:
if batch_index == 0:
for _ in batch_outs:
outs.append(0.)
for i, batch_out in enumerate(batch_outs):
outs[i] += batch_out * len(batch_ids)
else:
if batch_index == 0:
outs.append(0.)
outs[0] += batch_outs * len(batch_ids)
if verbose == 1:
progbar.update(batch_end)
for i, out in enumerate(outs):
outs[i] /= nb_sample
if len(outs) == 1:
return outs[0]
return outs
def _standardize_user_data(self, x, y,
sample_weight=None, class_weight=None,
check_batch_dim=True, batch_size=None):
if not hasattr(self, 'optimizer'):
raise Exception('You must compile a model before training/testing.'
' Use `model.compile(optimizer, loss)`.')
output_shapes = []
for output_shape, loss_fn in zip(self.internal_output_shapes, self.loss_functions):
if loss_fn.__name__ == 'sparse_categorical_crossentropy':
output_shapes.append(output_shape[:-1] + (1,))
elif getattr(objectives, loss_fn.__name__, None) is None:
output_shapes.append(None)
else:
output_shapes.append(output_shape)
x = standardize_input_data(x, self.input_names,
self.internal_input_shapes,
check_batch_dim=False,
exception_prefix='model input')
y = standardize_input_data(y, self.output_names,
output_shapes,
check_batch_dim=False,
exception_prefix='model target')
sample_weights = standardize_sample_weights(sample_weight,
self.output_names)
class_weights = standardize_class_weights(class_weight,
self.output_names)
sample_weights = [standardize_weights(ref, sw, cw, mode)
for (ref, sw, cw, mode)
in zip(y, sample_weights, class_weights, self.sample_weight_modes)]
check_array_lengths(x, y, sample_weights)
check_loss_and_target_compatibility(y, self.loss_functions, self.internal_output_shapes)
if self.stateful and batch_size:
if x[0].shape[0] % batch_size != 0:
raise Exception('In a stateful network, '
'you should only pass inputs with '
'a number of samples that can be '
'divided by the batch size. Found: ' +
str(x[0].shape[0]) + ' samples')
return x, y, sample_weights
def fit(self, x, y, batch_size=32, nb_epoch=10, verbose=1, callbacks=[],
validation_split=0., validation_data=None, shuffle=True,
class_weight=None, sample_weight=None):
'''Trains the model for a fixed number of epochs (iterations on a dataset).
# Arguments
x: Numpy array of training data,
or list of Numpy arrays if the model has multiple inputs.
If all inputs in the model are named, you can also pass a dictionary
mapping input names to Numpy arrays.
y: Numpy array of target data,
or list of Numpy arrays if the model has multiple outputs.
If all outputs in the model are named, you can also pass a dictionary
mapping output names to Numpy arrays.
batch_size: integer. Number of samples per gradient update.
nb_epoch: integer, the number of times to iterate over the training data arrays.
verbose: 0, 1, or 2. Verbosity mode. 0 = silent, 1 = verbose, 2 = one log line per epoch.
callbacks: list of callbacks to be called during training.
See [callbacks](/callbacks).
validation_split: float between 0 and 1:
fraction of the training data to be used as validation data.
The model will set apart this fraction of the training data,
will not train on it, and will evaluate the loss and any model metrics
on this data at the end of each epoch.
validation_data: data on which to evaluate the loss and any model metrics
at the end of each epoch. The model will not be trained on this data.
This could be a tuple (x_val, y_val) or a tuple (val_x, val_y, val_sample_weights).
shuffle: boolean, whether to shuffle the training data before each epoch.
class_weight: optional dictionary mapping class indices (integers) to
a weight (float) to apply to the model's loss for the samples
from this class during training.
This can be useful to tell the model to "pay more attention" to
samples from an under-represented class.
sample_weight: optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape (samples, sequence_length),
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify sample_weight_mode="temporal" in compile().
# Returns
A `History` instance. Its `history` attribute contains
all information collected during training.
'''
# validate user data
x, y, sample_weights = self._standardize_user_data(x, y,
sample_weight=sample_weight,
class_weight=class_weight,
check_batch_dim=False,
batch_size=batch_size)
# prepare validation data
if validation_data:
do_validation = True
if len(validation_data) == 2:
val_x, val_y = validation_data
val_sample_weight = None
elif len(validation_data) == 3:
val_x, val_y, val_sample_weight = validation_data
else:
raise Exception('validation_data should be a tuple '
'(val_x, val_y, val_sample_weight) '
'or (val_x, val_y). Found: ' + str(validation_data))
val_x, val_y, val_sample_weights = self._standardize_user_data(val_x, val_y,
sample_weight=val_sample_weight,
check_batch_dim=False,
batch_size=batch_size)
self._make_test_function()
val_f = self.test_function
if self.uses_learning_phase and type(K.learning_phase()) is not int:
val_ins = val_x + val_y + val_sample_weights + [0.]
else:
val_ins = val_x + val_y + val_sample_weights
elif validation_split and 0. < validation_split < 1.:
do_validation = True
split_at = int(len(x[0]) * (1. - validation_split))
x, val_x = (slice_X(x, 0, split_at), slice_X(x, split_at))
y, val_y = (slice_X(y, 0, split_at), slice_X(y, split_at))
sample_weights, val_sample_weights = (
slice_X(sample_weights, 0, split_at), slice_X(sample_weights, split_at))
self._make_test_function()
val_f = self.test_function
if self.uses_learning_phase and type(K.learning_phase()) is not int:
val_ins = val_x + val_y + val_sample_weights + [0.]
else:
val_ins = val_x + val_y + val_sample_weights
else:
do_validation = False
val_f = None
val_ins = None
# prepare input arrays and training function
if self.uses_learning_phase and type(K.learning_phase()) is not int:
ins = x + y + sample_weights + [1.]
else:
ins = x + y + sample_weights
self._make_train_function()
f = self.train_function
# prepare display labels
out_labels = self.metrics_names
# rename duplicated metrics name
# (can happen with an output layer shared among multiple dataflows)
deduped_out_labels = []
for i, label in enumerate(out_labels):
new_label = label
if out_labels.count(label) > 1:
dup_idx = out_labels[:i].count(label)
new_label += '_' + str(dup_idx + 1)
deduped_out_labels.append(new_label)
out_labels = deduped_out_labels
if do_validation:
callback_metrics = copy.copy(out_labels) + ['val_' + n for n in out_labels]
else:
callback_metrics = copy.copy(out_labels)
# delegate logic to _fit_loop
return self._fit_loop(f, ins, out_labels=out_labels,
batch_size=batch_size, nb_epoch=nb_epoch,
verbose=verbose, callbacks=callbacks,
val_f=val_f, val_ins=val_ins, shuffle=shuffle,
callback_metrics=callback_metrics)
def evaluate(self, x, y, batch_size=32, verbose=1, sample_weight=None):
'''Returns the loss value and metrics values for the model
in test mode. Computation is done in batches.
# Arguments
x: Numpy array of test data,
or list of Numpy arrays if the model has multiple inputs.
If all inputs in the model are named, you can also pass a dictionary
mapping input names to Numpy arrays.
y: Numpy array of target data,
or list of Numpy arrays if the model has multiple outputs.
If all outputs in the model are named, you can also pass a dictionary
mapping output names to Numpy arrays.
batch_size: integer. Number of samples per evaluation batch.
verbose: verbosity mode, 0 or 1.
sample_weight: optional array of weights to apply to the model's loss
for each sample (see `fit` for details).
# Returns
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
'''
# validate user data
x, y, sample_weights = self._standardize_user_data(x, y,
sample_weight=sample_weight,
check_batch_dim=False,
batch_size=batch_size)
# prepare inputs, delegate logic to _test_loop
if self.uses_learning_phase and type(K.learning_phase()) is not int:
ins = x + y + sample_weights + [0.]
else:
ins = x + y + sample_weights
self._make_test_function()
f = self.test_function
return self._test_loop(f, ins,
batch_size=batch_size,
verbose=verbose)
def predict(self, x, batch_size=32, verbose=0):
'''Generates output predictions for the input samples,
processing the samples in a batched way.
# Arguments
x: the input data, as a Numpy array
(or list of Numpy arrays if the model has multiple inputs).
batch_size: integer. Number of samples per batch of predictions.
verbose: verbosity mode, 0 or 1.
# Returns
A Numpy array of predictions.
'''
# validate user data
x = standardize_input_data(x, self.input_names,
self.internal_input_shapes,
check_batch_dim=False)
if self.stateful:
if x[0].shape[0] > batch_size and x[0].shape[0] % batch_size != 0:
raise Exception('In a stateful network, '
'you should only pass inputs with '
'a number of samples that can be '
'divided by the batch size. Found: ' +
str(x[0].shape[0]) + ' samples. '
'Batch size: ' + str(batch_size) + '.')
# prepare inputs, delegate logic to _predict_loop
if self.uses_learning_phase and type(K.learning_phase()) is not int:
ins = x + [0.]
else:
ins = x
self._make_predict_function()
f = self.predict_function
return self._predict_loop(f, ins,
batch_size=batch_size, verbose=verbose)
def train_on_batch(self, x, y,
sample_weight=None, class_weight=None):
'''Runs a single gradient update on a single batch of data.
# Arguments
x: Numpy array of training data,
or list of Numpy arrays if the model has multiple inputs.
If all inputs in the model are named, you can also pass a dictionary
mapping input names to Numpy arrays.
y: Numpy array of target data,
or list of Numpy arrays if the model has multiple outputs.
If all outputs in the model are named, you can also pass a dictionary
mapping output names to Numpy arrays.
sample_weight: optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape (samples, sequence_length),
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify sample_weight_mode="temporal" in compile().
class_weight: optional dictionary mapping class indices (integers) to
a weight (float) to apply to the model's loss for the samples
from this class during training.
This can be useful to tell the model to "pay more attention" to
samples from an under-represented class.
# Returns
Scalar training loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
'''
x, y, sample_weights = self._standardize_user_data(x, y,
sample_weight=sample_weight,
class_weight=class_weight,
check_batch_dim=True)
if self.uses_learning_phase and type(K.learning_phase()) is not int:
ins = x + y + sample_weights + [1.]
else:
ins = x + y + sample_weights
self._make_train_function()
outputs = self.train_function(ins)
if len(outputs) == 1:
return outputs[0]
return outputs
def test_on_batch(self, x, y, sample_weight=None):
'''Test the model on a single batch of samples.
# Arguments
x: Numpy array of test data,
or list of Numpy arrays if the model has multiple inputs.
If all inputs in the model are named, you can also pass a dictionary
mapping input names to Numpy arrays.
y: Numpy array of target data,
or list of Numpy arrays if the model has multiple outputs.
If all outputs in the model are named, you can also pass a dictionary
mapping output names to Numpy arrays.
sample_weight: optional array of the same length as x, containing
weights to apply to the model's loss for each sample.
In the case of temporal data, you can pass a 2D array
with shape (samples, sequence_length),
to apply a different weight to every timestep of every sample.
In this case you should make sure to specify sample_weight_mode="temporal" in compile().
# Returns
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
'''
x, y, sample_weights = self._standardize_user_data(x, y,
sample_weight=sample_weight,
check_batch_dim=True)
if self.uses_learning_phase and type(K.learning_phase()) is not int:
ins = x + y + sample_weights + [0.]
else:
ins = x + y + sample_weights
self._make_test_function()
outputs = self.test_function(ins)
if len(outputs) == 1:
return outputs[0]
return outputs
def predict_on_batch(self, x):
'''Returns predictions for a single batch of samples.
'''
x = standardize_input_data(x, self.input_names,
self.internal_input_shapes)
if self.uses_learning_phase and type(K.learning_phase()) is not int:
ins = x + [0.]
else:
ins = x
self._make_predict_function()
outputs = self.predict_function(ins)
if len(outputs) == 1:
return outputs[0]
return outputs
def fit_generator(self, generator, samples_per_epoch, nb_epoch,
verbose=1, callbacks=[],
validation_data=None, nb_val_samples=None,
class_weight={}, max_q_size=10, nb_worker=1, pickle_safe=False):
'''Fits the model on data generated batch-by-batch by
a Python generator.
The generator is run in parallel to the model, for efficiency.
For instance, this allows you to do real-time data augmentation
on images on CPU in parallel to training your model on GPU.
# Arguments
generator: a generator.
The output of the generator must be either
- a tuple (inputs, targets)
- a tuple (inputs, targets, sample_weights).
All arrays should contain the same number of samples.
The generator is expected to loop over its data
indefinitely. An epoch finishes when `samples_per_epoch`
samples have been seen by the model.
samples_per_epoch: integer, number of samples to process before
going to the next epoch.
nb_epoch: integer, total number of iterations on the data.
verbose: verbosity mode, 0, 1, or 2.
callbacks: list of callbacks to be called during training.
validation_data: this can be either
- a generator for the validation data
- a tuple (inputs, targets)
- a tuple (inputs, targets, sample_weights).
nb_val_samples: only relevant if `validation_data` is a generator.
number of samples to use from validation generator
at the end of every epoch.
class_weight: dictionary mapping class indices to a weight
for the class.
max_q_size: maximum size for the generator queue
nb_worker: maximum number of processes to spin up when using process based threading
pickle_safe: if True, use process based threading. Note that because
this implementation relies on multiprocessing, you should not pass
non-picklable arguments to the generator as they can't be passed
easily to child processes.
# Returns
A `History` object.
# Example
```python
def generate_arrays_from_file(path):
while 1:
f = open(path)
for line in f:
# create numpy arrays of input data
# and labels, from each line in the file
x1, x2, y = process_line(line)
yield ({'input_1': x1, 'input_2': x2}, {'output': y})
f.close()
model.fit_generator(generate_arrays_from_file('/my_file.txt'),
samples_per_epoch=10000, nb_epoch=10)
```
'''
wait_time = 0.01 # in seconds
epoch = 0
do_validation = bool(validation_data)
self._make_train_function()
if do_validation:
self._make_test_function()
# python 2 has 'next', 3 has '__next__'
# avoid any explicit version checks
val_gen = (hasattr(validation_data, 'next') or
hasattr(validation_data, '__next__'))
if val_gen and not nb_val_samples:
raise Exception('When using a generator for validation data, '
'you must specify a value for "nb_val_samples".')
out_labels = self.metrics_names
callback_metrics = out_labels + ['val_' + n for n in out_labels]
# prepare callbacks
self.history = cbks.History()
callbacks = [cbks.BaseLogger()] + callbacks + [self.history]
if verbose:
callbacks += [cbks.ProgbarLogger()]
callbacks = cbks.CallbackList(callbacks)
# it's possible to callback a different model than self:
if hasattr(self, 'callback_model') and self.callback_model:
callback_model = self.callback_model
else:
callback_model = self
callbacks._set_model(callback_model)
callbacks._set_params({
'nb_epoch': nb_epoch,
'nb_sample': samples_per_epoch,
'verbose': verbose,
'do_validation': do_validation,
'metrics': callback_metrics,
})
callbacks.on_train_begin()
if do_validation and not val_gen:
if len(validation_data) == 2:
val_x, val_y = validation_data
val_sample_weight = None
elif len(validation_data) == 3:
val_x, val_y, val_sample_weight = validation_data
else:
raise Exception('validation_data should be a tuple '
'(val_x, val_y, val_sample_weight) '
'or (val_x, val_y). Found: ' + str(validation_data))
val_x, val_y, val_sample_weights = self._standardize_user_data(val_x, val_y, val_sample_weight)
self.validation_data = val_x + [val_y, val_sample_weights]
else:
self.validation_data = None
# start generator thread storing batches into a queue
data_gen_queue, _stop = generator_queue(generator, max_q_size=max_q_size, nb_worker=nb_worker,
pickle_safe=pickle_safe)
callback_model.stop_training = False
while epoch < nb_epoch:
callbacks.on_epoch_begin(epoch)
samples_seen = 0
batch_index = 0
while samples_seen < samples_per_epoch:
generator_output = None
while not _stop.is_set():
if not data_gen_queue.empty():
generator_output = data_gen_queue.get()
break
else:
time.sleep(wait_time)
if not hasattr(generator_output, '__len__'):
_stop.set()
raise Exception('output of generator should be a tuple '
'(x, y, sample_weight) '
'or (x, y). Found: ' + str(generator_output))
if len(generator_output) == 2:
x, y = generator_output
sample_weight = None
elif len(generator_output) == 3:
x, y, sample_weight = generator_output
else:
_stop.set()
raise Exception('output of generator should be a tuple '
'(x, y, sample_weight) '
'or (x, y). Found: ' + str(generator_output))
# build batch logs
batch_logs = {}
if type(x) is list:
batch_size = len(x[0])
elif type(x) is dict:
batch_size = len(list(x.values())[0])
else:
batch_size = len(x)
batch_logs['batch'] = batch_index
batch_logs['size'] = batch_size
callbacks.on_batch_begin(batch_index, batch_logs)
try:
outs = self.train_on_batch(x, y,
sample_weight=sample_weight,
class_weight=class_weight)
except:
_stop.set()
raise
if type(outs) != list:
outs = [outs]
for l, o in zip(out_labels, outs):
batch_logs[l] = o
callbacks.on_batch_end(batch_index, batch_logs)
# construct epoch logs
epoch_logs = {}
batch_index += 1
samples_seen += batch_size
# epoch finished
if samples_seen > samples_per_epoch:
warnings.warn('Epoch comprised more than '
'`samples_per_epoch` samples, '
'which might affect learning results. '
'Set `samples_per_epoch` correctly '
'to avoid this warning.')
if samples_seen >= samples_per_epoch and do_validation:
if val_gen:
val_outs = self.evaluate_generator(validation_data,
nb_val_samples,
max_q_size=max_q_size)
else:
# no need for try/except because
# data has already been validated
val_outs = self.evaluate(val_x, val_y,
sample_weight=val_sample_weights,
verbose=0)
if type(val_outs) is not list:
val_outs = [val_outs]
# same labels assumed
for l, o in zip(out_labels, val_outs):
epoch_logs['val_' + l] = o
callbacks.on_epoch_end(epoch, epoch_logs)
epoch += 1
if callback_model.stop_training:
break
_stop.set()
if pickle_safe:
data_gen_queue.close()
callbacks.on_train_end()
return self.history
def evaluate_generator(self, generator, val_samples, max_q_size=10, nb_worker=1, pickle_safe=False):
'''Evaluates the model on a data generator. The generator should
return the same kind of data as accepted by `test_on_batch`.
# Arguments
generator: generator yielding tuples (inputs, targets)
or (inputs, targets, sample_weights).
val_samples: total number of samples to generate from `generator`
before returning.
max_q_size: maximum size for the generator queue
nb_worker: maximum number of processes to spin up when using process based threading
pickle_safe: if True, use process based threading. Note that because
this implementation relies on multiprocessing, you should not pass
non-picklable arguments to the generator as they can't be passed
easily to child processes.
# Returns
Scalar test loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
'''
self._make_test_function()
processed_samples = 0
wait_time = 0.01
all_outs = []
weights = []
data_gen_queue, _stop = generator_queue(generator, max_q_size=max_q_size, nb_worker=nb_worker,
pickle_safe=pickle_safe)
while processed_samples < val_samples:
generator_output = None
while not _stop.is_set():
if not data_gen_queue.empty():
generator_output = data_gen_queue.get()
break
else:
time.sleep(wait_time)
if not hasattr(generator_output, '__len__'):
_stop.set()
raise Exception('output of generator should be a tuple '
'(x, y, sample_weight) '
'or (x, y). Found: ' + str(generator_output))
if len(generator_output) == 2:
x, y = generator_output
sample_weight = None
elif len(generator_output) == 3:
x, y, sample_weight = generator_output
else:
_stop.set()
raise Exception('output of generator should be a tuple '
'(x, y, sample_weight) '
'or (x, y). Found: ' + str(generator_output))
try:
outs = self.test_on_batch(x, y, sample_weight=sample_weight)
except:
_stop.set()
raise
if type(x) is list:
nb_samples = len(x[0])
elif type(x) is dict:
nb_samples = len(list(x.values())[0])
else:
nb_samples = len(x)
all_outs.append(outs)
processed_samples += nb_samples
weights.append(nb_samples)
_stop.set()
if pickle_safe:
data_gen_queue.close()
if type(outs) is not list:
return np.average(np.asarray(all_outs),
weights=weights)
else:
averages = []
for i in range(len(outs)):
averages.append(np.average([out[i] for out in all_outs],
weights=weights))
return averages
def predict_generator(self, generator, val_samples, max_q_size=10, nb_worker=1, pickle_safe=False):
'''Generates predictions for the input samples from a data generator.
The generator should return the same kind of data as accepted by
`predict_on_batch`.
# Arguments
generator: generator yielding batches of input samples.
val_samples: total number of samples to generate from `generator`
before returning.
max_q_size: maximum size for the generator queue
nb_worker: maximum number of processes to spin up when using process based threading
pickle_safe: if True, use process based threading. Note that because
this implementation relies on multiprocessing, you should not pass
non-picklable arguments to the generator as they can't be passed
easily to child processes.
# Returns
Numpy array(s) of predictions.
'''
self._make_predict_function()
processed_samples = 0
wait_time = 0.01
all_outs = []
data_gen_queue, _stop = generator_queue(generator, max_q_size=max_q_size, nb_worker=nb_worker,
pickle_safe=pickle_safe)
while processed_samples < val_samples:
generator_output = None
while not _stop.is_set():
if not data_gen_queue.empty():
generator_output = data_gen_queue.get()
break
else:
time.sleep(wait_time)
if isinstance(generator_output, tuple):
if len(generator_output) == 2:
x, y = generator_output
sample_weight = None
elif len(generator_output) == 3:
x, y, sample_weight = generator_output
else:
_stop.set()
raise Exception('output of generator should be a tuple '
'(x, y, sample_weight) '
'or (x, y). Found: ' + str(generator_output))
else:
x = generator_output
try:
outs = self.predict_on_batch(x)
except:
_stop.set()
raise
if type(x) is list:
nb_samples = len(x[0])
elif type(x) is dict:
nb_samples = len(list(x.values())[0])
else:
nb_samples = len(x)
if type(outs) != list:
outs = [outs]
if len(all_outs) == 0:
for out in outs:
shape = (val_samples,) + out.shape[1:]
all_outs.append(np.zeros(shape, dtype=K.floatx()))
for i, out in enumerate(outs):
all_outs[i][processed_samples:(processed_samples + nb_samples)] = out
processed_samples += nb_samples
_stop.set()
if pickle_safe:
data_gen_queue.close()
if len(all_outs) == 1:
return all_outs[0]
return all_outs
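# --- Hedged usage sketch (not part of the original source) -----------------
# Assumes a model instance compiled elsewhere via `model.compile(optimizer,
# loss)` plus numpy arrays `x_train`, `y_train`, `x_val`, `y_val`; all names
# below are illustrative only.
def _example_training_loop(model, x_train, y_train, x_val, y_val):
    # Train with held-out validation data, then evaluate and predict using
    # the batched loops implemented above.
    history = model.fit(x_train, y_train,
                        batch_size=64, nb_epoch=5,
                        validation_data=(x_val, y_val), verbose=1)
    val_metrics = model.evaluate(x_val, y_val, batch_size=64, verbose=0)
    predictions = model.predict(x_val, batch_size=64)
    return history, val_metrics, predictions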
|
# -*- coding: utf-8 -*-
import pytest
class TestAccountSettings:
"""Tests for the /account/settings page."""
def test_submit_email_form_without_xhr_returns_full_html_page(self, app):
res = app.get("/account/settings")
email_form = res.forms["email"]
email_form["email"] = "new_email@example.com"
email_form["password"] = "pass"
res = email_form.submit().follow()
assert res.text.startswith("<!DOCTYPE html>")
def test_submit_email_form_with_xhr_returns_partial_html_snippet(self, app):
res = app.get("/account/settings")
email_form = res.forms["email"]
email_form["email"] = "new_email@example.com"
email_form["password"] = "pass"
res = email_form.submit(xhr=True, status=200)
assert res.text.strip("\n").startswith("<form")
def test_submit_email_form_with_xhr_returns_plain_text(self, app):
res = app.get("/account/settings")
email_form = res.forms["email"]
email_form["email"] = "new_email@example.com"
email_form["password"] = "pass"
res = email_form.submit(xhr=True)
assert res.content_type == "text/plain"
def test_submit_password_form_without_xhr_returns_full_html_page(self, app):
res = app.get("/account/settings")
password_form = res.forms["password"]
password_form["password"] = "pass"
password_form["new_password"] = "new_password"
password_form["new_password_confirm"] = "new_password"
res = password_form.submit().follow()
assert res.text.startswith("<!DOCTYPE html>")
def test_submit_password_form_with_xhr_returns_partial_html_snippet(self, app):
res = app.get("/account/settings")
password_form = res.forms["password"]
password_form["password"] = "pass"
password_form["new_password"] = "new_password"
password_form["new_password_confirm"] = "new_password"
res = password_form.submit(xhr=True)
assert res.text.strip("\n").startswith("<form")
def test_submit_password_form_with_xhr_returns_plain_text(self, app):
res = app.get("/account/settings")
password_form = res.forms["password"]
password_form["password"] = "pass"
password_form["new_password"] = "new_password"
password_form["new_password_confirm"] = "new_password"
res = password_form.submit(xhr=True)
assert res.content_type == "text/plain"
def test_submit_invalid_password_form_with_xhr_returns_400(self, app):
res = app.get("/account/settings")
password_form = res.forms["password"]
password_form["password"] = "pass"
password_form["new_password"] = "new_password"
password_form["new_password_confirm"] = "WRONG"
password_form.submit(xhr=True, status=400)
@pytest.fixture
def user(self, db_session, factories):
# Password is 'pass'
user = factories.User(
password="$2b$12$21I1LjTlGJmLXzTDrQA8gusckjHEMepTmLY5WN3Kx8hSaqEEKj9V6"
)
db_session.commit()
return user
@pytest.fixture
def app(self, app, user):
res = app.get("/login")
res.form["username"] = user.username
res.form["password"] = "pass"
res.form.submit()
return app
|
import os
import shutil
from io import StringIO
from types import SimpleNamespace
import pkg_resources
#
from colt import Colt
#
from .qm.qm import QM, implemented_qm_software
from .molecule.terms import Terms
from .dihedral_scan import DihedralScan
from .misc import LOGO
class Initialize(Colt):
_user_input = """
[ff]
# Number of equivalent neighbors (n) needed to consider two atoms equivalent
# Negative values turns off equivalence, 0 makes same elements equivalent
n_equiv = 4 :: int
# Number of first n neighbors to exclude in the forcefield
n_excl = 2 :: int :: [2, 3]
# Lennard jones method for the forcefield
lennard_jones = opls_auto :: str :: [gromos_auto, gromos, opls_auto, opls, gaff, gaff2, ext]
# Use externally provided point charges from the file "ext_q" in the job directory
ext_charges = no :: bool
# Scale QM charges to account for condensed phase polarization (should be set to 1 for gas phase)
charge_scaling = 1.2 :: float
# If user chooses ext_charges=True, by default fragments will still use the chosen QM method for
# determining fragment charges. This is to avoid spuriously high charges on capping hydrogens.
# However, using QM charges for fragments and ext_charges for the molecule can also result in
# inaccuracies if these two charges are very different.
use_ext_charges_for_frags = no :: bool
# Additional exclusions (GROMACS format)
exclusions = :: literal
# Switch standard non-bonded interactions between two atoms to pair interactions
# (provide atom pairs in each row)
pairs = :: literal
# Path for the external FF library for Lennard-Jones parameters (GROMACS format).
ext_lj_lib = :: folder, optional
# Lennard-Jones fudge parameter for 1-4 interactions for when "lennard_jones" is set to "ext".
ext_lj_fudge = :: float, optional
# Coulomb fudge parameter for 1-4 interactions for when "lennard_jones" is set to "ext".
ext_q_fudge = :: float, optional
# Lennard-Jones combinations rules for when "lennard_jones" is set to "ext" (GROMACS numbering).
ext_comb_rule = :: int, optional :: [1, 2, 3]
# Name of the atom type for capping hydrogens for when "lennard_jones" is set to "ext"
ext_h_cap = :: str, optional
# Set all dihedrals as rigid (no dihedral scans)
all_rigid = no :: bool
# Use D4 method
_d4 = no :: bool
# Residue name printed on the force field file (Max 5 characters)
res_name = MOL :: str
# Polarize a coordinate file and quit (requires itp_file)
_polarize = no :: bool
# Name of itp file (only needed for polarize option)
_itp_file = itp_file_missing :: str
# Make a polarizable FF
_polar = no :: bool
# Scale the C6 dispersion interactions in the polarizable version of the FF
_polar_c6_scale = 0.8 :: float
# Atoms for which the C6 coefficient should specifically not be scaled
_polar_not_scale_c6 = :: literal
# Manual polarizabilities in the file ext_alpha
_ext_alpha = no :: bool
"""
@staticmethod
def _set_config(config):
config['qm'].update(config['qm']['software'])
config['qm'].update({'software': config['qm']['software'].value})
config.update({key: SimpleNamespace(**val) for key, val in config.items()})
return SimpleNamespace(**config)
@classmethod
def _extend_user_input(cls, questions):
questions.generate_block("qm", QM.colt_user_input)
questions.generate_block("scan", DihedralScan.colt_user_input)
questions.generate_cases("software", {key: software.colt_user_input for key, software in
implemented_qm_software.items()}, block='qm')
questions.generate_block("terms", Terms.get_questions())
@classmethod
def from_config(cls, config):
return cls._set_config(config)
@staticmethod
def set_basis(value):
if value.endswith('**'):
return f'{value[:-2]}(D,P)'.upper()
if value.endswith('*'):
return f'{value[:-1]}(D)'.upper()
return value.upper()
@staticmethod
def set_dispersion(value):
if value.lower() in ["no", "false", "n", "f"]:
return False
return value.upper()
def _get_job_info(filename):
job = {}
filename = filename.rstrip('/')
base = os.path.basename(filename)
path = os.path.dirname(filename)
if path != '':
path = f'{path}/'
if os.path.isfile(filename):
job['coord_file'] = filename
job['name'] = base.split('.')[0]
else:
job['coord_file'] = False
job['name'] = base.split('_qforce')[0]
job['dir'] = f'{path}{job["name"]}_qforce'
job['frag_dir'] = f'{job["dir"]}/fragments'
job['md_data'] = pkg_resources.resource_filename('qforce', 'data')
os.makedirs(job['dir'], exist_ok=True)
return SimpleNamespace(**job)
def _check_and_copy_settings_file(job_dir, config_file):
"""
If options are provided as a file, copy that to job directory.
If options are provided as StringIO, write that to job directory.
"""
settings_file = os.path.join(job_dir, 'settings.ini')
if config_file is not None:
if isinstance(config_file, StringIO):
with open(settings_file, 'w') as fh:
config_file.seek(0)
fh.write(config_file.read())
else:
shutil.copy2(config_file, settings_file)
return settings_file
def initialize(filename, config_file, presets=None):
print(LOGO)
job_info = _get_job_info(filename)
settings_file = _check_and_copy_settings_file(job_info.dir, config_file)
config = Initialize.from_questions(config=settings_file, presets=presets, check_only=True)
return config, job_info
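# Hedged usage sketch (not part of the original source): the file names below
# are assumptions, not files shipped with the package.
def _example_initialize():
    # Parses "settings.ini", creates a "<name>_qforce" job directory next to
    # the coordinate file, and returns the parsed config plus job paths.
    return initialize('mol.xyz', 'settings.ini')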
|
import sys
import argparse
import sqlite3
from collections import defaultdict as dd
parser = argparse.ArgumentParser(
description='Initialize the Epigraph Database',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'epi_db', help='epigraph database')
parser.add_argument(
'--data_dir', default = 'data', help='directory with data')
args = parser.parse_args()
dbfile = args.epi_db
conn = sqlite3.connect(dbfile)  # open a connection to the database file
c = conn.cursor()
log = open("epi_fails.log", 'w')
c.execute("""select eid, eyear, wyear, etitle, wtitle from raw where eyear > wyear and eyear != 'NULL' and wyear != 'NULL';""")
print ("""Epigraph year is after work year""", file=log)
print("ID", "Epigraph Year", "Work year", "Source", "work",
sep='\n', file=log)
print("\n", file=log)
for r in c:
print(f"""{str(r[0])}
{str(r[1])}\t{str(r[3])}
{str(r[2])}\t{str(r[4])}
""", file=log)
print("\n-----------------------------\n", file=log)
conn.commit()
|
#!/usr/bin/env python
#
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example registers all live stream event-sessions ids for monitoring."""
# Import appropriate modules from the client library.
from googleads import ad_manager
# List of session ids
SESSION_IDS = ['INSERT_SESSION_IDS_HERE']
def main(client, session_ids):
# Initialize appropriate service.
live_stream_event_service = client.GetService(
'LiveStreamEventService', version='v201908')
# Register the session ids for monitoring.
session_ids = live_stream_event_service.registerSessionsForMonitoring(
session_ids)
if session_ids:
for session_id in session_ids:
# Print out some information for each live stream event.
print('Session with ID "%s" registered for monitoring.' % (session_id))
print('\nNumber of results found: %s' % len(session_ids))
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client, SESSION_IDS)
|
"""
This module houses the ctypes function prototypes for OGR DataSource
related data structures. OGR_Dr_*, OGR_DS_*, OGR_L_*, OGR_F_*,
OGR_Fld_* routines are relevant here.
"""
from ctypes import POINTER, c_char_p, c_double, c_int, c_long, c_void_p
from django.contrib.gis.gdal.envelope import OGREnvelope
from django.contrib.gis.gdal.libgdal import GDAL_VERSION, lgdal
from django.contrib.gis.gdal.prototypes.generation import (
const_string_output, double_output, geom_output, int64_output, int_output,
srs_output, void_output, voidptr_output,
)
c_int_p = POINTER(c_int) # shortcut type
# Driver Routines
register_all = void_output(lgdal.OGRRegisterAll, [], errcheck=False)
cleanup_all = void_output(lgdal.OGRCleanupAll, [], errcheck=False)
get_driver = voidptr_output(lgdal.OGRGetDriver, [c_int])
get_driver_by_name = voidptr_output(lgdal.OGRGetDriverByName, [c_char_p], errcheck=False)
get_driver_count = int_output(lgdal.OGRGetDriverCount, [])
get_driver_name = const_string_output(lgdal.OGR_Dr_GetName, [c_void_p], decoding='ascii')
# DataSource
open_ds = voidptr_output(lgdal.OGROpen, [c_char_p, c_int, POINTER(c_void_p)])
destroy_ds = void_output(lgdal.OGR_DS_Destroy, [c_void_p], errcheck=False)
release_ds = void_output(lgdal.OGRReleaseDataSource, [c_void_p])
get_ds_name = const_string_output(lgdal.OGR_DS_GetName, [c_void_p])
get_layer = voidptr_output(lgdal.OGR_DS_GetLayer, [c_void_p, c_int])
get_layer_by_name = voidptr_output(lgdal.OGR_DS_GetLayerByName, [c_void_p, c_char_p])
get_layer_count = int_output(lgdal.OGR_DS_GetLayerCount, [c_void_p])
# Layer Routines
get_extent = void_output(lgdal.OGR_L_GetExtent, [c_void_p, POINTER(OGREnvelope), c_int])
get_feature = voidptr_output(lgdal.OGR_L_GetFeature, [c_void_p, c_long])
get_feature_count = int_output(lgdal.OGR_L_GetFeatureCount, [c_void_p, c_int])
get_layer_defn = voidptr_output(lgdal.OGR_L_GetLayerDefn, [c_void_p])
get_layer_srs = srs_output(lgdal.OGR_L_GetSpatialRef, [c_void_p])
get_next_feature = voidptr_output(lgdal.OGR_L_GetNextFeature, [c_void_p])
reset_reading = void_output(lgdal.OGR_L_ResetReading, [c_void_p], errcheck=False)
test_capability = int_output(lgdal.OGR_L_TestCapability, [c_void_p, c_char_p])
get_spatial_filter = geom_output(lgdal.OGR_L_GetSpatialFilter, [c_void_p])
set_spatial_filter = void_output(lgdal.OGR_L_SetSpatialFilter, [c_void_p, c_void_p], errcheck=False)
set_spatial_filter_rect = void_output(
lgdal.OGR_L_SetSpatialFilterRect,
[c_void_p, c_double, c_double, c_double, c_double], errcheck=False
)
# Feature Definition Routines
get_fd_geom_type = int_output(lgdal.OGR_FD_GetGeomType, [c_void_p])
get_fd_name = const_string_output(lgdal.OGR_FD_GetName, [c_void_p])
get_feat_name = const_string_output(lgdal.OGR_FD_GetName, [c_void_p])
get_field_count = int_output(lgdal.OGR_FD_GetFieldCount, [c_void_p])
get_field_defn = voidptr_output(lgdal.OGR_FD_GetFieldDefn, [c_void_p, c_int])
# Feature Routines
clone_feature = voidptr_output(lgdal.OGR_F_Clone, [c_void_p])
destroy_feature = void_output(lgdal.OGR_F_Destroy, [c_void_p], errcheck=False)
feature_equal = int_output(lgdal.OGR_F_Equal, [c_void_p, c_void_p])
get_feat_geom_ref = geom_output(lgdal.OGR_F_GetGeometryRef, [c_void_p])
get_feat_field_count = int_output(lgdal.OGR_F_GetFieldCount, [c_void_p])
get_feat_field_defn = voidptr_output(lgdal.OGR_F_GetFieldDefnRef, [c_void_p, c_int])
get_fid = int_output(lgdal.OGR_F_GetFID, [c_void_p])
get_field_as_datetime = int_output(
lgdal.OGR_F_GetFieldAsDateTime,
[c_void_p, c_int, c_int_p, c_int_p, c_int_p, c_int_p, c_int_p, c_int_p]
)
get_field_as_double = double_output(lgdal.OGR_F_GetFieldAsDouble, [c_void_p, c_int])
get_field_as_integer = int_output(lgdal.OGR_F_GetFieldAsInteger, [c_void_p, c_int])
if GDAL_VERSION >= (2, 0):
get_field_as_integer64 = int64_output(lgdal.OGR_F_GetFieldAsInteger64, [c_void_p, c_int])
get_field_as_string = const_string_output(lgdal.OGR_F_GetFieldAsString, [c_void_p, c_int])
get_field_index = int_output(lgdal.OGR_F_GetFieldIndex, [c_void_p, c_char_p])
# Field Routines
get_field_name = const_string_output(lgdal.OGR_Fld_GetNameRef, [c_void_p])
get_field_precision = int_output(lgdal.OGR_Fld_GetPrecision, [c_void_p])
get_field_type = int_output(lgdal.OGR_Fld_GetType, [c_void_p])
get_field_type_name = const_string_output(lgdal.OGR_GetFieldTypeName, [c_int])
get_field_width = int_output(lgdal.OGR_Fld_GetWidth, [c_void_p])
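# Hedged usage sketch (not part of Django itself): register the OGR drivers
# exposed above and list their names; purely illustrative.
def _example_list_drivers():
    register_all()
    names = []
    for i in range(get_driver_count()):
        driver = get_driver(i)
        names.append(get_driver_name(driver))
    return names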
|
import os
import re
import json
import string
import hashlib
import requests
import datetime
import pandas as pd
import pymongo as pm
from bson import Binary
from bs4 import BeautifulSoup
from urllib.parse import quote_plus
class MongoOps():
"""
Helper around MongoDB that caches geocoded home locations and driving
directions fetched from an external Esri service.
"""
def __init__(self, username="admin",
password="admin",
host="mongo:27017",
testing=False):
self.username = username
self.password = password
self.host = host
self.uri = "mongodb://%s:%s@%s" % (quote_plus(self.username),
quote_plus(self.password), self.host)
self.connection = pm.MongoClient(self.uri)
self.homes_database = self.connection.homes_database
self.homes_collection = self.homes_database.homes
self.directions_collection = self.homes_database.directions
self.create_loc_index(self.homes_collection)
self.create_loc_index(self.directions_collection)
self.testing = testing
def drop_table(self, collection):
collection.drop()
def create_loc_index(self, collection, name = "geometry", location_type = "2dsphere"):
collection.create_index([(name, location_type)])
def load_dict(self, collection, data):
collection.insert_one(data)
def search_collection(self, collection, search_val = None, limit = 10):
if limit != None:
return collection.find(search_val).limit(limit)
else:
return collection.find(search_val)
def address_info_in_database(self, collection, address_hash):
search_val = {"address_hash" : address_hash}
cursor = self.search_collection(collection, search_val = search_val, limit=1)
return cursor.count() > 0
def format_address(self, address):
formatted_address = address.lower()
remove_spaces = lambda s: remove_spaces(re.sub("  ", " ", s)) if "  " in s else s  # collapse runs of spaces
whitelist = string.ascii_lowercase + string.digits + ' '
formatted_address = remove_spaces(formatted_address)
formatted_address = ''.join(c for c in formatted_address if c in whitelist)
return formatted_address
def flask_request(self, url):
try:
r = requests.get(url)
except Exception as e:
return "proxy service error: " + str(e), 503
soup = BeautifulSoup(r.content, "html.parser")
address_info = json.loads(str(soup))
return address_info
def _query_for_location_info(self, address):
address = self.format_address(address)
url = 'http://esri:5000/get_geocode/%s' % quote_plus(address)
r = self.flask_request(url)
r['address'] = address
return r
def safe_query_for_location_info(self, event):
address = ""
if type(event)==dict:
try:
if len(event.keys()) < 1:
return None
except:
return None
address = event["location"]
elif type(event)==str:
address = event
address = self.format_address(address)
address_hash = self.get_hash(address)
if not self.address_info_in_database(self.homes_database.homes, address_hash):
self.print_test("%s not found! Gathering data from Esri." % address)
data = self._query_for_location_info(address)
if type(event)==dict:
event["location"] = data
event["address_hash"] = address_hash
elif type(event)==str:
event = {"location": data, "address_hash": address_hash}
self.load_dict(self.homes_database.homes, event)
else:
self.print_test("%s found! Gathering data from MongoDB." % address)
result = self.search_collection(self.homes_database.homes, {"address_hash" : address_hash}).next()
return result
def get_hash(self, strings):
hasher = hashlib.sha1()
string = "".join(strings)
hasher.update(string.encode('utf-8'))
hashed_directions = hasher.digest()
return hashed_directions
def get_address_from_loc_data(self, loc):
if isinstance(loc, dict):
try:
loc_str = loc["location"]["address"]
except:
print("Dict passed is not properly formatted. Should have {\"location\": {..., \"address\": <address>}} format.")
return None
elif isinstance(loc, str):
try:
parsed = json.loads(loc)
loc_str = parsed.get("location", {}).get("address", loc)
except (ValueError, AttributeError):
loc_str = loc
else:
print("Location data not string or dict.")
return None
return loc_str.strip()
def get_directions(self, start, stop):
"""
get_directions is designed to receive jsons for the following schema.
A schema checker may be added at some point in the future.
{"geometry": {"x" : <float>, "y": <float> }, *}
"""
start_str = json.dumps(start, skipkeys=True)
stop_str = json.dumps(stop, skipkeys=True)
directions_json = {}
url_path = 'get_directions/{ "location_1": %s, "location_2": %s }' % (start_str, stop_str)
url = 'http://esri:5000/%s' % url_path
directions = self.flask_request(url)
directions_json['directions'] = directions
hashed_directions = self.get_hash([start_str, stop_str])
directions_json["directions_hash"] = hashed_directions
return directions_json
def direction_in_database(self, collection, direction_hash):
search_val = {"directions_hash": Binary(data = direction_hash)}
cursor = self.search_collection(collection, search_val = search_val, limit=1)
return cursor.count() > 0
def _query_for_directions(self, start, stop):
directions = self.get_directions(start, stop)
return directions
def safe_query_for_directions(self, start, stop):
start_str = self.get_address_from_loc_data(start)
start_data = self.safe_query_for_location_info(start_str)
stop_str = self.get_address_from_loc_data(stop)
stop_data = self.safe_query_for_location_info(stop_str)
directions_hash = self.get_hash([start_str, stop_str])
if not self.direction_in_database(self.directions_collection, directions_hash):
self.print_test("Directions from %s to %s not found! Gathering data from Esri." % (start_str, stop_str))
directions = self._query_for_directions(start_data["location"], stop_data["location"])
directions["directions_hash"] = directions_hash
directions["start"] = start_data["location"]["address"]
directions["stop"] = stop_data["location"]["address"]
self.load_dict(self.directions_collection, directions)
else:
self.print_test("Directions from %s to %s found! Gathering data from MongoDB." % (start_str, stop_str))
directions = self.search_collection(self.directions_collection, {"directions_hash": directions_hash}).next()
return directions
def print_test(self, string):
if self.testing:
print(string)
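# Hedged usage sketch (not part of the original module): look up two free-form
# addresses and fetch, or reuse from the cache, the directions between them.
# Requires the "mongo" and "esri" services referenced above to be reachable.
def _example_directions(start_address, stop_address):
    mops = MongoOps(testing=True)
    start = mops.safe_query_for_location_info(start_address)
    stop = mops.safe_query_for_location_info(stop_address)
    return mops.safe_query_for_directions(start, stop)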
if __name__ == "__main__":
from ICSParser import ICSParser
mops = MongoOps()
ics = ICSParser()
ics.parse_ics("/data/download.ics")
start = mops.safe_query_for_location_info(ics.to_dict())
ics.parse_ics("/data/download (1).ics")
stop = mops.safe_query_for_location_info(ics.to_dict())
print(mops.safe_query_for_directions(start, stop))
|
# -*- coding: utf-8 -*-
from tests.renderer.xml import xml_templates
from pyramid_oereb.lib.records.view_service import ViewServiceRecord
from pyramid_oereb.views.webservice import Parameter
from pyramid_oereb.lib.renderer.extract.xml_ import Renderer
template = xml_templates().get_template('view_service.xml')
def test_empty():
map = ViewServiceRecord(
reference_wms='',
layer_index=0,
layer_opacity=1.0
)
content = template.render(**{
'map': map
}).decode('utf-8').split('\n')
assert content[0] == '' # empty filler line
assert content[1] == '<data:layerIndex>0</data:layerIndex>'
assert content[2] == '<data:layerOpacity>1.0</data:layerOpacity>'
assert content[3] == '' # empty filler line
assert len(content) == 4
def test_reference_wms():
map = ViewServiceRecord(
reference_wms='http://example.com',
layer_index=0,
layer_opacity=1.0
)
content = template.render(**{
'map': map
}).decode('utf-8').split('\n')
assert content[1] == '<data:ReferenceWMS>http%3A%2F%2Fexample.com</data:ReferenceWMS>'
assert len(content) == 5
def test_legend_at_web():
parameters = Parameter('reduced', 'xml', False, False, 'BL0200002829', '1000', 'CH775979211712', 'de')
renderer = Renderer(None)
map = ViewServiceRecord(
reference_wms='',
legend_at_web={'de': 'http://example-legend.com'},
layer_index=0,
layer_opacity=1.0
)
content = template.render(**{
'params': parameters,
'localized': renderer.get_localized_text,
'map': map
}).decode('utf-8').split('\n')
assert len(content) == 6
assert content[2] == '<data:LegendAtWeb>http%3A%2F%2Fexample-legend.com</data:LegendAtWeb>'
def test_legend_at_web_no_language():
# Requests italian, but only german is available -> best effort: deliver german instead
parameters = Parameter('reduced', 'xml', False, False, 'BL0200002829', '1000', 'CH775979211712', 'it')
renderer = Renderer(None)
map = ViewServiceRecord(
reference_wms='',
legend_at_web={'de': 'http://example-legend.com'},
layer_index=0,
layer_opacity=1.0
)
content = template.render(**{
'params': parameters,
'localized': renderer.get_localized_text,
'map': map
}).decode('utf-8').split('\n')
assert len(content) == 6
assert content[2] == '<data:LegendAtWeb>http%3A%2F%2Fexample-legend.com</data:LegendAtWeb>'
|
from setuptools import setup
import os.path as osp
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
ROOT_DIR = osp.dirname(osp.abspath(__file__))
__version__ = None
exec(open('svox/version.py', 'r').read())
CUDA_FLAGS = []
INSTALL_REQUIREMENTS = []
try:
ext_modules = [
CUDAExtension('svox.csrc', [
'svox/csrc/svox.cpp',
'svox/csrc/svox_kernel.cu',
'svox/csrc/rt_kernel.cu',
'svox/csrc/quantizer.cpp',
], include_dirs=[osp.join(ROOT_DIR, "svox", "csrc", "include")],
optional=True),
]
except:
import warnings
warnings.warn("Failed to build CUDA extension")
ext_modules = []
setup(
name='svox',
version=__version__,
author='Alex Yu',
author_email='alexyu99126@gmail.com',
description='Sparse voxel N^3-tree data structure using CUDA',
long_description='Sparse voxel N^3-tree data structure PyTorch extension, using CUDA',
ext_modules=ext_modules,
setup_requires=['pybind11>=2.5.0'],
packages=['svox', 'svox.csrc'],
cmdclass={'build_ext': BuildExtension},
zip_safe=False,
)
|
import torch
import torch.nn as nn
import model.ops as ops
import torch.nn.functional as F
def make_model(args, parent=False):
return DRLN(args)
class CALayer(nn.Module):
def __init__(self, channel, reduction=16):
super(CALayer, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.c1 = ops.BasicBlock(channel , channel // reduction, 3, 1, 3, 3)
self.c2 = ops.BasicBlock(channel , channel // reduction, 3, 1, 5, 5)
self.c3 = ops.BasicBlock(channel , channel // reduction, 3, 1, 7, 7)
self.c4 = ops.BasicBlockSig((channel // reduction)*3, channel , 3, 1, 1)
def forward(self, x):
y = self.avg_pool(x)
c1 = self.c1(y)
c2 = self.c2(y)
c3 = self.c3(y)
c_out = torch.cat([c1, c2, c3], dim=1)
y = self.c4(c_out)
return x * y
class Block(nn.Module):
def __init__(self, in_channels, out_channels, group=1):
super(Block, self).__init__()
self.r1 = ops.ResidualBlock(in_channels, out_channels)
self.r2 = ops.ResidualBlock(in_channels*2, out_channels*2)
self.r3 = ops.ResidualBlock(in_channels*4, out_channels*4)
self.g = ops.BasicBlock(in_channels*8, out_channels, 1, 1, 0)
self.ca = CALayer(in_channels)
def forward(self, x):
c0 = x
r1 = self.r1(c0)
c1 = torch.cat([c0, r1], dim=1)
r2 = self.r2(c1)
c2 = torch.cat([c1, r2], dim=1)
r3 = self.r3(c2)
c3 = torch.cat([c2, r3], dim=1)
g = self.g(c3)
out = self.ca(g)
return out
class DRLN(nn.Module):
def __init__(self, args):
super(DRLN, self).__init__()
#n_resgroups = args.n_resgroups
#n_resblocks = args.n_resblocks
#n_feats = args.n_feats
#kernel_size = 3
#reduction = args.reduction
#scale = args.scale[0]
#act = nn.ReLU(True)
self.scale = args.scale[0]
chs=64
self.sub_mean = ops.MeanShift((0.4488, 0.4371, 0.4040), sub=True)
self.add_mean = ops.MeanShift((0.4488, 0.4371, 0.4040), sub=False)
self.head = nn.Conv2d(3, chs, 3, 1, 1)
self.b1 = Block(chs, chs)
self.b2 = Block(chs, chs)
self.b3 = Block(chs, chs)
self.b4 = Block(chs, chs)
self.b5 = Block(chs, chs)
self.b6 = Block(chs, chs)
self.b7 = Block(chs, chs)
self.b8 = Block(chs, chs)
self.b9 = Block(chs, chs)
self.b10 = Block(chs, chs)
self.b11 = Block(chs, chs)
self.b12 = Block(chs, chs)
self.b13 = Block(chs, chs)
self.b14 = Block(chs, chs)
self.b15 = Block(chs, chs)
self.b16 = Block(chs, chs)
self.b17 = Block(chs, chs)
self.b18 = Block(chs, chs)
self.b19 = Block(chs, chs)
self.b20 = Block(chs, chs)
self.c1 = ops.BasicBlock(chs*2, chs, 3, 1, 1)
self.c2 = ops.BasicBlock(chs*3, chs, 3, 1, 1)
self.c3 = ops.BasicBlock(chs*4, chs, 3, 1, 1)
self.c4 = ops.BasicBlock(chs*2, chs, 3, 1, 1)
self.c5 = ops.BasicBlock(chs*3, chs, 3, 1, 1)
self.c6 = ops.BasicBlock(chs*4, chs, 3, 1, 1)
self.c7 = ops.BasicBlock(chs*2, chs, 3, 1, 1)
self.c8 = ops.BasicBlock(chs*3, chs, 3, 1, 1)
self.c9 = ops.BasicBlock(chs*4, chs, 3, 1, 1)
self.c10 = ops.BasicBlock(chs*2, chs, 3, 1, 1)
self.c11 = ops.BasicBlock(chs*3, chs, 3, 1, 1)
self.c12 = ops.BasicBlock(chs*4, chs, 3, 1, 1)
self.c13 = ops.BasicBlock(chs*2, chs, 3, 1, 1)
self.c14 = ops.BasicBlock(chs*3, chs, 3, 1, 1)
self.c15 = ops.BasicBlock(chs*4, chs, 3, 1, 1)
self.c16 = ops.BasicBlock(chs*5, chs, 3, 1, 1)
self.c17 = ops.BasicBlock(chs*2, chs, 3, 1, 1)
self.c18 = ops.BasicBlock(chs*3, chs, 3, 1, 1)
self.c19 = ops.BasicBlock(chs*4, chs, 3, 1, 1)
self.c20 = ops.BasicBlock(chs*5, chs, 3, 1, 1)
self.upsample = ops.UpsampleBlock(chs, self.scale , multi_scale=False)
#self.convert = ops.ConvertBlock(chs, chs, 20)
self.tail = nn.Conv2d(chs, 3, 3, 1, 1)
def forward(self, x):
x = self.sub_mean(x)
x = self.head(x)
c0 = o0 = x
b1 = self.b1(o0)
c1 = torch.cat([c0, b1], dim=1)
o1 = self.c1(c1)
b2 = self.b2(o1)
c2 = torch.cat([c1, b2], dim=1)
o2 = self.c2(c2)
b3 = self.b3(o2)
c3 = torch.cat([c2, b3], dim=1)
o3 = self.c3(c3)
a1 = o3 + c0
b4 = self.b4(a1)
c4 = torch.cat([o3, b4], dim=1)
o4 = self.c4(c4)
b5 = self.b5(a1)
c5 = torch.cat([c4, b5], dim=1)
o5 = self.c5(c5)
b6 = self.b6(o5)
c6 = torch.cat([c5, b6], dim=1)
o6 = self.c6(c6)
a2 = o6 + a1
b7 = self.b7(a2)
c7 = torch.cat([o6, b7], dim=1)
o7 = self.c7(c7)
b8 = self.b8(o7)
c8 = torch.cat([c7, b8], dim=1)
o8 = self.c8(c8)
b9 = self.b9(o8)
c9 = torch.cat([c8, b9], dim=1)
o9 = self.c9(c9)
a3 = o9 + a2
b10 = self.b10(a3)
c10 = torch.cat([o9, b10], dim=1)
o10 = self.c10(c10)
b11 = self.b11(o10)
c11 = torch.cat([c10, b11], dim=1)
o11 = self.c11(c11)
b12 = self.b12(o11)
c12 = torch.cat([c11, b12], dim=1)
o12 = self.c12(c12)
a4 = o12 + a3
b13 = self.b13(a4)
c13 = torch.cat([o12, b13], dim=1)
o13 = self.c13(c13)
b14 = self.b14(o13)
c14 = torch.cat([c13, b14], dim=1)
o14 = self.c14(c14)
b15 = self.b15(o14)
c15 = torch.cat([c14, b15], dim=1)
o15 = self.c15(c15)
b16 = self.b16(o15)
c16 = torch.cat([c15, b16], dim=1)
o16 = self.c16(c16)
a5 = o16 + a4
b17 = self.b17(a5)
c17 = torch.cat([o16, b17], dim=1)
o17 = self.c17(c17)
b18 = self.b18(o17)
c18 = torch.cat([c17, b18], dim=1)
o18 = self.c18(c18)
b19 = self.b19(o18)
c19 = torch.cat([c18, b19], dim=1)
o19 = self.c19(c19)
b20 = self.b20(o19)
c20 = torch.cat([c19, b20], dim=1)
o20 = self.c20(c20)
a6 = o20 + a5
#c_out = torch.cat([b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15, b16, b17, b18, b19, b20], dim=1)
#b = self.convert(c_out)
b_out = a6 + x
out = self.upsample(b_out, scale=self.scale )
out = self.tail(out)
f_out = self.add_mean(out)
return f_out
def load_state_dict(self, state_dict, strict=False):
own_state = self.state_dict()
for name, param in state_dict.items():
if name in own_state:
if isinstance(param, nn.Parameter):
param = param.data
try:
own_state[name].copy_(param)
except Exception:
if name.find('tail') >= 0 or name.find('upsample') >= 0:
print('Replace pre-trained upsampler to new one...')
else:
raise RuntimeError('While copying the parameter named {}, '
'whose dimensions in the model are {} and '
'whose dimensions in the checkpoint are {}.'
.format(name, own_state[name].size(), param.size()))
elif strict:
if name.find('tail') == -1:
raise KeyError('unexpected key "{}" in state_dict'
.format(name))
if strict:
missing = set(own_state.keys()) - set(state_dict.keys())
if len(missing) > 0:
raise KeyError('missing keys in state_dict: "{}"'.format(missing))
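# Hedged usage sketch (not part of the original source): build the network for
# 2x super-resolution and push a dummy batch through it. The `args` namespace
# is an assumption standing in for the option object expected by make_model.
def _example_forward():
    from types import SimpleNamespace
    args = SimpleNamespace(scale=[2])
    model = make_model(args)
    dummy = torch.randn(1, 3, 32, 32)
    # A scale factor of 2 should map 32x32 inputs to 64x64 outputs.
    return model(dummy).shape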
|
a = b = 0
_ = (not a and not b)
_ = (not a or not b)
_ = not (b + a)
_ = (not b + a)
|
# global
import math
import tensorflow as tf
from numbers import Number
from typing import Union, Tuple, Optional, List
from tensorflow.python.types.core import Tensor
def flip(x: Tensor,
axis: Optional[Union[int, Tuple[int], List[int]]] = None)\
-> Tensor:
num_dims = len(x.shape)
if not num_dims:
return x
if axis is None:
new_axis = list(range(num_dims))
else:
new_axis = axis
if type(new_axis) is int:
new_axis = [new_axis]
new_axis = [item + num_dims if item < 0 else item for item in new_axis]
return tf.reverse(x, new_axis)
def expand_dims(x: Tensor,
axis: Optional[Union[int, Tuple[int], List[int]]] = None) \
-> Tensor:
try:
return tf.expand_dims(x, axis)
except tf.errors.InvalidArgumentError as error:
raise IndexError(error)
# Extra #
# ------#
def split(x, num_or_size_splits=None, axis=0, with_remainder=False):
if x.shape == ():
if num_or_size_splits is not None and num_or_size_splits != 1:
raise Exception('input array had no shape, but num_sections specified was {}'.format(num_or_size_splits))
return [x]
if num_or_size_splits is None:
dim_size = tf.shape(x)[axis]
num_or_size_splits = dim_size
elif isinstance(num_or_size_splits, int) and with_remainder:
num_chunks = x.shape[axis] / num_or_size_splits
num_chunks_int = math.floor(num_chunks)
remainder = num_chunks - num_chunks_int
if remainder != 0:
num_or_size_splits = [num_or_size_splits]*num_chunks_int + [int(remainder*num_or_size_splits)]
return tf.split(x, num_or_size_splits, axis)
repeat = tf.repeat
def tile(x, reps):
if x.shape == ():
x = tf.reshape(x, (-1,))
if isinstance(reps, Number):
reps = [reps]
if isinstance(reps, Tensor) and reps.shape == ():
reps = tf.reshape(reps, (-1,))
return tf.tile(x, reps)
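# Hedged usage sketch (not part of the original source) exercising the
# wrappers defined above.
def _example_ops():
    x = tf.constant([[1, 2, 3], [4, 5, 6]])
    flipped = flip(x, axis=-1)                       # reverse the last axis
    halves = split(x, num_or_size_splits=2, axis=0)  # two (1, 3) tensors
    tiled = tile(tf.constant([1, 2]), 2)             # -> [1, 2, 1, 2]
    return flipped, halves, tiled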
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import msrest.serialization
class Address(msrest.serialization.Model):
"""The shipping address of the customer.
All required parameters must be populated in order to send to Azure.
:param address_line1: Required. The address line1.
:type address_line1: str
:param address_line2: The address line2.
:type address_line2: str
:param address_line3: The address line3.
:type address_line3: str
:param postal_code: Required. The postal code.
:type postal_code: str
:param city: Required. The city name.
:type city: str
:param state: Required. The state name.
:type state: str
:param country: Required. The country name.
:type country: str
"""
_validation = {
'address_line1': {'required': True},
'postal_code': {'required': True},
'city': {'required': True},
'state': {'required': True},
'country': {'required': True},
}
_attribute_map = {
'address_line1': {'key': 'addressLine1', 'type': 'str'},
'address_line2': {'key': 'addressLine2', 'type': 'str'},
'address_line3': {'key': 'addressLine3', 'type': 'str'},
'postal_code': {'key': 'postalCode', 'type': 'str'},
'city': {'key': 'city', 'type': 'str'},
'state': {'key': 'state', 'type': 'str'},
'country': {'key': 'country', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Address, self).__init__(**kwargs)
self.address_line1 = kwargs['address_line1']
self.address_line2 = kwargs.get('address_line2', None)
self.address_line3 = kwargs.get('address_line3', None)
self.postal_code = kwargs['postal_code']
self.city = kwargs['city']
self.state = kwargs['state']
self.country = kwargs['country']
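# Usage sketch (illustrative values, not part of the generated code): models are
# constructed with keyword arguments, and serialize() maps attribute names to the
# wire names declared in _attribute_map (address_line1 -> addressLine1, ...).
def _example_address_body() -> dict:
    address = Address(
        address_line1="1 Example Way",
        postal_code="98052",
        city="Redmond",
        state="WA",
        country="USA",
    )
    # Returns {'addressLine1': '1 Example Way', 'postalCode': '98052',
    #          'city': 'Redmond', 'state': 'WA', 'country': 'USA'}
    return address.serialize()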
class ARMBaseModel(msrest.serialization.Model):
"""Represents the base class for all object models.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The path ID that uniquely identifies the object.
:vartype id: str
:ivar name: The object name.
:vartype name: str
:ivar type: The hierarchical type of the object.
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ARMBaseModel, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
class Alert(ARMBaseModel):
"""Alert on the data box edge/gateway device.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The path ID that uniquely identifies the object.
:vartype id: str
:ivar name: The object name.
:vartype name: str
:ivar type: The hierarchical type of the object.
:vartype type: str
:ivar title: Alert title.
:vartype title: str
:ivar alert_type: Alert type.
:vartype alert_type: str
:ivar appeared_at_date_time: UTC time when the alert appeared.
:vartype appeared_at_date_time: ~datetime.datetime
:ivar recommendation: Alert recommendation.
:vartype recommendation: str
:ivar severity: Severity of the alert. Possible values include: "Informational", "Warning",
"Critical".
:vartype severity: str or ~azure.mgmt.databoxedge.v2020_05_01_preview.models.AlertSeverity
:ivar error_details: Error details of the alert.
:vartype error_details: ~azure.mgmt.databoxedge.v2020_05_01_preview.models.AlertErrorDetails
:ivar detailed_information: Alert details.
:vartype detailed_information: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'title': {'readonly': True},
'alert_type': {'readonly': True},
'appeared_at_date_time': {'readonly': True},
'recommendation': {'readonly': True},
'severity': {'readonly': True},
'error_details': {'readonly': True},
'detailed_information': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'title': {'key': 'properties.title', 'type': 'str'},
'alert_type': {'key': 'properties.alertType', 'type': 'str'},
'appeared_at_date_time': {'key': 'properties.appearedAtDateTime', 'type': 'iso-8601'},
'recommendation': {'key': 'properties.recommendation', 'type': 'str'},
'severity': {'key': 'properties.severity', 'type': 'str'},
'error_details': {'key': 'properties.errorDetails', 'type': 'AlertErrorDetails'},
'detailed_information': {'key': 'properties.detailedInformation', 'type': '{str}'},
}
def __init__(
self,
**kwargs
):
super(Alert, self).__init__(**kwargs)
self.title = None
self.alert_type = None
self.appeared_at_date_time = None
self.recommendation = None
self.severity = None
self.error_details = None
self.detailed_information = None
class AlertErrorDetails(msrest.serialization.Model):
"""Error details for the alert.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar error_code: Error code.
:vartype error_code: str
:ivar error_message: Error Message.
:vartype error_message: str
:ivar occurrences: Number of occurrences.
:vartype occurrences: int
"""
_validation = {
'error_code': {'readonly': True},
'error_message': {'readonly': True},
'occurrences': {'readonly': True},
}
_attribute_map = {
'error_code': {'key': 'errorCode', 'type': 'str'},
'error_message': {'key': 'errorMessage', 'type': 'str'},
'occurrences': {'key': 'occurrences', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(AlertErrorDetails, self).__init__(**kwargs)
self.error_code = None
self.error_message = None
self.occurrences = None
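# Hedged sketch (illustrative payload): read-only models such as AlertErrorDetails
# stay None when constructed directly and are populated from a service response
# via deserialize().
def _example_alert_error_details() -> "AlertErrorDetails":
    payload = {"errorCode": "ExampleCode", "errorMessage": "Example message", "occurrences": 3}
    details = AlertErrorDetails.deserialize(payload)
    assert details.occurrences == 3
    return details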
class AlertList(msrest.serialization.Model):
"""Collection of alerts.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The value.
:vartype value: list[~azure.mgmt.databoxedge.v2020_05_01_preview.models.Alert]
:ivar next_link: Link to the next set of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Alert]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AlertList, self).__init__(**kwargs)
self.value = None
self.next_link = None
class AsymmetricEncryptedSecret(msrest.serialization.Model):
"""Represent the secrets intended for encryption with asymmetric key pair.
All required parameters must be populated in order to send to Azure.
:param value: Required. The value of the secret.
:type value: str
:param encryption_cert_thumbprint: Thumbprint certificate used to encrypt \"Value\". If the
value is unencrypted, it will be null.
:type encryption_cert_thumbprint: str
:param encryption_algorithm: Required. The algorithm used to encrypt "Value". Possible values
include: "None", "AES256", "RSAES_PKCS1_v_1_5".
:type encryption_algorithm: str or
~azure.mgmt.databoxedge.v2020_05_01_preview.models.EncryptionAlgorithm
"""
_validation = {
'value': {'required': True},
'encryption_algorithm': {'required': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': 'str'},
'encryption_cert_thumbprint': {'key': 'encryptionCertThumbprint', 'type': 'str'},
'encryption_algorithm': {'key': 'encryptionAlgorithm', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AsymmetricEncryptedSecret, self).__init__(**kwargs)
self.value = kwargs['value']
self.encryption_cert_thumbprint = kwargs.get('encryption_cert_thumbprint', None)
self.encryption_algorithm = kwargs['encryption_algorithm']
class Authentication(msrest.serialization.Model):
"""Authentication mechanism for IoT devices.
:param symmetric_key: Symmetric key for authentication.
:type symmetric_key: ~azure.mgmt.databoxedge.v2020_05_01_preview.models.SymmetricKey
"""
_attribute_map = {
'symmetric_key': {'key': 'symmetricKey', 'type': 'SymmetricKey'},
}
def __init__(
self,
**kwargs
):
super(Authentication, self).__init__(**kwargs)
self.symmetric_key = kwargs.get('symmetric_key', None)
class AzureContainerInfo(msrest.serialization.Model):
"""Azure container mapping of the endpoint.
All required parameters must be populated in order to send to Azure.
:param storage_account_credential_id: Required. ID of the storage account credential used to
access storage.
:type storage_account_credential_id: str
:param container_name: Required. Container name (Based on the data format specified, this
represents the name of Azure Files/Page blob/Block blob).
:type container_name: str
:param data_format: Required. Storage format used for the file represented by the share.
Possible values include: "BlockBlob", "PageBlob", "AzureFile".
:type data_format: str or
~azure.mgmt.databoxedge.v2020_05_01_preview.models.AzureContainerDataFormat
"""
_validation = {
'storage_account_credential_id': {'required': True},
'container_name': {'required': True},
'data_format': {'required': True},
}
_attribute_map = {
'storage_account_credential_id': {'key': 'storageAccountCredentialId', 'type': 'str'},
'container_name': {'key': 'containerName', 'type': 'str'},
'data_format': {'key': 'dataFormat', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AzureContainerInfo, self).__init__(**kwargs)
self.storage_account_credential_id = kwargs['storage_account_credential_id']
self.container_name = kwargs['container_name']
self.data_format = kwargs['data_format']
class BandwidthSchedule(ARMBaseModel):
"""The bandwidth schedule details.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The path ID that uniquely identifies the object.
:vartype id: str
:ivar name: The object name.
:vartype name: str
:ivar type: The hierarchical type of the object.
:vartype type: str
:param start: Required. The start time of the schedule in UTC.
:type start: str
:param stop: Required. The stop time of the schedule in UTC.
:type stop: str
:param rate_in_mbps: Required. The bandwidth rate in Mbps.
:type rate_in_mbps: int
:param days: Required. The days of the week when this schedule is applicable.
:type days: list[str or ~azure.mgmt.databoxedge.v2020_05_01_preview.models.DayOfWeek]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'start': {'required': True},
'stop': {'required': True},
'rate_in_mbps': {'required': True},
'days': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'start': {'key': 'properties.start', 'type': 'str'},
'stop': {'key': 'properties.stop', 'type': 'str'},
'rate_in_mbps': {'key': 'properties.rateInMbps', 'type': 'int'},
'days': {'key': 'properties.days', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(BandwidthSchedule, self).__init__(**kwargs)
self.start = kwargs['start']
self.stop = kwargs['stop']
self.rate_in_mbps = kwargs['rate_in_mbps']
self.days = kwargs['days']
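# Hedged sketch (illustrative values): serialize() re-nests the flattened
# "properties.*" keys from _attribute_map into a "properties" object on the wire,
# and read-only fields (id, name, type) are omitted by default.
def _example_bandwidth_schedule_body() -> dict:
    schedule = BandwidthSchedule(
        start="00:00:00", stop="13:59:00", rate_in_mbps=100, days=["Sunday", "Monday"]
    )
    # Returns {'properties': {'start': '00:00:00', 'stop': '13:59:00',
    #                         'rateInMbps': 100, 'days': ['Sunday', 'Monday']}}
    return schedule.serialize()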
class BandwidthSchedulesList(msrest.serialization.Model):
"""The collection of bandwidth schedules.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of bandwidth schedules.
:vartype value: list[~azure.mgmt.databoxedge.v2020_05_01_preview.models.BandwidthSchedule]
:ivar next_link: Link to the next set of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[BandwidthSchedule]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(BandwidthSchedulesList, self).__init__(**kwargs)
self.value = None
self.next_link = None
class ClientAccessRight(msrest.serialization.Model):
"""The mapping between a particular client IP and the type of access client has on the NFS share.
All required parameters must be populated in order to send to Azure.
:param client: Required. IP of the client.
:type client: str
:param access_permission: Required. Type of access to be allowed for the client. Possible
values include: "NoAccess", "ReadOnly", "ReadWrite".
:type access_permission: str or
~azure.mgmt.databoxedge.v2020_05_01_preview.models.ClientPermissionType
"""
_validation = {
'client': {'required': True},
'access_permission': {'required': True},
}
_attribute_map = {
'client': {'key': 'client', 'type': 'str'},
'access_permission': {'key': 'accessPermission', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ClientAccessRight, self).__init__(**kwargs)
self.client = kwargs['client']
self.access_permission = kwargs['access_permission']
class CloudErrorBody(msrest.serialization.Model):
"""An error response from the service.
:param code: An identifier for the error. Codes are invariant and are intended to be consumed
programmatically.
:type code: str
:param message: A message describing the error, intended to be suitable for display in a user
interface.
:type message: str
:param details: A list of additional details about the error.
:type details: list[~azure.mgmt.databoxedge.v2020_05_01_preview.models.CloudErrorBody]
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'details': {'key': 'details', 'type': '[CloudErrorBody]'},
}
def __init__(
self,
**kwargs
):
super(CloudErrorBody, self).__init__(**kwargs)
self.code = kwargs.get('code', None)
self.message = kwargs.get('message', None)
self.details = kwargs.get('details', None)
class ContactDetails(msrest.serialization.Model):
"""Contains all the contact details of the customer.
All required parameters must be populated in order to send to Azure.
:param contact_person: Required. The contact person name.
:type contact_person: str
:param company_name: Required. The name of the company.
:type company_name: str
:param phone: Required. The phone number.
:type phone: str
:param email_list: Required. The email list.
:type email_list: list[str]
"""
_validation = {
'contact_person': {'required': True},
'company_name': {'required': True},
'phone': {'required': True},
'email_list': {'required': True},
}
_attribute_map = {
'contact_person': {'key': 'contactPerson', 'type': 'str'},
'company_name': {'key': 'companyName', 'type': 'str'},
'phone': {'key': 'phone', 'type': 'str'},
'email_list': {'key': 'emailList', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(ContactDetails, self).__init__(**kwargs)
self.contact_person = kwargs['contact_person']
self.company_name = kwargs['company_name']
self.phone = kwargs['phone']
self.email_list = kwargs['email_list']
class Container(ARMBaseModel):
"""Represents a container on the Data Box Edge/Gateway device.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The path ID that uniquely identifies the object.
:vartype id: str
:ivar name: The object name.
:vartype name: str
:ivar type: The hierarchical type of the object.
:vartype type: str
:ivar container_status: Current status of the container. Possible values include: "OK",
"Offline", "Unknown", "Updating", "NeedsAttention".
:vartype container_status: str or
~azure.mgmt.databoxedge.v2020_05_01_preview.models.ContainerStatus
:param data_format: Required. DataFormat for Container. Possible values include: "BlockBlob",
"PageBlob", "AzureFile".
:type data_format: str or
~azure.mgmt.databoxedge.v2020_05_01_preview.models.AzureContainerDataFormat
:ivar refresh_details: Details of the refresh job on this container.
:vartype refresh_details: ~azure.mgmt.databoxedge.v2020_05_01_preview.models.RefreshDetails
:ivar created_date_time: The UTC time when container got created.
:vartype created_date_time: ~datetime.datetime
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'container_status': {'readonly': True},
'data_format': {'required': True},
'refresh_details': {'readonly': True},
'created_date_time': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'container_status': {'key': 'properties.containerStatus', 'type': 'str'},
'data_format': {'key': 'properties.dataFormat', 'type': 'str'},
'refresh_details': {'key': 'properties.refreshDetails', 'type': 'RefreshDetails'},
'created_date_time': {'key': 'properties.createdDateTime', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(Container, self).__init__(**kwargs)
self.container_status = None
self.data_format = kwargs['data_format']
self.refresh_details = None
self.created_date_time = None
class ContainerList(msrest.serialization.Model):
"""Collection of all the containers on the Data Box Edge/Gateway device.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of containers.
:vartype value: list[~azure.mgmt.databoxedge.v2020_05_01_preview.models.Container]
:ivar next_link: Link to the next set of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Container]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ContainerList, self).__init__(**kwargs)
self.value = None
self.next_link = None
class DataBoxEdgeDevice(ARMBaseModel):
"""The Data Box Edge/Gateway device.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The path ID that uniquely identifies the object.
:vartype id: str
:ivar name: The object name.
:vartype name: str
:ivar type: The hierarchical type of the object.
:vartype type: str
:param location: Required. The location of the device. This is a supported and registered Azure
geographical region (for example, West US, East US, or Southeast Asia). The geographical region
of a device cannot be changed once it is created, but if an identical geographical region is
specified on update, the request will succeed.
:type location: str
:param tags: A set of tags. The list of tags that describe the device. These tags can be used
to view and group this device (across resource groups).
:type tags: dict[str, str]
:param sku: The SKU type.
:type sku: ~azure.mgmt.databoxedge.v2020_05_01_preview.models.Sku
:param etag: The etag for the devices.
:type etag: str
:param data_box_edge_device_status: The status of the Data Box Edge/Gateway device. Possible
values include: "ReadyToSetup", "Online", "Offline", "NeedsAttention", "Disconnected",
"PartiallyDisconnected", "Maintenance".
:type data_box_edge_device_status: str or
~azure.mgmt.databoxedge.v2020_05_01_preview.models.DataBoxEdgeDeviceStatus
:ivar serial_number: The Serial Number of Data Box Edge/Gateway device.
:vartype serial_number: str
:param description: The Description of the Data Box Edge/Gateway device.
:type description: str
:param model_description: The description of the Data Box Edge/Gateway device model.
:type model_description: str
:ivar device_type: The type of the Data Box Edge/Gateway device. Possible values include:
"DataBoxEdgeDevice".
:vartype device_type: str or ~azure.mgmt.databoxedge.v2020_05_01_preview.models.DeviceType
:param friendly_name: The Data Box Edge/Gateway device name.
:type friendly_name: str
:ivar culture: The Data Box Edge/Gateway device culture.
:vartype culture: str
:ivar device_model: The Data Box Edge/Gateway device model.
:vartype device_model: str
:ivar device_software_version: The Data Box Edge/Gateway device software version.
:vartype device_software_version: str
:ivar device_local_capacity: The Data Box Edge/Gateway device local capacity in MB.
:vartype device_local_capacity: long
:ivar time_zone: The Data Box Edge/Gateway device timezone.
:vartype time_zone: str
    :ivar device_hcs_version: The device software version number (for example, 1.2.18105.6).
:vartype device_hcs_version: str
:ivar configured_role_types: Type of compute roles configured.
:vartype configured_role_types: list[str or
~azure.mgmt.databoxedge.v2020_05_01_preview.models.RoleTypes]
:ivar node_count: The number of nodes in the cluster.
:vartype node_count: int
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'serial_number': {'readonly': True},
'device_type': {'readonly': True},
'culture': {'readonly': True},
'device_model': {'readonly': True},
'device_software_version': {'readonly': True},
'device_local_capacity': {'readonly': True},
'time_zone': {'readonly': True},
'device_hcs_version': {'readonly': True},
'configured_role_types': {'readonly': True},
'node_count': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'sku': {'key': 'sku', 'type': 'Sku'},
'etag': {'key': 'etag', 'type': 'str'},
'data_box_edge_device_status': {'key': 'properties.dataBoxEdgeDeviceStatus', 'type': 'str'},
'serial_number': {'key': 'properties.serialNumber', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'model_description': {'key': 'properties.modelDescription', 'type': 'str'},
'device_type': {'key': 'properties.deviceType', 'type': 'str'},
'friendly_name': {'key': 'properties.friendlyName', 'type': 'str'},
'culture': {'key': 'properties.culture', 'type': 'str'},
'device_model': {'key': 'properties.deviceModel', 'type': 'str'},
'device_software_version': {'key': 'properties.deviceSoftwareVersion', 'type': 'str'},
'device_local_capacity': {'key': 'properties.deviceLocalCapacity', 'type': 'long'},
'time_zone': {'key': 'properties.timeZone', 'type': 'str'},
'device_hcs_version': {'key': 'properties.deviceHcsVersion', 'type': 'str'},
'configured_role_types': {'key': 'properties.configuredRoleTypes', 'type': '[str]'},
'node_count': {'key': 'properties.nodeCount', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(DataBoxEdgeDevice, self).__init__(**kwargs)
self.location = kwargs['location']
self.tags = kwargs.get('tags', None)
self.sku = kwargs.get('sku', None)
self.etag = kwargs.get('etag', None)
self.data_box_edge_device_status = kwargs.get('data_box_edge_device_status', None)
self.serial_number = None
self.description = kwargs.get('description', None)
self.model_description = kwargs.get('model_description', None)
self.device_type = None
self.friendly_name = kwargs.get('friendly_name', None)
self.culture = None
self.device_model = None
self.device_software_version = None
self.device_local_capacity = None
self.time_zone = None
self.device_hcs_version = None
self.configured_role_types = None
self.node_count = None
class DataBoxEdgeDeviceExtendedInfo(ARMBaseModel):
"""The extended Info of the Data Box Edge/Gateway device.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The path ID that uniquely identifies the object.
:vartype id: str
:ivar name: The object name.
:vartype name: str
:ivar type: The hierarchical type of the object.
:vartype type: str
:param encryption_key_thumbprint: The digital signature of encrypted certificate.
:type encryption_key_thumbprint: str
:param encryption_key: The public part of the encryption certificate. Client uses this to
encrypt any secret.
:type encryption_key: str
:ivar resource_key: The Resource ID of the Resource.
:vartype resource_key: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'resource_key': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'encryption_key_thumbprint': {'key': 'properties.encryptionKeyThumbprint', 'type': 'str'},
'encryption_key': {'key': 'properties.encryptionKey', 'type': 'str'},
'resource_key': {'key': 'properties.resourceKey', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DataBoxEdgeDeviceExtendedInfo, self).__init__(**kwargs)
self.encryption_key_thumbprint = kwargs.get('encryption_key_thumbprint', None)
self.encryption_key = kwargs.get('encryption_key', None)
self.resource_key = None
class DataBoxEdgeDeviceList(msrest.serialization.Model):
"""The collection of Data Box Edge/Gateway devices.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of Data Box Edge/Gateway devices.
:vartype value: list[~azure.mgmt.databoxedge.v2020_05_01_preview.models.DataBoxEdgeDevice]
:ivar next_link: Link to the next set of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[DataBoxEdgeDevice]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DataBoxEdgeDeviceList, self).__init__(**kwargs)
self.value = None
self.next_link = None
class DataBoxEdgeDevicePatch(msrest.serialization.Model):
"""The Data Box Edge/Gateway device patch.
:param tags: A set of tags. The tags attached to the Data Box Edge/Gateway resource.
:type tags: dict[str, str]
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
**kwargs
):
super(DataBoxEdgeDevicePatch, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
class DataBoxEdgeSku(msrest.serialization.Model):
"""The Sku information.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar resource_type: The type of the resource.
:vartype resource_type: str
:ivar name: The Sku name. Possible values include: "Gateway", "Edge", "TEA_1Node",
"TEA_1Node_UPS", "TEA_1Node_Heater", "TEA_1Node_UPS_Heater", "TEA_4Node_Heater",
"TEA_4Node_UPS_Heater", "TMA", "TDC", "TCA_Large", "TCA_Small", "GPU".
:vartype name: str or ~azure.mgmt.databoxedge.v2020_05_01_preview.models.SkuName
:ivar kind: The Sku kind.
:vartype kind: str
:ivar tier: The Sku tier. Possible values include: "Standard".
:vartype tier: str or ~azure.mgmt.databoxedge.v2020_05_01_preview.models.SkuTier
    :ivar size: The Sku size.
:vartype size: str
:ivar family: The Sku family.
:vartype family: str
:ivar locations: Availability of the Sku for the region.
:vartype locations: list[str]
:ivar api_versions: The API versions in which Sku is available.
:vartype api_versions: list[str]
:ivar location_info: Availability of the Sku for the location/zone/site.
:vartype location_info:
list[~azure.mgmt.databoxedge.v2020_05_01_preview.models.SkuLocationInfo]
:ivar costs: The pricing info of the Sku.
:vartype costs: list[~azure.mgmt.databoxedge.v2020_05_01_preview.models.SkuCost]
:ivar restrictions: Restriction info of the SKU.
:vartype restrictions: list[~azure.mgmt.databoxedge.v2020_05_01_preview.models.SkuRestriction]
    :ivar signup_option: Can the SKU be signed up. Possible values include: "None", "Available".
:vartype signup_option: str or
~azure.mgmt.databoxedge.v2020_05_01_preview.models.SkuSignupOption
:ivar version: Sku version. Possible values include: "Stable", "Preview".
:vartype version: str or ~azure.mgmt.databoxedge.v2020_05_01_preview.models.SkuVersion
:ivar availability: Is SKU available. Possible values include: "Available", "Unavailable".
:vartype availability: str or
~azure.mgmt.databoxedge.v2020_05_01_preview.models.SkuAvailability
"""
_validation = {
'resource_type': {'readonly': True},
'name': {'readonly': True},
'kind': {'readonly': True},
'tier': {'readonly': True},
'size': {'readonly': True},
'family': {'readonly': True},
'locations': {'readonly': True},
'api_versions': {'readonly': True},
'location_info': {'readonly': True},
'costs': {'readonly': True},
'restrictions': {'readonly': True},
'signup_option': {'readonly': True},
'version': {'readonly': True},
'availability': {'readonly': True},
}
_attribute_map = {
'resource_type': {'key': 'resourceType', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
'size': {'key': 'size', 'type': 'str'},
'family': {'key': 'family', 'type': 'str'},
'locations': {'key': 'locations', 'type': '[str]'},
'api_versions': {'key': 'apiVersions', 'type': '[str]'},
'location_info': {'key': 'locationInfo', 'type': '[SkuLocationInfo]'},
'costs': {'key': 'costs', 'type': '[SkuCost]'},
'restrictions': {'key': 'restrictions', 'type': '[SkuRestriction]'},
'signup_option': {'key': 'signupOption', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'},
'availability': {'key': 'availability', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DataBoxEdgeSku, self).__init__(**kwargs)
self.resource_type = None
self.name = None
self.kind = None
self.tier = None
self.size = None
self.family = None
self.locations = None
self.api_versions = None
self.location_info = None
self.costs = None
self.restrictions = None
self.signup_option = None
self.version = None
self.availability = None
class DataBoxEdgeSkuList(msrest.serialization.Model):
"""List of SKU Information objects.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of ResourceType Sku.
:vartype value: list[~azure.mgmt.databoxedge.v2020_05_01_preview.models.DataBoxEdgeSku]
:ivar next_link: Links to the next set of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[DataBoxEdgeSku]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DataBoxEdgeSkuList, self).__init__(**kwargs)
self.value = None
self.next_link = None
class Trigger(ARMBaseModel):
"""Trigger details.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: FileEventTrigger, PeriodicTimerEventTrigger.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The path ID that uniquely identifies the object.
:vartype id: str
:ivar name: The object name.
:vartype name: str
:ivar type: The hierarchical type of the object.
:vartype type: str
    :param kind: Required. Trigger Kind. Constant filled by server. Possible values include:
"FileEvent", "PeriodicTimerEvent".
:type kind: str or ~azure.mgmt.databoxedge.v2020_05_01_preview.models.TriggerEventType
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'kind': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
}
_subtype_map = {
'kind': {'FileEvent': 'FileEventTrigger', 'PeriodicTimerEvent': 'PeriodicTimerEventTrigger'}
}
def __init__(
self,
**kwargs
):
super(Trigger, self).__init__(**kwargs)
self.kind = 'Trigger' # type: str
class FileEventTrigger(Trigger):
"""Trigger details.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The path ID that uniquely identifies the object.
:vartype id: str
:ivar name: The object name.
:vartype name: str
:ivar type: The hierarchical type of the object.
:vartype type: str
    :param kind: Required. Trigger Kind. Constant filled by server. Possible values include:
"FileEvent", "PeriodicTimerEvent".
:type kind: str or ~azure.mgmt.databoxedge.v2020_05_01_preview.models.TriggerEventType
:param source_info: Required. File event source details.
:type source_info: ~azure.mgmt.databoxedge.v2020_05_01_preview.models.FileSourceInfo
:param sink_info: Required. Role sink info.
:type sink_info: ~azure.mgmt.databoxedge.v2020_05_01_preview.models.RoleSinkInfo
:param custom_context_tag: A custom context tag typically used to correlate the trigger against
its usage. For example, if a periodic timer trigger is intended for certain specific IoT
modules in the device, the tag can be the name or the image URL of the module.
:type custom_context_tag: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'kind': {'required': True},
'source_info': {'required': True},
'sink_info': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'source_info': {'key': 'properties.sourceInfo', 'type': 'FileSourceInfo'},
'sink_info': {'key': 'properties.sinkInfo', 'type': 'RoleSinkInfo'},
'custom_context_tag': {'key': 'properties.customContextTag', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(FileEventTrigger, self).__init__(**kwargs)
self.kind = 'FileEvent' # type: str
self.source_info = kwargs['source_info']
self.sink_info = kwargs['sink_info']
self.custom_context_tag = kwargs.get('custom_context_tag', None)
class FileSourceInfo(msrest.serialization.Model):
"""File source details.
All required parameters must be populated in order to send to Azure.
:param share_id: Required. File share ID.
:type share_id: str
"""
_validation = {
'share_id': {'required': True},
}
_attribute_map = {
'share_id': {'key': 'shareId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(FileSourceInfo, self).__init__(**kwargs)
self.share_id = kwargs['share_id']
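# Hedged sketch (illustrative payload): Trigger._subtype_map drives polymorphic
# deserialization, so a payload whose "kind" discriminator is "FileEvent" comes
# back as a FileEventTrigger, with the flattened properties.sourceInfo key mapped
# onto a FileSourceInfo instance.
def _example_polymorphic_trigger() -> "FileEventTrigger":
    payload = {
        "name": "example-trigger",
        "kind": "FileEvent",
        "properties": {"sourceInfo": {"shareId": "/example/share/id"}},
    }
    trigger = Trigger.deserialize(payload)
    assert isinstance(trigger, FileEventTrigger)
    assert trigger.source_info.share_id == "/example/share/id"
    return trigger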
class ImageRepositoryCredential(msrest.serialization.Model):
"""Image repository credential.
All required parameters must be populated in order to send to Azure.
:param image_repository_url: Required. Image repository url (e.g.: mcr.microsoft.com).
:type image_repository_url: str
:param user_name: Required. Repository user name.
:type user_name: str
:param password: Repository user password.
:type password: ~azure.mgmt.databoxedge.v2020_05_01_preview.models.AsymmetricEncryptedSecret
"""
_validation = {
'image_repository_url': {'required': True},
'user_name': {'required': True},
}
_attribute_map = {
'image_repository_url': {'key': 'imageRepositoryUrl', 'type': 'str'},
'user_name': {'key': 'userName', 'type': 'str'},
'password': {'key': 'password', 'type': 'AsymmetricEncryptedSecret'},
}
def __init__(
self,
**kwargs
):
super(ImageRepositoryCredential, self).__init__(**kwargs)
self.image_repository_url = kwargs['image_repository_url']
self.user_name = kwargs['user_name']
self.password = kwargs.get('password', None)
class IoTDeviceInfo(msrest.serialization.Model):
"""Metadata of IoT device/IoT Edge device to be configured.
All required parameters must be populated in order to send to Azure.
:param device_id: Required. ID of the IoT device/edge device.
:type device_id: str
:param io_t_host_hub: Required. Host name for the IoT hub associated to the device.
:type io_t_host_hub: str
:param io_t_host_hub_id: Id for the IoT hub associated to the device.
:type io_t_host_hub_id: str
:param authentication: Encrypted IoT device/IoT edge device connection string.
:type authentication: ~azure.mgmt.databoxedge.v2020_05_01_preview.models.Authentication
"""
_validation = {
'device_id': {'required': True},
'io_t_host_hub': {'required': True},
}
_attribute_map = {
'device_id': {'key': 'deviceId', 'type': 'str'},
'io_t_host_hub': {'key': 'ioTHostHub', 'type': 'str'},
'io_t_host_hub_id': {'key': 'ioTHostHubId', 'type': 'str'},
'authentication': {'key': 'authentication', 'type': 'Authentication'},
}
def __init__(
self,
**kwargs
):
super(IoTDeviceInfo, self).__init__(**kwargs)
self.device_id = kwargs['device_id']
self.io_t_host_hub = kwargs['io_t_host_hub']
self.io_t_host_hub_id = kwargs.get('io_t_host_hub_id', None)
self.authentication = kwargs.get('authentication', None)
class IoTEdgeAgentInfo(msrest.serialization.Model):
"""IoT edge agent details is optional, this will be used for download system Agent module while bootstrapping IoT Role if specified.
All required parameters must be populated in order to send to Azure.
:param image_name: Required. Name of the IoT edge agent image.
:type image_name: str
:param tag: Required. Image Tag.
:type tag: str
:param image_repository: Image repository details.
:type image_repository:
~azure.mgmt.databoxedge.v2020_05_01_preview.models.ImageRepositoryCredential
"""
_validation = {
'image_name': {'required': True},
'tag': {'required': True},
}
_attribute_map = {
'image_name': {'key': 'imageName', 'type': 'str'},
'tag': {'key': 'tag', 'type': 'str'},
'image_repository': {'key': 'imageRepository', 'type': 'ImageRepositoryCredential'},
}
def __init__(
self,
**kwargs
):
super(IoTEdgeAgentInfo, self).__init__(**kwargs)
self.image_name = kwargs['image_name']
self.tag = kwargs['tag']
self.image_repository = kwargs.get('image_repository', None)
class Role(ARMBaseModel):
"""Compute role.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: IoTRole.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The path ID that uniquely identifies the object.
:vartype id: str
:ivar name: The object name.
:vartype name: str
:ivar type: The hierarchical type of the object.
:vartype type: str
    :param kind: Required. Role type. Constant filled by server. Possible values include: "IOT",
"ASA", "Functions", "Cognitive".
:type kind: str or ~azure.mgmt.databoxedge.v2020_05_01_preview.models.RoleTypes
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'kind': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
}
_subtype_map = {
'kind': {'IOT': 'IoTRole'}
}
def __init__(
self,
**kwargs
):
super(Role, self).__init__(**kwargs)
self.kind = 'Role' # type: str
class IoTRole(Role):
"""Compute role.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The path ID that uniquely identifies the object.
:vartype id: str
:ivar name: The object name.
:vartype name: str
:ivar type: The hierarchical type of the object.
:vartype type: str
    :param kind: Required. Role type. Constant filled by server. Possible values include: "IOT",
"ASA", "Functions", "Cognitive".
:type kind: str or ~azure.mgmt.databoxedge.v2020_05_01_preview.models.RoleTypes
:param host_platform: Host OS supported by the IoT role. Possible values include: "Windows",
"Linux".
:type host_platform: str or ~azure.mgmt.databoxedge.v2020_05_01_preview.models.PlatformType
:param io_t_device_details: IoT device metadata to which data box edge device needs to be
connected.
:type io_t_device_details: ~azure.mgmt.databoxedge.v2020_05_01_preview.models.IoTDeviceInfo
:param io_t_edge_device_details: IoT edge device to which the IoT role needs to be configured.
:type io_t_edge_device_details:
~azure.mgmt.databoxedge.v2020_05_01_preview.models.IoTDeviceInfo
:param share_mappings: Mount points of shares in role(s).
:type share_mappings: list[~azure.mgmt.databoxedge.v2020_05_01_preview.models.MountPointMap]
    :param io_t_edge_agent_info: IoT edge agent details used to download the agent and bootstrap
     the IoT runtime.
:type io_t_edge_agent_info: ~azure.mgmt.databoxedge.v2020_05_01_preview.models.IoTEdgeAgentInfo
:ivar host_platform_type: Platform where the Iot runtime is hosted. Possible values include:
"KubernetesCluster", "LinuxVM".
:vartype host_platform_type: str or
~azure.mgmt.databoxedge.v2020_05_01_preview.models.HostPlatformType
:param role_status: Role status. Possible values include: "Enabled", "Disabled".
:type role_status: str or ~azure.mgmt.databoxedge.v2020_05_01_preview.models.RoleStatus
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'kind': {'required': True},
'host_platform_type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'host_platform': {'key': 'properties.hostPlatform', 'type': 'str'},
'io_t_device_details': {'key': 'properties.ioTDeviceDetails', 'type': 'IoTDeviceInfo'},
'io_t_edge_device_details': {'key': 'properties.ioTEdgeDeviceDetails', 'type': 'IoTDeviceInfo'},
'share_mappings': {'key': 'properties.shareMappings', 'type': '[MountPointMap]'},
'io_t_edge_agent_info': {'key': 'properties.ioTEdgeAgentInfo', 'type': 'IoTEdgeAgentInfo'},
'host_platform_type': {'key': 'properties.hostPlatformType', 'type': 'str'},
'role_status': {'key': 'properties.roleStatus', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IoTRole, self).__init__(**kwargs)
self.kind = 'IOT' # type: str
self.host_platform = kwargs.get('host_platform', None)
self.io_t_device_details = kwargs.get('io_t_device_details', None)
self.io_t_edge_device_details = kwargs.get('io_t_edge_device_details', None)
self.share_mappings = kwargs.get('share_mappings', None)
self.io_t_edge_agent_info = kwargs.get('io_t_edge_agent_info', None)
self.host_platform_type = None
self.role_status = kwargs.get('role_status', None)
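# Hedged sketch (illustrative values): constructing the IoTRole subclass directly
# fixes the "kind" discriminator to "IOT"; optional nested models such as
# IoTDeviceInfo are passed as keyword arguments like any other property.
def _example_iot_role() -> "IoTRole":
    return IoTRole(
        host_platform="Linux",
        io_t_device_details=IoTDeviceInfo(
            device_id="example-device", io_t_host_hub="example-hub.azure-devices.net"
        ),
        role_status="Enabled",
    )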
class Ipv4Config(msrest.serialization.Model):
"""Details related to the IPv4 address configuration.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar ip_address: The IPv4 address of the network adapter.
:vartype ip_address: str
:ivar subnet: The IPv4 subnet of the network adapter.
:vartype subnet: str
:ivar gateway: The IPv4 gateway of the network adapter.
:vartype gateway: str
"""
_validation = {
'ip_address': {'readonly': True},
'subnet': {'readonly': True},
'gateway': {'readonly': True},
}
_attribute_map = {
'ip_address': {'key': 'ipAddress', 'type': 'str'},
'subnet': {'key': 'subnet', 'type': 'str'},
'gateway': {'key': 'gateway', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Ipv4Config, self).__init__(**kwargs)
self.ip_address = None
self.subnet = None
self.gateway = None
class Ipv6Config(msrest.serialization.Model):
"""Details related to the IPv6 address configuration.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar ip_address: The IPv6 address of the network adapter.
:vartype ip_address: str
:ivar prefix_length: The IPv6 prefix of the network adapter.
:vartype prefix_length: int
:ivar gateway: The IPv6 gateway of the network adapter.
:vartype gateway: str
"""
_validation = {
'ip_address': {'readonly': True},
'prefix_length': {'readonly': True},
'gateway': {'readonly': True},
}
_attribute_map = {
'ip_address': {'key': 'ipAddress', 'type': 'str'},
'prefix_length': {'key': 'prefixLength', 'type': 'int'},
'gateway': {'key': 'gateway', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Ipv6Config, self).__init__(**kwargs)
self.ip_address = None
self.prefix_length = None
self.gateway = None
class Job(msrest.serialization.Model):
"""A device job.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The path ID that uniquely identifies the object.
:vartype id: str
:ivar name: The name of the object.
:vartype name: str
:ivar type: The hierarchical type of the object.
:vartype type: str
:ivar status: The current status of the job. Possible values include: "Invalid", "Running",
"Succeeded", "Failed", "Canceled", "Paused", "Scheduled".
:vartype status: str or ~azure.mgmt.databoxedge.v2020_05_01_preview.models.JobStatus
:ivar start_time: The UTC date and time at which the job started.
:vartype start_time: ~datetime.datetime
:ivar end_time: The UTC date and time at which the job completed.
:vartype end_time: ~datetime.datetime
:ivar percent_complete: The percentage of the job that is complete.
:vartype percent_complete: int
:ivar error: The error details.
:vartype error: ~azure.mgmt.databoxedge.v2020_05_01_preview.models.JobErrorDetails
:ivar job_type: The type of the job. Possible values include: "Invalid", "ScanForUpdates",
"DownloadUpdates", "InstallUpdates", "RefreshShare", "RefreshContainer".
:vartype job_type: str or ~azure.mgmt.databoxedge.v2020_05_01_preview.models.JobType
:ivar current_stage: Current stage of the update operation. Possible values include: "Unknown",
"Initial", "ScanStarted", "ScanComplete", "ScanFailed", "DownloadStarted", "DownloadComplete",
"DownloadFailed", "InstallStarted", "InstallComplete", "InstallFailed", "RebootInitiated",
"Success", "Failure", "RescanStarted", "RescanComplete", "RescanFailed".
:vartype current_stage: str or
~azure.mgmt.databoxedge.v2020_05_01_preview.models.UpdateOperationStage
:ivar download_progress: The download progress.
:vartype download_progress:
~azure.mgmt.databoxedge.v2020_05_01_preview.models.UpdateDownloadProgress
:ivar install_progress: The install progress.
:vartype install_progress:
~azure.mgmt.databoxedge.v2020_05_01_preview.models.UpdateInstallProgress
:ivar total_refresh_errors: Total number of errors encountered during the refresh process.
:vartype total_refresh_errors: int
:ivar error_manifest_file: Local share/remote container relative path to the error manifest
file of the refresh.
:vartype error_manifest_file: str
:ivar refreshed_entity_id: ARM ID of the entity that was refreshed.
:vartype refreshed_entity_id: str
:param folder: If only subfolders need to be refreshed, then the subfolder path inside the
share or container. (The path is empty if there are no subfolders.).
:type folder: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'status': {'readonly': True},
'start_time': {'readonly': True},
'end_time': {'readonly': True},
'percent_complete': {'readonly': True},
'error': {'readonly': True},
'job_type': {'readonly': True},
'current_stage': {'readonly': True},
'download_progress': {'readonly': True},
'install_progress': {'readonly': True},
'total_refresh_errors': {'readonly': True},
'error_manifest_file': {'readonly': True},
'refreshed_entity_id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'percent_complete': {'key': 'percentComplete', 'type': 'int'},
'error': {'key': 'error', 'type': 'JobErrorDetails'},
'job_type': {'key': 'properties.jobType', 'type': 'str'},
'current_stage': {'key': 'properties.currentStage', 'type': 'str'},
'download_progress': {'key': 'properties.downloadProgress', 'type': 'UpdateDownloadProgress'},
'install_progress': {'key': 'properties.installProgress', 'type': 'UpdateInstallProgress'},
'total_refresh_errors': {'key': 'properties.totalRefreshErrors', 'type': 'int'},
'error_manifest_file': {'key': 'properties.errorManifestFile', 'type': 'str'},
'refreshed_entity_id': {'key': 'properties.refreshedEntityId', 'type': 'str'},
'folder': {'key': 'properties.folder', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Job, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.status = None
self.start_time = None
self.end_time = None
self.percent_complete = None
self.error = None
self.job_type = None
self.current_stage = None
self.download_progress = None
self.install_progress = None
self.total_refresh_errors = None
self.error_manifest_file = None
self.refreshed_entity_id = None
self.folder = kwargs.get('folder', None)
class JobErrorDetails(msrest.serialization.Model):
"""The job error information containing the list of job errors.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar error_details: The error details.
:vartype error_details: list[~azure.mgmt.databoxedge.v2020_05_01_preview.models.JobErrorItem]
:ivar code: The code intended for programmatic access.
:vartype code: str
:ivar message: The message that describes the error in detail.
:vartype message: str
"""
_validation = {
'error_details': {'readonly': True},
'code': {'readonly': True},
'message': {'readonly': True},
}
_attribute_map = {
'error_details': {'key': 'errorDetails', 'type': '[JobErrorItem]'},
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(JobErrorDetails, self).__init__(**kwargs)
self.error_details = None
self.code = None
self.message = None
class JobErrorItem(msrest.serialization.Model):
"""The job error items.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar recommendations: The recommended actions.
:vartype recommendations: list[str]
:ivar code: The code intended for programmatic access.
:vartype code: str
:ivar message: The message that describes the error in detail.
:vartype message: str
"""
_validation = {
'recommendations': {'readonly': True},
'code': {'readonly': True},
'message': {'readonly': True},
}
_attribute_map = {
'recommendations': {'key': 'recommendations', 'type': '[str]'},
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(JobErrorItem, self).__init__(**kwargs)
self.recommendations = None
self.code = None
self.message = None
class MetricDimensionV1(msrest.serialization.Model):
"""Metric Dimension v1.
:param name: Name of the metrics dimension.
:type name: str
:param display_name: Display name of the metrics dimension.
:type display_name: str
:param to_be_exported_for_shoebox: To be exported to shoe box.
:type to_be_exported_for_shoebox: bool
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'to_be_exported_for_shoebox': {'key': 'toBeExportedForShoebox', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(MetricDimensionV1, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.display_name = kwargs.get('display_name', None)
self.to_be_exported_for_shoebox = kwargs.get('to_be_exported_for_shoebox', None)
class MetricSpecificationV1(msrest.serialization.Model):
"""Metric specification version 1.
:param name: Name of the metric.
:type name: str
:param display_name: Display name of the metric.
:type display_name: str
:param display_description: Description of the metric to be displayed.
:type display_description: str
:param unit: Metric units. Possible values include: "NotSpecified", "Percent", "Count",
"Seconds", "Milliseconds", "Bytes", "BytesPerSecond", "CountPerSecond".
:type unit: str or ~azure.mgmt.databoxedge.v2020_05_01_preview.models.MetricUnit
:param aggregation_type: Metric aggregation type. Possible values include: "NotSpecified",
"None", "Average", "Minimum", "Maximum", "Total", "Count".
:type aggregation_type: str or
~azure.mgmt.databoxedge.v2020_05_01_preview.models.MetricAggregationType
:param dimensions: Metric dimensions, other than default dimension which is resource.
:type dimensions: list[~azure.mgmt.databoxedge.v2020_05_01_preview.models.MetricDimensionV1]
:param fill_gap_with_zero: Set true to fill the gaps with zero.
:type fill_gap_with_zero: bool
:param category: Metric category. Possible values include: "Capacity", "Transaction".
:type category: str or ~azure.mgmt.databoxedge.v2020_05_01_preview.models.MetricCategory
:param resource_id_dimension_name_override: Resource name override.
:type resource_id_dimension_name_override: str
:param supported_time_grain_types: Support granularity of metrics.
:type supported_time_grain_types: list[str or
~azure.mgmt.databoxedge.v2020_05_01_preview.models.TimeGrain]
:param supported_aggregation_types: Support metric aggregation type.
:type supported_aggregation_types: list[str or
~azure.mgmt.databoxedge.v2020_05_01_preview.models.MetricAggregationType]
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'display_description': {'key': 'displayDescription', 'type': 'str'},
'unit': {'key': 'unit', 'type': 'str'},
'aggregation_type': {'key': 'aggregationType', 'type': 'str'},
'dimensions': {'key': 'dimensions', 'type': '[MetricDimensionV1]'},
'fill_gap_with_zero': {'key': 'fillGapWithZero', 'type': 'bool'},
'category': {'key': 'category', 'type': 'str'},
'resource_id_dimension_name_override': {'key': 'resourceIdDimensionNameOverride', 'type': 'str'},
'supported_time_grain_types': {'key': 'supportedTimeGrainTypes', 'type': '[str]'},
'supported_aggregation_types': {'key': 'supportedAggregationTypes', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(MetricSpecificationV1, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.display_name = kwargs.get('display_name', None)
self.display_description = kwargs.get('display_description', None)
self.unit = kwargs.get('unit', None)
self.aggregation_type = kwargs.get('aggregation_type', None)
self.dimensions = kwargs.get('dimensions', None)
self.fill_gap_with_zero = kwargs.get('fill_gap_with_zero', None)
self.category = kwargs.get('category', None)
self.resource_id_dimension_name_override = kwargs.get('resource_id_dimension_name_override', None)
self.supported_time_grain_types = kwargs.get('supported_time_grain_types', None)
self.supported_aggregation_types = kwargs.get('supported_aggregation_types', None)
class MountPointMap(msrest.serialization.Model):
"""The share mount point.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param share_id: Required. ID of the share mounted to the role VM.
:type share_id: str
:ivar role_id: ID of the role to which share is mounted.
:vartype role_id: str
:ivar mount_point: Mount point for the share.
:vartype mount_point: str
:ivar mount_type: Mounting type. Possible values include: "Volume", "HostPath".
:vartype mount_type: str or ~azure.mgmt.databoxedge.v2020_05_01_preview.models.MountType
:ivar role_type: Role type. Possible values include: "IOT", "ASA", "Functions", "Cognitive".
:vartype role_type: str or ~azure.mgmt.databoxedge.v2020_05_01_preview.models.RoleTypes
"""
_validation = {
'share_id': {'required': True},
'role_id': {'readonly': True},
'mount_point': {'readonly': True},
'mount_type': {'readonly': True},
'role_type': {'readonly': True},
}
_attribute_map = {
'share_id': {'key': 'shareId', 'type': 'str'},
'role_id': {'key': 'roleId', 'type': 'str'},
'mount_point': {'key': 'mountPoint', 'type': 'str'},
'mount_type': {'key': 'mountType', 'type': 'str'},
'role_type': {'key': 'roleType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(MountPointMap, self).__init__(**kwargs)
self.share_id = kwargs['share_id']
self.role_id = None
self.mount_point = None
self.mount_type = None
self.role_type = None
class NetworkAdapter(msrest.serialization.Model):
"""Represents the networkAdapter on a device.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar adapter_id: Instance ID of network adapter.
:vartype adapter_id: str
:ivar adapter_position: Hardware position of network adapter.
:vartype adapter_position:
~azure.mgmt.databoxedge.v2020_05_01_preview.models.NetworkAdapterPosition
:ivar index: Logical index of the adapter.
:vartype index: int
:ivar node_id: Node ID of the network adapter.
:vartype node_id: str
:ivar network_adapter_name: Network adapter name.
:vartype network_adapter_name: str
:ivar label: Hardware label for the adapter.
:vartype label: str
:ivar mac_address: MAC address.
:vartype mac_address: str
:ivar link_speed: Link speed.
:vartype link_speed: long
:ivar status: Value indicating whether this adapter is valid. Possible values include:
"Inactive", "Active".
:vartype status: str or ~azure.mgmt.databoxedge.v2020_05_01_preview.models.NetworkAdapterStatus
:param rdma_status: Value indicating whether this adapter is RDMA capable. Possible values
include: "Incapable", "Capable".
:type rdma_status: str or
~azure.mgmt.databoxedge.v2020_05_01_preview.models.NetworkAdapterRDMAStatus
:param dhcp_status: Value indicating whether this adapter has DHCP enabled. Possible values
include: "Disabled", "Enabled".
:type dhcp_status: str or
~azure.mgmt.databoxedge.v2020_05_01_preview.models.NetworkAdapterDHCPStatus
:ivar ipv4_configuration: The IPv4 configuration of the network adapter.
:vartype ipv4_configuration: ~azure.mgmt.databoxedge.v2020_05_01_preview.models.Ipv4Config
:ivar ipv6_configuration: The IPv6 configuration of the network adapter.
:vartype ipv6_configuration: ~azure.mgmt.databoxedge.v2020_05_01_preview.models.Ipv6Config
:ivar ipv6_link_local_address: The IPv6 local address.
:vartype ipv6_link_local_address: str
:ivar dns_servers: The list of DNS Servers of the device.
:vartype dns_servers: list[str]
"""
_validation = {
'adapter_id': {'readonly': True},
'adapter_position': {'readonly': True},
'index': {'readonly': True},
'node_id': {'readonly': True},
'network_adapter_name': {'readonly': True},
'label': {'readonly': True},
'mac_address': {'readonly': True},
'link_speed': {'readonly': True},
'status': {'readonly': True},
'ipv4_configuration': {'readonly': True},
'ipv6_configuration': {'readonly': True},
'ipv6_link_local_address': {'readonly': True},
'dns_servers': {'readonly': True},
}
_attribute_map = {
'adapter_id': {'key': 'adapterId', 'type': 'str'},
'adapter_position': {'key': 'adapterPosition', 'type': 'NetworkAdapterPosition'},
'index': {'key': 'index', 'type': 'int'},
'node_id': {'key': 'nodeId', 'type': 'str'},
'network_adapter_name': {'key': 'networkAdapterName', 'type': 'str'},
'label': {'key': 'label', 'type': 'str'},
'mac_address': {'key': 'macAddress', 'type': 'str'},
'link_speed': {'key': 'linkSpeed', 'type': 'long'},
'status': {'key': 'status', 'type': 'str'},
'rdma_status': {'key': 'rdmaStatus', 'type': 'str'},
'dhcp_status': {'key': 'dhcpStatus', 'type': 'str'},
'ipv4_configuration': {'key': 'ipv4Configuration', 'type': 'Ipv4Config'},
'ipv6_configuration': {'key': 'ipv6Configuration', 'type': 'Ipv6Config'},
'ipv6_link_local_address': {'key': 'ipv6LinkLocalAddress', 'type': 'str'},
'dns_servers': {'key': 'dnsServers', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(NetworkAdapter, self).__init__(**kwargs)
self.adapter_id = None
self.adapter_position = None
self.index = None
self.node_id = None
self.network_adapter_name = None
self.label = None
self.mac_address = None
self.link_speed = None
self.status = None
self.rdma_status = kwargs.get('rdma_status', None)
self.dhcp_status = kwargs.get('dhcp_status', None)
self.ipv4_configuration = None
self.ipv6_configuration = None
self.ipv6_link_local_address = None
self.dns_servers = None
class NetworkAdapterPosition(msrest.serialization.Model):
"""The network adapter position.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar network_group: The network group. Possible values include: "None", "NonRDMA", "RDMA".
:vartype network_group: str or ~azure.mgmt.databoxedge.v2020_05_01_preview.models.NetworkGroup
:ivar port: The port.
:vartype port: int
"""
_validation = {
'network_group': {'readonly': True},
'port': {'readonly': True},
}
_attribute_map = {
'network_group': {'key': 'networkGroup', 'type': 'str'},
'port': {'key': 'port', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(NetworkAdapterPosition, self).__init__(**kwargs)
self.network_group = None
self.port = None
class NetworkSettings(ARMBaseModel):
"""The network settings of a device.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The path ID that uniquely identifies the object.
:vartype id: str
:ivar name: The object name.
:vartype name: str
:ivar type: The hierarchical type of the object.
:vartype type: str
:ivar network_adapters: The network adapter list on the device.
:vartype network_adapters:
list[~azure.mgmt.databoxedge.v2020_05_01_preview.models.NetworkAdapter]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'network_adapters': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'network_adapters': {'key': 'properties.networkAdapters', 'type': '[NetworkAdapter]'},
}
def __init__(
self,
**kwargs
):
super(NetworkSettings, self).__init__(**kwargs)
self.network_adapters = None
class Node(ARMBaseModel):
"""Represents a single node in a Data box Edge/Gateway device
Gateway devices, standalone Edge devices and a single node cluster Edge device will all have 1 node
Multi-node Edge devices will have more than 1 nodes.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The path ID that uniquely identifies the object.
:vartype id: str
:ivar name: The object name.
:vartype name: str
:ivar type: The hierarchical type of the object.
:vartype type: str
:ivar node_status: The current status of the individual node. Possible values include:
"Unknown", "Up", "Down", "Rebooting", "ShuttingDown".
:vartype node_status: str or ~azure.mgmt.databoxedge.v2020_05_01_preview.models.NodeStatus
:ivar node_chassis_serial_number: Serial number of the Chassis.
:vartype node_chassis_serial_number: str
:ivar node_serial_number: Serial number of the individual node.
:vartype node_serial_number: str
:ivar node_display_name: Display Name of the individual node.
:vartype node_display_name: str
:ivar node_friendly_software_version: Friendly software version name that is currently
installed on the node.
:vartype node_friendly_software_version: str
:ivar node_hcs_version: HCS version that is currently installed on the node.
:vartype node_hcs_version: str
:ivar node_instance_id: Guid instance id of the node.
:vartype node_instance_id: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'node_status': {'readonly': True},
'node_chassis_serial_number': {'readonly': True},
'node_serial_number': {'readonly': True},
'node_display_name': {'readonly': True},
'node_friendly_software_version': {'readonly': True},
'node_hcs_version': {'readonly': True},
'node_instance_id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'node_status': {'key': 'properties.nodeStatus', 'type': 'str'},
'node_chassis_serial_number': {'key': 'properties.nodeChassisSerialNumber', 'type': 'str'},
'node_serial_number': {'key': 'properties.nodeSerialNumber', 'type': 'str'},
'node_display_name': {'key': 'properties.nodeDisplayName', 'type': 'str'},
'node_friendly_software_version': {'key': 'properties.nodeFriendlySoftwareVersion', 'type': 'str'},
'node_hcs_version': {'key': 'properties.nodeHcsVersion', 'type': 'str'},
'node_instance_id': {'key': 'properties.nodeInstanceId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Node, self).__init__(**kwargs)
self.node_status = None
self.node_chassis_serial_number = None
self.node_serial_number = None
self.node_display_name = None
self.node_friendly_software_version = None
self.node_hcs_version = None
self.node_instance_id = None
class NodeList(msrest.serialization.Model):
"""Collection of Nodes.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of Nodes.
:vartype value: list[~azure.mgmt.databoxedge.v2020_05_01_preview.models.Node]
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Node]'},
}
def __init__(
self,
**kwargs
):
super(NodeList, self).__init__(**kwargs)
self.value = None
class Operation(msrest.serialization.Model):
"""Operations.
:param name: Name of the operation.
:type name: str
:param display: Properties displayed for the operation.
:type display: ~azure.mgmt.databoxedge.v2020_05_01_preview.models.OperationDisplay
:param origin: Origin of the operation.
:type origin: str
:param service_specification: Service specification.
:type service_specification:
~azure.mgmt.databoxedge.v2020_05_01_preview.models.ServiceSpecification
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'OperationDisplay'},
'origin': {'key': 'origin', 'type': 'str'},
'service_specification': {'key': 'properties.serviceSpecification', 'type': 'ServiceSpecification'},
}
def __init__(
self,
**kwargs
):
super(Operation, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.display = kwargs.get('display', None)
self.origin = kwargs.get('origin', None)
self.service_specification = kwargs.get('service_specification', None)
class OperationDisplay(msrest.serialization.Model):
"""Operation display properties.
:param provider: Provider name.
:type provider: str
:param resource: The type of resource in which the operation is performed.
:type resource: str
:param operation: Operation to be performed on the resource.
:type operation: str
:param description: Description of the operation to be performed.
:type description: str
"""
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationDisplay, self).__init__(**kwargs)
self.provider = kwargs.get('provider', None)
self.resource = kwargs.get('resource', None)
self.operation = kwargs.get('operation', None)
self.description = kwargs.get('description', None)
class OperationsList(msrest.serialization.Model):
"""The list of operations used for the discovery of available provider operations.
All required parameters must be populated in order to send to Azure.
:param value: Required. The value.
:type value: list[~azure.mgmt.databoxedge.v2020_05_01_preview.models.Operation]
:param next_link: Link to the next set of results.
:type next_link: str
"""
_validation = {
'value': {'required': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Operation]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationsList, self).__init__(**kwargs)
self.value = kwargs['value']
self.next_link = kwargs.get('next_link', None)
class Order(ARMBaseModel):
"""The order details.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The path ID that uniquely identifies the object.
:vartype id: str
:ivar name: The object name.
:vartype name: str
:ivar type: The hierarchical type of the object.
:vartype type: str
:param contact_information: The contact details.
:type contact_information: ~azure.mgmt.databoxedge.v2020_05_01_preview.models.ContactDetails
:param shipping_address: The shipping address.
:type shipping_address: ~azure.mgmt.databoxedge.v2020_05_01_preview.models.Address
:param current_status: Current status of the order.
:type current_status: ~azure.mgmt.databoxedge.v2020_05_01_preview.models.OrderStatus
:ivar order_history: List of status changes in the order.
:vartype order_history: list[~azure.mgmt.databoxedge.v2020_05_01_preview.models.OrderStatus]
:ivar serial_number: Serial number of the device.
:vartype serial_number: str
:ivar delivery_tracking_info: Tracking information for the package delivered to the customer,
whether it contains an original or a replacement device.
:vartype delivery_tracking_info:
list[~azure.mgmt.databoxedge.v2020_05_01_preview.models.TrackingInfo]
:ivar return_tracking_info: Tracking information for the package returned from the customer,
whether it contains an original or a replacement device.
:vartype return_tracking_info:
list[~azure.mgmt.databoxedge.v2020_05_01_preview.models.TrackingInfo]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'order_history': {'readonly': True},
'serial_number': {'readonly': True},
'delivery_tracking_info': {'readonly': True},
'return_tracking_info': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'contact_information': {'key': 'properties.contactInformation', 'type': 'ContactDetails'},
'shipping_address': {'key': 'properties.shippingAddress', 'type': 'Address'},
'current_status': {'key': 'properties.currentStatus', 'type': 'OrderStatus'},
'order_history': {'key': 'properties.orderHistory', 'type': '[OrderStatus]'},
'serial_number': {'key': 'properties.serialNumber', 'type': 'str'},
'delivery_tracking_info': {'key': 'properties.deliveryTrackingInfo', 'type': '[TrackingInfo]'},
'return_tracking_info': {'key': 'properties.returnTrackingInfo', 'type': '[TrackingInfo]'},
}
def __init__(
self,
**kwargs
):
super(Order, self).__init__(**kwargs)
self.contact_information = kwargs.get('contact_information', None)
self.shipping_address = kwargs.get('shipping_address', None)
self.current_status = kwargs.get('current_status', None)
self.order_history = None
self.serial_number = None
self.delivery_tracking_info = None
self.return_tracking_info = None
class OrderList(msrest.serialization.Model):
"""List of order entities.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of orders.
:vartype value: list[~azure.mgmt.databoxedge.v2020_05_01_preview.models.Order]
:ivar next_link: Link to the next set of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Order]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OrderList, self).__init__(**kwargs)
self.value = None
self.next_link = None
class OrderStatus(msrest.serialization.Model):
"""Represents a single status change.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param status: Required. Status of the order as per the allowed status types. Possible values
include: "Untracked", "AwaitingFulfilment", "AwaitingPreparation", "AwaitingShipment",
"Shipped", "Arriving", "Delivered", "ReplacementRequested", "LostDevice", "Declined",
"ReturnInitiated", "AwaitingReturnShipment", "ShippedBack", "CollectedAtMicrosoft".
:type status: str or ~azure.mgmt.databoxedge.v2020_05_01_preview.models.OrderState
:ivar update_date_time: Time of status update.
:vartype update_date_time: ~datetime.datetime
:param comments: Comments related to this status change.
:type comments: str
:ivar additional_order_details: Dictionary to hold generic information which is not stored
by the already existing properties.
:vartype additional_order_details: dict[str, str]
"""
_validation = {
'status': {'required': True},
'update_date_time': {'readonly': True},
'additional_order_details': {'readonly': True},
}
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'update_date_time': {'key': 'updateDateTime', 'type': 'iso-8601'},
'comments': {'key': 'comments', 'type': 'str'},
'additional_order_details': {'key': 'additionalOrderDetails', 'type': '{str}'},
}
def __init__(
self,
**kwargs
):
super(OrderStatus, self).__init__(**kwargs)
self.status = kwargs['status']
self.update_date_time = None
self.comments = kwargs.get('comments', None)
self.additional_order_details = None
class PeriodicTimerEventTrigger(Trigger):
"""Trigger details.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The path ID that uniquely identifies the object.
:vartype id: str
:ivar name: The object name.
:vartype name: str
:ivar type: The hierarchical type of the object.
:vartype type: str
:param kind: Required. Trigger Kind. Constant filled by server. Possible values include:
"FileEvent", "PeriodicTimerEvent".
:type kind: str or ~azure.mgmt.databoxedge.v2020_05_01_preview.models.TriggerEventType
:param source_info: Required. Periodic timer details.
:type source_info: ~azure.mgmt.databoxedge.v2020_05_01_preview.models.PeriodicTimerSourceInfo
:param sink_info: Required. Role Sink information.
:type sink_info: ~azure.mgmt.databoxedge.v2020_05_01_preview.models.RoleSinkInfo
:param custom_context_tag: A custom context tag typically used to correlate the trigger against
its usage. For example, if a periodic timer trigger is intended for certain specific IoT
modules in the device, the tag can be the name or the image URL of the module.
:type custom_context_tag: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'kind': {'required': True},
'source_info': {'required': True},
'sink_info': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'source_info': {'key': 'properties.sourceInfo', 'type': 'PeriodicTimerSourceInfo'},
'sink_info': {'key': 'properties.sinkInfo', 'type': 'RoleSinkInfo'},
'custom_context_tag': {'key': 'properties.customContextTag', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PeriodicTimerEventTrigger, self).__init__(**kwargs)
self.kind = 'PeriodicTimerEvent' # type: str
self.source_info = kwargs['source_info']
self.sink_info = kwargs['sink_info']
self.custom_context_tag = kwargs.get('custom_context_tag', None)
class PeriodicTimerSourceInfo(msrest.serialization.Model):
"""Periodic timer event source.
All required parameters must be populated in order to send to Azure.
:param start_time: Required. The time of the day that results in a valid trigger. The schedule is
computed with reference to the time specified up to seconds. If the timezone is not specified, the
time will be considered to be in the device timezone. The value will always be returned as UTC time.
:type start_time: ~datetime.datetime
:param schedule: Required. Periodic frequency at which timer event needs to be raised. Supports
daily, hourly, minutes, and seconds.
:type schedule: str
:param topic: Topic where periodic events are published to IoT device.
:type topic: str
"""
_validation = {
'start_time': {'required': True},
'schedule': {'required': True},
}
_attribute_map = {
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'schedule': {'key': 'schedule', 'type': 'str'},
'topic': {'key': 'topic', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PeriodicTimerSourceInfo, self).__init__(**kwargs)
self.start_time = kwargs['start_time']
self.schedule = kwargs['schedule']
self.topic = kwargs.get('topic', None)
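# Usage sketch (illustrative only): building a periodic timer trigger from the models
# above. The start time, schedule string, role ID and context tag below are hypothetical
# placeholder values, not values prescribed by the service.
#
#   import datetime
#   source_info = PeriodicTimerSourceInfo(
#       start_time=datetime.datetime(2020, 5, 1, 0, 0, 0),  # hypothetical start time
#       schedule="PT1H",                                     # hypothetical hourly schedule
#   )
#   sink_info = RoleSinkInfo(role_id="<compute-role-id>")    # hypothetical compute role ID
#   trigger = PeriodicTimerEventTrigger(
#       source_info=source_info,
#       sink_info=sink_info,
#       custom_context_tag="example-tag",                    # hypothetical tag
#   )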
class RefreshDetails(msrest.serialization.Model):
"""Fields for tracking refresh job on the share or container.
:param in_progress_refresh_job_id: If a refresh job is currently in progress on this share or
container, this field indicates the ARM resource ID of that job. The field is empty if no job
is in progress.
:type in_progress_refresh_job_id: str
:param last_completed_refresh_job_time_in_utc: Indicates the completed time for the last
refresh job on this particular share or container, if any. This could be a failed job or a
successful job.
:type last_completed_refresh_job_time_in_utc: ~datetime.datetime
:param error_manifest_file: Indicates the relative path of the error xml for the last refresh
job on this particular share or container, if any. This could be a failed job or a successful
job.
:type error_manifest_file: str
:param last_job: Indicates the id of the last refresh job on this particular share or
container, if any. This could be a failed job or a successful job.
:type last_job: str
"""
_attribute_map = {
'in_progress_refresh_job_id': {'key': 'inProgressRefreshJobId', 'type': 'str'},
'last_completed_refresh_job_time_in_utc': {'key': 'lastCompletedRefreshJobTimeInUTC', 'type': 'iso-8601'},
'error_manifest_file': {'key': 'errorManifestFile', 'type': 'str'},
'last_job': {'key': 'lastJob', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(RefreshDetails, self).__init__(**kwargs)
self.in_progress_refresh_job_id = kwargs.get('in_progress_refresh_job_id', None)
self.last_completed_refresh_job_time_in_utc = kwargs.get('last_completed_refresh_job_time_in_utc', None)
self.error_manifest_file = kwargs.get('error_manifest_file', None)
self.last_job = kwargs.get('last_job', None)
class ResourceTypeSku(msrest.serialization.Model):
"""SkuInformation object.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar resource_type: The type of the resource.
:vartype resource_type: str
:ivar name: The Sku name. Possible values include: "Gateway", "Edge", "TEA_1Node",
"TEA_1Node_UPS", "TEA_1Node_Heater", "TEA_1Node_UPS_Heater", "TEA_4Node_Heater",
"TEA_4Node_UPS_Heater", "TMA", "TDC", "TCA_Large", "TCA_Small", "GPU".
:vartype name: str or ~azure.mgmt.databoxedge.v2020_05_01_preview.models.SkuName
:ivar kind: The Sku kind.
:vartype kind: str
:ivar tier: The Sku tier. Possible values include: "Standard".
:vartype tier: str or ~azure.mgmt.databoxedge.v2020_05_01_preview.models.SkuTier
:ivar family: The Sku family.
:vartype family: str
:ivar locations: Availability of the SKU for the region.
:vartype locations: list[str]
:ivar api_versions: The API versions in which SKU is available.
:vartype api_versions: list[str]
:ivar location_info: Availability of the SKU for the location/zone.
:vartype location_info:
list[~azure.mgmt.databoxedge.v2020_05_01_preview.models.SkuLocationInfo]
:ivar costs: The pricing info of the Sku.
:vartype costs: list[~azure.mgmt.databoxedge.v2020_05_01_preview.models.SkuCost]
:ivar restrictions: Restrictions of the SKU availability.
:vartype restrictions: list[~azure.mgmt.databoxedge.v2020_05_01_preview.models.SkuRestriction]
"""
_validation = {
'resource_type': {'readonly': True},
'name': {'readonly': True},
'kind': {'readonly': True},
'tier': {'readonly': True},
'family': {'readonly': True},
'locations': {'readonly': True},
'api_versions': {'readonly': True},
'location_info': {'readonly': True},
'costs': {'readonly': True},
'restrictions': {'readonly': True},
}
_attribute_map = {
'resource_type': {'key': 'resourceType', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
'family': {'key': 'family', 'type': 'str'},
'locations': {'key': 'locations', 'type': '[str]'},
'api_versions': {'key': 'apiVersions', 'type': '[str]'},
'location_info': {'key': 'locationInfo', 'type': '[SkuLocationInfo]'},
'costs': {'key': 'costs', 'type': '[SkuCost]'},
'restrictions': {'key': 'restrictions', 'type': '[SkuRestriction]'},
}
def __init__(
self,
**kwargs
):
super(ResourceTypeSku, self).__init__(**kwargs)
self.resource_type = None
self.name = None
self.kind = None
self.tier = None
self.family = None
self.locations = None
self.api_versions = None
self.location_info = None
self.costs = None
self.restrictions = None
class RoleList(msrest.serialization.Model):
"""Collection of all the roles on the Data Box Edge device.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The Value.
:vartype value: list[~azure.mgmt.databoxedge.v2020_05_01_preview.models.Role]
:ivar next_link: Link to the next set of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Role]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(RoleList, self).__init__(**kwargs)
self.value = None
self.next_link = None
class RoleSinkInfo(msrest.serialization.Model):
"""Compute role against which events will be raised.
All required parameters must be populated in order to send to Azure.
:param role_id: Required. Compute role ID.
:type role_id: str
"""
_validation = {
'role_id': {'required': True},
}
_attribute_map = {
'role_id': {'key': 'roleId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(RoleSinkInfo, self).__init__(**kwargs)
self.role_id = kwargs['role_id']
class SecuritySettings(ARMBaseModel):
"""The security settings of a device.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The path ID that uniquely identifies the object.
:vartype id: str
:ivar name: The object name.
:vartype name: str
:ivar type: The hierarchical type of the object.
:vartype type: str
:param device_admin_password: Required. Device administrator password as an encrypted string
(encrypted using RSA PKCS #1) is used to sign into the local web UI of the device. The actual
password should have at least 8 characters that are a combination of uppercase, lowercase,
numeric, and special characters.
:type device_admin_password:
~azure.mgmt.databoxedge.v2020_05_01_preview.models.AsymmetricEncryptedSecret
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'device_admin_password': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'device_admin_password': {'key': 'properties.deviceAdminPassword', 'type': 'AsymmetricEncryptedSecret'},
}
def __init__(
self,
**kwargs
):
super(SecuritySettings, self).__init__(**kwargs)
self.device_admin_password = kwargs['device_admin_password']
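# Usage sketch (illustrative only): updating the device security settings. The value
# passed below is assumed to be an AsymmetricEncryptedSecret built elsewhere (the device
# administrator password encrypted with RSA PKCS #1, per the docstring above).
#
#   settings = SecuritySettings(
#       device_admin_password=encrypted_secret,  # an AsymmetricEncryptedSecret instance
#   )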
class ServiceSpecification(msrest.serialization.Model):
"""Service specification.
:param metric_specifications: Metric specification as defined by shoebox.
:type metric_specifications:
list[~azure.mgmt.databoxedge.v2020_05_01_preview.models.MetricSpecificationV1]
"""
_attribute_map = {
'metric_specifications': {'key': 'metricSpecifications', 'type': '[MetricSpecificationV1]'},
}
def __init__(
self,
**kwargs
):
super(ServiceSpecification, self).__init__(**kwargs)
self.metric_specifications = kwargs.get('metric_specifications', None)
class Share(ARMBaseModel):
"""Represents a share on the Data Box Edge/Gateway device.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The path ID that uniquely identifies the object.
:vartype id: str
:ivar name: The object name.
:vartype name: str
:ivar type: The hierarchical type of the object.
:vartype type: str
:param description: Description for the share.
:type description: str
:param share_status: Required. Current status of the share. Possible values include: "Offline",
"Unknown", "OK", "Updating", "NeedsAttention".
:type share_status: str or ~azure.mgmt.databoxedge.v2020_05_01_preview.models.ShareStatus
:param monitoring_status: Required. Current monitoring status of the share. Possible values
include: "Enabled", "Disabled".
:type monitoring_status: str or
~azure.mgmt.databoxedge.v2020_05_01_preview.models.MonitoringStatus
:param azure_container_info: Azure container mapping for the share.
:type azure_container_info:
~azure.mgmt.databoxedge.v2020_05_01_preview.models.AzureContainerInfo
:param access_protocol: Required. Access protocol to be used by the share. Possible values
include: "SMB", "NFS".
:type access_protocol: str or
~azure.mgmt.databoxedge.v2020_05_01_preview.models.ShareAccessProtocol
:param user_access_rights: Mapping of users and corresponding access rights on the share
(required for SMB protocol).
:type user_access_rights:
list[~azure.mgmt.databoxedge.v2020_05_01_preview.models.UserAccessRight]
:param client_access_rights: List of IP addresses and corresponding access rights on the
share (required for NFS protocol).
:type client_access_rights:
list[~azure.mgmt.databoxedge.v2020_05_01_preview.models.ClientAccessRight]
:param refresh_details: Details of the refresh job on this share.
:type refresh_details: ~azure.mgmt.databoxedge.v2020_05_01_preview.models.RefreshDetails
:ivar share_mappings: Share mount point to the role.
:vartype share_mappings: list[~azure.mgmt.databoxedge.v2020_05_01_preview.models.MountPointMap]
:param data_policy: Data policy of the share. Possible values include: "Cloud", "Local".
:type data_policy: str or ~azure.mgmt.databoxedge.v2020_05_01_preview.models.DataPolicy
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'share_status': {'required': True},
'monitoring_status': {'required': True},
'access_protocol': {'required': True},
'share_mappings': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'share_status': {'key': 'properties.shareStatus', 'type': 'str'},
'monitoring_status': {'key': 'properties.monitoringStatus', 'type': 'str'},
'azure_container_info': {'key': 'properties.azureContainerInfo', 'type': 'AzureContainerInfo'},
'access_protocol': {'key': 'properties.accessProtocol', 'type': 'str'},
'user_access_rights': {'key': 'properties.userAccessRights', 'type': '[UserAccessRight]'},
'client_access_rights': {'key': 'properties.clientAccessRights', 'type': '[ClientAccessRight]'},
'refresh_details': {'key': 'properties.refreshDetails', 'type': 'RefreshDetails'},
'share_mappings': {'key': 'properties.shareMappings', 'type': '[MountPointMap]'},
'data_policy': {'key': 'properties.dataPolicy', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Share, self).__init__(**kwargs)
self.description = kwargs.get('description', None)
self.share_status = kwargs['share_status']
self.monitoring_status = kwargs['monitoring_status']
self.azure_container_info = kwargs.get('azure_container_info', None)
self.access_protocol = kwargs['access_protocol']
self.user_access_rights = kwargs.get('user_access_rights', None)
self.client_access_rights = kwargs.get('client_access_rights', None)
self.refresh_details = kwargs.get('refresh_details', None)
self.share_mappings = None
self.data_policy = kwargs.get('data_policy', None)
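# Usage sketch (illustrative only): creating an SMB share with the required properties.
# The enum values are taken from the documented possible values; the description is a
# hypothetical placeholder.
#
#   share = Share(
#       description="Example share",   # hypothetical
#       share_status="OK",
#       monitoring_status="Enabled",
#       access_protocol="SMB",
#       data_policy="Cloud",
#   )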
class ShareAccessRight(msrest.serialization.Model):
"""Specifies the mapping between this particular user and the type of access he has on shares on this device.
All required parameters must be populated in order to send to Azure.
:param share_id: Required. The share ID.
:type share_id: str
:param access_type: Required. Type of access to be allowed on the share for this user. Possible
values include: "Change", "Read", "Custom".
:type access_type: str or ~azure.mgmt.databoxedge.v2020_05_01_preview.models.ShareAccessType
"""
_validation = {
'share_id': {'required': True},
'access_type': {'required': True},
}
_attribute_map = {
'share_id': {'key': 'shareId', 'type': 'str'},
'access_type': {'key': 'accessType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ShareAccessRight, self).__init__(**kwargs)
self.share_id = kwargs['share_id']
self.access_type = kwargs['access_type']
class ShareList(msrest.serialization.Model):
"""Collection of all the shares on the Data Box Edge/Gateway device.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of shares.
:vartype value: list[~azure.mgmt.databoxedge.v2020_05_01_preview.models.Share]
:ivar next_link: Link to the next set of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Share]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ShareList, self).__init__(**kwargs)
self.value = None
self.next_link = None
class Sku(msrest.serialization.Model):
"""The SKU type.
:param name: SKU name. Possible values include: "Gateway", "Edge", "TEA_1Node",
"TEA_1Node_UPS", "TEA_1Node_Heater", "TEA_1Node_UPS_Heater", "TEA_4Node_Heater",
"TEA_4Node_UPS_Heater", "TMA", "TDC", "TCA_Large", "TCA_Small", "GPU".
:type name: str or ~azure.mgmt.databoxedge.v2020_05_01_preview.models.SkuName
:param tier: The SKU tier. This is based on the SKU name. Possible values include: "Standard".
:type tier: str or ~azure.mgmt.databoxedge.v2020_05_01_preview.models.SkuTier
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Sku, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.tier = kwargs.get('tier', None)
class SKUCapability(msrest.serialization.Model):
"""The capabilities of the SKU.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The capability name.
:vartype name: str
:ivar value: The capability value.
:vartype value: str
"""
_validation = {
'name': {'readonly': True},
'value': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SKUCapability, self).__init__(**kwargs)
self.name = None
self.value = None
class SkuCost(msrest.serialization.Model):
"""The metadata for retrieving price info.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar meter_id: Used for querying price from commerce.
:vartype meter_id: str
:ivar quantity: The cost quantity.
:vartype quantity: long
:ivar extended_unit: The extended unit.
:vartype extended_unit: str
"""
_validation = {
'meter_id': {'readonly': True},
'quantity': {'readonly': True},
'extended_unit': {'readonly': True},
}
_attribute_map = {
'meter_id': {'key': 'meterId', 'type': 'str'},
'quantity': {'key': 'quantity', 'type': 'long'},
'extended_unit': {'key': 'extendedUnit', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SkuCost, self).__init__(**kwargs)
self.meter_id = None
self.quantity = None
self.extended_unit = None
class SkuInformation(msrest.serialization.Model):
"""Sku information.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar resource_type: The resource type.
:vartype resource_type: str
:ivar name: The sku name.
:vartype name: str
:ivar tier: The sku tier.
:vartype tier: str
:ivar kind: The sku kind.
:vartype kind: str
:ivar size: The sku size.
:vartype size: str
:ivar family: The sku family.
:vartype family: str
:ivar locations: The locations where Sku is available.
:vartype locations: list[str]
:ivar location_info: The locations where Sku is available with zones and sites info.
:vartype location_info:
list[~azure.mgmt.databoxedge.v2020_05_01_preview.models.SkuLocationInfo]
:ivar required_quota_ids: The required quotaIds for the sku to be available.
:vartype required_quota_ids: list[str]
:ivar required_features: The required features for the sku to be available.
:vartype required_features: list[str]
:ivar costs: The cost of the SKU.
:vartype costs: list[~azure.mgmt.databoxedge.v2020_05_01_preview.models.SkuCost]
:ivar capabilities: The capabilities of the SKU.
:vartype capabilities: list[~azure.mgmt.databoxedge.v2020_05_01_preview.models.SKUCapability]
"""
_validation = {
'resource_type': {'readonly': True},
'name': {'readonly': True},
'tier': {'readonly': True},
'kind': {'readonly': True},
'size': {'readonly': True},
'family': {'readonly': True},
'locations': {'readonly': True},
'location_info': {'readonly': True},
'required_quota_ids': {'readonly': True},
'required_features': {'readonly': True},
'costs': {'readonly': True},
'capabilities': {'readonly': True},
}
_attribute_map = {
'resource_type': {'key': 'resourceType', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'size': {'key': 'size', 'type': 'str'},
'family': {'key': 'family', 'type': 'str'},
'locations': {'key': 'locations', 'type': '[str]'},
'location_info': {'key': 'locationInfo', 'type': '[SkuLocationInfo]'},
'required_quota_ids': {'key': 'requiredQuotaIds', 'type': '[str]'},
'required_features': {'key': 'requiredFeatures', 'type': '[str]'},
'costs': {'key': 'costs', 'type': '[SkuCost]'},
'capabilities': {'key': 'capabilities', 'type': '[SKUCapability]'},
}
def __init__(
self,
**kwargs
):
super(SkuInformation, self).__init__(**kwargs)
self.resource_type = None
self.name = None
self.tier = None
self.kind = None
self.size = None
self.family = None
self.locations = None
self.location_info = None
self.required_quota_ids = None
self.required_features = None
self.costs = None
self.capabilities = None
class SkuInformationList(msrest.serialization.Model):
"""List of SKU Information objects.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: List of ResourceType Sku.
:vartype value: list[~azure.mgmt.databoxedge.v2020_05_01_preview.models.ResourceTypeSku]
:ivar next_link: Links to the next set of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[ResourceTypeSku]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SkuInformationList, self).__init__(**kwargs)
self.value = None
self.next_link = None
class SkuLocationInfo(msrest.serialization.Model):
"""The location info.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar location: The location.
:vartype location: str
:ivar zones: The zones.
:vartype zones: list[str]
:ivar sites: The sites.
:vartype sites: list[str]
"""
_validation = {
'location': {'readonly': True},
'zones': {'readonly': True},
'sites': {'readonly': True},
}
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'zones': {'key': 'zones', 'type': '[str]'},
'sites': {'key': 'sites', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(SkuLocationInfo, self).__init__(**kwargs)
self.location = None
self.zones = None
self.sites = None
class SkuRestriction(msrest.serialization.Model):
"""The restrictions because of which SKU cannot be used.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar type: The type of the restriction.
:vartype type: str
:ivar values: The locations where sku is restricted.
:vartype values: list[str]
:ivar reason_code: The SKU restriction reason. Possible values include:
"NotAvailableForSubscription", "QuotaId".
:vartype reason_code: str or
~azure.mgmt.databoxedge.v2020_05_01_preview.models.SkuRestrictionReasonCode
:ivar restriction_info: Restriction of the SKU for the location/zone.
:vartype restriction_info:
~azure.mgmt.databoxedge.v2020_05_01_preview.models.SkuRestrictionInfo
"""
_validation = {
'type': {'readonly': True},
'values': {'readonly': True},
'reason_code': {'readonly': True},
'restriction_info': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'values': {'key': 'values', 'type': '[str]'},
'reason_code': {'key': 'reasonCode', 'type': 'str'},
'restriction_info': {'key': 'restrictionInfo', 'type': 'SkuRestrictionInfo'},
}
def __init__(
self,
**kwargs
):
super(SkuRestriction, self).__init__(**kwargs)
self.type = None
self.values = None
self.reason_code = None
self.restriction_info = None
class SkuRestrictionInfo(msrest.serialization.Model):
"""The restriction info with locations and zones.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar locations: The locations.
:vartype locations: list[str]
:ivar zones: The zones.
:vartype zones: list[str]
"""
_validation = {
'locations': {'readonly': True},
'zones': {'readonly': True},
}
_attribute_map = {
'locations': {'key': 'locations', 'type': '[str]'},
'zones': {'key': 'zones', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(SkuRestrictionInfo, self).__init__(**kwargs)
self.locations = None
self.zones = None
class StorageAccount(ARMBaseModel):
"""Represents a Storage Account on the Data Box Edge/Gateway device.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The path ID that uniquely identifies the object.
:vartype id: str
:ivar name: The object name.
:vartype name: str
:ivar type: The hierarchical type of the object.
:vartype type: str
:param description: Description for the storage Account.
:type description: str
:param storage_account_status: Current status of the storage account. Possible values include:
"OK", "Offline", "Unknown", "Updating", "NeedsAttention".
:type storage_account_status: str or
~azure.mgmt.databoxedge.v2020_05_01_preview.models.StorageAccountStatus
:param data_policy: Data policy of the storage Account. Possible values include: "Cloud",
"Local".
:type data_policy: str or ~azure.mgmt.databoxedge.v2020_05_01_preview.models.DataPolicy
:param storage_account_credential_id: Storage Account Credential Id.
:type storage_account_credential_id: str
:ivar blob_endpoint: BlobEndpoint of Storage Account.
:vartype blob_endpoint: str
:ivar container_count: The Container Count. Present only for Storage Accounts with DataPolicy
set to Cloud.
:vartype container_count: int
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'blob_endpoint': {'readonly': True},
'container_count': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'storage_account_status': {'key': 'properties.storageAccountStatus', 'type': 'str'},
'data_policy': {'key': 'properties.dataPolicy', 'type': 'str'},
'storage_account_credential_id': {'key': 'properties.storageAccountCredentialId', 'type': 'str'},
'blob_endpoint': {'key': 'properties.blobEndpoint', 'type': 'str'},
'container_count': {'key': 'properties.containerCount', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(StorageAccount, self).__init__(**kwargs)
self.description = kwargs.get('description', None)
self.storage_account_status = kwargs.get('storage_account_status', None)
self.data_policy = kwargs.get('data_policy', None)
self.storage_account_credential_id = kwargs.get('storage_account_credential_id', None)
self.blob_endpoint = None
self.container_count = None
class StorageAccountCredential(ARMBaseModel):
"""The storage account credential.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The path ID that uniquely identifies the object.
:vartype id: str
:ivar name: The object name.
:vartype name: str
:ivar type: The hierarchical type of the object.
:vartype type: str
:param alias: Required. Alias for the storage account.
:type alias: str
:param user_name: Username for the storage account.
:type user_name: str
:param account_key: Encrypted storage key.
:type account_key: ~azure.mgmt.databoxedge.v2020_05_01_preview.models.AsymmetricEncryptedSecret
:param connection_string: Connection string for the storage account. Use this string if
username and account key are not specified.
:type connection_string: str
:param ssl_status: Required. Signifies whether SSL needs to be enabled or not. Possible values
include: "Enabled", "Disabled".
:type ssl_status: str or ~azure.mgmt.databoxedge.v2020_05_01_preview.models.SSLStatus
:param blob_domain_name: Blob end point for private clouds.
:type blob_domain_name: str
:param account_type: Required. Type of storage accessed on the storage account. Possible values
include: "GeneralPurposeStorage", "BlobStorage".
:type account_type: str or ~azure.mgmt.databoxedge.v2020_05_01_preview.models.AccountType
:param storage_account_id: Id of the storage account.
:type storage_account_id: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'alias': {'required': True},
'ssl_status': {'required': True},
'account_type': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'alias': {'key': 'properties.alias', 'type': 'str'},
'user_name': {'key': 'properties.userName', 'type': 'str'},
'account_key': {'key': 'properties.accountKey', 'type': 'AsymmetricEncryptedSecret'},
'connection_string': {'key': 'properties.connectionString', 'type': 'str'},
'ssl_status': {'key': 'properties.sslStatus', 'type': 'str'},
'blob_domain_name': {'key': 'properties.blobDomainName', 'type': 'str'},
'account_type': {'key': 'properties.accountType', 'type': 'str'},
'storage_account_id': {'key': 'properties.storageAccountId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(StorageAccountCredential, self).__init__(**kwargs)
self.alias = kwargs['alias']
self.user_name = kwargs.get('user_name', None)
self.account_key = kwargs.get('account_key', None)
self.connection_string = kwargs.get('connection_string', None)
self.ssl_status = kwargs['ssl_status']
self.blob_domain_name = kwargs.get('blob_domain_name', None)
self.account_type = kwargs['account_type']
self.storage_account_id = kwargs.get('storage_account_id', None)
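# Usage sketch (illustrative only): defining a storage account credential. The alias and
# storage account ID are hypothetical placeholders; the enum values come from the
# documented possible values.
#
#   credential = StorageAccountCredential(
#       alias="examplestorageaccount",                  # hypothetical alias
#       ssl_status="Enabled",
#       account_type="BlobStorage",
#       storage_account_id="<storage-account-arm-id>",  # hypothetical ARM resource ID
#   )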
class StorageAccountCredentialList(msrest.serialization.Model):
"""The collection of storage account credentials.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The value.
:vartype value:
list[~azure.mgmt.databoxedge.v2020_05_01_preview.models.StorageAccountCredential]
:ivar next_link: Link to the next set of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[StorageAccountCredential]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(StorageAccountCredentialList, self).__init__(**kwargs)
self.value = None
self.next_link = None
class StorageAccountList(msrest.serialization.Model):
"""Collection of all the Storage Accounts on the Data Box Edge/Gateway device.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of storageAccounts.
:vartype value: list[~azure.mgmt.databoxedge.v2020_05_01_preview.models.StorageAccount]
:ivar next_link: Link to the next set of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[StorageAccount]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(StorageAccountList, self).__init__(**kwargs)
self.value = None
self.next_link = None
class SymmetricKey(msrest.serialization.Model):
"""Symmetric key for authentication.
:param connection_string: Connection string based on the symmetric key.
:type connection_string:
~azure.mgmt.databoxedge.v2020_05_01_preview.models.AsymmetricEncryptedSecret
"""
_attribute_map = {
'connection_string': {'key': 'connectionString', 'type': 'AsymmetricEncryptedSecret'},
}
def __init__(
self,
**kwargs
):
super(SymmetricKey, self).__init__(**kwargs)
self.connection_string = kwargs.get('connection_string', None)
class TrackingInfo(msrest.serialization.Model):
"""Tracking courier information.
:param serial_number: Serial number of the device being tracked.
:type serial_number: str
:param carrier_name: Name of the carrier used in the delivery.
:type carrier_name: str
:param tracking_id: Tracking ID of the shipment.
:type tracking_id: str
:param tracking_url: Tracking URL of the shipment.
:type tracking_url: str
"""
_attribute_map = {
'serial_number': {'key': 'serialNumber', 'type': 'str'},
'carrier_name': {'key': 'carrierName', 'type': 'str'},
'tracking_id': {'key': 'trackingId', 'type': 'str'},
'tracking_url': {'key': 'trackingUrl', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TrackingInfo, self).__init__(**kwargs)
self.serial_number = kwargs.get('serial_number', None)
self.carrier_name = kwargs.get('carrier_name', None)
self.tracking_id = kwargs.get('tracking_id', None)
self.tracking_url = kwargs.get('tracking_url', None)
class TriggerList(msrest.serialization.Model):
"""Collection of all trigger on the data box edge device.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of triggers.
:vartype value: list[~azure.mgmt.databoxedge.v2020_05_01_preview.models.Trigger]
:ivar next_link: Link to the next set of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Trigger]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TriggerList, self).__init__(**kwargs)
self.value = None
self.next_link = None
class UpdateDownloadProgress(msrest.serialization.Model):
"""Details about the download progress of update.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar download_phase: The download phase. Possible values include: "Unknown", "Initializing",
"Downloading", "Verifying".
:vartype download_phase: str or
~azure.mgmt.databoxedge.v2020_05_01_preview.models.DownloadPhase
:ivar percent_complete: Percentage of completion.
:vartype percent_complete: int
:ivar total_bytes_to_download: Total bytes to download.
:vartype total_bytes_to_download: float
:ivar total_bytes_downloaded: Total bytes downloaded.
:vartype total_bytes_downloaded: float
:ivar number_of_updates_to_download: Number of updates to download.
:vartype number_of_updates_to_download: int
:ivar number_of_updates_downloaded: Number of updates downloaded.
:vartype number_of_updates_downloaded: int
"""
_validation = {
'download_phase': {'readonly': True},
'percent_complete': {'readonly': True},
'total_bytes_to_download': {'readonly': True},
'total_bytes_downloaded': {'readonly': True},
'number_of_updates_to_download': {'readonly': True},
'number_of_updates_downloaded': {'readonly': True},
}
_attribute_map = {
'download_phase': {'key': 'downloadPhase', 'type': 'str'},
'percent_complete': {'key': 'percentComplete', 'type': 'int'},
'total_bytes_to_download': {'key': 'totalBytesToDownload', 'type': 'float'},
'total_bytes_downloaded': {'key': 'totalBytesDownloaded', 'type': 'float'},
'number_of_updates_to_download': {'key': 'numberOfUpdatesToDownload', 'type': 'int'},
'number_of_updates_downloaded': {'key': 'numberOfUpdatesDownloaded', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(UpdateDownloadProgress, self).__init__(**kwargs)
self.download_phase = None
self.percent_complete = None
self.total_bytes_to_download = None
self.total_bytes_downloaded = None
self.number_of_updates_to_download = None
self.number_of_updates_downloaded = None
class UpdateInstallProgress(msrest.serialization.Model):
"""Progress details during installation of updates.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar percent_complete: Percentage completed.
:vartype percent_complete: int
:ivar number_of_updates_to_install: Number of updates to install.
:vartype number_of_updates_to_install: int
:ivar number_of_updates_installed: Number of updates installed.
:vartype number_of_updates_installed: int
"""
_validation = {
'percent_complete': {'readonly': True},
'number_of_updates_to_install': {'readonly': True},
'number_of_updates_installed': {'readonly': True},
}
_attribute_map = {
'percent_complete': {'key': 'percentComplete', 'type': 'int'},
'number_of_updates_to_install': {'key': 'numberOfUpdatesToInstall', 'type': 'int'},
'number_of_updates_installed': {'key': 'numberOfUpdatesInstalled', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(UpdateInstallProgress, self).__init__(**kwargs)
self.percent_complete = None
self.number_of_updates_to_install = None
self.number_of_updates_installed = None
class UpdateSummary(ARMBaseModel):
"""Details about ongoing updates and availability of updates on the device.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The path ID that uniquely identifies the object.
:vartype id: str
:ivar name: The object name.
:vartype name: str
:ivar type: The hierarchical type of the object.
:vartype type: str
:param device_version_number: The current version of the device in the format: 1.2.17312.13.
:type device_version_number: str
:param friendly_device_version_name: The current version of the device in text format.
:type friendly_device_version_name: str
:param device_last_scanned_date_time: The last time when a scan was done on the device.
:type device_last_scanned_date_time: ~datetime.datetime
:param last_completed_scan_job_date_time: The time when the last scan job was completed
(success/cancelled/failed) on the appliance.
:type last_completed_scan_job_date_time: ~datetime.datetime
:ivar last_completed_download_job_date_time: The time when the last Download job was completed
(success/cancelled/failed) on the appliance.
:vartype last_completed_download_job_date_time: ~datetime.datetime
:ivar last_completed_install_job_date_time: The time when the last Install job was completed
(success/cancelled/failed) on the appliance.
:vartype last_completed_install_job_date_time: ~datetime.datetime
:ivar total_number_of_updates_available: The number of updates available for the current device
version as per the last device scan.
:vartype total_number_of_updates_available: int
:ivar total_number_of_updates_pending_download: The total number of items pending download.
:vartype total_number_of_updates_pending_download: int
:ivar total_number_of_updates_pending_install: The total number of items pending install.
:vartype total_number_of_updates_pending_install: int
:ivar reboot_behavior: Indicates if updates are available and at least one of the updates needs
a reboot. Possible values include: "NeverReboots", "RequiresReboot", "RequestReboot".
:vartype reboot_behavior: str or
~azure.mgmt.databoxedge.v2020_05_01_preview.models.InstallRebootBehavior
:ivar ongoing_update_operation: The current update operation. Possible values include: "None",
"Scan", "Download", "Install".
:vartype ongoing_update_operation: str or
~azure.mgmt.databoxedge.v2020_05_01_preview.models.UpdateOperation
:ivar in_progress_download_job_id: The job ID of the download job in progress.
:vartype in_progress_download_job_id: str
:ivar in_progress_install_job_id: The job ID of the install job in progress.
:vartype in_progress_install_job_id: str
:ivar in_progress_download_job_started_date_time: The time when the currently running download
(if any) started.
:vartype in_progress_download_job_started_date_time: ~datetime.datetime
:ivar in_progress_install_job_started_date_time: The time when the currently running install
(if any) started.
:vartype in_progress_install_job_started_date_time: ~datetime.datetime
:ivar update_titles: The list of updates available for install.
:vartype update_titles: list[str]
:ivar total_update_size_in_bytes: The total size of updates available for download in bytes.
:vartype total_update_size_in_bytes: float
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'last_completed_download_job_date_time': {'readonly': True},
'last_completed_install_job_date_time': {'readonly': True},
'total_number_of_updates_available': {'readonly': True},
'total_number_of_updates_pending_download': {'readonly': True},
'total_number_of_updates_pending_install': {'readonly': True},
'reboot_behavior': {'readonly': True},
'ongoing_update_operation': {'readonly': True},
'in_progress_download_job_id': {'readonly': True},
'in_progress_install_job_id': {'readonly': True},
'in_progress_download_job_started_date_time': {'readonly': True},
'in_progress_install_job_started_date_time': {'readonly': True},
'update_titles': {'readonly': True},
'total_update_size_in_bytes': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'device_version_number': {'key': 'properties.deviceVersionNumber', 'type': 'str'},
'friendly_device_version_name': {'key': 'properties.friendlyDeviceVersionName', 'type': 'str'},
'device_last_scanned_date_time': {'key': 'properties.deviceLastScannedDateTime', 'type': 'iso-8601'},
'last_completed_scan_job_date_time': {'key': 'properties.lastCompletedScanJobDateTime', 'type': 'iso-8601'},
'last_completed_download_job_date_time': {'key': 'properties.lastCompletedDownloadJobDateTime', 'type': 'iso-8601'},
'last_completed_install_job_date_time': {'key': 'properties.lastCompletedInstallJobDateTime', 'type': 'iso-8601'},
'total_number_of_updates_available': {'key': 'properties.totalNumberOfUpdatesAvailable', 'type': 'int'},
'total_number_of_updates_pending_download': {'key': 'properties.totalNumberOfUpdatesPendingDownload', 'type': 'int'},
'total_number_of_updates_pending_install': {'key': 'properties.totalNumberOfUpdatesPendingInstall', 'type': 'int'},
'reboot_behavior': {'key': 'properties.rebootBehavior', 'type': 'str'},
'ongoing_update_operation': {'key': 'properties.ongoingUpdateOperation', 'type': 'str'},
'in_progress_download_job_id': {'key': 'properties.inProgressDownloadJobId', 'type': 'str'},
'in_progress_install_job_id': {'key': 'properties.inProgressInstallJobId', 'type': 'str'},
'in_progress_download_job_started_date_time': {'key': 'properties.inProgressDownloadJobStartedDateTime', 'type': 'iso-8601'},
'in_progress_install_job_started_date_time': {'key': 'properties.inProgressInstallJobStartedDateTime', 'type': 'iso-8601'},
'update_titles': {'key': 'properties.updateTitles', 'type': '[str]'},
'total_update_size_in_bytes': {'key': 'properties.totalUpdateSizeInBytes', 'type': 'float'},
}
def __init__(
self,
**kwargs
):
super(UpdateSummary, self).__init__(**kwargs)
self.device_version_number = kwargs.get('device_version_number', None)
self.friendly_device_version_name = kwargs.get('friendly_device_version_name', None)
self.device_last_scanned_date_time = kwargs.get('device_last_scanned_date_time', None)
self.last_completed_scan_job_date_time = kwargs.get('last_completed_scan_job_date_time', None)
self.last_completed_download_job_date_time = None
self.last_completed_install_job_date_time = None
self.total_number_of_updates_available = None
self.total_number_of_updates_pending_download = None
self.total_number_of_updates_pending_install = None
self.reboot_behavior = None
self.ongoing_update_operation = None
self.in_progress_download_job_id = None
self.in_progress_install_job_id = None
self.in_progress_download_job_started_date_time = None
self.in_progress_install_job_started_date_time = None
self.update_titles = None
self.total_update_size_in_bytes = None
class UploadCertificateRequest(msrest.serialization.Model):
"""The upload certificate request.
All required parameters must be populated in order to send to Azure.
:param authentication_type: The authentication type. Possible values include: "Invalid",
"AzureActiveDirectory".
:type authentication_type: str or
~azure.mgmt.databoxedge.v2020_05_01_preview.models.AuthenticationType
:param certificate: Required. The base64 encoded certificate raw data.
:type certificate: str
"""
_validation = {
'certificate': {'required': True},
}
_attribute_map = {
'authentication_type': {'key': 'properties.authenticationType', 'type': 'str'},
'certificate': {'key': 'properties.certificate', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(UploadCertificateRequest, self).__init__(**kwargs)
self.authentication_type = kwargs.get('authentication_type', None)
self.certificate = kwargs['certificate']
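# Illustrative construction of the request model above (the values are
# placeholders, not taken from any real deployment); the generated models are
# keyword-argument only and 'certificate' is required:
#
#     request = UploadCertificateRequest(
#         certificate="<base64 encoded certificate raw data>",
#         authentication_type="AzureActiveDirectory",
#     )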
class UploadCertificateResponse(msrest.serialization.Model):
"""The upload registration certificate response.
Variables are only populated by the server, and will be ignored when sending a request.
:param auth_type: Specifies authentication type. Possible values include: "Invalid",
"AzureActiveDirectory".
:type auth_type: str or ~azure.mgmt.databoxedge.v2020_05_01_preview.models.AuthenticationType
:ivar resource_id: The resource ID of the Data Box Edge/Gateway device.
:vartype resource_id: str
:ivar aad_authority: Azure Active Directory tenant authority.
:vartype aad_authority: str
:ivar aad_tenant_id: Azure Active Directory tenant ID.
:vartype aad_tenant_id: str
:ivar service_principal_client_id: Azure Active Directory service principal client ID.
:vartype service_principal_client_id: str
:ivar service_principal_object_id: Azure Active Directory service principal object ID.
:vartype service_principal_object_id: str
:ivar azure_management_endpoint_audience: The azure management endpoint audience.
:vartype azure_management_endpoint_audience: str
:ivar aad_audience: Identifier of the target resource that is the recipient of the requested
token.
:vartype aad_audience: str
"""
_validation = {
'resource_id': {'readonly': True},
'aad_authority': {'readonly': True},
'aad_tenant_id': {'readonly': True},
'service_principal_client_id': {'readonly': True},
'service_principal_object_id': {'readonly': True},
'azure_management_endpoint_audience': {'readonly': True},
'aad_audience': {'readonly': True},
}
_attribute_map = {
'auth_type': {'key': 'authType', 'type': 'str'},
'resource_id': {'key': 'resourceId', 'type': 'str'},
'aad_authority': {'key': 'aadAuthority', 'type': 'str'},
'aad_tenant_id': {'key': 'aadTenantId', 'type': 'str'},
'service_principal_client_id': {'key': 'servicePrincipalClientId', 'type': 'str'},
'service_principal_object_id': {'key': 'servicePrincipalObjectId', 'type': 'str'},
'azure_management_endpoint_audience': {'key': 'azureManagementEndpointAudience', 'type': 'str'},
'aad_audience': {'key': 'aadAudience', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(UploadCertificateResponse, self).__init__(**kwargs)
self.auth_type = kwargs.get('auth_type', None)
self.resource_id = None
self.aad_authority = None
self.aad_tenant_id = None
self.service_principal_client_id = None
self.service_principal_object_id = None
self.azure_management_endpoint_audience = None
self.aad_audience = None
class User(ARMBaseModel):
"""Represents a user who has access to one or more shares on the Data Box Edge/Gateway device.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: The path ID that uniquely identifies the object.
:vartype id: str
:ivar name: The object name.
:vartype name: str
:ivar type: The hierarchical type of the object.
:vartype type: str
:param encrypted_password: The password details.
:type encrypted_password:
~azure.mgmt.databoxedge.v2020_05_01_preview.models.AsymmetricEncryptedSecret
:ivar share_access_rights: List of shares that the user has rights on. This field should not be
specified during user creation.
:vartype share_access_rights:
list[~azure.mgmt.databoxedge.v2020_05_01_preview.models.ShareAccessRight]
:param user_type: Required. Type of the user. Possible values include: "Share",
"LocalManagement", "ARM".
:type user_type: str or ~azure.mgmt.databoxedge.v2020_05_01_preview.models.UserType
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'share_access_rights': {'readonly': True},
'user_type': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'encrypted_password': {'key': 'properties.encryptedPassword', 'type': 'AsymmetricEncryptedSecret'},
'share_access_rights': {'key': 'properties.shareAccessRights', 'type': '[ShareAccessRight]'},
'user_type': {'key': 'properties.userType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(User, self).__init__(**kwargs)
self.encrypted_password = kwargs.get('encrypted_password', None)
self.share_access_rights = None
self.user_type = kwargs['user_type']
class UserAccessRight(msrest.serialization.Model):
"""The mapping between a particular user and the access type on the SMB share.
All required parameters must be populated in order to send to Azure.
:param user_id: Required. User ID (already existing in the device).
:type user_id: str
:param access_type: Required. Type of access to be allowed for the user. Possible values
include: "Change", "Read", "Custom".
:type access_type: str or ~azure.mgmt.databoxedge.v2020_05_01_preview.models.ShareAccessType
"""
_validation = {
'user_id': {'required': True},
'access_type': {'required': True},
}
_attribute_map = {
'user_id': {'key': 'userId', 'type': 'str'},
'access_type': {'key': 'accessType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(UserAccessRight, self).__init__(**kwargs)
self.user_id = kwargs['user_id']
self.access_type = kwargs['access_type']
class UserList(msrest.serialization.Model):
"""Collection of users.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of users.
:vartype value: list[~azure.mgmt.databoxedge.v2020_05_01_preview.models.User]
:ivar next_link: Link to the next set of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[User]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(UserList, self).__init__(**kwargs)
self.value = None
self.next_link = None
|
"""
Websocket based API for Home Assistant.
For more details about this component, please refer to the documentation at
https://home-assistant.io/developers/websocket_api/
"""
import asyncio
from concurrent import futures
from contextlib import suppress
from functools import partial
import json
import logging
from aiohttp import web
import voluptuous as vol
from voluptuous.humanize import humanize_error
from homeassistant.const import (
MATCH_ALL, EVENT_TIME_CHANGED, EVENT_HOMEASSISTANT_STOP,
__version__)
from homeassistant.components import frontend
from homeassistant.core import callback
from homeassistant.remote import JSONEncoder
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.service import async_get_all_descriptions
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.http.auth import validate_password
from homeassistant.components.http.const import KEY_AUTHENTICATED
from homeassistant.components.http.ban import process_wrong_login
DOMAIN = 'websocket_api'
URL = '/api/websocket'
DEPENDENCIES = ('http',)
MAX_PENDING_MSG = 512
ERR_ID_REUSE = 1
ERR_INVALID_FORMAT = 2
ERR_NOT_FOUND = 3
TYPE_AUTH = 'auth'
TYPE_AUTH_INVALID = 'auth_invalid'
TYPE_AUTH_OK = 'auth_ok'
TYPE_AUTH_REQUIRED = 'auth_required'
TYPE_CALL_SERVICE = 'call_service'
TYPE_EVENT = 'event'
TYPE_GET_CONFIG = 'get_config'
TYPE_GET_PANELS = 'get_panels'
TYPE_GET_SERVICES = 'get_services'
TYPE_GET_STATES = 'get_states'
TYPE_PING = 'ping'
TYPE_PONG = 'pong'
TYPE_RESULT = 'result'
TYPE_SUBSCRIBE_EVENTS = 'subscribe_events'
TYPE_UNSUBSCRIBE_EVENTS = 'unsubscribe_events'
_LOGGER = logging.getLogger(__name__)
JSON_DUMP = partial(json.dumps, cls=JSONEncoder)
AUTH_MESSAGE_SCHEMA = vol.Schema({
vol.Required('type'): TYPE_AUTH,
vol.Required('api_password'): str,
})
SUBSCRIBE_EVENTS_MESSAGE_SCHEMA = vol.Schema({
vol.Required('id'): cv.positive_int,
vol.Required('type'): TYPE_SUBSCRIBE_EVENTS,
vol.Optional('event_type', default=MATCH_ALL): str,
})
UNSUBSCRIBE_EVENTS_MESSAGE_SCHEMA = vol.Schema({
vol.Required('id'): cv.positive_int,
vol.Required('type'): TYPE_UNSUBSCRIBE_EVENTS,
vol.Required('subscription'): cv.positive_int,
})
CALL_SERVICE_MESSAGE_SCHEMA = vol.Schema({
vol.Required('id'): cv.positive_int,
vol.Required('type'): TYPE_CALL_SERVICE,
vol.Required('domain'): str,
vol.Required('service'): str,
vol.Optional('service_data', default=None): dict
})
GET_STATES_MESSAGE_SCHEMA = vol.Schema({
vol.Required('id'): cv.positive_int,
vol.Required('type'): TYPE_GET_STATES,
})
GET_SERVICES_MESSAGE_SCHEMA = vol.Schema({
vol.Required('id'): cv.positive_int,
vol.Required('type'): TYPE_GET_SERVICES,
})
GET_CONFIG_MESSAGE_SCHEMA = vol.Schema({
vol.Required('id'): cv.positive_int,
vol.Required('type'): TYPE_GET_CONFIG,
})
GET_PANELS_MESSAGE_SCHEMA = vol.Schema({
vol.Required('id'): cv.positive_int,
vol.Required('type'): TYPE_GET_PANELS,
})
PING_MESSAGE_SCHEMA = vol.Schema({
vol.Required('id'): cv.positive_int,
vol.Required('type'): TYPE_PING,
})
BASE_COMMAND_MESSAGE_SCHEMA = vol.Schema({
vol.Required('id'): cv.positive_int,
vol.Required('type'): vol.Any(TYPE_CALL_SERVICE,
TYPE_SUBSCRIBE_EVENTS,
TYPE_UNSUBSCRIBE_EVENTS,
TYPE_GET_STATES,
TYPE_GET_SERVICES,
TYPE_GET_CONFIG,
TYPE_GET_PANELS,
TYPE_PING)
}, extra=vol.ALLOW_EXTRA)
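# Illustrative command message that satisfies BASE_COMMAND_MESSAGE_SCHEMA (and
# SUBSCRIBE_EVENTS_MESSAGE_SCHEMA); the values are made up:
#
#     {"id": 1, "type": "subscribe_events", "event_type": "state_changed"}
#
# The "id" must be a positive integer and must strictly increase over the
# lifetime of a connection; a reused or decreasing id is rejected with
# ERR_ID_REUSE in ActiveConnection.handle().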
# Define the possible errors that occur when connections are cancelled.
# Originally, this was just asyncio.CancelledError, but issue #9546 showed
# that futures.CancelledErrors can also occur in some situations.
CANCELLATION_ERRORS = (asyncio.CancelledError, futures.CancelledError)
def auth_ok_message():
"""Return an auth_ok message."""
return {
'type': TYPE_AUTH_OK,
'ha_version': __version__,
}
def auth_required_message():
"""Return an auth_required message."""
return {
'type': TYPE_AUTH_REQUIRED,
'ha_version': __version__,
}
def auth_invalid_message(message):
"""Return an auth_invalid message."""
return {
'type': TYPE_AUTH_INVALID,
'message': message,
}
def event_message(iden, event):
"""Return an event message."""
return {
'id': iden,
'type': TYPE_EVENT,
'event': event.as_dict(),
}
def error_message(iden, code, message):
"""Return an error result message."""
return {
'id': iden,
'type': TYPE_RESULT,
'success': False,
'error': {
'code': code,
'message': message,
},
}
def pong_message(iden):
"""Return a pong message."""
return {
'id': iden,
'type': TYPE_PONG,
}
def result_message(iden, result=None):
"""Return a success result message."""
return {
'id': iden,
'type': TYPE_RESULT,
'success': True,
'result': result,
}
@asyncio.coroutine
def async_setup(hass, config):
"""Initialize the websocket API."""
hass.http.register_view(WebsocketAPIView)
return True
class WebsocketAPIView(HomeAssistantView):
"""View to serve a websockets endpoint."""
name = "websocketapi"
url = URL
requires_auth = False
@asyncio.coroutine
def get(self, request):
"""Handle an incoming websocket connection."""
# pylint: disable=no-self-use
return ActiveConnection(request.app['hass'], request).handle()
class ActiveConnection:
"""Handle an active websocket client connection."""
def __init__(self, hass, request):
"""Initialize an active connection."""
self.hass = hass
self.request = request
self.wsock = None
self.event_listeners = {}
self.to_write = asyncio.Queue(maxsize=MAX_PENDING_MSG, loop=hass.loop)
self._handle_task = None
self._writer_task = None
def debug(self, message1, message2=''):
"""Print a debug message."""
_LOGGER.debug("WS %s: %s %s", id(self.wsock), message1, message2)
def log_error(self, message1, message2=''):
"""Print an error message."""
_LOGGER.error("WS %s: %s %s", id(self.wsock), message1, message2)
@asyncio.coroutine
def _writer(self):
"""Write outgoing messages."""
# Exceptions if Socket disconnected or cancelled by connection handler
with suppress(RuntimeError, *CANCELLATION_ERRORS):
while not self.wsock.closed:
message = yield from self.to_write.get()
if message is None:
break
self.debug("Sending", message)
yield from self.wsock.send_json(message, dumps=JSON_DUMP)
@callback
def send_message_outside(self, message):
"""Send a message to the client outside of the main task.
Closes connection if the client is not reading the messages.
Async friendly.
"""
try:
self.to_write.put_nowait(message)
except asyncio.QueueFull:
self.log_error("Client exceeded max pending messages [2]:",
MAX_PENDING_MSG)
self.cancel()
@callback
def cancel(self):
"""Cancel the connection."""
self._handle_task.cancel()
self._writer_task.cancel()
@asyncio.coroutine
def handle(self):
"""Handle the websocket connection."""
request = self.request
wsock = self.wsock = web.WebSocketResponse(heartbeat=55)
yield from wsock.prepare(request)
self.debug("Connected")
# Get a reference to current task so we can cancel our connection
self._handle_task = asyncio.Task.current_task(loop=self.hass.loop)
@callback
def handle_hass_stop(event):
"""Cancel this connection."""
self.cancel()
unsub_stop = self.hass.bus.async_listen(
EVENT_HOMEASSISTANT_STOP, handle_hass_stop)
self._writer_task = self.hass.async_add_job(self._writer())
final_message = None
msg = None
authenticated = False
try:
if request[KEY_AUTHENTICATED]:
authenticated = True
else:
yield from self.wsock.send_json(auth_required_message())
msg = yield from wsock.receive_json()
msg = AUTH_MESSAGE_SCHEMA(msg)
if validate_password(request, msg['api_password']):
authenticated = True
else:
self.debug("Invalid password")
yield from self.wsock.send_json(
auth_invalid_message('Invalid password'))
if not authenticated:
yield from process_wrong_login(request)
return wsock
yield from self.wsock.send_json(auth_ok_message())
# ---------- AUTH PHASE OVER ----------
msg = yield from wsock.receive_json()
last_id = 0
while msg:
self.debug("Received", msg)
msg = BASE_COMMAND_MESSAGE_SCHEMA(msg)
cur_id = msg['id']
if cur_id <= last_id:
self.to_write.put_nowait(error_message(
cur_id, ERR_ID_REUSE,
'Identifier values have to increase.'))
else:
handler_name = 'handle_{}'.format(msg['type'])
getattr(self, handler_name)(msg)
last_id = cur_id
msg = yield from wsock.receive_json()
except vol.Invalid as err:
error_msg = "Message incorrectly formatted: "
if msg:
error_msg += humanize_error(msg, err)
else:
error_msg += str(err)
self.log_error(error_msg)
if not authenticated:
final_message = auth_invalid_message(error_msg)
else:
if isinstance(msg, dict):
iden = msg.get('id')
else:
iden = None
final_message = error_message(
iden, ERR_INVALID_FORMAT, error_msg)
except TypeError as err:
if wsock.closed:
self.debug("Connection closed by client")
else:
_LOGGER.exception("Unexpected TypeError: %s", msg)
except ValueError as err:
msg = "Received invalid JSON"
value = getattr(err, 'doc', None) # Py3.5+ only
if value:
msg += ': {}'.format(value)
self.log_error(msg)
self._writer_task.cancel()
except CANCELLATION_ERRORS:
self.debug("Connection cancelled by server")
except asyncio.QueueFull:
self.log_error("Client exceeded max pending messages [1]:",
MAX_PENDING_MSG)
self._writer_task.cancel()
except Exception: # pylint: disable=broad-except
error = "Unexpected error inside websocket API. "
if msg is not None:
error += str(msg)
_LOGGER.exception(error)
finally:
unsub_stop()
for unsub in self.event_listeners.values():
unsub()
try:
if final_message is not None:
self.to_write.put_nowait(final_message)
self.to_write.put_nowait(None)
# Make sure all error messages are written before closing
yield from self._writer_task
except asyncio.QueueFull:
self._writer_task.cancel()
yield from wsock.close()
self.debug("Closed connection")
return wsock
def handle_subscribe_events(self, msg):
"""Handle subscribe events command.
Async friendly.
"""
msg = SUBSCRIBE_EVENTS_MESSAGE_SCHEMA(msg)
@asyncio.coroutine
def forward_events(event):
"""Forward events to websocket."""
if event.event_type == EVENT_TIME_CHANGED:
return
self.send_message_outside(event_message(msg['id'], event))
self.event_listeners[msg['id']] = self.hass.bus.async_listen(
msg['event_type'], forward_events)
self.to_write.put_nowait(result_message(msg['id']))
def handle_unsubscribe_events(self, msg):
"""Handle unsubscribe events command.
Async friendly.
"""
msg = UNSUBSCRIBE_EVENTS_MESSAGE_SCHEMA(msg)
subscription = msg['subscription']
if subscription in self.event_listeners:
self.event_listeners.pop(subscription)()
self.to_write.put_nowait(result_message(msg['id']))
else:
self.to_write.put_nowait(error_message(
msg['id'], ERR_NOT_FOUND,
'Subscription not found.'))
def handle_call_service(self, msg):
"""Handle call service command.
Async friendly.
"""
msg = CALL_SERVICE_MESSAGE_SCHEMA(msg)
@asyncio.coroutine
def call_service_helper(msg):
"""Call a service and fire complete message."""
yield from self.hass.services.async_call(
msg['domain'], msg['service'], msg['service_data'], True)
self.send_message_outside(result_message(msg['id']))
self.hass.async_add_job(call_service_helper(msg))
def handle_get_states(self, msg):
"""Handle get states command.
Async friendly.
"""
msg = GET_STATES_MESSAGE_SCHEMA(msg)
self.to_write.put_nowait(result_message(
msg['id'], self.hass.states.async_all()))
def handle_get_services(self, msg):
"""Handle get services command.
Async friendly.
"""
msg = GET_SERVICES_MESSAGE_SCHEMA(msg)
@asyncio.coroutine
def get_services_helper(msg):
"""Get available services and fire complete message."""
descriptions = yield from async_get_all_descriptions(self.hass)
self.send_message_outside(result_message(msg['id'], descriptions))
self.hass.async_add_job(get_services_helper(msg))
def handle_get_config(self, msg):
"""Handle get config command.
Async friendly.
"""
msg = GET_CONFIG_MESSAGE_SCHEMA(msg)
self.to_write.put_nowait(result_message(
msg['id'], self.hass.config.as_dict()))
def handle_get_panels(self, msg):
"""Handle get panels command.
Async friendly.
"""
msg = GET_PANELS_MESSAGE_SCHEMA(msg)
panels = {
panel:
self.hass.data[frontend.DATA_PANELS][panel].to_response(
self.hass, self.request)
for panel in self.hass.data[frontend.DATA_PANELS]}
self.to_write.put_nowait(result_message(
msg['id'], panels))
def handle_ping(self, msg):
"""Handle ping command.
Async friendly.
"""
self.to_write.put_nowait(pong_message(msg['id']))
|
# -------------------------------------------------------------------------
#
# Part of the CodeChecker project, under the Apache License v2.0 with
# LLVM Exceptions. See LICENSE for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# -------------------------------------------------------------------------
"""
Main CodeChecker script.
"""
import argparse
from importlib import machinery
import json
import os
import signal
import sys
def add_subcommand(subparsers, sub_cmd, cmd_module_path):
"""
Load the subcommand module and then add the subcommand to the available
subcommands in the given subparsers collection.
    subparsers has to be the return value of the add_subparsers() method on an
argparse.ArgumentParser.
"""
m_path, m_name = os.path.split(cmd_module_path)
module_name = os.path.splitext(m_name)[0]
cc_bin = os.path.dirname(os.path.realpath(__file__))
full_module_path = os.path.join(cc_bin, '..', 'lib', 'python3', m_path)
# Load the module named as the argument.
cmd_spec = machinery.PathFinder().find_spec(module_name,
[full_module_path])
command_module = cmd_spec.loader.load_module(module_name)
# Now that the module is loaded, construct an ArgumentParser for it.
sc_parser = subparsers.add_parser(
sub_cmd, **command_module.get_argparser_ctor_args())
# Run the method which adds the arguments to the subcommand's handler.
command_module.add_arguments_to_parser(sc_parser)
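# Illustrative call (the module path below is hypothetical, only to show the
# expected shape of the arguments):
#
#     add_subcommand(subparsers, "check", "codechecker_analyzer/cmd/check.py")
#
# The module is looked up under <bin>/../lib/python3/<module path>, and its
# get_argparser_ctor_args() / add_arguments_to_parser() hooks are used to
# register the "check" subcommand on the given subparsers collection.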
def main(subcommands=None):
"""
CodeChecker main command line.
"""
def signal_handler(signum, frame):
"""
Without this handler the PostgreSQL
server does not terminate at signal.
"""
sys.exit(128 + signum)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
try:
parser = argparse.ArgumentParser(
prog="CodeChecker",
formatter_class=argparse.RawDescriptionHelpFormatter,
description="""Run the CodeChecker sourcecode analyzer framework.
Please specify a subcommand to access individual features.""",
epilog="""Example scenario: Analyzing, and storing results
------------------------------------------------
Start the server where the results will be stored and can be viewed
after the analysis is done:
CodeChecker server
Analyze a project with default settings:
CodeChecker check -b "cd ~/myproject && make" -o "~/results"
Store the analyzer results to the server:
CodeChecker store "~/results" -n myproject
The results can be viewed:
* In a web browser: http://localhost:8001
* In the command line:
CodeChecker cmd results myproject
Example scenario: Analyzing, and printing results to Terminal (no storage)
--------------------------------------------------------------------------
In this case, no database is used, and the results are printed on the standard
output.
CodeChecker check -b "cd ~/myproject && make\" """)
subparsers = parser.add_subparsers(help='commands')
if subcommands:
# Try to check if the user has already given us a subcommand to
            # execute. If so, don't load every available part of CodeChecker
# to ensure a more optimised run.
if len(sys.argv) > 1:
first_command = sys.argv[1]
if first_command in subcommands:
# Consider only the given command as an available one.
subcommands = {first_command: subcommands[first_command]}
for subcommand in subcommands:
try:
add_subcommand(subparsers, subcommand,
subcommands[subcommand])
except (IOError, ImportError):
print("Couldn't import module for subcommand '" +
subcommand + "'... ignoring.")
import traceback
traceback.print_exc(file=sys.stdout)
args = parser.parse_args()
        # Call the handler function to process configuration files. If any
        # configuration options are available in one of the given files, then
        # extend the system argument list with these options and try to parse
        # the argument list again to validate it.
if 'func_process_config_file' in args:
if len(sys.argv) > 1:
called_sub_command = sys.argv[1]
cfg_args = args.func_process_config_file(args, called_sub_command)
if cfg_args:
# Expand environment variables in the arguments.
cfg_args = [os.path.expandvars(cfg) for cfg in cfg_args]
# Replace --config option with the options inside the config
# file.
cfg_idx = sys.argv.index("--config")
sys.argv = sys.argv[:cfg_idx] + cfg_args + \
sys.argv[cfg_idx + 2:]
args = parser.parse_args()
try:
args.func(args)
except AttributeError:
parser.print_help()
except KeyboardInterrupt as kb_err:
print(str(kb_err))
print("Interrupted by user...")
sys.exit(1)
    # Handle all exceptions, but print the stack trace. This is needed because
    # atexit does not run correctly after an unhandled exception, which would
    # leave the servers running when the script exits.
except Exception:
import traceback
traceback.print_exc(file=sys.stdout)
sys.exit(1)
# -----------------------------------------------------------------------------
if __name__ == "__main__":
# Load the available CodeChecker subcommands.
# This list is generated dynamically by scripts/build_package.py, and is
# always meant to be available alongside the CodeChecker.py.
commands_cfg = os.path.join(os.path.dirname(__file__), "commands.json")
with open(commands_cfg, encoding="utf-8", errors="ignore") as cfg_file:
commands = json.load(cfg_file)
main(commands)
|
import pytest
import numpy as np
from glue.core import roi
from cubeviz.utils.contour import ContourSettings
from cubeviz.tools.moment_maps import MomentMapsGUI
@pytest.fixture(scope='module')
def moment_maps_gui(cubeviz_layout):
cl = cubeviz_layout
mm = MomentMapsGUI(cl._data, cl.session.data_collection, parent=cl)
return mm
def test_stats_box_without_subset(cubeviz_layout):
"""
Tests the stat box underneath the ImageViewer when it is the full spectrum
"""
cl_viewer = cubeviz_layout.split_views[1]._widget
cl_viewer._subset = None
data = cl_viewer._data[0][cl_viewer.current_component_id][cl_viewer._slice_index]
wave = cl_viewer.cubeviz_layout.get_wavelength(cl_viewer.slice_index)
data_wave = cl_viewer.cubeviz_unit.convert_value(data, wave=wave)
assert data_wave is not None
results = (np.nanmin(data_wave), np.nanmax(data_wave), np.median(data_wave), data_wave.mean(), data_wave.std())
results_string = r"min={:.4}, max={:.4}, median={:.4}, μ={:.4}, σ={:.4}".format(*results)
assert results_string == cl_viewer.parent().stats_text.text()
def test_stats_box_with_subset(cubeviz_layout):
"""
Tests the stat box underneath the ImageViewer when there is an ROI
"""
cl_viewer = cubeviz_layout.split_views[1]._widget
# Create a subset (ROI) if there is none
cl_viewer.apply_roi(roi.CircularROI(xc=6, yc=10, radius=3))
assert cl_viewer._subset is not None
mask = cl_viewer._subset.to_mask()[cl_viewer._slice_index]
data = cl_viewer._data[0][cl_viewer.current_component_id][cl_viewer._slice_index][mask]
wave = cl_viewer.cubeviz_layout.get_wavelength(cl_viewer.slice_index)
data_wave = cl_viewer.cubeviz_unit.convert_value(data, wave=wave)
assert data_wave is not None
results = (np.nanmin(data_wave), np.nanmax(data_wave), np.median(data_wave), data_wave.mean(), data_wave.std())
results_string = r"min={:.4}, max={:.4}, median={:.4}, μ={:.4}, σ={:.4}".format(*results)
assert results_string == cl_viewer.parent().stats_text.text()
# Remove the ROI/subset
dc = cubeviz_layout.session.application.data_collection
dc.remove_subset_group(dc.subset_groups[0])
def test_overlay(moment_maps_gui, cubeviz_layout):
# Only "No Overlay" option available in combobox
assert cubeviz_layout._overlay_controller._overlay_image_combo.count() == 1
# Create moment map GUI
mm = moment_maps_gui
mm.display()
mm.order_combobox.setCurrentIndex(0)
mm.data_combobox.setCurrentIndex(0)
# Call calculate function and get result
mm.calculateButton.click()
# Second option in combobox for moment map overlay
assert cubeviz_layout._overlay_controller._overlay_image_combo.count() == 2
cl_viewer = cubeviz_layout.split_views[1]._widget
assert len(cl_viewer.axes.images) == 1
# Set overlay and check changing of settings works
cubeviz_layout._overlay_controller._overlay_image_combo.setCurrentIndex(1)
cubeviz_layout._overlay_controller._overlay_colormap_combo.setCurrentIndex(10)
cubeviz_layout._overlay_controller._alpha_slider.setValue(50)
assert len(cl_viewer.axes.images) == 2
# Return to "No Overlay"
cubeviz_layout._overlay_controller._overlay_image_combo.setCurrentIndex(0)
assert len(cl_viewer.axes.images) == 1
# Remove the moment map data set
for helper in cubeviz_layout._viewer_combo_helpers:
helper.remove_data(cubeviz_layout._data.container_2d)
for cid in cubeviz_layout._data.container_2d.component_ids():
cubeviz_layout._data.container_2d.remove_component(cid)
del cubeviz_layout._data.container_2d
dc = cubeviz_layout.session.data_collection
dc.remove(dc[1])
def check_viewer_title(cubeviz_layout, viewer_idx, component_idx):
viewer = cubeviz_layout.cube_views[viewer_idx]._widget
combo = cubeviz_layout.get_viewer_combo(viewer_idx)
combo.setCurrentIndex(component_idx)
component_id = cubeviz_layout.data_components[component_idx]
component = cubeviz_layout._data.get_component(component_id)
title = '{} [{}]'.format(component_id, component.units)
assert viewer.axes.title.get_text() == title
def test_viewer_title(cubeviz_layout):
"""
Test whether viewer titles accurately reflect data component and flux unit
"""
for idx in range(len(cubeviz_layout.cube_views[1:])):
combo = cubeviz_layout.get_viewer_combo(idx)
current_idx = combo.currentIndex()
check_viewer_title(cubeviz_layout, idx, 0)
check_viewer_title(cubeviz_layout, idx, 1)
# Reset the combo when we're done
combo = cubeviz_layout.get_viewer_combo(idx)
combo.setCurrentIndex(current_idx)
def test_viewer_flux_units_change(cubeviz_layout):
"""
Test whether viewer titles update appropriately with flux unit changes
"""
# Test after flux units change
specviz = cubeviz_layout.specviz._widget
current_units = specviz.hub.plot_widget.data_unit
specviz.hub.plot_widget.set_data_unit('mJy')
for idx in range(len(cubeviz_layout.cube_views)):
combo = cubeviz_layout.get_viewer_combo(idx)
current_idx = combo.currentIndex()
check_viewer_title(cubeviz_layout, idx, 0)
# Check whether the flux units associated with this viewer is correct.
# This is reflected in the mouseover display of flux values
assert cubeviz_layout.cube_views[idx]._widget.cubeviz_unit.unit == 'mJy'
# Reset the combo when we're done
combo = cubeviz_layout.get_viewer_combo(idx)
combo.setCurrentIndex(current_idx)
# Restore original units
specviz.hub.plot_widget.set_data_unit(current_units)
def test_viewer_wavelength_units_change(cubeviz_layout):
"""
Test whether image viewer wavelength units are updated appropriately
These units are used by the viewer to populate the viewer status bar during
mouseover.
"""
current_units = str(cubeviz_layout._wavelength_controller.current_units)
for viewer in cubeviz_layout.cube_views:
assert viewer._widget._wavelength_units == current_units
specviz = cubeviz_layout.specviz._widget
specviz.hub.plot_widget.set_spectral_axis_unit('Angstrom')
for viewer in cubeviz_layout.cube_views:
assert viewer._widget._wavelength_units == 'Angstrom'
# Restore original wavelength units
specviz.hub.plot_widget.set_spectral_axis_unit(current_units)
def test_default_contour(cubeviz_layout):
"""
Make sure that default contour works
"""
# Keep track of children in viewer to check that number increases later
cl_viewer = cubeviz_layout.split_views[1]._widget
cl_viewer_children = len(cl_viewer.axes.get_children())
# Create a subset (ROI) if there is none
cl_viewer.apply_roi(roi.CircularROI(xc=6, yc=10, radius=3))
cl_viewer.default_contour()
assert len(cl_viewer.axes.get_children()) > cl_viewer_children
assert cl_viewer.is_contour_active
cl_viewer.remove_contour()
# Remove the ROI/subset
dc = cubeviz_layout.session.application.data_collection
dc.remove_subset_group(dc.subset_groups[0])
assert cl_viewer.is_contour_active == False
assert len(cl_viewer.axes.get_children()) == cl_viewer_children
def test_contour_preview(cubeviz_layout):
"""
Create contour preview from code
"""
# Keep track of children in viewer to check that number increases later
cl_viewer = cubeviz_layout.split_views[1]._widget
cl_viewer_children = len(cl_viewer.axes.get_children())
preview_settings = ContourSettings(cl_viewer)
preview_settings.default_options()
cl_viewer.set_contour_preview(preview_settings)
assert cl_viewer.is_contour_preview_active
assert cl_viewer.contour_preview_settings == preview_settings
assert len(cl_viewer.axes.get_children()) > cl_viewer_children
cl_viewer.end_contour_preview()
assert cl_viewer.is_contour_preview_active == False
assert cl_viewer.contour_preview_settings is None
assert len(cl_viewer.axes.get_children()) == cl_viewer_children
def test_change_contour_settings(cubeviz_layout):
"""
Create default contour, open settings menu, change settings, press preview, press ok
"""
# Keep track of children in viewer to check that number increases later
cl_viewer = cubeviz_layout.split_views[1]._widget
cl_viewer_children = len(cl_viewer.axes.get_children())
cl_viewer.default_contour()
assert cl_viewer.is_contour_active
assert len(cl_viewer.axes.get_children()) > cl_viewer_children
contour_dialog = cl_viewer.edit_contour_settings()
# Set settings in dialog box
contour_dialog.contour_label_checkBox.setChecked(True)
contour_dialog.font_size_input.setText("12")
contour_dialog.custom_spacing_checkBox.setChecked(True)
contour_dialog.spacing_input.setText("1")
contour_dialog.vmax_checkBox.setChecked(True)
contour_dialog.vmax_input.setText("2")
contour_dialog.vmin_checkBox.setChecked(True)
contour_dialog.vmin_input.setText("1")
contour_dialog.previewButton.click()
assert cl_viewer.is_contour_preview_active
contour_dialog.okButton.click()
# Check to make sure setting changes went through
assert contour_dialog.font_size_input.text() == "12"
assert contour_dialog.spacing_input.text() == "1"
assert contour_dialog.vmax_input.text() == "2"
assert contour_dialog.vmin_input.text() == "1"
assert cl_viewer.is_contour_preview_active == False
cl_viewer.remove_contour()
assert cl_viewer.is_contour_active == False
assert len(cl_viewer.axes.get_children()) == cl_viewer_children
|
from context import dbconnect
def mockup_data(connection):
Query = dbconnect.models.Query
create_sql = "create table mockdata(id integer primary key, name text not null, value integer not null)"
insert_sql = "insert into mockdata(id, name, value) values (?, ?, ?)"
queries = []
queries.append(Query(create_sql, ()))
queries.append(Query(insert_sql, (0, "A", 3)))
queries.append(Query(insert_sql, (1, "A", 2)))
queries.append(Query(insert_sql, (2, "B", 6)))
queries.append(Query(insert_sql, (3, "B", 5)))
queries.append(Query(insert_sql, (4, "C", 9)))
connection.batch_update(queries, lambda res, num, c: None)
def setup():
c = dbconnect.connection.create_connection(dbconnect.connection.SQLITE3_ID, ":memory:")
mockup_data(c)
return c
def test_select():
connection = setup()
try:
name = "Get_A"
sql = "select id, name from mockdata where name = ? "
params = ("A",)
query = dbconnect.models.NamedQuery(name, sql, params)
result = connection.select(query)
assert result.rowcount == 2
assert len(result.rows) == result.rowcount
assert result.query.sql == sql
assert result.query.params == params
assert result.query.name == name
assert result.start_time is not None
assert result.end_time is not None
assert result.duration is not None
finally:
connection.close()
def test_update():
connection = setup()
try:
name = "Update_2"
sql = "update mockdata set value = 33 where name = ? "
params = ("C",)
query = dbconnect.models.NamedQuery(name, sql, params)
result = connection.update(query)
assert result.rowcount == 1
assert len(result.rows) == 0
assert result.query.sql == sql
assert result.query.params == params
assert result.query.name == name
assert result.start_time is not None
assert result.end_time is not None
assert result.duration is not None
check_value(connection, 4, 33)
finally:
connection.close()
def check_value(connection, mockid, expected_value):
sql = "select value from mockdata where id = ?"
params = (mockid,)
query = dbconnect.models.Query(sql, params)
result = connection.select(query)
print(result.rows)
actual_value = result.get_value(0, 0)
assert expected_value == actual_value
|
# -*- coding: utf-8 -*-
# Plot the per-cluster feature radar chart; this code follows on from KMeans_cluster.py.
def print_cluster_result(data, kmodel):
    import pandas as pd
    # Print a simple summary of the clustering result.
    r1 = pd.Series(kmodel.labels_).value_counts()  # count the number of samples in each cluster
    r2 = pd.DataFrame(kmodel.cluster_centers_)  # collect the cluster centers
    r = pd.concat([r2, r1], axis=1)  # concatenate horizontally (axis=0 would be vertical): centers plus per-cluster counts
    r.columns = list(data.columns) + [u'类别数目']  # rename the header; the last column ("类别数目") is the per-cluster sample count
    print(r)
    # print(kmodel.cluster_centers_)  # inspect the cluster centers
    # print('labels_=', kmodel.labels_)  # inspect the cluster label assigned to each sample
def plot_cluster(data, kmodel):
    import numpy as np
    import matplotlib.pyplot as plt
    plt.rcParams['font.sans-serif'] = ['SimHei']  # needed to render the Chinese labels correctly
    # plt.rcParams['axes.unicode_minus'] = False  # needed to render minus signs correctly
    labels = data.columns  # one axis label per feature
    k = 5  # number of feature axes
    plot_data = kmodel.cluster_centers_
    color = ['b', 'g', 'r', 'c', 'y']  # one colour per cluster
    angles = np.linspace(0, 2 * np.pi, k, endpoint=False)
    plot_data = np.concatenate((plot_data, plot_data[:, [0]]), axis=1)  # close the polygon
    angles = np.concatenate((angles, [angles[0]]))  # close the polygon
    fig = plt.figure()
    ax = fig.add_subplot(111, polar=True)  # note the polar projection
    for i in range(len(plot_data)):
        ax.plot(angles, plot_data[i], 'o-', color=color[i],
                label=u'客户群' + str(i), linewidth=2)  # one line per cluster center ("客户群" = customer group)
    ax.set_rgrids(np.arange(0.01, 3.5, 0.5),
                  np.arange(-1, 2.5, 0.5), fontproperties="SimHei")
    ax.set_thetagrids(angles[:-1] * 180 / np.pi, labels, fontproperties="SimHei")  # label count must match angle count, so drop the duplicated closing angle
plt.legend(loc=4)
plt.show()
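# Minimal usage sketch (assumptions: pandas, scikit-learn and matplotlib are
# available, the DataFrame has 5 feature columns and KMeans was fitted with 5
# clusters, matching the hard-coded k = 5 and the 5 colours above):
#
#     import numpy as np
#     import pandas as pd
#     from sklearn.cluster import KMeans
#
#     data = pd.DataFrame(np.random.rand(200, 5), columns=list("ABCDE"))
#     kmodel = KMeans(n_clusters=5, n_init=10).fit(data)
#     print_cluster_result(data, kmodel)
#     plot_cluster(data, kmodel)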
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=too-few-public-methods, protected-access
import json
from azure.core.exceptions import ClientAuthenticationError, ResourceExistsError, ResourceNotFoundError, \
HttpResponseError
from ._arg_browser import AAZArgBrowser
from ._base import AAZUndefined, AAZBaseValue, AAZBaseType
from ._content_builder import AAZContentBuilder
from ._field_type import AAZSimpleType
try:
from urllib import quote # type: ignore
except ImportError:
from urllib.parse import quote # type: ignore
class AAZOperation:
def __init__(self, ctx):
self.ctx = ctx
class AAZHttpOperation(AAZOperation):
""" Http Operation
"""
CLIENT_TYPE = None # http client registered, its value should be in the keys of aaz._client.registered_clients
def __init__(self, ctx):
super().__init__(ctx)
self.client = ctx.get_http_client(self.CLIENT_TYPE)
# common http errors by status code
self.error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
}
def __call__(self, *args, **kwargs):
raise NotImplementedError()
@staticmethod
def serialize_url_param(name, value, required=True, skip_quote=False, **kwargs): # pylint: disable=unused-argument
if isinstance(value, AAZBaseValue):
value = value.to_serialized_data()
if value == AAZUndefined or value == None: # noqa: E711, pylint: disable=singleton-comparison
if required:
raise ValueError(f"url parameter {name} is required.")
return {} # return empty dict
if isinstance(value, (list, dict)):
raise NotImplementedError(f"not support type {type(value)} for url parameter")
if isinstance(value, bool):
value = json.dumps(value)
if skip_quote is True:
value = str(value)
else:
value = quote(str(value), safe='')
return {name: value}
@staticmethod
def serialize_query_param(name, value, required=False, skip_quote=False, **kwargs):
if isinstance(value, AAZBaseValue):
value = value.to_serialized_data()
if value == AAZUndefined:
if required:
raise ValueError(f"query parameter {name} is required.")
return {}
def process_element(e):
if isinstance(e, (list, dict)):
raise NotImplementedError(f"Not support {type(e)} type element")
if isinstance(e, bool):
e = json.dumps(e)
elif e is None:
e = ""
if skip_quote is True:
e = str(e)
else:
e = quote(str(e), safe='')
return e
if isinstance(value, list):
value = [process_element(v)
for v in value]
# Determines the format of the array. Possible formats are:
# csv - comma separated values 'foo,bar'
# ssv - space separated values 'foo bar'
# tsv - tab separated values 'foo\tbar'
# pipes - pipe separated values 'foo|bar'
# default is csv format
div = kwargs.get('div', ',')
if div:
value = div.join(value)
value = str(value)
else:
# Not a list
value = process_element(value)
return {name: value}
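    # Illustrative results of serialize_query_param (the parameter names and
    # values below are made up):
    #     serialize_query_param("api-version", "2022-01-01")  ->  {"api-version": "2022-01-01"}
    #     serialize_query_param("names", ["a", "b"])           ->  {"names": "a,b"}   # default csv join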
@staticmethod
def serialize_header_param(name, value, required=False, **kwargs): # pylint: disable=unused-argument
if isinstance(value, AAZBaseValue):
value = value.to_serialized_data()
if value == AAZUndefined:
if required:
raise ValueError(f"query parameter {name} is required.")
return {}
def process_element(e):
if isinstance(e, (list, dict)):
raise NotImplementedError(f"Not support {type(e)} type element")
if isinstance(e, bool):
e = json.dumps(e)
elif e is None:
e = ""
return e
if isinstance(value, list):
value = [process_element(v) for v in value]
else:
value = process_element(value)
value = str(value)
return {name: value}
@staticmethod
def serialize_content(value, required=False):
def processor(schema, result):
if schema._flags.get('read_only', False):
# ignore read_only fields when serialize content
return AAZUndefined
return result
if isinstance(value, AAZBaseValue):
value = value.to_serialized_data(processor=processor)
if value == AAZUndefined or value == None: # noqa: E711, pylint: disable=singleton-comparison
if required:
raise ValueError("content is required.")
return None
return value
@staticmethod
def deserialize_http_content(session):
from azure.core.pipeline.policies import ContentDecodePolicy
if ContentDecodePolicy.CONTEXT_NAME in session.context:
return session.context[ContentDecodePolicy.CONTEXT_NAME]
if session.context.options['stream']:
# Cannot handle stream response now
raise NotImplementedError()
raise ValueError("This pipeline didn't have the ContentDecode Policy; can't deserialize")
@staticmethod
def new_content_builder(arg_value, value=None, typ=None, typ_kwargs=None):
""" Create a Content Builder
"""
assert isinstance(arg_value, AAZBaseValue)
arg_data = arg_value.to_serialized_data()
if value is None:
assert issubclass(typ, AAZBaseType)
schema = typ(**typ_kwargs) if typ_kwargs else typ()
if isinstance(schema, AAZSimpleType):
value = typ._ValueCls(
schema=schema,
data=schema.process_data(arg_data)
)
else:
value = typ._ValueCls(
schema=schema,
data=schema.process_data(None)
)
else:
assert isinstance(value, AAZBaseValue)
builder = AAZContentBuilder(
values=[value],
args=[AAZArgBrowser(arg_value=arg_value, arg_data=arg_data)]
)
return value, builder
# properties
@property
def url(self):
return None
@property
def method(self):
return None
@property
def url_parameters(self):
return {}
@property
def query_parameters(self):
return {}
@property
def header_parameters(self):
return {}
@property
def content(self):
return None
@property
def form_content(self):
return None
@property
def stream_content(self):
return None
@property
def error_format(self):
# value should be in the keys of aaz._error_format.registered_error_formats
return None
def make_request(self):
""" Make http request based on the properties.
"""
if self.method in ("GET", ):
# support making request for next link
if self.ctx.next_link:
url = self.ctx.next_link
query_parameters = {}
else:
url = self.url
query_parameters = self.query_parameters
request = self.client._request(
self.method, url, query_parameters, self.header_parameters,
self.content, self.form_content, None)
elif self.method in ("DELETE", "MERGE", "OPTIONS"):
request = self.client._request(
self.method, self.url, self.query_parameters, self.header_parameters,
self.content, self.form_content, None)
elif self.method in ("PUT", "POST", "HEAD", "PATCH",):
request = self.client._request(
self.method, self.url, self.query_parameters, self.header_parameters,
self.content, self.form_content, self.stream_content)
else:
raise ValueError(f"Invalid request method {self.method}")
return request
def on_error(self, response):
""" handle errors in response
"""
# raise common http errors
error_type = self.error_map.get(response.status_code)
if error_type:
raise error_type(response=response)
# raise HttpResponseError
error_format = self.ctx.get_error_format(self.error_format)
raise HttpResponseError(response=response, error_format=error_format)
class AAZJsonInstanceUpdateOperation(AAZOperation):
""" Instance Update Operation
"""
def __call__(self, *args, **kwargs):
raise NotImplementedError()
@staticmethod
def new_content_builder(arg_value, value=None, typ=None, typ_kwargs=None):
""" Create a Content Builder
"""
assert isinstance(arg_value, AAZBaseValue)
arg_data = arg_value.to_serialized_data()
if value is None:
assert issubclass(typ, AAZBaseType)
schema = typ(**typ_kwargs) if typ_kwargs else typ()
if isinstance(schema, AAZSimpleType):
value = typ._ValueCls(
schema=schema,
data=schema.process_data(arg_data)
)
else:
value = typ._ValueCls(
schema=schema,
data=schema.process_data(None)
)
else:
assert isinstance(value, AAZBaseValue)
builder = AAZContentBuilder(
values=[value],
args=[AAZArgBrowser(arg_value=arg_value, arg_data=arg_data)]
)
return value, builder
class AAZGenericInstanceUpdateOperation(AAZOperation):
""" Generic Instance Update Operation
"""
def __call__(self, *args, **kwargs):
raise NotImplementedError()
@staticmethod
def _update_instance_by_generic(instance, args): # pylint: disable=unused-argument
# TODO: implement generic instance update
return instance
|
from PySide6 import QtUiTools
Ui_VolView, VolViewBase = QtUiTools.loadUiType("volview.ui")
class VolViewWindow(VolViewBase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.ui = Ui_VolView()
self.ui.setupUi(self)
__all__ = [
"VolViewWindow",
]
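# Minimal launch sketch (illustrative; assumes "volview.ui" is present in the
# working directory, a display is available, and the installed PySide6 build
# ships QtUiTools.loadUiType):
#
#     import sys
#     from PySide6 import QtWidgets
#
#     app = QtWidgets.QApplication(sys.argv)
#     window = VolViewWindow()
#     window.show()
#     sys.exit(app.exec())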
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import importlib
import os
import openstack
from openstack import _log
from openstack import utils
from otcextensions.sdk.compute.v2 import server
from otcextensions.common import exc
_logger = _log.setup_logging('openstack')
__all__ = [
'register_otc_extensions',
]
_DOC_TEMPLATE = (
":class:`{class_name}` for {service_type} aka project")
_PROXY_TEMPLATE = """Proxy for {service_type} aka project
This proxy object could be an instance of
{class_doc_strings}
depending on client configuration and which version of the service is
found remotely on the cloud.
"""
# List OTC services here
# Generally it is possible to iterate over known endpoints, but some
# services require injection of AK/SK
OTC_SERVICES = {
'anti_ddos': {
'service_type': 'anti_ddos',
'append_project_id': True,
'endpoint_service_type': 'antiddos',
},
'auto_scaling': {
'service_type': 'auto_scaling',
'endpoint_service_type': 'as',
'append_project_id': True,
},
'cce': {
'service_type': 'cce',
'endpoint_service_type': 'ccev2.0',
},
'ces': {
'service_type': 'ces',
'append_project_id': True,
},
'cts': {
'service_type': 'cts',
},
'css': {
'service_type': 'css',
},
'dcaas': {
'service_type': 'dcaas',
},
'dcs': {
'service_type': 'dcs',
'append_project_id': True,
},
'dds': {
'service_type': 'dds',
},
'deh': {
'service_type': 'deh',
'append_project_id': True,
},
'dis': {
'service_type': 'dis',
'endpoint_service_type': 'disv2'
},
'dms': {
'service_type': 'dms',
'endpoint_service_type': 'dms',
'append_project_id': True,
},
'dns': {
'service_type': 'dns',
'replace_system': True,
},
'dws': {
'service_type': 'dws',
'endpoint_service_type': 'dwsv1'
},
'ecs': {
'service_type': 'ecs',
},
'kms': {
'service_type': 'kms',
'append_project_id': True,
},
'lts': {
'service_type': 'lts'
},
'maas': {
'service_type': 'maas',
'append_project_id': True,
},
'mrs': {
'service_type': 'mrs'
},
'nat': {
'service_type': 'nat',
},
'obs': {
'service_type': 'obs',
'require_ak': True,
'endpoint_service_type': 'object',
'set_endpoint_override': True
},
'plas': {
'service_type': 'plas'
},
'rds': {
'service_type': 'rds',
# 'additional_headers': {'content-type': 'application/json'},
'endpoint_service_type': 'rdsv3',
'append_project_id': True,
},
'sdrs': {
'service_type': 'sdrs'
},
'smn': {
'service_type': 'smn',
'append_project_id': True
},
'volume_backup': {
'service_type': 'volume_backup',
'append_project_id': True,
'endpoint_service_type': 'vbs',
},
'waf': {
'service_type': 'waf',
'append_project_id': True,
}
}
def _get_descriptor(service_name):
"""Find ServiceDescriptor class by the service name
    and instantiate it
"""
service = OTC_SERVICES.get(service_name, None)
if service:
service_type = service['service_type']
desc_class = _find_service_description_class(service_type)
# _logger.debug('descriptor class %s' % desc_class)
descriptor_args = {
'service_type': service.get('endpoint_service_type', service_type),
'aliases': [service.get('service_type', service_type)]
}
if not desc_class.supported_versions:
doc = _DOC_TEMPLATE.format(
class_name="{service_type} Proxy".format(
service_type=service_type),
**service)
elif len(desc_class.supported_versions) == 1:
supported_version = list(
desc_class.supported_versions.keys())[0]
doc = _DOC_TEMPLATE.format(
class_name="{service_type} Proxy <{name}>".format(
service_type=service_type, name=supported_version),
**service)
else:
class_doc_strings = "\n".join([
":class:`{class_name}`".format(
class_name=proxy_class.__name__)
for proxy_class in desc_class.supported_versions.values()])
doc = _PROXY_TEMPLATE.format(
class_doc_strings=class_doc_strings, **service)
descriptor = desc_class(**descriptor_args)
descriptor.__doc__ = doc
# _logger.debug('proxy is %s' % descriptor.proxy_class)
return descriptor
else:
_logger.warn('unknown service %s was requested' % service_name)
return None
def _find_service_description_class(service_type):
package_name = 'otcextensions.sdk.{service_type}'.format(
service_type=service_type).replace('-', '_')
module_name = service_type.replace('-', '_') + '_service'
class_name = ''.join(
[part.capitalize() for part in module_name.split('_')])
# try:
import_name = '.'.join([package_name, module_name])
service_description_module = importlib.import_module(import_name)
# except ImportError as e:
# # ImportWarning is ignored by default. This warning is here
# # as an opt-in for people trying to figure out why something
# # didn't work.
# _logger.warn("Could not import {service_type} "
# "service description: {e}".format(
# service_type=service_type, e=str(e)),
# ImportWarning)
# return service_description.ServiceDescription
# There are no cases in which we should have a module but not the class
# inside it.
service_description_class = getattr(service_description_module, class_name)
return service_description_class
def get_ak_sk(conn):
"""Fetch AK/SK from the cloud configuration or ENV
This method might be called by the proxy.
"""
config = conn.config.config
ak = config.get('access_key', config.get('ak'))
sk = config.get('secret_key', config.get('sk'))
if not ak:
ak = os.getenv('OS_ACCESS_KEY', os.getenv('S3_ACCESS_KEY_ID'))
if not sk:
sk = os.getenv('OS_SECRET_KEY', os.getenv('S3_SECRET_ACCESS_KEY'))
if not (ak and sk):
_logger.error('AK/SK pair is not configured in the connection, '
'but is requested by the service.')
return (None, None)
else:
        return (ak, sk)
def patch_openstack_resources():
openstack.compute.v2.server.Server._get_tag_struct = \
server.Server._get_tag_struct
openstack.compute.v2.server.Server.add_tag = server.Server.add_tag
openstack.compute.v2.server.Server.remove_tag = server.Server.remove_tag
openstack.exceptions.raise_from_response = \
exc.raise_from_response
def load(conn, **kwargs):
"""Register supported OTC services and make them known to the OpenStackSDK
:param conn: An established OpenStack cloud connection
:returns: none
"""
conn.authorize()
project_id = conn._get_project_info().id
for (service_name, service) in OTC_SERVICES.items():
_logger.debug('trying to register service %s' % service_name)
if service.get('replace_system', False):
# system_proxy = getattr(conn, service['service_type'])
# for service_type in system_proxy.all_types:
if service['service_type'] in conn._proxies:
del conn._proxies[service['service_type']]
# attr = getattr(conn, service_name)
# delattr(conn, service['service_type'])
sd = _get_descriptor(service_name)
conn.add_service(sd)
if service.get('append_project_id', False):
# If service requires project_id, but it is not present in the
# service catalog - set endpoint_override
ep = conn.endpoint_for(sd.service_type)
            if ep and not ep.rstrip('/').endswith('%(project_id)s') \
and not ep.rstrip('/').endswith('$(tenant_id)s') \
and not ep.rstrip('/').endswith(project_id):
key = '_'.join([
sd.service_type.lower().replace('-', '_'),
'endpoint_override'])
if key not in conn.config.config:
conn.config.config[key] = utils.urljoin(ep,
'%(project_id)s')
elif service.get('set_endpoint_override', False):
            # The SDK respects skip_discovery only if endpoint_override is set.
            # If append_project_id is skipped for a service whose discovery
            # does not work, we would fail dramatically, so set the override
            # explicitly here.
ep = conn.endpoint_for(sd.service_type)
conn.config.config[
'_'.join([
sd.service_type.lower().replace('-', '_'),
'endpoint_override'
])
] = utils.urljoin(ep)
# Inject get_ak_sk into the connection to give possibility
# for some proxies to use them
setattr(conn, 'get_ak_sk', get_ak_sk)
patch_openstack_resources()
return None
register_otc_extensions = load
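# Typical wiring (illustrative; the cloud name and the service attribute are
# just examples):
#
#     import openstack
#     from otcextensions import sdk
#
#     conn = openstack.connect(cloud='otc')
#     sdk.register_otc_extensions(conn)
#     conn.anti_ddos  # proxies for the services registered above hang off the connection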
|
#!/Users/tom/GitHub/ga_statistics/bin/python
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
from django.utils.datastructures import MultiValueDict
from orgs.utils import get_current_org
from .models import Attachment, OperateLog
from .serializers import AttachmentSerializer, OperateLogSerializer
from rest_framework import views, status, generics
from rest_framework.response import Response
from django.db import transaction
import os
# Create your views here.
from rest_framework.parsers import MultiPartParser, FormParser
class AttachmentCreateView(generics.CreateAPIView):
parser_classes = (FormParser, MultiPartParser,)
queryset = Attachment.objects.all()
serializer_class = AttachmentSerializer
    def create(self, request, *args, **kwargs):
        files = request.FILES
        instance = None
        if isinstance(files, MultiValueDict):
            for key in files:
                file = files[key]
                instance = Attachment(file=file, filename=file.name, filesize=file.size)
                instance.save()
        if instance is None:
            # Avoid an UnboundLocalError when the request carries no file.
            return Response(status=status.HTTP_400_BAD_REQUEST, data={"detail": "no file uploaded"})
        return Response(status=status.HTTP_200_OK, data=AttachmentSerializer(instance).data)
class AttachmentDeleteView(views.APIView):
def delete(self, request, *args, **kwargs):
instance = Attachment.objects.get(id=kwargs.get("attachment_id"))
if os.path.isfile(instance.file.path):
with transaction.atomic():
instance.file.delete(save=False)
instance.delete()
return Response(status=status.HTTP_200_OK, data=AttachmentSerializer(instance).data)
class OperationLogListView(generics.ListAPIView):
queryset = OperateLog.objects.select_related("created_by")
serializer_class = OperateLogSerializer
filter_fields = ('resource_id', 'resource_type', 'project_id')
def get_queryset(self):
return super().get_queryset().filter(org=get_current_org())
|
import configparser
import kafka
from kafka_producer_consumer import push_message,get_connection_consumer,get_connection_producer,create_topic
from webscraping import get_movies
config = configparser.ConfigParser()
config.read('config.properties')
# global properties
bootstrap_ip_port=config['KAFKA']['KAFKA_BROKER_IP_PORT']
num_partitions = int(config['KAFKA']['NUM_PARTITIONS'])  # configparser returns strings, so cast to int
replication_factor = int(config['KAFKA']['REPLICATION_FACTOR'])
url=config['SCRAPING_SITE']['URL']
producer = None
consumer = None
# connect to kafka
producer = get_connection_producer(bootstrap_ip_port)
# get list of all the topics present
topic_set = set()
consumer_topics = None
try:
    consumer_topics = kafka.KafkaConsumer(bootstrap_servers=[bootstrap_ip_port])
    topic_set = consumer_topics.topics()
except Exception as ex:
    print('Exception while getting Kafka topics')
    print(str(ex))
finally:
    # Only close the consumer if it was actually created.
    if consumer_topics is not None:
        consumer_topics.close()
print("Press:\n1 to push custom message to kafka topic\n2 to fetch message from a topic\
\n3 to push message from website and fetch back\n4 to exit\n")
valid_input = [1,2,3,4]
while(True):
option=int(input("Your input : "))
if option not in valid_input:
print(" Enter a valid input ")
continue
if option == 4:
break
    kafka_topic = input("Enter kafka topic name : ")
if kafka_topic not in topic_set:
res = create_topic(bootstrap_ip_port,kafka_topic,num_partitions, replication_factor)
if res == 1:
topic_set.add(kafka_topic)
else:
print(res)
break
if option == 1:
        kafka_message = input("Enter message to push : ")
print(push_message(producer,kafka_topic,str(option),kafka_message))
elif option == 2:
message_item=[]
consumer = get_connection_consumer(bootstrap_ip_port,kafka_topic)
message_item=[msg for msg in consumer]
print(message_item)
elif option == 3:
all_movies = get_movies(url)
for movies in all_movies:
_=push_message(producer,kafka_topic,str(option),movies)
print(_)
if producer is not None :
producer.close()
if consumer is not None :
consumer.close()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of OTC Tool released under MIT
# Copyright (C) 2016 T-systems Kurt Garloff, Zsolt Nagy
import json
import prettytable
import jmespath
def defaultprettytable(cols):
p = prettytable.PrettyTable(cols)
p.align = 'l'
p.sortby = None
return p
x = defaultprettytable(["name", "value"])
def printLevel2(respjson, outformat, mainkey, listkey, subkey=None):
if mainkey:
parsed = json.loads(respjson)
p=defaultprettytable(listkey)
if(outformat.startswith("json")) :
print (json.dumps(parsed, indent=4, sort_keys=True))
else:
if(outformat.startswith("text")) :
p.set_style(prettytable.PLAIN_COLUMNS)
mainId = respjson
if mainkey and len(mainkey) > 0:
mainId = parsed[mainkey]
for n in range(len(mainId)):
item = mainId[n]
#for item in mainId:
if not (subkey is None):
item = item[subkey]
vals = list()
for colkey in listkey:
if colkey in item :
vals.append(item[colkey])
else:
vals.append(" ")
p.add_row(vals)
print (p.get_string())
def handleQuery(result, query):
    if isinstance(result, (str, bytes)):
parsed = json.loads(result)
else:
parsed = result
sr = jmespath.search(query, parsed)
if isinstance(sr, list):
for object_ in sr:
print (object_)
else:
print (sr)
def printJsonTableTransverse(jsonval, outformat, mainkey):
parsed = json.loads(jsonval)
if(outformat.startswith("json")) :
print (json.dumps(parsed, indent=4, sort_keys=True))
else:
if(outformat.startswith("text")) :
x.set_style(prettytable.PLAIN_COLUMNS)
if mainkey:
id_generator(parsed[mainkey], "")
else:
id_generator(parsed, "")
print (x.get_string())
def id_generator(parsed, headkey):
for k, v in parsed.items():
if isinstance(v, dict):
id_generator(v, headkey + "." + k)
elif isinstance(v, list):
for v2 in v:
if isinstance(v2, dict):
id_generator(v2, headkey + "." + k)
else :
pass
else :
if not v :
v = ""
x.add_row([headkey + "." + k, v ])
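# Example usage (a minimal sketch, not part of the original tool): the JSON
# document below is made up; it shows a JMESPath query via handleQuery and a
# two-column listing via printLevel2.
if __name__ == "__main__":
    sample = ('{"servers": [{"name": "vm-1", "status": "ACTIVE"}, '
              '{"name": "vm-2", "status": "SHUTOFF"}]}')
    handleQuery(sample, "servers[*].name")
    printLevel2(sample, "text", "servers", ["name", "status"])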
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkpolardb.endpoint import endpoint_data
class DescribeSQLLogTemplatesRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'polardb', '2017-08-01', 'DescribeSQLLogTemplates','polardb')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_StartTime(self):
return self.get_query_params().get('StartTime')
def set_StartTime(self,StartTime):
self.add_query_param('StartTime',StartTime)
def get_JobId(self):
return self.get_query_params().get('JobId')
def set_JobId(self,JobId):
self.add_query_param('JobId',JobId)
def get_SortKey(self):
return self.get_query_params().get('SortKey')
def set_SortKey(self,SortKey):
self.add_query_param('SortKey',SortKey)
def get_MinAvgScanRows(self):
return self.get_query_params().get('MinAvgScanRows')
def set_MinAvgScanRows(self,MinAvgScanRows):
self.add_query_param('MinAvgScanRows',MinAvgScanRows)
def get_SecurityToken(self):
return self.get_query_params().get('SecurityToken')
def set_SecurityToken(self,SecurityToken):
self.add_query_param('SecurityToken',SecurityToken)
def get_PageNumbers(self):
return self.get_query_params().get('PageNumbers')
def set_PageNumbers(self,PageNumbers):
self.add_query_param('PageNumbers',PageNumbers)
def get_PagingId(self):
return self.get_query_params().get('PagingId')
def set_PagingId(self,PagingId):
self.add_query_param('PagingId',PagingId)
def get_DBInstanceId(self):
return self.get_query_params().get('DBInstanceId')
def set_DBInstanceId(self,DBInstanceId):
self.add_query_param('DBInstanceId',DBInstanceId)
def get_MaxAvgScanRows(self):
return self.get_query_params().get('MaxAvgScanRows')
def set_MaxAvgScanRows(self,MaxAvgScanRows):
self.add_query_param('MaxAvgScanRows',MaxAvgScanRows)
def get_SqlType(self):
return self.get_query_params().get('SqlType')
def set_SqlType(self,SqlType):
self.add_query_param('SqlType',SqlType)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_MinAvgConsume(self):
return self.get_query_params().get('MinAvgConsume')
def set_MinAvgConsume(self,MinAvgConsume):
self.add_query_param('MinAvgConsume',MinAvgConsume)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_MaxRecordsPerPage(self):
return self.get_query_params().get('MaxRecordsPerPage')
def set_MaxRecordsPerPage(self,MaxRecordsPerPage):
self.add_query_param('MaxRecordsPerPage',MaxRecordsPerPage)
def get_EndTime(self):
return self.get_query_params().get('EndTime')
def set_EndTime(self,EndTime):
self.add_query_param('EndTime',EndTime)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_MaxAvgConsume(self):
return self.get_query_params().get('MaxAvgConsume')
def set_MaxAvgConsume(self,MaxAvgConsume):
self.add_query_param('MaxAvgConsume',MaxAvgConsume)
def get_ChildDBInstanceIDs(self):
return self.get_query_params().get('ChildDBInstanceIDs')
def set_ChildDBInstanceIDs(self,ChildDBInstanceIDs):
self.add_query_param('ChildDBInstanceIDs',ChildDBInstanceIDs)
def get_SortMethod(self):
return self.get_query_params().get('SortMethod')
def set_SortMethod(self,SortMethod):
self.add_query_param('SortMethod',SortMethod)
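# Example usage (a minimal sketch): the credentials, region and instance id
# below are placeholders; a configured Aliyun account with PolarDB is assumed.
if __name__ == "__main__":
    from aliyunsdkcore.client import AcsClient

    client = AcsClient("<access-key-id>", "<access-key-secret>", "cn-hangzhou")
    request = DescribeSQLLogTemplatesRequest()
    request.set_DBInstanceId("<db-instance-id>")
    request.set_StartTime("2021-01-01T00:00Z")
    request.set_EndTime("2021-01-02T00:00Z")
    print(client.do_action_with_exception(request))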
|
#!/usr/bin/env python
from setuptools import find_namespace_packages
from distutils.core import setup
import os
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
package_name = "dbt-core"
package_version = "0.15.0b1"
description = """dbt (data build tool) is a command line tool that helps \
analysts and engineers transform data in their warehouse more effectively"""
setup(
name=package_name,
version=package_version,
description=description,
long_description=description,
author="Fishtown Analytics",
author_email="info@fishtownanalytics.com",
url="https://github.com/fishtown-analytics/dbt",
packages=find_namespace_packages(include=['dbt.*']),
package_data={
'dbt': [
'include/index.html',
'include/global_project/dbt_project.yml',
'include/global_project/docs/*.md',
'include/global_project/macros/*.sql',
'include/global_project/macros/**/*.sql',
'include/global_project/macros/**/**/*.sql',
'py.typed',
]
},
test_suite='test',
entry_points={
'console_scripts': [
'dbt = dbt.main:main',
],
},
scripts=[
'scripts/dbt',
],
install_requires=[
'Jinja2>=2.10',
'PyYAML>=3.11',
'sqlparse>=0.2.3,<0.4',
'networkx>=2.3,<3',
'minimal-snowplow-tracker==0.0.2',
'requests>=2.18.0,<3',
'colorama>=0.3.9,<0.5',
'agate>=1.6,<2',
'jsonschema>=3.0.1,<4',
'json-rpc>=1.12,<2',
'werkzeug>=0.15,<0.17',
'dataclasses==0.6;python_version<"3.7"',
'hologram==0.0.4',
'logbook>=1.5,<1.6',
'pytest-logbook>=1.2.0,<1.3',
'typing-extensions>=3.7.4,<3.8',
]
)
|
#!/usr/bin/env python
# test BLE Scanning software
# jcs 6/8/2014
import blescan
import sys
import bluetooth._bluetooth as bluez
dev_id = 0
try:
sock = bluez.hci_open_dev(dev_id)
except Exception:
    print("error accessing bluetooth device...")
sys.exit(1)
blescan.hci_le_set_scan_parameters(sock)
blescan.hci_enable_le_scan(sock)
returnedList = blescan.parse_events(sock, 10)
for beacon in returnedList:
    print(beacon)
|
# coding: utf-8
# author: Fei Gao <leetcode.com@feigao.xyz>
# Problem: binary tree paths
#
# Given a binary tree, return all root-to-leaf paths.
#
# For example, given the following binary tree:
#
# 1
# / \
# 2 3
# \
# 5
#
# All root-to-leaf paths are:
# ["1->2->5", "1->3"]
# Credits:Special thanks to @jianchao.li.fighter for adding this problem and
# creating all test cases.
#
# Subscribe to see which companies asked this question
#
# Show Tags
#
# Tree
# Depth-first Search
#
# Show Similar Problems
#
# (M) Path Sum II
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
from utils import *
class Solution:
# @param {TreeNode} root
# @return {string[]}
def binaryTreePaths(self, root):
results = []
def gao(node, pre):
if node is not None:
leaf = True
if node.left:
gao(node.left, pre + [node.val])
leaf = False
if node.right:
gao(node.right, pre + [node.val])
leaf = False
if leaf:
results.append(pre + [node.val])
gao(root, [])
# print(results)
return ['->'.join(map(str, path)) for path in results]
def main():
solver = Solution()
tests = [
((build_binary_tree([1, 2, 3, None, 5]),), ['1->2->5', '1->3']),
((build_binary_tree([]),), []),
]
for params, expect in tests:
print('-' * 5 + 'TEST' + '-' * 5)
print('Input: ' + str(params))
print('Expect: ' + str(expect))
result = solver.binaryTreePaths(*params)
print('Result: ' + str(result))
pass
if __name__ == '__main__':
main()
pass
|
import scripts.inputmanager as inp
STARTING_MESSAGE = "Before saving new parameters try testing to see the best accuracy I got."
print(STARTING_MESSAGE.upper())
inp.input_loop()
|
248
|
"""
This is unsupported legacy code.
It may be supported again later.
"""
from json import loads, dumps
from django.test import TestCase
from django.urls import reverse
from core.models import Links
from core.tests.utils import set_links_in_db_from_file, is_equal_lists, \
get_dict_from_link, set_links_in_db_from_list
import pytest
class TestSearchView(TestCase):
ok_status_code = 200
incorrect_status_code = 400
invalid_request_text = '{"code":400,"message":"invalid request"}'
erase_message = -1
@classmethod
def setUpClass(cls):
set_links_in_db_from_file('core/tests/links_example.json')
links = Links.objects.all()
cls.links = [get_dict_from_link(link) for link in links]
@classmethod
def tearDownClass(cls):
links = Links.objects.all()
for link in links:
link.delete()
@pytest.mark.skip
def test_url_search_access_by_name(self):
context = {'doc_id_to': self.erase_message,
'doc_id_from': self.links[0]['doc_id_from']}
resp = self.client.post(reverse('core:search'), dumps(context),
content_type="application/json")
assert self.ok_status_code == resp.status_code
@pytest.mark.skip
def test_url_search_access_by_url(self):
context = {'doc_id_to': self.erase_message,
'doc_id_from': self.links[0]['doc_id_from']}
resp = self.client.post('/api/search/get', dumps(context),
content_type="application/json")
assert self.ok_status_code == resp.status_code
@pytest.mark.skip
def test_empty_query(self):
context = {'doc_id_to': self.erase_message,
'doc_id_from': self.erase_message}
resp = self.client.post('/api/search/get', dumps(context),
content_type="application/json")
assert self.incorrect_status_code == resp.status_code
assert self.invalid_request_text == resp.content.decode()
@pytest.mark.skip
def test_search_all_links(self):
context = {'doc_id_to': self.erase_message,
'doc_id_from': self.links[0]['doc_id_from']}
resp = self.client.post('/api/search/get', dumps(context),
content_type="application/json")
assert self.ok_status_code == resp.status_code
assert is_equal_lists(self.links, loads(resp.content.decode()))
@pytest.mark.skip
def test_search_no_links_found(self):
context = {'doc_id_to': self.erase_message,
'doc_id_from': "no such file"}
resp = self.client.post('/api/search/get', dumps(context),
content_type="application/json")
assert self.incorrect_status_code == resp.status_code
assert self.invalid_request_text == resp.content.decode()
@pytest.mark.skip
def test_search_one_link_one_field_doc_id_to(self):
# Random number from 0 - 6.
link_number = 0
context = {'doc_id_to': self.links[link_number]['doc_id_to'],
'doc_id_from': self.erase_message}
resp = self.client.post('/api/search/get', dumps(context),
content_type="application/json")
assert self.ok_status_code == resp.status_code
assert is_equal_lists(self.links[link_number:link_number + 1],
loads(resp.content.decode()))
@pytest.mark.skip
def test_search_one_link_two_field(self):
# Random number from 0 - 6.
link_number = 1
context = {'doc_id_to': self.links[link_number]['doc_id_to'],
'doc_id_from': self.links[link_number]['doc_id_from']}
resp = self.client.post('/api/search/get', dumps(context),
content_type="application/json")
assert self.ok_status_code == resp.status_code
assert is_equal_lists(self.links[link_number:link_number + 1],
loads(resp.content.decode()))
@pytest.mark.skip
def test_search_not_link(self):
context = {'doc_id_to': 'it is not a link',
'doc_id_from': 'it is not a link, too'}
resp = self.client.post('/api/search/get', dumps(context),
content_type="application/json")
assert self.incorrect_status_code == resp.status_code
assert self.invalid_request_text == resp.content.decode()
@pytest.mark.skip
def test_search_empty_fields(self):
context = {'doc_id_to': '', 'doc_id_from': ''}
resp = self.client.post('/api/search/get', dumps(context),
content_type="application/json")
assert self.incorrect_status_code == resp.status_code
assert self.invalid_request_text == resp.content.decode()
@pytest.mark.skip
def test_search_no_fields(self):
resp = self.client.post('/api/search/get', dumps({}),
content_type="application/json")
assert self.incorrect_status_code == resp.status_code
assert self.invalid_request_text == resp.content.decode()
@pytest.mark.skip
def test_search_one_link_one_field_doc_id_from(self):
# Random number from 0 - 6.
link_number = 1
link_fields = self.links[link_number].copy()
link_fields['doc_id_to'], link_fields['doc_id_from'] = \
link_fields['doc_id_from'], link_fields['doc_id_to']
set_links_in_db_from_list([link_fields])
context = {'doc_id_to': self.erase_message,
'doc_id_from': link_fields['doc_id_from']}
resp = self.client.post('/api/search/get', dumps(context),
content_type="application/json")
link = Links.objects.get(doc_id_from=link_fields['doc_id_from'])
link_fields['id'] = link.id
link.delete()
assert self.ok_status_code == resp.status_code
assert is_equal_lists([link_fields], loads(resp.content.decode()))
|
from datetime import datetime, timedelta
from typing import List
import warnings
from dateutil.relativedelta import FR, MO, SA, SU, TH, TU, WE # noqa
import numpy as np
from pandas.errors import PerformanceWarning
from pandas import DateOffset, Series, Timestamp, date_range
from pandas.tseries.offsets import Day, Easter
def next_monday(dt):
"""
If holiday falls on Saturday, use following Monday instead;
if holiday falls on Sunday, use Monday instead
"""
if dt.weekday() == 5:
return dt + timedelta(2)
elif dt.weekday() == 6:
return dt + timedelta(1)
return dt
def next_monday_or_tuesday(dt):
"""
For second holiday of two adjacent ones!
If holiday falls on Saturday, use following Monday instead;
if holiday falls on Sunday or Monday, use following Tuesday instead
(because Monday is already taken by adjacent holiday on the day before)
"""
dow = dt.weekday()
if dow == 5 or dow == 6:
return dt + timedelta(2)
elif dow == 0:
return dt + timedelta(1)
return dt
def previous_friday(dt):
"""
If holiday falls on Saturday or Sunday, use previous Friday instead.
"""
if dt.weekday() == 5:
return dt - timedelta(1)
elif dt.weekday() == 6:
return dt - timedelta(2)
return dt
def sunday_to_monday(dt):
"""
If holiday falls on Sunday, use day thereafter (Monday) instead.
"""
if dt.weekday() == 6:
return dt + timedelta(1)
return dt
def weekend_to_monday(dt):
"""
If holiday falls on Sunday or Saturday,
use day thereafter (Monday) instead.
Needed for holidays such as Christmas observation in Europe
"""
if dt.weekday() == 6:
return dt + timedelta(1)
elif dt.weekday() == 5:
return dt + timedelta(2)
return dt
def nearest_workday(dt):
"""
If holiday falls on Saturday, use day before (Friday) instead;
if holiday falls on Sunday, use day thereafter (Monday) instead.
"""
if dt.weekday() == 5:
return dt - timedelta(1)
elif dt.weekday() == 6:
return dt + timedelta(1)
return dt
def next_workday(dt):
"""
returns next weekday used for observances
"""
dt += timedelta(days=1)
while dt.weekday() > 4:
# Mon-Fri are 0-4
dt += timedelta(days=1)
return dt
def previous_workday(dt):
"""
returns previous weekday used for observances
"""
dt -= timedelta(days=1)
while dt.weekday() > 4:
# Mon-Fri are 0-4
dt -= timedelta(days=1)
return dt
def before_nearest_workday(dt):
"""
returns previous workday after nearest workday
"""
return previous_workday(nearest_workday(dt))
def after_nearest_workday(dt):
"""
returns next workday after nearest workday
needed for Boxing day or multiple holidays in a series
"""
return next_workday(nearest_workday(dt))
class Holiday:
"""
Class that defines a holiday with start/end dates and rules
for observance.
"""
def __init__(self, name, year=None, month=None, day=None, offset=None,
observance=None, start_date=None, end_date=None,
days_of_week=None):
"""
Parameters
----------
name : str
Name of the holiday , defaults to class name
offset : array of pandas.tseries.offsets or
class from pandas.tseries.offsets
computes offset from date
observance: function
computes when holiday is given a pandas Timestamp
days_of_week:
provide a tuple of days e.g (0,1,2,3,) for Monday Through Thursday
Monday=0,..,Sunday=6
Examples
--------
>>> from pandas.tseries.holiday import Holiday, nearest_workday
>>> from dateutil.relativedelta import MO
>>> USMemorialDay = Holiday('MemorialDay', month=5, day=24,
offset=pd.DateOffset(weekday=MO(1)))
>>> USLaborDay = Holiday('Labor Day', month=9, day=1,
offset=pd.DateOffset(weekday=MO(1)))
>>> July3rd = Holiday('July 3rd', month=7, day=3,)
>>> NewYears = Holiday('New Years Day', month=1, day=1,
observance=nearest_workday),
>>> July3rd = Holiday('July 3rd', month=7, day=3,
days_of_week=(0, 1, 2, 3))
"""
if offset is not None and observance is not None:
raise NotImplementedError("Cannot use both offset and observance.")
self.name = name
self.year = year
self.month = month
self.day = day
self.offset = offset
self.start_date = Timestamp(
start_date) if start_date is not None else start_date
self.end_date = Timestamp(
end_date) if end_date is not None else end_date
self.observance = observance
assert (days_of_week is None or type(days_of_week) == tuple)
self.days_of_week = days_of_week
def __repr__(self):
info = ''
if self.year is not None:
info += 'year={year}, '.format(year=self.year)
info += 'month={mon}, day={day}, '.format(mon=self.month, day=self.day)
if self.offset is not None:
info += 'offset={offset}'.format(offset=self.offset)
if self.observance is not None:
info += 'observance={obs}'.format(obs=self.observance)
repr = 'Holiday: {name} ({info})'.format(name=self.name, info=info)
return repr
def dates(self, start_date, end_date, return_name=False):
"""
Calculate holidays observed between start date and end date
Parameters
----------
start_date : starting date, datetime-like, optional
end_date : ending date, datetime-like, optional
return_name : bool, optional, default=False
If True, return a series that has dates and holiday names.
False will only return dates.
"""
start_date = Timestamp(start_date)
end_date = Timestamp(end_date)
filter_start_date = start_date
filter_end_date = end_date
if self.year is not None:
dt = Timestamp(datetime(self.year, self.month, self.day))
if return_name:
return Series(self.name, index=[dt])
else:
return [dt]
dates = self._reference_dates(start_date, end_date)
holiday_dates = self._apply_rule(dates)
if self.days_of_week is not None:
holiday_dates = holiday_dates[np.in1d(holiday_dates.dayofweek,
self.days_of_week)]
if self.start_date is not None:
filter_start_date = max(self.start_date.tz_localize(
filter_start_date.tz), filter_start_date)
if self.end_date is not None:
filter_end_date = min(self.end_date.tz_localize(
filter_end_date.tz), filter_end_date)
holiday_dates = holiday_dates[(holiday_dates >= filter_start_date) &
(holiday_dates <= filter_end_date)]
if return_name:
return Series(self.name, index=holiday_dates)
return holiday_dates
def _reference_dates(self, start_date, end_date):
"""
Get reference dates for the holiday.
Return reference dates for the holiday also returning the year
prior to the start_date and year following the end_date. This ensures
that any offsets to be applied will yield the holidays within
the passed in dates.
"""
if self.start_date is not None:
start_date = self.start_date.tz_localize(start_date.tz)
if self.end_date is not None:
end_date = self.end_date.tz_localize(start_date.tz)
year_offset = DateOffset(years=1)
reference_start_date = Timestamp(
datetime(start_date.year - 1, self.month, self.day))
reference_end_date = Timestamp(
datetime(end_date.year + 1, self.month, self.day))
# Don't process unnecessary holidays
dates = date_range(start=reference_start_date,
end=reference_end_date,
freq=year_offset, tz=start_date.tz)
return dates
def _apply_rule(self, dates):
"""
Apply the given offset/observance to a DatetimeIndex of dates.
Parameters
----------
dates : DatetimeIndex
Dates to apply the given offset/observance rule
Returns
-------
Dates with rules applied
"""
if self.observance is not None:
return dates.map(lambda d: self.observance(d))
if self.offset is not None:
if not isinstance(self.offset, list):
offsets = [self.offset]
else:
offsets = self.offset
for offset in offsets:
# if we are adding a non-vectorized value
# ignore the PerformanceWarnings:
with warnings.catch_warnings():
warnings.simplefilter("ignore", PerformanceWarning)
dates += offset
return dates
holiday_calendars = {}
def register(cls):
try:
name = cls.name
except AttributeError:
name = cls.__name__
holiday_calendars[name] = cls
def get_calendar(name):
"""
Return an instance of a calendar based on its name.
Parameters
----------
name : str
Calendar name to return an instance of
"""
return holiday_calendars[name]()
class HolidayCalendarMetaClass(type):
def __new__(cls, clsname, bases, attrs):
calendar_class = super().__new__(cls, clsname, bases, attrs)
register(calendar_class)
return calendar_class
class AbstractHolidayCalendar(metaclass=HolidayCalendarMetaClass):
"""
Abstract interface to create holidays following certain rules.
"""
rules = [] # type: List[Holiday]
start_date = Timestamp(datetime(1970, 1, 1))
end_date = Timestamp(datetime(2030, 12, 31))
_cache = None
def __init__(self, name=None, rules=None):
"""
        Initializes holiday object with a given set of rules. Normally
classes just have the rules defined within them.
Parameters
----------
name : str
Name of the holiday calendar, defaults to class name
rules : array of Holiday objects
A set of rules used to create the holidays.
"""
super().__init__()
if name is None:
name = self.__class__.__name__
self.name = name
if rules is not None:
self.rules = rules
def rule_from_name(self, name):
for rule in self.rules:
if rule.name == name:
return rule
return None
def holidays(self, start=None, end=None, return_name=False):
"""
Returns a curve with holidays between start_date and end_date
Parameters
----------
start : starting date, datetime-like, optional
end : ending date, datetime-like, optional
return_name : bool, optional
If True, return a series that has dates and holiday names.
False will only return a DatetimeIndex of dates.
Returns
-------
DatetimeIndex of holidays
"""
if self.rules is None:
raise Exception('Holiday Calendar {name} does not have any '
'rules specified'.format(name=self.name))
if start is None:
start = AbstractHolidayCalendar.start_date
if end is None:
end = AbstractHolidayCalendar.end_date
start = Timestamp(start)
end = Timestamp(end)
holidays = None
# If we don't have a cache or the dates are outside the prior cache, we
# get them again
if (self._cache is None or start < self._cache[0] or
end > self._cache[1]):
for rule in self.rules:
rule_holidays = rule.dates(start, end, return_name=True)
if holidays is None:
holidays = rule_holidays
else:
holidays = holidays.append(rule_holidays)
self._cache = (start, end, holidays.sort_index())
holidays = self._cache[2]
holidays = holidays[start:end]
if return_name:
return holidays
else:
return holidays.index
@staticmethod
def merge_class(base, other):
"""
Merge holiday calendars together. The base calendar
will take precedence to other. The merge will be done
based on each holiday's name.
Parameters
----------
base : AbstractHolidayCalendar
instance/subclass or array of Holiday objects
other : AbstractHolidayCalendar
instance/subclass or array of Holiday objects
"""
try:
other = other.rules
except AttributeError:
pass
if not isinstance(other, list):
other = [other]
other_holidays = {holiday.name: holiday for holiday in other}
try:
base = base.rules
except AttributeError:
pass
if not isinstance(base, list):
base = [base]
base_holidays = {holiday.name: holiday for holiday in base}
other_holidays.update(base_holidays)
return list(other_holidays.values())
def merge(self, other, inplace=False):
"""
Merge holiday calendars together. The caller's class
rules take precedence. The merge will be done
based on each holiday's name.
Parameters
----------
other : holiday calendar
inplace : bool (default=False)
If True set rule_table to holidays, else return array of Holidays
"""
holidays = self.merge_class(self, other)
if inplace:
self.rules = holidays
else:
return holidays
USMemorialDay = Holiday('MemorialDay', month=5, day=31,
offset=DateOffset(weekday=MO(-1)))
USLaborDay = Holiday('Labor Day', month=9, day=1,
offset=DateOffset(weekday=MO(1)))
USColumbusDay = Holiday('Columbus Day', month=10, day=1,
offset=DateOffset(weekday=MO(2)))
USThanksgivingDay = Holiday('Thanksgiving', month=11, day=1,
offset=DateOffset(weekday=TH(4)))
USMartinLutherKingJr = Holiday('Dr. Martin Luther King Jr.',
start_date=datetime(1986, 1, 1), month=1, day=1,
offset=DateOffset(weekday=MO(3)))
USPresidentsDay = Holiday('President''s Day', month=2, day=1,
offset=DateOffset(weekday=MO(3)))
GoodFriday = Holiday("Good Friday", month=1, day=1, offset=[Easter(), Day(-2)])
EasterMonday = Holiday("Easter Monday", month=1, day=1,
offset=[Easter(), Day(1)])
class USFederalHolidayCalendar(AbstractHolidayCalendar):
"""
US Federal Government Holiday Calendar based on rules specified by:
https://www.opm.gov/policy-data-oversight/
snow-dismissal-procedures/federal-holidays/
"""
rules = [
Holiday('New Years Day', month=1, day=1, observance=nearest_workday),
USMartinLutherKingJr,
USPresidentsDay,
USMemorialDay,
Holiday('July 4th', month=7, day=4, observance=nearest_workday),
USLaborDay,
USColumbusDay,
Holiday('Veterans Day', month=11, day=11, observance=nearest_workday),
USThanksgivingDay,
Holiday('Christmas', month=12, day=25, observance=nearest_workday)
]
def HolidayCalendarFactory(name, base, other,
base_class=AbstractHolidayCalendar):
rules = AbstractHolidayCalendar.merge_class(base, other)
calendar_class = type(name, (base_class,), {"rules": rules, "name": name})
return calendar_class
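# Example usage (a minimal sketch): list US federal holidays for one year and
# merge a hypothetical company holiday into the federal calendar.
if __name__ == "__main__":
    cal = USFederalHolidayCalendar()
    print(cal.holidays(start="2019-01-01", end="2019-12-31", return_name=True))

    class ExampleCompanyCalendar(AbstractHolidayCalendar):
        rules = [Holiday("Founders Day", month=6, day=15,
                         observance=nearest_workday)]

    CombinedCalendar = HolidayCalendarFactory(
        "CombinedCalendar", USFederalHolidayCalendar, ExampleCompanyCalendar)
    print(CombinedCalendar().holidays("2019-01-01", "2019-12-31"))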
|
import numpy as np
class Dice:
def __init__(self, min_value=1, max_value=6):
self.min_value = min_value
self.max_value = max_value
def launch(self):
return np.random.randint(self.min_value, self.max_value + 1)
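# Example usage (a minimal sketch): roll a standard six-sided die a few times.
if __name__ == "__main__":
    die = Dice()
    print([die.launch() for _ in range(5)])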
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Floris Bruynooghe <flub@devork.be>
r"""
Use remote Mercurial repository as a Pillar source.
.. versionadded:: 2015.8.0
The module depends on the ``hglib`` python module being available.
This is the same requirement as for hgfs\_ so should not pose any extra
hurdles.
This external Pillar source can be configured in the master config file as such:
.. code-block:: yaml
ext_pillar:
- hg: ssh://hg@example.co/user/repo
"""
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import copy
import hashlib
import logging
import os
# Import Salt Libs
import salt.pillar
import salt.utils.stringutils
# Import Third Party Libs
from salt.ext import six
try:
import hglib
except ImportError:
hglib = None
__virtualname__ = "hg"
log = logging.getLogger(__name__)
# The default option values
__opts__ = {}
def __virtual__():
"""
Only load if hglib is available.
"""
ext_pillar_sources = [x for x in __opts__.get("ext_pillar", [])]
if not any(["hg" in x for x in ext_pillar_sources]):
return False
if not hglib:
log.error("hglib not present")
return False
return __virtualname__
def __init__(__opts__):
"""
Initialise
This is called every time a minion calls this external pillar.
"""
def ext_pillar(minion_id, pillar, repo, branch="default", root=None):
"""
Extract pillar from an hg repository
"""
with Repo(repo) as repo:
repo.update(branch)
envname = "base" if branch == "default" else branch
if root:
path = os.path.normpath(os.path.join(repo.working_dir, root))
else:
path = repo.working_dir
opts = copy.deepcopy(__opts__)
opts["pillar_roots"][envname] = [path]
pil = salt.pillar.Pillar(opts, __grains__, minion_id, envname)
return pil.compile_pillar(ext=False)
def update(repo_uri):
"""
Execute an hg pull on all the repos
"""
with Repo(repo_uri) as repo:
repo.pull()
class Repo(object):
"""
Deal with remote hg (mercurial) repository for Pillar
"""
def __init__(self, repo_uri):
""" Initialize a hg repo (or open it if it already exists) """
self.repo_uri = repo_uri
cachedir = os.path.join(__opts__["cachedir"], "hg_pillar")
hash_type = getattr(hashlib, __opts__.get("hash_type", "md5"))
if six.PY2:
repo_hash = hash_type(repo_uri).hexdigest()
else:
repo_hash = hash_type(salt.utils.stringutils.to_bytes(repo_uri)).hexdigest()
self.working_dir = os.path.join(cachedir, repo_hash)
if not os.path.isdir(self.working_dir):
self.repo = hglib.clone(repo_uri, self.working_dir)
self.repo.open()
else:
self.repo = hglib.open(self.working_dir)
def pull(self):
log.debug("Updating hg repo from hg_pillar module (pull)")
self.repo.pull()
def update(self, branch="default"):
"""
Ensure we are using the latest revision in the hg repository
"""
log.debug("Updating hg repo from hg_pillar module (pull)")
self.repo.pull()
log.debug("Updating hg repo from hg_pillar module (update)")
self.repo.update(branch, clean=True)
def close(self):
"""
Cleanup mercurial command server
"""
self.repo.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
|
# -*- coding: utf-8 -*-
'''
Copyright (c) 2021, MIT Interactive Robotics Group, PI Julie A. Shah.
Authors: Shen Li, Nadia Figueroa, Ankit Shah, Julie A. Shah
All rights reserved.
'''
import os
import hr_planning
import math
starting_seed = 0
reps = 30
n_files_to_split = 2
def main():
"""
Generate .sh files for running the benchmark
"""
run_ca = []
run_ca_si = []
run_greedy = []
for hmdp_name in ["hmdp1.yaml", "hmdp2.yaml", "hmdp3.yaml", "hmdp4.yaml", "hmdp5.yaml"]:
for mode in ["CA", "CA_SI"]:
# Start from the same seed for all modes
seed = starting_seed
s = "python3 run_mpc_iterations_experiments.py"
if mode == "greedy":
s = "python3 run_mpc_greedy_iterations_experiments.py"
for task in ["coll_avoid"]:
for pH_mode in ["pH_indep_pR", "pH_avoid_pR", "pH_move_to_pR"]:
for i in range(reps):
x = s
if mode != "greedy":
x += " --pR_mode=" + mode
x += " --task=" + task
x += " --pH_mode=" + pH_mode
x += " --seed=" + str(seed)
x += " --hmdp_name=" + hmdp_name
seed += 1
if mode == "greedy":
run_greedy.append(x)
elif mode == "CA":
run_ca.append(x)
elif mode == "CA_SI":
run_ca_si.append(x)
else:
raise ValueError()
path = os.path.abspath(hr_planning.__file__)
module_dir = os.path.split(path)[0]
split_and_write(module_dir=module_dir,
file_name="run_ca",
cmds=run_ca)
split_and_write(module_dir=module_dir,
file_name="run_ca_si",
cmds=run_ca_si)
if len(run_greedy) > 0:
split_and_write(module_dir=module_dir,
file_name="run_greedy",
cmds=run_greedy)
def split_and_write(module_dir, file_name, cmds):
# cmds = [1, 2, 3, 4, 5]
cmds_per_file = math.ceil(len(cmds) / n_files_to_split)
chunks = [cmds[x:x+cmds_per_file] for x in range(
0, len(cmds), cmds_per_file)]
assert len(chunks) == n_files_to_split
count = 0
for x in chunks:
count += len(x)
assert len(x) <= cmds_per_file
assert len(x) >= cmds_per_file - 1
assert count == len(cmds)
for i in range(len(chunks)):
for j in range(i + 1, len(chunks)):
tmp = set(chunks[i]).intersection(set(chunks[j]))
assert len(tmp) == 0
for i, chunk in enumerate(chunks):
tmp = file_name + "_" + str(i) + ".sh"
path = os.path.join(module_dir, tmp)
with open(path, 'w') as the_file:
the_file.write("#!/bin/sh\n")
for x in chunk:
the_file.write(x + "\n")
if __name__ == "__main__":
main()
|
import requests
from app_settings.db import settings_collection
from app_settings.models import Settings
from settings import MAILGUN_API_KEY
DEFAULT_EMAIL = (
"Block Tracker <postmaster@sandbox38920d794f42405e81f5689097f0cc19.mailgun.org>"
)
class MailGunClient:
url = "https://api.mailgun.net/v3/sandbox38920d794f42405e81f5689097f0cc19.mailgun.org/messages"
def send_email(
self,
to: str,
subject: str,
text: str,
from_=DEFAULT_EMAIL,
):
return requests.post(
self.url,
auth=("api", MAILGUN_API_KEY),
data={
"from": from_,
"to": to,
"subject": subject,
"text": text,
},
)
def send_email(user_id: str, subject: str, text: str):
settings = settings_collection.find_one({"userId": user_id})
if not settings:
return
settings = Settings.parse_obj(settings)
email = settings.email
if email and settings.allowNotifications:
client = MailGunClient()
client.send_email(to=email, subject=subject, text=text)
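# Example usage (a minimal sketch): the recipient address below is a
# placeholder and a valid MAILGUN_API_KEY is assumed to be configured.
if __name__ == "__main__":
    client = MailGunClient()
    resp = client.send_email(
        to="user@example.com",
        subject="Block Tracker alert",
        text="This is a test notification.",
    )
    print(resp.status_code, resp.text)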
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
def polyfit(dates, levels, p):
x = matplotlib.dates.date2num(dates)
y = levels
# new edit below
p_coeff = np.polyfit(x-x[0], y, p)
poly = np.poly1d(p_coeff)
return poly, x[0]
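# Example usage (a minimal sketch with made-up level readings): fit a
# quadratic and evaluate it on the shifted time axis returned by polyfit.
if __name__ == "__main__":
    from datetime import datetime, timedelta

    dates = [datetime(2021, 1, 1) + timedelta(hours=h) for h in range(10)]
    levels = [1.0, 1.2, 1.5, 1.9, 2.4, 2.8, 3.1, 3.2, 3.1, 2.9]
    poly, d0 = polyfit(dates, levels, 2)
    x = matplotlib.dates.date2num(dates)
    print(poly(x - d0))  # fitted levels at the original sample times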
|
import PySimpleGUI as sg
"""
PySimpleGUI The Complete Course
Lesson 7 - Multiple Windows
1-lvl nested window
"""
# Design pattern 1 - First window does not remain active
layout = [[ sg.Text('Window 1'),],
[sg.Input()],
[sg.Text('', size=(20,1), key='-OUTPUT-')],
[sg.Button('Launch 2')]]
window1 = sg.Window('Window 1', layout)
window2_active=False
while True:
event1, values1 = window1.read(timeout=100)
if event1 is None:
break
window1['-OUTPUT-'].update(values1[0])
if event1 == 'Launch 2' and not window2_active:
window2_active = True
window1.hide()
layout2 = [[sg.Text('Window 2')],
[sg.Button('Exit')]]
window2 = sg.Window('Window 2', layout2)
while True:
ev2, vals2 = window2.read()
if ev2 is None or ev2 == 'Exit':
window2.close()
window2_active = False
window1.un_hide()
break
window1.close()
|
#!/usr/bin/env python3
# encoding: utf-8
# @Date : 2017-07-24 00:00
# @Author : Bluethon (j5088794@gmail.com)
# @Link : http://github.com/bluethon
from rest_framework.routers import DefaultRouter
from . import views
router = DefaultRouter()
router.register(r'sprints', views.SprintViewSet)
router.register(r'tasks', views.TaskViewSet)
router.register(r'users', views.UserViewSet)
|
import torch.nn as nn
import torch
import torch.nn.functional as F
import numpy as np
class SoftmaxCrossEntropyLoss(nn.Module):
    def __init__(self):
        """
        Softmax cross-entropy loss: column 0 of ``y_pred`` is treated as the
        score of the positive (hit) item and the remaining columns as negatives.
        """
        super(SoftmaxCrossEntropyLoss, self).__init__()
def forward(self, y_pred, y_true):
"""
:param y_true: Labels
:param y_pred: Predicted result.
"""
probs = F.softmax(y_pred, dim=1)
hit_probs = probs[:, 0]
loss = -torch.log(hit_probs).mean()
return loss
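# Example usage (a minimal sketch): column 0 of the logits is the positive
# (hit) item, the remaining columns are sampled negatives; y_true is unused.
if __name__ == "__main__":
    criterion = SoftmaxCrossEntropyLoss()
    y_pred = torch.tensor([[2.0, 0.1, -1.0],
                           [1.5, 1.0, 0.5]])
    y_true = torch.zeros(2, dtype=torch.long)
    print(criterion(y_pred, y_true).item())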
|
import os
import re
import sys
# Read arguments
if len(sys.argv) != 2:
raise ValueError('Please provide a filename input')
filename = sys.argv[1]
# Read file
file_data = open(os.getcwd() + '/' + filename, 'r')
# Parse file
tiles = []
for line in file_data.readlines():
line = line.replace('\n', '')
line = re.sub(r'(e|w|se|sw|nw|ne)', r'\1|', line)
dirs = line.split('|')
dirs.remove('')
tiles.append(dirs)
# Get answer
pos = [{'e': 0, 'w': 0, 'se': 0, 'sw': 0, 'nw': 0, 'ne': 0} for i in range(len(tiles))]
for i, t in enumerate(tiles):
for m in t:
pos[i][m] += 1
pos_p = []
for p in pos:
    # Remove unnecessary moves
se = p['se'] - p['nw']
ne = p['ne'] - p['sw']
e = p['e'] - p['w']
# Transform to only two coords
x = e
y = se
x += ne
y -= ne
pos_p.append({
'x': x,
'y': y,
})
counts = {}
for p in pos_p:
t = f'{p["x"]}|{p["y"]}'
if t in counts:
counts[t] += 1
else:
counts[t] = 1
answer = 0
for c in counts:
if counts[c] % 2 != 0:
answer += 1
print(answer)
|
# Copyright 2019, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Python tests originally created or extracted from other peoples work. The
# parts were too small to be protected.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
def tryWhileContinueFinallyTest():
print("Check if finally is executed in a continue using for loop:")
x = 0
while x < 10:
x += 1
try:
if x % 2 == 1:
continue
finally:
print(x, end=" ")
print("-", end=" ")
print()
def tryForContinueFinallyTest():
print("Check if finally is executed in a continue using for loop:")
for x in range(10):
try:
if x % 2 == 1:
continue
finally:
print(x, end=" ")
print("-", end=" ")
print()
def tryWhileBreakFinallyTest():
print("Check if finally is executed in a break using while loop:")
x = 0
while x < 10:
x += 1
try:
if x == 5:
break
finally:
print(x, end=" ")
print("-", end=" ")
print()
def tryForBreakFinallyTest():
print("Check if finally is executed in a break using for loop:")
for x in range(10):
try:
if x == 5:
break
finally:
print(x, end=" ")
print("-", end=" ")
print()
tryWhileContinueFinallyTest()
tryWhileBreakFinallyTest()
tryForContinueFinallyTest()
tryForBreakFinallyTest()
|
from __future__ import absolute_import
from pyspark.sql import SQLContext
from pyspark.mllib.regression import LabeledPoint
from ..utils.rdd_utils import from_labeled_point, to_labeled_point, lp_to_simple_rdd
from pyspark.mllib.linalg import Vector as MLLibVector, Vectors as MLLibVectors
def to_data_frame(sc, features, labels, categorical=False):
"""Convert numpy arrays of features and labels into Spark DataFrame
"""
lp_rdd = to_labeled_point(sc, features, labels, categorical)
sql_context = SQLContext(sc)
df = sql_context.createDataFrame(lp_rdd)
return df
def from_data_frame(df, categorical=False, nb_classes=None):
"""Convert DataFrame back to pair of numpy arrays
"""
lp_rdd = df.rdd.map(lambda row: LabeledPoint(row.label, row.features))
features, labels = from_labeled_point(lp_rdd, categorical, nb_classes)
return features, labels
def df_to_simple_rdd(df, categorical=False, nb_classes=None, features_col='features', label_col='label'):
"""Convert DataFrame into RDD of pairs
"""
sql_context = df.sql_ctx
sql_context.registerDataFrameAsTable(df, "temp_table")
selected_df = sql_context.sql(
"SELECT {0} AS features, {1} as label from temp_table".format(features_col, label_col))
if isinstance(selected_df.first().features, MLLibVector):
lp_rdd = selected_df.rdd.map(
lambda row: LabeledPoint(row.label, row.features))
else:
lp_rdd = selected_df.rdd.map(lambda row: LabeledPoint(
row.label, MLLibVectors.fromML(row.features)))
rdd = lp_to_simple_rdd(lp_rdd, categorical, nb_classes)
return rdd
|
# Copyright 2021 Northern.tech AS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import pytest
import uuid
import os
from testutils.common import Tenant, User, update_tenant, new_tenant_client
from testutils.infra.cli import CliTenantadm
from testutils.infra.container_manager.kubernetes_manager import isK8S
from testutils.api.client import ApiClient
import testutils.api.deviceconnect as deviceconnect
import testutils.api.deviceconfig as deviceconfig
import testutils.api.auditlogs as auditlogs
import testutils.api.useradm as useradm
import testutils.api.tenantadm as tenantadm
import testutils.api.tenantadm_v2 as tenantadm_v2
import testutils.integration.stripe as stripeutils
from ..common_setup import (
enterprise_no_client_class,
standard_setup_one_client,
)
from .common_connect import wait_for_connect
from ..MenderAPI import (
auth,
devauth,
DeviceAuthV2,
Authentication,
get_container_manager,
)
class _TestAccessBase:
"""Access checking functions.
Probe a selected EP from every addon feature to see if it's enabled or not.
Other endpoints are spelled out in detail in acceptance and unit tests for
useradm/deviceauth access layers (assume they're restricted correctly as well).
"""
# troubleshoot
def check_access_remote_term(self, auth, devid, forbid=False):
devconn = ApiClient(
host=get_container_manager().get_mender_gateway(),
base_url=deviceconnect.URL_MGMT,
)
res = devconn.call(
"GET",
deviceconnect.URL_MGMT_DEVICE,
headers=auth.get_auth_token(),
path_params={"id": devid},
)
if forbid:
assert res.status_code == 403
else:
assert res.ok
def check_access_file_transfer(self, auth, devid, forbid=False):
devconn = ApiClient(
host=get_container_manager().get_mender_gateway(),
base_url=deviceconnect.URL_MGMT,
)
res = devconn.call(
"GET",
deviceconnect.URL_MGMT_FDOWNLOAD,
headers=auth.get_auth_token(),
path_params={"id": devid},
qs_params={"path": "/etc/mender/mender.conf"},
)
if forbid:
assert res.status_code == 403
else:
assert res.ok
res = devconn.call(
"PUT",
deviceconnect.URL_MGMT_FUPLOAD,
headers=auth.get_auth_token(),
path_params={"id": devid},
qs_params={"path": "/etc/mender/mender.conf"},
)
if forbid:
assert res.status_code == 403
else:
assert res.status_code != 403
def check_access_auditlogs(self, auth, forbid=False):
alogs = ApiClient(
host=get_container_manager().get_mender_gateway(),
base_url=auditlogs.URL_MGMT,
)
res = alogs.call("GET", auditlogs.URL_LOGS, headers=auth.get_auth_token(),)
if forbid:
assert res.status_code == 403
else:
assert res.ok
def check_access_sessionlogs(self, auth, forbid=False):
devconn = ApiClient(
host=get_container_manager().get_mender_gateway(),
base_url=deviceconnect.URL_MGMT,
)
res = devconn.call(
"GET",
deviceconnect.URL_MGMT_PLAYBACK,
headers=auth.get_auth_token(),
path_params={"session_id": "foo"},
)
if forbid:
assert res.status_code == 403
else:
assert res.status_code != 403
# configure
def check_access_deviceconfig(self, auth, devid, forbid=False):
devconf = ApiClient(
host=get_container_manager().get_mender_gateway(),
base_url=deviceconfig.URL_MGMT,
)
res = devconf.call(
"GET",
deviceconfig.URL_MGMT_DEVICE_CONFIGURATION,
headers=auth.get_auth_token(),
path_params={"id": devid},
)
if forbid:
assert res.status_code == 403
else:
assert res.ok
# rbac (no addon)
def check_access_rbac(self, auth, forbid=False):
uadm = ApiClient(
host=get_container_manager().get_mender_gateway(),
base_url=useradm.URL_MGMT,
)
res = uadm.call("GET", useradm.URL_ROLES, headers=auth.get_auth_token(),)
if forbid:
assert res.status_code == 403
else:
assert res.ok
class TestAccess(_TestAccessBase):
"""Onprem OS.
Quite a few addon features are available here (despite being
hidden behind paid addons in hosted).
"""
def test_ok(self, standard_setup_one_client):
devauth.accept_devices(1)
devices = list(
set([device["id"] for device in devauth.get_devices_status("accepted")])
)
assert 1 == len(devices)
wait_for_connect(auth, devices[0])
devid = devices[0]
auth.get_auth_token()
self.check_access_remote_term(auth, devid)
self.check_access_file_transfer(auth, devid)
self.check_access_sessionlogs(auth)
self.check_access_deviceconfig(auth, devid)
class TestAccessEnterprise(_TestAccessBase):
""" Full enterprise setup, with HAVE_ADDONS and MT."""
@pytest.fixture(scope="class")
def docker_env(self, enterprise_no_client_class):
"""Prepare 4 tenants across all plans (trial...enterprise) + a device each."""
env = enterprise_no_client_class
env.tenants = {}
for p in ["os", "professional", "enterprise"]:
env.tenants[p] = self._make_tenant(p, env)
env.tenants["trial"] = self._make_trial_tenant(env)
yield env
def test_initial_restrictions(self, docker_env):
""" Test that existing users are in fact under new addon restrictions, to incentivize addon upgrades. """
for plan in ["os", "professional", "enterprise"]:
tenant = docker_env.tenants[plan]
self.check_access_remote_term(tenant.auth, tenant.device_id, forbid=True)
self.check_access_file_transfer(tenant.auth, tenant.device_id, forbid=True)
self.check_access_deviceconfig(tenant.auth, tenant.device_id, forbid=True)
if plan == "enterprise":
self.check_access_rbac(tenant.auth)
self.check_access_auditlogs(tenant.auth, forbid=False)
self.check_access_sessionlogs(tenant.auth)
else:
self.check_access_auditlogs(tenant.auth, forbid=True)
self.check_access_sessionlogs(tenant.auth, forbid=True)
# self.check_access_rbac(tenant.auth, forbid=True)
for plan in ["trial"]:
tenant = docker_env.tenants[plan]
# to actually access any RT/FT - wait for device
wait_for_connect(tenant.auth, tenant.device_id)
self.check_access_remote_term(tenant.auth, tenant.device_id)
self.check_access_file_transfer(tenant.auth, tenant.device_id)
self.check_access_auditlogs(tenant.auth)
self.check_access_sessionlogs(tenant.auth)
self.check_access_deviceconfig(tenant.auth, tenant.device_id)
self.check_access_rbac(tenant.auth)
@pytest.mark.skipif(
not bool(os.environ.get("STRIPE_API_KEY")),
reason="STRIPE_API_KEY not provided",
)
def test_upgrades(self, docker_env):
"""Test that plan/addon upgrades take effect on feature availability.
Special case is the trial tenant upgrade to a paid plan.
"""
tenant = docker_env.tenants["os"]
# add troubleshoot
update_tenant(
tenant.id,
addons=["troubleshoot"],
container_manager=get_container_manager(),
)
tenant.auth.reset_auth_token()
wait_for_connect(tenant.auth, tenant.device_id)
self.check_access_remote_term(tenant.auth, tenant.device_id)
self.check_access_file_transfer(tenant.auth, tenant.device_id)
self.check_access_auditlogs(tenant.auth, forbid=True)
self.check_access_sessionlogs(tenant.auth, forbid=True)
self.check_access_deviceconfig(
tenant.auth, tenant.device_id, forbid=True,
)
# self.check_access_rbac(tenant.auth, forbid=True)
# add configure
update_tenant(
tenant.id,
addons=["troubleshoot", "configure"],
container_manager=get_container_manager(),
)
tenant.auth.reset_auth_token()
wait_for_connect(tenant.auth, tenant.device_id)
self.check_access_remote_term(tenant.auth, tenant.device_id)
self.check_access_file_transfer(tenant.auth, tenant.device_id)
self.check_access_deviceconfig(tenant.auth, tenant.device_id)
self.check_access_auditlogs(tenant.auth, forbid=True)
self.check_access_sessionlogs(tenant.auth, forbid=True)
# self.check_access_rbac(tenant.auth, forbid=True)
# upgrade to "enterprise" - makes audit, session logs and rbac available
update_tenant(
tenant.id, plan="enterprise", container_manager=get_container_manager(),
)
tenant.auth.reset_auth_token()
wait_for_connect(tenant.auth, tenant.device_id)
self.check_access_remote_term(tenant.auth, tenant.device_id)
self.check_access_file_transfer(tenant.auth, tenant.device_id)
self.check_access_deviceconfig(tenant.auth, tenant.device_id)
self.check_access_auditlogs(tenant.auth)
self.check_access_sessionlogs(tenant.auth)
self.check_access_rbac(tenant.auth)
# upgrade trial tenant - straight to enterprise
tenant = docker_env.tenants["trial"]
tadmm = ApiClient(
host=get_container_manager().get_mender_gateway(),
base_url=tenantadm_v2.URL_MGMT,
)
res = tadmm.call(
"POST",
tenantadm_v2.URL_TENANT_UPGRADE_START,
path_params={"id": tenant.id},
headers=tenant.auth.get_auth_token(),
)
assert res.status_code == 200
res = res.json()
stripeutils.confirm("pm_card_visa", res["intent_id"])
body = {
"plan": "enterprise",
}
res = tadmm.call(
"POST",
tenantadm_v2.URL_TENANT_UPGRADE_COMPLETE,
path_params={"id": tenant.id},
body=body,
headers=tenant.auth.get_auth_token(),
)
assert res.status_code == 202
update_tenant(
tenant.id,
addons=["troubleshoot", "configure"],
container_manager=get_container_manager(),
)
tenant.auth.reset_auth_token()
wait_for_connect(tenant.auth, tenant.device_id)
self.check_access_remote_term(tenant.auth, tenant.device_id)
self.check_access_file_transfer(tenant.auth, tenant.device_id)
self.check_access_deviceconfig(tenant.auth, tenant.device_id)
self.check_access_auditlogs(tenant.auth)
self.check_access_sessionlogs(tenant.auth)
self.check_access_rbac(tenant.auth)
def _make_tenant(self, plan, env):
uuidv4 = str(uuid.uuid4())
tname = "test.mender.io-{}-{}".format(uuidv4, plan)
email = "some.user+{}@example.com".format(uuidv4)
cli = CliTenantadm(containers_namespace=env.name)
tid = cli.create_org(tname, email, "correcthorse", plan)
tenant = cli.get_tenant(tid)
tenant = json.loads(tenant)
ttoken = tenant["tenant_token"]
# the cli now sets all addons to 'enabled' -
# disable them for initial 'all disabled' state
update_tenant(
tenant["id"], addons=[], container_manager=get_container_manager(),
)
auth = Authentication(name=tname, username=email, password="correcthorse")
auth.create_org = False
auth.reset_auth_token()
devauth = DeviceAuthV2(auth)
new_tenant_client(env, "test-container-{}".format(plan), ttoken)
devauth.accept_devices(1)
devices = list(
set([device["id"] for device in devauth.get_devices_status("accepted")])
)
assert 1 == len(devices)
tenant = Tenant(tname, tid, ttoken)
u = User("", email, "correcthorse")
tenant.users.append(u)
tenant.device_id = devices[0]
tenant.auth = auth
tenant.devauth = devauth
return tenant
def _make_trial_tenant(self, env):
uuidv4 = str(uuid.uuid4())
tname = "test.mender.io-{}-{}".format(uuidv4, "trial")
email = "some.user+{}@example.com".format(uuidv4)
tadmm = ApiClient(
host=get_container_manager().get_mender_gateway(),
base_url=tenantadm_v2.URL_MGMT,
)
args = {
"organization": tname,
"email": email,
"password": "correcthorse",
"name": "foo",
"g-recaptcha-response": "dummy",
"plan": "enterprise",
}
res = tadmm.call("POST", tenantadm_v2.URL_CREATE_ORG_TRIAL, body=args,)
assert res.status_code == 202
# get tenant id
tenantadm_host = (
tenantadm.HOST
if isK8S()
else get_container_manager().get_ip_of_service("mender-tenantadm")[0]
+ ":8080"
)
tadmi = ApiClient(
host=tenantadm_host, base_url=tenantadm.URL_INTERNAL, schema="http://",
)
res = tadmi.call(
"GET", tenantadm.URL_INTERNAL_TENANTS, qs_params={"username": email}
)
assert res.status_code == 200
assert len(res.json()) == 1
apitenant = res.json()[0]
cli = CliTenantadm(containers_namespace=env.name)
tenant = cli.get_tenant(apitenant["id"])
tenant = json.loads(tenant)
ttoken = tenant["tenant_token"]
auth = Authentication(name=tname, username=email, password="correcthorse")
auth.create_org = False
auth.reset_auth_token()
devauth = DeviceAuthV2(auth)
new_tenant_client(env, "test-container-trial", ttoken)
devauth.accept_devices(1)
devices = list(
set([device["id"] for device in devauth.get_devices_status("accepted")])
)
assert 1 == len(devices)
tenant = Tenant(tname, apitenant["id"], ttoken)
u = User("", email, "correcthorse")
tenant.users.append(u)
tenant.device_id = devices[0]
tenant.auth = auth
tenant.devauth = devauth
return tenant
|
from django.contrib import admin
from .models import Agent, Category, Lead, User, UserProfile
admin.site.register(Agent)
admin.site.register(Lead)
admin.site.register(User)
admin.site.register(UserProfile)
admin.site.register(Category)
|
from hsmclient.tests.unit import utils
from hsmclient.tests.unit.v1 import fakes
from hsmclient.v1 import rbds
cs = fakes.FakeClient()
class RbdsTest(utils.TestCase):
def test_list(self):
rl = cs.rbds.list()
cs.assert_called('GET', '/rbds')
for r in rl:
self.assertIsInstance(r, rbds.Rbd)
def test_get(self):
rbd_id = 1234
cs.rbds.get(rbd_id)
cs.assert_called('GET', '/rbds/%s' % rbd_id)
def test_refresh(self):
cs.rbds.refresh()
cs.assert_called('POST', '/rbds/refresh')
|
"""Download files from the specified ftp server."""
# standard
import datetime
import functools
from os import path
# third party
import paramiko
def print_callback(filename, bytes_so_far, bytes_total):
"""Log file transfer progress."""
rough_percent_transferred = int(100 * (bytes_so_far / bytes_total))
if (rough_percent_transferred % 25) == 0:
print(f'{filename} transfer: {rough_percent_transferred}%')
def get_files_from_dir(sftp, filedate, out_path):
"""Download files from sftp server tagged with the specified day.
Args:
sftp: SFTP Session from Paramiko client
filedate: YYYYmmdd string for which the files are named
out_path: Path to local directory into which to download the files
"""
    # go through files in receiving dir
filepaths_to_download = {}
for fileattr in sftp.listdir_attr():
filename = fileattr.filename
if fileattr.filename.startswith(filedate) and \
not path.exists(path.join(out_path, filename)):
filepaths_to_download[filename] = path.join(out_path, filename)
# make sure we don't download more than 6 files per day
assert len(filepaths_to_download) <= 6, "more files dropped than expected"
# download!
for infile, outfile in filepaths_to_download.items():
callback_for_filename = functools.partial(print_callback, infile)
sftp.get(infile, outfile, callback=callback_for_filename)
def download_covid(filedate, out_path, ftp_conn):
"""Download files necessary to create chng-covid signal from ftp server.
Args:
filedate: YYYYmmdd string for which the files are named
out_path: Path to local directory into which to download the files
ftp_conn: Dict containing login credentials to ftp server
"""
# open client
try:
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(ftp_conn["host"], username=ftp_conn["user"],
password=ftp_conn["pass"],
port=ftp_conn["port"],
allow_agent=False, look_for_keys=False)
sftp = client.open_sftp()
sftp.chdir('/countproducts')
get_files_from_dir(sftp, filedate, out_path)
finally:
if client:
client.close()
def download_cli(filedate, out_path, ftp_conn):
"""Download files necessary to create chng-cli signal from ftp server.
Args:
filedate: YYYYmmdd string for which the files are named
out_path: Path to local directory into which to download the files
ftp_conn: Dict containing login credentials to ftp server
"""
# open client
try:
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(ftp_conn["host"], username=ftp_conn["user"],
password=ftp_conn["pass"],
port=ftp_conn["port"],
allow_agent=False, look_for_keys=False)
sftp = client.open_sftp()
sftp.chdir('/countproducts')
get_files_from_dir(sftp, filedate, out_path)
finally:
if client:
client.close()
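# Example usage (a minimal sketch): host, credentials and output path below
# are placeholders for a real SFTP drop site and an existing local directory.
if __name__ == "__main__":
    ftp_conn = {
        "host": "sftp.example.com",
        "user": "username",
        "pass": "password",
        "port": 22,
    }
    download_covid("20210601", "/tmp/chng", ftp_conn)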
|
class Vitals:
LEARNING_RATE = 0.1
MOMENTUM_RATE = 0.8
FIRST_LAYER = 5 * 7
SECOND_LAYER = 14
OUTPUT_LAYER = 3
|
c = input("enter city name:").strip()
if c == 'Gujarat':
    print('namaste')
elif c == 'Hyderabad':
    print('vanakkam')
else:
    print('city not available')
|
# Copyright 2022 The AI Flow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import unittest
import notification_service.settings
from sqlalchemy import create_engine
from notification_service.cli import cli_parser
from notification_service.cli.commands import db_command
from notification_service.settings import get_configuration
from notification_service.util import db
class TestCliDb(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.parser = cli_parser.get_parser()
notification_service.settings.NOTIFICATION_HOME = os.path.join(os.path.dirname(__file__), '../../')
cls.config = get_configuration()
def _remove_db_file(self):
if os.path.exists('ns.db'):
os.remove('ns.db')
def setUp(self) -> None:
self._remove_db_file()
db.SQL_ALCHEMY_CONN = self.config.db_uri
def tearDown(self) -> None:
self._remove_db_file()
db.clear_engine_and_session()
def test_cli_db_init(self):
db_command.init(self.parser.parse_args(['db', 'init']))
engine = create_engine(self.config.db_uri)
self.assertTrue('event_model' in engine.table_names())
self.assertTrue('member_model' in engine.table_names())
self.assertTrue('notification_client' in engine.table_names())
def test_cli_db_reset(self):
db_command.init(self.parser.parse_args(['db', 'init']))
db.prepare_db()
with db.create_session() as session:
client = db.ClientModel()
client.namespace = 'a'
client.sender = 'a'
client.create_time = 1
session.add(client)
session.commit()
client_res = session.query(db.ClientModel).all()
self.assertEqual(1, len(client_res))
db_command.reset(self.parser.parse_args(['db', 'reset', '-y']))
client_res = session.query(db.ClientModel).all()
self.assertEqual(0, len(client_res))
def test_cli_db_upgrade(self):
db_command.upgrade(self.parser.parse_args(['db', 'upgrade', '--version', '87cb292bcc31']))
engine = create_engine(self.config.db_uri)
self.assertTrue('event_model' in engine.table_names())
self.assertTrue('notification_client' in engine.table_names())
self.assertFalse('member_model' in engine.table_names())
def test_cli_db_downgrade(self):
db_command.init(self.parser.parse_args(['db', 'init']))
db_command.downgrade(self.parser.parse_args(['db', 'downgrade', '--version', '87cb292bcc31']))
engine = create_engine(self.config.db_uri)
self.assertTrue('event_model' in engine.table_names())
self.assertTrue('notification_client' in engine.table_names())
self.assertFalse('member_model' in engine.table_names())
if __name__ == '__main__':
unittest.main()
|
from collections import defaultdict
import json
from pandas.core import frame
import torch
import pandas as pd
import os
import pickle as pkl
import numpy as np
import cv2
import h5py
import tqdm
import functools
import lmdb
class EGTEA_GAZE_DATASET(torch.utils.data.Dataset):
def __init__(self, logger, config, root = None):
super().__init__()
        self.root = root if root is not None else './data/EG+'
self.name = config.name
self.split = config.split
self.config = config
self.model_fps = config.fps
self.tau_a = config.tau_a
self.feature = config.feature
self.feature_fps = config.feature_fps
self.feature_dim = config.feature_dim
assert config.name == 'EGTEA_GAZE+'
self.class_info = pd.read_csv(os.path.join(self.root,'actions.csv'), names=['action_class','verb_noun_class','text'])
self.num_action = self.class_info.shape[0]
self.vn2action = []
for _, a in self.class_info.iterrows():
v,n = list(map(int,a.verb_noun_class.split('_')))
self.vn2action.append([v,n])
self.num_verb = len(set([a[0] for a in self.vn2action]))
self.num_noun = len(set([a[1] for a in self.vn2action]))
annotation_file = {
'train1':'training1.csv',
'train2':'training2.csv',
'train3':'training3.csv',
'valid1':'validation1.csv',
'valid2':'validation2.csv',
'valid3':'validation3.csv',
}[config.split]
annotation_file = os.path.join(self.root,annotation_file)
assert config.past_frame > 0
self.data = []
info = pd.read_csv(annotation_file, header=None, names=['video','start','end','verb','noun','action'])
for idx,a in info.iterrows():
video_name = a.video
start_frame = a.start
end_frame = a.end
aid = a.action
vid = a.verb
nid = a.noun
segment = {
'id' : idx,
'video_id' : video_name,
'next_verb_class' : vid,
'next_noun_class' : nid,
'next_action_class' : aid,
}
if config.drop and start_frame<=self.tau_a * self.feature_fps:
continue
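            # Sample (past_frame + forward_frame) feature-frame indices spaced
            # feature_fps / model_fps apart, anchored tau_a seconds before the action
            # start; [::-1] puts them in chronological order, and indices below 1 are
            # clamped to 1 below.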
frame_index = np.arange(
start_frame - self.tau_a * self.feature_fps + config.forward_frame * self.feature_fps / self.model_fps,
start_frame - self.tau_a * self.feature_fps - config.past_frame * self.feature_fps / self.model_fps,
- self.feature_fps / self.model_fps
).astype(int)[::-1]
assert len(frame_index) == config.past_frame + config.forward_frame
frame_index[frame_index<1] = 1
segment['frame_index'] = frame_index
self.data.append(segment)
# debug
# break
self.verb_weight, self.noun_weight, self.action_weight = None, None, None
##### feature
assert config.feat_file
self.f = lmdb.open(config.feat_file, readonly=True, lock=False)
logger.info('[%s] # Frame: Past %d. Forward %d.' % (
config.split, config.past_frame,config.forward_frame))
logger.info('[%s] # segment %d. verb %d. noun %d. action %d.' % (
config.split, len(self.data), self.num_verb, self.num_noun, self.num_action))
self.cache = {}
if config.cache:
self.make_cache(logger)
def make_cache(self,logger):
logger.info('Cache: Load all feature into memory')
for segment in self.data:
for fid in segment['frame_index']:
key = '%s_frame_%010d.jpg' % (segment['video_id'],fid)
if key not in self.cache:
res = self._read_one_frame_feat(key)
self.cache[key] = res
logger.info('Cache: Finish loading. Cache Size %d' % len(self.cache))
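    # Feature vectors are stored in LMDB under keys of the form
    # '<video_id>_frame_<10-digit frame index>.jpg', matching make_cache and _load_feat.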
def _read_one_frame_feat(self,key):
if key in self.cache:
return self.cache[key]
with self.f.begin() as e:
buf = e.get(key.strip().encode('utf-8'))
if buf is not None:
res = np.frombuffer(buf,'float32')
else:
res = None
return res
def _load_feat(self,video_id, frame_ids):
frames = []
dim = self.feature_dim
for fid in frame_ids:
key = '%s_frame_%010d.jpg' % (video_id,fid)
frame_feat = self._read_one_frame_feat(key)
if frame_feat is not None:
frames.append(frame_feat)
elif len(frames) > 0:
frames.append(frames[-1])
# print('Copy frame: %s' % key)
else:
frames.append(np.zeros(dim))
# print('Zero frame: %s' % key)
return torch.from_numpy(np.stack(frames,0)).float()
def __len__(self):
return len(self.data)
def __getitem__(self,i):
segment = self.data[i]
out = {
'id' : segment['id'],
'index' : i
}
out['next_action_class'] = segment['next_action_class']
out['next_verb_class'] = segment['next_verb_class']
out['next_noun_class'] = segment['next_noun_class']
out['past_frame'] = self._load_feat(
segment['video_id'],
segment['frame_index'],
)
return out
|
#
# Secret Labs' Regular Expression Engine
#
# convert re-style regular expression to sre pattern
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
# XXX: show string offset and offending character for all errors
from sre_constants import *
import sys
SPECIAL_CHARS = ".\\[{()*+?^$|"
REPEAT_CHARS = "*+?{"
DIGITS = frozenset("0123456789")
OCTDIGITS = frozenset("01234567")
HEXDIGITS = frozenset("0123456789abcdefABCDEF")
ASCIILETTERS = frozenset("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
WHITESPACE = frozenset(" \t\n\r\v\f")
_REPEATCODES = frozenset({MIN_REPEAT, MAX_REPEAT})
_UNITCODES = frozenset({ANY, RANGE, IN, LITERAL, NOT_LITERAL, CATEGORY})
ESCAPES = {
r"\a": (LITERAL, ord("\a")),
r"\b": (LITERAL, ord("\b")),
r"\f": (LITERAL, ord("\f")),
r"\n": (LITERAL, ord("\n")),
r"\r": (LITERAL, ord("\r")),
r"\t": (LITERAL, ord("\t")),
r"\v": (LITERAL, ord("\v")),
r"\\": (LITERAL, ord("\\"))
}
CATEGORIES = {
r"\A": (AT, AT_BEGINNING_STRING), # start of string
r"\b": (AT, AT_BOUNDARY),
r"\B": (AT, AT_NON_BOUNDARY),
r"\d": (IN, [(CATEGORY, CATEGORY_DIGIT)]),
r"\D": (IN, [(CATEGORY, CATEGORY_NOT_DIGIT)]),
r"\s": (IN, [(CATEGORY, CATEGORY_SPACE)]),
r"\S": (IN, [(CATEGORY, CATEGORY_NOT_SPACE)]),
r"\w": (IN, [(CATEGORY, CATEGORY_WORD)]),
r"\W": (IN, [(CATEGORY, CATEGORY_NOT_WORD)]),
r"\Z": (AT, AT_END_STRING), # end of string
}
FLAGS = {
# standard flags
"i": SRE_FLAG_IGNORECASE,
"L": SRE_FLAG_LOCALE,
"m": SRE_FLAG_MULTILINE,
"s": SRE_FLAG_DOTALL,
"x": SRE_FLAG_VERBOSE,
# extensions
"a": SRE_FLAG_ASCII,
"t": SRE_FLAG_TEMPLATE,
"u": SRE_FLAG_UNICODE,
}
GLOBAL_FLAGS = (SRE_FLAG_ASCII | SRE_FLAG_LOCALE | SRE_FLAG_UNICODE |
SRE_FLAG_DEBUG | SRE_FLAG_TEMPLATE)
class Verbose(Exception):
pass
class Pattern:
# master pattern object. keeps track of global attributes
def __init__(self):
self.flags = 0
self.groupdict = {}
self.groupwidths = [None] # group 0
self.lookbehindgroups = None
@property
def groups(self):
return len(self.groupwidths)
def opengroup(self, name=None):
gid = self.groups
self.groupwidths.append(None)
if self.groups > MAXGROUPS:
raise error("too many groups")
if name is not None:
ogid = self.groupdict.get(name, None)
if ogid is not None:
raise error("redefinition of group name %r as group %d; "
"was group %d" % (name, gid, ogid))
self.groupdict[name] = gid
return gid
def closegroup(self, gid, p):
self.groupwidths[gid] = p.getwidth()
def checkgroup(self, gid):
return gid < self.groups and self.groupwidths[gid] is not None
def checklookbehindgroup(self, gid, source):
if self.lookbehindgroups is not None:
if not self.checkgroup(gid):
raise source.error('cannot refer to an open group')
if gid >= self.lookbehindgroups:
raise source.error('cannot refer to group defined in the same '
'lookbehind subpattern')
class SubPattern:
# a subpattern, in intermediate form
def __init__(self, pattern, data=None):
self.pattern = pattern
if data is None:
data = []
self.data = data
self.width = None
    def dump(self, level=0):
        nl = True
        seqtypes = (tuple, list)
        for op, av in self.data:
            print(level*"  " + str(op), end='')
            if op is IN:
                # member sublanguage
                print()
                for op, a in av:
                    print((level+1)*"  " + str(op), a)
            elif op is BRANCH:
                print()
                for i, a in enumerate(av[1]):
                    if i:
                        print(level*"  " + "OR")
                    a.dump(level+1)
            elif op is GROUPREF_EXISTS:
                condgroup, item_yes, item_no = av
                print('', condgroup)
                item_yes.dump(level+1)
                if item_no:
                    print(level*"  " + "ELSE")
                    item_no.dump(level+1)
            elif isinstance(av, seqtypes):
                nl = False
                for a in av:
                    if isinstance(a, SubPattern):
                        if not nl:
                            print()
                        a.dump(level+1)
                        nl = True
                    else:
                        if not nl:
                            print(' ', end='')
                        print(a, end='')
                        nl = False
                if not nl:
                    print()
            else:
                print('', av)
def __repr__(self):
return repr(self.data)
def __len__(self):
return len(self.data)
def __delitem__(self, index):
del self.data[index]
def __getitem__(self, index):
if isinstance(index, slice):
return SubPattern(self.pattern, self.data[index])
return self.data[index]
def __setitem__(self, index, code):
self.data[index] = code
def insert(self, index, code):
self.data.insert(index, code)
def append(self, code):
self.data.append(code)
def getwidth(self):
# determine the width (min, max) for this subpattern
if self.width is not None:
return self.width
lo = hi = 0
for op, av in self.data:
if op is BRANCH:
i = MAXREPEAT - 1
j = 0
for av in av[1]:
l, h = av.getwidth()
i = min(i, l)
j = max(j, h)
lo = lo + i
hi = hi + j
elif op is CALL:
i, j = av.getwidth()
lo = lo + i
hi = hi + j
elif op is SUBPATTERN:
i, j = av[-1].getwidth()
lo = lo + i
hi = hi + j
elif op in _REPEATCODES:
i, j = av[2].getwidth()
lo = lo + i * av[0]
hi = hi + j * av[1]
elif op in _UNITCODES:
lo = lo + 1
hi = hi + 1
elif op is GROUPREF:
i, j = self.pattern.groupwidths[av]
lo = lo + i
hi = hi + j
elif op is GROUPREF_EXISTS:
i, j = av[1].getwidth()
if av[2] is not None:
l, h = av[2].getwidth()
i = min(i, l)
j = max(j, h)
else:
i = 0
lo = lo + i
hi = hi + j
elif op is SUCCESS:
break
self.width = min(lo, MAXREPEAT - 1), min(hi, MAXREPEAT)
return self.width
class Tokenizer:
def __init__(self, string):
self.istext = isinstance(string, str)
self.string = string
if not self.istext:
string = str(string, 'latin1')
self.decoded_string = string
self.index = 0
self.next = None
self.__next()
def __next(self):
index = self.index
try:
char = self.decoded_string[index]
except IndexError:
self.next = None
return
if char == "\\":
index += 1
try:
char += self.decoded_string[index]
except IndexError:
raise error("bad escape (end of pattern)",
self.string, len(self.string) - 1) from None
self.index = index + 1
self.next = char
def match(self, char):
if char == self.next:
self.__next()
return True
return False
def get(self):
this = self.next
self.__next()
return this
def getwhile(self, n, charset):
result = ''
for _ in range(n):
c = self.next
if c not in charset:
break
result += c
self.__next()
return result
def getuntil(self, terminator):
result = ''
while True:
c = self.next
self.__next()
if c is None:
if not result:
raise self.error("missing group name")
raise self.error("missing %s, unterminated name" % terminator,
len(result))
if c == terminator:
if not result:
raise self.error("missing group name", 1)
break
result += c
return result
@property
def pos(self):
return self.index - len(self.next or '')
def tell(self):
return self.index - len(self.next or '')
def seek(self, index):
self.index = index
self.__next()
def error(self, msg, offset=0):
return error(msg, self.string, self.tell() - offset)
def _class_escape(source, escape):
# handle escape code inside character class
code = ESCAPES.get(escape)
if code:
return code
code = CATEGORIES.get(escape)
if code and code[0] is IN:
return code
try:
c = escape[1:2]
if c == "x":
# hexadecimal escape (exactly two digits)
escape += source.getwhile(2, HEXDIGITS)
if len(escape) != 4:
raise source.error("incomplete escape %s" % escape, len(escape))
return LITERAL, int(escape[2:], 16)
elif c == "u" and source.istext:
# unicode escape (exactly four digits)
escape += source.getwhile(4, HEXDIGITS)
if len(escape) != 6:
raise source.error("incomplete escape %s" % escape, len(escape))
return LITERAL, int(escape[2:], 16)
elif c == "U" and source.istext:
# unicode escape (exactly eight digits)
escape += source.getwhile(8, HEXDIGITS)
if len(escape) != 10:
raise source.error("incomplete escape %s" % escape, len(escape))
c = int(escape[2:], 16)
chr(c) # raise ValueError for invalid code
return LITERAL, c
elif c in OCTDIGITS:
# octal escape (up to three digits)
escape += source.getwhile(2, OCTDIGITS)
c = int(escape[1:], 8)
if c > 0o377:
raise source.error('octal escape value %s outside of '
'range 0-0o377' % escape, len(escape))
return LITERAL, c
elif c in DIGITS:
raise ValueError
if len(escape) == 2:
if c in ASCIILETTERS:
raise source.error('bad escape %s' % escape, len(escape))
return LITERAL, ord(escape[1])
except ValueError:
pass
raise source.error("bad escape %s" % escape, len(escape))
def _escape(source, escape, state):
# handle escape code in expression
code = CATEGORIES.get(escape)
if code:
return code
code = ESCAPES.get(escape)
if code:
return code
try:
c = escape[1:2]
if c == "x":
# hexadecimal escape
escape += source.getwhile(2, HEXDIGITS)
if len(escape) != 4:
raise source.error("incomplete escape %s" % escape, len(escape))
return LITERAL, int(escape[2:], 16)
elif c == "u" and source.istext:
# unicode escape (exactly four digits)
escape += source.getwhile(4, HEXDIGITS)
if len(escape) != 6:
raise source.error("incomplete escape %s" % escape, len(escape))
return LITERAL, int(escape[2:], 16)
elif c == "U" and source.istext:
# unicode escape (exactly eight digits)
escape += source.getwhile(8, HEXDIGITS)
if len(escape) != 10:
raise source.error("incomplete escape %s" % escape, len(escape))
c = int(escape[2:], 16)
chr(c) # raise ValueError for invalid code
return LITERAL, c
elif c == "0":
# octal escape
escape += source.getwhile(2, OCTDIGITS)
return LITERAL, int(escape[1:], 8)
elif c in DIGITS:
# octal escape *or* decimal group reference (sigh)
if source.next in DIGITS:
escape += source.get()
if (escape[1] in OCTDIGITS and escape[2] in OCTDIGITS and
source.next in OCTDIGITS):
# got three octal digits; this is an octal escape
escape += source.get()
c = int(escape[1:], 8)
if c > 0o377:
raise source.error('octal escape value %s outside of '
'range 0-0o377' % escape,
len(escape))
return LITERAL, c
# not an octal escape, so this is a group reference
group = int(escape[1:])
if group < state.groups:
if not state.checkgroup(group):
raise source.error("cannot refer to an open group",
len(escape))
state.checklookbehindgroup(group, source)
return GROUPREF, group
raise source.error("invalid group reference %d" % group, len(escape) - 1)
if len(escape) == 2:
if c in ASCIILETTERS:
raise source.error("bad escape %s" % escape, len(escape))
return LITERAL, ord(escape[1])
except ValueError:
pass
raise source.error("bad escape %s" % escape, len(escape))
def _parse_sub(source, state, verbose, nested):
# parse an alternation: a|b|c
items = []
itemsappend = items.append
sourcematch = source.match
start = source.tell()
while True:
itemsappend(_parse(source, state, verbose, nested + 1,
not nested and not items))
if not sourcematch("|"):
break
if len(items) == 1:
return items[0]
subpattern = SubPattern(state)
subpatternappend = subpattern.append
# check if all items share a common prefix
while True:
prefix = None
for item in items:
if not item:
break
if prefix is None:
prefix = item[0]
elif item[0] != prefix:
break
else:
# all subitems start with a common "prefix".
# move it out of the branch
for item in items:
del item[0]
subpatternappend(prefix)
continue # check next one
break
# check if the branch can be replaced by a character set
for item in items:
if len(item) != 1 or item[0][0] is not LITERAL:
break
else:
# we can store this as a character set instead of a
# branch (the compiler may optimize this even more)
subpatternappend((IN, [item[0] for item in items]))
return subpattern
subpattern.append((BRANCH, (None, items)))
return subpattern
def _parse_sub_cond(source, state, condgroup, verbose, nested):
item_yes = _parse(source, state, verbose, nested + 1)
if source.match("|"):
item_no = _parse(source, state, verbose, nested + 1)
if source.next == "|":
raise source.error("conditional backref with more than two branches")
else:
item_no = None
subpattern = SubPattern(state)
subpattern.append((GROUPREF_EXISTS, (condgroup, item_yes, item_no)))
return subpattern
def _parse(source, state, verbose, nested, first=False):
# parse a simple pattern
subpattern = SubPattern(state)
# precompute constants into local variables
subpatternappend = subpattern.append
sourceget = source.get
sourcematch = source.match
_len = len
_ord = ord
while True:
this = source.next
if this is None:
break # end of pattern
if this in "|)":
break # end of subpattern
sourceget()
if verbose:
# skip whitespace and comments
if this in WHITESPACE:
continue
if this == "#":
while True:
this = sourceget()
if this is None or this == "\n":
break
continue
if this[0] == "\\":
code = _escape(source, this, state)
subpatternappend(code)
elif this not in SPECIAL_CHARS:
subpatternappend((LITERAL, _ord(this)))
elif this == "[":
here = source.tell() - 1
# character set
set = []
setappend = set.append
## if sourcematch(":"):
## pass # handle character classes
if sourcematch("^"):
setappend((NEGATE, None))
# check remaining characters
start = set[:]
while True:
this = sourceget()
if this is None:
raise source.error("unterminated character set",
source.tell() - here)
if this == "]" and set != start:
break
elif this[0] == "\\":
code1 = _class_escape(source, this)
else:
code1 = LITERAL, _ord(this)
if sourcematch("-"):
# potential range
that = sourceget()
if that is None:
raise source.error("unterminated character set",
source.tell() - here)
if that == "]":
if code1[0] is IN:
code1 = code1[1][0]
setappend(code1)
setappend((LITERAL, _ord("-")))
break
if that[0] == "\\":
code2 = _class_escape(source, that)
else:
code2 = LITERAL, _ord(that)
if code1[0] != LITERAL or code2[0] != LITERAL:
msg = "bad character range %s-%s" % (this, that)
raise source.error(msg, len(this) + 1 + len(that))
lo = code1[1]
hi = code2[1]
if hi < lo:
msg = "bad character range %s-%s" % (this, that)
raise source.error(msg, len(this) + 1 + len(that))
setappend((RANGE, (lo, hi)))
else:
if code1[0] is IN:
code1 = code1[1][0]
setappend(code1)
# XXX: <fl> should move set optimization to compiler!
if _len(set)==1 and set[0][0] is LITERAL:
subpatternappend(set[0]) # optimization
elif _len(set)==2 and set[0][0] is NEGATE and set[1][0] is LITERAL:
subpatternappend((NOT_LITERAL, set[1][1])) # optimization
else:
# XXX: <fl> should add charmap optimization here
subpatternappend((IN, set))
elif this in REPEAT_CHARS:
# repeat previous item
here = source.tell()
if this == "?":
min, max = 0, 1
elif this == "*":
min, max = 0, MAXREPEAT
elif this == "+":
min, max = 1, MAXREPEAT
elif this == "{":
if source.next == "}":
subpatternappend((LITERAL, _ord(this)))
continue
min, max = 0, MAXREPEAT
lo = hi = ""
while source.next in DIGITS:
lo += sourceget()
if sourcematch(","):
while source.next in DIGITS:
hi += sourceget()
else:
hi = lo
if not sourcematch("}"):
subpatternappend((LITERAL, _ord(this)))
source.seek(here)
continue
if lo:
min = int(lo)
if min >= MAXREPEAT:
raise OverflowError("the repetition number is too large")
if hi:
max = int(hi)
if max >= MAXREPEAT:
raise OverflowError("the repetition number is too large")
if max < min:
raise source.error("min repeat greater than max repeat",
source.tell() - here)
else:
raise AssertionError("unsupported quantifier %r" % (char,))
# figure out which item to repeat
if subpattern:
item = subpattern[-1:]
else:
item = None
if not item or (_len(item) == 1 and item[0][0] is AT):
raise source.error("nothing to repeat",
source.tell() - here + len(this))
if item[0][0] in _REPEATCODES:
raise source.error("multiple repeat",
source.tell() - here + len(this))
if sourcematch("?"):
subpattern[-1] = (MIN_REPEAT, (min, max, item))
else:
subpattern[-1] = (MAX_REPEAT, (min, max, item))
elif this == ".":
subpatternappend((ANY, None))
elif this == "(":
start = source.tell() - 1
group = True
name = None
condgroup = None
add_flags = 0
del_flags = 0
if sourcematch("?"):
# options
char = sourceget()
if char is None:
raise source.error("unexpected end of pattern")
if char == "P":
# python extensions
if sourcematch("<"):
# named group: skip forward to end of name
name = source.getuntil(">")
if not name.isidentifier():
msg = "bad character in group name %r" % name
raise source.error(msg, len(name) + 1)
elif sourcematch("="):
# named backreference
name = source.getuntil(")")
if not name.isidentifier():
msg = "bad character in group name %r" % name
raise source.error(msg, len(name) + 1)
gid = state.groupdict.get(name)
if gid is None:
msg = "unknown group name %r" % name
raise source.error(msg, len(name) + 1)
if not state.checkgroup(gid):
raise source.error("cannot refer to an open group",
len(name) + 1)
state.checklookbehindgroup(gid, source)
subpatternappend((GROUPREF, gid))
continue
else:
char = sourceget()
if char is None:
raise source.error("unexpected end of pattern")
raise source.error("unknown extension ?P" + char,
len(char) + 2)
elif char == ":":
# non-capturing group
group = None
elif char == "#":
# comment
while True:
if source.next is None:
raise source.error("missing ), unterminated comment",
source.tell() - start)
if sourceget() == ")":
break
continue
elif char in "=!<":
# lookahead assertions
dir = 1
if char == "<":
char = sourceget()
if char is None:
raise source.error("unexpected end of pattern")
if char not in "=!":
raise source.error("unknown extension ?<" + char,
len(char) + 2)
dir = -1 # lookbehind
lookbehindgroups = state.lookbehindgroups
if lookbehindgroups is None:
state.lookbehindgroups = state.groups
p = _parse_sub(source, state, verbose, nested + 1)
if dir < 0:
if lookbehindgroups is None:
state.lookbehindgroups = None
if not sourcematch(")"):
raise source.error("missing ), unterminated subpattern",
source.tell() - start)
if char == "=":
subpatternappend((ASSERT, (dir, p)))
else:
subpatternappend((ASSERT_NOT, (dir, p)))
continue
elif char == "(":
# conditional backreference group
condname = source.getuntil(")")
group = None
if condname.isidentifier():
condgroup = state.groupdict.get(condname)
if condgroup is None:
msg = "unknown group name %r" % condname
raise source.error(msg, len(condname) + 1)
else:
try:
condgroup = int(condname)
if condgroup < 0:
raise ValueError
except ValueError:
msg = "bad character in group name %r" % condname
raise source.error(msg, len(condname) + 1) from None
if not condgroup:
raise source.error("bad group number",
len(condname) + 1)
if condgroup >= MAXGROUPS:
msg = "invalid group reference %d" % condgroup
raise source.error(msg, len(condname) + 1)
state.checklookbehindgroup(condgroup, source)
elif char in FLAGS or char == "-":
# flags
flags = _parse_flags(source, state, char)
if flags is None: # global flags
if not first or subpattern:
import warnings
warnings.warn(
'Flags not at the start of the expression %r%s' % (
source.string[:20], # truncate long regexes
' (truncated)' if len(source.string) > 20 else '',
),
DeprecationWarning, stacklevel=nested + 6
)
if (state.flags & SRE_FLAG_VERBOSE) and not verbose:
raise Verbose
continue
add_flags, del_flags = flags
group = None
else:
raise source.error("unknown extension ?" + char,
len(char) + 1)
# parse group contents
if group is not None:
try:
group = state.opengroup(name)
except error as err:
raise source.error(err.msg, len(name) + 1) from None
if condgroup:
p = _parse_sub_cond(source, state, condgroup, verbose, nested + 1)
else:
sub_verbose = ((verbose or (add_flags & SRE_FLAG_VERBOSE)) and
not (del_flags & SRE_FLAG_VERBOSE))
p = _parse_sub(source, state, sub_verbose, nested + 1)
if not source.match(")"):
raise source.error("missing ), unterminated subpattern",
source.tell() - start)
if group is not None:
state.closegroup(group, p)
subpatternappend((SUBPATTERN, (group, add_flags, del_flags, p)))
elif this == "^":
subpatternappend((AT, AT_BEGINNING))
elif this == "$":
subpattern.append((AT, AT_END))
else:
raise AssertionError("unsupported special character %r" % (char,))
return subpattern
def _parse_flags(source, state, char):
sourceget = source.get
add_flags = 0
del_flags = 0
if char != "-":
while True:
add_flags |= FLAGS[char]
char = sourceget()
if char is None:
raise source.error("missing -, : or )")
if char in ")-:":
break
if char not in FLAGS:
msg = "unknown flag" if char.isalpha() else "missing -, : or )"
raise source.error(msg, len(char))
if char == ")":
state.flags |= add_flags
return None
if add_flags & GLOBAL_FLAGS:
raise source.error("bad inline flags: cannot turn on global flag", 1)
if char == "-":
char = sourceget()
if char is None:
raise source.error("missing flag")
if char not in FLAGS:
msg = "unknown flag" if char.isalpha() else "missing flag"
raise source.error(msg, len(char))
while True:
del_flags |= FLAGS[char]
char = sourceget()
if char is None:
raise source.error("missing :")
if char == ":":
break
if char not in FLAGS:
msg = "unknown flag" if char.isalpha() else "missing :"
raise source.error(msg, len(char))
assert char == ":"
if del_flags & GLOBAL_FLAGS:
raise source.error("bad inline flags: cannot turn off global flag", 1)
if add_flags & del_flags:
raise source.error("bad inline flags: flag turned on and off", 1)
return add_flags, del_flags
def fix_flags(src, flags):
# Check and fix flags according to the type of pattern (str or bytes)
if isinstance(src, str):
if flags & SRE_FLAG_LOCALE:
raise ValueError("cannot use LOCALE flag with a str pattern")
if not flags & SRE_FLAG_ASCII:
flags |= SRE_FLAG_UNICODE
elif flags & SRE_FLAG_UNICODE:
raise ValueError("ASCII and UNICODE flags are incompatible")
else:
if flags & SRE_FLAG_UNICODE:
raise ValueError("cannot use UNICODE flag with a bytes pattern")
if flags & SRE_FLAG_LOCALE and flags & SRE_FLAG_ASCII:
raise ValueError("ASCII and LOCALE flags are incompatible")
return flags
def parse(str, flags=0, pattern=None):
# parse 're' pattern into list of (opcode, argument) tuples
source = Tokenizer(str)
if pattern is None:
pattern = Pattern()
pattern.flags = flags
pattern.str = str
try:
p = _parse_sub(source, pattern, flags & SRE_FLAG_VERBOSE, 0)
except Verbose:
# the VERBOSE flag was switched on inside the pattern. to be
# on the safe side, we'll parse the whole thing again...
pattern = Pattern()
pattern.flags = flags | SRE_FLAG_VERBOSE
pattern.str = str
source.seek(0)
p = _parse_sub(source, pattern, True, 0)
p.pattern.flags = fix_flags(str, p.pattern.flags)
if source.next is not None:
assert source.next == ")"
raise source.error("unbalanced parenthesis")
if flags & SRE_FLAG_DEBUG:
p.dump()
return p
def parse_template(source, pattern):
# parse 're' replacement string into list of literals and
# group references
s = Tokenizer(source)
sget = s.get
groups = []
literals = []
literal = []
lappend = literal.append
def addgroup(index, pos):
if index > pattern.groups:
raise s.error("invalid group reference %d" % index, pos)
if literal:
literals.append(''.join(literal))
del literal[:]
groups.append((len(literals), index))
literals.append(None)
groupindex = pattern.groupindex
while True:
this = sget()
if this is None:
break # end of replacement string
if this[0] == "\\":
# group
c = this[1]
if c == "g":
name = ""
if not s.match("<"):
raise s.error("missing <")
name = s.getuntil(">")
if name.isidentifier():
try:
index = groupindex[name]
except KeyError:
raise IndexError("unknown group name %r" % name)
else:
try:
index = int(name)
if index < 0:
raise ValueError
except ValueError:
raise s.error("bad character in group name %r" % name,
len(name) + 1) from None
if index >= MAXGROUPS:
raise s.error("invalid group reference %d" % index,
len(name) + 1)
addgroup(index, len(name) + 1)
elif c == "0":
if s.next in OCTDIGITS:
this += sget()
if s.next in OCTDIGITS:
this += sget()
lappend(chr(int(this[1:], 8) & 0xff))
elif c in DIGITS:
isoctal = False
if s.next in DIGITS:
this += sget()
if (c in OCTDIGITS and this[2] in OCTDIGITS and
s.next in OCTDIGITS):
this += sget()
isoctal = True
c = int(this[1:], 8)
if c > 0o377:
raise s.error('octal escape value %s outside of '
'range 0-0o377' % this, len(this))
lappend(chr(c))
if not isoctal:
addgroup(int(this[1:]), len(this) - 1)
else:
try:
this = chr(ESCAPES[this][1])
except KeyError:
if c in ASCIILETTERS:
import warnings
warnings.warn('bad escape %s' % this,
DeprecationWarning, stacklevel=4)
lappend(this)
else:
lappend(this)
if literal:
literals.append(''.join(literal))
if not isinstance(source, str):
# The tokenizer implicitly decodes bytes objects as latin-1, we must
# therefore re-encode the final representation.
literals = [None if s is None else s.encode('latin-1') for s in literals]
return groups, literals
def expand_template(template, match):
g = match.group
empty = match.string[:0]
groups, literals = template
literals = literals[:]
try:
for index, group in groups:
literals[index] = g(group) or empty
except IndexError:
raise error("invalid group reference %d" % index)
return empty.join(literals)
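if __name__ == "__main__":
    # Illustrative only (not part of the original module): parse a small pattern and
    # print its (opcode, argument) tree via SubPattern.dump().
    tree = parse(r"(ab|cd)+\d")
    tree.dump()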
|
import os
import time
while 1:
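    # On POSIX, os.system() returns the raw wait status; shifting right by 8 bits
    # extracts the child process's exit code.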
exit_code = os.system("cd /home/pi/usb4vc/rpi_app; python3 -u usb4vc_main.py") >> 8
print("App died! Exit code:", exit_code)
if exit_code == 169:
exit()
time.sleep(0.5)
|
"""
"""
import support
support.compileJPythonc("test254c.py", deep=1, core=1, jar="test254.jar", output="test254.err")
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class Resource(msrest.serialization.Model):
"""Common fields that are returned in the response for all Azure Resource Manager resources.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
class TrackedResource(Resource):
"""The resource model definition for an Azure Resource Manager tracked top level resource which has 'tags' and a 'location'.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param location: Required. The geo-location where the resource lives.
:type location: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TrackedResource, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
self.location = kwargs['location']
class ConnectedCluster(TrackedResource):
"""Represents a connected cluster.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param location: Required. The geo-location where the resource lives.
:type location: str
:param identity: Required. The identity of the connected cluster.
:type identity: ~connected_kubernetes_client.models.ConnectedClusterIdentity
:ivar system_data: Metadata pertaining to creation and last modification of the resource.
:vartype system_data: ~connected_kubernetes_client.models.SystemData
:param agent_public_key_certificate: Required. Base64 encoded public certificate used by the
agent to do the initial handshake to the backend services in Azure.
:type agent_public_key_certificate: str
:ivar kubernetes_version: The Kubernetes version of the connected cluster resource.
:vartype kubernetes_version: str
:ivar total_node_count: Number of nodes present in the connected cluster resource.
:vartype total_node_count: int
:ivar total_core_count: Number of CPU cores present in the connected cluster resource.
:vartype total_core_count: int
:ivar agent_version: Version of the agent running on the connected cluster resource.
:vartype agent_version: str
:param provisioning_state: Provisioning state of the connected cluster resource. Possible
values include: "Succeeded", "Failed", "Canceled", "Provisioning", "Updating", "Deleting",
"Accepted".
:type provisioning_state: str or ~connected_kubernetes_client.models.ProvisioningState
:param distribution: The Kubernetes distribution running on this connected cluster.
:type distribution: str
:param infrastructure: The infrastructure on which the Kubernetes cluster represented by this
connected cluster is running on.
:type infrastructure: str
:ivar offering: Connected cluster offering.
:vartype offering: str
:ivar managed_identity_certificate_expiration_time: Expiration time of the managed identity
certificate.
:vartype managed_identity_certificate_expiration_time: ~datetime.datetime
:ivar last_connectivity_time: Time representing the last instance when heart beat was received
from the cluster.
:vartype last_connectivity_time: ~datetime.datetime
:ivar connectivity_status: Represents the connectivity status of the connected cluster.
Possible values include: "Connecting", "Connected", "Offline", "Expired".
:vartype connectivity_status: str or ~connected_kubernetes_client.models.ConnectivityStatus
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'identity': {'required': True},
'system_data': {'readonly': True},
'agent_public_key_certificate': {'required': True},
'kubernetes_version': {'readonly': True},
'total_node_count': {'readonly': True},
'total_core_count': {'readonly': True},
'agent_version': {'readonly': True},
'offering': {'readonly': True},
'managed_identity_certificate_expiration_time': {'readonly': True},
'last_connectivity_time': {'readonly': True},
'connectivity_status': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'ConnectedClusterIdentity'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'agent_public_key_certificate': {'key': 'properties.agentPublicKeyCertificate', 'type': 'str'},
'kubernetes_version': {'key': 'properties.kubernetesVersion', 'type': 'str'},
'total_node_count': {'key': 'properties.totalNodeCount', 'type': 'int'},
'total_core_count': {'key': 'properties.totalCoreCount', 'type': 'int'},
'agent_version': {'key': 'properties.agentVersion', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'distribution': {'key': 'properties.distribution', 'type': 'str'},
'infrastructure': {'key': 'properties.infrastructure', 'type': 'str'},
'offering': {'key': 'properties.offering', 'type': 'str'},
'managed_identity_certificate_expiration_time': {'key': 'properties.managedIdentityCertificateExpirationTime', 'type': 'iso-8601'},
'last_connectivity_time': {'key': 'properties.lastConnectivityTime', 'type': 'iso-8601'},
'connectivity_status': {'key': 'properties.connectivityStatus', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ConnectedCluster, self).__init__(**kwargs)
self.identity = kwargs['identity']
self.system_data = None
self.agent_public_key_certificate = kwargs['agent_public_key_certificate']
self.kubernetes_version = None
self.total_node_count = None
self.total_core_count = None
self.agent_version = None
self.provisioning_state = kwargs.get('provisioning_state', None)
self.distribution = kwargs.get('distribution', None)
self.infrastructure = kwargs.get('infrastructure', None)
self.offering = None
self.managed_identity_certificate_expiration_time = None
self.last_connectivity_time = None
self.connectivity_status = None
class ConnectedClusterIdentity(msrest.serialization.Model):
"""Identity for the connected cluster.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar principal_id: The principal id of connected cluster identity. This property will only be
provided for a system assigned identity.
:vartype principal_id: str
:ivar tenant_id: The tenant id associated with the connected cluster. This property will only
be provided for a system assigned identity.
:vartype tenant_id: str
:param type: Required. The type of identity used for the connected cluster. The type
'SystemAssigned, includes a system created identity. The type 'None' means no identity is
assigned to the connected cluster. Possible values include: "None", "SystemAssigned". Default
value: "SystemAssigned".
:type type: str or ~connected_kubernetes_client.models.ResourceIdentityType
"""
_validation = {
'principal_id': {'readonly': True},
'tenant_id': {'readonly': True},
'type': {'required': True},
}
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ConnectedClusterIdentity, self).__init__(**kwargs)
self.principal_id = None
self.tenant_id = None
self.type = kwargs.get('type', "SystemAssigned")
class ConnectedClusterList(msrest.serialization.Model):
"""The paginated list of connected Clusters.
:param value: The list of connected clusters.
:type value: list[~connected_kubernetes_client.models.ConnectedCluster]
:param next_link: The link to fetch the next page of connected cluster.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[ConnectedCluster]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ConnectedClusterList, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class ConnectedClusterPatch(msrest.serialization.Model):
"""Object containing updates for patch operations.
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param properties: Describes the connected cluster resource properties that can be updated
during PATCH operation.
:type properties: str
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'properties': {'key': 'properties', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ConnectedClusterPatch, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
self.properties = kwargs.get('properties', None)
class ErrorAdditionalInfo(msrest.serialization.Model):
"""The resource management error additional info.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar type: The additional info type.
:vartype type: str
:ivar info: The additional info.
:vartype info: str
"""
_validation = {
'type': {'readonly': True},
'info': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'info': {'key': 'info', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ErrorAdditionalInfo, self).__init__(**kwargs)
self.type = None
self.info = None
class ErrorDetail(msrest.serialization.Model):
"""The error detail.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: The error code.
:vartype code: str
:ivar message: The error message.
:vartype message: str
:ivar target: The error target.
:vartype target: str
:ivar details: The error details.
:vartype details: list[~connected_kubernetes_client.models.ErrorDetail]
:ivar additional_info: The error additional info.
:vartype additional_info: list[~connected_kubernetes_client.models.ErrorAdditionalInfo]
"""
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
'target': {'readonly': True},
'details': {'readonly': True},
'additional_info': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[ErrorDetail]'},
'additional_info': {'key': 'additionalInfo', 'type': '[ErrorAdditionalInfo]'},
}
def __init__(
self,
**kwargs
):
super(ErrorDetail, self).__init__(**kwargs)
self.code = None
self.message = None
self.target = None
self.details = None
self.additional_info = None
class ErrorResponse(msrest.serialization.Model):
"""Common error response for all Azure Resource Manager APIs to return error details for failed operations. (This also follows the OData error response format.).
:param error: The error object.
:type error: ~connected_kubernetes_client.models.ErrorDetail
"""
_attribute_map = {
'error': {'key': 'error', 'type': 'ErrorDetail'},
}
def __init__(
self,
**kwargs
):
super(ErrorResponse, self).__init__(**kwargs)
self.error = kwargs.get('error', None)
class Operation(msrest.serialization.Model):
"""The Connected cluster API operation.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: Operation name: {Microsoft.Kubernetes}/{resource}/{operation}.
:vartype name: str
:ivar display: The object that represents the operation.
:vartype display: ~connected_kubernetes_client.models.OperationDisplay
"""
_validation = {
'name': {'readonly': True},
'display': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'OperationDisplay'},
}
def __init__(
self,
**kwargs
):
super(Operation, self).__init__(**kwargs)
self.name = None
self.display = None
class OperationDisplay(msrest.serialization.Model):
"""The object that represents the operation.
:param provider: Service provider: Microsoft.connectedClusters.
:type provider: str
:param resource: Connected Cluster Resource on which the operation is performed.
:type resource: str
:param operation: Operation type: Read, write, delete, etc.
:type operation: str
:param description: Description of the operation.
:type description: str
"""
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationDisplay, self).__init__(**kwargs)
self.provider = kwargs.get('provider', None)
self.resource = kwargs.get('resource', None)
self.operation = kwargs.get('operation', None)
self.description = kwargs.get('description', None)
class OperationList(msrest.serialization.Model):
"""The paginated list of connected cluster API operations.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of connected cluster API operations.
:vartype value: list[~connected_kubernetes_client.models.Operation]
:param next_link: The link to fetch the next page of connected cluster API operations.
:type next_link: str
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Operation]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationList, self).__init__(**kwargs)
self.value = None
self.next_link = kwargs.get('next_link', None)
class SystemData(msrest.serialization.Model):
"""Metadata pertaining to creation and last modification of the resource.
:param created_by: The identity that created the resource.
:type created_by: str
:param created_by_type: The type of identity that created the resource. Possible values
include: "User", "Application", "ManagedIdentity", "Key".
:type created_by_type: str or ~connected_kubernetes_client.models.CreatedByType
:param created_at: The timestamp of resource creation (UTC).
:type created_at: ~datetime.datetime
:param last_modified_by: The identity that last modified the resource.
:type last_modified_by: str
:param last_modified_by_type: The type of identity that last modified the resource. Possible
values include: "User", "Application", "ManagedIdentity", "Key".
:type last_modified_by_type: str or ~connected_kubernetes_client.models.LastModifiedByType
:param last_modified_at: The timestamp of resource modification (UTC).
:type last_modified_at: ~datetime.datetime
"""
_attribute_map = {
'created_by': {'key': 'createdBy', 'type': 'str'},
'created_by_type': {'key': 'createdByType', 'type': 'str'},
'created_at': {'key': 'createdAt', 'type': 'iso-8601'},
'last_modified_by': {'key': 'lastModifiedBy', 'type': 'str'},
'last_modified_by_type': {'key': 'lastModifiedByType', 'type': 'str'},
'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(SystemData, self).__init__(**kwargs)
self.created_by = kwargs.get('created_by', None)
self.created_by_type = kwargs.get('created_by_type', None)
self.created_at = kwargs.get('created_at', None)
self.last_modified_by = kwargs.get('last_modified_by', None)
self.last_modified_by_type = kwargs.get('last_modified_by_type', None)
self.last_modified_at = kwargs.get('last_modified_at', None)
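# Illustrative sketch only (hypothetical values): these generated models take keyword
# arguments, e.g. a minimal ConnectedCluster covering the fields marked required in
# _validation above.
# cluster = ConnectedCluster(
#     location="eastus",
#     identity=ConnectedClusterIdentity(type="SystemAssigned"),
#     agent_public_key_certificate="<base64-encoded certificate>",
# )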
|
from pydantic import BaseModel
STATUS201_DESC = 'Created.'
class Status201(BaseModel):
detail: str
class Config:
        schema_extra = {
'example': {
'detail': 'User Created.'
}
}
|
'''
Plan:
1. Build a LSTM classifier to predict the action from historical env, rewards and actions
2. Implement the rollout to collect simulation data
3. Use rewards to update the weights using sample_weights
'''
from keras.models import Sequential, model_from_json
from keras.losses import binary_crossentropy
from keras import metrics
import copy, os
from keras.layers import LSTM, Dense
from keras.preprocessing.sequence import pad_sequences
import numpy as np
'''
This is Q-Learning, we don't need off policy update, there's no approximation here, we are doing exhaustive search over action space
TODO @charles
1. Save/Load
2. train on batch of episodes, implement parallel batch roll out to collect batches
3. Have more epoch for positive final results, try to "over-fit" the final positive rewards
'''
class LSTMAgent(object):
def __init__(self, observation_space, action_space, timesteps, label, hidden_dim=32, discount_factor=1, loss_function='mean_absolute_error', verbose=False):
self.action_space = [0 for _ in range(action_space.n)]
self.data_dim = int(observation_space.shape[0]+action_space.n)
self.timesteps = timesteps
self.label = label
self.memory = pad_sequences([[[0 for _ in range(self.data_dim)]]], maxlen=self.timesteps, padding='pre')[0]
# this Q function predict accumulative reward from state and action Q(s,a)
# regression models output unbounded / normalized score
# expected input data shape: (batch_size, timesteps, data_dim)
self.model = Sequential()
self.model.add(LSTM(hidden_dim, return_sequences=True,
input_shape=(self.timesteps, self.data_dim))) # returns a sequence of vectors of dimension 32
# models.add(LSTM(32, return_sequences=True)) # returns a sequence of vectors of dimension 32
self.model.add(LSTM(hidden_dim, return_sequences=False)) # return a single vector of dimension 32
# models.add(LSTM(num_classes, return_sequences=True, activation='softmax')) # return a single vector of dimension 32
self.model.add(Dense(1, activation='linear'))
self.discount = discount_factor
self.loss_function = loss_function
self.model.compile(loss=self.loss_function,
optimizer='adam',
metrics=[self.loss_function],
# sample_weight_mode='temporal'
)
self.train_all = True
self.verbose = verbose
def learn(self, seq, rewards, batch_size=1, epoch=2, ratio=0.8, ):
# for non-greedy, use the final rewards for the entire sequence
# for greedy, use current reword for each time step in seq
memory = pad_sequences([[[0 for _ in range(self.data_dim)]]], maxlen=self.timesteps, padding='pre')[0]
# sequence can be longer than time steps
X = []
Y = []
if self.verbose:
print('final rewards', sum(rewards)/len(rewards))
print('rewards history', rewards)
for i in range(len(seq)):
observation, action_idx = seq[i]
action = self.action_space[:]
# one-hot action vector
action[action_idx] = 1
_X = self._get_SA(observation, action)
            reward = rewards[i] if rewards[i] > 0 else rewards[i] * 1  # negative rewards are scaled by 1 here, i.e. left unchanged
Y.append(np.array(reward))
memory = np.concatenate((memory, [_X])) # append new event in memory
memory = np.delete(memory, 0, axis=0) # pop the first from memory
_X = np.array(memory)
# print(_X.shape)
X.append(_X)
# for i in range(len(X)):
# print(X[i], Y[i])
divider = int(len(X)*ratio)
if divider == 0:
x_train = np.array(X)
y_train = np.array(Y)
x_test = np.array(X)
y_test = np.array(Y)
else:
x_train = np.array(X[:divider])
y_train = np.array(Y[:divider])
x_test = np.array(X[divider:])
y_test = np.array(Y[divider:])
# print(x_train.shape)
# apply discount here
sample_weights = []
acc = 1
if self.train_all:
for _ in range(len(X)):
sample_weights.insert(0, acc)
acc *= self.discount
else:
for _ in range(len(x_train)):
sample_weights.insert(0, acc)
acc *= self.discount
tmp = []
for i in range(len(sample_weights)):
if self.train_all:
tmp.append(sample_weights[i] * Y[i])
else:
tmp.append(sample_weights[i] * y_train[i])
print('discount rewards', tmp)
sample_weights = np.array(sample_weights)
if self.train_all:
X = np.array(X)
Y = np.array(Y)
self.model.fit(X, Y,
batch_size=batch_size, epochs=epoch,
sample_weight=sample_weights,
verbose=self.verbose
)
else:
self.model.fit(x_train, y_train,
batch_size=batch_size, epochs=epoch,
validation_data=(x_test, y_test),
sample_weight=sample_weights,
verbose = self.verbose
)
def online_learning(self, observation, action, reward, done=False, batch_size=1, epochs=1):
        raise NotImplementedError()
def _get_SA(self, observation, action):
return np.append(observation, action)
def _get_action_onehot(self, idx):
ret = self.action_space[:]
ret[idx] = 1
return ret
def act(self, observation, done=None):
# argmaxQ(s,a) here, select a with largest final result
sa_pairs = []
for action_index in range(len(self.action_space)):
sa_pairs.append(self._get_SA(observation, self._get_action_onehot(action_index)))
rewards = []
for sa in sa_pairs:
seq = copy.deepcopy(self.memory)
seq = np.concatenate((seq, [sa]))
seq = np.delete(seq, 0, axis=0) # pop the first from memory
seq = np.array([seq])
reward = self.model.predict(seq)
# print(sa, reward)
rewards.append(reward)
self.update_memory(observation, self._get_action_onehot(np.argmax(rewards)))
action = np.argmax(rewards)
if done:
self.memory = pad_sequences([[[0 for _ in range(self.data_dim)]]], maxlen=self.timesteps, padding='pre')[0]
return action
def update_memory(self, observation,action):
sa = self._get_SA(observation, action)
self.memory = np.concatenate((self.memory, [sa]))
self.memory = np.delete(self.memory, 0, 0)
@staticmethod
def _simulation(env, time_limit, render=False, epsilon=0.01, mode='heuristic'):
'''
Static simulation method, used to create episode in parallel
:param env:
:param time_limit:
:param render:
:param epsilon:
:param mode:
:return:
'''
# observation = env.reset()
# env.reset()
# episodes = []
# rewards = []
# action_history = []
# for t in range(time_limit):
# if render:
# env.render()
# action = self.act(observation)
# pre_observation = observation
# if np.random.uniform(0, 1) < epsilon:
# ind = np.random.randint(0, len(self.action_space))
# if self.verbose:
# print('exploration action:', ind)
# action = ind
# observation, reward, done, info = env.step(action)
# action_history.append(action)
#
# # what i'm doing here is to store the episode until the end
# # when episode finish update the models with entire episode with eligibility trace and discount
# episodes.append([pre_observation, action])
# rewards.append(reward)
# self.update_memory(pre_observation, self._get_action_onehot(action))
#
# if done or t == time_limit - 1:
# # final training after end of episode
# if mode == 'global':
# # use final rewards as label
# rewards = [rewards[-1] for _ in range(len(rewards))]
# elif mode == 'heuristic':
# if rewards[-1] > 0:
# # if the termination reward is positive, backpropagate the positive signal all the way to the beginning of the episode
# rewards = [(x + rewards[-1]) for x in rewards]
# else:
# rewards = rewards # use heuristic and final result
# elif mode == 'greedy':
# rewards = [(x + rewards[-1]) / 2 for x in rewards]
# else:
# raise Exception(
# 'unknown mode {1}, supported mode are {0}'.format(' '.join(['global', 'greedy', 'heuristic']),
# mode))
# if self.verbose:
# print('actions', action_history)
# epoch = 5
# # if the final reward is positive, train with more epoch
# if rewards[-1] > 0:
# epoch *= 3
# print(reward)
# print("Episode finished after {} timesteps".format(t + 1))
# break
def roll_out(self, env, num_episode, epsilon=0.01, discount=1, mode='greedy', save_every_epoch=False, folder_to_save='.', train_all=False, load_saved_model=True, render=True, time_limit=1000):
'''
:param env: env object
:param num_episode: if None run forever
:param epsilon: epsilon greedy for random exploration
:param discount: the discount factor
:param mode:
`greedy`: use heuristic and final reward
`global`: only use final reward
`heuristic`: use heuristic for non termination states, use final reward for termination state
:save_every_epoch: whether save current mode after each epoch
:return:
'''
if load_saved_model:
try:
self.load(folder_to_save, self.label)
except FileNotFoundError:
print('No saved file found, have you created the models yet? Loading is ignored, start with new models.')
self.train_all = train_all
self.discount = discount
episode = 0
episode_count = 0
reset = True if num_episode is None else False
num_episode = num_episode if num_episode is not None else 100
while episode < num_episode:
episode_count += 1
print('episode:', episode_count)
if reset:
episode = 0
episode += 1
observation = env.reset()
env.reset()
episodes = []
rewards = []
action_history = []
for t in range(time_limit):
if render:
env.render()
action = self.act(observation)
pre_observation = observation
if np.random.uniform(0,1) < epsilon:
ind = np.random.randint(0, len(self.action_space))
if self.verbose:
print('exploration action:', ind)
action = ind
observation, reward, done, info = env.step(action)
action_history.append(action)
# what i'm doing here is to store the episode until the end
# when episode finish update the models with entire episode with eligibility trace and discount
episodes.append([pre_observation, action])
rewards.append(reward)
self.update_memory(pre_observation, self._get_action_onehot(action))
                if done:
                    # Final training after the episode terminates. Note that if the
                    # time limit is reached before `done`, the episode is skipped for training.
if mode == 'global':
# use final rewards as label
rewards = [rewards[-1] for _ in range(len(rewards))]
elif mode == 'heuristic':
if rewards[-1] > 0:
# if the termination reward is positive, backpropagate the positive signal all the way to the beginning of the episode
rewards = [(x + rewards[-1]) for x in rewards]
else:
                            pass  # non-positive final reward: keep the per-step heuristic rewards unchanged
elif mode == 'greedy':
rewards = [(x + rewards[-1]) / 2 for x in rewards]
else:
                        raise Exception('unknown mode {0}, supported modes are: {1}'.format(mode, ', '.join(['global', 'greedy', 'heuristic'])))
if self.verbose:
print('actions', action_history)
epoch = 5
# if the final reward is positive, train with more epoch
if rewards[-1] > 0:
epoch *= 3
print('learning...', end='')
self.learn(episodes, rewards, batch_size=10, epoch=epoch)
print('done.')
if save_every_epoch:
self.save(folder_to_save, self.label)
                    if episode_count % 1000 == 0:
                        self.save(folder_to_save, self.label + '_' + str(episode_count))
                    print('final reward:', reward)
                    print("Episode finished after {} timesteps".format(t + 1))
break
    def load(self, folder_path, model_name):
        arch_file = os.path.join(folder_path, '{}_arch.json'.format(model_name))
        # Weights are written by save() via Keras save_weights (HDF5), so use an .h5 extension.
        weights_file = os.path.join(folder_path, '{}_weights.h5'.format(model_name))
        # Load model architecture + weights + shapes
        with open(arch_file, 'r') as json_file:  # read architecture json
            model_json = json_file.read()
        self.model = model_from_json(model_json)  # convert json -> model architecture
        self.model.load_weights(weights_file)  # load model weights
        self.model_input_shape = self.model.input_shape  # set input shape from loaded model
        self.model_output_shape = self.model.output_shape  # set output shape from loaded model
        self.model.compile(loss=self.loss_function,
                           optimizer='adam',
                           metrics=[self.loss_function],
                           # sample_weight_mode='temporal'
                           )
    def save(self, folder_path, model_name):
        arch_file = os.path.join(folder_path, '{}_arch.json'.format(model_name))
        # Match load(): weights go to an .h5 file since save_weights writes HDF5.
        weights_file = os.path.join(folder_path, '{}_weights.h5'.format(model_name))
        # Save model architecture + weights
        with open(arch_file, "w") as json_file:
            json_file.write(self.model.to_json())  # architecture: json format
        self.model.save_weights(weights_file)  # weights: hdf5 format
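# Illustrative usage sketch (not part of the original module). The agent class name
# and constructor below are hypothetical, and a Gym-style environment is assumed;
# adapt both to the actual definitions in this file.
#     import gym
#     env = gym.make('CartPole-v1')
#     agent = Agent(action_space=[0, 1], label='cartpole')  # hypothetical constructor
#     agent.roll_out(env, num_episode=100, epsilon=0.05, mode='heuristic',
#                    save_every_epoch=True, folder_to_save='./models',
#                    render=False, time_limit=500)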
|
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""Spyder completion container."""
# Standard library imports
# Third-party imports
from qtpy.QtWidgets import QMessageBox
# Local imports
from spyder.api.widgets.main_container import PluginMainContainer
class CompletionContainer(PluginMainContainer):
"""Stateless class used to store graphical widgets."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.statusbar_widgets = {}
def setup(self, options=None):
pass
def update_actions(self):
pass
def show_widget(self, Widget):
widget = Widget(self)
if isinstance(widget, QMessageBox):
if hasattr(widget, 'sig_restart_spyder'):
widget.sig_restart_spyder.connect(self.sig_restart_requested)
widget.exec_()
def register_statusbar_widgets(self, statusbar_classes):
for StatusBar in statusbar_classes:
statusbar = StatusBar(self)
self.statusbar_widgets[statusbar.ID] = statusbar
def all_statusbar_widgets(self):
return [self.statusbar_widgets[k] for k in self.statusbar_widgets]
def statusbar_rpc(self, status_key: str, method: str, args: tuple,
kwargs: dict):
"""
Perform a remote call on the status bar with ID `status_key`.
Parameters
----------
status_key: str
            Identifier of the status bar widget that should receive the method call.
method: str
Name of the method.
args: tuple
Positional arguments of the method call.
kwargs: dict
Optional arguments of the method call.
"""
statusbar = self.statusbar_widgets[status_key]
call = getattr(statusbar, method)
call(*args, **kwargs)
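    # Illustrative call (the status key, method name and arguments are hypothetical):
    #     container.statusbar_rpc('completion_status', 'set_value', ('ready',), {})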
|
from __future__ import absolute_import
from __future__ import print_function
import veriloggen
import submodule_read_verilog_nested
expected_verilog = """
module top #
(
parameter WIDTH = 8
)
(
input CLK,
input RST,
output [WIDTH-1:0] LED,
output [inst_blinkled_WIDTH-1+1-1:0] inst_blinkled_dummy_out0,
input [inst_blinkled_WIDTH-1+1-1:0] inst_blinkled_dummy_in0
);
localparam inst_blinkled_WIDTH = WIDTH;
wire [inst_blinkled_WIDTH-1+1-1:0] inst_blinkled_dummy_out2;
reg [inst_blinkled_WIDTH-1+1-1:0] inst_blinkled_dummy_in2;
wire [inst_blinkled_WIDTH-1+1-1:0] inst_blinkled_dummy_out1;
wire [inst_blinkled_WIDTH-1+1-1:0] inst_blinkled_dummy_in1;
blinkled
#(
.WIDTH(inst_blinkled_WIDTH)
)
inst_blinkled
(
.CLK(CLK),
.RST(RST),
.LED(LED),
.dummy_out0(inst_blinkled_dummy_out0),
.dummy_out1(inst_blinkled_dummy_out1),
.dummy_out2(inst_blinkled_dummy_out2),
.dummy_in0(inst_blinkled_dummy_in0),
.dummy_in1(inst_blinkled_dummy_in1),
.dummy_in2(inst_blinkled_dummy_in2)
);
endmodule
module blinkled #
(
parameter WIDTH = 8
)
(
input CLK,
input RST,
output [WIDTH-1:0] LED,
output [WIDTH-1:0] dummy_out0,
output [WIDTH-1:0] dummy_out1,
output [WIDTH-1:0] dummy_out2,
input [WIDTH-1:0] dummy_in0,
input [WIDTH-1:0] dummy_in1,
input [WIDTH-1:0] dummy_in2
);
sub_blinkled
#(
.WIDTH(WIDTH)
)
inst_sub_blinkled
(
.CLK(CLK),
.RST(RST),
.LED(LED),
.dummy_out0(dummy_out0),
.dummy_out1(dummy_out1),
.dummy_out2(dummy_out2),
.dummy_in0(dummy_in0),
.dummy_in1(dummy_in1),
.dummy_in2(dummy_in2)
);
endmodule
module sub_blinkled #
(
parameter WIDTH = 8
)
(
input CLK,
input RST,
output reg [WIDTH-1:0] LED,
output [WIDTH-1:0] dummy_out0,
output [WIDTH-1:0] dummy_out1,
output [WIDTH-1:0] dummy_out2,
input [WIDTH-1:0] dummy_in0,
input [WIDTH-1:0] dummy_in1,
input [WIDTH-1:0] dummy_in2
);
reg [32-1:0] count;
always @(posedge CLK) begin
if(RST) begin
count <= 0;
end else begin
if(count == 1023) begin
count <= 0;
end else begin
count <= count + 1;
end
end
end
always @(posedge CLK) begin
if(RST) begin
LED <= 0;
end else begin
if(count == 1023) begin
LED <= LED + 1;
end
end
end
endmodule
"""
def test():
test_module = submodule_read_verilog_nested.mkTop()
code = test_module.to_verilog()
from pyverilog.vparser.parser import VerilogParser
from pyverilog.ast_code_generator.codegen import ASTCodeGenerator
parser = VerilogParser()
expected_ast = parser.parse(expected_verilog)
codegen = ASTCodeGenerator()
expected_code = codegen.visit(expected_ast)
assert(expected_code == code)
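if __name__ == '__main__':
    # Convenience entry point added for illustration: run the comparison directly
    # instead of through pytest.
    test()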
|
# -*- coding: utf-8 -*-
# Copyright (C) 2021 Davide Gessa
'''
MIT License
Copyright (c) 2021 Davide Gessa
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
from .inputs import *
from .outputs import *
|
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'minNum' function below.
#
# The function is expected to return an INTEGER.
# The function accepts the following parameters:
# 1. INTEGER samDaily
# 2. INTEGER kellyDaily
# 3. INTEGER difference
#
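# Worked example (illustrative, based on the simulation below): with samDaily=2,
# kellyDaily=5 and difference=10, the running totals are
#   day 1: Sam 12, Kelly 5 | day 2: Sam 14, Kelly 10 | day 3: Sam 16, Kelly 15 | day 4: Sam 18, Kelly 20
# so Kelly first pulls ahead on day 4 and minNum returns 4.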
def minNum(samDaily, kellyDaily, difference):
day_count = 0
tot_kelly = 0
tot_sam = 0
    # Kelly can never catch up if she solves no more problems per day than Sam does.
    if kellyDaily <= samDaily:
        day_count = -1
    else:
        # Day 1: Sam keeps his head start (difference); Kelly starts from her daily rate.
        tot_sam = samDaily + difference
        tot_kelly = kellyDaily
        day_count += 1
while tot_sam >= tot_kelly:
tot_kelly += kellyDaily
tot_sam += samDaily
day_count += 1
return day_count
if __name__ == '__main__':
# fptr = open(os.environ['OUTPUT_PATH'], 'w')
samDaily = int(input().strip())
kellyDaily = int(input().strip())
difference = int(input().strip())
result = minNum(samDaily, kellyDaily, difference)
print(result)
# fptr.write(str(result) + '\n')
# fptr.close()
|