hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f73740f023ab34c3fce503fa5857fc4349b078d8 | 8,293 | py | Python | tern/analyze/default/command_lib/command_lib.py | ReconPangolin/tern | ed8b2b721397358f5ff8c4253aa4f0aa70a55afe | [
"BSD-2-Clause"
] | 2 | 2020-05-21T00:00:36.000Z | 2020-12-28T20:43:25.000Z | tern/analyze/default/command_lib/command_lib.py | JamieMagee/tern | 230d6d812d6a74e084c6d3d9d9396f2c716254b7 | [
"BSD-2-Clause"
] | null | null | null | tern/analyze/default/command_lib/command_lib.py | JamieMagee/tern | 230d6d812d6a74e084c6d3d9d9396f2c716254b7 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017-2020 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: BSD-2-Clause
"""
Invoking commands in the command library
"""
import logging
import os
import yaml
import copy
import pkg_resources
from tern.utils import constants
from tern.report import errors
# Resolve the YAML files that make up the command library. They ship
# inside the tern package, so locate them via pkg_resources.
# base image command library
base_file = pkg_resources.resource_filename(
    'tern', 'analyze/default/command_lib/base.yml')
# general snippets in command library
snippet_file = pkg_resources.resource_filename(
    'tern', 'analyze/default/command_lib/snippets.yml')
# common information
common_file = pkg_resources.resource_filename(
    'tern', 'analyze/default/command_lib/common.yml')
# command library
# Loaded once at import time and treated as read-only by the helpers below.
command_lib = {'common': {}, 'base': {}, 'snippets': {}}
with open(os.path.abspath(common_file)) as f:
    command_lib['common'] = yaml.safe_load(f)
with open(os.path.abspath(base_file)) as f:
    command_lib['base'] = yaml.safe_load(f)
with open(os.path.abspath(snippet_file)) as f:
    command_lib['snippets'] = yaml.safe_load(f)
# list of package information keys that the command library can accommodate
# base_keys are the plural keys used in base.yml listings;
# package_keys are the singular keys used in per-package snippet listings
base_keys = {'names', 'versions', 'licenses', 'copyrights', 'proj_urls',
             'srcs', 'files'}
package_keys = {'name', 'version', 'license', 'copyright', 'proj_url', 'src',
                'files'}
# global logger
logger = logging.getLogger(constants.logger_name)
class FormatAwk(dict):
    '''Mapping used with str.format_map for shell/awk code snippets.
    Snippets such as "awk '{print $1}'" contain brace groups that look like
    Python format fields. A plain dict would raise KeyError for those, so
    any missing key is echoed back wrapped in braces, leaving the awk
    syntax untouched while real keys still get substituted.'''
    def __missing__(self, key):
        # Reconstruct the original placeholder instead of raising KeyError.
        return ''.join(('{', key, '}'))
def get_base_listing(key):
    '''Return a deep copy of the base.yml listing for the given key.
    If the key is absent from the base command library, log a warning and
    return an empty dictionary.'''
    base = command_lib['base']
    if key not in base:
        logger.warning("%s", errors.no_listing_for_base_key.format(
            listing_key=key))
        return {}
    # Deep copy so callers cannot mutate the shared command library.
    return copy.deepcopy(base[key])
def get_command_listing(command_name):
    '''Return the snippet library listing for command_name.
    If the command is not in the snippet library, log a warning and return
    an empty dictionary. Note: the returned listing is the shared object,
    not a copy.'''
    snippets = command_lib['snippets']
    if command_name not in snippets:
        logger.warning("%s", errors.no_listing_for_snippet_key.format(
            listing_key=command_name))
        return {}
    return snippets[command_name]
def check_for_unique_package(package_list, package_name):
    '''In the snippet library the command name has a list of packages that
    can be installed with that command. A package named 'default' indicates
    that the method of retrieving information applies to any package, but a
    specifically named entry overrides the default.
    Return the package dictionary whose name matches package_name; failing
    that, the one named 'default'; failing that, an empty dictionary.'''
    # Two passes: exact name first, then the 'default' fallback.
    for wanted in (package_name, 'default'):
        for pkg_dict in package_list:
            if pkg_dict['name'] == wanted:
                return pkg_dict
    return {}
def check_library_key(listing, key):
    '''Given a command library listing, look up a key in it.
    Returns a tuple of (value, note). On success the note is ''. If the
    key is missing, return ({}, note) where the note indicates whether the
    key belongs to the base listing set, the package listing set, or is
    unsupported altogether.'''
    try:
        return listing[key], ''
    except KeyError:
        # BUG FIX: the previous code tested the caught KeyError *exception
        # object* for membership in the key sets ("e in base_keys"), which
        # can never be true for a set of strings, so the specific error
        # messages were unreachable. Test the key string itself instead.
        if key in base_keys and key not in package_keys:
            return {}, errors.no_listing_for_base_key.format(
                listing_key=key)
        if key in package_keys and key not in base_keys:
            return {}, errors.no_listing_for_package_key.format(
                listing_key=key)
        return {}, errors.unsupported_listing_for_key.format(listing_key=key)
def get_package_listing(command_name):
    '''Look up command_name in the snippet library and return its
    'packages' listing. Raises KeyError if the command listing exists but
    has no 'packages' entry (or if the command is unknown, since the
    fallback listing is an empty dict).'''
    listing = get_command_listing(command_name)
    return listing['packages']
def set_subcommand(command_obj, subcommand_type, subcommand_words):
    """Try to reassign one of subcommand_words as a subcommand of
    command_obj. On the first word that can be reassigned, flag the command
    according to subcommand_type ('install', 'remove', or anything else ->
    ignore) and return True. If no word can be reassigned, leave the
    command untouched and return False."""
    # Map the known subcommand types to their setters; anything else is
    # treated as 'ignore', matching the original if/elif/else chain.
    setters = {'install': command_obj.set_install,
               'remove': command_obj.set_remove}
    for candidate in subcommand_words:
        if not command_obj.reassign_word(candidate, 'subcommand'):
            continue
        setters.get(subcommand_type, command_obj.set_ignore)()
        return True
    return False
def set_command_attrs(command_obj):
    '''Given the command object, move any install/remove/ignore listings
    from the snippet library onto the command as subcommands and set the
    corresponding flags, then return True. If the command name is not in
    the snippet library, return False.'''
    listing = get_command_listing(command_obj.name)
    if not listing:
        # unknown command: nothing to set
        return False
    # Apply each subcommand category that the listing provides.
    for subcommand_type in ('install', 'remove', 'ignore'):
        if subcommand_type in listing:
            set_subcommand(command_obj, subcommand_type,
                           listing[subcommand_type])
    return True
def collate_snippets(snippet_list, package=''):
    '''Given a list of snippets, return a single shell command string with
    all the snippets formatted (via FormatAwk, substituting {package}) and
    joined by ' && '.
    Returns '' for an empty snippet list.'''
    formatted = []
    for snippet in snippet_list:
        # Escape bare '{}' so it survives Python formatting.
        # BUG FIX: previously this escaping was written back into the
        # caller's list (snippet_list[i] = ...), mutating the shared
        # command library listing; work on a local copy instead.
        if '{}' in snippet:
            snippet = snippet.replace('{}', '{{}}')
        formatted.append(snippet.format_map(FormatAwk(package=package)))
    # BUG FIX: the previous implementation indexed snippet_list[-1] and
    # raised IndexError on an empty list; join handles that case cleanly.
    return ' && '.join(formatted)
def check_sourcable(command, package_name):
    '''Given a command and package name, find out if the sources can be
    traced back by checking the package against the command library.
    Return True if the matching package (or the 'default' package) has a
    'url' or 'src' entry; otherwise return False.'''
    if command not in command_lib['snippets']:
        return False
    applicable = (package_name, 'default')
    for pkg_info in command_lib['snippets'][command]['packages']:
        if pkg_info['name'] in applicable:
            if 'url' in pkg_info or 'src' in pkg_info:
                return True
    return False
def check_pkg_format(binary):
    '''Given a binary package manager, return its associated pkg_format
    from base.yml, or '' if the binary (or the pkg_format entry) is not
    present in base.yml.'''
    try:
        pkg_format = command_lib['base'][binary]['pkg_format']
    except KeyError:
        # unknown binary or listing without a pkg_format entry
        return ''
    return pkg_format
def check_os_guess(binary):
    '''Given a binary package manager, return its associated os_guess
    entries from base.yml as a comma-separated string, or '' if the binary
    (or the os_guess entry) is not present in base.yml.'''
    try:
        guesses = [guess for guess in command_lib['base'][binary]['os_guess']]
        return ', '.join(guesses)
    except KeyError:
        # unknown binary or listing without an os_guess entry
        return ''
def check_pinning_separator(command_name):
    '''Given a command name, look it up in snippets.yml and return the
    pinning separator of the corresponding package manager from base.yml.
    Returns '' when the package listing is not a package-manager name
    string, or when base.yml has no pinning_separator for it.'''
    pkg_listing = get_package_listing(command_name)
    # Only a string listing names a package manager we can look up.
    if not isinstance(pkg_listing, str):
        return ''
    try:
        return command_lib['base'][pkg_listing]['pinning_separator']
    except KeyError:
        return ''
| 36.857778 | 78 | 0.669721 |
import logging
import os
import yaml
import copy
import pkg_resources
from tern.utils import constants
from tern.report import errors
base_file = pkg_resources.resource_filename(
'tern', 'analyze/default/command_lib/base.yml')
snippet_file = pkg_resources.resource_filename(
'tern', 'analyze/default/command_lib/snippets.yml')
common_file = pkg_resources.resource_filename(
'tern', 'analyze/default/command_lib/common.yml')
command_lib = {'common': {}, 'base': {}, 'snippets': {}}
with open(os.path.abspath(common_file)) as f:
command_lib['common'] = yaml.safe_load(f)
with open(os.path.abspath(base_file)) as f:
command_lib['base'] = yaml.safe_load(f)
with open(os.path.abspath(snippet_file)) as f:
command_lib['snippets'] = yaml.safe_load(f)
base_keys = {'names', 'versions', 'licenses', 'copyrights', 'proj_urls',
'srcs', 'files'}
package_keys = {'name', 'version', 'license', 'copyright', 'proj_url', 'src',
'files'}
logger = logging.getLogger(constants.logger_name)
class FormatAwk(dict):
def __missing__(self, key):
return '{' + key + '}'
def get_base_listing(key):
listing = {}
if key in command_lib['base'].keys():
listing = copy.deepcopy(command_lib['base'][key])
else:
logger.warning("%s", errors.no_listing_for_base_key.format(
listing_key=key))
return listing
def get_command_listing(command_name):
listing = {}
if command_name in command_lib['snippets'].keys():
listing = command_lib['snippets'][command_name]
else:
logger.warning("%s", errors.no_listing_for_snippet_key.format(
listing_key=command_name))
return listing
def check_for_unique_package(package_list, package_name):
pkg = {}
for package in package_list:
if package['name'] == package_name:
pkg = package
break
if not pkg:
for package in package_list:
if package['name'] == 'default':
pkg = package
break
return pkg
def check_library_key(listing, key):
try:
return listing[key], ''
except KeyError as e:
if e in base_keys and e not in package_keys:
return {}, errors.no_listing_for_base_key.format(
listing_key=e)
if e in package_keys and e not in base_keys:
return {}, errors.no_listing_for_package_key.format(
listing_key=e)
return {}, errors.unsupported_listing_for_key.format(listing_key=e)
def get_package_listing(command_name):
return get_command_listing(command_name)['packages']
def set_subcommand(command_obj, subcommand_type, subcommand_words):
for word in subcommand_words:
if command_obj.reassign_word(word, 'subcommand'):
if subcommand_type == 'install':
command_obj.set_install()
elif subcommand_type == 'remove':
command_obj.set_remove()
else:
command_obj.set_ignore()
return True
return False
def set_command_attrs(command_obj):
command_listing = get_command_listing(command_obj.name)
if command_listing:
if 'install' in command_listing.keys():
set_subcommand(command_obj, 'install', command_listing['install'])
if 'remove' in command_listing.keys():
set_subcommand(command_obj, 'remove', command_listing['remove'])
if 'ignore' in command_listing.keys():
set_subcommand(command_obj, 'ignore', command_listing['ignore'])
return True
return False
def collate_snippets(snippet_list, package=''):
for i, snip in enumerate(snippet_list):
if '{}' in snip:
snippet_list[i] = snip.replace('{}', '{{}}')
full_cmd = ''
last_index = len(snippet_list) - 1
for index in range(0, last_index):
full_cmd = full_cmd + snippet_list[index].format_map(
FormatAwk(package=package)) + ' && '
full_cmd = full_cmd + snippet_list[last_index].format_map(
FormatAwk(package=package))
return full_cmd
def check_sourcable(command, package_name):
sourcable = False
if command in command_lib['snippets'].keys():
for package in command_lib['snippets'][command]['packages']:
if package['name'] == package_name or \
package['name'] == 'default':
if 'url' in package.keys() or \
'src' in package.keys():
sourcable = True
return sourcable
def check_pkg_format(binary):
try:
return command_lib['base'][binary]['pkg_format']
except KeyError:
return ''
def check_os_guess(binary):
os_list = []
try:
for o in command_lib['base'][binary]['os_guess']:
os_list.append(o)
return ', '.join(os_list)
except KeyError:
return ''
def check_pinning_separator(command_name):
pkg_listing = get_package_listing(command_name)
if isinstance(pkg_listing, str):
try:
return command_lib['base'][pkg_listing]['pinning_separator']
except KeyError:
return ''
return ''
| true | true |
f73741dd958e866559e2c020134efd449df74c19 | 33,925 | py | Python | pyrosetta/models/_models.py | blockjoe/rosetta-api-client-python | 707f325f7560ffa6d5dfe361aff4779cc0b7182f | [
"Apache-2.0"
] | null | null | null | pyrosetta/models/_models.py | blockjoe/rosetta-api-client-python | 707f325f7560ffa6d5dfe361aff4779cc0b7182f | [
"Apache-2.0"
] | null | null | null | pyrosetta/models/_models.py | blockjoe/rosetta-api-client-python | 707f325f7560ffa6d5dfe361aff4779cc0b7182f | [
"Apache-2.0"
] | null | null | null | # generated by datamodel-codegen:
# filename: api.json
# timestamp: 2021-01-16T01:13:01+00:00
from __future__ import annotations
from enum import Enum
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Field
class SubNetworkIdentifier(BaseModel):
network: str = Field(..., example='shard 1')
metadata: Optional[Dict[str, Any]] = Field(
None, example={'producer': '0x52bc44d5378309ee2abf1539bf71de1b7d7be3b5'}
)
class BlockIdentifier(BaseModel):
index: int = Field(
..., description='This is also known as the block height.', example=1123941
)
hash_: str = Field(
...,
alias='hash',
example='0x1f2cc6c5027d2f201a5453ad1119574d2aed23a392654742ac3c78783c071f85',
)
class PartialBlockIdentifier(BaseModel):
index: Optional[int] = Field(None, example=1123941)
hash_: Optional[str] = Field(
None,
alias='hash',
example='0x1f2cc6c5027d2f201a5453ad1119574d2aed23a392654742ac3c78783c071f85',
)
class TransactionIdentifier(BaseModel):
hash_: str = Field(
...,
alias='hash',
description='Any transactions that are attributable only to a block (ex: a block event) should use the hash of the block as the identifier.',
example='0x2f23fd8cca835af21f3ac375bac601f97ead75f2e79143bdf71fe2c4be043e8f',
)
class OperationIdentifier(BaseModel):
index: int = Field(
...,
description='The operation index is used to ensure each operation has a unique identifier within a transaction. This index is only relative to the transaction and NOT GLOBAL. The operations in each transaction should start from index 0. To clarify, there may not be any notion of an operation index in the blockchain being described.',
example=5,
ge=0.0,
)
network_index: Optional[int] = Field(
None,
description='Some blockchains specify an operation index that is essential for client use. For example, Bitcoin uses a network_index to identify which UTXO was used in a transaction. network_index should not be populated if there is no notion of an operation index in a blockchain (typically most account-based blockchains).',
example=0,
ge=0.0,
)
class SubAccountIdentifier(BaseModel):
address: str = Field(
...,
description='The SubAccount address may be a cryptographic value or some other identifier (ex: bonded) that uniquely specifies a SubAccount.',
example='0x6b175474e89094c44da98b954eedeac495271d0f',
)
metadata: Optional[Dict[str, Any]] = Field(
None,
description='If the SubAccount address is not sufficient to uniquely specify a SubAccount, any other identifying information can be stored here. It is important to note that two SubAccounts with identical addresses but differing metadata will not be considered equal by clients.',
)
class Currency(BaseModel):
symbol: str = Field(
..., description='Canonical symbol associated with a currency.', example='BTC'
)
decimals: int = Field(
...,
description='Number of decimal places in the standard unit representation of the amount. For example, BTC has 8 decimals. Note that it is not possible to represent the value of some currency in atomic units that is not base 10.',
example=8,
ge=0.0,
)
metadata: Optional[Dict[str, Any]] = Field(
None,
description='Any additional information related to the currency itself. For example, it would be useful to populate this object with the contract address of an ERC-20 token.',
example={'Issuer': 'Satoshi'},
)
class SyncStatus(BaseModel):
current_index: Optional[int] = Field(
None,
description='CurrentIndex is the index of the last synced block in the current stage. This is a separate field from current_block_identifier in NetworkStatusResponse because blocks with indices up to and including the current_index may not yet be queryable by the caller. To reiterate, all indices up to and including current_block_identifier in NetworkStatusResponse must be queryable via the /block endpoint (excluding indices less than oldest_block_identifier).',
example=100,
)
target_index: Optional[int] = Field(
None,
description='TargetIndex is the index of the block that the implementation is attempting to sync to in the current stage.',
example=150,
)
stage: Optional[str] = Field(
None,
description='Stage is the phase of the sync process.',
example='header sync',
)
synced: Optional[bool] = Field(
None,
description='sycned is a boolean that indicates if an implementation has synced up to the most recent block. If this field is not populated, the caller should rely on a traditional tip timestamp comparison to determine if an implementation is synced. This field is particularly useful for quiescent blockchains (blocks only produced when there are pending transactions). In these blockchains, the most recent block could have a timestamp far behind the current time but the node could be healthy and at tip.',
)
class Peer(BaseModel):
peer_id: str = Field(..., example='0x52bc44d5378309ee2abf1539bf71de1b7d7be3b5')
metadata: Optional[Dict[str, Any]] = None
class Version(BaseModel):
rosetta_version: str = Field(
...,
description='The rosetta_version is the version of the Rosetta interface the implementation adheres to. This can be useful for clients looking to reliably parse responses.',
example='1.2.5',
)
node_version: str = Field(
...,
description='The node_version is the canonical version of the node runtime. This can help clients manage deployments.',
example='1.0.2',
)
middleware_version: Optional[str] = Field(
None,
description='When a middleware server is used to adhere to the Rosetta interface, it should return its version here. This can help clients manage deployments.',
example='0.2.7',
)
metadata: Optional[Dict[str, Any]] = Field(
None,
description='Any other information that may be useful about versioning of dependent services should be returned here.',
)
class OperationStatus(BaseModel):
status: str = Field(
..., description='The status is the network-specific status of the operation.'
)
successful: bool = Field(
...,
description='An Operation is considered successful if the Operation.Amount should affect the Operation.Account. Some blockchains (like Bitcoin) only include successful operations in blocks but other blockchains (like Ethereum) include unsuccessful operations that incur a fee. To reconcile the computed balance from the stream of Operations, it is critical to understand which Operation.Status indicate an Operation is successful and should affect an Account.',
)
class Timestamp(BaseModel):
__root__: int = Field(
...,
description='The timestamp of the block in milliseconds since the Unix Epoch. The timestamp is stored in milliseconds because some blockchains produce blocks more often than once a second.',
example=1582833600000,
ge=0.0,
)
class CurveType(Enum):
secp256k1 = 'secp256k1'
secp256r1 = 'secp256r1'
edwards25519 = 'edwards25519'
tweedle = 'tweedle'
class SignatureType(Enum):
ecdsa = 'ecdsa'
ecdsa_recovery = 'ecdsa_recovery'
ed25519 = 'ed25519'
schnorr_1 = 'schnorr_1'
schnorr_poseidon = 'schnorr_poseidon'
class CoinAction(Enum):
coin_created = 'coin_created'
coin_spent = 'coin_spent'
class CoinIdentifier(BaseModel):
identifier: str = Field(
...,
description='Identifier should be populated with a globally unique identifier of a Coin. In Bitcoin, this identifier would be transaction_hash:index.',
example='0x2f23fd8cca835af21f3ac375bac601f97ead75f2e79143bdf71fe2c4be043e8f:1',
)
class CoinChange(BaseModel):
coin_identifier: CoinIdentifier
coin_action: CoinAction
class ExemptionType(Enum):
greater_or_equal = 'greater_or_equal'
less_or_equal = 'less_or_equal'
dynamic = 'dynamic'
class BlockEventType(Enum):
block_added = 'block_added'
block_removed = 'block_removed'
class Operator(Enum):
or_ = 'or'
and_ = 'and'
class Direction(Enum):
forward = 'forward'
backward = 'backward'
class MempoolResponse(BaseModel):
transaction_identifiers: List[TransactionIdentifier]
class MetadataRequest(BaseModel):
metadata: Optional[Dict[str, Any]] = None
class NetworkStatusResponse(BaseModel):
current_block_identifier: BlockIdentifier
current_block_timestamp: Timestamp
genesis_block_identifier: BlockIdentifier
oldest_block_identifier: Optional[BlockIdentifier] = None
sync_status: Optional[SyncStatus] = None
peers: List[Peer]
class ConstructionCombineResponse(BaseModel):
signed_transaction: str
class TransactionIdentifierResponse(BaseModel):
transaction_identifier: TransactionIdentifier
metadata: Optional[Dict[str, Any]] = None
class CallResponse(BaseModel):
result: Dict[str, Any] = Field(
...,
description='Result contains the result of the `/call` invocation. This result will not be inspected or interpreted by Rosetta tooling and is left to the caller to decode.',
example={'count': 1000},
)
idempotent: bool = Field(
...,
description='Idempotent indicates that if `/call` is invoked with the same CallRequest again, at any point in time, it will return the same CallResponse. Integrators may cache the CallResponse if this is set to true to avoid making unnecessary calls to the Rosetta implementation. For this reason, implementers should be very conservative about returning true here or they could cause issues for the caller.',
)
class Error(BaseModel):
code: int = Field(
...,
description='Code is a network-specific error code. If desired, this code can be equivalent to an HTTP status code.',
example=12,
ge=0.0,
)
message: str = Field(
...,
description='Message is a network-specific error message. The message MUST NOT change for a given code. In particular, this means that any contextual information should be included in the details field.',
example='Invalid account format',
)
description: Optional[str] = Field(
None,
description='Description allows the implementer to optionally provide additional information about an error. In many cases, the content of this field will be a copy-and-paste from existing developer documentation. Description can ONLY be populated with generic information about a particular type of error. It MUST NOT be populated with information about a particular instantiation of an error (use `details` for this). Whereas the content of Error.Message should stay stable across releases, the content of Error.Description will likely change across releases (as implementers improve error documentation). For this reason, the content in this field is not part of any type assertion (unlike Error.Message).',
example='This error is returned when the requested AccountIdentifier is improperly formatted.',
)
retriable: bool = Field(
...,
description='An error is retriable if the same request may succeed if submitted again.',
)
details: Optional[Dict[str, Any]] = Field(
None,
description='Often times it is useful to return context specific to the request that caused the error (i.e. a sample of the stack trace or impacted account) in addition to the standard error message.',
example={'address': '0x1dcc4de8dec75d7aab85b567b6', 'error': 'not base64'},
)
class NetworkIdentifier(BaseModel):
blockchain: str = Field(..., example='bitcoin')
network: str = Field(
...,
description='If a blockchain has a specific chain-id or network identifier, it should go in this field. It is up to the client to determine which network-specific identifier is mainnet or testnet.',
example='mainnet',
)
sub_network_identifier: Optional[SubNetworkIdentifier] = None
class AccountIdentifier(BaseModel):
address: str = Field(
...,
description='The address may be a cryptographic public key (or some encoding of it) or a provided username.',
example='0x3a065000ab4183c6bf581dc1e55a605455fc6d61',
)
sub_account: Optional[SubAccountIdentifier] = None
metadata: Optional[Dict[str, Any]] = Field(
None,
description='Blockchains that utilize a username model (where the address is not a derivative of a cryptographic public key) should specify the public key(s) owned by the address in metadata.',
)
class Amount(BaseModel):
value: str = Field(
...,
description='Value of the transaction in atomic units represented as an arbitrary-sized signed integer. For example, 1 BTC would be represented by a value of 100000000.',
example='1238089899992',
)
currency: Currency
metadata: Optional[Dict[str, Any]] = None
class PublicKey(BaseModel):
hex_bytes: str = Field(
...,
description='Hex-encoded public key bytes in the format specified by the CurveType.',
)
curve_type: CurveType
class SigningPayload(BaseModel):
address: Optional[str] = Field(
None,
description='[DEPRECATED by `account_identifier` in `v1.4.4`] The network-specific address of the account that should sign the payload.',
)
account_identifier: Optional[AccountIdentifier] = None
hex_bytes: str
signature_type: Optional[SignatureType] = None
class Signature(BaseModel):
signing_payload: SigningPayload
public_key: PublicKey
signature_type: SignatureType
hex_bytes: str
class Coin(BaseModel):
coin_identifier: CoinIdentifier
amount: Amount
class BalanceExemption(BaseModel):
sub_account_address: Optional[str] = Field(
None,
description='SubAccountAddress is the SubAccountIdentifier.Address that the BalanceExemption applies to (regardless of the value of SubAccountIdentifier.Metadata).',
example='staking',
)
currency: Optional[Currency] = None
exemption_type: Optional[ExemptionType] = None
class BlockEvent(BaseModel):
sequence: int = Field(
...,
description='sequence is the unique identifier of a BlockEvent within the context of a NetworkIdentifier.',
example=5,
ge=0.0,
)
block_identifier: BlockIdentifier
type_: BlockEventType = Field(..., alias='type')
class RelatedTransaction(BaseModel):
network_identifier: Optional[NetworkIdentifier] = None
transaction_identifier: TransactionIdentifier
direction: Direction
class AccountBalanceRequest(BaseModel):
network_identifier: NetworkIdentifier
account_identifier: AccountIdentifier
block_identifier: Optional[PartialBlockIdentifier] = None
currencies: Optional[List[Currency]] = Field(
None,
description='In some cases, the caller may not want to retrieve all available balances for an AccountIdentifier. If the currencies field is populated, only balances for the specified currencies will be returned. If not populated, all available balances will be returned.',
)
class AccountBalanceResponse(BaseModel):
block_identifier: BlockIdentifier
balances: List[Amount] = Field(
..., description='A single account may have a balance in multiple currencies.'
)
metadata: Optional[Dict[str, Any]] = Field(
None,
description='Account-based blockchains that utilize a nonce or sequence number should include that number in the metadata. This number could be unique to the identifier or global across the account address.',
example={'sequence_number': 23},
)
class AccountCoinsRequest(BaseModel):
network_identifier: NetworkIdentifier
account_identifier: AccountIdentifier
include_mempool: bool = Field(
...,
description="Include state from the mempool when looking up an account's unspent coins. Note, using this functionality breaks any guarantee of idempotency.",
)
currencies: Optional[List[Currency]] = Field(
None,
description='In some cases, the caller may not want to retrieve coins for all currencies for an AccountIdentifier. If the currencies field is populated, only coins for the specified currencies will be returned. If not populated, all unspent coins will be returned.',
)
class AccountCoinsResponse(BaseModel):
block_identifier: BlockIdentifier
coins: List[Coin] = Field(
...,
description="If a blockchain is UTXO-based, all unspent Coins owned by an account_identifier should be returned alongside the balance. It is highly recommended to populate this field so that users of the Rosetta API implementation don't need to maintain their own indexer to track their UTXOs.",
)
metadata: Optional[Dict[str, Any]] = Field(
None,
description='Account-based blockchains that utilize a nonce or sequence number should include that number in the metadata. This number could be unique to the identifier or global across the account address.',
example={'sequence_number': 23},
)
class BlockRequest(BaseModel):
network_identifier: NetworkIdentifier
block_identifier: PartialBlockIdentifier
class BlockTransactionRequest(BaseModel):
network_identifier: NetworkIdentifier
block_identifier: BlockIdentifier
transaction_identifier: TransactionIdentifier
class MempoolTransactionRequest(BaseModel):
network_identifier: NetworkIdentifier
transaction_identifier: TransactionIdentifier
class NetworkListResponse(BaseModel):
network_identifiers: List[NetworkIdentifier]
class NetworkRequest(BaseModel):
network_identifier: NetworkIdentifier
metadata: Optional[Dict[str, Any]] = None
class ConstructionMetadataRequest(BaseModel):
network_identifier: NetworkIdentifier
options: Optional[Dict[str, Any]] = Field(
None,
description='Some blockchains require different metadata for different types of transaction construction (ex: delegation versus a transfer). Instead of requiring a blockchain node to return all possible types of metadata for construction (which may require multiple node fetches), the client can populate an options object to limit the metadata returned to only the subset required.',
)
public_keys: Optional[List[PublicKey]] = None
class ConstructionMetadataResponse(BaseModel):
metadata: Dict[str, Any] = Field(
...,
example={
'account_sequence': 23,
'recent_block_hash': '0x52bc44d5378309ee2abf1539bf71de1b7d7be3b5',
},
)
suggested_fee: Optional[List[Amount]] = None
class ConstructionDeriveRequest(BaseModel):
network_identifier: NetworkIdentifier
public_key: PublicKey
metadata: Optional[Dict[str, Any]] = None
class ConstructionDeriveResponse(BaseModel):
address: Optional[str] = Field(
None,
description='[DEPRECATED by `account_identifier` in `v1.4.4`] Address in network-specific format.',
)
account_identifier: Optional[AccountIdentifier] = None
metadata: Optional[Dict[str, Any]] = None
class ConstructionPreprocessResponse(BaseModel):
options: Optional[Dict[str, Any]] = Field(
None,
description='The options that will be sent directly to `/construction/metadata` by the caller.',
)
required_public_keys: Optional[List[AccountIdentifier]] = None
class ConstructionPayloadsResponse(BaseModel):
    """An unsigned transaction blob plus the payloads that must be signed."""

    unsigned_transaction: str
    payloads: List[SigningPayload]
class ConstructionCombineRequest(BaseModel):
    """Combine an unsigned transaction with a list of signatures."""

    network_identifier: NetworkIdentifier
    unsigned_transaction: str
    signatures: List[Signature]
class ConstructionParseRequest(BaseModel):
    """Parse a transaction blob, signed or unsigned, on a network."""

    network_identifier: NetworkIdentifier
    signed: bool = Field(
        ...,
        description='Signed is a boolean indicating whether the transaction is signed.',
    )
    transaction: str = Field(
        ...,
        description='This must be either the unsigned transaction blob returned by `/construction/payloads` or the signed transaction blob returned by `/construction/combine`.',
    )
class ConstructionHashRequest(BaseModel):
    """Hash a signed transaction on a network."""

    network_identifier: NetworkIdentifier
    signed_transaction: str
class ConstructionSubmitRequest(BaseModel):
    """Submit a signed transaction to a network."""

    network_identifier: NetworkIdentifier
    signed_transaction: str
class CallRequest(BaseModel):
    """Invoke a network-specific procedure call with a method name and parameters."""

    network_identifier: NetworkIdentifier
    method: str = Field(
        ...,
        description='Method is some network-specific procedure call. This method could map to a network-specific RPC endpoint, a method in an SDK generated from a smart contract, or some hybrid of the two. The implementation must define all available methods in the Allow object. However, it is up to the caller to determine which parameters to provide when invoking `/call`.',
        example='eth_call',
    )
    parameters: Dict[str, Any] = Field(
        ...,
        description='Parameters is some network-specific argument for a method. It is up to the caller to determine which parameters to provide when invoking `/call`.',
        example={
            'block_number': 23,
            'address': '0x52bc44d5378309ee2abf1539bf71de1b7d7be3b5',
        },
    )
class EventsBlocksRequest(BaseModel):
    """Page through the block event stream using optional offset/limit."""

    network_identifier: NetworkIdentifier
    offset: Optional[int] = Field(
        None,
        description='offset is the offset into the event stream to sync events from. If this field is not populated, we return the limit events backwards from tip. If this is set to 0, we start from the beginning.',
        example=5,
        ge=0.0,
    )
    limit: Optional[int] = Field(
        None,
        description='limit is the maximum number of events to fetch in one call. The implementation may return <= limit events.',
        example=5,
        ge=0.0,
    )
class EventsBlocksResponse(BaseModel):
    """Block events plus the maximum available sequence number."""

    max_sequence: int = Field(
        ...,
        description='max_sequence is the maximum available sequence number to fetch.',
        example=5,
        ge=0.0,
    )
    events: List[BlockEvent] = Field(
        ...,
        description='events is an array of BlockEvents indicating the order to add and remove blocks to maintain a canonical view of blockchain state. Lightweight clients can use this event stream to update state without implementing their own block syncing logic.',
    )
class SearchTransactionsRequest(BaseModel):
    """Filterable, paginated transaction search parameters."""

    network_identifier: NetworkIdentifier
    operator: Optional[Operator] = None
    max_block: Optional[int] = Field(
        None,
        description='max_block is the largest block index to consider when searching for transactions. If this field is not populated, the current block is considered the max_block. If you do not specify a max_block, it is possible a newly synced block will interfere with paginated transaction queries (as the offset could become invalid with newly added rows).',
        example=5,
        ge=0.0,
    )
    offset: Optional[int] = Field(
        None,
        description='offset is the offset into the query result to start returning transactions. If any search conditions are changed, the query offset will change and you must restart your search iteration.',
        example=5,
        ge=0.0,
    )
    limit: Optional[int] = Field(
        None,
        description='limit is the maximum number of transactions to return in one call. The implementation may return <= limit transactions.',
        example=5,
        ge=0.0,
    )
    transaction_identifier: Optional[TransactionIdentifier] = None
    account_identifier: Optional[AccountIdentifier] = None
    coin_identifier: Optional[CoinIdentifier] = None
    currency: Optional[Currency] = None
    status: Optional[str] = Field(
        None,
        description='status is the network-specific operation type.',
        example='reverted',
    )
    type_: Optional[str] = Field(
        None,
        alias='type',
        description='type is the network-specific operation type.',
        example='transfer',
    )
    address: Optional[str] = Field(
        None,
        description='address is AccountIdentifier.Address. This is used to get all transactions related to an AccountIdentifier.Address, regardless of SubAccountIdentifier.',
        example='0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347',
    )
    success: Optional[bool] = Field(
        None,
        description='success is a synthetic condition populated by parsing network-specific operation statuses (using the mapping provided in `/network/options`).',
    )
class Operation(BaseModel):
    """A single network-specific operation within a transaction."""

    operation_identifier: OperationIdentifier
    related_operations: Optional[List[OperationIdentifier]] = Field(
        None,
        description='Restrict referenced related_operations to identifier indices < the current operation_identifier.index. This ensures there exists a clear DAG-structure of relations. Since operations are one-sided, one could imagine relating operations in a single transfer or linking operations in a call tree.',
        example=[{'index': 1}, {'index': 2}],
    )
    type_: str = Field(
        ...,
        alias='type',
        description='Type is the network-specific type of the operation. Ensure that any type that can be returned here is also specified in the NetworkOptionsResponse. This can be very useful to downstream consumers that parse all block data.',
        example='Transfer',
    )
    status: Optional[str] = Field(
        None,
        description='Status is the network-specific status of the operation. Status is not defined on the transaction object because blockchains with smart contracts may have transactions that partially apply (some operations are successful and some are not). Blockchains with atomic transactions (all operations succeed or all operations fail) will have the same status for each operation. On-chain operations (operations retrieved in the `/block` and `/block/transaction` endpoints) MUST have a populated status field (anything on-chain must have succeeded or failed). However, operations provided during transaction construction (often times called "intent" in the documentation) MUST NOT have a populated status field (operations yet to be included on-chain have not yet succeeded or failed).',
        example='Reverted',
    )
    account: Optional[AccountIdentifier] = None
    amount: Optional[Amount] = None
    coin_change: Optional[CoinChange] = None
    metadata: Optional[Dict[str, Any]] = Field(
        None,
        example={
            'asm': '304502201fd8abb11443f8b1b9a04e0495e0543d05611473a790c8939f089d073f90509a022100f4677825136605d732e2126d09a2d38c20c75946cd9fc239c0497e84c634e3dd01 03301a8259a12e35694cc22ebc45fee635f4993064190f6ce96e7fb19a03bb6be2',
            'hex': '48304502201fd8abb11443f8b1b9a04e0495e0543d05611473a790c8939f089d073f90509a022100f4677825136605d732e2126d09a2d38c20c75946cd9fc239c0497e84c634e3dd012103301a8259a12e35694cc22ebc45fee635f4993064190f6ce96e7fb19a03bb6be2',
        },
    )
class Allow(BaseModel):
    """Capabilities and supported values advertised by an implementation."""

    operation_statuses: List[OperationStatus] = Field(
        ...,
        description='All Operation.Status this implementation supports. Any status that is returned during parsing that is not listed here will cause client validation to error.',
    )
    operation_types: List[str] = Field(
        ...,
        description='All Operation.Type this implementation supports. Any type that is returned during parsing that is not listed here will cause client validation to error.',
    )
    errors: List[Error] = Field(
        ...,
        description='All Errors that this implementation could return. Any error that is returned during parsing that is not listed here will cause client validation to error.',
    )
    historical_balance_lookup: bool = Field(
        ...,
        description='Any Rosetta implementation that supports querying the balance of an account at any height in the past should set this to true.',
    )
    timestamp_start_index: Optional[int] = Field(
        None,
        description='If populated, `timestamp_start_index` indicates the first block index where block timestamps are considered valid (i.e. all blocks less than `timestamp_start_index` could have invalid timestamps). This is useful when the genesis block (or blocks) of a network have timestamp 0. If not populated, block timestamps are assumed to be valid for all available blocks.',
        ge=0.0,
    )
    call_methods: Optional[List[str]] = Field(
        None,
        description='All methods that are supported by the /call endpoint. Communicating which parameters should be provided to /call is the responsibility of the implementer (this is en lieu of defining an entire type system and requiring the implementer to define that in Allow).',
    )
    balance_exemptions: Optional[List[BalanceExemption]] = Field(
        None,
        description='BalanceExemptions is an array of BalanceExemption indicating which account balances could change without a corresponding Operation. BalanceExemptions should be used sparingly as they may introduce significant complexity for integrators that attempt to reconcile all account balance changes. If your implementation relies on any BalanceExemptions, you MUST implement historical balance lookup (the ability to query an account balance at any BlockIdentifier).',
    )
    mempool_coins: Optional[bool] = Field(
        None,
        description="Any Rosetta implementation that can update an AccountIdentifier's unspent coins based on the contents of the mempool should populate this field as true. If false, requests to `/account/coins` that set `include_mempool` as true will be automatically rejected.",
    )
class NetworkOptionsResponse(BaseModel):
    """Version information and the Allow object for a network."""

    version: Version
    allow: Allow
class ConstructionPreprocessRequest(BaseModel):
    """Operations and fee hints used to determine construction metadata options."""

    network_identifier: NetworkIdentifier
    operations: List[Operation]
    metadata: Optional[Dict[str, Any]] = None
    max_fee: Optional[List[Amount]] = None
    suggested_fee_multiplier: Optional[float] = Field(None, ge=0.0)
class ConstructionPayloadsRequest(BaseModel):
    """Operations, metadata, and public keys used to build signing payloads."""

    network_identifier: NetworkIdentifier
    operations: List[Operation]
    metadata: Optional[Dict[str, Any]] = None
    public_keys: Optional[List[PublicKey]] = None
class ConstructionParseResponse(BaseModel):
    """Operations and signers recovered from a parsed transaction blob."""

    operations: List[Operation]
    signers: Optional[List[str]] = Field(
        None,
        description='[DEPRECATED by `account_identifier_signers` in `v1.4.4`] All signers (addresses) of a particular transaction. If the transaction is unsigned, it should be empty.',
    )
    account_identifier_signers: Optional[List[AccountIdentifier]] = None
    metadata: Optional[Dict[str, Any]] = None
class Transaction(BaseModel):
    """A transaction: identifier, operations, related transactions, and metadata."""

    transaction_identifier: TransactionIdentifier
    operations: List[Operation]
    related_transactions: Optional[List[RelatedTransaction]] = None
    metadata: Optional[Dict[str, Any]] = Field(
        None,
        description='Transactions that are related to other transactions (like a cross-shard transaction) should include the tranaction_identifier of these transactions in the metadata.',
        example={'size': 12378, 'lockTime': 1582272577},
    )
class BlockTransaction(BaseModel):
    """A transaction paired with the block it belongs to."""

    block_identifier: BlockIdentifier
    transaction: Transaction
class BlockTransactionResponse(BaseModel):
    """Wraps a single transaction."""

    transaction: Transaction
class MempoolTransactionResponse(BaseModel):
    """A mempool transaction with optional metadata."""

    transaction: Transaction
    metadata: Optional[Dict[str, Any]] = Field(
        None, example={'descendant_fees': 123923, 'ancestor_count': 2}
    )
class SearchTransactionsResponse(BaseModel):
    """Matching transactions, a total count, and the next pagination offset."""

    transactions: List[BlockTransaction] = Field(
        ...,
        description='transactions is an array of BlockTransactions sorted by most recent BlockIdentifier (meaning that transactions in recent blocks appear first). If there are many transactions for a particular search, transactions may not contain all matching transactions. It is up to the caller to paginate these transactions using the max_block field.',
    )
    total_count: int = Field(
        ...,
        description='total_count is the number of results for a given search. Callers typically use this value to concurrently fetch results by offset or to display a virtual page number associated with results.',
        example=5,
        ge=0.0,
    )
    next_offset: Optional[int] = Field(
        None,
        description='next_offset is the next offset to use when paginating through transaction results. If this field is not populated, there are no more transactions to query.',
        example=5,
        ge=0.0,
    )
class Block(BaseModel):
    """A block: identifier, parent, timestamp, transactions, and metadata."""

    block_identifier: BlockIdentifier
    parent_block_identifier: BlockIdentifier
    timestamp: Timestamp
    transactions: List[Transaction]
    metadata: Optional[Dict[str, Any]] = Field(
        None,
        example={
            'transactions_root': '0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347',
            'difficulty': '123891724987128947',
        },
    )
class BlockResponse(BaseModel):
    """A block plus any transaction identifiers that must be fetched separately."""

    block: Optional[Block] = None
    other_transactions: Optional[List[TransactionIdentifier]] = Field(
        None,
        description="Some blockchains may require additional transactions to be fetched that weren't returned in the block response (ex: block only returns transaction hashes). For blockchains with a lot of transactions in each block, this can be very useful as consumers can concurrently fetch all transactions returned.",
    )
# NOTE(review): dataset-extraction artifact (stats row: | 43.774194 | 798 | 0.724687 |) — a second copy of this module follows below.
from __future__ import annotations
from enum import Enum
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Field
class SubNetworkIdentifier(BaseModel):
    """Identifies a sub-network (e.g. a shard) with optional metadata."""

    network: str = Field(..., example='shard 1')
    metadata: Optional[Dict[str, Any]] = Field(
        None, example={'producer': '0x52bc44d5378309ee2abf1539bf71de1b7d7be3b5'}
    )
class BlockIdentifier(BaseModel):
    """Uniquely identifies a block by index (height) and hash."""

    index: int = Field(
        ..., description='This is also known as the block height.', example=1123941
    )
    hash_: str = Field(
        ...,
        alias='hash',
        example='0x1f2cc6c5027d2f201a5453ad1119574d2aed23a392654742ac3c78783c071f85',
    )
class PartialBlockIdentifier(BaseModel):
    """A block reference where index and/or hash may be omitted."""

    index: Optional[int] = Field(None, example=1123941)
    hash_: Optional[str] = Field(
        None,
        alias='hash',
        example='0x1f2cc6c5027d2f201a5453ad1119574d2aed23a392654742ac3c78783c071f85',
    )
class TransactionIdentifier(BaseModel):
    """Uniquely identifies a transaction by hash."""

    hash_: str = Field(
        ...,
        alias='hash',
        description='Any transactions that are attributable only to a block (ex: a block event) should use the hash of the block as the identifier.',
        example='0x2f23fd8cca835af21f3ac375bac601f97ead75f2e79143bdf71fe2c4be043e8f',
    )
class OperationIdentifier(BaseModel):
    """Identifies an operation within a transaction by index (and optional network_index)."""

    index: int = Field(
        ...,
        description='The operation index is used to ensure each operation has a unique identifier within a transaction. This index is only relative to the transaction and NOT GLOBAL. The operations in each transaction should start from index 0. To clarify, there may not be any notion of an operation index in the blockchain being described.',
        example=5,
        ge=0.0,
    )
    network_index: Optional[int] = Field(
        None,
        description='Some blockchains specify an operation index that is essential for client use. For example, Bitcoin uses a network_index to identify which UTXO was used in a transaction. network_index should not be populated if there is no notion of an operation index in a blockchain (typically most account-based blockchains).',
        example=0,
        ge=0.0,
    )
class SubAccountIdentifier(BaseModel):
    """Identifies a sub-account by address with optional disambiguating metadata."""

    address: str = Field(
        ...,
        description='The SubAccount address may be a cryptographic value or some other identifier (ex: bonded) that uniquely specifies a SubAccount.',
        example='0x6b175474e89094c44da98b954eedeac495271d0f',
    )
    metadata: Optional[Dict[str, Any]] = Field(
        None,
        description='If the SubAccount address is not sufficient to uniquely specify a SubAccount, any other identifying information can be stored here. It is important to note that two SubAccounts with identical addresses but differing metadata will not be considered equal by clients.',
    )
class Currency(BaseModel):
    """A currency: canonical symbol, decimal places, and optional metadata."""

    symbol: str = Field(
        ..., description='Canonical symbol associated with a currency.', example='BTC'
    )
    decimals: int = Field(
        ...,
        description='Number of decimal places in the standard unit representation of the amount. For example, BTC has 8 decimals. Note that it is not possible to represent the value of some currency in atomic units that is not base 10.',
        example=8,
        ge=0.0,
    )
    metadata: Optional[Dict[str, Any]] = Field(
        None,
        description='Any additional information related to the currency itself. For example, it would be useful to populate this object with the contract address of an ERC-20 token.',
        example={'Issuer': 'Satoshi'},
    )
class SyncStatus(BaseModel):
    """Sync progress: current/target index, stage, and synced flag.

    NOTE: the 'sycned' typo in the `synced` description mirrors the upstream
    spec string and is intentionally left unchanged (it is runtime schema text).
    """

    current_index: Optional[int] = Field(
        None,
        description='CurrentIndex is the index of the last synced block in the current stage. This is a separate field from current_block_identifier in NetworkStatusResponse because blocks with indices up to and including the current_index may not yet be queryable by the caller. To reiterate, all indices up to and including current_block_identifier in NetworkStatusResponse must be queryable via the /block endpoint (excluding indices less than oldest_block_identifier).',
        example=100,
    )
    target_index: Optional[int] = Field(
        None,
        description='TargetIndex is the index of the block that the implementation is attempting to sync to in the current stage.',
        example=150,
    )
    stage: Optional[str] = Field(
        None,
        description='Stage is the phase of the sync process.',
        example='header sync',
    )
    synced: Optional[bool] = Field(
        None,
        description='sycned is a boolean that indicates if an implementation has synced up to the most recent block. If this field is not populated, the caller should rely on a traditional tip timestamp comparison to determine if an implementation is synced. This field is particularly useful for quiescent blockchains (blocks only produced when there are pending transactions). In these blockchains, the most recent block could have a timestamp far behind the current time but the node could be healthy and at tip.',
    )
class Peer(BaseModel):
    """A network peer: id plus optional metadata."""

    peer_id: str = Field(..., example='0x52bc44d5378309ee2abf1539bf71de1b7d7be3b5')
    metadata: Optional[Dict[str, Any]] = None
class Version(BaseModel):
    """Version info for the Rosetta interface, node, and optional middleware."""

    rosetta_version: str = Field(
        ...,
        description='The rosetta_version is the version of the Rosetta interface the implementation adheres to. This can be useful for clients looking to reliably parse responses.',
        example='1.2.5',
    )
    node_version: str = Field(
        ...,
        description='The node_version is the canonical version of the node runtime. This can help clients manage deployments.',
        example='1.0.2',
    )
    middleware_version: Optional[str] = Field(
        None,
        description='When a middleware server is used to adhere to the Rosetta interface, it should return its version here. This can help clients manage deployments.',
        example='0.2.7',
    )
    metadata: Optional[Dict[str, Any]] = Field(
        None,
        description='Any other information that may be useful about versioning of dependent services should be returned here.',
    )
class OperationStatus(BaseModel):
    """Maps a network-specific operation status to whether it was successful."""

    status: str = Field(
        ..., description='The status is the network-specific status of the operation.'
    )
    successful: bool = Field(
        ...,
        description='An Operation is considered successful if the Operation.Amount should affect the Operation.Account. Some blockchains (like Bitcoin) only include successful operations in blocks but other blockchains (like Ethereum) include unsuccessful operations that incur a fee. To reconcile the computed balance from the stream of Operations, it is critical to understand which Operation.Status indicate an Operation is successful and should affect an Account.',
    )
class Timestamp(BaseModel):
    """Block timestamp in milliseconds since the Unix Epoch (custom-root model)."""

    __root__: int = Field(
        ...,
        description='The timestamp of the block in milliseconds since the Unix Epoch. The timestamp is stored in milliseconds because some blockchains produce blocks more often than once a second.',
        example=1582833600000,
        ge=0.0,
    )
class CurveType(Enum):
    """Supported cryptographic curve types."""

    secp256k1 = 'secp256k1'
    secp256r1 = 'secp256r1'
    edwards25519 = 'edwards25519'
    tweedle = 'tweedle'
class SignatureType(Enum):
    """Supported signature schemes."""

    ecdsa = 'ecdsa'
    ecdsa_recovery = 'ecdsa_recovery'
    ed25519 = 'ed25519'
    schnorr_1 = 'schnorr_1'
    schnorr_poseidon = 'schnorr_poseidon'
class CoinAction(Enum):
    """Whether a coin was created or spent."""

    coin_created = 'coin_created'
    coin_spent = 'coin_spent'
class CoinIdentifier(BaseModel):
    """Globally unique identifier of a Coin (e.g. transaction_hash:index)."""

    identifier: str = Field(
        ...,
        description='Identifier should be populated with a globally unique identifier of a Coin. In Bitcoin, this identifier would be transaction_hash:index.',
        example='0x2f23fd8cca835af21f3ac375bac601f97ead75f2e79143bdf71fe2c4be043e8f:1',
    )
class CoinChange(BaseModel):
    """A coin identifier together with the action applied to it."""

    coin_identifier: CoinIdentifier
    coin_action: CoinAction
class ExemptionType(Enum):
    """Direction(s) in which an exempt balance may change."""

    greater_or_equal = 'greater_or_equal'
    less_or_equal = 'less_or_equal'
    dynamic = 'dynamic'
class BlockEventType(Enum):
    """Whether a block was added to or removed from the canonical chain."""

    block_added = 'block_added'
    block_removed = 'block_removed'
class Operator(Enum):
    """Boolean operator used to combine search conditions."""

    or_ = 'or'
    and_ = 'and'
class Direction(Enum):
    """Direction of a related transaction (forward/backward)."""

    forward = 'forward'
    backward = 'backward'
class MempoolResponse(BaseModel):
    """Transaction identifiers currently in the mempool."""

    transaction_identifiers: List[TransactionIdentifier]
class MetadataRequest(BaseModel):
    """A request carrying only optional metadata."""

    metadata: Optional[Dict[str, Any]] = None
class NetworkStatusResponse(BaseModel):
    """Current chain state: tip, genesis, optional oldest block, sync status, peers."""

    current_block_identifier: BlockIdentifier
    current_block_timestamp: Timestamp
    genesis_block_identifier: BlockIdentifier
    oldest_block_identifier: Optional[BlockIdentifier] = None
    sync_status: Optional[SyncStatus] = None
    peers: List[Peer]
class ConstructionCombineResponse(BaseModel):
    """The fully signed transaction blob."""

    signed_transaction: str
class TransactionIdentifierResponse(BaseModel):
    """A transaction identifier plus optional metadata."""

    transaction_identifier: TransactionIdentifier
    metadata: Optional[Dict[str, Any]] = None
class CallResponse(BaseModel):
    """Result of a `/call` invocation and whether it is idempotent."""

    result: Dict[str, Any] = Field(
        ...,
        description='Result contains the result of the `/call` invocation. This result will not be inspected or interpreted by Rosetta tooling and is left to the caller to decode.',
        example={'count': 1000},
    )
    idempotent: bool = Field(
        ...,
        description='Idempotent indicates that if `/call` is invoked with the same CallRequest again, at any point in time, it will return the same CallResponse. Integrators may cache the CallResponse if this is set to true to avoid making unnecessary calls to the Rosetta implementation. For this reason, implementers should be very conservative about returning true here or they could cause issues for the caller.',
    )
class Error(BaseModel):
    """A network-specific error: code, stable message, retriability, optional details."""

    code: int = Field(
        ...,
        description='Code is a network-specific error code. If desired, this code can be equivalent to an HTTP status code.',
        example=12,
        ge=0.0,
    )
    message: str = Field(
        ...,
        description='Message is a network-specific error message. The message MUST NOT change for a given code. In particular, this means that any contextual information should be included in the details field.',
        example='Invalid account format',
    )
    description: Optional[str] = Field(
        None,
        description='Description allows the implementer to optionally provide additional information about an error. In many cases, the content of this field will be a copy-and-paste from existing developer documentation. Description can ONLY be populated with generic information about a particular type of error. It MUST NOT be populated with information about a particular instantiation of an error (use `details` for this). Whereas the content of Error.Message should stay stable across releases, the content of Error.Description will likely change across releases (as implementers improve error documentation). For this reason, the content in this field is not part of any type assertion (unlike Error.Message).',
        example='This error is returned when the requested AccountIdentifier is improperly formatted.',
    )
    retriable: bool = Field(
        ...,
        description='An error is retriable if the same request may succeed if submitted again.',
    )
    details: Optional[Dict[str, Any]] = Field(
        None,
        description='Often times it is useful to return context specific to the request that caused the error (i.e. a sample of the stack trace or impacted account) in addition to the standard error message.',
        example={'address': '0x1dcc4de8dec75d7aab85b567b6', 'error': 'not base64'},
    )
class NetworkIdentifier(BaseModel):
    """Identifies a blockchain network (and optional sub-network)."""

    blockchain: str = Field(..., example='bitcoin')
    network: str = Field(
        ...,
        description='If a blockchain has a specific chain-id or network identifier, it should go in this field. It is up to the client to determine which network-specific identifier is mainnet or testnet.',
        example='mainnet',
    )
    sub_network_identifier: Optional[SubNetworkIdentifier] = None
class AccountIdentifier(BaseModel):
    """Identifies an account by address, optional sub-account, and metadata."""

    address: str = Field(
        ...,
        description='The address may be a cryptographic public key (or some encoding of it) or a provided username.',
        example='0x3a065000ab4183c6bf581dc1e55a605455fc6d61',
    )
    sub_account: Optional[SubAccountIdentifier] = None
    metadata: Optional[Dict[str, Any]] = Field(
        None,
        description='Blockchains that utilize a username model (where the address is not a derivative of a cryptographic public key) should specify the public key(s) owned by the address in metadata.',
    )
class Amount(BaseModel):
    """A value in atomic units of a given currency."""

    value: str = Field(
        ...,
        description='Value of the transaction in atomic units represented as an arbitrary-sized signed integer. For example, 1 BTC would be represented by a value of 100000000.',
        example='1238089899992',
    )
    currency: Currency
    metadata: Optional[Dict[str, Any]] = None
class PublicKey(BaseModel):
    """A hex-encoded public key and its curve type."""

    hex_bytes: str = Field(
        ...,
        description='Hex-encoded public key bytes in the format specified by the CurveType.',
    )
    curve_type: CurveType
class SigningPayload(BaseModel):
    """Bytes to sign, the signing account, and the expected signature type."""

    address: Optional[str] = Field(
        None,
        description='[DEPRECATED by `account_identifier` in `v1.4.4`] The network-specific address of the account that should sign the payload.',
    )
    account_identifier: Optional[AccountIdentifier] = None
    hex_bytes: str
    signature_type: Optional[SignatureType] = None
class Signature(BaseModel):
    """A produced signature with its payload, public key, and type."""

    signing_payload: SigningPayload
    public_key: PublicKey
    signature_type: SignatureType
    hex_bytes: str
class Coin(BaseModel):
    """A coin identifier paired with its amount."""

    coin_identifier: CoinIdentifier
    amount: Amount
class BalanceExemption(BaseModel):
    """Describes an account balance that may change without a corresponding Operation."""

    sub_account_address: Optional[str] = Field(
        None,
        description='SubAccountAddress is the SubAccountIdentifier.Address that the BalanceExemption applies to (regardless of the value of SubAccountIdentifier.Metadata).',
        example='staking',
    )
    currency: Optional[Currency] = None
    exemption_type: Optional[ExemptionType] = None
class BlockEvent(BaseModel):
    """An ordered add/remove block event in the event stream."""

    sequence: int = Field(
        ...,
        description='sequence is the unique identifier of a BlockEvent within the context of a NetworkIdentifier.',
        example=5,
        ge=0.0,
    )
    block_identifier: BlockIdentifier
    type_: BlockEventType = Field(..., alias='type')
class RelatedTransaction(BaseModel):
    """A transaction related to another, with a direction."""

    network_identifier: Optional[NetworkIdentifier] = None
    transaction_identifier: TransactionIdentifier
    direction: Direction
class AccountBalanceRequest(BaseModel):
    """Query an account's balances, optionally at a block and for specific currencies."""

    network_identifier: NetworkIdentifier
    account_identifier: AccountIdentifier
    block_identifier: Optional[PartialBlockIdentifier] = None
    currencies: Optional[List[Currency]] = Field(
        None,
        description='In some cases, the caller may not want to retrieve all available balances for an AccountIdentifier. If the currencies field is populated, only balances for the specified currencies will be returned. If not populated, all available balances will be returned.',
    )
class AccountBalanceResponse(BaseModel):
    """Balances of an account at a block, with optional metadata."""

    block_identifier: BlockIdentifier
    balances: List[Amount] = Field(
        ..., description='A single account may have a balance in multiple currencies.'
    )
    metadata: Optional[Dict[str, Any]] = Field(
        None,
        description='Account-based blockchains that utilize a nonce or sequence number should include that number in the metadata. This number could be unique to the identifier or global across the account address.',
        example={'sequence_number': 23},
    )
class AccountCoinsRequest(BaseModel):
    """Query an account's unspent coins, optionally including mempool state."""

    network_identifier: NetworkIdentifier
    account_identifier: AccountIdentifier
    include_mempool: bool = Field(
        ...,
        description="Include state from the mempool when looking up an account's unspent coins. Note, using this functionality breaks any guarantee of idempotency.",
    )
    currencies: Optional[List[Currency]] = Field(
        None,
        description='In some cases, the caller may not want to retrieve coins for all currencies for an AccountIdentifier. If the currencies field is populated, only coins for the specified currencies will be returned. If not populated, all unspent coins will be returned.',
    )
class AccountCoinsResponse(BaseModel):
    """Unspent coins of an account at a block, with optional metadata."""

    block_identifier: BlockIdentifier
    coins: List[Coin] = Field(
        ...,
        description="If a blockchain is UTXO-based, all unspent Coins owned by an account_identifier should be returned alongside the balance. It is highly recommended to populate this field so that users of the Rosetta API implementation don't need to maintain their own indexer to track their UTXOs.",
    )
    metadata: Optional[Dict[str, Any]] = Field(
        None,
        description='Account-based blockchains that utilize a nonce or sequence number should include that number in the metadata. This number could be unique to the identifier or global across the account address.',
        example={'sequence_number': 23},
    )
class BlockRequest(BaseModel):
    """Fetch a block by network and (partial) block identifier."""

    network_identifier: NetworkIdentifier
    block_identifier: PartialBlockIdentifier
class BlockTransactionRequest(BaseModel):
    """Locate a single transaction by network, block, and transaction identifier."""

    network_identifier: NetworkIdentifier
    block_identifier: BlockIdentifier
    transaction_identifier: TransactionIdentifier
class MempoolTransactionRequest(BaseModel):
    """Locate a mempool transaction by network and transaction identifier."""

    network_identifier: NetworkIdentifier
    transaction_identifier: TransactionIdentifier
class NetworkListResponse(BaseModel):
    """Lists the NetworkIdentifiers an implementation supports."""

    network_identifiers: List[NetworkIdentifier]
class NetworkRequest(BaseModel):
    """A network identifier plus optional caller metadata."""

    network_identifier: NetworkIdentifier
    metadata: Optional[Dict[str, Any]] = None
class ConstructionMetadataRequest(BaseModel):
    """Construction-metadata query: network, optional options object, optional public keys."""

    network_identifier: NetworkIdentifier
    options: Optional[Dict[str, Any]] = Field(
        None,
        description='Some blockchains require different metadata for different types of transaction construction (ex: delegation versus a transfer). Instead of requiring a blockchain node to return all possible types of metadata for construction (which may require multiple node fetches), the client can populate an options object to limit the metadata returned to only the subset required.',
    )
    public_keys: Optional[List[PublicKey]] = None
class ConstructionMetadataResponse(BaseModel):
    """Network-specific metadata (and optional suggested fees) used for transaction construction."""

    metadata: Dict[str, Any] = Field(
        ...,
        example={
            'account_sequence': 23,
            'recent_block_hash': '0x52bc44d5378309ee2abf1539bf71de1b7d7be3b5',
        },
    )
    suggested_fee: Optional[List[Amount]] = None
class ConstructionDeriveRequest(BaseModel):
    """Derive an address/account from a public key on a given network."""

    network_identifier: NetworkIdentifier
    public_key: PublicKey
    metadata: Optional[Dict[str, Any]] = None
class ConstructionDeriveResponse(BaseModel):
    """Result of address derivation (`address` is deprecated in favor of `account_identifier`)."""

    address: Optional[str] = Field(
        None,
        description='[DEPRECATED by `account_identifier` in `v1.4.4`] Address in network-specific format.',
    )
    account_identifier: Optional[AccountIdentifier] = None
    metadata: Optional[Dict[str, Any]] = None
class ConstructionPreprocessResponse(BaseModel):
    """Options and required public keys produced by construction preprocessing."""

    options: Optional[Dict[str, Any]] = Field(
        None,
        description='The options that will be sent directly to `/construction/metadata` by the caller.',
    )
    required_public_keys: Optional[List[AccountIdentifier]] = None
class ConstructionPayloadsResponse(BaseModel):
    """An unsigned transaction blob plus the payloads that must be signed."""

    unsigned_transaction: str
    payloads: List[SigningPayload]
class ConstructionCombineRequest(BaseModel):
    """Combine an unsigned transaction with a list of signatures."""

    network_identifier: NetworkIdentifier
    unsigned_transaction: str
    signatures: List[Signature]
class ConstructionParseRequest(BaseModel):
    """Parse a transaction blob, signed or unsigned, on a network."""

    network_identifier: NetworkIdentifier
    signed: bool = Field(
        ...,
        description='Signed is a boolean indicating whether the transaction is signed.',
    )
    transaction: str = Field(
        ...,
        description='This must be either the unsigned transaction blob returned by `/construction/payloads` or the signed transaction blob returned by `/construction/combine`.',
    )
class ConstructionHashRequest(BaseModel):
    """Hash a signed transaction on a network."""

    network_identifier: NetworkIdentifier
    signed_transaction: str
class ConstructionSubmitRequest(BaseModel):
    """Submit a signed transaction to a network."""

    network_identifier: NetworkIdentifier
    signed_transaction: str
class CallRequest(BaseModel):
    """Invoke a network-specific procedure call with a method name and parameters."""

    network_identifier: NetworkIdentifier
    method: str = Field(
        ...,
        description='Method is some network-specific procedure call. This method could map to a network-specific RPC endpoint, a method in an SDK generated from a smart contract, or some hybrid of the two. The implementation must define all available methods in the Allow object. However, it is up to the caller to determine which parameters to provide when invoking `/call`.',
        example='eth_call',
    )
    parameters: Dict[str, Any] = Field(
        ...,
        description='Parameters is some network-specific argument for a method. It is up to the caller to determine which parameters to provide when invoking `/call`.',
        example={
            'block_number': 23,
            'address': '0x52bc44d5378309ee2abf1539bf71de1b7d7be3b5',
        },
    )
# Paginated request for block events: an optional offset into the event
# stream and a maximum number of events to fetch.
class EventsBlocksRequest(BaseModel):
    network_identifier: NetworkIdentifier
    offset: Optional[int] = Field(
        None,
        description='offset is the offset into the event stream to sync events from. If this field is not populated, we return the limit events backwards from tip. If this is set to 0, we start from the beginning.',
        example=5,
        ge=0.0,
    )
    limit: Optional[int] = Field(
        None,
        description='limit is the maximum number of events to fetch in one call. The implementation may return <= limit events.',
        example=5,
        ge=0.0,
    )
# The maximum available event sequence number plus the ordered list of
# block add/remove events.
class EventsBlocksResponse(BaseModel):
    max_sequence: int = Field(
        ...,
        description='max_sequence is the maximum available sequence number to fetch.',
        example=5,
        ge=0.0,
    )
    events: List[BlockEvent] = Field(
        ...,
        description='events is an array of BlockEvents indicating the order to add and remove blocks to maintain a canonical view of blockchain state. Lightweight clients can use this event stream to update state without implementing their own block syncing logic.',
    )
# Paginated transaction search with optional filters (transaction/account/coin
# identifiers, currency, status, type, address, success).
class SearchTransactionsRequest(BaseModel):
    network_identifier: NetworkIdentifier
    operator: Optional[Operator] = None
    max_block: Optional[int] = Field(
        None,
        description='max_block is the largest block index to consider when searching for transactions. If this field is not populated, the current block is considered the max_block. If you do not specify a max_block, it is possible a newly synced block will interfere with paginated transaction queries (as the offset could become invalid with newly added rows).',
        example=5,
        ge=0.0,
    )
    offset: Optional[int] = Field(
        None,
        description='offset is the offset into the query result to start returning transactions. If any search conditions are changed, the query offset will change and you must restart your search iteration.',
        example=5,
        ge=0.0,
    )
    limit: Optional[int] = Field(
        None,
        description='limit is the maximum number of transactions to return in one call. The implementation may return <= limit transactions.',
        example=5,
        ge=0.0,
    )
    transaction_identifier: Optional[TransactionIdentifier] = None
    account_identifier: Optional[AccountIdentifier] = None
    coin_identifier: Optional[CoinIdentifier] = None
    currency: Optional[Currency] = None
    status: Optional[str] = Field(
        None,
        description='status is the network-specific operation type.',
        example='reverted',
    )
    type_: Optional[str] = Field(
        None,
        alias='type',
        description='type is the network-specific operation type.',
        example='transfer',
    )
    address: Optional[str] = Field(
        None,
        description='address is AccountIdentifier.Address. This is used to get all transactions related to an AccountIdentifier.Address, regardless of SubAccountIdentifier.',
        example='0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347',
    )
    success: Optional[bool] = Field(
        None,
        description='success is a synthetic condition populated by parsing network-specific operation statuses (using the mapping provided in `/network/options`).',
    )
# A single network-specific operation within a transaction: its type, and
# optional status, account, amount, coin change, and free-form metadata.
class Operation(BaseModel):
    operation_identifier: OperationIdentifier
    related_operations: Optional[List[OperationIdentifier]] = Field(
        None,
        description='Restrict referenced related_operations to identifier indices < the current operation_identifier.index. This ensures there exists a clear DAG-structure of relations. Since operations are one-sided, one could imagine relating operations in a single transfer or linking operations in a call tree.',
        example=[{'index': 1}, {'index': 2}],
    )
    type_: str = Field(
        ...,
        alias='type',
        description='Type is the network-specific type of the operation. Ensure that any type that can be returned here is also specified in the NetworkOptionsResponse. This can be very useful to downstream consumers that parse all block data.',
        example='Transfer',
    )
    status: Optional[str] = Field(
        None,
        description='Status is the network-specific status of the operation. Status is not defined on the transaction object because blockchains with smart contracts may have transactions that partially apply (some operations are successful and some are not). Blockchains with atomic transactions (all operations succeed or all operations fail) will have the same status for each operation. On-chain operations (operations retrieved in the `/block` and `/block/transaction` endpoints) MUST have a populated status field (anything on-chain must have succeeded or failed). However, operations provided during transaction construction (often times called "intent" in the documentation) MUST NOT have a populated status field (operations yet to be included on-chain have not yet succeeded or failed).',
        example='Reverted',
    )
    account: Optional[AccountIdentifier] = None
    amount: Optional[Amount] = None
    coin_change: Optional[CoinChange] = None
    metadata: Optional[Dict[str, Any]] = Field(
        None,
        example={
            'asm': '304502201fd8abb11443f8b1b9a04e0495e0543d05611473a790c8939f089d073f90509a022100f4677825136605d732e2126d09a2d38c20c75946cd9fc239c0497e84c634e3dd01 03301a8259a12e35694cc22ebc45fee635f4993064190f6ce96e7fb19a03bb6be2',
            'hex': '48304502201fd8abb11443f8b1b9a04e0495e0543d05611473a790c8939f089d073f90509a022100f4677825136605d732e2126d09a2d38c20c75946cd9fc239c0497e84c634e3dd012103301a8259a12e35694cc22ebc45fee635f4993064190f6ce96e7fb19a03bb6be2',
        },
    )
# Capabilities advertised by an implementation: supported operation statuses
# and types, possible errors, and feature flags (historical balance lookup,
# call methods, balance exemptions, mempool coin tracking).
class Allow(BaseModel):
    operation_statuses: List[OperationStatus] = Field(
        ...,
        description='All Operation.Status this implementation supports. Any status that is returned during parsing that is not listed here will cause client validation to error.',
    )
    operation_types: List[str] = Field(
        ...,
        description='All Operation.Type this implementation supports. Any type that is returned during parsing that is not listed here will cause client validation to error.',
    )
    errors: List[Error] = Field(
        ...,
        description='All Errors that this implementation could return. Any error that is returned during parsing that is not listed here will cause client validation to error.',
    )
    historical_balance_lookup: bool = Field(
        ...,
        description='Any Rosetta implementation that supports querying the balance of an account at any height in the past should set this to true.',
    )
    timestamp_start_index: Optional[int] = Field(
        None,
        description='If populated, `timestamp_start_index` indicates the first block index where block timestamps are considered valid (i.e. all blocks less than `timestamp_start_index` could have invalid timestamps). This is useful when the genesis block (or blocks) of a network have timestamp 0. If not populated, block timestamps are assumed to be valid for all available blocks.',
        ge=0.0,
    )
    call_methods: Optional[List[str]] = Field(
        None,
        description='All methods that are supported by the /call endpoint. Communicating which parameters should be provided to /call is the responsibility of the implementer (this is en lieu of defining an entire type system and requiring the implementer to define that in Allow).',
    )
    balance_exemptions: Optional[List[BalanceExemption]] = Field(
        None,
        description='BalanceExemptions is an array of BalanceExemption indicating which account balances could change without a corresponding Operation. BalanceExemptions should be used sparingly as they may introduce significant complexity for integrators that attempt to reconcile all account balance changes. If your implementation relies on any BalanceExemptions, you MUST implement historical balance lookup (the ability to query an account balance at any BlockIdentifier).',
    )
    mempool_coins: Optional[bool] = Field(
        None,
        description="Any Rosetta implementation that can update an AccountIdentifier's unspent coins based on the contents of the mempool should populate this field as true. If false, requests to `/account/coins` that set `include_mempool` as true will be automatically rejected.",
    )
# Version information plus the Allow object describing implementation capabilities.
class NetworkOptionsResponse(BaseModel):
    version: Version
    allow: Allow
# Operations describing an intended transaction, with optional metadata,
# maximum fee, and suggested fee multiplier.
class ConstructionPreprocessRequest(BaseModel):
    network_identifier: NetworkIdentifier
    operations: List[Operation]
    metadata: Optional[Dict[str, Any]] = None
    max_fee: Optional[List[Amount]] = None
    suggested_fee_multiplier: Optional[float] = Field(None, ge=0.0)
# Operations describing an intended transaction, with optional metadata
# and the public keys involved.
class ConstructionPayloadsRequest(BaseModel):
    network_identifier: NetworkIdentifier
    operations: List[Operation]
    metadata: Optional[Dict[str, Any]] = None
    public_keys: Optional[List[PublicKey]] = None
# Operations parsed from a transaction blob plus the signers, in both the
# deprecated string form and the AccountIdentifier form.
class ConstructionParseResponse(BaseModel):
    operations: List[Operation]
    signers: Optional[List[str]] = Field(
        None,
        description='[DEPRECATED by `account_identifier_signers` in `v1.4.4`] All signers (addresses) of a particular transaction. If the transaction is unsigned, it should be empty.',
    )
    account_identifier_signers: Optional[List[AccountIdentifier]] = None
    metadata: Optional[Dict[str, Any]] = None
# A transaction: its identifier, operations, optionally related transactions,
# and free-form metadata.
class Transaction(BaseModel):
    transaction_identifier: TransactionIdentifier
    operations: List[Operation]
    related_transactions: Optional[List[RelatedTransaction]] = None
    metadata: Optional[Dict[str, Any]] = Field(
        None,
        description='Transactions that are related to other transactions (like a cross-shard transaction) should include the tranaction_identifier of these transactions in the metadata.',
        example={'size': 12378, 'lockTime': 1582272577},
    )
# A transaction paired with the identifier of the block that contains it.
class BlockTransaction(BaseModel):
    block_identifier: BlockIdentifier
    transaction: Transaction
# Wrapper returning a single transaction.
class BlockTransactionResponse(BaseModel):
    transaction: Transaction
# A mempool transaction plus optional metadata (e.g. descendant fees,
# ancestor count).
class MempoolTransactionResponse(BaseModel):
    transaction: Transaction
    metadata: Optional[Dict[str, Any]] = Field(
        None, example={'descendant_fees': 123923, 'ancestor_count': 2}
    )
# Search results: the matching block transactions, total result count, and
# the next pagination offset (absent when the result set is exhausted).
class SearchTransactionsResponse(BaseModel):
    transactions: List[BlockTransaction] = Field(
        ...,
        description='transactions is an array of BlockTransactions sorted by most recent BlockIdentifier (meaning that transactions in recent blocks appear first). If there are many transactions for a particular search, transactions may not contain all matching transactions. It is up to the caller to paginate these transactions using the max_block field.',
    )
    total_count: int = Field(
        ...,
        description='total_count is the number of results for a given search. Callers typically use this value to concurrently fetch results by offset or to display a virtual page number associated with results.',
        example=5,
        ge=0.0,
    )
    next_offset: Optional[int] = Field(
        None,
        description='next_offset is the next offset to use when paginating through transaction results. If this field is not populated, there are no more transactions to query.',
        example=5,
        ge=0.0,
    )
# A block: its identifier, parent identifier, timestamp, transactions, and
# free-form metadata.
class Block(BaseModel):
    block_identifier: BlockIdentifier
    parent_block_identifier: BlockIdentifier
    timestamp: Timestamp
    transactions: List[Transaction]
    metadata: Optional[Dict[str, Any]] = Field(
        None,
        example={
            'transactions_root': '0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347',
            'difficulty': '123891724987128947',
        },
    )
# A block (if found) plus identifiers of transactions that must be fetched
# separately via `/block/transaction`.
class BlockResponse(BaseModel):
    block: Optional[Block] = None
    other_transactions: Optional[List[TransactionIdentifier]] = Field(
        None,
        description="Some blockchains may require additional transactions to be fetched that weren't returned in the block response (ex: block only returns transaction hashes). For blockchains with a lot of transactions in each block, this can be very useful as consumers can concurrently fetch all transactions returned.",
    )
| true | true |
f73741f6d33de3f69a119c74600af379993fdf34 | 9,003 | py | Python | samples_validator/utils.py | PlatformOfTrust/code-samples-validator | 75fa24d93ccafaa51f7e1c0ebae447ac2bf933e0 | [
"MIT"
] | null | null | null | samples_validator/utils.py | PlatformOfTrust/code-samples-validator | 75fa24d93ccafaa51f7e1c0ebae447ac2bf933e0 | [
"MIT"
] | null | null | null | samples_validator/utils.py | PlatformOfTrust/code-samples-validator | 75fa24d93ccafaa51f7e1c0ebae447ac2bf933e0 | [
"MIT"
] | 1 | 2020-04-28T09:54:33.000Z | 2020-04-28T09:54:33.000Z | import ast
from collections import defaultdict
from pathlib import Path
from typing import List, Optional
from samples_validator.base import ApiTestResult, CodeSample, HttpMethod
class TestExecutionResultMap:
    """
    Data structure for storing results of test runs for each code sample
    based on its HTTP resource path
    """
    def __init__(self):
        # nested dict keyed by path segments; leaves live under a 'methods'
        # key mapping HttpMethod -> ApiTestResult
        self._map = {}
    def put(self,
            test_result: ApiTestResult,
            replace_keys: Optional[List[dict]] = None,
            extra: Optional[dict] = None):
        """Store a test result under its sample's path.
        :param replace_keys: list of {from_key: to_key} renames to apply to
            the result's JSON body before storing
        :param extra: additional key/values merged into the body
        """
        replace_keys = replace_keys or []
        # NOTE: when json_body is truthy, parent_body aliases it, so the
        # renames/extras below mutate test_result.json_body in place and are
        # visible to later get_parent_body() lookups.
        parent_body = test_result.json_body or {}
        for replacement in replace_keys:
            for key_from, key_to in replacement.items():
                if key_from in parent_body:
                    parent_body[key_to] = parent_body[key_from]
        parent_body.update(extra or {})
        self._put_test_result(
            self._map, test_result, path=test_result.sample.name,
        )
    def get_parent_result(self, sample: CodeSample) -> Optional[ApiTestResult]:
        """Return the POST result of the sample's parent resource, if any."""
        return self._get_parent_test_result(
            self._map, sample, path=sample.name, current_body={},
        )
    def get_parent_body(
            self,
            sample: CodeSample,
            escaped: bool = False) -> dict:
        """Return the accumulated JSON body of the sample's parent results.
        :param escaped: if True, wrap keys in curly braces (placeholder form)
        """
        body: dict = {}
        self._get_parent_test_result(
            self._map, sample, path=sample.name, current_body=body,
        )
        if escaped:
            # we want to replace placeholders like "{data}",
            # not the pure strings like "data"
            body = {
                f'{{{k}}}' if not k.startswith('{') else k: v
                for k, v in body.items()
            }
        return body
    def _put_test_result(
            self,
            current_dict: dict,
            test_result: ApiTestResult,
            path: str) -> dict:
        """
        Place test result in the maximum nested structure based on its path
        :param current_dict: Dict on current nesting level
        :param test_result: Result of running the code sample
        :param path: Path of the sample relative to current nesting level
        :return: Modified version of original dict containing the test_result
        For example, sample's path is 'a/b', then resulted dict will look like
        {'a': {'b': {'methods': {<HttpMethod.get: 'GET'>: ApiTestResult(..)}}}}
        """
        path_parts = path.split('/')
        current_path = path_parts[0]
        further_path = '/'.join(path_parts[1:])
        if not current_dict.get(current_path):
            current_dict[current_path] = defaultdict(dict)
        if not further_path:
            # leaf level: record the result under the sample's HTTP method
            http_method = test_result.sample.http_method
            current_dict[current_path]['methods'][http_method] = test_result
        else:
            current_dict[current_path] = self._put_test_result(
                current_dict[current_path], test_result, further_path,
            )
        return current_dict
    def _get_parent_test_result(
            self,
            current_dict: dict,
            sample: CodeSample,
            path: str,
            current_body: dict,
            current_parent: Optional[ApiTestResult] = None,
    ) -> Optional[ApiTestResult]:
        """
        Get the result of POST sample of parent resource in REST terminology.
        For example, we have a result of POST /parent. So for the
        /parent/{id} we want to get the result of previous request, mainly
        for substitution of the `id` param in the future
        :param current_dict: Dict on current nesting level
        :param sample: "Child" code sample
        :param path: Path of the sample relative to current nesting level
        :param current_parent: Current result of a method
        :return: Test result if it's present in the structure
        """
        path_parts = path.split('/')
        current_path = path_parts[0]
        further_path = '/'.join(path_parts[1:])
        # a POST result at this level becomes the closest parent seen so far
        current_methods = current_dict.get('methods', {})
        current_parent = current_methods.get(HttpMethod.post, current_parent)
        next_dict = current_dict.get(current_path)
        # accumulate each ancestor's body into current_body (mutated in place)
        if current_parent and current_parent.json_body:
            current_body.update(current_parent.json_body)
        if not next_dict:
            return current_parent
        if not further_path:
            return current_parent
        else:
            return self._get_parent_test_result(
                next_dict, sample, further_path, current_body, current_parent,
            )
class CodeSamplesTree:
    """
    Data structure for storing code samples in a tree form based on
    HTTP resource path
    """
    def __init__(self):
        # nested dict keyed by path segments; leaves live under a 'methods'
        # key mapping HttpMethod -> CodeSample
        self._tree = {}
    def put(self, sample: CodeSample):
        """Insert a sample keyed by its language value plus resource path."""
        self._put_code_sample(
            self._tree, f'{sample.lang.value}{sample.name}', sample,
        )
    def list_sorted_samples(self) -> List[CodeSample]:
        """Return all samples in parent-before-child execution order."""
        sorted_samples: List[CodeSample] = []
        self._sort_samples(self._tree, sorted_samples)
        return sorted_samples
    def _put_code_sample(self,
                         current_dict: dict,
                         path: str,
                         sample: CodeSample) -> dict:
        """
        Place code sample in the maximum nested structure based on its path
        :param current_dict: Dict on current nesting level
        :param path: Path of the sample relative to current nesting level
        :param sample: Code sample to put
        :return: Modified version of original dict containing the code sample
        For example, sample's path is 'a/b', then resulted dict will look like
        {'a': {'b': {'methods': {<HttpMethod.get: 'GET'>: CodeSample(..)}}}}
        """
        path_parts = path.split('/')
        current_path = path_parts[0]
        further_path = '/'.join(path_parts[1:])
        if not current_dict.get(current_path):
            current_dict[current_path] = defaultdict(dict)
        if not further_path:
            current_dict[current_path]['methods'][sample.http_method] = sample
        else:
            current_dict[current_path] = self._put_code_sample(
                current_dict[current_path], further_path, sample,
            )
        return current_dict
    def _sort_samples(
            self,
            endpoints: dict,
            result_list: List[CodeSample]):
        """
        DFS implementation for loading code samples from the nested structure
        created by the _put_code_sample method. It takes into account
        child-parent relations and sorts HTTP methods in logical order,
        e.g. create parent, create child, delete child, delete parent.
        :param endpoints: Result of _put_code_sample function
        :param result_list: List to put sorted samples into
        :return: None. This function mutates the result_list argument
        """
        methods = endpoints.get('methods', {})
        # POST/GET/PUT run before descending into children ...
        for method in (HttpMethod.post, HttpMethod.get, HttpMethod.put):
            if method in methods:
                result_list.append(methods[method])
        further_paths = [
            name for name in endpoints.keys() if name != 'methods'
        ]
        deepest_level = not further_paths
        if not deepest_level:
            for value in further_paths:
                self._sort_samples(endpoints[value], result_list)
        # ... while DELETE runs only after all children are processed
        if HttpMethod.delete in methods:
            result_list.append(methods[HttpMethod.delete])
def parse_edn_spec_file(path: Path) -> dict:
    """Find a possible API param examples in a debug .edn file.
    If the keyword has a 'type', 'example', and 'description' property
    then it's considered to be an API param.
    Example of entry in edn:
    `{:name {:description "Product", :type "string", :example "Whiskey"}}`
    It will be parsed to {"name": "Whiskey"}
    """
    import edn_format
    from edn_format import Keyword
    from edn_format.immutable_dict import ImmutableDict

    document = edn_format.loads(path.read_text())
    examples: dict = {}

    # hoist the keyword objects used for property lookups
    type_kw = Keyword('type')
    example_kw = Keyword('example')
    description_kw = Keyword('description')

    def walk(node: dict):
        for entry_key in node.keys():
            value = node[entry_key]
            if not isinstance(value, ImmutableDict):
                continue
            prop_type = value.get(type_kw)
            prop_example = value.get(example_kw)
            prop_description = value.get(description_kw)
            if not prop_type or not (prop_example or prop_description):
                # not an API param description -- descend further
                walk(node[entry_key])
                continue
            param_name = entry_key.name.replace('?', '')
            if prop_example:
                if prop_type == 'array':
                    examples[param_name] = ast.literal_eval(prop_example)
                else:
                    examples[param_name] = str(prop_example)
            else:
                # only a description is available -- use a placeholder value
                examples[param_name] = 'STUB'

    walk(document)
    return examples
| 36.156627 | 79 | 0.608908 | import ast
from collections import defaultdict
from pathlib import Path
from typing import List, Optional
from samples_validator.base import ApiTestResult, CodeSample, HttpMethod
class TestExecutionResultMap:
    """
    Data structure for storing results of test runs for each code sample
    based on its HTTP resource path.
    """
    def __init__(self):
        # nested dict keyed by path segments; leaves live under a 'methods'
        # key mapping HttpMethod -> ApiTestResult
        self._map = {}
    def put(self,
            test_result: ApiTestResult,
            replace_keys: Optional[List[dict]] = None,
            extra: Optional[dict] = None):
        """Store a test result under its sample's path, after applying the
        requested key renames and merging `extra` into its JSON body."""
        replace_keys = replace_keys or []
        # NOTE: when json_body is truthy, parent_body aliases it, so the
        # updates below mutate test_result.json_body in place.
        parent_body = test_result.json_body or {}
        for replacement in replace_keys:
            for key_from, key_to in replacement.items():
                if key_from in parent_body:
                    parent_body[key_to] = parent_body[key_from]
        parent_body.update(extra or {})
        self._put_test_result(
            self._map, test_result, path=test_result.sample.name,
        )
    def get_parent_result(self, sample: CodeSample) -> Optional[ApiTestResult]:
        """Return the POST result of the sample's parent resource, if any."""
        return self._get_parent_test_result(
            self._map, sample, path=sample.name, current_body={},
        )
    def get_parent_body(
            self,
            sample: CodeSample,
            escaped: bool = False) -> dict:
        """Return the accumulated JSON body of the sample's parent results.
        :param escaped: if True, wrap keys in curly braces (placeholder form)
        """
        body: dict = {}
        self._get_parent_test_result(
            self._map, sample, path=sample.name, current_body=body,
        )
        if escaped:
            # wrap plain keys like "data" into placeholder form "{data}"
            body = {
                f'{{{k}}}' if not k.startswith('{') else k: v
                for k, v in body.items()
            }
        return body
    def _put_test_result(
            self,
            current_dict: dict,
            test_result: ApiTestResult,
            path: str) -> dict:
        """Recursively place the result into the nested structure; e.g. a
        path 'a/b' yields {'a': {'b': {'methods': {<method>: result}}}}."""
        path_parts = path.split('/')
        current_path = path_parts[0]
        further_path = '/'.join(path_parts[1:])
        if not current_dict.get(current_path):
            current_dict[current_path] = defaultdict(dict)
        if not further_path:
            http_method = test_result.sample.http_method
            current_dict[current_path]['methods'][http_method] = test_result
        else:
            current_dict[current_path] = self._put_test_result(
                current_dict[current_path], test_result, further_path,
            )
        return current_dict
    def _get_parent_test_result(
            self,
            current_dict: dict,
            sample: CodeSample,
            path: str,
            current_body: dict,
            current_parent: Optional[ApiTestResult] = None,
    ) -> Optional[ApiTestResult]:
        """Walk down the path collecting the closest ancestor POST result;
        ancestor JSON bodies are merged into `current_body` (mutated)."""
        path_parts = path.split('/')
        current_path = path_parts[0]
        further_path = '/'.join(path_parts[1:])
        current_methods = current_dict.get('methods', {})
        current_parent = current_methods.get(HttpMethod.post, current_parent)
        next_dict = current_dict.get(current_path)
        if current_parent and current_parent.json_body:
            current_body.update(current_parent.json_body)
        if not next_dict:
            return current_parent
        if not further_path:
            return current_parent
        else:
            return self._get_parent_test_result(
                next_dict, sample, further_path, current_body, current_parent,
            )
class CodeSamplesTree:
    """
    Data structure for storing code samples in a tree form based on
    HTTP resource path.
    """
    def __init__(self):
        # nested dict keyed by path segments; leaves live under a 'methods'
        # key mapping HttpMethod -> CodeSample
        self._tree = {}
    def put(self, sample: CodeSample):
        """Insert a sample keyed by its language value plus resource path."""
        self._put_code_sample(
            self._tree, f'{sample.lang.value}{sample.name}', sample,
        )
    def list_sorted_samples(self) -> List[CodeSample]:
        """Return all samples in parent-before-child execution order."""
        sorted_samples: List[CodeSample] = []
        self._sort_samples(self._tree, sorted_samples)
        return sorted_samples
    def _put_code_sample(self,
                         current_dict: dict,
                         path: str,
                         sample: CodeSample) -> dict:
        """Recursively place the sample into the nested structure; e.g. a
        path 'a/b' yields {'a': {'b': {'methods': {<method>: sample}}}}."""
        path_parts = path.split('/')
        current_path = path_parts[0]
        further_path = '/'.join(path_parts[1:])
        if not current_dict.get(current_path):
            current_dict[current_path] = defaultdict(dict)
        if not further_path:
            current_dict[current_path]['methods'][sample.http_method] = sample
        else:
            current_dict[current_path] = self._put_code_sample(
                current_dict[current_path], further_path, sample,
            )
        return current_dict
    def _sort_samples(
            self,
            endpoints: dict,
            result_list: List[CodeSample]):
        """DFS over the tree: POST/GET/PUT before children, DELETE after all
        children are processed. Mutates `result_list` in place."""
        methods = endpoints.get('methods', {})
        for method in (HttpMethod.post, HttpMethod.get, HttpMethod.put):
            if method in methods:
                result_list.append(methods[method])
        further_paths = [
            name for name in endpoints.keys() if name != 'methods'
        ]
        deepest_level = not further_paths
        if not deepest_level:
            for value in further_paths:
                self._sort_samples(endpoints[value], result_list)
        if HttpMethod.delete in methods:
            result_list.append(methods[HttpMethod.delete])
def parse_edn_spec_file(path: Path) -> dict:
    """Find possible API param examples in a debug .edn file.
    A keyword with 'type' plus 'example' (or 'description') properties is
    considered an API param; examples are evaluated for arrays, stringified
    otherwise, and params with only a description get a 'STUB' placeholder.
    """
    import edn_format
    from edn_format import Keyword
    from edn_format.immutable_dict import ImmutableDict
    edn = edn_format.loads(path.read_text())
    edn_dump = {}
    def search(current_dict: dict):
        # recursively scan nested ImmutableDicts for param descriptions
        for key in current_dict.keys():
            data = current_dict[key]
            if not isinstance(data, ImmutableDict):
                continue
            param_type = data.get(Keyword('type'))
            param_example = data.get(Keyword('example'))
            param_description = data.get(Keyword('description'))
            if param_type and param_example:
                param_key = key.name.replace('?', '')
                if param_type == 'array':
                    param_value = ast.literal_eval(param_example)
                else:
                    param_value = str(param_example)
                edn_dump[param_key] = param_value
            elif param_type and param_description:
                param_key = key.name.replace('?', '')
                edn_dump[param_key] = 'STUB'
            else:
                search(current_dict[key])
    search(edn)
    return edn_dump
| true | true |
f737422821f3088bf2c40b3a202a363213f9441e | 5,683 | py | Python | capa/features/extractors/viv/indirect_calls.py | evandowning/capa | 03b15ce28977ffcc617d3c67c9dff20de7ee6196 | [
"Apache-2.0"
] | 1 | 2020-07-16T20:04:55.000Z | 2020-07-16T20:04:55.000Z | capa/features/extractors/viv/indirect_calls.py | evandowning/capa | 03b15ce28977ffcc617d3c67c9dff20de7ee6196 | [
"Apache-2.0"
] | 47 | 2021-03-17T10:41:44.000Z | 2022-03-28T04:03:01.000Z | capa/features/extractors/viv/indirect_calls.py | LaudateCorpus1/capa | ccf39334cecec4b8bfda5607f1d899822c59c72c | [
"Apache-2.0"
] | 3 | 2021-05-19T20:18:35.000Z | 2021-05-19T21:02:18.000Z | # Copyright (C) 2020 FireEye, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import collections
from typing import TYPE_CHECKING, Set, List, Deque, Tuple, Union, Optional
import envi
import vivisect.const
import envi.archs.i386.disasm
import envi.archs.amd64.disasm
from vivisect import VivWorkspace
if TYPE_CHECKING:
from capa.features.extractors.viv.extractor import InstructionHandle
# pull out consts for lookup performance
i386RegOper = envi.archs.i386.disasm.i386RegOper
i386ImmOper = envi.archs.i386.disasm.i386ImmOper
i386ImmMemOper = envi.archs.i386.disasm.i386ImmMemOper
Amd64RipRelOper = envi.archs.amd64.disasm.Amd64RipRelOper
LOC_OP = vivisect.const.LOC_OP
IF_NOFALL = envi.IF_NOFALL
REF_CODE = vivisect.const.REF_CODE
FAR_BRANCH_MASK = envi.BR_PROC | envi.BR_DEREF | envi.BR_ARCH
# mnemonics that overwrite their destination operand, ending a register's
# prior definition (used by find_definition)
DESTRUCTIVE_MNEMONICS = ("mov", "lea", "pop", "xor")
def get_previous_instructions(vw: VivWorkspace, va: int) -> List[int]:
    """
    collect the instructions that flow to the given address, local to the current function.
    args:
      vw (vivisect.Workspace)
      va (int): the virtual address to inspect
    returns:
      List[int]: the prior instructions, which may fallthrough and/or jump here
    """
    ret = []
    # find the immediate prior instruction.
    # ensure that it fallsthrough to this one.
    # (fix: the original queried getPrevLocation twice with identical
    # arguments and discarded the first result; one query suffices.)
    ploc = vw.getPrevLocation(va, adjacent=True)
    if ploc is not None:
        # from vivisect.const:
        # location: (L_VA, L_SIZE, L_LTYPE, L_TINFO)
        (pva, _, ptype, pinfo) = ploc
        if ptype == LOC_OP and not (pinfo & IF_NOFALL):
            ret.append(pva)
    # find any code refs, e.g. jmp, to this location.
    # ignore any calls and other far branches.
    #
    # from vivisect.const:
    # xref: (XR_FROM, XR_TO, XR_RTYPE, XR_RFLAG)
    for (xfrom, _, _, xflag) in vw.getXrefsTo(va, REF_CODE):
        if (xflag & FAR_BRANCH_MASK) != 0:
            continue
        ret.append(xfrom)
    return ret
class NotFoundError(Exception):
    """raised when a register's definition cannot be located (see find_definition)."""
    pass
def find_definition(vw: VivWorkspace, va: int, reg: int) -> Tuple[int, Union[int, None]]:
    """
    scan backwards from the given address looking for assignments to the given register.
    if a constant, return that value.
    args:
      vw (vivisect.Workspace)
      va (int): the virtual address at which to start analysis
      reg (int): the vivisect register to study
    returns:
      (va: int, value?: int|None): the address of the assignment and the value, if a constant.
    raises:
      NotFoundError: when the definition cannot be found.
    """
    # breadth-first worklist over predecessor instructions
    q = collections.deque()  # type: Deque[int]
    seen = set([])  # type: Set[int]
    q.extend(get_previous_instructions(vw, va))
    while q:
        cur = q.popleft()
        # skip if we've already processed this location
        if cur in seen:
            continue
        seen.add(cur)
        insn = vw.parseOpcode(cur)
        if len(insn.opers) == 0:
            q.extend(get_previous_instructions(vw, cur))
            continue
        opnd0 = insn.opers[0]
        if not (isinstance(opnd0, i386RegOper) and opnd0.reg == reg and insn.mnem in DESTRUCTIVE_MNEMONICS):
            # does not write our register: keep walking backwards
            q.extend(get_previous_instructions(vw, cur))
            continue
        # if we reach here, the instruction is destructive to our target register.
        # we currently only support extracting the constant from something like: `mov $reg, IAT`
        # so, any other pattern results in an unknown value, represented by None.
        # this is a good place to extend in the future, if we need more robust support.
        if insn.mnem != "mov":
            return (cur, None)
        else:
            opnd1 = insn.opers[1]
            if isinstance(opnd1, i386ImmOper):
                return (cur, opnd1.getOperValue(opnd1))
            elif isinstance(opnd1, i386ImmMemOper):
                return (cur, opnd1.getOperAddr(opnd1))
            elif isinstance(opnd1, Amd64RipRelOper):
                return (cur, opnd1.getOperAddr(insn))
            else:
                # might be something like: `mov $reg, dword_401000[eax]`
                return (cur, None)
    raise NotFoundError()
def is_indirect_call(vw: VivWorkspace, va: int, insn: Optional["InstructionHandle"] = None) -> bool:
if insn is None:
insn = vw.parseOpcode(va)
return insn.mnem in ("call", "jmp") and isinstance(insn.opers[0], envi.archs.i386.disasm.i386RegOper)
def resolve_indirect_call(
    vw: VivWorkspace, va: int, insn: Optional["InstructionHandle"] = None
) -> Tuple[int, Optional[int]]:
    """
    inspect the given indirect call instruction and attempt to resolve the target address.
    args:
      vw (vivisect.Workspace)
      va (int): the virtual address at which to start analysis
    returns:
      (va: int, value?: int|None): the address of the assignment and the value, if a constant.
    raises:
      NotFoundError: when the definition cannot be found.
    """
    opcode = insn if insn is not None else vw.parseOpcode(va)
    # caller must hand us a register-indirect call/jmp
    assert is_indirect_call(vw, va, insn=opcode)
    target_register = opcode.opers[0].reg
    return find_definition(vw, va, target_register)
| 34.02994 | 111 | 0.664614 |
import collections
from typing import TYPE_CHECKING, Set, List, Deque, Tuple, Union, Optional
import envi
import vivisect.const
import envi.archs.i386.disasm
import envi.archs.amd64.disasm
from vivisect import VivWorkspace
if TYPE_CHECKING:
from capa.features.extractors.viv.extractor import InstructionHandle
# pull out consts as module-level names for lookup performance
i386RegOper = envi.archs.i386.disasm.i386RegOper
i386ImmOper = envi.archs.i386.disasm.i386ImmOper
i386ImmMemOper = envi.archs.i386.disasm.i386ImmMemOper
Amd64RipRelOper = envi.archs.amd64.disasm.Amd64RipRelOper
LOC_OP = vivisect.const.LOC_OP
IF_NOFALL = envi.IF_NOFALL
REF_CODE = vivisect.const.REF_CODE
FAR_BRANCH_MASK = envi.BR_PROC | envi.BR_DEREF | envi.BR_ARCH
# mnemonics that overwrite their destination operand, ending a register's
# prior definition (used by find_definition)
DESTRUCTIVE_MNEMONICS = ("mov", "lea", "pop", "xor")
def get_previous_instructions(vw: VivWorkspace, va: int) -> List[int]:
    """
    collect the instructions that flow to the given address, local to the
    current function: the fallthrough predecessor plus any near code refs.
    """
    ret = []
    # fix: the original queried getPrevLocation twice with identical
    # arguments and discarded the first result; one query suffices.
    ploc = vw.getPrevLocation(va, adjacent=True)
    if ploc is not None:
        # location tuple layout: (L_VA, L_SIZE, L_LTYPE, L_TINFO)
        (pva, _, ptype, pinfo) = ploc
        # only keep an opcode location that falls through to `va`
        if ptype == LOC_OP and not (pinfo & IF_NOFALL):
            ret.append(pva)
    # collect near code refs (e.g. jmp) to this location, skipping
    # calls and other far branches
    for (xfrom, _, _, xflag) in vw.getXrefsTo(va, REF_CODE):
        if (xflag & FAR_BRANCH_MASK) != 0:
            continue
        ret.append(xfrom)
    return ret
class NotFoundError(Exception):
    """raised when a register's definition cannot be located (see find_definition)."""
    pass
def find_definition(vw: VivWorkspace, va: int, reg: int) -> Tuple[int, Union[int, None]]:
    """
    scan backwards from `va` looking for an assignment to register `reg`.
    returns the assignment's address and, when the source is a constant
    (imm, imm-mem, or RIP-relative operand), its value; otherwise None.
    raises NotFoundError when no definition is reachable.
    """
    # breadth-first worklist over predecessor instructions
    q = collections.deque()
    seen = set([])
    q.extend(get_previous_instructions(vw, va))
    while q:
        cur = q.popleft()
        # skip already-processed locations
        if cur in seen:
            continue
        seen.add(cur)
        insn = vw.parseOpcode(cur)
        if len(insn.opers) == 0:
            q.extend(get_previous_instructions(vw, cur))
            continue
        opnd0 = insn.opers[0]
        if not (isinstance(opnd0, i386RegOper) and opnd0.reg == reg and insn.mnem in DESTRUCTIVE_MNEMONICS):
            # does not write our register: keep walking backwards
            q.extend(get_previous_instructions(vw, cur))
            continue
        # if we reach here, the instruction is destructive to our target register.
        # we currently only support extracting the constant from something like: `mov $reg, IAT`
        # so, any other pattern results in an unknown value, represented by None.
        # this is a good place to extend in the future, if we need more robust support.
        if insn.mnem != "mov":
            return (cur, None)
        else:
            opnd1 = insn.opers[1]
            if isinstance(opnd1, i386ImmOper):
                return (cur, opnd1.getOperValue(opnd1))
            elif isinstance(opnd1, i386ImmMemOper):
                return (cur, opnd1.getOperAddr(opnd1))
            elif isinstance(opnd1, Amd64RipRelOper):
                return (cur, opnd1.getOperAddr(insn))
            else:
                # might be something like: `mov $reg, dword_401000[eax]`
                return (cur, None)
    raise NotFoundError()
def is_indirect_call(vw: VivWorkspace, va: int, insn: Optional["InstructionHandle"] = None) -> bool:
    """return True when the instruction at `va` is a call/jmp through a register operand."""
    if insn is None:
        insn = vw.parseOpcode(va)
    return insn.mnem in ("call", "jmp") and isinstance(insn.opers[0], envi.archs.i386.disasm.i386RegOper)
def resolve_indirect_call(
    vw: VivWorkspace, va: int, insn: Optional["InstructionHandle"] = None
) -> Tuple[int, Optional[int]]:
    """
    Trace the register used by the indirect call at `va` back to its
    definition, returning (defining address, resolved value or None).

    The instruction at `va` must satisfy `is_indirect_call`.
    """
    decoded = insn if insn is not None else vw.parseOpcode(va)
    assert is_indirect_call(vw, va, insn=decoded)
    target_reg = decoded.opers[0].reg
    return find_definition(vw, va, target_reg)
| true | true |
f73745513c10c7524fe40f28d9f4a8a7f35bd720 | 32,215 | py | Python | 3rdparty/pytorch/caffe2/python/checkpoint.py | WoodoLee/TorchCraft | 999f68aab9e7d50ed3ae138297226dc95fefc458 | [
"MIT"
] | 15 | 2019-08-10T02:36:38.000Z | 2021-07-14T13:45:32.000Z | 3rdparty/pytorch/caffe2/python/checkpoint.py | WoodoLee/TorchCraft | 999f68aab9e7d50ed3ae138297226dc95fefc458 | [
"MIT"
] | 7 | 2019-10-21T03:08:51.000Z | 2022-03-11T23:54:28.000Z | 3rdparty/pytorch/caffe2/python/checkpoint.py | WoodoLee/TorchCraft | 999f68aab9e7d50ed3ae138297226dc95fefc458 | [
"MIT"
] | 6 | 2020-10-16T13:28:31.000Z | 2021-08-25T12:08:34.000Z | ## @package checkpoint
# Module caffe2.python.checkpoint
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import logging
from caffe2.python import core, context
from caffe2.python.net_builder import ops
from caffe2.python.task import (
final_output,
Node,
Task,
TaskGroup,
TaskOutput,
WorkspaceType,
)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
@context.define_context()
class Job(object):
    """
    A Job defines three TaskGroups: the `init_group`, the `epoch_group` and the
    `exit_group` which will be run by a JobRunner.
    The `init_group` will be run only once at startup. Its role is to
    initialize globally persistent blobs such as model weights, accumulators
    and data file lists.
    The `epoch_group` will be run in a loop after init_group. The loop will
    exit when any of the stop signals added with `add_stop_condition` is True
    at the end of an epoch.
    The download_group will be run only once, after all the executions of
    epoch_group finish. Its role is to collect the distribute scattered
    parameters back after training.
    The `exit_group` will be run only once at the very end of the job, the
    role of this group is to save the results of training in the end of the job.
    Jobs are context-driven, so that Tasks can be added to the active Job
    without having to explicitly pass the job object around.
    Example of usage:
        def build_reader(partitions):
            with Job.current().init_group:
                reader = HiveReader(init_reader, ..., partitions)
                Task(step=init_reader)
            with Job.current().epoch_group:
                limited_reader = ReaderWithLimit(reader, num_iter=10000)
                data_queue = pipe(limited_reader, num_threads=8)
                Job.current().add_stop_condition(limited_reader.data_finished())
            return data_queue
        def build_hogwild_trainer(reader, model):
            with Job.current().init_group:
                Task(step=model.param_init_net)
            with Job.current().epoch_group:
                pipe(reader, processor=model, num_threads=8)
            with Job.current().exit_group:
                Task(step=model.save_model_net)
        with Job() as job:
            reader = build_reader(partitions)
            model = build_model(params)
            build_hogwild_trainer(reader, model)
    """
    def __init__(self,
                 init_group=None, epoch_group=None,
                 download_group=None, exit_group=None,
                 stop_conditions=None, nodes_to_checkpoint=None):
        # init_group must use the GLOBAL workspace so that blobs it creates
        # persist across the epoch loop.
        self.init_group = init_group or TaskGroup(
            workspace_type=WorkspaceType.GLOBAL)
        self.epoch_group = epoch_group or TaskGroup()
        self.download_group = download_group or TaskGroup()
        self.exit_group = exit_group or TaskGroup()
        self.stop_conditions = stop_conditions or []
        self._nodes_to_checkpoint = nodes_to_checkpoint

    def nodes_to_checkpoint(self):
        """Return the nodes to checkpoint: the explicitly configured list,
        or else every node used by the init group."""
        if self._nodes_to_checkpoint:
            return self._nodes_to_checkpoint
        else:
            return self.init_group.used_nodes()

    def compile(self, session_class):
        """Compile all four task groups with the given session class,
        freezing the checkpoint node list first."""
        self._nodes_to_checkpoint = self.nodes_to_checkpoint()
        self.init_group = session_class.compile(self.init_group)
        self.epoch_group = session_class.compile(self.epoch_group)
        self.download_group = session_class.compile(self.download_group)
        self.exit_group = session_class.compile(self.exit_group)

    def __enter__(self):
        # entering a Job makes its epoch_group the active TaskGroup
        self.epoch_group.__enter__()
        return self

    def __exit__(self, *args):
        self.epoch_group.__exit__()

    def add_stop_condition(self, output):
        """Register a blob or TaskOutput that, when True at the end of an
        epoch, terminates the epoch loop."""
        if isinstance(output, core.BlobReference):
            # wrap a raw blob into a Task so its value can be fetched
            t = Task(outputs=[output], group=self.epoch_group)
            output = t.outputs()[0]
        assert isinstance(output, TaskOutput)
        self.stop_conditions.append(output)
def get_ckpt_filename(node_name, epoch):
    """Return the checkpoint filename for a node/epoch pair.

    Args:
        node_name: A string. The name of the node.
        epoch: An integer. The checkpoint epoch.

    Returns:
        A string of the form "<node_name>.<epoch>".
    """
    return '{}.{}'.format(node_name, epoch)
def db_name(epoch, node_name, db_prefix, path_prefix=None):
    """Returns the full db name where checkpoint files are saved.

    Args:
        epoch: An integer. The checkpoint epoch.
        node_name: A string. The name of the node.
        db_prefix: A string. The prefix used to construct full db name.
        path_prefix: A string. Optional param used to construct db name or path
            where checkpoint files are stored.

    Returns:
        full_db_name: A string. The absolute path of full_db_name where
            checkpoint files are saved.
    """
    ckpt_filename = get_ckpt_filename(node_name, epoch)
    # fix: the original bound a local variable named `db_name`, shadowing
    # this function; use a distinct name for clarity.
    if path_prefix:
        # an explicit path prefix is concatenated directly (it may already
        # end with a separator), matching the historical behavior
        full_db_name = path_prefix + ckpt_filename
    else:
        full_db_name = os.path.join(db_prefix, ckpt_filename)
    return full_db_name
class CheckpointManager(object):
    """
    Controls saving and loading of workspaces on every epoch boundary of a job.
    If a CheckpointManager instance is passed to JobRunner, then JobRunner will
    call `init`, `read` and `save` at different moments in between epoch runs.

    Args:
        db_prefix: The prefix used to construct full db name. Since `absolute_path`
            is set to True, this will be used as db_name in SaveOp.
        node_name: Name of the node where this checkpoint_manager is used.
        db_type: Type of database to use for storing checkpoint.
        metadata_handler: An optional object capable of reading/writing
            checkpoint info in storage of choice.
    """

    # name of the special blob that records all blob names in a checkpoint
    BLOB_NAMES = "blob_names"

    def __init__(self, db_prefix, node_name, db_type, metadata_handler=None):
        self._db_prefix = db_prefix
        self._node_name = node_name
        self._db_type = db_type
        self._metadata_handler = metadata_handler
        # make sure these blobs are the first in the checkpoint file.
        self._net = core.Net('!!checkpoint_mngr')
        self._blob_names = self._net.AddExternalInput(self.BLOB_NAMES)
        self._names_output = None
        self._path_prefix = None
        self._path_type = None
        # name of the db used by the most recent load/save, for stats reporting
        self._current_db_name = None
        # duration (TaskOutput) of the most recent load/save, for stats reporting
        self._current_checkpoint_duration = None

    """
    Initialize the checkpoint manager. Determines all blobs that need to be saved
    or loads from a checkpoint.

    Args:
        nodes: An array of nodes where this checkpoint manager is running. Should
            only contain a single node.
        retrieve_from_epoch: Set to a number to load blobs from this epoch.
        path_prefix: Used to construct db name or path where checkpoint files are
            stored.
        path_type: Indicate the type of path where checkpoint files are stored.
    """
    def init(
        self,
        nodes=None,
        retrieve_from_epoch=None,
        path_prefix=None,
        path_type=None
    ):
        """
        Build a Task that will be run once after the job's `init_group` is run.
        This task will determine which blobs need to be checkpointed.
        If retrieve_from_epoch is not None, then the checkpoint metadata is
        retrieved from a previously saved checkpoint.
        """
        assert nodes is None or len(nodes) == 1, (
            'CheckpointManager only supports single node.')

        with Task(outputs=[self._blob_names]) as task:
            if retrieve_from_epoch is None:
                # fresh run: snapshot all (non-shared) blob names in the workspace
                ops.GetAllBlobNames(
                    [],
                    self._blob_names,
                    include_shared=False)
            else:
                # resuming: recover the blob-name list from the old checkpoint
                full_db_name = db_name(retrieve_from_epoch,
                                       self._node_name, self._db_prefix, path_prefix)
                db_type = path_type or self._db_type
                logger.info("Initializing checkpoints from = %s"
                            % full_db_name)
                ops.Load(
                    [], self._blob_names,
                    db=full_db_name,
                    db_type=db_type,
                    absolute_path=True,
                    keep_device=True,
                )
        self._names_output = task.outputs()[0]
        return task

    def blob_list(self):
        """Return the list of blob names to checkpoint. Requires that the
        `init` task has already been run (and its output fetched)."""
        assert self._names_output
        return self._names_output.fetch().tolist()

    def _timed_task(self, cp_op_name, add_op):
        """
        Build a Task that will measure the time span of checkpoint operations,
        once operation is done, time can be read from _current_checkpoint_duration.

        Args:
            cp_op_name: A string name of the checkpoint operation.
            add_op: A functor to add the checkpoint operation.

        Returns:
            A task with timer.
        """
        with Task(name=cp_op_name) as task:
            with ops.task_init():
                timer = ops.TimerBegin([], counter_name=self._node_name)
            add_op()
            with ops.task_exit():
                time_span_blob = ops.TimerGetAndEnd(timer)
            self._current_checkpoint_duration = final_output(time_span_blob)
        return task

    def collect_checkpoint_stats(self, stats):
        """
        Add one checkpoint stats into the stats.

        Args:
            stats: A dict of checkpoint stats that will be reported.
        """
        if self._current_db_name and self._current_checkpoint_duration:
            stats[self._current_db_name] = self._current_checkpoint_duration.fetch()[0]
        else:
            logger.info(
                "Failed to collect checkpoint stats: {}".format(
                    self._current_db_name
                )
            )

    def load(self, epoch, path_prefix=None, path_type=None):
        """
        Build a Task that will be run by JobRunner when the job is to be
        resumed from a given epoch. This task will run a Load op that will
        load and deserialize all relevant blobs from a persistent storage.
        """
        self._current_db_name = db_name(
            epoch, self._node_name, self._db_prefix, path_prefix
        )
        db_type = path_type or self._db_type
        logger.info("Loading checkpoints from = %s" % self._current_db_name)

        def add_op():
            ops.Load(
                [],
                self.blob_list(),
                db=self._current_db_name,
                db_type=db_type,
                absolute_path=True,
                keep_device=True,
            )
        return self._timed_task('checkpoint_load', add_op)

    def load_blobs_from_checkpoint(self, blob_names, epoch):
        """
        Builds a Task that loads only the necessary blobs from a checkpoint of
        the given epoch. The necessary blobs are given in the blob_names
        argument.

        Args:
            blob_names: A list of strings. Each string is the name of a
                blob.
            epoch: The checkpoint epoch to load from.

        Returns:
            A Task which loads the specified blobs from the checkpoint of the
            given epoch.
        """
        self._current_db_name = db_name(epoch, self._node_name, self._db_prefix)
        logger.info('Load from %s' % self._current_db_name)

        def add_op():
            ops.Load(
                [],
                blob_names,
                db=self._current_db_name,
                db_type=self._db_type,
                absolute_path=True,
                # tolerate blobs missing from the checkpoint
                allow_incomplete=True)
        return self._timed_task('checkpoint_partial_load', add_op)

    def check_db_exists(self, epoch):
        """Build a Task whose single output is True iff the checkpoint db for
        the given epoch exists."""
        logger.info('Check existence of %s' %
                    db_name(epoch, self._node_name, self._db_prefix))
        with Task() as task:
            existence = ops.Const(False)
            ops.DBExists(
                [],
                [existence],
                db_name=db_name(epoch, self._node_name, self._db_prefix),
                db_type=self._db_type,
                absolute_path=True)
            task.add_output(existence)
        return task

    def report_checkpoint_stats(self, action_name):
        """
        Report checkpoint operation stats for current node.

        Args:
            action_name: A string of the name of checkpoint operation.
        """
        all_stats = {}
        self.collect_checkpoint_stats(all_stats)
        if self._metadata_handler:
            self._metadata_handler.report(action_name, all_stats)

    def save(self, epoch):
        """
        Build a Task that is run once after `init_group` and after each
        epoch is run. This will execute a Save ops to serialize and persist
        blobs present in the global workspace.
        """
        self._current_db_name = db_name(epoch, self._node_name, self._db_prefix)
        logger.info('Saving to %s' % self._current_db_name)

        def add_op():
            ops.Save(
                self.blob_list(), [],
                db=self._current_db_name,
                db_type=self._db_type,
                absolute_path=True)
        return self._timed_task('checkpoint_save', add_op)

    def write_checkpoint_metadata(self, epoch):
        """
        Write metadata for checkpoint

        Args:
            epoch: An integer. The epoch-id for which checkpoint metadata is
                written
        """
        if self._metadata_handler is not None:
            self._metadata_handler.write(epoch=epoch)

    def get_resume_from_epoch_id(self, user_epoch=None):
        """
        Identify the epoch-id from which Job must resume

        Args:
            user_epoch: An integer. Optional parameter for user to explicitly
                identify the epoch-id to load checkpoint from

        Returns:
            epoch: the epoch-id to load checkpoints from
                or None if no checkpoints were written
        """
        last_epoch = user_epoch
        if self._metadata_handler is not None:
            last_epoch = self._metadata_handler.last_epoch(user_epoch=user_epoch)
        return last_epoch

    def set_params(self, nodes, path_prefix=None, path_type=None):
        """Set parameters associated with CP manager

        Args:
            nodes: An array of nodes where this checkpoint manager is running.
            path_prefix: Used to construct db name or path where checkpoint files are
                stored.
            path_type: Indicate the type of path where checkpoint files are stored.
        """
        if path_prefix:
            self._path_prefix = path_prefix
        if path_type:
            self._path_type = path_type
        if self._metadata_handler:
            self._metadata_handler.set_params(
                db_prefix=self._db_prefix,
                db_type=self._db_type,
                node_names=[str(self._node_name)],
                path_prefix=self._path_prefix,
                path_type=self._path_type)

    def cp_accessible(self, epoch=None):
        """Returns True if Checkpoint data is accessible

        Args:
            epoch: An integer. The epoch of the checkpoint. If None,
                it implies we need to check if checkpoint directory is accessible

        Returns:
            is_cp_accessible: A boolean. Returns True if Checkpoint data is accessible
        """
        if self._metadata_handler is not None:
            return self._metadata_handler.cp_accessible(epoch)
        else:
            # no metadata handler configured: optimistically assume accessible
            return True
class MultiNodeCheckpointManager(object):
    """
    Coordinates checkpointing and checkpointing across multiple nodes.
    Each of `init`, `load` and `save` will build TaskGroups which will
    trigger checkpointing on each of the nodes involved in a distributed job.

    Args:
        db_prefix: The prefix used to construct full db name. Since `absolute_path`
            is set to True, this will be used as db_name in SaveOp.
        db_type: Type of database to use for storing checkpoint.
        metadata_handler: An optional object capable of reading/writing
            checkpoint info in storage of choice.
    """
    def __init__(self, db_prefix, db_type, metadata_handler=None):
        # list of (node, CheckpointManager) pairs, populated lazily by init()
        self._node_managers = None
        self._db_prefix = db_prefix
        self._db_type = db_type
        self._metadata_handler = metadata_handler
        self._path_prefix = None
        self._path_type = None

    def _timed_task(self, func, *args, **kw):
        pass  # placeholder removed; see _task_group below

    def _task_group(self, func, *args, **kw):
        """Build one TaskGroup that applies `func(manager, ...)` on each
        node, inside that node's device/Node scope."""
        assert self._node_managers is not None, 'init must be called first.'
        with TaskGroup(WorkspaceType.GLOBAL) as task_group:
            for node, manager in self._node_managers:
                with Node(node):
                    func(manager, *args, **kw)
            return task_group

    """
    Args:
        nodes: An array of nodes where this checkpoint manager is running.
        retrieve_from_epoch: Set to a number to load blobs from this epoch.
        path_prefix: Used to construct db name or path where checkpoint files are
            stored.
        path_type: Indicate the type of path where checkpoint files are stored.
    """
    def init(
        self, nodes, retrieve_from_epoch=None, path_prefix=None, path_type=None
    ):
        if self._node_managers is not None:
            # already initialized: verify node list is unchanged and no-op
            assert [node for node, _ in self._node_managers] == nodes
            return TaskGroup(WorkspaceType.GLOBAL)
        self._node_managers = []
        for node in nodes:
            with Node(node):
                manager = CheckpointManager(
                    db_prefix=self._db_prefix,
                    node_name=str(node),
                    db_type=self._db_type)
                self._node_managers.append((node, manager))
        return self._task_group(
            CheckpointManager.init,
            nodes=[node],
            retrieve_from_epoch=retrieve_from_epoch,
            path_prefix=path_prefix,
            path_type=path_type)

    def load(self, epoch, path_prefix=None, path_type=None):
        """Build a TaskGroup that loads the epoch's checkpoint on each node."""
        return self._task_group(
            CheckpointManager.load,
            epoch,
            path_prefix=path_prefix,
            path_type=path_type)

    def load_blobs_locally(self, nodes, blob_names, epoch, session):
        """Loads the necessary blobs from the checkpoints to the current node.

        Args:
            blob_names: A list of strings. Each string is the name of a
                blob.
            epoch: An integer. The checkpoint epoch to load from.
            session: A Session object to execute the Load ops.
        """
        if self._node_managers is not None:
            assert [node for node, _ in self._node_managers] == nodes
        else:
            self._node_managers = []
            for node in nodes:
                with Node(node):
                    manager = CheckpointManager(
                        db_prefix=self._db_prefix,
                        node_name=str(node),
                        db_type=self._db_type)
                    self._node_managers.append((node, manager))
        assert self._node_managers is not None, 'must initialize node managers'
        for _, manager in self._node_managers:
            # verify the checkpoint db exists before attempting the load
            existence_task = manager.check_db_exists(epoch)
            session.run(existence_task)
            existence = existence_task.outputs()[0].fetch()
            if not existence:
                logger.info('DB %s does not exist!' %
                            db_name(epoch, manager._node_name, manager._db_prefix))
                return False
            load_task = manager.load_blobs_from_checkpoint(blob_names, epoch)
            session.run(load_task)
        logger.info('Successfully loaded from checkpoints.')
        return True

    def get_ckpt_db_name(self, node_name, epoch):
        """Returns the DB name of the given node and the given epoch.

        The DB name is effectively the checkpoint path of the given node and
        the given epoch.

        Args:
            node_name: A string. The node name of interest.
            epoch: An integer. The epoch of the checkpoint.

        Returns:
            checkpoint_db_name: A string. The checkpoint path of the given
                node and the given epoch.
        """
        for node, manager in self._node_managers:
            if str(node) == node_name:
                return db_name(epoch, manager._node_name, manager._db_prefix)

    def report_checkpoint_stats(self, action_name):
        """
        Report the checkpoint stats for all the nodes, we need to aggregate all
        the node's stats together so that we know which node's checkpoint
        operation dominates.

        Args:
            action_name: A string of the name of checkpoint operation.
        """
        all_stats = {}
        for _, manager in self._node_managers:
            manager.collect_checkpoint_stats(all_stats)
        logger.debug("checkpoint stats: {}".format(all_stats))
        if self._metadata_handler:
            self._metadata_handler.report(action_name, all_stats)

    def save(self, epoch):
        """
        Build a Task that will execute a Save ops to serialize and persist
        blobs present in the global workspace.
        """
        return self._task_group(CheckpointManager.save, epoch)

    def write_checkpoint_metadata(self, epoch):
        """
        Write metadata for checkpoint

        Args:
            epoch: An integer. The epoch-id for which checkpoint metadata is
                written
        """
        if self._metadata_handler is not None:
            self._metadata_handler.write(epoch=epoch)

    def get_resume_from_epoch_id(self, user_epoch=None):
        """
        Identify the epoch-id from which Job must resume

        Args:
            user_epoch: An integer. Optional parameter for user to explicitly
                identify the epoch-id to load checkpoint from

        Returns:
            epoch: the epoch-id to load checkpoints from
                or None if no checkpoints were written
        """
        last_epoch = user_epoch
        if self._metadata_handler is not None:
            last_epoch = self._metadata_handler.last_epoch(user_epoch=user_epoch)
        return last_epoch

    def set_params(self, nodes, path_prefix=None, path_type=None):
        """Set parameters associated with CP manager

        Args:
            nodes: An array of nodes where this checkpoint manager is running.
            path_prefix: Used to construct db name or path where checkpoint files are
                stored.
            path_type: Indicate the type of path where checkpoint files are stored.
        """
        self._node_names = [str(node) for node in nodes]
        if path_prefix:
            self._path_prefix = path_prefix
        if path_type:
            self._path_type = path_type
        if self._metadata_handler:
            self._metadata_handler.set_params(
                db_prefix=self._db_prefix,
                db_type=self._db_type,
                node_names=self._node_names,
                path_prefix=self._path_prefix,
                path_type=self._path_type)

    def cp_accessible(self, epoch=None):
        """Returns True if Checkpoint data is accessible

        Args:
            epoch: An integer. The epoch of the checkpoint. If None,
                it implies we need to check if checkpoint directory is accessible

        Returns:
            is_cp_accessible: A boolean. Returns True if Checkpoint data is accessible
        """
        if self._metadata_handler is not None:
            return self._metadata_handler.cp_accessible(epoch)
        else:
            # no metadata handler configured: optimistically assume accessible
            return True
class UploadTaskGroupBuilder(object):
    """A simple class to upload checkpoints.

    Abstract base: subclasses implement `build` to construct the upload
    task group for a given epoch.
    """
    def build(self, epoch, checkpoint_manager):
        """Builds the task group to upload checkpoints.

        Args:
            epoch: An integer. The checkpoint epoch to be uploaded.
            checkpoint_manager: Can be a CheckpointManager for single machine
                or a MultiNodeCheckpointManager for multi-machine. The manager
                that initializes/saves/loads checkpoints.

        Raises:
            NotImplementedError: This base class only has the interface,
                the implementation will be in the subclasses.
        """
        raise NotImplementedError()
class JobRunner(object):
    """
    Implement the runtime logic for jobs with checkpointing at the level of
    epoch. Can be used to run either single-host or distributed jobs. Job
    runner is a callable to be called once from the master, passing a session
    as an argument. This call will block until the Job execution is complete.

    If a checkpoint_manager is passed, checkpoints will be taken after
    initialization and after each epoch execution. If, in addition,
    `resume_from_epoch` is an epoch number, the corresponding checkpoint will
    be loaded and job execution will continue from the given epoch. In
    this case, the job's init_group will not be run.

    Refer to checkpoint_test.py for an example.
    """
    def __init__(self, job, checkpoint_manager=None, resume_from_epoch=None,
                 upload_task_group_builder=None):
        """Initializes the JobRunner.

        Args:
            job: A Job object. The job to be executed.
            checkpoint_manager: Can be a CheckpointManager for single machine
                or a MultiNodeCheckpointManager for multi-machine. The manager
                that initializes/saves/loads checkpoints.
            resume_from_epoch: An integer. The epoch to resume from.
            upload_task_group_builder: A subclass of the
                UploadTaskGroupBuilder. Creates a task group to upload
                checkpoints.
        """
        self.resume_from_epoch = resume_from_epoch
        self.checkpoint_manager = checkpoint_manager
        self.job = job
        self.upload_task_group_builder = upload_task_group_builder

    def train(self, session):
        """Runs the training flow.

        Args:
            session: A Session object. Valid choises are: LocalSession,
                LocalHostScheduler, and DistributedSession. It is used to
                execute one TaskGroup a time.

        Returns:
            The last epoch number that was run.
        """
        # identify the epoch we must resume from
        if self.checkpoint_manager:
            self.checkpoint_manager.set_params(nodes=self.job.nodes_to_checkpoint())
            self.resume_from_epoch = self.checkpoint_manager.\
                get_resume_from_epoch_id(self.resume_from_epoch)
            if self.resume_from_epoch is not None:
                logger.info('Resuming from epoch {}'.format(self.resume_from_epoch))

        # Initialize all the nodes.
        from_scratch = self.resume_from_epoch is None
        if from_scratch:
            session.run(self.job.init_group)

        if self.checkpoint_manager:
            logger.info('Preparing checkpoints ...')
            session.run(self.checkpoint_manager.init(
                self.job.nodes_to_checkpoint(),
                retrieve_from_epoch=self.resume_from_epoch))
            # Save the first checkpoint before training starts, or resume from
            # a previously saved checkpoint.
            if from_scratch:
                self.save_checkpoints(0, session)
            else:
                logger.info('Loading checkpoints for epoch {} ...'.format(
                    self.resume_from_epoch))
                session.run(
                    self.checkpoint_manager.load(self.resume_from_epoch))
                self.checkpoint_manager.report_checkpoint_stats('checkpoint_load')
                logger.info('Checkpoint loaded')

        logger.info("Finished initializing")

        # Start training.
        epoch = 1 if from_scratch else self.resume_from_epoch + 1
        while True:
            logger.info('Starting epoch %d' % epoch)
            session.run(self.job.epoch_group)
            logger.info('Finished epoch %d' % epoch)
            stop_conditions = [o.fetch() for o in self.job.stop_conditions]

            if self.checkpoint_manager:
                self.save_checkpoints(epoch, session)

            if any(stop_conditions):
                logger.info('Stopping')
                break
            epoch += 1
        logger.info('Finished training')
        # Upload the checkpoints.
        if (self.upload_task_group_builder):
            upload_task_group = self.upload_task_group_builder.build(
                epoch, self.checkpoint_manager)
            session.run(upload_task_group)
            logger.info('Finished uploading the checkpoints')

        # Download the parameters to save
        session.run(self.job.download_group)
        logger.info('Finished downloading the parameters')

        # Finally run the exit step to save nets
        session.run(self.job.exit_group)
        logger.info('Finished running the exit group')
        return epoch

    def load_blobs_from_checkpoints(self, blob_names, epoch, session):
        """Loads the necessary blobs from the checkpoints.

        Checkpoints store the snapshots of the workspace in each node.
        Sometimes we only need to load a subset of the blobs from the
        checkpoints. One common scenario is to load only the model blobs from
        the checkpoints for evaluation purpose. Given the names of the
        necessary blobs, this function goes over all the checkpoints of all the
        nodes, but only loads the blobs specified in the blob_names to the
        current workspace.

        Args:
            blob_names: A list of strings. Each string is the name of a
                blob.
            epoch: An integer. The checkpoint epoch to load from.
            session: A Session object to execute the load ops.

        Raises:
            ValueError: When the checkpoint manager is invalid.
        """
        if not self.checkpoint_manager:
            raise ValueError('Checkpoint manager is None')
        logger.info('Loading checkpoint for epoch {} ...'.format(epoch))
        result = self.checkpoint_manager.load_blobs_locally(
            self.job.nodes_to_checkpoint(), blob_names, epoch, session)
        self.checkpoint_manager.report_checkpoint_stats('checkpoint_partial_load')
        return result

    def save_checkpoints(self, epoch, session):
        """Triggers operation to save checkpoints

        This method will trigger the Save ops to serialize and persist the
        blobs present in the global workspaace.

        Args:
            epoch: An integer. The checkpoint epoch-id that we are saving.
            session: A Session object to execute the save ops.

        Raises:
            ValueError: When the checkpoint manager is invalid.
        """
        if not self.checkpoint_manager:
            raise ValueError('Checkpoint manager is None')
        try:
            is_accessible = self.checkpoint_manager.cp_accessible(epoch=None)
            if is_accessible:
                logger.info('Saving checkpoints for epoch {}'.format(epoch))
                session.run(self.checkpoint_manager.save(epoch))
                self.checkpoint_manager.write_checkpoint_metadata(epoch)
                logger.info('Checkpoints saved')
                self.checkpoint_manager.report_checkpoint_stats('checkpoint_save')
            else:
                logger.warning("Checkpoint files cannot be accessed!")
        except Exception as ex:
            # checkpointing is best-effort: training continues even when a
            # checkpoint cannot be written
            logger.warning("Unable to write checkpoint for epoch {}. Error={}".
                           format(epoch, ex))
def epoch_limiter(job, num_epochs):
    """
    Creates a task that will output True when a given
    number of epochs has finished.

    Args:
        job: The Job to attach the counter tasks to; a stop condition is
            registered on it.
        num_epochs: An integer. The number of epochs to run before stopping.
    """
    with job.init_group:
        init_net = core.Net('epoch_counter_init')
        # init_count is num_epochs - 1 because CountDown returns True on the
        # step that takes the counter below zero
        counter = init_net.CreateCounter([], init_count=num_epochs - 1)
        Task(step=init_net)

    with job.epoch_group:
        epoch_net = core.Net('epoch_countdown')
        finished = epoch_net.CountDown(counter)
        output = Task(step=epoch_net, outputs=finished).outputs()[0]
    job.add_stop_condition(output)
| 38.627098 | 87 | 0.629738 | ort absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import logging
from caffe2.python import core, context
from caffe2.python.net_builder import ops
from caffe2.python.task import (
final_output,
Node,
Task,
TaskGroup,
TaskOutput,
WorkspaceType,
)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
@context.define_context()
class Job(object):
    """
    Groups the TaskGroups that make up a training run:
      - `init_group`: run once at startup (global workspace; persistent blobs),
      - `epoch_group`: run in a loop until a registered stop condition is True,
      - `download_group`: run once after training finishes,
      - `exit_group`: run once at the very end of the job.

    Jobs are context-driven: entering a Job makes its `epoch_group` the
    active TaskGroup, so Tasks can be added without passing the job around.
    """
    def __init__(self,
                 init_group=None, epoch_group=None,
                 download_group=None, exit_group=None,
                 stop_conditions=None, nodes_to_checkpoint=None):
        # init_group uses the GLOBAL workspace so its blobs persist
        self.init_group = init_group or TaskGroup(
            workspace_type=WorkspaceType.GLOBAL)
        self.epoch_group = epoch_group or TaskGroup()
        self.download_group = download_group or TaskGroup()
        self.exit_group = exit_group or TaskGroup()
        self.stop_conditions = stop_conditions or []
        self._nodes_to_checkpoint = nodes_to_checkpoint

    def nodes_to_checkpoint(self):
        """Return the nodes to checkpoint: the explicitly configured list,
        or else every node used by the init group."""
        if self._nodes_to_checkpoint:
            return self._nodes_to_checkpoint
        else:
            return self.init_group.used_nodes()

    def compile(self, session_class):
        """Compile all four task groups with the given session class,
        freezing the checkpoint node list first."""
        self._nodes_to_checkpoint = self.nodes_to_checkpoint()
        self.init_group = session_class.compile(self.init_group)
        self.epoch_group = session_class.compile(self.epoch_group)
        self.download_group = session_class.compile(self.download_group)
        self.exit_group = session_class.compile(self.exit_group)

    def __enter__(self):
        # entering a Job activates its epoch_group context
        self.epoch_group.__enter__()
        return self

    def __exit__(self, *args):
        self.epoch_group.__exit__()

    def add_stop_condition(self, output):
        """Register a blob or TaskOutput that, when True at the end of an
        epoch, terminates the epoch loop."""
        if isinstance(output, core.BlobReference):
            # wrap a raw blob into a Task so its value can be fetched
            t = Task(outputs=[output], group=self.epoch_group)
            output = t.outputs()[0]
        assert isinstance(output, TaskOutput)
        self.stop_conditions.append(output)
def get_ckpt_filename(node_name, epoch):
    """Return the checkpoint filename "<node_name>.<epoch>"."""
    return "{}.{}".format(node_name, epoch)
def db_name(epoch, node_name, db_prefix, path_prefix=None):
    """Return the full db name (checkpoint path) for a node/epoch pair.

    Args:
        epoch: An integer. The checkpoint epoch.
        node_name: A string. The name of the node.
        db_prefix: A string. The prefix used to construct the full db name.
        path_prefix: A string. Optional prefix concatenated directly in front
            of the checkpoint filename (it may already end with a separator).

    Returns:
        A string: the path of the db where checkpoint files are saved.
    """
    ckpt_filename = get_ckpt_filename(node_name, epoch)
    # fix: the original bound a local variable named `db_name`, shadowing
    # this function; use a distinct name for clarity.
    if path_prefix:
        full_db_name = path_prefix + ckpt_filename
    else:
        full_db_name = os.path.join(db_prefix, ckpt_filename)
    return full_db_name
class CheckpointManager(object):
    """
    Controls saving and loading of workspaces on every epoch boundary of a
    job. A JobRunner calls `init`, `load` and `save` at different moments
    between epoch runs.

    Args:
        db_prefix: Prefix used to construct the full db name (used as db_name
            in SaveOp since `absolute_path` is True).
        node_name: Name of the node where this checkpoint manager is used.
        db_type: Type of database to use for storing checkpoints.
        metadata_handler: Optional object capable of reading/writing
            checkpoint info in storage of choice.
    """
    # name of the special blob that records all blob names in a checkpoint
    BLOB_NAMES = "blob_names"

    def __init__(self, db_prefix, node_name, db_type, metadata_handler=None):
        self._db_prefix = db_prefix
        self._node_name = node_name
        self._db_type = db_type
        self._metadata_handler = metadata_handler
        # ensure these blobs come first in the checkpoint file
        self._net = core.Net('!!checkpoint_mngr')
        self._blob_names = self._net.AddExternalInput(self.BLOB_NAMES)
        self._names_output = None
        self._path_prefix = None
        self._path_type = None
        # db used by the most recent load/save, for stats reporting
        self._current_db_name = None
        # duration (TaskOutput) of the most recent load/save op
        self._current_checkpoint_duration = None

    def init(
        self,
        nodes=None,
        retrieve_from_epoch=None,
        path_prefix=None,
        path_type=None
    ):
        """Build a Task, run once after the job's init_group, that determines
        which blobs need checkpointing; when `retrieve_from_epoch` is set, the
        blob-name list is loaded from that epoch's checkpoint instead."""
        assert nodes is None or len(nodes) == 1, (
            'CheckpointManager only supports single node.')

        with Task(outputs=[self._blob_names]) as task:
            if retrieve_from_epoch is None:
                # fresh run: snapshot all (non-shared) blob names
                ops.GetAllBlobNames(
                    [],
                    self._blob_names,
                    include_shared=False)
            else:
                # resuming: recover the blob-name list from the old checkpoint
                full_db_name = db_name(retrieve_from_epoch,
                                       self._node_name, self._db_prefix, path_prefix)
                db_type = path_type or self._db_type
                logger.info("Initializing checkpoints from = %s"
                            % full_db_name)
                ops.Load(
                    [], self._blob_names,
                    db=full_db_name,
                    db_type=db_type,
                    absolute_path=True,
                    keep_device=True,
                )
        self._names_output = task.outputs()[0]
        return task

    def blob_list(self):
        """Return the blob names to checkpoint; requires `init` was run."""
        assert self._names_output
        return self._names_output.fetch().tolist()

    def _timed_task(self, cp_op_name, add_op):
        """Wrap the checkpoint op built by `add_op` in a Task with a timer;
        the duration is exposed via `_current_checkpoint_duration`."""
        with Task(name=cp_op_name) as task:
            with ops.task_init():
                timer = ops.TimerBegin([], counter_name=self._node_name)
            add_op()
            with ops.task_exit():
                time_span_blob = ops.TimerGetAndEnd(timer)
            self._current_checkpoint_duration = final_output(time_span_blob)
        return task

    def collect_checkpoint_stats(self, stats):
        """Record the duration of the most recent checkpoint op into `stats`
        (a dict keyed by db name)."""
        if self._current_db_name and self._current_checkpoint_duration:
            stats[self._current_db_name] = self._current_checkpoint_duration.fetch()[0]
        else:
            logger.info(
                "Failed to collect checkpoint stats: {}".format(
                    self._current_db_name
                )
            )

    def load(self, epoch, path_prefix=None, path_type=None):
        """Build a timed Task that loads all checkpointed blobs of the given
        epoch from persistent storage."""
        self._current_db_name = db_name(
            epoch, self._node_name, self._db_prefix, path_prefix
        )
        db_type = path_type or self._db_type
        logger.info("Loading checkpoints from = %s" % self._current_db_name)

        def add_op():
            ops.Load(
                [],
                self.blob_list(),
                db=self._current_db_name,
                db_type=db_type,
                absolute_path=True,
                keep_device=True,
            )
        return self._timed_task('checkpoint_load', add_op)

    def load_blobs_from_checkpoint(self, blob_names, epoch):
        """Build a timed Task that loads only `blob_names` from the given
        epoch's checkpoint (missing blobs are tolerated)."""
        self._current_db_name = db_name(epoch, self._node_name, self._db_prefix)
        logger.info('Load from %s' % self._current_db_name)

        def add_op():
            ops.Load(
                [],
                blob_names,
                db=self._current_db_name,
                db_type=self._db_type,
                absolute_path=True,
                allow_incomplete=True)
        return self._timed_task('checkpoint_partial_load', add_op)

    def check_db_exists(self, epoch):
        """Build a Task whose single output is True iff the checkpoint db for
        the given epoch exists."""
        logger.info('Check existence of %s' %
                    db_name(epoch, self._node_name, self._db_prefix))
        with Task() as task:
            existence = ops.Const(False)
            ops.DBExists(
                [],
                [existence],
                db_name=db_name(epoch, self._node_name, self._db_prefix),
                db_type=self._db_type,
                absolute_path=True)
            task.add_output(existence)
        return task

    def report_checkpoint_stats(self, action_name):
        """Report stats of the most recent checkpoint op for this node via
        the metadata handler, if one is configured."""
        all_stats = {}
        self.collect_checkpoint_stats(all_stats)
        if self._metadata_handler:
            self._metadata_handler.report(action_name, all_stats)

    def save(self, epoch):
        """Build a timed Task that saves all checkpointed blobs of the given
        epoch to persistent storage."""
        self._current_db_name = db_name(epoch, self._node_name, self._db_prefix)
        logger.info('Saving to %s' % self._current_db_name)

        def add_op():
            ops.Save(
                self.blob_list(), [],
                db=self._current_db_name,
                db_type=self._db_type,
                absolute_path=True)
        return self._timed_task('checkpoint_save', add_op)

    def write_checkpoint_metadata(self, epoch):
        """Write checkpoint metadata for `epoch` via the metadata handler."""
        if self._metadata_handler is not None:
            self._metadata_handler.write(epoch=epoch)

    def get_resume_from_epoch_id(self, user_epoch=None):
        """Return the epoch-id to resume from (the handler's last epoch, or
        `user_epoch` when no handler is configured); None means no
        checkpoints were written."""
        last_epoch = user_epoch
        if self._metadata_handler is not None:
            last_epoch = self._metadata_handler.last_epoch(user_epoch=user_epoch)
        return last_epoch

    def set_params(self, nodes, path_prefix=None, path_type=None):
        """Set path parameters and propagate them to the metadata handler."""
        if path_prefix:
            self._path_prefix = path_prefix
        if path_type:
            self._path_type = path_type
        if self._metadata_handler:
            self._metadata_handler.set_params(
                db_prefix=self._db_prefix,
                db_type=self._db_type,
                node_names=[str(self._node_name)],
                path_prefix=self._path_prefix,
                path_type=self._path_type)

    def cp_accessible(self, epoch=None):
        """Return True if checkpoint data (for `epoch`, or the checkpoint
        directory when epoch is None) is accessible."""
        if self._metadata_handler is not None:
            return self._metadata_handler.cp_accessible(epoch)
        else:
            # no metadata handler configured: optimistically assume accessible
            return True
class MultiNodeCheckpointManager(object):
    """
    Coordinates one CheckpointManager per node of a distributed job so that
    checkpoints for all nodes are initialized, saved and loaded together.

    NOTE(review): relies on `CheckpointManager`, `TaskGroup`, `Node`,
    `WorkspaceType` and `db_name` from the enclosing module.
    """
    def __init__(self, db_prefix, db_type, metadata_handler=None):
        # List of (node, CheckpointManager) pairs; created lazily by init()
        # or load_blobs_locally().
        self._node_managers = None
        self._db_prefix = db_prefix
        self._db_type = db_type
        self._metadata_handler = metadata_handler
        self._path_prefix = None
        self._path_type = None

    def _task_group(self, func, *args, **kw):
        """Apply `func(manager, *args, **kw)` under each node's scope,
        collecting everything into a single global TaskGroup."""
        assert self._node_managers is not None, 'init must be called first.'
        with TaskGroup(WorkspaceType.GLOBAL) as task_group:
            for node, manager in self._node_managers:
                with Node(node):
                    func(manager, *args, **kw)
            return task_group

    def init(
        self, nodes, retrieve_from_epoch=None, path_prefix=None, path_type=None
    ):
        """Create a CheckpointManager per node and return a TaskGroup that
        initializes all of them. Idempotent: a second call with the same
        nodes returns an empty global TaskGroup."""
        if self._node_managers is not None:
            assert [node for node, _ in self._node_managers] == nodes
            return TaskGroup(WorkspaceType.GLOBAL)
        self._node_managers = []
        for node in nodes:
            with Node(node):
                manager = CheckpointManager(
                    db_prefix=self._db_prefix,
                    node_name=str(node),
                    db_type=self._db_type)
                self._node_managers.append((node, manager))
        # NOTE(review): `node` here is the last value left over from the loop
        # above, so every manager receives nodes=[<last node>]; confirm that
        # CheckpointManager.init does not depend on this argument.
        return self._task_group(
            CheckpointManager.init,
            nodes=[node],
            retrieve_from_epoch=retrieve_from_epoch,
            path_prefix=path_prefix,
            path_type=path_type)

    def load(self, epoch, path_prefix=None, path_type=None):
        """Return a TaskGroup that loads `epoch`'s checkpoint on every node."""
        return self._task_group(
            CheckpointManager.load,
            epoch,
            path_prefix=path_prefix,
            path_type=path_type)

    def load_blobs_locally(self, nodes, blob_names, epoch, session):
        """Load the named blobs from every node's checkpoint db into the
        current (local) workspace, running tasks synchronously via `session`.

        Returns True on success; False if any node's db is missing.
        """
        if self._node_managers is not None:
            assert [node for node, _ in self._node_managers] == nodes
        else:
            self._node_managers = []
            for node in nodes:
                with Node(node):
                    manager = CheckpointManager(
                        db_prefix=self._db_prefix,
                        node_name=str(node),
                        db_type=self._db_type)
                    self._node_managers.append((node, manager))
        assert self._node_managers is not None, 'must initialize node managers'
        for _, manager in self._node_managers:
            # Verify the db exists before attempting the partial load.
            existence_task = manager.check_db_exists(epoch)
            session.run(existence_task)
            existence = existence_task.outputs()[0].fetch()
            if not existence:
                logger.info('DB %s does not exist!' %
                            db_name(epoch, manager._node_name, manager._db_prefix))
                return False
            load_task = manager.load_blobs_from_checkpoint(blob_names, epoch)
            session.run(load_task)
        logger.info('Successfully loaded from checkpoints.')
        return True

    def get_ckpt_db_name(self, node_name, epoch):
        """Return the checkpoint db name for `node_name` at `epoch`, or None
        if no manager matches that node."""
        for node, manager in self._node_managers:
            if str(node) == node_name:
                return db_name(epoch, manager._node_name, manager._db_prefix)

    def report_checkpoint_stats(self, action_name):
        """Merge per-node checkpoint stats and report them via the metadata
        handler, if configured."""
        all_stats = {}
        for _, manager in self._node_managers:
            manager.collect_checkpoint_stats(all_stats)
            logger.debug("checkpoint stats: {}".format(all_stats))
        if self._metadata_handler:
            self._metadata_handler.report(action_name, all_stats)

    def save(self, epoch):
        """Return a TaskGroup that saves `epoch`'s checkpoint on every node."""
        return self._task_group(CheckpointManager.save, epoch)

    def write_checkpoint_metadata(self, epoch):
        # Delegates to the metadata handler; no-op without one.
        if self._metadata_handler is not None:
            self._metadata_handler.write(epoch=epoch)

    def get_resume_from_epoch_id(self, user_epoch=None):
        """Return the epoch to resume from (handler's last epoch when a
        metadata handler exists, else `user_epoch`)."""
        last_epoch = user_epoch
        if self._metadata_handler is not None:
            last_epoch = self._metadata_handler.last_epoch(user_epoch=user_epoch)
        return last_epoch

    def set_params(self, nodes, path_prefix=None, path_type=None):
        """Record node names / path overrides and push all db parameters to
        the metadata handler, if configured."""
        self._node_names = [str(node) for node in nodes]
        if path_prefix:
            self._path_prefix = path_prefix
        if path_type:
            self._path_type = path_type
        if self._metadata_handler:
            self._metadata_handler.set_params(
                db_prefix=self._db_prefix,
                db_type=self._db_type,
                node_names=self._node_names,
                path_prefix=self._path_prefix,
                path_type=self._path_type)

    def cp_accessible(self, epoch=None):
        """True when checkpoints are accessible (always True without a
        metadata handler)."""
        if self._metadata_handler is not None:
            return self._metadata_handler.cp_accessible(epoch)
        else:
            return True
class UploadTaskGroupBuilder(object):
    """Interface for building a task group that uploads checkpoints to an
    external store. Subclasses must override build()."""

    def build(self, epoch, checkpoint_manager):
        """Return a TaskGroup uploading the checkpoints for `epoch`;
        abstract — concrete builders must override."""
        raise NotImplementedError()
class JobRunner(object):
    """
    Drives a Job to completion: optional checkpoint resume, per-epoch
    execution, checkpoint saving, optional upload, and the download/exit
    task groups.
    """
    def __init__(self, job, checkpoint_manager=None, resume_from_epoch=None,
                 upload_task_group_builder=None):
        # Epoch to resume from; may be overridden by checkpoint metadata.
        self.resume_from_epoch = resume_from_epoch
        self.checkpoint_manager = checkpoint_manager
        self.job = job
        self.upload_task_group_builder = upload_task_group_builder

    def train(self, session):
        """Run the full training loop on `session` and return the final
        epoch number."""
        if self.checkpoint_manager:
            self.checkpoint_manager.set_params(nodes=self.job.nodes_to_checkpoint())
            # Checkpoint metadata may know a later epoch than the caller's.
            self.resume_from_epoch = self.checkpoint_manager.\
                get_resume_from_epoch_id(self.resume_from_epoch)
            if self.resume_from_epoch is not None:
                logger.info('Resuming from epoch {}'.format(self.resume_from_epoch))
        from_scratch = self.resume_from_epoch is None
        if from_scratch:
            session.run(self.job.init_group)
        if self.checkpoint_manager:
            logger.info('Preparing checkpoints ...')
            session.run(self.checkpoint_manager.init(
                self.job.nodes_to_checkpoint(),
                retrieve_from_epoch=self.resume_from_epoch))
            if from_scratch:
                # Save the freshly-initialized state as epoch 0.
                self.save_checkpoints(0, session)
            else:
                logger.info('Loading checkpoints for epoch {} ...'.format(
                    self.resume_from_epoch))
                session.run(
                    self.checkpoint_manager.load(self.resume_from_epoch))
                self.checkpoint_manager.report_checkpoint_stats('checkpoint_load')
                logger.info('Checkpoint loaded')
        logger.info("Finished initializing")
        epoch = 1 if from_scratch else self.resume_from_epoch + 1
        while True:
            logger.info('Starting epoch %d' % epoch)
            session.run(self.job.epoch_group)
            logger.info('Finished epoch %d' % epoch)
            stop_conditions = [o.fetch() for o in self.job.stop_conditions]
            # Checkpoint every epoch, even the one that triggers a stop.
            if self.checkpoint_manager:
                self.save_checkpoints(epoch, session)
            if any(stop_conditions):
                logger.info('Stopping')
                break
            epoch += 1
        logger.info('Finished training')
        if (self.upload_task_group_builder):
            upload_task_group = self.upload_task_group_builder.build(
                epoch, self.checkpoint_manager)
            session.run(upload_task_group)
            logger.info('Finished uploading the checkpoints')
        session.run(self.job.download_group)
        logger.info('Finished downloading the parameters')
        session.run(self.job.exit_group)
        logger.info('Finished running the exit group')
        return epoch

    def load_blobs_from_checkpoints(self, blob_names, epoch, session):
        """Load only the named blobs from `epoch`'s checkpoints into the
        local workspace. Returns the manager's success flag."""
        if not self.checkpoint_manager:
            raise ValueError('Checkpoint manager is None')
        logger.info('Loading checkpoint for epoch {} ...'.format(epoch))
        result = self.checkpoint_manager.load_blobs_locally(
            self.job.nodes_to_checkpoint(), blob_names, epoch, session)
        self.checkpoint_manager.report_checkpoint_stats('checkpoint_partial_load')
        return result

    def save_checkpoints(self, epoch, session):
        """Best-effort save of `epoch`'s checkpoints; failures are logged as
        warnings rather than aborting training."""
        if not self.checkpoint_manager:
            raise ValueError('Checkpoint manager is None')
        try:
            is_accessible = self.checkpoint_manager.cp_accessible(epoch=None)
            if is_accessible:
                logger.info('Saving checkpoints for epoch {}'.format(epoch))
                session.run(self.checkpoint_manager.save(epoch))
                self.checkpoint_manager.write_checkpoint_metadata(epoch)
                logger.info('Checkpoints saved')
                self.checkpoint_manager.report_checkpoint_stats('checkpoint_save')
            else:
                logger.warning("Checkpoint files cannot be accessed!")
        except Exception as ex:
            # Deliberately broad: checkpointing must not kill training.
            logger.warning("Unable to write checkpoint for epoch {}. Error={}".
                           format(epoch, ex))
def epoch_limiter(job, num_epochs):
    """
    Attach a stop condition to `job` that halts training after `num_epochs`
    epochs, implemented as a counter created at init time and counted down
    once per epoch.
    """
    with job.init_group:
        init_net = core.Net('epoch_counter_init')
        # init_count is num_epochs - 1 because CountDown returns True (done)
        # when the counter is exhausted on the final epoch.
        counter = init_net.CreateCounter([], init_count=num_epochs - 1)
        Task(step=init_net)
    with job.epoch_group:
        epoch_net = core.Net('epoch_countdown')
        finished = epoch_net.CountDown(counter)
        output = Task(step=epoch_net, outputs=finished).outputs()[0]
    job.add_stop_condition(output)
| true | true |
f737482e1e85deb52326908d600926e4fd080cc0 | 2,415 | py | Python | open this for text file manipulation/splitFileAndPerformAnalysis.py | ayandeephazra/Natural_Language_Processing_Research | affe0bfe59f3a861c67e37cbd29d50b4d04e43b6 | [
"MIT"
] | null | null | null | open this for text file manipulation/splitFileAndPerformAnalysis.py | ayandeephazra/Natural_Language_Processing_Research | affe0bfe59f3a861c67e37cbd29d50b4d04e43b6 | [
"MIT"
] | null | null | null | open this for text file manipulation/splitFileAndPerformAnalysis.py | ayandeephazra/Natural_Language_Processing_Research | affe0bfe59f3a861c67e37cbd29d50b4d04e43b6 | [
"MIT"
] | null | null | null | # f = open("file.txt", "r", encoding="utf-8")
# content = f.readLines()
# f.close()
# content_list = content.split(". ")
# i = 0
# for sentence in content:
# print("i", sentence)
# //f2 = open("FILE" + str(i) + ".txt", "w+", encoding="utf-8")
# / i = i + 1
# f2.write(sentence)
# f2.close()
# print(content_list)
# file = open(folder_path, encoding="cp1252")
# csvreader = csv.reader(file)
import csv
i = 0
folder_path = "C:\\Users\\Ayan Deep Hazra\\Desktop\\Repos\\Natural_Language_Processing_Research\\open this for text file " \
"manipulation\\labeled_papers_in_txt(w_crystals).csv"
# try multiple encodings if one fails
file = open(folder_path, encoding="cp1252")
csvreader = csv.reader(file)
rows = []
statuses = []
for row in csvreader:
rows.append(row[0])
statuses.append(row[1])
i = 0
i = 0
tn = 0
tp = 0
fn = 0
fp = 0
total = 0
with open("file.txt", "r", encoding="utf-8") as file:
for line in file:
j = 0
#print(line)
words = line.split()
if len(words) > 0:
if words[0] == "input:":
total = total + 1
# print(words[1])
# print(" ".join(words[1:len(words)-3]))
string = " ".join(words[1:len(words) - 3])
#print(string)
i = i + 1
# print(i)
# for (row, status) in zip(rows, statuses):
for t in range(len(rows)):
# print(row[0], len(row[0]))
j = j + 1
print("string", string)
print("row", rows[t])
if string.strip() == rows[t].strip():
if statuses[t] == "entailment":
if float(words[len(words) - 1]) < 0.5:
tn = tn + 1
else:
tp = tp + 1
else:
print(float(words[len(words) - 1]))
if float(words[len(words) - 1]) < 0.5:
fp = fp + 1
else:
fn = fn + 1
else:
print("bsdk")
# rows.append(row[0])
print("tn:", tn)
print("tp:", tp)
print("fp:", fp)
print("fn:", fn)
print(total)
| 27.134831 | 124 | 0.438509 |
import csv
i = 0
folder_path = "C:\\Users\\Ayan Deep Hazra\\Desktop\\Repos\\Natural_Language_Processing_Research\\open this for text file " \
"manipulation\\labeled_papers_in_txt(w_crystals).csv"
file = open(folder_path, encoding="cp1252")
csvreader = csv.reader(file)
rows = []
statuses = []
for row in csvreader:
rows.append(row[0])
statuses.append(row[1])
i = 0
i = 0
tn = 0
tp = 0
fn = 0
fp = 0
total = 0
with open("file.txt", "r", encoding="utf-8") as file:
for line in file:
j = 0
words = line.split()
if len(words) > 0:
if words[0] == "input:":
total = total + 1
string = " ".join(words[1:len(words) - 3])
i = i + 1
for t in range(len(rows)):
j = j + 1
print("string", string)
print("row", rows[t])
if string.strip() == rows[t].strip():
if statuses[t] == "entailment":
if float(words[len(words) - 1]) < 0.5:
tn = tn + 1
else:
tp = tp + 1
else:
print(float(words[len(words) - 1]))
if float(words[len(words) - 1]) < 0.5:
fp = fp + 1
else:
fn = fn + 1
else:
print("bsdk")
print("tn:", tn)
print("tp:", tp)
print("fp:", fp)
print("fn:", fn)
print(total)
| true | true |
f73748404c294e89740bbf1ee0a0e5ac227efb1b | 9,731 | py | Python | isitphishing_connector.py | shiyingtu/isitphishing | 191172484ec7093364adfbde9a651253ebf2c57a | [
"Apache-2.0"
] | null | null | null | isitphishing_connector.py | shiyingtu/isitphishing | 191172484ec7093364adfbde9a651253ebf2c57a | [
"Apache-2.0"
] | null | null | null | isitphishing_connector.py | shiyingtu/isitphishing | 191172484ec7093364adfbde9a651253ebf2c57a | [
"Apache-2.0"
] | null | null | null | # File: isitphishing_connector.py
#
# Copyright (c) 2017-2021 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
#
#
# Phantom App imports
import phantom.app as phantom
from phantom.base_connector import BaseConnector
from phantom.action_result import ActionResult
import requests
import json
from bs4 import BeautifulSoup
class RetVal(tuple):
    """A 2-tuple (status, value) pairing a Phantom status code with the
    processed data returned by the REST helpers."""

    def __new__(cls, val1, val2):
        # Build the underlying tuple from the two values.
        return super(RetVal, cls).__new__(cls, (val1, val2))
class IsitphishingConnector(BaseConnector):
    """Phantom app connector that queries the isitphishing.org web service
    for URL reputation verdicts."""

    def __init__(self):
        # Call the BaseConnectors init first
        super(IsitphishingConnector, self).__init__()

        # Per-asset state, loaded/saved in initialize()/finalize().
        self._state = None
        self._base_url = 'https://ws.isitphishing.org'

    def _process_empty_response(self, response, action_result):
        """Treat a body-less response as success only on HTTP 200."""
        if response.status_code == 200:
            return RetVal(phantom.APP_SUCCESS, {})

        return RetVal(action_result.set_status(phantom.APP_ERROR, "Empty response and no information in the header"), None)

    def _process_html_response(self, response, action_result):
        """Turn an HTML body (typically an intermediate proxy error page)
        into a readable error message on the action result."""
        # An html response, treat it like an error
        status_code = response.status_code

        try:
            soup = BeautifulSoup(response.text, "html.parser")
            # Remove the script, style, footer and navigation part from the HTML message
            for element in soup(["script", "style", "footer", "nav"]):
                element.extract()
            error_text = soup.text
            split_lines = error_text.split('\n')
            split_lines = [x.strip() for x in split_lines if x.strip()]
            error_text = '\n'.join(split_lines)
        except:
            error_text = "Cannot parse error details"

        message = "Status Code: {0}. Data from server:\n{1}\n".format(status_code,
                                                                      error_text)

        # Escape braces so later str.format() calls on this message are safe.
        message = message.replace('{', '{{').replace('}', '}}')

        return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)

    def _process_text_response(self, r, action_result):
        """Handle a plain-text response: 2xx/3xx is success, else error."""
        try:
            resp_text = r.text
        except Exception as e:
            return RetVal(action_result.set_status(phantom.APP_ERROR, "Unable to parse response. Error: {0}".format(str(e))), None)

        if 200 <= r.status_code < 399:
            return RetVal(phantom.APP_SUCCESS, resp_text)

        message = "Error from server. Status Code: {0} Data from server: {1}".format(
            r.status_code, r.text.replace('{', '{{').replace('}', '}}'))

        return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)

    def _process_response(self, r, action_result):
        """Dispatch on Content-Type to the matching response processor."""
        # store the r_text in debug data, it will get dumped in the logs if the action fails
        if hasattr(action_result, 'add_debug_data'):
            action_result.add_debug_data({'r_status_code': r.status_code})
            action_result.add_debug_data({'r_text': r.text})
            action_result.add_debug_data({'r_headers': r.headers})

        # The isitphishing service replies with plain text.
        if 'text' in r.headers.get('Content-Type', ''):
            return self._process_text_response(r, action_result)

        # There is a high chance of a PROXY in between phantom and the rest
        # of the world; in case of errors, proxies return HTML, which this
        # parses into the action_result.
        if 'html' in r.headers.get('Content-Type', ''):
            return self._process_html_response(r, action_result)

        # No parseable content-type: handle an empty response.
        if not r.text:
            return self._process_empty_response(r, action_result)

        # Everything else is an error at this point.
        message = "Can't process response from server. Status Code: {0} Data from server: {1}".format(
            r.status_code, r.text.replace('{', '{{').replace('}', '}}'))

        return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)

    def _make_rest_call(self, endpoint, action_result, data, method="post"):
        """Send `data` (plus the version/license fields required by the API)
        to base_url + endpoint and return RetVal(status, processed_body)."""
        config = self.get_config()

        resp_text = None

        request_func = getattr(requests, method, None)
        if (not request_func):
            # BUGFIX: previously the error status was set but execution fell
            # through to the request call; return the error immediately.
            return RetVal(
                action_result.set_status(phantom.APP_ERROR, "Invalid method: {0}".format(method)),
                resp_text)

        # Create a URL to connect to
        url = self._base_url + endpoint

        data.update({'version': 2, 'force': 'true', 'name': config['customer_name'], 'license': config['customer_license']})

        try:
            r = request_func(url, data=data)
        except Exception as e:
            return RetVal(action_result.set_status( phantom.APP_ERROR, "Error Connecting to server. Details: {0}".format(str(e))), resp_text)

        return self._process_response(r, action_result)

    def _handle_test_connectivity(self, param):
        """Verify the configured credentials by checking a known-good URL."""
        # Add an action result object to self (BaseConnector) to represent the action for this param
        action_result = self.add_action_result(ActionResult(dict(param)))

        self.save_progress("Connecting to {0}".format(self._base_url))

        data = {}
        data['url'] = "https://www.phantomcyber.com"

        # make rest call
        ret_val, response = self._make_rest_call('/check', action_result, data)

        if (phantom.is_fail(ret_val)):
            # The call failed; the action result already has the details.
            self.save_progress("Test Connectivity Failed. Error: {0}".format(action_result.get_message()))
            return action_result.get_status()

        self.save_progress("Test Connectivity Passed")
        return action_result.set_status(phantom.APP_SUCCESS)

    def _handle_url_reputation(self, param):
        """Query the reputation of the URL given in `param['url']`."""
        self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))

        # Add an action result object to self (BaseConnector) to represent the action for this param
        action_result = self.add_action_result(ActionResult(dict(param)))

        data = {}
        data['url'] = param['url']

        # isitphishing website and API work differently: the website accepts
        # bare hostnames, but the API returns 'ERROR (Invalid url)' without a
        # protocol, so prepend one if missing.
        if (not phantom.is_url(data['url'])):
            data['url'] = 'http://' + data['url']

        # make rest call
        ret_val, response = self._make_rest_call('/check', action_result, data)

        if (phantom.is_fail(ret_val)):
            return action_result.get_status()

        response = response.strip()

        # Add the response into the data section
        action_result.add_data({'reputation': response})
        action_result.add_data({})

        # Summary drives the textual status message produced by BaseConnector.
        action_result.update_summary({'reputation': response})

        return action_result.set_status(phantom.APP_SUCCESS)

    def handle_action(self, param):
        """Route the action identifier to its handler."""
        ret_val = phantom.APP_SUCCESS

        action_id = self.get_action_identifier()

        self.debug_print("action_id", self.get_action_identifier())

        if action_id == 'test_connectivity':
            ret_val = self._handle_test_connectivity(param)
        elif action_id == 'url_reputation':
            ret_val = self._handle_url_reputation(param)

        return ret_val

    def initialize(self):
        # Load the state in initialize; it is shared across actions.
        self._state = self.load_state()
        return phantom.APP_SUCCESS

    def finalize(self):
        # Save the state; this data persists across actions and app upgrades.
        self.save_state(self._state)
        return phantom.APP_SUCCESS
if __name__ == '__main__':
    # Standalone debug entry point: run an action described by a JSON file.
    import sys
    import pudb
    # Drop into the debugger immediately when executed directly.
    pudb.set_trace()
    if (len(sys.argv) < 2):
        print("No test json specified as input")
        exit(0)
    # The single argument is a JSON file describing the action to execute.
    with open(sys.argv[1]) as f:
        in_json = f.read()
        in_json = json.loads(in_json)
        print(json.dumps(in_json, indent=4))
        connector = IsitphishingConnector()
        connector.print_progress_message = True
        # _handle_action (from BaseConnector) expects the raw JSON string.
        ret_val = connector._handle_action(json.dumps(in_json), None)
        print(json.dumps(json.loads(ret_val), indent=4))
    exit(0)
| 37.141221 | 141 | 0.658206 |
import phantom.app as phantom
from phantom.base_connector import BaseConnector
from phantom.action_result import ActionResult
import requests
import json
from bs4 import BeautifulSoup
class RetVal(tuple):
def __new__(cls, val1, val2):
return tuple.__new__(RetVal, (val1, val2))
class IsitphishingConnector(BaseConnector):
def __init__(self):
super(IsitphishingConnector, self).__init__()
self._state = None
self._base_url = 'https://ws.isitphishing.org'
def _process_empty_reponse(self, response, action_result):
if response.status_code == 200:
return RetVal(phantom.APP_SUCCESS, {})
return RetVal(action_result.set_status(phantom.APP_ERROR, "Empty response and no information in the header"), None)
def _process_html_response(self, response, action_result):
status_code = response.status_code
try:
soup = BeautifulSoup(response.text, "html.parser")
for element in soup(["script", "style", "footer", "nav"]):
element.extract()
error_text = soup.text
split_lines = error_text.split('\n')
split_lines = [x.strip() for x in split_lines if x.strip()]
error_text = '\n'.join(split_lines)
except:
error_text = "Cannot parse error details"
message = "Status Code: {0}. Data from server:\n{1}\n".format(status_code,
error_text)
message = message.replace('{', '{{').replace('}', '}}')
return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)
def _process_text_response(self, r, action_result):
try:
resp_text = r.text
except Exception as e:
return RetVal(action_result.set_status(phantom.APP_ERROR, "Unable to parse response. Error: {0}".format(str(e))), None)
if 200 <= r.status_code < 399:
return RetVal(phantom.APP_SUCCESS, resp_text)
message = "Error from server. Status Code: {0} Data from server: {1}".format(
r.status_code, r.text.replace('{', '{{').replace('}', '}}'))
return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)
def _process_response(self, r, action_result):
if hasattr(action_result, 'add_debug_data'):
action_result.add_debug_data({'r_status_code': r.status_code})
action_result.add_debug_data({'r_text': r.text})
action_result.add_debug_data({'r_headers': r.headers})
if 'text' in r.headers.get('Content-Type', ''):
return self._process_text_response(r, action_result)
# the error and adds it to the action_result.
if 'html' in r.headers.get('Content-Type', ''):
return self._process_html_response(r, action_result)
# it's not content-type that is to be parsed, handle an empty response
if not r.text:
return self._process_empty_reponse(r, action_result)
message = "Can't process response from server. Status Code: {0} Data from server: {1}".format(
r.status_code, r.text.replace('{', '{{').replace('}', '}}'))
return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)
def _make_rest_call(self, endpoint, action_result, data, method="post"):
config = self.get_config()
resp_text = None
request_func = getattr(requests, method)
if (not request_func):
action_result.set_status( phantom.APP_ERROR, "Invalid method: {0}".format(method))
# Create a URL to connect to
url = self._base_url + endpoint
data.update({'version': 2, 'force': 'true', 'name': config['customer_name'], 'license': config['customer_license']})
try:
r = request_func(url, data=data)
except Exception as e:
return RetVal(action_result.set_status( phantom.APP_ERROR, "Error Connecting to server. Details: {0}".format(str(e))), resp_text)
return self._process_response(r, action_result)
def _handle_test_connectivity(self, param):
# Add an action result object to self (BaseConnector) to represent the action for this param
action_result = self.add_action_result(ActionResult(dict(param)))
self.save_progress("Connecting to {0}".format(self._base_url))
data = {}
data['url'] = "https://www.phantomcyber.com"
# make rest call
ret_val, response = self._make_rest_call('/check', action_result, data)
if (phantom.is_fail(ret_val)):
# the call to the 3rd party device or service failed, action result should contain all the error details
# so just return from here
self.save_progress("Test Connectivity Failed. Error: {0}".format(action_result.get_message()))
return action_result.get_status()
# Return success
self.save_progress("Test Connectivity Passed")
return action_result.set_status(phantom.APP_SUCCESS)
def _handle_url_reputation(self, param):
# Implement the handler here, some basic code is already in
self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
# Add an action result object to self (BaseConnector) to represent the action for this param
action_result = self.add_action_result(ActionResult(dict(param)))
data = {}
data['url'] = param['url']
# isitphishing website and API work differently, if www.google.com (minus the protocol) is queried
# on the website, a valid reply is returned,
# however for the same value, the API will return back with an 'ERROR (Invalid url)'
# so add a protocol before sending it out if not present
if (not phantom.is_url(data['url'])):
data['url'] = 'http://' + data['url']
# make rest call
ret_val, response = self._make_rest_call('/check', action_result, data)
if (phantom.is_fail(ret_val)):
# the call to the 3rd party device or service failed, action result should contain all the error details
# so just return from here
return action_result.get_status()
# Now post process the data, uncomment code as you deem fit
response = response.strip()
# Add the response into the data section
action_result.add_data({'reputation': response})
action_result.add_data({})
# Add a dictionary that is made up of the most important values from data into the summary
action_result.update_summary({'reputation': response})
# Return success, no need to set the message, only the status
# BaseConnector will create a textual message based off of the summary dictionary
return action_result.set_status(phantom.APP_SUCCESS)
def handle_action(self, param):
ret_val = phantom.APP_SUCCESS
# Get the action that we are supposed to execute for this App Run
action_id = self.get_action_identifier()
self.debug_print("action_id", self.get_action_identifier())
if action_id == 'test_connectivity':
ret_val = self._handle_test_connectivity(param)
elif action_id == 'url_reputation':
ret_val = self._handle_url_reputation(param)
return ret_val
def initialize(self):
# Load the state in initialize, use it to store data
# that needs to be accessed across actions
self._state = self.load_state()
return phantom.APP_SUCCESS
def finalize(self):
# Save the state, this data is saved accross actions and app upgrades
self.save_state(self._state)
return phantom.APP_SUCCESS
if __name__ == '__main__':
import sys
import pudb
pudb.set_trace()
if (len(sys.argv) < 2):
print("No test json specified as input")
exit(0)
with open(sys.argv[1]) as f:
in_json = f.read()
in_json = json.loads(in_json)
print(json.dumps(in_json, indent=4))
connector = IsitphishingConnector()
connector.print_progress_message = True
ret_val = connector._handle_action(json.dumps(in_json), None)
print(json.dumps(json.loads(ret_val), indent=4))
exit(0)
| true | true |
f737486a3c82737b2cf0ca3c559eb6dd39b06bd3 | 5,304 | py | Python | proglearn/transformers.py | KhelmholtzR/ProgLearn | f5177c720e53d2f5936272998b94e0746135a3b9 | [
"MIT"
] | 18 | 2020-05-17T21:56:36.000Z | 2020-09-18T17:39:26.000Z | proglearn/transformers.py | KhelmholtzR/ProgLearn | f5177c720e53d2f5936272998b94e0746135a3b9 | [
"MIT"
] | 209 | 2020-06-05T19:08:51.000Z | 2020-10-03T16:49:39.000Z | proglearn/transformers.py | KhelmholtzR/ProgLearn | f5177c720e53d2f5936272998b94e0746135a3b9 | [
"MIT"
] | 33 | 2020-06-10T23:12:09.000Z | 2020-09-28T05:09:44.000Z | """
Main Author: Will LeVine
Corresponding Email: levinewill@icloud.com
"""
from tensorflow import keras
import numpy as np
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils.validation import check_array, check_is_fitted, check_X_y
from .base import BaseTransformer
class NeuralClassificationTransformer(BaseTransformer):
    """
    A class used to transform data from a category to a specialized representation.

    Parameters
    ----------
    network : object
        A neural network used in the classification transformer.
    euclidean_layer_idx : int
        An integer to represent the final layer of the transformer.
    optimizer : str or keras.optimizers instance
        An optimizer used when compiling the neural network.
    loss : str, default="categorical_crossentropy"
        A loss function used when compiling the neural network.
    pretrained : bool, default=False
        A boolean used to identify if the network is pretrained.
    compile_kwargs : dict, default=None
        A dictionary containing metrics for judging network performance.
        When None, defaults to {"metrics": ["acc"]}.
    fit_kwargs : dict, default=None
        A dictionary to hold epochs, callbacks, verbose, and validation
        split for the network. When None, defaults to 100 epochs with
        early stopping (patience=5 on "val_acc"), verbose off, and a
        0.33 validation split.

    Attributes
    ----------
    encoder_ : object
        A Keras model with inputs and outputs based on the network attribute.
        Output layers are determined by the euclidean_layer_idx parameter.
    fitted_ : boolean
        A boolean flag initialized after the model is fitted.
    """

    def __init__(
        self,
        network,
        euclidean_layer_idx,
        optimizer,
        loss="categorical_crossentropy",
        pretrained=False,
        compile_kwargs=None,
        fit_kwargs=None,
    ):
        self.network = keras.models.clone_model(network)
        self.encoder_ = keras.models.Model(
            inputs=self.network.inputs,
            outputs=self.network.layers[euclidean_layer_idx].output,
        )
        self.pretrained = pretrained
        self.optimizer = optimizer
        self.loss = loss
        # Build defaults per-instance rather than as mutable default
        # arguments: the old dict defaults (and the EarlyStopping callback
        # inside them) were created once at class-definition time and shared
        # across every instance.
        self.compile_kwargs = (
            compile_kwargs if compile_kwargs is not None else {"metrics": ["acc"]}
        )
        self.fit_kwargs = (
            fit_kwargs
            if fit_kwargs is not None
            else {
                "epochs": 100,
                "callbacks": [
                    keras.callbacks.EarlyStopping(patience=5, monitor="val_acc")
                ],
                "verbose": False,
                "validation_split": 0.33,
            }
        )

    def fit(self, X, y):
        """
        Fits the transformer to data X with labels y.

        Parameters
        ----------
        X : ndarray
            Input data matrix.
        y : ndarray
            Output (i.e. response data matrix).

        Returns
        -------
        self : NeuralClassificationTransformer
            The object itself.
        """
        check_X_y(X, y, ensure_2d=False, allow_nd=True)
        # Map labels to contiguous integer ids for to_categorical.
        _, y = np.unique(y, return_inverse=True)

        self.network.compile(
            loss=self.loss, optimizer=self.optimizer, **self.compile_kwargs
        )

        self.network.fit(X, keras.utils.to_categorical(y), **self.fit_kwargs)
        self.fitted_ = True

        return self

    def transform(self, X):
        """
        Performs inference using the transformer.

        Parameters
        ----------
        X : ndarray
            Input data matrix.

        Returns
        -------
        X_transformed : ndarray
            The transformed input.

        Raises
        ------
        NotFittedError
            When the model is not fitted.
        """
        check_array(X, ensure_2d=False, allow_nd=True)
        check_is_fitted(self, attributes="fitted_")
        return self.encoder_.predict(X)
class TreeClassificationTransformer(BaseTransformer):
    """
    A class used to transform data from a category to a specialized representation.

    Parameters
    ----------
    kwargs : dict, default=None
        A dictionary to contain parameters of the tree. When None, an empty
        dictionary is used (i.e. sklearn's defaults).

    Attributes
    ----------
    transformer_ : sklearn.tree.DecisionTreeClassifier
        an internal sklearn DecisionTreeClassifier
    """

    def __init__(self, kwargs=None):
        # Avoid a shared mutable default argument: build the dict per instance.
        self.kwargs = {} if kwargs is None else kwargs

    def fit(self, X, y):
        """
        Fits the transformer to data X with labels y.

        Parameters
        ----------
        X : ndarray
            Input data matrix.
        y : ndarray
            Output (i.e. response data matrix).

        Returns
        -------
        self : TreeClassificationTransformer
            The object itself.
        """
        X, y = check_X_y(X, y)
        self.transformer_ = DecisionTreeClassifier(**self.kwargs).fit(X, y)
        return self

    def transform(self, X):
        """
        Performs inference using the transformer.

        Parameters
        ----------
        X : ndarray
            Input data matrix.

        Returns
        -------
        X_transformed : ndarray
            The transformed input (leaf indices from the fitted tree).

        Raises
        ------
        NotFittedError
            When the model is not fitted.
        """
        X = check_array(X)
        check_is_fitted(self)
        return self.transformer_.apply(X)
| 27.481865 | 94 | 0.59276 | from tensorflow import keras
import numpy as np
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils.validation import check_array, check_is_fitted, check_X_y
from .base import BaseTransformer
class NeuralClassificationTransformer(BaseTransformer):
def __init__(
self,
network,
euclidean_layer_idx,
optimizer,
loss="categorical_crossentropy",
pretrained=False,
compile_kwargs={"metrics": ["acc"]},
fit_kwargs={
"epochs": 100,
"callbacks": [keras.callbacks.EarlyStopping(patience=5, monitor="val_acc")],
"verbose": False,
"validation_split": 0.33,
},
):
self.network = keras.models.clone_model(network)
self.encoder_ = keras.models.Model(
inputs=self.network.inputs,
outputs=self.network.layers[euclidean_layer_idx].output,
)
self.pretrained = pretrained
self.optimizer = optimizer
self.loss = loss
self.compile_kwargs = compile_kwargs
self.fit_kwargs = fit_kwargs
def fit(self, X, y):
check_X_y(X, y, ensure_2d=False, allow_nd=True)
_, y = np.unique(y, return_inverse=True)
self.network.compile(
loss=self.loss, optimizer=self.optimizer, **self.compile_kwargs
)
self.network.fit(X, keras.utils.to_categorical(y), **self.fit_kwargs)
self.fitted_ = True
return self
def transform(self, X):
check_array(X, ensure_2d=False, allow_nd=True)
check_is_fitted(self, attributes="fitted_")
return self.encoder_.predict(X)
class TreeClassificationTransformer(BaseTransformer):
def __init__(self, kwargs={}):
self.kwargs = kwargs
def fit(self, X, y):
X, y = check_X_y(X, y)
self.transformer_ = DecisionTreeClassifier(**self.kwargs).fit(X, y)
return self
def transform(self, X):
X = check_array(X)
check_is_fitted(self)
return self.transformer_.apply(X)
| true | true |
f73748716bfeaea7b22165f703b95e1d7a8d55fb | 2,824 | py | Python | azure/durable_functions/tasks/call_activity_with_retry.py | asedighi/azure-functions-durable-python | ad2da30060f5ce7188da3a61dadbbaad5784f842 | [
"MIT"
] | null | null | null | azure/durable_functions/tasks/call_activity_with_retry.py | asedighi/azure-functions-durable-python | ad2da30060f5ce7188da3a61dadbbaad5784f842 | [
"MIT"
] | null | null | null | azure/durable_functions/tasks/call_activity_with_retry.py | asedighi/azure-functions-durable-python | ad2da30060f5ce7188da3a61dadbbaad5784f842 | [
"MIT"
] | null | null | null | from typing import List, Any
from .task_utilities import find_task_scheduled, \
find_task_retry_timer_created, set_processed, parse_history_event, \
find_task_completed, find_task_failed, find_task_retry_timer_fired
from ..models.RetryOptions import RetryOptions
from ..models.Task import (
Task)
from ..models.actions.CallActivityWithRetryAction import \
CallActivityWithRetryAction
from ..models.history import HistoryEvent
def call_activity_with_retry_task(
state: List[HistoryEvent],
retry_options: RetryOptions,
name: str,
input_: Any = None) -> Task:
"""Determine the state of scheduling an activity for execution with retry options.
Parameters
----------
state: List[HistoryEvent]
The list of history events to search to determine the current state of the activity.
retry_options: RetryOptions
The retry options for the activity function.
name: str
The name of the activity function to call.
input_: Any
The JSON-serializable input to pass to the activity function.
Returns
-------
Task
A Durable Task that completes when the called activity function completes or fails
completely.
"""
new_action = CallActivityWithRetryAction(
function_name=name, retry_options=retry_options, input_=input_)
for attempt in range(retry_options.max_number_of_attempts):
task_scheduled = find_task_scheduled(state, name)
task_completed = find_task_completed(state, task_scheduled)
task_failed = find_task_failed(state, task_scheduled)
task_retry_timer = find_task_retry_timer_created(state, task_failed)
task_retry_timer_fired = find_task_retry_timer_fired(
state, task_retry_timer)
set_processed([task_scheduled, task_completed,
task_failed, task_retry_timer, task_retry_timer_fired])
if not task_scheduled:
break
if task_completed:
return Task(
is_completed=True,
is_faulted=False,
action=new_action,
result=parse_history_event(task_completed),
timestamp=task_completed.timestamp,
id_=task_completed.TaskScheduledId)
if task_failed and task_retry_timer and attempt + 1 >= \
retry_options.max_number_of_attempts:
return Task(
is_completed=True,
is_faulted=True,
action=new_action,
timestamp=task_failed.timestamp,
id_=task_failed.TaskScheduledId,
exc=Exception(
f"{task_failed.Reason} \n {task_failed.Details}")
)
return Task(is_completed=False, is_faulted=False, action=new_action)
| 37.653333 | 92 | 0.669972 | from typing import List, Any
from .task_utilities import find_task_scheduled, \
find_task_retry_timer_created, set_processed, parse_history_event, \
find_task_completed, find_task_failed, find_task_retry_timer_fired
from ..models.RetryOptions import RetryOptions
from ..models.Task import (
Task)
from ..models.actions.CallActivityWithRetryAction import \
CallActivityWithRetryAction
from ..models.history import HistoryEvent
def call_activity_with_retry_task(
state: List[HistoryEvent],
retry_options: RetryOptions,
name: str,
input_: Any = None) -> Task:
new_action = CallActivityWithRetryAction(
function_name=name, retry_options=retry_options, input_=input_)
for attempt in range(retry_options.max_number_of_attempts):
task_scheduled = find_task_scheduled(state, name)
task_completed = find_task_completed(state, task_scheduled)
task_failed = find_task_failed(state, task_scheduled)
task_retry_timer = find_task_retry_timer_created(state, task_failed)
task_retry_timer_fired = find_task_retry_timer_fired(
state, task_retry_timer)
set_processed([task_scheduled, task_completed,
task_failed, task_retry_timer, task_retry_timer_fired])
if not task_scheduled:
break
if task_completed:
return Task(
is_completed=True,
is_faulted=False,
action=new_action,
result=parse_history_event(task_completed),
timestamp=task_completed.timestamp,
id_=task_completed.TaskScheduledId)
if task_failed and task_retry_timer and attempt + 1 >= \
retry_options.max_number_of_attempts:
return Task(
is_completed=True,
is_faulted=True,
action=new_action,
timestamp=task_failed.timestamp,
id_=task_failed.TaskScheduledId,
exc=Exception(
f"{task_failed.Reason} \n {task_failed.Details}")
)
return Task(is_completed=False, is_faulted=False, action=new_action)
| true | true |
f73749784992082837d34a09b1edff318dd51abd | 2,851 | py | Python | Book_fetch.py | Karthik4293/Book-Finder | 2bbe93b797a3d429f3dd36dd4911debf4a571163 | [
"Apache-2.0"
] | null | null | null | Book_fetch.py | Karthik4293/Book-Finder | 2bbe93b797a3d429f3dd36dd4911debf4a571163 | [
"Apache-2.0"
] | null | null | null | Book_fetch.py | Karthik4293/Book-Finder | 2bbe93b797a3d429f3dd36dd4911debf4a571163 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import requests
import cloudsight
from goodreads import client
import goodreads
import os
import sys
import re
import subprocess
from googlesearch import *
print("Welcome to the simple book-review tool")
print("--------------------------------------------")
print("Authentication for cloud services in progress")
gc = client.GoodreadsClient('YZGDbGQrMRAAygCAr8Z8tw', 'iH9xo6jhIkNEEHQ9a0nJopDPNEL0TLfq3Z2E2ZNgBDc')
result = requests.get("https://firebasestorage.googleapis.com/v0/b/book-finder-1f3de.appspot.com/o/image_0?alt=media&token=61ce6b03-fead-45fb-b875-71e869c3015c")
auth = cloudsight.SimpleAuth('nHo9nAczgUTzB6pLiCv1UA')
api = cloudsight.API(auth)
print("Authentication complete!!")
print("Your book is being recognised..")
response = api.remote_image_request('https://firebasestorage.googleapis.com/v0/b/book-finder-1f3de.appspot.com/o/image_0?alt=media&token=48e00dec-ffc4-494b-aa4c-5424cca5b9cc', {
'image_request[locale]': 'en-US',
})
status = api.image_response(response['token'])
if status['status'] != cloudsight.STATUS_NOT_COMPLETED:
# Done!
pass
status = api.wait(response['token'], timeout=30)
res = status['name']
print("Book recognition complete !!!!\n")
print("Querying the book id from Goodreads...\n")
res = "goodreads " + res
results = []
print("url's queried so far:")
print("--------------------------\n")
for url in search(res, tld='us', lang='en', stop=5):
print(url)
results.append(url)
print("\n" + "Query complete")
book_id = ''
for result in results:
if result[0:36] == "https://www.goodreads.com/book/show/":
i = 36
while result[i].isnumeric():
book_id += result[i]
i += 1;
break
else: continue
print("Book-ID Query Successful...!! \n\n")
book = gc.book(int(book_id))
print("Book Title :" + book.title +"\n")
print("----------------------------------------------------------")
print("Author :")
print( book.authors)
print("\n")
print("Released in : ")
print(book.publication_date)
print("\n")
print("ISBN : " + book.isbn)
print("ISBN13 : " + book.isbn13)
print("\n")
print("Description : ")
print(book.description)
print("\n")
print("Average user rating : " + book.average_rating)
print("Rating Distribution : " + book.rating_dist)
print("\n")
print("Similar Books: ")
print(book.similar_books)
print("\n\n\n")
'''
f = open("Book_review.txt", 'w')
f.write("Book Title :" + book.title +"\n")
f.write("----------------------------------------------------------")
f.write("Author :")
f.write(book.authors)
f.write("\n")
f.write("Released in : ")
f.write(book.publication_date)
f.write("\n")
f.write("ISBN : " + book.isbn + "\n")
f.write("Description : ")
f.write(book.description)
f.write("\n")
f.write("\n\n")
f.close()
'''
| 26.155963 | 177 | 0.635566 |
import requests
import cloudsight
from goodreads import client
import goodreads
import os
import sys
import re
import subprocess
from googlesearch import *
print("Welcome to the simple book-review tool")
print("--------------------------------------------")
print("Authentication for cloud services in progress")
gc = client.GoodreadsClient('YZGDbGQrMRAAygCAr8Z8tw', 'iH9xo6jhIkNEEHQ9a0nJopDPNEL0TLfq3Z2E2ZNgBDc')
result = requests.get("https://firebasestorage.googleapis.com/v0/b/book-finder-1f3de.appspot.com/o/image_0?alt=media&token=61ce6b03-fead-45fb-b875-71e869c3015c")
auth = cloudsight.SimpleAuth('nHo9nAczgUTzB6pLiCv1UA')
api = cloudsight.API(auth)
print("Authentication complete!!")
print("Your book is being recognised..")
response = api.remote_image_request('https://firebasestorage.googleapis.com/v0/b/book-finder-1f3de.appspot.com/o/image_0?alt=media&token=48e00dec-ffc4-494b-aa4c-5424cca5b9cc', {
'image_request[locale]': 'en-US',
})
status = api.image_response(response['token'])
if status['status'] != cloudsight.STATUS_NOT_COMPLETED:
pass
status = api.wait(response['token'], timeout=30)
res = status['name']
print("Book recognition complete !!!!\n")
print("Querying the book id from Goodreads...\n")
res = "goodreads " + res
results = []
print("url's queried so far:")
print("--------------------------\n")
for url in search(res, tld='us', lang='en', stop=5):
print(url)
results.append(url)
print("\n" + "Query complete")
book_id = ''
for result in results:
if result[0:36] == "https://www.goodreads.com/book/show/":
i = 36
while result[i].isnumeric():
book_id += result[i]
i += 1;
break
else: continue
print("Book-ID Query Successful...!! \n\n")
book = gc.book(int(book_id))
print("Book Title :" + book.title +"\n")
print("----------------------------------------------------------")
print("Author :")
print( book.authors)
print("\n")
print("Released in : ")
print(book.publication_date)
print("\n")
print("ISBN : " + book.isbn)
print("ISBN13 : " + book.isbn13)
print("\n")
print("Description : ")
print(book.description)
print("\n")
print("Average user rating : " + book.average_rating)
print("Rating Distribution : " + book.rating_dist)
print("\n")
print("Similar Books: ")
print(book.similar_books)
print("\n\n\n")
| true | true |
f73749b19a84005c51f69cae1bf1b3e5111fa3dc | 1,930 | py | Python | tests/integration/tree/test_total_tree_length_integration.py | yuzhenpeng/PhyKIT | 167b9dfe0dd0bddd4b23492d9a3dc34e56debbd7 | [
"MIT"
] | 26 | 2020-10-28T10:33:33.000Z | 2022-02-04T14:59:22.000Z | tests/integration/tree/test_total_tree_length_integration.py | yuzhenpeng/PhyKIT | 167b9dfe0dd0bddd4b23492d9a3dc34e56debbd7 | [
"MIT"
] | 4 | 2021-03-28T22:05:39.000Z | 2022-03-22T00:33:01.000Z | tests/integration/tree/test_total_tree_length_integration.py | JLSteenwyk/PhyKIT | 0b3194d1bb5c189993b256fe96011cce48b9bbb4 | [
"MIT"
] | 4 | 2020-11-06T11:58:25.000Z | 2021-08-17T16:57:51.000Z | import pytest
import sys
from math import isclose
from mock import patch, call
from pathlib import Path
from textwrap import dedent
from phykit.phykit import Phykit
here = Path(__file__)
@pytest.mark.integration
class TestTotalTreeLength(object):
@patch("builtins.print")
def test_total_tree_length0(self, mocked_print):
expected_result = 277.2772
testargs = [
"phykit",
"total_tree_length",
f"{here.parent.parent.parent}/sample_files/tree_simple.tre",
]
with patch.object(sys, "argv", testargs):
Phykit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_total_tree_length1(self, mocked_print):
expected_result = 0.0675
testargs = [
"phykit",
"tree_len",
f"{here.parent.parent.parent}/sample_files/small_Aspergillus_tree.tre",
]
with patch.object(sys, "argv", testargs):
Phykit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_total_tree_length_alias(self, mocked_print):
expected_result = 277.2772
testargs = [
"phykit",
"tree_len",
f"{here.parent.parent.parent}/sample_files/tree_simple.tre",
]
with patch.object(sys, "argv", testargs):
Phykit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_total_tree_length_incorrect_file_path(self, mocked_print):
testargs = [
"phykit",
"total_tree_length",
f"{here.parent.parent.parent}/sample_files/tree_simple.tr",
]
with pytest.raises(SystemExit) as pytest_wrapped_e:
Phykit()
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code == 2
| 30.15625 | 83 | 0.627979 | import pytest
import sys
from math import isclose
from mock import patch, call
from pathlib import Path
from textwrap import dedent
from phykit.phykit import Phykit
here = Path(__file__)
@pytest.mark.integration
class TestTotalTreeLength(object):
@patch("builtins.print")
def test_total_tree_length0(self, mocked_print):
expected_result = 277.2772
testargs = [
"phykit",
"total_tree_length",
f"{here.parent.parent.parent}/sample_files/tree_simple.tre",
]
with patch.object(sys, "argv", testargs):
Phykit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_total_tree_length1(self, mocked_print):
expected_result = 0.0675
testargs = [
"phykit",
"tree_len",
f"{here.parent.parent.parent}/sample_files/small_Aspergillus_tree.tre",
]
with patch.object(sys, "argv", testargs):
Phykit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_total_tree_length_alias(self, mocked_print):
expected_result = 277.2772
testargs = [
"phykit",
"tree_len",
f"{here.parent.parent.parent}/sample_files/tree_simple.tre",
]
with patch.object(sys, "argv", testargs):
Phykit()
assert mocked_print.mock_calls == [call(expected_result)]
@patch("builtins.print")
def test_total_tree_length_incorrect_file_path(self, mocked_print):
testargs = [
"phykit",
"total_tree_length",
f"{here.parent.parent.parent}/sample_files/tree_simple.tr",
]
with pytest.raises(SystemExit) as pytest_wrapped_e:
Phykit()
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code == 2
| true | true |
f73749d457ba15d6489c64897e21070a672f8706 | 8,578 | py | Python | ai/oldai/base.py | JackieChiles/Cinch | f8b9e8c073f555ff827fa7887153e82b263a8aab | [
"MIT"
] | null | null | null | ai/oldai/base.py | JackieChiles/Cinch | f8b9e8c073f555ff827fa7887153e82b263a8aab | [
"MIT"
] | 5 | 2015-01-10T02:18:54.000Z | 2015-12-07T02:07:28.000Z | ai/oldai/base.py | JackieChiles/Cinch | f8b9e8c073f555ff827fa7887153e82b263a8aab | [
"MIT"
] | null | null | null | #!/usr/bin/python3
"""Base functionality for Cinch AI agents.
Method reference:
class AIBase
--send_data(data)
--handle_daemon_command(raw_msg)
--run()
--start()
--stop()
--bid(bid)
--chat(chat_msg)
--play(card_val)
--is_legal_bid(bid)
--is_legal_play(card)
--act()
TODO:
- have 'thinking' timeout value (halt thinking after certain interval)
--- mandate a timeout; can be a Timer call to change a loop value & publish
--- can specify timeout in this file? want all agent models equal in this
--- To devs: don't publish an AI that takes forever to do anything!
- if AI are allowed to use DB, impl methods here
"""
from multiprocessing import Pipe
import logging
log = logging.getLogger(__name__)
from core.cards import RANKS_SHORT, SUITS_SHORT, NUM_RANKS
# Settings
SERVER_HOST = "localhost"
SERVER_PORT = 2424
SERVER_URL = "{0}:{1}".format(SERVER_HOST, SERVER_PORT)
# Not currently used
THINKING_TIMEOUT = 10.0 # Secs to allow AI to think before demanding response
# Constants
EVENT_NEW_GAME = 0 # Integer constants for error handling
EVENT_JOIN_GAME = 1 #
EVENT_BID = 2 #
EVENT_PLAY = 3 #
# Hardcoded values to increase performance in decoding cards
NUM_TEAMS = 2
NUM_PLAYERS = 4
class AIBase:
"""Common features of all Cinch AI Agents."""
####################
# Agent Management -- Creation/maintenance of Agent; metagame functions
####################
def __init__(self, pipe, identity):
# Instance variables
self.uid = 0
self.manager = None
self.running = False
self.pipe = pipe # type = multiprocessing.Pipe
self.queue = None # will be a multiprocessing.Queue for sending to Mgr
self.name = identity['name']
self.label = self.name
# Game
self.in_game = False
self.pNum = -1
self.hand = [] # list of Card objects will get cloned from the Game
# Game state
self.gs = None
log.info("{0}AI loaded".format(self.name))
def __del__(self):
"""Safely shutdown AI Agent subprocess."""
# Let manager know agent is shutting down
pass #TODO
self.running = False
#TODO - log final state?
####################
# Message interface
####################
def send_data(self, data):
"""Send information to game via AI Manager queue
data (dict): data to send
"""
self.queue.put(data)
def handle_command(self, command):
"""Process command from input pipe.
command (dict): data sent with following values:
- {'cmd': command number (int)} - command indicator from the AI manager
- {'gs': (message, game)} - a message and a new game state
command numbers:
-1: shutdown
1: enter game, includes uid and pNum
"""
if 'cmd' in command:
op = command['cmd'][0]
if op == -1: # Shutdown
log.info("AI Agent {0} received shutdown command".format(
self.label))
self.stop()
elif op == 1: # New game
self.uid = command['cmd'][1]
self.pNum = command['cmd'][2]
self.label = "{0}/{1}".format(self.name, self.pNum)
elif 'gs' in command:
self.msg = command['gs'][0] # May contain chats
self.game = command['gs'][1] # TODO: Protect hands of other players
self.gs = self.game.gs
self.hand = self.game.players[self.pNum].hand # Refresh hand
self.act()
else:
log.warn("Unknown daemon command: {0}".format(str(command)))
def run(self):
# Read from pipe -- does block ai thread, but start() is final action
readline = self.pipe.recv # Function references for speed++
handle_command = self.handle_command
while self.running:
try:
data = readline()
handle_command(data)
except KeyboardInterrupt:
self.stop()
except Exception as e:
self.stop()
log.exception("Killing daemon loop...")
return
def start(self, queue):
log.debug("AI Agent {0} listening to Manager".format(self.label))
self.queue = queue
self.running = True
self.run()
def stop(self):
log.debug("AI Agent {0} stopped listening to Manager"
"".format(self.label))
self.running = False
####################
# Message Transmitters -- Convenience methods for sending messages
####################
def bid(self, bid):
"""Send bid to server. Handle error response.
bid (int): bid value (0-5), assumed to have been legality checked
"""
res = self.send_data({'uid':self.uid, 'bid':bid}) # res=None is OK
# Bid may be illegal anyway
if res:
log.error("Agent made illegal bid of {0}; adjusting bid to PASS."
"".format(bid))
self.bid(0) # Pass
else:
if bid > 0:
log.info("{0} bids {1}".format(self.label, bid))
else:
log.info("{0} passes".format(self.label))
def chat(self, chat_msg):
"""Send chat-style message to Comet server (for debugging & hijinks).
chat_msg (str): message to send via chat channels
"""
self.send_data({'uid':self.uid, 'msg':chat_msg})
def play(self, card):
"""Send proposed play to server. Handle error response.
card (Card): card object, assumed to have been legality checked
"""
card_val = card.code
res = self.send_data({'uid':self.uid, 'card':card_val}) # res=None is OK
# Play may be deemed illegal by server anyway
if res:
# No fallback option defined for an illegal play
log.error("{1} made illegal play with card_val {0}"
"".format(str(card), self.label))
else:
log.info("{0} plays {1}".format(self.label,
str(card)))
self.hand.remove(card) # Complete play
####################
# Game Rules -- Adapted versions of core game functionality
####################
def is_legal_bid(self, bid):
"""Check if proposed bid is legal.
bid (int): bid value (0=PASS, 5=CINCH)
"""
if bid == 0:
return True # Always legal to pass
elif bid < 0 or bid > 5:
return False # Bid out of bounds
elif bid > self.gs.high_bid:
return True
elif bid == 5 & self.pNum == self.gs.dealer:
return True
else:
return False
def is_legal_play(self, card):
"""Check if proposed play is legal.
card (Card): proposed play
"""
if len(self.gs.cards_in_play) == 0:
return True # No restriction on what can be led
else:
if card.suit == self.gs.trump:
return True # Trump is always OK
else:
led = self.gs.cards_in_play[0].suit
if card.suit == led:
return True # Followed suit
else:
for c in self.hand:
if led == c.suit:
return False # Could've followed suit but didn't
return True # Throwing off
# get_legal_plays() and get_winning_card() will be reimplemented when HAL is repaired.
####################
# Intelligence -- Implement in subclasses
####################
def act(self):
"""Initiate action.
Called after processing each message block from Comet server.
Typical implementation is to check if AI is active player, and if so,
trigger the appropriate action (bid or play) and related analysis.
This is called regardless of who is active player. This allows, for
example, the AI to do preliminary play analysis before its own turn.
Subclasses are responsible for performing any needed checks.
Also, the current game mode should be considered.
"""
raise NotImplementedError("act() needs to be implemented in subclass.")
| 29.993007 | 90 | 0.547214 |
from multiprocessing import Pipe
import logging
log = logging.getLogger(__name__)
from core.cards import RANKS_SHORT, SUITS_SHORT, NUM_RANKS
SERVER_HOST = "localhost"
SERVER_PORT = 2424
SERVER_URL = "{0}:{1}".format(SERVER_HOST, SERVER_PORT)
THINKING_TIMEOUT = 10.0
EVENT_NEW_GAME = 0
EVENT_JOIN_GAME = 1
EVENT_BID = 2
EVENT_PLAY = 3
NUM_TEAMS = 2
NUM_PLAYERS = 4
class AIBase:
self.gs = None
log.info("{0}AI loaded".format(self.name))
def __del__(self):
pass
self.running = False
lif op == 1:
self.uid = command['cmd'][1]
self.pNum = command['cmd'][2]
self.label = "{0}/{1}".format(self.name, self.pNum)
elif 'gs' in command:
self.msg = command['gs'][0]
self.game = command['gs'][1]
self.gs = self.game.gs
self.hand = self.game.players[self.pNum].hand
self.act()
else:
log.warn("Unknown daemon command: {0}".format(str(command)))
def run(self):
readline = self.pipe.recv
handle_command = self.handle_command
while self.running:
try:
data = readline()
handle_command(data)
except KeyboardInterrupt:
self.stop()
except Exception as e:
self.stop()
log.exception("Killing daemon loop...")
return
def start(self, queue):
log.debug("AI Agent {0} listening to Manager".format(self.label))
self.queue = queue
self.running = True
self.run()
def stop(self):
log.debug("AI Agent {0} stopped listening to Manager"
"".format(self.label))
self.running = False
lse:
log.info("{0} passes".format(self.label))
def chat(self, chat_msg):
self.send_data({'uid':self.uid, 'msg':chat_msg})
def play(self, card):
card_val = card.code
res = self.send_data({'uid':self.uid, 'card':card_val})
if res:
log.error("{1} made illegal play with card_val {0}"
"".format(str(card), self.label))
else:
log.info("{0} plays {1}".format(self.label,
str(card)))
self.hand.remove(card)
f len(self.gs.cards_in_play) == 0:
return True
else:
if card.suit == self.gs.trump:
return True
else:
led = self.gs.cards_in_play[0].suit
if card.suit == led:
return True
else:
for c in self.hand:
if led == c.suit:
return False
return True
| true | true |
f7374b0c917bfd4ed66b10c34f9763a55f8aa4fc | 2,840 | py | Python | test/functional/interface_rpc.py | rojarsmith/eleccoin | 2f1bb02eb75f244fe9f1b46a91ee72b756d85912 | [
"MIT"
] | null | null | null | test/functional/interface_rpc.py | rojarsmith/eleccoin | 2f1bb02eb75f244fe9f1b46a91ee72b756d85912 | [
"MIT"
] | null | null | null | test/functional/interface_rpc.py | rojarsmith/eleccoin | 2f1bb02eb75f244fe9f1b46a91ee72b756d85912 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2020-2021 The Eleccoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Tests some generic aspects of the RPC interface."""
import os
from test_framework.authproxy import JSONRPCException
from test_framework.test_framework import EleccoinTestFramework
from test_framework.util import assert_equal, assert_greater_than_or_equal
def expect_http_status(expected_http_status, expected_rpc_code,
fcn, *args):
try:
fcn(*args)
raise AssertionError("Expected RPC error %d, got none" % expected_rpc_code)
except JSONRPCException as exc:
assert_equal(exc.error["code"], expected_rpc_code)
assert_equal(exc.http_status, expected_http_status)
class RPCInterfaceTest(EleccoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.supports_cli = False
def test_getrpcinfo(self):
self.log.info("Testing getrpcinfo...")
info = self.nodes[0].getrpcinfo()
assert_equal(len(info['active_commands']), 1)
command = info['active_commands'][0]
assert_equal(command['method'], 'getrpcinfo')
assert_greater_than_or_equal(command['duration'], 0)
assert_equal(info['logpath'], os.path.join(self.nodes[0].datadir, self.chain, 'debug.log'))
def test_batch_request(self):
self.log.info("Testing basic JSON-RPC batch request...")
results = self.nodes[0].batch([
# A basic request that will work fine.
{"method": "getblockcount", "id": 1},
# Request that will fail. The whole batch request should still
# work fine.
{"method": "invalidmethod", "id": 2},
# Another call that should succeed.
{"method": "getblockhash", "id": 3, "params": [0]},
])
result_by_id = {}
for res in results:
result_by_id[res["id"]] = res
assert_equal(result_by_id[1]['error'], None)
assert_equal(result_by_id[1]['result'], 0)
assert_equal(result_by_id[2]['error']['code'], -32601)
assert_equal(result_by_id[2]['result'], None)
assert_equal(result_by_id[3]['error'], None)
assert result_by_id[3]['result'] is not None
def test_http_status_codes(self):
self.log.info("Testing HTTP status codes for JSON-RPC requests...")
expect_http_status(404, -32601, self.nodes[0].invalidmethod)
expect_http_status(500, -8, self.nodes[0].getblockhash, 42)
def run_test(self):
self.test_getrpcinfo()
self.test_batch_request()
self.test_http_status_codes()
if __name__ == '__main__':
RPCInterfaceTest().main()
| 36.410256 | 99 | 0.659507 |
import os
from test_framework.authproxy import JSONRPCException
from test_framework.test_framework import EleccoinTestFramework
from test_framework.util import assert_equal, assert_greater_than_or_equal
def expect_http_status(expected_http_status, expected_rpc_code,
fcn, *args):
try:
fcn(*args)
raise AssertionError("Expected RPC error %d, got none" % expected_rpc_code)
except JSONRPCException as exc:
assert_equal(exc.error["code"], expected_rpc_code)
assert_equal(exc.http_status, expected_http_status)
class RPCInterfaceTest(EleccoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
self.supports_cli = False
def test_getrpcinfo(self):
self.log.info("Testing getrpcinfo...")
info = self.nodes[0].getrpcinfo()
assert_equal(len(info['active_commands']), 1)
command = info['active_commands'][0]
assert_equal(command['method'], 'getrpcinfo')
assert_greater_than_or_equal(command['duration'], 0)
assert_equal(info['logpath'], os.path.join(self.nodes[0].datadir, self.chain, 'debug.log'))
def test_batch_request(self):
self.log.info("Testing basic JSON-RPC batch request...")
results = self.nodes[0].batch([
{"method": "getblockcount", "id": 1},
{"method": "invalidmethod", "id": 2},
{"method": "getblockhash", "id": 3, "params": [0]},
])
result_by_id = {}
for res in results:
result_by_id[res["id"]] = res
assert_equal(result_by_id[1]['error'], None)
assert_equal(result_by_id[1]['result'], 0)
assert_equal(result_by_id[2]['error']['code'], -32601)
assert_equal(result_by_id[2]['result'], None)
assert_equal(result_by_id[3]['error'], None)
assert result_by_id[3]['result'] is not None
def test_http_status_codes(self):
self.log.info("Testing HTTP status codes for JSON-RPC requests...")
expect_http_status(404, -32601, self.nodes[0].invalidmethod)
expect_http_status(500, -8, self.nodes[0].getblockhash, 42)
def run_test(self):
self.test_getrpcinfo()
self.test_batch_request()
self.test_http_status_codes()
if __name__ == '__main__':
RPCInterfaceTest().main()
| true | true |
f7374ec9a59d2f83495606d1f9acaf052e81adbc | 1,759 | py | Python | src/bripy/bllb/q.py | brl0/bripy | 3754b5db651180d58645bd7d32c3d5d12528ebde | [
"MIT"
] | null | null | null | src/bripy/bllb/q.py | brl0/bripy | 3754b5db651180d58645bd7d32c3d5d12528ebde | [
"MIT"
] | null | null | null | src/bripy/bllb/q.py | brl0/bripy | 3754b5db651180d58645bd7d32c3d5d12528ebde | [
"MIT"
] | null | null | null | from queue import Queue
from threading import Thread
from time import sleep
from bripy.bllb.logging import logger, DBG
def unloadq(q, stop, limit=2000, rest=.1, check=100):
i = limit
loops = 0
results = []
while True and ((i and not stop()) or q.qsize()):
loops += 1
if loops % check == 0:
DBG(i, loops, len(results))
if q.qsize():
x = q.get()
DBG(x)
results.append(x)
i = min(i + 1, limit)
else:
i -= 1
if i % check == 0:
DBG(i)
sleep(rest)
return results
def multiplex(n, q, **kwargs):
""" Convert one queue into several equivalent Queues
>>> q1, q2, q3 = multiplex(3, in_q)
"""
out_queues = [Queue(**kwargs) for i in range(n)]
def f():
while True:
x = q.get()
for out_q in out_queues:
out_q.put(x)
t = Thread(target=f)
t.daemon = True
t.start()
return out_queues
def push(in_q, out_q):
while True:
x = in_q.get()
out_q.put(x)
def merge(*in_qs, **kwargs):
""" Merge multiple queues together
>>> out_q = merge(q1, q2, q3)
"""
out_q = Queue(**kwargs)
threads = [Thread(target=push, args=(q, out_q)) for q in in_qs]
for t in threads:
t.daemon = True
t.start()
return out_q
def iterq(q):
while q.qsize():
yield q.get()
def get_q(q):
results = []
while not q.empty() or q.qsize():
item = q.get()
if item == 'STOP':
DBG('STOP get_q')
q.task_done()
break
DBG(item)
if item:
results.append(item)
q.task_done()
return results
| 20.940476 | 67 | 0.503695 | from queue import Queue
from threading import Thread
from time import sleep
from bripy.bllb.logging import logger, DBG
def unloadq(q, stop, limit=2000, rest=.1, check=100):
i = limit
loops = 0
results = []
while True and ((i and not stop()) or q.qsize()):
loops += 1
if loops % check == 0:
DBG(i, loops, len(results))
if q.qsize():
x = q.get()
DBG(x)
results.append(x)
i = min(i + 1, limit)
else:
i -= 1
if i % check == 0:
DBG(i)
sleep(rest)
return results
def multiplex(n, q, **kwargs):
out_queues = [Queue(**kwargs) for i in range(n)]
def f():
while True:
x = q.get()
for out_q in out_queues:
out_q.put(x)
t = Thread(target=f)
t.daemon = True
t.start()
return out_queues
def push(in_q, out_q):
while True:
x = in_q.get()
out_q.put(x)
def merge(*in_qs, **kwargs):
out_q = Queue(**kwargs)
threads = [Thread(target=push, args=(q, out_q)) for q in in_qs]
for t in threads:
t.daemon = True
t.start()
return out_q
def iterq(q):
while q.qsize():
yield q.get()
def get_q(q):
results = []
while not q.empty() or q.qsize():
item = q.get()
if item == 'STOP':
DBG('STOP get_q')
q.task_done()
break
DBG(item)
if item:
results.append(item)
q.task_done()
return results
| true | true |
f7375389767f10b8cf95526e94fdc87ce6010f53 | 10,482 | py | Python | flexget/plugins/input/next_series_seasons.py | viggy96/Flexget | 464becced8f0e0e6c94b8a2f865c50f0305d2b81 | [
"MIT"
] | 1 | 2021-03-16T18:41:47.000Z | 2021-03-16T18:41:47.000Z | flexget/plugins/input/next_series_seasons.py | viggy96/Flexget | 464becced8f0e0e6c94b8a2f865c50f0305d2b81 | [
"MIT"
] | null | null | null | flexget/plugins/input/next_series_seasons.py | viggy96/Flexget | 464becced8f0e0e6c94b8a2f865c50f0305d2b81 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import logging
import re
from flexget import plugin
from flexget.event import event
from flexget.entry import Entry
from flexget.manager import Session
from flexget.plugins.filter.series import SeriesTask, Series, get_latest_release, get_latest_season_pack_release
from flexget.plugins.filter.series import get_latest_episode_release
plugin_name = 'next_series_seasons'
log = logging.getLogger(plugin_name)

# Safety caps on how far ahead of the known history seasons may be emitted;
# on_task_input refuses to emit (and logs an error) past these limits to
# avoid runaway season generation when series data is inconsistent.
MAX_SEASON_DIFF_WITHOUT_BEGIN = 15
MAX_SEASON_DIFF_WITH_BEGIN = 30
class NextSeriesSeasons(object):
    """
    Emit next season number from all series configured in this task.

    Supports only 'ep' mode series.
    """

    # Config is either a bare boolean or an options object; see on_task_input.
    schema = {
        'oneOf': [
            {'type': 'boolean'},
            {
                'type': 'object',
                'properties': {
                    'from_start': {'type': 'boolean', 'default': False},
                    'backfill': {'type': 'boolean', 'default': False},
                    'threshold': {'type': 'integer', 'minimum': 0}
                },
                'additionalProperties': False
            }
        ]
    }

    def __init__(self):
        # Entries queued by on_search_complete to be emitted on task reruns.
        self.rerun_entries = []

    def season_identifiers(self, season):
        """Return the search identifier strings (e.g. ['S05']) for a season."""
        return ['S%02d' % season]

    def search_entry(self, series, season, task, rerun=True):
        """Build a season-pack search entry for *series*/*season*.

        When *rerun* is true, on_search_complete is attached so an accepted
        season triggers a task rerun that looks for the following season.
        """
        # Extract the alternate names for the series
        alts = [alt.alt_name for alt in series.alternate_names]
        # Also consider series name without parenthetical (year, country) an alternate name
        paren_match = re.match(r'(.+?)( \(.+\))?$', series.name)
        if paren_match.group(2):
            alts.append(paren_match.group(1))
        search_strings = ['%s %s' % (series.name, id) for id in self.season_identifiers(season)]
        series_id = 'S%02d' % season
        for alt in alts:
            search_strings.extend(['%s %s' % (alt, id) for id in self.season_identifiers(season)])
        entry = Entry(title=search_strings[0], url='',
                      search_strings=search_strings,
                      series_name=series.name,
                      series_alternate_names=alts,  # Not sure if this field is useful down the road.
                      series_season=season,
                      season_pack_lookup=True,
                      series_id=series_id,
                      series_id_type=series.identified_by)
        if rerun:
            entry.on_complete(self.on_search_complete, task=task, identified_by=series.identified_by)
        return entry

    def on_task_input(self, task, config):
        """Emit one search entry per series for the next season(s) to fetch.

        Walks every series attached to this task, works out the next
        unfinished season from the release history (bounded by the
        MAX_SEASON_DIFF_* safety caps), and returns season-pack search
        entries.  In backfill mode older incomplete seasons are emitted too.
        """
        if not config:
            return
        if isinstance(config, bool):
            config = {}
        if task.is_rerun:
            # Just return calculated next eps on reruns
            entries = self.rerun_entries
            self.rerun_entries = []
            return entries
        else:
            self.rerun_entries = []
        threshold = config.get('threshold')
        entries = []
        # Maps unsupported identified_by scheme -> series names, for one
        # combined warning per scheme at the end.
        impossible = {}
        with Session() as session:
            for seriestask in session.query(SeriesTask).filter(SeriesTask.name == task.name).all():
                series = seriestask.series
                log.trace('evaluating %s', series.name)
                if not series:
                    # TODO: How can this happen?
                    log.debug('Found SeriesTask item without series specified. Cleaning up.')
                    session.delete(seriestask)
                    continue
                if series.identified_by not in ['ep']:
                    log.trace('unsupported identified_by scheme')
                    reason = series.identified_by or 'auto'
                    impossible.setdefault(reason, []).append(series.name)
                    continue
                low_season = 0
                # Don't look for seasons older than begin ep
                if series.begin and series.begin.season and series.begin.season > 1:
                    low_season = max(series.begin.season - 1, 0)
                new_season = None
                check_downloaded = not config.get('backfill')
                latest_season = get_latest_release(series, downloaded=check_downloaded)
                if latest_season:
                    if latest_season.season <= low_season:
                        latest_season = new_season = low_season + 1
                    elif latest_season.season in series.completed_seasons:
                        latest_season = new_season = latest_season.season + 1
                    else:
                        latest_season = latest_season.season
                else:
                    latest_season = low_season + 1
                # Refuse to emit when the target season is implausibly far
                # from the known history / begin episode (safety cap).
                if (latest_season - low_season > MAX_SEASON_DIFF_WITHOUT_BEGIN and not series.begin) or (series.begin and
                        latest_season - series.begin.season > MAX_SEASON_DIFF_WITH_BEGIN):
                    if series.begin:
                        log.error('Series `%s` has a begin episode set (`%s`), but the season currently being processed '
                                  '(%s) is %s seasons later than it. To prevent emitting incorrect seasons, this '
                                  'series will not emit unless the begin episode is adjusted to a season that is less '
                                  'than %s seasons from season %s.', series.name, series.begin.identifier, latest_season,
                                  (latest_season - series.begin.season), MAX_SEASON_DIFF_WITH_BEGIN, latest_season)
                    else:
                        log.error('Series `%s` does not have a begin episode set and continuing this task would result ' 'in more than %s seasons being emitted. To prevent emitting incorrect seasons, this '
                                  'series will not emit unless the begin episode is set in your series config or by '
                                  'using the CLI subcommand `series begin "%s" <SxxExx>`.', series.name,
                                  MAX_SEASON_DIFF_WITHOUT_BEGIN, series.name)
                    continue
                # Walk seasons newest -> oldest; only backfill mode continues
                # past the first season that produces an entry.
                for season in range(latest_season, low_season, -1):
                    if season in series.completed_seasons:
                        log.debug('season %s is marked as completed, skipping', season)
                        continue
                    if threshold is not None and series.episodes_for_season(season) > threshold:
                        log.debug('season %s has met threshold of threshold of %s, skipping', season, threshold)
                        continue
                    log.trace('Evaluating season %s for series `%s`', season, series.name)
                    latest = get_latest_release(series, season=season, downloaded=check_downloaded)
                    if series.begin and season == series.begin.season and (not latest or latest < series.begin):
                        # In case series.begin season is already completed, look in next available season
                        lookup_season = series.begin.season
                        while lookup_season in series.completed_seasons:
                            lookup_season += 1
                        entries.append(self.search_entry(series, lookup_season, task))
                    elif latest:
                        entries.append(self.search_entry(series, latest.season, task))
                    # First iteration of a new season with no show begin and show has downloads
                    elif new_season and season == new_season:
                        entries.append(self.search_entry(series, season, task))
                    else:
                        if config.get('from_start') or config.get('backfill'):
                            entries.append(self.search_entry(series, season, task))
                        else:
                            log.verbose('Series `%s` has no history. Set the begin option in your config, '
                                        'or use the CLI subcommand `series begin "%s" <SxxExx>` '
                                        'to set the first episode to emit', series.name, series.name)
                        break
                    # Skip older seasons if we are not in backfill mode
                    if not config.get('backfill'):
                        log.debug('backfill is not enabled; skipping older seasons')
                        break
        for reason, series in impossible.items():
            log.verbose('Series `%s` with identified_by value `%s` are not supported. ',
                        ', '.join(sorted(series)), reason)
        return entries

    def on_search_complete(self, entry, task=None, identified_by=None, **kwargs):
        """Decides whether we should look for next season based on whether we found/accepted any seasons."""
        with Session() as session:
            series = session.query(Series).filter(Series.name == entry['series_name']).first()
            latest = get_latest_season_pack_release(series)
            latest_ep = get_latest_episode_release(series, season=entry['series_season'])
            if entry.accepted:
                if not latest and latest_ep:
                    log.debug('season lookup produced an episode result; assuming no season match, no need to rerun')
                    return
                else:
                    log.debug('%s %s was accepted, rerunning to look for next season.', entry['series_name'],
                              entry['series_id'])
                    # Queue the following season exactly once per rerun cycle.
                    if not any(e.get('series_season') == latest.season + 1 for e in self.rerun_entries):
                        self.rerun_entries.append(self.search_entry(series, latest.season + 1, task))
                        # Increase rerun limit by one if we have matches, this way
                        # we keep searching as long as matches are found!
                        # TODO: this should ideally be in discover so it would be more generic
                        task.max_reruns += 1
                        task.rerun(plugin=plugin_name, reason='Look for next season')
            elif latest and not latest.completed:
                # There are known releases of this season, but none were accepted
                return
@event('plugin.register')
def register_plugin():
    """Register this input plugin with FlexGet at plugin-load time."""
    plugin.register(NextSeriesSeasons, plugin_name, api_ver=2)
| 51.131707 | 240 | 0.565159 | from __future__ import unicode_literals, division, absolute_import
from builtins import *
import logging
import re
from flexget import plugin
from flexget.event import event
from flexget.entry import Entry
from flexget.manager import Session
from flexget.plugins.filter.series import SeriesTask, Series, get_latest_release, get_latest_season_pack_release
from flexget.plugins.filter.series import get_latest_episode_release
plugin_name = 'next_series_seasons'
log = logging.getLogger(plugin_name)
MAX_SEASON_DIFF_WITHOUT_BEGIN = 15
MAX_SEASON_DIFF_WITH_BEGIN = 30
class NextSeriesSeasons(object):
schema = {
'oneOf': [
{'type': 'boolean'},
{
'type': 'object',
'properties': {
'from_start': {'type': 'boolean', 'default': False},
'backfill': {'type': 'boolean', 'default': False},
'threshold': {'type': 'integer', 'minimum': 0}
},
'additionalProperties': False
}
]
}
def __init__(self):
self.rerun_entries = []
def season_identifiers(self, season):
return ['S%02d' % season]
def search_entry(self, series, season, task, rerun=True):
alts = [alt.alt_name for alt in series.alternate_names]
paren_match = re.match(r'(.+?)( \(.+\))?$', series.name)
if paren_match.group(2):
alts.append(paren_match.group(1))
search_strings = ['%s %s' % (series.name, id) for id in self.season_identifiers(season)]
series_id = 'S%02d' % season
for alt in alts:
search_strings.extend(['%s %s' % (alt, id) for id in self.season_identifiers(season)])
entry = Entry(title=search_strings[0], url='',
search_strings=search_strings,
series_name=series.name,
series_alternate_names=alts,
series_season=season,
season_pack_lookup=True,
series_id=series_id,
series_id_type=series.identified_by)
if rerun:
entry.on_complete(self.on_search_complete, task=task, identified_by=series.identified_by)
return entry
def on_task_input(self, task, config):
if not config:
return
if isinstance(config, bool):
config = {}
if task.is_rerun:
entries = self.rerun_entries
self.rerun_entries = []
return entries
else:
self.rerun_entries = []
threshold = config.get('threshold')
entries = []
impossible = {}
with Session() as session:
for seriestask in session.query(SeriesTask).filter(SeriesTask.name == task.name).all():
series = seriestask.series
log.trace('evaluating %s', series.name)
if not series:
log.debug('Found SeriesTask item without series specified. Cleaning up.')
session.delete(seriestask)
continue
if series.identified_by not in ['ep']:
log.trace('unsupported identified_by scheme')
reason = series.identified_by or 'auto'
impossible.setdefault(reason, []).append(series.name)
continue
low_season = 0
if series.begin and series.begin.season and series.begin.season > 1:
low_season = max(series.begin.season - 1, 0)
new_season = None
check_downloaded = not config.get('backfill')
latest_season = get_latest_release(series, downloaded=check_downloaded)
if latest_season:
if latest_season.season <= low_season:
latest_season = new_season = low_season + 1
elif latest_season.season in series.completed_seasons:
latest_season = new_season = latest_season.season + 1
else:
latest_season = latest_season.season
else:
latest_season = low_season + 1
if (latest_season - low_season > MAX_SEASON_DIFF_WITHOUT_BEGIN and not series.begin) or (series.begin and
latest_season - series.begin.season > MAX_SEASON_DIFF_WITH_BEGIN):
if series.begin:
log.error('Series `%s` has a begin episode set (`%s`), but the season currently being processed '
'(%s) is %s seasons later than it. To prevent emitting incorrect seasons, this '
'series will not emit unless the begin episode is adjusted to a season that is less '
'than %s seasons from season %s.', series.name, series.begin.identifier, latest_season,
(latest_season - series.begin.season), MAX_SEASON_DIFF_WITH_BEGIN, latest_season)
else:
log.error('Series `%s` does not have a begin episode set and continuing this task would result ' 'in more than %s seasons being emitted. To prevent emitting incorrect seasons, this '
'series will not emit unless the begin episode is set in your series config or by '
'using the CLI subcommand `series begin "%s" <SxxExx>`.', series.name,
MAX_SEASON_DIFF_WITHOUT_BEGIN, series.name)
continue
for season in range(latest_season, low_season, -1):
if season in series.completed_seasons:
log.debug('season %s is marked as completed, skipping', season)
continue
if threshold is not None and series.episodes_for_season(season) > threshold:
log.debug('season %s has met threshold of threshold of %s, skipping', season, threshold)
continue
log.trace('Evaluating season %s for series `%s`', season, series.name)
latest = get_latest_release(series, season=season, downloaded=check_downloaded)
if series.begin and season == series.begin.season and (not latest or latest < series.begin):
# In case series.begin season is already completed, look in next available season
lookup_season = series.begin.season
while lookup_season in series.completed_seasons:
lookup_season += 1
entries.append(self.search_entry(series, lookup_season, task))
elif latest:
entries.append(self.search_entry(series, latest.season, task))
# First iteration of a new season with no show begin and show has downloads
elif new_season and season == new_season:
entries.append(self.search_entry(series, season, task))
else:
if config.get('from_start') or config.get('backfill'):
entries.append(self.search_entry(series, season, task))
else:
log.verbose('Series `%s` has no history. Set the begin option in your config, '
'or use the CLI subcommand `series begin "%s" <SxxExx>` '
'to set the first episode to emit', series.name, series.name)
break
# Skip older seasons if we are not in backfill mode
if not config.get('backfill'):
log.debug('backfill is not enabled; skipping older seasons')
break
for reason, series in impossible.items():
log.verbose('Series `%s` with identified_by value `%s` are not supported. ',
', '.join(sorted(series)), reason)
return entries
def on_search_complete(self, entry, task=None, identified_by=None, **kwargs):
with Session() as session:
series = session.query(Series).filter(Series.name == entry['series_name']).first()
latest = get_latest_season_pack_release(series)
latest_ep = get_latest_episode_release(series, season=entry['series_season'])
if entry.accepted:
if not latest and latest_ep:
log.debug('season lookup produced an episode result; assuming no season match, no need to rerun')
return
else:
log.debug('%s %s was accepted, rerunning to look for next season.', entry['series_name'],
entry['series_id'])
if not any(e.get('series_season') == latest.season + 1 for e in self.rerun_entries):
self.rerun_entries.append(self.search_entry(series, latest.season + 1, task))
# Increase rerun limit by one if we have matches, this way
# we keep searching as long as matches are found!
# TODO: this should ideally be in discover so it would be more generic
task.max_reruns += 1
task.rerun(plugin=plugin_name, reason='Look for next season')
elif latest and not latest.completed:
# There are known releases of this season, but none were accepted
return
@event('plugin.register')
def register_plugin():
plugin.register(NextSeriesSeasons, plugin_name, api_ver=2)
| true | true |
f73753ddcd8c27c3d3d95ab7faea324198795791 | 1,195 | py | Python | api/generated/python/azure-iiot-opc-history/models/replace_events_details_api_model.py | benjguin/Industrial-IoT | 1bc68a62383f0849bbb18f373c9566d8d30c1d68 | [
"MIT"
] | 2 | 2021-08-06T19:40:53.000Z | 2021-08-07T05:21:24.000Z | api/generated/python/azure-iiot-opc-history/models/replace_events_details_api_model.py | benjguin/Industrial-IoT | 1bc68a62383f0849bbb18f373c9566d8d30c1d68 | [
"MIT"
] | null | null | null | api/generated/python/azure-iiot-opc-history/models/replace_events_details_api_model.py | benjguin/Industrial-IoT | 1bc68a62383f0849bbb18f373c9566d8d30c1d68 | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator 2.3.33.0
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ReplaceEventsDetailsApiModel(Model):
    """Replace historic events.

    :param filter: The filter to use to select the events
    :type filter: object
    :param events: The events to replace
    :type events: list[~azure-iiot-opc-history.models.HistoricEventApiModel]
    """

    # msrest validation rules: 'events' is mandatory, 'filter' optional.
    _validation = {
        'events': {'required': True},
    }

    # msrest (de)serialization map: attribute -> wire key and type.
    _attribute_map = {
        'filter': {'key': 'filter', 'type': 'object'},
        'events': {'key': 'events', 'type': '[HistoricEventApiModel]'},
    }

    def __init__(self, events, filter=None):
        super(ReplaceEventsDetailsApiModel, self).__init__()
        self.filter = filter
        self.events = events
| 32.297297 | 76 | 0.591632 |
from msrest.serialization import Model
class ReplaceEventsDetailsApiModel(Model):
_validation = {
'events': {'required': True},
}
_attribute_map = {
'filter': {'key': 'filter', 'type': 'object'},
'events': {'key': 'events', 'type': '[HistoricEventApiModel]'},
}
def __init__(self, events, filter=None):
super(ReplaceEventsDetailsApiModel, self).__init__()
self.filter = filter
self.events = events
| true | true |
f737549106aae46c5f3c52680763d4c20159508b | 3,717 | py | Python | openstack-congress-9.0.0/congress/tests/fake_datasource.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 50 | 2015-04-21T14:12:01.000Z | 2020-06-01T06:23:13.000Z | congress/tests/fake_datasource.py | openstack-archive/congress | 85243abf63dfc7c086e28e9bdb3fb0b7c9d2ad94 | [
"Apache-2.0"
] | 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | congress/tests/fake_datasource.py | openstack-archive/congress | 85243abf63dfc7c086e28e9bdb3fb0b7c9d2ad94 | [
"Apache-2.0"
] | 25 | 2015-05-22T04:02:33.000Z | 2020-01-14T12:15:12.000Z | # Copyright (c) 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import mock
from oslo_log import log as logging
from congress.datasources import datasource_driver
from congress.datasources import datasource_utils
from congress.datasources.json_ingester import exec_api
from congress.datasources.json_ingester import json_ingester
LOG = logging.getLogger(__name__)
class FakeDataSource(datasource_driver.PollingDataSourceDriver,
                     datasource_driver.PushedDataSourceDriver,
                     datasource_driver.ExecutionDriver):
    """Fake polling/push/execution datasource driver for the test suite.

    Instead of contacting a real service it records executed actions in
    ``exec_history`` and pushed webhook payloads in ``webhook_payload``.
    """

    value_trans = {'translation-type': 'VALUE'}

    # Translator describing the single table ('fake_table') this driver
    # publishes, with 'id' and 'name' columns.
    fake_translator = {
        'translation-type': 'HDICT',
        'table-name': 'fake_table',
        'selector-type': 'DICT_SELECTOR',
        'field-translators':
            ({'fieldname': 'id', 'translator': value_trans},
             {'fieldname': 'name', 'translator': value_trans})}

    TRANSLATORS = [fake_translator]

    def __init__(self, name='', args=None):
        super(FakeDataSource, self).__init__(name, args)
        datasource_driver.ExecutionDriver.__init__(self)
        self.add_executable_method('fake_act',
                                   [{'name': 'server_id',
                                     'description': 'server to act'}],
                                   'fake action')

        # Counter of poll cycles; lets tests assert polling happened.
        self.update_number = 0
        self.initialize_update_method()
        self.exec_history = []
        self._init_end_start_poll()

    @staticmethod
    def get_datasource_info():
        """Return the driver metadata dict used for datasource registration."""
        result = {}
        result['id'] = 'fake_datasource'
        result['description'] = 'This is a fake driver used for testing'
        result['config'] = datasource_utils.get_openstack_required_config()
        result['secret'] = ['password']
        return result

    def initialize_update_method(self):
        """Register update_fake_table as the poll callback for fake_table."""
        self.add_update_method(self.update_fake_table, self.fake_translator)

    def update_fake_table(self):
        LOG.info("fake:: update_from_datasource")
        self.update_number += 1

    def execute(self, action, action_args):
        # Record the call instead of executing anything.
        self.exec_history.append((action, action_args))

    def _webhook_handler(self, payload):
        self.webhook_payload = payload
class FakeJsonIngester(json_ingester.JsonIngester):
    """JsonIngester test stub: no schema creation, webhook calls recorded."""

    def __init__(self, name='fake_json', config=None):
        if config is None:
            # Minimal valid ingester config with one webhook-fed table.
            config = {
                "tables": {
                    "alarms": {
                        "webhook": {
                            "record_jsonpath": "$.payload",
                            "id_jsonpath": "$.id"
                        }
                    }
                },
                "name": name
            }
        super(FakeJsonIngester, self).__init__(
            name, config, mock.Mock(spec_set=exec_api.ExecApiManager))

    # override for unit testing
    def _create_schema_and_tables(self):
        pass

    # override for unit testing
    def json_ingester_webhook_handler(self, table_name, body):
        # Record arguments so tests can assert on the last webhook call.
        self.webhook_table_name = table_name
        self.webhook_payload = body
| 34.416667 | 78 | 0.633844 |
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import mock
from oslo_log import log as logging
from congress.datasources import datasource_driver
from congress.datasources import datasource_utils
from congress.datasources.json_ingester import exec_api
from congress.datasources.json_ingester import json_ingester
LOG = logging.getLogger(__name__)
class FakeDataSource(datasource_driver.PollingDataSourceDriver,
datasource_driver.PushedDataSourceDriver,
datasource_driver.ExecutionDriver):
value_trans = {'translation-type': 'VALUE'}
fake_translator = {
'translation-type': 'HDICT',
'table-name': 'fake_table',
'selector-type': 'DICT_SELECTOR',
'field-translators':
({'fieldname': 'id', 'translator': value_trans},
{'fieldname': 'name', 'translator': value_trans})}
TRANSLATORS = [fake_translator]
def __init__(self, name='', args=None):
super(FakeDataSource, self).__init__(name, args)
datasource_driver.ExecutionDriver.__init__(self)
self.add_executable_method('fake_act',
[{'name': 'server_id',
'description': 'server to act'}],
'fake action')
self.update_number = 0
self.initialize_update_method()
self.exec_history = []
self._init_end_start_poll()
@staticmethod
def get_datasource_info():
result = {}
result['id'] = 'fake_datasource'
result['description'] = 'This is a fake driver used for testing'
result['config'] = datasource_utils.get_openstack_required_config()
result['secret'] = ['password']
return result
def initialize_update_method(self):
self.add_update_method(self.update_fake_table, self.fake_translator)
def update_fake_table(self):
LOG.info("fake:: update_from_datasource")
self.update_number += 1
def execute(self, action, action_args):
self.exec_history.append((action, action_args))
def _webhook_handler(self, payload):
self.webhook_payload = payload
class FakeJsonIngester(json_ingester.JsonIngester):
def __init__(self, name='fake_json', config=None):
if config is None:
config = {
"tables": {
"alarms": {
"webhook": {
"record_jsonpath": "$.payload",
"id_jsonpath": "$.id"
}
}
},
"name": name
}
super(FakeJsonIngester, self).__init__(
name, config, mock.Mock(spec_set=exec_api.ExecApiManager))
def _create_schema_and_tables(self):
pass
def json_ingester_webhook_handler(self, table_name, body):
self.webhook_table_name = table_name
self.webhook_payload = body
| true | true |
f737562ba8c10007ccc61b1746ce27337d0a99dc | 499 | py | Python | data/scripts/templates/object/tangible/ship/components/shield_generator/shared_shd_incom_rayshield_z7.py | obi-two/GameServer | 7d37024e2291a97d49522610cd8f1dbe5666afc2 | [
"MIT"
] | 20 | 2015-02-23T15:11:56.000Z | 2022-03-18T20:56:48.000Z | data/scripts/templates/object/tangible/ship/components/shield_generator/shared_shd_incom_rayshield_z7.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | null | null | null | data/scripts/templates/object/tangible/ship/components/shield_generator/shared_shd_incom_rayshield_z7.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | 20 | 2015-04-04T16:35:59.000Z | 2022-03-24T14:54:37.000Z | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    """Build the Tangible template for this shield-generator component.

    NOTE(review): file is autogenerated; `kernel` is unused here but is
    presumably required by the engine's template-loader calling convention —
    confirm before changing the signature.
    """
    result = Tangible()

    result.template = "object/tangible/ship/components/shield_generator/shared_shd_incom_rayshield_z7.iff"
    result.attribute_template_id = 8
    result.stfName("space/space_item","shd_incom_rayshield_z7_n")

    #### BEGIN MODIFICATIONS ####

    #### END MODIFICATIONS ####

    return result
f73758045548673b3de3df3b8adcc5cdff011f1a | 324 | py | Python | models/utils/FileMgmt.py | GabrielAmare/Models | 7e2ec367faf360fd0e294a17e4e2b33cae71124a | [
"MIT"
] | null | null | null | models/utils/FileMgmt.py | GabrielAmare/Models | 7e2ec367faf360fd0e294a17e4e2b33cae71124a | [
"MIT"
] | null | null | null | models/utils/FileMgmt.py | GabrielAmare/Models | 7e2ec367faf360fd0e294a17e4e2b33cae71124a | [
"MIT"
] | null | null | null | import json
class FileMgmt:
    """Small helpers for reading and writing JSON files as UTF-8 text."""

    @classmethod
    def save_json(cls, fp: str, data):
        """Serialize ``data`` as JSON and write it to the file at ``fp``."""
        with open(fp, mode='w', encoding='utf-8') as handle:
            handle.write(json.dumps(data))

    @classmethod
    def load_json(cls, fp: str):
        """Read the file at ``fp`` and return the parsed JSON value."""
        with open(fp, mode='r', encoding='utf-8') as handle:
            return json.loads(handle.read())
| 23.142857 | 58 | 0.583333 | import json
class FileMgmt:
@classmethod
def save_json(cls, fp: str, data):
with open(fp, mode='w', encoding='utf-8') as file:
json.dump(data, file)
@classmethod
def load_json(cls, fp: str):
with open(fp, mode='r', encoding='utf-8') as file:
return json.load(file)
| true | true |
f737591b84805072b3e43ea08c16281438208dc1 | 709 | py | Python | authentication/migrations/0002_token.py | Mangeneh/akkaskhooneh-backend | 2a81e73fbe0d55d5821ba1670a997bd8851c4af6 | [
"MIT"
] | 7 | 2018-09-17T18:34:49.000Z | 2019-09-15T11:39:15.000Z | authentication/migrations/0002_token.py | Mangeneh/akkaskhooneh-backend | 2a81e73fbe0d55d5821ba1670a997bd8851c4af6 | [
"MIT"
] | 9 | 2019-10-21T17:12:21.000Z | 2022-03-11T23:28:14.000Z | authentication/migrations/0002_token.py | Mangeneh/akkaskhooneh-backend | 2a81e73fbe0d55d5821ba1670a997bd8851c4af6 | [
"MIT"
] | 1 | 2019-11-29T16:12:12.000Z | 2019-11-29T16:12:12.000Z | # Generated by Django 2.1 on 2018-09-08 08:56
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the ``Token`` model (auto-generated Django migration)."""

    dependencies = [
        ('authentication', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Token',
            fields=[
                # 6-character key doubles as the primary key.
                ('key', models.CharField(max_length=6, primary_key=True, serialize=False)),
                ('created_time', models.DateTimeField(auto_now_add=True)),
                # One token per user; deleting the user cascades to its token.
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='token', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 29.541667 | 143 | 0.631876 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('authentication', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Token',
fields=[
('key', models.CharField(max_length=6, primary_key=True, serialize=False)),
('created_time', models.DateTimeField(auto_now_add=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='token', to=settings.AUTH_USER_MODEL)),
],
),
]
| true | true |
f7375a746b79c9c9e49edccf57c7c0994c8f0a23 | 195 | py | Python | wechat-demo/main.py | l2m2/dirty-projects | e3bff1f829e20b78462e3e91ab094cfad2f1a692 | [
"MIT"
] | 1 | 2020-06-04T00:54:00.000Z | 2020-06-04T00:54:00.000Z | wechat-demo/main.py | l2m2/dirty-projects | e3bff1f829e20b78462e3e91ab094cfad2f1a692 | [
"MIT"
] | null | null | null | wechat-demo/main.py | l2m2/dirty-projects | e3bff1f829e20b78462e3e91ab094cfad2f1a692 | [
"MIT"
] | null | null | null | # -*- coding utf-8 -*-
# filename: main.py
import web
from handle import Handle
# Route requests for /wx to the Handle class (imported from handle.py).
urls = (
    '/wx', 'Handle',
)

if __name__ == '__main__':
    # Start the web.py application serving the WeChat endpoint.
    app = web.application(urls, globals())
    app.run()
| 13 | 40 | 0.610256 |
import web
from handle import Handle
urls = (
'/wx', 'Handle',
)
if __name__ == '__main__':
app = web.application(urls, globals())
app.run()
| true | true |
f7375c3466a45f3592ae08fd3f66147b9baac50b | 4,419 | py | Python | google/cloud/bigquery/opentelemetry_tracing.py | grooveygr/python-bigquery | 54852ae18ce9e33e8d21968b3e4d62987bfcf129 | [
"Apache-2.0"
] | null | null | null | google/cloud/bigquery/opentelemetry_tracing.py | grooveygr/python-bigquery | 54852ae18ce9e33e8d21968b3e4d62987bfcf129 | [
"Apache-2.0"
] | null | null | null | google/cloud/bigquery/opentelemetry_tracing.py | grooveygr/python-bigquery | 54852ae18ce9e33e8d21968b3e4d62987bfcf129 | [
"Apache-2.0"
] | 1 | 2020-10-04T11:46:17.000Z | 2020-10-04T11:46:17.000Z | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from contextlib import contextmanager
from google.api_core.exceptions import GoogleAPICallError
logger = logging.getLogger(__name__)
try:
    from opentelemetry import trace
    from opentelemetry.instrumentation.utils import http_status_to_canonical_code
    from opentelemetry.trace.status import Status

    HAS_OPENTELEMETRY = True
except ImportError:
    # OpenTelemetry is optional: record its absence so create_span() can
    # degrade to a no-op instead of failing at call time.
    logger.info(
        "This service is instrumented using OpenTelemetry."
        "OpenTelemetry could not be imported; please"
        "add opentelemetry-api and opentelemetry-instrumentation"
        "packages in order to get BigQuery Tracing data."
    )

    HAS_OPENTELEMETRY = False

_default_attributes = {
    "db.system": "BigQuery"
}  # static, default values assigned to all spans
@contextmanager
def create_span(name, attributes=None, client=None, job_ref=None):
    """Creates a ContextManager for a Span to be exported to the configured exporter.

    If no configuration exists yields None.

    Args:
        name (str): Name that will be set for the span being created
        attributes (Optional[dict]):
            Additional attributes that pertain to
            the specific API call (i.e. not a default attribute)
        client (Optional[google.cloud.bigquery.client.Client]):
            Pass in a Client object to extract any attributes that may be
            relevant to it and add them to the created spans.
        job_ref (Optional[google.cloud.bigquery.job._AsyncJob])
            Pass in a _AsyncJob object to extract any attributes that may be
            relevant to it and add them to the created spans.

    Yields:
        opentelemetry.trace.Span: Yields the newly created Span.

    Raises:
        google.api_core.exceptions.GoogleAPICallError:
            Raised if a span could not be yielded or issue with call to
            OpenTelemetry.
    """
    final_attributes = _get_final_span_attributes(attributes, client, job_ref)
    # Tracing is optional: when OpenTelemetry could not be imported, behave
    # as a no-op context manager so callers need no special-casing.
    if not HAS_OPENTELEMETRY:
        yield None
        return

    tracer = trace.get_tracer(__name__)

    # yield new span value
    with tracer.start_as_current_span(name=name, attributes=final_attributes) as span:
        try:
            yield span
        except GoogleAPICallError as error:
            # Map the API error's HTTP status onto the span's canonical
            # status code before re-raising to the caller.
            if error.code is not None:
                span.set_status(Status(http_status_to_canonical_code(error.code)))
            raise
def _get_final_span_attributes(attributes=None, client=None, job_ref=None):
    """Merge default, client-, job- and call-specific span attributes.

    Precedence on key collisions: defaults < client < job_ref < attributes.
    """
    merged = dict(_default_attributes)
    if client:
        merged.update(_set_client_attributes(client))
    if job_ref:
        merged.update(_set_job_attributes(job_ref))
    if attributes:
        merged.update(attributes)
    return merged
def _set_client_attributes(client):
return {"db.name": client.project, "location": client.location}
def _set_job_attributes(job_ref):
job_attributes = {
"db.name": job_ref.project,
"location": job_ref.location,
"num_child_jobs": job_ref.num_child_jobs,
"job_id": job_ref.job_id,
"parent_job_id": job_ref.parent_job_id,
"state": job_ref.state,
}
job_attributes["hasErrors"] = job_ref.error_result is not None
if job_ref.created is not None:
job_attributes["timeCreated"] = job_ref.created.isoformat()
if job_ref.started is not None:
job_attributes["timeStarted"] = job_ref.started.isoformat()
if job_ref.ended is not None:
job_attributes["timeEnded"] = job_ref.ended.isoformat()
return job_attributes
| 35.926829 | 86 | 0.685449 |
import logging
from contextlib import contextmanager
from google.api_core.exceptions import GoogleAPICallError
logger = logging.getLogger(__name__)
try:
from opentelemetry import trace
from opentelemetry.instrumentation.utils import http_status_to_canonical_code
from opentelemetry.trace.status import Status
HAS_OPENTELEMETRY = True
except ImportError:
logger.info(
"This service is instrumented using OpenTelemetry."
"OpenTelemetry could not be imported; please"
"add opentelemetry-api and opentelemetry-instrumentation"
"packages in order to get BigQuery Tracing data."
)
HAS_OPENTELEMETRY = False
_default_attributes = {
"db.system": "BigQuery"
}
@contextmanager
def create_span(name, attributes=None, client=None, job_ref=None):
    """Context manager yielding an OpenTelemetry span named ``name``.

    Yields None when the opentelemetry packages are unavailable.  A
    GoogleAPICallError raised inside the block is recorded on the span's
    status and then re-raised.
    """
    final_attributes = _get_final_span_attributes(attributes, client, job_ref)
    if not HAS_OPENTELEMETRY:
        yield None
        return
    tracer = trace.get_tracer(__name__)
    # Make the new span the current span for the duration of the block.
    with tracer.start_as_current_span(name=name, attributes=final_attributes) as span:
        try:
            yield span
        except GoogleAPICallError as error:
            # Map the HTTP status code onto the span before propagating.
            if error.code is not None:
                span.set_status(Status(http_status_to_canonical_code(error.code)))
            raise
def _get_final_span_attributes(attributes=None, client=None, job_ref=None):
    """Merge default, client-, job- and call-specific span attributes.

    Precedence on key collisions: defaults < client < job_ref < attributes.
    """
    final_attributes = {}
    final_attributes.update(_default_attributes.copy())
    if client:
        client_attributes = _set_client_attributes(client)
        final_attributes.update(client_attributes)
    if job_ref:
        job_attributes = _set_job_attributes(job_ref)
        final_attributes.update(job_attributes)
    if attributes:
        final_attributes.update(attributes)
    return final_attributes
def _set_client_attributes(client):
    """Return span attributes derived from a BigQuery client."""
    return {"db.name": client.project, "location": client.location}
def _set_job_attributes(job_ref):
    """Return span attributes describing a BigQuery (async) job."""
    job_attributes = {
        "db.name": job_ref.project,
        "location": job_ref.location,
        "num_child_jobs": job_ref.num_child_jobs,
        "job_id": job_ref.job_id,
        "parent_job_id": job_ref.parent_job_id,
        "state": job_ref.state,
    }
    job_attributes["hasErrors"] = job_ref.error_result is not None
    # The three timestamps are optional; serialize to ISO-8601 when present.
    if job_ref.created is not None:
        job_attributes["timeCreated"] = job_ref.created.isoformat()
    if job_ref.started is not None:
        job_attributes["timeStarted"] = job_ref.started.isoformat()
    if job_ref.ended is not None:
        job_attributes["timeEnded"] = job_ref.ended.isoformat()
    return job_attributes
| true | true |
f7375c55dd3e96ada9b553d461fcb398eac1c10f | 2,770 | py | Python | client.py | Horze-International/hzpydav | 4f854f75a710cd1fcc52034cfb09480849e074b8 | [
"MIT"
] | null | null | null | client.py | Horze-International/hzpydav | 4f854f75a710cd1fcc52034cfb09480849e074b8 | [
"MIT"
] | null | null | null | client.py | Horze-International/hzpydav | 4f854f75a710cd1fcc52034cfb09480849e074b8 | [
"MIT"
] | null | null | null | import requests
from bs4 import BeautifulSoup
class Client:
    """Minimal WebDAV client built on ``requests`` and BeautifulSoup.

    Supports existence checks, PROPFIND listings, collection (directory)
    creation, uploads, downloads and recursive traversal.
    """

    def __init__(self, base_url, user, passw, cert=None, cafile=None):
        """Create a client for ``base_url`` using HTTP basic authentication.

        Args:
            base_url: Root URL of the WebDAV server.
            user: Username for basic auth.
            passw: Password for basic auth.
            cert: Optional client certificate (path or (cert, key) tuple).
            cafile: Optional CA bundle path used for TLS verification.
        """
        self.base_url = base_url
        self.session = requests.Session()
        self.session.auth = (user, passw)
        self.session.cert = cert
        self.session.verify = cafile

    def exists(self, url):
        """Return True unless a PROPFIND on ``url`` answers 404."""
        response = self.session.request('PROPFIND', self.base_url + url)
        return response.status_code != 404

    def propfind(self, url):
        """Return PROPFIND metadata for ``url``.

        Returns a dict with keys ``is_dir`` (bool) and ``entries`` (hrefs of
        directory children); an empty dict on failure.
        """
        return_value = {}
        response = self.session.request('PROPFIND', self.base_url + url)
        code = response.status_code
        if code == 404:
            print('Could not find ' + url)
            return return_value
        elif code != 200 and code != 207:
            print('Propfind failed for ' + url + ': unknown error (' + str(code) + ')')
            return return_value
        soup = BeautifulSoup(response.text, 'lxml')
        return_value['is_dir'] = False
        return_value['entries'] = []
        metadata = soup.find('response')
        if metadata.find('propstat').find('prop').find('resourcetype').find('collection') is not None:
            return_value['is_dir'] = True
        # The first <response> describes the resource itself; the remaining
        # ones are its directory entries.
        for file in soup.find_all('response')[1:]:
            return_value['entries'].append(file.find('href').text)
        return return_value

    def mkdir(self, url, recursive=False):
        """Create collection ``url``; with ``recursive`` create parents too."""
        if url[-1] == '/':
            url = url[:-1]
        # Base case for the recursion: nothing to do if it already exists.
        if self.exists(url):
            return
        parent = '/'.join(url.split('/')[:-1])
        if not self.exists(parent):
            if not recursive:
                print('Could not create directory ' + url + ', parent does not exist')
                return
            self.mkdir(parent, True)
        response = self.session.request('MKCOL', self.base_url + url)
        code = response.status_code
        if code == 201:
            return
        elif code == 405:
            print('Could not create ' + url + ': already exists')
        else:
            print('Could not create ' + url + ': unknown error (' + str(code) + ')')

    def upload(self, url, file):
        """Upload the open binary file object ``file`` to ``url``.

        Missing parent collections are created first.
        """
        data = file.read()
        parent = '/'.join(url.split('/')[:-1])
        self.mkdir(parent, True)
        print('Uploading: ' + url)
        self.session.put(self.base_url + url, data=data, headers={'Content-Type': 'application/octet-stream'})

    def download(self, url, out):
        """Download ``url`` and write its raw bytes to file object ``out``."""
        response = self.session.get(self.base_url + '/' + url)
        out.write(response.content)

    # Traverse folder recursively, returning a list of absolute filenames
    def traverse(self, folder):
        """Recursively list file hrefs under ``folder``."""
        entries = self.propfind(folder)['entries']
        results = []
        for entry in entries:
            # An href ending in '/' denotes a sub-collection: recurse.
            if entry[-1] == '/':
                results = results + self.traverse(entry)
            else:
                results.append(entry)
        return results
| 23.87931 | 104 | 0.659928 | import requests
from bs4 import BeautifulSoup
class Client:
    """Minimal WebDAV client built on ``requests`` and BeautifulSoup."""
    def __init__(self, base_url, user, passw, cert=None, cafile=None):
        """Create a client for ``base_url`` using HTTP basic authentication."""
        self.base_url = base_url
        self.session = requests.Session()
        self.session.auth = (user, passw)
        self.session.cert = cert
        self.session.verify = cafile
    def exists(self, url):
        """Return True unless a PROPFIND on ``url`` answers 404."""
        response = self.session.request('PROPFIND', self.base_url + url)
        code = response.status_code
        if code == 404:
            return False
        else:
            return True
    def propfind(self, url):
        """Return PROPFIND metadata for ``url``.

        Returns a dict with keys ``is_dir`` (bool) and ``entries`` (hrefs of
        directory children); an empty dict on failure.
        """
        return_value = {}
        response = self.session.request('PROPFIND', self.base_url + url)
        code = response.status_code
        if code == 404:
            print('Could not find ' + url)
            return return_value
        elif code != 200 and code != 207:
            print('Propfind failed for ' + url + ': unknown error (' + str(code) + ')')
            return return_value
        soup = BeautifulSoup(response.text, 'lxml')
        return_value['is_dir'] = False
        return_value['entries'] = []
        metadata = soup.find('response')
        if metadata.find('propstat').find('prop').find('resourcetype').find('collection') != None:
            return_value['is_dir'] = True
        first = True
        # The first <response> describes the resource itself; skip it and
        # collect the hrefs of the remaining directory entries.
        for file in soup.find_all('response'):
            if first:
                first = False
                continue
            return_value['entries'].append(file.find('href').text)
        return return_value
    def mkdir(self, url, recursive=False):
        """Create collection ``url``; with ``recursive`` create parents too."""
        if url[-1] == '/':
            url = url[:-1]
        # Base case for the recursion: nothing to do if it already exists.
        if self.exists(url):
            return
        parent = '/'.join(url.split('/')[:-1])
        if not self.exists(parent):
            if recursive == False:
                print('Could not create directory ' + url + ', parent does not exist')
                return
            else:
                self.mkdir(parent, True)
        response = self.session.request('MKCOL', self.base_url + url)
        code = response.status_code
        if code == 201:
            return
        elif code == 405:
            print('Could not create ' + url + ': already exists')
        else:
            print('Could not create ' + url + ': unknown error (' + str(code) + ')')
    def upload(self, url, file):
        """Upload the open binary file object ``file`` to ``url``,
        creating missing parent collections first."""
        data = file.read()
        parent = '/'.join(url.split('/')[:-1])
        self.mkdir(parent, True)
        print('Uploading: ' + url)
        self.session.put(self.base_url + url, data=data, headers={'Content-Type': 'application/octet-stream'})
    def download(self, url, out):
        """Download ``url`` and write its raw bytes to file object ``out``."""
        response = self.session.get(self.base_url + '/' + url)
        out.write(response.content)
    # Traverse folder recursively, returning a list of absolute filenames
    def traverse(self, folder):
        """Recursively list file hrefs under ``folder``."""
        entries = self.propfind(folder)['entries']
        results = []
        for entry in entries:
            # if folder, recurse
            if entry[-1] == '/':
                results = results + self.traverse(entry)
            else:
                results.append(entry)
        return results
| true | true |
f7375cd9a25ac50899ed01c5de9c119ce0ea561b | 9,787 | py | Python | configs/example/arm/devices.py | fusiled/gem5 | 670436b9cd7a23f03c9d7248abb8eb19939c83a6 | [
"BSD-3-Clause"
] | 22 | 2018-07-03T16:46:51.000Z | 2022-03-22T08:29:36.000Z | configs/example/arm/devices.py | fusiled/gem5 | 670436b9cd7a23f03c9d7248abb8eb19939c83a6 | [
"BSD-3-Clause"
] | 1 | 2022-02-21T07:56:08.000Z | 2022-02-21T07:56:18.000Z | configs/example/arm/devices.py | fusiled/gem5 | 670436b9cd7a23f03c9d7248abb8eb19939c83a6 | [
"BSD-3-Clause"
] | 25 | 2017-12-02T00:46:04.000Z | 2022-02-18T19:28:53.000Z | # Copyright (c) 2016-2017 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
# Gabor Dozsa
# System components used by the bigLITTLE.py configuration script
import m5
from m5.objects import *
m5.util.addToPath('../../')
from common.Caches import *
from common import CpuConfig
have_kvm = "kvm" in CpuConfig.cpu_names()
class L1I(L1_ICache):
    # 48kB, 3-way L1 instruction cache parameters for the example system.
    tag_latency = 1
    data_latency = 1
    response_latency = 1
    mshrs = 4
    tgts_per_mshr = 8
    size = '48kB'
    assoc = 3
class L1D(L1_DCache):
    # 32kB, 2-way L1 data cache parameters for the example system.
    tag_latency = 2
    data_latency = 2
    response_latency = 1
    mshrs = 16
    tgts_per_mshr = 16
    size = '32kB'
    assoc = 2
    write_buffers = 16
class WalkCache(PageTableWalkerCache):
    # Small (1kB) cache dedicated to page-table walker accesses.
    tag_latency = 4
    data_latency = 4
    response_latency = 4
    mshrs = 6
    tgts_per_mshr = 8
    size = '1kB'
    assoc = 8
    write_buffers = 16
class L2(L2Cache):
    # 1MB, 16-way per-cluster L2; mostly exclusive of the L1s.
    tag_latency = 12
    data_latency = 12
    response_latency = 5
    mshrs = 32
    tgts_per_mshr = 8
    size = '1MB'
    assoc = 16
    write_buffers = 8
    clusivity='mostly_excl'
class L3(Cache):
    # 16MB, 16-way shared last-level cache; mostly exclusive of the L2s.
    size = '16MB'
    assoc = 16
    tag_latency = 20
    data_latency = 20
    response_latency = 20
    mshrs = 20
    tgts_per_mshr = 12
    clusivity='mostly_excl'
class MemBus(SystemXBar):
    # System crossbar whose default target warns on accesses to bad addresses.
    badaddr_responder = BadAddr(warn_access="warn")
    default = Self.badaddr_responder.pio
class CpuCluster(SubSystem):
    """A cluster of CPUs sharing a clock/voltage domain and (optionally) an L2.

    Registers itself with the owning system on construction.
    """
    def __init__(self, system, num_cpus, cpu_clock, cpu_voltage,
                 cpu_type, l1i_type, l1d_type, wcache_type, l2_type):
        """Create ``num_cpus`` CPUs of ``cpu_type`` in a new clock domain.

        The ``*_type`` arguments are SimObject classes (or None to omit
        that cache level).
        """
        super(CpuCluster, self).__init__()
        self._cpu_type = cpu_type
        self._l1i_type = l1i_type
        self._l1d_type = l1d_type
        self._wcache_type = wcache_type
        self._l2_type = l2_type
        assert num_cpus > 0
        self.voltage_domain = VoltageDomain(voltage=cpu_voltage)
        self.clk_domain = SrcClockDomain(clock=cpu_clock,
                                         voltage_domain=self.voltage_domain)
        # CPU ids continue from those already present in the system.
        self.cpus = [ self._cpu_type(cpu_id=system.numCpus() + idx,
                                     clk_domain=self.clk_domain)
                      for idx in range(num_cpus) ]
        for cpu in self.cpus:
            cpu.createThreads()
            cpu.createInterruptController()
            cpu.socket_id = system.numCpuClusters()
        system.addCpuCluster(self, num_cpus)
    def requireCaches(self):
        # Whether the CPU model needs caches (e.g. timing CPUs do, KVM doesn't).
        return self._cpu_type.require_caches()
    def memoryMode(self):
        # Memory mode required by the CPU model ('atomic' or 'timing').
        return self._cpu_type.memory_mode()
    def addL1(self):
        """Attach private split L1 I/D caches and walker caches to each CPU."""
        for cpu in self.cpus:
            l1i = None if self._l1i_type is None else self._l1i_type()
            l1d = None if self._l1d_type is None else self._l1d_type()
            iwc = None if self._wcache_type is None else self._wcache_type()
            dwc = None if self._wcache_type is None else self._wcache_type()
            cpu.addPrivateSplitL1Caches(l1i, l1d, iwc, dwc)
    def addL2(self, clk_domain):
        """Create a shared L2 behind a cluster-local crossbar (no-op if no L2 type)."""
        if self._l2_type is None:
            return
        self.toL2Bus = L2XBar(width=64, clk_domain=clk_domain)
        self.l2 = self._l2_type()
        for cpu in self.cpus:
            cpu.connectAllPorts(self.toL2Bus)
        self.toL2Bus.master = self.l2.cpu_side
    def connectMemSide(self, bus):
        """Connect the cluster (via its L2 if present) to ``bus``."""
        # NOTE(review): bare attribute access with no effect on assignment —
        # looks like a leftover; confirm it is intentional.
        bus.slave
        try:
            self.l2.mem_side = bus.slave
        except AttributeError:
            # No L2 was created: hook the CPUs up to the bus directly.
            for cpu in self.cpus:
                cpu.connectAllPorts(bus)
class AtomicCluster(CpuCluster):
    """Cluster of atomic CPUs; runs cacheless, so addL1() is a no-op."""
    def __init__(self, system, num_cpus, cpu_clock, cpu_voltage="1.0V"):
        # Atomic CPU model with no caches at any level.
        cpu_config = [ CpuConfig.get("atomic"), None, None, None, None ]
        super(AtomicCluster, self).__init__(system, num_cpus, cpu_clock,
                                            cpu_voltage, *cpu_config)
    def addL1(self):
        pass
class KvmCluster(CpuCluster):
    """Cluster of KVM CPUs; runs cacheless, so addL1() is a no-op."""
    def __init__(self, system, num_cpus, cpu_clock, cpu_voltage="1.0V"):
        # KVM CPU model with no caches at any level.
        cpu_config = [ CpuConfig.get("kvm"), None, None, None, None ]
        super(KvmCluster, self).__init__(system, num_cpus, cpu_clock,
                                         cpu_voltage, *cpu_config)
    def addL1(self):
        pass
class SimpleSystem(LinuxArmSystem):
    """Full-system ARM platform (VExpress_GEM5_V1) hosting CPU clusters.

    Clusters register themselves via addCpuCluster(); caches and the memory
    hierarchy are wired up by addCaches()/connect().
    """
    cache_line_size = 64
    def __init__(self, caches, mem_size, **kwargs):
        """Build the platform.

        Args:
            caches: Whether the system will use caches (selects an IOCache
                vs. a plain DMA bridge for device traffic).
            mem_size: Size of the memory range to expose.
        """
        super(SimpleSystem, self).__init__(**kwargs)
        self.voltage_domain = VoltageDomain(voltage="1.0V")
        self.clk_domain = SrcClockDomain(clock="1GHz",
                                         voltage_domain=Parent.voltage_domain)
        self.realview = VExpress_GEM5_V1()
        self.gic_cpu_addr = self.realview.gic.cpu_addr
        self.flags_addr = self.realview.realview_io.pio_addr + 0x30
        self.membus = MemBus()
        self.intrctrl = IntrControl()
        self.terminal = Terminal()
        self.vncserver = VncServer()
        self.iobus = IOXBar()
        # CPUs->PIO
        self.iobridge = Bridge(delay='50ns')
        # Device DMA -> MEM
        mem_range = self.realview._mem_regions[0]
        # NOTE: long() implies this file targets Python 2.
        mem_range_size = long(mem_range[1]) - long(mem_range[0])
        assert mem_range_size >= long(Addr(mem_size))
        self._mem_range = AddrRange(start=mem_range[0], size=mem_size)
        self._caches = caches
        if self._caches:
            self.iocache = IOCache(addr_ranges=[self._mem_range])
        else:
            self.dmabridge = Bridge(delay='50ns',
                                    ranges=[self._mem_range])
        self._pci_devices = 0
        self._clusters = []
        self._num_cpus = 0
    def attach_pci(self, dev):
        """Attach a PCI device to the platform's IO bus."""
        dev.pci_bus, dev.pci_dev, dev.pci_func = (0, self._pci_devices + 1, 0)
        self._pci_devices += 1
        self.realview.attachPciDevice(dev, self.iobus)
    def connect(self):
        """Wire IO bridges/caches, platform devices and the system port."""
        self.iobridge.master = self.iobus.slave
        self.iobridge.slave = self.membus.master
        if self._caches:
            self.iocache.mem_side = self.membus.slave
            self.iocache.cpu_side = self.iobus.master
        else:
            self.dmabridge.master = self.membus.slave
            self.dmabridge.slave = self.iobus.master
        self.gic_cpu_addr = self.realview.gic.cpu_addr
        self.realview.attachOnChipIO(self.membus, self.iobridge)
        self.realview.attachIO(self.iobus)
        self.system_port = self.membus.slave
    def numCpuClusters(self):
        return len(self._clusters)
    def addCpuCluster(self, cpu_cluster, num_cpus):
        """Register a cluster with the system (called by CpuCluster.__init__)."""
        assert cpu_cluster not in self._clusters
        assert num_cpus > 0
        self._clusters.append(cpu_cluster)
        self._num_cpus += num_cpus
    def numCpus(self):
        return self._num_cpus
    def addCaches(self, need_caches, last_cache_level):
        """Build up to ``last_cache_level`` (1-3) levels of caches per cluster."""
        if not need_caches:
            # connect each cluster to the memory hierarchy
            for cluster in self._clusters:
                cluster.connectMemSide(self.membus)
            return
        cluster_mem_bus = self.membus
        assert last_cache_level >= 1 and last_cache_level <= 3
        for cluster in self._clusters:
            cluster.addL1()
        if last_cache_level > 1:
            for cluster in self._clusters:
                cluster.addL2(cluster.clk_domain)
        if last_cache_level > 2:
            # The shared L3 runs in the fastest cluster's clock domain.
            max_clock_cluster = max(self._clusters,
                                    key=lambda c: c.clk_domain.clock[0])
            self.l3 = L3(clk_domain=max_clock_cluster.clk_domain)
            self.toL3Bus = L2XBar(width=64)
            self.toL3Bus.master = self.l3.cpu_side
            self.l3.mem_side = self.membus.slave
            cluster_mem_bus = self.toL3Bus
        # connect each cluster to the memory hierarchy
        for cluster in self._clusters:
            cluster.connectMemSide(cluster_mem_bus)
| 34.583039 | 78 | 0.654644 |
import m5
from m5.objects import *
m5.util.addToPath('../../')
from common.Caches import *
from common import CpuConfig
have_kvm = "kvm" in CpuConfig.cpu_names()
class L1I(L1_ICache):
    # 48kB, 3-way L1 instruction cache parameters.
    tag_latency = 1
    data_latency = 1
    response_latency = 1
    mshrs = 4
    tgts_per_mshr = 8
    size = '48kB'
    assoc = 3
class L1D(L1_DCache):
    # 32kB, 2-way L1 data cache parameters.
    tag_latency = 2
    data_latency = 2
    response_latency = 1
    mshrs = 16
    tgts_per_mshr = 16
    size = '32kB'
    assoc = 2
    write_buffers = 16
class WalkCache(PageTableWalkerCache):
    # Small (1kB) cache dedicated to page-table walker accesses.
    tag_latency = 4
    data_latency = 4
    response_latency = 4
    mshrs = 6
    tgts_per_mshr = 8
    size = '1kB'
    assoc = 8
    write_buffers = 16
class L2(L2Cache):
    # 1MB, 16-way per-cluster L2; mostly exclusive of the L1s.
    tag_latency = 12
    data_latency = 12
    response_latency = 5
    mshrs = 32
    tgts_per_mshr = 8
    size = '1MB'
    assoc = 16
    write_buffers = 8
    clusivity='mostly_excl'
class L3(Cache):
    # 16MB, 16-way shared last-level cache; mostly exclusive of the L2s.
    size = '16MB'
    assoc = 16
    tag_latency = 20
    data_latency = 20
    response_latency = 20
    mshrs = 20
    tgts_per_mshr = 12
    clusivity='mostly_excl'
class MemBus(SystemXBar):
    # System crossbar whose default target warns on accesses to bad addresses.
    badaddr_responder = BadAddr(warn_access="warn")
    default = Self.badaddr_responder.pio
class CpuCluster(SubSystem):
    """A cluster of CPUs sharing a clock/voltage domain and (optionally) an L2."""
    def __init__(self, system, num_cpus, cpu_clock, cpu_voltage,
                 cpu_type, l1i_type, l1d_type, wcache_type, l2_type):
        """Create ``num_cpus`` CPUs of ``cpu_type`` and register with ``system``."""
        super(CpuCluster, self).__init__()
        self._cpu_type = cpu_type
        self._l1i_type = l1i_type
        self._l1d_type = l1d_type
        self._wcache_type = wcache_type
        self._l2_type = l2_type
        assert num_cpus > 0
        self.voltage_domain = VoltageDomain(voltage=cpu_voltage)
        self.clk_domain = SrcClockDomain(clock=cpu_clock,
                                         voltage_domain=self.voltage_domain)
        # CPU ids continue from those already present in the system.
        self.cpus = [ self._cpu_type(cpu_id=system.numCpus() + idx,
                                     clk_domain=self.clk_domain)
                      for idx in range(num_cpus) ]
        for cpu in self.cpus:
            cpu.createThreads()
            cpu.createInterruptController()
            cpu.socket_id = system.numCpuClusters()
        system.addCpuCluster(self, num_cpus)
    def requireCaches(self):
        """Whether the CPU model needs caches."""
        return self._cpu_type.require_caches()
    def memoryMode(self):
        """Memory mode required by the CPU model ('atomic' or 'timing')."""
        return self._cpu_type.memory_mode()
    def addL1(self):
        """Attach private split L1 I/D caches and walker caches to each CPU."""
        for cpu in self.cpus:
            l1i = None if self._l1i_type is None else self._l1i_type()
            l1d = None if self._l1d_type is None else self._l1d_type()
            iwc = None if self._wcache_type is None else self._wcache_type()
            dwc = None if self._wcache_type is None else self._wcache_type()
            cpu.addPrivateSplitL1Caches(l1i, l1d, iwc, dwc)
    def addL2(self, clk_domain):
        """Create a shared L2 behind a cluster-local crossbar (no-op if no L2 type)."""
        if self._l2_type is None:
            return
        self.toL2Bus = L2XBar(width=64, clk_domain=clk_domain)
        self.l2 = self._l2_type()
        for cpu in self.cpus:
            cpu.connectAllPorts(self.toL2Bus)
        self.toL2Bus.master = self.l2.cpu_side
    def connectMemSide(self, bus):
        """Connect the cluster (via its L2 if present) to ``bus``."""
        # NOTE(review): bare attribute access with no effect on assignment —
        # looks like a leftover; confirm it is intentional.
        bus.slave
        try:
            self.l2.mem_side = bus.slave
        except AttributeError:
            # No L2 was created: hook the CPUs up to the bus directly.
            for cpu in self.cpus:
                cpu.connectAllPorts(bus)
class AtomicCluster(CpuCluster):
    """Cluster of atomic CPUs; runs cacheless, so addL1() is a no-op."""
    def __init__(self, system, num_cpus, cpu_clock, cpu_voltage="1.0V"):
        cpu_config = [ CpuConfig.get("atomic"), None, None, None, None ]
        super(AtomicCluster, self).__init__(system, num_cpus, cpu_clock,
                                            cpu_voltage, *cpu_config)
    def addL1(self):
        pass
class KvmCluster(CpuCluster):
    """Cluster of KVM CPUs; runs cacheless, so addL1() is a no-op."""
    def __init__(self, system, num_cpus, cpu_clock, cpu_voltage="1.0V"):
        cpu_config = [ CpuConfig.get("kvm"), None, None, None, None ]
        super(KvmCluster, self).__init__(system, num_cpus, cpu_clock,
                                         cpu_voltage, *cpu_config)
    def addL1(self):
        pass
class SimpleSystem(LinuxArmSystem):
    """Full-system ARM platform (VExpress_GEM5_V1) hosting CPU clusters."""
    cache_line_size = 64
    def __init__(self, caches, mem_size, **kwargs):
        """Build the platform; ``caches`` selects IOCache vs. DMA bridge."""
        super(SimpleSystem, self).__init__(**kwargs)
        self.voltage_domain = VoltageDomain(voltage="1.0V")
        self.clk_domain = SrcClockDomain(clock="1GHz",
                                         voltage_domain=Parent.voltage_domain)
        self.realview = VExpress_GEM5_V1()
        self.gic_cpu_addr = self.realview.gic.cpu_addr
        self.flags_addr = self.realview.realview_io.pio_addr + 0x30
        self.membus = MemBus()
        self.intrctrl = IntrControl()
        self.terminal = Terminal()
        self.vncserver = VncServer()
        self.iobus = IOXBar()
        # Bridge for CPU -> device (PIO) traffic.
        self.iobridge = Bridge(delay='50ns')
        # NOTE: long() implies this file targets Python 2.
        mem_range = self.realview._mem_regions[0]
        mem_range_size = long(mem_range[1]) - long(mem_range[0])
        assert mem_range_size >= long(Addr(mem_size))
        self._mem_range = AddrRange(start=mem_range[0], size=mem_size)
        self._caches = caches
        if self._caches:
            self.iocache = IOCache(addr_ranges=[self._mem_range])
        else:
            self.dmabridge = Bridge(delay='50ns',
                                    ranges=[self._mem_range])
        self._pci_devices = 0
        self._clusters = []
        self._num_cpus = 0
    def attach_pci(self, dev):
        """Attach a PCI device to the platform's IO bus."""
        dev.pci_bus, dev.pci_dev, dev.pci_func = (0, self._pci_devices + 1, 0)
        self._pci_devices += 1
        self.realview.attachPciDevice(dev, self.iobus)
    def connect(self):
        """Wire IO bridges/caches, platform devices and the system port."""
        self.iobridge.master = self.iobus.slave
        self.iobridge.slave = self.membus.master
        if self._caches:
            self.iocache.mem_side = self.membus.slave
            self.iocache.cpu_side = self.iobus.master
        else:
            self.dmabridge.master = self.membus.slave
            self.dmabridge.slave = self.iobus.master
        self.gic_cpu_addr = self.realview.gic.cpu_addr
        self.realview.attachOnChipIO(self.membus, self.iobridge)
        self.realview.attachIO(self.iobus)
        self.system_port = self.membus.slave
    def numCpuClusters(self):
        return len(self._clusters)
    def addCpuCluster(self, cpu_cluster, num_cpus):
        """Register a cluster with the system (called by CpuCluster.__init__)."""
        assert cpu_cluster not in self._clusters
        assert num_cpus > 0
        self._clusters.append(cpu_cluster)
        self._num_cpus += num_cpus
    def numCpus(self):
        return self._num_cpus
    def addCaches(self, need_caches, last_cache_level):
        """Build up to ``last_cache_level`` (1-3) levels of caches per cluster."""
        if not need_caches:
            for cluster in self._clusters:
                cluster.connectMemSide(self.membus)
            return
        cluster_mem_bus = self.membus
        assert last_cache_level >= 1 and last_cache_level <= 3
        for cluster in self._clusters:
            cluster.addL1()
        if last_cache_level > 1:
            for cluster in self._clusters:
                cluster.addL2(cluster.clk_domain)
        if last_cache_level > 2:
            # The shared L3 runs in the fastest cluster's clock domain.
            max_clock_cluster = max(self._clusters,
                                    key=lambda c: c.clk_domain.clock[0])
            self.l3 = L3(clk_domain=max_clock_cluster.clk_domain)
            self.toL3Bus = L2XBar(width=64)
            self.toL3Bus.master = self.l3.cpu_side
            self.l3.mem_side = self.membus.slave
            cluster_mem_bus = self.toL3Bus
        for cluster in self._clusters:
            cluster.connectMemSide(cluster_mem_bus)
| true | true |
f7375cfdfb43c0cef59a9247b683f8c2a729f448 | 260 | py | Python | www/models/__init__.py | JFK/python-tornado-site-template | 206ab073d251389552cba986f9492d6c6953e911 | [
"MIT"
] | 2 | 2020-06-28T15:21:13.000Z | 2020-09-06T02:32:25.000Z | www/models/__init__.py | JFK/python-tornado-site-template | 206ab073d251389552cba986f9492d6c6953e911 | [
"MIT"
] | null | null | null | www/models/__init__.py | JFK/python-tornado-site-template | 206ab073d251389552cba986f9492d6c6953e911 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014 fumikazu.kiyota@gmail.com
#
from mongoengine import connect
def load(db):
    u"""Connect to the MongoDB database.

    Args:
        db: Mapping with keys 'name', 'host' and 'port'.
    """
    connect(
        db['name'],
        host=db['host'],
        port=db['port']
    )
| 14.444444 | 42 | 0.553846 |
from mongoengine import connect
def load(db):
    """Connect to the MongoDB database.

    Args:
        db: Mapping with keys 'name', 'host' and 'port'.
    """
    connect(
        db['name'],
        host=db['host'],
        port=db['port']
    )
| true | true |
f7375d4cc6866d4d7fea883ed6e0f246a2e8be6f | 761 | py | Python | metrics/measures/code_style.py | sebMathieu/code_metrics | f188041c8f2c0950c5f63a1f719cdb05aaeb42c9 | [
"MIT"
] | null | null | null | metrics/measures/code_style.py | sebMathieu/code_metrics | f188041c8f2c0950c5f63a1f719cdb05aaeb42c9 | [
"MIT"
] | null | null | null | metrics/measures/code_style.py | sebMathieu/code_metrics | f188041c8f2c0950c5f63a1f719cdb05aaeb42c9 | [
"MIT"
] | null | null | null | # -*- coding: UTF-8 -*-
import pycodestyle
from metrics.report_keys import CODE_STYLE
def code_style(code_path, results, ignore_codes=None):
    """
    Check code style.
    :param code_path: Path to the source code.
    :param results: Dictionary with the results.
    :param ignore_codes: List of PEP8 code to ignore.
    """
    if ignore_codes is None:
        ignore_codes = ['E121', 'E123', 'E126', 'E133', 'E226', 'E241', 'E242', 'E704', 'E501', 'W']
    # Run the style-guide checker quietly over the given path.
    checker = pycodestyle.StyleGuide(quiet=True, ignore=ignore_codes)
    report = checker.check_files([code_path])
    # Score in [0, 1]: 1.0 means no violations per physical line.
    total_lines = max(1.0, report.counters['physical lines'])
    error_rate = min(max(report.total_errors / total_lines, 0.0), 1.0)
    results[CODE_STYLE] = 1.0 - error_rate
| 31.708333 | 117 | 0.672799 |
import pycodestyle
from metrics.report_keys import CODE_STYLE
def code_style(code_path, results, ignore_codes=None):
    """
    Check code style with pycodestyle and store a [0, 1] score in results.

    :param code_path: Path to the source code.
    :param results: Dictionary with the results (CODE_STYLE key is written).
    :param ignore_codes: List of PEP8 codes to ignore.
    """
    if ignore_codes is None:
        ignore_codes = ['E121', 'E123', 'E126', 'E133', 'E226', 'E241', 'E242', 'E704', 'E501', 'W']
    style_guide = pycodestyle.StyleGuide(quiet=True, ignore=ignore_codes)
    report = style_guide.check_files([code_path])
    results[CODE_STYLE] = 1.0 - max(min(report.total_errors / max(1.0, report.counters['physical lines']), 1.0), 0.0)
| true | true |
f7375d8903388f8c6bb3749a4b61521de31f66d6 | 9,993 | py | Python | lib/spack/spack/test/cmd/view.py | RemoteConnectionManager/spack | f2967b6c16effd26ce007cf86cadbb645c574f50 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 3 | 2019-06-27T13:26:50.000Z | 2019-07-01T16:24:54.000Z | lib/spack/spack/test/cmd/view.py | openbiox/spack | bb6ec7fb40c14b37e094a860e3625af53f633174 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 75 | 2016-07-27T11:43:00.000Z | 2020-12-08T15:56:53.000Z | lib/spack/spack/test/cmd/view.py | openbiox/spack | bb6ec7fb40c14b37e094a860e3625af53f633174 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 8 | 2015-10-16T13:51:49.000Z | 2021-10-18T13:58:03.000Z | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.main import SpackCommand
import os.path
import pytest
import spack.util.spack_yaml as s_yaml
activate = SpackCommand('activate')
extensions = SpackCommand('extensions')
install = SpackCommand('install')
view = SpackCommand('view')
def create_projection_file(tmpdir, projection):
    """Write ``projection`` to a projection.yaml under ``tmpdir`` and return its path."""
    # Wrap bare mappings in the expected top-level 'projections' key.
    if 'projections' not in projection:
        projection = {'projections': projection}
    target = tmpdir.mkdir('projection').join('projection.yaml')
    target.write(s_yaml.dump(projection))
    return target
@pytest.mark.parametrize('cmd', ['hardlink', 'symlink', 'hard', 'add'])
def test_view_link_type(
        tmpdir, mock_packages, mock_archive, mock_fetch, config,
        install_mockery, cmd):
    # 'hard*' commands must materialize real paths; the other commands
    # create a symlink at the package prefix.
    install('libdwarf')
    viewpath = str(tmpdir.mkdir('view_{0}'.format(cmd)))
    view(cmd, viewpath, 'libdwarf')
    package_prefix = os.path.join(viewpath, 'libdwarf')
    assert os.path.exists(package_prefix)
    assert os.path.islink(package_prefix) == (not cmd.startswith('hard'))
@pytest.mark.parametrize('cmd', ['hardlink', 'symlink', 'hard', 'add'])
def test_view_projections(
        tmpdir, mock_packages, mock_archive, mock_fetch, config,
        install_mockery, cmd):
    # A '{name}-{version}' projection must place the package under a
    # name-version directory inside the view.
    install('libdwarf@20130207')
    viewpath = str(tmpdir.mkdir('view_{0}'.format(cmd)))
    view_projection = {
        'projections': {
            'all': '{name}-{version}'
        }
    }
    projection_file = create_projection_file(tmpdir, view_projection)
    view(cmd, viewpath, '--projection-file={0}'.format(projection_file),
         'libdwarf')
    package_prefix = os.path.join(viewpath, 'libdwarf-20130207/libdwarf')
    assert os.path.exists(package_prefix)
    assert os.path.islink(package_prefix) == (not cmd.startswith('hard'))
def test_view_multiple_projections(
        tmpdir, mock_packages, mock_archive, mock_fetch, config,
        install_mockery):
    # The more specific 'extendee' projection must win over 'all' when it
    # is listed first in the projection file.
    install('libdwarf@20130207')
    install('extendee@1.0%gcc')
    viewpath = str(tmpdir.mkdir('view'))
    view_projection = s_yaml.syaml_dict(
        [('extendee', '{name}-{compiler.name}'),
         ('all', '{name}-{version}')]
    )
    projection_file = create_projection_file(tmpdir, view_projection)
    view('add', viewpath, '--projection-file={0}'.format(projection_file),
         'libdwarf', 'extendee')
    libdwarf_prefix = os.path.join(viewpath, 'libdwarf-20130207/libdwarf')
    extendee_prefix = os.path.join(viewpath, 'extendee-gcc/bin')
    assert os.path.exists(libdwarf_prefix)
    assert os.path.exists(extendee_prefix)
def test_view_multiple_projections_all_first(
        tmpdir, mock_packages, mock_archive, mock_fetch, config,
        install_mockery):
    # Even with 'all' listed first, the more specific 'extendee' projection
    # must still apply to the extendee package.
    install('libdwarf@20130207')
    install('extendee@1.0%gcc')
    viewpath = str(tmpdir.mkdir('view'))
    view_projection = s_yaml.syaml_dict(
        [('all', '{name}-{version}'),
         ('extendee', '{name}-{compiler.name}')]
    )
    projection_file = create_projection_file(tmpdir, view_projection)
    view('add', viewpath, '--projection-file={0}'.format(projection_file),
         'libdwarf', 'extendee')
    libdwarf_prefix = os.path.join(viewpath, 'libdwarf-20130207/libdwarf')
    extendee_prefix = os.path.join(viewpath, 'extendee-gcc/bin')
    assert os.path.exists(libdwarf_prefix)
    assert os.path.exists(extendee_prefix)
def test_view_external(
        tmpdir, mock_packages, mock_archive, mock_fetch, config,
        install_mockery):
    # External packages are not linked into views; the command must report
    # that it skipped them.
    install('externaltool')
    viewpath = str(tmpdir.mkdir('view'))
    output = view('symlink', viewpath, 'externaltool')
    assert 'Skipping external package: externaltool' in output
def test_view_extension(
        tmpdir, mock_packages, mock_archive, mock_fetch, config,
        install_mockery):
    # Adding an extension to a view activates it in the view only, not
    # globally; the extension's files must appear under the view.
    install('extendee')
    install('extension1@1.0')
    install('extension1@2.0')
    install('extension2@1.0')
    viewpath = str(tmpdir.mkdir('view'))
    view('symlink', viewpath, 'extension1@1.0')
    all_installed = extensions('--show', 'installed', 'extendee')
    assert 'extension1@1.0' in all_installed
    assert 'extension1@2.0' in all_installed
    assert 'extension2@1.0' in all_installed
    global_activated = extensions('--show', 'activated', 'extendee')
    assert 'extension1@1.0' not in global_activated
    assert 'extension1@2.0' not in global_activated
    assert 'extension2@1.0' not in global_activated
    view_activated = extensions('--show', 'activated',
                                '-v', viewpath,
                                'extendee')
    assert 'extension1@1.0' in view_activated
    assert 'extension1@2.0' not in view_activated
    assert 'extension2@1.0' not in view_activated
    assert os.path.exists(os.path.join(viewpath, 'bin', 'extension1'))
def test_view_extension_projection(
        tmpdir, mock_packages, mock_archive, mock_fetch, config,
        install_mockery):
    # Extension activation must honor the view's projection: files land
    # under the projected extendee directory.
    install('extendee@1.0')
    install('extension1@1.0')
    install('extension1@2.0')
    install('extension2@1.0')
    viewpath = str(tmpdir.mkdir('view'))
    view_projection = {'all': '{name}-{version}'}
    projection_file = create_projection_file(tmpdir, view_projection)
    view('symlink', viewpath, '--projection-file={0}'.format(projection_file),
         'extension1@1.0')
    all_installed = extensions('--show', 'installed', 'extendee')
    assert 'extension1@1.0' in all_installed
    assert 'extension1@2.0' in all_installed
    assert 'extension2@1.0' in all_installed
    global_activated = extensions('--show', 'activated', 'extendee')
    assert 'extension1@1.0' not in global_activated
    assert 'extension1@2.0' not in global_activated
    assert 'extension2@1.0' not in global_activated
    view_activated = extensions('--show', 'activated',
                                '-v', viewpath,
                                'extendee')
    assert 'extension1@1.0' in view_activated
    assert 'extension1@2.0' not in view_activated
    assert 'extension2@1.0' not in view_activated
    assert os.path.exists(os.path.join(viewpath, 'extendee-1.0',
                                       'bin', 'extension1'))
def test_view_extension_remove(
tmpdir, mock_packages, mock_archive, mock_fetch, config,
install_mockery):
install('extendee')
install('extension1@1.0')
viewpath = str(tmpdir.mkdir('view'))
view('symlink', viewpath, 'extension1@1.0')
view('remove', viewpath, 'extension1@1.0')
all_installed = extensions('--show', 'installed', 'extendee')
assert 'extension1@1.0' in all_installed
global_activated = extensions('--show', 'activated', 'extendee')
assert 'extension1@1.0' not in global_activated
view_activated = extensions('--show', 'activated',
'-v', viewpath,
'extendee')
assert 'extension1@1.0' not in view_activated
assert not os.path.exists(os.path.join(viewpath, 'bin', 'extension1'))
def test_view_extension_conflict(
tmpdir, mock_packages, mock_archive, mock_fetch, config,
install_mockery):
install('extendee')
install('extension1@1.0')
install('extension1@2.0')
viewpath = str(tmpdir.mkdir('view'))
view('symlink', viewpath, 'extension1@1.0')
output = view('symlink', viewpath, 'extension1@2.0')
assert 'Package conflict detected' in output
def test_view_extension_conflict_ignored(
tmpdir, mock_packages, mock_archive, mock_fetch, config,
install_mockery):
install('extendee')
install('extension1@1.0')
install('extension1@2.0')
viewpath = str(tmpdir.mkdir('view'))
view('symlink', viewpath, 'extension1@1.0')
view('symlink', viewpath, '-i', 'extension1@2.0')
with open(os.path.join(viewpath, 'bin', 'extension1'), 'r') as fin:
assert fin.read() == '1.0'
def test_view_extension_global_activation(
tmpdir, mock_packages, mock_archive, mock_fetch, config,
install_mockery):
install('extendee')
install('extension1@1.0')
install('extension1@2.0')
install('extension2@1.0')
viewpath = str(tmpdir.mkdir('view'))
view('symlink', viewpath, 'extension1@1.0')
activate('extension1@2.0')
activate('extension2@1.0')
all_installed = extensions('--show', 'installed', 'extendee')
assert 'extension1@1.0' in all_installed
assert 'extension1@2.0' in all_installed
assert 'extension2@1.0' in all_installed
global_activated = extensions('--show', 'activated', 'extendee')
assert 'extension1@1.0' not in global_activated
assert 'extension1@2.0' in global_activated
assert 'extension2@1.0' in global_activated
view_activated = extensions('--show', 'activated',
'-v', viewpath,
'extendee')
assert 'extension1@1.0' in view_activated
assert 'extension1@2.0' not in view_activated
assert 'extension2@1.0' not in view_activated
assert os.path.exists(os.path.join(viewpath, 'bin', 'extension1'))
assert not os.path.exists(os.path.join(viewpath, 'bin', 'extension2'))
def test_view_extendee_with_global_activations(
tmpdir, mock_packages, mock_archive, mock_fetch, config,
install_mockery):
install('extendee')
install('extension1@1.0')
install('extension1@2.0')
install('extension2@1.0')
viewpath = str(tmpdir.mkdir('view'))
activate('extension1@2.0')
output = view('symlink', viewpath, 'extension1@1.0')
assert 'Error: Globally activated extensions cannot be used' in output
def test_view_fails_with_missing_projections_file(tmpdir):
viewpath = str(tmpdir.mkdir('view'))
projection_file = os.path.join(str(tmpdir), 'nonexistent')
with pytest.raises(SystemExit):
view('symlink', '--projection-file', projection_file, viewpath, 'foo')
| 37.996198 | 78 | 0.673471 |
from spack.main import SpackCommand
import os.path
import pytest
import spack.util.spack_yaml as s_yaml
# CLI entry points under test, wrapped as callables by SpackCommand.
activate, extensions, install, view = (
    SpackCommand(name)
    for name in ('activate', 'extensions', 'install', 'view'))
def create_projection_file(tmpdir, projection):
    """Dump *projection* to a YAML file under *tmpdir* and return its path.

    A bare mapping is wrapped in the 'projections' key expected by the
    view command's --projection-file option.
    """
    payload = projection if 'projections' in projection else {'projections': projection}
    target = tmpdir.mkdir('projection').join('projection.yaml')
    target.write(s_yaml.dump(payload))
    return target
@pytest.mark.parametrize('cmd', ['hardlink', 'symlink', 'hard', 'add'])
def test_view_link_type(
        tmpdir, mock_packages, mock_archive, mock_fetch, config,
        install_mockery, cmd):
    """Each link-type subcommand materializes the package in the view."""
    install('libdwarf')
    view_dir = str(tmpdir.mkdir('view_{0}'.format(cmd)))
    view(cmd, view_dir, 'libdwarf')
    link_path = os.path.join(view_dir, 'libdwarf')
    assert os.path.exists(link_path)
    # Hard-link modes create real files; the other modes create symlinks.
    expect_symlink = not cmd.startswith('hard')
    assert os.path.islink(link_path) == expect_symlink
@pytest.mark.parametrize('cmd', ['hardlink', 'symlink', 'hard', 'add'])
def test_view_projections(
        tmpdir, mock_packages, mock_archive, mock_fetch, config,
        install_mockery, cmd):
    """A projection file controls where packages land inside the view."""
    install('libdwarf@20130207')
    view_dir = str(tmpdir.mkdir('view_{0}'.format(cmd)))
    spec_file = create_projection_file(
        tmpdir, {'projections': {'all': '{name}-{version}'}})
    view(cmd, view_dir, '--projection-file={0}'.format(spec_file), 'libdwarf')
    link_path = os.path.join(view_dir, 'libdwarf-20130207/libdwarf')
    assert os.path.exists(link_path)
    assert os.path.islink(link_path) == (not cmd.startswith('hard'))
def test_view_multiple_projections(
        tmpdir, mock_packages, mock_archive, mock_fetch, config,
        install_mockery):
    """A package-specific projection takes precedence over 'all'."""
    for spec in ('libdwarf@20130207', 'extendee@1.0%gcc'):
        install(spec)
    view_dir = str(tmpdir.mkdir('view'))
    # extendee is listed first, so it is projected by compiler name.
    projections = s_yaml.syaml_dict(
        [('extendee', '{name}-{compiler.name}'),
         ('all', '{name}-{version}')]
    )
    spec_file = create_projection_file(tmpdir, projections)
    view('add', view_dir, '--projection-file={0}'.format(spec_file),
         'libdwarf', 'extendee')
    for expected in ('libdwarf-20130207/libdwarf', 'extendee-gcc/bin'):
        assert os.path.exists(os.path.join(view_dir, expected))
def test_view_multiple_projections_all_first(
        tmpdir, mock_packages, mock_archive, mock_fetch, config,
        install_mockery):
    """The specific projection wins even when 'all' is listed first."""
    for spec in ('libdwarf@20130207', 'extendee@1.0%gcc'):
        install(spec)
    view_dir = str(tmpdir.mkdir('view'))
    projections = s_yaml.syaml_dict(
        [('all', '{name}-{version}'),
         ('extendee', '{name}-{compiler.name}')]
    )
    spec_file = create_projection_file(tmpdir, projections)
    view('add', view_dir, '--projection-file={0}'.format(spec_file),
         'libdwarf', 'extendee')
    for expected in ('libdwarf-20130207/libdwarf', 'extendee-gcc/bin'):
        assert os.path.exists(os.path.join(view_dir, expected))
def test_view_external(
        tmpdir, mock_packages, mock_archive, mock_fetch, config,
        install_mockery):
    """External packages are skipped (with a notice) when linking a view."""
    install('externaltool')
    view_dir = str(tmpdir.mkdir('view'))
    message = view('symlink', view_dir, 'externaltool')
    assert 'Skipping external package: externaltool' in message
def test_view_extension(
        tmpdir, mock_packages, mock_archive, mock_fetch, config,
        install_mockery):
    """Linking an extension activates it in the view, not globally."""
    install('extendee')
    all_extensions = ('extension1@1.0', 'extension1@2.0', 'extension2@1.0')
    for spec in all_extensions:
        install(spec)
    view_dir = str(tmpdir.mkdir('view'))
    view('symlink', view_dir, 'extension1@1.0')
    all_installed = extensions('--show', 'installed', 'extendee')
    for spec in all_extensions:
        assert spec in all_installed
    # Nothing was activated globally ...
    global_activated = extensions('--show', 'activated', 'extendee')
    for spec in all_extensions:
        assert spec not in global_activated
    # ... but the linked version is active inside the view.
    view_activated = extensions('--show', 'activated',
                                '-v', view_dir,
                                'extendee')
    assert 'extension1@1.0' in view_activated
    assert 'extension1@2.0' not in view_activated
    assert 'extension2@1.0' not in view_activated
    assert os.path.exists(os.path.join(view_dir, 'bin', 'extension1'))
def test_view_extension_projection(
        tmpdir, mock_packages, mock_archive, mock_fetch, config,
        install_mockery):
    """Extension activation inside a view honors the projection layout."""
    for spec in ('extendee@1.0', 'extension1@1.0',
                 'extension1@2.0', 'extension2@1.0'):
        install(spec)
    view_dir = str(tmpdir.mkdir('view'))
    spec_file = create_projection_file(tmpdir, {'all': '{name}-{version}'})
    view('symlink', view_dir, '--projection-file={0}'.format(spec_file),
         'extension1@1.0')
    all_installed = extensions('--show', 'installed', 'extendee')
    for spec in ('extension1@1.0', 'extension1@2.0', 'extension2@1.0'):
        assert spec in all_installed
    global_activated = extensions('--show', 'activated', 'extendee')
    for spec in ('extension1@1.0', 'extension1@2.0', 'extension2@1.0'):
        assert spec not in global_activated
    view_activated = extensions('--show', 'activated',
                                '-v', view_dir,
                                'extendee')
    assert 'extension1@1.0' in view_activated
    assert 'extension1@2.0' not in view_activated
    assert 'extension2@1.0' not in view_activated
    # The file lands under the projected extendee prefix.
    assert os.path.exists(os.path.join(view_dir, 'extendee-1.0',
                                       'bin', 'extension1'))
def test_view_extension_remove(
        tmpdir, mock_packages, mock_archive, mock_fetch, config,
        install_mockery):
    """Removing an extension from a view deactivates and unlinks it."""
    install('extendee')
    install('extension1@1.0')
    view_dir = str(tmpdir.mkdir('view'))
    view('symlink', view_dir, 'extension1@1.0')
    view('remove', view_dir, 'extension1@1.0')
    # Still installed, but no longer activated anywhere.
    assert 'extension1@1.0' in extensions('--show', 'installed', 'extendee')
    assert 'extension1@1.0' not in extensions('--show', 'activated', 'extendee')
    view_activated = extensions('--show', 'activated',
                                '-v', view_dir,
                                'extendee')
    assert 'extension1@1.0' not in view_activated
    assert not os.path.exists(os.path.join(view_dir, 'bin', 'extension1'))
def test_view_extension_conflict(
        tmpdir, mock_packages, mock_archive, mock_fetch, config,
        install_mockery):
    """Linking two versions of the same extension reports a conflict."""
    install('extendee')
    install('extension1@1.0')
    install('extension1@2.0')
    view_dir = str(tmpdir.mkdir('view'))
    view('symlink', view_dir, 'extension1@1.0')
    message = view('symlink', view_dir, 'extension1@2.0')
    assert 'Package conflict detected' in message
def test_view_extension_conflict_ignored(
        tmpdir, mock_packages, mock_archive, mock_fetch, config,
        install_mockery):
    """With -i, a conflicting link is skipped and the first file wins."""
    for spec in ('extendee', 'extension1@1.0', 'extension1@2.0'):
        install(spec)
    view_dir = str(tmpdir.mkdir('view'))
    view('symlink', view_dir, 'extension1@1.0')
    view('symlink', view_dir, '-i', 'extension1@2.0')
    with open(os.path.join(view_dir, 'bin', 'extension1'), 'r') as fin:
        assert fin.read() == '1.0'
def test_view_extension_global_activation(
        tmpdir, mock_packages, mock_archive, mock_fetch, config,
        install_mockery):
    """Global activations and view activations stay independent."""
    install('extendee')
    all_extensions = ('extension1@1.0', 'extension1@2.0', 'extension2@1.0')
    for spec in all_extensions:
        install(spec)
    view_dir = str(tmpdir.mkdir('view'))
    view('symlink', view_dir, 'extension1@1.0')
    activate('extension1@2.0')
    activate('extension2@1.0')
    all_installed = extensions('--show', 'installed', 'extendee')
    for spec in all_extensions:
        assert spec in all_installed
    # Only the explicitly activated specs show up globally.
    global_activated = extensions('--show', 'activated', 'extendee')
    assert 'extension1@1.0' not in global_activated
    assert 'extension1@2.0' in global_activated
    assert 'extension2@1.0' in global_activated
    # The view only knows about the spec it linked.
    view_activated = extensions('--show', 'activated',
                                '-v', view_dir,
                                'extendee')
    assert 'extension1@1.0' in view_activated
    assert 'extension1@2.0' not in view_activated
    assert 'extension2@1.0' not in view_activated
    assert os.path.exists(os.path.join(view_dir, 'bin', 'extension1'))
    assert not os.path.exists(os.path.join(view_dir, 'bin', 'extension2'))
def test_view_extendee_with_global_activations(
        tmpdir, mock_packages, mock_archive, mock_fetch, config,
        install_mockery):
    """Linking an extension fails when a global activation already exists."""
    install('extendee')
    for spec in ('extension1@1.0', 'extension1@2.0', 'extension2@1.0'):
        install(spec)
    view_dir = str(tmpdir.mkdir('view'))
    activate('extension1@2.0')
    message = view('symlink', view_dir, 'extension1@1.0')
    assert 'Error: Globally activated extensions cannot be used' in message
def test_view_fails_with_missing_projections_file(tmpdir):
    """A nonexistent projection file aborts the view command."""
    view_dir = str(tmpdir.mkdir('view'))
    missing = os.path.join(str(tmpdir), 'nonexistent')
    with pytest.raises(SystemExit):
        view('symlink', '--projection-file', missing, view_dir, 'foo')
| true | true |
f73760737b4dc0d587f0e89b1c5b103eb368d834 | 10,400 | py | Python | App.py | AndreyBuyanov/ImageProcessing.Lb5.TextureSegmentation | 1509817ee2719573b04eba6f49154d7b38af853d | [
"MIT"
] | null | null | null | App.py | AndreyBuyanov/ImageProcessing.Lb5.TextureSegmentation | 1509817ee2719573b04eba6f49154d7b38af853d | [
"MIT"
] | null | null | null | App.py | AndreyBuyanov/ImageProcessing.Lb5.TextureSegmentation | 1509817ee2719573b04eba6f49154d7b38af853d | [
"MIT"
] | null | null | null | from PyQt5 import QtWidgets, uic
from PyQt5.QtGui import QImage, QPixmap, QPalette, qRgb, qGray
import sys
import numpy as np
from typing import Callable
from numbers import Number
def process_image(
        input_image: np.array,
        kernel_size: int,
        kernel_fn: Callable[[np.array], float]) -> np.array:
    """Apply *kernel_fn* to every kernel_size x kernel_size window of the
    image and collect the results in a float image of the same shape.

    The input is edge-padded by kernel_size // 2 on each side so every
    window is fully populated and the window is centered on its pixel.
    """
    pad = kernel_size // 2
    padded: np.array = np.pad(
        array=input_image,
        pad_width=((pad, pad), (pad, pad)),
        mode='edge')
    output: np.array = np.zeros(input_image.shape, dtype='float')
    height, width = output.shape
    for col in range(width):
        for row in range(height):
            window = padded[row:row + kernel_size, col:col + kernel_size]
            output[row][col] = kernel_fn(window)
    return output
def mean_fn(
        image_segment: np.array) -> float:
    """Average intensity of a window; the mean-based texture feature."""
    return float(image_segment.mean())
def std_fn(
        image_segment: np.array) -> float:
    """Standard deviation of a window (population std, ddof=0)."""
    return float(image_segment.std())
def convert_to_binary(
        input_image: np.array,
        threshold: int = 127) -> np.array:
    """Threshold an image to the two values {0, 255}.

    Pixels strictly above *threshold* become 255; all others become 0.
    The previous implementation chained two np.where passes that computed
    exactly this mapping; a single comparison is clearer and does one
    pass. The result is cast back to the input's dtype, matching what the
    old np.where chain produced for the float images used here.
    """
    max_val: int = 255
    min_val: int = 0
    binary: np.array = np.where(input_image > threshold, max_val, min_val)
    return binary.astype(input_image.dtype)
def normalize_image(
        input_image: np.array) -> np.array:
    """Linearly rescale an image to the range [0, 255] as float64.

    A constant image (max == min) maps to all zeros, exactly as the
    original per-pixel code did. Vectorized: the previous version looped
    over every pixel in Python, which is O(H*W) interpreter work for the
    same result.
    """
    input_min = input_image.min()
    value_range = input_image.max() - input_min
    if value_range == 0:
        # Degenerate image: nothing to scale, return the zero image.
        return np.zeros(input_image.shape)
    return (input_image - input_min) / value_range * 255.0
def fill_image(
        input_image: np.array,
        value: Number,
        replace_value: Number):
    """In place, replace every pixel equal to *value* with *replace_value*.

    Uses a vectorized boolean-mask assignment; the previous nested Python
    loops did the same comparison one pixel at a time. Returns None, as
    before (the array is modified in place).
    """
    input_image[input_image == value] = replace_value
def mark_objects(
        input_image: np.array) -> np.array:
    """Label connected components of a binarized image.

    Single raster scan: each nonzero pixel takes its label from the left
    (b) or top (c) neighbor, allocating a fresh id when both are
    background. When the two neighbors carry different labels, the two
    objects are merged on the spot by relabeling the whole partial result
    (via fill_image) — this can rescan the image per merge, so it is slow
    on pathological inputs but keeps labeling consistent in one pass.
    Returns a new array; the input is left untouched. Note that the first
    object gets id 2 (the counter is pre-incremented from 1).
    """
    result_image: np.array = np.copy(input_image)
    current_object_id = 1
    height, width = input_image.shape
    for y in range(height):
        for x in range(width):
            # c = label of the pixel above, b = label of the pixel to the
            # left; pixels outside the image count as background (0).
            if y == 0:
                c = 0
            else:
                c = result_image[y - 1, x]
            if x == 0:
                b = 0
            else:
                b = result_image[y, x - 1]
            a = result_image[y, x]
            if a == 0:
                # Background pixel: nothing to label.
                pass
            elif b == 0 and c == 0:
                # No labeled neighbor: start a new object.
                current_object_id += 1
                result_image[y, x] = current_object_id
            elif b != 0 and c == 0:
                result_image[y, x] = b
            elif b == 0 and c != 0:
                result_image[y, x] = c
            elif b != 0 and c != 0:
                if b == c:
                    result_image[y, x] = b
                else:
                    # Two distinct labels meet: keep b and merge every
                    # pixel labeled c into b.
                    result_image[y, x] = b
                    fill_image(
                        input_image=result_image,
                        value=c,
                        replace_value=b)
    return result_image
def delete_objects(
        input_image: np.array,
        object_size: int):
    """In place, erase (set to 0) every labeled object smaller than
    *object_size* pixels.

    Expects a label image such as mark_objects() produces. The smallest
    unique value (index 0 of np.unique — the background when zeros are
    present) is skipped, exactly as the original index-1-onward loop did.
    Vectorized with np.isin instead of a full-image Python scan per small
    label.
    """
    unique_mask, hist = np.unique(input_image, return_counts=True)
    small_labels = unique_mask[1:][hist[1:] < object_size]
    if small_labels.size:
        input_image[np.isin(input_image, small_labels)] = 0
class Ui(QtWidgets.QMainWindow):
    """Main window of the texture-segmentation tool.

    Loads the widget tree from the Qt Designer file 'Main.ui', wires up
    the menu actions and the Apply button, and drives the pipeline built
    from the module-level image functions (process -> normalize ->
    binarize -> label -> prune -> segment).
    """
    def __init__(self):
        super(Ui, self).__init__()
        # Build the widget tree from the Designer file; the named widgets
        # are then looked up with findChild.
        uic.loadUi('Main.ui', self)
        self.action_open = self.findChild(QtWidgets.QAction, 'actionOpen')
        self.action_open.triggered.connect(self.action_open_triggered)
        self.action_exit = self.findChild(QtWidgets.QAction, 'actionExit')
        self.action_exit.triggered.connect(self.action_exit_triggered)
        self.bt_apply = self.findChild(QtWidgets.QPushButton, 'btApply')
        self.bt_apply.clicked.connect(self.bt_apply_pressed)
        # Four image panes, each a QLabel hosted in a scroll area:
        # input, processed (filter response), object mask, and result.
        self.input_image_canvas = QtWidgets.QLabel()
        self.input_image_canvas.setBackgroundRole(QPalette.Base)
        self.input_image_canvas.setSizePolicy(
            QtWidgets.QSizePolicy.Ignored,
            QtWidgets.QSizePolicy.Ignored)
        self.input_image_canvas.setScaledContents(True)
        self.sa_input_image = self.findChild(QtWidgets.QScrollArea, 'saInputImage')
        self.sa_input_image.setWidget(self.input_image_canvas)
        self.sa_input_image.setWidgetResizable(False)
        self.processed_image_canvas = QtWidgets.QLabel()
        self.processed_image_canvas.setBackgroundRole(QPalette.Base)
        self.processed_image_canvas.setSizePolicy(
            QtWidgets.QSizePolicy.Ignored,
            QtWidgets.QSizePolicy.Ignored)
        self.processed_image_canvas.setScaledContents(True)
        self.sa_processed_image = self.findChild(QtWidgets.QScrollArea, 'saProcessedImage')
        self.sa_processed_image.setWidget(self.processed_image_canvas)
        self.sa_processed_image.setWidgetResizable(False)
        self.mask_image_canvas = QtWidgets.QLabel()
        self.mask_image_canvas.setBackgroundRole(QPalette.Base)
        self.mask_image_canvas.setSizePolicy(
            QtWidgets.QSizePolicy.Ignored,
            QtWidgets.QSizePolicy.Ignored)
        self.mask_image_canvas.setScaledContents(True)
        self.sa_mask_image = self.findChild(QtWidgets.QScrollArea, 'saMask')
        self.sa_mask_image.setWidget(self.mask_image_canvas)
        self.sa_mask_image.setWidgetResizable(False)
        self.segmented_image_canvas = QtWidgets.QLabel()
        self.segmented_image_canvas.setBackgroundRole(QPalette.Base)
        self.segmented_image_canvas.setSizePolicy(
            QtWidgets.QSizePolicy.Ignored,
            QtWidgets.QSizePolicy.Ignored)
        self.segmented_image_canvas.setScaledContents(True)
        self.sa_segmented_image = self.findChild(QtWidgets.QScrollArea, 'saSegmentedImage')
        self.sa_segmented_image.setWidget(self.segmented_image_canvas)
        self.sa_segmented_image.setWidgetResizable(False)
        # Pipeline controls: feature selector and numeric parameters.
        # Item order must match the dispatch in bt_apply_pressed
        # (0 -> mean_fn, 1 -> std_fn).
        self.cb_method = self.findChild(QtWidgets.QComboBox, 'cbMethod')
        self.cb_method.addItems(['Mean', 'Std'])
        self.le_kernel_size = self.findChild(QtWidgets.QLineEdit, 'leKernelSize')
        self.le_threshold = self.findChild(QtWidgets.QLineEdit, 'leThreshold')
        self.le_delete_objects = self.findChild(QtWidgets.QLineEdit, 'leDeleteObjects')
        self.show()
    def action_open_triggered(self):
        """File->Open: load an image, convert to 8-bit grayscale, show it."""
        options = QtWidgets.QFileDialog.Options()
        file_name, _ = QtWidgets.QFileDialog.\
            getOpenFileName(self,
                            'QFileDialog.getOpenFileName()',
                            '',
                            'Images (*.png *.jpeg *.jpg *.bmp *.gif)',
                            options=options)
        if file_name:
            image = QImage(file_name).convertToFormat(QImage.Format_Grayscale8)
            if image.isNull():
                QtWidgets.QMessageBox.\
                    information(self,
                                "Texture segmentation",
                                "Cannot load %s." % file_name)
                return
            self.input_image_canvas.setPixmap(QPixmap.fromImage(image))
            self.input_image_canvas.adjustSize()
    def action_exit_triggered(self):
        """File->Exit: close the main window."""
        self.close()
    def bt_apply_pressed(self):
        """Run the full segmentation pipeline and refresh the three
        output panes.

        NOTE(review): the QLineEdit values go straight through int(), so
        non-numeric input raises ValueError here — no validation is done.
        """
        method = self.cb_method.currentIndex()
        kernel_size = int(self.le_kernel_size.text())
        threshold = int(self.le_threshold.text())
        object_size = int(self.le_delete_objects.text())
        # Copy the displayed pixmap into a float numpy array, pixel by pixel.
        input_q_image = self.input_image_canvas.pixmap().toImage().convertToFormat(QImage.Format_Grayscale8)
        input_image = np.zeros((input_q_image.height(), input_q_image.width()), dtype='float')
        for (y, x), _ in np.ndenumerate(input_image):
            input_image[y, x] = qGray(input_q_image.pixel(x, y))
        # Combo index selects the texture feature; anything else is a no-op.
        if method == 0:
            kernel_fn = mean_fn
        elif method == 1:
            kernel_fn = std_fn
        else:
            return
        processed_image: np.array = process_image(
            input_image=input_image,
            kernel_size=kernel_size,
            kernel_fn=kernel_fn)
        normalized_image: np.array = normalize_image(input_image=processed_image)
        binarized_image: np.array = convert_to_binary(input_image=normalized_image, threshold=threshold)
        marked_image = mark_objects(input_image=binarized_image)
        delete_objects(
            input_image=marked_image,
            object_size=object_size)
        # Keep original pixels only where a labeled object survived.
        segmented_image = np.copy(input_image)
        for (y, x), _ in np.ndenumerate(segmented_image):
            if marked_image[y, x] == 0:
                segmented_image[y, x] = 0
        self.set_image(
            input_image=normalized_image,
            canvas=self.processed_image_canvas)
        self.set_image(
            input_image=normalize_image(
                input_image=marked_image),
            canvas=self.mask_image_canvas)
        self.set_image(
            input_image=segmented_image,
            canvas=self.segmented_image_canvas)
    @staticmethod
    def set_image(input_image: np.array, canvas: QtWidgets.QLineEdit):
        """Render a 2-D grayscale numpy array into *canvas* as an RGB pixmap.

        Values are truncated to int per pixel; callers are expected to
        pass data already scaled to 0..255.
        """
        height, width = input_image.shape
        q_image = QImage(width, height, QImage.Format_RGB32)
        for y in range(height):
            for x in range(width):
                pixel = int(input_image[y, x])
                q_image.setPixel(x, y, qRgb(pixel, pixel, pixel))
        canvas.setPixmap(QPixmap.fromImage(q_image))
        canvas.adjustSize()
# Script entry point: create the Qt application and run its event loop
# (the Ui constructor calls show() itself).
if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
    window = Ui()
    app.exec_()
| 38.095238 | 108 | 0.6275 | from PyQt5 import QtWidgets, uic
from PyQt5.QtGui import QImage, QPixmap, QPalette, qRgb, qGray
import sys
import numpy as np
from typing import Callable
from numbers import Number
def process_image(
input_image: np.array,
kernel_size: int,
kernel_fn: Callable[[np.array], float]) -> np.array:
padding_width: int = kernel_size // 2
padding_height: int = kernel_size // 2
padding = ((padding_height, padding_height), (padding_width, padding_width))
input_image_padding: np.array = np.pad(
array=input_image,
pad_width=padding,
mode='edge')
result_image: np.array = np.zeros(input_image.shape, dtype='float')
image_height, image_width = result_image.shape
for image_x in range(image_width):
for image_y in range(image_height):
x_pos_begin = image_x
x_pos_end = image_x + kernel_size
y_pos_begin = image_y
y_pos_end = image_y + kernel_size
image_segment: np.array = input_image_padding[y_pos_begin:y_pos_end, x_pos_begin:x_pos_end]
result_image[image_y][image_x] = kernel_fn(image_segment)
return result_image
def mean_fn(
image_segment: np.array) -> float:
return float(np.mean(image_segment))
def std_fn(
image_segment: np.array) -> float:
return float(np.std(image_segment))
def convert_to_binary(
input_image: np.array,
threshold: int = 127) -> np.array:
max_val: int = 255
min_val: int = 0
initial_conv: np.array = np.where((input_image <= threshold), input_image, max_val)
final_conv: np.array = np.where((initial_conv > threshold), initial_conv, min_val)
return final_conv
def normalize_image(
input_image: np.array) -> np.array:
result_image: np.array = np.zeros(input_image.shape)
input_max = input_image.max()
input_min = input_image.min()
input_range = input_max - input_min
height, width = input_image.shape
for y in range(height):
for x in range(width):
input_value = input_image[y][x]
scaled_input_value = (input_value - input_min) / input_range if input_range != 0 else 0
result_image[y][x] = scaled_input_value * 255.0
return result_image
def fill_image(
input_image: np.array,
value: Number,
replace_value: Number):
height, width = input_image.shape
for y in range(height):
for x in range(width):
if input_image[y, x] == value:
input_image[y, x] = replace_value
def mark_objects(
input_image: np.array) -> np.array:
result_image: np.array = np.copy(input_image)
current_object_id = 1
height, width = input_image.shape
for y in range(height):
for x in range(width):
if y == 0:
c = 0
else:
c = result_image[y - 1, x]
if x == 0:
b = 0
else:
b = result_image[y, x - 1]
a = result_image[y, x]
if a == 0:
pass
elif b == 0 and c == 0:
current_object_id += 1
result_image[y, x] = current_object_id
elif b != 0 and c == 0:
result_image[y, x] = b
elif b == 0 and c != 0:
result_image[y, x] = c
elif b != 0 and c != 0:
if b == c:
result_image[y, x] = b
else:
result_image[y, x] = b
fill_image(
input_image=result_image,
value=c,
replace_value=b)
return result_image
def delete_objects(
input_image: np.array,
object_size: int):
unique_mask, hist = np.unique(input_image, return_counts=True)
for i in range(1, len(unique_mask)):
if hist[i] < object_size:
for (y, x), _ in np.ndenumerate(input_image):
if input_image[y, x] == unique_mask[i]:
input_image[y, x] = 0
class Ui(QtWidgets.QMainWindow):
def __init__(self):
super(Ui, self).__init__()
uic.loadUi('Main.ui', self)
self.action_open = self.findChild(QtWidgets.QAction, 'actionOpen')
self.action_open.triggered.connect(self.action_open_triggered)
self.action_exit = self.findChild(QtWidgets.QAction, 'actionExit')
self.action_exit.triggered.connect(self.action_exit_triggered)
self.bt_apply = self.findChild(QtWidgets.QPushButton, 'btApply')
self.bt_apply.clicked.connect(self.bt_apply_pressed)
self.input_image_canvas = QtWidgets.QLabel()
self.input_image_canvas.setBackgroundRole(QPalette.Base)
self.input_image_canvas.setSizePolicy(
QtWidgets.QSizePolicy.Ignored,
QtWidgets.QSizePolicy.Ignored)
self.input_image_canvas.setScaledContents(True)
self.sa_input_image = self.findChild(QtWidgets.QScrollArea, 'saInputImage')
self.sa_input_image.setWidget(self.input_image_canvas)
self.sa_input_image.setWidgetResizable(False)
self.processed_image_canvas = QtWidgets.QLabel()
self.processed_image_canvas.setBackgroundRole(QPalette.Base)
self.processed_image_canvas.setSizePolicy(
QtWidgets.QSizePolicy.Ignored,
QtWidgets.QSizePolicy.Ignored)
self.processed_image_canvas.setScaledContents(True)
self.sa_processed_image = self.findChild(QtWidgets.QScrollArea, 'saProcessedImage')
self.sa_processed_image.setWidget(self.processed_image_canvas)
self.sa_processed_image.setWidgetResizable(False)
self.mask_image_canvas = QtWidgets.QLabel()
self.mask_image_canvas.setBackgroundRole(QPalette.Base)
self.mask_image_canvas.setSizePolicy(
QtWidgets.QSizePolicy.Ignored,
QtWidgets.QSizePolicy.Ignored)
self.mask_image_canvas.setScaledContents(True)
self.sa_mask_image = self.findChild(QtWidgets.QScrollArea, 'saMask')
self.sa_mask_image.setWidget(self.mask_image_canvas)
self.sa_mask_image.setWidgetResizable(False)
self.segmented_image_canvas = QtWidgets.QLabel()
self.segmented_image_canvas.setBackgroundRole(QPalette.Base)
self.segmented_image_canvas.setSizePolicy(
QtWidgets.QSizePolicy.Ignored,
QtWidgets.QSizePolicy.Ignored)
self.segmented_image_canvas.setScaledContents(True)
self.sa_segmented_image = self.findChild(QtWidgets.QScrollArea, 'saSegmentedImage')
self.sa_segmented_image.setWidget(self.segmented_image_canvas)
self.sa_segmented_image.setWidgetResizable(False)
self.cb_method = self.findChild(QtWidgets.QComboBox, 'cbMethod')
self.cb_method.addItems(['Mean', 'Std'])
self.le_kernel_size = self.findChild(QtWidgets.QLineEdit, 'leKernelSize')
self.le_threshold = self.findChild(QtWidgets.QLineEdit, 'leThreshold')
self.le_delete_objects = self.findChild(QtWidgets.QLineEdit, 'leDeleteObjects')
self.show()
def action_open_triggered(self):
options = QtWidgets.QFileDialog.Options()
file_name, _ = QtWidgets.QFileDialog.\
getOpenFileName(self,
'QFileDialog.getOpenFileName()',
'',
'Images (*.png *.jpeg *.jpg *.bmp *.gif)',
options=options)
if file_name:
image = QImage(file_name).convertToFormat(QImage.Format_Grayscale8)
if image.isNull():
QtWidgets.QMessageBox.\
information(self,
"Texture segmentation",
"Cannot load %s." % file_name)
return
self.input_image_canvas.setPixmap(QPixmap.fromImage(image))
self.input_image_canvas.adjustSize()
def action_exit_triggered(self):
self.close()
def bt_apply_pressed(self):
method = self.cb_method.currentIndex()
kernel_size = int(self.le_kernel_size.text())
threshold = int(self.le_threshold.text())
object_size = int(self.le_delete_objects.text())
input_q_image = self.input_image_canvas.pixmap().toImage().convertToFormat(QImage.Format_Grayscale8)
input_image = np.zeros((input_q_image.height(), input_q_image.width()), dtype='float')
for (y, x), _ in np.ndenumerate(input_image):
input_image[y, x] = qGray(input_q_image.pixel(x, y))
if method == 0:
kernel_fn = mean_fn
elif method == 1:
kernel_fn = std_fn
else:
return
processed_image: np.array = process_image(
input_image=input_image,
kernel_size=kernel_size,
kernel_fn=kernel_fn)
normalized_image: np.array = normalize_image(input_image=processed_image)
binarized_image: np.array = convert_to_binary(input_image=normalized_image, threshold=threshold)
marked_image = mark_objects(input_image=binarized_image)
delete_objects(
input_image=marked_image,
object_size=object_size)
segmented_image = np.copy(input_image)
for (y, x), _ in np.ndenumerate(segmented_image):
if marked_image[y, x] == 0:
segmented_image[y, x] = 0
self.set_image(
input_image=normalized_image,
canvas=self.processed_image_canvas)
self.set_image(
input_image=normalize_image(
input_image=marked_image),
canvas=self.mask_image_canvas)
self.set_image(
input_image=segmented_image,
canvas=self.segmented_image_canvas)
@staticmethod
def set_image(input_image: np.array, canvas: QtWidgets.QLineEdit):
height, width = input_image.shape
q_image = QImage(width, height, QImage.Format_RGB32)
for y in range(height):
for x in range(width):
pixel = int(input_image[y, x])
q_image.setPixel(x, y, qRgb(pixel, pixel, pixel))
canvas.setPixmap(QPixmap.fromImage(q_image))
canvas.adjustSize()
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
window = Ui()
app.exec_()
| true | true |
f737607743fca96e5afb41d35f748752886a28b7 | 764 | py | Python | test/test_add_project.py | liliasapurina/python_training_mantiss | 6b986247e26d5eb020e265402760c41679fd878d | [
"Apache-2.0"
] | null | null | null | test/test_add_project.py | liliasapurina/python_training_mantiss | 6b986247e26d5eb020e265402760c41679fd878d | [
"Apache-2.0"
] | null | null | null | test/test_add_project.py | liliasapurina/python_training_mantiss | 6b986247e26d5eb020e265402760c41679fd878d | [
"Apache-2.0"
] | null | null | null | __author__ = '1'
from model.project import Project
import pytest
import random
import string
def random_string(prefix, maxlen):
symbols = string.ascii_letters + string.digits + string.punctuation + " "*10
return prefix + "".join([random.choice(symbols) for i in range(random.randrange(maxlen))])
# One randomized project per run keeps the suite fast while still
# exercising name and description handling.
testdata = [
    Project(name=random_string("name", 10),
            description=random_string("footer", 20))
]
@pytest.mark.parametrize("current_project",testdata)
def test_add_project(app, current_project):
app.session.login("administrator","root")
old_projects = app.project.get_project_list()
app.project.create(current_project)
new_projects = app.project.get_project_list()
assert len(old_projects) + 1 == len(new_projects)
| 30.56 | 94 | 0.742147 | __author__ = '1'
from model.project import Project
import pytest
import random
import string
def random_string(prefix, maxlen):
symbols = string.ascii_letters + string.digits + string.punctuation + " "*10
return prefix + "".join([random.choice(symbols) for i in range(random.randrange(maxlen))])
testdata =[
Project(name=random_string("name",10),description=random_string("footer",20))
for i in range(1)
]
@pytest.mark.parametrize("current_project",testdata)
def test_add_project(app, current_project):
app.session.login("administrator","root")
old_projects = app.project.get_project_list()
app.project.create(current_project)
new_projects = app.project.get_project_list()
assert len(old_projects) + 1 == len(new_projects)
| true | true |
f73760b1802ef12f7c8c9420d6a6d9de30cf5123 | 1,108 | py | Python | ants/users/tests/test_forms.py | seydimomogueye/ants.io | b2b5b3d8bdc8e8409b467ac295a690110a87654d | [
"MIT"
] | null | null | null | ants/users/tests/test_forms.py | seydimomogueye/ants.io | b2b5b3d8bdc8e8409b467ac295a690110a87654d | [
"MIT"
] | null | null | null | ants/users/tests/test_forms.py | seydimomogueye/ants.io | b2b5b3d8bdc8e8409b467ac295a690110a87654d | [
"MIT"
] | null | null | null | import pytest
from ants.users.forms import UserCreationForm
from ants.users.tests.factories import UserFactory
pytestmark = pytest.mark.django_db
class TestUserCreationForm:
    """Form-level validation tests for the custom UserCreationForm."""

    def test_clean_username(self):
        """A username can be registered once; re-registering it fails."""
        # No user with these credentials exists in the database yet.
        proto_user = UserFactory.build()

        def payload():
            # Fresh dict per form instance, mirroring a browser POST.
            return {
                "username": proto_user.username,
                "password1": proto_user._password,
                "password2": proto_user._password,
            }

        form = UserCreationForm(payload())
        assert form.is_valid()
        assert form.clean_username() == proto_user.username

        # Persist the user, then attempt an identical second signup.
        form.save()
        form = UserCreationForm(payload())
        assert not form.is_valid()
        assert len(form.errors) == 1
        assert "username" in form.errors
| 26.380952 | 59 | 0.590253 | import pytest
from ants.users.forms import UserCreationForm
from ants.users.tests.factories import UserFactory
pytestmark = pytest.mark.django_db
class TestUserCreationForm:
    def test_clean_username(self):
        """A username can be registered once; a duplicate signup must fail."""
        # A user with proto_user's params does not exist in the DB yet.
        proto_user = UserFactory.build()
        form = UserCreationForm(
            {
                "username": proto_user.username,
                "password1": proto_user._password,
                "password2": proto_user._password,
            }
        )
        # Username is free, so the form validates.
        assert form.is_valid()
        assert form.clean_username() == proto_user.username
        # Persist the user so the username becomes taken.
        form.save()
        # Re-submitting identical data must now be rejected with exactly
        # one error, on the username field.
        form = UserCreationForm(
            {
                "username": proto_user.username,
                "password1": proto_user._password,
                "password2": proto_user._password,
            }
        )
        assert not form.is_valid()
        assert len(form.errors) == 1
        assert "username" in form.errors
| true | true |
f73760d02165dba1710b7ae42b9cc64a8cd13528 | 10,907 | py | Python | Transformer_training_V2.py | BUTSpeechFIT/ASR_Transformer | 814f720aa8265e9a377869f93dc65b251338e985 | [
"MIT"
] | 1 | 2020-10-25T00:21:40.000Z | 2020-10-25T00:21:40.000Z | Transformer_training_V2.py | BUTSpeechFIT/ASR_Transformer | 814f720aa8265e9a377869f93dc65b251338e985 | [
"MIT"
] | null | null | null | Transformer_training_V2.py | BUTSpeechFIT/ASR_Transformer | 814f720aa8265e9a377869f93dc65b251338e985 | [
"MIT"
] | 1 | 2021-09-08T10:32:55.000Z | 2021-09-08T10:32:55.000Z | #!/usr/bin/python
import sys
import os
import subprocess
from os.path import join, isdir
import torch
#*************************************************************************************************************************
####### Loading the Parser and default arguments
#import pdb;pdb.set_trace()
#sys.path.insert(0,'/mnt/matylda3/vydana/HOW2_EXP/Gen_V1/ATTNCODE/Trans_V1')
sys.path.insert(0,'/mnt/matylda3/vydana/HOW2_EXP/ASR_Transformer/ASR_TransV1')
import Transformer_arg
from Transformer_arg import parser
args = parser.parse_args()
#************************
import Set_gpus
from Set_gpus import Set_gpu
if args.gpu:
Set_gpu()
#import safe_gpu
#from safe_gpu import safe_gpu
#gpu_owner = safe_gpu.GPUOwner()
#***********************
import numpy as np
import fileinput
import json
import random
from itertools import chain
from numpy.random import permutation
##------------------------------------------------------------------
#import torch
from torch.autograd import Variable
#----------------------------------------
import torch.nn as nn
from torch import autograd, nn, optim
os.environ['PYTHONUNBUFFERED'] = '0'
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
from random import shuffle
from statistics import mean
import matplotlib
import matplotlib.pyplot as plt
plt.switch_backend('agg')
matplotlib.pyplot.viridis()
import glob
###save architecture for decoding
model_path_name=join(args.model_dir,'model_architecture_')
with open(model_path_name, 'w') as f:
json.dump(args.__dict__, f, indent=2)
print(args)
sys.path.insert(0,'/mnt/matylda3/vydana/HOW2_EXP/ASR_Transformer/ASR_TransV1')
# #####setting the gpus in the gpu cluster
# #**********************************
#import Set_gpus
#from Set_gpus import Set_gpu
#if args.gpu:
# Set_gpu()
###----------------------------------------
from Dataloader_for_AM_v2 import DataLoader
from utils__ import weights_init,reduce_learning_rate,read_as_list,gaussian_noise,plotting
#==============================================================
sys.path.insert(0,'/mnt/matylda3/vydana/HOW2_EXP/ASR_Transformer/ASR_TransV1')
from TRANSFORMER_ASR_V1 import Transformer
from Initializing_Transformer_ASR import Initialize_Att_model
from Transformer_Training_loop import train_val_model
from Load_sp_model import Load_sp_models
##==================================
#==============================================================
if not isdir(args.model_dir):
os.makedirs(args.model_dir)
png_dir=args.model_dir+'_png'
if not isdir(png_dir):
os.makedirs(png_dir)
############################################
#=============================================================
def main():
    """Full training driver for the Transformer ASR model.

    Per epoch: train for ``validate_interval`` batches, evaluate on up to
    ``args.max_val_examples`` dev utterances, checkpoint the model (the file
    name encodes train/val losses and validation CER), append bookkeeping
    files, and optionally early-stop.  Once validation CER stops improving,
    weight noise and SpecAugment are switched on as regularizers.
    """
    # Sentencepiece models used by the dataloaders to tokenize targets.
    Word_model = Load_sp_models(args.Word_model_path)
    Char_model = Load_sp_models(args.Char_model_path)
    # Model and optimizer, built from the global argument namespace.
    model, optimizer = Initialize_Att_model(args)
    # ------------------------------------------------------------------
    train_gen = DataLoader(files=glob.glob(args.data_dir + "train_splits/*"),
                           max_batch_label_len=args.max_batch_label_len,
                           max_batch_len=args.max_batch_len,
                           max_feat_len=args.max_feat_len,
                           max_label_len=args.max_label_len,
                           Word_model=Word_model,
                           Char_model=Char_model,
                           apply_cmvn=int(args.apply_cmvn))
    # Dev loader uses fixed, generous limits so long utterances are kept.
    dev_gen = DataLoader(files=glob.glob(args.data_dir + "dev_splits/*"),
                         max_batch_label_len=2000,
                         max_batch_len=args.max_batch_len,
                         max_feat_len=5000,
                         max_label_len=1000,
                         Word_model=Word_model,
                         Char_model=Char_model,
                         apply_cmvn=int(args.apply_cmvn))
    # Regularization flags; may be flipped on later by early-stopping logic.
    if args.spec_aug_flag == 2:
        weight_noise_flag = False
        spec_aug_flag = True
    else:
        weight_noise_flag = False
        spec_aug_flag = False
    val_history = np.zeros(args.nepochs)
    # ==================================================================
    for epoch in range(args.nepochs):
        # ---------------------------- training ----------------------------
        tr_CER = []
        tr_BPE_CER = []
        L_train_cost = []
        model.train()
        # Scale the interval by accm_grad so one "step" covers the batches
        # that contribute to a single optimizer update.
        validate_interval = int(args.validate_interval * args.accm_grad) if args.accm_grad > 0 else args.validate_interval
        for trs_no in range(validate_interval):
            B1 = train_gen.next()
            assert B1 is not None, "None should never come out of the DataLoader"
            Output_trainval_dict = train_val_model(smp_no=trs_no,
                                                   args=args,
                                                   model=model,
                                                   optimizer=optimizer,
                                                   data_dict=B1,
                                                   weight_noise_flag=weight_noise_flag,
                                                   spec_aug_flag=spec_aug_flag,
                                                   trainflag=True)
            # Collect per-batch loss / error rates from the result dict.
            L_train_cost.append(Output_trainval_dict.get('cost_cpu'))
            tr_CER.append(Output_trainval_dict.get('Char_cer'))
            tr_BPE_CER.append(Output_trainval_dict.get('Word_cer'))
            if trs_no % args.tr_disp == 0:
                print("tr ep:==:>", epoch, "sampl no:==:>", trs_no,
                      "train_cost==:>", mean(L_train_cost),
                      "CER:", mean(tr_CER), 'BPE_CER', mean(tr_BPE_CER), flush=True)
                if args.plot_fig_training:
                    # BUGFIX: attention_map used to be referenced here while
                    # its assignment was commented out, raising NameError
                    # whenever plotting was enabled.  Fetch it from the
                    # result dict, and skip plotting when it is absent.
                    attention_record = Output_trainval_dict.get('attention_record')
                    if attention_record is not None:
                        attention_map = attention_record.data.cpu().numpy()
                        plot_name = join(png_dir, 'train_epoch' + str(epoch) + '_attention_single_file_' + str(trs_no) + '.png')
                        plotting(plot_name, attention_map)
        # --------------------------- validation ---------------------------
        model.eval()
        Vl_CER = []
        Vl_BPE_CER = []
        L_val_cost = []
        val_examples = 0
        for vl_smp in range(args.max_val_examples):
            B1 = dev_gen.next()
            # BUGFIX: assert before touching B1 (it used to be dereferenced
            # with B1.get(...) before the None check).
            assert B1 is not None, "None should never come out of the DataLoader"
            smp_feat = B1.get('smp_feat')
            val_examples += smp_feat.shape[0]
            # Stop once enough dev utterances have been counted.
            if val_examples >= args.max_val_examples:
                break
            # NOTE(review): smp_no is fed the *training* loop counter trs_no,
            # exactly as in the original code — presumably only used for
            # display/bookkeeping when trainflag is False; confirm before
            # changing it to vl_smp.
            Val_Output_trainval_dict = train_val_model(smp_no=trs_no,
                                                       args=args,
                                                       model=model,
                                                       optimizer=optimizer,
                                                       data_dict=B1,
                                                       weight_noise_flag=False,
                                                       spec_aug_flag=False,
                                                       trainflag=False)
            L_val_cost.append(Val_Output_trainval_dict.get('cost_cpu'))
            Vl_CER.append(Val_Output_trainval_dict.get('Char_cer'))
            Vl_BPE_CER.append(Val_Output_trainval_dict.get('Word_cer'))
            if (vl_smp % args.vl_disp == 0) or (val_examples == args.max_val_examples - 1):
                print("val epoch:==:>", epoch, "val smp no:==:>", vl_smp,
                      "val_cost:==:>", mean(L_val_cost),
                      "CER:", mean(Vl_CER), 'BPE_CER', mean(Vl_BPE_CER), flush=True)
                if args.plot_fig_validation:
                    # Same NameError fix as in the training loop above.
                    attention_record = Val_Output_trainval_dict.get('attention_record')
                    if attention_record is not None:
                        attention_map = attention_record.data.cpu().numpy()
                        plot_name = join(png_dir, 'val_epoch' + str(epoch) + '_attention_single_file_' + str(vl_smp) + '.png')
                        plotting(plot_name, attention_map)
        # ------------------------ epoch bookkeeping ------------------------
        val_history[epoch] = mean(Vl_CER) * 100
        print("val_history:", val_history[:epoch + 1])
        # Checkpoint name encodes epoch, batch count and train/val metrics.
        ct = "model_epoch_" + str(epoch) + "_sample_" + str(trs_no) + "_" + str(mean(L_train_cost)) + "___" + str(mean(L_val_cost)) + "__" + str(mean(Vl_CER))
        print(ct)
        torch.save(model.state_dict(), join(args.model_dir, str(ct)))
        # Open/append/close immediately so other jobs see the files promptly.
        with open(args.weight_text_file, 'a+') as weight_saving_file:
            print(join(args.model_dir, str(ct)), file=weight_saving_file)
        with open(args.Res_text_file, 'a+') as Res_saving_file:
            print(float(mean(Vl_CER)), file=Res_saving_file)
        # --------------- early stopping / regularizer switch ---------------
        if args.early_stopping:
            A = val_history
            Non_zero_loss = A[A > 0]             # CERs of the epochs run so far
            min_cpts = np.argmin(Non_zero_loss)  # index of the best epoch
            Non_zero_len = len(Non_zero_loss)
            # No improvement for more than one epoch: turn on regularizers.
            if (Non_zero_len - min_cpts) > 1:
                weight_noise_flag = True
                spec_aug_flag = True
            # No improvement within the patience window: stop training.
            if (Non_zero_len - min_cpts) > args.early_stopping_patience:
                print("The model is early stopping........", "minimum value of model is:", min_cpts)
                exit(0)
#=======================================================
#=============================================================================================
# Script entry point: run the full training loop.
if __name__ == '__main__':
    main()
| 46.021097 | 169 | 0.473641 |
import sys
import os
import subprocess
from os.path import join, isdir
import torch
import fileinput
import json
import random
from itertools import chain
from numpy.random import permutation
orch import autograd, nn, optim
os.environ['PYTHONUNBUFFERED'] = '0'
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
from random import shuffle
from statistics import mean
import matplotlib
import matplotlib.pyplot as plt
plt.switch_backend('agg')
matplotlib.pyplot.viridis()
import glob
h open(model_path_name, 'w') as f:
json.dump(args.__dict__, f, indent=2)
print(args)
sys.path.insert(0,'/mnt/matylda3/vydana/HOW2_EXP/ASR_Transformer/ASR_TransV1')
rt Initialize_Att_model
from Transformer_Training_loop import train_val_model
from Load_sp_model import Load_sp_models
os.makedirs(args.model_dir)
png_dir=args.model_dir+'_png'
if not isdir(png_dir):
os.makedirs(png_dir)
max_label_len=1000,
Word_model=Word_model,
Char_model=Char_model,
apply_cmvn=int(args.apply_cmvn))
if args.spec_aug_flag==2:
weight_noise_flag=False
spec_aug_flag=True
else:
weight_noise_flag=False
spec_aug_flag=False
val_history=np.zeros(args.nepochs)
for epoch in range(args.nepochs):
=[]; tr_BPE_CER=[]; L_train_cost=[]
model.train();
validate_interval = int(args.validate_interval * args.accm_grad) if args.accm_grad>0 else args.validate_interval
for trs_no in range(validate_interval):
B1 = train_gen.next()
assert B1 is not None, "None should never come out of the DataLoader"
Output_trainval_dict=train_val_model(smp_no=trs_no,
args = args,
model = model,
optimizer = optimizer,
data_dict = B1,
weight_noise_flag=weight_noise_flag,
spec_aug_flag=spec_aug_flag,
trainflag = True)
L_train_cost.append(Output_trainval_dict.get('cost_cpu'))
tr_CER.append(Output_trainval_dict.get('Char_cer'))
tr_BPE_CER.append(Output_trainval_dict.get('Word_cer'))
if (trs_no%args.tr_disp==0):
print("tr ep:==:>",epoch,"sampl no:==:>",trs_no,"train_cost==:>",mean(L_train_cost),"CER:",mean(tr_CER),'BPE_CER',mean(tr_BPE_CER),flush=True)
if args.plot_fig_training:
plot_name=join(png_dir,'train_epoch'+str(epoch)+'_attention_single_file_'+str(trs_no)+'.png')
plotting(plot_name,attention_map)
Vl_CER=[]; Vl_BPE_CER=[];L_val_cost=[]
val_examples=0
for vl_smp in range(args.max_val_examples):
B1 = dev_gen.next()
smp_feat = B1.get('smp_feat')
val_examples+=smp_feat.shape[0]
assert B1 is not None, "None should never come out of the DataLoader"
s >= args.max_val_examples):
break;
Val_Output_trainval_dict=train_val_model(smp_no=trs_no,
args=args,
model = model,
optimizer = optimizer,
data_dict = B1,
weight_noise_flag=False,
spec_aug_flag=False,
trainflag = False)
L_val_cost.append(Val_Output_trainval_dict.get('cost_cpu'))
Vl_CER.append(Val_Output_trainval_dict.get('Char_cer'))
Vl_BPE_CER.append(Val_Output_trainval_dict.get('Word_cer'))
if (vl_smp%args.vl_disp==0) or (val_examples==args.max_val_examples-1):
print("val epoch:==:>",epoch,"val smp no:==:>",vl_smp,"val_cost:==:>",mean(L_val_cost),"CER:",mean(Vl_CER),'BPE_CER',mean(Vl_BPE_CER),flush=True)
if args.plot_fig_validation:
plot_name=join(png_dir,'val_epoch'+str(epoch)+'_attention_single_file_'+str(vl_smp)+'.png')
plotting(plot_name,attention_map)
val_history[epoch]=(mean(Vl_CER)*100)
print("val_history:",val_history[:epoch+1])
"+str(trs_no)+"_"+str(mean(L_train_cost))+"___"+str(mean(L_val_cost))+"__"+str(mean(Vl_CER))
print(ct)
torch.save(model.state_dict(),join(args.model_dir,str(ct)))
| true | true |
f7376150b6506bd81ac975c125267a31c4f46a52 | 10,894 | py | Python | app.py | mikkomaa/tunnusluvut | a8b07000d1d0ee071d63bee941f50c4a25318158 | [
"MIT"
] | null | null | null | app.py | mikkomaa/tunnusluvut | a8b07000d1d0ee071d63bee941f50c4a25318158 | [
"MIT"
] | null | null | null | app.py | mikkomaa/tunnusluvut | a8b07000d1d0ee071d63bee941f50c4a25318158 | [
"MIT"
] | null | null | null | import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import numpy as np
import fileutility
app = dash.Dash(__name__, suppress_callback_exceptions=True)
app.title = 'Tunnusluvut'
# For Heroku
server = app.server
#######################################
# Global variables
#######################################
df, units = fileutility.prepare_data()
yhtiot = sorted(df['Yhtiö'].unique())
tunnusluvut = {
'Sijoitukset': sorted((df.columns[[6, 7, 13, 14, 15, 16, 17, 18, 19]])),
'Muut': sorted((df.columns[[4, 5, 8, 9, 10, 11, 12, 20, 21, 22, 23, 24, 25]]))
}
vuodet = sorted(df['Vuosi'].unique())
# Companies' own colors for Elo, Etera, Ilmarinen, Varma, and Veritas
colors = {
'Alandia': '#b97454',
'Elo': '#FFD200',
'Etera': '#9ACD68',
'Fennia': '#69BDD1',
'Ilmarinen': '#003975',
'Varma': '#D10168',
'Veritas': '#00990F',
'background': '#F9FBFD'
}
#######################################
# Modals
#######################################
tietoa_yhtioista = html.Div(children=[
html.Section([
html.H6('Sulautumiset'),
html.P(['Nykyiset yhtiöt ovat Elo, Ilmarinen, Varma ja Veritas.',
html.Br(),
'Alandia sulautui Veritakseen 1.1.2019.',
html.Br(),
'Etera sulautui Ilmariseen 1.1.2018.',
html.Br(),
'Fennia sulautui Eloon 1.1.2014.'
])
]),
html.Section([
html.H6('Viralliset nimet'),
html.P(['Försäkringsaktiebolaget Pensions-Alandia',
html.Br(),
'Keskinäinen Työeläkevakuutusyhtiö Elo',
html.Br(),
'Keskinäinen Eläkevakuutusyhtiö Etera',
html.Br(),
'Keskinäinen vakuutusyhtiö Eläke-Fennia',
html.Br(),
'Keskinäinen Eläkevakuutusyhtiö Ilmarinen',
html.Br(),
'Keskinäinen työeläkevakuutusyhtiö Varma',
html.Br(),
'Pensionsförsäkringsaktiebolaget Veritas'
]),
html.P('''Elo toimi vuonna 2013 nimellä LähiTapiola Keskinäinen Eläkevakuutusyhtiö
ja sitä ennen nimellä Keskinäinen Eläkevakuutusyhtiö Tapiola.''')
])
])
tietoa_sivusta = html.Div(children=[
html.P('''Sivulla voit luoda kaavioita työeläkevakuutusyhtiöiden tunnusluvuista.
Hiirellä tai kaavion painikkeilla voit esimerkiksi suurentaa tai vierittää kaavioita.
Osaa tunnusluvuista ei ole kaikilta vuosilta.'''),
html.P(['Luvut ovat Finanssivalvonnan julkaisemista ',
html.A('tilastoista', href='https://www.finanssivalvonta.fi/tilastot/vakuutus/elakevakuutus/'),
' ja yhtiöiden tilinpäätöksistä. Kunkin vuoden luvut ovat tilanne 31.12. ',
'Lukujen pyöristystarkkuus vaihtelee.']),
html.P(['Sivun lähdekoodi on ',
html.A('GitHubissa', href='https://github.com/mikkomaa/tunnusluvut'),
'.']),
html.P('''Kysymyksiä ja kommentteja voit lähettää sähköpostilla mkkmatis at hotmail.com.''')
])
yhtio_info_button = dbc.Button('Tietoa yhtiöistä',
id='open-yhtio-modal',
outline=True,
style={
'margin-top': 20,
})
sivu_info_button = dbc.Button('Tietoa sivusta',
id='open-sivu-modal',
outline=True,
style={
'margin-top': 20,
'margin-left': 20,
})
yhtio_modal = html.Div(
[
yhtio_info_button,
dbc.Modal(
[
dbc.ModalHeader('Tietoa yhtiöistä'),
dbc.ModalBody(
children=[
tietoa_yhtioista,
]
),
dbc.ModalFooter(
dbc.Button('Sulje',
id='close-yhtio-modal',
outline=True)
),
],
id='yhtio-modal',
),
]
)
sivu_modal = html.Div(
[
sivu_info_button,
dbc.Modal(
[
dbc.ModalHeader('Tietoa sivusta'),
dbc.ModalBody(
children=[
tietoa_sivusta,
]
),
dbc.ModalFooter(
dbc.Button('Sulje',
id='close-sivu-modal',
outline=True)
),
],
id='sivu-modal',
),
]
)
#######################################
# Options for the user
#######################################
yhtio_checklist = html.Div([
html.H6('Yhtiöt'),
dcc.Checklist(
id='yhtio-checklist',
options=[{'label': i, 'value': i} for i in yhtiot],
value=['Elo', 'Ilmarinen', 'Varma', 'Veritas'],
labelStyle={'display': 'inline-block', 'margin-right': 10},
inputStyle={'margin-right': 2},
),
])
sijoitukset_dropdown = html.Div([
html.H6('Sijoitusluvut'),
dcc.Dropdown(
id='sijoitukset-dropdown',
options=[{'label': i, 'value': i} for i in tunnusluvut['Sijoitukset']],
value=[tunnusluvut['Sijoitukset'][0]],
placeholder='Valitse...',
multi=True,
style={'width': 450}
)
])
muut_dropdown = html.Div([
html.H6('Muut luvut'),
dcc.Dropdown(
id='muut-dropdown',
options=[{'label': i, 'value': i} for i in tunnusluvut['Muut']],
value=[tunnusluvut['Muut'][0]],
placeholder='Valitse...',
multi=True,
style={'width': 450}
)
])
vuosi_slider = html.Div([
html.H6('Vuodet'),
dcc.RangeSlider(
id='vuosi-slider',
min=vuodet[0],
max=vuodet[-1],
value=[vuodet[-5], vuodet[-1]],
marks={str(year): str(year) for year in vuodet},
step=None,
)],
style={'width': 450}
)
kaavio_radioitems = html.Div([
html.H6('Kaavio'),
dcc.RadioItems(
id='kaavio-radioitems',
className='radio-group',
options=[{'label': 'Pylväs', 'value': 'bar'},
{'label': 'Viiva', 'value': 'line'}],
value='bar',
labelStyle={'display': 'inline-block', 'margin-right': 10},
inputStyle={'margin-right': 2},
)
])
#######################################
# Page layout
#######################################
app.layout = html.Div(
html.Div([
# Header
html.Div([
html.H2('Työeläkevakuutusyhtiöiden tunnusluvut',
style={'margin-left': 20},
),
html.Div([yhtio_modal],
style={'margin-left': 50},
),
html.Div([sivu_modal],
),
], className='row'),
# Options: yhtiöt, kaavio, vuodet
html.Div([
html.Div([yhtio_checklist],
style={'margin-left': 20}
),
html.Div([kaavio_radioitems],
style={'margin-left': 20}
),
html.Div([vuosi_slider],
style={'margin-left': 20}
)
], className='row'),
# Options: sijoitusluvut, muut luvut
html.Div([
html.Div([sijoitukset_dropdown],
style={'margin-left': 20},
),
html.Div([muut_dropdown],
style={'margin-left': 20},
)
], className='row'),
# Graphs, from create_graphs-method
html.Div(id='container'),
])
)
#######################################
# Callbacks
#######################################
@app.callback(
Output('container', 'children'),
[Input('yhtio-checklist', 'value'),
Input('sijoitukset-dropdown', 'value'),
Input('muut-dropdown', 'value'),
Input('vuosi-slider', 'value'),
Input('kaavio-radioitems', 'value')]
)
def create_graphs(yhtiot, sijoitusluvut, muutluvut, vuodet, kaavio):
    """Build one dcc.Graph per selected metric, wrapped in a row div."""
    yhtiot.sort()  # in-place, as before; Dash passes a fresh list per callback
    metrics = sorted(sijoitusluvut + muutluvut)
    year_lo, year_hi = vuodet
    selected_years = list(range(year_lo, year_hi + 1))
    dff = df[df['Vuosi'].isin(selected_years)]
    graphs = [
        dcc.Graph(
            id=f'graph-{metric}',
            figure=create_figure(metric, yhtiot, kaavio, dff),
        )
        for metric in metrics
    ]
    return html.Div(graphs, className='row')
def create_figure(tunnusluku, yhtiot, kaavio, df):
    """Create a figure dict for one metric.

    Args:
        tunnusluku: column name of the metric to plot.
        yhtiot: company names to include (one trace each).
        kaavio: trace type, 'bar' or 'line'.
        df: data pre-filtered to the selected years.
    """
    data = []
    for yhtio in yhtiot:
        # BUGFIX: the old filter `df[tunnusluku] != np.nan` is always True
        # (NaN compares unequal to everything, itself included), so missing
        # values were never dropped.  Use notna() to exclude them — this also
        # makes the dff.empty check below meaningful.
        dff = df[(df['Yhtiö'] == yhtio) & (df[tunnusluku].notna())]
        if dff.empty:
            continue
        data.append({'x': dff['Vuosi'], 'y': dff[tunnusluku],
                     'type': kaavio, 'name': yhtio,
                     'marker': {'color': colors[yhtio]},
                     'hovertemplate': '%{y}',
                     'hoverlabel': {'bgcolor': 'white'},
                     }
                    )
    return {
        'data': data,
        'layout': dict(
            title=get_figure_title(tunnusluku),
            xaxis={
                'title': 'Vuosi',
                'dtick': 1
            },
            height=550,
            width=get_figure_width(yhtiot, kaavio, df),
            hovermode='closest',
            paper_bgcolor=colors['background'],
            plot_bgcolor=colors['background'])
    }
def get_figure_title(tunnusluku):
    """Return the chart title, appending the unit when it is a known one."""
    unit = units[tunnusluku]
    return f'{tunnusluku} ({unit})' if unit in ('euroa', 'kpl', '%') else tunnusluku
def get_figure_width(yhtiot, kaavio, df):
    """Pixel width for a figure.

    Bar charts scale with years x companies, clamped to [550, 1200];
    line charts scale with years only, with a 550px floor.
    """
    # dropna=False keeps the semantics of len(df['Vuosi'].unique()).
    num_years = df['Vuosi'].nunique(dropna=False)
    if kaavio == 'bar':
        return min(1200, max(550, 37 * num_years * len(yhtiot)))
    return max(550, 75 * num_years)
# Modal callbacks
@app.callback(
Output('sivu-modal', 'is_open'),
[Input('open-sivu-modal', 'n_clicks'), Input('close-sivu-modal', 'n_clicks')],
[State('sivu-modal', 'is_open')]
)
def toggle_modal(n1, n2, is_open):
    """Flip the modal when either button was clicked; otherwise keep state."""
    return not is_open if (n1 or n2) else is_open
@app.callback(
Output('yhtio-modal', 'is_open'),
[Input('open-yhtio-modal', 'n_clicks'), Input('close-yhtio-modal', 'n_clicks')],
[State('yhtio-modal', 'is_open')]
)
def toggle_modal(n1, n2, is_open):
    """Flip the modal when either button was clicked; otherwise keep state."""
    return not is_open if (n1 or n2) else is_open
# Local entry point; in production the app is served via the `server` object.
if __name__ == '__main__':
    app.run_server(debug=False)  # Set debug=False for the production server
| 30.093923 | 107 | 0.494951 | import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import numpy as np
import fileutility
app = dash.Dash(__name__, suppress_callback_exceptions=True)
app.title = 'Tunnusluvut'
server = app.server
työeläkevakuutusyhtiö Varma',
html.Br(),
'Pensionsförsäkringsaktiebolaget Veritas'
]),
html.P('''Elo toimi vuonna 2013 nimellä LähiTapiola Keskinäinen Eläkevakuutusyhtiö
ja sitä ennen nimellä Keskinäinen Eläkevakuutusyhtiö Tapiola.''')
])
])
tietoa_sivusta = html.Div(children=[
html.P('''Sivulla voit luoda kaavioita työeläkevakuutusyhtiöiden tunnusluvuista.
Hiirellä tai kaavion painikkeilla voit esimerkiksi suurentaa tai vierittää kaavioita.
Osaa tunnusluvuista ei ole kaikilta vuosilta.'''),
html.P(['Luvut ovat Finanssivalvonnan julkaisemista ',
html.A('tilastoista', href='https://www.finanssivalvonta.fi/tilastot/vakuutus/elakevakuutus/'),
' ja yhtiöiden tilinpäätöksistä. Kunkin vuoden luvut ovat tilanne 31.12. ',
'Lukujen pyöristystarkkuus vaihtelee.']),
html.P(['Sivun lähdekoodi on ',
html.A('GitHubissa', href='https://github.com/mikkomaa/tunnusluvut'),
'.']),
html.P('''Kysymyksiä ja kommentteja voit lähettää sähköpostilla mkkmatis at hotmail.com.''')
])
yhtio_info_button = dbc.Button('Tietoa yhtiöistä',
id='open-yhtio-modal',
outline=True,
style={
'margin-top': 20,
})
sivu_info_button = dbc.Button('Tietoa sivusta',
id='open-sivu-modal',
outline=True,
style={
'margin-top': 20,
'margin-left': 20,
})
yhtio_modal = html.Div(
[
yhtio_info_button,
dbc.Modal(
[
dbc.ModalHeader('Tietoa yhtiöistä'),
dbc.ModalBody(
children=[
tietoa_yhtioista,
]
),
dbc.ModalFooter(
dbc.Button('Sulje',
id='close-yhtio-modal',
outline=True)
),
],
id='yhtio-modal',
),
]
)
sivu_modal = html.Div(
[
sivu_info_button,
dbc.Modal(
[
dbc.ModalHeader('Tietoa sivusta'),
dbc.ModalBody(
children=[
tietoa_sivusta,
]
),
dbc.ModalFooter(
dbc.Button('Sulje',
id='close-sivu-modal',
outline=True)
),
],
id='sivu-modal',
),
]
)
#######################################
# Options for the user
#######################################
yhtio_checklist = html.Div([
html.H6('Yhtiöt'),
dcc.Checklist(
id='yhtio-checklist',
options=[{'label': i, 'value': i} for i in yhtiot],
value=['Elo', 'Ilmarinen', 'Varma', 'Veritas'],
labelStyle={'display': 'inline-block', 'margin-right': 10},
inputStyle={'margin-right': 2},
),
])
sijoitukset_dropdown = html.Div([
html.H6('Sijoitusluvut'),
dcc.Dropdown(
id='sijoitukset-dropdown',
options=[{'label': i, 'value': i} for i in tunnusluvut['Sijoitukset']],
value=[tunnusluvut['Sijoitukset'][0]],
placeholder='Valitse...',
multi=True,
style={'width': 450}
)
])
muut_dropdown = html.Div([
html.H6('Muut luvut'),
dcc.Dropdown(
id='muut-dropdown',
options=[{'label': i, 'value': i} for i in tunnusluvut['Muut']],
value=[tunnusluvut['Muut'][0]],
placeholder='Valitse...',
multi=True,
style={'width': 450}
)
])
vuosi_slider = html.Div([
html.H6('Vuodet'),
dcc.RangeSlider(
id='vuosi-slider',
min=vuodet[0],
max=vuodet[-1],
value=[vuodet[-5], vuodet[-1]],
marks={str(year): str(year) for year in vuodet},
step=None,
)],
style={'width': 450}
)
kaavio_radioitems = html.Div([
html.H6('Kaavio'),
dcc.RadioItems(
id='kaavio-radioitems',
className='radio-group',
options=[{'label': 'Pylväs', 'value': 'bar'},
{'label': 'Viiva', 'value': 'line'}],
value='bar',
labelStyle={'display': 'inline-block', 'margin-right': 10},
inputStyle={'margin-right': 2},
)
])
#######################################
# Page layout
#######################################
app.layout = html.Div(
html.Div([
# Header
html.Div([
html.H2('Työeläkevakuutusyhtiöiden tunnusluvut',
style={'margin-left': 20},
),
html.Div([yhtio_modal],
style={'margin-left': 50},
),
html.Div([sivu_modal],
),
], className='row'),
# Options: yhtiöt, kaavio, vuodet
html.Div([
html.Div([yhtio_checklist],
style={'margin-left': 20}
),
html.Div([kaavio_radioitems],
style={'margin-left': 20}
),
html.Div([vuosi_slider],
style={'margin-left': 20}
)
], className='row'),
# Options: sijoitusluvut, muut luvut
html.Div([
html.Div([sijoitukset_dropdown],
style={'margin-left': 20},
),
html.Div([muut_dropdown],
style={'margin-left': 20},
)
], className='row'),
# Graphs, from create_graphs-method
html.Div(id='container'),
])
)
#######################################
# Callbacks
#######################################
@app.callback(
Output('container', 'children'),
[Input('yhtio-checklist', 'value'),
Input('sijoitukset-dropdown', 'value'),
Input('muut-dropdown', 'value'),
Input('vuosi-slider', 'value'),
Input('kaavio-radioitems', 'value')]
)
def create_graphs(yhtiot, sijoitusluvut, muutluvut, vuodet, kaavio):
    """Build one dcc.Graph per selected metric, wrapped in a row div."""
    yhtiot.sort()
    tunnusluvut = sorted(sijoitusluvut + muutluvut)
    # Expand the [start, end] slider pair into the full year range.
    vuodet = [i for i in range(vuodet[0], vuodet[1] + 1)]
    dff = df[df['Vuosi'].isin(vuodet)]
    graphs = []
    for t in tunnusluvut:
        graphs.append(dcc.Graph(
            id='graph-{}'.format(t),
            figure=create_figure(t, yhtiot, kaavio, dff),
        ))
    return html.Div(graphs,
                    className='row')
def create_figure(tunnusluku, tunnusluku_yhtiot_unused_doc=None, *_unused, **__unused):  # noqa: placeholder removed below
    """Create a figure dict for one metric (one trace per company)."""
def get_figure_title(tunnusluku):
    """Return the chart title, appending the unit when it is a known one."""
    if units[tunnusluku] in ['euroa', 'kpl', '%']:
        return f'{tunnusluku} ({units[tunnusluku]})'
    return tunnusluku
def get_figure_width(yhtiot, kaavio, df):
    """Pixel width: bars scale with years x companies (clamped to 550-1200);
    lines scale with years only, with a 550px floor."""
    years = len(df['Vuosi'].unique())
    if kaavio == 'bar':
        width = max(550, 37 * years * len(yhtiot))
        return min(1200, width)
    return max(550, 75 * years)
# Modal callbacks
@app.callback(
Output('sivu-modal', 'is_open'),
[Input('open-sivu-modal', 'n_clicks'), Input('close-sivu-modal', 'n_clicks')],
[State('sivu-modal', 'is_open')]
)
def toggle_modal(n1, n2, is_open):
    """Flip the modal when either button was clicked; otherwise keep state."""
    if n1 or n2:
        return not is_open
    return is_open
@app.callback(
Output('yhtio-modal', 'is_open'),
[Input('open-yhtio-modal', 'n_clicks'), Input('close-yhtio-modal', 'n_clicks')],
[State('yhtio-modal', 'is_open')]
)
def toggle_modal(n1, n2, is_open):
    """Flip the modal when either button was clicked; otherwise keep state."""
    if n1 or n2:
        return not is_open
    return is_open
if __name__ == '__main__':
app.run_server(debug=False) # Set debug=False for the production server
| true | true |
f73761a9ffacc5c604bd742de04d2b9cbe4a196a | 5,938 | py | Python | views/forecasts.py | limejump/agile-insights | 2e4f454f83ad7d3d6e071c3dbc297946f46a671c | [
"MIT"
] | null | null | null | views/forecasts.py | limejump/agile-insights | 2e4f454f83ad7d3d6e071c3dbc297946f46a671c | [
"MIT"
] | 3 | 2021-02-24T15:01:03.000Z | 2021-04-07T17:28:37.000Z | views/forecasts.py | limejump/agile-insights | 2e4f454f83ad7d3d6e071c3dbc297946f46a671c | [
"MIT"
] | null | null | null | import dash_html_components as html
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from models import Forecast as ForecastModel
class Forecast:
def __init__(self, team_name, remaining_issues=None):
self.model = ForecastModel(team_name)
self.remaining_issues = remaining_issues
def mk_throughput_line(self):
df = self.model.throughput_df()
fig = go.Figure(data=go.Scatter(
x=df['end'].tolist(),
y=df['throughput'].tolist(),
name='throughput'
))
if self.remaining_issues:
self.add_uncertaintity_cone(df, fig)
return fig
def add_uncertaintity_cone(self, throughput_df, fig):
current_throughput = throughput_df.iloc[-1]['throughput']
target = current_throughput + self.remaining_issues
quick, slow = self.model.uncertainty_cone_coords(target)
x_a, y_a, x_b, y_b = quick
fig.add_trace(go.Scatter(
x=[x_a, x_b],
y=[y_a, y_b],
name='optimistic',
mode='lines',
line={
'color': 'green',
'dash': 'dash',
'width': 1
},
legendgroup='optimistic'
))
fig.add_trace(go.Scatter(
x=[x_b, x_b],
y=[y_b, 0],
mode='lines',
line={
'color': 'green',
'dash': 'dash',
'width': 1
},
showlegend=False,
legendgroup='optimistic'
))
x_a, y_a, x_b, y_b = slow
fig.add_trace(go.Scatter(
x=[x_a, x_b],
y=[y_a, y_b],
name='pesimistic',
mode='lines',
line={
'color': 'red',
'dash': 'dash',
'width': 1
},
legendgroup='optimistic'
))
fig.add_trace(go.Scatter(
x=[x_b, x_b],
y=[y_b, 0],
mode='lines',
line={
'color': 'red',
'dash': 'dash',
'width': 1
},
showlegend=False,
legendgroup='pesimistic'
))
def mk_story_point_scatter(self):
self.model.throughput_df()
return px.scatter(
self.model.historic_df, x="story_points", y="days_taken")
def mk_time_per_issue_scatter(self):
percent_80 = self.model.historic_df['days_taken'].quantile(0.8)
percent_50 = self.model.historic_df['days_taken'].quantile(0.5)
issue_min = self.model.historic_df['end_time'].min(numeric_only=False)
issue_max = self.model.historic_df['end_time'].max(numeric_only=False)
quantiles_df = pd.DataFrame({
'x': [issue_min, issue_max, issue_min, issue_max],
'y': [percent_50, percent_50, percent_80, percent_80],
'name': [
f"50% {round(percent_50)} days",
f"50% {round(percent_50)} days",
f"80% {round(percent_80)} days",
f"80% {round(percent_80)} days"]
})
fig = px.scatter(
self.model.historic_df, x='end_time', y="days_taken",
hover_name="name",
hover_data={'end_time': False, 'days_taken': True})
for trace in px.line(
quantiles_df, x='x', y='y', color='name',
color_discrete_sequence=px.colors.qualitative.Vivid).data:
fig.add_trace(trace)
return fig
def mk_montecarlo_plot(self, num_issues=5):
df = self.model.run_montecarlo(num_issues)
percent_80 = df['days_taken'].quantile(0.8)
percent_70 = df['days_taken'].quantile(0.7)
percent_60 = df['days_taken'].quantile(0.6)
percent_50 = df['days_taken'].quantile(0.5)
issue_min = df['simulation'].min(numeric_only=False)
issue_max = df['simulation'].max(numeric_only=False)
quantiles_df = pd.DataFrame({
'x': [
issue_min, issue_max,
issue_min, issue_max,
issue_min, issue_max,
issue_min, issue_max
],
'y': [
percent_50, percent_50,
percent_60, percent_60,
percent_70, percent_70,
percent_80, percent_80
],
'name': [
f"50% {round(percent_50)} days",
f"50% {round(percent_50)} days",
f"60% {round(percent_60)} days",
f"60% {round(percent_60)} days",
f"70% {round(percent_70)} days",
f"70% {round(percent_70)} days",
f"80% {round(percent_80)} days",
f"80% {round(percent_80)} days"]
})
fig = px.bar(df, x=df.index, y='days_taken')
for trace in px.line(
quantiles_df, x='x', y='y', color='name',
color_discrete_sequence=px.colors.qualitative.Vivid).data:
fig.add_trace(trace)
return fig
def render(self):
    """Dash layout for the forecast page: a prompt, the issue-count
    input and the throughput chart."""
    prompt = html.P(
        "Input the number of remaining issues to reach your goal.")
    issues_input = dbc.Input(
        id="issues-input", type="number", min=0, step=1,
        value=self.remaining_issues)
    # Graph id spelling ('throuput') kept as-is: Dash callbacks match ids
    # by exact string.
    throughput_graph = dcc.Graph(
        id='throuput', figure=self.mk_throughput_line())
    return [prompt, issues_input, throughput_graph]
@classmethod
def callback_elements(cls):
    """Placeholder elements so Dash callbacks can be registered before
    the page has rendered."""
    return [dbc.Input(id='issues-input')]
| 33.548023 | 79 | 0.513641 | import dash_html_components as html
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from models import Forecast as ForecastModel
class Forecast:
    """Dash view rendering delivery forecasts for a single team.

    Wraps a :class:`ForecastModel` and produces Plotly figures plus the
    Dash layout fragments that display them.
    """

    def __init__(self, team_name, remaining_issues=None):
        # remaining_issues drives the uncertainty cone; when None/0 the
        # throughput chart is rendered without a projection.
        self.model = ForecastModel(team_name)
        self.remaining_issues = remaining_issues

    def mk_throughput_line(self):
        """Line chart of throughput over time, optionally extended with
        an uncertainty cone when ``remaining_issues`` is set."""
        df = self.model.throughput_df()
        fig = go.Figure(data=go.Scatter(
            x=df['end'].tolist(),
            y=df['throughput'].tolist(),
            name='throughput'
        ))
        if self.remaining_issues:
            self.add_uncertaintity_cone(df, fig)
        return fig

    def add_uncertaintity_cone(self, throughput_df, fig):
        """Overlay optimistic/pessimistic projection lines on *fig*.

        The model supplies two line segments ("quick" and "slow") toward
        the target throughput; each is drawn together with a vertical
        drop line at its end point.
        """
        current_throughput = throughput_df.iloc[-1]['throughput']
        target = current_throughput + self.remaining_issues
        quick, slow = self.model.uncertainty_cone_coords(target)
        # Optimistic ("quick") projection in green.
        x_a, y_a, x_b, y_b = quick
        fig.add_trace(go.Scatter(
            x=[x_a, x_b],
            y=[y_a, y_b],
            name='optimistic',
            mode='lines',
            line={
                'color': 'green',
                'dash': 'dash',
                'width': 1
            },
            legendgroup='optimistic'
        ))
        fig.add_trace(go.Scatter(
            x=[x_b, x_b],
            y=[y_b, 0],
            mode='lines',
            line={
                'color': 'green',
                'dash': 'dash',
                'width': 1
            },
            showlegend=False,
            legendgroup='optimistic'
        ))
        # Pessimistic ("slow") projection in red.
        x_a, y_a, x_b, y_b = slow
        fig.add_trace(go.Scatter(
            x=[x_a, x_b],
            y=[y_a, y_b],
            name='pesimistic',
            mode='lines',
            line={
                'color': 'red',
                'dash': 'dash',
                'width': 1
            },
            # NOTE(review): grouped under 'optimistic' in the original;
            # possibly intended to be 'pesimistic' -- confirm before changing.
            legendgroup='optimistic'
        ))
        fig.add_trace(go.Scatter(
            x=[x_b, x_b],
            y=[y_b, 0],
            mode='lines',
            line={
                'color': 'red',
                'dash': 'dash',
                'width': 1
            },
            showlegend=False,
            legendgroup='pesimistic'
        ))

    def mk_story_point_scatter(self):
        """Scatter of story points vs. days taken for completed issues."""
        # Called for its side effect of refreshing model state
        # (presumably populates historic_df) -- TODO confirm.
        self.model.throughput_df()
        return px.scatter(
            self.model.historic_df, x="story_points", y="days_taken")

    def mk_time_per_issue_scatter(self):
        """Scatter of cycle time per issue over time with 50%/80%
        quantile guide lines."""
        percent_80 = self.model.historic_df['days_taken'].quantile(0.8)
        percent_50 = self.model.historic_df['days_taken'].quantile(0.5)
        issue_min = self.model.historic_df['end_time'].min(numeric_only=False)
        issue_max = self.model.historic_df['end_time'].max(numeric_only=False)
        # Two points per quantile make horizontal lines across the range.
        quantiles_df = pd.DataFrame({
            'x': [issue_min, issue_max, issue_min, issue_max],
            'y': [percent_50, percent_50, percent_80, percent_80],
            'name': [
                f"50% {round(percent_50)} days",
                f"50% {round(percent_50)} days",
                f"80% {round(percent_80)} days",
                f"80% {round(percent_80)} days"]
        })
        fig = px.scatter(
            self.model.historic_df, x='end_time', y="days_taken",
            hover_name="name",
            hover_data={'end_time': False, 'days_taken': True})
        for trace in px.line(
                quantiles_df, x='x', y='y', color='name',
                color_discrete_sequence=px.colors.qualitative.Vivid).data:
            fig.add_trace(trace)
        return fig

    def mk_montecarlo_plot(self, num_issues=5):
        """Bar chart of Monte-Carlo outcomes for *num_issues* remaining
        issues, with 50/60/70/80% quantile guide lines."""
        df = self.model.run_montecarlo(num_issues)
        percent_80 = df['days_taken'].quantile(0.8)
        percent_70 = df['days_taken'].quantile(0.7)
        percent_60 = df['days_taken'].quantile(0.6)
        percent_50 = df['days_taken'].quantile(0.5)
        issue_min = df['simulation'].min(numeric_only=False)
        issue_max = df['simulation'].max(numeric_only=False)
        quantiles_df = pd.DataFrame({
            'x': [
                issue_min, issue_max,
                issue_min, issue_max,
                issue_min, issue_max,
                issue_min, issue_max
            ],
            'y': [
                percent_50, percent_50,
                percent_60, percent_60,
                percent_70, percent_70,
                percent_80, percent_80
            ],
            'name': [
                f"50% {round(percent_50)} days",
                f"50% {round(percent_50)} days",
                f"60% {round(percent_60)} days",
                f"60% {round(percent_60)} days",
                f"70% {round(percent_70)} days",
                f"70% {round(percent_70)} days",
                f"80% {round(percent_80)} days",
                f"80% {round(percent_80)} days"]
        })
        fig = px.bar(df, x=df.index, y='days_taken')
        for trace in px.line(
                quantiles_df, x='x', y='y', color='name',
                color_discrete_sequence=px.colors.qualitative.Vivid).data:
            fig.add_trace(trace)
        return fig

    def render(self):
        """Dash layout: issue-count input plus the throughput graph."""
        return [
            html.P("Input the number of remaining issues to reach your goal."),
            dbc.Input(
                id="issues-input", type="number",
                min=0, step=1,
                value=self.remaining_issues),
            dcc.Graph(
                id='throuput',
                figure=self.mk_throughput_line()
            ),
        ]

    @classmethod
    def callback_elements(cls):
        """Elements referenced by Dash callbacks before first render."""
        return [
            dbc.Input(id='issues-input')
        ]
| true | true |
f737622280f77f2df3ab140ff96b9fc6ef4c5f2e | 6,212 | py | Python | software/Opal/spud/diamond/build/lib.linux-x86_64-2.7/diamond/useview.py | msc-acse/acse-9-independent-research-project-Wade003 | cfcba990d52ccf535171cf54c0a91b184db6f276 | [
"MIT"
] | 2 | 2020-05-11T02:39:46.000Z | 2020-05-11T03:08:38.000Z | software/multifluids_icferst/libspud/diamond/diamond/useview.py | msc-acse/acse-9-independent-research-project-Wade003 | cfcba990d52ccf535171cf54c0a91b184db6f276 | [
"MIT"
] | null | null | null | software/multifluids_icferst/libspud/diamond/diamond/useview.py | msc-acse/acse-9-independent-research-project-Wade003 | cfcba990d52ccf535171cf54c0a91b184db6f276 | [
"MIT"
] | 2 | 2020-05-21T22:50:19.000Z | 2020-10-28T17:16:31.000Z | #!/usr/bin/env python
# This file is part of Diamond.
#
# Diamond is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Diamond is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Diamond. If not, see <http://www.gnu.org/licenses/>.
import gobject
import gtk
import os
import threading
import schemausage
RELAXNGNS = "http://relaxng.org/ns/structure/1.0"
RELAXNG = "{" + RELAXNGNS + "}"
class UseView(gtk.Window):
    """Window showing which parts of a RELAX NG schema are unused.

    Walks a directory for files ending in *suffix*, checks them against
    *schema* via ``schemausage`` and renders the schema as a tree whose
    rows are colored by usage (red = unused, indianred = has unused
    children, black = used).
    """

    def __init__(self, schema, suffix, folder = None):
        gtk.Window.__init__(self)
        self.__add_controls()
        if folder is None:
            # No folder supplied: ask the user to pick one.
            dialog = gtk.FileChooserDialog(title = "Input directory",
                action = gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER,
                buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN, gtk.RESPONSE_OK))
            response = dialog.run()
            if response != gtk.RESPONSE_OK:
                dialog.destroy()
                return
            folder = os.path.abspath(dialog.get_filename())
            dialog.destroy()
        #endif
        # Collect every file under folder whose name ends with suffix.
        paths = []
        for dirpath, dirnames, filenames in os.walk(folder):
            paths.extend([os.path.join(dirpath, filename) for filename in filenames if filename.endswith(suffix)])
        self.__update(schema, paths)
        self.show_all()

    def __add_controls(self):
        """Build the widgets: a tree view in a scrolled area plus a
        status bar for progress messages."""
        self.set_title("Unused schema entries")
        self.set_default_size(800, 600)
        vbox = gtk.VBox()
        scrolledwindow = gtk.ScrolledWindow()
        scrolledwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        self.treeview = gtk.TreeView()
        self.treeview.get_selection().set_mode(gtk.SELECTION_SINGLE)
        # Node column
        celltext = gtk.CellRendererText()
        column = gtk.TreeViewColumn("Node", celltext)
        column.set_cell_data_func(celltext, self.set_celltext)
        self.treeview.append_column(column)
        # 0: The node tag
        # 1: Used (0 == Not used, 1 = Child not used, 2 = Used)
        self.treestore = gtk.TreeStore(gobject.TYPE_PYOBJECT, gobject.TYPE_PYOBJECT)
        self.treeview.set_enable_search(False)
        scrolledwindow.add(self.treeview)
        vbox.pack_start(scrolledwindow)
        self.statusbar = gtk.Statusbar()
        vbox.pack_end(self.statusbar, expand = False)
        self.add(vbox)

    def __set_treestore(self, node):
        """Recursively mirror the schema tree into ``self.treestore``.

        Elements and non-value choices become rows (initially marked
        used == 2); quantifiers (? + *) annotate their child rows; refs
        are resolved through the grammar's define blocks, memoised in
        ``cache``.  ``self.mapping`` records schema xpath -> tree path.
        """
        def set_treestore(node, iter, type):
            if node.tag == RELAXNG + "element":
                name = schemausage.node_name(node)
                if name == "comment":
                    return #early out to skip comment nodes
                tag = name + (type if type else "")
                child_iter = self.treestore.append(iter, [tag, 2])
                self.mapping[self.tree.getpath(node)] = self.treestore.get_path(child_iter)
                type = None
            elif node.tag == RELAXNG + "choice" and all(n.tag != RELAXNG + "value" for n in node):
                tag = "choice" + (type if type else "")
                child_iter = self.treestore.append(iter, [tag, 2])
                self.mapping[self.tree.getpath(node)] = self.treestore.get_path(child_iter)
                type = None
            elif node.tag == RELAXNG + "optional":
                child_iter = iter
                type = " ?"
            elif node.tag == RELAXNG + "oneOrMore":
                child_iter = iter
                type = " +"
            elif node.tag == RELAXNG + "zeroOrMore":
                child_iter = iter
                type = " *"
            elif node.tag == RELAXNG + "ref":
                # Follow the reference to its define block.
                query = '/t:grammar/t:define[@name="' + node.get("name") + '"]'
                if query not in cache:
                    cache[query] = self.tree.xpath(query, namespaces={'t': RELAXNGNS})[0]
                node = cache[query]
                child_iter = iter
            elif node.tag == RELAXNG + "group" or node.tag == RELAXNG + "interleave":
                child_iter = iter
            else:
                return
            for child in node:
                set_treestore(child, child_iter, type)
        cache = {}
        set_treestore(node, None, None)

    def __set_useage(self, useage):
        """Mark every xpath in *useage* as unused (usage column -> 0)."""
        for xpath in useage:
            try:
                iter = self.treestore.get_iter(self.mapping[xpath])
                self.treestore.set_value(iter, 1, 0)
            except KeyError:
                pass #probably a comment node

    def __floodfill(self, iter, parent = 2):
        """
        Floodfill the tree with the correct useage.
        """
        if parent == 0: #parent is not used
            self.treestore.set_value(iter, 1, 0) #color us not used
        useage = self.treestore.get_value(iter, 1)
        child = self.treestore.iter_children(iter)
        while child is not None:
            change = self.__floodfill(child, useage)
            # A used node with any not-fully-used child becomes "child
            # not used" (1).
            if change != 2 and useage == 2:
                self.treestore.set(iter, 1, 1)
            child = self.treestore.iter_next(child)
        return self.treestore.get_value(iter, 1)

    def __update(self, schema, paths):
        """Populate the tree and compute usage on a background thread.

        GUI updates are marshalled back to the main loop with
        gtk.idle_add; the status bar reports progress.
        """
        self.tree = schema.tree
        start = self.tree.xpath('/t:grammar/t:start', namespaces={'t': RELAXNGNS})[0]
        self.mapping = {}
        def async_update(self, start, schema, paths, context):
            gtk.idle_add(self.statusbar.push, context, "Parsing schema")
            self.__set_treestore(start[0])
            gtk.idle_add(self.statusbar.push, context, "Schema parsed... finding usage")
            self.__set_useage(schemausage.find_unusedset(schema, paths))
            gtk.idle_add(self.statusbar.push, context, "Usage found")
            self.__floodfill(self.treestore.get_iter_root())
            gtk.idle_add(self.statusbar.push, context, "")
            # Attach the model only once fully built.
            gtk.idle_add(self.treeview.set_model, self.treestore)
        t = threading.Thread(target = async_update, args = (self, start, schema, paths, self.statusbar.get_context_id("update")))
        t.start()

    def set_celltext(self, column, cell, model, iter):
        """Cell data func: text from column 0, foreground color from the
        usage value in column 1."""
        tag, useage = model.get(iter, 0, 1)
        cell.set_property("text", tag)
        if useage == 0:
            cell.set_property("foreground", "red")
        elif useage == 1:
            cell.set_property("foreground", "indianred")
        else:
            cell.set_property("foreground", "black")
| 33.945355 | 125 | 0.652125 |
import gobject
import gtk
import os
import threading
import schemausage
RELAXNGNS = "http://relaxng.org/ns/structure/1.0"
RELAXNG = "{" + RELAXNGNS + "}"
class UseView(gtk.Window):
def __init__(self, schema, suffix, folder = None):
gtk.Window.__init__(self)
self.__add_controls()
if folder is None:
dialog = gtk.FileChooserDialog(title = "Input directory",
action = gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER,
buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN, gtk.RESPONSE_OK))
response = dialog.run()
if response != gtk.RESPONSE_OK:
dialog.destroy()
return
folder = os.path.abspath(dialog.get_filename())
dialog.destroy()
paths = []
for dirpath, dirnames, filenames in os.walk(folder):
paths.extend([os.path.join(dirpath, filename) for filename in filenames if filename.endswith(suffix)])
self.__update(schema, paths)
self.show_all()
def __add_controls(self):
self.set_title("Unused schema entries")
self.set_default_size(800, 600)
vbox = gtk.VBox()
scrolledwindow = gtk.ScrolledWindow()
scrolledwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
self.treeview = gtk.TreeView()
self.treeview.get_selection().set_mode(gtk.SELECTION_SINGLE)
celltext = gtk.CellRendererText()
column = gtk.TreeViewColumn("Node", celltext)
column.set_cell_data_func(celltext, self.set_celltext)
self.treeview.append_column(column)
self.treestore = gtk.TreeStore(gobject.TYPE_PYOBJECT, gobject.TYPE_PYOBJECT)
self.treeview.set_enable_search(False)
scrolledwindow.add(self.treeview)
vbox.pack_start(scrolledwindow)
self.statusbar = gtk.Statusbar()
vbox.pack_end(self.statusbar, expand = False)
self.add(vbox)
def __set_treestore(self, node):
def set_treestore(node, iter, type):
if node.tag == RELAXNG + "element":
name = schemausage.node_name(node)
if name == "comment":
return
tag = name + (type if type else "")
child_iter = self.treestore.append(iter, [tag, 2])
self.mapping[self.tree.getpath(node)] = self.treestore.get_path(child_iter)
type = None
elif node.tag == RELAXNG + "choice" and all(n.tag != RELAXNG + "value" for n in node):
tag = "choice" + (type if type else "")
child_iter = self.treestore.append(iter, [tag, 2])
self.mapping[self.tree.getpath(node)] = self.treestore.get_path(child_iter)
type = None
elif node.tag == RELAXNG + "optional":
child_iter = iter
type = " ?"
elif node.tag == RELAXNG + "oneOrMore":
child_iter = iter
type = " +"
elif node.tag == RELAXNG + "zeroOrMore":
child_iter = iter
type = " *"
elif node.tag == RELAXNG + "ref":
query = '/t:grammar/t:define[@name="' + node.get("name") + '"]'
if query not in cache:
cache[query] = self.tree.xpath(query, namespaces={'t': RELAXNGNS})[0]
node = cache[query]
child_iter = iter
elif node.tag == RELAXNG + "group" or node.tag == RELAXNG + "interleave":
child_iter = iter
else:
return
for child in node:
set_treestore(child, child_iter, type)
cache = {}
set_treestore(node, None, None)
def __set_useage(self, useage):
for xpath in useage:
try:
iter = self.treestore.get_iter(self.mapping[xpath])
self.treestore.set_value(iter, 1, 0)
except KeyError:
pass
def __floodfill(self, iter, parent = 2):
if parent == 0:
self.treestore.set_value(iter, 1, 0)
useage = self.treestore.get_value(iter, 1)
child = self.treestore.iter_children(iter)
while child is not None:
change = self.__floodfill(child, useage)
if change != 2 and useage == 2:
self.treestore.set(iter, 1, 1)
child = self.treestore.iter_next(child)
return self.treestore.get_value(iter, 1)
def __update(self, schema, paths):
self.tree = schema.tree
start = self.tree.xpath('/t:grammar/t:start', namespaces={'t': RELAXNGNS})[0]
self.mapping = {}
def async_update(self, start, schema, paths, context):
gtk.idle_add(self.statusbar.push, context, "Parsing schema")
self.__set_treestore(start[0])
gtk.idle_add(self.statusbar.push, context, "Schema parsed... finding usage")
self.__set_useage(schemausage.find_unusedset(schema, paths))
gtk.idle_add(self.statusbar.push, context, "Usage found")
self.__floodfill(self.treestore.get_iter_root())
gtk.idle_add(self.statusbar.push, context, "")
gtk.idle_add(self.treeview.set_model, self.treestore)
t = threading.Thread(target = async_update, args = (self, start, schema, paths, self.statusbar.get_context_id("update")))
t.start()
def set_celltext(self, column, cell, model, iter):
tag, useage = model.get(iter, 0, 1)
cell.set_property("text", tag)
if useage == 0:
cell.set_property("foreground", "red")
elif useage == 1:
cell.set_property("foreground", "indianred")
else:
cell.set_property("foreground", "black")
| true | true |
f737622ef52affbf32fe22c242360e70175a3571 | 1,009 | py | Python | scripts/misc/csvavg.py | petabricks/petabricks | b498b93880b0c4ac3924ddb82cff2e6541e60bd1 | [
"MIT"
] | 10 | 2015-03-12T18:09:57.000Z | 2022-03-17T03:18:36.000Z | scripts/misc/csvavg.py | petabricks/petabricks | b498b93880b0c4ac3924ddb82cff2e6541e60bd1 | [
"MIT"
] | 2 | 2021-01-12T15:12:21.000Z | 2022-03-22T07:47:37.000Z | scripts/misc/csvavg.py | petabricks/petabricks | b498b93880b0c4ac3924ddb82cff2e6541e60bd1 | [
"MIT"
] | 3 | 2017-06-28T06:01:03.000Z | 2021-01-12T15:05:34.000Z | #!/usr/bin/python
import csv, sys
import numpy

# NOTE(review): this script is Python 2 only -- it relies on map()
# returning a list and on the unbound method csv.DictReader.next.

dialect = csv.excel_tab
multi_file=len(sys.argv[1:])>1
# One DictReader per input file named on the command line.
inputs = map(lambda x: csv.DictReader(x, dialect=dialect), map(open, sys.argv[1:]))
# Current record from each reader (advances every reader by one row).
rows = map(csv.DictReader.next, inputs)
headers = inputs[0].fieldnames
output = csv.writer(sys.stdout, dialect=dialect)
output.writerow(headers)

def mkavg(k):
    """Return 'mean +- std' of column k across the current rows, or
    'error' if any value in the column is not numeric."""
    try:
        values = map(lambda x: float(x[k]), rows)
        return "%f +- %f" % (numpy.mean(values), numpy.std(values))
    except:
        return 'error'

if multi_file:
    # Multiple files: average corresponding rows across the files, one
    # output row per input row, stopping when the shortest file ends.
    try:
        while True:
            output.writerow(map(mkavg, headers))
            rows = map(csv.DictReader.next, inputs)
    except StopIteration:
        pass
else:
    # Single file: average each column down the file; non-numeric cells
    # reset the running sum to 0 but still count toward the divisor.
    counts=dict()
    sums=dict()
    for k in headers:
        try:
            sums[k]=float(rows[0][k])
        except:
            sums[k]=0.0
        counts[k]=1.0
    for row in inputs[0]:
        for k in headers:
            try:
                sums[k]+=float(row[k])
            except:
                sums[k]=0.0
            counts[k]+=1.0
    output.writerow(map(lambda k: sums[k]/counts[k], headers))
| 20.18 | 83 | 0.627354 |
import csv, sys
import numpy
dialect = csv.excel_tab
multi_file=len(sys.argv[1:])>1
inputs = map(lambda x: csv.DictReader(x, dialect=dialect), map(open, sys.argv[1:]))
rows = map(csv.DictReader.next, inputs)
headers = inputs[0].fieldnames
output = csv.writer(sys.stdout, dialect=dialect)
output.writerow(headers)
def mkavg(k):
try:
values = map(lambda x: float(x[k]), rows)
return "%f +- %f" % (numpy.mean(values), numpy.std(values))
except:
return 'error'
if multi_file:
try:
while True:
output.writerow(map(mkavg, headers))
rows = map(csv.DictReader.next, inputs)
except StopIteration:
pass
else:
counts=dict()
sums=dict()
for k in headers:
try:
sums[k]=float(rows[0][k])
except:
sums[k]=0.0
counts[k]=1.0
for row in inputs[0]:
for k in headers:
try:
sums[k]+=float(row[k])
except:
sums[k]=0.0
counts[k]+=1.0
output.writerow(map(lambda k: sums[k]/counts[k], headers))
| true | true |
f737627b38f77632b7d3ec16af70019cdad3dc2c | 28,961 | py | Python | panel/widgets/indicators.py | MaxCodeXTC/panel | 1d34e8ce4734eec10f8e64af11c5a3fecaab5bac | [
"BSD-3-Clause"
] | 1 | 2021-07-25T17:27:51.000Z | 2021-07-25T17:27:51.000Z | panel/widgets/indicators.py | MaxCodeXTC/panel | 1d34e8ce4734eec10f8e64af11c5a3fecaab5bac | [
"BSD-3-Clause"
] | null | null | null | panel/widgets/indicators.py | MaxCodeXTC/panel | 1d34e8ce4734eec10f8e64af11c5a3fecaab5bac | [
"BSD-3-Clause"
] | null | null | null | import os
import sys
from math import pi
import numpy as np
import param
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource
from tqdm.asyncio import tqdm as _tqdm
from ..layout import Column, Row
from ..models import (
HTML, Progress as _BkProgress, TrendIndicator as _BkTrendIndicator
)
from ..pane.markup import Str
from ..reactive import SyncableData
from ..util import escape, updating
from ..viewable import Viewable
from .base import Widget
RED = "#d9534f"
GREEN = "#5cb85c"
BLUE = "#428bca"
class Indicator(Widget):
    """
    Indicator is a baseclass for widgets which indicate some state.
    """

    # Indicators default to a fixed size rather than responsive sizing.
    sizing_mode = param.ObjectSelector(default='fixed', objects=[
        'fixed', 'stretch_width', 'stretch_height', 'stretch_both',
        'scale_width', 'scale_height', 'scale_both', None])

    __abstract = True
class BooleanIndicator(Indicator):
    """Abstract base class for indicators representing an on/off state."""

    value = param.Boolean(default=False, doc="""
      Whether the indicator is active or not.""")

    __abstract = True
class BooleanStatus(BooleanIndicator):
    """Dot-shaped status light which is filled while ``value`` is True."""

    color = param.ObjectSelector(default='dark', objects=[
        'primary', 'secondary', 'success', 'info', 'danger', 'warning',
        'light', 'dark'])

    height = param.Integer(default=20, doc="""
      height of the circle.""")

    width = param.Integer(default=20, doc="""
      Width of the circle.""")

    value = param.Boolean(default=False, doc="""
      Whether the indicator is active or not.""")

    # color is rendered purely via CSS classes; value is never synced
    # directly to the model.
    _rename = {'color': None}

    _source_transforms = {'value': None}

    _widget_type = HTML

    def _process_param_change(self, msg):
        """Fold the boolean ``value`` into CSS classes on the HTML model."""
        msg = super()._process_param_change(msg)
        active = msg.pop('value', None)
        if active is None:
            return msg
        if active:
            msg['css_classes'] = ['dot-filled', self.color]
        else:
            msg['css_classes'] = ['dot']
        return msg
class LoadingSpinner(BooleanIndicator):
    """Circular spinner that animates while ``value`` is True."""

    bgcolor = param.ObjectSelector(default='light', objects=['dark', 'light'])

    color = param.ObjectSelector(default='dark', objects=[
        'primary', 'secondary', 'success', 'info', 'danger', 'warning',
        'light', 'dark'])

    height = param.Integer(default=125, doc="""
      height of the circle.""")

    width = param.Integer(default=125, doc="""
      Width of the circle.""")

    value = param.Boolean(default=False, doc="""
      Whether the indicator is active or not.""")

    # Colors are rendered purely via CSS classes; value is never synced
    # directly to the model.
    _rename = {'color': None, 'bgcolor': None}

    _source_transforms = {'value': None}

    _widget_type = HTML

    def _process_param_change(self, msg):
        """Fold the boolean ``value`` into CSS classes on the HTML model."""
        msg = super()._process_param_change(msg)
        spinning = msg.pop('value', None)
        if spinning is None:
            return msg
        color_cls = f'{self.color}-{self.bgcolor}'
        if spinning:
            msg['css_classes'] = ['loader', 'spin', color_cls]
        else:
            msg['css_classes'] = ['loader', self.bgcolor]
        return msg
class ValueIndicator(Indicator):
    """
    A ValueIndicator provides a visual representation for a numeric
    value.
    """

    # None means "no value to display".
    value = param.Number(default=None, allow_None=True)

    __abstract = True
class Progress(ValueIndicator):
    """Progress bar indicator.

    A numeric ``value`` fills the bar proportionally to ``max``; with
    ``value=None`` the bar is indeterminate and animates while
    ``active`` is True.
    """

    active = param.Boolean(default=True, doc="""
      If no value is set the active property toggles animation of the
      progress bar on and off.""")

    bar_color = param.ObjectSelector(default='success', objects=[
        'primary', 'secondary', 'success', 'info', 'danger', 'warning',
        'light', 'dark'])

    max = param.Integer(default=100, doc="The maximum value of the progress bar.")

    value = param.Integer(default=None, bounds=(-1, None), doc="""
      The current value of the progress bar. If set to None the progress
      bar will be indeterminate and animate depending on the active
      parameter. If set to -1 the progress bar will be empty.""")

    _rename = {'name': None}

    _widget_type = _BkProgress

    def __init__(self, **params):
        super().__init__(**params)
        self._update_value_bounds()

    @param.depends('max', watch=True)
    def _update_value_bounds(self):
        # value may range from -1 (empty bar) up to the configured max.
        self.param.value.bounds = (-1, self.max)
class Number(ValueIndicator):
    """
    The Number indicator renders the value as text optionally colored
    according to the color thresholds.
    """

    default_color = param.String(default='black')

    colors = param.List(default=None)

    format = param.String(default='{value}')

    font_size = param.String(default='54pt')

    nan_format = param.String(default='-', doc="""
      How to format nan values.""")

    title_size = param.String(default='18pt')

    # Everything is rendered server-side into a single HTML snippet.
    _rename = {}

    _source_transforms = {
        'value': None, 'colors': None, 'default_color': None,
        'font_size': None, 'format': None, 'nan_format': None,
        'title_size': None
    }

    _widget_type = HTML

    def _process_param_change(self, msg):
        """Fold all display parameters into a single HTML ``text``
        property on the underlying model.

        Each display-only key is popped from ``msg`` so only ``text``
        (plus any unrelated keys) is forwarded to the frontend.
        """
        msg = super()._process_param_change(msg)
        font_size = msg.pop('font_size', self.font_size)
        title_font_size = msg.pop('title_size', self.title_size)
        name = msg.pop('name', self.name)
        format = msg.pop('format', self.format)
        value = msg.pop('value', self.value)
        nan_format = msg.pop('nan_format', self.nan_format)
        color = msg.pop('default_color', self.default_color)
        colors = msg.pop('colors', self.colors)
        # Iterate thresholds from highest to lowest so the lowest
        # matching threshold determines the color.
        for val, clr in (colors or [])[::-1]:
            if value is not None and value <= val:
                color = clr
        if value is None:
            value = float('nan')
        # NOTE: this also replaces any literal 'nan' substring produced
        # by a custom format string, not just the NaN value itself.
        value = format.format(value=value).replace('nan', nan_format)
        text = f'<div style="font-size: {font_size}; color: {color}">{value}</div>'
        if self.name:
            # Reuse the title size popped above ('title_size' was
            # previously popped a second time here, redundantly).
            text = f'<div style="font-size: {title_font_size}; color: {color}">{name}</div>\n{text}'
        msg['text'] = escape(text)
        return msg
class String(ValueIndicator):
    """
    The String indicator renders a string with a title.
    """

    default_color = param.String(default='black')

    font_size = param.String(default='54pt')

    title_size = param.String(default='18pt')

    value = param.String(default=None, allow_None=True)

    # Everything is rendered server-side into a single HTML snippet.
    _rename = {}

    _source_transforms = {
        'value': None, 'default_color': None, 'font_size': None, 'title_size': None
    }

    _widget_type = HTML

    def _process_param_change(self, msg):
        """Fold the string value (and optional title) into a single HTML
        ``text`` property on the underlying model."""
        msg = super()._process_param_change(msg)
        font_size = msg.pop('font_size', self.font_size)
        title_font_size = msg.pop('title_size', self.title_size)
        name = msg.pop('name', self.name)
        value = msg.pop('value', self.value)
        color = msg.pop('default_color', self.default_color)
        text = f'<div style="font-size: {font_size}; color: {color}">{value}</div>'
        if self.name:
            # Reuse the title size popped above ('title_size' was
            # previously popped a second time here, redundantly).
            text = f'<div style="font-size: {title_font_size}; color: {color}">{name}</div>\n{text}'
        msg['text'] = escape(text)
        return msg
class Gauge(ValueIndicator):
    """
    A Gauge represents a value in some range as a position on
    speedometer or gauge. It is similar to a Dial but visually a lot
    busier.
    """

    annulus_width = param.Integer(default=10, doc="""
      Width of the gauge annulus.""")

    bounds = param.Range(default=(0, 100), doc="""
      The upper and lower bound of the dial.""")

    colors = param.List(default=None, doc="""
      Color thresholds for the Gauge, specified as a list of tuples
      of the fractional threshold and the color to switch to.""")

    custom_opts = param.Dict(doc="""
      Additional options to pass to the ECharts Gauge definition.""")

    height = param.Integer(default=300, bounds=(0, None))

    end_angle = param.Number(default=-45, doc="""
      Angle at which the gauge ends.""")

    format = param.String(default='{value}%', doc="""
      Formatting string for the value indicator.""")

    num_splits = param.Integer(default=10, doc="""
      Number of splits along the gauge.""")

    show_ticks = param.Boolean(default=True, doc="""
      Whether to show ticks along the dials.""")

    show_labels = param.Boolean(default=True, doc="""
      Whether to show tick labels along the dials.""")

    start_angle = param.Number(default=225, doc="""
      Angle at which the gauge starts.""")

    tooltip_format = param.String(default='{b} : {c}%', doc="""
      Formatting string for the hover tooltip.""")

    title_size = param.Integer(default=18, doc="""
      Size of title font.""")

    value = param.Number(default=25, doc="""
      Value to indicate on the gauge a value within the declared bounds.""")

    width = param.Integer(default=300, bounds=(0, None))

    # Parameters are folded into one ECharts spec in
    # _process_param_change, so nothing is renamed or synced separately.
    _rename = {}

    _source_transforms = {
        'annulus_width': None, 'bounds': None, 'colors': None,
        'custom_opts': None, 'end_angle': None, 'format': None,
        'num_splits': None, 'show_ticks': None, 'show_labels': None,
        'start_angle': None, 'tooltip_format': None, 'title_size': None,
        'value': None
    }

    @property
    def _widget_type(self):
        # Import the ECharts model lazily so it is only loaded the first
        # time a Gauge is actually used.
        if 'panel.models.echarts' not in sys.modules:
            from ..models.echarts import ECharts
        else:
            ECharts = getattr(sys.modules['panel.models.echarts'], 'ECharts')
        return ECharts

    def __init__(self, **params):
        super().__init__(**params)
        self._update_value_bounds()

    @param.depends('bounds', watch=True)
    def _update_value_bounds(self):
        # Keep the allowed range of ``value`` in lock-step with ``bounds``.
        self.param.value.bounds = self.bounds

    def _process_param_change(self, msg):
        """Fold all gauge parameters into a single ECharts option dict
        under the ``data`` key of the message."""
        msg = super()._process_param_change(msg)
        vmin, vmax = msg.pop('bounds', self.bounds)
        msg['data'] = {
            'tooltip': {
                'formatter': msg.pop('tooltip_format', self.tooltip_format)
            },
            'series': [{
                'name': 'Gauge',
                'type': 'gauge',
                'axisTick': {'show': msg.pop('show_ticks', self.show_ticks)},
                'axisLabel': {'show': msg.pop('show_labels', self.show_labels)},
                'title': {'fontWeight': 'bold', 'fontSize': msg.pop('title_size', self.title_size)},
                'splitLine': {'show': True},
                'radius': '100%',
                'detail': {'formatter': msg.pop('format', self.format)},
                'min': vmin,
                'max': vmax,
                'startAngle': msg.pop('start_angle', self.start_angle),
                'endAngle': msg.pop('end_angle', self.end_angle),
                'splitNumber': msg.pop('num_splits', self.num_splits),
                'data': [{'value': msg.pop('value', self.value), 'name': self.name}],
                'axisLine': {
                    'lineStyle': {
                        'width': msg.pop('annulus_width', self.annulus_width),
                    }
                }
            }]
        }
        colors = msg.pop('colors', self.colors)
        if colors:
            msg['data']['series'][0]['axisLine']['lineStyle']['color'] = colors
        custom_opts = msg.pop('custom_opts', self.custom_opts)
        if custom_opts:
            # Merge user-supplied options into the generated spec without
            # replacing nested dicts wholesale.
            gauge = msg['data']['series'][0]
            for k, v in custom_opts.items():
                if k not in gauge or not isinstance(gauge[k], dict):
                    gauge[k] = v
                else:
                    gauge[k].update(v)
        return msg
class Dial(ValueIndicator):
"""
A Dial represents a value in some range as a position on an
annular dial. It is similar to a Gauge but more minimal visually.
"""
annulus_width = param.Number(default=0.2, doc="""
Width of the radial annulus as a fraction of the total.""")
bounds = param.Range(default=(0, 100), doc="""
The upper and lower bound of the dial.""")
colors = param.List(default=None, doc="""
Color thresholds for the Dial, specified as a list of tuples
of the fractional threshold and the color to switch to.""")
default_color = param.String(default='lightblue', doc="""
Color of the radial annulus if not color thresholds are supplied.""")
end_angle = param.Number(default=25, doc="""
Angle at which the dial ends.""")
format = param.String(default='{value}%', doc="""
Formatting string for the value indicator and lower/upper bounds.""")
height = param.Integer(default=250, bounds=(1, None))
nan_format = param.String(default='-', doc="""
How to format nan values.""")
needle_color = param.String(default='black', doc="""
Color of the Dial needle.""")
needle_width = param.Number(default=0.1, doc="""
Radial width of the needle.""")
start_angle = param.Number(default=-205, doc="""
Angle at which the dial starts.""")
tick_size = param.String(default=None, doc="""
Font size of the Dial min/max labels.""")
title_size = param.String(default=None, doc="""
Font size of the Dial title.""")
unfilled_color = param.String(default='whitesmoke', doc="""
Color of the unfilled region of the Dial.""")
value_size = param.String(default=None, doc="""
Font size of the Dial value label.""")
value = param.Number(default=25, allow_None=True, doc="""
Value to indicate on the dial a value within the declared bounds.""")
width = param.Integer(default=250, bounds=(1, None))
_manual_params = [
'value', 'start_angle', 'end_angle', 'bounds',
'annulus_width', 'format', 'background', 'needle_width',
'tick_size', 'title_size', 'value_size', 'colors',
'default_color', 'unfilled_color', 'height',
'width', 'nan_format', 'needle_color'
]
_data_params = _manual_params
_rename = {'background': 'background_fill_color'}
def __init__(self, **params):
    """Initialize the Dial and sync value bounds with ``bounds``."""
    super().__init__(**params)
    self._update_value_bounds()
@param.depends('bounds', watch=True)
def _update_value_bounds(self):
    # Keep the allowed range of ``value`` in lock-step with ``bounds``.
    self.param.value.bounds = self.bounds
def _get_data(self):
    """Compute the ColumnDataSource dicts for the Dial's render layers.

    Returns ``(annulus_data, needle_data, threshold_data, text_data)``.
    All angles are in radians; geometry is laid out in the figure's
    (-1, 1) unit square.
    """
    vmin, vmax = self.bounds
    value = self.value
    if value is None:
        value = float('nan')
    # Fraction of the dial covered by the current value.
    fraction = (value-vmin)/(vmax-vmin)
    # Convert the configured degree angles to radians on the dial.
    start = (np.radians(360-self.start_angle) - pi % (2*pi)) + pi
    end = (np.radians(360-self.end_angle) - pi % (2*pi)) + pi
    distance = (abs(end-start) % (pi*2))
    if end>start:
        distance = (pi*2)-distance
    radial_fraction = distance*fraction
    # A NaN value parks the needle at the start angle.
    angle = start if np.isnan(fraction) else (start-radial_fraction)
    inner_radius = 1-self.annulus_width
    # Color of the filled annulus: lowest matching threshold wins.
    color = self.default_color
    for val, clr in (self.colors or [])[::-1]:
        if fraction <= val:
            color = clr
    annulus_data = {
        'starts': np.array([start, angle]),
        'ends' : np.array([angle, end]),
        'color': [color, self.unfilled_color],
        'radius': np.array([inner_radius, inner_radius])
    }
    # Tick segments marking thresholds not yet reached by the value.
    x0s, y0s, x1s, y1s, clrs = [], [], [], [], []
    colors = self.colors or []
    for (val, _), (_, clr) in zip(colors[:-1], colors[1:]):
        tangle = start-(distance*val)
        if (vmin + val * (vmax-vmin)) <= value:
            continue
        x0, y0 = np.cos(tangle), np.sin(tangle)
        x1, y1 = x0*inner_radius, y0*inner_radius
        x0s.append(x0)
        y0s.append(y0)
        x1s.append(x1)
        y1s.append(y1)
        clrs.append(clr)
    threshold_data = {
        'x0': x0s, 'y0': y0s, 'x1': x1s, 'y1': y1s, 'color': clrs
    }
    # Needle rendered as a thin wedge centered in the annulus.
    center_radius = 1-self.annulus_width/2.
    x, y = np.cos(angle)*center_radius, np.sin(angle)*center_radius
    needle_start = pi+angle-(self.needle_width/2.)
    needle_end = pi+angle+(self.needle_width/2.)
    needle_data = {
        'x': np.array([x]),
        'y': np.array([y]),
        'start': np.array([needle_start]),
        'end': np.array([needle_end]),
        'radius': np.array([center_radius])
    }
    # Text labels: title, value, and the min/max bound labels at the
    # dial ends.  Font sizes default to values scaled by the height.
    value = self.format.format(value=value).replace('nan', self.nan_format)
    min_value = self.format.format(value=vmin)
    max_value = self.format.format(value=vmax)
    tminx, tminy = np.cos(start)*center_radius, np.sin(start)*center_radius
    tmaxx, tmaxy = np.cos(end)*center_radius, np.sin(end)*center_radius
    tmin_angle, tmax_angle = start+pi, end+pi % pi
    scale = (self.height/400)
    title_size = self.title_size if self.title_size else '%spt' % (scale*32)
    value_size = self.value_size if self.value_size else '%spt' % (scale*48)
    tick_size = self.tick_size if self.tick_size else '%spt' % (scale*18)
    text_data= {
        'x': np.array([0, 0, tminx, tmaxx]),
        'y': np.array([-.2, -.5, tminy, tmaxy]),
        'text': [self.name, value, min_value, max_value],
        'rot': np.array([0, 0, tmin_angle, tmax_angle]),
        'size': [title_size, value_size, tick_size, tick_size],
        'color': ['black', color, 'black', 'black']
    }
    return annulus_data, needle_data, threshold_data, text_data
def _get_model(self, doc, root=None, parent=None, comm=None):
    """Construct the bokeh figure backing the Dial and register it.

    Draws, in z-order: the filled/unfilled annulus, the needle wedge,
    the colored threshold tick segments and the text labels. Each glyph
    is backed by a named ColumnDataSource so that ``_manual_update`` can
    later patch the data in place via ``model.select(name=...)``.

    Parameters
    ----------
    doc: bokeh Document the model is attached to.
    root: Optional root model; defaults to the figure itself.
    parent: Optional parent model used for bookkeeping.
    comm: Optional Jupyter comm (unused here, kept for API symmetry).

    Returns
    -------
    The bokeh ``figure`` rendering the dial.
    """
    params = self._process_param_change(self._init_params())
    # Fixed (-1, 1) data ranges: all dial geometry is computed on the
    # unit circle in _get_data.
    model = figure(
        x_range=(-1,1), y_range=(-1,1), tools=[],
        outline_line_color=None, toolbar_location=None,
        width=self.width, height=self.height, **params
    )
    # Hide axes and grid; the dial supplies all visual context itself.
    model.xaxis.visible = False
    model.yaxis.visible = False
    model.grid.visible = False
    annulus, needle, threshold, text = self._get_data()

    # Draw annulus
    annulus_source = ColumnDataSource(data=annulus, name='annulus_source')
    model.annular_wedge(
        x=0, y=0, inner_radius='radius', outer_radius=1, start_angle='starts',
        end_angle='ends', line_color='gray', color='color', direction='clock',
        source=annulus_source
    )

    # Draw needle
    needle_source = ColumnDataSource(data=needle, name='needle_source')
    model.wedge(
        x='x', y='y', radius='radius', start_angle='start', end_angle='end',
        fill_color=self.needle_color, line_color=self.needle_color,
        source=needle_source, name='needle_renderer'
    )

    # Draw thresholds
    threshold_source = ColumnDataSource(data=threshold, name='threshold_source')
    model.segment(
        x0='x0', x1='x1', y0='y0', y1='y1', line_color='color', source=threshold_source,
        line_width=2
    )

    # Draw labels
    text_source = ColumnDataSource(data=text, name='label_source')
    model.text(
        x='x', y='y', text='text', font_size='size', text_align='center',
        text_color='color', source=text_source, text_baseline='top',
        angle='rot'
    )

    # Register the model so panel can look it up and clean it up later.
    if root is None:
        root = model
    self._models[root.ref['id']] = (model, parent)
    return model
def _manual_update(self, events, model, doc, root, parent, comm):
    """Apply parameter change events to the manually managed figure.

    Width/height changes are forwarded directly to the bokeh model,
    needle color changes patch the needle glyph, and any change to a
    data-affecting parameter triggers a full recomputation of the four
    named data sources.
    """
    needs_redraw = False
    for evt in events:
        if evt.name in ('width', 'height'):
            model.update(**{evt.name: evt.new})
        if evt.name in self._data_params:
            needs_redraw = True
        elif evt.name == 'needle_color':
            renderer = model.select(name='needle_renderer')
            renderer.glyph.line_color = evt.new
            renderer.glyph.fill_color = evt.new
    if not needs_redraw:
        return
    # Recompute all dial geometry and patch each named source in place.
    datasets = self._get_data()
    source_names = (
        'annulus_source', 'needle_source', 'threshold_source', 'label_source'
    )
    for source_name, data in zip(source_names, datasets):
        model.select(name=source_name).data.update(data)
class Trend(SyncableData, Indicator):
    """
    The Trend indicator enables the user to display a Dashboard KPI Card.
    The card can be layout out as:

    * a column (text and plot on top of each other) or
    * a row (text and plot after each other)

    The text section is responsive and resizes on window resize.
    """

    data = param.Parameter(doc="""
        The plot data declared as a dictionary of arrays or a DataFrame.""")

    layout = param.ObjectSelector(default="column", objects=["column", "row"])

    plot_x = param.String(default="x", doc="""
        The name of the key in the plot_data to use on the x-axis.""")

    plot_y = param.String(default="y", doc="""
        The name of the key in the plot_data to use on the y-axis.""")

    plot_color = param.String(default=BLUE, doc="""
        The color to use in the plot.""")

    plot_type = param.ObjectSelector(default="bar", objects=["line", "step", "area", "bar"], doc="""
        The plot type to render the plot data as.""")

    pos_color = param.String(GREEN, doc="""
        The color used to indicate a positive change.""")

    neg_color = param.String(RED, doc="""
        The color used to indicate a negative change.""")

    title = param.String(doc="""The title or a short description of the card""")

    value = param.Parameter(default='auto', doc="""
        The primary value to be displayed.""")

    value_change = param.Parameter(default='auto', doc="""
        A secondary value. For example the change in percent.""")

    # Parameters that carry the plot data / must be handled manually.
    _data_params = ['data']

    _manual_params = ['data']

    # 'data' and 'selection' are synced through the ColumnDataSource,
    # not as plain bokeh model properties.
    _rename = {'data': None, 'selection': None}

    _widget_type = _BkTrendIndicator

    def _get_data(self):
        """Return (processed_data, cds_data) for the ColumnDataSource.

        Falls back to empty columns when no data is set; dicts are used
        as-is, anything else is assumed DataFrame-like and converted via
        ``ColumnDataSource.from_df``.
        """
        if self.data is None:
            return None, {self.plot_x: [], self.plot_y: []}
        elif isinstance(self.data, dict):
            return self.data, self.data
        return self.data, ColumnDataSource.from_df(self.data)

    def _init_params(self):
        # Cache the processed data and expose it as a bokeh source property.
        props = super()._init_params()
        self._processed, self._data = self._get_data()
        props['source'] = ColumnDataSource(data=self._data)
        return props

    def _trigger_auto_values(self):
        # Re-trigger 'auto' parameters so their derived values are
        # recomputed (in _process_param_change) after a data update.
        trigger = []
        if self.value == 'auto':
            trigger.append('value')
        if self.value_change == 'auto':
            trigger.append('value_change')
        if trigger:
            self.param.trigger(*trigger)

    @updating
    def _stream(self, stream, rollover=None):
        # Streaming new rows may change the last values; refresh autos.
        self._trigger_auto_values()
        super()._stream(stream, rollover)

    def _update_cds(self, *events):
        super()._update_cds(*events)
        self._trigger_auto_values()

    def _process_param_change(self, msg):
        """Resolve 'auto' value/value_change from the plotted y-column.

        'value' becomes the last y sample (or 0 when empty);
        'value_change' becomes the relative change between the last two
        samples (or 0 when fewer than two samples or y1 is zero).
        """
        msg = super()._process_param_change(msg)
        ys = self._data.get(self.plot_y, [])
        if 'value' in msg and msg['value'] == 'auto':
            if len(ys):
                msg['value'] = ys[-1]
            else:
                msg['value'] = 0
        if 'value_change' in msg and msg['value_change'] == 'auto':
            if len(ys) > 1:
                y1, y2 = self._data.get(self.plot_y)[-2:]
                # Guard against division by zero when the prior sample is 0.
                msg['value_change'] = 0 if y1 == 0 else (y2/y1 - 1)
            else:
                msg['value_change'] = 0
        return msg
# Default CSS margins (top, right, bottom, left) for the Tqdm indicator's
# sub-components, keyed first by component then by layout orientation.
MARGIN = {
    "text_pane": {"column": (5, 10, 0, 10), "row": (0, 10, 0, 10)},
    "progress": {"column": (0, 10, 5, 10), "row": (12, 10, 0, 10)},
}
class ptqdm(_tqdm):
    """tqdm subclass that mirrors its progress onto a panel Tqdm indicator.

    The linked indicator must be passed via the ``indicator`` keyword; all
    other arguments behave exactly as in tqdm.
    """

    def __init__(self, *args, **kwargs):
        # Pop the panel indicator before delegating so tqdm never sees it.
        self._indicator = kwargs.pop('indicator')
        super().__init__(*args, **kwargs)

    def display(self, msg=None, pos=None, bar_style=None):
        """Render the bar and sync max/value/text onto the indicator."""
        super().display(msg, pos)
        style = self._indicator.text_pane.style or {}
        # Propagate tqdm's bar colour to the text pane (black by default).
        color = self.colour or 'black'
        self._indicator.text_pane.style = dict(style, color=color)
        if self.total is not None and self.n is not None:
            self._indicator.max = int(self.total) # Can be numpy.int64
            self._indicator.value = int(self.n)
            self._indicator.text = self._to_text(**self.format_dict)
        return True

    def _to_text(self, n, total, **kwargs):
        # ncols=0 suppresses the ASCII bar; only the textual stats remain.
        return self.format_meter(n, total, **{**kwargs, "ncols": 0})

    def close(self):
        super().close()
        if not self.leave:
            # Reset the indicator when the bar is not meant to persist.
            self._indicator.reset()
        # NOTE(review): returning the tqdm class from close() looks
        # unintentional -- tqdm.close() conventionally returns None and no
        # visible caller uses this value; confirm before relying on it.
        return _tqdm
class Tqdm(Indicator):
    """A tqdm-style progress indicator widget.

    Combines a text pane (showing the tqdm progress text) with a Progress
    bar in a Row or Column layout. Calling the instance returns a tqdm
    object wired to update this indicator; ``pandas()`` registers the
    equivalent ``progress_apply`` integration.
    """

    layout = param.ClassSelector(class_=(Column, Row), precedence=-1, constant=True, doc="""
        The layout for the text and progress indicator.""",)

    max = param.Integer(default=100, doc="""
        The maximum value of the progress indicator.""")

    progress = param.ClassSelector(class_=Progress, precedence=-1, doc="""
        The Progress indicator used to display the progress.""",)

    text = param.String(default='', doc="""
        The current tqdm style progress text.""")

    text_pane = param.ClassSelector(class_=Str, precedence=-1, doc="""
        The pane to display the text to.""")

    value = param.Integer(default=0, bounds=(0, None), doc="""
        The current value of the progress bar. If set to None the progress
        bar will be indeterminate and animate depending on the active
        parameter.""")

    margin = param.Parameter(default=0, doc="""
        Allows to create additional space around the component. May
        be specified as a two-tuple of the form (vertical, horizontal)
        or a four-tuple (top, right, bottom, left).""")

    width = param.Integer(default=400, bounds=(0, None), doc="""
        The width of the component (in pixels). This can be either
        fixed or preferred width, depending on width sizing policy.""")

    write_to_console = param.Boolean(default=False, doc="""
        Whether or not to also write to the console.""")

    # Accept layout classes as well as their string aliases in __init__.
    _layouts = {Row: 'row', Column: 'column'}

    # These parameters are managed manually and not synced to the model.
    _rename = {'value': None, 'min': None, 'max': None, 'text': None}

    def __init__(self, **params):
        layout = params.pop('layout', 'column')
        layout = self._layouts.get(layout, layout)
        if "text_pane" not in params:
            # In column layout the text stretches; in row layout it is fixed.
            sizing_mode = 'stretch_width' if layout == 'column' else 'fixed'
            params["text_pane"] = Str(
                None, min_height=20, min_width=280, sizing_mode=sizing_mode,
                margin=MARGIN["text_pane"][layout],
            )
        if "progress" not in params:
            params["progress"] = Progress(
                active=False,
                sizing_mode="stretch_width",
                min_width=100,
                margin=MARGIN["progress"][layout],
            )
        # Forward all Viewable parameters to the wrapping layout.
        layout_params = {p: params.get(p, getattr(self, p)) for p in Viewable.param}
        if layout == 'row' or layout is Row:
            params['layout'] = Row(
                params['progress'], params['text_pane'], **layout_params
            )
        else:
            params['layout'] = Column(
                params['text_pane'], params['progress'], **layout_params
            )
        super().__init__(**params)

        self.param.watch(self._update_layout, list(Viewable.param))

        if self.value == 0:
            # Hack: to give progress the initial look
            self.progress.max = 100000
            self.progress.value = 1
        else:
            self.progress.max = self.max
            self.progress.value = self.value
        self.text_pane.object = self.text

    def _get_model(self, doc, root=None, parent=None, comm=None):
        # Delegate rendering to the wrapping layout and register the model.
        model = self.layout._get_model(doc, root, parent, comm)
        if root is None:
            root = model
        self._models[root.ref['id']] = (model, parent)
        return model

    def _cleanup(self, root):
        super()._cleanup(root)
        self.layout._cleanup(root)

    def _update_layout(self, *events):
        # Keep the layout's Viewable parameters in sync with this widget.
        self.layout.param.set_param(**{event.name: event.new for event in events})

    @param.depends("text", watch=True)
    def _update_text(self):
        if self.text_pane:
            self.text_pane.object = self.text

    @param.depends("value", watch=True)
    def _update_value(self):
        if self.progress:
            self.progress.value = self.value

    @param.depends("max", watch=True)
    def _update_max(self):
        if self.progress:
            self.progress.max = self.max

    def __call__(self, *args, **kwargs):
        kwargs['indicator'] = self
        # Fixed: respect an explicitly supplied `file` kwarg rather than
        # silently discarding it -- this mirrors the check in pandas().
        if not self.write_to_console and 'file' not in kwargs:
            # NOTE: the devnull handle stays open for the lifetime of the
            # returned ptqdm and is reclaimed when it is garbage collected.
            f = open(os.devnull, 'w')
            kwargs['file'] = f
        return ptqdm(*args, **kwargs)

    __call__.__doc__ = ptqdm.__doc__

    def pandas(self, *args, **kwargs):
        """Register a ptqdm-based progress_apply wired to this indicator."""
        kwargs['indicator'] = self
        if not self.write_to_console and 'file' not in kwargs:
            f = open(os.devnull, 'w')
            kwargs['file'] = f
        return ptqdm.pandas(*args, **kwargs)

    def reset(self):
        """Resets the parameters"""
        self.value = self.param.value.default
        self.text = self.param.text.default
| 34.395487 | 100 | 0.596043 | import os
import sys
from math import pi
import numpy as np
import param
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource
from tqdm.asyncio import tqdm as _tqdm
from ..layout import Column, Row
from ..models import (
HTML, Progress as _BkProgress, TrendIndicator as _BkTrendIndicator
)
from ..pane.markup import Str
from ..reactive import SyncableData
from ..util import escape, updating
from ..viewable import Viewable
from .base import Widget
RED = "#d9534f"
GREEN = "#5cb85c"
BLUE = "#428bca"
class Indicator(Widget):
sizing_mode = param.ObjectSelector(default='fixed', objects=[
'fixed', 'stretch_width', 'stretch_height', 'stretch_both',
'scale_width', 'scale_height', 'scale_both', None])
__abstract = True
class BooleanIndicator(Indicator):
value = param.Boolean(default=False, doc="""
Whether the indicator is active or not.""")
__abstract = True
class BooleanStatus(BooleanIndicator):
color = param.ObjectSelector(default='dark', objects=[
'primary', 'secondary', 'success', 'info', 'danger', 'warning',
'light', 'dark'])
height = param.Integer(default=20, doc="""
height of the circle.""")
width = param.Integer(default=20, doc="""
Width of the circle.""")
value = param.Boolean(default=False, doc="""
Whether the indicator is active or not.""")
_rename = {'color': None}
_source_transforms = {'value': None}
_widget_type = HTML
def _process_param_change(self, msg):
msg = super()._process_param_change(msg)
value = msg.pop('value', None)
if value is None:
return msg
msg['css_classes'] = ['dot-filled', self.color] if value else ['dot']
return msg
class LoadingSpinner(BooleanIndicator):
bgcolor = param.ObjectSelector(default='light', objects=['dark', 'light'])
color = param.ObjectSelector(default='dark', objects=[
'primary', 'secondary', 'success', 'info', 'danger', 'warning',
'light', 'dark'])
height = param.Integer(default=125, doc="""
height of the circle.""")
width = param.Integer(default=125, doc="""
Width of the circle.""")
value = param.Boolean(default=False, doc="""
Whether the indicator is active or not.""")
_rename = {'color': None, 'bgcolor': None}
_source_transforms = {'value': None}
_widget_type = HTML
def _process_param_change(self, msg):
msg = super()._process_param_change(msg)
value = msg.pop('value', None)
if value is None:
return msg
color_cls = f'{self.color}-{self.bgcolor}'
msg['css_classes'] = ['loader', 'spin', color_cls] if value else ['loader', self.bgcolor]
return msg
class ValueIndicator(Indicator):
value = param.Number(default=None, allow_None=True)
__abstract = True
class Progress(ValueIndicator):
active = param.Boolean(default=True, doc="""
If no value is set the active property toggles animation of the
progress bar on and off.""")
bar_color = param.ObjectSelector(default='success', objects=[
'primary', 'secondary', 'success', 'info', 'danger', 'warning',
'light', 'dark'])
max = param.Integer(default=100, doc="The maximum value of the progress bar.")
value = param.Integer(default=None, bounds=(-1, None), doc="""
The current value of the progress bar. If set to None the progress
bar will be indeterminate and animate depending on the active
parameter. If set to -1 the progress bar will be empty.""")
_rename = {'name': None}
_widget_type = _BkProgress
@param.depends('max', watch=True)
def _update_value_bounds(self):
self.param.value.bounds = (-1, self.max)
def __init__(self,**params):
super().__init__(**params)
self._update_value_bounds()
class Number(ValueIndicator):
default_color = param.String(default='black')
colors = param.List(default=None)
format = param.String(default='{value}')
font_size = param.String(default='54pt')
nan_format = param.String(default='-', doc="""
How to format nan values.""")
title_size = param.String(default='18pt')
_rename = {}
_source_transforms = {
'value': None, 'colors': None, 'default_color': None,
'font_size': None, 'format': None, 'nan_format': None,
'title_size': None
}
_widget_type = HTML
def _process_param_change(self, msg):
msg = super()._process_param_change(msg)
font_size = msg.pop('font_size', self.font_size)
title_font_size = msg.pop('title_size', self.title_size)
name = msg.pop('name', self.name)
format = msg.pop('format', self.format)
value = msg.pop('value', self.value)
nan_format = msg.pop('nan_format', self.nan_format)
color = msg.pop('default_color', self.default_color)
colors = msg.pop('colors', self.colors)
for val, clr in (colors or [])[::-1]:
if value is not None and value <= val:
color = clr
if value is None:
value = float('nan')
value = format.format(value=value).replace('nan', nan_format)
text = f'<div style="font-size: {font_size}; color: {color}">{value}</div>'
if self.name:
title_font_size = msg.pop('title_size', self.title_size)
text = f'<div style="font-size: {title_font_size}; color: {color}">{name}</div>\n{text}'
msg['text'] = escape(text)
return msg
class String(ValueIndicator):
default_color = param.String(default='black')
font_size = param.String(default='54pt')
title_size = param.String(default='18pt')
value = param.String(default=None, allow_None=True)
_rename = {}
_source_transforms = {
'value': None, 'default_color': None, 'font_size': None, 'title_size': None
}
_widget_type = HTML
def _process_param_change(self, msg):
msg = super()._process_param_change(msg)
font_size = msg.pop('font_size', self.font_size)
title_font_size = msg.pop('title_size', self.title_size)
name = msg.pop('name', self.name)
value = msg.pop('value', self.value)
color = msg.pop('default_color', self.default_color)
text = f'<div style="font-size: {font_size}; color: {color}">{value}</div>'
if self.name:
title_font_size = msg.pop('title_size', self.title_size)
text = f'<div style="font-size: {title_font_size}; color: {color}">{name}</div>\n{text}'
msg['text'] = escape(text)
return msg
class Gauge(ValueIndicator):
annulus_width = param.Integer(default=10, doc="""
Width of the gauge annulus.""")
bounds = param.Range(default=(0, 100), doc="""
The upper and lower bound of the dial.""")
colors = param.List(default=None, doc="""
Color thresholds for the Gauge, specified as a list of tuples
of the fractional threshold and the color to switch to.""")
custom_opts = param.Dict(doc="""
Additional options to pass to the ECharts Gauge definition.""")
height = param.Integer(default=300, bounds=(0, None))
end_angle = param.Number(default=-45, doc="""
Angle at which the gauge ends.""")
format = param.String(default='{value}%', doc="""
Formatting string for the value indicator.""")
num_splits = param.Integer(default=10, doc="""
Number of splits along the gauge.""")
show_ticks = param.Boolean(default=True, doc="""
Whether to show ticks along the dials.""")
show_labels = param.Boolean(default=True, doc="""
Whether to show tick labels along the dials.""")
start_angle = param.Number(default=225, doc="""
Angle at which the gauge starts.""")
tooltip_format = param.String(default='{b} : {c}%', doc="""
Formatting string for the hover tooltip.""")
title_size = param.Integer(default=18, doc="""
Size of title font.""")
value = param.Number(default=25, doc="""
Value to indicate on the gauge a value within the declared bounds.""")
width = param.Integer(default=300, bounds=(0, None))
_rename = {}
_source_transforms = {
'annulus_width': None, 'bounds': None, 'colors': None,
'custom_opts': None, 'end_angle': None, 'format': None,
'num_splits': None, 'show_ticks': None, 'show_labels': None,
'start_angle': None, 'tooltip_format': None, 'title_size': None,
'value': None
}
@property
def _widget_type(self):
if 'panel.models.echarts' not in sys.modules:
from ..models.echarts import ECharts
else:
ECharts = getattr(sys.modules['panel.models.echarts'], 'ECharts')
return ECharts
def __init__(self, **params):
super().__init__(**params)
self._update_value_bounds()
@param.depends('bounds', watch=True)
def _update_value_bounds(self):
self.param.value.bounds = self.bounds
def _process_param_change(self, msg):
msg = super()._process_param_change(msg)
vmin, vmax = msg.pop('bounds', self.bounds)
msg['data'] = {
'tooltip': {
'formatter': msg.pop('tooltip_format', self.tooltip_format)
},
'series': [{
'name': 'Gauge',
'type': 'gauge',
'axisTick': {'show': msg.pop('show_ticks', self.show_ticks)},
'axisLabel': {'show': msg.pop('show_labels', self.show_labels)},
'title': {'fontWeight': 'bold', 'fontSize': msg.pop('title_size', self.title_size)},
'splitLine': {'show': True},
'radius': '100%',
'detail': {'formatter': msg.pop('format', self.format)},
'min': vmin,
'max': vmax,
'startAngle': msg.pop('start_angle', self.start_angle),
'endAngle': msg.pop('end_angle', self.end_angle),
'splitNumber': msg.pop('num_splits', self.num_splits),
'data': [{'value': msg.pop('value', self.value), 'name': self.name}],
'axisLine': {
'lineStyle': {
'width': msg.pop('annulus_width', self.annulus_width),
}
}
}]
}
colors = msg.pop('colors', self.colors)
if colors:
msg['data']['series'][0]['axisLine']['lineStyle']['color'] = colors
custom_opts = msg.pop('custom_opts', self.custom_opts)
if custom_opts:
gauge = msg['data']['series'][0]
for k, v in custom_opts.items():
if k not in gauge or not isinstance(gauge[k], dict):
gauge[k] = v
else:
gauge[k].update(v)
return msg
class Dial(ValueIndicator):
annulus_width = param.Number(default=0.2, doc="""
Width of the radial annulus as a fraction of the total.""")
bounds = param.Range(default=(0, 100), doc="""
The upper and lower bound of the dial.""")
colors = param.List(default=None, doc="""
Color thresholds for the Dial, specified as a list of tuples
of the fractional threshold and the color to switch to.""")
default_color = param.String(default='lightblue', doc="""
Color of the radial annulus if not color thresholds are supplied.""")
end_angle = param.Number(default=25, doc="""
Angle at which the dial ends.""")
format = param.String(default='{value}%', doc="""
Formatting string for the value indicator and lower/upper bounds.""")
height = param.Integer(default=250, bounds=(1, None))
nan_format = param.String(default='-', doc="""
How to format nan values.""")
needle_color = param.String(default='black', doc="""
Color of the Dial needle.""")
needle_width = param.Number(default=0.1, doc="""
Radial width of the needle.""")
start_angle = param.Number(default=-205, doc="""
Angle at which the dial starts.""")
tick_size = param.String(default=None, doc="""
Font size of the Dial min/max labels.""")
title_size = param.String(default=None, doc="""
Font size of the Dial title.""")
unfilled_color = param.String(default='whitesmoke', doc="""
Color of the unfilled region of the Dial.""")
value_size = param.String(default=None, doc="""
Font size of the Dial value label.""")
value = param.Number(default=25, allow_None=True, doc="""
Value to indicate on the dial a value within the declared bounds.""")
width = param.Integer(default=250, bounds=(1, None))
_manual_params = [
'value', 'start_angle', 'end_angle', 'bounds',
'annulus_width', 'format', 'background', 'needle_width',
'tick_size', 'title_size', 'value_size', 'colors',
'default_color', 'unfilled_color', 'height',
'width', 'nan_format', 'needle_color'
]
_data_params = _manual_params
_rename = {'background': 'background_fill_color'}
def __init__(self, **params):
super().__init__(**params)
self._update_value_bounds()
@param.depends('bounds', watch=True)
def _update_value_bounds(self):
self.param.value.bounds = self.bounds
def _get_data(self):
vmin, vmax = self.bounds
value = self.value
if value is None:
value = float('nan')
fraction = (value-vmin)/(vmax-vmin)
start = (np.radians(360-self.start_angle) - pi % (2*pi)) + pi
end = (np.radians(360-self.end_angle) - pi % (2*pi)) + pi
distance = (abs(end-start) % (pi*2))
if end>start:
distance = (pi*2)-distance
radial_fraction = distance*fraction
angle = start if np.isnan(fraction) else (start-radial_fraction)
inner_radius = 1-self.annulus_width
color = self.default_color
for val, clr in (self.colors or [])[::-1]:
if fraction <= val:
color = clr
annulus_data = {
'starts': np.array([start, angle]),
'ends' : np.array([angle, end]),
'color': [color, self.unfilled_color],
'radius': np.array([inner_radius, inner_radius])
}
x0s, y0s, x1s, y1s, clrs = [], [], [], [], []
colors = self.colors or []
for (val, _), (_, clr) in zip(colors[:-1], colors[1:]):
tangle = start-(distance*val)
if (vmin + val * (vmax-vmin)) <= value:
continue
x0, y0 = np.cos(tangle), np.sin(tangle)
x1, y1 = x0*inner_radius, y0*inner_radius
x0s.append(x0)
y0s.append(y0)
x1s.append(x1)
y1s.append(y1)
clrs.append(clr)
threshold_data = {
'x0': x0s, 'y0': y0s, 'x1': x1s, 'y1': y1s, 'color': clrs
}
center_radius = 1-self.annulus_width/2.
x, y = np.cos(angle)*center_radius, np.sin(angle)*center_radius
needle_start = pi+angle-(self.needle_width/2.)
needle_end = pi+angle+(self.needle_width/2.)
needle_data = {
'x': np.array([x]),
'y': np.array([y]),
'start': np.array([needle_start]),
'end': np.array([needle_end]),
'radius': np.array([center_radius])
}
value = self.format.format(value=value).replace('nan', self.nan_format)
min_value = self.format.format(value=vmin)
max_value = self.format.format(value=vmax)
tminx, tminy = np.cos(start)*center_radius, np.sin(start)*center_radius
tmaxx, tmaxy = np.cos(end)*center_radius, np.sin(end)*center_radius
tmin_angle, tmax_angle = start+pi, end+pi % pi
scale = (self.height/400)
title_size = self.title_size if self.title_size else '%spt' % (scale*32)
value_size = self.value_size if self.value_size else '%spt' % (scale*48)
tick_size = self.tick_size if self.tick_size else '%spt' % (scale*18)
text_data= {
'x': np.array([0, 0, tminx, tmaxx]),
'y': np.array([-.2, -.5, tminy, tmaxy]),
'text': [self.name, value, min_value, max_value],
'rot': np.array([0, 0, tmin_angle, tmax_angle]),
'size': [title_size, value_size, tick_size, tick_size],
'color': ['black', color, 'black', 'black']
}
return annulus_data, needle_data, threshold_data, text_data
def _get_model(self, doc, root=None, parent=None, comm=None):
params = self._process_param_change(self._init_params())
model = figure(
x_range=(-1,1), y_range=(-1,1), tools=[],
outline_line_color=None, toolbar_location=None,
width=self.width, height=self.height, **params
)
model.xaxis.visible = False
model.yaxis.visible = False
model.grid.visible = False
annulus, needle, threshold, text = self._get_data()
annulus_source = ColumnDataSource(data=annulus, name='annulus_source')
model.annular_wedge(
x=0, y=0, inner_radius='radius', outer_radius=1, start_angle='starts',
end_angle='ends', line_color='gray', color='color', direction='clock',
source=annulus_source
)
needle_source = ColumnDataSource(data=needle, name='needle_source')
model.wedge(
x='x', y='y', radius='radius', start_angle='start', end_angle='end',
fill_color=self.needle_color, line_color=self.needle_color,
source=needle_source, name='needle_renderer'
)
threshold_source = ColumnDataSource(data=threshold, name='threshold_source')
model.segment(
x0='x0', x1='x1', y0='y0', y1='y1', line_color='color', source=threshold_source,
line_width=2
)
text_source = ColumnDataSource(data=text, name='label_source')
model.text(
x='x', y='y', text='text', font_size='size', text_align='center',
text_color='color', source=text_source, text_baseline='top',
angle='rot'
)
if root is None:
root = model
self._models[root.ref['id']] = (model, parent)
return model
def _manual_update(self, events, model, doc, root, parent, comm):
update_data = False
for event in events:
if event.name in ('width', 'height'):
model.update(**{event.name: event.new})
if event.name in self._data_params:
update_data = True
elif event.name == 'needle_color':
needle_r = model.select(name='needle_renderer')
needle_r.glyph.line_color = event.new
needle_r.glyph.fill_color = event.new
if not update_data:
return
annulus, needle, threshold, labels = self._get_data()
model.select(name='annulus_source').data.update(annulus)
model.select(name='needle_source').data.update(needle)
model.select(name='threshold_source').data.update(threshold)
model.select(name='label_source').data.update(labels)
class Trend(SyncableData, Indicator):
data = param.Parameter(doc="""
The plot data declared as a dictionary of arrays or a DataFrame.""")
layout = param.ObjectSelector(default="column", objects=["column", "row"])
plot_x = param.String(default="x", doc="""
The name of the key in the plot_data to use on the x-axis.""")
plot_y = param.String(default="y", doc="""
The name of the key in the plot_data to use on the y-axis.""")
plot_color = param.String(default=BLUE, doc="""
The color to use in the plot.""")
plot_type = param.ObjectSelector(default="bar", objects=["line", "step", "area", "bar"], doc="""
The plot type to render the plot data as.""")
pos_color = param.String(GREEN, doc="""
The color used to indicate a positive change.""")
neg_color = param.String(RED, doc="""
The color used to indicate a negative change.""")
title = param.String(doc="""The title or a short description of the card""")
value = param.Parameter(default='auto', doc="""
The primary value to be displayed.""")
value_change = param.Parameter(default='auto', doc="""
A secondary value. For example the change in percent.""")
_data_params = ['data']
_manual_params = ['data']
_rename = {'data': None, 'selection': None}
_widget_type = _BkTrendIndicator
def _get_data(self):
if self.data is None:
return None, {self.plot_x: [], self.plot_y: []}
elif isinstance(self.data, dict):
return self.data, self.data
return self.data, ColumnDataSource.from_df(self.data)
def _init_params(self):
props = super()._init_params()
self._processed, self._data = self._get_data()
props['source'] = ColumnDataSource(data=self._data)
return props
def _trigger_auto_values(self):
trigger = []
if self.value == 'auto':
trigger.append('value')
if self.value_change == 'auto':
trigger.append('value_change')
if trigger:
self.param.trigger(*trigger)
@updating
def _stream(self, stream, rollover=None):
self._trigger_auto_values()
super()._stream(stream, rollover)
def _update_cds(self, *events):
super()._update_cds(*events)
self._trigger_auto_values()
def _process_param_change(self, msg):
msg = super()._process_param_change(msg)
ys = self._data.get(self.plot_y, [])
if 'value' in msg and msg['value'] == 'auto':
if len(ys):
msg['value'] = ys[-1]
else:
msg['value'] = 0
if 'value_change' in msg and msg['value_change'] == 'auto':
if len(ys) > 1:
y1, y2 = self._data.get(self.plot_y)[-2:]
msg['value_change'] = 0 if y1 == 0 else (y2/y1 - 1)
else:
msg['value_change'] = 0
return msg
MARGIN = {
"text_pane": {"column": (5, 10, 0, 10), "row": (0, 10, 0, 10)},
"progress": {"column": (0, 10, 5, 10), "row": (12, 10, 0, 10)},
}
class ptqdm(_tqdm):
def __init__(self, *args, **kwargs):
self._indicator = kwargs.pop('indicator')
super().__init__(*args, **kwargs)
def display(self, msg=None, pos=None, bar_style=None):
super().display(msg, pos)
style = self._indicator.text_pane.style or {}
color = self.colour or 'black'
self._indicator.text_pane.style = dict(style, color=color)
if self.total is not None and self.n is not None:
self._indicator.max = int(self.total)
self._indicator.value = int(self.n)
self._indicator.text = self._to_text(**self.format_dict)
return True
def _to_text(self, n, total, **kwargs):
return self.format_meter(n, total, **{**kwargs, "ncols": 0})
def close(self):
super().close()
if not self.leave:
self._indicator.reset()
return _tqdm
class Tqdm(Indicator):
layout = param.ClassSelector(class_=(Column, Row), precedence=-1, constant=True, doc="""
The layout for the text and progress indicator.""",)
max = param.Integer(default=100, doc="""
The maximum value of the progress indicator.""")
progress = param.ClassSelector(class_=Progress, precedence=-1, doc="""
The Progress indicator used to display the progress.""",)
text = param.String(default='', doc="""
The current tqdm style progress text.""")
text_pane = param.ClassSelector(class_=Str, precedence=-1, doc="""
The pane to display the text to.""")
value = param.Integer(default=0, bounds=(0, None), doc="""
The current value of the progress bar. If set to None the progress
bar will be indeterminate and animate depending on the active
parameter.""")
margin = param.Parameter(default=0, doc="""
Allows to create additional space around the component. May
be specified as a two-tuple of the form (vertical, horizontal)
or a four-tuple (top, right, bottom, left).""")
width = param.Integer(default=400, bounds=(0, None), doc="""
The width of the component (in pixels). This can be either
fixed or preferred width, depending on width sizing policy.""")
write_to_console = param.Boolean(default=False, doc="""
Whether or not to also write to the console.""")
_layouts = {Row: 'row', Column: 'column'}
_rename = {'value': None, 'min': None, 'max': None, 'text': None}
def __init__(self, **params):
layout = params.pop('layout', 'column')
layout = self._layouts.get(layout, layout)
if "text_pane" not in params:
sizing_mode = 'stretch_width' if layout == 'column' else 'fixed'
params["text_pane"] = Str(
None, min_height=20, min_width=280, sizing_mode=sizing_mode,
margin=MARGIN["text_pane"][layout],
)
if "progress" not in params:
params["progress"] = Progress(
active=False,
sizing_mode="stretch_width",
min_width=100,
margin=MARGIN["progress"][layout],
)
layout_params = {p: params.get(p, getattr(self, p)) for p in Viewable.param}
if layout == 'row' or layout is Row:
params['layout'] = Row(
params['progress'], params['text_pane'], **layout_params
)
else:
params['layout'] = Column(
params['text_pane'], params['progress'], **layout_params
)
super().__init__(**params)
self.param.watch(self._update_layout, list(Viewable.param))
if self.value == 0:
self.progress.max = 100000
self.progress.value = 1
else:
self.progress.max = self.max
self.progress.value = self.value
self.text_pane.object = self.text
def _get_model(self, doc, root=None, parent=None, comm=None):
model = self.layout._get_model(doc, root, parent, comm)
if root is None:
root = model
self._models[root.ref['id']] = (model, parent)
return model
def _cleanup(self, root):
super()._cleanup(root)
self.layout._cleanup(root)
def _update_layout(self, *events):
self.layout.param.set_param(**{event.name: event.new for event in events})
@param.depends("text", watch=True)
def _update_text(self):
if self.text_pane:
self.text_pane.object = self.text
@param.depends("value", watch=True)
def _update_value(self):
if self.progress:
self.progress.value = self.value
@param.depends("max", watch=True)
def _update_max(self):
if self.progress:
self.progress.max = self.max
def __call__(self, *args, **kwargs):
kwargs['indicator'] = self
if not self.write_to_console:
f = open(os.devnull, 'w')
kwargs['file'] = f
return ptqdm(*args, **kwargs)
__call__.__doc__ = ptqdm.__doc__
def pandas(self, *args, **kwargs):
kwargs['indicator'] = self
if not self.write_to_console and 'file' not in kwargs:
f = open(os.devnull, 'w')
kwargs['file'] = f
return ptqdm.pandas(*args, **kwargs)
def reset(self):
self.value = self.param.value.default
self.text = self.param.text.default
| true | true |
f737627cd940ed5a9bb36289cbc04521c38182b3 | 258 | py | Python | diagrams/onprem/proxmox.py | shadowkrusha/diagrams | 25b561b61ae16371437fdce830e9192a6fde38d5 | [
"MIT"
] | null | null | null | diagrams/onprem/proxmox.py | shadowkrusha/diagrams | 25b561b61ae16371437fdce830e9192a6fde38d5 | [
"MIT"
] | null | null | null | diagrams/onprem/proxmox.py | shadowkrusha/diagrams | 25b561b61ae16371437fdce830e9192a6fde38d5 | [
"MIT"
] | null | null | null | # This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _OnPrem
class _Proxmox(_OnPrem):
_type = "proxmox"
_icon_dir = "resources/onprem/proxmox"
class Pve(_Proxmox):
_icon = "pve.png"
# Aliases
PVE = ProxmoxVE
| 14.333333 | 68 | 0.697674 |
from . import _OnPrem
class _Proxmox(_OnPrem):
_type = "proxmox"
_icon_dir = "resources/onprem/proxmox"
class Pve(_Proxmox):
_icon = "pve.png"
PVE = ProxmoxVE
| true | true |
f7376415a15ce36dab3ad8a495e9883284da19ba | 235 | py | Python | Exercicios/Ex005.py | silveriogabriel/Exercicios_Python | 6bed118b54ad202e3fe4b53cb538ff967a71d855 | [
"MIT"
] | null | null | null | Exercicios/Ex005.py | silveriogabriel/Exercicios_Python | 6bed118b54ad202e3fe4b53cb538ff967a71d855 | [
"MIT"
] | null | null | null | Exercicios/Ex005.py | silveriogabriel/Exercicios_Python | 6bed118b54ad202e3fe4b53cb538ff967a71d855 | [
"MIT"
] | null | null | null | '''Faça um programa que leia um valor pelo teclado e mostre na tela seu sucessor e seu antesessor'''
n1 = int(input('Digite um numero inteiro: '))
print(f'O valor digitado foi {n1} seu sucessor é {n1 + 1} e seu antecessor é {n1 - 1}') | 58.75 | 100 | 0.706383 |
n1 = int(input('Digite um numero inteiro: '))
print(f'O valor digitado foi {n1} seu sucessor é {n1 + 1} e seu antecessor é {n1 - 1}') | true | true |
f7376541694fdd122db8c1761e0366c73019c486 | 2,160 | py | Python | examples/pwr_run/gpu_pwr.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | examples/pwr_run/gpu_pwr.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | examples/pwr_run/gpu_pwr.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | import pandas
import pdb
from datetime import datetime
import matplotlib
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import glob
import sys
from matplotlib.ticker import MultipleLocator
testcase = sys.argv[1] # K80_vgg19_32
print(testcase)
base_dir = '/scratch/li.baol/GPU_pwr_meas/tensorflow/round1/'
log_dir = base_dir + testcase + '_*/' # /scratch/li.baol/GPU_pwr_meas/pytorch/K80_vgg19_32_*/
dirs = glob.glob(log_dir)
dirs.sort()
pwr_all = []
avg_all = []
for tc in dirs:
model = tc.split('/')[5+1]
files = glob.glob(tc + "sample*.csv")
files.sort()
avg_pwr = [0] * (len(files) + 1)
for fil in files:
file_path = fil
minute = int(fil.split('/')[6+1].split('_')[1].split('.')[0])
try: # in case the file is empty
data = pandas.read_csv(file_path)
pwr = data[data.columns[2]].tolist()
pwr_array = np.asarray(pwr)
if (len(pwr) == 0):
avg_pwr[minute] = 0
else:
avg_pwr[minute] = np.average(pwr_array)
except pandas.errors.EmptyDataError:
avg_pwr[minute] = 0
pass
pwr_all.append(avg_pwr)
avg_pwr_filter = [i for i in avg_pwr if i > 10] # remove power measurements below 10W
avg_all.append(sum(avg_pwr_filter) / len(avg_pwr_filter))
#------------- plot ---------------#
width = 0.1
fig, axs = plt.subplots(1, 1, gridspec_kw={'hspace': 0, 'wspace': 0}, figsize=(12,5))
fig.suptitle(testcase + " GPU power (W) during training epochs")
for i in range(len(pwr_all)):
x = np.arange(len(pwr_all[i]))
axs.scatter(x, pwr_all[i], label = str(i))
axs.set_xlabel('# of sample with 10s interval')
axs.set_ylabel('power(W)')
#axs.set_yticks(minor=True)
axs.get_yaxis().set_minor_locator(MultipleLocator(5))
axs.legend()
axs.grid(which='both', axis='y', linestyle=':', color='black')
pwr = int(sum(avg_all) / len(avg_all))
plt.savefig(base_dir + "png/" + testcase + '_pwr' + str(pwr) + ".png")
df = pandas.DataFrame(avg_all, columns=["power(W)"])
df.to_csv(base_dir + 'csv/' + testcase + '.csv', index=False)
| 30.857143 | 93 | 0.630556 | import pandas
import pdb
from datetime import datetime
import matplotlib
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import glob
import sys
from matplotlib.ticker import MultipleLocator
testcase = sys.argv[1]
print(testcase)
base_dir = '/scratch/li.baol/GPU_pwr_meas/tensorflow/round1/'
log_dir = base_dir + testcase + '_*/'
dirs = glob.glob(log_dir)
dirs.sort()
pwr_all = []
avg_all = []
for tc in dirs:
model = tc.split('/')[5+1]
files = glob.glob(tc + "sample*.csv")
files.sort()
avg_pwr = [0] * (len(files) + 1)
for fil in files:
file_path = fil
minute = int(fil.split('/')[6+1].split('_')[1].split('.')[0])
try:
data = pandas.read_csv(file_path)
pwr = data[data.columns[2]].tolist()
pwr_array = np.asarray(pwr)
if (len(pwr) == 0):
avg_pwr[minute] = 0
else:
avg_pwr[minute] = np.average(pwr_array)
except pandas.errors.EmptyDataError:
avg_pwr[minute] = 0
pass
pwr_all.append(avg_pwr)
avg_pwr_filter = [i for i in avg_pwr if i > 10]
avg_all.append(sum(avg_pwr_filter) / len(avg_pwr_filter))
width = 0.1
fig, axs = plt.subplots(1, 1, gridspec_kw={'hspace': 0, 'wspace': 0}, figsize=(12,5))
fig.suptitle(testcase + " GPU power (W) during training epochs")
for i in range(len(pwr_all)):
x = np.arange(len(pwr_all[i]))
axs.scatter(x, pwr_all[i], label = str(i))
axs.set_xlabel('# of sample with 10s interval')
axs.set_ylabel('power(W)')
axs.get_yaxis().set_minor_locator(MultipleLocator(5))
axs.legend()
axs.grid(which='both', axis='y', linestyle=':', color='black')
pwr = int(sum(avg_all) / len(avg_all))
plt.savefig(base_dir + "png/" + testcase + '_pwr' + str(pwr) + ".png")
df = pandas.DataFrame(avg_all, columns=["power(W)"])
df.to_csv(base_dir + 'csv/' + testcase + '.csv', index=False)
| true | true |
f737669af564b4e98a57828a25dda2274cb0331c | 4,354 | py | Python | hubspot/crm/deals/models/collection_response_associated_id_forward_paging.py | Ronfer/hubspot-api-python | 1c87274ecbba4aa3c7728f890ccc6e77b2b6d2e4 | [
"Apache-2.0"
] | 117 | 2020-04-06T08:22:53.000Z | 2022-03-18T03:41:29.000Z | hubspot/crm/deals/models/collection_response_associated_id_forward_paging.py | Ronfer/hubspot-api-python | 1c87274ecbba4aa3c7728f890ccc6e77b2b6d2e4 | [
"Apache-2.0"
] | 62 | 2020-04-06T16:21:06.000Z | 2022-03-17T16:50:44.000Z | hubspot/crm/deals/models/collection_response_associated_id_forward_paging.py | Ronfer/hubspot-api-python | 1c87274ecbba4aa3c7728f890ccc6e77b2b6d2e4 | [
"Apache-2.0"
] | 45 | 2020-04-06T16:13:52.000Z | 2022-03-30T21:33:17.000Z | # coding: utf-8
"""
Deals
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from hubspot.crm.deals.configuration import Configuration
class CollectionResponseAssociatedIdForwardPaging(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {"results": "list[AssociatedId]", "paging": "ForwardPaging"}
attribute_map = {"results": "results", "paging": "paging"}
def __init__(self, results=None, paging=None, local_vars_configuration=None): # noqa: E501
"""CollectionResponseAssociatedIdForwardPaging - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._results = None
self._paging = None
self.discriminator = None
self.results = results
if paging is not None:
self.paging = paging
@property
def results(self):
"""Gets the results of this CollectionResponseAssociatedIdForwardPaging. # noqa: E501
:return: The results of this CollectionResponseAssociatedIdForwardPaging. # noqa: E501
:rtype: list[AssociatedId]
"""
return self._results
@results.setter
def results(self, results):
"""Sets the results of this CollectionResponseAssociatedIdForwardPaging.
:param results: The results of this CollectionResponseAssociatedIdForwardPaging. # noqa: E501
:type: list[AssociatedId]
"""
if self.local_vars_configuration.client_side_validation and results is None: # noqa: E501
raise ValueError("Invalid value for `results`, must not be `None`") # noqa: E501
self._results = results
@property
def paging(self):
"""Gets the paging of this CollectionResponseAssociatedIdForwardPaging. # noqa: E501
:return: The paging of this CollectionResponseAssociatedIdForwardPaging. # noqa: E501
:rtype: ForwardPaging
"""
return self._paging
@paging.setter
def paging(self, paging):
"""Sets the paging of this CollectionResponseAssociatedIdForwardPaging.
:param paging: The paging of this CollectionResponseAssociatedIdForwardPaging. # noqa: E501
:type: ForwardPaging
"""
self._paging = paging
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items()))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CollectionResponseAssociatedIdForwardPaging):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, CollectionResponseAssociatedIdForwardPaging):
return True
return self.to_dict() != other.to_dict()
| 32.251852 | 139 | 0.642398 |
import pprint
import re
import six
from hubspot.crm.deals.configuration import Configuration
class CollectionResponseAssociatedIdForwardPaging(object):
openapi_types = {"results": "list[AssociatedId]", "paging": "ForwardPaging"}
attribute_map = {"results": "results", "paging": "paging"}
def __init__(self, results=None, paging=None, local_vars_configuration=None):
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._results = None
self._paging = None
self.discriminator = None
self.results = results
if paging is not None:
self.paging = paging
@property
def results(self):
return self._results
@results.setter
def results(self, results):
if self.local_vars_configuration.client_side_validation and results is None:
raise ValueError("Invalid value for `results`, must not be `None`")
self._results = results
@property
def paging(self):
return self._paging
@paging.setter
def paging(self, paging):
self._paging = paging
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items()))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, CollectionResponseAssociatedIdForwardPaging):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
if not isinstance(other, CollectionResponseAssociatedIdForwardPaging):
return True
return self.to_dict() != other.to_dict()
| true | true |
f73766e249ccc282cd3aefc84109b97ce5a7c7eb | 9,314 | py | Python | src/jstools/records.py | daboross/screeps-ai-v2 | 8bb49f59c8c745801d1aef54e1734e28414171e7 | [
"MIT"
] | 2 | 2020-08-28T01:26:05.000Z | 2021-04-04T03:03:34.000Z | src/jstools/records.py | daboross/screeps-ai-v2 | 8bb49f59c8c745801d1aef54e1734e28414171e7 | [
"MIT"
] | null | null | null | src/jstools/records.py | daboross/screeps-ai-v2 | 8bb49f59c8c745801d1aef54e1734e28414171e7 | [
"MIT"
] | null | null | null | from typing import Any, Optional
from jstools.screeps import *
__pragma__('noalias', 'name')
__pragma__('noalias', 'undefined')
__pragma__('noalias', 'Infinity')
__pragma__('noalias', 'keys')
__pragma__('noalias', 'get')
__pragma__('noalias', 'set')
__pragma__('noalias', 'type')
__pragma__('noalias', 'update')
__pragma__('noalias', 'values')
###
# Creep role recording
###
_recording_now = False
_main_recording_now = False
_sub_recording_now = False
_single_record_start = 0 # type: int
_sub_record_start = 0 # type: int
_main_loop_record_start = 0 # type: int
_averages = None # type: _Memory
_sub_records = None # type: _Memory
def prep_recording():
# type: () -> None
global _recording_now, _main_recording_now, _averages, _sub_recording_now, _sub_records
_averages = Memory['_averages']
if not _averages:
_averages = Memory['_averages'] = {}
_recording_now = not not _averages['_recording_now']
_main_recording_now = _recording_now or not not _averages['_only_recording_main']
_sub_recording_now = _averages['_sub_recording_now'] or False
if _sub_recording_now:
_sub_records = _averages['_sub_records']
if not _sub_records:
_sub_records = _averages['_sub_records'] = {}
def start_recording():
# type: () -> None
Memory['_averages']['_recording_now'] = True
def start_recording_main_only():
# type: () -> None
Memory['_averages']['_only_recording_main'] = True
def stop_recording():
# type: () -> None
Memory['_averages']['_recording_now'] = False
Memory['_averages']['_sub_recording_now'] = False
def start_sub_recording():
# type: () -> None
Memory['_averages']['_sub_recording_now'] = True
Memory['_averages']['_recording_now'] = True
def reset_records():
# type: () -> None
Memory['_averages'] = {}
def start_record():
# type: () -> None
if _recording_now:
global _single_record_start
_single_record_start = Game.cpu.getUsed()
def finish_record(identity):
# type: (str) -> None
if _recording_now and _single_record_start is not None:
end = Game.cpu.getUsed()
if identity in _averages:
_averages[identity].calls += 1
_averages[identity].time += end - _single_record_start
else:
_averages[identity] = {
'calls': 1,
'time': end - _single_record_start,
}
def start_sub_record():
# type: () -> None
if _sub_recording_now:
global _sub_record_start
_sub_record_start = Game.cpu.getUsed()
def finish_sub_record(identity):
# type: (str) -> None
global _sub_record_start
if _sub_recording_now and _sub_record_start is not None:
end = Game.cpu.getUsed()
if identity in _sub_records:
_sub_records[identity].calls += 1
_sub_records[identity].time += end - _sub_record_start
else:
_sub_records[identity] = {
'calls': 1,
'time': end - _sub_record_start,
}
_sub_record_start = None # type: Optional[int]
def start_main_record():
# type: () -> None
if _main_recording_now:
global _main_loop_record_start
_main_loop_record_start = Game.cpu.getUsed()
def finish_main_record():
# type: () -> None
if _main_recording_now and _main_loop_record_start is not None:
end = Game.cpu.getUsed()
if '_main' in _averages:
_averages['_main'] += end - _main_loop_record_start
else:
_averages['_main'] = end - _main_loop_record_start
if '_total' in _averages:
_averages['_total'] += end
else:
_averages['_total'] = end
if '_ticks' in _averages:
_averages['_ticks'] += 1
else:
_averages['_ticks'] = 1
if _sub_recording_now:
if '_ticks' in _sub_records:
_sub_records['_ticks'] += 1
else:
_sub_records['_ticks'] = 1
def record_memory_amount(time):
# type: (int) -> None
if _main_recording_now:
if 'memory.init' in _averages:
_averages['memory.init'].calls += 1
_averages['memory.init'].time += time
else:
_averages['memory.init'] = {
'calls': 1,
'time': time,
}
# `(a / b).toFixed(2)` is incorrectly translated to `a / b.toFixed(2)` instead of `(a / b).toFixed(2)`
def display_num(num, val = 2):
# type: (Any, int) -> str
return num.toFixed(val)
def output_records_full():
# type: () -> str
rows = ["time\tcalls\ttime/t\tcalls/t\taverage\tname"]
total_time_in_records = 0
for identity, obj in _(_averages).pairs().sortBy(lambda t: -t[1].time).value():
if identity.startswith('_'):
continue
if identity != 'memory.init' and identity != 'code.compile':
total_time_in_records += obj.time
rows.push("\n{}\t{}\t{}\t{}\t{}\t{}".format(
display_num(obj.time),
display_num(obj.calls, 1),
display_num(obj.time / _averages['_ticks']),
display_num(obj.calls / _averages['_ticks']),
display_num(obj.time / obj.calls),
identity,
))
missing_time = _averages['_main'] - total_time_in_records
rows.push("\n{}\t{}\t{}\t{}\t{}\t{}".format(
display_num(missing_time),
display_num(_averages['_ticks']),
display_num(missing_time / _averages['_ticks']),
display_num(1),
display_num(missing_time / _averages['_ticks']),
'unprofiled',
))
rows.push("\n{}\t{}\t{}\t{}\t{}\t{}".format(
display_num(_averages['_main']),
display_num(_averages['_ticks']),
display_num(_averages['_main'] / _averages['_ticks']),
display_num(1),
display_num(_averages['_main'] / _averages['_ticks']),
'total.main_loop',
))
compile_time = _averages['_total'] - _averages['_main'] - _averages['memory.init']
rows.push("\n{}\t{}\t{}\t{}\t{}\t{}".format(
display_num(compile_time),
display_num(_averages['_ticks']),
display_num(compile_time / _averages['_ticks']),
display_num(1),
display_num(compile_time / _averages['_ticks']),
'total.compile'.format(Game.cpu.limit),
))
rows.push("\n{}\t{}\t{}\t{}\t{}\t{}".format(
display_num(_averages['_total']),
display_num(_averages['_ticks']),
display_num(_averages['_total'] / _averages['_ticks']),
display_num(1),
display_num(_averages['_total'] / _averages['_ticks']),
'total (limit: {})'.format(Game.cpu.limit),
))
return "".join(rows)
def output_records():
# type: () -> str
if not _averages['_ticks']:
return "no data collected"
rows = ["time/t\tcalls/t\taverage\tname"]
total_time_in_records = 0
for identity, obj in _(_averages).pairs().sortBy(lambda t: -t[1].time).value():
if identity.startswith('_'):
continue
if identity != 'memory.init' and identity != 'code.compile':
total_time_in_records += obj.time
rows.push("\n{}\t{}\t{}\t{}".format(
display_num(obj.time / _averages['_ticks']),
display_num(obj.calls / _averages['_ticks'], 1),
display_num(obj.time / obj.calls),
identity,
))
missing_time = _averages['_main'] - total_time_in_records
rows.push("\n{}\t{}\t{}\t{}".format(
display_num(missing_time / _averages['_ticks']),
display_num(1, 1),
display_num(missing_time / _averages['_ticks']),
'unprofiled',
))
rows.push("\n{}\t{}\t{}\t{}".format(
display_num(_averages['_main'] / _averages['_ticks']),
display_num(1, 1),
display_num(_averages['_main'] / _averages['_ticks']),
'total.main_loop'.format(Game.cpu.limit),
))
compile_time = _averages['_total'] - _averages['_main'] - _averages['memory.init'].time
rows.push("\n{}\t{}\t{}\t{}".format(
display_num(compile_time / _averages['_ticks']),
display_num(1, 1),
display_num(compile_time / _averages['_ticks']),
'total.compile'.format(Game.cpu.limit),
))
rows.push("\n{}\t{}\t{}\t{}".format(
display_num(_averages['_total'] / _averages['_ticks']),
display_num(1, 1),
display_num(_averages['_total'] / _averages['_ticks']),
'total (limit: {})'.format(Game.cpu.limit),
))
return "".join(rows)
def output_sub_records():
# type: () -> str
if not _sub_records['_ticks']:
return "no data collected"
rows = ["time/t\tcalls/t\taverage\tname"]
total_time_in_records = 0
for identity, obj in _(_sub_records).pairs().sortBy(lambda t: -t[1].time).value():
if identity.startswith('_'):
continue
if identity != 'memory.init' and identity != 'code.compile':
total_time_in_records += obj.time
rows.push("\n{}\t{}\t{}\t{}".format(
display_num(obj.time / _sub_records['_ticks']),
display_num(obj.calls / _sub_records['_ticks'], 1),
display_num(obj.time / obj.calls),
identity,
))
return "".join(rows)
| 32.566434 | 102 | 0.596414 | from typing import Any, Optional
from jstools.screeps import *
__pragma__('noalias', 'name')
__pragma__('noalias', 'undefined')
__pragma__('noalias', 'Infinity')
__pragma__('noalias', 'keys')
__pragma__('noalias', 'get')
__pragma__('noalias', 'set')
__pragma__('noalias', 'type')
__pragma__('noalias', 'update')
__pragma__('noalias', 'values')
ecording_now = False
_main_recording_now = False
_sub_recording_now = False
_single_record_start = 0
_sub_record_start = 0
_main_loop_record_start = 0
_averages = None
_sub_records = None
def prep_recording():
global _recording_now, _main_recording_now, _averages, _sub_recording_now, _sub_records
_averages = Memory['_averages']
if not _averages:
_averages = Memory['_averages'] = {}
_recording_now = not not _averages['_recording_now']
_main_recording_now = _recording_now or not not _averages['_only_recording_main']
_sub_recording_now = _averages['_sub_recording_now'] or False
if _sub_recording_now:
_sub_records = _averages['_sub_records']
if not _sub_records:
_sub_records = _averages['_sub_records'] = {}
def start_recording():
Memory['_averages']['_recording_now'] = True
def start_recording_main_only():
Memory['_averages']['_only_recording_main'] = True
def stop_recording():
Memory['_averages']['_recording_now'] = False
Memory['_averages']['_sub_recording_now'] = False
def start_sub_recording():
Memory['_averages']['_sub_recording_now'] = True
Memory['_averages']['_recording_now'] = True
def reset_records():
Memory['_averages'] = {}
def start_record():
if _recording_now:
global _single_record_start
_single_record_start = Game.cpu.getUsed()
def finish_record(identity):
if _recording_now and _single_record_start is not None:
end = Game.cpu.getUsed()
if identity in _averages:
_averages[identity].calls += 1
_averages[identity].time += end - _single_record_start
else:
_averages[identity] = {
'calls': 1,
'time': end - _single_record_start,
}
def start_sub_record():
if _sub_recording_now:
global _sub_record_start
_sub_record_start = Game.cpu.getUsed()
def finish_sub_record(identity):
global _sub_record_start
if _sub_recording_now and _sub_record_start is not None:
end = Game.cpu.getUsed()
if identity in _sub_records:
_sub_records[identity].calls += 1
_sub_records[identity].time += end - _sub_record_start
else:
_sub_records[identity] = {
'calls': 1,
'time': end - _sub_record_start,
}
_sub_record_start = None
def start_main_record():
if _main_recording_now:
global _main_loop_record_start
_main_loop_record_start = Game.cpu.getUsed()
def finish_main_record():
if _main_recording_now and _main_loop_record_start is not None:
end = Game.cpu.getUsed()
if '_main' in _averages:
_averages['_main'] += end - _main_loop_record_start
else:
_averages['_main'] = end - _main_loop_record_start
if '_total' in _averages:
_averages['_total'] += end
else:
_averages['_total'] = end
if '_ticks' in _averages:
_averages['_ticks'] += 1
else:
_averages['_ticks'] = 1
if _sub_recording_now:
if '_ticks' in _sub_records:
_sub_records['_ticks'] += 1
else:
_sub_records['_ticks'] = 1
def record_memory_amount(time):
if _main_recording_now:
if 'memory.init' in _averages:
_averages['memory.init'].calls += 1
_averages['memory.init'].time += time
else:
_averages['memory.init'] = {
'calls': 1,
'time': time,
}
def display_num(num, val = 2):
return num.toFixed(val)
def output_records_full():
rows = ["time\tcalls\ttime/t\tcalls/t\taverage\tname"]
total_time_in_records = 0
for identity, obj in _(_averages).pairs().sortBy(lambda t: -t[1].time).value():
if identity.startswith('_'):
continue
if identity != 'memory.init' and identity != 'code.compile':
total_time_in_records += obj.time
rows.push("\n{}\t{}\t{}\t{}\t{}\t{}".format(
display_num(obj.time),
display_num(obj.calls, 1),
display_num(obj.time / _averages['_ticks']),
display_num(obj.calls / _averages['_ticks']),
display_num(obj.time / obj.calls),
identity,
))
missing_time = _averages['_main'] - total_time_in_records
rows.push("\n{}\t{}\t{}\t{}\t{}\t{}".format(
display_num(missing_time),
display_num(_averages['_ticks']),
display_num(missing_time / _averages['_ticks']),
display_num(1),
display_num(missing_time / _averages['_ticks']),
'unprofiled',
))
rows.push("\n{}\t{}\t{}\t{}\t{}\t{}".format(
display_num(_averages['_main']),
display_num(_averages['_ticks']),
display_num(_averages['_main'] / _averages['_ticks']),
display_num(1),
display_num(_averages['_main'] / _averages['_ticks']),
'total.main_loop',
))
compile_time = _averages['_total'] - _averages['_main'] - _averages['memory.init']
rows.push("\n{}\t{}\t{}\t{}\t{}\t{}".format(
display_num(compile_time),
display_num(_averages['_ticks']),
display_num(compile_time / _averages['_ticks']),
display_num(1),
display_num(compile_time / _averages['_ticks']),
'total.compile'.format(Game.cpu.limit),
))
rows.push("\n{}\t{}\t{}\t{}\t{}\t{}".format(
display_num(_averages['_total']),
display_num(_averages['_ticks']),
display_num(_averages['_total'] / _averages['_ticks']),
display_num(1),
display_num(_averages['_total'] / _averages['_ticks']),
'total (limit: {})'.format(Game.cpu.limit),
))
return "".join(rows)
def output_records():
if not _averages['_ticks']:
return "no data collected"
rows = ["time/t\tcalls/t\taverage\tname"]
total_time_in_records = 0
for identity, obj in _(_averages).pairs().sortBy(lambda t: -t[1].time).value():
if identity.startswith('_'):
continue
if identity != 'memory.init' and identity != 'code.compile':
total_time_in_records += obj.time
rows.push("\n{}\t{}\t{}\t{}".format(
display_num(obj.time / _averages['_ticks']),
display_num(obj.calls / _averages['_ticks'], 1),
display_num(obj.time / obj.calls),
identity,
))
missing_time = _averages['_main'] - total_time_in_records
rows.push("\n{}\t{}\t{}\t{}".format(
display_num(missing_time / _averages['_ticks']),
display_num(1, 1),
display_num(missing_time / _averages['_ticks']),
'unprofiled',
))
rows.push("\n{}\t{}\t{}\t{}".format(
display_num(_averages['_main'] / _averages['_ticks']),
display_num(1, 1),
display_num(_averages['_main'] / _averages['_ticks']),
'total.main_loop'.format(Game.cpu.limit),
))
compile_time = _averages['_total'] - _averages['_main'] - _averages['memory.init'].time
rows.push("\n{}\t{}\t{}\t{}".format(
display_num(compile_time / _averages['_ticks']),
display_num(1, 1),
display_num(compile_time / _averages['_ticks']),
'total.compile'.format(Game.cpu.limit),
))
rows.push("\n{}\t{}\t{}\t{}".format(
display_num(_averages['_total'] / _averages['_ticks']),
display_num(1, 1),
display_num(_averages['_total'] / _averages['_ticks']),
'total (limit: {})'.format(Game.cpu.limit),
))
return "".join(rows)
def output_sub_records():
if not _sub_records['_ticks']:
return "no data collected"
rows = ["time/t\tcalls/t\taverage\tname"]
total_time_in_records = 0
for identity, obj in _(_sub_records).pairs().sortBy(lambda t: -t[1].time).value():
if identity.startswith('_'):
continue
if identity != 'memory.init' and identity != 'code.compile':
total_time_in_records += obj.time
rows.push("\n{}\t{}\t{}\t{}".format(
display_num(obj.time / _sub_records['_ticks']),
display_num(obj.calls / _sub_records['_ticks'], 1),
display_num(obj.time / obj.calls),
identity,
))
return "".join(rows)
| true | true |
f73767be82fe98bcf0a4b19467dff0a58c7aaa1d | 3,158 | py | Python | scripts/urdf_remove_pedestal.py | cmsc421/mobility_base_tools | e82436d1f241c1b01bbccfd4620564ace939e53d | [
"BSD-3-Clause"
] | null | null | null | scripts/urdf_remove_pedestal.py | cmsc421/mobility_base_tools | e82436d1f241c1b01bbccfd4620564ace939e53d | [
"BSD-3-Clause"
] | null | null | null | scripts/urdf_remove_pedestal.py | cmsc421/mobility_base_tools | e82436d1f241c1b01bbccfd4620564ace939e53d | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2014-2015, Dataspeed Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Dataspeed Inc. nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import rospy
import re
match_link = "(.*)<link[^>]*name\s*=\s*\"pedestal\"[^>]*>.*?[^<]<\/link>(.*)"
match_joint = "(.*)<joint[^>]*name\s*=\s*\"pedestal_fixed\"[^>]*>.*?[^<]<\/joint>(.*)"
if __name__ == '__main__':
try:
rospy.init_node('urdf_remove_pedestal', anonymous=True)
param_src = rospy.get_param('~param_src', "/robot_description")
param_dest = rospy.get_param('~param_dest', "/robot_description_mod")
urdf = rospy.get_param(param_src, "")
changed = False
if urdf:
obj = re.match(match_link, urdf, re.S)
if obj:
urdf = obj.group(1) + obj.group(2)
changed = True
rospy.loginfo("Removed link 'pedestal'")
else:
rospy.logwarn("Failed to find link 'pedestal'")
obj = re.match(match_joint, urdf, re.S)
if obj:
urdf = obj.group(1) + obj.group(2)
changed = True
rospy.loginfo("Removed joint 'pedestal_fixed'")
else:
rospy.logwarn("Failed to find joint 'pedestal_fixed'")
rospy.set_param(param_dest, urdf)
if changed:
rospy.loginfo("Updated parameter '%s'", param_dest)
else:
rospy.loginfo("Copied parameter '%s' to '%s'", param_src, param_dest)
else:
rospy.logwarn("Parameter '%s' not found", param_src)
except rospy.ROSInterruptException: pass
| 44.478873 | 86 | 0.652945 |
import rospy
import re
match_link = "(.*)<link[^>]*name\s*=\s*\"pedestal\"[^>]*>.*?[^<]<\/link>(.*)"
match_joint = "(.*)<joint[^>]*name\s*=\s*\"pedestal_fixed\"[^>]*>.*?[^<]<\/joint>(.*)"
if __name__ == '__main__':
try:
rospy.init_node('urdf_remove_pedestal', anonymous=True)
param_src = rospy.get_param('~param_src', "/robot_description")
param_dest = rospy.get_param('~param_dest', "/robot_description_mod")
urdf = rospy.get_param(param_src, "")
changed = False
if urdf:
obj = re.match(match_link, urdf, re.S)
if obj:
urdf = obj.group(1) + obj.group(2)
changed = True
rospy.loginfo("Removed link 'pedestal'")
else:
rospy.logwarn("Failed to find link 'pedestal'")
obj = re.match(match_joint, urdf, re.S)
if obj:
urdf = obj.group(1) + obj.group(2)
changed = True
rospy.loginfo("Removed joint 'pedestal_fixed'")
else:
rospy.logwarn("Failed to find joint 'pedestal_fixed'")
rospy.set_param(param_dest, urdf)
if changed:
rospy.loginfo("Updated parameter '%s'", param_dest)
else:
rospy.loginfo("Copied parameter '%s' to '%s'", param_src, param_dest)
else:
rospy.logwarn("Parameter '%s' not found", param_src)
except rospy.ROSInterruptException: pass
| true | true |
f737681bd7848c1db588ead12063fa7e408e5f39 | 386 | py | Python | strange_case/configurators/ignore.py | colinta/StrangeCase | 822634d986020bc35d49f5355ec4823690372cd5 | [
"BSD-2-Clause-FreeBSD"
] | 10 | 2015-11-05T01:38:29.000Z | 2021-06-09T06:35:27.000Z | strange_case/configurators/ignore.py | colinta/StrangeCase | 822634d986020bc35d49f5355ec4823690372cd5 | [
"BSD-2-Clause-FreeBSD"
] | 9 | 2019-07-17T15:21:43.000Z | 2021-09-08T01:08:00.000Z | strange_case/configurators/ignore.py | colinta/StrangeCase | 822634d986020bc35d49f5355ec4823690372cd5 | [
"BSD-2-Clause-FreeBSD"
] | 2 | 2016-06-01T01:51:22.000Z | 2017-03-16T00:11:39.000Z | import os
from fnmatch import fnmatch
def ignore(source_file, config):
    """Configurator that drops files matching an ignore pattern.

    :param source_file: path of the file being processed; only its
        basename is matched against the patterns.
    :param config: node config dict.  ``config['ignore']`` is either
        ``True`` (ignore everything) or a list of fnmatch-style glob
        patterns.
    :returns: ``None`` when the file should be ignored (which stops the
        configurator chain), otherwise *config* unchanged.
    """
    file_name = os.path.basename(source_file)
    patterns = config['ignore']
    if patterns is True:
        return None
    # The original used ``any(pattern for ... if fnmatch(...))``, which
    # treats a matching *falsy* pattern (empty string) as "no match";
    # testing the fnmatch result directly avoids that edge case.
    if patterns and any(fnmatch(file_name, pattern) for pattern in patterns):
        return None
    return config


# Defaults merged into the node config: hidden files and the project
# config file itself are ignored unless overridden.
ignore.defaults = {
    'ignore': [
        u'.*',
        u'config.yaml',
    ],
}
from fnmatch import fnmatch
def ignore(source_file, config):
file_name = os.path.basename(source_file)
if config['ignore'] is True or \
config['ignore'] and any(pattern for pattern in config['ignore'] if fnmatch(file_name, pattern)):
return
return config
ignore.defaults = {
'ignore': [
u'.*',
u'config.yaml',
],
}
| true | true |
f7376ae52743c8d3c9b9943a496eeee3e3505f29 | 33,887 | py | Python | cinder/brick/local_dev/lvm.py | ilay09/cinder | 86f084d42f18bd5971cc7a0df3e6d815543a472d | [
"Apache-2.0"
] | null | null | null | cinder/brick/local_dev/lvm.py | ilay09/cinder | 86f084d42f18bd5971cc7a0df3e6d815543a472d | [
"Apache-2.0"
] | null | null | null | cinder/brick/local_dev/lvm.py | ilay09/cinder | 86f084d42f18bd5971cc7a0df3e6d815543a472d | [
"Apache-2.0"
] | null | null | null | # Copyright 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
LVM class for performing LVM operations.
"""
import math
import os
import re
from os_brick import executor
from oslo_concurrency import processutils as putils
from oslo_log import log as logging
from oslo_utils import excutils
from six import moves
from cinder import exception
from cinder import utils
LOG = logging.getLogger(__name__)
class LVM(executor.Executor):
"""LVM object to enable various LVM related operations."""
LVM_CMD_PREFIX = ['env', 'LC_ALL=C']
_supports_pvs_ignoreskippedcluster = None
def __init__(self, vg_name, root_helper, create_vg=False,
physical_volumes=None, lvm_type='default',
executor=putils.execute, lvm_conf=None,
suppress_fd_warn=False):
"""Initialize the LVM object.
The LVM object is based on an LVM VolumeGroup, one instantiation
for each VolumeGroup you have/use.
:param vg_name: Name of existing VG or VG to create
:param root_helper: Execution root_helper method to use
:param create_vg: Indicates the VG doesn't exist
and we want to create it
:param physical_volumes: List of PVs to build VG on
:param lvm_type: VG and Volume type (default, or thin)
:param executor: Execute method to use, None uses common/processutils
:param suppress_fd_warn: Add suppress FD Warn to LVM env
"""
super(LVM, self).__init__(execute=executor, root_helper=root_helper)
self.vg_name = vg_name
self.pv_list = []
self.vg_size = 0.0
self.vg_free_space = 0.0
self.vg_lv_count = 0
self.vg_uuid = None
self.vg_thin_pool = None
self.vg_thin_pool_size = 0.0
self.vg_thin_pool_free_space = 0.0
self._supports_snapshot_lv_activation = None
self._supports_lvchange_ignoreskipactivation = None
self.vg_provisioned_capacity = 0.0
# Ensure LVM_SYSTEM_DIR has been added to LVM.LVM_CMD_PREFIX
# before the first LVM command is executed, and use the directory
# where the specified lvm_conf file is located as the value.
# NOTE(jdg): We use the temp var here because LVM_CMD_PREFIX is a
# class global and if you use append here, you'll literally just keep
# appending values to the global.
_lvm_cmd_prefix = ['env', 'LC_ALL=C']
if lvm_conf and os.path.isfile(lvm_conf):
lvm_sys_dir = os.path.dirname(lvm_conf)
_lvm_cmd_prefix.append('LVM_SYSTEM_DIR=' + lvm_sys_dir)
if suppress_fd_warn:
_lvm_cmd_prefix.append('LVM_SUPPRESS_FD_WARNINGS=1')
LVM.LVM_CMD_PREFIX = _lvm_cmd_prefix
if create_vg and physical_volumes is not None:
self.pv_list = physical_volumes
try:
self._create_vg(physical_volumes)
except putils.ProcessExecutionError as err:
LOG.exception('Error creating Volume Group')
LOG.error('Cmd :%s', err.cmd)
LOG.error('StdOut :%s', err.stdout)
LOG.error('StdErr :%s', err.stderr)
raise exception.VolumeGroupCreationFailed(vg_name=self.vg_name)
if self._vg_exists() is False:
LOG.error('Unable to locate Volume Group %s', vg_name)
raise exception.VolumeGroupNotFound(vg_name=vg_name)
# NOTE: we assume that the VG has been activated outside of Cinder
if lvm_type == 'thin':
pool_name = "%s-pool" % self.vg_name
if self.get_volume(pool_name) is None:
try:
self.create_thin_pool(pool_name)
except putils.ProcessExecutionError:
# Maybe we just lost the race against another copy of
# this driver being in init in parallel - e.g.
# cinder-volume and cinder-backup starting in parallel
if self.get_volume(pool_name) is None:
raise
self.vg_thin_pool = pool_name
self.activate_lv(self.vg_thin_pool)
self.pv_list = self.get_all_physical_volumes(root_helper, vg_name)
def _vg_exists(self):
    """Check whether this object's volume group is present on the system.

    Runs ``vgs`` and looks for ``self.vg_name`` among the reported names.

    :returns: True if the VG exists, False otherwise
    """
    vgs_cmd = LVM.LVM_CMD_PREFIX + ['vgs', '--noheadings',
                                    '-o', 'name', self.vg_name]
    (out, _err) = self._execute(*vgs_cmd,
                                root_helper=self._root_helper,
                                run_as_root=True)
    # Membership test replaces the original flag-variable bookkeeping.
    return out is not None and self.vg_name in out.split()
def _create_vg(self, pv_list):
cmd = ['vgcreate', self.vg_name, ','.join(pv_list)]
self._execute(*cmd, root_helper=self._root_helper, run_as_root=True)
def _get_thin_pool_free_space(self, vg_name, thin_pool_name):
"""Returns available thin pool free space.
:param vg_name: the vg where the pool is placed
:param thin_pool_name: the thin pool to gather info for
:returns: Free space in GB (float), calculated using data_percent
"""
cmd = LVM.LVM_CMD_PREFIX +\
['lvs', '--noheadings', '--unit=g',
'-o', 'size,data_percent', '--separator',
':', '--nosuffix']
# NOTE(gfidente): data_percent only applies to some types of LV so we
# make sure to append the actual thin pool name
cmd.append("/dev/%s/%s" % (vg_name, thin_pool_name))
free_space = 0.0
try:
(out, err) = self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
if out is not None:
out = out.strip()
data = out.split(':')
pool_size = float(data[0])
data_percent = float(data[1])
consumed_space = pool_size / 100 * data_percent
free_space = pool_size - consumed_space
free_space = round(free_space, 2)
except putils.ProcessExecutionError as err:
LOG.exception('Error querying thin pool about data_percent')
LOG.error('Cmd :%s', err.cmd)
LOG.error('StdOut :%s', err.stdout)
LOG.error('StdErr :%s', err.stderr)
return free_space
@staticmethod
def get_lvm_version(root_helper):
    """Query the installed LVM tools for their version.

    :param root_helper: root_helper to use for execute
    :returns: version 3-tuple, or None when no version line is printed
    """
    cmd = LVM.LVM_CMD_PREFIX + ['vgs', '--version']
    (out, _err) = putils.execute(*cmd,
                                 root_helper=root_helper,
                                 run_as_root=True)
    for line in out.split('\n'):
        if 'LVM version' not in line:
            continue
        # The token is formatted as:
        # major.minor.patchlevel(library API version)[-customisation]
        version = line.split()[2]
        match = re.search(r"(\d+)\.(\d+)\.(\d+).*", version)
        return tuple(int(part) for part in match.group(1, 2, 3))
    # No "LVM version" line in the output (original also returned None
    # here, implicitly).
    return None
@staticmethod
def supports_thin_provisioning(root_helper):
"""Static method to check for thin LVM support on a system.
:param root_helper: root_helper to use for execute
:returns: True if supported, False otherwise
"""
return LVM.get_lvm_version(root_helper) >= (2, 2, 95)
@property
def supports_snapshot_lv_activation(self):
"""Property indicating whether snap activation changes are supported.
Check for LVM version >= 2.02.91.
(LVM2 git: e8a40f6 Allow to activate snapshot)
:returns: True/False indicating support
"""
if self._supports_snapshot_lv_activation is not None:
return self._supports_snapshot_lv_activation
self._supports_snapshot_lv_activation = (
self.get_lvm_version(self._root_helper) >= (2, 2, 91))
return self._supports_snapshot_lv_activation
@property
def supports_lvchange_ignoreskipactivation(self):
"""Property indicating whether lvchange can ignore skip activation.
Check for LVM version >= 2.02.99.
(LVM2 git: ab789c1bc add --ignoreactivationskip to lvchange)
"""
if self._supports_lvchange_ignoreskipactivation is not None:
return self._supports_lvchange_ignoreskipactivation
self._supports_lvchange_ignoreskipactivation = (
self.get_lvm_version(self._root_helper) >= (2, 2, 99))
return self._supports_lvchange_ignoreskipactivation
@staticmethod
def supports_pvs_ignoreskippedcluster(root_helper):
"""Property indicating whether pvs supports --ignoreskippedcluster
Check for LVM version >= 2.02.103.
(LVM2 git: baf95bbff cmdline: Add --ignoreskippedcluster.
"""
if LVM._supports_pvs_ignoreskippedcluster is not None:
return LVM._supports_pvs_ignoreskippedcluster
LVM._supports_pvs_ignoreskippedcluster = (
LVM.get_lvm_version(root_helper) >= (2, 2, 103))
return LVM._supports_pvs_ignoreskippedcluster
@staticmethod
def get_lv_info(root_helper, vg_name=None, lv_name=None):
"""Retrieve info about LVs (all, in a VG, or a single LV).
:param root_helper: root_helper to use for execute
:param vg_name: optional, gathers info for only the specified VG
:param lv_name: optional, gathers info for only the specified LV
:returns: List of Dictionaries with LV info
"""
cmd = LVM.LVM_CMD_PREFIX + ['lvs', '--noheadings', '--unit=g',
'-o', 'vg_name,name,size', '--nosuffix']
if lv_name is not None and vg_name is not None:
cmd.append("%s/%s" % (vg_name, lv_name))
elif vg_name is not None:
cmd.append(vg_name)
try:
(out, _err) = putils.execute(*cmd,
root_helper=root_helper,
run_as_root=True)
except putils.ProcessExecutionError as err:
with excutils.save_and_reraise_exception(reraise=True) as ctx:
if "not found" in err.stderr or "Failed to find" in err.stderr:
ctx.reraise = False
LOG.info("Logical Volume not found when querying "
"LVM info. (vg_name=%(vg)s, lv_name=%(lv)s",
{'vg': vg_name, 'lv': lv_name})
out = None
lv_list = []
if out is not None:
volumes = out.split()
iterator = moves.zip(*[iter(volumes)] * 3) # pylint: disable=E1101
for vg, name, size in iterator:
lv_list.append({"vg": vg, "name": name, "size": size})
return lv_list
def get_volumes(self, lv_name=None):
"""Get all LV's associated with this instantiation (VG).
:returns: List of Dictionaries with LV info
"""
return self.get_lv_info(self._root_helper,
self.vg_name,
lv_name)
def get_volume(self, name):
    """Look up a single Logical Volume by name in this VG.

    :returns: dict representation of the LV if it exists, else None
    """
    # First exact-name hit, or None when the LV is absent.
    return next((lv for lv in self.get_volumes(name)
                 if lv['name'] == name), None)
@staticmethod
def get_all_physical_volumes(root_helper, vg_name=None):
"""Static method to get all PVs on a system.
:param root_helper: root_helper to use for execute
:param vg_name: optional, gathers info for only the specified VG
:returns: List of Dictionaries with PV info
"""
field_sep = '|'
cmd = LVM.LVM_CMD_PREFIX + ['pvs', '--noheadings',
'--unit=g',
'-o', 'vg_name,name,size,free',
'--separator', field_sep,
'--nosuffix']
if LVM.supports_pvs_ignoreskippedcluster(root_helper):
cmd.append('--ignoreskippedcluster')
(out, _err) = putils.execute(*cmd,
root_helper=root_helper,
run_as_root=True)
pvs = out.split()
if vg_name is not None:
pvs = [pv for pv in pvs if vg_name == pv.split(field_sep)[0]]
pv_list = []
for pv in pvs:
fields = pv.split(field_sep)
pv_list.append({'vg': fields[0],
'name': fields[1],
'size': float(fields[2]),
'available': float(fields[3])})
return pv_list
@staticmethod
def get_all_volume_groups(root_helper, vg_name=None):
"""Static method to get all VGs on a system.
:param root_helper: root_helper to use for execute
:param vg_name: optional, gathers info for only the specified VG
:returns: List of Dictionaries with VG info
"""
cmd = LVM.LVM_CMD_PREFIX + ['vgs', '--noheadings',
'--unit=g', '-o',
'name,size,free,lv_count,uuid',
'--separator', ':',
'--nosuffix']
if vg_name is not None:
cmd.append(vg_name)
(out, _err) = putils.execute(*cmd,
root_helper=root_helper,
run_as_root=True)
vg_list = []
if out is not None:
vgs = out.split()
for vg in vgs:
fields = vg.split(':')
vg_list.append({'name': fields[0],
'size': float(fields[1]),
'available': float(fields[2]),
'lv_count': int(fields[3]),
'uuid': fields[4]})
return vg_list
def update_volume_group_info(self):
"""Update VG info for this instantiation.
Used to update member fields of object and
provide a dict of info for caller.
:returns: Dictionaries of VG info
"""
vg_list = self.get_all_volume_groups(self._root_helper, self.vg_name)
if len(vg_list) != 1:
LOG.error('Unable to find VG: %s', self.vg_name)
raise exception.VolumeGroupNotFound(vg_name=self.vg_name)
self.vg_size = float(vg_list[0]['size'])
self.vg_free_space = float(vg_list[0]['available'])
self.vg_lv_count = int(vg_list[0]['lv_count'])
self.vg_uuid = vg_list[0]['uuid']
total_vols_size = 0.0
if self.vg_thin_pool is not None:
# NOTE(xyang): If providing only self.vg_name,
# get_lv_info will output info on the thin pool and all
# individual volumes.
# get_lv_info(self._root_helper, 'stack-vg')
# sudo lvs --noheadings --unit=g -o vg_name,name,size
# --nosuffix stack-vg
# stack-vg stack-pool 9.51
# stack-vg volume-13380d16-54c3-4979-9d22-172082dbc1a1 1.00
# stack-vg volume-629e13ab-7759-46a5-b155-ee1eb20ca892 1.00
# stack-vg volume-e3e6281c-51ee-464c-b1a7-db6c0854622c 1.00
#
# If providing both self.vg_name and self.vg_thin_pool,
# get_lv_info will output only info on the thin pool, but not
# individual volumes.
# get_lv_info(self._root_helper, 'stack-vg', 'stack-pool')
# sudo lvs --noheadings --unit=g -o vg_name,name,size
# --nosuffix stack-vg/stack-pool
# stack-vg stack-pool 9.51
#
# We need info on both the thin pool and the volumes,
# therefore we should provide only self.vg_name, but not
# self.vg_thin_pool here.
for lv in self.get_lv_info(self._root_helper,
self.vg_name):
lvsize = lv['size']
# get_lv_info runs "lvs" command with "--nosuffix".
# This removes "g" from "1.00g" and only outputs "1.00".
# Running "lvs" command without "--nosuffix" will output
# "1.00g" if "g" is the unit.
# Remove the unit if it is in lv['size'].
if not lv['size'][-1].isdigit():
lvsize = lvsize[:-1]
if lv['name'] == self.vg_thin_pool:
self.vg_thin_pool_size = lvsize
tpfs = self._get_thin_pool_free_space(self.vg_name,
self.vg_thin_pool)
self.vg_thin_pool_free_space = tpfs
else:
total_vols_size = total_vols_size + float(lvsize)
total_vols_size = round(total_vols_size, 2)
self.vg_provisioned_capacity = total_vols_size
def _calculate_thin_pool_size(self):
"""Calculates the correct size for a thin pool.
Ideally we would use 100% of the containing volume group and be done.
But the 100%VG notation to lvcreate is not implemented and thus cannot
be used. See https://bugzilla.redhat.com/show_bug.cgi?id=998347
Further, some amount of free space must remain in the volume group for
metadata for the contained logical volumes. The exact amount depends
on how much volume sharing you expect.
:returns: An lvcreate-ready string for the number of calculated bytes.
"""
# make sure volume group information is current
self.update_volume_group_info()
# leave 5% free for metadata
return "%sg" % (self.vg_free_space * 0.95)
def create_thin_pool(self, name=None, size_str=None):
"""Creates a thin provisioning pool for this VG.
The syntax here is slightly different than the default
lvcreate -T, so we'll just write a custom cmd here
and do it.
:param name: Name to use for pool, default is "<vg-name>-pool"
:param size_str: Size to allocate for pool, default is entire VG
:returns: The size string passed to the lvcreate command
"""
if not self.supports_thin_provisioning(self._root_helper):
LOG.error('Requested to setup thin provisioning, '
'however current LVM version does not '
'support it.')
return None
if name is None:
name = '%s-pool' % self.vg_name
vg_pool_name = '%s/%s' % (self.vg_name, name)
if not size_str:
size_str = self._calculate_thin_pool_size()
cmd = LVM.LVM_CMD_PREFIX + ['lvcreate', '-T', '-L', size_str,
vg_pool_name]
LOG.debug("Creating thin pool '%(pool)s' with size %(size)s of "
"total %(free)sg", {'pool': vg_pool_name,
'size': size_str,
'free': self.vg_free_space})
self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
self.vg_thin_pool = name
return size_str
def create_volume(self, name, size_str, lv_type='default', mirror_count=0):
"""Creates a logical volume on the object's VG.
:param name: Name to use when creating Logical Volume
:param size_str: Size to use when creating Logical Volume
:param lv_type: Type of Volume (default or thin)
:param mirror_count: Use LVM mirroring with specified count
"""
if lv_type == 'thin':
pool_path = '%s/%s' % (self.vg_name, self.vg_thin_pool)
cmd = LVM.LVM_CMD_PREFIX + ['lvcreate', '-T', '-V', size_str, '-n',
name, pool_path]
else:
cmd = LVM.LVM_CMD_PREFIX + ['lvcreate', '-n', name, self.vg_name,
'-L', size_str]
if mirror_count > 0:
cmd.extend(['-m', mirror_count, '--nosync',
'--mirrorlog', 'mirrored'])
terras = int(size_str[:-1]) / 1024.0
if terras >= 1.5:
rsize = int(2 ** math.ceil(math.log(terras) / math.log(2)))
# NOTE(vish): Next power of two for region size. See:
# http://red.ht/U2BPOD
cmd.extend(['-R', str(rsize)])
try:
self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
except putils.ProcessExecutionError as err:
LOG.exception('Error creating Volume')
LOG.error('Cmd :%s', err.cmd)
LOG.error('StdOut :%s', err.stdout)
LOG.error('StdErr :%s', err.stderr)
LOG.error('Current state: %s',
self.get_all_volume_groups(self._root_helper))
raise
@utils.retry(putils.ProcessExecutionError)
def create_lv_snapshot(self, name, source_lv_name, lv_type='default'):
"""Creates a snapshot of a logical volume.
:param name: Name to assign to new snapshot
:param source_lv_name: Name of Logical Volume to snapshot
:param lv_type: Type of LV (default or thin)
"""
source_lvref = self.get_volume(source_lv_name)
if source_lvref is None:
LOG.error("Trying to create snapshot by non-existent LV: %s",
source_lv_name)
raise exception.VolumeDeviceNotFound(device=source_lv_name)
cmd = LVM.LVM_CMD_PREFIX + ['lvcreate', '--name', name, '--snapshot',
'%s/%s' % (self.vg_name, source_lv_name)]
if lv_type != 'thin':
size = source_lvref['size']
cmd.extend(['-L', '%sg' % (size)])
try:
self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
except putils.ProcessExecutionError as err:
LOG.exception('Error creating snapshot')
LOG.error('Cmd :%s', err.cmd)
LOG.error('StdOut :%s', err.stdout)
LOG.error('StdErr :%s', err.stderr)
raise
def _mangle_lv_name(self, name):
    """Return *name*, prefixed with '_' when it begins with 'snapshot'.

    Linux LVM reserves names starting with "snapshot", so such a volume
    name cannot be created verbatim; the mangled form avoids the clash.
    """
    return '_' + name if name.startswith('snapshot') else name
def _lv_is_active(self, name):
cmd = LVM.LVM_CMD_PREFIX + ['lvdisplay', '--noheading', '-C', '-o',
'Attr', '%s/%s' % (self.vg_name, name)]
out, _err = self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
if out:
out = out.strip()
if (out[4] == 'a'):
return True
return False
def deactivate_lv(self, name):
lv_path = self.vg_name + '/' + self._mangle_lv_name(name)
cmd = ['lvchange', '-a', 'n']
cmd.append(lv_path)
try:
self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
except putils.ProcessExecutionError as err:
LOG.exception('Error deactivating LV')
LOG.error('Cmd :%s', err.cmd)
LOG.error('StdOut :%s', err.stdout)
LOG.error('StdErr :%s', err.stderr)
raise
# Wait until lv is deactivated to return in
# order to prevent a race condition.
self._wait_for_volume_deactivation(name)
@utils.retry(exceptions=exception.VolumeNotDeactivated, retries=5,
backoff_rate=2)
def _wait_for_volume_deactivation(self, name):
LOG.debug("Checking to see if volume %s has been deactivated.",
name)
if self._lv_is_active(name):
LOG.debug("Volume %s is still active.", name)
raise exception.VolumeNotDeactivated(name=name)
else:
LOG.debug("Volume %s has been deactivated.", name)
def activate_lv(self, name, is_snapshot=False, permanent=False):
"""Ensure that logical volume/snapshot logical volume is activated.
:param name: Name of LV to activate
:param is_snapshot: whether LV is a snapshot
:param permanent: whether we should drop skipactivation flag
:raises putils.ProcessExecutionError:
"""
# This is a no-op if requested for a snapshot on a version
# of LVM that doesn't support snapshot activation.
# (Assume snapshot LV is always active.)
if is_snapshot and not self.supports_snapshot_lv_activation:
return
lv_path = self.vg_name + '/' + self._mangle_lv_name(name)
# Must pass --yes to activate both the snap LV and its origin LV.
# Otherwise lvchange asks if you would like to do this interactively,
# and fails.
cmd = ['lvchange', '-a', 'y', '--yes']
if self.supports_lvchange_ignoreskipactivation:
cmd.append('-K')
# If permanent=True is specified, drop the skipactivation flag in
# order to make this LV automatically activated after next reboot.
if permanent:
cmd += ['-k', 'n']
cmd.append(lv_path)
try:
self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
except putils.ProcessExecutionError as err:
LOG.exception('Error activating LV')
LOG.error('Cmd :%s', err.cmd)
LOG.error('StdOut :%s', err.stdout)
LOG.error('StdErr :%s', err.stderr)
raise
@utils.retry(putils.ProcessExecutionError)
def delete(self, name):
"""Delete logical volume or snapshot.
:param name: Name of LV to delete
"""
def run_udevadm_settle():
self._execute('udevadm', 'settle',
root_helper=self._root_helper, run_as_root=True,
check_exit_code=False)
# LV removal seems to be a race with other writers or udev in
# some cases (see LP #1270192), so we enable retry deactivation
LVM_CONFIG = 'activation { retry_deactivation = 1} '
try:
self._execute(
'lvremove',
'--config', LVM_CONFIG,
'-f',
'%s/%s' % (self.vg_name, name),
root_helper=self._root_helper, run_as_root=True)
except putils.ProcessExecutionError as err:
LOG.debug('Error reported running lvremove: CMD: %(command)s, '
'RESPONSE: %(response)s',
{'command': err.cmd, 'response': err.stderr})
LOG.debug('Attempting udev settle and retry of lvremove...')
run_udevadm_settle()
# The previous failing lvremove -f might leave behind
# suspended devices; when lvmetad is not available, any
# further lvm command will block forever.
# Therefore we need to skip suspended devices on retry.
LVM_CONFIG += 'devices { ignore_suspended_devices = 1}'
self._execute(
'lvremove',
'--config', LVM_CONFIG,
'-f',
'%s/%s' % (self.vg_name, name),
root_helper=self._root_helper, run_as_root=True)
LOG.debug('Successfully deleted volume: %s after '
'udev settle.', name)
def revert(self, snapshot_name):
"""Revert an LV from snapshot.
:param snapshot_name: Name of snapshot to revert
"""
self._execute('lvconvert', '--merge',
snapshot_name, root_helper=self._root_helper,
run_as_root=True)
def lv_has_snapshot(self, name):
    """Return True when the LV is the origin of at least one snapshot.

    The first character of the lvdisplay Attr field is 'o'/'O' for
    snapshot origins.
    """
    cmd = LVM.LVM_CMD_PREFIX + ['lvdisplay', '--noheading', '-C', '-o',
                                'Attr', '%s/%s' % (self.vg_name, name)]
    out, _err = self._execute(*cmd,
                              root_helper=self._root_helper,
                              run_as_root=True)
    if not out:
        return False
    attr = out.strip()
    return attr[0] in ('o', 'O')
def lv_is_snapshot(self, name):
"""Return True if LV is a snapshot, False otherwise."""
cmd = LVM.LVM_CMD_PREFIX + ['lvdisplay', '--noheading', '-C', '-o',
'Attr', '%s/%s' % (self.vg_name, name)]
out, _err = self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
out = out.strip()
if out:
if (out[0] == 's'):
return True
return False
def lv_is_open(self, name):
"""Return True if LV is currently open, False otherwise."""
cmd = LVM.LVM_CMD_PREFIX + ['lvdisplay', '--noheading', '-C', '-o',
'Attr', '%s/%s' % (self.vg_name, name)]
out, _err = self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
out = out.strip()
if out:
if (out[5] == 'o'):
return True
return False
def lv_get_origin(self, name):
"""Return the origin of an LV that is a snapshot, None otherwise."""
cmd = LVM.LVM_CMD_PREFIX + ['lvdisplay', '--noheading', '-C', '-o',
'Origin', '%s/%s' % (self.vg_name, name)]
out, _err = self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
out = out.strip()
if out:
return out
return None
def extend_volume(self, lv_name, new_size):
"""Extend the size of an existing volume."""
# Volumes with snaps have attributes 'o' or 'O' and will be
# deactivated, but Thin Volumes with snaps have attribute 'V'
# and won't be deactivated because the lv_has_snapshot method looks
# for 'o' or 'O'
has_snapshot = self.lv_has_snapshot(lv_name)
if has_snapshot:
self.deactivate_lv(lv_name)
try:
cmd = LVM.LVM_CMD_PREFIX + ['lvextend', '-L', new_size,
'%s/%s' % (self.vg_name, lv_name)]
self._execute(*cmd, root_helper=self._root_helper,
run_as_root=True)
except putils.ProcessExecutionError as err:
LOG.exception('Error extending Volume')
LOG.error('Cmd :%s', err.cmd)
LOG.error('StdOut :%s', err.stdout)
LOG.error('StdErr :%s', err.stderr)
raise
if has_snapshot:
self.activate_lv(lv_name)
def vg_mirror_free_space(self, mirror_count):
    """Calculate the free capacity usable for mirrored LVs.

    Greedy simulation: repeatedly take the smallest PV that still has
    space, count its capacity as usable, and charge the same amount
    against the ``mirror_count`` largest PVs (one mirror copy each).
    Stops once no more than ``mirror_count`` PVs have free space, since
    a mirror needs ``mirror_count + 1`` distinct devices.

    :param mirror_count: number of mirror copies (in addition to the
        original)
    :returns: usable free capacity in GB (float)
    """
    free_capacity = 0.0
    # Comprehension replaces the original manual append loop.
    disks = [float(pv['available']) for pv in self.pv_list]
    while True:
        disks = sorted((a for a in disks if a > 0.0), reverse=True)
        if len(disks) <= mirror_count:
            break
        # consume the smallest disk
        disk = disks.pop()
        # match extents for each mirror on the largest disks
        # (original wrapped range() in a needless list()).
        for index in range(mirror_count):
            disks[index] -= disk
        free_capacity += disk
    return free_capacity
def vg_mirror_size(self, mirror_count):
    """Return VG free space divided among ``mirror_count + 1`` copies (GB)."""
    return self.vg_free_space / (mirror_count + 1)
def rename_volume(self, lv_name, new_name):
"""Change the name of an existing volume."""
try:
self._execute('lvrename', self.vg_name, lv_name, new_name,
root_helper=self._root_helper,
run_as_root=True)
except putils.ProcessExecutionError as err:
LOG.exception('Error renaming logical volume')
LOG.error('Cmd :%s', err.cmd)
LOG.error('StdOut :%s', err.stdout)
LOG.error('StdErr :%s', err.stderr)
raise
| 39.312065 | 79 | 0.556408 |
import math
import os
import re
from os_brick import executor
from oslo_concurrency import processutils as putils
from oslo_log import log as logging
from oslo_utils import excutils
from six import moves
from cinder import exception
from cinder import utils
LOG = logging.getLogger(__name__)
class LVM(executor.Executor):
LVM_CMD_PREFIX = ['env', 'LC_ALL=C']
_supports_pvs_ignoreskippedcluster = None
def __init__(self, vg_name, root_helper, create_vg=False,
physical_volumes=None, lvm_type='default',
executor=putils.execute, lvm_conf=None,
suppress_fd_warn=False):
super(LVM, self).__init__(execute=executor, root_helper=root_helper)
self.vg_name = vg_name
self.pv_list = []
self.vg_size = 0.0
self.vg_free_space = 0.0
self.vg_lv_count = 0
self.vg_uuid = None
self.vg_thin_pool = None
self.vg_thin_pool_size = 0.0
self.vg_thin_pool_free_space = 0.0
self._supports_snapshot_lv_activation = None
self._supports_lvchange_ignoreskipactivation = None
self.vg_provisioned_capacity = 0.0
# appending values to the global.
_lvm_cmd_prefix = ['env', 'LC_ALL=C']
if lvm_conf and os.path.isfile(lvm_conf):
lvm_sys_dir = os.path.dirname(lvm_conf)
_lvm_cmd_prefix.append('LVM_SYSTEM_DIR=' + lvm_sys_dir)
if suppress_fd_warn:
_lvm_cmd_prefix.append('LVM_SUPPRESS_FD_WARNINGS=1')
LVM.LVM_CMD_PREFIX = _lvm_cmd_prefix
if create_vg and physical_volumes is not None:
self.pv_list = physical_volumes
try:
self._create_vg(physical_volumes)
except putils.ProcessExecutionError as err:
LOG.exception('Error creating Volume Group')
LOG.error('Cmd :%s', err.cmd)
LOG.error('StdOut :%s', err.stdout)
LOG.error('StdErr :%s', err.stderr)
raise exception.VolumeGroupCreationFailed(vg_name=self.vg_name)
if self._vg_exists() is False:
LOG.error('Unable to locate Volume Group %s', vg_name)
raise exception.VolumeGroupNotFound(vg_name=vg_name)
# NOTE: we assume that the VG has been activated outside of Cinder
if lvm_type == 'thin':
pool_name = "%s-pool" % self.vg_name
if self.get_volume(pool_name) is None:
try:
self.create_thin_pool(pool_name)
except putils.ProcessExecutionError:
# Maybe we just lost the race against another copy of
# this driver being in init in parallel - e.g.
# cinder-volume and cinder-backup starting in parallel
if self.get_volume(pool_name) is None:
raise
self.vg_thin_pool = pool_name
self.activate_lv(self.vg_thin_pool)
self.pv_list = self.get_all_physical_volumes(root_helper, vg_name)
def _vg_exists(self):
exists = False
cmd = LVM.LVM_CMD_PREFIX + ['vgs', '--noheadings',
'-o', 'name', self.vg_name]
(out, _err) = self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
if out is not None:
volume_groups = out.split()
if self.vg_name in volume_groups:
exists = True
return exists
def _create_vg(self, pv_list):
cmd = ['vgcreate', self.vg_name, ','.join(pv_list)]
self._execute(*cmd, root_helper=self._root_helper, run_as_root=True)
def _get_thin_pool_free_space(self, vg_name, thin_pool_name):
cmd = LVM.LVM_CMD_PREFIX +\
['lvs', '--noheadings', '--unit=g',
'-o', 'size,data_percent', '--separator',
':', '--nosuffix']
# NOTE(gfidente): data_percent only applies to some types of LV so we
# make sure to append the actual thin pool name
cmd.append("/dev/%s/%s" % (vg_name, thin_pool_name))
free_space = 0.0
try:
(out, err) = self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
if out is not None:
out = out.strip()
data = out.split(':')
pool_size = float(data[0])
data_percent = float(data[1])
consumed_space = pool_size / 100 * data_percent
free_space = pool_size - consumed_space
free_space = round(free_space, 2)
except putils.ProcessExecutionError as err:
LOG.exception('Error querying thin pool about data_percent')
LOG.error('Cmd :%s', err.cmd)
LOG.error('StdOut :%s', err.stdout)
LOG.error('StdErr :%s', err.stderr)
return free_space
@staticmethod
def get_lvm_version(root_helper):
cmd = LVM.LVM_CMD_PREFIX + ['vgs', '--version']
(out, _err) = putils.execute(*cmd,
root_helper=root_helper,
run_as_root=True)
lines = out.split('\n')
for line in lines:
if 'LVM version' in line:
version_list = line.split()
# NOTE(gfidente): version is formatted as follows:
# major.minor.patchlevel(library API version)[-customisation]
version = version_list[2]
version_filter = r"(\d+)\.(\d+)\.(\d+).*"
r = re.search(version_filter, version)
version_tuple = tuple(map(int, r.group(1, 2, 3)))
return version_tuple
@staticmethod
def supports_thin_provisioning(root_helper):
return LVM.get_lvm_version(root_helper) >= (2, 2, 95)
@property
def supports_snapshot_lv_activation(self):
if self._supports_snapshot_lv_activation is not None:
return self._supports_snapshot_lv_activation
self._supports_snapshot_lv_activation = (
self.get_lvm_version(self._root_helper) >= (2, 2, 91))
return self._supports_snapshot_lv_activation
@property
def supports_lvchange_ignoreskipactivation(self):
if self._supports_lvchange_ignoreskipactivation is not None:
return self._supports_lvchange_ignoreskipactivation
self._supports_lvchange_ignoreskipactivation = (
self.get_lvm_version(self._root_helper) >= (2, 2, 99))
return self._supports_lvchange_ignoreskipactivation
@staticmethod
def supports_pvs_ignoreskippedcluster(root_helper):
if LVM._supports_pvs_ignoreskippedcluster is not None:
return LVM._supports_pvs_ignoreskippedcluster
LVM._supports_pvs_ignoreskippedcluster = (
LVM.get_lvm_version(root_helper) >= (2, 2, 103))
return LVM._supports_pvs_ignoreskippedcluster
@staticmethod
def get_lv_info(root_helper, vg_name=None, lv_name=None):
    """List logical volumes via ``lvs``.

    :param root_helper: root helper string used for privilege escalation
    :param vg_name: optional VG name to restrict the listing
    :param lv_name: optional LV name; only used together with vg_name
    :returns: list of ``{'vg', 'name', 'size'}`` dicts; sizes are in
              GiB with the unit suffix stripped (``--nosuffix``).
              Empty list if the requested LV does not exist.
    """
    cmd = LVM.LVM_CMD_PREFIX + ['lvs', '--noheadings', '--unit=g',
                                '-o', 'vg_name,name,size', '--nosuffix']
    if lv_name is not None and vg_name is not None:
        cmd.append("%s/%s" % (vg_name, lv_name))
    elif vg_name is not None:
        cmd.append(vg_name)

    try:
        (out, _err) = putils.execute(*cmd,
                                     root_helper=root_helper,
                                     run_as_root=True)
    except putils.ProcessExecutionError as err:
        # A missing LV is not an error here: log and fall through with
        # out=None so an empty list is returned.  Any other failure
        # is re-raised.
        with excutils.save_and_reraise_exception(reraise=True) as ctx:
            if "not found" in err.stderr or "Failed to find" in err.stderr:
                ctx.reraise = False
                LOG.info("Logical Volume not found when querying "
                         "LVM info. (vg_name=%(vg)s, lv_name=%(lv)s",
                         {'vg': vg_name, 'lv': lv_name})
                out = None

    lv_list = []
    if out is not None:
        volumes = out.split()
        # Group the whitespace-separated tokens into (vg, name, size)
        # triples using the shared-iterator zip idiom.
        iterator = moves.zip(*[iter(volumes)] * 3)  # pylint: disable=E1101
        for vg, name, size in iterator:
            lv_list.append({"vg": vg, "name": name, "size": size})

    return lv_list
def get_volumes(self, lv_name=None):
    """Return LV info dicts for this object's VG (optionally one LV)."""
    return self.get_lv_info(self._root_helper,
                            self.vg_name,
                            lv_name)
def get_volume(self, name):
    """Return the info dict for the LV called *name*, or None if absent."""
    matches = (lv for lv in self.get_volumes(name) if lv['name'] == name)
    return next(matches, None)
@staticmethod
def get_all_physical_volumes(root_helper, vg_name=None):
    """List physical volumes via ``pvs``.

    :param root_helper: root helper string used for privilege escalation
    :param vg_name: optional VG name; when given, only PVs belonging to
                    that VG are returned
    :returns: list of ``{'vg', 'name', 'size', 'available'}`` dicts;
              sizes are floats in GiB (``--nosuffix``)
    """
    field_sep = '|'
    cmd = LVM.LVM_CMD_PREFIX + ['pvs', '--noheadings',
                                '--unit=g',
                                '-o', 'vg_name,name,size,free',
                                '--separator', field_sep,
                                '--nosuffix']
    # Skip PVs on skipped clustered VGs when the flag is supported,
    # otherwise pvs would fail/hang on them.
    if LVM.supports_pvs_ignoreskippedcluster(root_helper):
        cmd.append('--ignoreskippedcluster')

    (out, _err) = putils.execute(*cmd,
                                 root_helper=root_helper,
                                 run_as_root=True)

    pvs = out.split()
    if vg_name is not None:
        pvs = [pv for pv in pvs if vg_name == pv.split(field_sep)[0]]

    pv_list = []
    for pv in pvs:
        fields = pv.split(field_sep)
        pv_list.append({'vg': fields[0],
                        'name': fields[1],
                        'size': float(fields[2]),
                        'available': float(fields[3])})
    return pv_list
@staticmethod
def get_all_volume_groups(root_helper, vg_name=None):
    """List volume groups via ``vgs``.

    :param root_helper: root helper string used for privilege escalation
    :param vg_name: optional VG name to restrict the listing
    :returns: list of ``{'name', 'size', 'available', 'lv_count',
              'uuid'}`` dicts; sizes are floats in GiB (``--nosuffix``)
    """
    cmd = LVM.LVM_CMD_PREFIX + ['vgs', '--noheadings',
                                '--unit=g', '-o',
                                'name,size,free,lv_count,uuid',
                                '--separator', ':',
                                '--nosuffix']
    if vg_name is not None:
        cmd.append(vg_name)

    (out, _err) = putils.execute(*cmd,
                                 root_helper=root_helper,
                                 run_as_root=True)

    vg_list = []
    if out is not None:
        vgs = out.split()
        for vg in vgs:
            fields = vg.split(':')
            vg_list.append({'name': fields[0],
                            'size': float(fields[1]),
                            'available': float(fields[2]),
                            'lv_count': int(fields[3]),
                            'uuid': fields[4]})

    return vg_list
def update_volume_group_info(self):
    """Refresh cached VG statistics on this instance.

    Updates vg_size, vg_free_space, vg_lv_count and vg_uuid from
    ``vgs`` output.  When a thin pool is configured it additionally
    updates the thin pool size/free space and the total provisioned
    capacity of all non-pool LVs.

    :raises exception.VolumeGroupNotFound: if the VG cannot be listed
    """
    vg_list = self.get_all_volume_groups(self._root_helper, self.vg_name)

    if len(vg_list) != 1:
        LOG.error('Unable to find VG: %s', self.vg_name)
        raise exception.VolumeGroupNotFound(vg_name=self.vg_name)

    self.vg_size = float(vg_list[0]['size'])
    self.vg_free_space = float(vg_list[0]['available'])
    self.vg_lv_count = int(vg_list[0]['lv_count'])
    self.vg_uuid = vg_list[0]['uuid']

    total_vols_size = 0.0
    if self.vg_thin_pool is not None:
        # NOTE(xyang): If providing only self.vg_name,
        # get_lv_info will output info on the thin pool and all
        # individual volumes.
        # get_lv_info(self._root_helper, 'stack-vg')
        # sudo lvs --noheadings --unit=g -o vg_name,name,size
        # --nosuffix stack-vg
        # stack-vg stack-pool               9.51
        # stack-vg volume-13380d16-54c3-4979-9d22-172082dbc1a1  1.00
        # stack-vg volume-629e13ab-7759-46a5-b155-ee1eb20ca892  1.00
        # stack-vg volume-e3e6281c-51ee-464c-b1a7-db6c0854622c  1.00
        #
        # If providing both self.vg_name and self.vg_thin_pool,
        # get_lv_info will output only info on the thin pool, but not
        # individual volumes.
        # get_lv_info(self._root_helper, 'stack-vg', 'stack-pool')
        # sudo lvs --noheadings --unit=g -o vg_name,name,size
        # --nosuffix stack-vg/stack-pool
        # stack-vg stack-pool               9.51
        #
        # We need info on both the thin pool and the volumes,
        # therefore we should provide only self.vg_name, but not
        # self.vg_thin_pool here.
        for lv in self.get_lv_info(self._root_helper,
                                   self.vg_name):
            lvsize = lv['size']
            # get_lv_info runs "lvs" command with "--nosuffix".
            # This removes "g" from "1.00g" and only outputs "1.00".
            # Running "lvs" command without "--nosuffix" will output
            # "1.00g" if "g" is the unit.
            # Remove the unit if it is in lv['size'].
            if not lv['size'][-1].isdigit():
                lvsize = lvsize[:-1]
            if lv['name'] == self.vg_thin_pool:
                self.vg_thin_pool_size = lvsize
                tpfs = self._get_thin_pool_free_space(self.vg_name,
                                                      self.vg_thin_pool)
                self.vg_thin_pool_free_space = tpfs
            else:
                total_vols_size = total_vols_size + float(lvsize)
        total_vols_size = round(total_vols_size, 2)

    self.vg_provisioned_capacity = total_vols_size
def _calculate_thin_pool_size(self):
    """Return a size string ("<n>g") for a new thin pool.

    Uses 95% of the current VG free space, leaving 5% for metadata.
    """
    # make sure volume group information is current
    self.update_volume_group_info()

    # leave 5% free for metadata
    return "%sg" % (self.vg_free_space * 0.95)
def create_thin_pool(self, name=None, size_str=None):
    """Create an LVM thin pool in this VG.

    :param name: pool name; defaults to '<vg_name>-pool'
    :param size_str: pool size string (e.g. '10g'); defaults to 95% of
                     the VG free space via _calculate_thin_pool_size
    :returns: the size string used, or None if the installed LVM does
              not support thin provisioning
    """
    if not self.supports_thin_provisioning(self._root_helper):
        LOG.error('Requested to setup thin provisioning, '
                  'however current LVM version does not '
                  'support it.')
        return None

    if name is None:
        name = '%s-pool' % self.vg_name

    vg_pool_name = '%s/%s' % (self.vg_name, name)

    if not size_str:
        size_str = self._calculate_thin_pool_size()

    cmd = LVM.LVM_CMD_PREFIX + ['lvcreate', '-T', '-L', size_str,
                                vg_pool_name]
    LOG.debug("Creating thin pool '%(pool)s' with size %(size)s of "
              "total %(free)sg", {'pool': vg_pool_name,
                                  'size': size_str,
                                  'free': self.vg_free_space})

    self._execute(*cmd,
                  root_helper=self._root_helper,
                  run_as_root=True)

    # Remember the pool so later thin LV creation can target it.
    self.vg_thin_pool = name
    return size_str
def create_volume(self, name, size_str, lv_type='default', mirror_count=0):
    """Create a logical volume via ``lvcreate``.

    :param name: LV name
    :param size_str: size string, e.g. '1g'
    :param lv_type: 'thin' to allocate from self.vg_thin_pool,
                    anything else for a regular LV
    :param mirror_count: number of mirrors for a regular LV (0 = none)
    :raises putils.ProcessExecutionError: if lvcreate fails (logged
            together with the current VG state before re-raising)
    """
    if lv_type == 'thin':
        pool_path = '%s/%s' % (self.vg_name, self.vg_thin_pool)
        cmd = LVM.LVM_CMD_PREFIX + ['lvcreate', '-T', '-V', size_str, '-n',
                                    name, pool_path]
    else:
        cmd = LVM.LVM_CMD_PREFIX + ['lvcreate', '-n', name, self.vg_name,
                                    '-L', size_str]

    if mirror_count > 0:
        cmd.extend(['-m', mirror_count, '--nosync',
                    '--mirrorlog', 'mirrored'])
        terras = int(size_str[:-1]) / 1024.0
        if terras >= 1.5:
            rsize = int(2 ** math.ceil(math.log(terras) / math.log(2)))
            # NOTE(vish): Next power of two for region size. See:
            #             http://red.ht/U2BPOD
            cmd.extend(['-R', str(rsize)])

    try:
        self._execute(*cmd,
                      root_helper=self._root_helper,
                      run_as_root=True)
    except putils.ProcessExecutionError as err:
        LOG.exception('Error creating Volume')
        LOG.error('Cmd     :%s', err.cmd)
        LOG.error('StdOut  :%s', err.stdout)
        LOG.error('StdErr  :%s', err.stderr)
        LOG.error('Current state: %s',
                  self.get_all_volume_groups(self._root_helper))
        raise
@utils.retry(putils.ProcessExecutionError)
def create_lv_snapshot(self, name, source_lv_name, lv_type='default'):
    """Create a snapshot of an existing LV.

    For non-thin LVs the snapshot is given the same size as the
    source.  Retried on ProcessExecutionError via the decorator.

    :param name: snapshot LV name
    :param source_lv_name: LV to snapshot
    :param lv_type: 'thin' to skip the explicit size argument
    :raises exception.VolumeDeviceNotFound: if the source LV is missing
    :raises putils.ProcessExecutionError: if lvcreate fails
    """
    source_lvref = self.get_volume(source_lv_name)
    if source_lvref is None:
        LOG.error("Trying to create snapshot by non-existent LV: %s",
                  source_lv_name)
        raise exception.VolumeDeviceNotFound(device=source_lv_name)
    cmd = LVM.LVM_CMD_PREFIX + ['lvcreate', '--name', name, '--snapshot',
                                '%s/%s' % (self.vg_name, source_lv_name)]
    if lv_type != 'thin':
        size = source_lvref['size']
        cmd.extend(['-L', '%sg' % (size)])

    try:
        self._execute(*cmd,
                      root_helper=self._root_helper,
                      run_as_root=True)
    except putils.ProcessExecutionError as err:
        LOG.exception('Error creating snapshot')
        LOG.error('Cmd     :%s', err.cmd)
        LOG.error('StdOut  :%s', err.stdout)
        LOG.error('StdErr  :%s', err.stderr)
        raise
def _mangle_lv_name(self, name):
# Linux LVM reserves name that starts with snapshot, so that
# such volume name can't be created. Mangle it.
if not name.startswith('snapshot'):
return name
return '_' + name
def _lv_is_active(self, name):
    """Return True if the LV's Attr field marks it active.

    Runs ``lvdisplay -C -o Attr`` and checks the 5th attribute
    character ('a' = active).
    """
    cmd = LVM.LVM_CMD_PREFIX + ['lvdisplay', '--noheading', '-C', '-o',
                                'Attr', '%s/%s' % (self.vg_name, name)]
    out, _err = self._execute(*cmd,
                              root_helper=self._root_helper,
                              run_as_root=True)
    if out:
        out = out.strip()
        # Attr position 5 (index 4) is the activation state.
        if (out[4] == 'a'):
            return True
    return False
def deactivate_lv(self, name):
    """Deactivate an LV (``lvchange -a n``) and wait until it is down.

    :param name: LV name (mangled if it starts with 'snapshot')
    :raises putils.ProcessExecutionError: if lvchange fails
    :raises exception.VolumeNotDeactivated: if the LV stays active
            after the retried wait
    """
    lv_path = self.vg_name + '/' + self._mangle_lv_name(name)
    cmd = ['lvchange', '-a', 'n']
    cmd.append(lv_path)
    try:
        self._execute(*cmd,
                      root_helper=self._root_helper,
                      run_as_root=True)
    except putils.ProcessExecutionError as err:
        LOG.exception('Error deactivating LV')
        LOG.error('Cmd     :%s', err.cmd)
        LOG.error('StdOut  :%s', err.stdout)
        LOG.error('StdErr  :%s', err.stderr)
        raise

    # Deactivation is asynchronous; poll until it actually completes.
    self._wait_for_volume_deactivation(name)
@utils.retry(exceptions=exception.VolumeNotDeactivated, retries=5,
             backoff_rate=2)
def _wait_for_volume_deactivation(self, name):
    """Poll until the LV is inactive, retrying with backoff.

    Raising VolumeNotDeactivated triggers the retry decorator; after
    5 attempts the exception propagates to the caller.
    """
    LOG.debug("Checking to see if volume %s has been deactivated.",
              name)
    if self._lv_is_active(name):
        LOG.debug("Volume %s is still active.", name)
        raise exception.VolumeNotDeactivated(name=name)
    else:
        LOG.debug("Volume %s has been deactivated.", name)
def activate_lv(self, name, is_snapshot=False, permanent=False):
    """Activate an LV (``lvchange -a y``).

    :param name: LV name (mangled if it starts with 'snapshot')
    :param is_snapshot: skip activation on LVM versions that cannot
                        activate snapshot LVs (they are assumed active)
    :param permanent: also clear the skip-activation flag so the LV
                      auto-activates after reboot
    :raises putils.ProcessExecutionError: if lvchange fails
    """
    # (Assume snapshot LV is always active.)
    if is_snapshot and not self.supports_snapshot_lv_activation:
        return

    lv_path = self.vg_name + '/' + self._mangle_lv_name(name)

    # Must pass --yes to activate both the snap LV and its origin LV.
    # Otherwise lvchange asks if you would like to do this interactively,
    # and fails.
    cmd = ['lvchange', '-a', 'y', '--yes']

    if self.supports_lvchange_ignoreskipactivation:
        cmd.append('-K')
        # If permanent=True is specified, drop the skipactivation flag in
        # order to make this LV automatically activated after next reboot.
        if permanent:
            cmd += ['-k', 'n']

    cmd.append(lv_path)

    try:
        self._execute(*cmd,
                      root_helper=self._root_helper,
                      run_as_root=True)
    except putils.ProcessExecutionError as err:
        LOG.exception('Error activating LV')
        LOG.error('Cmd     :%s', err.cmd)
        LOG.error('StdOut  :%s', err.stdout)
        LOG.error('StdErr  :%s', err.stderr)
        raise
@utils.retry(putils.ProcessExecutionError)
def delete(self, name):
    """Remove an LV, retrying once after ``udevadm settle`` on failure.

    :param name: LV name within this VG
    :raises putils.ProcessExecutionError: if the retried lvremove
            also fails (and via the retry decorator)
    """
    def run_udevadm_settle():
        # Flush pending udev events; exit code deliberately ignored.
        self._execute('udevadm', 'settle',
                      root_helper=self._root_helper, run_as_root=True,
                      check_exit_code=False)

    # LV removal seems to be a race with other writers or udev in
    # some cases (see LP #1270192), so we enable retry deactivation
    LVM_CONFIG = 'activation { retry_deactivation = 1} '

    try:
        self._execute(
            'lvremove',
            '--config', LVM_CONFIG,
            '-f',
            '%s/%s' % (self.vg_name, name),
            root_helper=self._root_helper, run_as_root=True)
    except putils.ProcessExecutionError as err:
        LOG.debug('Error reported running lvremove: CMD: %(command)s, '
                  'RESPONSE: %(response)s',
                  {'command': err.cmd, 'response': err.stderr})

        LOG.debug('Attempting udev settle and retry of lvremove...')
        run_udevadm_settle()

        # The previous failing lvremove -f might leave behind
        # suspended devices; when lvmetad is not available, any
        # further lvm command will block forever.
        # Therefore we need to skip suspended devices on retry.
        LVM_CONFIG += 'devices { ignore_suspended_devices = 1}'

        self._execute(
            'lvremove',
            '--config', LVM_CONFIG,
            '-f',
            '%s/%s' % (self.vg_name, name),
            root_helper=self._root_helper, run_as_root=True)
        LOG.debug('Successfully deleted volume: %s after '
                  'udev settle.', name)
def revert(self, snapshot_name):
    """Merge a snapshot back into its origin LV (``lvconvert --merge``)."""
    self._execute('lvconvert', '--merge',
                  snapshot_name, root_helper=self._root_helper,
                  run_as_root=True)
def lv_has_snapshot(self, name):
    """Return True if the LV is a snapshot origin.

    Checks the first Attr character from ``lvdisplay``:
    'o'/'O' mark an origin volume.
    """
    cmd = LVM.LVM_CMD_PREFIX + ['lvdisplay', '--noheading', '-C', '-o',
                                'Attr', '%s/%s' % (self.vg_name, name)]
    out, _err = self._execute(*cmd,
                              root_helper=self._root_helper,
                              run_as_root=True)
    if out:
        out = out.strip()
        if (out[0] == 'o') or (out[0] == 'O'):
            return True
    return False
def lv_is_snapshot(self, name):
    """Return True if the LV is itself a snapshot.

    Checks the first Attr character from ``lvdisplay``:
    's' marks a snapshot volume.
    """
    cmd = LVM.LVM_CMD_PREFIX + ['lvdisplay', '--noheading', '-C', '-o',
                                'Attr', '%s/%s' % (self.vg_name, name)]
    out, _err = self._execute(*cmd,
                              root_helper=self._root_helper,
                              run_as_root=True)
    out = out.strip()
    if out:
        if (out[0] == 's'):
            return True
    return False
def lv_is_open(self, name):
    """Return True if the LV is open (in use).

    Checks the 6th Attr character from ``lvdisplay``:
    'o' marks an open device.
    """
    cmd = LVM.LVM_CMD_PREFIX + ['lvdisplay', '--noheading', '-C', '-o',
                                'Attr', '%s/%s' % (self.vg_name, name)]
    out, _err = self._execute(*cmd,
                              root_helper=self._root_helper,
                              run_as_root=True)
    out = out.strip()
    if out:
        if (out[5] == 'o'):
            return True
    return False
def lv_get_origin(self, name):
    """Return the origin LV name of a snapshot, or None if there is none."""
    cmd = LVM.LVM_CMD_PREFIX + ['lvdisplay', '--noheading', '-C', '-o',
                                'Origin', '%s/%s' % (self.vg_name, name)]
    out, _err = self._execute(*cmd,
                              root_helper=self._root_helper,
                              run_as_root=True)
    out = out.strip()
    if out:
        return out
    return None
def extend_volume(self, lv_name, new_size):
    """Extend an LV to *new_size* via ``lvextend``.

    :param lv_name: LV to extend
    :param new_size: new size string accepted by lvextend's ``-L``
    :raises putils.ProcessExecutionError: if lvextend fails
    """
    # Volumes with snaps have attributes 'o' or 'O' and will be
    # deactivated, but Thin Volumes with snaps have attribute 'V'
    # and won't be deactivated because the lv_has_snapshot method
    # only looks for the 'o'/'O' attribute values.
    has_snapshot = self.lv_has_snapshot(lv_name)
    if has_snapshot:
        self.deactivate_lv(lv_name)
    try:
        cmd = LVM.LVM_CMD_PREFIX + ['lvextend', '-L', new_size,
                                    '%s/%s' % (self.vg_name, lv_name)]
        self._execute(*cmd, root_helper=self._root_helper,
                      run_as_root=True)
    except putils.ProcessExecutionError as err:
        LOG.exception('Error extending Volume')
        LOG.error('Cmd     :%s', err.cmd)
        LOG.error('StdOut  :%s', err.stdout)
        LOG.error('StdErr  :%s', err.stderr)
        raise
    # Reactivate only what we deactivated above.
    if has_snapshot:
        self.activate_lv(lv_name)
def vg_mirror_free_space(self, mirror_count):
    """Compute usable free capacity when every LV is mirrored.

    Simulates allocation: repeatedly takes the smallest PV with free
    space, charges that amount against the `mirror_count` largest PVs
    and counts it once as usable capacity, until fewer than
    mirror_count + 1 PVs still have free space.
    """
    usable = 0.0
    remaining = [float(pv['available']) for pv in self.pv_list]

    while True:
        # Keep only PVs with free space, largest first.
        remaining = sorted((cap for cap in remaining if cap > 0.0),
                           reverse=True)
        if len(remaining) <= mirror_count:
            break
        smallest = remaining.pop()
        for slot in range(mirror_count):
            remaining[slot] -= smallest
        usable += smallest

    return usable
def vg_mirror_size(self, mirror_count):
    """Return VG free space divided across mirror_count + 1 copies."""
    copies = mirror_count + 1
    return self.vg_free_space / copies
def rename_volume(self, lv_name, new_name):
    """Rename an LV within this VG via ``lvrename``.

    :raises putils.ProcessExecutionError: if lvrename fails
    """
    try:
        self._execute('lvrename', self.vg_name, lv_name, new_name,
                      root_helper=self._root_helper,
                      run_as_root=True)
    except putils.ProcessExecutionError as err:
        LOG.exception('Error renaming logical volume')
        LOG.error('Cmd     :%s', err.cmd)
        LOG.error('StdOut  :%s', err.stdout)
        LOG.error('StdErr  :%s', err.stderr)
        raise
| true | true |
f7376b6ebe5cc47b677f9a0ae0155429d3c68f68 | 3,842 | py | Python | examples/pipeline/hetero_ftl/pipeline-hetero-ftl-with-predict.py | rubenlozanoaht3m/DataDogm | cd605e8072cca31e8418830c3300657ae2fa5b16 | [
"Apache-2.0"
] | 715 | 2019-01-24T10:52:03.000Z | 2019-10-31T12:19:22.000Z | examples/pipeline/hetero_ftl/pipeline-hetero-ftl-with-predict.py | rubenlozanoaht3m/DataDogm | cd605e8072cca31e8418830c3300657ae2fa5b16 | [
"Apache-2.0"
] | 270 | 2019-02-11T02:57:36.000Z | 2019-08-29T11:22:33.000Z | examples/pipeline/hetero_ftl/pipeline-hetero-ftl-with-predict.py | rubenlozanoaht3m/DataDogm | cd605e8072cca31e8418830c3300657ae2fa5b16 | [
"Apache-2.0"
] | 200 | 2019-01-26T14:21:35.000Z | 2019-11-01T01:14:36.000Z | #
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component.hetero_ftl import HeteroFTL
from pipeline.component.reader import Reader
from pipeline.interface.data import Data
from tensorflow.keras import optimizers
from tensorflow.keras.layers import Dense
from tensorflow.keras import initializers
from pipeline.component.evaluation import Evaluation
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
    """Build, train and predict a hetero federated transfer learning job.

    Wires a Reader -> DataTransform -> HeteroFTL -> Evaluation pipeline
    on the nus_wide guest/host tables, fits it, then deploys the trained
    transform + FTL components into a second pipeline for prediction.

    :param config: path to a job config file, or an already-parsed config
    :param namespace: suffix appended to the data namespace
    """
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]

    guest_train_data = {"name": "nus_wide_guest", "namespace": f"experiment{namespace}"}
    host_train_data = {"name": "nus_wide_host", "namespace": f"experiment{namespace}"}

    pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host)

    reader_0 = Reader(name="reader_0")
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
    reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)

    # Guest data carries labels; host data does not.
    data_transform_0 = DataTransform(name="data_transform_0")
    data_transform_0.get_party_instance(
        role='guest', party_id=guest).component_param(
        with_label=True, output_format="dense")
    data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)

    hetero_ftl_0 = HeteroFTL(name='hetero_ftl_0',
                             epochs=10, alpha=1, batch_size=-1, mode='plain')

    hetero_ftl_0.add_nn_layer(Dense(units=32, activation='sigmoid',
                                    kernel_initializer=initializers.RandomNormal(stddev=1.0),
                                    bias_initializer=initializers.Zeros()))

    hetero_ftl_0.compile(optimizer=optimizers.Adam(lr=0.01))

    evaluation_0 = Evaluation(name='evaluation_0', eval_type="binary")

    pipeline.add_component(reader_0)
    pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(hetero_ftl_0, data=Data(train_data=data_transform_0.output.data))
    pipeline.add_component(evaluation_0, data=Data(data=hetero_ftl_0.output.data))

    pipeline.compile()
    pipeline.fit()

    # predict
    # deploy required components
    pipeline.deploy_component([data_transform_0, hetero_ftl_0])

    predict_pipeline = PipeLine()
    # add data reader onto predict pipeline
    predict_pipeline.add_component(reader_0)
    # add selected components from train pipeline onto predict pipeline
    # specify data source
    predict_pipeline.add_component(
        pipeline, data=Data(
            predict_input={
                pipeline.data_transform_0.input.data: reader_0.output.data}))
    # run predict model
    predict_pipeline.predict()
if __name__ == "__main__":
    # CLI entry point: an optional -config path overrides the default
    # config file baked into main()'s signature.
    cli = argparse.ArgumentParser("PIPELINE DEMO")
    cli.add_argument("-config", type=str,
                     help="config file")
    parsed = cli.parse_args()
    if parsed.config is None:
        main()
    else:
        main(parsed.config)
| 38.808081 | 103 | 0.724623 |
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component.hetero_ftl import HeteroFTL
from pipeline.component.reader import Reader
from pipeline.interface.data import Data
from tensorflow.keras import optimizers
from tensorflow.keras.layers import Dense
from tensorflow.keras import initializers
from pipeline.component.evaluation import Evaluation
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
host = parties.host[0]
guest_train_data = {"name": "nus_wide_guest", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "nus_wide_host", "namespace": f"experiment{namespace}"}
pipeline = PipeLine().set_initiator(role='guest', party_id=guest).set_roles(guest=guest, host=host)
reader_0 = Reader(name="reader_0")
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
data_transform_0 = DataTransform(name="data_transform_0")
data_transform_0.get_party_instance(
role='guest', party_id=guest).component_param(
with_label=True, output_format="dense")
data_transform_0.get_party_instance(role='host', party_id=host).component_param(with_label=False)
hetero_ftl_0 = HeteroFTL(name='hetero_ftl_0',
epochs=10, alpha=1, batch_size=-1, mode='plain')
hetero_ftl_0.add_nn_layer(Dense(units=32, activation='sigmoid',
kernel_initializer=initializers.RandomNormal(stddev=1.0),
bias_initializer=initializers.Zeros()))
hetero_ftl_0.compile(optimizer=optimizers.Adam(lr=0.01))
evaluation_0 = Evaluation(name='evaluation_0', eval_type="binary")
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
pipeline.add_component(hetero_ftl_0, data=Data(train_data=data_transform_0.output.data))
pipeline.add_component(evaluation_0, data=Data(data=hetero_ftl_0.output.data))
pipeline.compile()
pipeline.fit()
pipeline.deploy_component([data_transform_0, hetero_ftl_0])
predict_pipeline = PipeLine()
predict_pipeline.add_component(reader_0)
predict_pipeline.add_component(
pipeline, data=Data(
predict_input={
pipeline.data_transform_0.input.data: reader_0.output.data}))
predict_pipeline.predict()
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
| true | true |
f7376b9a03e8264096d83c9df6df4eacc6d0bf09 | 3,560 | py | Python | fn_qradar_integration/tests/test_funct_qradar_reference_table_delete_item.py | nickpartner-goahead/resilient-community-apps | 097c0dbefddbd221b31149d82af9809420498134 | [
"MIT"
] | 65 | 2017-12-04T13:58:32.000Z | 2022-03-24T18:33:17.000Z | fn_qradar_integration/tests/test_funct_qradar_reference_table_delete_item.py | nickpartner-goahead/resilient-community-apps | 097c0dbefddbd221b31149d82af9809420498134 | [
"MIT"
] | 48 | 2018-03-02T19:17:14.000Z | 2022-03-09T22:00:38.000Z | fn_qradar_integration/tests/test_funct_qradar_reference_table_delete_item.py | nickpartner-goahead/resilient-community-apps | 097c0dbefddbd221b31149d82af9809420498134 | [
"MIT"
] | 95 | 2018-01-11T16:23:39.000Z | 2022-03-21T11:34:29.000Z | # -*- coding: utf-8 -*-
"""Tests using pytest_resilient_circuits"""
import pytest
from resilient_circuits.util import get_config_data, get_function_definition
from resilient_circuits import SubmitTestFunction, FunctionResult
from mock import patch
PACKAGE_NAME = "fn_qradar_integration"
FUNCTION_NAME = "qradar_reference_table_delete_item"
# Read the default configuration-data section from the package
config_data = get_config_data(PACKAGE_NAME)
# Provide a simulation of the Resilient REST API (uncomment to connect to a real appliance)
resilient_mock = "pytest_resilient_circuits.BasicResilientMock"
# Canned QRadar API responses used to mock
# ReferenceTableFacade.delete_ref_element in the tests below.
MOCK_DELETE_RESPONSE = {
    "time_to_live": "999 years 0 mons 0 days 0 hours 0 mins 0.00 secs",
    "timeout_type": "LAST_SEEN",
    "number_of_elements": 555,
    "creation_time": 1570221529014,
    "name": "demo_v3",
    "namespace": "SHARED",
    "element_type": "ALN",
    "collection_id": 86
}

# Same response shape, but with a non-ASCII table name to exercise
# unicode handling.
MOCK_DELETE_RESPONSE_UNICODE = {
    "time_to_live": "999 years 0 mons 0 days 0 hours 0 mins 0.00 secs",
    "timeout_type": "LAST_SEEN",
    "number_of_elements": 555,
    "creation_time": 1570221529014,
    "name": "演示版vz",
    "namespace": "SHARED",
    "element_type": "ALN",
    "collection_id": 86
}
def call_qradar_reference_table_delete_item_function(circuits, function_params, timeout=5):
    """Fire the function under test through circuits and return its value.

    :param circuits: circuits test fixture (provides manager/watcher)
    :param function_params: inputs for the Resilient function
    :param timeout: seconds to wait for the result or exception event
    :returns: the FunctionResult value dict
    :raises: any exception raised inside the FunctionComponent
    """
    # Create the submitTestFunction event
    evt = SubmitTestFunction("qradar_reference_table_delete_item", function_params)

    # Fire a message to the function
    circuits.manager.fire(evt)

    # circuits will fire an "exception" event if an exception is raised in the FunctionComponent
    # return this exception if it is raised
    exception_event = circuits.watcher.wait("exception", parent=None, timeout=timeout)

    if exception_event is not False:
        exception = exception_event.args[1]
        raise exception

    # else return the FunctionComponent's results
    else:
        event = circuits.watcher.wait("qradar_reference_table_delete_item_result", parent=evt, timeout=timeout)
        assert event
        assert isinstance(event.kwargs["result"], FunctionResult)
        pytest.wait_for(event, "complete", True)
        return event.kwargs["result"].value
class TestQradarReferenceTableDeleteItem:
    """ Tests for the qradar_reference_table_delete_item function"""

    def test_function_definition(self):
        """ Test that the package provides customization_data that defines the function """
        func = get_function_definition(PACKAGE_NAME, FUNCTION_NAME)
        assert func is not None

    # Input/expected-output pairs for the parametrized success test.
    # Pair 1 exercises the plain-ASCII response, pair 2 the unicode one.
    mock_inputs_1 = {
        "qradar_reference_table_name": "sample text",
        "qradar_reference_table_item_value": "sample text"
    }
    expected_results_1 = MOCK_DELETE_RESPONSE

    mock_inputs_2 = {
        "qradar_reference_table_name": "sample text",
        "qradar_reference_table_item_value": "sample text"
    }
    expected_results_2 = MOCK_DELETE_RESPONSE_UNICODE

    @pytest.mark.parametrize("mock_inputs, expected_results", [
        (mock_inputs_1, expected_results_1),
        (mock_inputs_2, expected_results_2)
    ])
    def test_success(self, circuits_app, mock_inputs, expected_results):
        """ Test calling with sample values for the parameters """
        # Patch the QRadar client call so no appliance is contacted;
        # the function should pass the facade's return value through
        # unchanged in results['content'].
        with patch('fn_qradar_integration.lib.reference_data.ReferenceTableFacade.ReferenceTableFacade.delete_ref_element') as patched_add_element:
            patched_add_element.return_value = expected_results
            results = call_qradar_reference_table_delete_item_function(circuits_app, mock_inputs)
            assert(expected_results == results['content'])
| 37.473684 | 147 | 0.742978 |
import pytest
from resilient_circuits.util import get_config_data, get_function_definition
from resilient_circuits import SubmitTestFunction, FunctionResult
from mock import patch
PACKAGE_NAME = "fn_qradar_integration"
FUNCTION_NAME = "qradar_reference_table_delete_item"
config_data = get_config_data(PACKAGE_NAME)
resilient_mock = "pytest_resilient_circuits.BasicResilientMock"
MOCK_DELETE_RESPONSE = {
"time_to_live": "999 years 0 mons 0 days 0 hours 0 mins 0.00 secs",
"timeout_type": "LAST_SEEN",
"number_of_elements": 555,
"creation_time": 1570221529014,
"name": "demo_v3",
"namespace": "SHARED",
"element_type": "ALN",
"collection_id": 86
}
MOCK_DELETE_RESPONSE_UNICODE = {
"time_to_live": "999 years 0 mons 0 days 0 hours 0 mins 0.00 secs",
"timeout_type": "LAST_SEEN",
"number_of_elements": 555,
"creation_time": 1570221529014,
"name": "演示版vz",
"namespace": "SHARED",
"element_type": "ALN",
"collection_id": 86
}
def call_qradar_reference_table_delete_item_function(circuits, function_params, timeout=5):
evt = SubmitTestFunction("qradar_reference_table_delete_item", function_params)
circuits.manager.fire(evt)
exception_event = circuits.watcher.wait("exception", parent=None, timeout=timeout)
if exception_event is not False:
exception = exception_event.args[1]
raise exception
else:
event = circuits.watcher.wait("qradar_reference_table_delete_item_result", parent=evt, timeout=timeout)
assert event
assert isinstance(event.kwargs["result"], FunctionResult)
pytest.wait_for(event, "complete", True)
return event.kwargs["result"].value
class TestQradarReferenceTableDeleteItem:
def test_function_definition(self):
func = get_function_definition(PACKAGE_NAME, FUNCTION_NAME)
assert func is not None
mock_inputs_1 = {
"qradar_reference_table_name": "sample text",
"qradar_reference_table_item_value": "sample text"
}
expected_results_1 = MOCK_DELETE_RESPONSE
mock_inputs_2 = {
"qradar_reference_table_name": "sample text",
"qradar_reference_table_item_value": "sample text"
}
expected_results_2 = MOCK_DELETE_RESPONSE_UNICODE
@pytest.mark.parametrize("mock_inputs, expected_results", [
(mock_inputs_1, expected_results_1),
(mock_inputs_2, expected_results_2)
])
def test_success(self, circuits_app, mock_inputs, expected_results):
with patch('fn_qradar_integration.lib.reference_data.ReferenceTableFacade.ReferenceTableFacade.delete_ref_element') as patched_add_element:
patched_add_element.return_value = expected_results
results = call_qradar_reference_table_delete_item_function(circuits_app, mock_inputs)
assert(expected_results == results['content'])
| true | true |
f7376c7af13e6f09f4c98fed7f717e2e3887cc0f | 11,884 | py | Python | .venv/lib/python3.8/site-packages/poetry/core/_vendor/pyrsistent/_field_common.py | RivtLib/replit01 | ce1ae18b446a9c844f40e88a51c71fbc45ab3ad7 | [
"MIT"
] | 1 | 2020-08-07T16:09:57.000Z | 2020-08-07T16:09:57.000Z | .venv/lib/python3.8/site-packages/poetry/core/_vendor/pyrsistent/_field_common.py | RivtLib/replit01 | ce1ae18b446a9c844f40e88a51c71fbc45ab3ad7 | [
"MIT"
] | null | null | null | .venv/lib/python3.8/site-packages/poetry/core/_vendor/pyrsistent/_field_common.py | RivtLib/replit01 | ce1ae18b446a9c844f40e88a51c71fbc45ab3ad7 | [
"MIT"
] | null | null | null | import six
import sys
from pyrsistent._checked_types import (
CheckedPMap,
CheckedPSet,
CheckedPVector,
CheckedType,
InvariantException,
_restore_pickle,
get_type,
maybe_parse_user_type,
maybe_parse_many_user_types,
)
from pyrsistent._checked_types import optional as optional_type
from pyrsistent._checked_types import wrap_invariant
import inspect
# True when running under Python 2; used to pick the inspect API below.
PY2 = sys.version_info[0] < 3
def set_fields(dct, bases, name):
    """Collect _PField definitions into ``dct[name]``.

    Merges the field dicts inherited from *bases* (later bases win),
    then moves any _PField attributes found directly in *dct* into
    that merged dict, removing them from the class namespace.
    """
    inherited = {}
    for base in bases:
        inherited.update(base.__dict__.get(name, {}))
    dct[name] = inherited

    for key in list(dct):
        value = dct[key]
        if isinstance(value, _PField):
            dct[name][key] = value
            del dct[key]
def check_global_invariants(subject, invariants):
    """Run every global invariant against *subject*.

    Each invariant returns ``(is_ok, error_code)``; if any fail, an
    InvariantException carrying all failing error codes is raised.
    """
    failed_codes = []
    for invariant in invariants:
        is_ok, error_code = invariant(subject)
        if not is_ok:
            failed_codes.append(error_code)
    if failed_codes:
        raise InvariantException(tuple(failed_codes), (), 'Global invariant failed')
def serialize(serializer, format, value):
    """Serialize *value*, preferring a CheckedType's own serializer.

    When no custom serializer was supplied (the sentinel
    PFIELD_NO_SERIALIZER) and the value is a CheckedType, delegate to
    ``value.serialize``; otherwise call the given serializer.
    """
    no_custom_serializer = serializer is PFIELD_NO_SERIALIZER
    if no_custom_serializer and isinstance(value, CheckedType):
        return value.serialize(format)

    return serializer(format, value)
def check_type(destination_cls, field, name, value):
    """Raise PTypeError if *value* does not match the field's types.

    A field with no declared types accepts any value.
    """
    if not field.type:
        return
    if any(isinstance(value, get_type(t)) for t in field.type):
        return

    actual_type = type(value)
    message = "Invalid type for field {0}.{1}, was {2}".format(
        destination_cls.__name__, name, actual_type.__name__)
    raise PTypeError(destination_cls, name, field.type, actual_type, message)
def is_type_cls(type_cls, field_type):
    """Check whether a field's type spec matches *type_cls*.

    A plain ``set`` spec is treated as a match unconditionally; an
    empty spec never matches; otherwise the first declared type is
    resolved and tested with issubclass.
    """
    if type(field_type) is set:
        return True
    declared = tuple(field_type)
    if not declared:
        return False
    return issubclass(get_type(declared[0]), type_cls)
def is_field_ignore_extra_complaint(type_cls, field, ignore_extra):
    """Return True if ``ignore_extra`` should be forwarded to the factory.

    That is the case only when ignore_extra was requested, the field's
    type matches *type_cls*, and the field's factory actually accepts
    an ``ignore_extra`` argument.
    """
    # ignore_extra param has default False value, for speed purpose no
    # need to propagate False
    if not ignore_extra:
        return False
    if not is_type_cls(type_cls, field.type):
        return False

    if PY2:
        return 'ignore_extra' in inspect.getargspec(field.factory).args
    return 'ignore_extra' in inspect.signature(field.factory).parameters
class _PField(object):
__slots__ = ('type', 'invariant', 'initial', 'mandatory', '_factory', 'serializer')
def __init__(self, type, invariant, initial, mandatory, factory, serializer):
self.type = type
self.invariant = invariant
self.initial = initial
self.mandatory = mandatory
self._factory = factory
self.serializer = serializer
@property
def factory(self):
# If no factory is specified and the type is another CheckedType use the factory method of that CheckedType
if self._factory is PFIELD_NO_FACTORY and len(self.type) == 1:
typ = get_type(tuple(self.type)[0])
if issubclass(typ, CheckedType):
return typ.create
return self._factory
# Default/sentinel values used by field(); identity comparisons against
# these sentinels elsewhere in the module rely on these exact objects.
PFIELD_NO_TYPE = ()                          # no type restriction
PFIELD_NO_INVARIANT = lambda _: (True, None)  # invariant that always holds
PFIELD_NO_FACTORY = lambda x: x              # identity factory
PFIELD_NO_INITIAL = object()                 # unique "no initial value" marker
PFIELD_NO_SERIALIZER = lambda _, value: value  # identity serializer
def field(type=PFIELD_NO_TYPE, invariant=PFIELD_NO_INVARIANT, initial=PFIELD_NO_INITIAL,
          mandatory=False, factory=PFIELD_NO_FACTORY, serializer=PFIELD_NO_SERIALIZER):
    """
    Field specification factory for :py:class:`PRecord`.

    :param type: a type or iterable with types that are allowed for this field
    :param invariant: a function specifying an invariant that must hold for the field
    :param initial: value of field if not specified when instantiating the record
    :param mandatory: boolean specifying if the field is mandatory or not
    :param factory: function called when field is set.
    :param serializer: function that returns a serialized version of the field
    :returns: a validated _PField describing the field
    :raises TypeError: if any of the parameters are of an invalid kind
        (via _check_field_parameters)
    """

    # NB: We have to check this predicate separately from the predicates in
    # `maybe_parse_user_type` et al. because this one is related to supporting
    # the argspec for `field`, while those are related to supporting the valid
    # ways to specify types.

    # Multiple types must be passed in one of the following containers. Note
    # that a type that is a subclass of one of these containers, like a
    # `collections.namedtuple`, will work as expected, since we check
    # `isinstance` and not `issubclass`.
    if isinstance(type, (list, set, tuple)):
        types = set(maybe_parse_many_user_types(type))
    else:
        types = set(maybe_parse_user_type(type))

    # Callable invariants are wrapped so they can report failures uniformly.
    invariant_function = wrap_invariant(invariant) if invariant != PFIELD_NO_INVARIANT and callable(invariant) else invariant
    field = _PField(type=types, invariant=invariant_function, initial=initial,
                    mandatory=mandatory, factory=factory, serializer=serializer)

    _check_field_parameters(field)

    return field
def _check_field_parameters(field):
    """Validate a freshly built ``_PField``; raise ``TypeError`` on bad specs."""
    # Each declared type must be an actual type or a string (forward reference).
    for allowed in field.type:
        is_valid_spec = isinstance(allowed, type) or isinstance(allowed, six.string_types)
        if not is_valid_spec:
            raise TypeError('Type parameter expected, not {0}'.format(type(allowed)))

    # A concrete (non-callable) initial value must match one of the declared
    # types, when any types were declared at all.
    if field.initial is not PFIELD_NO_INITIAL and not callable(field.initial):
        if field.type and not any(isinstance(field.initial, allowed) for allowed in field.type):
            raise TypeError('Initial has invalid type {0}'.format(type(field.initial)))

    # The three behavioral hooks must all be callable.
    for hook, label in ((field.invariant, 'Invariant'),
                        (field.factory, 'Factory'),
                        (field.serializer, 'Serializer')):
        if not callable(hook):
            raise TypeError('{0} must be callable'.format(label))
class PTypeError(TypeError):
    """
    Raised when trying to assign a value whose type does not match the
    field's declared type(s).

    Attributes:
        source_class -- The class of the record
        field -- Field name
        expected_types -- Types allowed for the field
        actual_type -- The non matching type
    """

    def __init__(self, source_class, field, expected_types, actual_type, *args, **kwargs):
        super(PTypeError, self).__init__(*args, **kwargs)
        # Stash the diagnostic context so callers can inspect the mismatch.
        for attr_name, attr_value in (('source_class', source_class),
                                      ('field', field),
                                      ('expected_types', expected_types),
                                      ('actual_type', actual_type)):
            setattr(self, attr_name, attr_value)
# Suffix appended to the auto-generated class name of a checked sequence
# field type (see _make_seq_field_type).
SEQ_FIELD_TYPE_SUFFIXES = {
    CheckedPVector: "PVector",
    CheckedPSet: "PSet",
}

# Global dictionary to hold auto-generated field types: used for unpickling
_seq_field_types = {}
def _restore_seq_field_pickle(checked_class, item_type, data):
    """Unpickling function for auto-generated PVector/PSet field types."""
    # Look the generated class up in the module-level cache and rebuild it.
    return _restore_pickle(_seq_field_types[(checked_class, item_type)], data)
def _types_to_names(types):
    """Convert a tuple of types to a human-readable string."""
    capitalized = [get_type(entry).__name__.capitalize() for entry in types]
    return "".join(capitalized)
def _make_seq_field_type(checked_class, item_type):
    """Create (and cache) a subclass of *checked_class* restricted to *item_type*."""
    cache_key = (checked_class, item_type)
    cached = _seq_field_types.get(cache_key)
    if cached is not None:
        return cached

    class GeneratedType(checked_class):
        __type__ = item_type

        def __reduce__(self):
            # Pickle through the module-level helper so the dynamically
            # generated class can be re-created on load.
            return (_restore_seq_field_pickle,
                    (checked_class, item_type, list(self)))

    GeneratedType.__name__ = (_types_to_names(GeneratedType._checked_types)
                              + SEQ_FIELD_TYPE_SUFFIXES[checked_class])
    _seq_field_types[cache_key] = GeneratedType
    return GeneratedType
def _sequence_field(checked_class, item_type, optional, initial):
    """
    Create checked field for either ``PSet`` or ``PVector``.

    :param checked_class: ``CheckedPSet`` or ``CheckedPVector``.
    :param item_type: The required type for the items in the set.
    :param optional: If true, ``None`` can be used as a value for
        this field.
    :param initial: Initial value to pass to factory.

    :return: A ``field`` containing a checked class.
    """
    seq_type = _make_seq_field_type(checked_class, item_type)

    if not optional:
        factory = seq_type.create
    else:
        def factory(argument, _factory_fields=None, ignore_extra=False):
            # ``None`` passes through untouched for optional fields.
            if argument is None:
                return None
            return seq_type.create(argument, _factory_fields=_factory_fields,
                                   ignore_extra=ignore_extra)

    declared_type = optional_type(seq_type) if optional else seq_type
    return field(type=declared_type, factory=factory, mandatory=True,
                 initial=factory(initial))
def pset_field(item_type, optional=False, initial=()):
    """
    Create checked ``PSet`` field.

    :param item_type: The required type for the items in the set.
    :param optional: If true, ``None`` can be used as a value for
        this field.
    :param initial: Initial value to pass to factory if no value is given
        for the field.

    :return: A ``field`` containing a ``CheckedPSet`` of the given type.
    """
    return _sequence_field(CheckedPSet, item_type, optional, initial)
def pvector_field(item_type, optional=False, initial=()):
    """
    Create checked ``PVector`` field.

    :param item_type: The required type for the items in the vector.
    :param optional: If true, ``None`` can be used as a value for
        this field.
    :param initial: Initial value to pass to factory if no value is given
        for the field.

    :return: A ``field`` containing a ``CheckedPVector`` of the given type.
    """
    return _sequence_field(CheckedPVector, item_type, optional, initial)
# Invariant that accepts any item unconditionally.
_valid = lambda item: (True, "")

# Global dictionary to hold auto-generated field types: used for unpickling
_pmap_field_types = {}
def _restore_pmap_field_pickle(key_type, value_type, data):
    """Unpickling function for auto-generated PMap field types."""
    # Recover the generated class from the module-level cache.
    return _restore_pickle(_pmap_field_types[(key_type, value_type)], data)
def _make_pmap_field_type(key_type, value_type):
    """Create (and cache) a CheckedPMap subclass with the given key/value types."""
    cache_key = (key_type, value_type)
    cached = _pmap_field_types.get(cache_key)
    if cached is not None:
        return cached

    class GeneratedMap(CheckedPMap):
        __key_type__ = key_type
        __value_type__ = value_type

        def __reduce__(self):
            # Pickle through the module-level helper so the dynamically
            # generated class can be re-created on load.
            return (_restore_pmap_field_pickle,
                    (self.__key_type__, self.__value_type__, dict(self)))

    GeneratedMap.__name__ = "{0}To{1}PMap".format(
        _types_to_names(GeneratedMap._checked_key_types),
        _types_to_names(GeneratedMap._checked_value_types))
    _pmap_field_types[cache_key] = GeneratedMap
    return GeneratedMap
def pmap_field(key_type, value_type, optional=False, invariant=PFIELD_NO_INVARIANT):
    """
    Create a checked ``PMap`` field.

    :param key_type: The required type for the keys of the map.
    :param value_type: The required type for the values of the map.
    :param optional: If true, ``None`` can be used as a value for
        this field.
    :param invariant: Pass-through to ``field``.

    :return: A ``field`` containing a ``CheckedPMap``.
    """
    # Fix: the docstring previously documented nonexistent parameters
    # ``key``/``value``; the actual parameters are ``key_type``/``value_type``.
    TheMap = _make_pmap_field_type(key_type, value_type)

    if optional:
        def factory(argument):
            # ``None`` passes through untouched for optional fields.
            if argument is None:
                return None
            else:
                return TheMap.create(argument)
    else:
        factory = TheMap.create

    return field(mandatory=True, initial=TheMap(),
                 type=optional_type(TheMap) if optional else TheMap,
                 factory=factory, invariant=invariant)
| 35.903323 | 126 | 0.667789 | import six
import sys
from pyrsistent._checked_types import (
CheckedPMap,
CheckedPSet,
CheckedPVector,
CheckedType,
InvariantException,
_restore_pickle,
get_type,
maybe_parse_user_type,
maybe_parse_many_user_types,
)
from pyrsistent._checked_types import optional as optional_type
from pyrsistent._checked_types import wrap_invariant
import inspect
PY2 = sys.version_info[0] < 3
def set_fields(dct, bases, name):
dct[name] = dict(sum([list(b.__dict__.get(name, {}).items()) for b in bases], []))
for k, v in list(dct.items()):
if isinstance(v, _PField):
dct[name][k] = v
del dct[k]
def check_global_invariants(subject, invariants):
error_codes = tuple(error_code for is_ok, error_code in
(invariant(subject) for invariant in invariants) if not is_ok)
if error_codes:
raise InvariantException(error_codes, (), 'Global invariant failed')
def serialize(serializer, format, value):
if isinstance(value, CheckedType) and serializer is PFIELD_NO_SERIALIZER:
return value.serialize(format)
return serializer(format, value)
def check_type(destination_cls, field, name, value):
if field.type and not any(isinstance(value, get_type(t)) for t in field.type):
actual_type = type(value)
message = "Invalid type for field {0}.{1}, was {2}".format(destination_cls.__name__, name, actual_type.__name__)
raise PTypeError(destination_cls, name, field.type, actual_type, message)
def is_type_cls(type_cls, field_type):
if type(field_type) is set:
return True
types = tuple(field_type)
if len(types) == 0:
return False
return issubclass(get_type(types[0]), type_cls)
def is_field_ignore_extra_complaint(type_cls, field, ignore_extra):
if not ignore_extra:
return False
if not is_type_cls(type_cls, field.type):
return False
if PY2:
return 'ignore_extra' in inspect.getargspec(field.factory).args
else:
return 'ignore_extra' in inspect.signature(field.factory).parameters
class _PField(object):
__slots__ = ('type', 'invariant', 'initial', 'mandatory', '_factory', 'serializer')
def __init__(self, type, invariant, initial, mandatory, factory, serializer):
self.type = type
self.invariant = invariant
self.initial = initial
self.mandatory = mandatory
self._factory = factory
self.serializer = serializer
@property
def factory(self):
if self._factory is PFIELD_NO_FACTORY and len(self.type) == 1:
typ = get_type(tuple(self.type)[0])
if issubclass(typ, CheckedType):
return typ.create
return self._factory
PFIELD_NO_TYPE = ()
PFIELD_NO_INVARIANT = lambda _: (True, None)
PFIELD_NO_FACTORY = lambda x: x
PFIELD_NO_INITIAL = object()
PFIELD_NO_SERIALIZER = lambda _, value: value
def field(type=PFIELD_NO_TYPE, invariant=PFIELD_NO_INVARIANT, initial=PFIELD_NO_INITIAL,
mandatory=False, factory=PFIELD_NO_FACTORY, serializer=PFIELD_NO_SERIALIZER):
if isinstance(type, (list, set, tuple)):
types = set(maybe_parse_many_user_types(type))
else:
types = set(maybe_parse_user_type(type))
invariant_function = wrap_invariant(invariant) if invariant != PFIELD_NO_INVARIANT and callable(invariant) else invariant
field = _PField(type=types, invariant=invariant_function, initial=initial,
mandatory=mandatory, factory=factory, serializer=serializer)
_check_field_parameters(field)
return field
def _check_field_parameters(field):
for t in field.type:
if not isinstance(t, type) and not isinstance(t, six.string_types):
raise TypeError('Type parameter expected, not {0}'.format(type(t)))
if field.initial is not PFIELD_NO_INITIAL and \
not callable(field.initial) and \
field.type and not any(isinstance(field.initial, t) for t in field.type):
raise TypeError('Initial has invalid type {0}'.format(type(field.initial)))
if not callable(field.invariant):
raise TypeError('Invariant must be callable')
if not callable(field.factory):
raise TypeError('Factory must be callable')
if not callable(field.serializer):
raise TypeError('Serializer must be callable')
class PTypeError(TypeError):
def __init__(self, source_class, field, expected_types, actual_type, *args, **kwargs):
super(PTypeError, self).__init__(*args, **kwargs)
self.source_class = source_class
self.field = field
self.expected_types = expected_types
self.actual_type = actual_type
SEQ_FIELD_TYPE_SUFFIXES = {
CheckedPVector: "PVector",
CheckedPSet: "PSet",
}
_seq_field_types = {}
def _restore_seq_field_pickle(checked_class, item_type, data):
type_ = _seq_field_types[checked_class, item_type]
return _restore_pickle(type_, data)
def _types_to_names(types):
return "".join(get_type(typ).__name__.capitalize() for typ in types)
def _make_seq_field_type(checked_class, item_type):
type_ = _seq_field_types.get((checked_class, item_type))
if type_ is not None:
return type_
class TheType(checked_class):
__type__ = item_type
def __reduce__(self):
return (_restore_seq_field_pickle,
(checked_class, item_type, list(self)))
suffix = SEQ_FIELD_TYPE_SUFFIXES[checked_class]
TheType.__name__ = _types_to_names(TheType._checked_types) + suffix
_seq_field_types[checked_class, item_type] = TheType
return TheType
def _sequence_field(checked_class, item_type, optional, initial):
TheType = _make_seq_field_type(checked_class, item_type)
if optional:
def factory(argument, _factory_fields=None, ignore_extra=False):
if argument is None:
return None
else:
return TheType.create(argument, _factory_fields=_factory_fields, ignore_extra=ignore_extra)
else:
factory = TheType.create
return field(type=optional_type(TheType) if optional else TheType,
factory=factory, mandatory=True,
initial=factory(initial))
def pset_field(item_type, optional=False, initial=()):
return _sequence_field(CheckedPSet, item_type, optional,
initial)
def pvector_field(item_type, optional=False, initial=()):
return _sequence_field(CheckedPVector, item_type, optional,
initial)
_valid = lambda item: (True, "")
_pmap_field_types = {}
def _restore_pmap_field_pickle(key_type, value_type, data):
type_ = _pmap_field_types[key_type, value_type]
return _restore_pickle(type_, data)
def _make_pmap_field_type(key_type, value_type):
type_ = _pmap_field_types.get((key_type, value_type))
if type_ is not None:
return type_
class TheMap(CheckedPMap):
__key_type__ = key_type
__value_type__ = value_type
def __reduce__(self):
return (_restore_pmap_field_pickle,
(self.__key_type__, self.__value_type__, dict(self)))
TheMap.__name__ = "{0}To{1}PMap".format(
_types_to_names(TheMap._checked_key_types),
_types_to_names(TheMap._checked_value_types))
_pmap_field_types[key_type, value_type] = TheMap
return TheMap
def pmap_field(key_type, value_type, optional=False, invariant=PFIELD_NO_INVARIANT):
TheMap = _make_pmap_field_type(key_type, value_type)
if optional:
def factory(argument):
if argument is None:
return None
else:
return TheMap.create(argument)
else:
factory = TheMap.create
return field(mandatory=True, initial=TheMap(),
type=optional_type(TheMap) if optional else TheMap,
factory=factory, invariant=invariant)
| true | true |
f7376cfb48ca7e0466ff023677b9d8f9331e296d | 3,510 | py | Python | graphs/prim.py | Hasenn/Python | 7e359c78781167f597ff742817efabe460bf8ab0 | [
"MIT"
] | 3 | 2020-07-17T00:21:04.000Z | 2020-09-01T08:46:08.000Z | graphs/prim.py | Hasenn/Python | 7e359c78781167f597ff742817efabe460bf8ab0 | [
"MIT"
] | 2 | 2021-07-09T21:23:47.000Z | 2021-08-06T02:47:29.000Z | graphs/prim.py | Hasenn/Python | 7e359c78781167f597ff742817efabe460bf8ab0 | [
"MIT"
] | 3 | 2020-09-30T18:27:01.000Z | 2021-10-06T10:04:45.000Z | """Prim's Algorithm.
Determines the minimum spanning tree(MST) of a graph using the Prim's Algorithm.
Details: https://en.wikipedia.org/wiki/Prim%27s_algorithm
"""
import heapq as hq
import math
from typing import Iterator
class Vertex:
    """A graph vertex used by the Prim's-algorithm implementations below.

    Attributes:
        id        -- string identifier of the vertex
        key       -- cheapest known edge weight into the vertex (set by prim)
        pi        -- predecessor vertex in the spanning tree (set by prim)
        neighbors -- list of adjacent Vertex objects
        edges     -- maps a neighbor's id to the weight of the connecting edge
    """

    def __init__(self, id):
        """Store *id* as a string and start with no links or MST state."""
        self.id = str(id)
        self.key = self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex id: distance}

    def __lt__(self, other):
        # Ordering by key lets min() and heapq pick the cheapest vertex.
        return self.key < other.key

    def __repr__(self):
        """Return the vertex id."""
        return self.id

    def add_neighbor(self, vertex):
        """Record *vertex* as adjacent to this one."""
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        """Store the weight of the edge leading to *vertex*."""
        self.edges[vertex.id] = weight
def connect(graph, a, b, edge):
    """Link vertices *a* and *b* (1-based indices into *graph*) with an
    undirected edge of weight *edge*."""
    u, v = graph[a - 1], graph[b - 1]
    # Record adjacency in both directions.
    u.add_neighbor(v)
    v.add_neighbor(u)
    # Record the edge weight in both directions.
    u.add_edge(v, edge)
    v.add_edge(u, edge)
def prim(graph: list, root: Vertex) -> list:
    """Prim's Algorithm (naive list-scan variant).

    Runtime:
        O(mn) with `m` edges and `n` vertices

    Return:
        List of MST edges as ``(vertex id + 1, parent id + 1)`` pairs,
        one per non-root vertex (read off the ``pi`` pointers).

    Usage:
        prim(graph, graph[0])
    """
    # Reset per-vertex MST state.
    for vertex in graph:
        vertex.key = math.inf
        vertex.pi = None
    root.key = 0

    pending = graph[:]
    while pending:
        # Extract the vertex with the cheapest connection so far.
        nearest = min(pending)
        pending.remove(nearest)
        # Relax the edges to every still-pending neighbor.
        for neighbor in nearest.neighbors:
            if neighbor in pending and nearest.edges[neighbor.id] < neighbor.key:
                neighbor.pi = nearest
                neighbor.key = nearest.edges[neighbor.id]

    return [(int(graph[i].id) + 1, int(graph[i].pi.id) + 1)
            for i in range(1, len(graph))]
def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Prim's Algorithm driven by ``heapq``.

    The heap is re-heapified after every key update so vertices keep popping
    in cheapest-first order even though keys mutate in place.

    Yield:
        MST edges as ``(vertex id + 1, parent id + 1)`` pairs, one per
        non-root vertex.

    Usage:
        prim_heap(graph, graph[0])
    """
    # Reset per-vertex MST state.
    for vertex in graph:
        vertex.key = math.inf
        vertex.pi = None
    root.key = 0

    heap = list(graph)
    hq.heapify(heap)
    while heap:
        nearest = hq.heappop(heap)
        for neighbor in nearest.neighbors:
            if neighbor in heap and nearest.edges[neighbor.id] < neighbor.key:
                neighbor.pi = nearest
                neighbor.key = nearest.edges[neighbor.id]
                # Keys changed behind the heap's back; restore the invariant.
                hq.heapify(heap)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)
# Doctest fixture: builds a small sample graph and checks that ``prim`` and
# ``prim_heap`` report the same MST edges. The docstring below IS the test
# and is executed by ``doctest.testmod()``; do not edit it casually.
def test_vector() -> None:
    """
    # Creates a list to store x vertices.
    >>> x = 5
    >>> G = [Vertex(n) for n in range(x)]
    >>> connect(G, 1, 2, 15)
    >>> connect(G, 1, 3, 12)
    >>> connect(G, 2, 4, 13)
    >>> connect(G, 2, 5, 5)
    >>> connect(G, 3, 2, 6)
    >>> connect(G, 3, 4, 6)
    >>> connect(G, 0, 0, 0)  # Generate the minimum spanning tree:
    >>> G_heap = G[:]
    >>> MST = prim(G, G[0])
    >>> MST_heap = prim_heap(G, G[0])
    >>> for i in MST:
    ... print(i)
    (2, 3)
    (3, 1)
    (4, 3)
    (5, 2)
    >>> for i in MST_heap:
    ... print(i)
    (2, 3)
    (3, 1)
    (4, 3)
    (5, 2)
    """
if __name__ == "__main__":
    # Run the doctests embedded in this module (see ``test_vector``).
    import doctest

    doctest.testmod()
import heapq as hq
import math
from typing import Iterator
class Vertex:
def __init__(self, id):
self.id = str(id)
self.key = None
self.pi = None
self.neighbors = []
self.edges = {}
def __lt__(self, other):
return self.key < other.key
def __repr__(self):
return self.id
def add_neighbor(self, vertex):
self.neighbors.append(vertex)
def add_edge(self, vertex, weight):
self.edges[vertex.id] = weight
def connect(graph, a, b, edge):
graph[a - 1].add_neighbor(graph[b - 1])
graph[b - 1].add_neighbor(graph[a - 1])
graph[a - 1].add_edge(graph[b - 1], edge)
graph[b - 1].add_edge(graph[a - 1], edge)
def prim(graph: list, root: Vertex) -> list:
a = []
for u in graph:
u.key = math.inf
u.pi = None
root.key = 0
q = graph[:]
while q:
u = min(q)
q.remove(u)
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
v.pi = u
v.key = u.edges[v.id]
for i in range(1, len(graph)):
a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
return a
def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
for u in graph:
u.key = math.inf
u.pi = None
root.key = 0
h = [v for v in graph]
hq.heapify(h)
while h:
u = hq.heappop(h)
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
v.pi = u
v.key = u.edges[v.id]
hq.heapify(h)
for i in range(1, len(graph)):
yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)
def test_vector() -> None:
if __name__ == "__main__":
import doctest
doctest.testmod()
| true | true |
f7376ed1d9e41ba75092c4bd064cee37e28e2b8d | 201 | py | Python | hypernlp/dl_framework_adaptor/configs/bertbase_config.py | DataCanvasIO/HyperNLP | 3ae565c88b6fc63b664c8fb264dc89c47ff92623 | [
"Apache-2.0"
] | 3 | 2021-11-22T04:09:22.000Z | 2022-01-10T10:27:28.000Z | hypernlp/dl_framework_adaptor/configs/bertbase_config.py | DataCanvasIO/HyperNLP | 3ae565c88b6fc63b664c8fb264dc89c47ff92623 | [
"Apache-2.0"
] | null | null | null | hypernlp/dl_framework_adaptor/configs/bertbase_config.py | DataCanvasIO/HyperNLP | 3ae565c88b6fc63b664c8fb264dc89c47ff92623 | [
"Apache-2.0"
] | null | null | null | from utils.string_utils import home_path
import yaml
# Parse the BERT model configuration bundled with hypernlp.
# Fixes vs. the original: the file handle was never closed (resource leak),
# and bare ``yaml.load`` without a Loader is unsafe on untrusted input and
# raises TypeError on PyYAML >= 6. ``safe_load`` parses plain YAML the same
# way while refusing arbitrary object construction.
# NOTE: ``configs`` is still bound afterwards, but now refers to a closed file.
with open(home_path() + "hypernlp/dl_framework_adaptor/configs/bert_config.yaml", encoding='utf-8') as configs:
    bert_models_config = yaml.safe_load(configs)
| 25.125 | 104 | 0.800995 | from utils.string_utils import home_path
import yaml
configs = open(home_path() + "hypernlp/dl_framework_adaptor/configs/bert_config.yaml", encoding='utf-8')
bert_models_config = yaml.load(configs)
| true | true |
f7376f407c68482086590915492302eb2677bc4b | 234 | py | Python | popolo_admin/admin.py | ciudadanointeligente/django-popolo-admin | f74aa087e436d06e73a4dbf93019d8dfc1f7a34e | [
"MIT"
] | null | null | null | popolo_admin/admin.py | ciudadanointeligente/django-popolo-admin | f74aa087e436d06e73a4dbf93019d8dfc1f7a34e | [
"MIT"
] | null | null | null | popolo_admin/admin.py | ciudadanointeligente/django-popolo-admin | f74aa087e436d06e73a4dbf93019d8dfc1f7a34e | [
"MIT"
] | null | null | null | from django.contrib import admin
from popolo.models import Person, Organization
@admin.register(Person)
class PersonAdmin(admin.ModelAdmin):
    # Stock ModelAdmin: exposes Person in the Django admin with default
    # list/detail views; no customization needed yet.
    pass
@admin.register(Organization)
class OrganizationAdmin(admin.ModelAdmin):
    # Stock ModelAdmin: exposes Organization in the Django admin with default
    # list/detail views; no customization needed yet.
    pass
| 21.272727 | 46 | 0.803419 | from django.contrib import admin
from popolo.models import Person, Organization
@admin.register(Person)
class PersonAdmin(admin.ModelAdmin):
pass
@admin.register(Organization)
class OrganizationAdmin(admin.ModelAdmin):
pass
| true | true |
f7376f93c9df1bfbd6ea51643563b722453a15b3 | 541 | py | Python | url_to_data/insert_in_db.py | eike-heimpel/url-to-data | 265cc8055569774ef775a82614b094a7f3184324 | [
"MIT"
] | null | null | null | url_to_data/insert_in_db.py | eike-heimpel/url-to-data | 265cc8055569774ef775a82614b094a7f3184324 | [
"MIT"
] | null | null | null | url_to_data/insert_in_db.py | eike-heimpel/url-to-data | 265cc8055569774ef775a82614b094a7f3184324 | [
"MIT"
] | null | null | null | from api import all_csvs_to_file
# Landing pages of the datasets of interest (BKA crime statistics and
# Our World in Data's COVID-19 CSV index).
bka_zeitreihen_daten_url = "https://www.bka.de/DE/AktuelleInformationen/StatistikenLagebilder/PolizeilicheKriminalstatistik/PKS2019/PKSTabellen/Zeitreihen/zeitreihen_node.html"
# NOTE(review): only the Zeitreihen URL is used below; the next two constants
# appear unused in this snippet -- confirm before removing.
bka_bund_fall_tabellen_url = "https://www.bka.de/DE/AktuelleInformationen/StatistikenLagebilder/PolizeilicheKriminalstatistik/PKS2019/PKSTabellen/BundFalltabellen/bundfalltabellen.html?nn=130872"
corona_url = "https://ourworldindata.org/coronavirus-source-data"
# Download every CSV linked from the BKA time-series page and store it under
# the "bka_zeitreihe" name.
all_csvs_to_file(bka_zeitreihen_daten_url, "bka_zeitreihe")
| 36.066667 | 195 | 0.861368 | from api import all_csvs_to_file
bka_zeitreihen_daten_url = "https://www.bka.de/DE/AktuelleInformationen/StatistikenLagebilder/PolizeilicheKriminalstatistik/PKS2019/PKSTabellen/Zeitreihen/zeitreihen_node.html"
bka_bund_fall_tabellen_url = "https://www.bka.de/DE/AktuelleInformationen/StatistikenLagebilder/PolizeilicheKriminalstatistik/PKS2019/PKSTabellen/BundFalltabellen/bundfalltabellen.html?nn=130872"
corona_url = "https://ourworldindata.org/coronavirus-source-data"
all_csvs_to_file(bka_zeitreihen_daten_url, "bka_zeitreihe")
| true | true |
f7376fe0fc5af75de5bbf73c3aae3fc8ff63f043 | 3,655 | py | Python | hello/assistant/assistant.py | ToBegin/hello-python | 71d9989e3a8695550391ac544c59e5866366a3e7 | [
"Apache-2.0"
] | null | null | null | hello/assistant/assistant.py | ToBegin/hello-python | 71d9989e3a8695550391ac544c59e5866366a3e7 | [
"Apache-2.0"
] | null | null | null | hello/assistant/assistant.py | ToBegin/hello-python | 71d9989e3a8695550391ac544c59e5866366a3e7 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
r'''
Usage:
python3 assistant.py
service
'''
import sys
def check_version():
    """Return True when running on Python 3.4+; print a hint and return False otherwise."""
    major, minor = sys.version_info.major, sys.version_info.minor
    if major != 3 or minor < 4:
        print('Your current python is %d.%d. Please use Python 3.4.' % (major, minor))
        return False
    return True
# Abort immediately when the interpreter is too old for the code below.
if not check_version():
    exit(1)
import os, io, json, subprocess, tempfile
from urllib import parse
from wsgiref.simple_server import make_server
EXEC = sys.executable  # interpreter used to execute submitted code
# print(EXEC)
PORT = 39093  # local port the WSGI server listens on
HOST = 'local.liaoxuefeng.com:%d' % PORT  # only requests with this Host header may hit /run
TEMP = tempfile.mkdtemp(suffix='_py', prefix='learn_python_')  # scratch dir for submitted .py files
INDEX = 0  # monotonically increasing counter used by get_name()
def main():
    """Start the local WSGI server and serve run requests until interrupted."""
    server = make_server('127.0.0.1', PORT, application)
    print('Ready for Python code on port %d...' % PORT)
    server.serve_forever()
def get_name():
    """Return a fresh 'test_<n>' script name, bumping the module-level counter."""
    global INDEX
    INDEX += 1
    return 'test_%d' % INDEX
def write_py(name, code):
    """Write *code* to '<TEMP>/<name>.py' and return the full path."""
    target = os.path.join(TEMP, '%s.py' % name)
    with open(target, 'w', encoding='utf-8') as fp:
        fp.write(code)
    print('Code wrote to: %s' % target)
    return target
def decode(s):
    """Decode bytes *s*, preferring UTF-8 and falling back to GBK.

    If both codecs fail, the GBK ``UnicodeDecodeError`` is raised.
    """
    last_error = None
    for codec in ('utf-8', 'gbk'):
        try:
            return s.decode(codec)
        except UnicodeDecodeError as exc:
            last_error = exc
    raise last_error
def application(environ, start_response):
    """WSGI entry point for the local code-runner service.

    Routes:
      GET  /     -- serve a bare HTML form for manual testing
      GET  /env  -- dump the WSGI environment (debugging aid)
      POST /run  -- execute submitted Python code and return a JSON result

    ``/run`` only answers requests whose Host header equals ``HOST``, whose
    body is form-urlencoded, and whose Origin contains ``.liaoxuefeng.com``;
    the submitted code runs in a subprocess with a 5-second timeout.

    Fix vs. the original: the second ``except subprocess.CalledProcessError``
    clause was unreachable (a duplicate of the first handler for the same
    exception type) and has been removed; behavior is unchanged.
    """
    host = environ.get('HTTP_HOST')
    method = environ.get('REQUEST_METHOD')
    path = environ.get('PATH_INFO')
    if method == 'GET' and path == '/':
        start_response('200 OK', [('Content-Type', 'text/html')])
        return [b'<html><head><title>Learning Python</title></head><body><form method="post" action="/run"><textarea name="code" style="width:90%;height: 600px"></textarea><p><button type="submit">Run</button></p></form></body></html>']
    if method == 'GET' and path == '/env':
        start_response('200 OK', [('Content-Type', 'text/html')])
        L = [b'<html><head><title>ENV</title></head><body>']
        for k, v in environ.items():
            p = '<p>%s = %s' % (k, str(v))
            L.append(p.encode('utf-8'))
        L.append(b'</html>')
        return L
    # Everything else must be a well-formed POST /run aimed at our host.
    if host != HOST or method != 'POST' or path != '/run' or not environ.get('CONTENT_TYPE', '').lower().startswith('application/x-www-form-urlencoded'):
        start_response('400 Bad Request', [('Content-Type', 'application/json')])
        return [b'{"error":"bad_request"}']
    s = environ['wsgi.input'].read(int(environ['CONTENT_LENGTH']))
    qs = parse.parse_qs(s.decode('utf-8'))
    if 'code' not in qs:
        start_response('400 Bad Request', [('Content-Type', 'application/json')])
        return [b'{"error":"invalid_params"}']
    name = qs['name'][0] if 'name' in qs else get_name()
    code = qs['code'][0]
    headers = [('Content-Type', 'application/json')]
    # Restrict cross-origin use to liaoxuefeng.com subdomains.
    origin = environ.get('HTTP_ORIGIN', '')
    if origin.find('.liaoxuefeng.com') == -1:
        start_response('400 Bad Request', [('Content-Type', 'application/json')])
        return [b'{"error":"invalid_origin"}']
    headers.append(('Access-Control-Allow-Origin', origin))
    start_response('200 OK', headers)
    r = dict()
    try:
        fpath = write_py(name, code)
        print('Execute: %s %s' % (EXEC, fpath))
        # Run the submitted script in a fresh interpreter, capturing stderr
        # alongside stdout, with a hard 5-second limit.
        r['output'] = decode(subprocess.check_output([EXEC, fpath], stderr=subprocess.STDOUT, timeout=5))
    except subprocess.CalledProcessError as e:
        r = dict(error='Exception', output=decode(e.output))
    except subprocess.TimeoutExpired:
        r = dict(error='Timeout', output='执行超时')
    print('Execute done.')
    return [json.dumps(r).encode('utf-8')]
if __name__ == '__main__':
    # Run the local code-execution service.
    main()
| 33.842593 | 236 | 0.615595 |
import sys
def check_version():
v = sys.version_info
if v.major == 3 and v.minor >= 4:
return True
print('Your current python is %d.%d. Please use Python 3.4.' % (v.major, v.minor))
return False
if not check_version():
exit(1)
import os, io, json, subprocess, tempfile
from urllib import parse
from wsgiref.simple_server import make_server
EXEC = sys.executable
PORT = 39093
HOST = 'local.liaoxuefeng.com:%d' % PORT
TEMP = tempfile.mkdtemp(suffix='_py', prefix='learn_python_')
INDEX = 0
def main():
httpd = make_server('127.0.0.1', PORT, application)
print('Ready for Python code on port %d...' % PORT)
httpd.serve_forever()
def get_name():
global INDEX
INDEX = INDEX + 1
return 'test_%d' % INDEX
def write_py(name, code):
fpath = os.path.join(TEMP, '%s.py' % name)
with open(fpath, 'w', encoding='utf-8') as f:
f.write(code)
print('Code wrote to: %s' % fpath)
return fpath
def decode(s):
try:
return s.decode('utf-8')
except UnicodeDecodeError:
return s.decode('gbk')
def application(environ, start_response):
    """WSGI application with three routes:

      GET  /     - HTML form for submitting Python code
      GET  /env  - dump of the WSGI environ (debug aid)
      POST /run  - execute the posted code in a subprocess, answer with JSON

    /run only accepts form-encoded bodies addressed to the expected HOST and
    coming from an Origin under .liaoxuefeng.com.
    """
    host = environ.get('HTTP_HOST')
    method = environ.get('REQUEST_METHOD')
    path = environ.get('PATH_INFO')
    if method == 'GET' and path == '/':
        start_response('200 OK', [('Content-Type', 'text/html')])
        return [b'<html><head><title>Learning Python</title></head><body><form method="post" action="/run"><textarea name="code" style="width:90%;height: 600px"></textarea><p><button type="submit">Run</button></p></form></body></html>']
    if method == 'GET' and path == '/env':
        start_response('200 OK', [('Content-Type', 'text/html')])
        L = [b'<html><head><title>ENV</title></head><body>']
        for k, v in environ.items():
            p = '<p>%s = %s' % (k, str(v))
            L.append(p.encode('utf-8'))
        L.append(b'</html>')
        return L
    if host != HOST or method != 'POST' or path != '/run' or not environ.get('CONTENT_TYPE', '').lower().startswith('application/x-www-form-urlencoded'):
        start_response('400 Bad Request', [('Content-Type', 'application/json')])
        return [b'{"error":"bad_request"}']
    s = environ['wsgi.input'].read(int(environ['CONTENT_LENGTH']))
    qs = parse.parse_qs(s.decode('utf-8'))
    if 'code' not in qs:
        start_response('400 Bad Request', [('Content-Type', 'application/json')])
        return [b'{"error":"invalid_params"}']
    name = qs['name'][0] if 'name' in qs else get_name()
    code = qs['code'][0]
    headers = [('Content-Type', 'application/json')]
    origin = environ.get('HTTP_ORIGIN', '')
    if '.liaoxuefeng.com' not in origin:
        start_response('400 Bad Request', [('Content-Type', 'application/json')])
        return [b'{"error":"invalid_origin"}']
    headers.append(('Access-Control-Allow-Origin', origin))
    start_response('200 OK', headers)
    r = dict()
    try:
        fpath = write_py(name, code)
        print('Execute: %s %s' % (EXEC, fpath))
        r['output'] = decode(subprocess.check_output([EXEC, fpath], stderr=subprocess.STDOUT, timeout=5))
    except subprocess.CalledProcessError as e:
        # Script exited non-zero: return whatever it printed.
        r = dict(error='Exception', output=decode(e.output))
    except subprocess.TimeoutExpired:
        r = dict(error='Timeout', output='执行超时')
    # NOTE: a second `except subprocess.CalledProcessError` branch used to
    # follow here; it was unreachable (the first handler above always wins)
    # and has been removed.
    print('Execute done.')
    return [json.dumps(r).encode('utf-8')]
# Run the development server when executed as a script.
if __name__ == '__main__':
    main()
| true | true |
f7376ff86ba6b0f3ae7a501f37f92df71bd0d3a7 | 10,135 | py | Python | STEPIK.py | AlPus108/Python_lessons | 0e96117d9a8b76fd651e137fc126ddedaa6accd9 | [
"MIT"
] | null | null | null | STEPIK.py | AlPus108/Python_lessons | 0e96117d9a8b76fd651e137fc126ddedaa6accd9 | [
"MIT"
] | null | null | null | STEPIK.py | AlPus108/Python_lessons | 0e96117d9a8b76fd651e137fc126ddedaa6accd9 | [
"MIT"
] | null | null | null | # ЭКРАНИРОВАНИЕ
# a = "строка с \" кавычкой двойной и \' одинарной"
# Чтобы в строке появился символ обратной косой черты
# a = "Строка с обратным слешем \\"
# Перенос строки
# a = "Первая строка \nВторая строка
'''
Сделать перенос строки в Питоне можно и другим способом — объявить строку с помощью тройных кавычек.
То есть вы как бы сразу расставляете переносы строк прямо в программе, записывая значение переменной на разных строках.
Со строками, объявленными с помощью одинарных кавычек, так сделать не получится.
a = """Первая строка
Вторая строка
Третья строка"""
Помните, что если строка начинается с трёх кавычек, то и заканчиваться она должна тремя такими же кавычками.
'''
# a = '''Первая строка
# Вторая строка
# Третья строка'''
# print(a)
# ------------------------------------------
'''
result = print('Что вернет функция print?')
print(result)
Первой строкой эта программа выведет текст "Что вернет функция print?", а второй — слово None.
Специально для обозначения «ничего» в Питоне есть отдельный тип данных NoneType.
Переменные этого типа могут иметь только одно значение — None. None — это «никакое» или «неопределенное» значение.
В программе выше переменная result как раз и будет иметь значение None. Это одно из ключевых слов языка и,
если вы хотите присвоить None переменной, его надо писать как есть, без кавычек:
z = None
'''
# ---------------------------------------------------
# КВАДРАТНЫЙ КОРЕНЬ ИЗ ЧИСЛА
# print(9 ** 0.5) # Возвведение числа в степень 0.5 дает квадратный корень этого числа
# ----------------------------------------------------
# фОРМАТ ЕСТЕСТВЕННЫХ ЧИСЕЛ - через е-1
# print(5e-1) # это то же самое, что и 0.5. Число пять умножается на 10 в степени -1
# print(5 * 10**-1) # запись идентичная первой
#
# print(1234e-2) # это то же самое выражение, что и
# print(1234 * 10**-2)
#
# # Также можно использовать положительную степень
# print(1234e2) # аналогично (1234 * 10**2)
# print(1234 * 10**2) # 10**2 - 10 в квадрате
# -------------------------------------------------------
# #Помните, что любые арифметические операции выше по приоритету операций сравнения и логических операторов.
# -------------------------------------------------------
# Площадь треугольника:
# S = √p(p - a)(p - b)(p - c), где p = (a + b + c) / 2
# где (a + b + c) / 2
# Площадь прямоугольника:
# S = a · b
# Площадь круга:
# S = π r2
# Вычисление площади фигур
# s = input()
# if s == "треугольник":
# a = float(input())
# b = float(input())
# c = float(input())
# p = (a + b + c) / 2
#     print((p*((p-a)*(p-b)*(p-c)))**0.5) # Вычисление площади треугольника по формуле Герона
# elif s == "прямоугольник":
# a = float(input())
# b = float(input())
# print(a * b)
# elif s == "круг":
# r = float(input())
# print(3.14 * r**2)
# ----------------------------------------------------
# Склонение существительных
# x = int(input())
#
# if x % 10 == 1 and x % 100 != 11:
# print(x, 'программист')
# # elif x % 10 == 2 and x % 20 != 12 or x % 10 == 3 and x % 20 != 13 or x % 10 == 4 and x % 20 != 14:
# elif (x % 10 >= 2) and (x % 10 <= 4) and (x % 100 < 10 or x % 100 > 20):
# print(x, 'программиста')
# else:
# print(x, 'программистов')
# --------------------------------------------------
# Счастливый билет. Сумма первых трех цифр должна быть равна сумме последних трех
# x = int(input())
# n1 = x % 10
# x = x // 10
# n2 = x % 10
# x = x // 10
# n3 = x % 10
# x = x // 10
# n4 = x % 10
# x = x // 10
# n5 = x % 10
# x = x // 10
# n6 = x % 10
# if n1 + n2 + n3 == n4 + n5 + n6:
# print('Счастливый')
# else:
# print("Обычный")
# ------------------------------------------------
# Выводим треугольник из звездочек
# n = int(input())
# i = 1
# while i <= n:
# print('*' * i)
# i += 1
#
# stars = '*'
# while len(stars) <= n:
# print(stars)
# stars += '*'
# ----------------------------------------------
# Вычисляем сумму числе на задаваемом отрезке от а до b
# a = int(input())
# b = int(input())
# i = a
# while i != b:
# i += 1
# a += i
#
# print(a)
# ----------------------------------------
# Суммируем вводимые числа и, после первого нуля, подсчитываем сумму этих чисел
# n = 1
# i = 0
# while n != 0:
# n = int(input())
# i += n
# print(i)
# -----------------------------------------------------
# Ищем наименьшее общее кратное двух чисел
# a = int(input())
# b = int(input())
# i = 1
#
# while i % a != 0 or i % b != 0:
# i = i + 1
# print(i)
# --------------------------------------------------
# i = 0
#
# while i < 5:
# a, b = input("Введите два любых числа через пробел").split() # split() разделяет пары чисел по пробелу между ними
# a = int(a)
# b = int(b)
# if(a == 0) and (b == 0): # Если обе введенных цифры равны 0
# break # досрочно завершаем цикл
# if(a == 0) or (b == 0):
# continue # код ниже не выполняется и переходим к следующей итерации
# # (в том случае, если по условию она должна быть), вывод произведения чисел и приращивание i не происходит.
# То есть, эту пару чисел игнорируем
# print(a * b)
# i += 1
# ---------------------------------------------
# Напишите программу, которая считывает целые числа с консоли по одному числу в строке.
#
# Для каждого введённого числа проверить:
# если число меньше 10, то пропускаем это число;
# если число больше 100, то прекращаем считывать числа;
# в остальных случаях вывести это число обратно на консоль в отдельной строке.
# while True:
# n = int(input())
# if n < 10:
# continue
# if n > 100:
# break
# else:
# print(n)
# -------------------------------------------------
# Квадрат из звездочек в цикле for
# a = int(input())
# for i in range(a):
# print('*' * a)
# Двойной цикл
# n = int(input())
# for i in range(n):
# for j in range(n): # внутренний цикл выводит звездочку n-раз и создает строку
# print('*', end=' ') # end - указываем, что будем использовать в качестве разделителя.
# # Вданном случае "пробел". Если end отсутствует, будет обычный перевод на новую строку
# print() # этот print делает новую строку без вывода на экран
# -----------------------------------------------------------
# Таблица умножения
# Напишите программу, на вход которой даются четыре числа aa, bb, cc и dd, каждое в своей строке.
# Программа должна вывести фрагмент таблицы умножения для всех чисел отрезка [a; b]
# на все числа отрезка [c;d].
#
# Числа a, b, c и d являются натуральными и не превосходят 10, a <= b, c <= d.
#
# Следуйте формату вывода из примера, для разделения элементов внутри строки используйте '\t' — символ табуляции.
# Заметьте, что левым столбцом и верхней строкой выводятся сами числа из заданных отрезков
# — заголовочные столбец и строка таблицы.
# a = int(input())
# b = int(input())
# c = int(input())
# d = int(input())
#
# print()
# for x in range(c, d + 1):
# print('\t', x, end='')
# print()
# for i in range(a, b + 1):
# print(i, end='\t')
# for n in range(c, d + 1):
# if n < 10:
# print('', n * i, end='\t')
# else:
# print(n * i, end='\t')
# print()
# ------------------------------------------
# Вариант 1 Вывести сумму всех нечетных числел от a до b (включая границы)
# a, b = input().split() # получвем первое и последнее значения диапазона в одной строке через пробел
# a = int(a) # переводим значения в цифоровой формат
# b = int(b)
# x = 0
# for i in range(a, b+1): # циклом проходимся по множеству от a до b
# if i % 2 == 1: # если значение нечетное
# x += i # складываем значения
# print(x) # выводим сумму
#---------------------------------------------
# Вариант 2 Вывести сумму всех нечетных числел от a до b (включая границы)
# a, b = input().split() # получвем первое и последнее значения диапазона в одной строке через пробел
# a = int(a) # переводим значения в цифоровой формат
# b = int(b)
# x = 0
# if a % 2 == 0: # если первое число четное
# a = a + 1 # увеличиваем его на 1 (берем ближайшее к нему нечетное число)
# for i in range(a, b+1, 2): # циклом проходимся по множеству от a до b с шагом 2 (через 1) по нечетным числам
# x += i # складываем значения
# print(x) # выводим сумму
#---------------------------------------------
# Вариант 3 Вывести сумму всех нечетных числел от a до b (включая границы)
# Отличается от предыдущего вводом данных
# a, b = (int(i) for i in input().split()) # В такой ситуации, когда нам нужно к последовательности объектов
# # применить одну и ту же ф-ю, мы применяем специальную конструкцию,
# # которая называется list comprehensive (комплексный список).
# # В начале указываем, какую ф-ю мы применяем int(), которую применям для каждого элемента последовательности
# # Выражение, генерирующее эту последвательность, записывается справа input().split().
# # Для каждого объета этой последовательности через цикл применяем ф-ю int()
# # Такую последовательность удобно применять, когда несколько чисел выводятся в одной строке
# x = 0
# if a % 2 == 0: # если первое число четное
# a = a + 1 # увеличиваем его на 1 (берем ближайшее к нему нечетное число)
# for i in range(a, b+1, 2): # циклом проходимся по множеству от a до b с шагом 2 (через 1) по нечетным числам
# x += i # складываем значения
# print(x) # выводим сумму
#---------------------------------------------
'''
Напишите программу, которая считывает с клавиатуры два числа a и b,
считает и выводит на консоль среднее арифметическое всех чисел из отрезка [a; b], которые делятся на 3.
В приведенном ниже примере среднее арифметическое считается для чисел на отрезке [-5; 12].
Всего чисел, делящихся на 3, на этом отрезке 6: -3, 0, 3, 6, 9, 12. Их среднее арифметическое равно 4.5
На вход программе подаются интервалы, внутри которых всегда есть хотя бы одно число, которое делится на 3.
'''
# a, b = (int(i) for i in input().split())
# x = 0
# z = 0
# for i in range(a, b+1):
# if i % 3 == 0:
# x += i
# z += 1
# print(x / z)
#------------------------------------------
| 32.693548 | 119 | 0.574642 |
# Чтобы в строке появился символ обратной косой черты
# a = "Строка с обратным слешем \\"
# Перенос строки
# a = "Первая строка \nВторая строка
# a = '''Первая строка
# Вторая строка
# Третья строка'''
# print(a)
# ------------------------------------------
# ---------------------------------------------------
# КВАДРАТНЫЙ КОРЕНЬ ИЗ ЧИСЛА
# print(9 ** 0.5) # Возвведение числа в степень 0.5 дает квадратный корень этого числа
# ----------------------------------------------------
# фОРМАТ ЕСТЕСТВЕННЫХ ЧИСЕЛ - через е-1
# print(5e-1) # это то же самое, что и 0.5. Число пять умножается на 10 в степени -1
# print(5 * 10**-1) # запись идентичная первой
#
# print(1234e-2) # это то же самое выражение, что и
# print(1234 * 10**-2)
#
# # Также можно использовать положительную степень
# print(1234e2) # аналогично (1234 * 10**2)
# print(1234 * 10**2) # 10**2 - 10 в квадрате
# -------------------------------------------------------
# #Помните, что любые арифметические операции выше по приоритету операций сравнения и логических операторов.
# -------------------------------------------------------
# Площадь треугольника:
# S = √p(p - a)(p - b)(p - c), где p = (a + b + c) / 2
# где (a + b + c) / 2
# Площадь прямоугольника:
# S = a · b
# Площадь круга:
# S = π r2
# Вычисление площади фигур
# s = input()
# if s == "треугольник":
# a = float(input())
# b = float(input())
# c = float(input())
# p = (a + b + c) / 2
#     print((p*((p-a)*(p-b)*(p-c)))**0.5) # Вычисление площади треугольника по формуле Герона
# elif s == "прямоугольник":
# a = float(input())
# b = float(input())
# print(a * b)
# elif s == "круг":
# r = float(input())
# print(3.14 * r**2)
# ----------------------------------------------------
# Склонение существительных
# x = int(input())
#
# if x % 10 == 1 and x % 100 != 11:
# print(x, 'программист')
# # elif x % 10 == 2 and x % 20 != 12 or x % 10 == 3 and x % 20 != 13 or x % 10 == 4 and x % 20 != 14:
# elif (x % 10 >= 2) and (x % 10 <= 4) and (x % 100 < 10 or x % 100 > 20):
# print(x, 'программиста')
# else:
# print(x, 'программистов')
# --------------------------------------------------
# Счастливый билет. Сумма первых трех цифр должна быть равна сумме последних трех
# x = int(input())
# n1 = x % 10
# x = x // 10
# n2 = x % 10
# x = x // 10
# n3 = x % 10
# x = x // 10
# n4 = x % 10
# x = x // 10
# n5 = x % 10
# x = x // 10
# n6 = x % 10
# if n1 + n2 + n3 == n4 + n5 + n6:
# print('Счастливый')
# else:
# print("Обычный")
# ------------------------------------------------
# Выводим треугольник из звездочек
# n = int(input())
# i = 1
# while i <= n:
# print('*' * i)
# i += 1
#
# stars = '*'
# while len(stars) <= n:
# print(stars)
# stars += '*'
# ----------------------------------------------
# Вычисляем сумму числе на задаваемом отрезке от а до b
# a = int(input())
# b = int(input())
# i = a
# while i != b:
# i += 1
# a += i
#
# print(a)
# ----------------------------------------
# Суммируем вводимые числа и, после первого нуля, подсчитываем сумму этих чисел
# n = 1
# i = 0
# while n != 0:
# n = int(input())
# i += n
# print(i)
# -----------------------------------------------------
# Ищем наименьшее общее кратное двух чисел
# a = int(input())
# b = int(input())
# i = 1
#
# while i % a != 0 or i % b != 0:
# i = i + 1
# print(i)
# --------------------------------------------------
# i = 0
#
# while i < 5:
# a, b = input("Введите два любых числа через пробел").split() # split() разделяет пары чисел по пробелу между ними
# a = int(a)
# b = int(b)
# if(a == 0) and (b == 0): # Если обе введенных цифры равны 0
# break # досрочно завершаем цикл
# if(a == 0) or (b == 0):
# continue # код ниже не выполняется и переходим к следующей итерации
# # (в том случае, если по условию она должна быть), вывод произведения чисел и приращивание i не происходит.
# То есть, эту пару чисел игнорируем
# print(a * b)
# i += 1
# ---------------------------------------------
# Напишите программу, которая считывает целые числа с консоли по одному числу в строке.
#
# Для каждого введённого числа проверить:
# если число меньше 10, то пропускаем это число;
# если число больше 100, то прекращаем считывать числа;
# в остальных случаях вывести это число обратно на консоль в отдельной строке.
# while True:
# n = int(input())
# if n < 10:
# continue
# if n > 100:
# break
# else:
# print(n)
# -------------------------------------------------
# Квадрат из звездочек в цикле for
# a = int(input())
# for i in range(a):
# print('*' * a)
# Двойной цикл
# n = int(input())
# for i in range(n):
# for j in range(n): # внутренний цикл выводит звездочку n-раз и создает строку
# print('*', end=' ') # end - указываем, что будем использовать в качестве разделителя.
# # Вданном случае "пробел". Если end отсутствует, будет обычный перевод на новую строку
# print() # этот print делает новую строку без вывода на экран
# -----------------------------------------------------------
# Таблица умножения
# Напишите программу, на вход которой даются четыре числа aa, bb, cc и dd, каждое в своей строке.
# Программа должна вывести фрагмент таблицы умножения для всех чисел отрезка [a; b]
# на все числа отрезка [c;d].
#
# Числа a, b, c и d являются натуральными и не превосходят 10, a <= b, c <= d.
#
# Следуйте формату вывода из примера, для разделения элементов внутри строки используйте '\t' — символ табуляции.
# Заметьте, что левым столбцом и верхней строкой выводятся сами числа из заданных отрезков
# — заголовочные столбец и строка таблицы.
# a = int(input())
# b = int(input())
# c = int(input())
# d = int(input())
#
# print()
# for x in range(c, d + 1):
# print('\t', x, end='')
# print()
# for i in range(a, b + 1):
# print(i, end='\t')
# for n in range(c, d + 1):
# if n < 10:
# print('', n * i, end='\t')
# else:
# print(n * i, end='\t')
# print()
# ------------------------------------------
# Вариант 1 Вывести сумму всех нечетных числел от a до b (включая границы)
# a, b = input().split() # получвем первое и последнее значения диапазона в одной строке через пробел
# a = int(a) # переводим значения в цифоровой формат
# b = int(b)
# x = 0
# for i in range(a, b+1): # циклом проходимся по множеству от a до b
# if i % 2 == 1: # если значение нечетное
# x += i # складываем значения
# print(x) # выводим сумму
#---------------------------------------------
# Вариант 2 Вывести сумму всех нечетных числел от a до b (включая границы)
# a, b = input().split() # получвем первое и последнее значения диапазона в одной строке через пробел
# a = int(a) # переводим значения в цифоровой формат
# b = int(b)
# x = 0
# if a % 2 == 0: # если первое число четное
# a = a + 1 # увеличиваем его на 1 (берем ближайшее к нему нечетное число)
# for i in range(a, b+1, 2): # циклом проходимся по множеству от a до b с шагом 2 (через 1) по нечетным числам
# x += i # складываем значения
# print(x) # выводим сумму
#---------------------------------------------
# Вариант 3 Вывести сумму всех нечетных числел от a до b (включая границы)
# Отличается от предыдущего вводом данных
# a, b = (int(i) for i in input().split()) # В такой ситуации, когда нам нужно к последовательности объектов
# # применить одну и ту же ф-ю, мы применяем специальную конструкцию,
# # которая называется list comprehensive (комплексный список).
# # В начале указываем, какую ф-ю мы применяем int(), которую применям для каждого элемента последовательности
# # Выражение, генерирующее эту последвательность, записывается справа input().split().
# # Для каждого объета этой последовательности через цикл применяем ф-ю int()
# # Такую последовательность удобно применять, когда несколько чисел выводятся в одной строке
# x = 0
# if a % 2 == 0: # если первое число четное
# a = a + 1 # увеличиваем его на 1 (берем ближайшее к нему нечетное число)
# for i in range(a, b+1, 2): # циклом проходимся по множеству от a до b с шагом 2 (через 1) по нечетным числам
# x += i # складываем значения
# print(x) # выводим сумму
#---------------------------------------------
# a, b = (int(i) for i in input().split())
# x = 0
# z = 0
# for i in range(a, b+1):
# if i % 3 == 0:
# x += i
# z += 1
# print(x / z)
#------------------------------------------
| true | true |
f73771decc67f67866c4a6986ba6c766191c1d51 | 2,972 | py | Python | edk2basetools/Table/TableDataModel.py | matthewfcarlson/edk2-pytool-base | ddf78ca6e2110f03e020a5bd0ca32b2a463fecff | [
"BSD-2-Clause-Patent"
] | null | null | null | edk2basetools/Table/TableDataModel.py | matthewfcarlson/edk2-pytool-base | ddf78ca6e2110f03e020a5bd0ca32b2a463fecff | [
"BSD-2-Clause-Patent"
] | 1 | 2020-04-14T22:23:01.000Z | 2020-04-15T06:47:53.000Z | edk2basetools/Table/TableDataModel.py | matthewfcarlson/edk2-pytool-base | ddf78ca6e2110f03e020a5bd0ca32b2a463fecff | [
"BSD-2-Clause-Patent"
] | null | null | null | ## @file
# This file is used to create/update/query/erase table for data models
#
# Copyright (c) 2008 - 2018, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import absolute_import
import edk2basetools.Common.EdkLogger as EdkLogger
import edk2basetools.CommonDataClass.DataClass as DataClass
from Table.Table import Table
from edk2basetools.Common.StringUtils import ConvertToSqlString
## TableDataModel
#
# This class defined a table used for data model
#
# @param object: Inherited from object class
#
#
class TableDataModel(Table):
    def __init__(self, Cursor):
        Table.__init__(self, Cursor)
        self.Table = 'DataModel'

    ## Create
    #
    # Create table DataModel (if it does not already exist) with columns:
    #   ID          - primary key
    #   CrossIndex  - cross index of a ModelType
    #   Name        - name of a ModelType
    #   Description - description of a ModelType
    #
    def Create(self):
        SqlCommand = """create table IF NOT EXISTS %s (ID INTEGER PRIMARY KEY,
                                                       CrossIndex INTEGER NOT NULL,
                                                       Name VARCHAR NOT NULL,
                                                       Description VARCHAR
                                                      )""" % self.Table
        Table.Create(self, SqlCommand)

    ## Insert
    #
    # Insert a record into table DataModel.
    #
    # @param CrossIndex:   CrossIndex of a ModelType
    # @param Name:         Name of a ModelType
    # @param Description:  Description of a ModelType
    #
    # @retval ID of the newly inserted record
    #
    def Insert(self, CrossIndex, Name, Description):
        self.ID = self.ID + 1
        # Escape quotes so the values can be embedded in the SQL text below.
        (Name, Description) = ConvertToSqlString((Name, Description))
        SqlCommand = """insert into %s values(%s, %s, '%s', '%s')""" % (self.Table, self.ID, CrossIndex, Name, Description)
        Table.Insert(self, SqlCommand)
        return self.ID

    ## InitTable
    #
    # Create all default records of table DataModel from DataClass.MODEL_LIST.
    #
    def InitTable(self):
        EdkLogger.verbose("\nInitialize table DataModel started ...")
        for Item in DataClass.MODEL_LIST:
            CrossIndex = Item[1]
            Name = Item[0]
            # MODEL_LIST carries no separate description; reuse the name.
            Description = Item[0]
            self.Insert(CrossIndex, Name, Description)
        EdkLogger.verbose("Initialize table DataModel ... DONE!")

    ## GetCrossIndex
    #
    # Get a model's cross index from its name, or -1 when not found.
    #
    # @param ModelName:    Name of the model
    #
    # @retval CrossIndex:  CrossIndex of the model
    #
    def GetCrossIndex(self, ModelName):
        CrossIndex = -1
        # Parameterized query (DB-API qmark style): a name containing quotes
        # can no longer break, or inject into, the SQL statement.
        SqlCommand = """select CrossIndex from DataModel where name = ?"""
        self.Cur.execute(SqlCommand, (ModelName,))
        for Item in self.Cur:
            CrossIndex = Item[0]
        return CrossIndex
| 32.659341 | 123 | 0.595895 |
from __future__ import absolute_import
import edk2basetools.Common.EdkLogger as EdkLogger
import edk2basetools.CommonDataClass.DataClass as DataClass
from Table.Table import Table
from edk2basetools.Common.StringUtils import ConvertToSqlString
# NOTE(review): this is a comment-stripped duplicate of TableDataModel above.
# The header line below lost its "class Tab" prefix during dataset extraction,
# so this text is NOT valid Python as written; it is kept byte-for-byte.
leDataModel(Table):
    def __init__(self, Cursor):
        Table.__init__(self, Cursor)
        self.Table = 'DataModel'
    # Create table DataModel (ID, CrossIndex, Name, Description) if missing.
    def Create(self):
        SqlCommand = """create table IF NOT EXISTS %s (ID INTEGER PRIMARY KEY,
                                                       CrossIndex INTEGER NOT NULL,
                                                       Name VARCHAR NOT NULL,
                                                       Description VARCHAR
                                                      )""" % self.Table
        Table.Create(self, SqlCommand)
    # Insert one record and return its new ID.
    def Insert(self, CrossIndex, Name, Description):
        self.ID = self.ID + 1
        (Name, Description) = ConvertToSqlString((Name, Description))
        SqlCommand = """insert into %s values(%s, %s, '%s', '%s')""" % (self.Table, self.ID, CrossIndex, Name, Description)
        Table.Insert(self, SqlCommand)
        return self.ID
    # Populate the table with the default records from DataClass.MODEL_LIST.
    def InitTable(self):
        EdkLogger.verbose("\nInitialize table DataModel started ...")
        for Item in DataClass.MODEL_LIST:
            CrossIndex = Item[1]
            Name = Item[0]
            Description = Item[0]
            self.Insert(CrossIndex, Name, Description)
        EdkLogger.verbose("Initialize table DataModel ... DONE!")
    # @param ModelName: Name of the model
    # @retval CrossIndex: CrossIndex of the model
    #
    def GetCrossIndex(self, ModelName):
        CrossIndex = -1
        SqlCommand = """select CrossIndex from DataModel where name = '""" + ModelName + """'"""
        self.Cur.execute(SqlCommand)
        for Item in self.Cur:
            CrossIndex = Item[0]
        return CrossIndex
| true | true |
f7377260feb68760a99e6c8c935a545eb16c71ed | 1,856 | py | Python | Phase3/Feedback.py | Surya97/MWDB-project | 508562913624416415cd143cef9b7689066037ef | [
"Apache-2.0"
] | null | null | null | Phase3/Feedback.py | Surya97/MWDB-project | 508562913624416415cd143cef9b7689066037ef | [
"Apache-2.0"
] | 2 | 2019-10-16T20:16:02.000Z | 2019-10-20T10:16:47.000Z | Phase3/Feedback.py | Surya97/MWDB-project | 508562913624416415cd143cef9b7689066037ef | [
"Apache-2.0"
] | 2 | 2019-09-07T16:47:36.000Z | 2021-11-16T11:41:31.000Z | import os
import sys
from pathlib import Path
sys.path.insert(1, '../Phase1')
sys.path.insert(2, '../Phase2')
import misc
import numpy as np
class Feedback:
    """Accumulates relevance-feedback labels and builds training inputs."""

    def __init__(self):
        self.task5_result = None
        self.reduced_pickle_file_folder = os.path.join(Path(os.path.dirname(__file__)).parent,
                                                       'Phase2', 'pickle_files')
        self.set_task5_result()
        # Rows of [feature..., label] built by generate_input_data_set().
        # (A duplicate re-initialization of this attribute was removed.)
        self.dataset = list()
        self.X = None
        self.y = None

    def generate_input_data_set(self, rorir_map, dataset_features):
        """Append a [features..., label] row for every labelled image.

        rorir_map maps image paths to labels; only relevant (1) and
        irrelevant (0) entries are used, anything else is ignored.
        """
        for image_id, label in rorir_map.items():
            image_id = os.path.basename(image_id)
            if label in (0, 1):
                feat = dataset_features[image_id].tolist()
                feat.append(label)
                self.dataset.append(np.array(feat))

    def set_task5_result(self):
        """Load the cached Task 5 result from the Phase 2 pickle folder."""
        self.task5_result = misc.load_from_pickle(self.reduced_pickle_file_folder, 'Task_5_Result')

    def generate_input_data(self, rorir_map, dataset_features):
        """Populate self.X (feature matrix) and self.y (label vector)."""
        X = []
        y = []
        for image_id, label in rorir_map.items():
            image_id = os.path.basename(image_id)
            if label in (0, 1):
                X.append(dataset_features[image_id])
                # Use the label from the current item directly; re-indexing
                # rorir_map with the basename raised KeyError whenever the
                # map was keyed by full paths.
                y.append(label)
        self.X = np.array(X)
        self.y = np.array(y)

    def euclidean_distance(self, dist1, dist2):
        """Return the Euclidean distance between two equal-length vectors."""
        return sum((a - b) ** 2 for a, b in zip(dist1, dist2)) ** 0.5
def save_result(self, result):
reduced_pickle_file_folder = os.path.join(Path(os.path.dirname(__file__)).parent,
'Phase2', 'pickle_files')
misc.save2pickle(result, reduced_pickle_file_folder, 'Task_5_Result') | 34.37037 | 99 | 0.578664 | import os
import sys
from pathlib import Path
sys.path.insert(1, '../Phase1')
sys.path.insert(2, '../Phase2')
import misc
import numpy as np
class Feedback:
    """Relevance-feedback helper: turns labelled images into training data."""

    def __init__(self):
        self.task5_result = None
        self.reduced_pickle_file_folder = os.path.join(Path(os.path.dirname(__file__)).parent,
                                                       'Phase2', 'pickle_files')
        self.set_task5_result()
        self.dataset = list()   # [feature..., label] rows; duplicate init removed
        self.X = None           # feature matrix built by generate_input_data()
        self.y = None           # label vector built by generate_input_data()

    def generate_input_data_set(self, rorir_map, dataset_features):
        """Collect one [features..., label] row per relevant/irrelevant image."""
        for image_path, label in rorir_map.items():
            if label not in (0, 1):
                continue  # only explicit relevant/irrelevant feedback is used
            key = os.path.basename(image_path)
            row = dataset_features[key].tolist() + [label]
            self.dataset.append(np.array(row))

    def set_task5_result(self):
        """Load the cached Task 5 result pickle."""
        self.task5_result = misc.load_from_pickle(self.reduced_pickle_file_folder, 'Task_5_Result')

    def generate_input_data(self, rorir_map, dataset_features):
        """Build self.X / self.y arrays from the labelled images."""
        features = []
        labels = []
        for image_path, label in rorir_map.items():
            if label not in (0, 1):
                continue
            key = os.path.basename(image_path)
            features.append(dataset_features[key])
            # Take the label from the item itself; looking rorir_map up again
            # with the basename broke for full-path keys.
            labels.append(label)
        self.X = np.array(features)
        self.y = np.array(labels)

    def euclidean_distance(self, dist1, dist2):
        """Euclidean distance between two equal-length vectors."""
        return sum((p - q) ** 2 for p, q in zip(dist1, dist2)) ** 0.5
def save_result(self, result):
reduced_pickle_file_folder = os.path.join(Path(os.path.dirname(__file__)).parent,
'Phase2', 'pickle_files')
misc.save2pickle(result, reduced_pickle_file_folder, 'Task_5_Result') | true | true |
f73774d9eff7444d644a87d43df54fb4ecacd966 | 6,862 | py | Python | PHASE_1/API_SourceCode/report/filter.py | unsw-se3011/SENG3011_Neon | 6ea05b62a470538b72ae1397ce6d29e41142945a | [
"MIT"
] | 1 | 2021-05-01T05:11:42.000Z | 2021-05-01T05:11:42.000Z | PHASE_1/API_SourceCode/report/filter.py | unsw-se3011/SENG3011_Neon | 6ea05b62a470538b72ae1397ce6d29e41142945a | [
"MIT"
] | null | null | null | PHASE_1/API_SourceCode/report/filter.py | unsw-se3011/SENG3011_Neon | 6ea05b62a470538b72ae1397ce6d29e41142945a | [
"MIT"
] | 3 | 2020-04-29T08:49:29.000Z | 2021-06-04T03:14:08.000Z | from rest_framework.filters import BaseFilterBackend, SearchFilter
from django.db.models import Q
from rest_framework.compat import coreapi, coreschema
from datetime import datetime
from django.utils.dateparse import parse_datetime
from rest_framework.serializers import ValidationError
class DatetimeFilter(BaseFilterBackend):
    """Restrict a queryset to rows whose `view.time_field` value falls inside
    the [start_date, end_date] range passed as query parameters."""

    # Expected format of the start_date / end_date query parameters.
    DATE_FORMAT = '%Y-%m-%dT%H:%M:%S'

    def get_search_fields(self, view, request):
        # Name of the model field to range-filter on; None disables the filter.
        return getattr(view, 'time_field', None)

    def filter_queryset(self, request, queryset, view):
        search_field = self.get_search_fields(view, request)
        if not search_field:
            return queryset
        raw_start = request.query_params.get("start_date", None)
        raw_end = request.query_params.get("end_date", None)
        # Guard before parsing: strptime(None, ...) raised TypeError when a
        # parameter was missing; an absent bound means "do not filter".
        if not raw_start or not raw_end:
            return queryset
        start_date = datetime.strptime(raw_start, self.DATE_FORMAT)
        end_date = datetime.strptime(raw_end, self.DATE_FORMAT)
        # QuerySet.filter returns a NEW queryset; the previous code discarded
        # the result, so the range filter was silently never applied.
        return queryset.filter(
            Q(**{search_field + "__gte": start_date}) &
            Q(**{search_field + "__lte": end_date}))

    def get_schema_fields(self, view):
        return [
            coreapi.Field(
                name="start_date",
                required=False,
                location='query',
                schema=coreschema.String(
                    title="start date",
                    description="Start datetime for filtering"
                )
            ),
            coreapi.Field(
                name="end_date",
                required=False,
                location='query',
                schema=coreschema.String(
                    title="end date",
                    description="End datetime for filtering"
                )
            ),
        ]
class ReportEventDatetimeRangeFilter(BaseFilterBackend):
    """Keep reports whose event interval overlaps the requested
    [start_date, end_date] range.

    Only usable on Report querysets (relies on the `reportevent` relation).
    """

    def filter_queryset(self, request, queryset, view):
        try:
            start_date = parse_datetime(
                request.query_params.get("start_date", None)
            )
            end_date = parse_datetime(
                request.query_params.get("end_date", None)
            )
        except ValueError as e:
            # Well-formed but impossible datetimes (e.g. month 13).
            raise ValidationError({
                'date': str(e)
            })
        except Exception:
            # parse_datetime(None) raises TypeError when a parameter is absent.
            if view.kwargs.get('pk', None):
                # Detail routes may omit the range: return unfiltered.
                return queryset
            raise ValidationError({
                'date': 'Both start date and end date are required.'
            })
        if start_date is None or end_date is None:
            # parse_datetime returns None for strings that do not match the
            # ISO format; previously this fell through and crashed with a
            # TypeError on the comparison below.
            raise ValidationError({
                'date': 'Dates must be in ISO 8601 format.'
            })
        if start_date > end_date:
            raise ValidationError({
                'date': 'Start date must be earlier than end date.'
            })
        # Overlap test: event starts inside the range, ends inside the range,
        # or spans the whole range.
        return queryset.filter(
            Q(
                reportevent__start_date__gte=start_date,
                reportevent__start_date__lte=end_date
            ) |
            Q(
                reportevent__end_date__gte=start_date,
                reportevent__end_date__lte=end_date
            ) |
            Q(
                reportevent__start_date__lte=start_date,
                reportevent__end_date__gte=end_date
            )
        )

    def get_schema_fields(self, view):
        return [
            coreapi.Field(
                name="start_date",
                required=False,
                location='query',
                schema=coreschema.String(
                    title="start date",
                    description="Start datetime of the event occurrence"
                )
            ),
            coreapi.Field(
                name="end_date",
                required=False,
                location='query',
                schema=coreschema.String(
                    title="end date",
                    description="End datetime of the event occurrence"
                )
            ),
        ]
# SearchFilter variant that reads its search terms from the "key_term"
# query parameter instead of DRF's default "search" parameter.
class KeytermFilter(SearchFilter):
    search_param = "key_term"
class LocationFilter(BaseFilterBackend):
    """Filter reports by the location of their events.

    Supports the specific continent/country/state/city parameters plus a
    generic `location` parameter matched against every location component.
    Only usable on Report querysets (relies on the `reportevent` relation).
    """

    # (query parameter, ORM lookup) pairs for the specific filters.
    FIELD_LOOKUPS = (
        ('continent', 'reportevent__location__continent__icontains'),
        ('country', 'reportevent__location__country__icontains'),
        ('state', 'reportevent__location__state__icontains'),
        ('city', 'reportevent__location__city__icontains'),
    )

    def filter_queryset(self, request, queryset, view):
        for param, lookup in self.FIELD_LOOKUPS:
            value = request.query_params.get(param, None)
            if value:
                queryset = queryset.filter(**{lookup: value})
        location = request.query_params.get("location", None)
        if location:
            # Generic search across every component of the location.
            queryset = queryset.filter(
                Q(reportevent__location__continent__icontains=location) |
                Q(reportevent__location__country__icontains=location) |
                Q(reportevent__location__state__icontains=location) |
                Q(reportevent__location__city__icontains=location) |
                Q(reportevent__location__name__icontains=location)
            )
        # The join through reportevent can yield the same report several
        # times; de-duplicate before returning.
        return queryset.distinct()

    def get_schema_fields(self, view):
        def field(name, description):
            return coreapi.Field(
                name=name,
                required=False,
                location='query',
                schema=coreschema.String(title=name, description=description)
            )
        return [
            field("continent", "continent of the event occurrence"),
            field("country", "country of the event occurrence"),
            field("state", "state of the event occurrence"),
            field("city", "city of the event occurrence"),
            # Previously undocumented even though filter_queryset supports it.
            field("location", "matched against any component of the location"),
        ]
| 32.67619 | 75 | 0.52492 | from rest_framework.filters import BaseFilterBackend, SearchFilter
from django.db.models import Q
from rest_framework.compat import coreapi, coreschema
from datetime import datetime
from django.utils.dateparse import parse_datetime
from rest_framework.serializers import ValidationError
class DatetimeFilter(BaseFilterBackend):
def get_search_fields(self, view, request):
return getattr(view, 'time_field', None)
def filter_queryset(self, request, queryset, view):
search_filed = self.get_search_fields(view, request)
if not search_filed:
return queryset
start_date = datetime.strptime(
request.query_params.get(
"start_date", None), '%Y-%m-%dT%H:%M:%S'
)
end_date = datetime.strptime(
request.query_params.get("end_date", None), '%Y-%m-%dT%H:%M:%S'
)
if not start_date or not end_date:
return queryset
queryset.filter(
Q(**{search_filed + "__gte": start_date}) &
Q(**{search_filed + "__lte": end_date}))
return queryset
def get_schema_fields(self, view):
return [
coreapi.Field(
name="start_date",
required=False,
location='query',
schema=coreschema.String(
title="start date",
description="Start datetime for filetring"
)
),
coreapi.Field(
name="end_date",
required=False,
location='query',
schema=coreschema.String(
title="end date",
description="End datetime for filetring"
)
),
]
class ReportEventDatetimeRangeFilter(BaseFilterBackend):
def filter_queryset(self, request, queryset, view):
try:
start_date = parse_datetime(
request.query_params.get(
"start_date", None
)
)
end_date = parse_datetime(
request.query_params.get("end_date", None)
)
except ValueError as e:
raise ValidationError({
'date': str(e)
})
except Exception as e:
if view.kwargs.get('pk', None):
return queryset
raise ValidationError({
'date': 'Both start date and end date are reuiqred.'
})
if start_date > end_date:
raise ValidationError({
'date': 'Start date must be earlier than end date.'
})
return queryset.filter(
Q(
reportevent__start_date__gte=start_date,
reportevent__start_date__lte=end_date
) |
Q(
reportevent__end_date__gte=start_date,
reportevent__end_date__lte=end_date
) |
Q(
reportevent__start_date__lte=start_date,
reportevent__end_date__gte=end_date
)
)
def get_schema_fields(self, view):
return [
coreapi.Field(
name="start_date",
required=False,
location='query',
schema=coreschema.String(
title="start date",
description="Start datetime of the event occour"
)
),
coreapi.Field(
name="end_date",
required=False,
location='query',
schema=coreschema.String(
title="end date",
description="End datetime of the event occour "
)
),
]
class KeytermFilter(SearchFilter):
search_param = "key_term"
class LocationFilter(BaseFilterBackend):
def filter_queryset(self, request, queryset, view):
continent = request.query_params.get("continent", None)
country = request.query_params.get("country", None)
state = request.query_params.get("state", None)
city = request.query_params.get("city", None)
location = request.query_params.get("location", None)
if continent:
queryset = queryset.filter(
reportevent__location__continent__icontains=continent)
if country:
queryset = queryset.filter(
reportevent__location__country__icontains=country)
if state:
queryset = queryset.filter(
reportevent__location__state__icontains=state)
if city:
queryset = queryset.filter(
reportevent__location__city__icontains=city)
if location:
queryset = queryset.filter(
Q(reportevent__location__continent__icontains=location) |
Q(reportevent__location__country__icontains=location) |
Q(reportevent__location__state__icontains=location) |
Q(reportevent__location__city__icontains=location) |
Q(reportevent__location__name__icontains=location)
)
queryset = queryset.distinct()
return queryset
def get_schema_fields(self, view):
return [
coreapi.Field(
name="continent",
required=False,
location='query',
schema=coreschema.String(
title="continent",
description="continent of the event occour"
)
),
coreapi.Field(
name="country",
required=False,
location='query',
schema=coreschema.String(
title="country",
description="country of the event occour"
)
),
coreapi.Field(
name="state",
required=False,
location='query',
schema=coreschema.String(
title="state",
description="state of the event occour"
)
),
coreapi.Field(
name="city",
required=False,
location='query',
schema=coreschema.String(
title="city",
description="city of the event occour "
)
),
]
| true | true |
f73775ae8f96777285bd3ec5461ed1a62a0f447a | 4,288 | py | Python | metal-archives.py | gersonhb/Metal-Archives | 87e7c04a6c1a76f550dc637884069442f606356d | [
"Unlicense"
] | null | null | null | metal-archives.py | gersonhb/Metal-Archives | 87e7c04a6c1a76f550dc637884069442f606356d | [
"Unlicense"
] | null | null | null | metal-archives.py | gersonhb/Metal-Archives | 87e7c04a6c1a76f550dc637884069442f606356d | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
import requests, re, os
from lxml import html
print('Ingrese nombre de la banda:')
bus=str(input())
pag=1
start=0
url='https://www.metal-archives.com/search/ajax-band-search/?field=name&query='+bus+'&sEcho='+str(pag)+'&iColumns=3&sColumns=&iDisplayStart='+str(start)+'&iDisplayLength=200&mDataProp_0=0&mDataProp_1=1&mDataProp_2=2'
headers={
"user-agent":"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.51 Safari/537.36"
}
r=requests.get(url,headers=headers)
busqueda=r.json()
tam=int(busqueda['iTotalRecords'])
aux=0
if(tam>0):
print('Se encontró '+str(tam)+' resultados\n')
lista=[]
for i in range(0,tam):
if i%200==0 and i//200>0:
pag+=1
start+=200
url='https://www.metal-archives.com/search/ajax-band-search/?field=name&query='+bus+'&sEcho='+str(pag)+'&iColumns=3&sColumns=&iDisplayStart='+str(start)+'&iDisplayLength=200&mDataProp_0=0&mDataProp_1=1&mDataProp_2=2'
r=requests.get(url,headers=headers)
busqueda=r.json()
aux=0
link=re.findall(r'<a href="(.*)">',busqueda['aaData'][aux][0])[0]
band=re.findall(r'">(.*)</a>',busqueda['aaData'][aux][0])[0]
genero=busqueda['aaData'][aux][1]
pais=busqueda['aaData'][aux][2]
id=re.findall('^.+/(\\d*)$',link)[0]
lista.append([band,genero,pais,link,id])
aux+=1
print("{:<3} {:<25} {:<40} {:<10}".format('#','Banda','Género','País'))
for i in range(0,tam):
print("{:<3} {:<25} {:<40} {:<10}".format(i+1,lista[i][0],lista[i][1],lista[i][2]))
print('\nIngrese número: ')
while(True):
try:
num=int(input())
if(num>0 and num<=tam):
break
else:
print('El número ingresado no existe en la lista')
print('Ingrese número: ')
continue
except:
print('Debe ingresar un número válido')
print('Ingrese número: ')
url=lista[num-1][3]
disc_url='https://www.metal-archives.com/band/discography/id/'+str(lista[num-1][4])+'/tab/all'
r=requests.get(url,headers=headers)
rd=requests.get(disc_url,headers=headers)
if r.status_code==200:
doc=html.fromstring(r.content)
disc_doc=html.fromstring(rd.content)
os.system('cls||clear')
print(doc.xpath('//h1//text()')[0]+'\n')
print(doc.xpath("//div[@id='band_stats']/dl[1]/dt[1]/text()")[0]+' '+doc.xpath("//div[@id='band_stats']/dl[1]//a/text()")[0])
print(doc.xpath("//div[@id='band_stats']/dl[1]/dt[2]/text()")[0]+' '+doc.xpath("//div[@id='band_stats']/dl[1]/dd[2]/text()")[0])
print(doc.xpath("//div[@id='band_stats']/dl[1]/dt[3]/text()")[0]+' '+doc.xpath("//div[@id='band_stats']/dl[1]/dd[3]/text()")[0])
print(doc.xpath("//div[@id='band_stats']/dl[1]/dt[4]/text()")[0]+' '+doc.xpath("//div[@id='band_stats']/dl[1]/dd[4]/text()")[0])
print()
print(doc.xpath("//div[@id='band_stats']/dl[2]/dt[1]/text()")[0]+' '+doc.xpath("//div[@id='band_stats']/dl[2]/dd[1]/text()")[0])
print(doc.xpath("//div[@id='band_stats']/dl[2]/dt[2]/text()")[0]+' '+doc.xpath("//div[@id='band_stats']/dl[2]/dd[2]/text()")[0])
print(doc.xpath("//div[@id='band_stats']/dl[2]/dt[3]/text()")[0]+' '+doc.xpath("//div[@id='band_stats']/dl[2]//a/text()")[0])
print()
print(doc.xpath("//div[@id='band_stats']/dl[3]/dt[1]/text()")[0]+' '+doc.xpath("//div[@id='band_stats']/dl[3]/dd[1]")[0].text_content())
print()
print('Discografía:')
print('---------------------------------------------------------')
discs=disc_doc.xpath("//tbody/tr")
aux=1
for disc in discs:
print(disc.xpath('(//td[1]/a/text())['+str(aux)+']')[0])
print(disc.xpath('(//td[2]/text())['+str(aux)+']')[0])
print(disc.xpath('(//td[3]/text())['+str(aux)+']')[0])
if aux==len(discs):
break
print()
aux+=1
print('---------------------------------------------------------')
else:
print('Error al mostrar información. Por favor vuelva a intentar.')
else:
print('No se encontró resultado')
| 43.755102 | 228 | 0.536847 |
import requests, re, os
from lxml import html
print('Ingrese nombre de la banda:')
bus=str(input())
pag=1
start=0
url='https://www.metal-archives.com/search/ajax-band-search/?field=name&query='+bus+'&sEcho='+str(pag)+'&iColumns=3&sColumns=&iDisplayStart='+str(start)+'&iDisplayLength=200&mDataProp_0=0&mDataProp_1=1&mDataProp_2=2'
headers={
"user-agent":"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.51 Safari/537.36"
}
r=requests.get(url,headers=headers)
busqueda=r.json()
tam=int(busqueda['iTotalRecords'])
aux=0
if(tam>0):
print('Se encontró '+str(tam)+' resultados\n')
lista=[]
for i in range(0,tam):
if i%200==0 and i//200>0:
pag+=1
start+=200
url='https://www.metal-archives.com/search/ajax-band-search/?field=name&query='+bus+'&sEcho='+str(pag)+'&iColumns=3&sColumns=&iDisplayStart='+str(start)+'&iDisplayLength=200&mDataProp_0=0&mDataProp_1=1&mDataProp_2=2'
r=requests.get(url,headers=headers)
busqueda=r.json()
aux=0
link=re.findall(r'<a href="(.*)">',busqueda['aaData'][aux][0])[0]
band=re.findall(r'">(.*)</a>',busqueda['aaData'][aux][0])[0]
genero=busqueda['aaData'][aux][1]
pais=busqueda['aaData'][aux][2]
id=re.findall('^.+/(\\d*)$',link)[0]
lista.append([band,genero,pais,link,id])
aux+=1
print("{:<3} {:<25} {:<40} {:<10}".format('#','Banda','Género','País'))
for i in range(0,tam):
print("{:<3} {:<25} {:<40} {:<10}".format(i+1,lista[i][0],lista[i][1],lista[i][2]))
print('\nIngrese número: ')
while(True):
try:
num=int(input())
if(num>0 and num<=tam):
break
else:
print('El número ingresado no existe en la lista')
print('Ingrese número: ')
continue
except:
print('Debe ingresar un número válido')
print('Ingrese número: ')
url=lista[num-1][3]
disc_url='https://www.metal-archives.com/band/discography/id/'+str(lista[num-1][4])+'/tab/all'
r=requests.get(url,headers=headers)
rd=requests.get(disc_url,headers=headers)
if r.status_code==200:
doc=html.fromstring(r.content)
disc_doc=html.fromstring(rd.content)
os.system('cls||clear')
print(doc.xpath('//h1//text()')[0]+'\n')
print(doc.xpath("//div[@id='band_stats']/dl[1]/dt[1]/text()")[0]+' '+doc.xpath("//div[@id='band_stats']/dl[1]//a/text()")[0])
print(doc.xpath("//div[@id='band_stats']/dl[1]/dt[2]/text()")[0]+' '+doc.xpath("//div[@id='band_stats']/dl[1]/dd[2]/text()")[0])
print(doc.xpath("//div[@id='band_stats']/dl[1]/dt[3]/text()")[0]+' '+doc.xpath("//div[@id='band_stats']/dl[1]/dd[3]/text()")[0])
print(doc.xpath("//div[@id='band_stats']/dl[1]/dt[4]/text()")[0]+' '+doc.xpath("//div[@id='band_stats']/dl[1]/dd[4]/text()")[0])
print()
print(doc.xpath("//div[@id='band_stats']/dl[2]/dt[1]/text()")[0]+' '+doc.xpath("//div[@id='band_stats']/dl[2]/dd[1]/text()")[0])
print(doc.xpath("//div[@id='band_stats']/dl[2]/dt[2]/text()")[0]+' '+doc.xpath("//div[@id='band_stats']/dl[2]/dd[2]/text()")[0])
print(doc.xpath("//div[@id='band_stats']/dl[2]/dt[3]/text()")[0]+' '+doc.xpath("//div[@id='band_stats']/dl[2]//a/text()")[0])
print()
print(doc.xpath("//div[@id='band_stats']/dl[3]/dt[1]/text()")[0]+' '+doc.xpath("//div[@id='band_stats']/dl[3]/dd[1]")[0].text_content())
print()
print('Discografía:')
print('---------------------------------------------------------')
discs=disc_doc.xpath("//tbody/tr")
aux=1
for disc in discs:
print(disc.xpath('(//td[1]/a/text())['+str(aux)+']')[0])
print(disc.xpath('(//td[2]/text())['+str(aux)+']')[0])
print(disc.xpath('(//td[3]/text())['+str(aux)+']')[0])
if aux==len(discs):
break
print()
aux+=1
print('---------------------------------------------------------')
else:
print('Error al mostrar información. Por favor vuelva a intentar.')
else:
print('No se encontró resultado')
| true | true |
f73777d7ca2d4d80693cb15dd56f511c05f1c49c | 3,163 | py | Python | alipay/aop/api/domain/AlipayUserFamilyShareAuthCheckModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/AlipayUserFamilyShareAuthCheckModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/AlipayUserFamilyShareAuthCheckModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayUserFamilyShareAuthCheckModel(object):
def __init__(self):
self._resource_id = None
self._scene_id = None
self._target_biz_user_id = None
self._target_user_biz_source = None
self._user_id = None
@property
def resource_id(self):
return self._resource_id
@resource_id.setter
def resource_id(self, value):
self._resource_id = value
@property
def scene_id(self):
return self._scene_id
@scene_id.setter
def scene_id(self, value):
self._scene_id = value
@property
def target_biz_user_id(self):
return self._target_biz_user_id
@target_biz_user_id.setter
def target_biz_user_id(self, value):
self._target_biz_user_id = value
@property
def target_user_biz_source(self):
return self._target_user_biz_source
@target_user_biz_source.setter
def target_user_biz_source(self, value):
self._target_user_biz_source = value
@property
def user_id(self):
return self._user_id
@user_id.setter
def user_id(self, value):
self._user_id = value
def to_alipay_dict(self):
params = dict()
if self.resource_id:
if hasattr(self.resource_id, 'to_alipay_dict'):
params['resource_id'] = self.resource_id.to_alipay_dict()
else:
params['resource_id'] = self.resource_id
if self.scene_id:
if hasattr(self.scene_id, 'to_alipay_dict'):
params['scene_id'] = self.scene_id.to_alipay_dict()
else:
params['scene_id'] = self.scene_id
if self.target_biz_user_id:
if hasattr(self.target_biz_user_id, 'to_alipay_dict'):
params['target_biz_user_id'] = self.target_biz_user_id.to_alipay_dict()
else:
params['target_biz_user_id'] = self.target_biz_user_id
if self.target_user_biz_source:
if hasattr(self.target_user_biz_source, 'to_alipay_dict'):
params['target_user_biz_source'] = self.target_user_biz_source.to_alipay_dict()
else:
params['target_user_biz_source'] = self.target_user_biz_source
if self.user_id:
if hasattr(self.user_id, 'to_alipay_dict'):
params['user_id'] = self.user_id.to_alipay_dict()
else:
params['user_id'] = self.user_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayUserFamilyShareAuthCheckModel()
if 'resource_id' in d:
o.resource_id = d['resource_id']
if 'scene_id' in d:
o.scene_id = d['scene_id']
if 'target_biz_user_id' in d:
o.target_biz_user_id = d['target_biz_user_id']
if 'target_user_biz_source' in d:
o.target_user_biz_source = d['target_user_biz_source']
if 'user_id' in d:
o.user_id = d['user_id']
return o
| 31.316832 | 95 | 0.62251 |
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayUserFamilyShareAuthCheckModel(object):
def __init__(self):
self._resource_id = None
self._scene_id = None
self._target_biz_user_id = None
self._target_user_biz_source = None
self._user_id = None
@property
def resource_id(self):
return self._resource_id
@resource_id.setter
def resource_id(self, value):
self._resource_id = value
@property
def scene_id(self):
return self._scene_id
@scene_id.setter
def scene_id(self, value):
self._scene_id = value
@property
def target_biz_user_id(self):
return self._target_biz_user_id
@target_biz_user_id.setter
def target_biz_user_id(self, value):
self._target_biz_user_id = value
@property
def target_user_biz_source(self):
return self._target_user_biz_source
@target_user_biz_source.setter
def target_user_biz_source(self, value):
self._target_user_biz_source = value
@property
def user_id(self):
return self._user_id
@user_id.setter
def user_id(self, value):
self._user_id = value
def to_alipay_dict(self):
params = dict()
if self.resource_id:
if hasattr(self.resource_id, 'to_alipay_dict'):
params['resource_id'] = self.resource_id.to_alipay_dict()
else:
params['resource_id'] = self.resource_id
if self.scene_id:
if hasattr(self.scene_id, 'to_alipay_dict'):
params['scene_id'] = self.scene_id.to_alipay_dict()
else:
params['scene_id'] = self.scene_id
if self.target_biz_user_id:
if hasattr(self.target_biz_user_id, 'to_alipay_dict'):
params['target_biz_user_id'] = self.target_biz_user_id.to_alipay_dict()
else:
params['target_biz_user_id'] = self.target_biz_user_id
if self.target_user_biz_source:
if hasattr(self.target_user_biz_source, 'to_alipay_dict'):
params['target_user_biz_source'] = self.target_user_biz_source.to_alipay_dict()
else:
params['target_user_biz_source'] = self.target_user_biz_source
if self.user_id:
if hasattr(self.user_id, 'to_alipay_dict'):
params['user_id'] = self.user_id.to_alipay_dict()
else:
params['user_id'] = self.user_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayUserFamilyShareAuthCheckModel()
if 'resource_id' in d:
o.resource_id = d['resource_id']
if 'scene_id' in d:
o.scene_id = d['scene_id']
if 'target_biz_user_id' in d:
o.target_biz_user_id = d['target_biz_user_id']
if 'target_user_biz_source' in d:
o.target_user_biz_source = d['target_user_biz_source']
if 'user_id' in d:
o.user_id = d['user_id']
return o
| true | true |
f73777e36141dbe3753dfde57de14ece205886c0 | 1,136 | py | Python | dsa/patterns/bitwise_xor/complement_of_base_10.py | bksahu/dsa | 4b36abbb3e00ce449c435c44260316f46d6d35ec | [
"MIT"
] | null | null | null | dsa/patterns/bitwise_xor/complement_of_base_10.py | bksahu/dsa | 4b36abbb3e00ce449c435c44260316f46d6d35ec | [
"MIT"
] | 4 | 2019-10-02T14:24:54.000Z | 2020-03-26T07:06:15.000Z | dsa/patterns/bitwise_xor/complement_of_base_10.py | bksahu/dsa | 4b36abbb3e00ce449c435c44260316f46d6d35ec | [
"MIT"
] | 2 | 2019-10-02T15:57:51.000Z | 2020-04-10T07:22:06.000Z | """
Every non-negative integer N has a binary representation, for example, 8 can be represented as “1000”
in binary and 7 as “0111” in binary.
The complement of a binary representation is the number in binary that we get when we change every 1
to a 0 and every 0 to a 1. For example, the binary complement of “1010” is “0101”.
For a given positive number N in base-10, return the complement of its binary representation as a base-10
integer.
Example 1:
Input: 8
Output: 7
Explanation: 8 is 1000 in binary, its complement is 0111 in binary, which is 7 in base-10.
Example 2:
Input: 10
Output: 5
Explanation: 10 is 1010 in binary, its complement is 0101 in binary, which is 5 in base-10.
"""
# intuition: add the compliments to get 2**n - 1 where n is no. of bits
# example: 1000 (8) + 0001 (7) = 1111 (15)
def calculate_bitwise_complement(N):
if N == 0:
return 1
bits = 0
n = N
while n:
bits += 1
n = n >> 1
bound = pow(2, bits) - 1
return bound ^ N # bound - N
if __name__ == "__main__":
print(calculate_bitwise_complement(8))
print(calculate_bitwise_complement(10))
| 27.047619 | 106 | 0.683979 |
def calculate_bitwise_complement(N):
if N == 0:
return 1
bits = 0
n = N
while n:
bits += 1
n = n >> 1
bound = pow(2, bits) - 1
return bound ^ N
if __name__ == "__main__":
print(calculate_bitwise_complement(8))
print(calculate_bitwise_complement(10))
| true | true |
f737783aff46a71f34ea0722cf26b83af715ac99 | 2,331 | py | Python | datacamp-data-scientist-master/Python-career-track/07-cleaning-data-in-python/02-tidying-data-for-analysis.py | vitthal10/datacamp | 522d2b192656f7f6563bf6fc33471b048f1cf029 | [
"MIT"
] | null | null | null | datacamp-data-scientist-master/Python-career-track/07-cleaning-data-in-python/02-tidying-data-for-analysis.py | vitthal10/datacamp | 522d2b192656f7f6563bf6fc33471b048f1cf029 | [
"MIT"
] | null | null | null | datacamp-data-scientist-master/Python-career-track/07-cleaning-data-in-python/02-tidying-data-for-analysis.py | vitthal10/datacamp | 522d2b192656f7f6563bf6fc33471b048f1cf029 | [
"MIT"
] | 1 | 2021-08-08T05:09:52.000Z | 2021-08-08T05:09:52.000Z | # Print the head of airquality
print(airquality.head())
# Melt airquality: airquality_melt
airquality_melt = pd.melt(airquality, id_vars=["Month", "Day"], value_vars=["Ozone","Solar.R", "Wind", "Temp"])
# Print the head of airquality_melt
print(airquality_melt.head())
# Print the head of airquality
print(airquality.head())
# Melt airquality: airquality_melt
airquality_melt = pd.melt(airquality, id_vars = ["Month", "Day"], value_vars=["Ozone", "Solar.R", "Wind", "Temp"], var_name="measurement", value_name="reading")
# Print the head of airquality_melt
print(airquality_melt.head())
# Print the head of airquality_melt
print(airquality_melt.head())
# Pivot airquality_melt: airquality_pivot
airquality_pivot = pd.pivot_table(airquality_melt, index=["Month", "Day"], columns="measurement", values="reading")
# Print the head of airquality_pivot
print(airquality_pivot.head())
# Print the index of airquality_pivot
print(airquality_pivot.index)
# Reset the index of airquality_pivot: airquality_pivot
airquality_pivot = airquality_pivot.reset_index()
# Print the new index of airquality_pivot
print(airquality_pivot.index)
# Print the head of airquality_pivot
print(airquality_pivot.head())
# Pivot airquality_dup: airquality_pivot
airquality_pivot = airquality_dup.pivot_table(index=["Month", "Day"], columns="measurement", values="reading", aggfunc=np.mean)
# Reset the index of airquality_pivot
airquality_pivot = airquality_pivot.reset_index()
# Print the head of airquality_pivot
print(airquality_pivot.head())
# Print the head of airquality
print(airquality.head())
# Melt tb: tb_melt
tb_melt = pd.melt(tb, id_vars=["country", "year"])
# Create the 'gender' column
tb_melt['gender'] = tb_melt.variable.str[0]
# Create the 'age_group' column
tb_melt['age_group'] = tb_melt.variable.str[1:]
# Print the head of tb_melt
print(tb_melt.head())
# Melt ebola: ebola_melt
ebola_melt = pd.melt(ebola, id_vars=["Date", "Day"], var_name="type_country", value_name="counts")
# Create the 'str_split' column
ebola_melt['str_split'] = ebola_melt["type_country"].str.split("_")
# Create the 'type' column
ebola_melt['type'] = ebola_melt["str_split"].str.get(0)
# Create the 'country' column
ebola_melt['country'] = ebola_melt["str_split"].str.get(1)
# Print the head of ebola_melt
print(ebola_melt.head())
| 27.423529 | 160 | 0.75547 |
print(airquality.head())
airquality_melt = pd.melt(airquality, id_vars=["Month", "Day"], value_vars=["Ozone","Solar.R", "Wind", "Temp"])
print(airquality_melt.head())
print(airquality.head())
airquality_melt = pd.melt(airquality, id_vars = ["Month", "Day"], value_vars=["Ozone", "Solar.R", "Wind", "Temp"], var_name="measurement", value_name="reading")
print(airquality_melt.head())
print(airquality_melt.head())
airquality_pivot = pd.pivot_table(airquality_melt, index=["Month", "Day"], columns="measurement", values="reading")
print(airquality_pivot.head())
print(airquality_pivot.index)
airquality_pivot = airquality_pivot.reset_index()
print(airquality_pivot.index)
print(airquality_pivot.head())
airquality_pivot = airquality_dup.pivot_table(index=["Month", "Day"], columns="measurement", values="reading", aggfunc=np.mean)
airquality_pivot = airquality_pivot.reset_index()
print(airquality_pivot.head())
print(airquality.head())
tb_melt = pd.melt(tb, id_vars=["country", "year"])
tb_melt['gender'] = tb_melt.variable.str[0]
tb_melt['age_group'] = tb_melt.variable.str[1:]
print(tb_melt.head())
ebola_melt = pd.melt(ebola, id_vars=["Date", "Day"], var_name="type_country", value_name="counts")
ebola_melt['str_split'] = ebola_melt["type_country"].str.split("_")
ebola_melt['type'] = ebola_melt["str_split"].str.get(0)
ebola_melt['country'] = ebola_melt["str_split"].str.get(1)
print(ebola_melt.head())
| true | true |
f73778e9b3d28fecc5870a455813aa3930059e19 | 63,593 | py | Python | mesonbuild/mesonlib.py | SamuelLongchamps/meson | 4720a10c6bc92bc9e6bc500ff9eaa492ee285f85 | [
"Apache-2.0"
] | null | null | null | mesonbuild/mesonlib.py | SamuelLongchamps/meson | 4720a10c6bc92bc9e6bc500ff9eaa492ee285f85 | [
"Apache-2.0"
] | null | null | null | mesonbuild/mesonlib.py | SamuelLongchamps/meson | 4720a10c6bc92bc9e6bc500ff9eaa492ee285f85 | [
"Apache-2.0"
] | null | null | null | # Copyright 2012-2020 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A library of random helper functionality."""
from pathlib import Path
import sys
import stat
import time
import platform, subprocess, operator, os, shlex, shutil, re
import collections
from enum import IntEnum
from functools import lru_cache, wraps
from itertools import tee, filterfalse
from tempfile import TemporaryDirectory
import typing as T
import uuid
import textwrap
from mesonbuild import mlog
if T.TYPE_CHECKING:
from .build import ConfigurationData
from .coredata import OptionDictType, UserOption
from .compilers.compilers import CompilerType
from .interpreterbase import ObjectHolder
FileOrString = T.Union['File', str]
_T = T.TypeVar('_T')
_U = T.TypeVar('_U')
have_fcntl = False
have_msvcrt = False
# TODO: this is such a hack, this really should be either in coredata or in the
# interpreter
# {subproject: project_meson_version}
project_meson_versions = collections.defaultdict(str) # type: T.DefaultDict[str, str]
try:
import fcntl
have_fcntl = True
except Exception:
pass
try:
import msvcrt
have_msvcrt = True
except Exception:
pass
from glob import glob
if os.path.basename(sys.executable) == 'meson.exe':
# In Windows and using the MSI installed executable.
python_command = [sys.executable, 'runpython']
else:
python_command = [sys.executable]
meson_command = None
class MesonException(Exception):
'''Exceptions thrown by Meson'''
def __init__(self, *args: object, file: T.Optional[str] = None,
lineno: T.Optional[int] = None, colno: T.Optional[int] = None):
super().__init__(*args)
self.file = file
self.lineno = lineno
self.colno = colno
class EnvironmentException(MesonException):
'''Exceptions thrown while processing and creating the build environment'''
class GitException(MesonException):
def __init__(self, msg: str, output: T.Optional[str] = None):
super().__init__(msg)
self.output = output.strip() if output else ''
GIT = shutil.which('git')
def git(cmd: T.List[str], workingdir: str, check: bool = False, **kwargs: T.Any) -> T.Tuple[subprocess.Popen, str, str]:
cmd = [GIT] + cmd
p, o, e = Popen_safe(cmd, cwd=workingdir, **kwargs)
if check and p.returncode != 0:
raise GitException('Git command failed: ' + str(cmd), e)
return p, o, e
def quiet_git(cmd: T.List[str], workingdir: str, check: bool = False) -> T.Tuple[bool, str]:
if not GIT:
m = 'Git program not found.'
if check:
raise GitException(m)
return False, m
p, o, e = git(cmd, workingdir, check)
if p.returncode != 0:
return False, e
return True, o
def verbose_git(cmd: T.List[str], workingdir: str, check: bool = False) -> bool:
if not GIT:
m = 'Git program not found.'
if check:
raise GitException(m)
return False
p, _, _ = git(cmd, workingdir, check, stdout=None, stderr=None)
return p.returncode == 0
def set_meson_command(mainfile: str) -> None:
global python_command
global meson_command
# On UNIX-like systems `meson` is a Python script
# On Windows `meson` and `meson.exe` are wrapper exes
if not mainfile.endswith('.py'):
meson_command = [mainfile]
elif os.path.isabs(mainfile) and mainfile.endswith('mesonmain.py'):
# Can't actually run meson with an absolute path to mesonmain.py, it must be run as -m mesonbuild.mesonmain
meson_command = python_command + ['-m', 'mesonbuild.mesonmain']
else:
# Either run uninstalled, or full path to meson-script.py
meson_command = python_command + [mainfile]
# We print this value for unit tests.
if 'MESON_COMMAND_TESTS' in os.environ:
mlog.log('meson_command is {!r}'.format(meson_command))
def is_ascii_string(astring: T.Union[str, bytes]) -> bool:
try:
if isinstance(astring, str):
astring.encode('ascii')
elif isinstance(astring, bytes):
astring.decode('ascii')
except UnicodeDecodeError:
return False
return True
def check_direntry_issues(direntry_array: T.Union[T.List[T.Union[str, bytes]], str, bytes]) -> None:
import locale
# Warn if the locale is not UTF-8. This can cause various unfixable issues
# such as os.stat not being able to decode filenames with unicode in them.
# There is no way to reset both the preferred encoding and the filesystem
# encoding, so we can just warn about it.
e = locale.getpreferredencoding()
if e.upper() != 'UTF-8' and not is_windows():
if not isinstance(direntry_array, list):
direntry_array = [direntry_array]
for de in direntry_array:
if is_ascii_string(de):
continue
mlog.warning(textwrap.dedent('''
You are using {!r} which is not a Unicode-compatible
locale but you are trying to access a file system entry called {!r} which is
not pure ASCII. This may cause problems.
'''.format(e, de)), file=sys.stderr)
# Put this in objects that should not get dumped to pickle files
# by accident.
import threading
an_unpicklable_object = threading.Lock()
class FileMode:
    '''Install-time file permissions given in symbolic 'rwxr-xr-x' form.

    The nine characters are three (read, write, execute) triads for owner,
    group and others.  In each triad 'r'/'w' grant read/write and '-' denies.
    The third character additionally encodes the special bits:
      owner/group: 'x' exec, 's' exec+setuid/setgid, 'S' setuid/setgid only
      others:      'x' exec, 't' exec+sticky,        'T' sticky only
    The meanings of 'rwx' perms is not obvious for directories; see:
    https://www.hackinglinuxexposed.com/articles/20030424.html
    For setuid/setgid/sticky notation, see:
    https://en.wikipedia.org/wiki/File_system_permissions#Symbolic_notation
    '''
    symbolic_perms_regex = re.compile('[r-][w-][xsS-]' # Owner perms
                                      '[r-][w-][xsS-]' # Group perms
                                      '[r-][w-][xtT-]') # Others perms
    # Per-triad (read, write, execute, special) bits.  The special bit is
    # setuid, setgid and the sticky bit respectively.
    _triad_bits = (
        (stat.S_IRUSR, stat.S_IWUSR, stat.S_IXUSR, stat.S_ISUID),
        (stat.S_IRGRP, stat.S_IWGRP, stat.S_IXGRP, stat.S_ISGID),
        (stat.S_IROTH, stat.S_IWOTH, stat.S_IXOTH, stat.S_ISVTX),
    )
    def __init__(self, perms: T.Optional[str] = None, owner: T.Optional[str] = None,
                 group: T.Optional[str] = None):
        # perms_s keeps the symbolic form (or None); perms holds the numeric
        # st_mode bits, or -1 meaning "do not touch the permissions".
        self.perms_s = perms
        self.perms = self.perms_s_to_bits(perms)
        self.owner = owner
        self.group = group
    def __repr__(self) -> str:
        # Bug fix: the closing '>' was previously missing from the repr.
        ret = '<FileMode: {!r} owner={} group={}>'
        return ret.format(self.perms_s, self.owner, self.group)
    @classmethod
    def perms_s_to_bits(cls, perms_s: T.Optional[str]) -> int:
        '''
        Does the opposite of stat.filemode(), converts strings of the form
        'rwxr-xr-x' to st_mode enums which can be passed to os.chmod().
        Returns -1 when perms_s is None (permissions should be left alone).
        Raises MesonException for a malformed permission string.
        '''
        if perms_s is None:
            # No perms specified, we will not touch the permissions
            return -1
        eg = 'rwxr-xr-x'
        if not isinstance(perms_s, str):
            msg = 'Install perms must be a string. For example, {!r}'
            raise MesonException(msg.format(eg))
        if len(perms_s) != 9 or not cls.symbolic_perms_regex.match(perms_s):
            msg = 'File perms {!r} must be exactly 9 chars. For example, {!r}'
            raise MesonException(msg.format(perms_s, eg))
        perms = 0
        for triad, (r_bit, w_bit, x_bit, special_bit) in enumerate(cls._triad_bits):
            read_c, write_c, exec_c = perms_s[triad * 3:triad * 3 + 3]
            if read_c == 'r':
                perms |= r_bit
            if write_c == 'w':
                perms |= w_bit
            # 'x'/'s'/'t' all mean executable; 's'/'S'/'t'/'T' additionally
            # set the triad's special bit (setuid / setgid / sticky).  The
            # regex above guarantees only the legal letters per triad appear.
            if exec_c in 'xst':
                perms |= x_bit
            if exec_c in 'sStT':
                perms |= special_bit
        return perms
class File:
    """A file in the source or build tree, identified by subdir + filename."""

    def __init__(self, is_built: bool, subdir: str, fname: str):
        self.is_built = is_built
        self.subdir = subdir
        self.fname = fname
        # Precomputed so hashing Files (e.g. as dict keys) stays cheap.
        self.hash = hash((is_built, subdir, fname))

    def __str__(self) -> str:
        return self.relative_name()

    def __repr__(self) -> str:
        template = '<File: {0}' + ('' if self.is_built else ' (not built)') + '>'
        return template.format(self.relative_name())

    @staticmethod
    @lru_cache(maxsize=None)
    def from_source_file(source_root: str, subdir: str, fname: str) -> 'File':
        """Create a File for an existing source-tree file; raises otherwise."""
        candidate = os.path.join(source_root, subdir, fname)
        if not os.path.isfile(candidate):
            raise MesonException('File %s does not exist.' % fname)
        return File(False, subdir, fname)

    @staticmethod
    def from_built_file(subdir: str, fname: str) -> 'File':
        """Create a File living in the build tree."""
        return File(True, subdir, fname)

    @staticmethod
    def from_absolute_file(fname: str) -> 'File':
        """Create a File from an absolute path (no subdir component)."""
        return File(False, '', fname)

    @lru_cache(maxsize=None)
    def rel_to_builddir(self, build_to_src: str) -> str:
        # Built files are already relative to the build dir; source files
        # need the build-to-source prefix prepended.
        if not self.is_built:
            return os.path.join(build_to_src, self.subdir, self.fname)
        return self.relative_name()

    @lru_cache(maxsize=None)
    def absolute_path(self, srcdir: str, builddir: str) -> str:
        root = builddir if self.is_built else srcdir
        return os.path.join(root, self.relative_name())

    def endswith(self, ending: str) -> bool:
        return self.fname.endswith(ending)

    def split(self, s: str) -> T.List[str]:
        return self.fname.split(s)

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, File):
            return NotImplemented
        # Cheap hash comparison first to reject most mismatches.
        if self.hash != other.hash:
            return False
        return (self.is_built == other.is_built and
                self.subdir == other.subdir and
                self.fname == other.fname)

    def __hash__(self) -> int:
        return self.hash

    @lru_cache(maxsize=None)
    def relative_name(self) -> str:
        return os.path.join(self.subdir, self.fname)
def get_compiler_for_source(compilers: T.Iterable['CompilerType'], src: str) -> 'CompilerType':
    """Return the first compiler in *compilers* that can compile *src*.

    Raises MesonException when no compiler accepts the file.
    """
    for candidate in compilers:
        if candidate.can_compile(src):
            return candidate
    raise MesonException('No specified compiler can handle file {!s}'.format(src))
def classify_unity_sources(compilers: T.Iterable['CompilerType'], sources: T.Iterable[str]) -> T.Dict['CompilerType', T.List[str]]:
    """Group *sources* by the compiler that handles each of them."""
    by_compiler = {}  # type: T.Dict[CompilerType, T.List[str]]
    for src in sources:
        comp = get_compiler_for_source(compilers, src)
        by_compiler.setdefault(comp, []).append(src)
    return by_compiler
class MachineChoice(IntEnum):
    """Enum class representing one of the two abstract machine names used in
    most places: the build, and host, machines.
    """
    BUILD = 0
    HOST = 1

    def get_lower_case_name(self) -> str:
        """Return this machine's lower-case name: 'build' or 'host'."""
        return 'build' if self is MachineChoice.BUILD else 'host'

    def get_prefix(self) -> str:
        """Return the option-name prefix for this machine ('build.' or '')."""
        return 'build.' if self is MachineChoice.BUILD else ''
class PerMachine(T.Generic[_T]):
    """A pair of values: one for the build machine, one for the host."""

    def __init__(self, build: _T, host: _T) -> None:
        self.build = build
        self.host = host

    def __getitem__(self, machine: MachineChoice) -> _T:
        lookup = {
            MachineChoice.BUILD: self.build,
            MachineChoice.HOST: self.host,
        }
        return lookup[machine]

    def __setitem__(self, machine: MachineChoice, val: _T) -> None:
        setattr(self, machine.get_lower_case_name(), val)

    def miss_defaulting(self) -> "PerMachineDefaultable[T.Optional[_T]]":
        """Unset definition duplicated from their previous to None

        This is the inverse of ''default_missing''. By removing defaulted
        machines, we can elaborate the original and then redefault them and
        thus avoid repeating the elaboration explicitly.
        """
        thawed = PerMachineDefaultable()  # type: PerMachineDefaultable[T.Optional[_T]]
        thawed.build = self.build
        thawed.host = self.host
        if thawed.host == thawed.build:
            thawed.host = None
        return thawed

    def __repr__(self) -> str:
        return f'PerMachine({self.build!r}, {self.host!r})'
class PerThreeMachine(PerMachine[_T]):
    """Like `PerMachine` but also carries a `target` value.

    Only storage of the target machine is needed, so the inherited
    `__getitem__`/`__setitem__` are deliberately not extended.
    """

    def __init__(self, build: _T, host: _T, target: _T) -> None:
        super().__init__(build, host)
        self.target = target

    def miss_defaulting(self) -> "PerThreeMachineDefaultable[T.Optional[_T]]":
        """Unset definition duplicated from their previous to None

        This is the inverse of ''default_missing''. By removing defaulted
        machines, we can elaborate the original and then redefault them and
        thus avoid repeating the elaboration explicitly.
        """
        thawed = PerThreeMachineDefaultable()  # type: PerThreeMachineDefaultable[T.Optional[_T]]
        thawed.build = self.build
        thawed.host = self.host
        thawed.target = self.target
        if thawed.target == thawed.host:
            thawed.target = None
        if thawed.host == thawed.build:
            thawed.host = None
        return thawed

    def matches_build_machine(self, machine: MachineChoice) -> bool:
        """True when *machine*'s value equals the build machine's value."""
        return self.build == self[machine]

    def __repr__(self) -> str:
        return f'PerThreeMachine({self.build!r}, {self.host!r}, {self.target!r})'
class PerMachineDefaultable(PerMachine[T.Optional[_T]]):
    """A `PerMachine` whose values start as `None` and can be defaulted later."""

    def __init__(self) -> None:
        super().__init__(None, None)

    def default_missing(self) -> "PerMachine[T.Optional[_T]]":
        """Fill in host from build.

        This allows just specifying nothing in the native case, and just host
        in the cross non-compiler case.
        """
        frozen = PerMachine(self.build, self.host)
        if frozen.host is None:
            frozen.host = frozen.build
        return frozen

    def __repr__(self) -> str:
        return f'PerMachineDefaultable({self.build!r}, {self.host!r})'
class PerThreeMachineDefaultable(PerMachineDefaultable, PerThreeMachine[T.Optional[_T]]):
    """A `PerThreeMachine` whose values start as `None` and can be defaulted."""

    def __init__(self) -> None:
        PerThreeMachine.__init__(self, None, None, None)

    def default_missing(self) -> "PerThreeMachine[T.Optional[_T]]":
        """Fill in host from build, then target from host.

        This allows just specifying nothing in the native case, just host in
        the cross non-compiler case, and just target in the native-built
        cross-compiler case.
        """
        frozen = PerThreeMachine(self.build, self.host, self.target)
        if frozen.host is None:
            frozen.host = frozen.build
        if frozen.target is None:
            frozen.target = frozen.host
        return frozen

    def __repr__(self) -> str:
        return f'PerThreeMachineDefaultable({self.build!r}, {self.host!r}, {self.target!r})'
# ---------------------------------------------------------------------------
# Host-platform predicates.  Each one queries the machine this process is
# currently running on (via platform.system(), sys.platform or the
# filesystem) at call time; none of them consider a cross-compile target.
# ---------------------------------------------------------------------------
def is_sunos() -> bool:
    return platform.system().lower() == 'sunos'
def is_osx() -> bool:
    return platform.system().lower() == 'darwin'
def is_linux() -> bool:
    return platform.system().lower() == 'linux'
def is_android() -> bool:
    return platform.system().lower() == 'android'
def is_haiku() -> bool:
    return platform.system().lower() == 'haiku'
def is_openbsd() -> bool:
    return platform.system().lower() == 'openbsd'
def is_windows() -> bool:
    platname = platform.system().lower()
    return platname == 'windows'
def is_cygwin() -> bool:
    # sys.platform is used here because platform.system() under Cygwin
    # includes version suffixes (e.g. 'CYGWIN_NT-...').
    return sys.platform == 'cygwin'
def is_debianlike() -> bool:
    # Debian and its derivatives are detected via this marker file rather
    # than by distribution name.
    return os.path.isfile('/etc/debian_version')
def is_dragonflybsd() -> bool:
    return platform.system().lower() == 'dragonfly'
def is_netbsd() -> bool:
    return platform.system().lower() == 'netbsd'
def is_freebsd() -> bool:
    return platform.system().lower() == 'freebsd'
def is_irix() -> bool:
    # NOTE(review): uses startswith() without lower(), unlike its siblings —
    # presumably to match names like 'irix64'; confirm against real output.
    return platform.system().startswith('irix')
def is_hurd() -> bool:
    # GNU Hurd reports its system name as plain 'gnu'.
    return platform.system().lower() == 'gnu'
def is_qnx() -> bool:
    return platform.system().lower() == 'qnx'
def is_aix() -> bool:
    return platform.system().lower() == 'aix'
def exe_exists(arglist: T.List[str]) -> bool:
    '''Return True when running *arglist* succeeds with exit status 0.

    A missing executable or a run exceeding 10 seconds counts as failure.
    '''
    try:
        return subprocess.run(arglist, timeout=10).returncode == 0
    except (FileNotFoundError, subprocess.TimeoutExpired):
        return False
@lru_cache(maxsize=None)
def darwin_get_object_archs(objpath: str) -> T.Optional[T.List[str]]:
    '''
    For a specific object (executable, static library, dylib, etc), run `lipo`
    to fetch the list of archs supported by it. Supports both thin objects and
    'fat' objects.

    Returns None when `lipo` produced no output (e.g. *objpath* is not a
    valid object file).
    '''
    _, stdo, stderr = Popen_safe(['lipo', '-info', objpath])
    if not stdo:
        mlog.debug('lipo {}: {}'.format(objpath, stderr))
        # Annotation fix: this path returns None, so the declared return
        # type must be Optional rather than a plain list.
        return None
    stdo = stdo.rsplit(': ', 1)[1]
    # Convert from lipo-style archs to meson-style CPUs
    stdo = stdo.replace('i386', 'x86')
    stdo = stdo.replace('arm64', 'aarch64')
    # Add generic name for armv7 and armv7s
    if 'armv7' in stdo:
        stdo += ' arm'
    return stdo.split()
def detect_vcs(source_dir: T.Union[str, Path]) -> T.Optional[T.Dict[str, str]]:
    """Find the version-control system owning *source_dir*, if any.

    Walks from source_dir up through its ancestors; on a hit, returns the
    VCS description dict with 'wc_dir' set to the working-copy root.
    Returns None when no known VCS repository directory is found.
    """
    vcs_systems = [
        {'name': 'git', 'cmd': 'git', 'repo_dir': '.git',
         'get_rev': 'git describe --dirty=+', 'rev_regex': '(.*)', 'dep': '.git/logs/HEAD'},
        {'name': 'mercurial', 'cmd': 'hg', 'repo_dir': '.hg',
         'get_rev': 'hg id -i', 'rev_regex': '(.*)', 'dep': '.hg/dirstate'},
        {'name': 'subversion', 'cmd': 'svn', 'repo_dir': '.svn',
         'get_rev': 'svn info', 'rev_regex': 'Revision: (.*)', 'dep': '.svn/wc.db'},
        {'name': 'bazaar', 'cmd': 'bzr', 'repo_dir': '.bzr',
         'get_rev': 'bzr revno', 'rev_regex': '(.*)', 'dep': '.bzr'},
    ]
    if isinstance(source_dir, str):
        source_dir = Path(source_dir)
    # Check source_dir itself first, then every ancestor up to the root
    # (Path.parents does not include the path itself).
    candidates = [source_dir, *source_dir.parents]
    for directory in candidates:
        for vcs in vcs_systems:
            # Require both the repository directory and a usable tool.
            if (directory / vcs['repo_dir']).is_dir() and shutil.which(vcs['cmd']):
                vcs['wc_dir'] = str(directory)
                return vcs
    return None
def current_vs_supports_modules() -> bool:
    '''Check (via the VSCMD_VER environment variable) whether the active
    Visual Studio command prompt is a 16.9.0 pre-release, which is the
    variant that supports C++ modules.'''
    version = os.environ.get('VSCMD_VER', '')
    return version.startswith('16.9.0') and '-pre.' in version
class Version:
    """A version string, ordered with the same comparison rules RPM uses.

    The string is tokenized into runs of digits (compared numerically) and
    runs of letters (compared lexically); all other characters act purely as
    separators.  An alphabetic segment sorts before a numeric one, and when
    all shared segments match, the version with more segments is greater.
    """

    def __init__(self, s: str) -> None:
        self._s = s
        # Tokenize; separators simply never appear in the result.
        parts = re.findall(r'\d+|[a-zA-Z]+', s)
        # Digit runs become ints so they compare numerically ('10' > '9').
        self._v = [int(p) if p.isdigit() else p for p in parts]

    def __str__(self) -> str:
        return '%s (V=%s)' % (self._s, str(self._v))

    def __repr__(self) -> str:
        return f'<Version: {self._s}>'

    def _rich_cmp(self, other: object, comparator: T.Callable[[T.Any, T.Any], bool]) -> bool:
        if not isinstance(other, Version):
            return NotImplemented
        for mine, theirs in zip(self._v, other._v):
            mine_is_int = isinstance(mine, int)
            theirs_is_int = isinstance(theirs, int)
            if mine_is_int != theirs_is_int:
                # An alphabetic segment sorts before a numeric one.
                return comparator(mine_is_int, theirs_is_int)
            if mine != theirs:
                return comparator(mine, theirs)
        # All shared segments matched: the version with a suffix remaining
        # is the greater one.
        return comparator(len(self._v), len(other._v))

    def __lt__(self, other: object) -> bool:
        return self._rich_cmp(other, operator.lt)

    def __gt__(self, other: object) -> bool:
        return self._rich_cmp(other, operator.gt)

    def __le__(self, other: object) -> bool:
        return self._rich_cmp(other, operator.le)

    def __ge__(self, other: object) -> bool:
        return self._rich_cmp(other, operator.ge)

    def __eq__(self, other: object) -> bool:
        if isinstance(other, Version):
            return self._v == other._v
        return NotImplemented

    def __ne__(self, other: object) -> bool:
        if isinstance(other, Version):
            return self._v != other._v
        return NotImplemented
def _version_extract_cmpop(vstr2: str) -> T.Tuple[T.Callable[[T.Any, T.Any], bool], str]:
if vstr2.startswith('>='):
cmpop = operator.ge
vstr2 = vstr2[2:]
elif vstr2.startswith('<='):
cmpop = operator.le
vstr2 = vstr2[2:]
elif vstr2.startswith('!='):
cmpop = operator.ne
vstr2 = vstr2[2:]
elif vstr2.startswith('=='):
cmpop = operator.eq
vstr2 = vstr2[2:]
elif vstr2.startswith('='):
cmpop = operator.eq
vstr2 = vstr2[1:]
elif vstr2.startswith('>'):
cmpop = operator.gt
vstr2 = vstr2[1:]
elif vstr2.startswith('<'):
cmpop = operator.lt
vstr2 = vstr2[1:]
else:
cmpop = operator.eq
return (cmpop, vstr2)
def version_compare(vstr1: str, vstr2: str) -> bool:
    '''Check version *vstr1* against constraint *vstr2* (e.g. '>=1.2').'''
    comparator, plain_version = _version_extract_cmpop(vstr2)
    return comparator(Version(vstr1), Version(plain_version))
def version_compare_many(vstr1: str, conditions: T.Union[str, T.Iterable[str]]) -> T.Tuple[bool, T.List[str], T.List[str]]:
    '''Check *vstr1* against every constraint in *conditions*.

    Returns (all satisfied?, unsatisfied constraints, satisfied constraints).
    '''
    if isinstance(conditions, str):
        conditions = [conditions]
    found = []
    not_found = []
    for req in conditions:
        (found if version_compare(vstr1, req) else not_found).append(req)
    return not not_found, not_found, found
def version_compare_condition_with_min(condition: str, minimum: str) -> bool:
    '''Determine whether the minimum version satisfying *condition* exceeds
    the minimum version *minimum* required for a feature.

    Upper bounds ('<', '<=') and '!=' can never guarantee a minimum, so they
    always return False.
    '''
    if condition.startswith('>='):
        cmpop = operator.le
        condition = condition[2:]
    elif condition.startswith('<='):
        return False
    elif condition.startswith('!='):
        return False
    elif condition.startswith('=='):
        cmpop = operator.le
        condition = condition[2:]
    elif condition.startswith('='):
        cmpop = operator.le
        condition = condition[1:]
    elif condition.startswith('>'):
        cmpop = operator.lt
        condition = condition[1:]
    elif condition.startswith('<'):
        return False
    else:
        cmpop = operator.le
    # Declaring a project(meson_version: '>=0.46') and then using features in
    # 0.46.0 is valid, because (knowing the meson versioning scheme) '0.46.0' is
    # the lowest version which satisfies the constraint '>=0.46'.
    #
    # But this would fail below, because the minimum version required by the
    # version constraint ('0.46') is strictly less (in our version comparison)
    # than the minimum version needed for the feature ('0.46.0').
    #
    # Map versions in the constraint of the form '0.46' to '0.46.0', to embed
    # this knowledge of the meson versioning scheme.
    condition = condition.strip()
    # Bug fix: the dot must be escaped; the previous r'^\d+.\d+$' let the
    # '.' match any character, so strings like '0x46' were wrongly treated
    # as two-component versions and had '.0' appended.
    if re.match(r'^\d+\.\d+$', condition):
        condition += '.0'
    return T.cast(bool, cmpop(Version(minimum), Version(condition)))
def default_libdir() -> str:
    '''Pick the default library installation directory for this platform.'''
    if is_debianlike():
        # Debian multiarch puts libraries under lib/<triplet>.
        try:
            proc = subprocess.Popen(['dpkg-architecture', '-qDEB_HOST_MULTIARCH'],
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.DEVNULL)
            (out, _) = proc.communicate()
            if proc.returncode == 0:
                return 'lib/' + out.decode().strip()
        except Exception:
            # Fall through to the generic logic when dpkg-architecture
            # is missing or fails.
            pass
    if is_freebsd() or is_irix():
        return 'lib'
    # A real (non-symlink) /usr/lib64 signals a distro that splits
    # 32-bit and 64-bit libraries.
    if os.path.isdir('/usr/lib64') and not os.path.islink('/usr/lib64'):
        return 'lib64'
    return 'lib'
def default_libexecdir() -> str:
    '''Return the default libexec directory name.'''
    # There is no way to auto-detect this, so it must be set at build time
    return 'libexec'
def default_prefix() -> str:
    '''Return the default installation prefix for the host OS.'''
    if is_windows():
        return 'c:/'
    return '/usr/local'
def get_library_dirs() -> T.List[str]:
    '''Return the standard system library search directories for this host,
    including any platform-specific subdirectories found on Solaris-style
    layouts.'''
    if is_windows():
        return ['C:/mingw/lib'] # TODO: get programmatically
    if is_osx():
        return ['/usr/lib'] # TODO: get programmatically
    # The following is probably Debian/Ubuntu specific.
    # /usr/local/lib is first because it contains stuff
    # installed by the sysadmin and is probably more up-to-date
    # than /usr/lib. If you feel that this search order is
    # problematic, please raise the issue on the mailing list.
    unixdirs = ['/usr/local/lib', '/usr/lib', '/lib']
    if is_freebsd():
        return unixdirs
    # FIXME: this needs to be further genericized for aarch64 etc.
    machine = platform.machine()
    if machine in ('i386', 'i486', 'i586', 'i686'):
        plat = 'i386'
    elif machine.startswith('arm'):
        plat = 'arm'
    else:
        plat = ''
    # Solaris puts 32-bit libraries in the main /lib & /usr/lib directories
    # and 64-bit libraries in platform specific subdirectories.
    if is_sunos():
        if machine == 'i86pc':
            plat = 'amd64'
        elif machine.startswith('sun4'):
            plat = 'sparcv9'
    # Append any per-platform subdirectories that actually exist.
    usr_platdir = Path('/usr/lib/') / plat
    if usr_platdir.is_dir():
        unixdirs += [str(x) for x in (usr_platdir).iterdir() if x.is_dir()]
    if os.path.exists('/usr/lib64'):
        unixdirs.append('/usr/lib64')
    lib_platdir = Path('/lib/') / plat
    if lib_platdir.is_dir():
        unixdirs += [str(x) for x in (lib_platdir).iterdir() if x.is_dir()]
    if os.path.exists('/lib64'):
        unixdirs.append('/lib64')
    return unixdirs
def has_path_sep(name: str, sep: str = '/\\') -> bool:
    '''Return True when any of the path separator characters in *sep*
    occurs in *name*.'''
    return any(separator in name for separator in sep)
if is_windows():
    # shlex.split is not suitable for splitting command line on Window (https://bugs.python.org/issue1724822);
    # shlex.quote is similarly problematic. Below are "proper" implementations of these functions according to
    # https://docs.microsoft.com/en-us/cpp/c-language/parsing-c-command-line-arguments and
    # https://blogs.msdn.microsoft.com/twistylittlepassagesallalike/2011/04/23/everyone-quotes-command-line-arguments-the-wrong-way/
    _whitespace = ' \t\n\r'
    _find_unsafe_char = re.compile(r'[{}"]'.format(_whitespace)).search
    def quote_arg(arg: str) -> str:
        '''Quote *arg* for a Windows command line (MSVC argv parsing rules).'''
        # Args containing neither whitespace nor quotes need no quoting.
        if arg and not _find_unsafe_char(arg):
            return arg
        result = '"'
        num_backslashes = 0
        for c in arg:
            if c == '\\':
                # Backslashes are buffered; their meaning depends on what
                # follows (a quote doubles them, anything else keeps them).
                num_backslashes += 1
            else:
                if c == '"':
                    # Escape all backslashes and the following double quotation mark
                    num_backslashes = num_backslashes * 2 + 1
                result += num_backslashes * '\\' + c
                num_backslashes = 0
        # Escape all backslashes, but let the terminating double quotation
        # mark we add below be interpreted as a metacharacter
        result += (num_backslashes * 2) * '\\' + '"'
        return result
    def split_args(cmd: str) -> T.List[str]:
        '''Split a Windows command line into argv items (MSVC parsing rules).'''
        result = []
        arg = ''
        num_backslashes = 0
        num_quotes = 0
        in_quotes = False
        for c in cmd:
            if c == '\\':
                num_backslashes += 1
            else:
                if c == '"' and not (num_backslashes % 2):
                    # unescaped quote, eat it
                    arg += (num_backslashes // 2) * '\\'
                    num_quotes += 1
                    in_quotes = not in_quotes
                elif c in _whitespace and not in_quotes:
                    if arg or num_quotes:
                        # reached the end of the argument
                        result.append(arg)
                        arg = ''
                        num_quotes = 0
                else:
                    if c == '"':
                        # escaped quote
                        num_backslashes = (num_backslashes - 1) // 2
                    arg += num_backslashes * '\\' + c
                    num_backslashes = 0
        # Flush the final argument (num_quotes keeps empty quoted args, "").
        if arg or num_quotes:
            result.append(arg)
        return result
else:
    def quote_arg(arg: str) -> str:
        '''Quote *arg* for a POSIX shell.'''
        return shlex.quote(arg)
    def split_args(cmd: str) -> T.List[str]:
        '''Split a POSIX shell command line into argv items.'''
        return shlex.split(cmd)
def join_args(args: T.Iterable[str]) -> str:
    '''Quote each argument and join them into a single command-line string.'''
    return ' '.join(map(quote_arg, args))
def do_replacement(regex: T.Pattern[str], line: str, variable_format: str,
                   confdata: 'ConfigurationData') -> T.Tuple[str, T.Set[str]]:
    '''Substitute @var@ / ${var} templates in *line* with values from
    *confdata*.

    Returns the substituted line and the set of variable names that were
    referenced but missing from confdata (those expand to '').
    Raises MesonException when a value is neither a string nor an int.
    '''
    missing_variables = set()  # type: T.Set[str]
    if variable_format == 'cmake':
        start_tag = '${'
        backslash_tag = '\\${'
    else:
        assert variable_format in ['meson', 'cmake@']
        start_tag = '@'
        backslash_tag = '\\@'
    def variable_replace(match: T.Match[str]) -> str:
        # Pairs of escape characters before '@' or '\@'
        if match.group(0).endswith('\\'):
            num_escapes = match.end(0) - match.start(0)
            return '\\' * (num_escapes // 2)
        # Single escape character and '@'
        elif match.group(0) == backslash_tag:
            return start_tag
        # Template variable to be replaced
        else:
            varname = match.group(1)
            var_str = ''
            if varname in confdata:
                (var, desc) = confdata.get(varname)
                if isinstance(var, str):
                    var_str = var
                elif isinstance(var, int):
                    var_str = str(var)
                else:
                    msg = 'Tried to replace variable {!r} value with ' \
                          'something other than a string or int: {!r}'
                    raise MesonException(msg.format(varname, var))
            else:
                # Unknown variables are recorded and replaced with ''.
                missing_variables.add(varname)
            return var_str
    return re.sub(regex, variable_replace, line), missing_variables
def do_define(regex: T.Pattern[str], line: str, confdata: 'ConfigurationData', variable_format: str) -> str:
    '''Expand a single '#mesondefine' / '#cmakedefine' line using *confdata*.

    Unknown variables become a '/* #undef ... */' comment; bools turn into
    #define/#undef; ints and strings become plain #defines (string values in
    the cmake dialects additionally go through template substitution).
    '''
    def get_cmake_define(line: str, confdata: 'ConfigurationData') -> str:
        # cmake allows '#cmakedefine NAME value...'; expand each trailing
        # token from confdata when known, otherwise keep it verbatim.
        arr = line.split()
        define_value=[]
        for token in arr[2:]:
            try:
                (v, desc) = confdata.get(token)
                define_value += [str(v)]
            except KeyError:
                define_value += [token]
        return ' '.join(define_value)
    arr = line.split()
    if variable_format == 'meson' and len(arr) != 2:
        raise MesonException('#mesondefine does not contain exactly two tokens: %s' % line.strip())
    varname = arr[1]
    try:
        (v, desc) = confdata.get(varname)
    except KeyError:
        return '/* #undef %s */\n' % varname
    if isinstance(v, bool):
        if v:
            return '#define %s\n' % varname
        else:
            return '#undef %s\n' % varname
    elif isinstance(v, int):
        return '#define %s %d\n' % (varname, v)
    elif isinstance(v, str):
        if variable_format == 'meson':
            result = v
        else:
            result = get_cmake_define(line, confdata)
        result = '#define %s %s\n' % (varname, result)
        # cmake-style values may themselves contain templates; expand them.
        (result, missing_variable) = do_replacement(regex, result, variable_format, confdata)
        return result
    else:
        raise MesonException('#mesondefine argument "%s" is of unknown type.' % varname)
def get_variable_regex(variable_format: str = 'meson') -> T.Pattern[str]:
    '''Return the compiled template-variable regex for *variable_format*.

    Variable names may contain (a-z, A-Z, 0-9, _, -); the patterns also
    recognize escaping of the start tag ('\\@' resp. '\\${').
    Raises MesonException for an unknown format.
    '''
    patterns = {
        'meson': r'(?:\\\\)+(?=\\?@)|\\@|@([-a-zA-Z0-9_]+)@',
        'cmake@': r'(?:\\\\)+(?=\\?@)|\\@|@([-a-zA-Z0-9_]+)@',
        'cmake': r'(?:\\\\)+(?=\\?\$)|\\\${|\${([-a-zA-Z0-9_]+)}',
    }
    pattern = patterns.get(variable_format)
    if pattern is None:
        raise MesonException('Format "{}" not handled'.format(variable_format))
    return re.compile(pattern)
def do_conf_str (data: list, confdata: 'ConfigurationData', variable_format: str,
                 encoding: str = 'utf-8') -> T.Tuple[T.List[str],T.Set[str], bool]:
    '''Substitute configuration values into the lines of *data*.

    variable_format selects the template dialect ('meson', 'cmake' or
    'cmake@').  Returns (substituted lines, names of variables referenced
    but missing from confdata, and a flag that is True when confdata was
    empty AND no templates were found — used to suggest the `copy:` kwarg).
    NOTE(review): *encoding* appears unused here — confirm against callers.
    '''
    def line_is_valid(line : str, variable_format: str) -> bool:
        # Reject define tokens belonging to the *other* template dialect.
        if variable_format == 'meson':
            if '#cmakedefine' in line:
                return False
        else: #cmake format
            if '#mesondefine' in line:
                return False
        return True
    regex = get_variable_regex(variable_format)
    search_token = '#mesondefine'
    if variable_format != 'meson':
        search_token = '#cmakedefine'
    result = []
    missing_variables = set()
    # Detect when the configuration data is empty and no tokens were found
    # during substitution so we can warn the user to use the `copy:` kwarg.
    confdata_useless = not confdata.keys()
    for line in data:
        if line.startswith(search_token):
            confdata_useless = False
            line = do_define(regex, line, confdata, variable_format)
        else:
            if not line_is_valid(line,variable_format):
                raise MesonException('Format "{}" mismatched'.format(variable_format))
            line, missing = do_replacement(regex, line, variable_format, confdata)
            missing_variables.update(missing)
            if missing:
                confdata_useless = False
        result.append(line)
    return result, missing_variables, confdata_useless
def do_conf_file(src: str, dst: str, confdata: 'ConfigurationData', variable_format: str,
                 encoding: str = 'utf-8') -> T.Tuple[T.Set[str], bool]:
    '''Configure file *src* into *dst* by substituting values from *confdata*.

    Writes to 'dst~' first, copies src's permission bits onto it, and only
    replaces dst when the contents actually changed (avoiding rebuilds).
    Returns (missing variable names, confdata-was-useless flag) as produced
    by do_conf_str().  Raises MesonException on read/write failure.
    '''
    try:
        with open(src, encoding=encoding, newline='') as f:
            data = f.readlines()
    except Exception as e:
        raise MesonException('Could not read input file %s: %s' % (src, str(e)))
    (result, missing_variables, confdata_useless) = do_conf_str(data, confdata, variable_format, encoding)
    dst_tmp = dst + '~'
    try:
        with open(dst_tmp, 'w', encoding=encoding, newline='') as f:
            f.writelines(result)
    except Exception as e:
        raise MesonException('Could not write output file %s: %s' % (dst, str(e)))
    shutil.copymode(src, dst_tmp)
    replace_if_different(dst, dst_tmp)
    return missing_variables, confdata_useless
CONF_C_PRELUDE = '''/*
* Autogenerated by the Meson build system.
* Do not edit, your changes will be lost.
*/
#pragma once
'''
CONF_NASM_PRELUDE = '''; Autogenerated by the Meson build system.
; Do not edit, your changes will be lost.
'''
def dump_conf_header(ofilename: str, cdata: 'ConfigurationData', output_format: str) -> None:
    '''Write configuration data *cdata* to *ofilename* as a generated header.

    output_format must be 'c' or 'nasm'; any other value now raises
    MesonException (previously it crashed later with an UnboundLocalError
    on 'prelude').  The target file is only replaced when the generated
    contents differ, to avoid needless rebuilds.
    '''
    if output_format == 'c':
        prelude = CONF_C_PRELUDE
        prefix = '#'
    elif output_format == 'nasm':
        prelude = CONF_NASM_PRELUDE
        prefix = '%'
    else:
        # Bug fix: an unknown format used to fall through and blow up with
        # an UnboundLocalError when 'prelude' was first used below.
        raise MesonException('Unknown configuration header format "{}"'.format(output_format))
    ofilename_tmp = ofilename + '~'
    with open(ofilename_tmp, 'w', encoding='utf-8') as ofile:
        ofile.write(prelude)
        for k in sorted(cdata.keys()):
            (v, desc) = cdata.get(k)
            if desc:
                # Emit the description in the dialect's comment syntax.
                if output_format == 'c':
                    ofile.write('/* %s */\n' % desc)
                elif output_format == 'nasm':
                    for line in desc.split('\n'):
                        ofile.write('; %s\n' % line)
            if isinstance(v, bool):
                if v:
                    ofile.write('%sdefine %s\n\n' % (prefix, k))
                else:
                    ofile.write('%sundef %s\n\n' % (prefix, k))
            elif isinstance(v, (int, str)):
                ofile.write('%sdefine %s %s\n\n' % (prefix, k, v))
            else:
                raise MesonException('Unknown data type in configuration file entry: ' + k)
    replace_if_different(ofilename, ofilename_tmp)
def replace_if_different(dst: str, dst_tmp: str) -> None:
    '''Move *dst_tmp* over *dst* only when their contents differ.

    When the contents are identical the temporary file is deleted and dst
    is left untouched, so downstream tools see no mtime change and skip
    unnecessary rebuilds.
    '''
    try:
        with open(dst, 'rb') as existing, open(dst_tmp, 'rb') as candidate:
            identical = existing.read() == candidate.read()
    except FileNotFoundError:
        # dst does not exist yet: always install the new file.
        identical = False
    if identical:
        os.unlink(dst_tmp)
    else:
        os.replace(dst_tmp, dst)
@T.overload
def unholder(item: 'ObjectHolder[_T]') -> _T: ...
@T.overload
def unholder(item: T.List['ObjectHolder[_T]']) -> T.List[_T]: ...
@T.overload
def unholder(item: T.List[_T]) -> T.List[_T]: ...
@T.overload
def unholder(item: T.List[T.Union[_T, 'ObjectHolder[_T]']]) -> T.List[_T]: ...
def unholder(item):  # type: ignore # TODO fix overload (somehow)
    """Get the held item of an object holder or list of object holders."""
    def unwrap(obj):
        # Anything without a 'held_object' attribute passes through as-is.
        return getattr(obj, 'held_object', obj)
    if isinstance(item, list):
        return [unwrap(element) for element in item]
    return unwrap(item)
def listify(item: T.Any, flatten: bool = True) -> T.List[T.Any]:
    '''
    Wrap *item* in a list unless it already is one, preserving order.

    @flatten: recursively flatten nested lists into a single flat list.
    '''
    if not isinstance(item, list):
        return [item]
    out = []  # type: T.List[T.Any]
    for element in item:
        if flatten and isinstance(element, list):
            out.extend(listify(element, flatten=True))
        else:
            out.append(element)
    return out
def extract_as_list(dict_object: T.Dict[_T, _U], key: _T, pop: bool = False) -> T.List[_U]:
    '''
    Fetch (or, with pop=True, remove) *key* from *dict_object* and return
    its value listified; a missing key yields an empty list.
    '''
    getter = dict_object.pop if pop else dict_object.get
    return listify(getter(key, []), flatten=True)
def typeslistify(item: 'T.Union[_T, T.Sequence[_T]]',
                 types: 'T.Union[T.Type[_T], T.Tuple[T.Type[_T]]]') -> T.List[_T]:
    '''
    Wrap a bare *item* of an accepted type into a list, or validate that an
    existing list contains only accepted types (None entries are allowed).
    Raises MesonException otherwise.
    '''
    if isinstance(item, types):
        item = T.cast(T.List[_T], [item])
    if not isinstance(item, list):
        raise MesonException('Item must be a list or one of {!r}'.format(types))
    for element in item:
        if element is not None and not isinstance(element, types):
            raise MesonException('List item must be one of {!r}'.format(types))
    return item
def stringlistify(item: T.Union[T.Any, T.Sequence[T.Any]]) -> T.List[str]:
    '''Wrap a single string into a list, or validate a list of strings.'''
    return typeslistify(item, str)
def expand_arguments(args: T.Iterable[str]) -> T.Optional[T.List[str]]:
    '''Expand '@file' response-file arguments in place.

    Each argument of the form '@path' is replaced by the whitespace-split
    contents of that file; other arguments pass through unchanged.
    Returns None (after logging) when a response file cannot be read.
    '''
    expanded = []  # type: T.List[str]
    for arg in args:
        if not arg.startswith('@'):
            expanded.append(arg)
            continue
        response_file = arg[1:]
        try:
            with open(response_file) as f:
                expanded.extend(f.read().split())
        except Exception as e:
            mlog.error('Expanding command line arguments:', response_file, 'not found')
            mlog.exception(e)
            return None
    return expanded
def partition(pred: T.Callable[[_T], object], iterable: T.Iterator[_T]) -> T.Tuple[T.Iterator[_T], T.Iterator[_T]]:
    """Use a predicate to partition entries into false entries and true
    entries.

    >>> x, y = partition(is_odd, range(10))
    >>> (list(x), list(y))
    ([0, 2, 4, 6, 8], [1, 3, 5, 7, 9])
    """
    # tee lets both result iterators consume the input independently.
    falsy_src, truthy_src = tee(iterable)
    return filterfalse(pred, falsy_src), filter(pred, truthy_src)
def Popen_safe(args: T.List[str], write: T.Optional[str] = None,
               stdout: T.Union[T.BinaryIO, int] = subprocess.PIPE,
               stderr: T.Union[T.BinaryIO, int] = subprocess.PIPE,
               **kwargs: T.Any) -> T.Tuple[subprocess.Popen, str, str]:
    '''Run *args* and return (process, decoded stdout, decoded stderr).

    *write* is passed to the child's stdin.  On consoles that are not UTF-8,
    falls back to Popen_safe_legacy(), which talks to the child in bytes and
    decodes manually.
    '''
    import locale
    encoding = locale.getpreferredencoding()
    # Redirect stdin to DEVNULL otherwise the command run by us here might mess
    # up the console and ANSI colors will stop working on Windows.
    if 'stdin' not in kwargs:
        kwargs['stdin'] = subprocess.DEVNULL
    if not sys.stdout.encoding or encoding.upper() != 'UTF-8':
        p, o, e = Popen_safe_legacy(args, write=write, stdout=stdout, stderr=stderr, **kwargs)
    else:
        p = subprocess.Popen(args, universal_newlines=True, close_fds=False,
                             stdout=stdout, stderr=stderr, **kwargs)
        o, e = p.communicate(write)
    # Sometimes the command that we run will call another command which will be
    # without the above stdin workaround, so set the console mode again just in
    # case.
    mlog.setup_console()
    return p, o, e
def Popen_safe_legacy(args: T.List[str], write: T.Optional[str] = None,
                      stdout: T.Union[T.BinaryIO, int] = subprocess.PIPE,
                      stderr: T.Union[T.BinaryIO, int] = subprocess.PIPE,
                      **kwargs: T.Any) -> T.Tuple[subprocess.Popen, str, str]:
    '''Bytes-mode fallback for Popen_safe on non-UTF-8 consoles.

    Runs the child without universal_newlines and decodes stdout/stderr
    manually with errors='replace', normalizing Windows line endings.
    '''
    proc = subprocess.Popen(args, universal_newlines=False, close_fds=False,
                            stdout=stdout, stderr=stderr, **kwargs)
    stdin_data = write.encode('utf-8') if write is not None else None  # type: T.Optional[bytes]
    raw_out, raw_err = proc.communicate(stdin_data)

    def decode_stream(raw: T.Optional[bytes], stream: T.TextIO) -> T.Optional[str]:
        # Prefer the corresponding console stream's encoding when known.
        if raw is None:
            return None
        if stream.encoding:
            text = raw.decode(encoding=stream.encoding, errors='replace')
        else:
            text = raw.decode(errors='replace')
        return text.replace('\r\n', '\n')

    return proc, decode_stream(raw_out, sys.stdout), decode_stream(raw_err, sys.stderr)
def iter_regexin_iter(regexiter: T.Iterable[str], initer: T.Iterable[str]) -> T.Optional[str]:
    '''
    Search every item of *initer* with each regular expression in
    *regexiter*, in order; non-string items are skipped.  Returns the first
    matched text, or None when nothing matches.
    '''
    for pattern in regexiter:
        for candidate in initer:
            if not isinstance(candidate, str):
                continue
            found = re.search(pattern, candidate)
            if found:
                return found.group()
    return None
def _substitute_values_check_errors(command: T.List[str], values: T.Dict[str, str]) -> None:
    """Validate that every @INPUT.../@OUTPUT...-style template used in
    *command* can be satisfied by *values*; raises MesonException on the
    first violation, returns None when the command is consistent."""
    # Error checking
    inregex = ['@INPUT([0-9]+)?@', '@PLAINNAME@', '@BASENAME@'] # type: T.List[str]
    outregex = ['@OUTPUT([0-9]+)?@', '@OUTDIR@'] # type: T.List[str]
    if '@INPUT@' not in values:
        # Error out if any input-derived templates are present in the command
        match = iter_regexin_iter(inregex, command)
        if match:
            m = 'Command cannot have {!r}, since no input files were specified'
            raise MesonException(m.format(match))
    else:
        if len(values['@INPUT@']) > 1:
            # Error out if @PLAINNAME@ or @BASENAME@ is present in the command
            match = iter_regexin_iter(inregex[1:], command)
            if match:
                raise MesonException('Command cannot have {!r} when there is '
                                     'more than one input file'.format(match))
        # Error out if an invalid @INPUTnn@ template was specified
        for each in command:
            # Non-string entries (e.g. File objects) cannot hold templates.
            if not isinstance(each, str):
                continue
            match2 = re.search(inregex[0], each)
            if match2 and match2.group() not in values:
                m = 'Command cannot have {!r} since there are only {!r} inputs'
                raise MesonException(m.format(match2.group(), len(values['@INPUT@'])))
    if '@OUTPUT@' not in values:
        # Error out if any output-derived templates are present in the command
        match = iter_regexin_iter(outregex, command)
        if match:
            m = 'Command cannot have {!r} since there are no outputs'
            raise MesonException(m.format(match))
    else:
        # Error out if an invalid @OUTPUTnn@ template was specified
        for each in command:
            if not isinstance(each, str):
                continue
            match2 = re.search(outregex[0], each)
            if match2 and match2.group() not in values:
                m = 'Command cannot have {!r} since there are only {!r} outputs'
                raise MesonException(m.format(match2.group(), len(values['@OUTPUT@'])))

def substitute_values(command: T.List[str], values: T.Dict[str, str]) -> T.List[str]:
    '''
    Substitute the template strings in the @values dict into the list of
    strings @command and return a new list. For a full list of the templates,
    see get_filenames_templates_dict()
    If multiple inputs/outputs are given in the @values dictionary, we
    substitute @INPUT@ and @OUTPUT@ only if they are the entire string, not
    just a part of it, and in that case we substitute *all* of them.
    '''
    # Error checking
    _substitute_values_check_errors(command, values)
    # Substitution
    outcmd = [] # type: T.List[str]
    # Regex matching every template key except the list-valued @INPUT@ and
    # @OUTPUT@, which get special whole-string handling below.
    rx_keys = [re.escape(key) for key in values if key not in ('@INPUT@', '@OUTPUT@')]
    value_rx = re.compile('|'.join(rx_keys)) if rx_keys else None
    for vv in command:
        # Non-string arguments pass through untouched.
        if not isinstance(vv, str):
            outcmd.append(vv)
        elif '@INPUT@' in vv:
            inputs = values['@INPUT@']
            if vv == '@INPUT@':
                outcmd += inputs
            elif len(inputs) == 1:
                outcmd.append(vv.replace('@INPUT@', inputs[0]))
            else:
                raise MesonException("Command has '@INPUT@' as part of a "
                                     "string and more than one input file")
        elif '@OUTPUT@' in vv:
            outputs = values['@OUTPUT@']
            if vv == '@OUTPUT@':
                outcmd += outputs
            elif len(outputs) == 1:
                outcmd.append(vv.replace('@OUTPUT@', outputs[0]))
            else:
                raise MesonException("Command has '@OUTPUT@' as part of a "
                                     "string and more than one output file")
        # Append values that are exactly a template string.
        # This is faster than a string replace.
        elif vv in values:
            outcmd.append(values[vv])
        # Substitute everything else with replacement
        elif value_rx:
            outcmd.append(value_rx.sub(lambda m: values[m.group(0)], vv))
        else:
            outcmd.append(vv)
    return outcmd
def get_filenames_templates_dict(inputs: T.List[str], outputs: T.List[str]) -> T.Dict[str, T.Union[str, T.List[str]]]:
    '''
    Build the template-string mapping used by substitute_values():
    @INPUT@ - the full path to one or more input files, from @inputs
    @OUTPUT@ - the full path to one or more output files, from @outputs
    @OUTDIR@ - the full path to the directory containing the output files
    With exactly one input, @PLAINNAME@ (its filename) and @BASENAME@
    (the filename minus extension) are added too; @INPUT0@, @INPUT1@, ...
    and @OUTPUT0@, @OUTPUT1@, ... are always emitted, one per file.
    '''
    values = {}  # type: T.Dict[str, T.Union[str, T.List[str]]]
    if inputs:
        values['@INPUT@'] = inputs
        values.update({'@INPUT{}@'.format(idx): path for idx, path in enumerate(inputs)})
        if len(inputs) == 1:
            plain = os.path.basename(inputs[0])
            values['@PLAINNAME@'] = plain
            values['@BASENAME@'] = os.path.splitext(plain)[0]
    if outputs:
        values['@OUTPUT@'] = outputs
        values.update({'@OUTPUT{}@'.format(idx): path for idx, path in enumerate(outputs)})
        # All outputs share one directory; many external programs fail on
        # empty arguments, so an empty dirname becomes '.'.
        values['@OUTDIR@'] = os.path.dirname(outputs[0]) or '.'
    return values
def _make_tree_writable(topdir: str) -> None:
# Ensure all files and directories under topdir are writable
# (and readable) by owner.
for d, _, files in os.walk(topdir):
os.chmod(d, os.stat(d).st_mode | stat.S_IWRITE | stat.S_IREAD)
for fname in files:
fpath = os.path.join(d, fname)
if os.path.isfile(fpath):
os.chmod(fpath, os.stat(fpath).st_mode | stat.S_IWRITE | stat.S_IREAD)
def windows_proof_rmtree(f: str) -> None:
    """shutil.rmtree with retries, for Windows where a transiently open
    handle (e.g. an anti-virus scan) makes deletion fail."""
    # On Windows if anyone is holding a file open you can't
    # delete it. As an example an anti virus scanner might
    # be scanning files you are trying to delete. The only
    # way to fix this is to try again and again.
    delays = [0.1, 0.1, 0.2, 0.2, 0.2, 0.5, 0.5, 1, 1, 1, 1, 2]
    # Start by making the tree writable.
    _make_tree_writable(f)
    for d in delays:
        try:
            shutil.rmtree(f)
            return
        except FileNotFoundError:
            # Already gone (possibly removed concurrently): nothing to do.
            return
        except OSError:
            time.sleep(d)
    # Try one last time and throw if it fails.
    shutil.rmtree(f)
def windows_proof_rm(fpath: str) -> None:
    """Like windows_proof_rmtree, but for a single file.

    On Windows an open handle (e.g. from a virus scanner) blocks deletion,
    so retry with increasing delays before finally letting the error raise.
    """
    if os.path.isfile(fpath):
        os.chmod(fpath, os.stat(fpath).st_mode | stat.S_IWRITE | stat.S_IREAD)
    for delay in (0.1, 0.1, 0.2, 0.2, 0.2, 0.5, 0.5, 1, 1, 1, 1, 2):
        try:
            os.unlink(fpath)
        except FileNotFoundError:
            # Nothing to delete (possibly removed concurrently).
            return
        except OSError:
            time.sleep(delay)
        else:
            return
    # Final attempt; propagate the error if it still fails.
    os.unlink(fpath)
class TemporaryDirectoryWinProof(TemporaryDirectory):
    """
    Like TemporaryDirectory, but cleans things up using
    windows_proof_rmtree()
    """

    def __exit__(self, exc: T.Any, value: T.Any, tb: T.Any) -> None:
        try:
            super().__exit__(exc, value, tb)
        except OSError:
            # Plain rmtree failed (typically a held handle on Windows);
            # fall back to the retrying deleter.
            windows_proof_rmtree(self.name)

    def cleanup(self) -> None:
        try:
            super().cleanup()
        except OSError:
            windows_proof_rmtree(self.name)
def detect_subprojects(spdir_name: str, current_dir: str = '',
                       result: T.Optional[T.Dict[str, T.List[str]]] = None) -> T.Optional[T.Dict[str, T.List[str]]]:
    """Recursively collect subprojects found under *current_dir*/*spdir_name*.

    Returns a mapping from subproject name to the list of paths found for it
    (subproject directories and/or .wrap files, .wrap extension stripped from
    the name).  The download cache directory 'packagecache' is skipped.
    """
    if result is None:
        result = {}
    spdir = os.path.join(current_dir, spdir_name)
    if not os.path.exists(spdir):
        return result
    for trial in glob(os.path.join(spdir, '*')):
        basename = os.path.basename(trial)
        # BUG FIX: 'trial' is a full path, so the previous comparison
        # (trial == 'packagecache') never matched and the download cache
        # was reported (and recursed into) as if it were a subproject.
        if basename == 'packagecache':
            continue
        append_this = True
        if os.path.isdir(trial):
            # Subprojects may nest their own subprojects directory.
            detect_subprojects(spdir_name, trial, result)
        elif trial.endswith('.wrap') and os.path.isfile(trial):
            basename = os.path.splitext(basename)[0]
        else:
            append_this = False
        if append_this:
            result.setdefault(basename, []).append(trial)
    return result
def substring_is_in_list(substr: str, strlist: T.List[str]) -> bool:
    """Return True when *substr* occurs inside any element of *strlist*."""
    return any(substr in item for item in strlist)
class OrderedSet(T.MutableSet[_T]):
    """A set that preserves the order in which items are added, by first
    insertion.
    """
    def __init__(self, iterable: T.Optional[T.Iterable[_T]] = None):
        # typing.OrderedDict is new in 3.7.2, so we can't use that, but we can
        # use MutableMapping, which is fine in this case.
        # Keys are the members; values are always None.
        self.__container = collections.OrderedDict() # type: T.MutableMapping[_T, None]
        if iterable:
            self.update(iterable)
    def __contains__(self, value: object) -> bool:
        return value in self.__container
    def __iter__(self) -> T.Iterator[_T]:
        return iter(self.__container.keys())
    def __len__(self) -> int:
        return len(self.__container)
    def __repr__(self) -> str:
        # Don't print 'OrderedSet("")' for an empty set.
        if self.__container:
            return 'OrderedSet("{}")'.format(
                '", "'.join(repr(e) for e in self.__container.keys()))
        return 'OrderedSet()'
    def __reversed__(self) -> T.Iterator[_T]:
        # Mypy is complaining that sets cant be reversed, which is true for
        # unordered sets, but this is an ordered, set so reverse() makes sense.
        return reversed(self.__container.keys()) # type: ignore
    def add(self, value: _T) -> None:
        # Re-adding an existing member does not change its position.
        self.__container[value] = None
    def discard(self, value: _T) -> None:
        # Unlike remove(), a missing value is silently ignored.
        if value in self.__container:
            del self.__container[value]
    def move_to_end(self, value: _T, last: bool = True) -> None:
        # Mypy does not know about move_to_end, because it is not part of MutableMapping
        self.__container.move_to_end(value, last) # type: ignore
    def pop(self, last: bool = True) -> _T:
        # Mypy does not know about the last argument, because it is not part of MutableMapping
        item, _ = self.__container.popitem(last) # type: ignore
        return item
    def update(self, iterable: T.Iterable[_T]) -> None:
        for item in iterable:
            self.__container[item] = None
    def difference(self, set_: T.Union[T.Set[_T], 'OrderedSet[_T]']) -> 'OrderedSet[_T]':
        # Preserves the receiver's order; membership is tested against set_.
        return type(self)(e for e in self if e not in set_)
class BuildDirLock:
    """Context manager taking an advisory lock on a build directory so two
    Meson processes cannot configure it at once (flock on POSIX, msvcrt
    locking on Windows; a no-op when neither module is available)."""
    def __init__(self, builddir: str) -> None:
        self.lockfilename = os.path.join(builddir, 'meson-private/meson.lock')
    def __enter__(self) -> None:
        self.lockfile = open(self.lockfilename, 'w')
        try:
            if have_fcntl:
                # Non-blocking: fail immediately if another process holds it.
                fcntl.flock(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
            elif have_msvcrt:
                msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)
        except (BlockingIOError, PermissionError):
            self.lockfile.close()
            raise MesonException('Some other Meson process is already using this build directory. Exiting.')
    def __exit__(self, *args: T.Any) -> None:
        if have_fcntl:
            fcntl.flock(self.lockfile, fcntl.LOCK_UN)
        elif have_msvcrt:
            msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)
        self.lockfile.close()
def relpath(path: str, start: str) -> str:
    """os.path.relpath that falls back to returning *path* unchanged when no
    relative path exists (e.g. different drives on Windows) or the inputs
    are not path-like."""
    try:
        rel = os.path.relpath(path, start)
    except (TypeError, ValueError):
        return path
    return rel
def path_is_in_root(path: Path, root: Path, resolve: bool = False) -> bool:
    """Return True when *path* lies inside *root*, optionally after
    resolving both (symlinks, '..', relative components) first."""
    candidate, base = (path.resolve(), root.resolve()) if resolve else (path, root)
    try:
        candidate.relative_to(base)
    except ValueError:
        return False
    return True
def relative_to_if_possible(path: Path, root: Path, resolve: bool = False) -> Path:
    """Return *path* relative to *root* when possible, otherwise *path*
    unchanged (the original, unresolved argument is returned on failure)."""
    candidate, base = (path.resolve(), root.resolve()) if resolve else (path, root)
    try:
        return candidate.relative_to(base)
    except ValueError:
        return path
class LibType(IntEnum):
    """Enumeration for library types."""
    SHARED = 0
    STATIC = 1
    PREFER_SHARED = 2   # shared if found, else static
    PREFER_STATIC = 3   # static if found, else shared

class ProgressBarFallback: # lgtm [py/iter-returns-non-self]
    '''
    Fallback progress bar implementation when tqdm is not found
    Since this class is not an actual iterator, but only provides a minimal
    fallback, it is safe to ignore the 'Iterator does not return self from
    __iter__ method' warning.
    '''
    def __init__(self, iterable: T.Optional[T.Iterable[str]] = None, total: T.Optional[int] = None,
                 bar_type: T.Optional[str] = None, desc: T.Optional[str] = None):
        if iterable is not None:
            # Iterator mode: just wrap the iterable, no progress output.
            self.iterable = iter(iterable)
            return
        self.total = total
        self.done = 0
        self.printed_dots = 0
        if self.total and bar_type == 'download':
            print('Download size:', self.total)
        if desc:
            print('{}: '.format(desc), end='')

    # Pretend to be an iterator when called as one and don't print any
    # progress
    def __iter__(self) -> T.Iterator[str]:
        return self.iterable

    def __next__(self) -> str:
        return next(self.iterable)

    def print_dot(self) -> None:
        print('.', end='')
        sys.stdout.flush()
        self.printed_dots += 1

    def update(self, progress: int) -> None:
        self.done += progress
        if not self.total:
            # Just print one dot per call if we don't have a total length
            self.print_dot()
            return
        # One dot per 10% of completion.
        ratio = int(self.done / self.total * 10)
        while self.printed_dots < ratio:
            self.print_dot()

    def close(self) -> None:
        print('')

try:
    from tqdm import tqdm
except ImportError:
    # ideally we would use a typing.Protocol here, but it's part of typing_extensions until 3.8
    ProgressBar = ProgressBarFallback # type: T.Union[T.Type[ProgressBarFallback], T.Type[ProgressBarTqdm]]
else:
    class ProgressBarTqdm(tqdm):
        def __init__(self, *args: T.Any, bar_type: T.Optional[str] = None, **kwargs: T.Any) -> None:
            if bar_type == 'download':
                # Download bars report bytes and stay visible when done.
                kwargs.update({'unit': 'bytes', 'leave': True})
            else:
                kwargs.update({'leave': False})
            kwargs['ncols'] = 100
            super().__init__(*args, **kwargs)

    ProgressBar = ProgressBarTqdm
def get_wine_shortpath(winecmd: T.List[str], wine_paths: T.Sequence[str]) -> str:
    """Get a short version of @wine_paths to avoid reaching WINEPATH number
    of char limit.

    Runs a throw-away .bat script under wine that echoes the 8.3 short form
    of each path, joined with ';'.  Falls back to the long paths when the
    script fails; raises MesonException when the result exceeds 2048 chars.
    """
    # Deduplicate while keeping first-seen order.
    wine_paths = list(OrderedSet(wine_paths))
    getShortPathScript = '%s.bat' % str(uuid.uuid4()).lower()[:5]
    with open(getShortPathScript, mode='w') as f:
        f.write("@ECHO OFF\nfor %%x in (%*) do (\n echo|set /p=;%~sx\n)\n")
        f.flush()
    try:
        with open(os.devnull, 'w') as stderr:
            wine_path = subprocess.check_output(
                winecmd +
                ['cmd', '/C', getShortPathScript] + wine_paths,
                stderr=stderr).decode('utf-8')
    except subprocess.CalledProcessError as e:
        # Best effort: fall back to the unshortened paths.
        print("Could not get short paths: %s" % e)
        wine_path = ';'.join(wine_paths)
    finally:
        os.remove(getShortPathScript)
    if len(wine_path) > 2048:
        raise MesonException(
            'WINEPATH size {} > 2048'
            ' this will cause random failure.'.format(
                len(wine_path)))
    return wine_path.strip(';')
def run_once(func: T.Callable[..., _T]) -> T.Callable[..., _T]:
    """Decorator: execute *func* at most once and replay the first call's
    result on every later call (later arguments are ignored)."""
    cached = []  # type: T.List[_T]

    @wraps(func)
    def wrapper(*args: T.Any, **kwargs: T.Any) -> _T:
        # A one-element list doubles as the "has run" flag and the cache.
        if not cached:
            cached.append(func(*args, **kwargs))
        return cached[0]

    return wrapper
class OptionProxy(T.Generic[_T]):
    """Thin wrapper exposing a pre-validated override value via .value,
    mirroring the UserOption interface used by __getitem__ below."""
    def __init__(self, value: _T):
        self.value = value

class OptionOverrideProxy(collections.abc.MutableMapping):
    '''Mimic an option list but transparently override selected option
    values.
    '''

    # TODO: the typing here could be made more explicit using a TypeDict from
    # python 3.8 or typing_extensions

    def __init__(self, overrides: T.Dict[str, T.Any], *options: 'OptionDictType'):
        self.overrides = overrides.copy()
        self.options = {} # type: T.Dict[str, UserOption]
        # Later option dicts win on key clashes.
        for o in options:
            self.options.update(o)

    def __getitem__(self, key: str) -> T.Union['UserOption', OptionProxy]:
        if key in self.options:
            opt = self.options[key]
            if key in self.overrides:
                # Validate the raw override against the option it replaces.
                return OptionProxy(opt.validate_value(self.overrides[key]))
            return opt
        raise KeyError('Option not found', key)

    def __setitem__(self, key: str, value: T.Union['UserOption', OptionProxy]) -> None:
        self.overrides[key] = value.value

    def __delitem__(self, key: str) -> None:
        del self.overrides[key]

    def __iter__(self) -> T.Iterator[str]:
        return iter(self.options)

    def __len__(self) -> int:
        return len(self.options)

    def copy(self) -> 'OptionOverrideProxy':
        return OptionOverrideProxy(self.overrides.copy(), self.options.copy())
# NOTE(review): removed a stray dataset-extraction artifact here
# ("35.686308 | 152 | 0.603117" metadata row) that split this file in two.
from pathlib import Path
import sys
import stat
import time
import platform, subprocess, operator, os, shlex, shutil, re
import collections
from enum import IntEnum
from functools import lru_cache, wraps
from itertools import tee, filterfalse
from tempfile import TemporaryDirectory
import typing as T
import uuid
import textwrap
from mesonbuild import mlog
if T.TYPE_CHECKING:
from .build import ConfigurationData
from .coredata import OptionDictType, UserOption
from .compilers.compilers import CompilerType
from .interpreterbase import ObjectHolder
FileOrString = T.Union['File', str]
_T = T.TypeVar('_T')
_U = T.TypeVar('_U')
have_fcntl = False
have_msvcrt = False
project_meson_versions = collections.defaultdict(str)
try:
import fcntl
have_fcntl = True
except Exception:
pass
try:
import msvcrt
have_msvcrt = True
except Exception:
pass
from glob import glob
# Decide how to re-invoke the running Python interpreter: the Windows
# meson.exe bundle must go through its 'runpython' subcommand.
if os.path.basename(sys.executable) == 'meson.exe':
    python_command = [sys.executable, 'runpython']
else:
    python_command = [sys.executable]
meson_command = None  # populated later by set_meson_command()
class MesonException(Exception):
    '''Exceptions thrown by Meson, optionally carrying a source location.'''
    def __init__(self, *args: object, file: T.Optional[str] = None,
                 lineno: T.Optional[int] = None, colno: T.Optional[int] = None):
        super().__init__(*args)
        self.file = file
        self.lineno = lineno
        self.colno = colno

class EnvironmentException(MesonException):
    '''Exceptions thrown while processing and creating the build environment.'''

class GitException(MesonException):
    '''Raised when a git command fails; its output is kept in .output.'''
    def __init__(self, msg: str, output: T.Optional[str] = None):
        super().__init__(msg)
        self.output = output.strip() if output else ''
GIT = shutil.which('git')  # full path to git, or None when not installed

def git(cmd: T.List[str], workingdir: str, check: bool = False, **kwargs: T.Any) -> T.Tuple[subprocess.Popen, str, str]:
    """Run a git command in *workingdir* via Popen_safe; with check=True a
    non-zero exit raises GitException carrying stderr."""
    cmd = [GIT] + cmd
    p, o, e = Popen_safe(cmd, cwd=workingdir, **kwargs)
    if check and p.returncode != 0:
        raise GitException('Git command failed: ' + str(cmd), e)
    return p, o, e

def quiet_git(cmd: T.List[str], workingdir: str, check: bool = False) -> T.Tuple[bool, str]:
    """Run git capturing output; returns (success, stdout-or-error-text)."""
    if not GIT:
        m = 'Git program not found.'
        if check:
            raise GitException(m)
        return False, m
    p, o, e = git(cmd, workingdir, check)
    if p.returncode != 0:
        return False, e
    return True, o

def verbose_git(cmd: T.List[str], workingdir: str, check: bool = False) -> bool:
    """Run git with output going straight to the console; returns success."""
    if not GIT:
        m = 'Git program not found.'
        if check:
            raise GitException(m)
        return False
    # stdout/stderr=None inherits the parent's streams.
    p, _, _ = git(cmd, workingdir, check, stdout=None, stderr=None)
    return p.returncode == 0

def set_meson_command(mainfile: str) -> None:
    """Compute and store the command used to re-invoke Meson itself, based
    on which entry point *mainfile* we were started from."""
    global python_command
    global meson_command
    if not mainfile.endswith('.py'):
        # A native executable (e.g. the Windows meson.exe bundle).
        meson_command = [mainfile]
    elif os.path.isabs(mainfile) and mainfile.endswith('mesonmain.py'):
        # Can't actually run meson with an absolute path to mesonmain.py, it must be run as -m mesonbuild.mesonmain
        meson_command = python_command + ['-m', 'mesonbuild.mesonmain']
    else:
        # Either run uninstalled, or full path to meson-script.py
        meson_command = python_command + [mainfile]
    # We print this value for unit tests.
    if 'MESON_COMMAND_TESTS' in os.environ:
        mlog.log('meson_command is {!r}'.format(meson_command))
def is_ascii_string(astring: T.Union[str, bytes]) -> bool:
    """Return True when *astring* (str or bytes) contains only ASCII.

    Bug fix: ``str.encode('ascii')`` raises UnicodeEncodeError, not the
    previously caught UnicodeDecodeError, so non-ASCII ``str`` input used to
    propagate the exception instead of returning False.  Catching
    UnicodeError — the common base of both — handles str and bytes alike.
    """
    try:
        if isinstance(astring, str):
            astring.encode('ascii')
        elif isinstance(astring, bytes):
            astring.decode('ascii')
    except UnicodeError:
        return False
    return True
def check_direntry_issues(direntry_array: T.Union[T.List[T.Union[str, bytes]], str, bytes]) -> None:
    """Warn on stderr when non-ASCII file-system entries are used under a
    non-UTF-8 locale (os.stat & friends may then fail to decode the names).
    No warning is emitted on Windows."""
    import locale
    # Warn if the locale is not UTF-8. This can cause various unfixable issues
    # such as os.stat not being able to decode filenames with unicode in them.
    # There is no way to reset both the preferred encoding and the filesystem
    # encoding, so we can just warn about it.
    e = locale.getpreferredencoding()
    if e.upper() != 'UTF-8' and not is_windows():
        if not isinstance(direntry_array, list):
            direntry_array = [direntry_array]
        for de in direntry_array:
            if is_ascii_string(de):
                continue
            mlog.warning(textwrap.dedent('''
                You are using {!r} which is not a Unicode-compatible
                locale but you are trying to access a file system entry called {!r} which is
                not pure ASCII. This may cause problems.
                '''.format(e, de)), file=sys.stderr)

# Put this in objects that should not get dumped to pickle files
# by accident.
import threading
an_unpicklable_object = threading.Lock()
class FileMode:
    """Install permissions given in 'rwxr-xr-x' symbolic notation, plus an
    optional owner and group, converted to stat-module permission bits."""
    # Three triads: owner, group, others.  The first two characters of each
    # triad are read ('r'/'-') and write ('w'/'-').  The third is execute,
    # where the owner/group triads may also carry setuid/setgid ('s' = with
    # execute, 'S' = without) and the others triad the sticky bit ('t'/'T').
    #
    # The meanings of 'rwx' perms is not obvious for directories; see:
    # https://www.hackinglinuxexposed.com/articles/20030424.html
    #
    # For information on this notation such as setuid/setgid/sticky bits, see:
    # https://en.wikipedia.org/wiki/File_system_permissions#Symbolic_notation
    symbolic_perms_regex = re.compile('[r-][w-][xsS-]' # Owner perms
                                      '[r-][w-][xsS-]' # Group perms
                                      '[r-][w-][xtT-]') # Others perms

    def __init__(self, perms: T.Optional[str] = None, owner: T.Optional[str] = None,
                 group: T.Optional[str] = None):
        self.perms_s = perms
        self.perms = self.perms_s_to_bits(perms)
        self.owner = owner
        self.group = group

    def __repr__(self) -> str:
        return '<FileMode: {!r} owner={} group={}'.format(self.perms_s, self.owner, self.group)

    @classmethod
    def perms_s_to_bits(cls, perms_s: T.Optional[str]) -> int:
        """Translate a 9-character symbolic permission string into stat bits.

        Returns -1 when perms_s is None (meaning: do not touch permissions);
        raises MesonException for malformed input.
        """
        if perms_s is None:
            # No perms specified, we will not touch the permissions
            return -1
        eg = 'rwxr-xr-x'
        if not isinstance(perms_s, str):
            raise MesonException('Install perms must be a string. For example, {!r}'.format(eg))
        if len(perms_s) != 9 or not cls.symbolic_perms_regex.match(perms_s):
            raise MesonException('File perms {!r} must be exactly 9 chars. For example, {!r}'.format(perms_s, eg))
        # Per-triad stat bits: (read, write, execute, setuid/setgid/sticky).
        triad_bits = (
            (stat.S_IRUSR, stat.S_IWUSR, stat.S_IXUSR, stat.S_ISUID),
            (stat.S_IRGRP, stat.S_IWGRP, stat.S_IXGRP, stat.S_ISGID),
            (stat.S_IROTH, stat.S_IWOTH, stat.S_IXOTH, stat.S_ISVTX),
        )
        perms = 0
        for idx, (r_bit, w_bit, x_bit, special_bit) in enumerate(triad_bits):
            r_ch, w_ch, x_ch = perms_s[idx * 3:idx * 3 + 3]
            if r_ch == 'r':
                perms |= r_bit
            if w_ch == 'w':
                perms |= w_bit
            # Lower-case 's'/'t': executable AND special bit; upper-case
            # 'S'/'T': only the special bit.  The regex above guarantees
            # s/S appear only in the first two triads and t/T in the last.
            if x_ch == 'x':
                perms |= x_bit
            elif x_ch in ('s', 't'):
                perms |= x_bit | special_bit
            elif x_ch in ('S', 'T'):
                perms |= special_bit
        return perms
class File:
    """A source or generated file, addressed as (is_built, subdir, fname)
    relative to the source root (is_built=False) or build root (True)."""
    def __init__(self, is_built: bool, subdir: str, fname: str):
        self.is_built = is_built
        self.subdir = subdir
        self.fname = fname
        # Precomputed so __eq__ can short-circuit on cheap hash inequality.
        self.hash = hash((is_built, subdir, fname))

    def __str__(self) -> str:
        return self.relative_name()

    def __repr__(self) -> str:
        ret = '<File: {0}'
        if not self.is_built:
            ret += ' (not built)'
        ret += '>'
        return ret.format(self.relative_name())

    @staticmethod
    @lru_cache(maxsize=None)
    def from_source_file(source_root: str, subdir: str, fname: str) -> 'File':
        # Alternate constructor that verifies the file exists in the source tree.
        if not os.path.isfile(os.path.join(source_root, subdir, fname)):
            raise MesonException('File %s does not exist.' % fname)
        return File(False, subdir, fname)

    @staticmethod
    def from_built_file(subdir: str, fname: str) -> 'File':
        return File(True, subdir, fname)

    @staticmethod
    def from_absolute_file(fname: str) -> 'File':
        return File(False, '', fname)

    @lru_cache(maxsize=None)
    def rel_to_builddir(self, build_to_src: str) -> str:
        # NOTE(review): lru_cache on instance methods keeps every instance
        # alive for the cache's lifetime; presumably acceptable as File
        # objects live for the whole configure run -- confirm before reuse.
        if self.is_built:
            return self.relative_name()
        else:
            return os.path.join(build_to_src, self.subdir, self.fname)

    @lru_cache(maxsize=None)
    def absolute_path(self, srcdir: str, builddir: str) -> str:
        absdir = srcdir
        if self.is_built:
            absdir = builddir
        return os.path.join(absdir, self.relative_name())

    def endswith(self, ending: str) -> bool:
        return self.fname.endswith(ending)

    def split(self, s: str) -> T.List[str]:
        return self.fname.split(s)

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, File):
            return NotImplemented
        if self.hash != other.hash:
            return False
        return (self.fname, self.subdir, self.is_built) == (other.fname, other.subdir, other.is_built)

    def __hash__(self) -> int:
        return self.hash

    @lru_cache(maxsize=None)
    def relative_name(self) -> str:
        return os.path.join(self.subdir, self.fname)
def get_compiler_for_source(compilers: T.Iterable['CompilerType'], src: str) -> 'CompilerType':
    """Return the first compiler in *compilers* whose can_compile() accepts
    *src*; raise MesonException when none does."""
    for candidate in compilers:
        if not candidate.can_compile(src):
            continue
        return candidate
    raise MesonException('No specified compiler can handle file {!s}'.format(src))
def classify_unity_sources(compilers: T.Iterable['CompilerType'], sources: T.Iterable[str]) -> T.Dict['CompilerType', T.List[str]]:
    """Group *sources* by the compiler (from *compilers*) that handles each;
    raises (via get_compiler_for_source) when a source has no compiler."""
    grouped = {}  # type: T.Dict[CompilerType, T.List[str]]
    for src in sources:
        handler = get_compiler_for_source(compilers, src)
        grouped.setdefault(handler, []).append(src)
    return grouped
class MachineChoice(IntEnum):
    """The two machine namespaces: the machine we build on (BUILD) and the
    machine we build for (HOST)."""
    BUILD = 0
    HOST = 1
    def get_lower_case_name(self) -> str:
        return PerMachine('build', 'host')[self]
    def get_prefix(self) -> str:
        # Option-name prefix: host options are unprefixed, build options
        # carry 'build.'.
        return PerMachine('build.', '')[self]

class PerMachine(T.Generic[_T]):
    """A pair of values, one per MachineChoice, indexable by the enum."""
    def __init__(self, build: _T, host: _T) -> None:
        self.build = build
        self.host = host
    def __getitem__(self, machine: MachineChoice) -> _T:
        return {
            MachineChoice.BUILD: self.build,
            MachineChoice.HOST: self.host,
        }[machine]
    def __setitem__(self, machine: MachineChoice, val: _T) -> None:
        setattr(self, machine.get_lower_case_name(), val)
    def miss_defaulting(self) -> "PerMachineDefaultable[T.Optional[_T]]":
        """Unset the values that could have been defaulted from another
        machine (host == build), so they can be redefined and re-frozen."""
        unfreeze = PerMachineDefaultable() # type: PerMachineDefaultable[T.Optional[_T]]
        unfreeze.build = self.build
        unfreeze.host = self.host
        if unfreeze.host == unfreeze.build:
            unfreeze.host = None
        return unfreeze
    def __repr__(self) -> str:
        return 'PerMachine({!r}, {!r})'.format(self.build, self.host)

class PerThreeMachine(PerMachine[_T]):
    """PerMachine extended with the (mostly unused) target machine."""
    def __init__(self, build: _T, host: _T, target: _T) -> None:
        super().__init__(build, host)
        self.target = target
    def miss_defaulting(self) -> "PerThreeMachineDefaultable[T.Optional[_T]]":
        """Unset values defaulted down the build -> host -> target chain."""
        unfreeze = PerThreeMachineDefaultable() # type: PerThreeMachineDefaultable[T.Optional[_T]]
        unfreeze.build = self.build
        unfreeze.host = self.host
        unfreeze.target = self.target
        if unfreeze.target == unfreeze.host:
            unfreeze.target = None
        if unfreeze.host == unfreeze.build:
            unfreeze.host = None
        return unfreeze
    def matches_build_machine(self, machine: MachineChoice) -> bool:
        return self.build == self[machine]
    def __repr__(self) -> str:
        return 'PerThreeMachine({!r}, {!r}, {!r})'.format(self.build, self.host, self.target)

class PerMachineDefaultable(PerMachine[T.Optional[_T]]):
    """PerMachine whose unset (None) host value defaults from build on
    default_missing()."""
    def __init__(self) -> None:
        super().__init__(None, None)
    def default_missing(self) -> "PerMachine[T.Optional[_T]]":
        """Default host from build, freezing into a plain PerMachine."""
        freeze = PerMachine(self.build, self.host)
        if freeze.host is None:
            freeze.host = freeze.build
        return freeze
    def __repr__(self) -> str:
        return 'PerMachineDefaultable({!r}, {!r})'.format(self.build, self.host)

class PerThreeMachineDefaultable(PerMachineDefaultable, PerThreeMachine[T.Optional[_T]]):
    """Defaultable variant for three machines: host defaults from build,
    then target defaults from host."""
    def __init__(self) -> None:
        PerThreeMachine.__init__(self, None, None, None)
    def default_missing(self) -> "PerThreeMachine[T.Optional[_T]]":
        freeze = PerThreeMachine(self.build, self.host, self.target)
        if freeze.host is None:
            freeze.host = freeze.build
        if freeze.target is None:
            freeze.target = freeze.host
        return freeze
    def __repr__(self) -> str:
        return 'PerThreeMachineDefaultable({!r}, {!r}, {!r})'.format(self.build, self.host, self.target)
# Host-platform predicates, keyed on platform.system() (or sys.platform
# for cygwin, where platform.system() reports a CYGWIN_NT-* string).
def is_sunos() -> bool:
    return platform.system().lower() == 'sunos'

def is_osx() -> bool:
    return platform.system().lower() == 'darwin'

def is_linux() -> bool:
    return platform.system().lower() == 'linux'

def is_android() -> bool:
    return platform.system().lower() == 'android'

def is_haiku() -> bool:
    return platform.system().lower() == 'haiku'

def is_openbsd() -> bool:
    return platform.system().lower() == 'openbsd'

def is_windows() -> bool:
    platname = platform.system().lower()
    return platname == 'windows'

def is_cygwin() -> bool:
    return sys.platform == 'cygwin'

def is_debianlike() -> bool:
    # Debian and derivatives ship this marker file.
    return os.path.isfile('/etc/debian_version')

def is_dragonflybsd() -> bool:
    return platform.system().lower() == 'dragonfly'

def is_netbsd() -> bool:
    return platform.system().lower() == 'netbsd'

def is_freebsd() -> bool:
    return platform.system().lower() == 'freebsd'

def is_irix() -> bool:
    return platform.system().startswith('irix')

def is_hurd() -> bool:
    return platform.system().lower() == 'gnu'

def is_qnx() -> bool:
    return platform.system().lower() == 'qnx'

def is_aix() -> bool:
    return platform.system().lower() == 'aix'

def exe_exists(arglist: T.List[str]) -> bool:
    """Return True when running *arglist* succeeds (exit code 0 within a
    10-second timeout); a missing executable or timeout yields False."""
    try:
        if subprocess.run(arglist, timeout=10).returncode == 0:
            return True
    except (FileNotFoundError, subprocess.TimeoutExpired):
        pass
    return False
@lru_cache(maxsize=None)
def darwin_get_object_archs(objpath: str) -> T.Optional[T.List[str]]:
    """Ask `lipo` which CPU architectures *objpath* supports and return them
    as meson-style names, or None when lipo produced no output.

    NOTE(review): annotation corrected to Optional -- the early-exit path
    returns None, which the previous T.List[str] annotation did not admit.
    """
    _, stdo, stderr = Popen_safe(['lipo', '-info', objpath])
    if not stdo:
        mlog.debug('lipo {}: {}'.format(objpath, stderr))
        return None
    # lipo output ends with 'Architectures ...: <arch list>'.
    stdo = stdo.rsplit(': ', 1)[1]
    # Convert from lipo-style archs to meson-style CPUs
    stdo = stdo.replace('i386', 'x86')
    stdo = stdo.replace('arm64', 'aarch64')
    # Add generic name for armv7 and armv7s
    if 'armv7' in stdo:
        stdo += ' arm'
    return stdo.split()
def detect_vcs(source_dir: T.Union[str, Path]) -> T.Optional[T.Dict[str, str]]:
    """Walk from *source_dir* upwards looking for a version-control checkout
    whose command-line tool is installed; returns the matching descriptor
    dict with 'wc_dir' (working-copy root) added, or None."""
    vcs_systems = [
        dict(name = 'git', cmd = 'git', repo_dir = '.git', get_rev = 'git describe --dirty=+', rev_regex = '(.*)', dep = '.git/logs/HEAD'),
        dict(name = 'mercurial', cmd = 'hg', repo_dir = '.hg', get_rev = 'hg id -i', rev_regex = '(.*)', dep = '.hg/dirstate'),
        dict(name = 'subversion', cmd = 'svn', repo_dir = '.svn', get_rev = 'svn info', rev_regex = 'Revision: (.*)', dep = '.svn/wc.db'),
        dict(name = 'bazaar', cmd = 'bzr', repo_dir = '.bzr', get_rev = 'bzr revno', rev_regex = '(.*)', dep = '.bzr'),
    ]
    if isinstance(source_dir, str):
        source_dir = Path(source_dir)

    parent_paths_and_self = collections.deque(source_dir.parents)
    # Prepend the source directory to the front so we can check it;
    # source_dir.parents doesn't include source_dir
    parent_paths_and_self.appendleft(source_dir)
    for curdir in parent_paths_and_self:
        for vcs in vcs_systems:
            if Path.is_dir(curdir.joinpath(vcs['repo_dir'])) and shutil.which(vcs['cmd']):
                vcs['wc_dir'] = str(curdir)
                return vcs
    return None
def current_vs_supports_modules() -> bool:
    """Detect, from the VS command-prompt environment, a Visual Studio
    16.9.0 preview build (VSCMD_VER like '16.9.0-pre.x')."""
    vsver = os.environ.get('VSCMD_VER', '')
    if not vsver.startswith('16.9.0'):
        return False
    return '-pre.' in vsver
class Version:
    """A version string parsed for rpm-style rich comparison.

    The string is tokenized into runs of digits (compared numerically) and
    runs of letters (compared lexically); separator characters only split
    tokens and never take part in comparisons.  At the same position a
    numeric token outranks an alphabetic one, and equal prefixes fall back
    to comparing token counts.
    """
    def __init__(self, s: str) -> None:
        self._s = s
        # Keep digit and letter runs, drop separators; digit runs become ints.
        tokens = re.findall(r'\d+|[a-zA-Z]+', s)
        self._v = [int(tok) if tok.isdigit() else tok for tok in tokens]

    def __str__(self) -> str:
        return '%s (V=%s)' % (self._s, str(self._v))

    def __repr__(self) -> str:
        return '<Version: {}>'.format(self._s)

    def __lt__(self, other: object) -> bool:
        if not isinstance(other, Version):
            return NotImplemented
        return self.__cmp(other, operator.lt)

    def __gt__(self, other: object) -> bool:
        if not isinstance(other, Version):
            return NotImplemented
        return self.__cmp(other, operator.gt)

    def __le__(self, other: object) -> bool:
        if not isinstance(other, Version):
            return NotImplemented
        return self.__cmp(other, operator.le)

    def __ge__(self, other: object) -> bool:
        if not isinstance(other, Version):
            return NotImplemented
        return self.__cmp(other, operator.ge)

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Version):
            return NotImplemented
        return self._v == other._v

    def __ne__(self, other: object) -> bool:
        if not isinstance(other, Version):
            return NotImplemented
        return self._v != other._v

    def __cmp(self, other: 'Version', comparator: T.Callable[[T.Any, T.Any], bool]) -> bool:
        # Token-by-token comparison; numeric vs alphabetic is decided first
        # (int wins), then value, then overall length.
        for ours, theirs in zip(self._v, other._v):
            if isinstance(ours, int) != isinstance(theirs, int):
                return comparator(isinstance(ours, int), isinstance(theirs, int))
            if ours != theirs:
                return comparator(ours, theirs)
        return comparator(len(self._v), len(other._v))
def _version_extract_cmpop(vstr2: str) -> T.Tuple[T.Callable[[T.Any, T.Any], bool], str]:
if vstr2.startswith('>='):
cmpop = operator.ge
vstr2 = vstr2[2:]
elif vstr2.startswith('<='):
cmpop = operator.le
vstr2 = vstr2[2:]
elif vstr2.startswith('!='):
cmpop = operator.ne
vstr2 = vstr2[2:]
elif vstr2.startswith('=='):
cmpop = operator.eq
vstr2 = vstr2[2:]
elif vstr2.startswith('='):
cmpop = operator.eq
vstr2 = vstr2[1:]
elif vstr2.startswith('>'):
cmpop = operator.gt
vstr2 = vstr2[1:]
elif vstr2.startswith('<'):
cmpop = operator.lt
vstr2 = vstr2[1:]
else:
cmpop = operator.eq
return (cmpop, vstr2)
def version_compare(vstr1: str, vstr2: str) -> bool:
    """Check whether version *vstr1* satisfies the condition string *vstr2*."""
    comparison, bare_version = _version_extract_cmpop(vstr2)
    return comparison(Version(vstr1), Version(bare_version))
def version_compare_many(vstr1: str, conditions: T.Union[str, T.Iterable[str]]) -> T.Tuple[bool, T.List[str], T.List[str]]:
    """Check *vstr1* against every condition.

    Returns (all_satisfied, failed_conditions, satisfied_conditions).
    """
    if isinstance(conditions, str):
        conditions = [conditions]
    found = [req for req in conditions if version_compare(vstr1, req)]
    not_found = [req for req in conditions if not version_compare(vstr1, req)]
    return not_found == [], not_found, found
def version_compare_condition_with_min(condition: str, minimum: str) -> bool:
    """Return True when a version satisfying *condition* can be >= *minimum*.

    Upper-bound and exclusion conditions ('<', '<=', '!=') can never
    guarantee the minimum, so they return False.
    """
    if condition.startswith('>='):
        cmpop = operator.le
        condition = condition[2:]
    elif condition.startswith('<='):
        return False
    elif condition.startswith('!='):
        return False
    elif condition.startswith('=='):
        cmpop = operator.le
        condition = condition[2:]
    elif condition.startswith('='):
        cmpop = operator.le
        condition = condition[1:]
    elif condition.startswith('>'):
        cmpop = operator.lt
        condition = condition[1:]
    elif condition.startswith('<'):
        return False
    else:
        cmpop = operator.le
        condition = condition.strip()

    # Treat a two-component version like '0.46' as '0.46.0': the minimum
    # '0.46.1' should not be satisfied by a dependency on '>=0.46'.
    # FIX: escape the dot — the previous pattern r'^\d+.\d+$' used an
    # unescaped '.', which matches any character (e.g. '3a6').
    if re.match(r'^\d+\.\d+$', condition):
        condition += '.0'

    return T.cast(bool, cmpop(Version(minimum), Version(condition)))
def default_libdir() -> str:
    """Guess the default library installation directory for this system."""
    if is_debianlike():
        try:
            # Debian-style multiarch layout, e.g. 'lib/x86_64-linux-gnu'.
            pc = subprocess.Popen(['dpkg-architecture', '-qDEB_HOST_MULTIARCH'],
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.DEVNULL)
            (stdo, _) = pc.communicate()
            if pc.returncode == 0:
                archpath = stdo.decode().strip()
                return 'lib/' + archpath
        except Exception:
            # dpkg-architecture missing or failed: fall through to heuristics.
            pass
    if is_freebsd() or is_irix():
        return 'lib'
    # Red-Hat-style systems keep 64-bit libraries in a real (non-symlink)
    # /usr/lib64; a symlinked /usr/lib64 means plain 'lib' is canonical.
    if os.path.isdir('/usr/lib64') and not os.path.islink('/usr/lib64'):
        return 'lib64'
    return 'lib'
def default_libexecdir() -> str:
    """Return the default libexec directory name (no multiarch variant exists)."""
    return 'libexec'
def default_prefix() -> str:
    """Return the platform's default installation prefix."""
    if is_windows():
        return 'c:/'
    return '/usr/local'
def get_library_dirs() -> T.List[str]:
    """Return a list of standard system library directories for this host."""
    if is_windows():
        return ['C:/mingw/lib']
    if is_osx():
        return ['/usr/lib']
    unixdirs = ['/usr/local/lib', '/usr/lib', '/lib']
    if is_freebsd():
        return unixdirs
    # Map the machine type to the platform sub-directory name used under
    # /usr/lib and /lib on multilib systems.
    machine = platform.machine()
    if machine in ('i386', 'i486', 'i586', 'i686'):
        plat = 'i386'
    elif machine.startswith('arm'):
        plat = 'arm'
    else:
        plat = ''
    # Solaris uses different platform sub-directory names.
    if is_sunos():
        if machine == 'i86pc':
            plat = 'amd64'
        elif machine.startswith('sun4'):
            plat = 'sparcv9'
    usr_platdir = Path('/usr/lib/') / plat
    if usr_platdir.is_dir():
        unixdirs += [str(x) for x in (usr_platdir).iterdir() if x.is_dir()]
    if os.path.exists('/usr/lib64'):
        unixdirs.append('/usr/lib64')
    lib_platdir = Path('/lib/') / plat
    if lib_platdir.is_dir():
        unixdirs += [str(x) for x in (lib_platdir).iterdir() if x.is_dir()]
    if os.path.exists('/lib64'):
        unixdirs.append('/lib64')
    return unixdirs
def has_path_sep(name: str, sep: str = '/\\') -> bool:
    """Check whether any character of *sep* occurs in *name*.

    By default both the Unix and the Windows path separators are checked.
    """
    # any() with a generator is the idiomatic form of the original
    # loop-with-early-return and short-circuits identically.
    return any(each in name for each in sep)
if is_windows():
    # shlex's quoting rules do not match how MSVC-CRT programs parse their
    # command line, so Windows gets hand-written implementations following
    # the CommandLineToArgvW / CRT conventions.
    _whitespace = ' \t\n\r'
    _find_unsafe_char = re.compile(r'[{}"]'.format(_whitespace)).search

    def quote_arg(arg: str) -> str:
        """Quote *arg* so an MSVC-CRT argument parser reads it back verbatim."""
        if arg and not _find_unsafe_char(arg):
            # No whitespace or quote characters: safe to pass unquoted.
            return arg

        result = '"'
        num_backslashes = 0
        for c in arg:
            if c == '\\':
                num_backslashes += 1
            else:
                if c == '"':
                    # Escape all backslashes and the following double quotation mark
                    num_backslashes = num_backslashes * 2 + 1

                result += num_backslashes * '\\' + c
                num_backslashes = 0

        # Escape all backslashes, but let the terminating double quotation
        # mark we add below be interpreted as a metacharacter
        result += (num_backslashes * 2) * '\\' + '"'
        return result

    def split_args(cmd: str) -> T.List[str]:
        """Split a Windows command line into arguments (CRT rules)."""
        result = []
        arg = ''
        num_backslashes = 0
        num_quotes = 0
        in_quotes = False
        for c in cmd:
            if c == '\\':
                num_backslashes += 1
            else:
                if c == '"' and not (num_backslashes % 2):
                    # unescaped quote, eat it
                    arg += (num_backslashes // 2) * '\\'
                    num_quotes += 1
                    in_quotes = not in_quotes
                elif c in _whitespace and not in_quotes:
                    if arg or num_quotes:
                        # reached the end of the argument
                        result.append(arg)
                        arg = ''
                        num_quotes = 0
                else:
                    if c == '"':
                        # An escaped quote: half the preceding backslashes
                        # escape each other, one escapes the quote itself.
                        num_backslashes = (num_backslashes - 1) // 2

                    arg += num_backslashes * '\\' + c

                num_backslashes = 0

        if arg or num_quotes:
            result.append(arg)
        return result
else:
    def quote_arg(arg: str) -> str:
        """Quote *arg* for a POSIX shell."""
        return shlex.quote(arg)

    def split_args(cmd: str) -> T.List[str]:
        """Split *cmd* using POSIX shell rules."""
        return shlex.split(cmd)
def join_args(args: T.Iterable[str]) -> str:
    """Quote every argument and join them into one command-line string."""
    quoted = (quote_arg(x) for x in args)
    return ' '.join(quoted)
def do_replacement(regex: T.Pattern[str], line: str, variable_format: str,
                   confdata: 'ConfigurationData') -> T.Tuple[str, T.Set[str]]:
    """Expand @var@ ('meson'/'cmake@') or ${var} ('cmake') tokens in *line*.

    Returns the expanded line and the set of variable names that were
    referenced but are missing from *confdata* (those expand to '').
    """
    missing_variables = set()
    if variable_format == 'cmake':
        start_tag = '${'
        backslash_tag = '\\${'
    else:
        assert variable_format in ['meson', 'cmake@']
        start_tag = '@'
        backslash_tag = '\\@'

    def variable_replace(match: T.Match[str]) -> str:
        # A run of backslash pairs before a tag: collapse each pair to one.
        if match.group(0).endswith('\\'):
            num_escapes = match.end(0) - match.start(0)
            return '\\' * (num_escapes // 2)
        # An escaped tag ('\@' or '\${') becomes a literal tag.
        elif match.group(0) == backslash_tag:
            return start_tag
        # An actual template variable to substitute.
        else:
            varname = match.group(1)
            var_str = ''
            if varname in confdata:
                (var, desc) = confdata.get(varname)
                if isinstance(var, str):
                    var_str = var
                elif isinstance(var, int):
                    var_str = str(var)
                else:
                    msg = 'Tried to replace variable {!r} value with ' \
                          'something other than a string or int: {!r}'
                    raise MesonException(msg.format(varname, var))
            else:
                missing_variables.add(varname)
            return var_str
    return re.sub(regex, variable_replace, line), missing_variables
def do_define(regex: T.Pattern[str], line: str, confdata: 'ConfigurationData', variable_format: str) -> str:
    """Expand one '#mesondefine'/'#cmakedefine' line to preprocessor text.

    Booleans become '#define NAME'/'#undef NAME'; a missing variable becomes
    a commented-out '/* #undef NAME */'; string values may themselves contain
    template variables, which are expanded with *regex*.
    """
    def get_cmake_define(line: str, confdata: 'ConfigurationData') -> str:
        # cmakedefine allows extra value tokens after the name; each token is
        # either a confdata variable (replaced by its value) or literal text.
        arr = line.split()
        define_value=[]
        for token in arr[2:]:
            try:
                (v, desc) = confdata.get(token)
                define_value += [str(v)]
            except KeyError:
                define_value += [token]
        return ' '.join(define_value)

    arr = line.split()
    if variable_format == 'meson' and len(arr) != 2:
        raise MesonException('#mesondefine does not contain exactly two tokens: %s' % line.strip())

    varname = arr[1]
    try:
        (v, desc) = confdata.get(varname)
    except KeyError:
        return '/* #undef %s */\n' % varname
    if isinstance(v, bool):
        if v:
            return '#define %s\n' % varname
        else:
            return '#undef %s\n' % varname
    elif isinstance(v, int):
        return '#define %s %d\n' % (varname, v)
    elif isinstance(v, str):
        if variable_format == 'meson':
            result = v
        else:
            result = get_cmake_define(line, confdata)
        result = '#define %s %s\n' % (varname, result)
        # The value string may reference other template variables.
        (result, missing_variable) = do_replacement(regex, result, variable_format, confdata)
        return result
    else:
        raise MesonException('#mesondefine argument "%s" is of unknown type.' % varname)
def get_variable_regex(variable_format: str = 'meson') -> T.Pattern[str]:
    """Compile the substitution regex for the given template format.

    'meson' and 'cmake@' both use @var@ tokens; 'cmake' uses ${var}.
    An unknown format name raises MesonException.
    """
    if variable_format == 'cmake':
        return re.compile(r'(?:\\\\)+(?=\\?\$)|\\\${|\${([-a-zA-Z0-9_]+)}')
    if variable_format in ('meson', 'cmake@'):
        return re.compile(r'(?:\\\\)+(?=\\?@)|\\@|@([-a-zA-Z0-9_]+)@')
    raise MesonException('Format "{}" not handled'.format(variable_format))
def do_conf_str (data: list, confdata: 'ConfigurationData', variable_format: str,
                 encoding: str = 'utf-8') -> T.Tuple[T.List[str],T.Set[str], bool]:
    """Apply configure_file() substitution over *data* (a list of lines).

    Returns (result_lines, missing_variable_names, confdata_useless) where
    confdata_useless is True when none of confdata's entries were needed.
    NOTE(review): the *encoding* parameter is unused in this function.
    """
    def line_is_valid(line : str, variable_format: str) -> bool:
        # Reject lines that use the define token of the *other* format.
        if variable_format == 'meson':
            if '#cmakedefine' in line:
                return False
        else:
            if '#mesondefine' in line:
                return False
        return True

    regex = get_variable_regex(variable_format)

    search_token = '#mesondefine'
    if variable_format != 'meson':
        search_token = '#cmakedefine'

    result = []
    missing_variables = set()
    # Track whether confdata contributed anything, so callers can warn when a
    # non-empty configuration object was never used.
    confdata_useless = not confdata.keys()
    for line in data:
        if line.startswith(search_token):
            confdata_useless = False
            line = do_define(regex, line, confdata, variable_format)
        else:
            if not line_is_valid(line,variable_format):
                raise MesonException('Format "{}" mismatched'.format(variable_format))
            line, missing = do_replacement(regex, line, variable_format, confdata)
            missing_variables.update(missing)
            if missing:
                confdata_useless = False
        result.append(line)
    return result, missing_variables, confdata_useless
def do_conf_file(src: str, dst: str, confdata: 'ConfigurationData', variable_format: str,
                 encoding: str = 'utf-8') -> T.Tuple[T.Set[str], bool]:
    """Configure file *src* into *dst*, substituting values from *confdata*.

    Writes through a '<dst>~' temporary and only replaces *dst* when the
    content actually changed, to avoid triggering needless rebuilds.
    """
    try:
        with open(src, encoding=encoding, newline='') as f:
            data = f.readlines()
    except Exception as e:
        raise MesonException('Could not read input file %s: %s' % (src, str(e)))

    (result, missing_variables, confdata_useless) = do_conf_str(data, confdata, variable_format, encoding)
    dst_tmp = dst + '~'
    try:
        with open(dst_tmp, 'w', encoding=encoding, newline='') as f:
            f.writelines(result)
    except Exception as e:
        raise MesonException('Could not write output file %s: %s' % (dst, str(e)))
    # Preserve the source file's permission bits on the generated file.
    shutil.copymode(src, dst_tmp)
    replace_if_different(dst, dst_tmp)
    return missing_variables, confdata_useless
CONF_C_PRELUDE = '''/*
* Autogenerated by the Meson build system.
* Do not edit, your changes will be lost.
*/
#pragma once
'''
CONF_NASM_PRELUDE = '''; Autogenerated by the Meson build system.
; Do not edit, your changes will be lost.
'''
def dump_conf_header(ofilename: str, cdata: 'ConfigurationData', output_format: str) -> None:
    """Write *cdata* out as a configuration header in 'c' or 'nasm' format.

    NOTE(review): any other *output_format* leaves 'prelude'/'prefix' unbound
    and raises NameError below; callers are expected to validate the format.
    """
    if output_format == 'c':
        prelude = CONF_C_PRELUDE
        prefix = '#'
    elif output_format == 'nasm':
        prelude = CONF_NASM_PRELUDE
        prefix = '%'

    # Write via a '~' temporary; replace_if_different avoids touching an
    # unchanged output (and its timestamp).
    ofilename_tmp = ofilename + '~'
    with open(ofilename_tmp, 'w', encoding='utf-8') as ofile:
        ofile.write(prelude)
        for k in sorted(cdata.keys()):
            (v, desc) = cdata.get(k)
            if desc:
                if output_format == 'c':
                    ofile.write('/* %s */\n' % desc)
                elif output_format == 'nasm':
                    for line in desc.split('\n'):
                        ofile.write('; %s\n' % line)
            if isinstance(v, bool):
                if v:
                    ofile.write('%sdefine %s\n\n' % (prefix, k))
                else:
                    ofile.write('%sundef %s\n\n' % (prefix, k))
            elif isinstance(v, (int, str)):
                ofile.write('%sdefine %s %s\n\n' % (prefix, k, v))
            else:
                raise MesonException('Unknown data type in configuration file entry: ' + k)
    replace_if_different(ofilename, ofilename_tmp)
def replace_if_different(dst: str, dst_tmp: str) -> None:
    """Move *dst_tmp* over *dst* only when their contents differ.

    Leaving an identical *dst* untouched preserves its timestamp and so
    avoids unnecessary rebuilds; the temporary is always consumed.
    """
    try:
        with open(dst, 'rb') as existing, open(dst_tmp, 'rb') as candidate:
            identical = existing.read() == candidate.read()
    except FileNotFoundError:
        # No destination yet: the candidate always wins.
        identical = False
    if identical:
        os.unlink(dst_tmp)
    else:
        os.replace(dst_tmp, dst)
@T.overload
def unholder(item: 'ObjectHolder[_T]') -> _T: ...

@T.overload
def unholder(item: T.List['ObjectHolder[_T]']) -> T.List[_T]: ...

@T.overload
def unholder(item: T.List[_T]) -> T.List[_T]: ...

@T.overload
def unholder(item: T.List[T.Union[_T, 'ObjectHolder[_T]']]) -> T.List[_T]: ...

def unholder(item): # type: ignore # TODO fix overload (somehow)
    """Return the object held by an ObjectHolder, or a list thereof.

    Plain (non-holder) values are returned unchanged; holders are detected
    duck-typed via the presence of a 'held_object' attribute.
    """
    if isinstance(item, list):
        return [i.held_object if hasattr(i, 'held_object') else i for i in item]
    if hasattr(item, 'held_object'):
        return item.held_object
    return item
def listify(item: T.Any, flatten: bool = True) -> T.List[T.Any]:
    """Return *item* as a list.

    A non-list value is wrapped in a single-element list.  Nested lists are
    flattened recursively unless *flatten* is False.
    """
    if not isinstance(item, list):
        return [item]
    out = []  # type: T.List[T.Any]
    for element in item:
        if flatten and isinstance(element, list):
            out.extend(listify(element, flatten=True))
        else:
            out.append(element)
    return out
def extract_as_list(dict_object: T.Dict[_T, _U], key: _T, pop: bool = False) -> T.List[_U]:
    """Extract the value stored at *key* as a flattened list.

    A missing key yields [].  With pop=True the key is also removed from
    the dictionary.
    """
    getter = dict_object.pop if pop else dict_object.get
    return listify(getter(key, []), flatten=True)
def typeslistify(item: 'T.Union[_T, T.Sequence[_T]]',
                 types: 'T.Union[T.Type[_T], T.Tuple[T.Type[_T]]]') -> T.List[_T]:
    """Ensure *item* is a list whose members are all instances of *types*.

    A single instance is wrapped in a list; anything else raises
    MesonException.  None entries are tolerated.
    """
    if isinstance(item, types):
        item = T.cast(T.List[_T], [item])
    if not isinstance(item, list):
        raise MesonException('Item must be a list or one of {!r}'.format(types))
    for member in item:
        if member is not None and not isinstance(member, types):
            raise MesonException('List item must be one of {!r}'.format(types))
    return item
def stringlistify(item: T.Union[T.Any, T.Sequence[T.Any]]) -> T.List[str]:
    """Shortcut for typeslistify() restricted to strings."""
    return typeslistify(item, str)
def expand_arguments(args: T.Iterable[str]) -> T.Optional[T.List[str]]:
    """Expand '@file' response-file arguments in *args*.

    Each argument starting with '@' is replaced by the whitespace-split
    contents of the named file.  Returns None (after logging the error)
    when a response file cannot be read.
    """
    expended_args = []  # type: T.List[str]
    for arg in args:
        if not arg.startswith('@'):
            expended_args.append(arg)
            continue

        args_file = arg[1:]
        try:
            with open(args_file) as f:
                extended_args = f.read().split()
            expended_args += extended_args
        except Exception as e:
            mlog.error('Expanding command line arguments:', args_file, 'not found')
            mlog.exception(e)
            return None
    return expended_args
def partition(pred: T.Callable[[_T], object], iterable: T.Iterator[_T]) -> T.Tuple[T.Iterator[_T], T.Iterator[_T]]:
    """Split *iterable* into (entries failing *pred*, entries passing *pred*).

    Both returned iterators are lazy and share one tee'd pass over the input.
    """
    failing_side, passing_side = tee(iterable)
    return filterfalse(pred, failing_side), filter(pred, passing_side)
def Popen_safe(args: T.List[str], write: T.Optional[str] = None,
               stdout: T.Union[T.BinaryIO, int] = subprocess.PIPE,
               stderr: T.Union[T.BinaryIO, int] = subprocess.PIPE,
               **kwargs: T.Any) -> T.Tuple[subprocess.Popen, str, str]:
    """Run *args* and return (process, stdout_text, stderr_text).

    On a non-UTF-8 console the legacy manual-decoding path is used instead
    of universal_newlines text mode.
    """
    import locale
    encoding = locale.getpreferredencoding()
    # Redirect stdin to DEVNULL otherwise the command run by us here might mess
    # up the console and ANSI colors will stop working on Windows.
    if 'stdin' not in kwargs:
        kwargs['stdin'] = subprocess.DEVNULL
    if not sys.stdout.encoding or encoding.upper() != 'UTF-8':
        p, o, e = Popen_safe_legacy(args, write=write, stdout=stdout, stderr=stderr, **kwargs)
    else:
        p = subprocess.Popen(args, universal_newlines=True, close_fds=False,
                             stdout=stdout, stderr=stderr, **kwargs)
        o, e = p.communicate(write)
    # Sometimes the command that we run will call another command which will be
    # without the above stdin workaround, so set the console mode again just in
    # case.
    mlog.setup_console()
    return p, o, e
def Popen_safe_legacy(args: T.List[str], write: T.Optional[str] = None,
                      stdout: T.Union[T.BinaryIO, int] = subprocess.PIPE,
                      stderr: T.Union[T.BinaryIO, int] = subprocess.PIPE,
                      **kwargs: T.Any) -> T.Tuple[subprocess.Popen, str, str]:
    """Popen variant for non-UTF-8 consoles: decode output manually.

    stdout/stderr bytes are decoded with the matching stream's encoding
    (falling back to the default codec), errors replaced, and CRLF
    normalized to LF.
    """
    p = subprocess.Popen(args, universal_newlines=False, close_fds=False,
                         stdout=stdout, stderr=stderr, **kwargs)
    input_ = None  # type: T.Optional[bytes]
    if write is not None:
        input_ = write.encode('utf-8')
    o, e = p.communicate(input_)
    if o is not None:
        if sys.stdout.encoding:
            o = o.decode(encoding=sys.stdout.encoding, errors='replace').replace('\r\n', '\n')
        else:
            o = o.decode(errors='replace').replace('\r\n', '\n')
    if e is not None:
        if sys.stderr.encoding:
            e = e.decode(encoding=sys.stderr.encoding, errors='replace').replace('\r\n', '\n')
        else:
            e = e.decode(errors='replace').replace('\r\n', '\n')
    return p, o, e
def iter_regexin_iter(regexiter: T.Iterable[str], initer: T.Iterable[str]) -> T.Optional[str]:
    """Search every regex against every string and return the first match.

    Patterns are tried in order, each against all candidates; entries of
    *initer* that are not strings are skipped.  Returns the matched text,
    or None when nothing matches.
    """
    for pattern in regexiter:
        for candidate in initer:
            if not isinstance(candidate, str):
                continue
            found = re.search(pattern, candidate)
            if found:
                return found.group()
    return None
def _substitute_values_check_errors(command: T.List[str], values: T.Dict[str, str]) -> None:
    """Validate template usage in *command* against the available *values*.

    Raises MesonException on any inconsistency; returns None when valid.
    """
    # Error checking
    inregex = ['@INPUT([0-9]+)?@', '@PLAINNAME@', '@BASENAME@']  # type: T.List[str]
    outregex = ['@OUTPUT([0-9]+)?@', '@OUTDIR@']  # type: T.List[str]
    if '@INPUT@' not in values:
        # Error out if any input-derived templates are present in the command
        match = iter_regexin_iter(inregex, command)
        if match:
            m = 'Command cannot have {!r}, since no input files were specified'
            raise MesonException(m.format(match))
    else:
        if len(values['@INPUT@']) > 1:
            # Error out if @PLAINNAME@ or @BASENAME@ is present in the command
            match = iter_regexin_iter(inregex[1:], command)
            if match:
                raise MesonException('Command cannot have {!r} when there is '
                                     'more than one input file'.format(match))
        # Error out if an invalid @INPUTnn@ template was specified
        for each in command:
            if not isinstance(each, str):
                continue
            match2 = re.search(inregex[0], each)
            if match2 and match2.group() not in values:
                m = 'Command cannot have {!r} since there are only {!r} inputs'
                raise MesonException(m.format(match2.group(), len(values['@INPUT@'])))
    if '@OUTPUT@' not in values:
        # Error out if any output-derived templates are present in the command
        match = iter_regexin_iter(outregex, command)
        if match:
            m = 'Command cannot have {!r} since there are no outputs'
            raise MesonException(m.format(match))
    else:
        # Error out if an invalid @OUTPUTnn@ template was specified
        for each in command:
            if not isinstance(each, str):
                continue
            match2 = re.search(outregex[0], each)
            if match2 and match2.group() not in values:
                m = 'Command cannot have {!r} since there are only {!r} outputs'
                raise MesonException(m.format(match2.group(), len(values['@OUTPUT@'])))
def substitute_values(command: T.List[str], values: T.Dict[str, str]) -> T.List[str]:
    """Substitute @TEMPLATE@ tokens in *command* using *values*.

    '@INPUT@'/'@OUTPUT@' expand to whole argument lists when they make up an
    entire argument; other templates are replaced in-string.  Validation is
    delegated to _substitute_values_check_errors, which raises on misuse.
    """
    # Error checking
    _substitute_values_check_errors(command, values)
    # Substitution
    outcmd = []  # type: T.List[str]
    rx_keys = [re.escape(key) for key in values if key not in ('@INPUT@', '@OUTPUT@')]
    value_rx = re.compile('|'.join(rx_keys)) if rx_keys else None
    for vv in command:
        if not isinstance(vv, str):
            outcmd.append(vv)
        elif '@INPUT@' in vv:
            inputs = values['@INPUT@']
            if vv == '@INPUT@':
                outcmd += inputs
            elif len(inputs) == 1:
                outcmd.append(vv.replace('@INPUT@', inputs[0]))
            else:
                raise MesonException("Command has '@INPUT@' as part of a "
                                     "string and more than one input file")
        elif '@OUTPUT@' in vv:
            outputs = values['@OUTPUT@']
            if vv == '@OUTPUT@':
                outcmd += outputs
            elif len(outputs) == 1:
                outcmd.append(vv.replace('@OUTPUT@', outputs[0]))
            else:
                raise MesonException("Command has '@OUTPUT@' as part of a "
                                     "string and more than one output file")
        # Append values that are exactly a template string.
        # This is faster than a string replace.
        elif vv in values:
            outcmd.append(values[vv])
        # Substitute everything else with replacement
        elif value_rx:
            outcmd.append(value_rx.sub(lambda m: values[m.group(0)], vv))
        else:
            outcmd.append(vv)
    return outcmd
def get_filenames_templates_dict(inputs: T.List[str], outputs: T.List[str]) -> T.Dict[str, T.Union[str, T.List[str]]]:
    """Build the @INPUT@/@OUTPUT@ template dictionary for command substitution.

    With exactly one input, @PLAINNAME@ and @BASENAME@ are also provided;
    @OUTDIR@ falls back to '.' because many external programs fail on empty
    arguments.
    """
    values = {}  # type: T.Dict[str, T.Union[str, T.List[str]]]
    if inputs:
        values['@INPUT@'] = inputs
        for index, path in enumerate(inputs):
            values['@INPUT{}@'.format(index)] = path
        if len(inputs) == 1:
            plain = os.path.basename(inputs[0])
            values['@PLAINNAME@'] = plain
            values['@BASENAME@'] = os.path.splitext(plain)[0]
    if outputs:
        values['@OUTPUT@'] = outputs
        for index, path in enumerate(outputs):
            values['@OUTPUT{}@'.format(index)] = path
        # All outputs share one directory.
        outdir = os.path.dirname(outputs[0])
        values['@OUTDIR@'] = outdir if outdir else '.'
    return values
def _make_tree_writable(topdir: str) -> None:
    # Ensure all files and directories under topdir are writable
    # (and readable) by owner.
    for d, _, files in os.walk(topdir):
        os.chmod(d, os.stat(d).st_mode | stat.S_IWRITE | stat.S_IREAD)
        for fname in files:
            fpath = os.path.join(d, fname)
            # Only chmod regular files; other entries (e.g. broken symlinks)
            # are skipped.
            if os.path.isfile(fpath):
                os.chmod(fpath, os.stat(fpath).st_mode | stat.S_IWRITE | stat.S_IREAD)
def windows_proof_rmtree(f: str) -> None:
    """rmtree that retries while another process holds a file open.

    On Windows if anyone is holding a file open you can't delete it, so
    retry with increasing delays; the final attempt outside the loop is
    allowed to raise.
    """
    delays = [0.1, 0.1, 0.2, 0.2, 0.2, 0.5, 0.5, 1, 1, 1, 1, 2]
    # Read-only files also make rmtree fail, so make the tree writable first.
    _make_tree_writable(f)
    for d in delays:
        try:
            shutil.rmtree(f)
            return
        except FileNotFoundError:
            return
        except OSError:
            time.sleep(d)
    # Try one last time and let any failure propagate.
    shutil.rmtree(f)
def windows_proof_rm(fpath: str) -> None:
    """Single-file counterpart of windows_proof_rmtree: unlink with retries."""
    # Make the file writable first; read-only files cannot be removed.
    if os.path.isfile(fpath):
        os.chmod(fpath, os.stat(fpath).st_mode | stat.S_IWRITE | stat.S_IREAD)
    delays = [0.1, 0.1, 0.2, 0.2, 0.2, 0.5, 0.5, 1, 1, 1, 1, 2]
    for d in delays:
        try:
            os.unlink(fpath)
            return
        except FileNotFoundError:
            return
        except OSError:
            time.sleep(d)
    # Final attempt outside the retry loop is allowed to raise.
    os.unlink(fpath)
class TemporaryDirectoryWinProof(TemporaryDirectory):
    """TemporaryDirectory whose cleanup falls back to windows_proof_rmtree
    when the plain removal fails (files still held open on Windows)."""

    def __exit__(self, exc: T.Any, value: T.Any, tb: T.Any) -> None:
        try:
            super().__exit__(exc, value, tb)
        except OSError:
            windows_proof_rmtree(self.name)

    def cleanup(self) -> None:
        try:
            super().cleanup()
        except OSError:
            windows_proof_rmtree(self.name)
def detect_subprojects(spdir_name: str, current_dir: str = '',
                       result: T.Optional[T.Dict[str, T.List[str]]] = None) -> T.Optional[T.Dict[str, T.List[str]]]:
    """Recursively collect subproject directories and .wrap files.

    Returns a mapping of subproject name -> list of paths that provide it.
    """
    if result is None:
        result = {}
    spdir = os.path.join(current_dir, spdir_name)
    if not os.path.exists(spdir):
        return result
    for trial in glob(os.path.join(spdir, '*')):
        basename = os.path.basename(trial)
        # FIX: compare the basename, not the full path. The previous check
        # ('trial == "packagecache"') could never match because 'trial' is a
        # joined path, so the download cache was reported as a subproject.
        if basename == 'packagecache':
            continue
        append_this = True
        if os.path.isdir(trial):
            # A directory is itself a subproject and may contain nested ones.
            detect_subprojects(spdir_name, trial, result)
        elif trial.endswith('.wrap') and os.path.isfile(trial):
            # Wrap files provide a subproject named after the file stem.
            basename = os.path.splitext(basename)[0]
        else:
            append_this = False
        if append_this:
            if basename in result:
                result[basename].append(trial)
            else:
                result[basename] = [trial]
    return result
def substring_is_in_list(substr: str, strlist: T.List[str]) -> bool:
    """Return True when *substr* occurs inside any element of *strlist*."""
    # any() with a generator short-circuits identically to the original loop.
    return any(substr in s for s in strlist)
class OrderedSet(T.MutableSet[_T]):
    """A set that preserves the order in which items are first added."""

    def __init__(self, iterable: T.Optional[T.Iterable[_T]] = None):
        # An OrderedDict gives ordered-set semantics for free: keys are
        # unique and iterate in insertion order; the values are unused.
        self.__container = collections.OrderedDict()  # type: T.MutableMapping[_T, None]
        if iterable:
            self.update(iterable)

    def __contains__(self, value: object) -> bool:
        return value in self.__container

    def __iter__(self) -> T.Iterator[_T]:
        return iter(self.__container.keys())

    def __len__(self) -> int:
        return len(self.__container)

    def __repr__(self) -> str:
        # Don't print 'OrderedSet("")' for an empty set.
        if self.__container:
            return 'OrderedSet("{}")'.format(
                '", "'.join(repr(e) for e in self.__container.keys()))
        return 'OrderedSet()'

    def __reversed__(self) -> T.Iterator[_T]:
        return reversed(self.__container.keys())

    def add(self, value: _T) -> None:
        self.__container[value] = None

    def discard(self, value: _T) -> None:
        if value in self.__container:
            del self.__container[value]

    def move_to_end(self, value: _T, last: bool = True) -> None:
        # Reposition an existing element at either end (OrderedDict API).
        self.__container.move_to_end(value, last)

    def pop(self, last: bool = True) -> _T:
        # Remove and return an element from the chosen end.
        item, _ = self.__container.popitem(last)
        return item

    def update(self, iterable: T.Iterable[_T]) -> None:
        for item in iterable:
            self.__container[item] = None

    def difference(self, set_: T.Union[T.Set[_T], 'OrderedSet[_T]']) -> 'OrderedSet[_T]':
        # Return a new OrderedSet, preserving this set's ordering.
        return type(self)(e for e in self if e not in set_)
class BuildDirLock:
    """Context manager holding an exclusive, non-blocking lock on a build dir.

    Uses fcntl.flock on POSIX and msvcrt.locking on Windows (whichever module
    imported successfully); acquisition failure raises MesonException.
    """

    def __init__(self, builddir: str) -> None:
        self.lockfilename = os.path.join(builddir, 'meson-private/meson.lock')

    def __enter__(self) -> None:
        self.lockfile = open(self.lockfilename, 'w')
        try:
            if have_fcntl:
                fcntl.flock(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
            elif have_msvcrt:
                msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)
        except (BlockingIOError, PermissionError):
            self.lockfile.close()
            raise MesonException('Some other Meson process is already using this build directory. Exiting.')

    def __exit__(self, *args: T.Any) -> None:
        if have_fcntl:
            fcntl.flock(self.lockfile, fcntl.LOCK_UN)
        elif have_msvcrt:
            msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)
        self.lockfile.close()
def relpath(path: str, start: str) -> str:
    """os.path.relpath that falls back to returning *path* unchanged."""
    # On Windows a relative path can't be evaluated for paths on two different
    # drives (i.e. c:\foo and f:\bar). The only thing left to do is to use the
    # original absolute path.
    try:
        return os.path.relpath(path, start)
    except (TypeError, ValueError):
        return path
def path_is_in_root(path: Path, root: Path, resolve: bool = False) -> bool:
    """Check whether *path* lies within the directory *root*.

    With resolve=True both paths are resolved before the containment test.
    """
    candidate = path.resolve() if resolve else path
    base = root.resolve() if resolve else root
    try:
        candidate.relative_to(base)
    except ValueError:
        return False
    return True
def relative_to_if_possible(path: Path, root: Path, resolve: bool = False) -> Path:
    """Return *path* relative to *root*, or *path* unchanged when impossible."""
    candidate, base = (path.resolve(), root.resolve()) if resolve else (path, root)
    try:
        return candidate.relative_to(base)
    except ValueError:
        # Not relative to root: hand back the original path untouched.
        return path
class LibType(IntEnum):
    """Enumeration of library linkage search preferences."""

    SHARED = 0
    STATIC = 1
    PREFER_SHARED = 2
    PREFER_STATIC = 3
class ProgressBarFallback:  # lgtm [py/iter-returns-non-self]
    """Minimal dot-printing progress bar used when tqdm is unavailable.

    It is not a true iterator (``__iter__`` returns the wrapped iterator),
    hence the suppressed lgtm warning above.
    """

    def __init__(self, iterable: T.Optional[T.Iterable[str]] = None, total: T.Optional[int] = None,
                 bar_type: T.Optional[str] = None, desc: T.Optional[str] = None):
        if iterable is not None:
            # Iterable mode: only wrap the iterator, no progress printing.
            self.iterable = iter(iterable)
            return
        self.total = total
        self.done = 0
        self.printed_dots = 0
        if self.total and bar_type == 'download':
            print('Download size:', self.total)
        if desc:
            print('{}: '.format(desc), end='')

    # Pretend to be an iterator when called as one and don't print any
    # progress.
    def __iter__(self) -> T.Iterator[str]:
        return self.iterable

    def __next__(self) -> str:
        return next(self.iterable)

    def print_dot(self) -> None:
        print('.', end='')
        sys.stdout.flush()
        self.printed_dots += 1

    def update(self, progress: int) -> None:
        self.done += progress
        if not self.total:
            # Unknown total: print one dot per update call.
            self.print_dot()
            return
        # Known total: keep the dot count at done/total tenths.
        ratio = int(self.done / self.total * 10)
        while self.printed_dots < ratio:
            self.print_dot()

    def close(self) -> None:
        print('')
# Select the progress-bar implementation: tqdm when importable, otherwise
# the dot-printing fallback defined above.
try:
    from tqdm import tqdm
except ImportError:
    # ideally we would use a typing.Protocol here, but it's part of typing_extensions until 3.8
    ProgressBar = ProgressBarFallback
else:
    class ProgressBarTqdm(tqdm):
        """tqdm progress bar with meson-specific defaults."""

        def __init__(self, *args: T.Any, bar_type: T.Optional[str] = None, **kwargs: T.Any) -> None:
            if bar_type == 'download':
                # Downloads report bytes and keep the finished bar on screen.
                kwargs.update({'unit': 'bytes', 'leave': True})
            else:
                kwargs.update({'leave': False})
            kwargs['ncols'] = 100
            super().__init__(*args, **kwargs)

    ProgressBar = ProgressBarTqdm
def get_wine_shortpath(winecmd: T.List[str], wine_paths: T.Sequence[str]) -> str:
    """Convert *wine_paths* to 8.3 short paths to keep WINEPATH short.

    A temporary batch file is executed under wine to query the short form of
    every path; on failure the original paths are joined instead.  Raises
    MesonException when the result still exceeds the 2048-character limit.
    """
    # De-duplicate while keeping order.
    wine_paths = list(OrderedSet(wine_paths))
    # The batch file is created in the current directory with a random name.
    getShortPathScript = '%s.bat' % str(uuid.uuid4()).lower()[:5]
    with open(getShortPathScript, mode='w') as f:
        f.write("@ECHO OFF\nfor %%x in (%*) do (\n echo|set /p=;%~sx\n)\n")
        f.flush()
    try:
        with open(os.devnull, 'w') as stderr:
            wine_path = subprocess.check_output(
                winecmd +
                ['cmd', '/C', getShortPathScript] + wine_paths,
                stderr=stderr).decode('utf-8')
    except subprocess.CalledProcessError as e:
        print("Could not get short paths: %s" % e)
        wine_path = ';'.join(wine_paths)
    finally:
        os.remove(getShortPathScript)
    if len(wine_path) > 2048:
        raise MesonException(
            'WINEPATH size {} > 2048'
            ' this will cause random failure.'.format(
                len(wine_path)))
    return wine_path.strip(';')
def run_once(func: T.Callable[..., _T]) -> T.Callable[..., _T]:
    """Decorator: call *func* at most once and memoize its first result.

    Subsequent calls return the cached value regardless of their arguments;
    falsy return values are cached as well.
    """
    cache = []  # holds at most one element: the memoized return value

    @wraps(func)
    def wrapper(*args: T.Any, **kwargs: T.Any) -> _T:
        if not cache:
            cache.append(func(*args, **kwargs))
        return cache[0]
    return wrapper
class OptionProxy(T.Generic[_T]):
    """Tiny wrapper exposing an override value through a UserOption-like
    '.value' attribute."""

    def __init__(self, value: _T):
        self.value = value
class OptionOverrideProxy(collections.abc.MutableMapping):
    """Mapping view over one or more option dictionaries with per-key
    overrides applied transparently on lookup."""

    def __init__(self, overrides: T.Dict[str, T.Any], *options: 'OptionDictType'):
        self.overrides = overrides.copy()
        self.options = {}
        # Later option dicts win on duplicate keys.
        for o in options:
            self.options.update(o)

    def __getitem__(self, key: str) -> T.Union['UserOption', OptionProxy]:
        if key in self.options:
            opt = self.options[key]
            if key in self.overrides:
                # Validate the override through the option's own validator
                # and hand back a proxy carrying the resulting value.
                return OptionProxy(opt.validate_value(self.overrides[key]))
            return opt
        raise KeyError('Option not found', key)

    def __setitem__(self, key: str, value: T.Union['UserOption', OptionProxy]) -> None:
        # Assigning stores only the raw value as an override.
        self.overrides[key] = value.value

    def __delitem__(self, key: str) -> None:
        del self.overrides[key]

    def __iter__(self) -> T.Iterator[str]:
        return iter(self.options)

    def __len__(self) -> int:
        return len(self.options)

    def copy(self) -> 'OptionOverrideProxy':
        return OptionOverrideProxy(self.overrides.copy(), self.options.copy())
| true | true |
f7377966f8d6321a7778ebfefbec0cfa0b77dcd0 | 675 | py | Python | django_crypto_trading_bot/users/tests/test_urls.py | chiragmatkar/django-crypto-trading-bot | 922018ead04cf38a123f95f82776173dcd7fa0de | [
"MIT"
] | 37 | 2019-12-21T10:04:25.000Z | 2022-03-15T03:33:44.000Z | django_crypto_trading_bot/users/tests/test_urls.py | chiragmatkar/django-crypto-trading-bot | 922018ead04cf38a123f95f82776173dcd7fa0de | [
"MIT"
] | 347 | 2019-12-19T00:12:56.000Z | 2022-02-04T09:29:27.000Z | django_crypto_trading_bot/users/tests/test_urls.py | chiragmatkar/django-crypto-trading-bot | 922018ead04cf38a123f95f82776173dcd7fa0de | [
"MIT"
] | 18 | 2020-06-28T18:08:44.000Z | 2022-01-30T06:43:52.000Z | import pytest
from django.urls import resolve, reverse
from django_crypto_trading_bot.users.models import User
pytestmark = pytest.mark.django_db
def test_detail(user: User):
    """The per-user detail URL reverses and resolves consistently."""
    expected_url = f"/users/{user.username}/"
    assert reverse("users:detail", kwargs={"username": user.username}) == expected_url
    assert resolve(expected_url).view_name == "users:detail"
def test_update():
    """The update URL reverses and resolves consistently."""
    url = "/users/~update/"
    assert reverse("users:update") == url
    assert resolve(url).view_name == "users:update"
def test_redirect():
    """The redirect URL reverses and resolves consistently."""
    url = "/users/~redirect/"
    assert reverse("users:redirect") == url
    assert resolve(url).view_name == "users:redirect"
| 27 | 74 | 0.684444 | import pytest
from django.urls import resolve, reverse
from django_crypto_trading_bot.users.models import User
pytestmark = pytest.mark.django_db
def test_detail(user: User):
assert (
reverse("users:detail", kwargs={"username": user.username})
== f"/users/{user.username}/"
)
assert resolve(f"/users/{user.username}/").view_name == "users:detail"
def test_update():
assert reverse("users:update") == "/users/~update/"
assert resolve("/users/~update/").view_name == "users:update"
def test_redirect():
assert reverse("users:redirect") == "/users/~redirect/"
assert resolve("/users/~redirect/").view_name == "users:redirect"
| true | true |
f73779832c3ac3fafccff7c9af2fd8479d4e46d0 | 4,069 | py | Python | tests/test_lens.py | XiaoshengLin/shadow3 | d007ae59a2038db4f9275f7bb026bd1b11549e5f | [
"MIT"
] | 15 | 2018-05-07T12:46:56.000Z | 2021-08-17T07:39:25.000Z | tests/test_lens.py | XiaoshengLin/shadow3 | d007ae59a2038db4f9275f7bb026bd1b11549e5f | [
"MIT"
] | 23 | 2018-04-18T13:49:56.000Z | 2022-03-05T18:37:59.000Z | tests/test_lens.py | XiaoshengLin/shadow3 | d007ae59a2038db4f9275f7bb026bd1b11549e5f | [
"MIT"
] | 11 | 2018-06-04T10:48:23.000Z | 2021-07-06T10:09:46.000Z | import Shadow
import numpy
# using mac oasys, for plots
# from srxraylib.plot.gol import set_qt
# set_qt()
#
# runs a two-surface refractive lens example for a source at 8 keV (PH1=8000 eV)
#
#
def run_example_lens(user_units_to_cm=1.0,npoint=5000,use_prerefl=0):
    """Trace a two-surface refractive lens system with shadow3.

    Parameters:
        user_units_to_cm: scale factor for user length units (1.0 = cm,
            0.1 = mm); all cm-based lengths are divided by it.
        npoint: NOTE(review): unused — the ray count is hard-coded via
            oe0.NPOINT below.
        use_prerefl: when truthy, refraction indices are read from the
            'prerefl.dat' preprocessor file instead of hard-coded values.

    Returns the traced Shadow.Beam after both optical elements.
    """
    #
    # Python script to run shadow3. Created automatically with ShadowTools.make_python_script_from_list().
    #
    #
    # initialize shadow3 source (oe0) and beam
    #
    beam = Shadow.Beam()
    oe0 = Shadow.Source()
    oe1 = Shadow.OE()
    oe2 = Shadow.OE()
    #
    # Define variables. See meaning of variables in:
    #  https://raw.githubusercontent.com/srio/shadow3/master/docs/source.nml
    #  https://raw.githubusercontent.com/srio/shadow3/master/docs/oe.nml
    #
    # Source: Gaussian divergence, 8 keV photons (PH1 is in eV).
    oe0.FDISTR = 3
    oe0.FSOURCE_DEPTH = 0
    oe0.F_PHOT = 0
    oe0.HDIV1 = 1.0
    oe0.HDIV2 = 1.0
    oe0.ISTAR1 = 0
    oe0.NPOINT = 500000
    oe0.PH1 = 8000.0
    oe0.SIGDIX = 2.49999994e-05
    oe0.SIGDIZ = 8.00000089e-06
    oe0.SIGMAX = 0.0122999996 / user_units_to_cm
    oe0.SIGMAZ = 0.000699999975 / user_units_to_cm
    oe0.VDIV1 = 1.0
    oe0.VDIV2 = 1.0
    # First refracting surface (cylindrical, external conic coefficients).
    oe1.CCC = numpy.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0])
    oe1.FCYL = 1
    if use_prerefl:
        oe1.F_R_IND = 2
        oe1.R_ATTENUATION_OBJ = 0.0
        oe1.R_IND_OBJ = 1.0
        oe1.FILE_R_IND_IMA = b'prerefl.dat'
    else:
        oe1.F_R_IND = 0
        oe1.R_IND_OBJ = 1.0
        oe1.R_IND_IMA = 0.9999923264754235
        oe1.R_ATTENUATION_OBJ = 0.0
        oe1.R_ATTENUATION_IMA = 150.727
    oe1.FMIRR = 10
    oe1.FWRITE = 3
    oe1.F_EXT = 1
    oe1.F_REFRAC = 1
    oe1.T_INCIDENCE = 0.0
    oe1.T_REFLECTION = 180.0
    oe1.T_SOURCE = 4700.9 / user_units_to_cm
    oe1.T_IMAGE = 0.01 / user_units_to_cm
    oe1.DUMMY = user_units_to_cm
    # Second refracting surface; conic coefficients scale with the unit
    # conversion (quadratic terms by the square of the factor).
    oe2.CCC = numpy.array([0.0, 292.67523*user_units_to_cm**2, 0.0045013279*user_units_to_cm**2, 0.0, 0.0, 0.0, 0.0, 0.0, 0.13418387*user_units_to_cm, 0.0])
    oe2.FCYL = 1
    if use_prerefl:
        oe2.F_R_IND = 1
        oe2.FILE_R_IND_OBJ = b'prerefl.dat'
        oe2.R_ATTENUATION_IMA = 0.0
        oe2.R_IND_IMA = 1.0
    else:
        oe2.F_R_IND = 0
        oe2.R_IND_OBJ = 0.9999923264754235
        oe2.R_IND_IMA = 1.0
        oe2.R_ATTENUATION_OBJ = 150.727
        oe2.R_ATTENUATION_IMA = 0.0
    oe2.FMIRR = 10
    oe2.FWRITE = 3
    oe2.F_EXT = 1
    oe2.F_REFRAC = 1
    oe2.T_INCIDENCE = 0.0
    oe2.T_REFLECTION = 180.0
    oe2.T_SOURCE = 0.0 / user_units_to_cm
    oe2.T_IMAGE = 30.065 / user_units_to_cm
    oe2.DUMMY = user_units_to_cm

    beam.genSource(oe0)

    #
    # run optical element 1
    #
    print("    Running optical element: %d"%(1))
    beam.traceOE(oe1,1)

    #
    # run optical element 2
    #
    print("    Running optical element: %d"%(2))
    beam.traceOE(oe2,2)

    # print(oe0.sourcinfo())
    # print(oe1.mirinfo())
    # print(oe2.mirinfo())

    return beam
def test_lens():
    """Run the lens example in mm units and report intensity/ray counts.

    NOTE(review): when cm_or_mm is neither 0 nor 1, the else branch only
    prints and leaves 'user_units_to_cm'/'title' unbound, so the later use
    would raise NameError; the hard-coded value avoids that here.
    """
    #
    # inputs
    #
    cm_or_mm = 1  # 0=using cm, 1=using mm
    use_prerefl = 0  # 0=No, 1=Yes

    if cm_or_mm == 0:
        user_units_to_cm = 1.0
        title = "Units are cm"
    elif cm_or_mm == 1:
        user_units_to_cm = 0.1
        title = "Units are mm"
    else:
        print("No way...")

    #
    # run prerefl
    #
    if use_prerefl:
        import xraylib
        symbol = "Si"
        density = xraylib.ElementDensity(xraylib.SymbolToAtomicNumber(symbol))
        Shadow.ShadowPreprocessorsXraylib.prerefl(interactive=0,SYMBOL=symbol,DENSITY=density,FILE="prerefl.dat",E_MIN=5000.0,E_MAX=15000.0,E_STEP=100.0)

    #
    # run SHADOW
    #
    beam = run_example_lens(user_units_to_cm=user_units_to_cm)

    tkt = Shadow.ShadowTools.plotxy(beam,3,6,ref=0,nolost=1,nbins=301,title="Z,Z' "+title)

    print("Intensity: %f "%tkt["intensity"])
    print("Number of rays: %d, number of GOOD rays: %d "%(beam.nrays(nolost=0),beam.nrays(nolost=1)))

    #numpy.testing.assert_almost_equal(sh100,xrl100,2)
# Run the example/test when executed as a script.
if __name__ == "__main__":
    test_lens()
| 24.076923 | 156 | 0.609978 | import Shadow
import numpy
def run_example_lens(user_units_to_cm=1.0,npoint=5000,use_prerefl=0):
beam = Shadow.Beam()
oe0 = Shadow.Source()
oe1 = Shadow.OE()
oe2 = Shadow.OE()
oe0.FDISTR = 3
oe0.FSOURCE_DEPTH = 0
oe0.F_PHOT = 0
oe0.HDIV1 = 1.0
oe0.HDIV2 = 1.0
oe0.ISTAR1 = 0
oe0.NPOINT = 500000
oe0.PH1 = 8000.0
oe0.SIGDIX = 2.49999994e-05
oe0.SIGDIZ = 8.00000089e-06
oe0.SIGMAX = 0.0122999996 / user_units_to_cm
oe0.SIGMAZ = 0.000699999975 / user_units_to_cm
oe0.VDIV1 = 1.0
oe0.VDIV2 = 1.0
oe1.CCC = numpy.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0])
oe1.FCYL = 1
if use_prerefl:
oe1.F_R_IND = 2
oe1.R_ATTENUATION_OBJ = 0.0
oe1.R_IND_OBJ = 1.0
oe1.FILE_R_IND_IMA = b'prerefl.dat'
else:
oe1.F_R_IND = 0
oe1.R_IND_OBJ = 1.0
oe1.R_IND_IMA = 0.9999923264754235
oe1.R_ATTENUATION_OBJ = 0.0
oe1.R_ATTENUATION_IMA = 150.727
oe1.FMIRR = 10
oe1.FWRITE = 3
oe1.F_EXT = 1
oe1.F_REFRAC = 1
oe1.T_INCIDENCE = 0.0
oe1.T_REFLECTION = 180.0
oe1.T_SOURCE = 4700.9 / user_units_to_cm
oe1.T_IMAGE = 0.01 / user_units_to_cm
oe1.DUMMY = user_units_to_cm
oe2.CCC = numpy.array([0.0, 292.67523*user_units_to_cm**2, 0.0045013279*user_units_to_cm**2, 0.0, 0.0, 0.0, 0.0, 0.0, 0.13418387*user_units_to_cm, 0.0])
oe2.FCYL = 1
if use_prerefl:
oe2.F_R_IND = 1
oe2.FILE_R_IND_OBJ = b'prerefl.dat'
oe2.R_ATTENUATION_IMA = 0.0
oe2.R_IND_IMA = 1.0
else:
oe2.F_R_IND = 0
oe2.R_IND_OBJ = 0.9999923264754235
oe2.R_IND_IMA = 1.0
oe2.R_ATTENUATION_OBJ = 150.727
oe2.R_ATTENUATION_IMA = 0.0
oe2.FMIRR = 10
oe2.FWRITE = 3
oe2.F_EXT = 1
oe2.F_REFRAC = 1
oe2.T_INCIDENCE = 0.0
oe2.T_REFLECTION = 180.0
oe2.T_SOURCE = 0.0 / user_units_to_cm
oe2.T_IMAGE = 30.065 / user_units_to_cm
oe2.DUMMY = user_units_to_cm
beam.genSource(oe0)
print(" Running optical element: %d"%(1))
beam.traceOE(oe1,1)
print(" Running optical element: %d"%(2))
beam.traceOE(oe2,2)
return beam
def test_lens():
cm_or_mm = 1
use_prerefl = 0
if cm_or_mm == 0:
user_units_to_cm = 1.0
title = "Units are cm"
elif cm_or_mm == 1:
user_units_to_cm = 0.1
title = "Units are mm"
else:
print("No way...")
if use_prerefl:
import xraylib
symbol = "Si"
density = xraylib.ElementDensity(xraylib.SymbolToAtomicNumber(symbol))
Shadow.ShadowPreprocessorsXraylib.prerefl(interactive=0,SYMBOL=symbol,DENSITY=density,FILE="prerefl.dat",E_MIN=5000.0,E_MAX=15000.0,E_STEP=100.0)
beam = run_example_lens(user_units_to_cm=user_units_to_cm)
tkt = Shadow.ShadowTools.plotxy(beam,3,6,ref=0,nolost=1,nbins=301,title="Z,Z' "+title)
print("Intensity: %f "%tkt["intensity"])
print("Number of rays: %d, number of GOOD rays: %d "%(beam.nrays(nolost=0),beam.nrays(nolost=1)))
#numpy.testing.assert_almost_equal(sh100,xrl100,2)
if __name__ == "__main__":
test_lens()
| true | true |
f7377a2e0b8f23ae0f0911d46b215b4162709f01 | 2,037 | py | Python | configs/m2det320_vgg.py | vuanh96/Thesis | c8a73e267f17e0e9d074c42b5b1c3b343ed86268 | [
"MIT"
] | null | null | null | configs/m2det320_vgg.py | vuanh96/Thesis | c8a73e267f17e0e9d074c42b5b1c3b343ed86268 | [
"MIT"
] | null | null | null | configs/m2det320_vgg.py | vuanh96/Thesis | c8a73e267f17e0e9d074c42b5b1c3b343ed86268 | [
"MIT"
] | null | null | null | model = dict(
type = 'm2det',
input_size = 320,
init_net = True,
pretrained = 'weights/vgg16_reducedfc.pth',
m2det_config = dict(
backbone = 'vgg16',
net_family = 'vgg', # vgg includes ['vgg16','vgg19'], res includes ['resnetxxx','resnextxxx']
base_out = [22,34], # [22,34] for vgg, [2,4] or [3,4] for res families
planes = 256,
num_levels = 8,
num_scales = 6,
sfam = False,
smooth = True,
num_classes = 81,
),
rgb_means = (104, 117, 123),
p = 0.6,
anchor_config = dict(
step_pattern = [8, 16, 32, 64, 107, 320],
size_pattern = [0.08, 0.15, 0.33, 0.51, 0.69, 0.87, 1.05],
),
save_eposhs = 10,
weights_save = 'weights/'
)
# Training hyperparameters for the M2Det detector.
train_cfg = dict(
    cuda = True,
    warmup = 5,
    per_batch_size = 16,
    # Piecewise-constant learning-rate schedule; stage boundaries in step_lr.
    lr = [0.004, 0.002, 0.0004, 0.00004, 0.000004],
    gamma = 0.1,
    end_lr = 1e-6,
    step_lr = dict(
        COCO = [90, 110, 130, 150, 160],
        VOC = [100, 150, 200, 250, 300], # unsolve
        ),
    print_epochs = 10,
    num_workers= 8,
    )

# Inference / evaluation settings.
test_cfg = dict(
    cuda = True,
    topk = 0,
    iou = 0.45,
    soft_nms = True,
    score_threshold = 0.1,
    keep_per_class = 50,
    save_folder = 'eval'
    )

# Multibox loss configuration (anchor matching and hard-negative mining).
loss = dict(overlap_thresh = 0.5,
            prior_for_matching = True,
            bkg_label = 0,
            neg_mining = True,
            neg_pos = 3,
            neg_overlap = 0.5,
            encode_target = False)

optimizer = dict(type='SGD', momentum=0.9, weight_decay=0.0005)

# Dataset splits used for training/evaluation on VOC and COCO.
dataset = dict(
    VOC = dict(
        train_sets = [('2007', 'trainval'), ('2012', 'trainval')],
        eval_sets = [('2007', 'test')],
        ),
    COCO = dict(
        train_sets = [('2014', 'train'), ('2014', 'valminusminival')],
        eval_sets = [('2014', 'minival')],
        test_sets = [('2015', 'test-dev')],
        )
    )

# Dataset roots, resolved relative to the user's home directory.
import os
home = os.path.expanduser("~")
VOCroot = os.path.join(home,"data/VOCdevkit/")
COCOroot = os.path.join(home,"data/coco/")
| 26.115385 | 101 | 0.523319 | model = dict(
type = 'm2det',
input_size = 320,
init_net = True,
pretrained = 'weights/vgg16_reducedfc.pth',
m2det_config = dict(
backbone = 'vgg16',
net_family = 'vgg',
base_out = [22,34],
planes = 256,
num_levels = 8,
num_scales = 6,
sfam = False,
smooth = True,
num_classes = 81,
),
rgb_means = (104, 117, 123),
p = 0.6,
anchor_config = dict(
step_pattern = [8, 16, 32, 64, 107, 320],
size_pattern = [0.08, 0.15, 0.33, 0.51, 0.69, 0.87, 1.05],
),
save_eposhs = 10,
weights_save = 'weights/'
)
train_cfg = dict(
cuda = True,
warmup = 5,
per_batch_size = 16,
lr = [0.004, 0.002, 0.0004, 0.00004, 0.000004],
gamma = 0.1,
end_lr = 1e-6,
step_lr = dict(
COCO = [90, 110, 130, 150, 160],
VOC = [100, 150, 200, 250, 300],
),
print_epochs = 10,
num_workers= 8,
)
test_cfg = dict(
cuda = True,
topk = 0,
iou = 0.45,
soft_nms = True,
score_threshold = 0.1,
keep_per_class = 50,
save_folder = 'eval'
)
loss = dict(overlap_thresh = 0.5,
prior_for_matching = True,
bkg_label = 0,
neg_mining = True,
neg_pos = 3,
neg_overlap = 0.5,
encode_target = False)
optimizer = dict(type='SGD', momentum=0.9, weight_decay=0.0005)
dataset = dict(
VOC = dict(
train_sets = [('2007', 'trainval'), ('2012', 'trainval')],
eval_sets = [('2007', 'test')],
),
COCO = dict(
train_sets = [('2014', 'train'), ('2014', 'valminusminival')],
eval_sets = [('2014', 'minival')],
test_sets = [('2015', 'test-dev')],
)
)
import os
home = os.path.expanduser("~")
VOCroot = os.path.join(home,"data/VOCdevkit/")
COCOroot = os.path.join(home,"data/coco/")
| true | true |
f7377a6dcfebbbb98df97e96fe224eddc62728ec | 4,735 | py | Python | eval_CIFAR10C.py | wiseodd/lula | a52b27c118ed136a62d8d7d1a898067d5ac685fb | [
"MIT"
] | 15 | 2021-06-07T14:25:35.000Z | 2021-12-26T16:41:01.000Z | eval_CIFAR10C.py | wiseodd/lula | a52b27c118ed136a62d8d7d1a898067d5ac685fb | [
"MIT"
] | 1 | 2022-03-11T01:03:12.000Z | 2022-03-11T01:03:12.000Z | eval_CIFAR10C.py | wiseodd/lula | a52b27c118ed136a62d8d7d1a898067d5ac685fb | [
"MIT"
] | 2 | 2021-06-19T05:41:05.000Z | 2022-03-23T11:51:06.000Z | import warnings
warnings.filterwarnings('ignore')
import torch
import numpy as np
from models import wrn
from laplace import kfla
import laplace.util as lutil
import util.evaluation as evalutil
import util.dataloaders as dl
import util.misc
from math import *
from tqdm import tqdm, trange
import argparse
import os, sys
from tqdm import tqdm, trange
from collections import defaultdict
import reluq
# CLI: which OOD dataset variant the LULA model was trained against.
parser = argparse.ArgumentParser()
parser.add_argument('--ood_dset', default='imagenet', choices=['imagenet', 'uniform', 'smooth'])
args = parser.parse_args()

# Fix seeds for reproducibility.
torch.manual_seed(9999)
np.random.seed(9999)

path = f'./pretrained_models'

# CIFAR-10 loaders: train (no augmentation) and a 2000-sample validation split.
train_loader = dl.CIFAR10(train=True, augm_flag=False)
val_loader, test_loader = dl.CIFAR10(train=False, val_size=2000)
print(len(train_loader.dataset), len(val_loader.dataset), len(test_loader.dataset))

num_classes = 10
data_shape = [3, 32, 32]

# Methods to evaluate and their display names (LULA is shown as LA-LULA).
method_types = ['MAP', 'DE', 'LA', 'LULA']
method_strs = ['MAP', 'DE', 'LA', 'LA-LULA']
distortion_types = dl.CorruptedCIFAR10Dataset.distortions
severity_levels = range(1, 6)  # 1 ... 5

# Result tables keyed as tab[method][distortion][severity] -> list of metrics.
tab_acc = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
tab_mmc = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
tab_ece = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
tab_brier = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
tab_loglik = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
def load_model(type='MAP'):
    """Load a pretrained WideResNet-16-4 for the requested method.

    type: one of 'MAP' (plain network), 'DE' (5-member deep ensemble),
    'LA' (Kronecker-factored Laplace), or 'LULA' (Laplace + LULA units).
    For 'LA'/'LULA' the Hessian is estimated on the train set before return.
    """
    def create_model():
        # Fresh WRN-16-4 on GPU; weights are loaded below.
        return wrn.WideResNet(16, 4, num_classes).cuda()

    if type == 'DE':
        # Deep ensemble: K independent copies sharing one checkpoint file.
        K = 5
        model = [create_model() for _ in range(K)]
        state_dicts = torch.load(f'./pretrained_models/CIFAR10_wrn_de.pt')

        for k in range(K):
            model[k].load_state_dict(state_dicts[k])
            model[k].eval()
    else:
        model = create_model()
        model.load_state_dict(torch.load(f'./pretrained_models/CIFAR10_wrn_plain.pt'))
        model.eval()

    # Additionally, load these for LULA
    if type == 'LULA':
        lula_params = torch.load(f'./pretrained_models/kfla/CIFAR10_wrn_lula_{args.ood_dset}.pt')

        if args.ood_dset == 'best':
            # 'best' checkpoints additionally record which OOD noise was used.
            state_dict, n_units, noise = lula_params
            print(f'LULA uses this OOD dataset: {noise}')
        else:
            state_dict, n_units = lula_params

        # NOTE(review): `lula` is never imported in this file (only `reluq`
        # is); this line raises NameError when type == 'LULA'. Presumably the
        # package was renamed lula -> reluq — confirm and fix the reference.
        model = lula.model.LULAModel_LastLayer(model, n_units).cuda()
        model.to_gpu()
        model.load_state_dict(state_dict)
        model.disable_grad_mask()
        model.unmask()
        model.eval()

    if type in ['LA', 'LULA']:
        # Prior variance from weight decay: var0 = 1 / (wd * N_train).
        var0 = torch.tensor(1/(5e-4*len(train_loader.dataset))).float().cuda()
        model = kfla.KFLA(model)
        model.get_hessian(train_loader)
        model.estimate_variance(var0)

    return model
def predict_(test_loader, model, model_name, params=None):
    """Run *model* over *test_loader* and return probabilities as a NumPy array.

    Dispatches on *model_name*: Laplace variants sample 20 times, deep
    ensembles average members, and MAP does a single deterministic pass.
    """
    assert model_name in method_types

    if model_name == 'DE':
        probs = evalutil.predict_ensemble(test_loader, model)
    elif model_name in ('LA', 'LULA'):
        probs = lutil.predict(test_loader, model, n_samples=20)
    else:  # plain MAP network
        probs = evalutil.predict(test_loader, model)

    return probs.cpu().numpy()
def evaluate(model_name):
    """Evaluate *model_name* on every CIFAR-10-C distortion and severity.

    Appends accuracy, MMC, ECE, Brier score, and log-likelihood into the
    module-level tab_* tables under the method's display name.
    """
    assert model_name in method_types

    model = load_model(model_name)
    params = None

    # Display name: LULA results are reported as 'LA-LULA'.
    if model_name == 'LULA':
        model_str = 'LA-LULA'
    else:
        model_str = model_name

    print(f'Processing for {model_str}')

    # For all distortions, for all severity levels (1..5).
    for d in tqdm(distortion_types, leave=False):
        for s in tqdm(severity_levels, leave=False):
            shift_loader = dl.CorruptedCIFAR10(d, s)
            py_shift = predict_(shift_loader, model, model_name, params=params)
            targets = torch.cat([y for x, y in shift_loader], dim=0).numpy()

            tab_acc[model_str][d][str(s)].append(evalutil.get_acc(py_shift, targets))
            tab_mmc[model_str][d][str(s)].append(evalutil.get_mmc(py_shift))
            tab_ece[model_str][d][str(s)].append(evalutil.get_calib(py_shift, targets)[0])
            tab_brier[model_str][d][str(s)].append(evalutil.get_brier(py_shift, targets))
            tab_loglik[model_str][d][str(s)].append(evalutil.get_loglik(py_shift, targets))
# Run every method; each call fills the shared tab_* tables.
evaluate('MAP')
evaluate('DE')
evaluate('LA')
evaluate('LULA')

# Save results (one .npy per metric, as plain nested dicts).
dir_name = f'results/CIFAR10C/'
dir_name += f'{args.ood_dset}'

if not os.path.exists(dir_name):
    os.makedirs(dir_name)

np.save(f'{dir_name}/mmcs', util.misc.ddict2dict(tab_mmc))
np.save(f'{dir_name}/accs', util.misc.ddict2dict(tab_acc))
np.save(f'{dir_name}/eces', util.misc.ddict2dict(tab_ece))
np.save(f'{dir_name}/briers', util.misc.ddict2dict(tab_brier))
np.save(f'{dir_name}/logliks', util.misc.ddict2dict(tab_loglik))
| 31.993243 | 97 | 0.685111 | import warnings
warnings.filterwarnings('ignore')
import torch
import numpy as np
from models import wrn
from laplace import kfla
import laplace.util as lutil
import util.evaluation as evalutil
import util.dataloaders as dl
import util.misc
from math import *
from tqdm import tqdm, trange
import argparse
import os, sys
from tqdm import tqdm, trange
from collections import defaultdict
import reluq
parser = argparse.ArgumentParser()
parser.add_argument('--ood_dset', default='imagenet', choices=['imagenet', 'uniform', 'smooth'])
args = parser.parse_args()
torch.manual_seed(9999)
np.random.seed(9999)
path = f'./pretrained_models'
train_loader = dl.CIFAR10(train=True, augm_flag=False)
val_loader, test_loader = dl.CIFAR10(train=False, val_size=2000)
print(len(train_loader.dataset), len(val_loader.dataset), len(test_loader.dataset))
num_classes = 10
data_shape = [3, 32, 32]
method_types = ['MAP', 'DE', 'LA', 'LULA']
method_strs = ['MAP', 'DE', 'LA', 'LA-LULA']
distortion_types = dl.CorruptedCIFAR10Dataset.distortions
severity_levels = range(1, 6)
tab_acc = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
tab_mmc = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
tab_ece = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
tab_brier = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
tab_loglik = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
def load_model(type='MAP'):
def create_model():
return wrn.WideResNet(16, 4, num_classes).cuda()
if type == 'DE':
K = 5
model = [create_model() for _ in range(K)]
state_dicts = torch.load(f'./pretrained_models/CIFAR10_wrn_de.pt')
for k in range(K):
model[k].load_state_dict(state_dicts[k])
model[k].eval()
else:
model = create_model()
model.load_state_dict(torch.load(f'./pretrained_models/CIFAR10_wrn_plain.pt'))
model.eval()
if type == 'LULA':
lula_params = torch.load(f'./pretrained_models/kfla/CIFAR10_wrn_lula_{args.ood_dset}.pt')
if args.ood_dset == 'best':
state_dict, n_units, noise = lula_params
print(f'LULA uses this OOD dataset: {noise}')
else:
state_dict, n_units = lula_params
model = lula.model.LULAModel_LastLayer(model, n_units).cuda()
model.to_gpu()
model.load_state_dict(state_dict)
model.disable_grad_mask()
model.unmask()
model.eval()
if type in ['LA', 'LULA']:
var0 = torch.tensor(1/(5e-4*len(train_loader.dataset))).float().cuda()
model = kfla.KFLA(model)
model.get_hessian(train_loader)
model.estimate_variance(var0)
return model
def predict_(test_loader, model, model_name, params=None):
assert model_name in method_types
if model_name in ['LA', 'LULA']:
py = lutil.predict(test_loader, model, n_samples=20)
elif model_name == 'DE':
py = evalutil.predict_ensemble(test_loader, model)
else:
py = evalutil.predict(test_loader, model)
return py.cpu().numpy()
def evaluate(model_name):
assert model_name in method_types
model = load_model(model_name)
params = None
if model_name == 'LULA':
model_str = 'LA-LULA'
else:
model_str = model_name
print(f'Processing for {model_str}')
for d in tqdm(distortion_types, leave=False):
for s in tqdm(severity_levels, leave=False):
shift_loader = dl.CorruptedCIFAR10(d, s)
py_shift = predict_(shift_loader, model, model_name, params=params)
targets = torch.cat([y for x, y in shift_loader], dim=0).numpy()
tab_acc[model_str][d][str(s)].append(evalutil.get_acc(py_shift, targets))
tab_mmc[model_str][d][str(s)].append(evalutil.get_mmc(py_shift))
tab_ece[model_str][d][str(s)].append(evalutil.get_calib(py_shift, targets)[0])
tab_brier[model_str][d][str(s)].append(evalutil.get_brier(py_shift, targets))
tab_loglik[model_str][d][str(s)].append(evalutil.get_loglik(py_shift, targets))
evaluate('MAP')
evaluate('DE')
evaluate('LA')
evaluate('LULA')
dir_name = f'results/CIFAR10C/'
dir_name += f'{args.ood_dset}'
if not os.path.exists(dir_name):
os.makedirs(dir_name)
np.save(f'{dir_name}/mmcs', util.misc.ddict2dict(tab_mmc))
np.save(f'{dir_name}/accs', util.misc.ddict2dict(tab_acc))
np.save(f'{dir_name}/eces', util.misc.ddict2dict(tab_ece))
np.save(f'{dir_name}/briers', util.misc.ddict2dict(tab_brier))
np.save(f'{dir_name}/logliks', util.misc.ddict2dict(tab_loglik))
| true | true |
f7377aedee49b0c8c4dc7b04e77135c1a1785005 | 4,072 | py | Python | slave/views.py | ngr/sm_00 | d8480cbbe88efd9947e2cb6d5a205f824cc5450d | [
"MIT"
] | null | null | null | slave/views.py | ngr/sm_00 | d8480cbbe88efd9947e2cb6d5a205f824cc5450d | [
"MIT"
] | 117 | 2015-01-01T06:20:05.000Z | 2016-01-06T08:16:20.000Z | slave/views.py | ngr/sm_00 | d8480cbbe88efd9947e2cb6d5a205f824cc5450d | [
"MIT"
] | null | null | null | ### Slave API Views ###
from django.db.models import F, Count
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework import generics
from rest_framework import permissions
from rest_framework import pagination
from slave.models import Slave
from slave.serializers import SlaveSerializer, SlaveDetailSerializer
from slave.helpers import filter_by_attribute, filter_by_location_region
class API_SlaveList(generics.ListAPIView):
    """List Slaves of the authenticated user.

    Supported query parameters:
        dead            -- if present, list dead slaves instead of alive ones
        location, sex   -- filter by the given attribute value
        region          -- filter by location region
        free            -- only slaves with no currently running assignment
        limit, offset   -- slice the result set for manual pagination
    """
    permission_classes = (permissions.IsAuthenticated,)
    serializer_class = SlaveSerializer

    def get_queryset(self):
        """Return the filtered, ordered queryset of the user's Slaves."""
        params = self.request.query_params

        # Authorization: restrict to the requesting user's slaves up front;
        # every later step only narrows this already-authorized set.
        slave_list = Slave.objects.filter(owner=self.request.user)

        # Alive slaves by default ('?dead' switches to deceased ones).
        # Reversed order because alive are much more frequent requests.
        if 'dead' in params:
            slave_list = slave_list.filter(date_death__isnull=False)
        else:
            slave_list = slave_list.filter(date_death__isnull=True)

        # Filter by whitelisted attributes.
        for attr in ('location', 'sex'):
            if attr in params:
                slave_list = filter_by_attribute(
                    slave_list,
                    attribute_name=attr,
                    attribute=params.get(attr))

        # Filter by Region.
        if 'region' in params:
            slave_list = filter_by_location_region(slave_list, params.get('region'))

        # Filter free Slaves: a slave is free when the number of assignments
        # equals the number of *released* assignments, i.e. nothing is
        # currently running.
        if 'free' in params:
            slave_list = (slave_list
                          .annotate(assgns=Count('assignments'))
                          .annotate(rel_assgns=Count('assignments__date_released'))
                          .filter(assgns=F('rel_assgns')))

        # Fixed ordering; could be taken from the request in the future.
        slave_list = slave_list.order_by('location__region', 'date_birth')

        # Manual limit/offset pagination. Replaces the previous
        # `'limit' in locals()` hack with an explicit optional value.
        if 'limit' in params or 'offset' in params:
            offset = int(params.get('offset', 0))
            limit = params.get('limit')
            if limit is not None:
                slave_list = slave_list[offset:offset + int(limit)]
            else:
                slave_list = slave_list[offset:]

        return slave_list
class API_SlaveDetail(APIView):
    """Retrieve details of a single Slave owned by the requesting user."""
    permission_classes = (permissions.IsAuthenticated,)
    serializer_class = SlaveDetailSerializer

    def get_object(self, pk):
        """Return the authorized Slave with pk, refreshing its skills.

        Raises Slave.DoesNotExist if the slave is missing or not owned
        by the current user.
        """
        slave = Slave.objects.get(pk=pk, owner=self.request.user)
        # Side effect: refreshes the slave's available skills for next time.
        slave.get_available_skills()
        return slave

    def get(self, request, pk, format=None):
        """Return the serialized Slave, or 404 on missing/unauthorized id."""
        try:
            slave = self.get_object(pk)
        except Slave.DoesNotExist:
            return Response("Authorization error or wrong Slave id.",
                            status=status.HTTP_404_NOT_FOUND)
        # Dropped a stray debug `print(slave)` left over from development.
        return Response(self.serializer_class(slave).data)
from rest_framework.response import Response
from rest_framework import status
from rest_framework import generics
from rest_framework import permissions
from rest_framework import pagination
from slave.models import Slave
from slave.serializers import SlaveSerializer, SlaveDetailSerializer
from slave.helpers import filter_by_attribute, filter_by_location_region
class API_SlaveList(generics.ListAPIView):
permission_classes = (permissions.IsAuthenticated,)
serializer_class = SlaveSerializer
def get_queryset(self):
slave_list = Slave.objects.filter(owner=self.request.user)
if not 'dead' in self.request.query_params:
slave_list = slave_list.filter(date_death__isnull=True)
else:
slave_list = slave_list.filter(date_death__isnull=False)
valid_params = ['location', 'sex']
for attr in valid_params:
if attr in self.request.query_params:
slave_list = filter_by_attribute(slave_list,\
attribute_name=attr,\
attribute=self.request.query_params.get(attr))
if 'region' in self.request.query_params:
slave_list = filter_by_location_region(slave_list, self.request.query_params.get('region'))
if 'free' in self.request.query_params:
slave_list = slave_list.annotate(assgns=Count('assignments')).\
annotate(rel_assgns=Count('assignments__date_released')).\
filter(assgns=F('rel_assgns'))
# Order By
# Should one day get the ordering from request.
slave_list = slave_list.order_by('location__region', 'date_birth')
# Paginate
# FIXME The build in "LimitOffsetPagination" didn't work
if any(q for q in self.request.query_params if q in ['limit', 'offset']):
if 'limit' in self.request.query_params:
limit = int(self.request.query_params.get('limit'))
offset = int(self.request.query_params.get('offset'))\
if 'offset' in self.request.query_params else 0
if 'limit' in locals():
slave_list = slave_list[offset:limit+offset]
else:
slave_list = slave_list[offset:]
return slave_list
class API_SlaveDetail(APIView):
permission_classes = (permissions.IsAuthenticated,)
serializer_class = SlaveDetailSerializer
def get_object(self, pk):
s = Slave.objects.get(pk=pk, owner=self.request.user)
s.get_available_skills()
return s
def get(self, request, pk, format=None):
try:
slave = self.get_object(pk)
except Slave.DoesNotExist:
return Response("Authorization error or wrong Slave id.",
status=status.HTTP_404_NOT_FOUND)
print(slave);
return Response(self.serializer_class(slave).data) | true | true |
f7377b01d87d9348bafc91d5bfbd84b56a3c9b9d | 9,447 | py | Python | tests/test_average_true_range_percent.py | dibyajyotidash/https-github.com-kylejusticemagnuson-pyti | 08532970f9d2b163f1223599e3ac80f6c51533e4 | [
"MIT"
] | 1 | 2019-04-16T01:27:45.000Z | 2019-04-16T01:27:45.000Z | tests/test_average_true_range_percent.py | dibyajyotidash/https-github.com-kylejusticemagnuson-pyti | 08532970f9d2b163f1223599e3ac80f6c51533e4 | [
"MIT"
] | null | null | null | tests/test_average_true_range_percent.py | dibyajyotidash/https-github.com-kylejusticemagnuson-pyti | 08532970f9d2b163f1223599e3ac80f6c51533e4 | [
"MIT"
] | 1 | 2020-06-30T22:37:44.000Z | 2020-06-30T22:37:44.000Z | from __future__ import absolute_import
import unittest
import numpy as np
from tests.sample_data import SampleData
from pyti import average_true_range_percent
class TestAverageTrueRangePercent(unittest.TestCase):
def setUp(self):
"""Create data to use for testing."""
self.close_data = SampleData().get_sample_close_data()
self.atr_period_6_expected = [np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan, 1.189235578986088,
1.1523554512220486, 1.1278525931922065, 1.1546224092640733,
1.1660380962839427, 1.465357027466913, 1.7831881894142803,
2.3561329806184581, 2.6708206943371162, 3.2466120755686263,
3.3784546239726194, 3.3564621491521369, 3.2791301980772869,
3.2778865256303997, 3.2875442760137483, 3.2810676552694984,
3.0012226151326331, 2.7233687488098011, 2.5062178027349966,
2.2774730211707057, 2.1306723573292059, 2.0231111698118602,
2.4639048069082961, 2.7153248878733027, 2.9415900735797162,
3.457810754140358, 4.0649377298167551, 4.6505410623216603,
4.8377005165939497, 4.7010401069556149, 4.5393599025684406,
4.3416370097985153, 4.1909513300536148, 4.2334214723046726,
4.2994054993189517, 4.244940888039114, 3.9739765293353395,
3.7984682769968288, 3.5821945386433534, 3.3670297979975179,
3.0716656116914933, 2.8662794746678979, 3.0289151976072608,
2.9969860158644486, 2.9760460695914741, 2.9289691288143112,
2.8058612079021295, 2.531556736800797, 2.4252616931651314,
2.2944282121480746, 2.1964244646895756, 2.1062390474088564,
2.0476395013091233, 1.7748361482743773, 1.558061265928161,
1.4856536290363038, 1.4497927574913438, 1.4352358669002241,
1.4299189209362686, 1.4620245560453282, 1.5102324721906708,
1.6037560819721852, 1.7746556607866535, 1.9035211913074188,
2.0074893237351557, 2.0029061884391339, 1.9371230450535861,
1.8548689401186171, 1.8355003791530897, 1.8003331288038178,
1.8931540501005137, 1.9806126301955329, 2.0822871750835494,
2.1587399768435973, 2.1858863683758751, 2.1992145124735707,
2.2042274600601361, 1.9903770888121171, 1.7884145439862129,
1.6114041799566228, 1.4484765868823961, 1.3246773786986321,
1.2742050031825125, 1.2954614666198452, 1.3205653492681662,
1.2899663246832471, 1.2549300623614186, 1.197182571361552,
1.1407924958934879, 1.1008057151615109, 1.0691600335312013,
0.96093180817465618, 0.8664228618513774, 0.96576000827190556,
1.0376009347982038, 1.0764636750622629, 1.0975646487156931,
1.2540789775805865, 1.8437302592780713, 2.3966411426581957,
2.9608753508340118, 3.423129872873973, 3.5883658875288575,
3.2621236585354922, 2.8752781621886734, 2.5375908547247414,
2.2497857207671332, 2.4554221153770741, 2.5315780677888444,
2.7585119334766222, 2.8337261439349244, 2.9745745527293854,
2.9297633150649793, 3.1503331074467429, 3.212529671651343,
3.3456605064982394, 3.2905345939522999]
self.atr_period_8_expected = [np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan,
1.4180925319661521, 1.6202706653923087, 1.841626084216712,
2.3148854933979575, 2.6901735299560841, 3.2282310244121613,
3.563083750221574, 3.7982037524565646, 3.7546634785721498,
3.7323220510040827, 3.6914812566023922, 3.6379421910796386,
3.5587099948976539, 3.5146074512555128, 3.3287123687477114,
3.0890446215528855, 2.8876354582425368, 2.7197748421358332,
2.9957755812395579, 3.0918928706039539, 3.2034849639589456,
3.5276212120453141, 3.966956483762083, 4.4299504506994678,
4.9204122583250323, 5.2383912644056707, 5.1851996464032979,
4.932742857755783, 4.7691968174243575, 4.7104555366635612,
4.7209742731687623, 4.7303883735587853, 4.8062829601892965,
4.8770730470382375, 4.786932261959409, 4.5940745527992979,
4.2712108603228502, 4.0426131987685459, 4.0492483355737523,
4.0367369950162013, 4.0266065104420958, 3.8442225538659289,
3.7281167468319927, 3.5969454050028618, 3.5246336505629778,
3.284458875889694, 3.0905268522063674, 2.9085376948755512,
2.7680000175672808, 2.6252958389957679, 2.5159579023784877,
2.3306698200373246, 2.197229157817036, 2.1031142412351342,
2.0360589047455808, 2.0179156129481299, 1.9962963663924316,
2.010951331437755, 2.0924060195314591, 2.1470029836845206,
2.1917407916945137, 2.3469908853240153, 2.4897011782528256,
2.4061646855957806, 2.3351333133106342, 2.230276487867163,
2.2408826576806198, 2.2629816480494824, 2.3143268379407238,
2.3476629061550369, 2.3674721414695301, 2.374550948419341,
2.352947385951865, 2.351910270923812, 2.3499424768917128,
2.1608124958654997, 1.9893774678680414, 1.851281037063653,
1.7449273052921825, 1.6992086324724789, 1.7010190503124114,
1.7165471586824528, 1.6847862993283729, 1.634206765480277,
1.6018940222973894, 1.5378290457744153, 1.4602893936269465,
1.3946452189065861, 1.3308265877060355, 1.3548427859710599,
1.3588718015896448, 1.3628282348400853, 1.366672736225832,
1.4652930518912579, 1.9954910663104219, 2.4924846364624273,
2.9075743465183366, 3.261046890754919, 3.5616237891147984,
3.90750519846529, 4.0167530144468557, 3.7380084557614692,
3.4172149917994443, 3.4633450788377629, 3.4315003707559737,
3.5392138594642271, 3.5549856117004808, 3.6469399018473312,
3.8534409701377266, 4.2338496480817174, 4.1988778641402176,
4.2434190220063472, 4.1710674834485006]
self.atr_period_10_expected = [np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, 2.7186399732723401, 3.1897417785103217,
3.5720003140004968, 3.8243907563025203, 3.9543745819821638,
4.14966762493796, 4.1567426181748255, 4.0952266131322199,
3.9774055776649679, 3.9005313973245412, 3.8740200478862201,
3.7608260441925863, 3.5854123216136311, 3.4311560353359063,
3.6153121546372664, 3.6024786252080254, 3.6288964245394193,
3.8135185789216313, 4.1288199078722743, 4.4946788398373076,
4.9058770233302322, 5.1603606395261004, 5.3529137258920727,
5.445088414102206, 5.3705628277712574, 5.2556944589666976,
5.2134596631940662, 5.1637259824115125, 5.1838624309051387,
5.2791302264027182, 5.4683909738033964, 5.5120358790272803,
5.2939445953362574, 5.1115798997200201, 5.0421404425151195,
4.9375845577616317, 4.8404418486438017, 4.7051409062810361,
4.6372061837056462, 4.432524791048948, 4.3857899092249255,
4.2640474851745038, 4.1058549092701808, 3.8557539531622109,
3.6529206682490591, 3.4440864860929081, 3.2769697024906264,
3.0924922423109011, 2.9510365216850634, 2.8116170875265167,
2.6711372457210696, 2.5919272122011878, 2.5577863035116755,
2.5192993537277801, 2.5383732677201931, 2.5348033661300149,
2.5286463785226361, 2.6292596767417997, 2.7167970175342209,
2.7550799044874084, 2.8604804641492096, 2.7345280164604793,
2.6868520965130984, 2.6599028030438059, 2.6726181141515055,
2.6688858692968398, 2.6540997015450611, 2.6291880295217065,
2.6002059836457319, 2.5941937083210851, 2.5700615442727237,
2.535308379517017, 2.5013101557717698, 2.339251886928428,
2.220771727829487, 2.1516737476710861, 2.1053990350980456,
2.0811011642229702, 2.0438008059797284, 2.0320916025384799,
2.0069284521724975, 1.9527756899172914, 1.9026101646939952,
1.8269161484400693, 1.7312268984763945, 1.7180069351756679,
1.6836641042431297, 1.6535040163297123, 1.6269651536833674,
1.6987531568883674, 2.136689374875739, 2.5300549519920104,
2.9404650888832795, 3.2991327451858363, 3.5341944123641578,
3.8180814412294013, 4.0387527580573863, 4.2834486458410144,
4.3625514336653879, 4.4307414948453614, 4.3447237418697808,
4.3526388061920898, 4.3079912365795749, 4.3466474073655306,
4.510844263106514, 4.8245544999792642, 4.9825704530372255,
5.1956243905869313, 5.0759722759771062]
def test_atrp_period_6(self):
period = 6
atrp = average_true_range_percent.average_true_range_percent(self.close_data, period)
np.testing.assert_array_equal(atrp, self.atr_period_6_expected)
def test_atrp_period_8(self):
period = 8
atrp = average_true_range_percent.average_true_range_percent(self.close_data, period)
np.testing.assert_array_equal(atrp, self.atr_period_8_expected)
def test_atrp_period_10(self):
period = 10
atrp = average_true_range_percent.average_true_range_percent(self.close_data, period)
np.testing.assert_array_equal(atrp, self.atr_period_10_expected)
def test_atrp_invalid_period(self):
period = 128
with self.assertRaises(Exception) as cm:
average_true_range_percent.average_true_range_percent(self.close_data, period)
expected = "Error: data_len < period"
self.assertEqual(str(cm.exception), expected)
| 59.791139 | 93 | 0.739812 | from __future__ import absolute_import
import unittest
import numpy as np
from tests.sample_data import SampleData
from pyti import average_true_range_percent
class TestAverageTrueRangePercent(unittest.TestCase):
def setUp(self):
self.close_data = SampleData().get_sample_close_data()
self.atr_period_6_expected = [np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan, 1.189235578986088,
1.1523554512220486, 1.1278525931922065, 1.1546224092640733,
1.1660380962839427, 1.465357027466913, 1.7831881894142803,
2.3561329806184581, 2.6708206943371162, 3.2466120755686263,
3.3784546239726194, 3.3564621491521369, 3.2791301980772869,
3.2778865256303997, 3.2875442760137483, 3.2810676552694984,
3.0012226151326331, 2.7233687488098011, 2.5062178027349966,
2.2774730211707057, 2.1306723573292059, 2.0231111698118602,
2.4639048069082961, 2.7153248878733027, 2.9415900735797162,
3.457810754140358, 4.0649377298167551, 4.6505410623216603,
4.8377005165939497, 4.7010401069556149, 4.5393599025684406,
4.3416370097985153, 4.1909513300536148, 4.2334214723046726,
4.2994054993189517, 4.244940888039114, 3.9739765293353395,
3.7984682769968288, 3.5821945386433534, 3.3670297979975179,
3.0716656116914933, 2.8662794746678979, 3.0289151976072608,
2.9969860158644486, 2.9760460695914741, 2.9289691288143112,
2.8058612079021295, 2.531556736800797, 2.4252616931651314,
2.2944282121480746, 2.1964244646895756, 2.1062390474088564,
2.0476395013091233, 1.7748361482743773, 1.558061265928161,
1.4856536290363038, 1.4497927574913438, 1.4352358669002241,
1.4299189209362686, 1.4620245560453282, 1.5102324721906708,
1.6037560819721852, 1.7746556607866535, 1.9035211913074188,
2.0074893237351557, 2.0029061884391339, 1.9371230450535861,
1.8548689401186171, 1.8355003791530897, 1.8003331288038178,
1.8931540501005137, 1.9806126301955329, 2.0822871750835494,
2.1587399768435973, 2.1858863683758751, 2.1992145124735707,
2.2042274600601361, 1.9903770888121171, 1.7884145439862129,
1.6114041799566228, 1.4484765868823961, 1.3246773786986321,
1.2742050031825125, 1.2954614666198452, 1.3205653492681662,
1.2899663246832471, 1.2549300623614186, 1.197182571361552,
1.1407924958934879, 1.1008057151615109, 1.0691600335312013,
0.96093180817465618, 0.8664228618513774, 0.96576000827190556,
1.0376009347982038, 1.0764636750622629, 1.0975646487156931,
1.2540789775805865, 1.8437302592780713, 2.3966411426581957,
2.9608753508340118, 3.423129872873973, 3.5883658875288575,
3.2621236585354922, 2.8752781621886734, 2.5375908547247414,
2.2497857207671332, 2.4554221153770741, 2.5315780677888444,
2.7585119334766222, 2.8337261439349244, 2.9745745527293854,
2.9297633150649793, 3.1503331074467429, 3.212529671651343,
3.3456605064982394, 3.2905345939522999]
self.atr_period_8_expected = [np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan,
1.4180925319661521, 1.6202706653923087, 1.841626084216712,
2.3148854933979575, 2.6901735299560841, 3.2282310244121613,
3.563083750221574, 3.7982037524565646, 3.7546634785721498,
3.7323220510040827, 3.6914812566023922, 3.6379421910796386,
3.5587099948976539, 3.5146074512555128, 3.3287123687477114,
3.0890446215528855, 2.8876354582425368, 2.7197748421358332,
2.9957755812395579, 3.0918928706039539, 3.2034849639589456,
3.5276212120453141, 3.966956483762083, 4.4299504506994678,
4.9204122583250323, 5.2383912644056707, 5.1851996464032979,
4.932742857755783, 4.7691968174243575, 4.7104555366635612,
4.7209742731687623, 4.7303883735587853, 4.8062829601892965,
4.8770730470382375, 4.786932261959409, 4.5940745527992979,
4.2712108603228502, 4.0426131987685459, 4.0492483355737523,
4.0367369950162013, 4.0266065104420958, 3.8442225538659289,
3.7281167468319927, 3.5969454050028618, 3.5246336505629778,
3.284458875889694, 3.0905268522063674, 2.9085376948755512,
2.7680000175672808, 2.6252958389957679, 2.5159579023784877,
2.3306698200373246, 2.197229157817036, 2.1031142412351342,
2.0360589047455808, 2.0179156129481299, 1.9962963663924316,
2.010951331437755, 2.0924060195314591, 2.1470029836845206,
2.1917407916945137, 2.3469908853240153, 2.4897011782528256,
2.4061646855957806, 2.3351333133106342, 2.230276487867163,
2.2408826576806198, 2.2629816480494824, 2.3143268379407238,
2.3476629061550369, 2.3674721414695301, 2.374550948419341,
2.352947385951865, 2.351910270923812, 2.3499424768917128,
2.1608124958654997, 1.9893774678680414, 1.851281037063653,
1.7449273052921825, 1.6992086324724789, 1.7010190503124114,
1.7165471586824528, 1.6847862993283729, 1.634206765480277,
1.6018940222973894, 1.5378290457744153, 1.4602893936269465,
1.3946452189065861, 1.3308265877060355, 1.3548427859710599,
1.3588718015896448, 1.3628282348400853, 1.366672736225832,
1.4652930518912579, 1.9954910663104219, 2.4924846364624273,
2.9075743465183366, 3.261046890754919, 3.5616237891147984,
3.90750519846529, 4.0167530144468557, 3.7380084557614692,
3.4172149917994443, 3.4633450788377629, 3.4315003707559737,
3.5392138594642271, 3.5549856117004808, 3.6469399018473312,
3.8534409701377266, 4.2338496480817174, 4.1988778641402176,
4.2434190220063472, 4.1710674834485006]
self.atr_period_10_expected = [np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, 2.7186399732723401, 3.1897417785103217,
3.5720003140004968, 3.8243907563025203, 3.9543745819821638,
4.14966762493796, 4.1567426181748255, 4.0952266131322199,
3.9774055776649679, 3.9005313973245412, 3.8740200478862201,
3.7608260441925863, 3.5854123216136311, 3.4311560353359063,
3.6153121546372664, 3.6024786252080254, 3.6288964245394193,
3.8135185789216313, 4.1288199078722743, 4.4946788398373076,
4.9058770233302322, 5.1603606395261004, 5.3529137258920727,
5.445088414102206, 5.3705628277712574, 5.2556944589666976,
5.2134596631940662, 5.1637259824115125, 5.1838624309051387,
5.2791302264027182, 5.4683909738033964, 5.5120358790272803,
5.2939445953362574, 5.1115798997200201, 5.0421404425151195,
4.9375845577616317, 4.8404418486438017, 4.7051409062810361,
4.6372061837056462, 4.432524791048948, 4.3857899092249255,
4.2640474851745038, 4.1058549092701808, 3.8557539531622109,
3.6529206682490591, 3.4440864860929081, 3.2769697024906264,
3.0924922423109011, 2.9510365216850634, 2.8116170875265167,
2.6711372457210696, 2.5919272122011878, 2.5577863035116755,
2.5192993537277801, 2.5383732677201931, 2.5348033661300149,
2.5286463785226361, 2.6292596767417997, 2.7167970175342209,
2.7550799044874084, 2.8604804641492096, 2.7345280164604793,
2.6868520965130984, 2.6599028030438059, 2.6726181141515055,
2.6688858692968398, 2.6540997015450611, 2.6291880295217065,
2.6002059836457319, 2.5941937083210851, 2.5700615442727237,
2.535308379517017, 2.5013101557717698, 2.339251886928428,
2.220771727829487, 2.1516737476710861, 2.1053990350980456,
2.0811011642229702, 2.0438008059797284, 2.0320916025384799,
2.0069284521724975, 1.9527756899172914, 1.9026101646939952,
1.8269161484400693, 1.7312268984763945, 1.7180069351756679,
1.6836641042431297, 1.6535040163297123, 1.6269651536833674,
1.6987531568883674, 2.136689374875739, 2.5300549519920104,
2.9404650888832795, 3.2991327451858363, 3.5341944123641578,
3.8180814412294013, 4.0387527580573863, 4.2834486458410144,
4.3625514336653879, 4.4307414948453614, 4.3447237418697808,
4.3526388061920898, 4.3079912365795749, 4.3466474073655306,
4.510844263106514, 4.8245544999792642, 4.9825704530372255,
5.1956243905869313, 5.0759722759771062]
def test_atrp_period_6(self):
period = 6
atrp = average_true_range_percent.average_true_range_percent(self.close_data, period)
np.testing.assert_array_equal(atrp, self.atr_period_6_expected)
def test_atrp_period_8(self):
period = 8
atrp = average_true_range_percent.average_true_range_percent(self.close_data, period)
np.testing.assert_array_equal(atrp, self.atr_period_8_expected)
def test_atrp_period_10(self):
period = 10
atrp = average_true_range_percent.average_true_range_percent(self.close_data, period)
np.testing.assert_array_equal(atrp, self.atr_period_10_expected)
def test_atrp_invalid_period(self):
period = 128
with self.assertRaises(Exception) as cm:
average_true_range_percent.average_true_range_percent(self.close_data, period)
expected = "Error: data_len < period"
self.assertEqual(str(cm.exception), expected)
| true | true |
f7377bc760f77463920a28f804df1c6357cb7b35 | 79,949 | py | Python | tests/unit/test_table.py | ryanyuan/python-bigtable | e55ca07561f9c946276f3bde599e69947769f560 | [
"Apache-2.0"
] | null | null | null | tests/unit/test_table.py | ryanyuan/python-bigtable | e55ca07561f9c946276f3bde599e69947769f560 | [
"Apache-2.0"
] | null | null | null | tests/unit/test_table.py | ryanyuan/python-bigtable | e55ca07561f9c946276f3bde599e69947769f560 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
from ._testing import _make_credentials
from google.api_core.exceptions import DeadlineExceeded
class Test___mutate_rows_request(unittest.TestCase):
    """Unit tests for the module-level ``_mutate_rows_request`` helper."""
    def _call_fut(self, table_name, rows):
        """Invoke the function-under-test (imported lazily per test style)."""
        from google.cloud.bigtable.table import _mutate_rows_request
        return _mutate_rows_request(table_name, rows)
    # Lower the mutation cap to 3 so four mutations trip the limit check.
    @mock.patch("google.cloud.bigtable.table._MAX_BULK_MUTATIONS", new=3)
    def test__mutate_rows_too_many_mutations(self):
        """More mutations than _MAX_BULK_MUTATIONS raises TooManyMutationsError."""
        from google.cloud.bigtable.row import DirectRow
        from google.cloud.bigtable.table import TooManyMutationsError
        table = mock.Mock(name="table", spec=["name"])
        table.name = "table"
        rows = [
            DirectRow(row_key=b"row_key", table=table),
            DirectRow(row_key=b"row_key_2", table=table),
        ]
        # Four set_cell mutations total (> patched cap of 3).
        rows[0].set_cell("cf1", b"c1", 1)
        rows[0].set_cell("cf1", b"c1", 2)
        rows[1].set_cell("cf1", b"c1", 3)
        rows[1].set_cell("cf1", b"c1", 4)
        with self.assertRaises(TooManyMutationsError):
            self._call_fut("table", rows)
    def test__mutate_rows_request(self):
        """The request pb contains one entry per row with its mutations."""
        from google.cloud.bigtable.row import DirectRow
        table = mock.Mock(name="table", spec=["name"])
        table.name = "table"
        rows = [
            DirectRow(row_key=b"row_key", table=table),
            DirectRow(row_key=b"row_key_2"),
        ]
        rows[0].set_cell("cf1", b"c1", b"1")
        rows[1].set_cell("cf1", b"c1", b"2")
        result = self._call_fut("table", rows)
        # Build the expected MutateRowsRequest by hand, entry by entry.
        # (_mutate_rows_request_pb is a module-level helper in this file.)
        expected_result = _mutate_rows_request_pb(table_name="table")
        entry1 = expected_result.entries.add()
        entry1.row_key = b"row_key"
        mutations1 = entry1.mutations.add()
        mutations1.set_cell.family_name = "cf1"
        mutations1.set_cell.column_qualifier = b"c1"
        # -1 timestamp means "server-assigned" in the Bigtable mutation API.
        mutations1.set_cell.timestamp_micros = -1
        mutations1.set_cell.value = b"1"
        entry2 = expected_result.entries.add()
        entry2.row_key = b"row_key_2"
        mutations2 = entry2.mutations.add()
        mutations2.set_cell.family_name = "cf1"
        mutations2.set_cell.column_qualifier = b"c1"
        mutations2.set_cell.timestamp_micros = -1
        mutations2.set_cell.value = b"2"
        self.assertEqual(result, expected_result)
class Test__check_row_table_name(unittest.TestCase):
    """Unit tests for the ``_check_row_table_name`` helper."""
    def _call_fut(self, table_name, row):
        """Invoke the function-under-test (imported lazily per test style)."""
        from google.cloud.bigtable.table import _check_row_table_name
        return _check_row_table_name(table_name, row)
    def test_wrong_table_name(self):
        """A row bound to a different table raises TableMismatchError."""
        from google.cloud.bigtable.row import DirectRow
        from google.cloud.bigtable.table import TableMismatchError
        mock_table = mock.Mock(name="table", spec=["name"])
        mock_table.name = "table"
        bound_row = DirectRow(row_key=b"row_key", table=mock_table)
        with self.assertRaises(TableMismatchError):
            self._call_fut("other_table", bound_row)
    def test_right_table_name(self):
        """A matching table name passes the check and returns a falsy value."""
        from google.cloud.bigtable.row import DirectRow
        mock_table = mock.Mock(name="table", spec=["name"])
        mock_table.name = "table"
        bound_row = DirectRow(row_key=b"row_key", table=mock_table)
        self.assertFalse(self._call_fut("table", bound_row))
class Test__check_row_type(unittest.TestCase):
    """Unit tests for the ``_check_row_type`` helper."""
    def _call_fut(self, row):
        """Invoke the function-under-test (imported lazily per test style)."""
        from google.cloud.bigtable.table import _check_row_type
        return _check_row_type(row)
    def test_test_wrong_row_type(self):
        """A non-direct row (ConditionalRow) is rejected with TypeError."""
        from google.cloud.bigtable.row import ConditionalRow
        conditional = ConditionalRow(row_key=b"row_key", table="table", filter_=None)
        with self.assertRaises(TypeError):
            self._call_fut(conditional)
    def test_right_row_type(self):
        """A DirectRow passes the check and a falsy value is returned."""
        from google.cloud.bigtable.row import DirectRow
        direct = DirectRow(row_key=b"row_key", table="table")
        self.assertFalse(self._call_fut(direct))
class TestTable(unittest.TestCase):
PROJECT_ID = "project-id"
INSTANCE_ID = "instance-id"
INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID
CLUSTER_ID = "cluster-id"
CLUSTER_NAME = INSTANCE_NAME + "/clusters/" + CLUSTER_ID
TABLE_ID = "table-id"
TABLE_NAME = INSTANCE_NAME + "/tables/" + TABLE_ID
BACKUP_ID = "backup-id"
BACKUP_NAME = CLUSTER_NAME + "/backups/" + BACKUP_ID
ROW_KEY = b"row-key"
ROW_KEY_1 = b"row-key-1"
ROW_KEY_2 = b"row-key-2"
ROW_KEY_3 = b"row-key-3"
FAMILY_NAME = u"family"
QUALIFIER = b"qualifier"
TIMESTAMP_MICROS = 100
VALUE = b"value"
_json_tests = None
    @staticmethod
    def _get_target_class():
        """Return the class under test (``Table``)."""
        from google.cloud.bigtable.table import Table
        return Table
    def _make_one(self, *args, **kwargs):
        """Construct an instance of the class under test."""
        return self._get_target_class()(*args, **kwargs)
    @staticmethod
    def _get_target_client_class():
        """Return the client class used to build test fixtures."""
        from google.cloud.bigtable.client import Client
        return Client
    def _make_client(self, *args, **kwargs):
        """Construct a ``Client`` fixture."""
        return self._get_target_client_class()(*args, **kwargs)
    def test_constructor_w_admin(self):
        """A table built from an admin client exposes id, client and full name."""
        credentials = _make_credentials()
        client = self._make_client(
            project=self.PROJECT_ID, credentials=credentials, admin=True
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)
        self.assertEqual(table.table_id, self.TABLE_ID)
        self.assertIs(table._instance._client, client)
        self.assertEqual(table.name, self.TABLE_NAME)
    def test_constructor_wo_admin(self):
        """Construction works identically when the client is non-admin."""
        credentials = _make_credentials()
        client = self._make_client(
            project=self.PROJECT_ID, credentials=credentials, admin=False
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)
        self.assertEqual(table.table_id, self.TABLE_ID)
        self.assertIs(table._instance._client, client)
        self.assertEqual(table.name, self.TABLE_NAME)
    def _row_methods_helper(self):
        """Build and return a ``(table, row_key)`` fixture pair."""
        client = self._make_client(
            project="project-id", credentials=_make_credentials(), admin=True
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)
        row_key = b"row_key"
        return table, row_key
    def test_row_factory_direct(self):
        """``table.row()`` with no extras returns a DirectRow bound to the table."""
        from google.cloud.bigtable.row import DirectRow
        table, row_key = self._row_methods_helper()
        row = table.row(row_key)
        self.assertIsInstance(row, DirectRow)
        self.assertEqual(row._row_key, row_key)
        self.assertEqual(row._table, table)
    def test_row_factory_conditional(self):
        """``table.row()`` with ``filter_`` returns a ConditionalRow."""
        from google.cloud.bigtable.row import ConditionalRow
        table, row_key = self._row_methods_helper()
        filter_ = object()
        row = table.row(row_key, filter_=filter_)
        self.assertIsInstance(row, ConditionalRow)
        self.assertEqual(row._row_key, row_key)
        self.assertEqual(row._table, table)
    def test_row_factory_append(self):
        """``table.row()`` with ``append=True`` returns an AppendRow."""
        from google.cloud.bigtable.row import AppendRow
        table, row_key = self._row_methods_helper()
        row = table.row(row_key, append=True)
        self.assertIsInstance(row, AppendRow)
        self.assertEqual(row._row_key, row_key)
        self.assertEqual(row._table, table)
    def test_direct_row(self):
        """The explicit ``direct_row`` factory returns a bound DirectRow."""
        from google.cloud.bigtable.row import DirectRow
        table, row_key = self._row_methods_helper()
        row = table.direct_row(row_key)
        self.assertIsInstance(row, DirectRow)
        self.assertEqual(row._row_key, row_key)
        self.assertEqual(row._table, table)
    def test_conditional_row(self):
        """The explicit ``conditional_row`` factory returns a ConditionalRow."""
        from google.cloud.bigtable.row import ConditionalRow
        table, row_key = self._row_methods_helper()
        filter_ = object()
        row = table.conditional_row(row_key, filter_=filter_)
        self.assertIsInstance(row, ConditionalRow)
        self.assertEqual(row._row_key, row_key)
        self.assertEqual(row._table, table)
    def test_append_row(self):
        """The explicit ``append_row`` factory returns an AppendRow."""
        from google.cloud.bigtable.row import AppendRow
        table, row_key = self._row_methods_helper()
        row = table.append_row(row_key)
        self.assertIsInstance(row, AppendRow)
        self.assertEqual(row._row_key, row_key)
        self.assertEqual(row._table, table)
def test_row_factory_failure(self):
table, row_key = self._row_methods_helper()
with self.assertRaises(ValueError):
table.row(row_key, filter_=object(), append=True)
    def test___eq__(self):
        """Two tables with the same id and instance compare equal."""
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table1 = self._make_one(self.TABLE_ID, instance)
        table2 = self._make_one(self.TABLE_ID, instance)
        self.assertEqual(table1, table2)
    def test___eq__type_differ(self):
        """A table never compares equal to an object of another type."""
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table1 = self._make_one(self.TABLE_ID, instance)
        table2 = object()
        self.assertNotEqual(table1, table2)
    def test___ne__same_value(self):
        """``!=`` between equal tables is False (checks __ne__ directly)."""
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table1 = self._make_one(self.TABLE_ID, instance)
        table2 = self._make_one(self.TABLE_ID, instance)
        comparison_val = table1 != table2
        self.assertFalse(comparison_val)
def test___ne__(self):
table1 = self._make_one("table_id1", None)
table2 = self._make_one("table_id2", None)
self.assertNotEqual(table1, table2)
def _create_test_helper(self, split_keys=[], column_families={}):
from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
from google.cloud.bigtable_admin_v2.proto import table_pb2
from google.cloud.bigtable_admin_v2.proto import (
bigtable_table_admin_pb2 as table_admin_messages_v2_pb2,
)
from google.cloud.bigtable.column_family import ColumnFamily
table_api = mock.create_autospec(
bigtable_table_admin_client.BigtableTableAdminClient
)
credentials = _make_credentials()
client = self._make_client(
project="project-id", credentials=credentials, admin=True
)
instance = client.instance(instance_id=self.INSTANCE_ID)
table = self._make_one(self.TABLE_ID, instance)
# Patch API calls
client._table_admin_client = table_api
# Perform the method and check the result.
table.create(column_families=column_families, initial_split_keys=split_keys)
families = {
id: ColumnFamily(id, self, rule).to_pb()
for (id, rule) in column_families.items()
}
split = table_admin_messages_v2_pb2.CreateTableRequest.Split
splits = [split(key=split_key) for split_key in split_keys]
table_api.create_table.assert_called_once_with(
parent=self.INSTANCE_NAME,
table=table_pb2.Table(column_families=families),
table_id=self.TABLE_ID,
initial_splits=splits,
)
    def test_create(self):
        """Create with defaults: no column families, no split keys."""
        self._create_test_helper()
    def test_create_with_families(self):
        """Create with one column family carrying a GC rule."""
        from google.cloud.bigtable.column_family import MaxVersionsGCRule
        families = {"family": MaxVersionsGCRule(5)}
        self._create_test_helper(column_families=families)
    def test_create_with_split_keys(self):
        """Create with explicit initial split keys."""
        self._create_test_helper(split_keys=[b"split1", b"split2", b"split3"])
    def test_exists(self):
        """``exists()`` maps get_table outcomes: found -> True, NotFound -> False, other errors propagate."""
        from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_data_v2_pb2
        from google.cloud.bigtable_admin_v2.proto import (
            bigtable_table_admin_pb2 as table_messages_v1_pb2,
        )
        from google.cloud.bigtable_admin_v2.gapic import (
            bigtable_instance_admin_client,
            bigtable_table_admin_client,
        )
        from google.api_core.exceptions import NotFound
        from google.api_core.exceptions import BadRequest
        table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
        instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient(
            mock.Mock()
        )
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        # Create response_pb
        response_pb = table_messages_v1_pb2.ListTablesResponse(
            tables=[table_data_v2_pb2.Table(name=self.TABLE_NAME)]
        )
        # Patch API calls
        client._table_admin_client = table_api
        client._instance_admin_client = instance_api
        bigtable_table_stub = client._table_admin_client.transport
        # Three successive get_table outcomes, consumed in order below.
        bigtable_table_stub.get_table.side_effect = [
            response_pb,
            NotFound("testing"),
            BadRequest("testing"),
        ]
        # Perform the method and check the result.
        table1 = instance.table(self.TABLE_ID)
        table2 = instance.table("table-id2")
        result = table1.exists()
        self.assertEqual(True, result)
        result = table2.exists()
        self.assertEqual(False, result)
        with self.assertRaises(BadRequest):
            table2.exists()
    def test_delete(self):
        """``delete()`` completes against the mocked admin API and returns None."""
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
        table_api = mock.create_autospec(
            bigtable_table_admin_client.BigtableTableAdminClient
        )
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)
        # Patch API calls
        client._table_admin_client = table_api
        # Create expected_result.
        expected_result = None  # delete() has no return value.
        # Perform the method and check the result.
        result = table.delete()
        self.assertEqual(result, expected_result)
    def _list_column_families_helper(self):
        """Stub get_table and verify the id -> ColumnFamily mapping returned."""
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
        table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)
        # Create response_pb
        COLUMN_FAMILY_ID = "foo"
        column_family = _ColumnFamilyPB()
        response_pb = _TablePB(column_families={COLUMN_FAMILY_ID: column_family})
        # Patch the stub used by the API method.
        client._table_admin_client = table_api
        bigtable_table_stub = client._table_admin_client.transport
        bigtable_table_stub.get_table.side_effect = [response_pb]
        # Create expected_result.
        expected_result = {COLUMN_FAMILY_ID: table.column_family(COLUMN_FAMILY_ID)}
        # Perform the method and check the result.
        result = table.list_column_families()
        self.assertEqual(result, expected_result)
    def test_list_column_families(self):
        """Delegates to the shared helper above."""
        self._list_column_families_helper()
    def test_get_cluster_states(self):
        """``get_cluster_states()`` maps each cluster id to its ClusterState."""
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
        from google.cloud.bigtable.enums import Table as enum_table
        from google.cloud.bigtable.table import ClusterState
        INITIALIZING = enum_table.ReplicationState.INITIALIZING
        PLANNED_MAINTENANCE = enum_table.ReplicationState.PLANNED_MAINTENANCE
        READY = enum_table.ReplicationState.READY
        table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)
        # One cluster per replication state under test.
        response_pb = _TablePB(
            cluster_states={
                "cluster-id1": _ClusterStatePB(INITIALIZING),
                "cluster-id2": _ClusterStatePB(PLANNED_MAINTENANCE),
                "cluster-id3": _ClusterStatePB(READY),
            }
        )
        # Patch the stub used by the API method.
        client._table_admin_client = table_api
        bigtable_table_stub = client._table_admin_client.transport
        bigtable_table_stub.get_table.side_effect = [response_pb]
        # build expected result
        expected_result = {
            u"cluster-id1": ClusterState(INITIALIZING),
            u"cluster-id2": ClusterState(PLANNED_MAINTENANCE),
            u"cluster-id3": ClusterState(READY),
        }
        # Perform the method and check the result.
        result = table.get_cluster_states()
        self.assertEqual(result, expected_result)
    def _read_row_helper(self, chunks, expected_result, app_profile_id=None):
        """Drive ``table.read_row`` against stubbed chunks.

        Monkeypatches ``_create_row_request`` to capture the kwargs the table
        builds, feeds ``chunks`` (or no responses when None) through a stubbed
        ``read_rows`` transport, then asserts both the returned row and the
        captured request.
        """
        from google.cloud._testing import _Monkey
        from google.cloud.bigtable import table as MUT
        from google.cloud.bigtable.row_set import RowSet
        from google.cloud.bigtable_v2.gapic import bigtable_client
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
        from google.cloud.bigtable.row_filters import RowSampleFilter
        data_api = bigtable_client.BigtableClient(mock.Mock())
        table_api = mock.create_autospec(
            bigtable_table_admin_client.BigtableTableAdminClient
        )
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance, app_profile_id=app_profile_id)
        # Create request_pb
        request_pb = object()  # Returned by our mock.
        mock_created = []
        def mock_create_row_request(table_name, **kwargs):
            # Record the call so the request construction can be asserted.
            mock_created.append((table_name, kwargs))
            return request_pb
        # Create response_iterator
        if chunks is None:
            response_iterator = iter(())  # no responses at all
        else:
            response_pb = _ReadRowsResponsePB(chunks=chunks)
            response_iterator = iter([response_pb])
        # Patch the stub used by the API method.
        client._table_data_client = data_api
        client._table_admin_client = table_api
        client._table_data_client.transport.read_rows = mock.Mock(
            side_effect=[response_iterator]
        )
        # Perform the method and check the result.
        filter_obj = RowSampleFilter(0.33)
        result = None
        with _Monkey(MUT, _create_row_request=mock_create_row_request):
            result = table.read_row(self.ROW_KEY, filter_=filter_obj)
        # read_row should request exactly the single key via a RowSet.
        row_set = RowSet()
        row_set.add_row_key(self.ROW_KEY)
        expected_request = [
            (
                table.name,
                {
                    "end_inclusive": False,
                    "row_set": row_set,
                    "app_profile_id": app_profile_id,
                    "end_key": None,
                    "limit": None,
                    "start_key": None,
                    "filter_": filter_obj,
                },
            )
        ]
        self.assertEqual(result, expected_result)
        self.assertEqual(mock_created, expected_request)
    def test_read_row_miss_no__responses(self):
        """No responses at all from the stream -> read_row returns None."""
        self._read_row_helper(None, None)
    def test_read_row_miss_no_chunks_in_response(self):
        """A response with zero chunks -> read_row returns None."""
        chunks = []
        self._read_row_helper(chunks, None)
    def test_read_row_complete(self):
        """A single committed chunk yields a fully-populated PartialRowData."""
        from google.cloud.bigtable.row_data import Cell
        from google.cloud.bigtable.row_data import PartialRowData
        app_profile_id = "app-profile-id"
        chunk = _ReadRowsResponseCellChunkPB(
            row_key=self.ROW_KEY,
            family_name=self.FAMILY_NAME,
            qualifier=self.QUALIFIER,
            timestamp_micros=self.TIMESTAMP_MICROS,
            value=self.VALUE,
            commit_row=True,
        )
        chunks = [chunk]
        # Build the expected row: family -> qualifier -> [cell].
        expected_result = PartialRowData(row_key=self.ROW_KEY)
        family = expected_result._cells.setdefault(self.FAMILY_NAME, {})
        column = family.setdefault(self.QUALIFIER, [])
        column.append(Cell.from_pb(chunk))
        self._read_row_helper(chunks, expected_result, app_profile_id)
    def test_read_row_more_than_one_row_returned(self):
        """Two committed rows for a single-row read raise ValueError."""
        app_profile_id = "app-profile-id"
        chunk_1 = _ReadRowsResponseCellChunkPB(
            row_key=self.ROW_KEY,
            family_name=self.FAMILY_NAME,
            qualifier=self.QUALIFIER,
            timestamp_micros=self.TIMESTAMP_MICROS,
            value=self.VALUE,
            commit_row=True,
        )
        chunk_2 = _ReadRowsResponseCellChunkPB(
            row_key=self.ROW_KEY_2,
            family_name=self.FAMILY_NAME,
            qualifier=self.QUALIFIER,
            timestamp_micros=self.TIMESTAMP_MICROS,
            value=self.VALUE,
            commit_row=True,
        )
        chunks = [chunk_1, chunk_2]
        with self.assertRaises(ValueError):
            self._read_row_helper(chunks, None, app_profile_id)
    def test_read_row_still_partial(self):
        """A chunk stream that never commits the row raises ValueError."""
        chunk = _ReadRowsResponseCellChunkPB(
            row_key=self.ROW_KEY,
            family_name=self.FAMILY_NAME,
            qualifier=self.QUALIFIER,
            timestamp_micros=self.TIMESTAMP_MICROS,
            value=self.VALUE,
        )
        # No "commit row".
        chunks = [chunk]
        with self.assertRaises(ValueError):
            self._read_row_helper(chunks, None)
    def test_mutate_rows(self):
        """``mutate_rows`` returns the statuses produced by the retry worker."""
        from google.rpc.status_pb2 import Status
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
        table_api = mock.create_autospec(
            bigtable_table_admin_client.BigtableTableAdminClient
        )
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        client._table_admin_client = table_api
        table = self._make_one(self.TABLE_ID, instance)
        # Replace the internal worker so no RPC machinery runs.
        response = [Status(code=0), Status(code=1)]
        mock_worker = mock.Mock(return_value=response)
        with mock.patch(
            "google.cloud.bigtable.table._RetryableMutateRowsWorker",
            new=mock.MagicMock(return_value=mock_worker),
        ):
            statuses = table.mutate_rows([mock.MagicMock(), mock.MagicMock()])
        result = [status.code for status in statuses]
        expected_result = [0, 1]
        self.assertEqual(result, expected_result)
    def test_read_rows(self):
        """``read_rows`` builds the request with all kwargs and wraps it in PartialRowsData."""
        from google.cloud._testing import _Monkey
        from google.cloud.bigtable.row_data import PartialRowsData
        from google.cloud.bigtable import table as MUT
        from google.cloud.bigtable_v2.gapic import bigtable_client
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
        data_api = bigtable_client.BigtableClient(mock.Mock())
        table_api = mock.create_autospec(
            bigtable_table_admin_client.BigtableTableAdminClient
        )
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        client._table_data_client = data_api
        client._table_admin_client = table_api
        instance = client.instance(instance_id=self.INSTANCE_ID)
        app_profile_id = "app-profile-id"
        table = self._make_one(self.TABLE_ID, instance, app_profile_id=app_profile_id)
        # Create request_pb
        request = retry = object()  # Returned by our mock.
        mock_created = []
        def mock_create_row_request(table_name, **kwargs):
            # Record the call so request construction can be asserted.
            mock_created.append((table_name, kwargs))
            return request
        # Create expected_result.
        expected_result = PartialRowsData(
            client._table_data_client.transport.read_rows, request, retry
        )
        # Perform the method and check the result.
        start_key = b"start-key"
        end_key = b"end-key"
        filter_obj = object()
        limit = 22
        with _Monkey(MUT, _create_row_request=mock_create_row_request):
            result = table.read_rows(
                start_key=start_key,
                end_key=end_key,
                filter_=filter_obj,
                limit=limit,
                retry=retry,
            )
        self.assertEqual(result.rows, expected_result.rows)
        self.assertEqual(result.retry, expected_result.retry)
        created_kwargs = {
            "start_key": start_key,
            "end_key": end_key,
            "filter_": filter_obj,
            "limit": limit,
            "end_inclusive": False,
            "app_profile_id": app_profile_id,
            "row_set": None,
        }
        self.assertEqual(mock_created, [(table.name, created_kwargs)])
    def test_read_retry_rows(self):
        """``read_rows`` with a retry recovers from two failing streams and
        still yields both rows (uses the module's mock iterator helpers)."""
        from google.cloud.bigtable_v2.gapic import bigtable_client
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
        from google.api_core import retry
        data_api = bigtable_client.BigtableClient(mock.Mock())
        table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        client._table_data_client = data_api
        client._table_admin_client = table_api
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)
        # Retry predicate is the module-level _read_rows_retry_exception.
        retry_read_rows = retry.Retry(predicate=_read_rows_retry_exception)
        # Create response_iterator
        chunk_1 = _ReadRowsResponseCellChunkPB(
            row_key=self.ROW_KEY_1,
            family_name=self.FAMILY_NAME,
            qualifier=self.QUALIFIER,
            timestamp_micros=self.TIMESTAMP_MICROS,
            value=self.VALUE,
            commit_row=True,
        )
        chunk_2 = _ReadRowsResponseCellChunkPB(
            row_key=self.ROW_KEY_2,
            family_name=self.FAMILY_NAME,
            qualifier=self.QUALIFIER,
            timestamp_micros=self.TIMESTAMP_MICROS,
            value=self.VALUE,
            commit_row=True,
        )
        response_1 = _ReadRowsResponseV2([chunk_1])
        response_2 = _ReadRowsResponseV2([chunk_2])
        response_failure_iterator_1 = _MockFailureIterator_1()
        response_failure_iterator_2 = _MockFailureIterator_2([response_1])
        response_iterator = _MockReadRowsIterator(response_2)
        # Patch the stub used by the API method: two failures, then success.
        client._table_data_client.transport.read_rows = mock.Mock(
            side_effect=[
                response_failure_iterator_1,
                response_failure_iterator_2,
                response_iterator,
            ]
        )
        rows = []
        for row in table.read_rows(
            start_key=self.ROW_KEY_1, end_key=self.ROW_KEY_2, retry=retry_read_rows
        ):
            rows.append(row)
        result = rows[1]
        self.assertEqual(result.row_key, self.ROW_KEY_2)
    def test_yield_retry_rows(self):
        """Deprecated ``yield_rows`` still retries through failures and emits
        exactly one DeprecationWarning."""
        from google.cloud.bigtable_v2.gapic import bigtable_client
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
        import warnings
        data_api = bigtable_client.BigtableClient(mock.Mock())
        table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        client._table_data_client = data_api
        client._table_admin_client = table_api
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)
        # Create response_iterator
        chunk_1 = _ReadRowsResponseCellChunkPB(
            row_key=self.ROW_KEY_1,
            family_name=self.FAMILY_NAME,
            qualifier=self.QUALIFIER,
            timestamp_micros=self.TIMESTAMP_MICROS,
            value=self.VALUE,
            commit_row=True,
        )
        chunk_2 = _ReadRowsResponseCellChunkPB(
            row_key=self.ROW_KEY_2,
            family_name=self.FAMILY_NAME,
            qualifier=self.QUALIFIER,
            timestamp_micros=self.TIMESTAMP_MICROS,
            value=self.VALUE,
            commit_row=True,
        )
        response_1 = _ReadRowsResponseV2([chunk_1])
        response_2 = _ReadRowsResponseV2([chunk_2])
        response_failure_iterator_1 = _MockFailureIterator_1()
        response_failure_iterator_2 = _MockFailureIterator_2([response_1])
        response_iterator = _MockReadRowsIterator(response_2)
        # Patch the stub used by the API method: two failures, then success.
        client._table_data_client.transport.read_rows = mock.Mock(
            side_effect=[
                response_failure_iterator_1,
                response_failure_iterator_2,
                response_iterator,
            ]
        )
        rows = []
        with warnings.catch_warnings(record=True) as warned:
            for row in table.yield_rows(
                start_key=self.ROW_KEY_1, end_key=self.ROW_KEY_2
            ):
                rows.append(row)
        self.assertEqual(len(warned), 1)
        self.assertIs(warned[0].category, DeprecationWarning)
        result = rows[1]
        self.assertEqual(result.row_key, self.ROW_KEY_2)
    def test_yield_rows_with_row_set(self):
        """yield_rows() should honor an explicit ``RowSet`` argument.

        The row set combines a row range (keys 1..2) with a single extra
        key (key 3); all three rows must come back in order. A single
        ``DeprecationWarning`` is also expected for the deprecated alias.
        """
        from google.cloud.bigtable_v2.gapic import bigtable_client
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
        from google.cloud.bigtable.row_set import RowSet
        from google.cloud.bigtable.row_set import RowRange
        import warnings
        data_api = bigtable_client.BigtableClient(mock.Mock())
        table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        client._table_data_client = data_api
        client._table_admin_client = table_api
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)
        # Create response_iterator
        chunk_1 = _ReadRowsResponseCellChunkPB(
            row_key=self.ROW_KEY_1,
            family_name=self.FAMILY_NAME,
            qualifier=self.QUALIFIER,
            timestamp_micros=self.TIMESTAMP_MICROS,
            value=self.VALUE,
            commit_row=True,
        )
        chunk_2 = _ReadRowsResponseCellChunkPB(
            row_key=self.ROW_KEY_2,
            family_name=self.FAMILY_NAME,
            qualifier=self.QUALIFIER,
            timestamp_micros=self.TIMESTAMP_MICROS,
            value=self.VALUE,
            commit_row=True,
        )
        chunk_3 = _ReadRowsResponseCellChunkPB(
            row_key=self.ROW_KEY_3,
            family_name=self.FAMILY_NAME,
            qualifier=self.QUALIFIER,
            timestamp_micros=self.TIMESTAMP_MICROS,
            value=self.VALUE,
            commit_row=True,
        )
        response_1 = _ReadRowsResponseV2([chunk_1])
        response_2 = _ReadRowsResponseV2([chunk_2])
        response_3 = _ReadRowsResponseV2([chunk_3])
        response_iterator = _MockReadRowsIterator(response_1, response_2, response_3)
        # Patch the stub used by the API method.
        client._table_data_client.transport.read_rows = mock.Mock(
            side_effect=[response_iterator]
        )
        rows = []
        row_set = RowSet()
        row_set.add_row_range(
            RowRange(start_key=self.ROW_KEY_1, end_key=self.ROW_KEY_2)
        )
        row_set.add_row_key(self.ROW_KEY_3)
        with warnings.catch_warnings(record=True) as warned:
            for row in table.yield_rows(row_set=row_set):
                rows.append(row)
        self.assertEqual(len(warned), 1)
        self.assertIs(warned[0].category, DeprecationWarning)
        self.assertEqual(rows[0].row_key, self.ROW_KEY_1)
        self.assertEqual(rows[1].row_key, self.ROW_KEY_2)
        self.assertEqual(rows[2].row_key, self.ROW_KEY_3)
def test_sample_row_keys(self):
from google.cloud.bigtable_v2.gapic import bigtable_client
from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
data_api = bigtable_client.BigtableClient(mock.Mock())
table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
credentials = _make_credentials()
client = self._make_client(
project="project-id", credentials=credentials, admin=True
)
client._table_data_client = data_api
client._table_admin_client = table_api
instance = client.instance(instance_id=self.INSTANCE_ID)
table = self._make_one(self.TABLE_ID, instance)
# Create response_iterator
response_iterator = object() # Just passed to a mock.
# Patch the stub used by the API method.
inner_api_calls = client._table_data_client._inner_api_calls
inner_api_calls["sample_row_keys"] = mock.Mock(
side_effect=[[response_iterator]]
)
# Create expected_result.
expected_result = response_iterator
# Perform the method and check the result.
result = table.sample_row_keys()
self.assertEqual(result[0], expected_result)
def test_truncate(self):
from google.cloud.bigtable_v2.gapic import bigtable_client
from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
data_api = mock.create_autospec(bigtable_client.BigtableClient)
table_api = mock.create_autospec(
bigtable_table_admin_client.BigtableTableAdminClient
)
credentials = _make_credentials()
client = self._make_client(
project="project-id", credentials=credentials, admin=True
)
client._table_data_client = data_api
client._table_admin_client = table_api
instance = client.instance(instance_id=self.INSTANCE_ID)
table = self._make_one(self.TABLE_ID, instance)
expected_result = None # truncate() has no return value.
with mock.patch("google.cloud.bigtable.table.Table.name", new=self.TABLE_NAME):
result = table.truncate()
table_api.drop_row_range.assert_called_once_with(
name=self.TABLE_NAME, delete_all_data_from_table=True
)
self.assertEqual(result, expected_result)
def test_truncate_w_timeout(self):
from google.cloud.bigtable_v2.gapic import bigtable_client
from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
data_api = mock.create_autospec(bigtable_client.BigtableClient)
table_api = mock.create_autospec(
bigtable_table_admin_client.BigtableTableAdminClient
)
credentials = _make_credentials()
client = self._make_client(
project="project-id", credentials=credentials, admin=True
)
client._table_data_client = data_api
client._table_admin_client = table_api
instance = client.instance(instance_id=self.INSTANCE_ID)
table = self._make_one(self.TABLE_ID, instance)
expected_result = None # truncate() has no return value.
timeout = 120
result = table.truncate(timeout=timeout)
self.assertEqual(result, expected_result)
def test_drop_by_prefix(self):
from google.cloud.bigtable_v2.gapic import bigtable_client
from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
data_api = mock.create_autospec(bigtable_client.BigtableClient)
table_api = mock.create_autospec(
bigtable_table_admin_client.BigtableTableAdminClient
)
credentials = _make_credentials()
client = self._make_client(
project="project-id", credentials=credentials, admin=True
)
client._table_data_client = data_api
client._table_admin_client = table_api
instance = client.instance(instance_id=self.INSTANCE_ID)
table = self._make_one(self.TABLE_ID, instance)
expected_result = None # drop_by_prefix() has no return value.
row_key_prefix = "row-key-prefix"
result = table.drop_by_prefix(row_key_prefix=row_key_prefix)
self.assertEqual(result, expected_result)
def test_drop_by_prefix_w_timeout(self):
from google.cloud.bigtable_v2.gapic import bigtable_client
from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
data_api = mock.create_autospec(bigtable_client.BigtableClient)
table_api = mock.create_autospec(
bigtable_table_admin_client.BigtableTableAdminClient
)
credentials = _make_credentials()
client = self._make_client(
project="project-id", credentials=credentials, admin=True
)
client._table_data_client = data_api
client._table_admin_client = table_api
instance = client.instance(instance_id=self.INSTANCE_ID)
table = self._make_one(self.TABLE_ID, instance)
expected_result = None # drop_by_prefix() has no return value.
row_key_prefix = "row-key-prefix"
timeout = 120
result = table.drop_by_prefix(row_key_prefix=row_key_prefix, timeout=timeout)
self.assertEqual(result, expected_result)
def test_mutations_batcher_factory(self):
flush_count = 100
max_row_bytes = 1000
table = self._make_one(self.TABLE_ID, None)
mutation_batcher = table.mutations_batcher(
flush_count=flush_count, max_row_bytes=max_row_bytes
)
self.assertEqual(mutation_batcher.table.table_id, self.TABLE_ID)
self.assertEqual(mutation_batcher.flush_count, flush_count)
self.assertEqual(mutation_batcher.max_row_bytes, max_row_bytes)
    def test_get_iam_policy(self):
        """get_iam_policy() should deserialize the policy pb from the API.

        Verifies the request is scoped to the table resource and that
        version, etag, and admin bindings survive the round trip.
        """
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
        from google.iam.v1 import policy_pb2
        from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)
        version = 1
        etag = b"etag_v1"
        members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"]
        bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": members}]
        iam_policy = policy_pb2.Policy(version=version, etag=etag, bindings=bindings)
        table_api = mock.create_autospec(
            bigtable_table_admin_client.BigtableTableAdminClient
        )
        client._table_admin_client = table_api
        table_api.get_iam_policy.return_value = iam_policy
        result = table.get_iam_policy()
        table_api.get_iam_policy.assert_called_once_with(resource=table.name)
        self.assertEqual(result.version, version)
        self.assertEqual(result.etag, etag)
        admins = result.bigtable_admins
        self.assertEqual(len(admins), len(members))
        # Membership order is not guaranteed; compare sorted sequences.
        for found, expected in zip(sorted(admins), sorted(members)):
            self.assertEqual(found, expected)
    def test_set_iam_policy(self):
        """set_iam_policy() should serialize a Policy and send it to the API.

        Builds a client-side ``Policy`` with user and service-account
        members, then checks the pb sent to the stub and the fields of the
        policy deserialized from the stubbed response.
        """
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
        from google.iam.v1 import policy_pb2
        from google.cloud.bigtable.policy import Policy
        from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)
        version = 1
        etag = b"etag_v1"
        members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"]
        bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": sorted(members)}]
        iam_policy_pb = policy_pb2.Policy(version=version, etag=etag, bindings=bindings)
        table_api = mock.create_autospec(
            bigtable_table_admin_client.BigtableTableAdminClient
        )
        client._table_admin_client = table_api
        table_api.set_iam_policy.return_value = iam_policy_pb
        iam_policy = Policy(etag=etag, version=version)
        iam_policy[BIGTABLE_ADMIN_ROLE] = [
            Policy.user("user1@test.com"),
            Policy.service_account("service_acc1@test.com"),
        ]
        result = table.set_iam_policy(iam_policy)
        table_api.set_iam_policy.assert_called_once_with(
            resource=table.name, policy=iam_policy_pb
        )
        self.assertEqual(result.version, version)
        self.assertEqual(result.etag, etag)
        admins = result.bigtable_admins
        self.assertEqual(len(admins), len(members))
        # Membership order is not guaranteed; compare sorted sequences.
        for found, expected in zip(sorted(admins), sorted(members)):
            self.assertEqual(found, expected)
def test_test_iam_permissions(self):
from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
from google.iam.v1 import iam_policy_pb2
credentials = _make_credentials()
client = self._make_client(
project="project-id", credentials=credentials, admin=True
)
instance = client.instance(instance_id=self.INSTANCE_ID)
table = self._make_one(self.TABLE_ID, instance)
permissions = ["bigtable.tables.mutateRows", "bigtable.tables.readRows"]
response = iam_policy_pb2.TestIamPermissionsResponse(permissions=permissions)
table_api = mock.create_autospec(
bigtable_table_admin_client.BigtableTableAdminClient
)
table_api.test_iam_permissions.return_value = response
client._table_admin_client = table_api
result = table.test_iam_permissions(permissions)
self.assertEqual(result, permissions)
table_api.test_iam_permissions.assert_called_once_with(
resource=table.name, permissions=permissions
)
    def test_backup_factory_defaults(self):
        """table.backup() with only a backup id should yield a mostly-empty Backup."""
        from google.cloud.bigtable.backup import Backup
        # NOTE(review): in this test class ``_make_one`` builds a *Table*,
        # so ``instance`` here is a Table standing in for an Instance.
        # Backup only stores the reference, so the assertions still hold —
        # confirm this stand-in is intentional.
        instance = self._make_one(self.INSTANCE_ID, None)
        table = self._make_one(self.TABLE_ID, instance)
        backup = table.backup(self.BACKUP_ID)
        self.assertIsInstance(backup, Backup)
        self.assertEqual(backup.backup_id, self.BACKUP_ID)
        self.assertIs(backup._instance, instance)
        self.assertIsNone(backup._cluster)
        self.assertEqual(backup.table_id, self.TABLE_ID)
        # With no explicit arguments, all optional state stays unset.
        self.assertIsNone(backup._expire_time)
        self.assertIsNone(backup._parent)
        self.assertIsNone(backup._source_table)
        self.assertIsNone(backup._start_time)
        self.assertIsNone(backup._end_time)
        self.assertIsNone(backup._size_bytes)
        self.assertIsNone(backup._state)
def test_backup_factory_non_defaults(self):
import datetime
from google.cloud._helpers import UTC
from google.cloud.bigtable.backup import Backup
instance = self._make_one(self.INSTANCE_ID, None)
table = self._make_one(self.TABLE_ID, instance)
timestamp = datetime.datetime.utcnow().replace(tzinfo=UTC)
backup = table.backup(
self.BACKUP_ID, cluster_id=self.CLUSTER_ID, expire_time=timestamp,
)
self.assertIsInstance(backup, Backup)
self.assertEqual(backup.backup_id, self.BACKUP_ID)
self.assertIs(backup._instance, instance)
self.assertEqual(backup.backup_id, self.BACKUP_ID)
self.assertIs(backup._cluster, self.CLUSTER_ID)
self.assertEqual(backup.table_id, self.TABLE_ID)
self.assertEqual(backup._expire_time, timestamp)
self.assertIsNone(backup._start_time)
self.assertIsNone(backup._end_time)
self.assertIsNone(backup._size_bytes)
self.assertIsNone(backup._state)
    def _list_backups_helper(self, cluster_id=None, filter_=None, **kwargs):
        """Exercise ``table.list_backups`` and verify the request it builds.

        :param cluster_id: optional cluster to scope the listing to; when
            omitted, the wildcard cluster ``-`` is expected in the request.
        :param filter_: optional extra filter clause; it is ANDed with the
            implicit ``source_table`` filter.
        :param kwargs: forwarded verbatim to ``list_backups`` (e.g.
            ``order_by``, ``page_size``) and expected in the request pb.
        """
        from google.cloud.bigtable_admin_v2.gapic import (
            bigtable_instance_admin_client,
            bigtable_table_admin_client,
        )
        from google.cloud.bigtable_admin_v2.proto import (
            bigtable_table_admin_pb2,
            table_pb2,
        )
        from google.cloud.bigtable.backup import Backup
        instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient
        table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
        client = self._make_client(
            project=self.PROJECT_ID, credentials=_make_credentials(), admin=True
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)
        client._instance_admin_client = instance_api
        client._table_admin_client = table_api
        parent = self.INSTANCE_NAME + "/clusters/cluster"
        # Canned response with three backups under the parent cluster.
        backups_pb = bigtable_table_admin_pb2.ListBackupsResponse(
            backups=[
                table_pb2.Backup(name=parent + "/backups/op1"),
                table_pb2.Backup(name=parent + "/backups/op2"),
                table_pb2.Backup(name=parent + "/backups/op3"),
            ]
        )
        api = table_api._inner_api_calls["list_backups"] = mock.Mock(
            return_value=backups_pb
        )
        # The implicit filter always restricts results to this table.
        backups_filter = "source_table:{}".format(self.TABLE_NAME)
        if filter_:
            backups_filter = "({}) AND ({})".format(backups_filter, filter_)
        backups = table.list_backups(cluster_id=cluster_id, filter_=filter_, **kwargs)
        for backup in backups:
            self.assertIsInstance(backup, Backup)
        if not cluster_id:
            cluster_id = "-"
        parent = "{}/clusters/{}".format(self.INSTANCE_NAME, cluster_id)
        expected_metadata = [
            ("x-goog-request-params", "parent={}".format(parent)),
        ]
        api.assert_called_once_with(
            bigtable_table_admin_pb2.ListBackupsRequest(
                parent=parent, filter=backups_filter, **kwargs
            ),
            retry=mock.ANY,
            timeout=mock.ANY,
            metadata=expected_metadata,
        )
    def test_list_backups_defaults(self):
        """list_backups() with no arguments should target the wildcard cluster."""
        self._list_backups_helper()
    def test_list_backups_w_options(self):
        """list_backups() should forward cluster, filter, ordering, and paging."""
        self._list_backups_helper(
            cluster_id="cluster", filter_="filter", order_by="order_by", page_size=10
        )
    def _restore_helper(self, backup_name=None):
        """Exercise ``table.restore`` by backup id or fully-qualified name.

        :param backup_name: when truthy, restore() is called with the
            ``backup_name`` keyword; otherwise with cluster id + backup id.
            Both paths must produce the same ``restore_table`` request.
        """
        from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient
        from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client
        from google.cloud.bigtable.instance import Instance
        # Sentinel long-running operation future returned by the stub.
        op_future = object()
        instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient
        client = mock.Mock(project=self.PROJECT_ID, instance_admin_client=instance_api)
        instance = Instance(self.INSTANCE_ID, client=client)
        table = self._make_one(self.TABLE_ID, instance)
        api = client.table_admin_client = mock.create_autospec(
            BigtableTableAdminClient, instance=True
        )
        api.restore_table.return_value = op_future
        if backup_name:
            future = table.restore(self.TABLE_ID, backup_name=self.BACKUP_NAME)
        else:
            future = table.restore(self.TABLE_ID, self.CLUSTER_ID, self.BACKUP_ID)
        self.assertIs(future, op_future)
        api.restore_table.assert_called_once_with(
            parent=self.INSTANCE_NAME, table_id=self.TABLE_ID, backup=self.BACKUP_NAME,
        )
    def test_restore_table_w_backup_id(self):
        """restore() should accept a cluster id plus backup id."""
        self._restore_helper()
    def test_restore_table_w_backup_name(self):
        """restore() should accept a fully-qualified backup name."""
        self._restore_helper(backup_name=self.BACKUP_NAME)
class Test__RetryableMutateRowsWorker(unittest.TestCase):
from grpc import StatusCode
PROJECT_ID = "project-id"
INSTANCE_ID = "instance-id"
INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID
TABLE_ID = "table-id"
# RPC Status Codes
SUCCESS = StatusCode.OK.value[0]
RETRYABLE_1 = StatusCode.DEADLINE_EXCEEDED.value[0]
RETRYABLE_2 = StatusCode.ABORTED.value[0]
NON_RETRYABLE = StatusCode.CANCELLED.value[0]
@staticmethod
def _get_target_class_for_worker():
from google.cloud.bigtable.table import _RetryableMutateRowsWorker
return _RetryableMutateRowsWorker
def _make_worker(self, *args, **kwargs):
return self._get_target_class_for_worker()(*args, **kwargs)
@staticmethod
def _get_target_class_for_table():
from google.cloud.bigtable.table import Table
return Table
def _make_table(self, *args, **kwargs):
return self._get_target_class_for_table()(*args, **kwargs)
@staticmethod
def _get_target_client_class():
from google.cloud.bigtable.client import Client
return Client
def _make_client(self, *args, **kwargs):
return self._get_target_client_class()(*args, **kwargs)
def _make_responses_statuses(self, codes):
from google.rpc.status_pb2 import Status
response = [Status(code=code) for code in codes]
return response
def _make_responses(self, codes):
import six
from google.cloud.bigtable_v2.proto.bigtable_pb2 import MutateRowsResponse
from google.rpc.status_pb2 import Status
entries = [
MutateRowsResponse.Entry(index=i, status=Status(code=codes[i]))
for i in six.moves.xrange(len(codes))
]
return MutateRowsResponse(entries=entries)
def test_callable_empty_rows(self):
from google.cloud.bigtable_v2.gapic import bigtable_client
from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
data_api = mock.create_autospec(bigtable_client.BigtableClient)
table_api = mock.create_autospec(
bigtable_table_admin_client.BigtableTableAdminClient
)
credentials = _make_credentials()
client = self._make_client(
project="project-id", credentials=credentials, admin=True
)
client._table_data_client = data_api
client._table_admin_client = table_api
instance = client.instance(instance_id=self.INSTANCE_ID)
table = self._make_table(self.TABLE_ID, instance)
worker = self._make_worker(client, table.name, [])
statuses = worker()
self.assertEqual(len(statuses), 0)
def test_callable_no_retry_strategy(self):
from google.cloud.bigtable.row import DirectRow
from google.cloud.bigtable_v2.gapic import bigtable_client
from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
# Setup:
# - Mutate 3 rows.
# Action:
# - Attempt to mutate the rows w/o any retry strategy.
# Expectation:
# - Since no retry, should return statuses as they come back.
# - Even if there are retryable errors, no retry attempt is made.
# - State of responses_statuses should be
# [success, retryable, non-retryable]
data_api = bigtable_client.BigtableClient(mock.Mock())
table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
credentials = _make_credentials()
client = self._make_client(
project="project-id", credentials=credentials, admin=True
)
client._table_data_client = data_api
client._table_admin_client = table_api
instance = client.instance(instance_id=self.INSTANCE_ID)
table = self._make_table(self.TABLE_ID, instance)
row_1 = DirectRow(row_key=b"row_key", table=table)
row_1.set_cell("cf", b"col", b"value1")
row_2 = DirectRow(row_key=b"row_key_2", table=table)
row_2.set_cell("cf", b"col", b"value2")
row_3 = DirectRow(row_key=b"row_key_3", table=table)
row_3.set_cell("cf", b"col", b"value3")
response = self._make_responses(
[self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE]
)
with mock.patch("google.cloud.bigtable.table.wrap_method") as patched:
patched.return_value = mock.Mock(return_value=[response])
worker = self._make_worker(client, table.name, [row_1, row_2, row_3])
statuses = worker(retry=None)
result = [status.code for status in statuses]
expected_result = [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE]
client._table_data_client._inner_api_calls["mutate_rows"].assert_called_once()
self.assertEqual(result, expected_result)
def test_callable_retry(self):
from google.cloud.bigtable.row import DirectRow
from google.cloud.bigtable.table import DEFAULT_RETRY
from google.cloud.bigtable_v2.gapic import bigtable_client
from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
# Setup:
# - Mutate 3 rows.
# Action:
# - Initial attempt will mutate all 3 rows.
# Expectation:
# - First attempt will result in one retryable error.
# - Second attempt will result in success for the retry-ed row.
# - Check MutateRows is called twice.
# - State of responses_statuses should be
# [success, success, non-retryable]
data_api = bigtable_client.BigtableClient(mock.Mock())
table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
credentials = _make_credentials()
client = self._make_client(
project="project-id", credentials=credentials, admin=True
)
client._table_data_client = data_api
client._table_admin_client = table_api
instance = client.instance(instance_id=self.INSTANCE_ID)
table = self._make_table(self.TABLE_ID, instance)
row_1 = DirectRow(row_key=b"row_key", table=table)
row_1.set_cell("cf", b"col", b"value1")
row_2 = DirectRow(row_key=b"row_key_2", table=table)
row_2.set_cell("cf", b"col", b"value2")
row_3 = DirectRow(row_key=b"row_key_3", table=table)
row_3.set_cell("cf", b"col", b"value3")
response_1 = self._make_responses(
[self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE]
)
response_2 = self._make_responses([self.SUCCESS])
# Patch the stub used by the API method.
client._table_data_client._inner_api_calls["mutate_rows"] = mock.Mock(
side_effect=[[response_1], [response_2]]
)
retry = DEFAULT_RETRY.with_delay(initial=0.1)
worker = self._make_worker(client, table.name, [row_1, row_2, row_3])
statuses = worker(retry=retry)
result = [status.code for status in statuses]
expected_result = [self.SUCCESS, self.SUCCESS, self.NON_RETRYABLE]
self.assertEqual(
client._table_data_client._inner_api_calls["mutate_rows"].call_count, 2
)
self.assertEqual(result, expected_result)
def test_do_mutate_retryable_rows_empty_rows(self):
from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
table_api = mock.create_autospec(
bigtable_table_admin_client.BigtableTableAdminClient
)
credentials = _make_credentials()
client = self._make_client(
project="project-id", credentials=credentials, admin=True
)
client._table_admin_client = table_api
instance = client.instance(instance_id=self.INSTANCE_ID)
table = self._make_table(self.TABLE_ID, instance)
worker = self._make_worker(client, table.name, [])
statuses = worker._do_mutate_retryable_rows()
self.assertEqual(len(statuses), 0)
def test_do_mutate_retryable_rows(self):
from google.cloud.bigtable.row import DirectRow
from google.cloud.bigtable_v2.gapic import bigtable_client
from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
# Setup:
# - Mutate 2 rows.
# Action:
# - Initial attempt will mutate all 2 rows.
# Expectation:
# - Expect [success, non-retryable]
data_api = bigtable_client.BigtableClient(mock.Mock())
table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
credentials = _make_credentials()
client = self._make_client(
project="project-id", credentials=credentials, admin=True
)
client._table_data_client = data_api
client._table_admin_client = table_api
instance = client.instance(instance_id=self.INSTANCE_ID)
table = self._make_table(self.TABLE_ID, instance)
row_1 = DirectRow(row_key=b"row_key", table=table)
row_1.set_cell("cf", b"col", b"value1")
row_2 = DirectRow(row_key=b"row_key_2", table=table)
row_2.set_cell("cf", b"col", b"value2")
response = self._make_responses([self.SUCCESS, self.NON_RETRYABLE])
# Patch the stub used by the API method.
inner_api_calls = client._table_data_client._inner_api_calls
inner_api_calls["mutate_rows"] = mock.Mock(side_effect=[[response]])
worker = self._make_worker(client, table.name, [row_1, row_2])
statuses = worker._do_mutate_retryable_rows()
result = [status.code for status in statuses]
expected_result = [self.SUCCESS, self.NON_RETRYABLE]
self.assertEqual(result, expected_result)
def test_do_mutate_retryable_rows_retry(self):
from google.cloud.bigtable.row import DirectRow
from google.cloud.bigtable.table import _BigtableRetryableError
from google.cloud.bigtable_v2.gapic import bigtable_client
from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
# Setup:
# - Mutate 3 rows.
# Action:
# - Initial attempt will mutate all 3 rows.
# Expectation:
# - Second row returns retryable error code, so expect a raise.
# - State of responses_statuses should be
# [success, retryable, non-retryable]
data_api = bigtable_client.BigtableClient(mock.Mock())
table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
credentials = _make_credentials()
client = self._make_client(
project="project-id", credentials=credentials, admin=True
)
client._table_data_client = data_api
client._table_admin_client = table_api
instance = client.instance(instance_id=self.INSTANCE_ID)
table = self._make_table(self.TABLE_ID, instance)
row_1 = DirectRow(row_key=b"row_key", table=table)
row_1.set_cell("cf", b"col", b"value1")
row_2 = DirectRow(row_key=b"row_key_2", table=table)
row_2.set_cell("cf", b"col", b"value2")
row_3 = DirectRow(row_key=b"row_key_3", table=table)
row_3.set_cell("cf", b"col", b"value3")
response = self._make_responses(
[self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE]
)
# Patch the stub used by the API method.
inner_api_calls = client._table_data_client._inner_api_calls
inner_api_calls["mutate_rows"] = mock.Mock(side_effect=[[response]])
worker = self._make_worker(client, table.name, [row_1, row_2, row_3])
with self.assertRaises(_BigtableRetryableError):
worker._do_mutate_retryable_rows()
statuses = worker.responses_statuses
result = [status.code for status in statuses]
expected_result = [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE]
self.assertEqual(result, expected_result)
def test_do_mutate_retryable_rows_second_retry(self):
from google.cloud.bigtable.row import DirectRow
from google.cloud.bigtable.table import _BigtableRetryableError
from google.cloud.bigtable_v2.gapic import bigtable_client
from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
# Setup:
# - Mutate 4 rows.
# - First try results:
# [success, retryable, non-retryable, retryable]
# Action:
# - Second try should re-attempt the 'retryable' rows.
# Expectation:
# - After second try:
# [success, success, non-retryable, retryable]
# - One of the rows tried second time returns retryable error code,
# so expect a raise.
# - Exception contains response whose index should be '3' even though
# only two rows were retried.
data_api = bigtable_client.BigtableClient(mock.Mock())
table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
credentials = _make_credentials()
client = self._make_client(
project="project-id", credentials=credentials, admin=True
)
client._table_data_client = data_api
client._table_admin_client = table_api
instance = client.instance(instance_id=self.INSTANCE_ID)
table = self._make_table(self.TABLE_ID, instance)
row_1 = DirectRow(row_key=b"row_key", table=table)
row_1.set_cell("cf", b"col", b"value1")
row_2 = DirectRow(row_key=b"row_key_2", table=table)
row_2.set_cell("cf", b"col", b"value2")
row_3 = DirectRow(row_key=b"row_key_3", table=table)
row_3.set_cell("cf", b"col", b"value3")
row_4 = DirectRow(row_key=b"row_key_4", table=table)
row_4.set_cell("cf", b"col", b"value4")
response = self._make_responses([self.SUCCESS, self.RETRYABLE_1])
# Patch the stub used by the API method.
inner_api_calls = client._table_data_client._inner_api_calls
inner_api_calls["mutate_rows"] = mock.Mock(side_effect=[[response]])
worker = self._make_worker(client, table.name, [row_1, row_2, row_3, row_4])
worker.responses_statuses = self._make_responses_statuses(
[self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE, self.RETRYABLE_2]
)
with self.assertRaises(_BigtableRetryableError):
worker._do_mutate_retryable_rows()
statuses = worker.responses_statuses
result = [status.code for status in statuses]
expected_result = [
self.SUCCESS,
self.SUCCESS,
self.NON_RETRYABLE,
self.RETRYABLE_1,
]
self.assertEqual(result, expected_result)
def test_do_mutate_retryable_rows_second_try(self):
from google.cloud.bigtable.row import DirectRow
from google.cloud.bigtable_v2.gapic import bigtable_client
from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
# Setup:
# - Mutate 4 rows.
# - First try results:
# [success, retryable, non-retryable, retryable]
# Action:
# - Second try should re-attempt the 'retryable' rows.
# Expectation:
# - After second try:
# [success, non-retryable, non-retryable, success]
data_api = bigtable_client.BigtableClient(mock.Mock())
table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
credentials = _make_credentials()
client = self._make_client(
project="project-id", credentials=credentials, admin=True
)
client._table_data_client = data_api
client._table_admin_client = table_api
instance = client.instance(instance_id=self.INSTANCE_ID)
table = self._make_table(self.TABLE_ID, instance)
row_1 = DirectRow(row_key=b"row_key", table=table)
row_1.set_cell("cf", b"col", b"value1")
row_2 = DirectRow(row_key=b"row_key_2", table=table)
row_2.set_cell("cf", b"col", b"value2")
row_3 = DirectRow(row_key=b"row_key_3", table=table)
row_3.set_cell("cf", b"col", b"value3")
row_4 = DirectRow(row_key=b"row_key_4", table=table)
row_4.set_cell("cf", b"col", b"value4")
response = self._make_responses([self.NON_RETRYABLE, self.SUCCESS])
# Patch the stub used by the API method.
inner_api_calls = client._table_data_client._inner_api_calls
inner_api_calls["mutate_rows"] = mock.Mock(side_effect=[[response]])
worker = self._make_worker(client, table.name, [row_1, row_2, row_3, row_4])
worker.responses_statuses = self._make_responses_statuses(
[self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE, self.RETRYABLE_2]
)
statuses = worker._do_mutate_retryable_rows()
result = [status.code for status in statuses]
expected_result = [
self.SUCCESS,
self.NON_RETRYABLE,
self.NON_RETRYABLE,
self.SUCCESS,
]
self.assertEqual(result, expected_result)
def test_do_mutate_retryable_rows_second_try_no_retryable(self):
from google.cloud.bigtable.row import DirectRow
from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
# Setup:
# - Mutate 2 rows.
# - First try results: [success, non-retryable]
# Action:
# - Second try has no row to retry.
# Expectation:
# - After second try: [success, non-retryable]
table_api = mock.create_autospec(
bigtable_table_admin_client.BigtableTableAdminClient
)
credentials = _make_credentials()
client = self._make_client(
project="project-id", credentials=credentials, admin=True
)
client._table_admin_client = table_api
instance = client.instance(instance_id=self.INSTANCE_ID)
table = self._make_table(self.TABLE_ID, instance)
row_1 = DirectRow(row_key=b"row_key", table=table)
row_1.set_cell("cf", b"col", b"value1")
row_2 = DirectRow(row_key=b"row_key_2", table=table)
row_2.set_cell("cf", b"col", b"value2")
worker = self._make_worker(client, table.name, [row_1, row_2])
worker.responses_statuses = self._make_responses_statuses(
[self.SUCCESS, self.NON_RETRYABLE]
)
statuses = worker._do_mutate_retryable_rows()
result = [status.code for status in statuses]
expected_result = [self.SUCCESS, self.NON_RETRYABLE]
self.assertEqual(result, expected_result)
    def test_do_mutate_retryable_rows_mismatch_num_responses(self):
        """A MutateRows stream with fewer entries than rows raises RuntimeError."""
        from google.cloud.bigtable.row import DirectRow
        from google.cloud.bigtable_v2.gapic import bigtable_client
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
        data_api = bigtable_client.BigtableClient(mock.Mock())
        table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        client._table_data_client = data_api
        client._table_admin_client = table_api
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_table(self.TABLE_ID, instance)
        row_1 = DirectRow(row_key=b"row_key", table=table)
        row_1.set_cell("cf", b"col", b"value1")
        row_2 = DirectRow(row_key=b"row_key_2", table=table)
        row_2.set_cell("cf", b"col", b"value2")
        # Two rows are mutated but the stubbed server answers for only one.
        response = self._make_responses([self.SUCCESS])
        # Patch the stub used by the API method.
        inner_api_calls = client._table_data_client._inner_api_calls
        inner_api_calls["mutate_rows"] = mock.Mock(side_effect=[[response]])
        worker = self._make_worker(client, table.name, [row_1, row_2])
        with self.assertRaises(RuntimeError):
            worker._do_mutate_retryable_rows()
class Test__create_row_request(unittest.TestCase):
    """Unit tests for the ``_create_row_request`` request-builder helper."""
    def _call_fut(
        self,
        table_name,
        start_key=None,
        end_key=None,
        filter_=None,
        limit=None,
        end_inclusive=False,
        app_profile_id=None,
        row_set=None,
    ):
        """Invoke the function under test, forwarding all keyword arguments."""
        from google.cloud.bigtable.table import _create_row_request
        return _create_row_request(
            table_name,
            start_key=start_key,
            end_key=end_key,
            filter_=filter_,
            limit=limit,
            end_inclusive=end_inclusive,
            app_profile_id=app_profile_id,
            row_set=row_set,
        )
    def test_table_name_only(self):
        """Only a table name yields a bare ReadRowsRequest."""
        table_name = "table_name"
        result = self._call_fut(table_name)
        expected_result = _ReadRowsRequestPB(table_name=table_name)
        self.assertEqual(result, expected_result)
    def test_row_range_row_set_conflict(self):
        """Supplying both a key range and a row_set raises ValueError."""
        with self.assertRaises(ValueError):
            self._call_fut(None, end_key=object(), row_set=object())
    def test_row_range_start_key(self):
        """start_key alone becomes a closed start bound."""
        table_name = "table_name"
        start_key = b"start_key"
        result = self._call_fut(table_name, start_key=start_key)
        expected_result = _ReadRowsRequestPB(table_name=table_name)
        expected_result.rows.row_ranges.add(start_key_closed=start_key)
        self.assertEqual(result, expected_result)
    def test_row_range_end_key(self):
        """end_key alone becomes an open end bound."""
        table_name = "table_name"
        end_key = b"end_key"
        result = self._call_fut(table_name, end_key=end_key)
        expected_result = _ReadRowsRequestPB(table_name=table_name)
        expected_result.rows.row_ranges.add(end_key_open=end_key)
        self.assertEqual(result, expected_result)
    def test_row_range_both_keys(self):
        """start/end keys form a [closed, open) range by default."""
        table_name = "table_name"
        start_key = b"start_key"
        end_key = b"end_key"
        result = self._call_fut(table_name, start_key=start_key, end_key=end_key)
        expected_result = _ReadRowsRequestPB(table_name=table_name)
        expected_result.rows.row_ranges.add(
            start_key_closed=start_key, end_key_open=end_key
        )
        self.assertEqual(result, expected_result)
    def test_row_range_both_keys_inclusive(self):
        """end_inclusive=True closes the end bound of the range."""
        table_name = "table_name"
        start_key = b"start_key"
        end_key = b"end_key"
        result = self._call_fut(
            table_name, start_key=start_key, end_key=end_key, end_inclusive=True
        )
        expected_result = _ReadRowsRequestPB(table_name=table_name)
        expected_result.rows.row_ranges.add(
            start_key_closed=start_key, end_key_closed=end_key
        )
        self.assertEqual(result, expected_result)
    def test_with_filter(self):
        """A row filter is serialized into the request's filter field."""
        from google.cloud.bigtable.row_filters import RowSampleFilter
        table_name = "table_name"
        row_filter = RowSampleFilter(0.33)
        result = self._call_fut(table_name, filter_=row_filter)
        expected_result = _ReadRowsRequestPB(
            table_name=table_name, filter=row_filter.to_pb()
        )
        self.assertEqual(result, expected_result)
    def test_with_limit(self):
        """limit maps onto the rows_limit request field."""
        table_name = "table_name"
        limit = 1337
        result = self._call_fut(table_name, limit=limit)
        expected_result = _ReadRowsRequestPB(table_name=table_name, rows_limit=limit)
        self.assertEqual(result, expected_result)
    def test_with_row_set(self):
        """An empty RowSet adds nothing to the request."""
        from google.cloud.bigtable.row_set import RowSet
        table_name = "table_name"
        row_set = RowSet()
        result = self._call_fut(table_name, row_set=row_set)
        expected_result = _ReadRowsRequestPB(table_name=table_name)
        self.assertEqual(result, expected_result)
    def test_with_app_profile_id(self):
        """app_profile_id is forwarded onto the request."""
        table_name = "table_name"
        limit = 1337
        app_profile_id = "app-profile-id"
        result = self._call_fut(table_name, limit=limit, app_profile_id=app_profile_id)
        expected_result = _ReadRowsRequestPB(
            table_name=table_name, rows_limit=limit, app_profile_id=app_profile_id
        )
        self.assertEqual(result, expected_result)
def _ReadRowsRequestPB(*args, **kw):
    """Build a ``ReadRowsRequest`` protobuf from the given constructor args."""
    from google.cloud.bigtable_v2.proto import bigtable_pb2
    request_class = bigtable_pb2.ReadRowsRequest
    return request_class(*args, **kw)
class Test_ClusterState(unittest.TestCase):
    """Unit tests for ``google.cloud.bigtable.table.ClusterState``."""
    def test___eq__(self):
        """Two states wrapping the same replication state compare equal."""
        from google.cloud.bigtable.enums import Table as enum_table
        from google.cloud.bigtable.table import ClusterState
        READY = enum_table.ReplicationState.READY
        state1 = ClusterState(READY)
        state2 = ClusterState(READY)
        self.assertEqual(state1, state2)
    def test___eq__type_differ(self):
        """Comparison against an unrelated type is unequal."""
        from google.cloud.bigtable.enums import Table as enum_table
        from google.cloud.bigtable.table import ClusterState
        READY = enum_table.ReplicationState.READY
        state1 = ClusterState(READY)
        state2 = object()
        self.assertNotEqual(state1, state2)
    def test___ne__same_value(self):
        """!= is False for states wrapping the same replication state."""
        from google.cloud.bigtable.enums import Table as enum_table
        from google.cloud.bigtable.table import ClusterState
        READY = enum_table.ReplicationState.READY
        state1 = ClusterState(READY)
        state2 = ClusterState(READY)
        comparison_val = state1 != state2
        self.assertFalse(comparison_val)
    def test___ne__(self):
        """States wrapping different replication states compare unequal."""
        from google.cloud.bigtable.enums import Table as enum_table
        from google.cloud.bigtable.table import ClusterState
        READY = enum_table.ReplicationState.READY
        INITIALIZING = enum_table.ReplicationState.INITIALIZING
        state1 = ClusterState(READY)
        state2 = ClusterState(INITIALIZING)
        self.assertNotEqual(state1, state2)
    def test__repr__(self):
        """Every replication state stringifies to its enum name.

        NOTE(review): despite the method name, this exercises ``str()``
        (i.e. ``__str__``) and the ``replication_state`` attribute, not
        ``__repr__`` — consider renaming in a follow-up.
        """
        from google.cloud.bigtable.enums import Table as enum_table
        from google.cloud.bigtable.table import ClusterState
        STATE_NOT_KNOWN = enum_table.ReplicationState.STATE_NOT_KNOWN
        INITIALIZING = enum_table.ReplicationState.INITIALIZING
        PLANNED_MAINTENANCE = enum_table.ReplicationState.PLANNED_MAINTENANCE
        UNPLANNED_MAINTENANCE = enum_table.ReplicationState.UNPLANNED_MAINTENANCE
        READY = enum_table.ReplicationState.READY
        replication_dict = {
            STATE_NOT_KNOWN: "STATE_NOT_KNOWN",
            INITIALIZING: "INITIALIZING",
            PLANNED_MAINTENANCE: "PLANNED_MAINTENANCE",
            UNPLANNED_MAINTENANCE: "UNPLANNED_MAINTENANCE",
            READY: "READY",
        }
        self.assertEqual(
            str(ClusterState(STATE_NOT_KNOWN)), replication_dict[STATE_NOT_KNOWN]
        )
        self.assertEqual(
            str(ClusterState(INITIALIZING)), replication_dict[INITIALIZING]
        )
        self.assertEqual(
            str(ClusterState(PLANNED_MAINTENANCE)),
            replication_dict[PLANNED_MAINTENANCE],
        )
        self.assertEqual(
            str(ClusterState(UNPLANNED_MAINTENANCE)),
            replication_dict[UNPLANNED_MAINTENANCE],
        )
        self.assertEqual(str(ClusterState(READY)), replication_dict[READY])
        self.assertEqual(
            ClusterState(STATE_NOT_KNOWN).replication_state, STATE_NOT_KNOWN
        )
        self.assertEqual(ClusterState(INITIALIZING).replication_state, INITIALIZING)
        self.assertEqual(
            ClusterState(PLANNED_MAINTENANCE).replication_state, PLANNED_MAINTENANCE
        )
        self.assertEqual(
            ClusterState(UNPLANNED_MAINTENANCE).replication_state, UNPLANNED_MAINTENANCE
        )
        self.assertEqual(ClusterState(READY).replication_state, READY)
def _ReadRowsResponseCellChunkPB(*args, **kw):
    """Build a ``ReadRowsResponse.CellChunk`` protobuf.

    ``family_name`` and ``qualifier`` are wrapper fields on the proto, so
    they are popped out of the kwargs and assigned via ``.value`` after
    construction.
    """
    from google.cloud.bigtable_v2.proto import bigtable_pb2
    family = kw.pop("family_name")
    column = kw.pop("qualifier")
    chunk = bigtable_pb2.ReadRowsResponse.CellChunk(*args, **kw)
    chunk.family_name.value = family
    chunk.qualifier.value = column
    return chunk
def _ReadRowsResponsePB(*args, **kw):
    """Build a ``ReadRowsResponse`` protobuf from the given constructor args."""
    from google.cloud.bigtable_v2.proto import bigtable_pb2
    response_class = bigtable_pb2.ReadRowsResponse
    return response_class(*args, **kw)
def _mutate_rows_request_pb(*args, **kw):
    """Build a ``MutateRowsRequest`` protobuf from the given constructor args."""
    from google.cloud.bigtable_v2.proto import bigtable_pb2
    request_class = bigtable_pb2.MutateRowsRequest
    return request_class(*args, **kw)
class _MockReadRowsIterator(object):
def __init__(self, *values):
self.iter_values = iter(values)
def next(self):
return next(self.iter_values)
__next__ = next
class _MockFailureIterator_1(object):
    """Iterator double whose every ``next()`` call raises DeadlineExceeded."""
    def next(self):
        raise DeadlineExceeded("Failed to read from server")
    __next__ = next
class _MockFailureIterator_2(object):
    """Iterator double: the first ``next()`` succeeds, later calls fail.

    The failure is a ``DeadlineExceeded`` error, matching what a streaming
    read would surface on a server-side timeout.
    """

    def __init__(self, *values):
        # The first positional argument holds the sequence of values to serve.
        self.iter_values = values[0]
        self.calls = 0

    def __next__(self):
        self.calls += 1
        if self.calls > 1:
            raise DeadlineExceeded("Failed to read from server")
        return self.iter_values[0]

    # Python 2-style spelling, kept as an alias of __next__.
    next = __next__
class _ReadRowsResponseV2(object):
def __init__(self, chunks, last_scanned_row_key=""):
self.chunks = chunks
self.last_scanned_row_key = last_scanned_row_key
def _TablePB(*args, **kw):
    """Build a ``Table`` admin protobuf from the given constructor args."""
    from google.cloud.bigtable_admin_v2.proto import table_pb2
    table_class = table_pb2.Table
    return table_class(*args, **kw)
def _ColumnFamilyPB(*args, **kw):
    """Build a ``ColumnFamily`` admin protobuf from the given constructor args."""
    from google.cloud.bigtable_admin_v2.proto import table_pb2
    family_class = table_pb2.ColumnFamily
    return family_class(*args, **kw)
def _ClusterStatePB(replication_state):
    """Build a ``Table.ClusterState`` pb holding the given replication state."""
    from google.cloud.bigtable_admin_v2.proto import table_pb2
    state_class = table_pb2.Table.ClusterState
    return state_class(replication_state=replication_state)
def _read_rows_retry_exception(exc):
    """Retry predicate: only ``DeadlineExceeded`` errors should be retried."""
    return isinstance(exc, DeadlineExceeded)
| 38.271422 | 88 | 0.671416 |
import unittest
import mock
from ._testing import _make_credentials
from google.api_core.exceptions import DeadlineExceeded
class Test___mutate_rows_request(unittest.TestCase):
    """Unit tests for the ``_mutate_rows_request`` request-builder helper."""
    def _call_fut(self, table_name, rows):
        """Invoke the function under test."""
        from google.cloud.bigtable.table import _mutate_rows_request
        return _mutate_rows_request(table_name, rows)
    @mock.patch("google.cloud.bigtable.table._MAX_BULK_MUTATIONS", new=3)
    def test__mutate_rows_too_many_mutations(self):
        """Exceeding the (patched) bulk-mutation cap raises TooManyMutationsError."""
        from google.cloud.bigtable.row import DirectRow
        from google.cloud.bigtable.table import TooManyMutationsError
        table = mock.Mock(name="table", spec=["name"])
        table.name = "table"
        rows = [
            DirectRow(row_key=b"row_key", table=table),
            DirectRow(row_key=b"row_key_2", table=table),
        ]
        # Four mutations total, one over the patched limit of 3.
        rows[0].set_cell("cf1", b"c1", 1)
        rows[0].set_cell("cf1", b"c1", 2)
        rows[1].set_cell("cf1", b"c1", 3)
        rows[1].set_cell("cf1", b"c1", 4)
        with self.assertRaises(TooManyMutationsError):
            self._call_fut("table", rows)
    def test__mutate_rows_request(self):
        """Each row becomes an entry with its mutations serialized in order."""
        from google.cloud.bigtable.row import DirectRow
        table = mock.Mock(name="table", spec=["name"])
        table.name = "table"
        rows = [
            DirectRow(row_key=b"row_key", table=table),
            DirectRow(row_key=b"row_key_2"),
        ]
        rows[0].set_cell("cf1", b"c1", b"1")
        rows[1].set_cell("cf1", b"c1", b"2")
        result = self._call_fut("table", rows)
        expected_result = _mutate_rows_request_pb(table_name="table")
        entry1 = expected_result.entries.add()
        entry1.row_key = b"row_key"
        mutations1 = entry1.mutations.add()
        mutations1.set_cell.family_name = "cf1"
        mutations1.set_cell.column_qualifier = b"c1"
        # -1 means "server-assigned timestamp".
        mutations1.set_cell.timestamp_micros = -1
        mutations1.set_cell.value = b"1"
        entry2 = expected_result.entries.add()
        entry2.row_key = b"row_key_2"
        mutations2 = entry2.mutations.add()
        mutations2.set_cell.family_name = "cf1"
        mutations2.set_cell.column_qualifier = b"c1"
        mutations2.set_cell.timestamp_micros = -1
        mutations2.set_cell.value = b"2"
        self.assertEqual(result, expected_result)
class Test__check_row_table_name(unittest.TestCase):
    """Unit tests for the ``_check_row_table_name`` validation helper."""
    def _call_fut(self, table_name, row):
        """Invoke the function under test."""
        from google.cloud.bigtable.table import _check_row_table_name
        return _check_row_table_name(table_name, row)
    def test_wrong_table_name(self):
        """A row bound to a different table raises TableMismatchError."""
        from google.cloud.bigtable.table import TableMismatchError
        from google.cloud.bigtable.row import DirectRow
        table = mock.Mock(name="table", spec=["name"])
        table.name = "table"
        row = DirectRow(row_key=b"row_key", table=table)
        with self.assertRaises(TableMismatchError):
            self._call_fut("other_table", row)
    def test_right_table_name(self):
        """A matching table name passes (helper returns a falsy value)."""
        from google.cloud.bigtable.row import DirectRow
        table = mock.Mock(name="table", spec=["name"])
        table.name = "table"
        row = DirectRow(row_key=b"row_key", table=table)
        result = self._call_fut("table", row)
        self.assertFalse(result)
class Test__check_row_type(unittest.TestCase):
    """Unit tests for the ``_check_row_type`` validation helper."""
    def _call_fut(self, row):
        """Invoke the function under test."""
        from google.cloud.bigtable.table import _check_row_type
        return _check_row_type(row)
    # NOTE(review): method name has a doubled "test_" prefix; renaming to
    # test_wrong_row_type would be clearer (left as-is to avoid changing
    # the collected test identifiers).
    def test_test_wrong_row_type(self):
        """A non-DirectRow (here ConditionalRow) raises TypeError."""
        from google.cloud.bigtable.row import ConditionalRow
        row = ConditionalRow(row_key=b"row_key", table="table", filter_=None)
        with self.assertRaises(TypeError):
            self._call_fut(row)
    def test_right_row_type(self):
        """A DirectRow passes (helper returns a falsy value)."""
        from google.cloud.bigtable.row import DirectRow
        row = DirectRow(row_key=b"row_key", table="table")
        result = self._call_fut(row)
        self.assertFalse(result)
class TestTable(unittest.TestCase):
    """Unit tests for ``google.cloud.bigtable.table.Table``."""
    # Resource identifiers and their fully-qualified names, shared by all tests.
    PROJECT_ID = "project-id"
    INSTANCE_ID = "instance-id"
    INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID
    CLUSTER_ID = "cluster-id"
    CLUSTER_NAME = INSTANCE_NAME + "/clusters/" + CLUSTER_ID
    TABLE_ID = "table-id"
    TABLE_NAME = INSTANCE_NAME + "/tables/" + TABLE_ID
    BACKUP_ID = "backup-id"
    BACKUP_NAME = CLUSTER_NAME + "/backups/" + BACKUP_ID
    # Sample row/cell coordinates used when building chunks and rows.
    ROW_KEY = b"row-key"
    ROW_KEY_1 = b"row-key-1"
    ROW_KEY_2 = b"row-key-2"
    ROW_KEY_3 = b"row-key-3"
    FAMILY_NAME = u"family"
    QUALIFIER = b"qualifier"
    TIMESTAMP_MICROS = 100
    VALUE = b"value"
    # Lazily-populated cache for JSON fixtures; None until first load.
    _json_tests = None
    @staticmethod
    def _get_target_class():
        """Return the class under test (imported lazily to defer side effects)."""
        from google.cloud.bigtable.table import Table
        return Table
def _make_one(self, *args, **kwargs):
return self._get_target_class()(*args, **kwargs)
    @staticmethod
    def _get_target_client_class():
        """Return the Bigtable ``Client`` class (imported lazily)."""
        from google.cloud.bigtable.client import Client
        return Client
def _make_client(self, *args, **kwargs):
return self._get_target_client_class()(*args, **kwargs)
    def test_constructor_w_admin(self):
        """Construction under an admin client stores id, instance and name."""
        credentials = _make_credentials()
        client = self._make_client(
            project=self.PROJECT_ID, credentials=credentials, admin=True
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)
        self.assertEqual(table.table_id, self.TABLE_ID)
        self.assertIs(table._instance._client, client)
        self.assertEqual(table.name, self.TABLE_NAME)
    def test_constructor_wo_admin(self):
        """Construction under a non-admin client behaves identically."""
        credentials = _make_credentials()
        client = self._make_client(
            project=self.PROJECT_ID, credentials=credentials, admin=False
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)
        self.assertEqual(table.table_id, self.TABLE_ID)
        self.assertIs(table._instance._client, client)
        self.assertEqual(table.name, self.TABLE_NAME)
    def _row_methods_helper(self):
        """Return a ``(table, row_key)`` pair for the row-factory tests."""
        client = self._make_client(
            project="project-id", credentials=_make_credentials(), admin=True
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)
        row_key = b"row_key"
        return table, row_key
    def test_row_factory_direct(self):
        """row() with no options builds a DirectRow bound to the table."""
        from google.cloud.bigtable.row import DirectRow
        table, row_key = self._row_methods_helper()
        row = table.row(row_key)
        self.assertIsInstance(row, DirectRow)
        self.assertEqual(row._row_key, row_key)
        self.assertEqual(row._table, table)
    def test_row_factory_conditional(self):
        """row() with a filter builds a ConditionalRow."""
        from google.cloud.bigtable.row import ConditionalRow
        table, row_key = self._row_methods_helper()
        filter_ = object()
        row = table.row(row_key, filter_=filter_)
        self.assertIsInstance(row, ConditionalRow)
        self.assertEqual(row._row_key, row_key)
        self.assertEqual(row._table, table)
    def test_row_factory_append(self):
        """row() with append=True builds an AppendRow."""
        from google.cloud.bigtable.row import AppendRow
        table, row_key = self._row_methods_helper()
        row = table.row(row_key, append=True)
        self.assertIsInstance(row, AppendRow)
        self.assertEqual(row._row_key, row_key)
        self.assertEqual(row._table, table)
    def test_direct_row(self):
        """direct_row() builds a DirectRow bound to the table."""
        from google.cloud.bigtable.row import DirectRow
        table, row_key = self._row_methods_helper()
        row = table.direct_row(row_key)
        self.assertIsInstance(row, DirectRow)
        self.assertEqual(row._row_key, row_key)
        self.assertEqual(row._table, table)
    def test_conditional_row(self):
        """conditional_row() builds a ConditionalRow bound to the table."""
        from google.cloud.bigtable.row import ConditionalRow
        table, row_key = self._row_methods_helper()
        filter_ = object()
        row = table.conditional_row(row_key, filter_=filter_)
        self.assertIsInstance(row, ConditionalRow)
        self.assertEqual(row._row_key, row_key)
        self.assertEqual(row._table, table)
    def test_append_row(self):
        """append_row() builds an AppendRow bound to the table."""
        from google.cloud.bigtable.row import AppendRow
        table, row_key = self._row_methods_helper()
        row = table.append_row(row_key)
        self.assertIsInstance(row, AppendRow)
        self.assertEqual(row._row_key, row_key)
        self.assertEqual(row._table, table)
def test_row_factory_failure(self):
table, row_key = self._row_methods_helper()
with self.assertRaises(ValueError):
table.row(row_key, filter_=object(), append=True)
    def test___eq__(self):
        """Tables with the same id and instance compare equal."""
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table1 = self._make_one(self.TABLE_ID, instance)
        table2 = self._make_one(self.TABLE_ID, instance)
        self.assertEqual(table1, table2)
    def test___eq__type_differ(self):
        """Comparison against an unrelated type is unequal."""
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table1 = self._make_one(self.TABLE_ID, instance)
        table2 = object()
        self.assertNotEqual(table1, table2)
    def test___ne__same_value(self):
        """!= is False for tables with the same id and instance."""
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table1 = self._make_one(self.TABLE_ID, instance)
        table2 = self._make_one(self.TABLE_ID, instance)
        comparison_val = table1 != table2
        self.assertFalse(comparison_val)
def test___ne__(self):
table1 = self._make_one("table_id1", None)
table2 = self._make_one("table_id2", None)
self.assertNotEqual(table1, table2)
def _create_test_helper(self, split_keys=[], column_families={}):
from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
from google.cloud.bigtable_admin_v2.proto import table_pb2
from google.cloud.bigtable_admin_v2.proto import (
bigtable_table_admin_pb2 as table_admin_messages_v2_pb2,
)
from google.cloud.bigtable.column_family import ColumnFamily
table_api = mock.create_autospec(
bigtable_table_admin_client.BigtableTableAdminClient
)
credentials = _make_credentials()
client = self._make_client(
project="project-id", credentials=credentials, admin=True
)
instance = client.instance(instance_id=self.INSTANCE_ID)
table = self._make_one(self.TABLE_ID, instance)
client._table_admin_client = table_api
table.create(column_families=column_families, initial_split_keys=split_keys)
families = {
id: ColumnFamily(id, self, rule).to_pb()
for (id, rule) in column_families.items()
}
split = table_admin_messages_v2_pb2.CreateTableRequest.Split
splits = [split(key=split_key) for split_key in split_keys]
table_api.create_table.assert_called_once_with(
parent=self.INSTANCE_NAME,
table=table_pb2.Table(column_families=families),
table_id=self.TABLE_ID,
initial_splits=splits,
)
    def test_create(self):
        """create() with neither families nor split keys."""
        self._create_test_helper()
    def test_create_with_families(self):
        """create() serializes the supplied column families."""
        from google.cloud.bigtable.column_family import MaxVersionsGCRule
        families = {"family": MaxVersionsGCRule(5)}
        self._create_test_helper(column_families=families)
    def test_create_with_split_keys(self):
        """create() forwards the initial split keys."""
        self._create_test_helper(split_keys=[b"split1", b"split2", b"split3"])
    def test_exists(self):
        """exists(): True on a found table, False on NotFound, other errors raise."""
        from google.cloud.bigtable_admin_v2.proto import table_pb2 as table_data_v2_pb2
        from google.cloud.bigtable_admin_v2.proto import (
            bigtable_table_admin_pb2 as table_messages_v1_pb2,
        )
        from google.cloud.bigtable_admin_v2.gapic import (
            bigtable_instance_admin_client,
            bigtable_table_admin_client,
        )
        from google.api_core.exceptions import NotFound
        from google.api_core.exceptions import BadRequest
        table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
        instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient(
            mock.Mock()
        )
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        response_pb = table_messages_v1_pb2.ListTablesResponse(
            tables=[table_data_v2_pb2.Table(name=self.TABLE_NAME)]
        )
        client._table_admin_client = table_api
        client._instance_admin_client = instance_api
        # Three sequential outcomes: found, NotFound, BadRequest.
        bigtable_table_stub = client._table_admin_client.transport
        bigtable_table_stub.get_table.side_effect = [
            response_pb,
            NotFound("testing"),
            BadRequest("testing"),
        ]
        table1 = instance.table(self.TABLE_ID)
        table2 = instance.table("table-id2")
        result = table1.exists()
        self.assertEqual(True, result)
        result = table2.exists()
        self.assertEqual(False, result)
        # Non-NotFound API errors must propagate to the caller.
        with self.assertRaises(BadRequest):
            table2.exists()
    def test_delete(self):
        """delete() returns None on success."""
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
        table_api = mock.create_autospec(
            bigtable_table_admin_client.BigtableTableAdminClient
        )
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)
        client._table_admin_client = table_api
        expected_result = None
        result = table.delete()
        self.assertEqual(result, expected_result)
    def _list_column_families_helper(self):
        """Stub get_table and assert list_column_families() maps id -> family."""
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
        table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)
        COLUMN_FAMILY_ID = "foo"
        column_family = _ColumnFamilyPB()
        response_pb = _TablePB(column_families={COLUMN_FAMILY_ID: column_family})
        client._table_admin_client = table_api
        bigtable_table_stub = client._table_admin_client.transport
        bigtable_table_stub.get_table.side_effect = [response_pb]
        expected_result = {COLUMN_FAMILY_ID: table.column_family(COLUMN_FAMILY_ID)}
        result = table.list_column_families()
        self.assertEqual(result, expected_result)
    def test_list_column_families(self):
        """list_column_families() happy path (see helper)."""
        self._list_column_families_helper()
    def test_get_cluster_states(self):
        """get_cluster_states() maps cluster ids to ClusterState wrappers."""
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
        from google.cloud.bigtable.enums import Table as enum_table
        from google.cloud.bigtable.table import ClusterState
        INITIALIZING = enum_table.ReplicationState.INITIALIZING
        PLANNED_MAINTENANCE = enum_table.ReplicationState.PLANNED_MAINTENANCE
        READY = enum_table.ReplicationState.READY
        table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)
        response_pb = _TablePB(
            cluster_states={
                "cluster-id1": _ClusterStatePB(INITIALIZING),
                "cluster-id2": _ClusterStatePB(PLANNED_MAINTENANCE),
                "cluster-id3": _ClusterStatePB(READY),
            }
        )
        client._table_admin_client = table_api
        bigtable_table_stub = client._table_admin_client.transport
        bigtable_table_stub.get_table.side_effect = [response_pb]
        expected_result = {
            u"cluster-id1": ClusterState(INITIALIZING),
            u"cluster-id2": ClusterState(PLANNED_MAINTENANCE),
            u"cluster-id3": ClusterState(READY),
        }
        result = table.get_cluster_states()
        self.assertEqual(result, expected_result)
    def _read_row_helper(self, chunks, expected_result, app_profile_id=None):
        """Drive ``read_row`` against stubbed chunks and check the request built.

        :param chunks: CellChunk pbs to stream back, or ``None`` for an
            empty response iterator.
        :param expected_result: value ``read_row`` is expected to return.
        :param app_profile_id: optional app profile forwarded to the table.
        """
        from google.cloud._testing import _Monkey
        from google.cloud.bigtable import table as MUT
        from google.cloud.bigtable.row_set import RowSet
        from google.cloud.bigtable_v2.gapic import bigtable_client
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
        from google.cloud.bigtable.row_filters import RowSampleFilter
        data_api = bigtable_client.BigtableClient(mock.Mock())
        table_api = mock.create_autospec(
            bigtable_table_admin_client.BigtableTableAdminClient
        )
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance, app_profile_id=app_profile_id)
        request_pb = object()
        mock_created = []
        # Capture the kwargs _create_row_request is called with.
        def mock_create_row_request(table_name, **kwargs):
            mock_created.append((table_name, kwargs))
            return request_pb
        if chunks is None:
            response_iterator = iter(())
        else:
            response_pb = _ReadRowsResponsePB(chunks=chunks)
            response_iterator = iter([response_pb])
        client._table_data_client = data_api
        client._table_admin_client = table_api
        client._table_data_client.transport.read_rows = mock.Mock(
            side_effect=[response_iterator]
        )
        filter_obj = RowSampleFilter(0.33)
        result = None
        with _Monkey(MUT, _create_row_request=mock_create_row_request):
            result = table.read_row(self.ROW_KEY, filter_=filter_obj)
        # read_row should request exactly one key via a RowSet.
        row_set = RowSet()
        row_set.add_row_key(self.ROW_KEY)
        expected_request = [
            (
                table.name,
                {
                    "end_inclusive": False,
                    "row_set": row_set,
                    "app_profile_id": app_profile_id,
                    "end_key": None,
                    "limit": None,
                    "start_key": None,
                    "filter_": filter_obj,
                },
            )
        ]
        self.assertEqual(result, expected_result)
        self.assertEqual(mock_created, expected_request)
    def test_read_row_miss_no__responses(self):
        """An empty response stream yields None."""
        self._read_row_helper(None, None)
def test_read_row_miss_no_chunks_in_response(self):
chunks = []
self._read_row_helper(chunks, None)
    def test_read_row_complete(self):
        """A single committed chunk is assembled into a PartialRowData."""
        from google.cloud.bigtable.row_data import Cell
        from google.cloud.bigtable.row_data import PartialRowData
        app_profile_id = "app-profile-id"
        chunk = _ReadRowsResponseCellChunkPB(
            row_key=self.ROW_KEY,
            family_name=self.FAMILY_NAME,
            qualifier=self.QUALIFIER,
            timestamp_micros=self.TIMESTAMP_MICROS,
            value=self.VALUE,
            commit_row=True,
        )
        chunks = [chunk]
        # Build the expected row cell-by-cell, mirroring the chunk contents.
        expected_result = PartialRowData(row_key=self.ROW_KEY)
        family = expected_result._cells.setdefault(self.FAMILY_NAME, {})
        column = family.setdefault(self.QUALIFIER, [])
        column.append(Cell.from_pb(chunk))
        self._read_row_helper(chunks, expected_result, app_profile_id)
    def test_read_row_more_than_one_row_returned(self):
        """read_row must reject a stream that commits more than one row."""
        app_profile_id = "app-profile-id"
        chunk_1 = _ReadRowsResponseCellChunkPB(
            row_key=self.ROW_KEY,
            family_name=self.FAMILY_NAME,
            qualifier=self.QUALIFIER,
            timestamp_micros=self.TIMESTAMP_MICROS,
            value=self.VALUE,
            commit_row=True,
        )
        chunk_2 = _ReadRowsResponseCellChunkPB(
            row_key=self.ROW_KEY_2,
            family_name=self.FAMILY_NAME,
            qualifier=self.QUALIFIER,
            timestamp_micros=self.TIMESTAMP_MICROS,
            value=self.VALUE,
            commit_row=True,
        )
        chunks = [chunk_1, chunk_2]
        with self.assertRaises(ValueError):
            self._read_row_helper(chunks, None, app_profile_id)
    def test_read_row_still_partial(self):
        """A stream ending without commit_row leaves a partial row and raises."""
        chunk = _ReadRowsResponseCellChunkPB(
            row_key=self.ROW_KEY,
            family_name=self.FAMILY_NAME,
            qualifier=self.QUALIFIER,
            timestamp_micros=self.TIMESTAMP_MICROS,
            value=self.VALUE,
        )
        chunks = [chunk]
        with self.assertRaises(ValueError):
            self._read_row_helper(chunks, None)
    def test_mutate_rows(self):
        """mutate_rows() returns the status codes produced by the retry worker."""
        from google.rpc.status_pb2 import Status
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
        table_api = mock.create_autospec(
            bigtable_table_admin_client.BigtableTableAdminClient
        )
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        instance = client.instance(instance_id=self.INSTANCE_ID)
        client._table_admin_client = table_api
        table = self._make_one(self.TABLE_ID, instance)
        response = [Status(code=0), Status(code=1)]
        # The worker instance itself is callable; calling it yields statuses.
        mock_worker = mock.Mock(return_value=response)
        with mock.patch(
            "google.cloud.bigtable.table._RetryableMutateRowsWorker",
            new=mock.MagicMock(return_value=mock_worker),
        ):
            statuses = table.mutate_rows([mock.MagicMock(), mock.MagicMock()])
        result = [status.code for status in statuses]
        expected_result = [0, 1]
        self.assertEqual(result, expected_result)
    def test_read_rows(self):
        """read_rows() builds the request and wraps it in a PartialRowsData."""
        from google.cloud._testing import _Monkey
        from google.cloud.bigtable.row_data import PartialRowsData
        from google.cloud.bigtable import table as MUT
        from google.cloud.bigtable_v2.gapic import bigtable_client
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
        data_api = bigtable_client.BigtableClient(mock.Mock())
        table_api = mock.create_autospec(
            bigtable_table_admin_client.BigtableTableAdminClient
        )
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        client._table_data_client = data_api
        client._table_admin_client = table_api
        instance = client.instance(instance_id=self.INSTANCE_ID)
        app_profile_id = "app-profile-id"
        table = self._make_one(self.TABLE_ID, instance, app_profile_id=app_profile_id)
        # Sentinel doubles as both the request and the retry policy.
        request = retry = object()
        mock_created = []
        # Capture the kwargs _create_row_request is called with.
        def mock_create_row_request(table_name, **kwargs):
            mock_created.append((table_name, kwargs))
            return request
        expected_result = PartialRowsData(
            client._table_data_client.transport.read_rows, request, retry
        )
        start_key = b"start-key"
        end_key = b"end-key"
        filter_obj = object()
        limit = 22
        with _Monkey(MUT, _create_row_request=mock_create_row_request):
            result = table.read_rows(
                start_key=start_key,
                end_key=end_key,
                filter_=filter_obj,
                limit=limit,
                retry=retry,
            )
        self.assertEqual(result.rows, expected_result.rows)
        self.assertEqual(result.retry, expected_result.retry)
        created_kwargs = {
            "start_key": start_key,
            "end_key": end_key,
            "filter_": filter_obj,
            "limit": limit,
            "end_inclusive": False,
            "app_profile_id": app_profile_id,
            "row_set": None,
        }
        self.assertEqual(mock_created, [(table.name, created_kwargs)])
    def test_read_retry_rows(self):
        """read_rows() retries DeadlineExceeded streams and yields all rows."""
        from google.cloud.bigtable_v2.gapic import bigtable_client
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
        from google.api_core import retry
        data_api = bigtable_client.BigtableClient(mock.Mock())
        table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        client._table_data_client = data_api
        client._table_admin_client = table_api
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)
        retry_read_rows = retry.Retry(predicate=_read_rows_retry_exception)
        chunk_1 = _ReadRowsResponseCellChunkPB(
            row_key=self.ROW_KEY_1,
            family_name=self.FAMILY_NAME,
            qualifier=self.QUALIFIER,
            timestamp_micros=self.TIMESTAMP_MICROS,
            value=self.VALUE,
            commit_row=True,
        )
        chunk_2 = _ReadRowsResponseCellChunkPB(
            row_key=self.ROW_KEY_2,
            family_name=self.FAMILY_NAME,
            qualifier=self.QUALIFIER,
            timestamp_micros=self.TIMESTAMP_MICROS,
            value=self.VALUE,
            commit_row=True,
        )
        response_1 = _ReadRowsResponseV2([chunk_1])
        response_2 = _ReadRowsResponseV2([chunk_2])
        # Stream 1 fails immediately; stream 2 yields row 1 then fails;
        # stream 3 yields row 2 — the retry layer must stitch these together.
        response_failure_iterator_1 = _MockFailureIterator_1()
        response_failure_iterator_2 = _MockFailureIterator_2([response_1])
        response_iterator = _MockReadRowsIterator(response_2)
        client._table_data_client.transport.read_rows = mock.Mock(
            side_effect=[
                response_failure_iterator_1,
                response_failure_iterator_2,
                response_iterator,
            ]
        )
        rows = []
        for row in table.read_rows(
            start_key=self.ROW_KEY_1, end_key=self.ROW_KEY_2, retry=retry_read_rows
        ):
            rows.append(row)
        result = rows[1]
        self.assertEqual(result.row_key, self.ROW_KEY_2)
    def test_yield_retry_rows(self):
        """Deprecated yield_rows() still retries and warns DeprecationWarning."""
        from google.cloud.bigtable_v2.gapic import bigtable_client
        from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
        import warnings
        data_api = bigtable_client.BigtableClient(mock.Mock())
        table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
        credentials = _make_credentials()
        client = self._make_client(
            project="project-id", credentials=credentials, admin=True
        )
        client._table_data_client = data_api
        client._table_admin_client = table_api
        instance = client.instance(instance_id=self.INSTANCE_ID)
        table = self._make_one(self.TABLE_ID, instance)
        chunk_1 = _ReadRowsResponseCellChunkPB(
            row_key=self.ROW_KEY_1,
            family_name=self.FAMILY_NAME,
            qualifier=self.QUALIFIER,
            timestamp_micros=self.TIMESTAMP_MICROS,
            value=self.VALUE,
            commit_row=True,
        )
        chunk_2 = _ReadRowsResponseCellChunkPB(
            row_key=self.ROW_KEY_2,
            family_name=self.FAMILY_NAME,
            qualifier=self.QUALIFIER,
            timestamp_micros=self.TIMESTAMP_MICROS,
            value=self.VALUE,
            commit_row=True,
        )
        response_1 = _ReadRowsResponseV2([chunk_1])
        response_2 = _ReadRowsResponseV2([chunk_2])
        # Same failure sequence as test_read_retry_rows: fail, partial+fail, ok.
        response_failure_iterator_1 = _MockFailureIterator_1()
        response_failure_iterator_2 = _MockFailureIterator_2([response_1])
        response_iterator = _MockReadRowsIterator(response_2)
        client._table_data_client.transport.read_rows = mock.Mock(
            side_effect=[
                response_failure_iterator_1,
                response_failure_iterator_2,
                response_iterator,
            ]
        )
        rows = []
        with warnings.catch_warnings(record=True) as warned:
            for row in table.yield_rows(
                start_key=self.ROW_KEY_1, end_key=self.ROW_KEY_2
            ):
                rows.append(row)
        # Exactly one deprecation warning should have been emitted.
        self.assertEqual(len(warned), 1)
        self.assertIs(warned[0].category, DeprecationWarning)
        result = rows[1]
        self.assertEqual(result.row_key, self.ROW_KEY_2)
def test_yield_rows_with_row_set(self):
    """``yield_rows`` honors a ``RowSet`` (range + explicit key) and warns.

    Three rows are streamed back; the test checks they arrive in order and
    that the deprecated alias emits exactly one ``DeprecationWarning``.
    """
    from google.cloud.bigtable_v2.gapic import bigtable_client
    from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
    from google.cloud.bigtable.row_set import RowSet
    from google.cloud.bigtable.row_set import RowRange
    import warnings

    data_api = bigtable_client.BigtableClient(mock.Mock())
    table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
    credentials = _make_credentials()
    client = self._make_client(
        project="project-id", credentials=credentials, admin=True
    )
    client._table_data_client = data_api
    client._table_admin_client = table_api
    instance = client.instance(instance_id=self.INSTANCE_ID)
    table = self._make_one(self.TABLE_ID, instance)
    chunk_1 = _ReadRowsResponseCellChunkPB(
        row_key=self.ROW_KEY_1,
        family_name=self.FAMILY_NAME,
        qualifier=self.QUALIFIER,
        timestamp_micros=self.TIMESTAMP_MICROS,
        value=self.VALUE,
        commit_row=True,
    )
    chunk_2 = _ReadRowsResponseCellChunkPB(
        row_key=self.ROW_KEY_2,
        family_name=self.FAMILY_NAME,
        qualifier=self.QUALIFIER,
        timestamp_micros=self.TIMESTAMP_MICROS,
        value=self.VALUE,
        commit_row=True,
    )
    chunk_3 = _ReadRowsResponseCellChunkPB(
        row_key=self.ROW_KEY_3,
        family_name=self.FAMILY_NAME,
        qualifier=self.QUALIFIER,
        timestamp_micros=self.TIMESTAMP_MICROS,
        value=self.VALUE,
        commit_row=True,
    )
    response_1 = _ReadRowsResponseV2([chunk_1])
    response_2 = _ReadRowsResponseV2([chunk_2])
    response_3 = _ReadRowsResponseV2([chunk_3])
    response_iterator = _MockReadRowsIterator(response_1, response_2, response_3)
    client._table_data_client.transport.read_rows = mock.Mock(
        side_effect=[response_iterator]
    )
    rows = []
    # Request rows 1-2 via a range, plus row 3 as an explicit key.
    row_set = RowSet()
    row_set.add_row_range(
        RowRange(start_key=self.ROW_KEY_1, end_key=self.ROW_KEY_2)
    )
    row_set.add_row_key(self.ROW_KEY_3)

    with warnings.catch_warnings(record=True) as warned:
        for row in table.yield_rows(row_set=row_set):
            rows.append(row)

    self.assertEqual(len(warned), 1)
    self.assertIs(warned[0].category, DeprecationWarning)

    self.assertEqual(rows[0].row_key, self.ROW_KEY_1)
    self.assertEqual(rows[1].row_key, self.ROW_KEY_2)
    self.assertEqual(rows[2].row_key, self.ROW_KEY_3)
def test_sample_row_keys(self):
    """``sample_row_keys`` returns the GAPIC response stream unchanged."""
    from google.cloud.bigtable_v2.gapic import bigtable_client
    from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client

    data_api = bigtable_client.BigtableClient(mock.Mock())
    table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
    credentials = _make_credentials()
    client = self._make_client(
        project="project-id", credentials=credentials, admin=True
    )
    client._table_data_client = data_api
    client._table_admin_client = table_api
    instance = client.instance(instance_id=self.INSTANCE_ID)
    table = self._make_one(self.TABLE_ID, instance)
    # Sentinel stands in for the streaming response iterator.
    response_iterator = object()
    inner_api_calls = client._table_data_client._inner_api_calls
    inner_api_calls["sample_row_keys"] = mock.Mock(
        side_effect=[[response_iterator]]
    )
    expected_result = response_iterator
    result = table.sample_row_keys()
    self.assertEqual(result[0], expected_result)
def test_truncate(self):
    """``truncate`` drops all table data via ``drop_row_range`` and returns None."""
    from google.cloud.bigtable_v2.gapic import bigtable_client
    from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client

    data_api = mock.create_autospec(bigtable_client.BigtableClient)
    table_api = mock.create_autospec(
        bigtable_table_admin_client.BigtableTableAdminClient
    )
    credentials = _make_credentials()
    client = self._make_client(
        project="project-id", credentials=credentials, admin=True
    )
    client._table_data_client = data_api
    client._table_admin_client = table_api
    instance = client.instance(instance_id=self.INSTANCE_ID)
    table = self._make_one(self.TABLE_ID, instance)
    expected_result = None  # truncate() has no return value
    # Pin Table.name so the asserted request name is deterministic.
    with mock.patch("google.cloud.bigtable.table.Table.name", new=self.TABLE_NAME):
        result = table.truncate()

    table_api.drop_row_range.assert_called_once_with(
        name=self.TABLE_NAME, delete_all_data_from_table=True
    )

    self.assertEqual(result, expected_result)
def test_truncate_w_timeout(self):
    """Truncating with an explicit timeout still returns ``None``."""
    from google.cloud.bigtable_v2.gapic import bigtable_client
    from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client

    # Wire autospec'd data/admin stubs into a freshly built client.
    mock_data_api = mock.create_autospec(bigtable_client.BigtableClient)
    mock_admin_api = mock.create_autospec(
        bigtable_table_admin_client.BigtableTableAdminClient
    )
    client = self._make_client(
        project="project-id", credentials=_make_credentials(), admin=True
    )
    client._table_data_client = mock_data_api
    client._table_admin_client = mock_admin_api

    target_instance = client.instance(instance_id=self.INSTANCE_ID)
    target_table = self._make_one(self.TABLE_ID, target_instance)

    outcome = target_table.truncate(timeout=120)
    self.assertEqual(outcome, None)
def test_drop_by_prefix(self):
    """``drop_by_prefix`` completes and returns ``None``."""
    from google.cloud.bigtable_v2.gapic import bigtable_client
    from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client

    mock_data_api = mock.create_autospec(bigtable_client.BigtableClient)
    mock_admin_api = mock.create_autospec(
        bigtable_table_admin_client.BigtableTableAdminClient
    )
    client = self._make_client(
        project="project-id", credentials=_make_credentials(), admin=True
    )
    client._table_data_client = mock_data_api
    client._table_admin_client = mock_admin_api

    target_instance = client.instance(instance_id=self.INSTANCE_ID)
    target_table = self._make_one(self.TABLE_ID, target_instance)

    outcome = target_table.drop_by_prefix(row_key_prefix="row-key-prefix")
    self.assertEqual(outcome, None)
def test_drop_by_prefix_w_timeout(self):
    """``drop_by_prefix`` with a timeout completes and returns ``None``."""
    from google.cloud.bigtable_v2.gapic import bigtable_client
    from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client

    mock_data_api = mock.create_autospec(bigtable_client.BigtableClient)
    mock_admin_api = mock.create_autospec(
        bigtable_table_admin_client.BigtableTableAdminClient
    )
    client = self._make_client(
        project="project-id", credentials=_make_credentials(), admin=True
    )
    client._table_data_client = mock_data_api
    client._table_admin_client = mock_admin_api

    target_instance = client.instance(instance_id=self.INSTANCE_ID)
    target_table = self._make_one(self.TABLE_ID, target_instance)

    outcome = target_table.drop_by_prefix(
        row_key_prefix="row-key-prefix", timeout=120
    )
    self.assertEqual(outcome, None)
def test_mutations_batcher_factory(self):
    """``table.mutations_batcher`` forwards its configuration to the batcher."""
    table = self._make_one(self.TABLE_ID, None)

    batcher = table.mutations_batcher(flush_count=100, max_row_bytes=1000)

    self.assertEqual(batcher.table.table_id, self.TABLE_ID)
    self.assertEqual(batcher.flush_count, 100)
    self.assertEqual(batcher.max_row_bytes, 1000)
def test_get_iam_policy(self):
    """``get_iam_policy`` wraps the admin API response in a ``Policy``."""
    from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
    from google.iam.v1 import policy_pb2
    from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE

    credentials = _make_credentials()
    client = self._make_client(
        project="project-id", credentials=credentials, admin=True
    )
    instance = client.instance(instance_id=self.INSTANCE_ID)
    table = self._make_one(self.TABLE_ID, instance)

    version = 1
    etag = b"etag_v1"
    members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"]
    bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": members}]
    iam_policy = policy_pb2.Policy(version=version, etag=etag, bindings=bindings)

    table_api = mock.create_autospec(
        bigtable_table_admin_client.BigtableTableAdminClient
    )
    client._table_admin_client = table_api
    table_api.get_iam_policy.return_value = iam_policy

    result = table.get_iam_policy()

    table_api.get_iam_policy.assert_called_once_with(resource=table.name)
    self.assertEqual(result.version, version)
    self.assertEqual(result.etag, etag)

    # The proto bindings should round-trip into the policy's admin set.
    admins = result.bigtable_admins
    self.assertEqual(len(admins), len(members))
    for found, expected in zip(sorted(admins), sorted(members)):
        self.assertEqual(found, expected)
def test_set_iam_policy(self):
    """``set_iam_policy`` serializes the ``Policy`` and returns the API result."""
    from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
    from google.iam.v1 import policy_pb2
    from google.cloud.bigtable.policy import Policy
    from google.cloud.bigtable.policy import BIGTABLE_ADMIN_ROLE

    credentials = _make_credentials()
    client = self._make_client(
        project="project-id", credentials=credentials, admin=True
    )
    instance = client.instance(instance_id=self.INSTANCE_ID)
    table = self._make_one(self.TABLE_ID, instance)

    version = 1
    etag = b"etag_v1"
    members = ["serviceAccount:service_acc1@test.com", "user:user1@test.com"]
    bindings = [{"role": BIGTABLE_ADMIN_ROLE, "members": sorted(members)}]
    iam_policy_pb = policy_pb2.Policy(version=version, etag=etag, bindings=bindings)

    table_api = mock.create_autospec(
        bigtable_table_admin_client.BigtableTableAdminClient
    )
    client._table_admin_client = table_api
    table_api.set_iam_policy.return_value = iam_policy_pb

    # Build the client-side Policy that should serialize to iam_policy_pb.
    iam_policy = Policy(etag=etag, version=version)
    iam_policy[BIGTABLE_ADMIN_ROLE] = [
        Policy.user("user1@test.com"),
        Policy.service_account("service_acc1@test.com"),
    ]

    result = table.set_iam_policy(iam_policy)

    table_api.set_iam_policy.assert_called_once_with(
        resource=table.name, policy=iam_policy_pb
    )
    self.assertEqual(result.version, version)
    self.assertEqual(result.etag, etag)

    admins = result.bigtable_admins
    self.assertEqual(len(admins), len(members))
    for found, expected in zip(sorted(admins), sorted(members)):
        self.assertEqual(found, expected)
def test_test_iam_permissions(self):
    """``test_iam_permissions`` returns the permissions list from the API."""
    from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client
    from google.iam.v1 import iam_policy_pb2

    credentials = _make_credentials()
    client = self._make_client(
        project="project-id", credentials=credentials, admin=True
    )
    instance = client.instance(instance_id=self.INSTANCE_ID)
    table = self._make_one(self.TABLE_ID, instance)

    permissions = ["bigtable.tables.mutateRows", "bigtable.tables.readRows"]

    response = iam_policy_pb2.TestIamPermissionsResponse(permissions=permissions)

    table_api = mock.create_autospec(
        bigtable_table_admin_client.BigtableTableAdminClient
    )
    table_api.test_iam_permissions.return_value = response
    client._table_admin_client = table_api

    result = table.test_iam_permissions(permissions)

    self.assertEqual(result, permissions)
    table_api.test_iam_permissions.assert_called_once_with(
        resource=table.name, permissions=permissions
    )
def test_backup_factory_defaults(self):
    """``table.backup`` with only an ID leaves all optional state unset."""
    from google.cloud.bigtable.backup import Backup

    # NOTE(review): ``_make_one`` constructs a Table, so ``instance`` here
    # is a Table object standing in for an Instance -- backup() only needs
    # it as an opaque parent. Confirm this stand-in is intentional.
    instance = self._make_one(self.INSTANCE_ID, None)
    table = self._make_one(self.TABLE_ID, instance)
    backup = table.backup(self.BACKUP_ID)

    self.assertIsInstance(backup, Backup)
    self.assertEqual(backup.backup_id, self.BACKUP_ID)
    self.assertIs(backup._instance, instance)
    self.assertIsNone(backup._cluster)
    self.assertEqual(backup.table_id, self.TABLE_ID)
    self.assertIsNone(backup._expire_time)

    # Server-populated fields must all start out unset.
    self.assertIsNone(backup._parent)
    self.assertIsNone(backup._source_table)
    self.assertIsNone(backup._start_time)
    self.assertIsNone(backup._end_time)
    self.assertIsNone(backup._size_bytes)
    self.assertIsNone(backup._state)
def test_backup_factory_non_defaults(self):
    """``table.backup`` stores explicit cluster-id and expire-time options."""
    import datetime
    from google.cloud._helpers import UTC
    from google.cloud.bigtable.backup import Backup

    # NOTE(review): ``_make_one`` constructs a Table, so ``instance`` here
    # is a Table object standing in for an Instance -- confirm intentional.
    instance = self._make_one(self.INSTANCE_ID, None)
    table = self._make_one(self.TABLE_ID, instance)
    timestamp = datetime.datetime.utcnow().replace(tzinfo=UTC)
    backup = table.backup(
        self.BACKUP_ID, cluster_id=self.CLUSTER_ID, expire_time=timestamp,
    )

    self.assertIsInstance(backup, Backup)
    self.assertEqual(backup.backup_id, self.BACKUP_ID)
    self.assertIs(backup._instance, instance)
    # Fixed: the original asserted ``backup_id`` twice and used ``assertIs``
    # on a plain string, which only passed thanks to CPython interning.
    self.assertEqual(backup._cluster, self.CLUSTER_ID)
    self.assertEqual(backup.table_id, self.TABLE_ID)
    self.assertEqual(backup._expire_time, timestamp)

    # Server-populated fields must still be unset.
    self.assertIsNone(backup._start_time)
    self.assertIsNone(backup._end_time)
    self.assertIsNone(backup._size_bytes)
    self.assertIsNone(backup._state)
def _list_backups_helper(self, cluster_id=None, filter_=None, **kwargs):
    """Shared driver for ``table.list_backups`` tests.

    Verifies the request built for ``list_backups``: the parent path
    (cluster-scoped or the ``-`` wildcard), the composed filter that always
    restricts to this table as source, and pass-through of extra kwargs.
    """
    from google.cloud.bigtable_admin_v2.gapic import (
        bigtable_instance_admin_client,
        bigtable_table_admin_client,
    )
    from google.cloud.bigtable_admin_v2.proto import (
        bigtable_table_admin_pb2,
        table_pb2,
    )
    from google.cloud.bigtable.backup import Backup

    instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient
    table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
    client = self._make_client(
        project=self.PROJECT_ID, credentials=_make_credentials(), admin=True
    )
    instance = client.instance(instance_id=self.INSTANCE_ID)
    table = self._make_one(self.TABLE_ID, instance)
    client._instance_admin_client = instance_api
    client._table_admin_client = table_api

    parent = self.INSTANCE_NAME + "/clusters/cluster"
    backups_pb = bigtable_table_admin_pb2.ListBackupsResponse(
        backups=[
            table_pb2.Backup(name=parent + "/backups/op1"),
            table_pb2.Backup(name=parent + "/backups/op2"),
            table_pb2.Backup(name=parent + "/backups/op3"),
        ]
    )
    api = table_api._inner_api_calls["list_backups"] = mock.Mock(
        return_value=backups_pb
    )

    # list_backups always scopes the filter to this table's backups; an
    # explicit filter is AND-ed onto that base.
    backups_filter = "source_table:{}".format(self.TABLE_NAME)
    if filter_:
        backups_filter = "({}) AND ({})".format(backups_filter, filter_)

    backups = table.list_backups(cluster_id=cluster_id, filter_=filter_, **kwargs)

    for backup in backups:
        self.assertIsInstance(backup, Backup)

    if not cluster_id:
        cluster_id = "-"  # "-" is the all-clusters wildcard
    parent = "{}/clusters/{}".format(self.INSTANCE_NAME, cluster_id)
    expected_metadata = [
        ("x-goog-request-params", "parent={}".format(parent)),
    ]
    api.assert_called_once_with(
        bigtable_table_admin_pb2.ListBackupsRequest(
            parent=parent, filter=backups_filter, **kwargs
        ),
        retry=mock.ANY,
        timeout=mock.ANY,
        metadata=expected_metadata,
    )
def test_list_backups_defaults(self):
    """list_backups with no options: wildcard cluster, base filter only."""
    self._list_backups_helper()
def test_list_backups_w_options(self):
    """list_backups forwards cluster, filter, ordering, and paging options."""
    self._list_backups_helper(
        cluster_id="cluster", filter_="filter", order_by="order_by", page_size=10
    )
def _restore_helper(self, backup_name=None):
    """Shared driver for ``table.restore`` tests.

    :param backup_name: fully-qualified backup name. When given, exercise
        the ``backup_name=`` keyword form; otherwise exercise the
        (cluster_id, backup_id) positional form. Both must resolve to the
        same ``restore_table`` request.
    """
    from google.cloud.bigtable_admin_v2 import BigtableTableAdminClient
    from google.cloud.bigtable_admin_v2.gapic import bigtable_instance_admin_client
    from google.cloud.bigtable.instance import Instance

    op_future = object()  # sentinel long-running-operation future
    instance_api = bigtable_instance_admin_client.BigtableInstanceAdminClient
    client = mock.Mock(project=self.PROJECT_ID, instance_admin_client=instance_api)
    instance = Instance(self.INSTANCE_ID, client=client)
    table = self._make_one(self.TABLE_ID, instance)
    api = client.table_admin_client = mock.create_autospec(
        BigtableTableAdminClient, instance=True
    )
    api.restore_table.return_value = op_future

    if backup_name:
        # Fixed: use the helper's own parameter instead of hard-coding
        # ``self.BACKUP_NAME`` (they were coincidentally equal before).
        future = table.restore(self.TABLE_ID, backup_name=backup_name)
    else:
        future = table.restore(self.TABLE_ID, self.CLUSTER_ID, self.BACKUP_ID)

    self.assertIs(future, op_future)
    api.restore_table.assert_called_once_with(
        parent=self.INSTANCE_NAME, table_id=self.TABLE_ID, backup=self.BACKUP_NAME,
    )
def test_restore_table_w_backup_id(self):
    """restore via (cluster_id, backup_id) positional form."""
    self._restore_helper()
def test_restore_table_w_backup_name(self):
    """restore via the fully-qualified backup_name keyword form."""
    self._restore_helper(backup_name=self.BACKUP_NAME)
class Test__RetryableMutateRowsWorker(unittest.TestCase):
    """Unit tests for ``google.cloud.bigtable.table._RetryableMutateRowsWorker``."""

    from grpc import StatusCode

    PROJECT_ID = "project-id"
    INSTANCE_ID = "instance-id"
    INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID
    TABLE_ID = "table-id"

    # Numeric gRPC status codes used to simulate per-row mutation outcomes;
    # ``.value`` is a (code, description) tuple, so ``[0]`` is the int code.
    SUCCESS = StatusCode.OK.value[0]
    RETRYABLE_1 = StatusCode.DEADLINE_EXCEEDED.value[0]
    RETRYABLE_2 = StatusCode.ABORTED.value[0]
    NON_RETRYABLE = StatusCode.CANCELLED.value[0]
@staticmethod
def _get_target_class_for_worker():
    """Return the worker class under test (imported lazily)."""
    from google.cloud.bigtable.table import _RetryableMutateRowsWorker

    return _RetryableMutateRowsWorker
def _make_worker(self, *args, **kwargs):
    """Construct a ``_RetryableMutateRowsWorker`` with the given arguments."""
    return self._get_target_class_for_worker()(*args, **kwargs)
@staticmethod
def _get_target_class_for_table():
    """Return the ``Table`` class (imported lazily)."""
    from google.cloud.bigtable.table import Table

    return Table
def _make_table(self, *args, **kwargs):
    """Construct a ``Table`` with the given arguments."""
    return self._get_target_class_for_table()(*args, **kwargs)
@staticmethod
def _get_target_client_class():
    """Return the Bigtable ``Client`` class (imported lazily)."""
    from google.cloud.bigtable.client import Client

    return Client
def _make_client(self, *args, **kwargs):
    """Construct a Bigtable ``Client`` with the given arguments."""
    return self._get_target_client_class()(*args, **kwargs)
def _make_responses_statuses(self, codes):
    """Build one ``google.rpc.Status`` proto per status code, in order."""
    from google.rpc.status_pb2 import Status

    return [Status(code=code) for code in codes]
def _make_responses(self, codes):
    """Build a ``MutateRowsResponse`` whose entries carry ``codes`` in order."""
    import six
    from google.cloud.bigtable_v2.proto.bigtable_pb2 import MutateRowsResponse
    from google.rpc.status_pb2 import Status

    entries = []
    for index in six.moves.xrange(len(codes)):
        entry = MutateRowsResponse.Entry(
            index=index, status=Status(code=codes[index])
        )
        entries.append(entry)
    return MutateRowsResponse(entries=entries)
def test_callable_empty_rows(self):
    """A worker with no rows to mutate returns an empty status list."""
    from google.cloud.bigtable_v2.gapic import bigtable_client
    from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client

    data_api = mock.create_autospec(bigtable_client.BigtableClient)
    table_api = mock.create_autospec(
        bigtable_table_admin_client.BigtableTableAdminClient
    )
    credentials = _make_credentials()
    client = self._make_client(
        project="project-id", credentials=credentials, admin=True
    )
    client._table_data_client = data_api
    client._table_admin_client = table_api
    instance = client.instance(instance_id=self.INSTANCE_ID)
    table = self._make_table(self.TABLE_ID, instance)

    worker = self._make_worker(client, table.name, [])
    statuses = worker()

    self.assertEqual(len(statuses), 0)
def test_callable_no_retry_strategy(self):
    """With ``retry=None`` the worker makes exactly one mutate_rows call.

    Row statuses come back verbatim -- the retryable DEADLINE_EXCEEDED is
    NOT retried because no retry strategy was supplied.
    """
    from google.cloud.bigtable.row import DirectRow
    from google.cloud.bigtable_v2.gapic import bigtable_client
    from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client

    data_api = bigtable_client.BigtableClient(mock.Mock())
    table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
    credentials = _make_credentials()
    client = self._make_client(
        project="project-id", credentials=credentials, admin=True
    )
    client._table_data_client = data_api
    client._table_admin_client = table_api
    instance = client.instance(instance_id=self.INSTANCE_ID)
    table = self._make_table(self.TABLE_ID, instance)

    row_1 = DirectRow(row_key=b"row_key", table=table)
    row_1.set_cell("cf", b"col", b"value1")
    row_2 = DirectRow(row_key=b"row_key_2", table=table)
    row_2.set_cell("cf", b"col", b"value2")
    row_3 = DirectRow(row_key=b"row_key_3", table=table)
    row_3.set_cell("cf", b"col", b"value3")

    response = self._make_responses(
        [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE]
    )

    with mock.patch("google.cloud.bigtable.table.wrap_method") as patched:
        patched.return_value = mock.Mock(return_value=[response])

        worker = self._make_worker(client, table.name, [row_1, row_2, row_3])
        statuses = worker(retry=None)

    result = [status.code for status in statuses]
    expected_result = [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE]

    client._table_data_client._inner_api_calls["mutate_rows"].assert_called_once()
    self.assertEqual(result, expected_result)
def test_callable_retry(self):
    """With a retry strategy, retryable rows are re-sent and can succeed.

    First attempt: SUCCESS / RETRYABLE / NON_RETRYABLE. Second attempt
    resends only the retryable row, which succeeds -- so the final result
    is SUCCESS / SUCCESS / NON_RETRYABLE over exactly two RPC calls.
    """
    from google.cloud.bigtable.row import DirectRow
    from google.cloud.bigtable.table import DEFAULT_RETRY
    from google.cloud.bigtable_v2.gapic import bigtable_client
    from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client

    data_api = bigtable_client.BigtableClient(mock.Mock())
    table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
    credentials = _make_credentials()
    client = self._make_client(
        project="project-id", credentials=credentials, admin=True
    )
    client._table_data_client = data_api
    client._table_admin_client = table_api
    instance = client.instance(instance_id=self.INSTANCE_ID)
    table = self._make_table(self.TABLE_ID, instance)

    row_1 = DirectRow(row_key=b"row_key", table=table)
    row_1.set_cell("cf", b"col", b"value1")
    row_2 = DirectRow(row_key=b"row_key_2", table=table)
    row_2.set_cell("cf", b"col", b"value2")
    row_3 = DirectRow(row_key=b"row_key_3", table=table)
    row_3.set_cell("cf", b"col", b"value3")

    response_1 = self._make_responses(
        [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE]
    )
    response_2 = self._make_responses([self.SUCCESS])

    # Two RPC attempts: the second covers only the single retryable row.
    client._table_data_client._inner_api_calls["mutate_rows"] = mock.Mock(
        side_effect=[[response_1], [response_2]]
    )

    # Short delay so the test does not sleep for the default backoff.
    retry = DEFAULT_RETRY.with_delay(initial=0.1)
    worker = self._make_worker(client, table.name, [row_1, row_2, row_3])
    statuses = worker(retry=retry)

    result = [status.code for status in statuses]
    expected_result = [self.SUCCESS, self.SUCCESS, self.NON_RETRYABLE]

    self.assertEqual(
        client._table_data_client._inner_api_calls["mutate_rows"].call_count, 2
    )
    self.assertEqual(result, expected_result)
def test_do_mutate_retryable_rows_empty_rows(self):
    """``_do_mutate_retryable_rows`` on an empty row list returns no statuses."""
    from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client

    table_api = mock.create_autospec(
        bigtable_table_admin_client.BigtableTableAdminClient
    )
    credentials = _make_credentials()
    client = self._make_client(
        project="project-id", credentials=credentials, admin=True
    )
    client._table_admin_client = table_api
    instance = client.instance(instance_id=self.INSTANCE_ID)
    table = self._make_table(self.TABLE_ID, instance)

    worker = self._make_worker(client, table.name, [])
    statuses = worker._do_mutate_retryable_rows()

    self.assertEqual(len(statuses), 0)
def test_do_mutate_retryable_rows(self):
    """When no row needs a retry, statuses are returned without raising."""
    from google.cloud.bigtable.row import DirectRow
    from google.cloud.bigtable_v2.gapic import bigtable_client
    from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client

    data_api = bigtable_client.BigtableClient(mock.Mock())
    table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
    credentials = _make_credentials()
    client = self._make_client(
        project="project-id", credentials=credentials, admin=True
    )
    client._table_data_client = data_api
    client._table_admin_client = table_api
    instance = client.instance(instance_id=self.INSTANCE_ID)
    table = self._make_table(self.TABLE_ID, instance)

    row_1 = DirectRow(row_key=b"row_key", table=table)
    row_1.set_cell("cf", b"col", b"value1")
    row_2 = DirectRow(row_key=b"row_key_2", table=table)
    row_2.set_cell("cf", b"col", b"value2")

    # SUCCESS and NON_RETRYABLE: neither triggers a retry.
    response = self._make_responses([self.SUCCESS, self.NON_RETRYABLE])

    inner_api_calls = client._table_data_client._inner_api_calls
    inner_api_calls["mutate_rows"] = mock.Mock(side_effect=[[response]])

    worker = self._make_worker(client, table.name, [row_1, row_2])
    statuses = worker._do_mutate_retryable_rows()

    result = [status.code for status in statuses]
    expected_result = [self.SUCCESS, self.NON_RETRYABLE]

    self.assertEqual(result, expected_result)
def test_do_mutate_retryable_rows_retry(self):
    """A retryable status makes ``_do_mutate_retryable_rows`` raise for retry.

    ``_BigtableRetryableError`` signals the surrounding retry wrapper to
    call again; the per-row statuses so far are kept on the worker.
    """
    from google.cloud.bigtable.row import DirectRow
    from google.cloud.bigtable.table import _BigtableRetryableError
    from google.cloud.bigtable_v2.gapic import bigtable_client
    from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client

    data_api = bigtable_client.BigtableClient(mock.Mock())
    table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
    credentials = _make_credentials()
    client = self._make_client(
        project="project-id", credentials=credentials, admin=True
    )
    client._table_data_client = data_api
    client._table_admin_client = table_api
    instance = client.instance(instance_id=self.INSTANCE_ID)
    table = self._make_table(self.TABLE_ID, instance)

    row_1 = DirectRow(row_key=b"row_key", table=table)
    row_1.set_cell("cf", b"col", b"value1")
    row_2 = DirectRow(row_key=b"row_key_2", table=table)
    row_2.set_cell("cf", b"col", b"value2")
    row_3 = DirectRow(row_key=b"row_key_3", table=table)
    row_3.set_cell("cf", b"col", b"value3")

    response = self._make_responses(
        [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE]
    )

    inner_api_calls = client._table_data_client._inner_api_calls
    inner_api_calls["mutate_rows"] = mock.Mock(side_effect=[[response]])

    worker = self._make_worker(client, table.name, [row_1, row_2, row_3])

    with self.assertRaises(_BigtableRetryableError):
        worker._do_mutate_retryable_rows()

    statuses = worker.responses_statuses
    result = [status.code for status in statuses]
    expected_result = [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE]

    self.assertEqual(result, expected_result)
def test_do_mutate_retryable_rows_second_retry(self):
    """A second attempt resends only retryable rows and merges results.

    Prior statuses: SUCCESS / RETRYABLE_1 / NON_RETRYABLE / RETRYABLE_2, so
    only rows 2 and 4 are resent. The RPC answers SUCCESS / RETRYABLE_1 for
    them; row 4 is still retryable, so the call raises again with merged
    statuses SUCCESS / SUCCESS / NON_RETRYABLE / RETRYABLE_1.
    """
    from google.cloud.bigtable.row import DirectRow
    from google.cloud.bigtable.table import _BigtableRetryableError
    from google.cloud.bigtable_v2.gapic import bigtable_client
    from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client

    data_api = bigtable_client.BigtableClient(mock.Mock())
    table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
    credentials = _make_credentials()
    client = self._make_client(
        project="project-id", credentials=credentials, admin=True
    )
    client._table_data_client = data_api
    client._table_admin_client = table_api
    instance = client.instance(instance_id=self.INSTANCE_ID)
    table = self._make_table(self.TABLE_ID, instance)

    row_1 = DirectRow(row_key=b"row_key", table=table)
    row_1.set_cell("cf", b"col", b"value1")
    row_2 = DirectRow(row_key=b"row_key_2", table=table)
    row_2.set_cell("cf", b"col", b"value2")
    row_3 = DirectRow(row_key=b"row_key_3", table=table)
    row_3.set_cell("cf", b"col", b"value3")
    row_4 = DirectRow(row_key=b"row_key_4", table=table)
    row_4.set_cell("cf", b"col", b"value4")

    response = self._make_responses([self.SUCCESS, self.RETRYABLE_1])

    inner_api_calls = client._table_data_client._inner_api_calls
    inner_api_calls["mutate_rows"] = mock.Mock(side_effect=[[response]])

    worker = self._make_worker(client, table.name, [row_1, row_2, row_3, row_4])
    # Simulate the state left behind by a previous attempt.
    worker.responses_statuses = self._make_responses_statuses(
        [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE, self.RETRYABLE_2]
    )

    with self.assertRaises(_BigtableRetryableError):
        worker._do_mutate_retryable_rows()

    statuses = worker.responses_statuses
    result = [status.code for status in statuses]
    expected_result = [
        self.SUCCESS,
        self.SUCCESS,
        self.NON_RETRYABLE,
        self.RETRYABLE_1,
    ]

    self.assertEqual(result, expected_result)
def test_do_mutate_retryable_rows_second_try(self):
    """A second attempt that resolves all retryables returns final statuses.

    Rows 2 and 4 (previously retryable) are resent; the RPC answers
    NON_RETRYABLE / SUCCESS, leaving no retryable rows -- so the call
    returns the merged statuses instead of raising.
    """
    from google.cloud.bigtable.row import DirectRow
    from google.cloud.bigtable_v2.gapic import bigtable_client
    from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client

    data_api = bigtable_client.BigtableClient(mock.Mock())
    table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
    credentials = _make_credentials()
    client = self._make_client(
        project="project-id", credentials=credentials, admin=True
    )
    client._table_data_client = data_api
    client._table_admin_client = table_api
    instance = client.instance(instance_id=self.INSTANCE_ID)
    table = self._make_table(self.TABLE_ID, instance)

    row_1 = DirectRow(row_key=b"row_key", table=table)
    row_1.set_cell("cf", b"col", b"value1")
    row_2 = DirectRow(row_key=b"row_key_2", table=table)
    row_2.set_cell("cf", b"col", b"value2")
    row_3 = DirectRow(row_key=b"row_key_3", table=table)
    row_3.set_cell("cf", b"col", b"value3")
    row_4 = DirectRow(row_key=b"row_key_4", table=table)
    row_4.set_cell("cf", b"col", b"value4")

    response = self._make_responses([self.NON_RETRYABLE, self.SUCCESS])

    inner_api_calls = client._table_data_client._inner_api_calls
    inner_api_calls["mutate_rows"] = mock.Mock(side_effect=[[response]])

    worker = self._make_worker(client, table.name, [row_1, row_2, row_3, row_4])
    # Simulate the state left behind by a previous attempt.
    worker.responses_statuses = self._make_responses_statuses(
        [self.SUCCESS, self.RETRYABLE_1, self.NON_RETRYABLE, self.RETRYABLE_2]
    )

    statuses = worker._do_mutate_retryable_rows()

    result = [status.code for status in statuses]
    expected_result = [
        self.SUCCESS,
        self.NON_RETRYABLE,
        self.NON_RETRYABLE,
        self.SUCCESS,
    ]

    self.assertEqual(result, expected_result)
def test_do_mutate_retryable_rows_second_try_no_retryable(self):
    """With no retryable rows left, the worker returns without another RPC."""
    from google.cloud.bigtable.row import DirectRow
    from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client

    table_api = mock.create_autospec(
        bigtable_table_admin_client.BigtableTableAdminClient
    )
    credentials = _make_credentials()
    client = self._make_client(
        project="project-id", credentials=credentials, admin=True
    )
    client._table_admin_client = table_api
    instance = client.instance(instance_id=self.INSTANCE_ID)
    table = self._make_table(self.TABLE_ID, instance)

    row_1 = DirectRow(row_key=b"row_key", table=table)
    row_1.set_cell("cf", b"col", b"value1")
    row_2 = DirectRow(row_key=b"row_key_2", table=table)
    row_2.set_cell("cf", b"col", b"value2")

    worker = self._make_worker(client, table.name, [row_1, row_2])
    # All rows already terminal: nothing to resend.
    worker.responses_statuses = self._make_responses_statuses(
        [self.SUCCESS, self.NON_RETRYABLE]
    )

    statuses = worker._do_mutate_retryable_rows()

    result = [status.code for status in statuses]
    expected_result = [self.SUCCESS, self.NON_RETRYABLE]

    self.assertEqual(result, expected_result)
def test_do_mutate_retryable_rows_mismatch_num_responses(self):
    """Fewer response entries than sent rows raises ``RuntimeError``."""
    from google.cloud.bigtable.row import DirectRow
    from google.cloud.bigtable_v2.gapic import bigtable_client
    from google.cloud.bigtable_admin_v2.gapic import bigtable_table_admin_client

    data_api = bigtable_client.BigtableClient(mock.Mock())
    table_api = bigtable_table_admin_client.BigtableTableAdminClient(mock.Mock())
    credentials = _make_credentials()
    client = self._make_client(
        project="project-id", credentials=credentials, admin=True
    )
    client._table_data_client = data_api
    client._table_admin_client = table_api
    instance = client.instance(instance_id=self.INSTANCE_ID)
    table = self._make_table(self.TABLE_ID, instance)

    row_1 = DirectRow(row_key=b"row_key", table=table)
    row_1.set_cell("cf", b"col", b"value1")
    row_2 = DirectRow(row_key=b"row_key_2", table=table)
    row_2.set_cell("cf", b"col", b"value2")

    # Two rows sent, but only one status comes back.
    response = self._make_responses([self.SUCCESS])

    inner_api_calls = client._table_data_client._inner_api_calls
    inner_api_calls["mutate_rows"] = mock.Mock(side_effect=[[response]])

    worker = self._make_worker(client, table.name, [row_1, row_2])
    with self.assertRaises(RuntimeError):
        worker._do_mutate_retryable_rows()
class Test__create_row_request(unittest.TestCase):
    """Unit tests for the module-level ``_create_row_request`` helper."""

    def _call_fut(
        self,
        table_name,
        start_key=None,
        end_key=None,
        filter_=None,
        limit=None,
        end_inclusive=False,
        app_profile_id=None,
        row_set=None,
    ):
        """Invoke the function under test, forwarding all options verbatim."""
        from google.cloud.bigtable.table import _create_row_request

        return _create_row_request(
            table_name,
            start_key=start_key,
            end_key=end_key,
            filter_=filter_,
            limit=limit,
            end_inclusive=end_inclusive,
            app_profile_id=app_profile_id,
            row_set=row_set,
        )
def test_table_name_only(self):
    """With no options, the request carries only the table name."""
    expected = _ReadRowsRequestPB(table_name="table_name")
    self.assertEqual(self._call_fut("table_name"), expected)
def test_row_range_row_set_conflict(self):
    """Supplying both a row range and a row set raises ``ValueError``."""
    with self.assertRaises(ValueError):
        self._call_fut(None, end_key=object(), row_set=object())
def test_row_range_start_key(self):
table_name = "table_name"
start_key = b"start_key"
result = self._call_fut(table_name, start_key=start_key)
expected_result = _ReadRowsRequestPB(table_name=table_name)
expected_result.rows.row_ranges.add(start_key_closed=start_key)
self.assertEqual(result, expected_result)
def test_row_range_end_key(self):
table_name = "table_name"
end_key = b"end_key"
result = self._call_fut(table_name, end_key=end_key)
expected_result = _ReadRowsRequestPB(table_name=table_name)
expected_result.rows.row_ranges.add(end_key_open=end_key)
self.assertEqual(result, expected_result)
def test_row_range_both_keys(self):
table_name = "table_name"
start_key = b"start_key"
end_key = b"end_key"
result = self._call_fut(table_name, start_key=start_key, end_key=end_key)
expected_result = _ReadRowsRequestPB(table_name=table_name)
expected_result.rows.row_ranges.add(
start_key_closed=start_key, end_key_open=end_key
)
self.assertEqual(result, expected_result)
def test_row_range_both_keys_inclusive(self):
table_name = "table_name"
start_key = b"start_key"
end_key = b"end_key"
result = self._call_fut(
table_name, start_key=start_key, end_key=end_key, end_inclusive=True
)
expected_result = _ReadRowsRequestPB(table_name=table_name)
expected_result.rows.row_ranges.add(
start_key_closed=start_key, end_key_closed=end_key
)
self.assertEqual(result, expected_result)
def test_with_filter(self):
from google.cloud.bigtable.row_filters import RowSampleFilter
table_name = "table_name"
row_filter = RowSampleFilter(0.33)
result = self._call_fut(table_name, filter_=row_filter)
expected_result = _ReadRowsRequestPB(
table_name=table_name, filter=row_filter.to_pb()
)
self.assertEqual(result, expected_result)
def test_with_limit(self):
table_name = "table_name"
limit = 1337
result = self._call_fut(table_name, limit=limit)
expected_result = _ReadRowsRequestPB(table_name=table_name, rows_limit=limit)
self.assertEqual(result, expected_result)
def test_with_row_set(self):
from google.cloud.bigtable.row_set import RowSet
table_name = "table_name"
row_set = RowSet()
result = self._call_fut(table_name, row_set=row_set)
expected_result = _ReadRowsRequestPB(table_name=table_name)
self.assertEqual(result, expected_result)
def test_with_app_profile_id(self):
table_name = "table_name"
limit = 1337
app_profile_id = "app-profile-id"
result = self._call_fut(table_name, limit=limit, app_profile_id=app_profile_id)
expected_result = _ReadRowsRequestPB(
table_name=table_name, rows_limit=limit, app_profile_id=app_profile_id
)
self.assertEqual(result, expected_result)
def _ReadRowsRequestPB(*args, **kw):
    """Build a ``ReadRowsRequest`` protobuf for use in test expectations."""
    from google.cloud.bigtable_v2.proto import bigtable_pb2

    return bigtable_pb2.ReadRowsRequest(*args, **kw)
class Test_ClusterState(unittest.TestCase):
    """Unit tests for ``google.cloud.bigtable.table.ClusterState``."""

    def test___eq__(self):
        """Two states wrapping the same replication state compare equal."""
        from google.cloud.bigtable.enums import Table as enum_table
        from google.cloud.bigtable.table import ClusterState

        READY = enum_table.ReplicationState.READY
        state1 = ClusterState(READY)
        state2 = ClusterState(READY)
        self.assertEqual(state1, state2)

    def test___eq__type_differ(self):
        """A ``ClusterState`` never equals an object of another type."""
        from google.cloud.bigtable.enums import Table as enum_table
        from google.cloud.bigtable.table import ClusterState

        READY = enum_table.ReplicationState.READY
        state1 = ClusterState(READY)
        state2 = object()
        self.assertNotEqual(state1, state2)

    def test___ne__same_value(self):
        """``!=`` is False for states with the same replication state."""
        from google.cloud.bigtable.enums import Table as enum_table
        from google.cloud.bigtable.table import ClusterState

        READY = enum_table.ReplicationState.READY
        state1 = ClusterState(READY)
        state2 = ClusterState(READY)
        comparison_val = state1 != state2
        self.assertFalse(comparison_val)

    def test___ne__(self):
        """States wrapping different replication states are unequal."""
        from google.cloud.bigtable.enums import Table as enum_table
        from google.cloud.bigtable.table import ClusterState

        READY = enum_table.ReplicationState.READY
        INITIALIZING = enum_table.ReplicationState.INITIALIZING
        state1 = ClusterState(READY)
        state2 = ClusterState(INITIALIZING)
        self.assertNotEqual(state1, state2)

    def test__repr__(self):
        """``str()`` yields the state's symbolic name and
        ``replication_state`` round-trips the constructor argument."""
        from google.cloud.bigtable.enums import Table as enum_table
        from google.cloud.bigtable.table import ClusterState

        STATE_NOT_KNOWN = enum_table.ReplicationState.STATE_NOT_KNOWN
        INITIALIZING = enum_table.ReplicationState.INITIALIZING
        PLANNED_MAINTENANCE = enum_table.ReplicationState.PLANNED_MAINTENANCE
        UNPLANNED_MAINTENANCE = enum_table.ReplicationState.UNPLANNED_MAINTENANCE
        READY = enum_table.ReplicationState.READY

        # Expected symbolic name for each enum value.
        replication_dict = {
            STATE_NOT_KNOWN: "STATE_NOT_KNOWN",
            INITIALIZING: "INITIALIZING",
            PLANNED_MAINTENANCE: "PLANNED_MAINTENANCE",
            UNPLANNED_MAINTENANCE: "UNPLANNED_MAINTENANCE",
            READY: "READY",
        }

        # str(ClusterState) must render the symbolic name.
        self.assertEqual(
            str(ClusterState(STATE_NOT_KNOWN)), replication_dict[STATE_NOT_KNOWN]
        )
        self.assertEqual(
            str(ClusterState(INITIALIZING)), replication_dict[INITIALIZING]
        )
        self.assertEqual(
            str(ClusterState(PLANNED_MAINTENANCE)),
            replication_dict[PLANNED_MAINTENANCE],
        )
        self.assertEqual(
            str(ClusterState(UNPLANNED_MAINTENANCE)),
            replication_dict[UNPLANNED_MAINTENANCE],
        )
        self.assertEqual(str(ClusterState(READY)), replication_dict[READY])

        # The constructor argument is stored unchanged.
        self.assertEqual(
            ClusterState(STATE_NOT_KNOWN).replication_state, STATE_NOT_KNOWN
        )
        self.assertEqual(ClusterState(INITIALIZING).replication_state, INITIALIZING)
        self.assertEqual(
            ClusterState(PLANNED_MAINTENANCE).replication_state, PLANNED_MAINTENANCE
        )
        self.assertEqual(
            ClusterState(UNPLANNED_MAINTENANCE).replication_state, UNPLANNED_MAINTENANCE
        )
        self.assertEqual(ClusterState(READY).replication_state, READY)
def _ReadRowsResponseCellChunkPB(*args, **kw):
    """Build a ``ReadRowsResponse.CellChunk`` protobuf.

    ``family_name`` and ``qualifier`` are wrapped values on the proto, so
    they are extracted from the kwargs and assigned after construction.
    """
    from google.cloud.bigtable_v2.proto import bigtable_pb2

    family = kw.pop("family_name")
    qualifier = kw.pop("qualifier")
    chunk = bigtable_pb2.ReadRowsResponse.CellChunk(*args, **kw)
    chunk.family_name.value = family
    chunk.qualifier.value = qualifier
    return chunk
def _ReadRowsResponsePB(*args, **kw):
    """Build a ``ReadRowsResponse`` protobuf."""
    from google.cloud.bigtable_v2.proto import bigtable_pb2

    return bigtable_pb2.ReadRowsResponse(*args, **kw)
def _mutate_rows_request_pb(*args, **kw):
    """Build a ``MutateRowsRequest`` protobuf."""
    from google.cloud.bigtable_v2.proto import bigtable_pb2

    return bigtable_pb2.MutateRowsRequest(*args, **kw)
class _MockReadRowsIterator(object):
def __init__(self, *values):
self.iter_values = iter(values)
def next(self):
return next(self.iter_values)
__next__ = next
class _MockFailureIterator_1(object):
    """Iterator test double whose very first read times out."""

    def __next__(self):
        raise DeadlineExceeded("Failed to read from server")

    # Python-2-style alias kept so callers can invoke ``.next()`` directly.
    next = __next__
class _MockFailureIterator_2(object):
def __init__(self, *values):
self.iter_values = values[0]
self.calls = 0
def next(self):
self.calls += 1
if self.calls == 1:
return self.iter_values[0]
else:
raise DeadlineExceeded("Failed to read from server")
__next__ = next
class _ReadRowsResponseV2(object):
def __init__(self, chunks, last_scanned_row_key=""):
self.chunks = chunks
self.last_scanned_row_key = last_scanned_row_key
def _TablePB(*args, **kw):
    """Build an admin-API ``Table`` protobuf."""
    from google.cloud.bigtable_admin_v2.proto import table_pb2

    return table_pb2.Table(*args, **kw)
def _ColumnFamilyPB(*args, **kw):
    """Build an admin-API ``ColumnFamily`` protobuf."""
    from google.cloud.bigtable_admin_v2.proto import table_pb2

    return table_pb2.ColumnFamily(*args, **kw)
def _ClusterStatePB(replication_state):
    """Build an admin-API ``Table.ClusterState`` protobuf."""
    from google.cloud.bigtable_admin_v2.proto import table_pb2

    return table_pb2.Table.ClusterState(replication_state=replication_state)
def _read_rows_retry_exception(exc):
    """Retry predicate: only ``DeadlineExceeded`` is considered retriable."""
    retriable_types = (DeadlineExceeded,)
    return isinstance(exc, retriable_types)
| true | true |
f7377c984300222892a90ae77a735e1cd718ad77 | 4,304 | py | Python | python/taichi/ui/staging_buffer.py | gaoxinge/taichi | 86d403f071b8505858763d4712b37cd71b89db91 | [
"MIT"
] | 11,699 | 2020-01-09T03:02:46.000Z | 2022-03-31T20:59:08.000Z | python/taichi/ui/staging_buffer.py | gaoxinge/taichi | 86d403f071b8505858763d4712b37cd71b89db91 | [
"MIT"
] | 3,589 | 2020-01-09T03:18:25.000Z | 2022-03-31T19:06:42.000Z | python/taichi/ui/staging_buffer.py | gaoxinge/taichi | 86d403f071b8505858763d4712b37cd71b89db91 | [
"MIT"
] | 1,391 | 2020-01-09T03:02:54.000Z | 2022-03-31T08:44:29.000Z | from taichi.lang.kernel_impl import kernel
from taichi.lang.matrix import Vector
from taichi.types.annotations import template
from taichi.types.primitive_types import f32, u8
import taichi as ti
vbo_field_cache = {}
def get_vbo_field(vertices):
    """Return the interleaved VBO field for ``vertices``, creating and
    caching it on first use.

    Per-vertex layout: position (3) + normal (3) + texcoord (2) + color (4)
    = 12 f32 components.
    """
    cached = vbo_field_cache.get(vertices)
    if cached is not None:
        return cached
    num_vertices = vertices.shape[0]
    # pos(3) + normal(3) + texcoord(2) + color(4)
    stride = 3 + 3 + 2 + 4
    vbo = Vector.field(stride, f32, shape=(num_vertices, ))
    vbo_field_cache[vertices] = vbo
    return vbo
@kernel
def copy_to_vbo(vbo: template(), src: template(), offset: template(),
                num_components: template()):
    # Interleave `src` into `vbo`: for every element i, write its
    # `num_components` values starting at component `offset`.
    # The inner loop is unrolled at compile time via ti.static.
    for i in src:
        for c in ti.static(range(num_components)):
            vbo[i][offset + c] = src[i][c]
@kernel
def fill_vbo(vbo: template(), value: template(), offset: template(),
             num_components: template()):
    # Broadcast the constant `value` into components
    # [offset, offset + num_components) of every VBO entry
    # (used e.g. to default the alpha channel to 1.0).
    for i in vbo:
        for c in ti.static(range(num_components)):
            vbo[i][offset + c] = value
def validate_input_field(f, name):
    """Raise unless ``f`` is a 1-D f32 vector field.

    ``name`` is only used to produce a readable error message.
    """
    if f.dtype != f32:
        raise Exception(f"{name} needs to have dtype f32")
    # A vector field exposes `n` (rows); a plain scalar field does not.
    if not hasattr(f, 'n'):
        raise Exception(f'{name} needs to be a Vector field')
    if f.m != 1:
        raise Exception(
            f'{name} needs to be a Vector field (matrix with 1 column)')
    if len(f.shape) != 1:
        raise Exception(f"the shape of {name} needs to be 1-dimensional")
def copy_vertices_to_vbo(vbo, vertices):
    """Copy a 2D/3D vertex position field into VBO components [0, n)."""
    validate_input_field(vertices, "vertices")
    if vertices.n not in (2, 3):
        raise Exception('vertices can only be 2D or 3D vector fields')
    copy_to_vbo(vbo, vertices, 0, vertices.n)
def copy_normals_to_vbo(vbo, normals):
    """Copy a 3-component normal field into VBO components [3, 6)."""
    NORMAL_OFFSET = 3  # normals follow the 3 position components
    validate_input_field(normals, "normals")
    if normals.n != 3:
        raise Exception('normals can only be 3D vector fields')
    copy_to_vbo(vbo, normals, NORMAL_OFFSET, normals.n)
def copy_texcoords_to_vbo(vbo, texcoords):
    """Copy a 2-component texture-coordinate field into VBO components [6, 8).

    :param vbo: interleaved vertex buffer field produced by ``get_vbo_field``.
    :param texcoords: 1-D f32 vector field with exactly 2 components.
    :raises Exception: if ``texcoords`` is not a valid 1-D f32 2D vector field.
    """
    validate_input_field(texcoords, "texcoords")
    if texcoords.n != 2:
        # Bug fix: the guard requires 2 components, but the old message
        # incorrectly said "3D vector fields".
        raise Exception('texcoords can only be 2D vector fields')
    copy_to_vbo(vbo, texcoords, 6, texcoords.n)
def copy_colors_to_vbo(vbo, colors):
    """Copy an RGB/RGBA color field into VBO components [8, 12).

    3-component colors get an implicit opaque alpha of 1.0.
    """
    validate_input_field(colors, "colors")
    if colors.n not in (3, 4):
        raise Exception('colors can only be 3D/4D vector fields')
    copy_to_vbo(vbo, colors, 8, colors.n)
    if colors.n == 3:
        # No alpha supplied: fill component 11 (= 8 + 3) with 1.0.
        fill_vbo(vbo, 1.0, 11, 1)
@ti.kernel
def copy_image_f32_to_u8(src: ti.template(), dst: ti.template(),
                         num_components: ti.template()):
    # Convert a float image (expected in [0, 1]) to u8 in [0, 255],
    # clamping out-of-range values; pad missing channels with opaque alpha.
    for i, j in src:
        for k in ti.static(range(num_components)):
            c = src[i, j][k]
            c = max(0.0, min(1.0, c))  # clamp to [0, 1] before scaling
            c = c * 255
            dst[i, j][k] = int(c)
        if num_components < 4:
            # alpha channel
            dst[i, j][3] = 255
@ti.kernel
def copy_image_u8_to_u8(src: ti.template(), dst: ti.template(),
                        num_components: ti.template()):
    # Copy a u8 image verbatim; pad missing channels with opaque alpha.
    for i, j in src:
        for k in ti.static(range(num_components)):
            dst[i, j][k] = src[i, j][k]
        if num_components < 4:
            # alpha channel
            dst[i, j][3] = 255
# ggui renderer always assumes the input image to be u8 RGBA
# if the user input is not in this format, a staging ti field is needed
image_field_cache = {}
def to_u8_rgba(image):
    """Return ``image`` as a u8 RGBA vector field.

    The renderer consumes u8 RGBA only; any other supported input (u8 or
    f32 dtype, 3 or 4 components) is converted into a staging field that is
    cached per source image and reused across frames.

    :raises Exception: for non-vector fields, non-2D shapes, or dtypes other
        than u8/f32.
    """
    if not hasattr(image, 'n') or image.m != 1:
        raise Exception(
            'the input image needs to be a Vector field (matrix with 1 column)'
        )
    if len(image.shape) != 2:
        raise Exception(
            "the shape of the image must be of the form (width,height)")

    if image.dtype == u8 and image.n == 4:
        # already in the desired format
        return image

    # Reuse (or lazily create) the staging field keyed by the source image.
    if image not in image_field_cache:
        staging_img = Vector.field(4, u8, image.shape)
        image_field_cache[image] = staging_img
    else:
        staging_img = image_field_cache[image]

    if image.dtype == u8:
        copy_image_u8_to_u8(image, staging_img, image.n)
    elif image.dtype == f32:
        copy_image_f32_to_u8(image, staging_img, image.n)
    else:
        raise Exception("dtype of input image must either be u8 or f32")
    return staging_img
| 30.742857 | 79 | 0.617565 | from taichi.lang.kernel_impl import kernel
from taichi.lang.matrix import Vector
from taichi.types.annotations import template
from taichi.types.primitive_types import f32, u8
import taichi as ti
vbo_field_cache = {}
def get_vbo_field(vertices):
if vertices not in vbo_field_cache:
N = vertices.shape[0]
pos = 3
normal = 3
tex_coord = 2
color = 4
vertex_stride = pos + normal + tex_coord + color
vbo = Vector.field(vertex_stride, f32, shape=(N, ))
vbo_field_cache[vertices] = vbo
return vbo
return vbo_field_cache[vertices]
@kernel
def copy_to_vbo(vbo: template(), src: template(), offset: template(),
num_components: template()):
for i in src:
for c in ti.static(range(num_components)):
vbo[i][offset + c] = src[i][c]
@kernel
def fill_vbo(vbo: template(), value: template(), offset: template(),
num_components: template()):
for i in vbo:
for c in ti.static(range(num_components)):
vbo[i][offset + c] = value
def validate_input_field(f, name):
if f.dtype != f32:
raise Exception(f"{name} needs to have dtype f32")
if hasattr(f, 'n'):
if f.m != 1:
raise Exception(
f'{name} needs to be a Vector field (matrix with 1 column)')
else:
raise Exception(f'{name} needs to be a Vector field')
if len(f.shape) != 1:
raise Exception(f"the shape of {name} needs to be 1-dimensional")
def copy_vertices_to_vbo(vbo, vertices):
validate_input_field(vertices, "vertices")
if not 2 <= vertices.n <= 3:
raise Exception('vertices can only be 2D or 3D vector fields')
copy_to_vbo(vbo, vertices, 0, vertices.n)
def copy_normals_to_vbo(vbo, normals):
validate_input_field(normals, "normals")
if normals.n != 3:
raise Exception('normals can only be 3D vector fields')
copy_to_vbo(vbo, normals, 3, normals.n)
def copy_texcoords_to_vbo(vbo, texcoords):
    """Copy a 2-component texture-coordinate field into VBO components [6, 8).

    :param vbo: interleaved vertex buffer field produced by ``get_vbo_field``.
    :param texcoords: 1-D f32 vector field with exactly 2 components.
    :raises Exception: if ``texcoords`` is not a valid 1-D f32 2D vector field.
    """
    validate_input_field(texcoords, "texcoords")
    if texcoords.n != 2:
        # Bug fix: the guard requires 2 components, but the old message
        # incorrectly said "3D vector fields".
        raise Exception('texcoords can only be 2D vector fields')
    copy_to_vbo(vbo, texcoords, 6, texcoords.n)
def copy_colors_to_vbo(vbo, colors):
validate_input_field(colors, "colors")
if colors.n != 3 and colors.n != 4:
raise Exception('colors can only be 3D/4D vector fields')
copy_to_vbo(vbo, colors, 8, colors.n)
if colors.n == 3:
fill_vbo(vbo, 1.0, 11, 1)
@ti.kernel
def copy_image_f32_to_u8(src: ti.template(), dst: ti.template(),
num_components: ti.template()):
for i, j in src:
for k in ti.static(range(num_components)):
c = src[i, j][k]
c = max(0.0, min(1.0, c))
c = c * 255
dst[i, j][k] = int(c)
if num_components < 4:
dst[i, j][3] = 255
@ti.kernel
def copy_image_u8_to_u8(src: ti.template(), dst: ti.template(),
num_components: ti.template()):
for i, j in src:
for k in ti.static(range(num_components)):
dst[i, j][k] = src[i, j][k]
if num_components < 4:
dst[i, j][3] = 255
image_field_cache = {}
def to_u8_rgba(image):
if not hasattr(image, 'n') or image.m != 1:
raise Exception(
'the input image needs to be a Vector field (matrix with 1 column)'
)
if len(image.shape) != 2:
raise Exception(
"the shape of the image must be of the form (width,height)")
if image.dtype == u8 and image.n == 4:
return image
if image not in image_field_cache:
staging_img = Vector.field(4, u8, image.shape)
image_field_cache[image] = staging_img
else:
staging_img = image_field_cache[image]
if image.dtype == u8:
copy_image_u8_to_u8(image, staging_img, image.n)
elif image.dtype == f32:
copy_image_f32_to_u8(image, staging_img, image.n)
else:
raise Exception("dtype of input image must either be u8 or f32")
return staging_img
| true | true |
f7377dcd51a6292c65747a3c3db690121395d368 | 21,515 | py | Python | sdk/python/pulumi_azuread/provider.py | ragnarstolsmark/pulumi-azuread | b9398511c142f0aad349e492ded419f870edc925 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azuread/provider.py | ragnarstolsmark/pulumi-azuread | b9398511c142f0aad349e492ded419f870edc925 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azuread/provider.py | ragnarstolsmark/pulumi-azuread | b9398511c142f0aad349e492ded419f870edc925 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['ProviderArgs', 'Provider']
@pulumi.input_type
class ProviderArgs:
def __init__(__self__, *,
metadata_host: pulumi.Input[str],
client_certificate_password: Optional[pulumi.Input[str]] = None,
client_certificate_path: Optional[pulumi.Input[str]] = None,
client_id: Optional[pulumi.Input[str]] = None,
client_secret: Optional[pulumi.Input[str]] = None,
disable_terraform_partner_id: Optional[pulumi.Input[bool]] = None,
environment: Optional[pulumi.Input[str]] = None,
msi_endpoint: Optional[pulumi.Input[str]] = None,
partner_id: Optional[pulumi.Input[str]] = None,
tenant_id: Optional[pulumi.Input[str]] = None,
use_cli: Optional[pulumi.Input[bool]] = None,
use_microsoft_graph: Optional[pulumi.Input[bool]] = None,
use_msi: Optional[pulumi.Input[bool]] = None):
"""
The set of arguments for constructing a Provider resource.
:param pulumi.Input[str] metadata_host: [DEPRECATED] The Hostname which should be used for the Azure Metadata Service.
:param pulumi.Input[str] client_certificate_path: The path to the Client Certificate associated with the Service Principal for use when authenticating as a Service
Principal using a Client Certificate.
:param pulumi.Input[str] client_id: The Client ID which should be used for service principal authentication.
:param pulumi.Input[str] client_secret: The password to decrypt the Client Certificate. For use when authenticating as a Service Principal using a Client
Certificate
:param pulumi.Input[bool] disable_terraform_partner_id: Disable the Terraform Partner ID which is used if a custom `partner_id` isn't specified.
:param pulumi.Input[str] environment: The cloud environment which should be used. Possible values are `global` (formerly `public`), `usgovernment`, `dod`,
`germany`, and `china`. Defaults to `global`.
:param pulumi.Input[str] msi_endpoint: The path to a custom endpoint for Managed Identity - in most circumstances this should be detected automatically.
:param pulumi.Input[str] partner_id: A GUID/UUID that is registered with Microsoft to facilitate partner resource usage attribution.
:param pulumi.Input[str] tenant_id: The Tenant ID which should be used. Works with all authentication methods except Managed Identity.
:param pulumi.Input[bool] use_cli: Allow Azure CLI to be used for Authentication.
:param pulumi.Input[bool] use_microsoft_graph: Beta: Use the Microsoft Graph API, instead of the legacy Azure Active Directory Graph API, where supported.
:param pulumi.Input[bool] use_msi: Allow Managed Identity to be used for Authentication.
"""
pulumi.set(__self__, "metadata_host", metadata_host)
if client_certificate_password is not None:
pulumi.set(__self__, "client_certificate_password", client_certificate_password)
if client_certificate_path is not None:
pulumi.set(__self__, "client_certificate_path", client_certificate_path)
if client_id is not None:
pulumi.set(__self__, "client_id", client_id)
if client_secret is not None:
pulumi.set(__self__, "client_secret", client_secret)
if disable_terraform_partner_id is not None:
pulumi.set(__self__, "disable_terraform_partner_id", disable_terraform_partner_id)
if environment is None:
environment = (_utilities.get_env('ARM_ENVIRONMENT') or 'public')
if environment is not None:
pulumi.set(__self__, "environment", environment)
if msi_endpoint is None:
msi_endpoint = _utilities.get_env('ARM_MSI_ENDPOINT')
if msi_endpoint is not None:
pulumi.set(__self__, "msi_endpoint", msi_endpoint)
if partner_id is not None:
pulumi.set(__self__, "partner_id", partner_id)
if tenant_id is not None:
pulumi.set(__self__, "tenant_id", tenant_id)
if use_cli is not None:
pulumi.set(__self__, "use_cli", use_cli)
if use_microsoft_graph is not None:
pulumi.set(__self__, "use_microsoft_graph", use_microsoft_graph)
if use_msi is None:
use_msi = (_utilities.get_env_bool('ARM_USE_MSI') or False)
if use_msi is not None:
pulumi.set(__self__, "use_msi", use_msi)
@property
@pulumi.getter(name="metadataHost")
def metadata_host(self) -> pulumi.Input[str]:
"""
[DEPRECATED] The Hostname which should be used for the Azure Metadata Service.
"""
return pulumi.get(self, "metadata_host")
@metadata_host.setter
def metadata_host(self, value: pulumi.Input[str]):
pulumi.set(self, "metadata_host", value)
@property
@pulumi.getter(name="clientCertificatePassword")
def client_certificate_password(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "client_certificate_password")
@client_certificate_password.setter
def client_certificate_password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "client_certificate_password", value)
@property
@pulumi.getter(name="clientCertificatePath")
def client_certificate_path(self) -> Optional[pulumi.Input[str]]:
"""
The path to the Client Certificate associated with the Service Principal for use when authenticating as a Service
Principal using a Client Certificate.
"""
return pulumi.get(self, "client_certificate_path")
@client_certificate_path.setter
def client_certificate_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "client_certificate_path", value)
@property
@pulumi.getter(name="clientId")
def client_id(self) -> Optional[pulumi.Input[str]]:
"""
The Client ID which should be used for service principal authentication.
"""
return pulumi.get(self, "client_id")
@client_id.setter
def client_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "client_id", value)
@property
@pulumi.getter(name="clientSecret")
def client_secret(self) -> Optional[pulumi.Input[str]]:
"""
The password to decrypt the Client Certificate. For use when authenticating as a Service Principal using a Client
Certificate
"""
return pulumi.get(self, "client_secret")
@client_secret.setter
def client_secret(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "client_secret", value)
@property
@pulumi.getter(name="disableTerraformPartnerId")
def disable_terraform_partner_id(self) -> Optional[pulumi.Input[bool]]:
"""
Disable the Terraform Partner ID which is used if a custom `partner_id` isn't specified.
"""
return pulumi.get(self, "disable_terraform_partner_id")
@disable_terraform_partner_id.setter
def disable_terraform_partner_id(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "disable_terraform_partner_id", value)
@property
@pulumi.getter
def environment(self) -> Optional[pulumi.Input[str]]:
"""
The cloud environment which should be used. Possible values are `global` (formerly `public`), `usgovernment`, `dod`,
`germany`, and `china`. Defaults to `global`.
"""
return pulumi.get(self, "environment")
@environment.setter
def environment(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "environment", value)
@property
@pulumi.getter(name="msiEndpoint")
def msi_endpoint(self) -> Optional[pulumi.Input[str]]:
"""
The path to a custom endpoint for Managed Identity - in most circumstances this should be detected automatically.
"""
return pulumi.get(self, "msi_endpoint")
@msi_endpoint.setter
def msi_endpoint(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "msi_endpoint", value)
@property
@pulumi.getter(name="partnerId")
def partner_id(self) -> Optional[pulumi.Input[str]]:
"""
A GUID/UUID that is registered with Microsoft to facilitate partner resource usage attribution.
"""
return pulumi.get(self, "partner_id")
@partner_id.setter
def partner_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "partner_id", value)
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> Optional[pulumi.Input[str]]:
"""
The Tenant ID which should be used. Works with all authentication methods except Managed Identity.
"""
return pulumi.get(self, "tenant_id")
@tenant_id.setter
def tenant_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tenant_id", value)
@property
@pulumi.getter(name="useCli")
def use_cli(self) -> Optional[pulumi.Input[bool]]:
"""
Allow Azure CLI to be used for Authentication.
"""
return pulumi.get(self, "use_cli")
@use_cli.setter
def use_cli(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "use_cli", value)
@property
@pulumi.getter(name="useMicrosoftGraph")
def use_microsoft_graph(self) -> Optional[pulumi.Input[bool]]:
"""
Beta: Use the Microsoft Graph API, instead of the legacy Azure Active Directory Graph API, where supported.
"""
return pulumi.get(self, "use_microsoft_graph")
@use_microsoft_graph.setter
def use_microsoft_graph(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "use_microsoft_graph", value)
@property
@pulumi.getter(name="useMsi")
def use_msi(self) -> Optional[pulumi.Input[bool]]:
"""
Allow Managed Identity to be used for Authentication.
"""
return pulumi.get(self, "use_msi")
@use_msi.setter
def use_msi(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "use_msi", value)
class Provider(pulumi.ProviderResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
client_certificate_password: Optional[pulumi.Input[str]] = None,
client_certificate_path: Optional[pulumi.Input[str]] = None,
client_id: Optional[pulumi.Input[str]] = None,
client_secret: Optional[pulumi.Input[str]] = None,
disable_terraform_partner_id: Optional[pulumi.Input[bool]] = None,
environment: Optional[pulumi.Input[str]] = None,
metadata_host: Optional[pulumi.Input[str]] = None,
msi_endpoint: Optional[pulumi.Input[str]] = None,
partner_id: Optional[pulumi.Input[str]] = None,
tenant_id: Optional[pulumi.Input[str]] = None,
use_cli: Optional[pulumi.Input[bool]] = None,
use_microsoft_graph: Optional[pulumi.Input[bool]] = None,
use_msi: Optional[pulumi.Input[bool]] = None,
__props__=None):
"""
The provider type for the azuread package. By default, resources use package-wide configuration
settings, however an explicit `Provider` instance may be created and passed during resource
construction to achieve fine-grained programmatic control over provider settings. See the
[documentation](https://www.pulumi.com/docs/reference/programming-model/#providers) for more information.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] client_certificate_path: The path to the Client Certificate associated with the Service Principal for use when authenticating as a Service
Principal using a Client Certificate.
:param pulumi.Input[str] client_id: The Client ID which should be used for service principal authentication.
:param pulumi.Input[str] client_secret: The password to decrypt the Client Certificate. For use when authenticating as a Service Principal using a Client
Certificate
:param pulumi.Input[bool] disable_terraform_partner_id: Disable the Terraform Partner ID which is used if a custom `partner_id` isn't specified.
:param pulumi.Input[str] environment: The cloud environment which should be used. Possible values are `global` (formerly `public`), `usgovernment`, `dod`,
`germany`, and `china`. Defaults to `global`.
:param pulumi.Input[str] metadata_host: [DEPRECATED] The Hostname which should be used for the Azure Metadata Service.
:param pulumi.Input[str] msi_endpoint: The path to a custom endpoint for Managed Identity - in most circumstances this should be detected automatically.
:param pulumi.Input[str] partner_id: A GUID/UUID that is registered with Microsoft to facilitate partner resource usage attribution.
:param pulumi.Input[str] tenant_id: The Tenant ID which should be used. Works with all authentication methods except Managed Identity.
:param pulumi.Input[bool] use_cli: Allow Azure CLI to be used for Authentication.
:param pulumi.Input[bool] use_microsoft_graph: Beta: Use the Microsoft Graph API, instead of the legacy Azure Active Directory Graph API, where supported.
:param pulumi.Input[bool] use_msi: Allow Managed Identity to be used for Authentication.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ProviderArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
The provider type for the azuread package. By default, resources use package-wide configuration
settings, however an explicit `Provider` instance may be created and passed during resource
construction to achieve fine-grained programmatic control over provider settings. See the
[documentation](https://www.pulumi.com/docs/reference/programming-model/#providers) for more information.
:param str resource_name: The name of the resource.
:param ProviderArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ProviderArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
client_certificate_password: Optional[pulumi.Input[str]] = None,
client_certificate_path: Optional[pulumi.Input[str]] = None,
client_id: Optional[pulumi.Input[str]] = None,
client_secret: Optional[pulumi.Input[str]] = None,
disable_terraform_partner_id: Optional[pulumi.Input[bool]] = None,
environment: Optional[pulumi.Input[str]] = None,
metadata_host: Optional[pulumi.Input[str]] = None,
msi_endpoint: Optional[pulumi.Input[str]] = None,
partner_id: Optional[pulumi.Input[str]] = None,
tenant_id: Optional[pulumi.Input[str]] = None,
use_cli: Optional[pulumi.Input[bool]] = None,
use_microsoft_graph: Optional[pulumi.Input[bool]] = None,
use_msi: Optional[pulumi.Input[bool]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ProviderArgs.__new__(ProviderArgs)
__props__.__dict__["client_certificate_password"] = client_certificate_password
__props__.__dict__["client_certificate_path"] = client_certificate_path
__props__.__dict__["client_id"] = client_id
__props__.__dict__["client_secret"] = client_secret
__props__.__dict__["disable_terraform_partner_id"] = pulumi.Output.from_input(disable_terraform_partner_id).apply(pulumi.runtime.to_json) if disable_terraform_partner_id is not None else None
if environment is None:
environment = (_utilities.get_env('ARM_ENVIRONMENT') or 'public')
__props__.__dict__["environment"] = environment
if metadata_host is None and not opts.urn:
raise TypeError("Missing required property 'metadata_host'")
__props__.__dict__["metadata_host"] = metadata_host
if msi_endpoint is None:
msi_endpoint = _utilities.get_env('ARM_MSI_ENDPOINT')
__props__.__dict__["msi_endpoint"] = msi_endpoint
__props__.__dict__["partner_id"] = partner_id
__props__.__dict__["tenant_id"] = tenant_id
__props__.__dict__["use_cli"] = pulumi.Output.from_input(use_cli).apply(pulumi.runtime.to_json) if use_cli is not None else None
__props__.__dict__["use_microsoft_graph"] = pulumi.Output.from_input(use_microsoft_graph).apply(pulumi.runtime.to_json) if use_microsoft_graph is not None else None
if use_msi is None:
use_msi = (_utilities.get_env_bool('ARM_USE_MSI') or False)
__props__.__dict__["use_msi"] = pulumi.Output.from_input(use_msi).apply(pulumi.runtime.to_json) if use_msi is not None else None
super(Provider, __self__).__init__(
'azuread',
resource_name,
__props__,
opts)
@property
@pulumi.getter(name="clientCertificatePassword")
def client_certificate_password(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "client_certificate_password")
@property
@pulumi.getter(name="clientCertificatePath")
def client_certificate_path(self) -> pulumi.Output[Optional[str]]:
"""
The path to the Client Certificate associated with the Service Principal for use when authenticating as a Service
Principal using a Client Certificate.
"""
return pulumi.get(self, "client_certificate_path")
@property
@pulumi.getter(name="clientId")
def client_id(self) -> pulumi.Output[Optional[str]]:
"""
The Client ID which should be used for service principal authentication.
"""
return pulumi.get(self, "client_id")
@property
@pulumi.getter(name="clientSecret")
def client_secret(self) -> pulumi.Output[Optional[str]]:
"""
The password to decrypt the Client Certificate. For use when authenticating as a Service Principal using a Client
Certificate
"""
return pulumi.get(self, "client_secret")
@property
@pulumi.getter
def environment(self) -> pulumi.Output[Optional[str]]:
"""
The cloud environment which should be used. Possible values are `global` (formerly `public`), `usgovernment`, `dod`,
`germany`, and `china`. Defaults to `global`.
"""
return pulumi.get(self, "environment")
@property
@pulumi.getter(name="metadataHost")
def metadata_host(self) -> pulumi.Output[str]:
"""
[DEPRECATED] The Hostname which should be used for the Azure Metadata Service.
"""
return pulumi.get(self, "metadata_host")
@property
@pulumi.getter(name="msiEndpoint")
def msi_endpoint(self) -> pulumi.Output[Optional[str]]:
"""
The path to a custom endpoint for Managed Identity - in most circumstances this should be detected automatically.
"""
return pulumi.get(self, "msi_endpoint")
@property
@pulumi.getter(name="partnerId")
def partner_id(self) -> pulumi.Output[Optional[str]]:
"""
A GUID/UUID that is registered with Microsoft to facilitate partner resource usage attribution.
"""
return pulumi.get(self, "partner_id")
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> pulumi.Output[Optional[str]]:
"""
The Tenant ID which should be used. Works with all authentication methods except Managed Identity.
"""
return pulumi.get(self, "tenant_id")
| 50.034884 | 203 | 0.668929 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['ProviderArgs', 'Provider']
@pulumi.input_type
class ProviderArgs:
def __init__(__self__, *,
metadata_host: pulumi.Input[str],
client_certificate_password: Optional[pulumi.Input[str]] = None,
client_certificate_path: Optional[pulumi.Input[str]] = None,
client_id: Optional[pulumi.Input[str]] = None,
client_secret: Optional[pulumi.Input[str]] = None,
disable_terraform_partner_id: Optional[pulumi.Input[bool]] = None,
environment: Optional[pulumi.Input[str]] = None,
msi_endpoint: Optional[pulumi.Input[str]] = None,
partner_id: Optional[pulumi.Input[str]] = None,
tenant_id: Optional[pulumi.Input[str]] = None,
use_cli: Optional[pulumi.Input[bool]] = None,
use_microsoft_graph: Optional[pulumi.Input[bool]] = None,
use_msi: Optional[pulumi.Input[bool]] = None):
pulumi.set(__self__, "metadata_host", metadata_host)
if client_certificate_password is not None:
pulumi.set(__self__, "client_certificate_password", client_certificate_password)
if client_certificate_path is not None:
pulumi.set(__self__, "client_certificate_path", client_certificate_path)
if client_id is not None:
pulumi.set(__self__, "client_id", client_id)
if client_secret is not None:
pulumi.set(__self__, "client_secret", client_secret)
if disable_terraform_partner_id is not None:
pulumi.set(__self__, "disable_terraform_partner_id", disable_terraform_partner_id)
if environment is None:
environment = (_utilities.get_env('ARM_ENVIRONMENT') or 'public')
if environment is not None:
pulumi.set(__self__, "environment", environment)
if msi_endpoint is None:
msi_endpoint = _utilities.get_env('ARM_MSI_ENDPOINT')
if msi_endpoint is not None:
pulumi.set(__self__, "msi_endpoint", msi_endpoint)
if partner_id is not None:
pulumi.set(__self__, "partner_id", partner_id)
if tenant_id is not None:
pulumi.set(__self__, "tenant_id", tenant_id)
if use_cli is not None:
pulumi.set(__self__, "use_cli", use_cli)
if use_microsoft_graph is not None:
pulumi.set(__self__, "use_microsoft_graph", use_microsoft_graph)
if use_msi is None:
use_msi = (_utilities.get_env_bool('ARM_USE_MSI') or False)
if use_msi is not None:
pulumi.set(__self__, "use_msi", use_msi)
@property
@pulumi.getter(name="metadataHost")
def metadata_host(self) -> pulumi.Input[str]:
return pulumi.get(self, "metadata_host")
@metadata_host.setter
def metadata_host(self, value: pulumi.Input[str]):
pulumi.set(self, "metadata_host", value)
@property
@pulumi.getter(name="clientCertificatePassword")
def client_certificate_password(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "client_certificate_password")
@client_certificate_password.setter
def client_certificate_password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "client_certificate_password", value)
@property
@pulumi.getter(name="clientCertificatePath")
def client_certificate_path(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "client_certificate_path")
@client_certificate_path.setter
def client_certificate_path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "client_certificate_path", value)
@property
@pulumi.getter(name="clientId")
def client_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "client_id")
@client_id.setter
def client_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "client_id", value)
@property
@pulumi.getter(name="clientSecret")
def client_secret(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "client_secret")
@client_secret.setter
def client_secret(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "client_secret", value)
@property
@pulumi.getter(name="disableTerraformPartnerId")
def disable_terraform_partner_id(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "disable_terraform_partner_id")
@disable_terraform_partner_id.setter
def disable_terraform_partner_id(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "disable_terraform_partner_id", value)
@property
@pulumi.getter
def environment(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "environment")
@environment.setter
def environment(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "environment", value)
@property
@pulumi.getter(name="msiEndpoint")
def msi_endpoint(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "msi_endpoint")
@msi_endpoint.setter
def msi_endpoint(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "msi_endpoint", value)
@property
@pulumi.getter(name="partnerId")
def partner_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "partner_id")
@partner_id.setter
def partner_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "partner_id", value)
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "tenant_id")
@tenant_id.setter
def tenant_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tenant_id", value)
@property
@pulumi.getter(name="useCli")
def use_cli(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "use_cli")
@use_cli.setter
def use_cli(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "use_cli", value)
@property
@pulumi.getter(name="useMicrosoftGraph")
def use_microsoft_graph(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "use_microsoft_graph")
@use_microsoft_graph.setter
def use_microsoft_graph(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "use_microsoft_graph", value)
@property
@pulumi.getter(name="useMsi")
def use_msi(self) -> Optional[pulumi.Input[bool]]:
return pulumi.get(self, "use_msi")
@use_msi.setter
def use_msi(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "use_msi", value)
class Provider(pulumi.ProviderResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
client_certificate_password: Optional[pulumi.Input[str]] = None,
client_certificate_path: Optional[pulumi.Input[str]] = None,
client_id: Optional[pulumi.Input[str]] = None,
client_secret: Optional[pulumi.Input[str]] = None,
disable_terraform_partner_id: Optional[pulumi.Input[bool]] = None,
environment: Optional[pulumi.Input[str]] = None,
metadata_host: Optional[pulumi.Input[str]] = None,
msi_endpoint: Optional[pulumi.Input[str]] = None,
partner_id: Optional[pulumi.Input[str]] = None,
tenant_id: Optional[pulumi.Input[str]] = None,
use_cli: Optional[pulumi.Input[bool]] = None,
use_microsoft_graph: Optional[pulumi.Input[bool]] = None,
use_msi: Optional[pulumi.Input[bool]] = None,
__props__=None):
...
@overload
def __init__(__self__,
resource_name: str,
args: ProviderArgs,
opts: Optional[pulumi.ResourceOptions] = None):
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ProviderArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
client_certificate_password: Optional[pulumi.Input[str]] = None,
client_certificate_path: Optional[pulumi.Input[str]] = None,
client_id: Optional[pulumi.Input[str]] = None,
client_secret: Optional[pulumi.Input[str]] = None,
disable_terraform_partner_id: Optional[pulumi.Input[bool]] = None,
environment: Optional[pulumi.Input[str]] = None,
metadata_host: Optional[pulumi.Input[str]] = None,
msi_endpoint: Optional[pulumi.Input[str]] = None,
partner_id: Optional[pulumi.Input[str]] = None,
tenant_id: Optional[pulumi.Input[str]] = None,
use_cli: Optional[pulumi.Input[bool]] = None,
use_microsoft_graph: Optional[pulumi.Input[bool]] = None,
use_msi: Optional[pulumi.Input[bool]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ProviderArgs.__new__(ProviderArgs)
__props__.__dict__["client_certificate_password"] = client_certificate_password
__props__.__dict__["client_certificate_path"] = client_certificate_path
__props__.__dict__["client_id"] = client_id
__props__.__dict__["client_secret"] = client_secret
__props__.__dict__["disable_terraform_partner_id"] = pulumi.Output.from_input(disable_terraform_partner_id).apply(pulumi.runtime.to_json) if disable_terraform_partner_id is not None else None
if environment is None:
environment = (_utilities.get_env('ARM_ENVIRONMENT') or 'public')
__props__.__dict__["environment"] = environment
if metadata_host is None and not opts.urn:
raise TypeError("Missing required property 'metadata_host'")
__props__.__dict__["metadata_host"] = metadata_host
if msi_endpoint is None:
msi_endpoint = _utilities.get_env('ARM_MSI_ENDPOINT')
__props__.__dict__["msi_endpoint"] = msi_endpoint
__props__.__dict__["partner_id"] = partner_id
__props__.__dict__["tenant_id"] = tenant_id
__props__.__dict__["use_cli"] = pulumi.Output.from_input(use_cli).apply(pulumi.runtime.to_json) if use_cli is not None else None
__props__.__dict__["use_microsoft_graph"] = pulumi.Output.from_input(use_microsoft_graph).apply(pulumi.runtime.to_json) if use_microsoft_graph is not None else None
if use_msi is None:
use_msi = (_utilities.get_env_bool('ARM_USE_MSI') or False)
__props__.__dict__["use_msi"] = pulumi.Output.from_input(use_msi).apply(pulumi.runtime.to_json) if use_msi is not None else None
super(Provider, __self__).__init__(
'azuread',
resource_name,
__props__,
opts)
@property
@pulumi.getter(name="clientCertificatePassword")
def client_certificate_password(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "client_certificate_password")
@property
@pulumi.getter(name="clientCertificatePath")
def client_certificate_path(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "client_certificate_path")
@property
@pulumi.getter(name="clientId")
def client_id(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "client_id")
@property
@pulumi.getter(name="clientSecret")
def client_secret(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "client_secret")
@property
@pulumi.getter
def environment(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "environment")
@property
@pulumi.getter(name="metadataHost")
def metadata_host(self) -> pulumi.Output[str]:
return pulumi.get(self, "metadata_host")
@property
@pulumi.getter(name="msiEndpoint")
def msi_endpoint(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "msi_endpoint")
@property
@pulumi.getter(name="partnerId")
def partner_id(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "partner_id")
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "tenant_id")
| true | true |
f7377e8adc483df2bf5c09e61c0854ad99828c2e | 291 | py | Python | leads/admin.py | alexdeathway/CRM | 54c7a1605c9db5af35618e0c198239c0d329fbea | [
"MIT"
] | 2 | 2021-10-04T13:46:31.000Z | 2022-02-15T11:34:39.000Z | leads/admin.py | alexdeathway/CRM | 54c7a1605c9db5af35618e0c198239c0d329fbea | [
"MIT"
] | null | null | null | leads/admin.py | alexdeathway/CRM | 54c7a1605c9db5af35618e0c198239c0d329fbea | [
"MIT"
] | 1 | 2021-10-04T13:46:51.000Z | 2021-10-04T13:46:51.000Z | from django.contrib import admin
from .models import LeadModel,AgentModel,UserProfile,User,CategoryModel
# Register your models here.
admin.site.register(LeadModel)
admin.site.register(AgentModel)
admin.site.register(UserProfile)
admin.site.register(User)
admin.site.register(CategoryModel) | 32.333333 | 71 | 0.841924 | from django.contrib import admin
from .models import LeadModel,AgentModel,UserProfile,User,CategoryModel
admin.site.register(LeadModel)
admin.site.register(AgentModel)
admin.site.register(UserProfile)
admin.site.register(User)
admin.site.register(CategoryModel) | true | true |
f7377f8f4bfcc5d200b1a7d4c2087be49ac96239 | 25,064 | py | Python | selfdrive/controls/controlsd.py | stevenan93/openpilot | 2162d8d6654944652ca4ae84be8c05b702fb65fb | [
"MIT"
] | 12 | 2020-12-12T05:08:33.000Z | 2022-03-08T09:05:01.000Z | selfdrive/controls/controlsd.py | Ambroos/openpilot | 7488c6d2613409bd5d505b85c157906e5b9d1a1e | [
"MIT"
] | 1 | 2021-02-02T20:57:27.000Z | 2021-02-02T20:57:27.000Z | selfdrive/controls/controlsd.py | Ambroos/openpilot | 7488c6d2613409bd5d505b85c157906e5b9d1a1e | [
"MIT"
] | 2 | 2020-12-18T07:20:37.000Z | 2021-01-13T02:49:44.000Z | #!/usr/bin/env python3
import os
from cereal import car, log
from common.numpy_fast import clip
from common.realtime import sec_since_boot, config_realtime_process, Priority, Ratekeeper, DT_CTRL
from common.profiler import Profiler
from common.params import Params, put_nonblocking
import cereal.messaging as messaging
from selfdrive.config import Conversions as CV
from selfdrive.swaglog import cloudlog
from selfdrive.boardd.boardd import can_list_to_can_capnp
from selfdrive.car.car_helpers import get_car, get_startup_event, get_one_can
from selfdrive.controls.lib.lane_planner import CAMERA_OFFSET
from selfdrive.controls.lib.drive_helpers import update_v_cruise, initialize_v_cruise
from selfdrive.controls.lib.longcontrol import LongControl, STARTING_TARGET_SPEED
from selfdrive.controls.lib.latcontrol_pid import LatControlPID
from selfdrive.controls.lib.latcontrol_indi import LatControlINDI
from selfdrive.controls.lib.latcontrol_lqr import LatControlLQR
from selfdrive.controls.lib.events import Events, ET
from selfdrive.controls.lib.alertmanager import AlertManager
from selfdrive.controls.lib.vehicle_model import VehicleModel
from selfdrive.controls.lib.longitudinal_planner import LON_MPC_STEP
from selfdrive.locationd.calibrationd import Calibration
from selfdrive.hardware import HARDWARE, TICI
LDW_MIN_SPEED = 31 * CV.MPH_TO_MS
LANE_DEPARTURE_THRESHOLD = 0.1
STEER_ANGLE_SATURATION_TIMEOUT = 1.0 / DT_CTRL
STEER_ANGLE_SATURATION_THRESHOLD = 2.5 # Degrees
SIMULATION = "SIMULATION" in os.environ
NOSENSOR = "NOSENSOR" in os.environ
IGNORE_PROCESSES = set(["rtshield", "uploader", "deleter", "loggerd", "logmessaged", "tombstoned", "logcatd", "proclogd", "clocksd", "updated", "timezoned", "manage_athenad"])
ThermalStatus = log.DeviceState.ThermalStatus
State = log.ControlsState.OpenpilotState
PandaType = log.PandaState.PandaType
LongitudinalPlanSource = log.LongitudinalPlan.LongitudinalPlanSource
Desire = log.LateralPlan.Desire
LaneChangeState = log.LateralPlan.LaneChangeState
LaneChangeDirection = log.LateralPlan.LaneChangeDirection
EventName = car.CarEvent.EventName
class Controls:
def __init__(self, sm=None, pm=None, can_sock=None):
config_realtime_process(3, Priority.CTRL_HIGH)
# Setup sockets
self.pm = pm
if self.pm is None:
self.pm = messaging.PubMaster(['sendcan', 'controlsState', 'carState',
'carControl', 'carEvents', 'carParams'])
self.sm = sm
if self.sm is None:
ignore = ['ubloxRaw', 'driverCameraState', 'managerState'] if SIMULATION else None
self.sm = messaging.SubMaster(['deviceState', 'pandaState', 'modelV2', 'liveCalibration', 'ubloxRaw',
'driverMonitoringState', 'longitudinalPlan', 'lateralPlan', 'liveLocationKalman',
'roadCameraState', 'driverCameraState', 'managerState', 'liveParameters', 'radarState'], ignore_alive=ignore)
self.can_sock = can_sock
if can_sock is None:
can_timeout = None if os.environ.get('NO_CAN_TIMEOUT', False) else 100
self.can_sock = messaging.sub_sock('can', timeout=can_timeout)
# wait for one pandaState and one CAN packet
print("Waiting for CAN messages...")
get_one_can(self.can_sock)
self.CI, self.CP = get_car(self.can_sock, self.pm.sock['sendcan'])
# read params
params = Params()
self.is_metric = params.get("IsMetric", encoding='utf8') == "1"
self.is_ldw_enabled = params.get("IsLdwEnabled", encoding='utf8') == "1"
community_feature_toggle = params.get("CommunityFeaturesToggle", encoding='utf8') == "1"
openpilot_enabled_toggle = params.get("OpenpilotEnabledToggle", encoding='utf8') == "1"
passive = params.get("Passive", encoding='utf8') == "1" or not openpilot_enabled_toggle
# detect sound card presence and ensure successful init
sounds_available = HARDWARE.get_sound_card_online()
car_recognized = self.CP.carName != 'mock'
# If stock camera is disconnected, we loaded car controls and it's not dashcam mode
controller_available = self.CP.enableCamera and self.CI.CC is not None and not passive and not self.CP.dashcamOnly
community_feature_disallowed = self.CP.communityFeature and not community_feature_toggle
self.read_only = not car_recognized or not controller_available or \
self.CP.dashcamOnly or community_feature_disallowed
if self.read_only:
self.CP.safetyModel = car.CarParams.SafetyModel.noOutput
# Write CarParams for radard and boardd safety mode
cp_bytes = self.CP.to_bytes()
params.put("CarParams", cp_bytes)
put_nonblocking("CarParamsCache", cp_bytes)
self.CC = car.CarControl.new_message()
self.AM = AlertManager()
self.events = Events()
self.LoC = LongControl(self.CP, self.CI.compute_gb)
self.VM = VehicleModel(self.CP)
if self.CP.lateralTuning.which() == 'pid':
self.LaC = LatControlPID(self.CP)
elif self.CP.lateralTuning.which() == 'indi':
self.LaC = LatControlINDI(self.CP)
elif self.CP.lateralTuning.which() == 'lqr':
self.LaC = LatControlLQR(self.CP)
self.state = State.disabled
self.enabled = False
self.active = False
self.can_rcv_error = False
self.soft_disable_timer = 0
self.v_cruise_kph = 255
self.v_cruise_kph_last = 0
self.mismatch_counter = 0
self.can_error_counter = 0
self.last_blinker_frame = 0
self.saturated_count = 0
self.distance_traveled = 0
self.last_functional_fan_frame = 0
self.events_prev = []
self.current_alert_types = [ET.PERMANENT]
self.logged_comm_issue = False
self.sm['liveCalibration'].calStatus = Calibration.CALIBRATED
self.sm['deviceState'].freeSpacePercent = 100
self.sm['driverMonitoringState'].events = []
self.sm['driverMonitoringState'].awarenessStatus = 1.
self.sm['driverMonitoringState'].faceDetected = False
self.startup_event = get_startup_event(car_recognized, controller_available)
if not sounds_available:
self.events.add(EventName.soundsUnavailable, static=True)
if community_feature_disallowed:
self.events.add(EventName.communityFeatureDisallowed, static=True)
if not car_recognized:
self.events.add(EventName.carUnrecognized, static=True)
# controlsd is driven by can recv, expected at 100Hz
self.rk = Ratekeeper(100, print_delay_threshold=None)
self.prof = Profiler(False) # off by default
def update_events(self, CS):
"""Compute carEvents from carState"""
self.events.clear()
self.events.add_from_msg(CS.events)
self.events.add_from_msg(self.sm['driverMonitoringState'].events)
# Handle startup event
if self.startup_event is not None:
self.events.add(self.startup_event)
self.startup_event = None
# Create events for battery, temperature, disk space, and memory
if self.sm['deviceState'].batteryPercent < 1 and self.sm['deviceState'].chargingError:
# at zero percent battery, while discharging, OP should not allowed
self.events.add(EventName.lowBattery)
if self.sm['deviceState'].thermalStatus >= ThermalStatus.red:
self.events.add(EventName.overheat)
if self.sm['deviceState'].freeSpacePercent < 7:
# under 7% of space free no enable allowed
self.events.add(EventName.outOfSpace)
if self.sm['deviceState'].memoryUsagePercent > 90:
self.events.add(EventName.lowMemory)
# Alert if fan isn't spinning for 5 seconds
if self.sm['pandaState'].pandaType in [PandaType.uno, PandaType.dos]:
if self.sm['pandaState'].fanSpeedRpm == 0 and self.sm['deviceState'].fanSpeedPercentDesired > 50:
if (self.sm.frame - self.last_functional_fan_frame) * DT_CTRL > 5.0:
self.events.add(EventName.fanMalfunction)
else:
self.last_functional_fan_frame = self.sm.frame
# Handle calibration status
cal_status = self.sm['liveCalibration'].calStatus
if cal_status != Calibration.CALIBRATED:
if cal_status == Calibration.UNCALIBRATED:
self.events.add(EventName.calibrationIncomplete)
else:
self.events.add(EventName.calibrationInvalid)
# Handle lane change
if self.sm['lateralPlan'].laneChangeState == LaneChangeState.preLaneChange:
direction = self.sm['lateralPlan'].laneChangeDirection
if (CS.leftBlindspot and direction == LaneChangeDirection.left) or \
(CS.rightBlindspot and direction == LaneChangeDirection.right):
self.events.add(EventName.laneChangeBlocked)
else:
if direction == LaneChangeDirection.left:
self.events.add(EventName.preLaneChangeLeft)
else:
self.events.add(EventName.preLaneChangeRight)
elif self.sm['lateralPlan'].laneChangeState in [LaneChangeState.laneChangeStarting,
LaneChangeState.laneChangeFinishing]:
self.events.add(EventName.laneChange)
if self.can_rcv_error or (not CS.canValid and self.sm.frame > 5 / DT_CTRL):
self.events.add(EventName.canError)
if (self.sm['pandaState'].safetyModel != self.CP.safetyModel and self.sm.frame > 2 / DT_CTRL) or \
self.mismatch_counter >= 200:
self.events.add(EventName.controlsMismatch)
if len(self.sm['radarState'].radarErrors):
self.events.add(EventName.radarFault)
elif not self.sm.valid['liveParameters']:
self.events.add(EventName.vehicleModelInvalid)
elif not self.sm.all_alive_and_valid():
self.events.add(EventName.commIssue)
if not self.logged_comm_issue:
cloudlog.error(f"commIssue - valid: {self.sm.valid} - alive: {self.sm.alive}")
self.logged_comm_issue = True
else:
self.logged_comm_issue = False
if not self.sm['lateralPlan'].mpcSolutionValid:
self.events.add(EventName.plannerError)
if not self.sm['liveLocationKalman'].sensorsOK and not NOSENSOR:
if self.sm.frame > 5 / DT_CTRL: # Give locationd some time to receive all the inputs
self.events.add(EventName.sensorDataInvalid)
if not self.sm['liveLocationKalman'].posenetOK:
self.events.add(EventName.posenetInvalid)
if not self.sm['liveLocationKalman'].deviceStable:
self.events.add(EventName.deviceFalling)
if log.PandaState.FaultType.relayMalfunction in self.sm['pandaState'].faults:
self.events.add(EventName.relayMalfunction)
if self.sm['longitudinalPlan'].fcw:
self.events.add(EventName.fcw)
# TODO: fix simulator
if not SIMULATION:
if not NOSENSOR:
if not self.sm.alive['ubloxRaw'] and (self.sm.frame > 10. / DT_CTRL):
self.events.add(EventName.gpsMalfunction)
elif not self.sm['liveLocationKalman'].gpsOK and (self.distance_traveled > 1000) and not TICI:
# Not show in first 1 km to allow for driving out of garage. This event shows after 5 minutes
self.events.add(EventName.noGps)
if not self.sm.all_alive(['roadCameraState', 'driverCameraState']) and (self.sm.frame > 5 / DT_CTRL):
self.events.add(EventName.cameraMalfunction)
if self.sm['modelV2'].frameDropPerc > 20:
self.events.add(EventName.modeldLagging)
# Check if all manager processes are running
not_running = set(p.name for p in self.sm['managerState'].processes if not p.running)
if self.sm.rcv_frame['managerState'] and (not_running - IGNORE_PROCESSES):
self.events.add(EventName.processNotRunning)
# Only allow engagement with brake pressed when stopped behind another stopped car
if CS.brakePressed and self.sm['longitudinalPlan'].vTargetFuture >= STARTING_TARGET_SPEED \
and self.CP.openpilotLongitudinalControl and CS.vEgo < 0.3:
self.events.add(EventName.noTarget)
def data_sample(self):
"""Receive data from sockets and update carState"""
# Update carState from CAN
can_strs = messaging.drain_sock_raw(self.can_sock, wait_for_one=True)
CS = self.CI.update(self.CC, can_strs)
self.sm.update(0)
# Check for CAN timeout
if not can_strs:
self.can_error_counter += 1
self.can_rcv_error = True
else:
self.can_rcv_error = False
# When the panda and controlsd do not agree on controls_allowed
# we want to disengage openpilot. However the status from the panda goes through
# another socket other than the CAN messages and one can arrive earlier than the other.
# Therefore we allow a mismatch for two samples, then we trigger the disengagement.
if not self.enabled:
self.mismatch_counter = 0
if not self.sm['pandaState'].controlsAllowed and self.enabled:
self.mismatch_counter += 1
self.distance_traveled += CS.vEgo * DT_CTRL
return CS
def state_transition(self, CS):
"""Compute conditional state transitions and execute actions on state transitions"""
self.v_cruise_kph_last = self.v_cruise_kph
# if stock cruise is completely disabled, then we can use our own set speed logic
if not self.CP.enableCruise:
self.v_cruise_kph = update_v_cruise(self.v_cruise_kph, CS.buttonEvents, self.enabled)
elif self.CP.enableCruise and CS.cruiseState.enabled:
self.v_cruise_kph = CS.cruiseState.speed * CV.MS_TO_KPH
# decrease the soft disable timer at every step, as it's reset on
# entrance in SOFT_DISABLING state
self.soft_disable_timer = max(0, self.soft_disable_timer - 1)
self.current_alert_types = [ET.PERMANENT]
# ENABLED, PRE ENABLING, SOFT DISABLING
if self.state != State.disabled:
# user and immediate disable always have priority in a non-disabled state
if self.events.any(ET.USER_DISABLE):
self.state = State.disabled
self.current_alert_types.append(ET.USER_DISABLE)
elif self.events.any(ET.IMMEDIATE_DISABLE):
self.state = State.disabled
self.current_alert_types.append(ET.IMMEDIATE_DISABLE)
else:
# ENABLED
if self.state == State.enabled:
if self.events.any(ET.SOFT_DISABLE):
self.state = State.softDisabling
self.soft_disable_timer = 300 # 3s
self.current_alert_types.append(ET.SOFT_DISABLE)
# SOFT DISABLING
elif self.state == State.softDisabling:
if not self.events.any(ET.SOFT_DISABLE):
# no more soft disabling condition, so go back to ENABLED
self.state = State.enabled
elif self.events.any(ET.SOFT_DISABLE) and self.soft_disable_timer > 0:
self.current_alert_types.append(ET.SOFT_DISABLE)
elif self.soft_disable_timer <= 0:
self.state = State.disabled
# PRE ENABLING
elif self.state == State.preEnabled:
if not self.events.any(ET.PRE_ENABLE):
self.state = State.enabled
else:
self.current_alert_types.append(ET.PRE_ENABLE)
# DISABLED
elif self.state == State.disabled:
if self.events.any(ET.ENABLE):
if self.events.any(ET.NO_ENTRY):
self.current_alert_types.append(ET.NO_ENTRY)
else:
if self.events.any(ET.PRE_ENABLE):
self.state = State.preEnabled
else:
self.state = State.enabled
self.current_alert_types.append(ET.ENABLE)
self.v_cruise_kph = initialize_v_cruise(CS.vEgo, CS.buttonEvents, self.v_cruise_kph_last)
# Check if actuators are enabled
self.active = self.state == State.enabled or self.state == State.softDisabling
if self.active:
self.current_alert_types.append(ET.WARNING)
# Check if openpilot is engaged
self.enabled = self.active or self.state == State.preEnabled
def state_control(self, CS):
"""Given the state, this function returns an actuators packet"""
lat_plan = self.sm['lateralPlan']
long_plan = self.sm['longitudinalPlan']
actuators = car.CarControl.Actuators.new_message()
if CS.leftBlinker or CS.rightBlinker:
self.last_blinker_frame = self.sm.frame
# State specific actions
if not self.active:
self.LaC.reset()
self.LoC.reset(v_pid=CS.vEgo)
long_plan_age = DT_CTRL * (self.sm.frame - self.sm.rcv_frame['longitudinalPlan'])
# no greater than dt mpc + dt, to prevent too high extraps
dt = min(long_plan_age, LON_MPC_STEP + DT_CTRL) + DT_CTRL
a_acc_sol = long_plan.aStart + (dt / LON_MPC_STEP) * (long_plan.aTarget - long_plan.aStart)
v_acc_sol = long_plan.vStart + dt * (a_acc_sol + long_plan.aStart) / 2.0
# Gas/Brake PID loop
actuators.gas, actuators.brake = self.LoC.update(self.active, CS, v_acc_sol, long_plan.vTargetFuture, a_acc_sol, self.CP)
# Steering PID loop and lateral MPC
actuators.steer, actuators.steeringAngleDeg, lac_log = self.LaC.update(self.active, CS, self.CP, lat_plan)
# Check for difference between desired angle and angle for angle based control
angle_control_saturated = self.CP.steerControlType == car.CarParams.SteerControlType.angle and \
abs(actuators.steeringAngleDeg - CS.steeringAngleDeg) > STEER_ANGLE_SATURATION_THRESHOLD
if angle_control_saturated and not CS.steeringPressed and self.active:
self.saturated_count += 1
else:
self.saturated_count = 0
# Send a "steering required alert" if saturation count has reached the limit
if (lac_log.saturated and not CS.steeringPressed) or \
(self.saturated_count > STEER_ANGLE_SATURATION_TIMEOUT):
# Check if we deviated from the path
left_deviation = actuators.steer > 0 and lat_plan.dPathPoints[0] < -0.1
right_deviation = actuators.steer < 0 and lat_plan.dPathPoints[0] > 0.1
if left_deviation or right_deviation:
self.events.add(EventName.steerSaturated)
return actuators, v_acc_sol, a_acc_sol, lac_log
def publish_logs(self, CS, start_time, actuators, v_acc, a_acc, lac_log):
"""Send actuators and hud commands to the car, send controlsstate and MPC logging

Builds the CarControl message (actuators, cruise overrides, HUD flags,
lane-departure warnings), forwards CAN commands when not read-only, and
publishes controlsState/carState/carEvents/carParams/carControl.
"""
CC = car.CarControl.new_message()
CC.enabled = self.enabled
CC.actuators = actuators
CC.cruiseControl.override = True
CC.cruiseControl.cancel = not self.CP.enableCruise or (not self.enabled and CS.cruiseState.enabled)
# Some override values for Honda
# brake discount removes a sharp nonlinearity
brake_discount = (1.0 - clip(actuators.brake * 3., 0.0, 1.0))
speed_override = max(0.0, (self.LoC.v_pid + CS.cruiseState.speedOffset) * brake_discount)
CC.cruiseControl.speedOverride = float(speed_override if self.CP.enableCruise else 0.0)
CC.cruiseControl.accelOverride = self.CI.calc_accel_override(CS.aEgo, self.sm['longitudinalPlan'].aTarget, CS.vEgo, self.sm['longitudinalPlan'].vTarget)
# HUD state mirrored to the car's display.
CC.hudControl.setSpeed = float(self.v_cruise_kph * CV.KPH_TO_MS)
CC.hudControl.speedVisible = self.enabled
CC.hudControl.lanesVisible = self.enabled
CC.hudControl.leadVisible = self.sm['longitudinalPlan'].hasLead
right_lane_visible = self.sm['lateralPlan'].rProb > 0.5
left_lane_visible = self.sm['lateralPlan'].lProb > 0.5
CC.hudControl.rightLaneVisible = bool(right_lane_visible)
CC.hudControl.leftLaneVisible = bool(left_lane_visible)
recent_blinker = (self.sm.frame - self.last_blinker_frame) * DT_CTRL < 5.0  # 5s blinker cooldown
# LDW only when openpilot is NOT actively steering and calibration is done.
ldw_allowed = self.is_ldw_enabled and CS.vEgo > LDW_MIN_SPEED and not recent_blinker \
and not self.active and self.sm['liveCalibration'].calStatus == Calibration.CALIBRATED
meta = self.sm['modelV2'].meta
if len(meta.desirePrediction) and ldw_allowed:
l_lane_change_prob = meta.desirePrediction[Desire.laneChangeLeft - 1]
r_lane_change_prob = meta.desirePrediction[Desire.laneChangeRight - 1]
# laneLines[1]/[2] are presumably the left/right ego lane lines -- TODO confirm against modelV2 schema.
l_lane_close = left_lane_visible and (self.sm['modelV2'].laneLines[1].y[0] > -(1.08 + CAMERA_OFFSET))
r_lane_close = right_lane_visible and (self.sm['modelV2'].laneLines[2].y[0] < (1.08 - CAMERA_OFFSET))
CC.hudControl.leftLaneDepart = bool(l_lane_change_prob > LANE_DEPARTURE_THRESHOLD and l_lane_close)
CC.hudControl.rightLaneDepart = bool(r_lane_change_prob > LANE_DEPARTURE_THRESHOLD and r_lane_close)
if CC.hudControl.rightLaneDepart or CC.hudControl.leftLaneDepart:
self.events.add(EventName.ldw)
# Re-raise WARNING alerts each frame while active; clear them otherwise.
clear_event = ET.WARNING if ET.WARNING not in self.current_alert_types else None
alerts = self.events.create_alerts(self.current_alert_types, [self.CP, self.sm, self.is_metric])
self.AM.add_many(self.sm.frame, alerts, self.enabled)
self.AM.process_alerts(self.sm.frame, clear_event)
CC.hudControl.visualAlert = self.AM.visual_alert
if not self.read_only:
# send car controls over can
can_sends = self.CI.apply(CC)
self.pm.send('sendcan', can_list_to_can_capnp(can_sends, msgtype='sendcan', valid=CS.canValid))
force_decel = (self.sm['driverMonitoringState'].awarenessStatus < 0.) or \
(self.state == State.softDisabling)
steer_angle_rad = (CS.steeringAngleDeg - self.sm['lateralPlan'].angleOffsetDeg) * CV.DEG_TO_RAD
# controlsState
dat = messaging.new_message('controlsState')
dat.valid = CS.canValid
controlsState = dat.controlsState
controlsState.alertText1 = self.AM.alert_text_1
controlsState.alertText2 = self.AM.alert_text_2
controlsState.alertSize = self.AM.alert_size
controlsState.alertStatus = self.AM.alert_status
controlsState.alertBlinkingRate = self.AM.alert_rate
controlsState.alertType = self.AM.alert_type
controlsState.alertSound = self.AM.audible_alert
controlsState.canMonoTimes = list(CS.canMonoTimes)
controlsState.longitudinalPlanMonoTime = self.sm.logMonoTime['longitudinalPlan']
controlsState.lateralPlanMonoTime = self.sm.logMonoTime['lateralPlan']
controlsState.enabled = self.enabled
controlsState.active = self.active
controlsState.curvature = self.VM.calc_curvature(steer_angle_rad, CS.vEgo)
controlsState.state = self.state
controlsState.engageable = not self.events.any(ET.NO_ENTRY)
controlsState.longControlState = self.LoC.long_control_state
controlsState.vPid = float(self.LoC.v_pid)
controlsState.vCruise = float(self.v_cruise_kph)
controlsState.upAccelCmd = float(self.LoC.pid.p)
controlsState.uiAccelCmd = float(self.LoC.pid.i)
controlsState.ufAccelCmd = float(self.LoC.pid.f)
controlsState.steeringAngleDesiredDeg = float(self.LaC.angle_steers_des)
controlsState.vTargetLead = float(v_acc)
controlsState.aTarget = float(a_acc)
controlsState.cumLagMs = -self.rk.remaining * 1000.
controlsState.startMonoTime = int(start_time * 1e9)
controlsState.forceDecel = bool(force_decel)
controlsState.canErrorCounter = self.can_error_counter
# Attach the tuning-specific lateral controller log under the matching union field.
if self.CP.lateralTuning.which() == 'pid':
controlsState.lateralControlState.pidState = lac_log
elif self.CP.lateralTuning.which() == 'lqr':
controlsState.lateralControlState.lqrState = lac_log
elif self.CP.lateralTuning.which() == 'indi':
controlsState.lateralControlState.indiState = lac_log
self.pm.send('controlsState', dat)
# carState
car_events = self.events.to_msg()
cs_send = messaging.new_message('carState')
cs_send.valid = CS.canValid
cs_send.carState = CS
cs_send.carState.events = car_events
self.pm.send('carState', cs_send)
# carEvents - logged every second or on change
if (self.sm.frame % int(1. / DT_CTRL) == 0) or (self.events.names != self.events_prev):
ce_send = messaging.new_message('carEvents', len(self.events))
ce_send.carEvents = car_events
self.pm.send('carEvents', ce_send)
self.events_prev = self.events.names.copy()
# carParams - logged every 50 seconds (> 1 per segment)
if (self.sm.frame % int(50. / DT_CTRL) == 0):
cp_send = messaging.new_message('carParams')
cp_send.carParams = self.CP
self.pm.send('carParams', cp_send)
# carControl
cc_send = messaging.new_message('carControl')
cc_send.valid = CS.canValid
cc_send.carControl = CC
self.pm.send('carControl', cc_send)
# copy CarControl to pass to CarInterface on the next iteration
self.CC = CC
def step(self):
"""Run one full control cycle: sample, update events, transition state,
compute actuators and publish, with profiler checkpoints throughout.

NOTE(review): indentation was stripped in this dump, so the extent of the
`if not self.read_only:` guard below is ambiguous here -- confirm against
the original file which statements it covers.
"""
start_time = sec_since_boot()
self.prof.checkpoint("Ratekeeper", ignore=True)
# Sample data from sockets and get a carState
CS = self.data_sample()
self.prof.checkpoint("Sample")
self.update_events(CS)
if not self.read_only:
# Update control state
self.state_transition(CS)
self.prof.checkpoint("State transition")
# Compute actuators (runs PID loops and lateral MPC)
actuators, v_acc, a_acc, lac_log = self.state_control(CS)
self.prof.checkpoint("State Control")
# Publish data
self.publish_logs(CS, start_time, actuators, v_acc, a_acc, lac_log)
self.prof.checkpoint("Sent")
def controlsd_thread(self):
    """Run the control loop forever: one step per iteration, then let the
    Ratekeeper account for timing and the profiler emit its report."""
    while True:
        self.step()
        self.rk.monitor_time()
        self.prof.display()
def main(sm=None, pm=None, logcan=None):
    """Process entry point: build the Controls state machine (messaging
    sockets are created internally when the arguments are None) and hand
    over to its infinite run loop."""
    Controls(sm, pm, logcan).controlsd_thread()


if __name__ == "__main__":
    main()
| 42.991424 | 175 | 0.718042 |
import os
from cereal import car, log
from common.numpy_fast import clip
from common.realtime import sec_since_boot, config_realtime_process, Priority, Ratekeeper, DT_CTRL
from common.profiler import Profiler
from common.params import Params, put_nonblocking
import cereal.messaging as messaging
from selfdrive.config import Conversions as CV
from selfdrive.swaglog import cloudlog
from selfdrive.boardd.boardd import can_list_to_can_capnp
from selfdrive.car.car_helpers import get_car, get_startup_event, get_one_can
from selfdrive.controls.lib.lane_planner import CAMERA_OFFSET
from selfdrive.controls.lib.drive_helpers import update_v_cruise, initialize_v_cruise
from selfdrive.controls.lib.longcontrol import LongControl, STARTING_TARGET_SPEED
from selfdrive.controls.lib.latcontrol_pid import LatControlPID
from selfdrive.controls.lib.latcontrol_indi import LatControlINDI
from selfdrive.controls.lib.latcontrol_lqr import LatControlLQR
from selfdrive.controls.lib.events import Events, ET
from selfdrive.controls.lib.alertmanager import AlertManager
from selfdrive.controls.lib.vehicle_model import VehicleModel
from selfdrive.controls.lib.longitudinal_planner import LON_MPC_STEP
from selfdrive.locationd.calibrationd import Calibration
from selfdrive.hardware import HARDWARE, TICI
# Lane-departure warnings are only armed above this speed (31 mph, in m/s).
LDW_MIN_SPEED = 31 * CV.MPH_TO_MS
# Model lane-change probability above which a departure warning is raised.
LANE_DEPARTURE_THRESHOLD = 0.1
# Control frames (1 s worth at the DT_CTRL period) of sustained saturation before alerting.
STEER_ANGLE_SATURATION_TIMEOUT = 1.0 / DT_CTRL
# Desired-vs-measured steering angle difference (deg) counted as saturated
# on angle-control cars.
STEER_ANGLE_SATURATION_THRESHOLD = 2.5

# Environment toggles used by simulation / sensorless test rigs.
SIMULATION = "SIMULATION" in os.environ
NOSENSOR = "NOSENSOR" in os.environ

# Managed processes whose absence must not trigger EventName.processNotRunning.
# Set literal instead of set([...]): avoids building a throwaway list (ruff C405).
IGNORE_PROCESSES = {"rtshield", "uploader", "deleter", "loggerd", "logmessaged", "tombstoned", "logcatd", "proclogd", "clocksd", "updated", "timezoned", "manage_athenad"}

# Short aliases for capnp enum types from the cereal log/car schemas.
ThermalStatus = log.DeviceState.ThermalStatus
State = log.ControlsState.OpenpilotState
PandaType = log.PandaState.PandaType
LongitudinalPlanSource = log.LongitudinalPlan.LongitudinalPlanSource
Desire = log.LateralPlan.Desire
LaneChangeState = log.LateralPlan.LaneChangeState
LaneChangeDirection = log.LateralPlan.LaneChangeDirection
EventName = car.CarEvent.EventName
class Controls:
"""openpilot's central control state machine.

Consumes sensor/planner messages via a SubMaster, runs the engagement
state machine plus the longitudinal and lateral controllers, and
publishes actuator commands and logging messages at 100Hz.
"""
def __init__(self, sm=None, pm=None, can_sock=None):
"""Set up messaging, detect the car, load toggles and instantiate the
longitudinal/lateral controllers. `sm`, `pm` and `can_sock` may be
injected for tests; real sockets are created when they are None."""
config_realtime_process(3, Priority.CTRL_HIGH)
self.pm = pm
if self.pm is None:
self.pm = messaging.PubMaster(['sendcan', 'controlsState', 'carState',
'carControl', 'carEvents', 'carParams'])
self.sm = sm
if self.sm is None:
ignore = ['ubloxRaw', 'driverCameraState', 'managerState'] if SIMULATION else None
self.sm = messaging.SubMaster(['deviceState', 'pandaState', 'modelV2', 'liveCalibration', 'ubloxRaw',
'driverMonitoringState', 'longitudinalPlan', 'lateralPlan', 'liveLocationKalman',
'roadCameraState', 'driverCameraState', 'managerState', 'liveParameters', 'radarState'], ignore_alive=ignore)
self.can_sock = can_sock
if can_sock is None:
can_timeout = None if os.environ.get('NO_CAN_TIMEOUT', False) else 100
self.can_sock = messaging.sub_sock('can', timeout=can_timeout)
# Block until the first CAN frame arrives, then fingerprint the car.
print("Waiting for CAN messages...")
get_one_can(self.can_sock)
self.CI, self.CP = get_car(self.can_sock, self.pm.sock['sendcan'])
# Read feature toggles from persistent params.
params = Params()
self.is_metric = params.get("IsMetric", encoding='utf8') == "1"
self.is_ldw_enabled = params.get("IsLdwEnabled", encoding='utf8') == "1"
community_feature_toggle = params.get("CommunityFeaturesToggle", encoding='utf8') == "1"
openpilot_enabled_toggle = params.get("OpenpilotEnabledToggle", encoding='utf8') == "1"
passive = params.get("Passive", encoding='utf8') == "1" or not openpilot_enabled_toggle
sounds_available = HARDWARE.get_sound_card_online()
# Decide whether openpilot may actuate this car at all.
car_recognized = self.CP.carName != 'mock'
controller_available = self.CP.enableCamera and self.CI.CC is not None and not passive and not self.CP.dashcamOnly
community_feature_disallowed = self.CP.communityFeature and not community_feature_toggle
self.read_only = not car_recognized or not controller_available or \
self.CP.dashcamOnly or community_feature_disallowed
if self.read_only:
self.CP.safetyModel = car.CarParams.SafetyModel.noOutput
# Write CarParams for radard and boardd safety mode
cp_bytes = self.CP.to_bytes()
params.put("CarParams", cp_bytes)
put_nonblocking("CarParamsCache", cp_bytes)
self.CC = car.CarControl.new_message()
self.AM = AlertManager()
self.events = Events()
self.LoC = LongControl(self.CP, self.CI.compute_gb)
self.VM = VehicleModel(self.CP)
# Pick the lateral controller matching the car's tuning union.
if self.CP.lateralTuning.which() == 'pid':
self.LaC = LatControlPID(self.CP)
elif self.CP.lateralTuning.which() == 'indi':
self.LaC = LatControlINDI(self.CP)
elif self.CP.lateralTuning.which() == 'lqr':
self.LaC = LatControlLQR(self.CP)
# State machine and bookkeeping counters.
self.state = State.disabled
self.enabled = False
self.active = False
self.can_rcv_error = False
self.soft_disable_timer = 0
self.v_cruise_kph = 255
self.v_cruise_kph_last = 0
self.mismatch_counter = 0
self.can_error_counter = 0
self.last_blinker_frame = 0
self.saturated_count = 0
self.distance_traveled = 0
self.last_functional_fan_frame = 0
self.events_prev = []
self.current_alert_types = [ET.PERMANENT]
self.logged_comm_issue = False
# Seed benign defaults so the first frames don't raise spurious events
# before the real messages arrive.
self.sm['liveCalibration'].calStatus = Calibration.CALIBRATED
self.sm['deviceState'].freeSpacePercent = 100
self.sm['driverMonitoringState'].events = []
self.sm['driverMonitoringState'].awarenessStatus = 1.
self.sm['driverMonitoringState'].faceDetected = False
self.startup_event = get_startup_event(car_recognized, controller_available)
if not sounds_available:
self.events.add(EventName.soundsUnavailable, static=True)
if community_feature_disallowed:
self.events.add(EventName.communityFeatureDisallowed, static=True)
if not car_recognized:
self.events.add(EventName.carUnrecognized, static=True)
# controlsd is driven by can recv, expected at 100Hz
self.rk = Ratekeeper(100, print_delay_threshold=None)
self.prof = Profiler(False)  # off by default
def update_events(self, CS):
"""Compute all events (alerts/engagement blockers) for this frame from
car state, device health, calibration, planners and localizer."""
self.events.clear()
self.events.add_from_msg(CS.events)
self.events.add_from_msg(self.sm['driverMonitoringState'].events)
# Handle startup event
if self.startup_event is not None:
self.events.add(self.startup_event)
self.startup_event = None
# Create events for battery, temperature, disk space, and memory
if self.sm['deviceState'].batteryPercent < 1 and self.sm['deviceState'].chargingError:
# at zero percent battery, while discharging, OP should not be allowed
self.events.add(EventName.lowBattery)
if self.sm['deviceState'].thermalStatus >= ThermalStatus.red:
self.events.add(EventName.overheat)
if self.sm['deviceState'].freeSpacePercent < 7:
# under 7% of space free no enable allowed
self.events.add(EventName.outOfSpace)
if self.sm['deviceState'].memoryUsagePercent > 90:
self.events.add(EventName.lowMemory)
# Alert if fan isn't spinning for 5 seconds
if self.sm['pandaState'].pandaType in [PandaType.uno, PandaType.dos]:
if self.sm['pandaState'].fanSpeedRpm == 0 and self.sm['deviceState'].fanSpeedPercentDesired > 50:
if (self.sm.frame - self.last_functional_fan_frame) * DT_CTRL > 5.0:
self.events.add(EventName.fanMalfunction)
else:
self.last_functional_fan_frame = self.sm.frame
# Handle calibration status
cal_status = self.sm['liveCalibration'].calStatus
if cal_status != Calibration.CALIBRATED:
if cal_status == Calibration.UNCALIBRATED:
self.events.add(EventName.calibrationIncomplete)
else:
self.events.add(EventName.calibrationInvalid)
# Lane-change state from the lateral planner drives blinker alerts.
if self.sm['lateralPlan'].laneChangeState == LaneChangeState.preLaneChange:
direction = self.sm['lateralPlan'].laneChangeDirection
if (CS.leftBlindspot and direction == LaneChangeDirection.left) or \
(CS.rightBlindspot and direction == LaneChangeDirection.right):
self.events.add(EventName.laneChangeBlocked)
else:
if direction == LaneChangeDirection.left:
self.events.add(EventName.preLaneChangeLeft)
else:
self.events.add(EventName.preLaneChangeRight)
elif self.sm['lateralPlan'].laneChangeState in [LaneChangeState.laneChangeStarting,
LaneChangeState.laneChangeFinishing]:
self.events.add(EventName.laneChange)
# CAN and panda health checks (graced for the first seconds after boot).
if self.can_rcv_error or (not CS.canValid and self.sm.frame > 5 / DT_CTRL):
self.events.add(EventName.canError)
if (self.sm['pandaState'].safetyModel != self.CP.safetyModel and self.sm.frame > 2 / DT_CTRL) or \
self.mismatch_counter >= 200:
self.events.add(EventName.controlsMismatch)
if len(self.sm['radarState'].radarErrors):
self.events.add(EventName.radarFault)
elif not self.sm.valid['liveParameters']:
self.events.add(EventName.vehicleModelInvalid)
elif not self.sm.all_alive_and_valid():
self.events.add(EventName.commIssue)
if not self.logged_comm_issue:
cloudlog.error(f"commIssue - valid: {self.sm.valid} - alive: {self.sm.alive}")
self.logged_comm_issue = True
else:
self.logged_comm_issue = False
if not self.sm['lateralPlan'].mpcSolutionValid:
self.events.add(EventName.plannerError)
if not self.sm['liveLocationKalman'].sensorsOK and not NOSENSOR:
if self.sm.frame > 5 / DT_CTRL:
self.events.add(EventName.sensorDataInvalid)
if not self.sm['liveLocationKalman'].posenetOK:
self.events.add(EventName.posenetInvalid)
if not self.sm['liveLocationKalman'].deviceStable:
self.events.add(EventName.deviceFalling)
if log.PandaState.FaultType.relayMalfunction in self.sm['pandaState'].faults:
self.events.add(EventName.relayMalfunction)
if self.sm['longitudinalPlan'].fcw:
self.events.add(EventName.fcw)
# Checks that only make sense on real hardware with real sensors.
if not SIMULATION:
if not NOSENSOR:
if not self.sm.alive['ubloxRaw'] and (self.sm.frame > 10. / DT_CTRL):
self.events.add(EventName.gpsMalfunction)
elif not self.sm['liveLocationKalman'].gpsOK and (self.distance_traveled > 1000) and not TICI:
self.events.add(EventName.noGps)
if not self.sm.all_alive(['roadCameraState', 'driverCameraState']) and (self.sm.frame > 5 / DT_CTRL):
self.events.add(EventName.cameraMalfunction)
if self.sm['modelV2'].frameDropPerc > 20:
self.events.add(EventName.modeldLagging)
not_running = set(p.name for p in self.sm['managerState'].processes if not p.running)
if self.sm.rcv_frame['managerState'] and (not_running - IGNORE_PROCESSES):
self.events.add(EventName.processNotRunning)
if CS.brakePressed and self.sm['longitudinalPlan'].vTargetFuture >= STARTING_TARGET_SPEED \
and self.CP.openpilotLongitudinalControl and CS.vEgo < 0.3:
self.events.add(EventName.noTarget)
def data_sample(self):
"""Drain raw CAN frames, feed them to the car interface and refresh all
subscribed sockets; returns the parsed CarState for this frame."""
can_strs = messaging.drain_sock_raw(self.can_sock, wait_for_one=True)
CS = self.CI.update(self.CC, can_strs)
self.sm.update(0)
# Track CAN reception failures for the canError event.
if not can_strs:
self.can_error_counter += 1
self.can_rcv_error = True
else:
self.can_rcv_error = False
# Count frames where panda disagrees with us about controls being allowed.
if not self.enabled:
self.mismatch_counter = 0
if not self.sm['pandaState'].controlsAllowed and self.enabled:
self.mismatch_counter += 1
self.distance_traveled += CS.vEgo * DT_CTRL
return CS
def state_transition(self, CS):
"""Compute the engagement state transitions for this frame based on the
events raised by update_events; also maintains the cruise set speed.
NOTE: indentation was lost in this dump; branch extents follow upstream controlsd.
"""
self.v_cruise_kph_last = self.v_cruise_kph
# if stock cruise is completely disabled, then we can use our own set speed logic
if not self.CP.enableCruise:
self.v_cruise_kph = update_v_cruise(self.v_cruise_kph, CS.buttonEvents, self.enabled)
elif self.CP.enableCruise and CS.cruiseState.enabled:
self.v_cruise_kph = CS.cruiseState.speed * CV.MS_TO_KPH
# entrance in SOFT_DISABLING state
self.soft_disable_timer = max(0, self.soft_disable_timer - 1)
self.current_alert_types = [ET.PERMANENT]
# ENABLED, PRE ENABLING, SOFT DISABLING
if self.state != State.disabled:
# user and immediate disable always have priority in a non-disabled state
if self.events.any(ET.USER_DISABLE):
self.state = State.disabled
self.current_alert_types.append(ET.USER_DISABLE)
elif self.events.any(ET.IMMEDIATE_DISABLE):
self.state = State.disabled
self.current_alert_types.append(ET.IMMEDIATE_DISABLE)
else:
# ENABLED
if self.state == State.enabled:
if self.events.any(ET.SOFT_DISABLE):
self.state = State.softDisabling
self.soft_disable_timer = 300  # 3s
self.current_alert_types.append(ET.SOFT_DISABLE)
# SOFT DISABLING
elif self.state == State.softDisabling:
if not self.events.any(ET.SOFT_DISABLE):
# no more soft disabling condition, so go back to ENABLED
self.state = State.enabled
elif self.events.any(ET.SOFT_DISABLE) and self.soft_disable_timer > 0:
self.current_alert_types.append(ET.SOFT_DISABLE)
elif self.soft_disable_timer <= 0:
self.state = State.disabled
# PRE ENABLING
elif self.state == State.preEnabled:
if not self.events.any(ET.PRE_ENABLE):
self.state = State.enabled
else:
self.current_alert_types.append(ET.PRE_ENABLE)
# DISABLED
elif self.state == State.disabled:
if self.events.any(ET.ENABLE):
if self.events.any(ET.NO_ENTRY):
self.current_alert_types.append(ET.NO_ENTRY)
else:
if self.events.any(ET.PRE_ENABLE):
self.state = State.preEnabled
else:
self.state = State.enabled
self.current_alert_types.append(ET.ENABLE)
self.v_cruise_kph = initialize_v_cruise(CS.vEgo, CS.buttonEvents, self.v_cruise_kph_last)
# Check if actuators are enabled
self.active = self.state == State.enabled or self.state == State.softDisabling
if self.active:
self.current_alert_types.append(ET.WARNING)
# Check if openpilot is engaged
self.enabled = self.active or self.state == State.preEnabled
def state_control(self, CS):
"""Given the state, this function returns an actuators packet"""
lat_plan = self.sm['lateralPlan']
long_plan = self.sm['longitudinalPlan']
actuators = car.CarControl.Actuators.new_message()
if CS.leftBlinker or CS.rightBlinker:
self.last_blinker_frame = self.sm.frame
# State specific actions
if not self.active:
self.LaC.reset()
self.LoC.reset(v_pid=CS.vEgo)
long_plan_age = DT_CTRL * (self.sm.frame - self.sm.rcv_frame['longitudinalPlan'])
# no greater than dt mpc + dt, to prevent too high extraps
dt = min(long_plan_age, LON_MPC_STEP + DT_CTRL) + DT_CTRL
# Interpolate acceleration along the plan, then integrate for speed.
a_acc_sol = long_plan.aStart + (dt / LON_MPC_STEP) * (long_plan.aTarget - long_plan.aStart)
v_acc_sol = long_plan.vStart + dt * (a_acc_sol + long_plan.aStart) / 2.0
# Gas/Brake PID loop
actuators.gas, actuators.brake = self.LoC.update(self.active, CS, v_acc_sol, long_plan.vTargetFuture, a_acc_sol, self.CP)
# Steering PID loop and lateral MPC
actuators.steer, actuators.steeringAngleDeg, lac_log = self.LaC.update(self.active, CS, self.CP, lat_plan)
# Check for difference between desired angle and angle for angle based control
angle_control_saturated = self.CP.steerControlType == car.CarParams.SteerControlType.angle and \
abs(actuators.steeringAngleDeg - CS.steeringAngleDeg) > STEER_ANGLE_SATURATION_THRESHOLD
if angle_control_saturated and not CS.steeringPressed and self.active:
self.saturated_count += 1
else:
self.saturated_count = 0
# Send a "steering required alert" if saturation count has reached the limit
if (lac_log.saturated and not CS.steeringPressed) or \
(self.saturated_count > STEER_ANGLE_SATURATION_TIMEOUT):
# Check if we deviated from the path
left_deviation = actuators.steer > 0 and lat_plan.dPathPoints[0] < -0.1
right_deviation = actuators.steer < 0 and lat_plan.dPathPoints[0] > 0.1
if left_deviation or right_deviation:
self.events.add(EventName.steerSaturated)
return actuators, v_acc_sol, a_acc_sol, lac_log
def publish_logs(self, CS, start_time, actuators, v_acc, a_acc, lac_log):
"""Send actuators and hud commands to the car, send controlsState and MPC logging."""
CC = car.CarControl.new_message()
CC.enabled = self.enabled
CC.actuators = actuators
CC.cruiseControl.override = True
CC.cruiseControl.cancel = not self.CP.enableCruise or (not self.enabled and CS.cruiseState.enabled)
# Some override values for Honda
# brake discount removes a sharp nonlinearity
brake_discount = (1.0 - clip(actuators.brake * 3., 0.0, 1.0))
speed_override = max(0.0, (self.LoC.v_pid + CS.cruiseState.speedOffset) * brake_discount)
CC.cruiseControl.speedOverride = float(speed_override if self.CP.enableCruise else 0.0)
CC.cruiseControl.accelOverride = self.CI.calc_accel_override(CS.aEgo, self.sm['longitudinalPlan'].aTarget, CS.vEgo, self.sm['longitudinalPlan'].vTarget)
CC.hudControl.setSpeed = float(self.v_cruise_kph * CV.KPH_TO_MS)
CC.hudControl.speedVisible = self.enabled
CC.hudControl.lanesVisible = self.enabled
CC.hudControl.leadVisible = self.sm['longitudinalPlan'].hasLead
right_lane_visible = self.sm['lateralPlan'].rProb > 0.5
left_lane_visible = self.sm['lateralPlan'].lProb > 0.5
CC.hudControl.rightLaneVisible = bool(right_lane_visible)
CC.hudControl.leftLaneVisible = bool(left_lane_visible)
recent_blinker = (self.sm.frame - self.last_blinker_frame) * DT_CTRL < 5.0  # 5s blinker cooldown
ldw_allowed = self.is_ldw_enabled and CS.vEgo > LDW_MIN_SPEED and not recent_blinker \
and not self.active and self.sm['liveCalibration'].calStatus == Calibration.CALIBRATED
meta = self.sm['modelV2'].meta
if len(meta.desirePrediction) and ldw_allowed:
l_lane_change_prob = meta.desirePrediction[Desire.laneChangeLeft - 1]
r_lane_change_prob = meta.desirePrediction[Desire.laneChangeRight - 1]
l_lane_close = left_lane_visible and (self.sm['modelV2'].laneLines[1].y[0] > -(1.08 + CAMERA_OFFSET))
r_lane_close = right_lane_visible and (self.sm['modelV2'].laneLines[2].y[0] < (1.08 - CAMERA_OFFSET))
CC.hudControl.leftLaneDepart = bool(l_lane_change_prob > LANE_DEPARTURE_THRESHOLD and l_lane_close)
CC.hudControl.rightLaneDepart = bool(r_lane_change_prob > LANE_DEPARTURE_THRESHOLD and r_lane_close)
if CC.hudControl.rightLaneDepart or CC.hudControl.leftLaneDepart:
self.events.add(EventName.ldw)
clear_event = ET.WARNING if ET.WARNING not in self.current_alert_types else None
alerts = self.events.create_alerts(self.current_alert_types, [self.CP, self.sm, self.is_metric])
self.AM.add_many(self.sm.frame, alerts, self.enabled)
self.AM.process_alerts(self.sm.frame, clear_event)
CC.hudControl.visualAlert = self.AM.visual_alert
if not self.read_only:
# send car controls over can
can_sends = self.CI.apply(CC)
self.pm.send('sendcan', can_list_to_can_capnp(can_sends, msgtype='sendcan', valid=CS.canValid))
force_decel = (self.sm['driverMonitoringState'].awarenessStatus < 0.) or \
(self.state == State.softDisabling)
steer_angle_rad = (CS.steeringAngleDeg - self.sm['lateralPlan'].angleOffsetDeg) * CV.DEG_TO_RAD
# controlsState
dat = messaging.new_message('controlsState')
dat.valid = CS.canValid
controlsState = dat.controlsState
controlsState.alertText1 = self.AM.alert_text_1
controlsState.alertText2 = self.AM.alert_text_2
controlsState.alertSize = self.AM.alert_size
controlsState.alertStatus = self.AM.alert_status
controlsState.alertBlinkingRate = self.AM.alert_rate
controlsState.alertType = self.AM.alert_type
controlsState.alertSound = self.AM.audible_alert
controlsState.canMonoTimes = list(CS.canMonoTimes)
controlsState.longitudinalPlanMonoTime = self.sm.logMonoTime['longitudinalPlan']
controlsState.lateralPlanMonoTime = self.sm.logMonoTime['lateralPlan']
controlsState.enabled = self.enabled
controlsState.active = self.active
controlsState.curvature = self.VM.calc_curvature(steer_angle_rad, CS.vEgo)
controlsState.state = self.state
controlsState.engageable = not self.events.any(ET.NO_ENTRY)
controlsState.longControlState = self.LoC.long_control_state
controlsState.vPid = float(self.LoC.v_pid)
controlsState.vCruise = float(self.v_cruise_kph)
controlsState.upAccelCmd = float(self.LoC.pid.p)
controlsState.uiAccelCmd = float(self.LoC.pid.i)
controlsState.ufAccelCmd = float(self.LoC.pid.f)
controlsState.steeringAngleDesiredDeg = float(self.LaC.angle_steers_des)
controlsState.vTargetLead = float(v_acc)
controlsState.aTarget = float(a_acc)
controlsState.cumLagMs = -self.rk.remaining * 1000.
controlsState.startMonoTime = int(start_time * 1e9)
controlsState.forceDecel = bool(force_decel)
controlsState.canErrorCounter = self.can_error_counter
if self.CP.lateralTuning.which() == 'pid':
controlsState.lateralControlState.pidState = lac_log
elif self.CP.lateralTuning.which() == 'lqr':
controlsState.lateralControlState.lqrState = lac_log
elif self.CP.lateralTuning.which() == 'indi':
controlsState.lateralControlState.indiState = lac_log
self.pm.send('controlsState', dat)
# carState
car_events = self.events.to_msg()
cs_send = messaging.new_message('carState')
cs_send.valid = CS.canValid
cs_send.carState = CS
cs_send.carState.events = car_events
self.pm.send('carState', cs_send)
# carEvents - logged every second or on change
if (self.sm.frame % int(1. / DT_CTRL) == 0) or (self.events.names != self.events_prev):
ce_send = messaging.new_message('carEvents', len(self.events))
ce_send.carEvents = car_events
self.pm.send('carEvents', ce_send)
self.events_prev = self.events.names.copy()
# carParams - logged every 50 seconds (> 1 per segment)
if (self.sm.frame % int(50. / DT_CTRL) == 0):
cp_send = messaging.new_message('carParams')
cp_send.carParams = self.CP
self.pm.send('carParams', cp_send)
# carControl
cc_send = messaging.new_message('carControl')
cc_send.valid = CS.canValid
cc_send.carControl = CC
self.pm.send('carControl', cc_send)
# copy CarControl to pass to CarInterface on the next iteration
self.CC = CC
def step(self):
"""Run one full control cycle (sample, events, state machine, control,
publish) with profiler checkpoints around each phase."""
start_time = sec_since_boot()
self.prof.checkpoint("Ratekeeper", ignore=True)
# Sample data from sockets and get a carState
CS = self.data_sample()
self.prof.checkpoint("Sample")
self.update_events(CS)
if not self.read_only:
# Update control state
self.state_transition(CS)
self.prof.checkpoint("State transition")
# Compute actuators (runs PID loops and lateral MPC)
actuators, v_acc, a_acc, lac_log = self.state_control(CS)
self.prof.checkpoint("State Control")
# Publish data
self.publish_logs(CS, start_time, actuators, v_acc, a_acc, lac_log)
self.prof.checkpoint("Sent")
def controlsd_thread(self):
"""Main loop: step forever at the cadence set by incoming CAN frames."""
while True:
self.step()
self.rk.monitor_time()
self.prof.display()
def main(sm=None, pm=None, logcan=None):
    """Entry point: construct the Controls state machine (real sockets are
    created internally for any argument left as None) and run it forever."""
    controls_instance = Controls(sm, pm, logcan)
    controls_instance.controlsd_thread()


if __name__ == "__main__":
    main()
| true | true |
f7377f954c9f4a7a2c10fdd7e6b44fa63d78d86d | 91 | py | Python | web/app/ponthe/v1/private/__init__.py | adriensade/Galeries | 2a1b53d5fa07621fa3e41b10e26af9dd32b0e874 | [
"MIT"
] | null | null | null | web/app/ponthe/v1/private/__init__.py | adriensade/Galeries | 2a1b53d5fa07621fa3e41b10e26af9dd32b0e874 | [
"MIT"
] | 1 | 2021-01-05T03:43:06.000Z | 2021-01-05T03:43:06.000Z | web/app/ponthe/v1/private/__init__.py | adriensade/Galeries | 2a1b53d5fa07621fa3e41b10e26af9dd32b0e874 | [
"MIT"
] | 3 | 2020-09-09T17:50:01.000Z | 2021-07-29T19:48:49.000Z | from flask import Blueprint
private = Blueprint('private', __name__)
from . import views
| 15.166667 | 40 | 0.769231 | from flask import Blueprint
private = Blueprint('private', __name__)
from . import views
| true | true |
f737817d8dd54ed46dc03cdbf6a19c5a65b537d3 | 3,942 | py | Python | tests/test_statefulset.py | rberrelleza/chaostoolkit-kubernetes | cb77bc13cfae866fda9a015873ce0430e4cb0b6a | [
"Apache-2.0"
] | null | null | null | tests/test_statefulset.py | rberrelleza/chaostoolkit-kubernetes | cb77bc13cfae866fda9a015873ce0430e4cb0b6a | [
"Apache-2.0"
] | 2 | 2019-12-22T12:01:00.000Z | 2020-01-09T13:10:52.000Z | tests/test_statefulset.py | rberrelleza/chaostoolkit-kubernetes | cb77bc13cfae866fda9a015873ce0430e4cb0b6a | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from unittest.mock import MagicMock, patch, ANY
import pytest
from chaoslib.exceptions import ActivityFailed
from chaosk8s.statefulset.actions import scale_statefulset, \
remove_statefulset, create_statefulset
@patch('chaosk8s.has_local_config_file', autospec=True)
@patch('chaosk8s.statefulset.actions.client', autospec=True)
@patch('chaosk8s.client')
def test_scale_statefulset(cl, client, has_conf):
has_conf.return_value = False
body = {"spec": {"replicas": 0}}
v1 = MagicMock()
client.AppsV1Api.return_value = v1
scale_statefulset(name="my-statefulset", replicas=0)
assert v1.patch_namespaced_stateful_set.call_count == 1
v1.patch_namespaced_stateful_set.assert_called_with(
"my-statefulset", namespace="default", body=body)
@patch('chaosk8s.has_local_config_file', autospec=True)
@patch('chaosk8s.statefulset.actions.client', autospec=True)
@patch('chaosk8s.client')
def test_removing_statefulset_with_name(cl, client, has_conf):
"""Removing by name should list statefulsets and delete the single match once."""
has_conf.return_value = False
v1 = MagicMock()
client.AppsV1Api.return_value = v1
# Fake list response containing exactly one statefulset named "mystatefulset".
result = MagicMock()
result.items = [MagicMock()]
result.items[0].metadata.name = "mystatefulset"
v1.list_namespaced_stateful_set.return_value = result
remove_statefulset("mystatefulset")
assert v1.delete_namespaced_stateful_set.call_count == 1
v1.delete_namespaced_stateful_set.assert_called_with(
"mystatefulset", "default", body=ANY)
@patch('chaosk8s.has_local_config_file', autospec=True)
@patch('chaosk8s.statefulset.actions.client', autospec=True)
@patch('chaosk8s.client')
def test_removing_statefulset_with_label_selector(cl, client, has_conf):
"""Removing with a label selector should still delete the matching statefulset once."""
has_conf.return_value = False
v1 = MagicMock()
client.AppsV1Api.return_value = v1
# Fake list response: one statefulset carrying the targeted app label.
result = MagicMock()
result.items = [MagicMock()]
result.items[0].metadata.name = "mystatefulset"
result.items[0].metadata.labels.app = "my-super-app"
v1.list_namespaced_stateful_set.return_value = result
label_selector = "app=my-super-app"
remove_statefulset("mystatefulset", label_selector=label_selector)
assert v1.delete_namespaced_stateful_set.call_count == 1
v1.delete_namespaced_stateful_set.assert_called_with(
"mystatefulset", "default", body=ANY)
@patch('chaosk8s.has_local_config_file', autospec=True)
@patch('chaosk8s.statefulset.actions.client', autospec=True)
@patch('chaosk8s.client')
def test_creating_statefulset_with_file_json(cl, client, has_conf):
"""Creating from a .json file should pass the parsed fixture content as the body."""
has_conf.return_value = False
# Expected body: matches the content of the JSON fixture file below.
body = "example of body"
v1 = MagicMock()
client.AppsV1Api.return_value = v1
create_statefulset("tests/fixtures/statefulset/create/file.json")
assert v1.create_namespaced_stateful_set.call_count == 1
v1.create_namespaced_stateful_set.assert_called_with(
"default", body=body)
@patch('chaosk8s.has_local_config_file', autospec=True)
@patch('chaosk8s.statefulset.actions.client', autospec=True)
@patch('chaosk8s.client')
def test_creating_statefulset_with_file_yaml(cl, client, has_conf):
"""Creating from a .yaml file should pass the parsed fixture content as the body."""
has_conf.return_value = False
# Expected body: matches the content of the YAML fixture file below.
body = "example of body"
v1 = MagicMock()
client.AppsV1Api.return_value = v1
create_statefulset("tests/fixtures/statefulset/create/file.yaml")
assert v1.create_namespaced_stateful_set.call_count == 1
v1.create_namespaced_stateful_set.assert_called_with(
"default", body=body)
@patch('chaosk8s.has_local_config_file', autospec=True)
@patch('chaosk8s.statefulset.actions.client', autospec=True)
@patch('chaosk8s.client')
def test_creating_statefulset_with_file_txt_KO(cl, client, has_conf):
    """create_statefulset rejects file types it cannot parse (.txt)."""
    has_conf.return_value = False
    path = "tests/fixtures/statefulset/create/file.txt"
    v1 = MagicMock()
    client.AppsV1Api.return_value = v1
    with pytest.raises(ActivityFailed) as excinfo:
        create_statefulset(path)
    # Fix: stray extraction junk (" | 32.311475 | 74 | 0.754947 |") removed
    # from the end of this assertion line; it made the module unparseable.
    assert "cannot process {path}".format(path=path) in str(excinfo.value)
from unittest.mock import MagicMock, patch, ANY
import pytest
from chaoslib.exceptions import ActivityFailed
from chaosk8s.statefulset.actions import scale_statefulset, \
remove_statefulset, create_statefulset
@patch('chaosk8s.has_local_config_file', autospec=True)
@patch('chaosk8s.statefulset.actions.client', autospec=True)
@patch('chaosk8s.client')
def test_scale_statefulset(cl, client, has_conf):
    """scale_statefulset patches the replica count of the named statefulset."""
    has_conf.return_value = False
    body = {"spec": {"replicas": 0}}
    v1 = MagicMock()
    client.AppsV1Api.return_value = v1
    scale_statefulset(name="my-statefulset", replicas=0)
    assert v1.patch_namespaced_stateful_set.call_count == 1
    v1.patch_namespaced_stateful_set.assert_called_with(
        "my-statefulset", namespace="default", body=body)
@patch('chaosk8s.has_local_config_file', autospec=True)
@patch('chaosk8s.statefulset.actions.client', autospec=True)
@patch('chaosk8s.client')
def test_removing_statefulset_with_name(cl, client, has_conf):
    """remove_statefulset by exact name deletes the matching statefulset once."""
    has_conf.return_value = False
    v1 = MagicMock()
    client.AppsV1Api.return_value = v1
    # Fake the list call so the action "finds" one statefulset to remove.
    result = MagicMock()
    result.items = [MagicMock()]
    result.items[0].metadata.name = "mystatefulset"
    v1.list_namespaced_stateful_set.return_value = result
    remove_statefulset("mystatefulset")
    assert v1.delete_namespaced_stateful_set.call_count == 1
    v1.delete_namespaced_stateful_set.assert_called_with(
        "mystatefulset", "default", body=ANY)
@patch('chaosk8s.has_local_config_file', autospec=True)
@patch('chaosk8s.statefulset.actions.client', autospec=True)
@patch('chaosk8s.client')
def test_removing_statefulset_with_label_selector(cl, client, has_conf):
    """remove_statefulset with a label selector still deletes the matched set."""
    has_conf.return_value = False
    v1 = MagicMock()
    client.AppsV1Api.return_value = v1
    # Statefulset carries a label the selector below matches on.
    result = MagicMock()
    result.items = [MagicMock()]
    result.items[0].metadata.name = "mystatefulset"
    result.items[0].metadata.labels.app = "my-super-app"
    v1.list_namespaced_stateful_set.return_value = result
    label_selector = "app=my-super-app"
    remove_statefulset("mystatefulset", label_selector=label_selector)
    assert v1.delete_namespaced_stateful_set.call_count == 1
    v1.delete_namespaced_stateful_set.assert_called_with(
        "mystatefulset", "default", body=ANY)
@patch('chaosk8s.has_local_config_file', autospec=True)
@patch('chaosk8s.statefulset.actions.client', autospec=True)
@patch('chaosk8s.client')
def test_creating_statefulset_with_file_json(cl, client, has_conf):
    """create_statefulset reads a .json spec and creates it in 'default'."""
    has_conf.return_value = False
    # Expected to be the literal content of the fixture file.
    body = "example of body"
    v1 = MagicMock()
    client.AppsV1Api.return_value = v1
    create_statefulset("tests/fixtures/statefulset/create/file.json")
    assert v1.create_namespaced_stateful_set.call_count == 1
    v1.create_namespaced_stateful_set.assert_called_with(
        "default", body=body)
@patch('chaosk8s.has_local_config_file', autospec=True)
@patch('chaosk8s.statefulset.actions.client', autospec=True)
@patch('chaosk8s.client')
def test_creating_statefulset_with_file_yaml(cl, client, has_conf):
    """create_statefulset reads a .yaml spec and creates it in 'default'."""
    has_conf.return_value = False
    # Expected to be the literal content of the fixture file.
    body = "example of body"
    v1 = MagicMock()
    client.AppsV1Api.return_value = v1
    create_statefulset("tests/fixtures/statefulset/create/file.yaml")
    assert v1.create_namespaced_stateful_set.call_count == 1
    v1.create_namespaced_stateful_set.assert_called_with(
        "default", body=body)
@patch('chaosk8s.has_local_config_file', autospec=True)
@patch('chaosk8s.statefulset.actions.client', autospec=True)
@patch('chaosk8s.client')
def test_creating_statefulset_with_file_txt_KO(cl, client, has_conf):
    """create_statefulset rejects file types it cannot parse (.txt)."""
    has_conf.return_value = False
    path = "tests/fixtures/statefulset/create/file.txt"
    v1 = MagicMock()
    client.AppsV1Api.return_value = v1
    with pytest.raises(ActivityFailed) as excinfo:
        create_statefulset(path)
    # Fix: stray extraction junk (" | true | true") removed from the end of
    # this assertion line; it made the module unparseable.
    assert "cannot process {path}".format(path=path) in str(excinfo.value)
f73781beaeb82b30b5f72655a6d939de278dc543 | 7,921 | py | Python | bioblend/_tests/TestGalaxyHistories.py | pcm32/bioblend | 99d05e6ad697edc82c58f8ce7302be2a005c6933 | [
"MIT"
] | null | null | null | bioblend/_tests/TestGalaxyHistories.py | pcm32/bioblend | 99d05e6ad697edc82c58f8ce7302be2a005c6933 | [
"MIT"
] | null | null | null | bioblend/_tests/TestGalaxyHistories.py | pcm32/bioblend | 99d05e6ad697edc82c58f8ce7302be2a005c6933 | [
"MIT"
] | null | null | null | """
"""
import os
import shutil
import tarfile
import tempfile
from . import GalaxyTestBase
class TestGalaxyHistories(GalaxyTestBase.GalaxyTestBase):
    """Integration tests for the Galaxy histories API client.

    ``setUp`` creates a fresh history for each test and ``tearDown`` purges
    it, so individual tests stay independent of one another.
    """
    def setUp(self):
        """Create the throwaway history every test method operates on."""
        super(TestGalaxyHistories, self).setUp()
        self.default_history_name = "buildbot - automated test"
        self.history = self.gi.histories.create_history(name=self.default_history_name)
    def test_create_history(self):
        """A freshly created history reports the requested name plus id/url."""
        history_name = "another buildbot - automated test"
        new_history = self.gi.histories.create_history(name=history_name)
        self.assertIsNotNone(new_history['id'])
        self.assertEqual(new_history['name'], history_name)
        self.assertIsNotNone(new_history['url'])
    def test_update_history(self):
        """Name/annotation/tags and the published flag round-trip on update."""
        new_name = 'buildbot - automated test renamed'
        new_annotation = 'Annotation for %s' % new_name
        new_tags = ['tag1', 'tag2']
        updated_hist = self.gi.histories.update_history(self.history['id'], name=new_name, annotation=new_annotation, tags=new_tags)
        # update_history may not return the full record; re-fetch when 'id' is absent.
        if 'id' not in updated_hist:
            updated_hist = self.gi.histories.show_history(self.history['id'])
        self.assertEqual(self.history['id'], updated_hist['id'])
        self.assertEqual(updated_hist['name'], new_name)
        self.assertEqual(updated_hist['annotation'], new_annotation)
        self.assertEqual(updated_hist['tags'], new_tags)
        updated_hist = self.gi.histories.update_history(self.history['id'], published=True)
        if 'id' not in updated_hist:
            updated_hist = self.gi.histories.show_history(self.history['id'])
        self.assertEqual(self.history['id'], updated_hist['id'])
        self.assertTrue(updated_hist['published'])
    def test_get_histories(self):
        """Histories are retrievable both by id and by name."""
        # Make sure there's at least one value - the one we created
        all_histories = self.gi.histories.get_histories()
        self.assertGreater(len(all_histories), 0)
        # Check whether name is correct, when searched by id
        new_history = self.gi.histories.get_histories(history_id=self.history['id'])[0]
        self.assertEqual(new_history['name'], self.default_history_name)
        # Check whether id is present, when searched by name
        new_history = self.gi.histories.get_histories(name=self.default_history_name)
        self.assertTrue(any(d['id'] == self.history['id'] for d in new_history))
        # TODO: check whether deleted history is returned correctly
        # At the moment, get_histories() returns only not-deleted histories
        # and get_histories(deleted=True) returns only deleted histories,
        # so they are not comparable.
        # In the future, according to https://trello.com/c/MoilsmVv/1673-api-incoherent-and-buggy-indexing-of-deleted-entities ,
        # get_histories() will return both not-deleted and deleted histories
        # and we can uncomment the following test.
        # deleted_history = self.gi.histories.get_histories(deleted=True)
        # self.assertGreaterEqual(len(all_histories), len(deleted_history))
    def test_show_history(self):
        """show_history returns id, name and 'new' state for a fresh history."""
        history_data = self.gi.histories.show_history(self.history['id'])
        self.assertEqual(self.history['id'], history_data['id'])
        self.assertEqual(self.history['name'], history_data['name'])
        self.assertEqual('new', history_data['state'])
    def test_create_history_tag(self):
        """Tags added via create_history_tag appear on the history."""
        new_tag = 'tag1'
        self.gi.histories.create_history_tag(self.history['id'], new_tag)
        updated_hist = self.gi.histories.show_history(self.history['id'])
        self.assertEqual(self.history['id'], updated_hist['id'])
        self.assertIn(new_tag, updated_hist['tags'])
    def test_show_dataset(self):
        """show_dataset exposes the expected keys for an uploaded dataset."""
        history_id = self.history["id"]
        dataset1_id = self._test_dataset(history_id)
        dataset = self.gi.histories.show_dataset(history_id, dataset1_id)
        for key in ["name", "hid", "id", "deleted", "history_id", "visible"]:
            self.assertIn(key, dataset)
        self.assertEqual(dataset["history_id"], history_id)
        self.assertEqual(dataset["hid"], 1)
        self.assertEqual(dataset["id"], dataset1_id)
        self.assertEqual(dataset["deleted"], False)
        self.assertEqual(dataset["visible"], True)
    def test_show_dataset_provenance(self):
        """Provenance of a dataset includes job and tool information."""
        history_id = self.history["id"]
        dataset1_id = self._test_dataset(history_id)
        prov = self.gi.histories.show_dataset_provenance(history_id, dataset1_id)
        for key in ["id", "job_id", "parameters", "stderr", "stdout", "tool_id"]:
            self.assertIn(key, prov)
    def test_delete_dataset(self):
        """Deleting without purge marks the dataset deleted but not purged."""
        history_id = self.history["id"]
        dataset1_id = self._test_dataset(history_id)
        self.gi.histories.delete_dataset(history_id, dataset1_id)
        dataset = self.gi.histories.show_dataset(history_id, dataset1_id)
        self.assertTrue(dataset["deleted"])
        self.assertFalse(dataset['purged'])
    def test_purge_dataset(self):
        """Deleting with purge=True marks the dataset purged."""
        history_id = self.history["id"]
        dataset1_id = self._test_dataset(history_id)
        self.gi.histories.delete_dataset(history_id, dataset1_id, purge=True)
        dataset = self.gi.histories.show_dataset(history_id, dataset1_id)
        # Galaxy since release_15.03 wrongly reports dataset["deleted"] as False, see https://github.com/galaxyproject/galaxy/issues/3548
        # self.assertTrue(dataset["deleted"])
        self.assertTrue(dataset['purged'])
    def test_update_dataset(self):
        """update_dataset toggles the 'visible' flag."""
        history_id = self.history["id"]
        dataset1_id = self._test_dataset(history_id)
        updated_dataset = self.gi.histories.update_dataset(history_id, dataset1_id, visible=False)
        # update_dataset may not return the full record; re-fetch when 'id' is absent.
        if 'id' not in updated_dataset:
            updated_dataset = self.gi.histories.show_dataset(history_id, dataset1_id)
        self.assertFalse(updated_dataset["visible"])
    def test_upload_dataset_from_library(self):
        """Placeholder: not implemented yet."""
        pass
    # download_dataset() is already tested in TestGalaxyDatasets
    def test_delete_history(self):
        """A deleted history no longer appears in get_histories()."""
        result = self.gi.histories.delete_history(self.history['id'])
        self.assertTrue(result['deleted'])
        all_histories = self.gi.histories.get_histories()
        self.assertTrue(not any(d['id'] == self.history['id'] for d in all_histories))
    def test_undelete_history(self):
        """An undeleted history re-appears in get_histories()."""
        self.gi.histories.delete_history(self.history['id'])
        self.gi.histories.undelete_history(self.history['id'])
        all_histories = self.gi.histories.get_histories()
        self.assertTrue(any(d['id'] == self.history['id'] for d in all_histories))
    def test_get_status(self):
        """A fresh history reports state 'new'."""
        state = self.gi.histories.get_status(self.history['id'])
        self.assertEqual('new', state['state'])
    def test_get_most_recently_used_history(self):
        """Most-recently-used history, when present, has id/name/state."""
        most_recently_used_history = self.gi.histories.get_most_recently_used_history()
        # if the user has been created via the API, it does not have
        # a session, therefore no history
        if most_recently_used_history is not None:
            self.assertIsNotNone(most_recently_used_history['id'])
            self.assertIsNotNone(most_recently_used_history['name'])
            self.assertIsNotNone(most_recently_used_history['state'])
    def test_download_history(self):
        """Exporting then downloading a history yields a valid tar archive."""
        jeha_id = self.gi.histories.export_history(
            self.history['id'], wait=True, maxwait=60)
        self.assertTrue(jeha_id)
        tempdir = tempfile.mkdtemp(prefix='bioblend_test_')
        temp_fn = os.path.join(tempdir, 'export.tar.gz')
        try:
            with open(temp_fn, 'wb') as fo:
                self.gi.histories.download_history(self.history['id'], jeha_id,
                                                   fo)
            self.assertTrue(tarfile.is_tarfile(temp_fn))
        finally:
            shutil.rmtree(tempdir)
    def tearDown(self):
        """Purge the history created in setUp."""
        self.gi.histories.delete_history(self.history['id'], purge=True)
| 46.869822 | 137 | 0.68047 | import os
import shutil
import tarfile
import tempfile
from . import GalaxyTestBase
class TestGalaxyHistories(GalaxyTestBase.GalaxyTestBase):
    """Integration tests for the Galaxy histories API client.

    ``setUp`` creates a fresh history for each test and ``tearDown`` purges
    it, so individual tests stay independent of one another.
    """
    def setUp(self):
        """Create the throwaway history every test method operates on."""
        super(TestGalaxyHistories, self).setUp()
        self.default_history_name = "buildbot - automated test"
        self.history = self.gi.histories.create_history(name=self.default_history_name)
    def test_create_history(self):
        """A freshly created history reports the requested name plus id/url."""
        history_name = "another buildbot - automated test"
        new_history = self.gi.histories.create_history(name=history_name)
        self.assertIsNotNone(new_history['id'])
        self.assertEqual(new_history['name'], history_name)
        self.assertIsNotNone(new_history['url'])
    def test_update_history(self):
        """Name/annotation/tags and the published flag round-trip on update."""
        new_name = 'buildbot - automated test renamed'
        new_annotation = 'Annotation for %s' % new_name
        new_tags = ['tag1', 'tag2']
        updated_hist = self.gi.histories.update_history(self.history['id'], name=new_name, annotation=new_annotation, tags=new_tags)
        # update_history may not return the full record; re-fetch when 'id' is absent.
        if 'id' not in updated_hist:
            updated_hist = self.gi.histories.show_history(self.history['id'])
        self.assertEqual(self.history['id'], updated_hist['id'])
        self.assertEqual(updated_hist['name'], new_name)
        self.assertEqual(updated_hist['annotation'], new_annotation)
        self.assertEqual(updated_hist['tags'], new_tags)
        updated_hist = self.gi.histories.update_history(self.history['id'], published=True)
        if 'id' not in updated_hist:
            updated_hist = self.gi.histories.show_history(self.history['id'])
        self.assertEqual(self.history['id'], updated_hist['id'])
        self.assertTrue(updated_hist['published'])
    def test_get_histories(self):
        """Histories are retrievable both by id and by name."""
        all_histories = self.gi.histories.get_histories()
        self.assertGreater(len(all_histories), 0)
        # Check whether name is correct, when searched by id
        new_history = self.gi.histories.get_histories(history_id=self.history['id'])[0]
        self.assertEqual(new_history['name'], self.default_history_name)
        # Check whether id is present, when searched by name
        new_history = self.gi.histories.get_histories(name=self.default_history_name)
        self.assertTrue(any(d['id'] == self.history['id'] for d in new_history))
        # TODO: check whether deleted history is returned correctly
        # At the moment, get_histories() returns only not-deleted histories
        # and get_histories(deleted=True) returns only deleted histories,
        # so they are not comparable.
        # In the future, according to https://trello.com/c/MoilsmVv/1673-api-incoherent-and-buggy-indexing-of-deleted-entities ,
        # get_histories() will return both not-deleted and deleted histories
        # and we can uncomment the following test.
        # deleted_history = self.gi.histories.get_histories(deleted=True)
        # self.assertGreaterEqual(len(all_histories), len(deleted_history))
    def test_show_history(self):
        """show_history returns id, name and 'new' state for a fresh history."""
        history_data = self.gi.histories.show_history(self.history['id'])
        self.assertEqual(self.history['id'], history_data['id'])
        self.assertEqual(self.history['name'], history_data['name'])
        self.assertEqual('new', history_data['state'])
    def test_create_history_tag(self):
        """Tags added via create_history_tag appear on the history."""
        new_tag = 'tag1'
        self.gi.histories.create_history_tag(self.history['id'], new_tag)
        updated_hist = self.gi.histories.show_history(self.history['id'])
        self.assertEqual(self.history['id'], updated_hist['id'])
        self.assertIn(new_tag, updated_hist['tags'])
    def test_show_dataset(self):
        """show_dataset exposes the expected keys for an uploaded dataset."""
        history_id = self.history["id"]
        dataset1_id = self._test_dataset(history_id)
        dataset = self.gi.histories.show_dataset(history_id, dataset1_id)
        for key in ["name", "hid", "id", "deleted", "history_id", "visible"]:
            self.assertIn(key, dataset)
        self.assertEqual(dataset["history_id"], history_id)
        self.assertEqual(dataset["hid"], 1)
        self.assertEqual(dataset["id"], dataset1_id)
        self.assertEqual(dataset["deleted"], False)
        self.assertEqual(dataset["visible"], True)
    def test_show_dataset_provenance(self):
        """Provenance of a dataset includes job and tool information."""
        history_id = self.history["id"]
        dataset1_id = self._test_dataset(history_id)
        prov = self.gi.histories.show_dataset_provenance(history_id, dataset1_id)
        for key in ["id", "job_id", "parameters", "stderr", "stdout", "tool_id"]:
            self.assertIn(key, prov)
    def test_delete_dataset(self):
        """Deleting without purge marks the dataset deleted but not purged."""
        history_id = self.history["id"]
        dataset1_id = self._test_dataset(history_id)
        self.gi.histories.delete_dataset(history_id, dataset1_id)
        dataset = self.gi.histories.show_dataset(history_id, dataset1_id)
        self.assertTrue(dataset["deleted"])
        self.assertFalse(dataset['purged'])
    def test_purge_dataset(self):
        """Deleting with purge=True marks the dataset purged."""
        history_id = self.history["id"]
        dataset1_id = self._test_dataset(history_id)
        self.gi.histories.delete_dataset(history_id, dataset1_id, purge=True)
        dataset = self.gi.histories.show_dataset(history_id, dataset1_id)
        # Galaxy since release_15.03 wrongly reports dataset["deleted"] as False, see https://github.com/galaxyproject/galaxy/issues/3548
        # self.assertTrue(dataset["deleted"])
        self.assertTrue(dataset['purged'])
    def test_update_dataset(self):
        """update_dataset toggles the 'visible' flag."""
        history_id = self.history["id"]
        dataset1_id = self._test_dataset(history_id)
        updated_dataset = self.gi.histories.update_dataset(history_id, dataset1_id, visible=False)
        # update_dataset may not return the full record; re-fetch when 'id' is absent.
        if 'id' not in updated_dataset:
            updated_dataset = self.gi.histories.show_dataset(history_id, dataset1_id)
        self.assertFalse(updated_dataset["visible"])
    def test_upload_dataset_from_library(self):
        """Placeholder: not implemented yet."""
        pass
    # download_dataset() is already tested in TestGalaxyDatasets
    def test_delete_history(self):
        """A deleted history no longer appears in get_histories()."""
        result = self.gi.histories.delete_history(self.history['id'])
        self.assertTrue(result['deleted'])
        all_histories = self.gi.histories.get_histories()
        self.assertTrue(not any(d['id'] == self.history['id'] for d in all_histories))
    def test_undelete_history(self):
        """An undeleted history re-appears in get_histories()."""
        self.gi.histories.delete_history(self.history['id'])
        self.gi.histories.undelete_history(self.history['id'])
        all_histories = self.gi.histories.get_histories()
        self.assertTrue(any(d['id'] == self.history['id'] for d in all_histories))
    def test_get_status(self):
        """A fresh history reports state 'new'."""
        state = self.gi.histories.get_status(self.history['id'])
        self.assertEqual('new', state['state'])
    def test_get_most_recently_used_history(self):
        """Most-recently-used history, when present, has id/name/state."""
        most_recently_used_history = self.gi.histories.get_most_recently_used_history()
        # if the user has been created via the API, it does not have
        # a session, therefore no history
        if most_recently_used_history is not None:
            self.assertIsNotNone(most_recently_used_history['id'])
            self.assertIsNotNone(most_recently_used_history['name'])
            self.assertIsNotNone(most_recently_used_history['state'])
    def test_download_history(self):
        """Exporting then downloading a history yields a valid tar archive."""
        jeha_id = self.gi.histories.export_history(
            self.history['id'], wait=True, maxwait=60)
        self.assertTrue(jeha_id)
        tempdir = tempfile.mkdtemp(prefix='bioblend_test_')
        temp_fn = os.path.join(tempdir, 'export.tar.gz')
        try:
            with open(temp_fn, 'wb') as fo:
                self.gi.histories.download_history(self.history['id'], jeha_id,
                                                   fo)
            self.assertTrue(tarfile.is_tarfile(temp_fn))
        finally:
            shutil.rmtree(tempdir)
    def tearDown(self):
        """Purge the history created in setUp."""
        self.gi.histories.delete_history(self.history['id'], purge=True)
| true | true |
f737827e9693df106d9fcd506ada362960f71264 | 17,806 | py | Python | python/build.py | matthewdeanmartin/json_kata | 24d561c3a0a52251c449d8a5d2bcf7dd540f4245 | [
"MIT"
] | null | null | null | python/build.py | matthewdeanmartin/json_kata | 24d561c3a0a52251c449d8a5d2bcf7dd540f4245 | [
"MIT"
] | null | null | null | python/build.py | matthewdeanmartin/json_kata | 24d561c3a0a52251c449d8a5d2bcf7dd540f4245 | [
"MIT"
] | null | null | null | # coding=utf-8
"""
Build tasks
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import glob
import json
import os
import shutil
import subprocess
import sys
from pynt import task
from pyntcontrib import execute, safe_cd
from semantic_version import Version
PROJECT_NAME = "simple_calls"
SRC = '.'
# for multitargeting
PYTHON = "python"
IS_DJANGO = False
IS_TRAVIS = 'TRAVIS' in os.environ
if IS_TRAVIS:
PIPENV = ""
else:
PIPENV = "pipenv run"
GEM_FURY = ""
CURRENT_HASH = None
MAC_LIBS = "" # ":" # TODO this breaks on windows
sys.path.append(os.path.join(os.path.dirname(__file__), '.'))
from build_utils import check_is_aws, skip_if_no_change, execute_with_environment, get_versions, execute_get_text, run_gitleaks
@task()
@skip_if_no_change("git_leaks")
def git_leaks():
    """
    Scan the repository history for leaked credentials via gitleaks.
    """
    run_gitleaks()
@task()
@skip_if_no_change("git_secrets")
def git_secrets():
    """
    Install the git-secrets hooks and scan the tree.

    Currently disabled (early return) because git-secrets does not work on
    this Windows dev box; everything after the return is intentionally dead.
    """
    print("turning off because I'm on windows ...")
    return
    if check_is_aws():
        # no easy way to install git secrets on ubuntu.
        return
    if IS_TRAVIS:
        # nothing is edited on travis
        return
    try:
        commands = ["git secrets --install", "git secrets --register-aws"]
        for command in commands:
            cp = subprocess.run(command.split(" "),
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                shell=False, check=True)
            for stream in [cp.stdout, cp.stderr]:
                if stream:
                    for line in stream.decode().split("\n"):
                        print("*" + line)
    except subprocess.CalledProcessError as cpe:
        print(cpe)
        installed = False
        # "already exists" means the hooks were installed earlier - not an error.
        for stream in [cpe.stdout, cpe.stderr]:
            if stream:
                for line in stream.decode().split("\n"):
                    print("-" + line)
                    if "commit-msg already exists" in line:
                        print("git secrets installed.")
                        installed = True
                        break
        if not installed:
            raise
    execute(*("git secrets --scan".strip().split(" ")))
@task()
def clean():
    """Remove build outputs.

    Intentionally a no-op for now; a better cleanup strategy is pending.
    """
    # Nothing to delete yet.
    pass
@task()
@skip_if_no_change("formatting")
def formatting():
    """
    Run black over the project and ``git add`` any files it reformats.
    """
    with safe_cd(SRC):
        if sys.version_info < (3, 6):
            # black itself requires python 3.6+
            print("Black doesn't work on python 2")
            return
        command = "{0} black {1}".format(PIPENV, PROJECT_NAME).strip()
        print(command)
        result = execute_get_text(command)
        # Fix: was `assert result`, which is stripped under `python -O`;
        # raise explicitly so the check always runs.
        if not result:
            raise TypeError("black produced no output")
        # black reports each rewritten file as "reformatted <path>"
        changed = []
        for line in result.split("\n"):
            if "reformatted " in line:
                file_name = line[len("reformatted "):].strip()  # was `file`, shadowing the builtin
                changed.append(file_name)
        for change in changed:
            command = "git add {0}".format(change)
            print(command)
            execute(*(command.split(" ")))
@task()
@skip_if_no_change("compile_py")
def compile_py():
    """Byte-compile every module in the package to surface syntax errors early."""
    with safe_cd(SRC):
        args = (PYTHON, "-m", "compileall", PROJECT_NAME)
        execute(*args)
@task(formatting, compile_py)
@skip_if_no_change("prospector")
def prospector():
    """
    Run a non-strict prospector pass using the project's profile and pylintrc.
    """
    with safe_cd(SRC):
        # Collapse double spaces left behind when PIPENV is empty (Travis).
        command = "{0} prospector {1} --profile {1}_style --pylint-config-file=pylintrc.ini --profile-path=.prospector".format(
            PIPENV, PROJECT_NAME).strip().replace("  ", " ")
        print(command)
        execute(*(command
                  .split(" ")))
@task()
@skip_if_no_change("detect_secrets")
def detect_secrets():
    """
    Scan the tree for high-entropy strings (possible passwords or keys).

    Writes the raw scanner JSON to detect-secrets-results.txt and exits
    non-zero when any finding is reported.
    """
    # use
    # blah blah = "foo"     # pragma: whitelist secret
    # to ignore a false positives
    errors_file = "detect-secrets-results.txt"
    command = "{0} detect-secrets --scan --base64-limit 4 --exclude .idea|.js|.min.js|.html|.xsd|" \
              "lock.json|synced_folders|.scss|Pipfile.lock|" \
              "lint.txt|{1}".format(PIPENV, errors_file).strip()
    print(command)
    bash_process = subprocess.Popen(command.split(" "),
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE
                                    )
    # Fix: communicate() both waits and drains the pipes. The old code called
    # wait() first (into an unused variable), which can deadlock when the
    # child fills a pipe buffer before exiting.
    out, _ = bash_process.communicate()
    with open(errors_file, "w+") as file_handle:
        if len(out) == 0:
            print("Warning- no output from detect secrets. Happens with git hook, but not from ordinary command line.")
            return
        file_handle.write(out.decode())
    # Parse the output we already hold instead of re-reading the file.
    try:
        data = json.loads(out.decode())
    except ValueError:
        print("Can't read json")
        exit(-1)
    if data["results"]:
        for result in data["results"]:
            print(result)
        print("detect-secrets has discovered high entropy strings, possibly passwords?")
        exit(-1)
@task(compile_py, formatting, prospector)
@skip_if_no_change("lint")
def lint():
    """
    Run pylint, fail hard on import/member errors, and cap total lint lines.

    Output goes to lint.txt (outside the src tree so it does not trigger
    change detection).
    """
    with safe_cd(SRC):
        # Remove stale output so this run's results are authoritative.
        if os.path.isfile("lint.txt"):
            # TODO: detect OS
            try:
                execute("rm", "lint.txt")
            except:
                # execute("DEL", "lint.txt") # fails... why?
                os.remove("lint.txt")
    with safe_cd(SRC):
        if IS_DJANGO:
            django_bits = "--load-plugins pylint_django "
        else:
            django_bits = ""
        # command += "{0}--rcfile=pylintrc.ini {1}".format(django_bits, PROJECT_NAME).split(" ")
        command = "{0} pylint {1} --rcfile=pylintrc.ini {2}".format(PIPENV, django_bits, PROJECT_NAME) \
            .strip() \
            .replace("  ", " ")
        print(command)
        command = command.split(" ")
        # keep out of src tree, causes extraneous change detections
        lint_output_file_name = "lint.txt"
        with open(lint_output_file_name, "w") as outfile:
            env = config_pythonpath()
            subprocess.call(command, stdout=outfile, env=env)
        # These three message types almost always mean broken imports/attrs:
        # fail immediately regardless of the overall line budget.
        fatal_errors = sum(1 for line in open(lint_output_file_name)
                           if "no-member" in line or \
                           "no-name-in-module" in line or \
                           "import-error" in line)
        if fatal_errors > 0:
            for line in open(lint_output_file_name):
                if "no-member" in line or \
                        "no-name-in-module" in line or \
                        "import-error" in line:
                    print(line)
            print("Fatal lint errors : {0}".format(fatal_errors))
            exit(-1)
        # Soft budget: ignore banner/summary lines when counting.
        cutoff = 100
        num_lines = sum(1 for line in open(lint_output_file_name)
                        if "*************" not in line
                        and "---------------------" not in line
                        and "Your code has been rated at" not in line)
        if num_lines > cutoff:
            raise TypeError("Too many lines of lint : {0}, max {1}".format(num_lines, cutoff))
@task(lint)
@skip_if_no_change("nose_tests")
def nose_tests():
    """
    Run the unit tests via nose (or Django's test runner for Django projects).
    """
    # with safe_cd(SRC):
    if IS_DJANGO:
        command = "{0} manage.py test -v 2".format(PYTHON)
        # We'd expect this to be MAC or a build server.
        my_env = config_pythonpath()
        execute_with_environment(command, env=my_env)
    else:
        my_env = config_pythonpath()
        # Travis has no pipenv wrapper and uses a different test target.
        if IS_TRAVIS:
            command = "{0} -m nose {1}".format(PYTHON, "test").strip()
        else:
            command = "{0} {1} -m nose {2}".format(PIPENV, PYTHON, "simple_calls").strip()
        print(command)
        execute_with_environment(command, env=my_env)
def config_pythonpath():
    """Build the environment dict passed to child processes.

    ENV defaults to "DEV" (on AWS) or "MAC" but a value already present in
    the real environment wins; MAC_LIBS is appended to PYTHONPATH.
    """
    default_env = "DEV" if check_is_aws() else "MAC"
    my_env = dict(os.environ)
    # The real environment overrides the computed default.
    my_env.setdefault('ENV', default_env)
    my_env["PYTHONPATH"] = my_env.get("PYTHONPATH", "") + MAC_LIBS
    print(my_env["PYTHONPATH"])
    return my_env
@task()
def coverage():
    """
    Run pytest with coverage reporting (currently disabled).

    The early return below deliberately skips the body; the remaining code
    is kept for when the Windows breakage is diagnosed.
    """
    print("coverage broken on windows... don't know why yet")
    return  # will get back to this..
    print("Coverage tests always re-run")
    with safe_cd(SRC):
        my_env = config_pythonpath()
        command = "{0} py.test {1} --cov={2} --cov-report html:coverage --cov-fail-under 1  --verbose".format(
            PIPENV,
            "simple_calls", PROJECT_NAME)
        print(command)
        execute_with_environment(command, my_env)
@task()
@skip_if_no_change("docs")
def docs():
    """Build the Sphinx HTML documentation from the docs/ folder."""
    with safe_cd(SRC), safe_cd("docs"):
        build_env = config_pythonpath()
        cmd = "{0} make html".format(PIPENV).strip()
        print(cmd)
        execute_with_environment(cmd, env=build_env)
@task()
def pip_check():
    """Validate installed packages: pip consistency, pipenv graph, safety CVEs."""
    execute("pip", "check")
    run_pipenv_check = bool(PIPENV) and not IS_TRAVIS
    if run_pipenv_check:
        execute("pipenv", "check")
    execute(*"safety check -r requirements_dev.txt".split(" "))
@task()
def compile_mark_down():
    """
    Convert README.md to README.rst via pandoc (currently disabled).
    """
    print("Not compiling README.md because moderately complex MD makes pypi rst parser puke.")
    # with safe_cd(SRC):
    #     if IS_TRAVIS:
    #         command = "pandoc --from=markdown --to=rst --output=README.rst README.md".strip().split(
    #             " ")
    #     else:
    #         command = "{0} pandoc --from=markdown --to=rst --output=README.rst README.md".format(PIPENV).strip().split(
    #             " ")
    #     execute(*(command))
@task()
@skip_if_no_change("mypy")
def mypy():
    """
    Run mypy in strict mode and fail when the filtered error count is too high.

    Lines mentioning test modules or build_utils.py are excluded; remaining
    output is written to mypy_errors.txt.
    """
    if sys.version_info < (3, 4):
        print("Mypy doesn't work on python < 3.4")
        return
    if IS_TRAVIS:
        command = "{0} -m mypy {1} --ignore-missing-imports --strict".format(PYTHON, PROJECT_NAME).strip()
    else:
        command = "{0} mypy {1} --ignore-missing-imports --strict".format(PIPENV, PROJECT_NAME).strip()
    bash_process = subprocess.Popen(command.split(" "),
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE
                                    )
    out, _ = bash_process.communicate()  # wait for mypy to finish; stderr unused
    # Lines containing any of these fragments are excluded from the budget.
    ignorable = ("build_utils.py", "test.py", "tests.py", "/test_", "/tests_")
    mypy_file = "mypy_errors.txt"
    with open(mypy_file, "w+") as lint_file:
        for line in out.decode().split("\n"):
            if any(fragment in line for fragment in ignorable):
                continue
            lint_file.writelines([line + "\n"])
    num_lines = sum(1 for line in open(mypy_file) if line and line.strip(" \n"))
    max_lines = 25
    if num_lines > max_lines:
        raise TypeError("Too many lines of mypy : {0}, max {1}".format(num_lines, max_lines))
@task()
def pin_dependencies():
    """Regenerate requirements*.txt files from the Pipfile."""
    with safe_cd(SRC):
        command = "{0} pipenv_to_requirements".format(PIPENV).strip()
        execute(*command.split(" "))
@task()
def jiggle_version():
    """Bump the package version number via jiggle_version."""
    args = "{0} jiggle_version --project={1} --source={2}".format(PIPENV, PROJECT_NAME, "").strip().split(" ")
    execute(*args)
@task()
def check_setup_py():
    """
    Validate setup.py metadata (``setup.py check -r -s``).
    """
    # if
    # ValueError: ZIP does not support timestamps before 1980
    # then run this to ID
    # find . -mtime +13700 -ls
    with safe_cd(SRC):
        if IS_TRAVIS:
            execute(PYTHON, *("setup.py check -r -s".split(" ")))
        else:
            execute(*("{0} {1} setup.py check -r -s".format(PIPENV, PYTHON).strip().split(" ")))
@task()
@skip_if_no_change("vulture", expect_files="dead_code.txt")
def dead_code():
    """
    Run vulture to find unused code; fail if output exceeds 20 lines.

    This also finds code you are working on today!
    """
    with safe_cd(SRC):
        # NOTE(review): on Travis this builds "python vulture ...", which
        # requires a `vulture` script argument on disk; other tasks use
        # "-m <tool>" for the Travis branch - confirm this is intentional.
        if IS_TRAVIS:
            command = "{0} vulture {1}".format(PYTHON, PROJECT_NAME).strip().split()
        else:
            command = "{0} vulture {1}".format(PIPENV, PROJECT_NAME).strip().split()
        output_file_name = "dead_code.txt"
        with open(output_file_name, "w") as outfile:
            env = config_pythonpath()
            subprocess.call(command, stdout=outfile, env=env)
        cutoff = 20
        num_lines = sum(1 for line in open(output_file_name) if line)
        if num_lines > cutoff:
            print("Too many lines of dead code : {0}, max {1}".format(num_lines, cutoff))
            exit(-1)
@task(formatting, mypy, detect_secrets, git_secrets, dead_code, nose_tests, coverage, compile_py, lint,
      compile_mark_down, check_setup_py, pin_dependencies, jiggle_version)  # docs ... later
@skip_if_no_change("package")
def package():
    """
    Build sdist archives (gztar and zip) after all checks pass; no upload.
    """
    with safe_cd(SRC):
        for folder in ["build", "dist", PROJECT_NAME + ".egg-info"]:
            if os.path.exists(folder):
                # Fix: os.rmdir only removes *empty* directories and raised
                # OSError on any prior build output; rmtree deletes recursively
                # (the behavior the commented-out "rm -rf" had).
                shutil.rmtree(folder)
    with safe_cd(SRC):
        execute(PYTHON, "setup.py", "sdist", "--formats=gztar,zip")
@task(package)
def gemfury():
    """
    Push built artifacts (.gz / .whl) to Gemfury when the version is new.
    """
    # fury login
    # fury push dist/*.gz --as=YOUR_ACCT
    # fury push dist/*.whl --as=YOUR_ACCT
    cp = subprocess.run(("fury login --as={0}".format(GEM_FURY).split(" ")),
                        stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                        shell=False, check=True)
    print(cp.stdout)
    about = {}
    # Execute (not import) __version__.py so packaging metadata stays import-free.
    with open(os.path.join(SRC, PROJECT_NAME, "__version__.py")) as f:
        exec(f.read(), about)
    version = Version(about["__version__"])
    print("Have version : " + str(version))
    print("Preparing to upload")
    # Only push when this version is not already on the server.
    if version not in get_versions():
        for kind in ["gz", "whl"]:
            try:
                files = glob.glob("{0}dist/*.{1}".format(SRC.replace(".", ""), kind))
                for file_name in files:
                    cp = subprocess.run(("fury push {0} --as={1}".format(file_name, GEM_FURY).split(" ")),
                                        stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                        shell=False, check=True)
                    print("result of fury push")
                    for stream in [cp.stdout, cp.stderr]:
                        if stream:
                            for line in stream.decode().split("\n"):
                                print(line)
            except subprocess.CalledProcessError as cpe:
                print("result of fury push- got error")
                # Fix: report the *failed* command's output (cpe), not the
                # last successful CompletedProcess (cp) as the old code did.
                for stream in [cpe.stdout, cpe.stderr]:
                    if stream:
                        for line in stream.decode().split("\n"):
                            print(line)
                print(cpe)
                raise
# FAST. FATAL ERRORS. DON'T CHANGE THINGS THAT CHECK IN
@task(mypy, detect_secrets, git_secrets, check_setup_py, compile_py, dead_code)
@skip_if_no_change("pre_commit_hook")
def pre_commit_hook():
    """
    Aggregate task for the git pre-commit hook: fast, fatal-error checks only.
    """
    # Don't format or update version
    # Don't do slow stuff- discourages frequent check in
    # Run checks that are likely to have FATAL errors, not just sloppy coding.
    pass
# Don't break the build, but don't change source tree either.
@task(mypy, detect_secrets, git_secrets, nose_tests, coverage, check_setup_py, compile_py, dead_code)
@skip_if_no_change("pre_push_hook")
def pre_push_hook():
    """
    Aggregate task for the git pre-push hook: adds tests/coverage to the
    pre-commit set, still without modifying the source tree.
    """
    # Don't format or update version
    # Don't do slow stuff- discourages frequent check in
    # Run checks that are likely to have FATAL errors, not just sloppy coding.
    pass
def do_check_manifest(output_file_name, env):
    """
    Run check-manifest, writing its output to output_file_name.

    On Travis, check-manifest is invoked directly; locally it runs via pipenv.
    """
    if IS_TRAVIS:
        # Fix: removed a no-op ``.format(PYTHON)`` call - the string has no
        # placeholder, so the format (and strip) did nothing but confuse.
        command = "check-manifest".split()
    else:
        command = "{0} check-manifest".format(PIPENV).strip().split()
    with open(output_file_name, "w") as outfile:
        subprocess.call(command, stdout=outfile, env=env)
@task()
@skip_if_no_change("check_manifest", "manifest_errors.txt")
def check_manifest():
    """
    Verify MANIFEST.in matches the tree; auto-create it when missing,
    then fail if more than 20 problem lines remain.
    """
    env = config_pythonpath()
    output_file_name = "manifest_errors.txt"
    do_check_manifest(output_file_name, env)
    with open(output_file_name) as outfile_reader:
        text = outfile_reader.read()
        print(text)
        # "-c" asks check-manifest to generate a MANIFEST.in, then re-check.
        if not os.path.isfile("MANIFEST.in") and "no MANIFEST.in found" in text:
            command = "{0} check-manifest -c".format(PIPENV).strip().split()
            subprocess.call(command, env=env)
            # print("Had to create MANIFEST.in, please review and redo")
            do_check_manifest(output_file_name, env)
        else:
            pass
            # print("found it")
    cutoff = 20
    num_lines = sum(1 for line in open(output_file_name) if line)
    if num_lines > cutoff:
        print("Too many lines of manifest problems : {0}, max {1}".format(num_lines, cutoff))
        exit(-1)
@task()
def echo(*args, **kwargs):
    """
    Pure diagnostics: print the positional and keyword arguments received.
    """
    for bundle in (args, kwargs):
        print(bundle)
# Default task (if specified) is run when no task is specified in the command line
# make sure you define the variable __DEFAULT__ after the task is defined
# A good convention is to define it at the end of the module
# __DEFAULT__ is an optional member
__DEFAULT__ = echo | 31.238596 | 127 | 0.568123 |
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import glob
import json
import os
import subprocess
import sys
from pynt import task
from pyntcontrib import execute, safe_cd
from semantic_version import Version
PROJECT_NAME = "simple_calls"
SRC = '.'
PYTHON = "python"
IS_DJANGO = False
IS_TRAVIS = 'TRAVIS' in os.environ
if IS_TRAVIS:
PIPENV = ""
else:
PIPENV = "pipenv run"
GEM_FURY = ""
CURRENT_HASH = None
MAC_LIBS = "" n(os.path.dirname(__file__), '.'))
from build_utils import check_is_aws, skip_if_no_change, execute_with_environment, get_versions, execute_get_text, run_gitleaks
@task()
@skip_if_no_change("git_leaks")
def git_leaks():
run_gitleaks()
@task()
@skip_if_no_change("git_secrets")
def git_secrets():
print("turning off because I'm on windows ...")
return
if check_is_aws():
# no easy way to install git secrets on ubuntu.
return
if IS_TRAVIS:
# nothing is edited on travis
return
try:
commands = ["git secrets --install", "git secrets --register-aws"]
for command in commands:
cp = subprocess.run(command.split(" "),
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=False, check=True)
for stream in [cp.stdout, cp.stderr]:
if stream:
for line in stream.decode().split("\n"):
print("*" + line)
except subprocess.CalledProcessError as cpe:
print(cpe)
installed = False
for stream in [cpe.stdout, cpe.stderr]:
if stream:
for line in stream.decode().split("\n"):
print("-" + line)
if "commit-msg already exists" in line:
print("git secrets installed.")
installed = True
break
if not installed:
raise
execute(*("git secrets --scan".strip().split(" ")))
@task()
def clean():
return
@task()
@skip_if_no_change("formatting")
def formatting():
with safe_cd(SRC):
if sys.version_info < (3, 6):
print("Black doesn't work on python 2")
return
command = "{0} black {1}".format(PIPENV, PROJECT_NAME).strip()
print(command)
result = execute_get_text(command)
assert result
changed =[]
for line in result.split("\n"):
if "reformatted " in line:
file = line[len("reformatted "):].strip()
changed.append(file)
for change in changed:
command ="git add {0}".format(change)
print(command)
execute(*(command.split(" ")))
@task()
@skip_if_no_change("compile_py")
def compile_py():
with safe_cd(SRC):
execute(PYTHON, "-m", "compileall", PROJECT_NAME)
@task(formatting, compile_py)
@skip_if_no_change("prospector")
def prospector():
with safe_cd(SRC):
command = "{0} prospector {1} --profile {1}_style --pylint-config-file=pylintrc.ini --profile-path=.prospector".format(
PIPENV, PROJECT_NAME).strip().replace(" ", " ")
print(command)
execute(*(command
.split(" ")))
@task()
@skip_if_no_change("detect_secrets")
def detect_secrets():
etect-secrets-results.txt"
command = "{0} detect-secrets --scan --base64-limit 4 --exclude .idea|.js|.min.js|.html|.xsd|" \
"lock.json|synced_folders|.scss|Pipfile.lock|" \
"lint.txt|{1}".format(PIPENV, errors_file).strip()
print(command)
bash_process = subprocess.Popen(command.split(" "),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
foo = bash_process.wait()
out, err = bash_process.communicate()
with open(errors_file, "w+") as file_handle:
if len(out)==0:
print("Warning- no output from detect secrets. Happens with git hook, but not from ordinary command line.")
return
file_handle.write(out.decode())
with open(errors_file) as f:
try:
data = json.load(f)
except Exception:
print("Can't read json")
exit(-1)
return
if data["results"]:
for result in data["results"]:
print(result)
print("detect-secrets has discovered high entropy strings, possibly passwords?")
exit(-1)
@task(compile_py, formatting, prospector)
@skip_if_no_change("lint")
def lint():
with safe_cd(SRC):
if os.path.isfile("lint.txt"):
# TODO: detect OS
try:
execute("rm", "lint.txt")
except:
# execute("DEL", "lint.txt") # fails... why?
os.remove("lint.txt")
with safe_cd(SRC):
if IS_DJANGO:
django_bits = "--load-plugins pylint_django "
else:
django_bits = ""
# command += "{0}--rcfile=pylintrc.ini {1}".format(django_bits, PROJECT_NAME).split(" ")
command = "{0} pylint {1} --rcfile=pylintrc.ini {2}".format(PIPENV, django_bits, PROJECT_NAME) \
.strip() \
.replace(" ", " ")
print(command)
command = command.split(" ")
# keep out of src tree, causes extraneous change detections
lint_output_file_name = "lint.txt"
with open(lint_output_file_name, "w") as outfile:
env = config_pythonpath()
subprocess.call(command, stdout=outfile, env=env)
fatal_errors = sum(1 for line in open(lint_output_file_name)
if "no-member" in line or \
"no-name-in-module" in line or \
"import-error" in line)
if fatal_errors > 0:
for line in open(lint_output_file_name):
if "no-member" in line or \
"no-name-in-module" in line or \
"import-error" in line:
print(line)
print("Fatal lint errors : {0}".format(fatal_errors))
exit(-1)
cutoff = 100
num_lines = sum(1 for line in open(lint_output_file_name)
if "*************" not in line
and "---------------------" not in line
and "Your code has been rated at" not in line)
if num_lines > cutoff:
raise TypeError("Too many lines of lint : {0}, max {1}".format(num_lines, cutoff))
@task(lint)
@skip_if_no_change("nose_tests")
def nose_tests():
# with safe_cd(SRC):
if IS_DJANGO:
command = "{0} manage.py test -v 2".format(PYTHON)
# We'd expect this to be MAC or a build server.
my_env = config_pythonpath()
execute_with_environment(command, env=my_env)
else:
my_env = config_pythonpath()
if IS_TRAVIS:
command = "{0} -m nose {1}".format(PYTHON, "test").strip()
else:
command = "{0} {1} -m nose {2}".format(PIPENV, PYTHON, "simple_calls").strip()
print(command)
execute_with_environment(command, env=my_env)
def config_pythonpath():
if check_is_aws():
env = "DEV"
else:
env = "MAC"
my_env = {'ENV': env}
for key, value in os.environ.items():
my_env[key] = value
my_env["PYTHONPATH"] = my_env.get("PYTHONPATH",
"") + MAC_LIBS
print(my_env["PYTHONPATH"])
return my_env
@task()
def coverage():
print("coverage broken on windows... don't know why yet")
return # will get back to this..
print("Coverage tests always re-run")
with safe_cd(SRC):
my_env = config_pythonpath()
command = "{0} py.test {1} --cov={2} --cov-report html:coverage --cov-fail-under 1 --verbose".format(
PIPENV,
"simple_calls", PROJECT_NAME)
print(command)
execute_with_environment(command, my_env)
@task()
@skip_if_no_change("docs")
def docs():
with safe_cd(SRC):
with safe_cd("docs"):
my_env = config_pythonpath()
command = "{0} make html".format(PIPENV).strip()
print(command)
execute_with_environment(command, env=my_env)
@task()
def pip_check():
execute("pip", "check")
if PIPENV and not IS_TRAVIS:
execute("pipenv", "check")
execute("safety", "check", "-r", "requirements_dev.txt")
@task()
def compile_mark_down():
print("Not compiling README.md because moderately complex MD makes pypi rst parser puke.")
# with safe_cd(SRC):
# if IS_TRAVIS:
# command = "pandoc --from=markdown --to=rst --output=README.rst README.md".strip().split(
# " ")
# else:
# command = "{0} pandoc --from=markdown --to=rst --output=README.rst README.md".format(PIPENV).strip().split(
# " ")
# execute(*(command))
@task()
@skip_if_no_change("mypy")
def mypy():
if sys.version_info < (3, 4):
print("Mypy doesn't work on python < 3.4")
return
if IS_TRAVIS:
command = "{0} -m mypy {1} --ignore-missing-imports --strict".format(PYTHON, PROJECT_NAME).strip()
else:
command = "{0} mypy {1} --ignore-missing-imports --strict".format(PIPENV, PROJECT_NAME).strip()
bash_process = subprocess.Popen(command.split(" "),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
out, err = bash_process.communicate()
mypy_file = "mypy_errors.txt"
with open(mypy_file, "w+") as lint_file:
lines = out.decode().split("\n")
for line in lines:
if "build_utils.py" in line:
continue
if "test.py" in line:
continue
if "tests.py" in line:
continue
if "/test_" in line:
continue
if "/tests_" in line:
continue
else:
lint_file.writelines([line + "\n"])
num_lines = sum(1 for line in open(mypy_file) if line and line.strip(" \n"))
max_lines = 25
if num_lines > max_lines:
raise TypeError("Too many lines of mypy : {0}, max {1}".format(num_lines, max_lines))
@task()
def pin_dependencies():
with safe_cd(SRC):
execute(*("{0} pipenv_to_requirements".format(PIPENV).strip().split(" ")))
@task()
def jiggle_version():
command = "{0} jiggle_version --project={1} --source={2}".format(PIPENV, PROJECT_NAME, "").strip()
execute(*(command.split(" ")))
@task()
def check_setup_py():
with safe_cd(SRC):
if IS_TRAVIS:
execute(PYTHON, *("setup.py check -r -s".split(" ")))
else:
execute(*("{0} {1} setup.py check -r -s".format(PIPENV, PYTHON).strip().split(" ")))
@task()
@skip_if_no_change("vulture", expect_files="dead_code.txt")
def dead_code():
with safe_cd(SRC):
if IS_TRAVIS:
command = "{0} vulture {1}".format(PYTHON, PROJECT_NAME).strip().split()
else:
command = "{0} vulture {1}".format(PIPENV, PROJECT_NAME).strip().split()
output_file_name = "dead_code.txt"
with open(output_file_name, "w") as outfile:
env = config_pythonpath()
subprocess.call(command, stdout=outfile, env=env)
cutoff = 20
num_lines = sum(1 for line in open(output_file_name) if line)
if num_lines > cutoff:
print("Too many lines of dead code : {0}, max {1}".format(num_lines, cutoff))
exit(-1)
@task(formatting, mypy, detect_secrets, git_secrets,dead_code, nose_tests, coverage, compile_py, lint,
compile_mark_down, check_setup_py, pin_dependencies, jiggle_version)
@skip_if_no_change("package")
def package():
with safe_cd(SRC):
for folder in ["build", "dist", PROJECT_NAME + ".egg-info"]:
if os.path.exists(folder):
os.rmdir(folder)
with safe_cd(SRC):
execute(PYTHON, "setup.py", "sdist", "--formats=gztar,zip")
@task(package)
def gemfury():
cp = subprocess.run(("fury login --as={0}".format(GEM_FURY).split(" ")),
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=False, check=True)
print(cp.stdout)
about = {}
with open(os.path.join(SRC, PROJECT_NAME, "__version__.py")) as f:
exec(f.read(), about)
version = Version(about["__version__"])
print("Have version : " + str(version))
print("Preparing to upload")
if version not in get_versions():
for kind in ["gz", "whl"]:
try:
files = glob.glob("{0}dist/*.{1}".format(SRC.replace(".", ""), kind))
for file_name in files:
cp = subprocess.run(("fury push {0} --as={1}".format(file_name, GEM_FURY).split(" ")),
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=False, check=True)
print("result of fury push")
for stream in [cp.stdout, cp.stderr]:
if stream:
for line in stream.decode().split("\n"):
print(line)
except subprocess.CalledProcessError as cpe:
print("result of fury push- got error")
for stream in [cp.stdout, cp.stderr]:
if stream:
for line in stream.decode().split("\n"):
print(line)
print(cpe)
raise
@task(mypy, detect_secrets, git_secrets, check_setup_py, compile_py, dead_code)
@skip_if_no_change("pre_commit_hook")
def pre_commit_hook():
# Don't format or update version
# Run checks that are likely to have FATAL errors, not just sloppy coding.
pass
# Don't break the build, but don't change source tree either.
@task(mypy, detect_secrets, git_secrets, nose_tests, coverage, check_setup_py, compile_py, dead_code)
@skip_if_no_change("pre_push_hook")
def pre_push_hook():
# Don't format or update version
# Run checks that are likely to have FATAL errors, not just sloppy coding.
pass
def do_check_manifest(output_file_name, env):
if IS_TRAVIS:
command = "check-manifest".format(PYTHON).strip().split()
else:
command = "{0} check-manifest".format(PIPENV).strip().split()
with open(output_file_name, "w") as outfile:
subprocess.call(command, stdout=outfile, env=env)
@task()
@skip_if_no_change("check_manifest", "manifest_errors.txt")
def check_manifest():
env = config_pythonpath()
output_file_name = "manifest_errors.txt"
do_check_manifest(output_file_name, env)
with open(output_file_name) as outfile_reader:
text = outfile_reader.read()
print(text)
if not os.path.isfile("MANIFEST.in") and "no MANIFEST.in found" in text:
command = "{0} check-manifest -c".format(PIPENV).strip().split()
subprocess.call(command, env=env)
# print("Had to create MANIFEST.in, please review and redo")
do_check_manifest(output_file_name, env)
else:
pass
# print("found it")
cutoff = 20
num_lines = sum(1 for line in open(output_file_name) if line)
if num_lines > cutoff:
print("Too many lines of manifest problems : {0}, max {1}".format(num_lines, cutoff))
exit(-1)
@task()
def echo(*args, **kwargs):
print(args)
print(kwargs)
# Default task (if specified) is run when no task is specified in the command line
# make sure you define the variable __DEFAULT__ after the task is defined
# A good convention is to define it at the end of the module
# __DEFAULT__ is an optional member
__DEFAULT__ = echo | true | true |
f7378301a74dc9cd9e025b71204a6e6ed1b8e4b4 | 4,520 | py | Python | pypayd/interfaces/blockr.py | ser/pypayd-ng | 63d0e09ca5e966177874e6e2e82f6036898a56b0 | [
"MIT"
] | null | null | null | pypayd/interfaces/blockr.py | ser/pypayd-ng | 63d0e09ca5e966177874e6e2e82f6036898a56b0 | [
"MIT"
] | null | null | null | pypayd/interfaces/blockr.py | ser/pypayd-ng | 63d0e09ca5e966177874e6e2e82f6036898a56b0 | [
"MIT"
] | null | null | null | """
blockr.io
"""
import logging
import hashlib
from hashlib import sha256
import requests
from .. import config
from binascii import hexlify, unhexlify
def getUrl(request_string):
    """GET *request_string* and return the decoded JSON payload."""
    response = requests.get(request_string)
    return response.json()
def setHost():
    """Point config.BLOCKCHAIN_CONNECT at the appropriate blockr.io host."""
    if config.TESTNET:
        config.BLOCKCHAIN_CONNECT = 'http://tbtc.blockr.io'
    else:
        config.BLOCKCHAIN_CONNECT = 'http://btc.blockr.io'
#And fix this
def check():
    """Connectivity probe: delegates to getInfo() (per the TODO above, a
    lighter-weight check would be preferable)."""
    return getInfo()
def getInfo():
    """Fetch chain info from blockr.

    On success returns a bitcoind-style ``{"info": {...}}`` dict; on any
    other response returns the raw API payload unchanged.
    """
    result = getUrl(config.BLOCKCHAIN_CONNECT + '/api/v1/coin/info')
    if 'status' not in result or result['status'] != 'success':
        return result
    last_block = result['data']['last_block']
    return {
        "info": {
            "blocks": last_block['nb'],
            "difficulty": last_block['difficulty'],
        }
    }
def getUtxo(address):
    """Return the unspent outputs for *address* as a list of dicts, or
    None when the blockr call does not report success."""
    result = getUrl(config.BLOCKCHAIN_CONNECT + '/api/v1/address/unspent/{}/'.format(address))
    if 'status' not in result or result['status'] != 'success':
        return None
    return [
        {
            'address': address,
            'txid': unspent['tx'],
            'vout': unspent['n'],
            'ts': 0,
            'scriptPubKey': unspent['script'],
            'amount': float(unspent['amount']),
            'confirmations': unspent['confirmations'],
            'confirmationsFromCache': False,
        }
        for unspent in result['data']['unspent']
    ]
def getAddressInfo(address):
    """Return insight-style info for *address* by combining blockr's "info"
    and "txs" endpoints; None when either call fails."""
    infos = getUrl(config.BLOCKCHAIN_CONNECT + '/api/v1/address/info/{}'.format(address))
    if 'status' not in infos or infos['status'] != 'success':
        return None
    txs = getUrl(config.BLOCKCHAIN_CONNECT + '/api/v1/address/txs/{}'.format(address))
    if 'status' not in txs or txs['status'] != 'success':
        return None
    transactions = [tx['tx'] for tx in txs['data']['txs']]
    balance = infos['data']['balance']
    received = infos['data']['totalreceived']
    return {
        'addrStr': address,
        'balance': balance,
        'balanceSat': balance * config.UNIT,
        'totalReceived': received,
        'totalReceivedSat': received * config.UNIT,
        'unconfirmedBalance': 0,
        'unconfirmedBalanceSat': 0,
        'unconfirmedTxApperances': 0,
        'txApperances': txs['data']['nb_txs'],
        'transactions': transactions,
    }
def getTxInfo(tx_hash):
    """Return a bitcoind-style summary of transaction *tx_hash*, or None
    when the blockr lookup fails."""
    tx = getUrl(config.BLOCKCHAIN_CONNECT + '/api/v1/tx/raw/{}'.format(tx_hash))
    if tx.get('status') != 'success':
        return None
    raw = tx['data']['tx']
    # Total value across all outputs.
    valueOut = sum(vout['value'] for vout in raw['vout'])
    return {
        'txid': tx_hash,
        'version': raw['version'],
        'locktime': raw['locktime'],
        'blockhash': raw.get('blockhash', None),
        'confirmations': raw.get('confirmations', None),
        'time': raw.get('time', None),
        'blocktime': raw.get('blocktime', None),
        'valueOut': valueOut,
        'vin': raw['vin'],
        'vout': raw['vout'],
    }
def sourceAddressesFromTX(tx_full):
    '''Return source (outbound) addresses for a bitcoin tx'''
    addresses = []
    for vin in tx_full['vin']:
        # scriptSig asm is "<signature> <pubkey>"; the second token is the pubkey.
        pubkey_hex = vin['scriptSig']['asm'].split(" ")[1]
        addresses.append(addressForPubKey(pubkey_hex))
    return addresses
#This can be replaced with the pycoin function
# Base58 alphabet (omits 0, O, I and l to avoid visual ambiguity).
_b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
def addressForPubKey(pubkey_hex, testnet=None):
    """Derive the P2PKH address for a hex-encoded public key.

    Standard Base58Check pipeline: RIPEMD160(SHA256(pubkey)) prefixed with
    the network version byte and suffixed with a 4-byte double-SHA256
    checksum, then base58-encoded.
    """
    if testnet is None:
        testnet = config.TESTNET
    # NOTE(review): hashlib.new('ripemd160') depends on the OpenSSL build;
    # it can raise ValueError where legacy digests are disabled — confirm.
    ripehash = hashlib.new('ripemd160')
    step1 = unhexlify(pubkey_hex)
    step2 = sha256(step1).digest()
    ripehash.update(step2)
    # Version byte: 0x6F for testnet P2PKH, 0x00 for mainnet.
    if testnet:
        step4 = b'\x6F' + ripehash.digest()
    else:
        step4 = b'\x00' + ripehash.digest()
    # Checksum is the first four bytes of double SHA256 of version+hash.
    step5 = sha256(step4).digest()
    step6 = sha256(step5).digest()
    chksum = step6[:4]
    address = step4 + chksum
    addr_58 = encodeBase58(address)
    return addr_58
def encodeBase58(v):
    """Base58-encode the byte string *v* (Base58Check style).

    The integer value of *v* is written in base 58, then one leading '1'
    (the zero digit) is prepended per leading zero byte.

    Fix: the original emitted the final digit unconditionally, so an
    all-zero or empty input gained a spurious extra '1' (e.g. b'\\x00'
    encoded as '11' instead of '1').  Digits are now emitted only while
    the value is non-zero, matching the Base58Check specification.
    Real address payloads (version byte + hash + checksum) are unaffected.
    """
    alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
    long_value = int.from_bytes(v, 'big')
    digits = ''
    while long_value > 0:
        long_value, mod = divmod(long_value, 58)
        digits = alphabet[mod] + digits
    # One '1' per leading zero byte of the input.
    nPad = 0
    for byte in v:
        if byte == 0:
            nPad += 1
        else:
            break
    return alphabet[0] * nPad + digits
| 34.503817 | 103 | 0.562389 | import logging
import hashlib
from hashlib import sha256
import requests
from .. import config
from binascii import hexlify, unhexlify
def getUrl(request_string):
return requests.get(request_string).json()
def setHost():
config.BLOCKCHAIN_CONNECT = ('http://tbtc.blockr.io' if config.TESTNET else 'http://btc.blockr.io')
def check():
return getInfo()
def getInfo():
result = getUrl(config.BLOCKCHAIN_CONNECT + '/api/v1/coin/info', )
if 'status' in result and result['status'] == 'success':
return {
"info": {
"blocks": result['data']['last_block']['nb'],
"difficulty": result['data']['last_block']['difficulty']
}
}
return result
def getUtxo(address):
result = getUrl(config.BLOCKCHAIN_CONNECT + '/api/v1/address/unspent/{}/'.format(address))
if 'status' in result and result['status'] == 'success':
utxo = []
for txo in result['data']['unspent']:
newtxo = {
'address': address,
'txid': txo['tx'],
'vout': txo['n'],
'ts': 0,
'scriptPubKey': txo['script'],
'amount': float(txo['amount']),
'confirmations': txo['confirmations'],
'confirmationsFromCache': False
}
utxo.append(newtxo)
return utxo
return None
def getAddressInfo(address):
infos = getUrl(config.BLOCKCHAIN_CONNECT + '/api/v1/address/info/{}'.format(address), )
if 'status' in infos and infos['status'] == 'success':
txs = getUrl(config.BLOCKCHAIN_CONNECT + '/api/v1/address/txs/{}'.format(address), )
if 'status' in txs and txs['status'] == 'success':
transactions = []
for tx in txs['data']['txs']:
transactions.append(tx['tx'])
return {
'addrStr': address,
'balance': infos['data']['balance'],
'balanceSat': infos['data']['balance'] * config.UNIT,
'totalReceived': infos['data']['totalreceived'],
'totalReceivedSat': infos['data']['totalreceived'] * config.UNIT,
'unconfirmedBalance': 0,
'unconfirmedBalanceSat': 0,
'unconfirmedTxApperances': 0,
'txApperances': txs['data']['nb_txs'],
'transactions': transactions
}
return None
def getTxInfo(tx_hash):
tx = getUrl(config.BLOCKCHAIN_CONNECT + '/api/v1/tx/raw/{}'.format(tx_hash))
if tx.get('status') == 'success':
valueOut = 0
for vout in tx['data']['tx']['vout']:
valueOut += vout['value']
return {
'txid': tx_hash,
'version': tx['data']['tx']['version'],
'locktime': tx['data']['tx']['locktime'],
'blockhash': tx['data']['tx'].get('blockhash', None),
'confirmations': tx['data']['tx'].get('confirmations', None),
'time': tx['data']['tx'].get('time', None),
'blocktime': tx['data']['tx'].get('blocktime', None),
'valueOut': valueOut,
'vin': tx['data']['tx']['vin'],
'vout': tx['data']['tx']['vout']
}
return None
def sourceAddressesFromTX(tx_full):
return [addressForPubKey(i['scriptSig']['asm'].split(" ")[1]) for i in tx_full['vin']]
_b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
def addressForPubKey(pubkey_hex, testnet=None):
if testnet is None:
testnet = config.TESTNET
ripehash = hashlib.new('ripemd160')
step1 = unhexlify(pubkey_hex)
step2 = sha256(step1).digest()
ripehash.update(step2)
if testnet:
step4 = b'\x6F' + ripehash.digest()
else:
step4 = b'\x00' + ripehash.digest()
step5 = sha256(step4).digest()
step6 = sha256(step5).digest()
chksum = step6[:4]
address = step4 + chksum
addr_58 = encodeBase58(address)
return addr_58
def encodeBase58(v):
long_value = int.from_bytes(v, 'big')
result = ''
while long_value >= 58:
div, mod = divmod(long_value, 58)
result = _b58chars[mod] + result
long_value = div
result = _b58chars[long_value] + result
nPad = 0
for c in v:
if c == ord(b'\0'): nPad += 1
else: break
return (_b58chars[0]*nPad) + result
| true | true |
f73783f43a83454c6e8f9343bc28de26e3306193 | 9,158 | py | Python | test/widgets/test_mpris2widget.py | karthink/qtile | 83bd706dbf2d3abeda990d728c74310ea1fbea92 | [
"MIT"
] | null | null | null | test/widgets/test_mpris2widget.py | karthink/qtile | 83bd706dbf2d3abeda990d728c74310ea1fbea92 | [
"MIT"
] | null | null | null | test/widgets/test_mpris2widget.py | karthink/qtile | 83bd706dbf2d3abeda990d728c74310ea1fbea92 | [
"MIT"
] | null | null | null | # Copyright (c) 2021 elParaguayo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Widget specific tests
import sys
from importlib import reload
from types import ModuleType
import pytest
from libqtile.bar import Bar
def no_op(*args, **kwargs):
    """Swallow any arguments and do nothing (stub for Bar.draw etc.)."""
    return None
async def mock_signal_receiver(*args, **kwargs):
    """Stand-in for the widget's dbus signal subscription; always succeeds."""
    subscribed = True
    return subscribed
def fake_timer(interval, func, *args, **kwargs):
    """Replacement for the widget's timeout_add: returns an inert timer
    handle that can be cancelled and never reports itself as scheduled."""
    class _InertTimer:
        def cancel(self):
            return None

        @property
        def _scheduled(self):
            return False

    return _InertTimer()
class MockConstants(ModuleType):
    """Stand-in for the ``dbus_next.constants`` module.

    The widget only needs MessageType.SIGNAL to filter incoming messages.
    """
    class MessageType:
        SIGNAL = 1
class MockMessage:
    """Minimal dbus_next Message stand-in: a message type plus a body."""
    def __init__(self, is_signal=True, body=None):
        self.body = body
        # 1 matches MockConstants.MessageType.SIGNAL; anything else is ignored
        # by the widget's message filter.
        if is_signal:
            self.message_type = 1
        else:
            self.message_type = 0
# dbus_next message data is stored in variants. The widget extracts the
# information via the `value` attribute so we just need to mock that here.
class obj:  # noqa: N801
    """Variant stand-in: wraps a value, exposed via ``.value``."""
    def __init__(self, value):
        self.value = value
# Creates a mock message body containing both metadata and playback status
def metadata_and_status(status):
    """Build a signal MockMessage carrying fixed track Metadata plus the
    given PlaybackStatus, mimicking a PropertiesChanged body."""
    return MockMessage(body=(
        "",
        {
            'Metadata': obj(
                {
                    'mpris:trackid': obj(1),
                    'xesam:url': obj("/path/to/rickroll.mp3"),
                    'xesam:title': obj("Never Gonna Give You Up"),
                    'xesam:artist': obj(["Rick Astley"]),
                    'xesam:album': obj("Whenever You Need Somebody"),
                    'mpris:length': obj(200000000)
                }
            ),
            'PlaybackStatus': obj(status)
        },
        [])
    )
# Creates a mock message body containing just playback status
def playback_status(status, signal=True):
    """Build a MockMessage carrying only a PlaybackStatus (no Metadata).

    Pass signal=False to produce a non-signal message the widget ignores.
    """
    return MockMessage(is_signal=signal, body=(
        "",
        {
            'PlaybackStatus': obj(status)
        },
        [])
    )
# Canned dbus messages reused by the tests below.
METADATA_PLAYING = metadata_and_status("Playing")
METADATA_PAUSED = metadata_and_status("Paused")
STATUS_PLAYING = playback_status("Playing")
STATUS_PAUSED = playback_status("Paused")
STATUS_STOPPED = playback_status("Stopped")
# Non-signal message type: the widget should ignore it entirely.
NON_SIGNAL = playback_status("Paused", False)
@pytest.fixture
def patched_module(monkeypatch):
    """Return libqtile.widget.mpris2widget with dbus_next patched out.

    Substitutes MockConstants for dbus_next.constants and an async stub for
    add_signal_receiver so no real message bus is required.
    """
    # Remove dbus_next.constants entry from modules. If it's not there, don't raise error
    monkeypatch.delitem(sys.modules, "dbus_next.constants", raising=False)
    monkeypatch.setitem(sys.modules, "dbus_next.constants", MockConstants("dbus_next.constants"))
    from libqtile.widget import mpris2widget
    # Need to force reload of the module to ensure patched module is loaded
    # This may only be needed if dbus_next is installed on testing system so helpful for
    # local tests.
    reload(mpris2widget)
    monkeypatch.setattr("libqtile.widget.mpris2widget.add_signal_receiver", mock_signal_receiver)
    return mpris2widget
def test_mpris2_signal_handling(fake_qtile, patched_module, fake_window):
    """End-to-end check of signal handling: configured-flag gating, text
    scrolling, and the playing/paused/stopped display transitions."""
    mp = patched_module.Mpris2(scroll_chars=20, scroll_wait_intervals=5)
    fakebar = Bar([mp], 24)
    fakebar.window = fake_window
    fakebar.width = 10
    fakebar.height = 10
    fakebar.draw = no_op
    mp.timeout_add = fake_timer
    mp._configure(fake_qtile, fakebar)
    assert mp.displaytext == ""
    # No text will be displayed if widget is not configured
    mp.message(METADATA_PLAYING)
    assert mp.displaytext == ""
    # Set configured flag, create a message with the metadata and playback status
    mp.configured = True
    mp.message(METADATA_PLAYING)
    assert mp.displaytext == "Never Gonna Give You Up - Whenever You Need Somebody - Rick Astley"
    assert mp.text == ""
    # Text is displayed after first run of scroll_text
    mp.scroll_text()
    assert mp.text == "Never Gonna Give You Up - Whenever You Need Somebody - Rick Astley"[:mp.scroll_chars]
    # Text is scrolled 1 character after `scroll_wait_intervals` runs of scroll_text
    for _ in range(mp.scroll_wait_intervals):
        mp.scroll_text()
    assert mp.text == "Never Gonna Give You Up - Whenever You Need Somebody - Rick Astley"[1:mp.scroll_chars + 1]
    # Non-signal type message will be ignored
    mp.message(NON_SIGNAL)
    assert mp.text == "Never Gonna Give You Up - Whenever You Need Somebody - Rick Astley"[1:mp.scroll_chars + 1]
    # If widget receives "paused" signal with no metadata then default message is "Paused"
    mp.message(STATUS_PAUSED)
    assert mp.displaytext == "Paused"
    # If widget receives "stopped" signal with no metadata then widget is blank
    mp.message(STATUS_STOPPED)
    assert mp.displaytext == ""
    # Reset to playing + metadata
    mp.message(METADATA_PLAYING)
    mp.scroll_text()
    assert mp.text == "Never Gonna Give You Up - Whenever You Need Somebody - Rick Astley"[:mp.scroll_chars]
    # If widget receives "paused" signal with metadata then message is "Paused: {metadata}"
    mp.message(METADATA_PAUSED)
    mp.scroll_text()
    assert mp.text == "Paused: Never Gonna Give You Up - Whenever You Need Somebody - Rick Astley"[:mp.scroll_chars]
    # If widget now receives "playing" signal with no metadata, "paused" word is removed
    mp.message(STATUS_PLAYING)
    mp.scroll_text()
    assert mp.text == "Never Gonna Give You Up - Whenever You Need Somebody - Rick Astley"[:mp.scroll_chars]
    info = mp.cmd_info()
    assert info["displaytext"] == "Never Gonna Give You Up - Whenever You Need Somebody - Rick Astley"
    assert info["isplaying"]
def test_mpris2_custom_stop_text(fake_qtile, patched_module, fake_window):
    """A custom stop_pause_text replaces the default "Paused" wording."""
    mp = patched_module.Mpris2(stop_pause_text="Test Paused")
    fakebar = Bar([mp], 24)
    fakebar.window = fake_window
    fakebar.width = 10
    fakebar.height = 10
    fakebar.draw = no_op
    mp.timeout_add = fake_timer
    mp._configure(fake_qtile, fakebar)
    mp.configured = True
    mp.message(METADATA_PLAYING)
    assert mp.displaytext == "Never Gonna Give You Up - Whenever You Need Somebody - Rick Astley"
    assert mp.text == ""
    mp.scroll_text()
    # Check our custom paused wording is shown
    mp.message(STATUS_PAUSED)
    assert mp.displaytext == "Test Paused"
def test_mpris2_no_metadata(fake_qtile, patched_module, fake_window):
    """A "Playing" signal with no Metadata shows the fallback message."""
    mp = patched_module.Mpris2(stop_pause_text="Test Paused")
    fakebar = Bar([mp], 24)
    fakebar.window = fake_window
    fakebar.width = 10
    fakebar.height = 10
    fakebar.draw = no_op
    mp.timeout_add = fake_timer
    mp._configure(fake_qtile, fakebar)
    mp.configured = True
    mp.message(STATUS_PLAYING)
    assert mp.displaytext == "No metadata for current track"
def test_mpris2_no_scroll(fake_qtile, patched_module, fake_window):
    """With scroll_chars=None, the full text is set immediately on update
    (no scroll_text step needed)."""
    # If no scrolling, then the update function creates the text to display
    # and draws the bar.
    mp = patched_module.Mpris2(scroll_chars=None)
    fakebar = Bar([mp], 24)
    fakebar.window = fake_window
    fakebar.width = 10
    fakebar.height = 10
    fakebar.draw = no_op
    mp.timeout_add = fake_timer
    mp._configure(fake_qtile, fakebar)
    mp.configured = True
    mp.message(METADATA_PLAYING)
    assert mp.text == "Never Gonna Give You Up - Whenever You Need Somebody - Rick Astley"
    mp.message(METADATA_PAUSED)
    assert mp.text == "Paused: Never Gonna Give You Up - Whenever You Need Somebody - Rick Astley"
def test_mpris2_clear_after_scroll(fake_qtile, patched_module, fake_window):
    """Scrolling past the end of the text clears the widget display."""
    mp = patched_module.Mpris2(scroll_chars=60, scroll_wait_intervals=2)
    fakebar = Bar([mp], 24)
    fakebar.window = fake_window
    fakebar.width = 10
    fakebar.height = 10
    fakebar.draw = no_op
    mp.timeout_add = fake_timer
    mp._configure(fake_qtile, fakebar)
    mp.configured = True
    mp.message(METADATA_PLAYING)
    # After 10 loops, text should be cleared as scroll reaches end of text.
    # 2 loops before starting scroll
    # 6 loops to loop over remaining text in display
    # 1 additional loop at end of text (so total 2 loops on that display)
    # 1 loop to clear.
    for i in range(10):
        mp.scroll_text()
    assert mp.text == ""
# TO DO: untested lines
# 85-86: Logging when unable to subscribe to dbus signal. Needs `caplog`
| 34.171642 | 116 | 0.698624 |
import sys
from importlib import reload
from types import ModuleType
import pytest
from libqtile.bar import Bar
def no_op(*args, **kwargs):
pass
async def mock_signal_receiver(*args, **kwargs):
return True
def fake_timer(interval, func, *args, **kwargs):
class TimerObj:
def cancel(self):
pass
@property
def _scheduled(self):
return False
return TimerObj()
class MockConstants(ModuleType):
class MessageType:
SIGNAL = 1
class MockMessage:
def __init__(self, is_signal=True, body=None):
self.message_type = 1 if is_signal else 0
self.body = body
class obj:
def __init__(self, value):
self.value = value
def metadata_and_status(status):
return MockMessage(body=(
"",
{
'Metadata': obj(
{
'mpris:trackid': obj(1),
'xesam:url': obj("/path/to/rickroll.mp3"),
'xesam:title': obj("Never Gonna Give You Up"),
'xesam:artist': obj(["Rick Astley"]),
'xesam:album': obj("Whenever You Need Somebody"),
'mpris:length': obj(200000000)
}
),
'PlaybackStatus': obj(status)
},
[])
)
def playback_status(status, signal=True):
return MockMessage(is_signal=signal, body=(
"",
{
'PlaybackStatus': obj(status)
},
[])
)
METADATA_PLAYING = metadata_and_status("Playing")
METADATA_PAUSED = metadata_and_status("Paused")
STATUS_PLAYING = playback_status("Playing")
STATUS_PAUSED = playback_status("Paused")
STATUS_STOPPED = playback_status("Stopped")
NON_SIGNAL = playback_status("Paused", False)
@pytest.fixture
def patched_module(monkeypatch):
monkeypatch.delitem(sys.modules, "dbus_next.constants", raising=False)
monkeypatch.setitem(sys.modules, "dbus_next.constants", MockConstants("dbus_next.constants"))
from libqtile.widget import mpris2widget
reload(mpris2widget)
monkeypatch.setattr("libqtile.widget.mpris2widget.add_signal_receiver", mock_signal_receiver)
return mpris2widget
def test_mpris2_signal_handling(fake_qtile, patched_module, fake_window):
    """End-to-end check of Mpris2's dbus message handling and text scrolling."""
    mp = patched_module.Mpris2(scroll_chars=20, scroll_wait_intervals=5)
    fakebar = Bar([mp], 24)
    fakebar.window = fake_window
    fakebar.width = 10
    fakebar.height = 10
    fakebar.draw = no_op
    mp.timeout_add = fake_timer
    mp._configure(fake_qtile, fakebar)
    assert mp.displaytext == ""
    # Messages arriving before the widget is configured are ignored.
    mp.message(METADATA_PLAYING)
    assert mp.displaytext == ""
    mp.configured = True
    mp.message(METADATA_PLAYING)
    assert mp.displaytext == "Never Gonna Give You Up - Whenever You Need Somebody - Rick Astley"
    assert mp.text == ""
    # First scroll tick shows the first scroll_chars characters.
    mp.scroll_text()
    assert mp.text == "Never Gonna Give You Up - Whenever You Need Somebody - Rick Astley"[:mp.scroll_chars]
    # After scroll_wait_intervals further ticks the window advances by one character.
    for _ in range(mp.scroll_wait_intervals):
        mp.scroll_text()
    assert mp.text == "Never Gonna Give You Up - Whenever You Need Somebody - Rick Astley"[1:mp.scroll_chars + 1]
    # Non-signal messages must be ignored outright -- text is unchanged.
    mp.message(NON_SIGNAL)
    assert mp.text == "Never Gonna Give You Up - Whenever You Need Somebody - Rick Astley"[1:mp.scroll_chars + 1]
    # A Paused status with no metadata payload shows the default pause text.
    mp.message(STATUS_PAUSED)
    assert mp.displaytext == "Paused"
    # Stopped clears the display.
    mp.message(STATUS_STOPPED)
    assert mp.displaytext == ""
    mp.message(METADATA_PLAYING)
    mp.scroll_text()
    assert mp.text == "Never Gonna Give You Up - Whenever You Need Somebody - Rick Astley"[:mp.scroll_chars]
    # Paused metadata prefixes the scrolled display with "Paused: ".
    mp.message(METADATA_PAUSED)
    mp.scroll_text()
    assert mp.text == "Paused: Never Gonna Give You Up - Whenever You Need Somebody - Rick Astley"[:mp.scroll_chars]
    mp.message(STATUS_PLAYING)
    mp.scroll_text()
    assert mp.text == "Never Gonna Give You Up - Whenever You Need Somebody - Rick Astley"[:mp.scroll_chars]
    # cmd_info exposes the current display text and playing state.
    info = mp.cmd_info()
    assert info["displaytext"] == "Never Gonna Give You Up - Whenever You Need Somebody - Rick Astley"
    assert info["isplaying"]
def test_mpris2_custom_stop_text(fake_qtile, patched_module, fake_window):
    """A user-supplied ``stop_pause_text`` replaces the default "Paused"."""
    mp = patched_module.Mpris2(stop_pause_text="Test Paused")
    fakebar = Bar([mp], 24)
    fakebar.window = fake_window
    fakebar.width = 10
    fakebar.height = 10
    fakebar.draw = no_op
    mp.timeout_add = fake_timer
    mp._configure(fake_qtile, fakebar)
    mp.configured = True
    mp.message(METADATA_PLAYING)
    assert mp.displaytext == "Never Gonna Give You Up - Whenever You Need Somebody - Rick Astley"
    assert mp.text == ""
    mp.scroll_text()
    # Pausing swaps the display for the custom stop/pause text.
    mp.message(STATUS_PAUSED)
    assert mp.displaytext == "Test Paused"
def test_mpris2_no_metadata(fake_qtile, patched_module, fake_window):
    """A Playing status with no stored track metadata shows a placeholder."""
    mp = patched_module.Mpris2(stop_pause_text="Test Paused")
    fakebar = Bar([mp], 24)
    fakebar.window = fake_window
    fakebar.width = 10
    fakebar.height = 10
    fakebar.draw = no_op
    mp.timeout_add = fake_timer
    mp._configure(fake_qtile, fakebar)
    mp.configured = True
    # STATUS_PLAYING carries only PlaybackStatus, never any Metadata.
    mp.message(STATUS_PLAYING)
    assert mp.displaytext == "No metadata for current track"
def test_mpris2_no_scroll(fake_qtile, patched_module, fake_window):
    """With ``scroll_chars=None`` the full text is shown without scrolling."""
    mp = patched_module.Mpris2(scroll_chars=None)
    fakebar = Bar([mp], 24)
    fakebar.window = fake_window
    fakebar.width = 10
    fakebar.height = 10
    fakebar.draw = no_op
    mp.timeout_add = fake_timer
    mp._configure(fake_qtile, fakebar)
    mp.configured = True
    # Text is set immediately; no scroll_text() tick is required.
    mp.message(METADATA_PLAYING)
    assert mp.text == "Never Gonna Give You Up - Whenever You Need Somebody - Rick Astley"
    mp.message(METADATA_PAUSED)
    assert mp.text == "Paused: Never Gonna Give You Up - Whenever You Need Somebody - Rick Astley"
def test_mpris2_clear_after_scroll(fake_qtile, patched_module, fake_window):
    """After the text has fully scrolled past, the widget clears itself."""
    mp = patched_module.Mpris2(scroll_chars=60, scroll_wait_intervals=2)
    fakebar = Bar([mp], 24)
    fakebar.window = fake_window
    fakebar.width = 10
    fakebar.height = 10
    fakebar.draw = no_op
    mp.timeout_add = fake_timer
    mp._configure(fake_qtile, fakebar)
    mp.configured = True
    mp.message(METADATA_PLAYING)
    # Ten ticks is enough to scroll through the full title (plus the wait
    # intervals at either end) and reach the cleared state.
    for i in range(10):
        mp.scroll_text()
    assert mp.text == ""
| true | true |
f737854a198f1054eb409018d023611d116717a7 | 19,025 | py | Python | django/contrib/gis/tests/distapp/tests.py | hellysmile/django | f915d39afe3565ac2355de57c11843530b96f3bc | [
"BSD-3-Clause"
] | null | null | null | django/contrib/gis/tests/distapp/tests.py | hellysmile/django | f915d39afe3565ac2355de57c11843530b96f3bc | [
"BSD-3-Clause"
] | null | null | null | django/contrib/gis/tests/distapp/tests.py | hellysmile/django | f915d39afe3565ac2355de57c11843530b96f3bc | [
"BSD-3-Clause"
] | null | null | null | from __future__ import unicode_literals
from unittest import skipUnless
from django.db import connection
from django.db.models import Q
from django.contrib.gis.geos import HAS_GEOS
from django.contrib.gis.measure import D # alias for Distance
from django.contrib.gis.tests.utils import (
HAS_SPATIAL_DB, mysql, oracle, postgis, spatialite, no_oracle, no_spatialite
)
from django.test import TestCase
if HAS_GEOS and HAS_SPATIAL_DB:
from django.contrib.gis.geos import GEOSGeometry, LineString
from .models import (AustraliaCity, Interstate, SouthTexasInterstate,
SouthTexasCity, SouthTexasCityFt, CensusZipcode, SouthTexasZipcode)
@skipUnless(HAS_GEOS and HAS_SPATIAL_DB and not mysql,
            "Geos and spatial db (not mysql) are required.")
class DistanceTest(TestCase):
    """Exercise GeoDjango distance lookups and the distance/area/length/
    perimeter GeoQuerySet methods across the supported spatial backends
    (PostGIS, Oracle, SpatiaLite)."""

    fixtures = ['initial']

    if HAS_GEOS and HAS_SPATIAL_DB:
        # A point we are testing distances with -- using a WGS84
        # coordinate that'll be implicitly transformed to the
        # coordinate system of the field, EPSG:32140 (Texas South Central
        # w/units in meters).
        stx_pnt = GEOSGeometry('POINT (-95.370401017314293 29.704867409475465)', 4326)
        # Another one for Australia
        au_pnt = GEOSGeometry('POINT (150.791 -34.4919)', 4326)

    def get_names(self, qs):
        """Return the sorted list of ``name`` values from the queryset *qs*."""
        cities = [c.name for c in qs]
        cities.sort()
        return cities

    def test_init(self):
        """
        Test initialization of distance models.
        """
        self.assertEqual(9, SouthTexasCity.objects.count())
        self.assertEqual(9, SouthTexasCityFt.objects.count())
        self.assertEqual(11, AustraliaCity.objects.count())
        self.assertEqual(4, SouthTexasZipcode.objects.count())
        self.assertEqual(4, CensusZipcode.objects.count())
        self.assertEqual(1, Interstate.objects.count())
        self.assertEqual(1, SouthTexasInterstate.objects.count())

    @no_spatialite
    def test_dwithin(self):
        """
        Test the `dwithin` lookup type.
        """
        # Distances -- all should be equal (except for the
        # degree/meter pair in au_cities, that's somewhat
        # approximate).
        tx_dists = [(7000, 22965.83), D(km=7), D(mi=4.349)]
        au_dists = [(0.5, 32000), D(km=32), D(mi=19.884)]
        # Expected cities for Australia and Texas.
        tx_cities = ['Downtown Houston', 'Southside Place']
        au_cities = ['Mittagong', 'Shellharbour', 'Thirroul', 'Wollongong']
        # Performing distance queries on two projected coordinate systems one
        # with units in meters and the other in units of U.S. survey feet.
        for dist in tx_dists:
            if isinstance(dist, tuple):
                dist1, dist2 = dist
            else:
                dist1 = dist2 = dist
            qs1 = SouthTexasCity.objects.filter(point__dwithin=(self.stx_pnt, dist1))
            qs2 = SouthTexasCityFt.objects.filter(point__dwithin=(self.stx_pnt, dist2))
            for qs in qs1, qs2:
                self.assertEqual(tx_cities, self.get_names(qs))
        # Now performing the `dwithin` queries on a geodetic coordinate system.
        for dist in au_dists:
            if isinstance(dist, D) and not oracle:
                type_error = True
            else:
                type_error = False
            if isinstance(dist, tuple):
                if oracle:
                    dist = dist[1]
                else:
                    dist = dist[0]
            # Creating the query set.
            qs = AustraliaCity.objects.order_by('name')
            if type_error:
                # A ValueError should be raised on PostGIS when trying to pass
                # Distance objects into a DWithin query using a geodetic field.
                self.assertRaises(ValueError, AustraliaCity.objects.filter(point__dwithin=(self.au_pnt, dist)).count)
            else:
                self.assertListEqual(au_cities, self.get_names(qs.filter(point__dwithin=(self.au_pnt, dist))))

    def test_distance_projected(self):
        """
        Test the `distance` GeoQuerySet method on projected coordinate systems.
        """
        # The point for La Grange, TX
        lagrange = GEOSGeometry('POINT(-96.876369 29.905320)', 4326)
        # Reference distances in feet and in meters. Got these values from
        # using the provided raw SQL statements.
        # SELECT ST_Distance(point, ST_Transform(ST_GeomFromText('POINT(-96.876369 29.905320)', 4326), 32140)) FROM distapp_southtexascity;
        m_distances = [147075.069813, 139630.198056, 140888.552826,
                       138809.684197, 158309.246259, 212183.594374,
                       70870.188967, 165337.758878, 139196.085105]
        # SELECT ST_Distance(point, ST_Transform(ST_GeomFromText('POINT(-96.876369 29.905320)', 4326), 2278)) FROM distapp_southtexascityft;
        # Oracle 11 thinks this is not a projected coordinate system, so it's
        # not tested.
        ft_distances = [482528.79154625, 458103.408123001, 462231.860397575,
                        455411.438904354, 519386.252102563, 696139.009211594,
                        232513.278304279, 542445.630586414, 456679.155883207]
        # Testing using different variations of parameters and using models
        # with different projected coordinate systems.
        dist1 = SouthTexasCity.objects.distance(lagrange, field_name='point')
        dist2 = SouthTexasCity.objects.distance(lagrange)  # Using GEOSGeometry parameter
        if spatialite or oracle:
            dist_qs = [dist1, dist2]
        else:
            dist3 = SouthTexasCityFt.objects.distance(lagrange.ewkt)  # Using EWKT string parameter.
            dist4 = SouthTexasCityFt.objects.distance(lagrange)
            dist_qs = [dist1, dist2, dist3, dist4]
        # Original query done on PostGIS, have to adjust AlmostEqual tolerance
        # for Oracle.
        tol = 2 if oracle else 5
        # Ensuring expected distances are returned for each distance queryset.
        for qs in dist_qs:
            for i, c in enumerate(qs):
                self.assertAlmostEqual(m_distances[i], c.distance.m, tol)
                self.assertAlmostEqual(ft_distances[i], c.distance.survey_ft, tol)

    @no_spatialite
    def test_distance_geodetic(self):
        """
        Test the `distance` GeoQuerySet method on geodetic coordinate systems.
        """
        tol = 2 if oracle else 5
        # Testing geodetic distance calculation with a non-point geometry
        # (a LineString of Wollongong and Shellharbour coords).
        ls = LineString(((150.902, -34.4245), (150.87, -34.5789)))
        if oracle or postgis:
            # Reference query:
            # SELECT ST_distance_sphere(point, ST_GeomFromText('LINESTRING(150.9020 -34.4245,150.8700 -34.5789)', 4326)) FROM distapp_australiacity ORDER BY name;
            distances = [1120954.92533513, 140575.720018241, 640396.662906304,
                         60580.9693849269, 972807.955955075, 568451.8357838,
                         40435.4335201384, 0, 68272.3896586844, 12375.0643697706, 0]
            qs = AustraliaCity.objects.distance(ls).order_by('name')
            for city, distance in zip(qs, distances):
                # Testing equivalence to within a meter.
                self.assertAlmostEqual(distance, city.distance.m, 0)
        # Got the reference distances using the raw SQL statements:
        # SELECT ST_distance_spheroid(point, ST_GeomFromText('POINT(151.231341 -33.952685)', 4326), 'SPHEROID["WGS 84",6378137.0,298.257223563]') FROM distapp_australiacity WHERE (NOT (id = 11));
        # SELECT ST_distance_sphere(point, ST_GeomFromText('POINT(151.231341 -33.952685)', 4326)) FROM distapp_australiacity WHERE (NOT (id = 11)); st_distance_sphere
        if connection.ops.postgis and connection.ops.proj_version_tuple() >= (4, 7, 0):
            # PROJ.4 versions 4.7+ have updated datums, and thus different
            # distance values.
            spheroid_distances = [60504.0628957201, 77023.9489850262, 49154.8867574404,
                                  90847.4358768573, 217402.811919332, 709599.234564757,
                                  640011.483550888, 7772.00667991925, 1047861.78619339,
                                  1165126.55236034]
            sphere_distances = [60580.9693849267, 77144.0435286473, 49199.4415344719,
                                90804.7533823494, 217713.384600405, 709134.127242793,
                                639828.157159169, 7786.82949717788, 1049204.06569028,
                                1162623.7238134]
        else:
            spheroid_distances = [60504.0628825298, 77023.948962654, 49154.8867507115,
                                  90847.435881812, 217402.811862568, 709599.234619957,
                                  640011.483583758, 7772.00667666425, 1047861.7859506,
                                  1165126.55237647]
            sphere_distances = [60580.7612632291, 77143.7785056615, 49199.2725132184,
                                90804.4414289463, 217712.63666124, 709131.691061906,
                                639825.959074112, 7786.80274606706, 1049200.46122281,
                                1162619.7297006]
        # Testing with spheroid distances first.
        hillsdale = AustraliaCity.objects.get(name='Hillsdale')
        qs = AustraliaCity.objects.exclude(id=hillsdale.id).distance(hillsdale.point, spheroid=True)
        for i, c in enumerate(qs):
            self.assertAlmostEqual(spheroid_distances[i], c.distance.m, tol)
        if postgis:
            # PostGIS uses sphere-only distances by default, testing these as well.
            qs = AustraliaCity.objects.exclude(id=hillsdale.id).distance(hillsdale.point)
            for i, c in enumerate(qs):
                self.assertAlmostEqual(sphere_distances[i], c.distance.m, tol)

    @no_oracle  # Oracle already handles geographic distance calculation.
    def test_distance_transform(self):
        """
        Test the `distance` GeoQuerySet method used with `transform` on a geographic field.
        """
        # We'll be using a Polygon (created by buffering the centroid
        # of 77005 to 100m) -- which aren't allowed in geographic distance
        # queries normally, however our field has been transformed to
        # a non-geographic system.
        z = SouthTexasZipcode.objects.get(name='77005')
        # Reference query:
        # SELECT ST_Distance(ST_Transform("distapp_censuszipcode"."poly", 32140), ST_GeomFromText('<buffer_wkt>', 32140)) FROM "distapp_censuszipcode";
        dists_m = [3553.30384972258, 1243.18391525602, 2186.15439472242]
        # Having our buffer in the SRID of the transformation and of the field
        # -- should get the same results. The first buffer has no need for
        # transformation SQL because it is the same SRID as what was given
        # to `transform()`. The second buffer will need to be transformed,
        # however.
        buf1 = z.poly.centroid.buffer(100)
        buf2 = buf1.transform(4269, clone=True)
        ref_zips = ['77002', '77025', '77401']
        for buf in [buf1, buf2]:
            qs = CensusZipcode.objects.exclude(name='77005').transform(32140).distance(buf).order_by('name')
            self.assertListEqual(ref_zips, self.get_names(qs))
            for i, z in enumerate(qs):
                self.assertAlmostEqual(z.distance.m, dists_m[i], 5)

    def test_distance_lookups(self):
        """
        Test the `distance_lt`, `distance_gt`, `distance_lte`, and `distance_gte` lookup types.
        """
        # Retrieving the cities within a 20km 'donut' w/a 7km radius 'hole'
        # (thus, Houston and Southside place will be excluded as tested in
        # the `test02_dwithin` above).
        qs1 = SouthTexasCity.objects.filter(point__distance_gte=(self.stx_pnt, D(km=7))).filter(point__distance_lte=(self.stx_pnt, D(km=20)))
        # Can't determine the units on SpatiaLite from PROJ.4 string, and
        # Oracle 11 incorrectly thinks it is not projected.
        if spatialite or oracle:
            dist_qs = (qs1,)
        else:
            qs2 = SouthTexasCityFt.objects.filter(point__distance_gte=(self.stx_pnt, D(km=7))).filter(point__distance_lte=(self.stx_pnt, D(km=20)))
            dist_qs = (qs1, qs2)
        for qs in dist_qs:
            cities = self.get_names(qs)
            self.assertEqual(cities, ['Bellaire', 'Pearland', 'West University Place'])
        # Doing a distance query using Polygons instead of a Point.
        z = SouthTexasZipcode.objects.get(name='77005')
        qs = SouthTexasZipcode.objects.exclude(name='77005').filter(poly__distance_lte=(z.poly, D(m=275)))
        self.assertEqual(['77025', '77401'], self.get_names(qs))
        # If we add a little more distance 77002 should be included.
        qs = SouthTexasZipcode.objects.exclude(name='77005').filter(poly__distance_lte=(z.poly, D(m=300)))
        self.assertEqual(['77002', '77025', '77401'], self.get_names(qs))

    def test_geodetic_distance_lookups(self):
        """
        Test distance lookups on geodetic coordinate systems.
        """
        # Line is from Canberra to Sydney. Query is for all other cities within
        # a 100km of that line (which should exclude only Hobart & Adelaide).
        line = GEOSGeometry('LINESTRING(144.9630 -37.8143,151.2607 -33.8870)', 4326)
        dist_qs = AustraliaCity.objects.filter(point__distance_lte=(line, D(km=100)))
        if oracle or postgis:
            # Oracle and PostGIS can do distance lookups on arbitrary geometries.
            self.assertEqual(9, dist_qs.count())
            self.assertEqual(['Batemans Bay', 'Canberra', 'Hillsdale',
                              'Melbourne', 'Mittagong', 'Shellharbour',
                              'Sydney', 'Thirroul', 'Wollongong'],
                             self.get_names(dist_qs))
        else:
            # spatialite only allow geodetic distance queries (utilizing
            # ST_Distance_Sphere/ST_Distance_Spheroid) from Points to PointFields
            # on geometry columns.
            self.assertRaises(ValueError, dist_qs.count)
            # Ensured that a ValueError was raised, none of the rest of the test is
            # support on this backend, so bail now.
            if spatialite:
                return
        # Too many params (4 in this case) should raise a ValueError.
        self.assertRaises(ValueError, len,
                          AustraliaCity.objects.filter(point__distance_lte=('POINT(5 23)', D(km=100), 'spheroid', '4')))
        # Not enough params should raise a ValueError.
        self.assertRaises(ValueError, len,
                          AustraliaCity.objects.filter(point__distance_lte=('POINT(5 23)',)))
        # Getting all cities w/in 550 miles of Hobart.
        hobart = AustraliaCity.objects.get(name='Hobart')
        qs = AustraliaCity.objects.exclude(name='Hobart').filter(point__distance_lte=(hobart.point, D(mi=550)))
        cities = self.get_names(qs)
        self.assertEqual(cities, ['Batemans Bay', 'Canberra', 'Melbourne'])
        # Cities that are either really close or really far from Wollongong --
        # and using different units of distance.
        wollongong = AustraliaCity.objects.get(name='Wollongong')
        d1, d2 = D(yd=19500), D(nm=400)  # Yards (~17km) & Nautical miles.
        # Normal geodetic distance lookup (uses `distance_sphere` on PostGIS).
        gq1 = Q(point__distance_lte=(wollongong.point, d1))
        gq2 = Q(point__distance_gte=(wollongong.point, d2))
        qs1 = AustraliaCity.objects.exclude(name='Wollongong').filter(gq1 | gq2)
        # Geodetic distance lookup but telling GeoDjango to use `distance_spheroid`
        # instead (we should get the same results b/c accuracy variance won't matter
        # in this test case).
        if postgis:
            gq3 = Q(point__distance_lte=(wollongong.point, d1, 'spheroid'))
            gq4 = Q(point__distance_gte=(wollongong.point, d2, 'spheroid'))
            qs2 = AustraliaCity.objects.exclude(name='Wollongong').filter(gq3 | gq4)
            querysets = [qs1, qs2]
        else:
            querysets = [qs1]
        for qs in querysets:
            cities = self.get_names(qs)
            self.assertEqual(cities, ['Adelaide', 'Hobart', 'Shellharbour', 'Thirroul'])

    def test_area(self):
        """
        Test the `area` GeoQuerySet method.
        """
        # Reference queries:
        # SELECT ST_Area(poly) FROM distapp_southtexaszipcode;
        area_sq_m = [5437908.90234375, 10183031.4389648, 11254471.0073242, 9881708.91772461]
        # Tolerance has to be lower for Oracle
        tol = 2
        for i, z in enumerate(SouthTexasZipcode.objects.order_by('name').area()):
            self.assertAlmostEqual(area_sq_m[i], z.area.sq_m, tol)

    def test_length(self):
        """
        Test the `length` GeoQuerySet method.
        """
        # Reference query (should use `length_spheroid`).
        # SELECT ST_length_spheroid(ST_GeomFromText('<wkt>', 4326) 'SPHEROID["WGS 84",6378137,298.257223563, AUTHORITY["EPSG","7030"]]');
        len_m1 = 473504.769553813
        len_m2 = 4617.668
        if spatialite:
            # Does not support geodetic coordinate systems.
            self.assertRaises(ValueError, Interstate.objects.length)
        else:
            qs = Interstate.objects.length()
            tol = 2 if oracle else 3
            self.assertAlmostEqual(len_m1, qs[0].length.m, tol)
        # Now doing length on a projected coordinate system.
        i10 = SouthTexasInterstate.objects.length().get(name='I-10')
        self.assertAlmostEqual(len_m2, i10.length.m, 2)

    @no_spatialite
    def test_perimeter(self):
        """
        Test the `perimeter` GeoQuerySet method.
        """
        # Reference query:
        # SELECT ST_Perimeter(distapp_southtexaszipcode.poly) FROM distapp_southtexaszipcode;
        perim_m = [18404.3550889361, 15627.2108551001, 20632.5588368978, 17094.5996143697]
        tol = 2 if oracle else 7
        for i, z in enumerate(SouthTexasZipcode.objects.order_by('name').perimeter()):
            self.assertAlmostEqual(perim_m[i], z.perimeter.m, tol)
        # Running on points; should return 0.
        for i, c in enumerate(SouthTexasCity.objects.perimeter(model_att='perim')):
            self.assertEqual(0, c.perim.m)

    def test_measurement_null_fields(self):
        """
        Test the measurement GeoQuerySet methods on fields with NULL values.
        """
        # Creating SouthTexasZipcode w/NULL value.
        SouthTexasZipcode.objects.create(name='78212')
        # Performing distance/area queries against the NULL PolygonField,
        # and ensuring the result of the operations is None.
        htown = SouthTexasCity.objects.get(name='Downtown Houston')
        z = SouthTexasZipcode.objects.distance(htown.point).area().get(name='78212')
        self.assertIsNone(z.distance)
        self.assertIsNone(z.area)
| 49.544271 | 196 | 0.634901 | from __future__ import unicode_literals
from unittest import skipUnless
from django.db import connection
from django.db.models import Q
from django.contrib.gis.geos import HAS_GEOS
from django.contrib.gis.measure import D
from django.contrib.gis.tests.utils import (
HAS_SPATIAL_DB, mysql, oracle, postgis, spatialite, no_oracle, no_spatialite
)
from django.test import TestCase
if HAS_GEOS and HAS_SPATIAL_DB:
from django.contrib.gis.geos import GEOSGeometry, LineString
from .models import (AustraliaCity, Interstate, SouthTexasInterstate,
SouthTexasCity, SouthTexasCityFt, CensusZipcode, SouthTexasZipcode)
@skipUnless(HAS_GEOS and HAS_SPATIAL_DB and not mysql,
"Geos and spatial db (not mysql) are required.")
class DistanceTest(TestCase):
fixtures = ['initial']
if HAS_GEOS and HAS_SPATIAL_DB:
# the coordinate system of the field, EPSG:32140 (Texas South Central
# w/units in meters)
stx_pnt = GEOSGeometry('POINT (-95.370401017314293 29.704867409475465)', 4326)
# Another one for Australia
au_pnt = GEOSGeometry('POINT (150.791 -34.4919)', 4326)
def get_names(self, qs):
cities = [c.name for c in qs]
cities.sort()
return cities
def test_init(self):
self.assertEqual(9, SouthTexasCity.objects.count())
self.assertEqual(9, SouthTexasCityFt.objects.count())
self.assertEqual(11, AustraliaCity.objects.count())
self.assertEqual(4, SouthTexasZipcode.objects.count())
self.assertEqual(4, CensusZipcode.objects.count())
self.assertEqual(1, Interstate.objects.count())
self.assertEqual(1, SouthTexasInterstate.objects.count())
@no_spatialite
def test_dwithin(self):
# Distances -- all should be equal (except for the
# degree/meter pair in au_cities, that's somewhat
tx_dists = [(7000, 22965.83), D(km=7), D(mi=4.349)]
au_dists = [(0.5, 32000), D(km=32), D(mi=19.884)]
tx_cities = ['Downtown Houston', 'Southside Place']
au_cities = ['Mittagong', 'Shellharbour', 'Thirroul', 'Wollongong']
for dist in tx_dists:
if isinstance(dist, tuple):
dist1, dist2 = dist
else:
dist1 = dist2 = dist
qs1 = SouthTexasCity.objects.filter(point__dwithin=(self.stx_pnt, dist1))
qs2 = SouthTexasCityFt.objects.filter(point__dwithin=(self.stx_pnt, dist2))
for qs in qs1, qs2:
self.assertEqual(tx_cities, self.get_names(qs))
for dist in au_dists:
if isinstance(dist, D) and not oracle:
type_error = True
else:
type_error = False
if isinstance(dist, tuple):
if oracle:
dist = dist[1]
else:
dist = dist[0]
qs = AustraliaCity.objects.order_by('name')
if type_error:
self.assertRaises(ValueError, AustraliaCity.objects.filter(point__dwithin=(self.au_pnt, dist)).count)
else:
self.assertListEqual(au_cities, self.get_names(qs.filter(point__dwithin=(self.au_pnt, dist))))
def test_distance_projected(self):
lagrange = GEOSGeometry('POINT(-96.876369 29.905320)', 4326)
m_distances = [147075.069813, 139630.198056, 140888.552826,
138809.684197, 158309.246259, 212183.594374,
70870.188967, 165337.758878, 139196.085105]
# not tested.
ft_distances = [482528.79154625, 458103.408123001, 462231.860397575,
455411.438904354, 519386.252102563, 696139.009211594,
232513.278304279, 542445.630586414, 456679.155883207]
# Testing using different variations of parameters and using models
# with different projected coordinate systems.
dist1 = SouthTexasCity.objects.distance(lagrange, field_name='point')
dist2 = SouthTexasCity.objects.distance(lagrange) # Using GEOSGeometry parameter
if spatialite or oracle:
dist_qs = [dist1, dist2]
else:
dist3 = SouthTexasCityFt.objects.distance(lagrange.ewkt) # Using EWKT string parameter.
dist4 = SouthTexasCityFt.objects.distance(lagrange)
dist_qs = [dist1, dist2, dist3, dist4]
# Original query done on PostGIS, have to adjust AlmostEqual tolerance
# for Oracle.
tol = 2 if oracle else 5
# Ensuring expected distances are returned for each distance queryset.
for qs in dist_qs:
for i, c in enumerate(qs):
self.assertAlmostEqual(m_distances[i], c.distance.m, tol)
self.assertAlmostEqual(ft_distances[i], c.distance.survey_ft, tol)
@no_spatialite
def test_distance_geodetic(self):
tol = 2 if oracle else 5
# Testing geodetic distance calculation with a non-point geometry
# (a LineString of Wollongong and Shellharbour coords).
ls = LineString(((150.902, -34.4245), (150.87, -34.5789)))
if oracle or postgis:
# Reference query:
# SELECT ST_distance_sphere(point, ST_GeomFromText('LINESTRING(150.9020 -34.4245,150.8700 -34.5789)', 4326)) FROM distapp_australiacity ORDER BY name;
distances = [1120954.92533513, 140575.720018241, 640396.662906304,
60580.9693849269, 972807.955955075, 568451.8357838,
40435.4335201384, 0, 68272.3896586844, 12375.0643697706, 0]
qs = AustraliaCity.objects.distance(ls).order_by('name')
for city, distance in zip(qs, distances):
# Testing equivalence to within a meter.
self.assertAlmostEqual(distance, city.distance.m, 0)
# Got the reference distances using the raw SQL statements:
# SELECT ST_distance_spheroid(point, ST_GeomFromText('POINT(151.231341 -33.952685)', 4326), 'SPHEROID["WGS 84",6378137.0,298.257223563]') FROM distapp_australiacity WHERE (NOT (id = 11));
# SELECT ST_distance_sphere(point, ST_GeomFromText('POINT(151.231341 -33.952685)', 4326)) FROM distapp_australiacity WHERE (NOT (id = 11)); st_distance_sphere
if connection.ops.postgis and connection.ops.proj_version_tuple() >= (4, 7, 0):
# PROJ.4 versions 4.7+ have updated datums, and thus different
# distance values.
spheroid_distances = [60504.0628957201, 77023.9489850262, 49154.8867574404,
90847.4358768573, 217402.811919332, 709599.234564757,
640011.483550888, 7772.00667991925, 1047861.78619339,
1165126.55236034]
sphere_distances = [60580.9693849267, 77144.0435286473, 49199.4415344719,
90804.7533823494, 217713.384600405, 709134.127242793,
639828.157159169, 7786.82949717788, 1049204.06569028,
1162623.7238134]
else:
spheroid_distances = [60504.0628825298, 77023.948962654, 49154.8867507115,
90847.435881812, 217402.811862568, 709599.234619957,
640011.483583758, 7772.00667666425, 1047861.7859506,
1165126.55237647]
sphere_distances = [60580.7612632291, 77143.7785056615, 49199.2725132184,
90804.4414289463, 217712.63666124, 709131.691061906,
639825.959074112, 7786.80274606706, 1049200.46122281,
1162619.7297006]
# Testing with spheroid distances first.
hillsdale = AustraliaCity.objects.get(name='Hillsdale')
qs = AustraliaCity.objects.exclude(id=hillsdale.id).distance(hillsdale.point, spheroid=True)
for i, c in enumerate(qs):
self.assertAlmostEqual(spheroid_distances[i], c.distance.m, tol)
if postgis:
# PostGIS uses sphere-only distances by default, testing these as well.
qs = AustraliaCity.objects.exclude(id=hillsdale.id).distance(hillsdale.point)
for i, c in enumerate(qs):
self.assertAlmostEqual(sphere_distances[i], c.distance.m, tol)
@no_oracle # Oracle already handles geographic distance calculation.
def test_distance_transform(self):
# We'll be using a Polygon (created by buffering the centroid
# queries normally, however our field has been transformed to
# a non-geographic system.
z = SouthTexasZipcode.objects.get(name='77005')
# Reference query:
# SELECT ST_Distance(ST_Transform("distapp_censuszipcode"."poly", 32140), ST_GeomFromText('<buffer_wkt>', 32140)) FROM "distapp_censuszipcode";
dists_m = [3553.30384972258, 1243.18391525602, 2186.15439472242]
# Having our buffer in the SRID of the transformation and of the field
# -- should get the same results. The first buffer has no need for
# transformation SQL because it is the same SRID as what was given
# to `transform()`. The second buffer will need to be transformed,
# however.
buf1 = z.poly.centroid.buffer(100)
buf2 = buf1.transform(4269, clone=True)
ref_zips = ['77002', '77025', '77401']
for buf in [buf1, buf2]:
qs = CensusZipcode.objects.exclude(name='77005').transform(32140).distance(buf).order_by('name')
self.assertListEqual(ref_zips, self.get_names(qs))
for i, z in enumerate(qs):
self.assertAlmostEqual(z.distance.m, dists_m[i], 5)
def test_distance_lookups(self):
# Retrieving the cities within a 20km 'donut' w/a 7km radius 'hole'
# (thus, Houston and Southside place will be excluded as tested in
# the `test02_dwithin` above).
qs1 = SouthTexasCity.objects.filter(point__distance_gte=(self.stx_pnt, D(km=7))).filter(point__distance_lte=(self.stx_pnt, D(km=20)))
# Can't determine the units on SpatiaLite from PROJ.4 string, and
if spatialite or oracle:
dist_qs = (qs1,)
else:
qs2 = SouthTexasCityFt.objects.filter(point__distance_gte=(self.stx_pnt, D(km=7))).filter(point__distance_lte=(self.stx_pnt, D(km=20)))
dist_qs = (qs1, qs2)
for qs in dist_qs:
cities = self.get_names(qs)
self.assertEqual(cities, ['Bellaire', 'Pearland', 'West University Place'])
z = SouthTexasZipcode.objects.get(name='77005')
qs = SouthTexasZipcode.objects.exclude(name='77005').filter(poly__distance_lte=(z.poly, D(m=275)))
self.assertEqual(['77025', '77401'], self.get_names(qs))
qs = SouthTexasZipcode.objects.exclude(name='77005').filter(poly__distance_lte=(z.poly, D(m=300)))
self.assertEqual(['77002', '77025', '77401'], self.get_names(qs))
def test_geodetic_distance_lookups(self):
line = GEOSGeometry('LINESTRING(144.9630 -37.8143,151.2607 -33.8870)', 4326)
dist_qs = AustraliaCity.objects.filter(point__distance_lte=(line, D(km=100)))
if oracle or postgis:
self.assertEqual(9, dist_qs.count())
self.assertEqual(['Batemans Bay', 'Canberra', 'Hillsdale',
'Melbourne', 'Mittagong', 'Shellharbour',
'Sydney', 'Thirroul', 'Wollongong'],
self.get_names(dist_qs))
else:
self.assertRaises(ValueError, dist_qs.count)
if spatialite:
return
self.assertRaises(ValueError, len,
AustraliaCity.objects.filter(point__distance_lte=('POINT(5 23)', D(km=100), 'spheroid', '4')))
self.assertRaises(ValueError, len,
AustraliaCity.objects.filter(point__distance_lte=('POINT(5 23)',)))
hobart = AustraliaCity.objects.get(name='Hobart')
qs = AustraliaCity.objects.exclude(name='Hobart').filter(point__distance_lte=(hobart.point, D(mi=550)))
cities = self.get_names(qs)
self.assertEqual(cities, ['Batemans Bay', 'Canberra', 'Melbourne'])
wollongong = AustraliaCity.objects.get(name='Wollongong')
d1, d2 = D(yd=19500), D(nm=400)
gq1 = Q(point__distance_lte=(wollongong.point, d1))
gq2 = Q(point__distance_gte=(wollongong.point, d2))
qs1 = AustraliaCity.objects.exclude(name='Wollongong').filter(gq1 | gq2)
# in this test case).
if postgis:
gq3 = Q(point__distance_lte=(wollongong.point, d1, 'spheroid'))
gq4 = Q(point__distance_gte=(wollongong.point, d2, 'spheroid'))
qs2 = AustraliaCity.objects.exclude(name='Wollongong').filter(gq3 | gq4)
querysets = [qs1, qs2]
else:
querysets = [qs1]
for qs in querysets:
cities = self.get_names(qs)
self.assertEqual(cities, ['Adelaide', 'Hobart', 'Shellharbour', 'Thirroul'])
def test_area(self):
# Reference queries:
# SELECT ST_Area(poly) FROM distapp_southtexaszipcode;
area_sq_m = [5437908.90234375, 10183031.4389648, 11254471.0073242, 9881708.91772461]
# Tolerance has to be lower for Oracle
tol = 2
for i, z in enumerate(SouthTexasZipcode.objects.order_by('name').area()):
self.assertAlmostEqual(area_sq_m[i], z.area.sq_m, tol)
def test_length(self):
# Reference query (should use `length_spheroid`).
# SELECT ST_length_spheroid(ST_GeomFromText('<wkt>', 4326) 'SPHEROID["WGS 84",6378137,298.257223563, AUTHORITY["EPSG","7030"]]');
len_m1 = 473504.769553813
len_m2 = 4617.668
if spatialite:
# Does not support geodetic coordinate systems.
self.assertRaises(ValueError, Interstate.objects.length)
else:
qs = Interstate.objects.length()
tol = 2 if oracle else 3
self.assertAlmostEqual(len_m1, qs[0].length.m, tol)
# Now doing length on a projected coordinate system.
i10 = SouthTexasInterstate.objects.length().get(name='I-10')
self.assertAlmostEqual(len_m2, i10.length.m, 2)
@no_spatialite
def test_perimeter(self):
# Reference query:
# SELECT ST_Perimeter(distapp_southtexaszipcode.poly) FROM distapp_southtexaszipcode;
perim_m = [18404.3550889361, 15627.2108551001, 20632.5588368978, 17094.5996143697]
tol = 2 if oracle else 7
for i, z in enumerate(SouthTexasZipcode.objects.order_by('name').perimeter()):
self.assertAlmostEqual(perim_m[i], z.perimeter.m, tol)
# Running on points; should return 0.
for i, c in enumerate(SouthTexasCity.objects.perimeter(model_att='perim')):
self.assertEqual(0, c.perim.m)
    def test_measurement_null_fields(self):
        """Distance/area computed against a NULL geometry must come back as None."""
        # Creating SouthTexasZipcode w/NULL value.
        SouthTexasZipcode.objects.create(name='78212')
        # Performing distance/area queries against the NULL PolygonField,
        # and ensuring the result of the operations is None.
        htown = SouthTexasCity.objects.get(name='Downtown Houston')
        z = SouthTexasZipcode.objects.distance(htown.point).area().get(name='78212')
        self.assertIsNone(z.distance)
        self.assertIsNone(z.area)
| true | true |
f73785767837a20dce3d7cd775c23b406a9a4698 | 2,171 | py | Python | salt/states/ifttt.py | markgras/salt | d66cd3c935533c63870b83228b978ce43e0ef70d | [
"Apache-2.0"
] | 9,425 | 2015-01-01T05:59:24.000Z | 2022-03-31T20:44:05.000Z | salt/states/ifttt.py | markgras/salt | d66cd3c935533c63870b83228b978ce43e0ef70d | [
"Apache-2.0"
] | 33,507 | 2015-01-01T00:19:56.000Z | 2022-03-31T23:48:20.000Z | salt/states/ifttt.py | markgras/salt | d66cd3c935533c63870b83228b978ce43e0ef70d | [
"Apache-2.0"
] | 5,810 | 2015-01-01T19:11:45.000Z | 2022-03-31T02:37:20.000Z | """
Trigger an event in IFTTT
=========================
This state is useful for trigging events in IFTTT.
.. versionadded:: 2015.8.0
.. code-block:: yaml
ifttt-event:
ifttt.trigger_event:
- event: TestEvent
- value1: 'This state was executed successfully.'
- value2: 'Another value we can send.'
- value3: 'A third value we can send.'
The api key can be specified in the master or minion configuration like below:
.. code-block:: yaml
ifttt:
secret_key: bzMRb-KKIAaNOwKEEw792J7Eb-B3z7muhdhYblJn4V6
"""
def __virtual__():
    """
    Expose this state as ``ifttt``, but only when the matching ifttt
    execution module has been loaded into ``__salt__``.
    """
    if "ifttt.trigger_event" not in __salt__:
        return (False, "ifttt module could not be loaded")
    return "ifttt"
def trigger_event(name, event, value1=None, value2=None, value3=None):
    """
    Trigger an event in IFTTT.

    name
        The unique name for this event.
    event
        The name of the event to trigger in IFTTT.
    value1, value2, value3
        Optional values to send along with the trigger.
    """
    ret = {
        "name": name,
        "changes": {},
        "result": False,
        "comment": "",
    }
    # In test mode, describe what would be sent without calling out to IFTTT.
    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] = "The following trigger would be sent to IFTTT: {}".format(
            event
        )
        return ret
    outcome = __salt__["ifttt.trigger_event"](
        event=event, value1=value1, value2=value2, value3=value3
    )
    ret["result"] = outcome
    if outcome:
        ret["result"] = True
        ret["comment"] = "Triggered Event: {}".format(name)
    else:
        ret["comment"] = "Failed to trigger event: {}".format(name)
    return ret
| 24.670455 | 83 | 0.599263 |
def __virtual__():
if "ifttt.trigger_event" in __salt__:
return "ifttt"
return (False, "ifttt module could not be loaded")
def trigger_event(name, event, value1=None, value2=None, value3=None):
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
if __opts__["test"]:
ret["comment"] = "The following trigger would be sent to IFTTT: {}".format(
event
)
ret["result"] = None
return ret
ret["result"] = __salt__["ifttt.trigger_event"](
event=event, value1=value1, value2=value2, value3=value3
)
if ret and ret["result"]:
ret["result"] = True
ret["comment"] = "Triggered Event: {}".format(name)
else:
ret["comment"] = "Failed to trigger event: {}".format(name)
return ret
| true | true |
f7378620c3d9001e8c20431b0cbc80ad157bf7d5 | 2,433 | py | Python | tensorflow/python/data/experimental/kernel_tests/optimization/latency_all_edges_test.py | uve/tensorflow | e08079463bf43e5963acc41da1f57e95603f8080 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/data/experimental/kernel_tests/optimization/latency_all_edges_test.py | uve/tensorflow | e08079463bf43e5963acc41da1f57e95603f8080 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/data/experimental/kernel_tests/optimization/latency_all_edges_test.py | uve/tensorflow | e08079463bf43e5963acc41da1f57e95603f8080 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the `LatencyAllEdges` optimization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.kernel_tests import stats_dataset_test_base
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.experimental.ops import stats_aggregator
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.platform import test
class LatencyAllEdgesTest(stats_dataset_test_base.StatsDatasetTestBase):
  """Tests for the `latency_all_edges` stats option of tf.data."""

  def testLatencyStatsOptimization(self):
    """Checks LatencyStats nodes are inserted and each records one sample."""
    aggregator = stats_aggregator.StatsAggregator()
    # assert_next pins the rewritten op sequence: a LatencyStats node must be
    # interleaved with the original Tensor -> Map -> Prefetch stages.
    dataset = dataset_ops.Dataset.from_tensors(1).apply(
        optimization.assert_next(
            ["LatencyStats", "Map", "LatencyStats", "Prefetch",
             "LatencyStats"])).map(lambda x: x * x).prefetch(1)
    options = dataset_ops.Options()
    # Disable default rewrites so only the latency instrumentation applies.
    options.experimental_optimization.apply_default_optimizations = False
    options.experimental_stats.latency_all_edges = True
    options.experimental_stats.aggregator = aggregator
    dataset = dataset.with_options(options)
    self.assertDatasetProduces(
        dataset,
        expected_output=[1],
        requires_initialization=True,
        num_test_iterations=1)
    handle = self.getHandle(aggregator)
    # One latency sample should have been recorded per instrumented node.
    self.assertStatisticsHasCount(
        handle, self.regexForNodeName("record_latency::TensorDataset"), 1)
    self.assertStatisticsHasCount(
        handle, self.regexForNodeName("record_latency::MapDataset"), 1)
    self.assertStatisticsHasCount(
        handle, self.regexForNodeName("record_latency::PrefetchDataset"), 1)
if __name__ == "__main__":
test.main()
| 43.446429 | 85 | 0.72462 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.kernel_tests import stats_dataset_test_base
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.experimental.ops import stats_aggregator
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.platform import test
class LatencyAllEdgesTest(stats_dataset_test_base.StatsDatasetTestBase):
def testLatencyStatsOptimization(self):
aggregator = stats_aggregator.StatsAggregator()
dataset = dataset_ops.Dataset.from_tensors(1).apply(
optimization.assert_next(
["LatencyStats", "Map", "LatencyStats", "Prefetch",
"LatencyStats"])).map(lambda x: x * x).prefetch(1)
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
options.experimental_stats.latency_all_edges = True
options.experimental_stats.aggregator = aggregator
dataset = dataset.with_options(options)
self.assertDatasetProduces(
dataset,
expected_output=[1],
requires_initialization=True,
num_test_iterations=1)
handle = self.getHandle(aggregator)
self.assertStatisticsHasCount(
handle, self.regexForNodeName("record_latency::TensorDataset"), 1)
self.assertStatisticsHasCount(
handle, self.regexForNodeName("record_latency::MapDataset"), 1)
self.assertStatisticsHasCount(
handle, self.regexForNodeName("record_latency::PrefetchDataset"), 1)
if __name__ == "__main__":
test.main()
| true | true |
f73787f0a33a0e0fdf8e3030e315016ac855edea | 3,256 | py | Python | iceScripts/isdacProfileModifications_dry_above.py | JaakkoAhola/LES-scripting | 1ebe99ce4292e58581bf50615cb8e0aa3d0c0af2 | [
"MIT"
] | null | null | null | iceScripts/isdacProfileModifications_dry_above.py | JaakkoAhola/LES-scripting | 1ebe99ce4292e58581bf50615cb8e0aa3d0c0af2 | [
"MIT"
] | null | null | null | iceScripts/isdacProfileModifications_dry_above.py | JaakkoAhola/LES-scripting | 1ebe99ce4292e58581bf50615cb8e0aa3d0c0af2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 28 13:38:05 2018

Dry out a layer of the ISDAC sounding: between model levels ``dry0`` and
``dryL`` the relative humidity is forced towards ``rh_target`` by iteratively
reducing the total-water mixing ratio at each level until the recomputed RH
profile matches the target.  Finally the original and modified profiles are
plotted for comparison.

@author: aholaj
"""
import numpy as np
import sound_in_profiles as sp
import PythonMethods as pm
import ModDataPros as mdp
from copy import deepcopy
from FindCloudBase import calc_rh_profile
from ECLAIR_calcs import calc_rw
import time
start = time.time()
# Read the ISDAC sounding (case files live under bin/case_isdac).
isdac = sp.Profiles("sound_in3.5","bin/case_isdac")
rh = isdac.getRH()   # relative humidity [%]
q = isdac.getQ()     # total water mixing ratio [g/kg] -- presumably; TODO confirm units
z = isdac.getZ()     # height [m]
t = isdac.getT()     # temperature
p = isdac.getPS()    # pressure
u = isdac.getU()
v = isdac.getV()
# Report the levels where RH crosses 100 % (sign change of rh - 100).
osc = rh - 100. * np.ones(np.shape(rh))
ab = osc[0]
for s in range(len(osc)):
    if np.sign(ab) * np.sign(osc[s]) == -1:
        print(s)
    ab = osc[s]
# Level indices of the layer to dry out, and its target RH [%].
dry0 = 236
dryL = 364
rh_target = 20
rh_mod = deepcopy(rh)
rh_mod[dry0:(dryL+1)] = rh_target
# calc_rh_profile expects the surface pressure in the first "height" slot
# (restored to 0 m before plotting below).
z[0] = isdac.getP0()
q_mod = deepcopy(q)
q_mod = np.multiply(q_mod, 1.e-3)  # g/kg -> kg/kg
rh_temp = 100000. * np.ones(np.shape(rh))  # sentinel: forces at least one iteration
for i in range(dry0, dryL + 1):
    k = 1
    incr = 1.e-3       # step size for adjusting q at this level
    etumerkki = 1.     # search direction (sign); flipped whenever we overshoot
    print('z', i)
    vanha = np.abs(rh_temp[i] - rh_mod[i])  # previous RH error at this level
    # Guard: keep `uusi` defined even when the loop body never executes
    # (the original could hit a NameError at the print below in that case).
    uusi = vanha
    switchCount = 0
    # Adjust q at level i until the recomputed RH matches the target within
    # 0.01 %, or the direction has been flipped 300 times.
    while (vanha > 0.01) and (switchCount < 300):
        # Element-wise clamp so the mixing ratio can never go negative.
        # BUGFIX: np.max(x, 0) treats 0 as the *axis* argument and never
        # clamps; np.maximum is the intended element-wise maximum.
        q_mod[i] = np.maximum(q_mod[i] - etumerkki * k * incr, 0.)
        rh_temp, b = calc_rh_profile(t, q_mod, z)
        uusi = np.abs(rh_temp[i] - rh_mod[i])
        if uusi - vanha > 0:
            # Overshot: reverse direction and refine the step size.
            print('switch')
            etumerkki = -1 * etumerkki
            incr = incr * 1.e-1
            switchCount += 1
            incr = max(incr, 1.e-9)
        vanha = uusi
        k += 1
    print(uusi, rh_temp[i], rh_mod[i])
    print('q[i]', q[i], 'q_mod[i]', q_mod[i] * 1.e+3)
    print(' ')
rh_iter, ps_iter = calc_rh_profile(t, q_mod, z)
q_mod = np.multiply(q_mod, 1.e3)  # back to g/kg
#isdac.writeNewSoundIn("sound_in3.5_rh_dry_above_"+str(rh_target), z, t, q_mod, u, v)
#####################
### plotting ########
####################
z[0] = 0.
fig, ax = mdp.plottaa( rh, z, tit = 'Relative humidity', xl = 'rel. humid. [%]', yl = 'height [m]', markers=False, uusikuva = True, LEGEND = True, omavari = 'k' )
fig, ax = mdp.plottaa( rh_mod[dry0-1:(dryL+1)+1], z[dry0-1:(dryL+1)+1], tit = 'Relative humidity dry-above', xl = 'rel. humid. [%]', yl = 'height [m]', markers=False, uusikuva = False, LEGEND = True, omavari = 'r' )
xticks = list(range(0, 111, 10))
xlabels = list(map(str, xticks))
ax.set_xticks( xticks )
ax.set_xticklabels( xlabels )
####################
mdp.plottaa( q, z, tit = 'Total water mixing ratio', xl = 'mix. rat. [g/kg]', yl = 'height [m]', markers=False, uusikuva = True, LEGEND = True, omavari = 'k' )
mdp.plottaa( q_mod[dry0:(dryL+1)], z[dry0:(dryL+1)], tit = 'Total water mixing ratio dry-below', xl = 'mix. rat. [g/kg]', yl = 'height [m]', markers=False, uusikuva = False, LEGEND = True, omavari = 'b' )
mdp.plot_lopetus()
end = time.time()
print('suoritusaika', end-start) | 23.092199 | 215 | 0.57801 |
import numpy as np
import sound_in_profiles as sp
import PythonMethods as pm
import ModDataPros as mdp
from copy import deepcopy
from FindCloudBase import calc_rh_profile
from ECLAIR_calcs import calc_rw
import time
start = time.time()
isdac = sp.Profiles("sound_in3.5","bin/case_isdac")
rh = isdac.getRH()
q = isdac.getQ()
z = isdac.getZ()
t = isdac.getT()
p = isdac.getPS()
u = isdac.getU()
v = isdac.getV()
osc=rh-100.*np.ones(np.shape(rh))
ab=osc[0];
for s in range(len(osc)):
if np.sign(ab)*np.sign(osc[s]) == -1:
print(s)
ab = osc[s]
dry0 = 236
dryL =364
rh_target = 20
rh_mod = deepcopy(rh)
rh_mod[dry0:(dryL+1)] = rh_target
z[0] = isdac.getP0()
q_mod = deepcopy(q)
q_mod = np.multiply(q_mod, 1.e-3)
rh_temp = 100000.*np.ones(np.shape(rh))
for i in range(dry0,dryL+1):
k = 1
incr = 1.
incr = incr*1.e-3
etumerkki = 1.
print('z', i)
vanha = np.abs( rh_temp[i] - rh_mod[i] )
switchCount = 0
while (( vanha > 0.01 ) and (switchCount < 300)):
q_mod[i] = np.max(q_mod[i]-etumerkki*k*incr,0)
rh_temp, b = calc_rh_profile( t, q_mod, z )
uusi = np.abs( rh_temp[i] - rh_mod[i] )
if uusi-vanha > 0:
print('switch')
etumerkki = -1*etumerkki
incr = incr*1.e-1
switchCount += 1
incr = max(incr, 1.e-9)
vanha = uusi
k += 1
print(uusi, rh_temp[i], rh_mod[i])
print('q[i]', q[i], 'q_mod[i]', q_mod[i]*1.e+3)
print(' ')
rh_iter, ps_iter = calc_rh_profile( t, q_mod, z )
q_mod = np.multiply(q_mod, 1.e3)
( xticks )
ax.set_xticklabels( xlabels )
L+1)], z[dry0:(dryL+1)], tit = 'Total water mixing ratio dry-below', xl = 'mix. rat. [g/kg]', yl = 'height [m]', markers=False, uusikuva = False, LEGEND = True, omavari = 'b' )
mdp.plot_lopetus()
end = time.time()
print('suoritusaika', end-start) | true | true |
f737882edefd04e1461379f3dc323c5656e7fcdd | 7,654 | py | Python | 2021/Day18/Day18-Prob2-Biggest-Magnitude.py | guilhermebaos/Advent-of-Code-Solutions | 232facf72a21284478134b2c56357352b4aaaf74 | [
"MIT"
] | null | null | null | 2021/Day18/Day18-Prob2-Biggest-Magnitude.py | guilhermebaos/Advent-of-Code-Solutions | 232facf72a21284478134b2c56357352b4aaaf74 | [
"MIT"
] | null | null | null | 2021/Day18/Day18-Prob2-Biggest-Magnitude.py | guilhermebaos/Advent-of-Code-Solutions | 232facf72a21284478134b2c56357352b4aaaf74 | [
"MIT"
] | null | null | null | from copy import deepcopy
# Puzzle Input ----------
with open('Day18-Input.txt', 'r') as file:
puzzle = file.read().split('\n')
with open('Day18-Test01.txt', 'r') as file:
test01 = file.read().split('\n')
with open('Day18-Test02.txt', 'r') as file:
test02 = file.read().split('\n')
# Main Code ----------
# SnailNumber class
class SnailNumber:
    """A snailfish number: a binary tree whose leaves are plain ints."""

    def __init__(self, snail_number, parent=None):
        """Build the tree from a nested [x, y] list, wrapping sub-lists."""
        left, right = snail_number[0], snail_number[1]
        if type(left) == list:
            left = SnailNumber(left, parent=self)
        if type(right) == list:
            right = SnailNumber(right, parent=self)
        self.x = left
        self.y = right
        self.parent = parent
        # Cache a flat left-to-right view of the literal leaves and sub-trees.
        self.numbers_found, self.snails_found = explore(self)

    def __add__(self, other):
        """Pair the two numbers, then reduce (explode/split) until stable."""
        total = SnailNumber([self, other])
        while True:
            if explode(total):
                continue
            if not split(total):
                break
        return total

    def __str__(self):
        return f'[{str(self.x)}, {str(self.y)}]'
def explore(snail_number, depth=0):
    """
    Walk the tree below ``snail_number`` and collect, left to right:
    literal leaves as ``[value, parent_pair, 'x'|'y', depth]`` records, and
    every SnailNumber sub-tree (re-linking its parent pointer on the way).
    Returns ``(numbers_found, snails_found)``.
    """
    numbers_found = []
    snails_found = []
    for position in ('x', 'y'):
        child = getattr(snail_number, position)
        if isinstance(child, SnailNumber):
            snails_found.append(child)
            child.parent = snail_number
            # Recurse into the sub-tree and merge its findings.
            sub_numbers, sub_snails = explore(child, depth + 1)
            numbers_found.extend(sub_numbers)
            snails_found.extend(sub_snails)
        elif type(child) == int:
            numbers_found.append([child, snail_number, position, depth])
    return numbers_found, snails_found
def explode(snail_number):
    """
    Perform at most one explosion on the leftmost pair nested four deep.

    ``snail_number.numbers_found`` is a left-to-right list of
    ``[value, parent_pair, 'x'|'y', depth]`` records for every literal leaf.
    A depth-4 'x' record marks an exploding pair; its 'y' partner is the next
    record.  Returns True if an explosion happened, else False.
    """
    index = 0
    while index < len(snail_number.numbers_found):
        number, num_parent, position, depth = snail_number.numbers_found[index]
        # Only depth-4 'x' records are considered: the matching 'y' literal of
        # the same pair is handled together with it in a single explosion.
        if depth == 4 and position == 'x':
            explode_x = number
            explode_y = snail_number.numbers_found[index + 1][0]
            # Add explode_x to the literal immediately to the left, if any
            # (updating both the flat record and the actual tree node).
            if index > 0:
                number, num_parent, position, depth = snail_number.numbers_found[index - 1]
                snail_number.numbers_found[index - 1][0] += explode_x
                if position == 'x':
                    num_parent.x += explode_x
                else:
                    num_parent.y += explode_x
            # Add explode_y to the literal just past the pair (two records
            # ahead), if any.
            if index < len(snail_number.numbers_found) - 2:
                number, num_parent, position, depth = snail_number.numbers_found[index + 2]
                snail_number.numbers_found[index + 2][0] += explode_y
                if position == 'x':
                    num_parent.x += explode_y
                else:
                    num_parent.y += explode_y
            # Replace the exploded pair with a literal 0 on its grandparent.
            # NOTE(review): this zeroes whichever child of the grandparent is
            # still a SnailNumber; it relies on left-to-right processing making
            # the exploding pair the leftmost such child -- confirm.
            number, num_parent, position, depth = snail_number.numbers_found[index]
            if type(num_parent.parent.x) != int:
                zero_position = 'x'
                num_parent.parent.x = 0
            else:
                zero_position = 'y'
                num_parent.parent.y = 0
            # Collapse the pair's two records into one record for the new 0.
            snail_number.numbers_found.pop(index)
            snail_number.numbers_found[index] = [0, num_parent.parent, zero_position, depth - 1]
            return True
        index += 1
    return False
def split(snail_number):
    """
    Split every literal >= 10 into a pair [floor(n/2), ceil(n/2)], scanning
    left to right.  Returns True as soon as a split happens at depth 3 (the
    new pair then sits at depth 4 and must be exploded before continuing);
    returns False once the scan finishes with no explosion required.
    """
    index = 0
    while index < len(snail_number.numbers_found):
        # Next literal-leaf record: [value, parent, 'x'|'y', depth].
        number, num_parent, position, depth = snail_number.numbers_found[index]
        if number >= 10:
            # Halves of the split: left rounds down, right takes the rest.
            left_element = int(number / 2)
            right_element = number - int(number / 2)
            # Hang the new pair on the parent in place of the old literal.
            if position == 'x':
                num_parent.x = SnailNumber([left_element, right_element], num_parent)
                this_parent = num_parent.x
            else:
                num_parent.y = SnailNumber([left_element, right_element], num_parent)
                this_parent = num_parent.y
            # Replace the old record with the two new literal records.
            snail_number.numbers_found[index] = [left_element, this_parent, 'x', depth + 1]
            snail_number.numbers_found.insert(index + 1, [right_element, this_parent, 'y', depth + 1])
            # Step back so the new left element is re-examined after the
            # index += 1 below (it may itself still be >= 10).
            index -= 1
            if depth == 3:
                return True
        index += 1
    return False
# Find the magnitude of a snail number
def magnitude(snail_number):
    """Magnitude of a snail number: 3 * magnitude(left) + 2 * magnitude(right)."""
    weighted = []
    for weight, component in ((3, snail_number.x), (2, snail_number.y)):
        value = component if type(component) == int else magnitude(component)
        weighted.append(weight * value)
    return sum(weighted)
def parse_number(num_str: list):
    """
    Consume characters from the front of ``num_str`` and parse one element.

    Returns ``(remaining_chars, value)`` where value is either a single-digit
    int or a nested ``[left, right]`` list.
    """
    left = right = None
    while num_str:
        token = num_str.pop(0)
        if token == '[':
            # Opening bracket: the next element is the left half.
            num_str, left = parse_number(num_str)
        elif token == ',':
            # Comma: the next element is the right half.
            num_str, right = parse_number(num_str)
        elif token == ']':
            # Closing bracket: this pair is complete.
            return num_str, [left, right]
        else:
            # A bare digit is a literal leaf.
            return num_str, int(token)
    return num_str, []
# Return the magnitude of the sum of all numbers
def solve_homework(num_str: list):
    """Return the largest magnitude obtainable by adding two distinct numbers."""
    # Parse every input line into a SnailNumber.
    snail_numbers = [SnailNumber(parse_number(list(line))[1]) for line in num_str]
    best = []
    # Snailfish addition is not commutative, so try both orders of each pair.
    for first in snail_numbers:
        for second in snail_numbers:
            if first == second:
                continue
            # deepcopy because reduction (explode/split) mutates the operands.
            lhs, rhs = deepcopy(first), deepcopy(second)
            best.append(magnitude(lhs + rhs))
    return max(best)
# Tests and Solution ----------
print(solve_homework(test01))
print(solve_homework(test02))
print(solve_homework(puzzle))
| 33.867257 | 115 | 0.594591 | from copy import deepcopy
with open('Day18-Input.txt', 'r') as file:
puzzle = file.read().split('\n')
with open('Day18-Test01.txt', 'r') as file:
test01 = file.read().split('\n')
with open('Day18-Test02.txt', 'r') as file:
test02 = file.read().split('\n')
class SnailNumber:
def __init__(self, snail_number, parent=None):
x = snail_number[0]
y = snail_number[1]
if type(x) == list:
x = SnailNumber(x, parent=self)
if type(y) == list:
y = SnailNumber(y, parent=self)
self.x = x
self.y = y
self.parent = parent
self.numbers_found, self.snails_found = explore(self)
# Addition
def __add__(self, other):
# The result is the composition of the numbers
result = SnailNumber([self, other])
# Keep trying to explode and split the number until it is impossible to do so (it's reduced)
did_explode, did_split = True, False
while did_explode or did_split:
did_explode = explode(result)
if did_explode:
continue
did_split = split(result)
return result
def __str__(self):
return f'[{str(self.x)}, {str(self.y)}]'
def explore(snail_number, depth=0):
x, y = snail_number.x, snail_number.y
snails_found = []
numbers_found = []
for item, position in zip([x, y], ['x', 'y']):
if isinstance(item, SnailNumber):
# Snail Numbers and their parents
snails_found += [item]
item.parent = snail_number
# Explore their children
new_numbers_found, new_snails_found = explore(item, depth + 1)
# Save the new numbers and snail numbers found
numbers_found += new_numbers_found
snails_found += new_snails_found
elif type(item) == int:
# Literal number found, store it in a tuple with:
# The value of the number
# The parent
# The position -> either 'x' or 'y'
# The depth
numbers_found += [[item, snail_number, position, depth]]
return numbers_found, snails_found
def explode(snail_number):
index = 0
while index < len(snail_number.numbers_found):
number, num_parent, position, depth = snail_number.numbers_found[index]
# If we have to explode a number
# We consider only the literal numbers in position 'x' because we can also do 'y' numbers at the same times
if depth == 4 and position == 'x':
explode_x = number
explode_y = snail_number.numbers_found[index + 1][0]
# Add exploded_x to the number before, if it exists
if index > 0:
number, num_parent, position, depth = snail_number.numbers_found[index - 1]
snail_number.numbers_found[index - 1][0] += explode_x
if position == 'x':
num_parent.x += explode_x
else:
num_parent.y += explode_x
# Add exploded_y to the next number, if it exists
if index < len(snail_number.numbers_found) - 2:
number, num_parent, position, depth = snail_number.numbers_found[index + 2]
snail_number.numbers_found[index + 2][0] += explode_y
if position == 'x':
num_parent.x += explode_y
else:
num_parent.y += explode_y
# Put a 0 in the exploded pair's place (by accessing the parent)
number, num_parent, position, depth = snail_number.numbers_found[index]
if type(num_parent.parent.x) != int:
zero_position = 'x'
num_parent.parent.x = 0
else:
zero_position = 'y'
num_parent.parent.y = 0
snail_number.numbers_found.pop(index)
snail_number.numbers_found[index] = [0, num_parent.parent, zero_position, depth - 1]
return True
index += 1
return False
def split(snail_number):
index = 0
while index < len(snail_number.numbers_found):
number, num_parent, position, depth = snail_number.numbers_found[index]
if number >= 10:
left_element = int(number / 2)
right_element = number - int(number / 2)
if position == 'x':
num_parent.x = SnailNumber([left_element, right_element], num_parent)
this_parent = num_parent.x
else:
num_parent.y = SnailNumber([left_element, right_element], num_parent)
this_parent = num_parent.y
snail_number.numbers_found[index] = [left_element, this_parent, 'x', depth + 1]
snail_number.numbers_found.insert(index + 1, [right_element, this_parent, 'y', depth + 1])
index -= 1
if depth == 3:
return True
index += 1
return False
def magnitude(snail_number):
x, y = snail_number.x, snail_number.y
mag_x = x if type(x) == int else magnitude(x)
mag_y = y if type(y) == int else magnitude(y)
return 3 * mag_x + 2 * mag_y
def parse_number(num_str: list):
x, y = None, None
snail_number = []
while len(num_str) > 0:
next_char = num_str.pop(0)
if next_char == '[':
num_str, x = parse_number(num_str)
elif next_char == ',':
num_str, y = parse_number(num_str)
elif next_char == ']':
snail_number = [x, y]
break
else:
return num_str, int(next_char)
return num_str, snail_number
def solve_homework(num_str: list):
snail_numbers = []
for num in num_str:
snail_numbers += [parse_number(list(num))[1]]
snail_numbers = list(map(SnailNumber, snail_numbers))
all_magnitudes = []
for num1 in snail_numbers:
for num2 in snail_numbers:
temp1, temp2 = deepcopy(num1), deepcopy(num2)
if num1 != num2:
all_magnitudes += [magnitude(temp1 + temp2)]
return max(all_magnitudes)
print(solve_homework(test01))
print(solve_homework(test02))
print(solve_homework(puzzle))
| true | true |
f737886df794ae13500f275031a3c22906034d18 | 31,165 | py | Python | test/python/transpiler/test_pass_scheduler.py | dominik-steenken/qiskit-terra | 1e04bad5067610abda5e7cbba36939745075f3b9 | [
"Apache-2.0"
] | 2 | 2021-09-06T19:25:36.000Z | 2021-11-17T10:46:12.000Z | test/python/transpiler/test_pass_scheduler.py | gaurav-iiser/qiskit-terra | 3554a7e9ab5c77432ed5ccaa106fb8dc15553756 | [
"Apache-2.0"
] | null | null | null | test/python/transpiler/test_pass_scheduler.py | gaurav-iiser/qiskit-terra | 3554a7e9ab5c77432ed5ccaa106fb8dc15553756 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2018, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
# pylint: disable=invalid-name
"""Transpiler testing"""
import unittest.mock
from qiskit import QuantumRegister, QuantumCircuit
from qiskit.dagcircuit import DAGCircuit
from qiskit.transpiler import PassManager
from qiskit.transpiler import transpile_dag
from qiskit.transpiler import TranspilerAccessError, TranspilerError
from qiskit.transpiler.passmanager import DoWhileController, ConditionalController, FlowController
from qiskit.converters import circuit_to_dag
from qiskit.test import QiskitTestCase
from ._dummy_passes import (PassA_TP_NR_NP, PassB_TP_RA_PA, PassC_TP_RA_PA,
PassD_TP_NR_NP, PassE_AP_NR_NP, PassF_reduce_dag_property,
PassH_Bad_TP, PassI_Bad_AP, PassJ_Bad_NoReturn,
PassK_check_fixed_point_property, PassM_AP_NR_NP)
logger = "LocalLogger"
class SchedulerTestCase(QiskitTestCase):
    """Base test case providing scheduler-specific assertions."""

    def assertScheduler(self, dag, passmanager, expected):
        """
        Run ``transpile_dag(dag, pass_manager=passmanager)`` and assert that
        the scheduled passes logged exactly the messages in ``expected``.

        Args:
            dag (DAGCircuit): DAG circuit to transform via transpilation.
            passmanager (PassManager): pass manager instance for the transpilation process.
            expected (list): List of things the passes are logging.
        """
        with self.assertLogs(logger, level='INFO') as captured:
            result = transpile_dag(dag, pass_manager=passmanager)
        self.assertIsInstance(result, DAGCircuit)
        logged = [record.message for record in captured.records]
        self.assertEqual(logged, expected)

    def assertSchedulerRaises(self, dag, passmanager, expected, exception_type):
        """
        Run the transpilation, assert it raises ``exception_type``, and assert
        that the passes logged exactly ``expected`` before failing.

        Args:
            dag (DAGCircuit): DAG circuit to transform via transpilation.
            passmanager (PassManager): pass manager instance for the transpilation process.
            expected (list): List of things the passes are logging.
            exception_type (Exception): Exception that is expected to be raised.
        """
        with self.assertLogs(logger, level='INFO') as captured:
            self.assertRaises(exception_type, transpile_dag, dag,
                              pass_manager=passmanager)
        logged = [record.message for record in captured.records]
        self.assertEqual(logged, expected)
class TestPassManagerInit(SchedulerTestCase):
    """The pass manager schedules passes supplied at __init__ time."""
    def test_passes(self):
        """A single chain of passes, with Requires and Preserves, at __init__ time.

        PassA runs once before PassC/PassB (both require it); PassD preserves
        nothing, so PassA has to run again before the final PassB.
        """
        dag = circuit_to_dag(QuantumCircuit(QuantumRegister(1)))
        passmanager = PassManager(passes=[
            PassC_TP_RA_PA(),  # Request: PassA / Preserves: PassA
            PassB_TP_RA_PA(),  # Request: PassA / Preserves: PassA
            PassD_TP_NR_NP(argument1=[1, 2]),  # Requires: {}/ Preserves: {}
            PassB_TP_RA_PA()])
        self.assertScheduler(dag, passmanager, ['run transformation pass PassA_TP_NR_NP',
                                                'run transformation pass PassC_TP_RA_PA',
                                                'run transformation pass PassB_TP_RA_PA',
                                                'run transformation pass PassD_TP_NR_NP',
                                                'argument [1, 2]',
                                                'run transformation pass PassA_TP_NR_NP',
                                                'run transformation pass PassB_TP_RA_PA'])
class TestUseCases(SchedulerTestCase):
""" The pass manager schedules passes in, sometimes, tricky ways. These tests combine passes in
many ways, and checks that passes are ran in the right order. """
    def setUp(self):
        """Give every test a fresh one-qubit DAG and an empty pass manager."""
        self.dag = circuit_to_dag(QuantumCircuit(QuantumRegister(1)))
        self.passmanager = PassManager()
    def test_chain(self):
        """A single chain of passes, with Requires and Preserves.

        PassD preserves nothing, so the PassA analysis must rerun before the
        final PassB (visible in the expected log below).
        """
        self.passmanager.append(PassC_TP_RA_PA())  # Request: PassA / Preserves: PassA
        self.passmanager.append(PassB_TP_RA_PA())  # Request: PassA / Preserves: PassA
        self.passmanager.append(PassD_TP_NR_NP(argument1=[1, 2]))  # Requires: {}/ Preserves: {}
        self.passmanager.append(PassB_TP_RA_PA())
        self.assertScheduler(self.dag, self.passmanager, ['run transformation pass PassA_TP_NR_NP',
                                                          'run transformation pass PassC_TP_RA_PA',
                                                          'run transformation pass PassB_TP_RA_PA',
                                                          'run transformation pass PassD_TP_NR_NP',
                                                          'argument [1, 2]',
                                                          'run transformation pass PassA_TP_NR_NP',
                                                          'run transformation pass PassB_TP_RA_PA'])
    def test_conditional_passes_true(self):
        """A pass set with a conditional parameter. The callable is True."""
        # PassE sets property=True, so the conditional PassA does run.
        self.passmanager.append(PassE_AP_NR_NP(True))
        self.passmanager.append(PassA_TP_NR_NP(),
                                condition=lambda property_set: property_set['property'])
        self.assertScheduler(self.dag, self.passmanager, ['run analysis pass PassE_AP_NR_NP',
                                                          'set property as True',
                                                          'run transformation pass PassA_TP_NR_NP'])
    def test_conditional_passes_false(self):
        """A pass set with a conditional parameter. The callable is False."""
        # PassE sets property=False, so the conditional PassA is skipped.
        self.passmanager.append(PassE_AP_NR_NP(False))
        self.passmanager.append(PassA_TP_NR_NP(),
                                condition=lambda property_set: property_set['property'])
        self.assertScheduler(self.dag, self.passmanager, ['run analysis pass PassE_AP_NR_NP',
                                                          'set property as False'])
    def test_conditional_and_loop(self):
        """Run a conditional first, then a loop.

        PassF keeps reducing ``dag.property`` (8 -> 6 -> 5 -> 4 -> 3 -> 2);
        the do_while loop keeps iterating until PassK reports a fixed point,
        which needs the value 2 to be observed on consecutive iterations.
        """
        self.passmanager.append(PassE_AP_NR_NP(True))
        self.passmanager.append(
            [PassK_check_fixed_point_property(),
             PassA_TP_NR_NP(),
             PassF_reduce_dag_property()],
            do_while=lambda property_set: not property_set['fixed_point']['property'],
            condition=lambda property_set: property_set['property'])
        self.assertScheduler(self.dag, self.passmanager,
                             ['run analysis pass PassE_AP_NR_NP',
                              'set property as True',
                              'run analysis pass PassG_calculates_dag_property',
                              'set property as 8 (from dag.property)',
                              'run analysis pass PassK_check_fixed_point_property',
                              'run transformation pass PassA_TP_NR_NP',
                              'run transformation pass PassF_reduce_dag_property',
                              'dag property = 6',
                              'run analysis pass PassG_calculates_dag_property',
                              'set property as 6 (from dag.property)',
                              'run analysis pass PassK_check_fixed_point_property',
                              'run transformation pass PassA_TP_NR_NP',
                              'run transformation pass PassF_reduce_dag_property',
                              'dag property = 5',
                              'run analysis pass PassG_calculates_dag_property',
                              'set property as 5 (from dag.property)',
                              'run analysis pass PassK_check_fixed_point_property',
                              'run transformation pass PassA_TP_NR_NP',
                              'run transformation pass PassF_reduce_dag_property',
                              'dag property = 4',
                              'run analysis pass PassG_calculates_dag_property',
                              'set property as 4 (from dag.property)',
                              'run analysis pass PassK_check_fixed_point_property',
                              'run transformation pass PassA_TP_NR_NP',
                              'run transformation pass PassF_reduce_dag_property',
                              'dag property = 3',
                              'run analysis pass PassG_calculates_dag_property',
                              'set property as 3 (from dag.property)',
                              'run analysis pass PassK_check_fixed_point_property',
                              'run transformation pass PassA_TP_NR_NP',
                              'run transformation pass PassF_reduce_dag_property',
                              'dag property = 2',
                              'run analysis pass PassG_calculates_dag_property',
                              'set property as 2 (from dag.property)',
                              'run analysis pass PassK_check_fixed_point_property',
                              'run transformation pass PassA_TP_NR_NP',
                              'run transformation pass PassF_reduce_dag_property',
                              'dag property = 2',
                              'run analysis pass PassG_calculates_dag_property',
                              'set property as 2 (from dag.property)',
                              'run analysis pass PassK_check_fixed_point_property',
                              'run transformation pass PassA_TP_NR_NP',
                              'run transformation pass PassF_reduce_dag_property',
                              'dag property = 2'])
def test_loop_and_conditional(self):
    """Run a loop first, then a conditional.

    The 'condition' flow controller is re-registered so it is evaluated *after*
    'do_while'; the loop therefore runs to its fixed point (dag property
    reduced 8 -> 6 -> 5 -> 4 -> 3 -> 2 -> 2 -> 2, per the expected log) and the
    conditional never suppresses it.
    """
    # Re-register 'condition' so it sits at the end of the controller chain.
    FlowController.remove_flow_controller('condition')
    FlowController.add_flow_controller('condition', ConditionalController)
    self.passmanager.append(PassK_check_fixed_point_property())
    self.passmanager.append(
        [PassK_check_fixed_point_property(),
         PassA_TP_NR_NP(),
         PassF_reduce_dag_property()],
        # Loop until the fixed-point analysis reports convergence; the
        # condition below uses the same predicate, so it stays True while
        # the loop is still making progress.
        do_while=lambda property_set: not property_set['fixed_point']['property'],
        condition=lambda property_set: not property_set['fixed_point']['property'])
    self.assertScheduler(self.dag, self.passmanager,
                         ['run analysis pass PassG_calculates_dag_property',
                          'set property as 8 (from dag.property)',
                          'run analysis pass PassK_check_fixed_point_property',
                          'run transformation pass PassA_TP_NR_NP',
                          'run transformation pass PassF_reduce_dag_property',
                          'dag property = 6',
                          'run analysis pass PassG_calculates_dag_property',
                          'set property as 6 (from dag.property)',
                          'run analysis pass PassK_check_fixed_point_property',
                          'run transformation pass PassA_TP_NR_NP',
                          'run transformation pass PassF_reduce_dag_property',
                          'dag property = 5',
                          'run analysis pass PassG_calculates_dag_property',
                          'set property as 5 (from dag.property)',
                          'run analysis pass PassK_check_fixed_point_property',
                          'run transformation pass PassA_TP_NR_NP',
                          'run transformation pass PassF_reduce_dag_property',
                          'dag property = 4',
                          'run analysis pass PassG_calculates_dag_property',
                          'set property as 4 (from dag.property)',
                          'run analysis pass PassK_check_fixed_point_property',
                          'run transformation pass PassA_TP_NR_NP',
                          'run transformation pass PassF_reduce_dag_property',
                          'dag property = 3',
                          'run analysis pass PassG_calculates_dag_property',
                          'set property as 3 (from dag.property)',
                          'run analysis pass PassK_check_fixed_point_property',
                          'run transformation pass PassA_TP_NR_NP',
                          'run transformation pass PassF_reduce_dag_property',
                          'dag property = 2',
                          'run analysis pass PassG_calculates_dag_property',
                          'set property as 2 (from dag.property)',
                          'run analysis pass PassK_check_fixed_point_property',
                          'run transformation pass PassA_TP_NR_NP',
                          'run transformation pass PassF_reduce_dag_property',
                          'dag property = 2',
                          'run analysis pass PassG_calculates_dag_property',
                          'set property as 2 (from dag.property)',
                          'run analysis pass PassK_check_fixed_point_property',
                          'run transformation pass PassA_TP_NR_NP',
                          'run transformation pass PassF_reduce_dag_property',
                          'dag property = 2'])
def test_do_not_repeat_based_on_preservation(self):
    """When a pass is still a valid pass (because following passes preserved it), it should not
    run again.
    """
    # PassB requires and preserves PassA (see the annotated appends in the
    # ignore_* tests), so the second PassB in the set is scheduled away:
    # only one A and one B appear in the log.
    self.passmanager.append([PassB_TP_RA_PA(), PassA_TP_NR_NP(), PassB_TP_RA_PA()])
    self.assertScheduler(self.dag, self.passmanager, ['run transformation pass PassA_TP_NR_NP',
                                                      'run transformation pass PassB_TP_RA_PA'])
def test_do_not_repeat_based_on_idempotence(self):
    """Repetition can be optimized to a single execution when the pass is idempotent."""
    # Four appends of the same idempotent pass collapse into one run.
    self.passmanager.append(PassA_TP_NR_NP())
    self.passmanager.append([PassA_TP_NR_NP(), PassA_TP_NR_NP()])
    self.passmanager.append(PassA_TP_NR_NP())
    self.assertScheduler(self.dag, self.passmanager, ['run transformation pass PassA_TP_NR_NP'])
def test_non_idempotent_pass(self):
    """Two or more runs of a non-idempotent pass cannot be optimized."""
    # PassF mutates dag.property every run (6 -> 5 -> 4 -> 3 in the log),
    # so all four appended instances must actually execute.
    self.passmanager.append(PassF_reduce_dag_property())
    self.passmanager.append([PassF_reduce_dag_property(), PassF_reduce_dag_property()])
    self.passmanager.append(PassF_reduce_dag_property())
    self.assertScheduler(self.dag, self.passmanager,
                         ['run transformation pass PassF_reduce_dag_property',
                          'dag property = 6',
                          'run transformation pass PassF_reduce_dag_property',
                          'dag property = 5',
                          'run transformation pass PassF_reduce_dag_property',
                          'dag property = 4',
                          'run transformation pass PassF_reduce_dag_property',
                          'dag property = 3'])
def test_fenced_property_set(self):
    """Transformation passes are not allowed to modify the property set."""
    # PassH attempts the forbidden write; the scheduler must raise
    # TranspilerAccessError after logging that the pass started.
    self.passmanager.append(PassH_Bad_TP())
    self.assertSchedulerRaises(self.dag, self.passmanager,
                               ['run transformation pass PassH_Bad_TP'],
                               TranspilerAccessError)
def test_fenced_dag(self):
    """Analysis passes are not allowed to modify the DAG."""
    # Build a small circuit with two CX runs so PassI has something to
    # analyze before it attempts its illegal DAG mutation.
    qr = QuantumRegister(2)
    circ = QuantumCircuit(qr)
    # pylint: disable=no-member
    circ.cx(qr[0], qr[1])
    circ.cx(qr[0], qr[1])
    circ.cx(qr[1], qr[0])
    circ.cx(qr[1], qr[0])
    dag = circuit_to_dag(circ)
    self.passmanager.append(PassI_Bad_AP())
    # The analysis output is logged first, then the mutation attempt raises.
    self.assertSchedulerRaises(dag, self.passmanager,
                               ['run analysis pass PassI_Bad_AP',
                                'cx_runs: {(5, 6, 7, 8)}'],
                               TranspilerAccessError)
def test_ignore_request_pm(self):
    """A pass manager that ignores requires does not run the passes declared in the 'requires'
    field of the passes.
    """
    passmanager = PassManager(ignore_requires=True)
    passmanager.append(PassC_TP_RA_PA())  # Requires: PassA / Preserves: PassA
    passmanager.append(PassB_TP_RA_PA())  # Requires: PassA / Preserves: PassA
    passmanager.append(PassD_TP_NR_NP(argument1=[1, 2]))  # Requires: {} / Preserves: {}
    passmanager.append(PassB_TP_RA_PA())
    # PassA never appears in the log: the requires fields are ignored.
    self.assertScheduler(self.dag, passmanager, ['run transformation pass PassC_TP_RA_PA',
                                                 'run transformation pass PassB_TP_RA_PA',
                                                 'run transformation pass PassD_TP_NR_NP',
                                                 'argument [1, 2]',
                                                 'run transformation pass PassB_TP_RA_PA'])
def test_ignore_preserves_pm(self):
    """A pass manager that ignores preserves does not record the passes declared in the
    'preserves' field of the passes as valid passes.
    """
    passmanager = PassManager(ignore_preserves=True)
    passmanager.append(PassC_TP_RA_PA())  # Requires: PassA / Preserves: PassA
    passmanager.append(PassB_TP_RA_PA())  # Requires: PassA / Preserves: PassA
    passmanager.append(PassD_TP_NR_NP(argument1=[1, 2]))  # Requires: {} / Preserves: {}
    passmanager.append(PassB_TP_RA_PA())
    # Because nothing is recorded as preserved, the required PassA re-runs
    # before PassC, before the first PassB, and again after PassD.
    self.assertScheduler(self.dag, passmanager, ['run transformation pass PassA_TP_NR_NP',
                                                 'run transformation pass PassC_TP_RA_PA',
                                                 'run transformation pass PassA_TP_NR_NP',
                                                 'run transformation pass PassB_TP_RA_PA',
                                                 'run transformation pass PassD_TP_NR_NP',
                                                 'argument [1, 2]',
                                                 'run transformation pass PassA_TP_NR_NP',
                                                 'run transformation pass PassB_TP_RA_PA'])
def test_pass_non_idempotence_pm(self):
    """A pass manager that considers every pass as not idempotent, allows the immediate
    repetition of a pass.
    """
    passmanager = PassManager(ignore_preserves=True)
    passmanager.append(PassA_TP_NR_NP())
    passmanager.append(PassA_TP_NR_NP())  # Normally removed for optimization, not here.
    passmanager.append(PassB_TP_RA_PA())  # Normally requires is ignored for optimization,
    # not here: PassA runs a third time before PassB.
    self.assertScheduler(self.dag, passmanager, ['run transformation pass PassA_TP_NR_NP',
                                                 'run transformation pass PassA_TP_NR_NP',
                                                 'run transformation pass PassA_TP_NR_NP',
                                                 'run transformation pass PassB_TP_RA_PA'])
def test_pass_non_idempotence_passset(self):
    """A pass set scheduled with ignore_preserves=True is treated as not idempotent:
    the required PassA re-runs inside the set before PassB.
    """
    expected = [
        'run transformation pass PassA_TP_NR_NP',
        'run transformation pass PassA_TP_NR_NP',
        'run transformation pass PassB_TP_RA_PA',
    ]
    pm = PassManager()
    # ignore_preserves is given per pass set here, not on the manager.
    pm.append([PassA_TP_NR_NP(), PassB_TP_RA_PA()], ignore_preserves=True)
    self.assertScheduler(self.dag, pm, expected)
def test_analysis_pass_is_idempotent(self):
    """Analysis passes are idempotent: appending the same analysis pass twice
    results in a single execution.
    """
    expected = [
        'run analysis pass PassE_AP_NR_NP',
        'set property as 1',
    ]
    pm = PassManager()
    for _ in range(2):
        pm.append(PassE_AP_NR_NP(argument1=1))
    self.assertScheduler(self.dag, pm, expected)
def test_ap_before_and_after_a_tp(self):
    """A default transformation does not preserve anything and analysis passes
    need to be re-run.
    """
    passmanager = PassManager()
    passmanager.append(PassE_AP_NR_NP(argument1=1))
    # PassA invalidates the analysis result, so PassE runs a second time.
    passmanager.append(PassA_TP_NR_NP())
    passmanager.append(PassE_AP_NR_NP(argument1=1))
    self.assertScheduler(self.dag, passmanager, ['run analysis pass PassE_AP_NR_NP',
                                                 'set property as 1',
                                                 'run transformation pass PassA_TP_NR_NP',
                                                 'run analysis pass PassE_AP_NR_NP',
                                                 'set property as 1'])
def test_pass_option_precedence(self):
    """The precedence of options is, in order of priority:
    - The passset option
    - The Pass Manager option
    - Default
    """
    # Manager says ignore_preserves=False, but the append-level option wins;
    # ignore_requires=True comes from the manager (no passset override).
    passmanager = PassManager(ignore_preserves=False, ignore_requires=True)
    tp_pass = PassA_TP_NR_NP()
    passmanager.append(tp_pass, ignore_preserves=True)
    the_pass_in_the_workinglist = next(iter(passmanager.working_list))
    self.assertTrue(the_pass_in_the_workinglist.options['ignore_preserves'])
    self.assertTrue(the_pass_in_the_workinglist.options['ignore_requires'])
def test_pass_no_return_a_dag(self):
    """A transformation pass that does not return a DAG raises TranspilerError.

    (The previous docstring about "pass instances with same arguments" was a
    copy-paste leftover and did not describe this test.)
    """
    self.passmanager.append(PassJ_Bad_NoReturn())
    self.assertSchedulerRaises(self.dag, self.passmanager,
                               ['run transformation pass PassJ_Bad_NoReturn'], TranspilerError)
def test_fixed_point_pass(self):
    """A pass set with a do_while parameter that checks for a fixed point.

    Each iteration re-runs the (required) PassG analysis, the fixed-point
    check, PassA, and PassF; dag.property goes 8 -> 6 -> 5 -> 4 -> 3 -> 2 and
    the loop stops once the value 2 repeats enough times for PassK to report
    convergence.
    """
    self.passmanager.append(
        [PassK_check_fixed_point_property(),
         PassA_TP_NR_NP(),
         PassF_reduce_dag_property()],
        do_while=lambda property_set: not property_set['fixed_point']['property'])
    self.assertScheduler(self.dag, self.passmanager,
                         ['run analysis pass PassG_calculates_dag_property',
                          'set property as 8 (from dag.property)',
                          'run analysis pass PassK_check_fixed_point_property',
                          'run transformation pass PassA_TP_NR_NP',
                          'run transformation pass PassF_reduce_dag_property',
                          'dag property = 6',
                          'run analysis pass PassG_calculates_dag_property',
                          'set property as 6 (from dag.property)',
                          'run analysis pass PassK_check_fixed_point_property',
                          'run transformation pass PassA_TP_NR_NP',
                          'run transformation pass PassF_reduce_dag_property',
                          'dag property = 5',
                          'run analysis pass PassG_calculates_dag_property',
                          'set property as 5 (from dag.property)',
                          'run analysis pass PassK_check_fixed_point_property',
                          'run transformation pass PassA_TP_NR_NP',
                          'run transformation pass PassF_reduce_dag_property',
                          'dag property = 4',
                          'run analysis pass PassG_calculates_dag_property',
                          'set property as 4 (from dag.property)',
                          'run analysis pass PassK_check_fixed_point_property',
                          'run transformation pass PassA_TP_NR_NP',
                          'run transformation pass PassF_reduce_dag_property',
                          'dag property = 3',
                          'run analysis pass PassG_calculates_dag_property',
                          'set property as 3 (from dag.property)',
                          'run analysis pass PassK_check_fixed_point_property',
                          'run transformation pass PassA_TP_NR_NP',
                          'run transformation pass PassF_reduce_dag_property',
                          'dag property = 2',
                          'run analysis pass PassG_calculates_dag_property',
                          'set property as 2 (from dag.property)',
                          'run analysis pass PassK_check_fixed_point_property',
                          'run transformation pass PassA_TP_NR_NP',
                          'run transformation pass PassF_reduce_dag_property',
                          'dag property = 2',
                          'run analysis pass PassG_calculates_dag_property',
                          'set property as 2 (from dag.property)',
                          'run analysis pass PassK_check_fixed_point_property',
                          'run transformation pass PassA_TP_NR_NP',
                          'run transformation pass PassF_reduce_dag_property',
                          'dag property = 2'])
def test_fixed_point_pass_max_iteration(self):
    """A pass set with a do_while parameter that checks that the max_iteration is raised.

    The fixed point needs more than two iterations (see test_fixed_point_pass),
    so with max_iteration=2 the scheduler must raise TranspilerError after the
    second loop iteration.
    """
    self.passmanager.append(
        [PassK_check_fixed_point_property(),
         PassA_TP_NR_NP(),
         PassF_reduce_dag_property()],
        do_while=lambda property_set: not property_set['fixed_point']['property'],
        max_iteration=2)
    self.assertSchedulerRaises(self.dag, self.passmanager,
                               ['run analysis pass PassG_calculates_dag_property',
                                'set property as 8 (from dag.property)',
                                'run analysis pass PassK_check_fixed_point_property',
                                'run transformation pass PassA_TP_NR_NP',
                                'run transformation pass PassF_reduce_dag_property',
                                'dag property = 6',
                                'run analysis pass PassG_calculates_dag_property',
                                'set property as 6 (from dag.property)',
                                'run analysis pass PassK_check_fixed_point_property',
                                'run transformation pass PassA_TP_NR_NP',
                                'run transformation pass PassF_reduce_dag_property',
                                'dag property = 5'], TranspilerError)
def test_fresh_initial_state(self):
    """New construction gives a fresh instance.

    PassM mutates its own state when run; both scheduled executions log
    'self.argument1 = 2', showing the second run starts from a fresh
    instance rather than the mutated one.
    """
    self.passmanager.append(PassM_AP_NR_NP(argument1=1))
    self.passmanager.append(PassA_TP_NR_NP())  # invalidates the analysis, forcing a re-run
    self.passmanager.append(PassM_AP_NR_NP(argument1=1))
    self.assertScheduler(self.dag, self.passmanager, ['run analysis pass PassM_AP_NR_NP',
                                                      'self.argument1 = 2',
                                                      'run transformation pass PassA_TP_NR_NP',
                                                      'run analysis pass PassM_AP_NR_NP',
                                                      'self.argument1 = 2'])
class DoXTimesController(FlowController):
    """A control-flow plugin for running a set of passes an X amount of times."""

    def __init__(self, passes, options, do_x_times=0, **_):  # pylint: disable=super-init-not-called
        # NOTE(review): do_x_times is *called* here, so it must be callable by
        # construction time even though the test supplies `lambda x: 3` --
        # presumably FlowController partially applies the property_set to
        # callable options before instantiating the controller; confirm
        # against FlowController. The default of 0 would raise TypeError if
        # this controller were ever built without the kwarg.
        self.do_x_times = do_x_times()
        super().__init__(passes, options)

    def __iter__(self):
        # Yield the wrapped passes, in order, do_x_times times.
        for _ in range(self.do_x_times):
            for pass_ in self.passes:
                yield pass_
class TestControlFlowPlugin(SchedulerTestCase):
    """Testing the control flow plugin system."""

    def setUp(self):
        # A fresh pass manager and a trivial one-qubit DAG per test.
        self.passmanager = PassManager()
        self.dag = circuit_to_dag(QuantumCircuit(QuantumRegister(1)))

    def test_control_flow_plugin(self):
        """Adds a control flow plugin with a single parameter and runs it."""
        FlowController.add_flow_controller('do_x_times', DoXTimesController)
        # do_x_times=3: the B/C pair (plus the required PassA once, up front)
        # is scheduled three times.
        self.passmanager.append([PassB_TP_RA_PA(), PassC_TP_RA_PA()], do_x_times=lambda x: 3)
        self.assertScheduler(self.dag, self.passmanager, ['run transformation pass PassA_TP_NR_NP',
                                                          'run transformation pass PassB_TP_RA_PA',
                                                          'run transformation pass PassC_TP_RA_PA',
                                                          'run transformation pass PassB_TP_RA_PA',
                                                          'run transformation pass PassC_TP_RA_PA',
                                                          'run transformation pass PassB_TP_RA_PA',
                                                          'run transformation pass PassC_TP_RA_PA'])

    def test_callable_control_flow_plugin(self):
        """Removes do_while, then adds it back. Checks max_iteration still working."""
        controllers_length = len(FlowController.registered_controllers)
        FlowController.remove_flow_controller('do_while')
        self.assertEqual(controllers_length - 1, len(FlowController.registered_controllers))
        FlowController.add_flow_controller('do_while', DoWhileController)
        self.assertEqual(controllers_length, len(FlowController.registered_controllers))
        # The loop condition is always True, so max_iteration=2 must trip
        # TranspilerError after the second iteration.
        self.passmanager.append([PassB_TP_RA_PA(), PassC_TP_RA_PA()],
                                do_while=lambda property_set: True, max_iteration=2)
        self.assertSchedulerRaises(self.dag, self.passmanager,
                                   ['run transformation pass PassA_TP_NR_NP',
                                    'run transformation pass PassB_TP_RA_PA',
                                    'run transformation pass PassC_TP_RA_PA',
                                    'run transformation pass PassB_TP_RA_PA',
                                    'run transformation pass PassC_TP_RA_PA'], TranspilerError)

    def test_remove_nonexistent_plugin(self):
        """Tries to remove a plugin that does not exist."""
        self.assertRaises(KeyError, FlowController.remove_flow_controller, "foo")
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| 60.397287 | 100 | 0.56018 |
import unittest.mock
from qiskit import QuantumRegister, QuantumCircuit
from qiskit.dagcircuit import DAGCircuit
from qiskit.transpiler import PassManager
from qiskit.transpiler import transpile_dag
from qiskit.transpiler import TranspilerAccessError, TranspilerError
from qiskit.transpiler.passmanager import DoWhileController, ConditionalController, FlowController
from qiskit.converters import circuit_to_dag
from qiskit.test import QiskitTestCase
from ._dummy_passes import (PassA_TP_NR_NP, PassB_TP_RA_PA, PassC_TP_RA_PA,
PassD_TP_NR_NP, PassE_AP_NR_NP, PassF_reduce_dag_property,
PassH_Bad_TP, PassI_Bad_AP, PassJ_Bad_NoReturn,
PassK_check_fixed_point_property, PassM_AP_NR_NP)
logger = "LocalLogger"
class SchedulerTestCase(QiskitTestCase):
def assertScheduler(self, dag, passmanager, expected):
with self.assertLogs(logger, level='INFO') as cm:
dag = transpile_dag(dag, pass_manager=passmanager)
self.assertIsInstance(dag, DAGCircuit)
self.assertEqual([record.message for record in cm.records], expected)
def assertSchedulerRaises(self, dag, passmanager, expected, exception_type):
with self.assertLogs(logger, level='INFO') as cm:
self.assertRaises(exception_type, transpile_dag, dag, pass_manager=passmanager)
self.assertEqual([record.message for record in cm.records], expected)
class TestPassManagerInit(SchedulerTestCase):
def test_passes(self):
dag = circuit_to_dag(QuantumCircuit(QuantumRegister(1)))
passmanager = PassManager(passes=[
PassC_TP_RA_PA(),
PassB_TP_RA_PA(),
PassD_TP_NR_NP(argument1=[1, 2]),
PassB_TP_RA_PA()])
self.assertScheduler(dag, passmanager, ['run transformation pass PassA_TP_NR_NP',
'run transformation pass PassC_TP_RA_PA',
'run transformation pass PassB_TP_RA_PA',
'run transformation pass PassD_TP_NR_NP',
'argument [1, 2]',
'run transformation pass PassA_TP_NR_NP',
'run transformation pass PassB_TP_RA_PA'])
class TestUseCases(SchedulerTestCase):
def setUp(self):
self.dag = circuit_to_dag(QuantumCircuit(QuantumRegister(1)))
self.passmanager = PassManager()
def test_chain(self):
self.passmanager.append(PassC_TP_RA_PA())
self.passmanager.append(PassB_TP_RA_PA())
self.passmanager.append(PassD_TP_NR_NP(argument1=[1, 2]))
self.passmanager.append(PassB_TP_RA_PA())
self.assertScheduler(self.dag, self.passmanager, ['run transformation pass PassA_TP_NR_NP',
'run transformation pass PassC_TP_RA_PA',
'run transformation pass PassB_TP_RA_PA',
'run transformation pass PassD_TP_NR_NP',
'argument [1, 2]',
'run transformation pass PassA_TP_NR_NP',
'run transformation pass PassB_TP_RA_PA'])
def test_conditional_passes_true(self):
self.passmanager.append(PassE_AP_NR_NP(True))
self.passmanager.append(PassA_TP_NR_NP(),
condition=lambda property_set: property_set['property'])
self.assertScheduler(self.dag, self.passmanager, ['run analysis pass PassE_AP_NR_NP',
'set property as True',
'run transformation pass PassA_TP_NR_NP'])
def test_conditional_passes_false(self):
self.passmanager.append(PassE_AP_NR_NP(False))
self.passmanager.append(PassA_TP_NR_NP(),
condition=lambda property_set: property_set['property'])
self.assertScheduler(self.dag, self.passmanager, ['run analysis pass PassE_AP_NR_NP',
'set property as False'])
def test_conditional_and_loop(self):
self.passmanager.append(PassE_AP_NR_NP(True))
self.passmanager.append(
[PassK_check_fixed_point_property(),
PassA_TP_NR_NP(),
PassF_reduce_dag_property()],
do_while=lambda property_set: not property_set['fixed_point']['property'],
condition=lambda property_set: property_set['property'])
self.assertScheduler(self.dag, self.passmanager,
['run analysis pass PassE_AP_NR_NP',
'set property as True',
'run analysis pass PassG_calculates_dag_property',
'set property as 8 (from dag.property)',
'run analysis pass PassK_check_fixed_point_property',
'run transformation pass PassA_TP_NR_NP',
'run transformation pass PassF_reduce_dag_property',
'dag property = 6',
'run analysis pass PassG_calculates_dag_property',
'set property as 6 (from dag.property)',
'run analysis pass PassK_check_fixed_point_property',
'run transformation pass PassA_TP_NR_NP',
'run transformation pass PassF_reduce_dag_property',
'dag property = 5',
'run analysis pass PassG_calculates_dag_property',
'set property as 5 (from dag.property)',
'run analysis pass PassK_check_fixed_point_property',
'run transformation pass PassA_TP_NR_NP',
'run transformation pass PassF_reduce_dag_property',
'dag property = 4',
'run analysis pass PassG_calculates_dag_property',
'set property as 4 (from dag.property)',
'run analysis pass PassK_check_fixed_point_property',
'run transformation pass PassA_TP_NR_NP',
'run transformation pass PassF_reduce_dag_property',
'dag property = 3',
'run analysis pass PassG_calculates_dag_property',
'set property as 3 (from dag.property)',
'run analysis pass PassK_check_fixed_point_property',
'run transformation pass PassA_TP_NR_NP',
'run transformation pass PassF_reduce_dag_property',
'dag property = 2',
'run analysis pass PassG_calculates_dag_property',
'set property as 2 (from dag.property)',
'run analysis pass PassK_check_fixed_point_property',
'run transformation pass PassA_TP_NR_NP',
'run transformation pass PassF_reduce_dag_property',
'dag property = 2',
'run analysis pass PassG_calculates_dag_property',
'set property as 2 (from dag.property)',
'run analysis pass PassK_check_fixed_point_property',
'run transformation pass PassA_TP_NR_NP',
'run transformation pass PassF_reduce_dag_property',
'dag property = 2'])
def test_loop_and_conditional(self):
FlowController.remove_flow_controller('condition')
FlowController.add_flow_controller('condition', ConditionalController)
self.passmanager.append(PassK_check_fixed_point_property())
self.passmanager.append(
[PassK_check_fixed_point_property(),
PassA_TP_NR_NP(),
PassF_reduce_dag_property()],
do_while=lambda property_set: not property_set['fixed_point']['property'],
condition=lambda property_set: not property_set['fixed_point']['property'])
self.assertScheduler(self.dag, self.passmanager,
['run analysis pass PassG_calculates_dag_property',
'set property as 8 (from dag.property)',
'run analysis pass PassK_check_fixed_point_property',
'run transformation pass PassA_TP_NR_NP',
'run transformation pass PassF_reduce_dag_property',
'dag property = 6',
'run analysis pass PassG_calculates_dag_property',
'set property as 6 (from dag.property)',
'run analysis pass PassK_check_fixed_point_property',
'run transformation pass PassA_TP_NR_NP',
'run transformation pass PassF_reduce_dag_property',
'dag property = 5',
'run analysis pass PassG_calculates_dag_property',
'set property as 5 (from dag.property)',
'run analysis pass PassK_check_fixed_point_property',
'run transformation pass PassA_TP_NR_NP',
'run transformation pass PassF_reduce_dag_property',
'dag property = 4',
'run analysis pass PassG_calculates_dag_property',
'set property as 4 (from dag.property)',
'run analysis pass PassK_check_fixed_point_property',
'run transformation pass PassA_TP_NR_NP',
'run transformation pass PassF_reduce_dag_property',
'dag property = 3',
'run analysis pass PassG_calculates_dag_property',
'set property as 3 (from dag.property)',
'run analysis pass PassK_check_fixed_point_property',
'run transformation pass PassA_TP_NR_NP',
'run transformation pass PassF_reduce_dag_property',
'dag property = 2',
'run analysis pass PassG_calculates_dag_property',
'set property as 2 (from dag.property)',
'run analysis pass PassK_check_fixed_point_property',
'run transformation pass PassA_TP_NR_NP',
'run transformation pass PassF_reduce_dag_property',
'dag property = 2',
'run analysis pass PassG_calculates_dag_property',
'set property as 2 (from dag.property)',
'run analysis pass PassK_check_fixed_point_property',
'run transformation pass PassA_TP_NR_NP',
'run transformation pass PassF_reduce_dag_property',
'dag property = 2'])
def test_do_not_repeat_based_on_preservation(self):
self.passmanager.append([PassB_TP_RA_PA(), PassA_TP_NR_NP(), PassB_TP_RA_PA()])
self.assertScheduler(self.dag, self.passmanager, ['run transformation pass PassA_TP_NR_NP',
'run transformation pass PassB_TP_RA_PA'])
def test_do_not_repeat_based_on_idempotence(self):
self.passmanager.append(PassA_TP_NR_NP())
self.passmanager.append([PassA_TP_NR_NP(), PassA_TP_NR_NP()])
self.passmanager.append(PassA_TP_NR_NP())
self.assertScheduler(self.dag, self.passmanager, ['run transformation pass PassA_TP_NR_NP'])
def test_non_idempotent_pass(self):
self.passmanager.append(PassF_reduce_dag_property())
self.passmanager.append([PassF_reduce_dag_property(), PassF_reduce_dag_property()])
self.passmanager.append(PassF_reduce_dag_property())
self.assertScheduler(self.dag, self.passmanager,
['run transformation pass PassF_reduce_dag_property',
'dag property = 6',
'run transformation pass PassF_reduce_dag_property',
'dag property = 5',
'run transformation pass PassF_reduce_dag_property',
'dag property = 4',
'run transformation pass PassF_reduce_dag_property',
'dag property = 3'])
def test_fenced_property_set(self):
self.passmanager.append(PassH_Bad_TP())
self.assertSchedulerRaises(self.dag, self.passmanager,
['run transformation pass PassH_Bad_TP'],
TranspilerAccessError)
def test_fenced_dag(self):
qr = QuantumRegister(2)
circ = QuantumCircuit(qr)
circ.cx(qr[0], qr[1])
circ.cx(qr[0], qr[1])
circ.cx(qr[1], qr[0])
circ.cx(qr[1], qr[0])
dag = circuit_to_dag(circ)
self.passmanager.append(PassI_Bad_AP())
self.assertSchedulerRaises(dag, self.passmanager,
['run analysis pass PassI_Bad_AP',
'cx_runs: {(5, 6, 7, 8)}'],
TranspilerAccessError)
def test_ignore_request_pm(self):
passmanager = PassManager(ignore_requires=True)
passmanager.append(PassC_TP_RA_PA())
passmanager.append(PassB_TP_RA_PA())
passmanager.append(PassD_TP_NR_NP(argument1=[1, 2]))
passmanager.append(PassB_TP_RA_PA())
self.assertScheduler(self.dag, passmanager, ['run transformation pass PassC_TP_RA_PA',
'run transformation pass PassB_TP_RA_PA',
'run transformation pass PassD_TP_NR_NP',
'argument [1, 2]',
'run transformation pass PassB_TP_RA_PA'])
def test_ignore_preserves_pm(self):
passmanager = PassManager(ignore_preserves=True)
passmanager.append(PassC_TP_RA_PA())
passmanager.append(PassB_TP_RA_PA())
passmanager.append(PassD_TP_NR_NP(argument1=[1, 2]))
passmanager.append(PassB_TP_RA_PA())
self.assertScheduler(self.dag, passmanager, ['run transformation pass PassA_TP_NR_NP',
'run transformation pass PassC_TP_RA_PA',
'run transformation pass PassA_TP_NR_NP',
'run transformation pass PassB_TP_RA_PA',
'run transformation pass PassD_TP_NR_NP',
'argument [1, 2]',
'run transformation pass PassA_TP_NR_NP',
'run transformation pass PassB_TP_RA_PA'])
def test_pass_non_idempotence_pm(self):
passmanager = PassManager(ignore_preserves=True)
passmanager.append(PassA_TP_NR_NP())
passmanager.append(PassA_TP_NR_NP())
passmanager.append(PassB_TP_RA_PA())
self.assertScheduler(self.dag, passmanager, ['run transformation pass PassA_TP_NR_NP',
'run transformation pass PassA_TP_NR_NP',
'run transformation pass PassA_TP_NR_NP',
'run transformation pass PassB_TP_RA_PA'])
def test_pass_non_idempotence_passset(self):
passmanager = PassManager()
passmanager.append([PassA_TP_NR_NP(), PassB_TP_RA_PA()], ignore_preserves=True)
self.assertScheduler(self.dag, passmanager, ['run transformation pass PassA_TP_NR_NP',
'run transformation pass PassA_TP_NR_NP',
'run transformation pass PassB_TP_RA_PA'])
def test_analysis_pass_is_idempotent(self):
passmanager = PassManager()
passmanager.append(PassE_AP_NR_NP(argument1=1))
passmanager.append(PassE_AP_NR_NP(argument1=1))
self.assertScheduler(self.dag, passmanager, ['run analysis pass PassE_AP_NR_NP',
'set property as 1'])
def test_ap_before_and_after_a_tp(self):
passmanager = PassManager()
passmanager.append(PassE_AP_NR_NP(argument1=1))
passmanager.append(PassA_TP_NR_NP())
passmanager.append(PassE_AP_NR_NP(argument1=1))
self.assertScheduler(self.dag, passmanager, ['run analysis pass PassE_AP_NR_NP',
'set property as 1',
'run transformation pass PassA_TP_NR_NP',
'run analysis pass PassE_AP_NR_NP',
'set property as 1'])
def test_pass_option_precedence(self):
passmanager = PassManager(ignore_preserves=False, ignore_requires=True)
tp_pass = PassA_TP_NR_NP()
passmanager.append(tp_pass, ignore_preserves=True)
the_pass_in_the_workinglist = next(iter(passmanager.working_list))
self.assertTrue(the_pass_in_the_workinglist.options['ignore_preserves'])
self.assertTrue(the_pass_in_the_workinglist.options['ignore_requires'])
def test_pass_no_return_a_dag(self):
self.passmanager.append(PassJ_Bad_NoReturn())
self.assertSchedulerRaises(self.dag, self.passmanager,
['run transformation pass PassJ_Bad_NoReturn'], TranspilerError)
def test_fixed_point_pass(self):
self.passmanager.append(
[PassK_check_fixed_point_property(),
PassA_TP_NR_NP(),
PassF_reduce_dag_property()],
do_while=lambda property_set: not property_set['fixed_point']['property'])
self.assertScheduler(self.dag, self.passmanager,
['run analysis pass PassG_calculates_dag_property',
'set property as 8 (from dag.property)',
'run analysis pass PassK_check_fixed_point_property',
'run transformation pass PassA_TP_NR_NP',
'run transformation pass PassF_reduce_dag_property',
'dag property = 6',
'run analysis pass PassG_calculates_dag_property',
'set property as 6 (from dag.property)',
'run analysis pass PassK_check_fixed_point_property',
'run transformation pass PassA_TP_NR_NP',
'run transformation pass PassF_reduce_dag_property',
'dag property = 5',
'run analysis pass PassG_calculates_dag_property',
'set property as 5 (from dag.property)',
'run analysis pass PassK_check_fixed_point_property',
'run transformation pass PassA_TP_NR_NP',
'run transformation pass PassF_reduce_dag_property',
'dag property = 4',
'run analysis pass PassG_calculates_dag_property',
'set property as 4 (from dag.property)',
'run analysis pass PassK_check_fixed_point_property',
'run transformation pass PassA_TP_NR_NP',
'run transformation pass PassF_reduce_dag_property',
'dag property = 3',
'run analysis pass PassG_calculates_dag_property',
'set property as 3 (from dag.property)',
'run analysis pass PassK_check_fixed_point_property',
'run transformation pass PassA_TP_NR_NP',
'run transformation pass PassF_reduce_dag_property',
'dag property = 2',
'run analysis pass PassG_calculates_dag_property',
'set property as 2 (from dag.property)',
'run analysis pass PassK_check_fixed_point_property',
'run transformation pass PassA_TP_NR_NP',
'run transformation pass PassF_reduce_dag_property',
'dag property = 2',
'run analysis pass PassG_calculates_dag_property',
'set property as 2 (from dag.property)',
'run analysis pass PassK_check_fixed_point_property',
'run transformation pass PassA_TP_NR_NP',
'run transformation pass PassF_reduce_dag_property',
'dag property = 2'])
def test_fixed_point_pass_max_iteration(self):
self.passmanager.append(
[PassK_check_fixed_point_property(),
PassA_TP_NR_NP(),
PassF_reduce_dag_property()],
do_while=lambda property_set: not property_set['fixed_point']['property'],
max_iteration=2)
self.assertSchedulerRaises(self.dag, self.passmanager,
['run analysis pass PassG_calculates_dag_property',
'set property as 8 (from dag.property)',
'run analysis pass PassK_check_fixed_point_property',
'run transformation pass PassA_TP_NR_NP',
'run transformation pass PassF_reduce_dag_property',
'dag property = 6',
'run analysis pass PassG_calculates_dag_property',
'set property as 6 (from dag.property)',
'run analysis pass PassK_check_fixed_point_property',
'run transformation pass PassA_TP_NR_NP',
'run transformation pass PassF_reduce_dag_property',
'dag property = 5'], TranspilerError)
def test_fresh_initial_state(self):
self.passmanager.append(PassM_AP_NR_NP(argument1=1))
self.passmanager.append(PassA_TP_NR_NP())
self.passmanager.append(PassM_AP_NR_NP(argument1=1))
self.assertScheduler(self.dag, self.passmanager, ['run analysis pass PassM_AP_NR_NP',
'self.argument1 = 2',
'run transformation pass PassA_TP_NR_NP',
'run analysis pass PassM_AP_NR_NP',
'self.argument1 = 2'])
class DoXTimesController(FlowController):
def __init__(self, passes, options, do_x_times=0, **_):
self.do_x_times = do_x_times()
super().__init__(passes, options)
def __iter__(self):
for _ in range(self.do_x_times):
for pass_ in self.passes:
yield pass_
class TestControlFlowPlugin(SchedulerTestCase):
    """Tests for registering, using, and removing custom flow controllers."""
    def setUp(self):
        # Fresh pass manager and a 1-qubit DAG for every test.
        self.passmanager = PassManager()
        self.dag = circuit_to_dag(QuantumCircuit(QuantumRegister(1)))
    def test_control_flow_plugin(self):
        """A registered 'do_x_times' controller repeats its passes 3 times."""
        FlowController.add_flow_controller('do_x_times', DoXTimesController)
        self.passmanager.append([PassB_TP_RA_PA(), PassC_TP_RA_PA()], do_x_times=lambda x: 3)
        # PassA appears once: it is a requirement of PassB/PassC and is cached.
        self.assertScheduler(self.dag, self.passmanager, ['run transformation pass PassA_TP_NR_NP',
                                                          'run transformation pass PassB_TP_RA_PA',
                                                          'run transformation pass PassC_TP_RA_PA',
                                                          'run transformation pass PassB_TP_RA_PA',
                                                          'run transformation pass PassC_TP_RA_PA',
                                                          'run transformation pass PassB_TP_RA_PA',
                                                          'run transformation pass PassC_TP_RA_PA'])
    def test_callable_control_flow_plugin(self):
        """'do_while' can be removed and re-registered; a never-ending loop hits max_iteration."""
        controllers_length = len(FlowController.registered_controllers)
        FlowController.remove_flow_controller('do_while')
        self.assertEqual(controllers_length - 1, len(FlowController.registered_controllers))
        FlowController.add_flow_controller('do_while', DoWhileController)
        self.assertEqual(controllers_length, len(FlowController.registered_controllers))
        # do_while always True -> loop only stops by exceeding max_iteration.
        self.passmanager.append([PassB_TP_RA_PA(), PassC_TP_RA_PA()],
                                do_while=lambda property_set: True, max_iteration=2)
        self.assertSchedulerRaises(self.dag, self.passmanager,
                                   ['run transformation pass PassA_TP_NR_NP',
                                    'run transformation pass PassB_TP_RA_PA',
                                    'run transformation pass PassC_TP_RA_PA',
                                    'run transformation pass PassB_TP_RA_PA',
                                    'run transformation pass PassC_TP_RA_PA'], TranspilerError)
    def test_remove_nonexistent_plugin(self):
        """Removing an unregistered controller name raises KeyError."""
        self.assertRaises(KeyError, FlowController.remove_flow_controller, "foo")
if __name__ == '__main__':
    unittest.main()  # Run this module's tests when executed directly.
| true | true |
f7378884bdfd4e721e18c4902180b71f13438de9 | 371 | py | Python | src/ortec/scientific/benchmarks/loadbuilding/common/objectives/ItemCountObjective.py | ORTECScientificBenchmarks/ortec-scientific-benchmarks-loadbuilding | 8b1f5c58d930448a29195355d28fda856f4705b2 | [
"MIT"
] | 4 | 2018-05-23T22:48:42.000Z | 2020-04-21T10:21:30.000Z | src/ortec/scientific/benchmarks/loadbuilding/common/objectives/ItemCountObjective.py | ORTECScientificBenchmarks/ortec-scientific-benchmarks-loadbuilding | 8b1f5c58d930448a29195355d28fda856f4705b2 | [
"MIT"
] | null | null | null | src/ortec/scientific/benchmarks/loadbuilding/common/objectives/ItemCountObjective.py | ORTECScientificBenchmarks/ortec-scientific-benchmarks-loadbuilding | 8b1f5c58d930448a29195355d28fda856f4705b2 | [
"MIT"
] | null | null | null | from ..Requirements import BaseObjective
# MAXIMIZE
class ItemCountObjective(BaseObjective):
    """Objective scoring a solution by its number of placed items.

    The count is negated, so a smaller score means more items are placed
    (the file's MAXIMIZE marker presumably indicates the framework minimizes
    objective values -- confirm against BaseObjective).
    """
    name = "item_count"
    def Evaluate(threeDsolution):
        # A placement counts only if it has an actual position and an item id.
        placed = sum(
            1
            for p in threeDsolution.GetAllPlacements()
            if p.position != p.UNPLACED and p.itemid is not None
        )
        return -placed
if __name__=="__main__":
    exit("Don't run this file")  # Library module: abort if invoked as a script.
class ItemCountObjective(BaseObjective):
name = "item_count"
def Evaluate(threeDsolution):
return -sum([1 for placement in threeDsolution.GetAllPlacements() if placement.position != placement.UNPLACED and placement.itemid is not None])
if __name__=="__main__":
exit("Don't run this file") | true | true |
f737889d9eddff618f66eb6757a8f32ee1eb73a5 | 517 | py | Python | msgraph-cli-extensions/beta/search_beta/azext_search_beta/vendored_sdks/search/aio/__init__.py | thewahome/msgraph-cli | 33127d9efa23a0e5f5303c93242fbdbb73348671 | [
"MIT"
] | null | null | null | msgraph-cli-extensions/beta/search_beta/azext_search_beta/vendored_sdks/search/aio/__init__.py | thewahome/msgraph-cli | 33127d9efa23a0e5f5303c93242fbdbb73348671 | [
"MIT"
] | null | null | null | msgraph-cli-extensions/beta/search_beta/azext_search_beta/vendored_sdks/search/aio/__init__.py | thewahome/msgraph-cli | 33127d9efa23a0e5f5303c93242fbdbb73348671 | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._search import Search
__all__ = ['Search']
| 47 | 94 | 0.545455 |
from ._search import Search
__all__ = ['Search']
| true | true |
f73788dfeceda67549aad48d49a50ffa4c97a2cc | 9,168 | py | Python | aiger_coins/pcirc.py | mvcisback/py-aiger-coins | 3e7f5a84e56debe7001d63f2f271e29163781e68 | [
"MIT"
] | null | null | null | aiger_coins/pcirc.py | mvcisback/py-aiger-coins | 3e7f5a84e56debe7001d63f2f271e29163781e68 | [
"MIT"
] | 7 | 2019-04-01T17:19:13.000Z | 2019-11-01T17:33:15.000Z | aiger_coins/pcirc.py | mvcisback/py-aiger-coins | 3e7f5a84e56debe7001d63f2f271e29163781e68 | [
"MIT"
] | 2 | 2019-03-28T03:05:53.000Z | 2021-01-05T23:03:53.000Z | from __future__ import annotations
import heapq
import random
import uuid
from fractions import Fraction
from functools import reduce
from typing import Any, Mapping, Optional, Sequence, Union
import aiger
import aiger_bv as BV
import aiger_discrete
import attr
import funcy as fn
from aiger_discrete import FiniteFunc
Prob = Union[float, Fraction]
Distribution = Mapping[Any, Prob]
def coin_gadget(name: str,
                dist: Distribution,
                func: FiniteFunc,
                tree_encoding: bool = True) -> PCirc:
    """Return a Probabilistic Circuit representing a distribution.

    Builds a Huffman-style tree (or chain) of muxes selected by fresh coin
    inputs so that output ``name`` takes each support value with its given
    probability.
    - chain encoding. O(|support|)
    - tree encoding. O(log|support|)
    """
    # Transform support of dist to UnsignedBVExpr using encoding.
    size = func.circ.imap[name].size
    encode = func.input_encodings \
        .get(name, aiger_discrete.Encoding()) \
        .encode
    dist = ((p, BV.uatom(size, encode(v))) for v, p in dist.items())
    # Create priority queue.  Tree encoding merges cheapest (shallowest)
    # entries first; chain encoding reverses the order via negated cost.
    # NOTE(review): ``cost`` is applied to the whole (prob, expr) tuple, so
    # ``len(elem)`` is always 2 -- verify this matches the intended
    # chain/tree cost model upstream.
    cost = len if tree_encoding else (lambda x: -len(x))
    queue = [(cost(elem), elem) for elem in dist]
    coins = []
    while len(queue) > 1:
        cost1, (weight1, expr1) = heapq.heappop(queue)
        cost2, (weight2, expr2) = heapq.heappop(queue)
        # Create new coin input.
        coin = BV.uatom(1, None)
        bias = weight1 / (weight1 + weight2)  # P(coin selects expr1 branch).
        coins.append((fn.first(coin.inputs), bias))
        # Merge and add back to queue.
        expr12 = BV.ite(coin, expr1, expr2)
        cost12 = cost1 + cost2  # (-x + -y) == -(x + y).
        weight12 = weight1 + weight2
        heapq.heappush(queue, (cost12, (weight12, expr12)))
    assert len(queue) == 1
    _, (weight, expr) = queue[0]
    # Bundle all coin bits into a single input word, ordered as created.
    expr = expr.bundle_inputs(order=[c for c, _ in coins]) \
               .with_output(name)
    assert len(expr.inputs) == 1
    coins_id = fn.first(expr.inputs)
    biases = tuple(bias for _, bias in coins)
    return PCirc(expr.aigbv, coins_id=coins_id, coin_biases=biases)
def coin_gadgets(dist_map: Mapping[str, Distribution],
                 func: FiniteFunc,
                 tree_encoding: bool = True) -> PCirc:
    """Return a Probabilistic Circuit representing a product distribution.

    One independent coin gadget is built per input named in ``dist_map`` and
    the gadgets are composed in parallel.
    """
    pieces = [
        coin_gadget(name, dist, func, tree_encoding)
        for name, dist in dist_map.items()
    ]
    return reduce(PCirc.__or__, pieces)
def to_finite_func(circ) -> FiniteFunc:
    """Coerce *circ* to a FiniteFunc, wrapping its ``.aigbv`` if necessary."""
    already_finite = isinstance(circ, FiniteFunc)
    return circ if already_finite else aiger_discrete.from_aigbv(circ.aigbv)
def merge_pcirc_coins(circ, left: PCirc, right: PCirc, coins_id: str):
    """Fuse the coin inputs of two composed PCircs into one input ``coins_id``.

    Returns the rewired circuit and the concatenated bias tuple
    (left's coins occupy the low bits, right's the high bits).
    """
    if not left.has_coins and not right.has_coins:
        # Neither side is probabilistic: nothing to rewire.
        biases = ()
    elif left.has_coins and not right.has_coins:
        # Rename left's coin input to the shared name.
        circ <<= BV.uatom(left.num_coins, coins_id) \
            .with_output(left.coins_id).aigbv
        biases = left.coin_biases
    elif right.has_coins and not left.has_coins:
        # Rename right's coin input to the shared name.
        circ <<= BV.uatom(right.num_coins, coins_id) \
            .with_output(right.coins_id).aigbv
        biases = right.coin_biases
    else:
        # Both sides flip coins: slice one wide input into the two originals.
        coins = BV.uatom(left.num_coins + right.num_coins, coins_id)
        relabeler = coins[:left.num_coins].with_output(left.coins_id).aigbv \
            | coins[left.num_coins:].with_output(right.coins_id).aigbv  # noqa: E127, E501
        circ <<= relabeler
        biases = tuple(left.coin_biases) + tuple(right.coin_biases)
    return circ, biases
def sample_coins(coin_biases: Sequence[float], rng=None) -> int:
    """Return an integer whose i-th bit is 1 with probability ``coin_biases[i]``.

    Args:
        coin_biases: Per-bit probabilities of flipping a 1.
        rng: Optional zero-argument callable returning a float in [0, 1).
            Defaults to ``random.random``; injecting a deterministic callable
            makes sampling reproducible in tests.  Backward compatible:
            existing single-argument callers are unaffected.

    Returns:
        The sampled coin word; 0 when ``coin_biases`` is empty.
    """
    draw = random.random if rng is None else rng
    result = 0
    for i, bias in enumerate(coin_biases):
        if draw() < bias:
            result |= 1 << i
    return result
@attr.s(frozen=True, auto_attribs=True)
class PCirc:
    """Wrapper around AIG representing a function with some random inputs."""
    circ: FiniteFunc = attr.ib(converter=to_finite_func)
    coin_biases: Sequence[Prob]  # Bias (probability of 1) of each coin flip.
    coins_id: str = "##coins"  # Input reserved for coin flips.
    def __attrs_post_init__(self):
        # Validate that the declared coin input exists and matches the biases.
        if not self.has_coins:
            return
        circ: BV.AIGBV = self.circ.circ
        if self.coins_id not in circ.imap:
            raise ValueError("Underlying circuit doesn't have coins input.")
        if circ.imap[self.coins_id].size != len(self.coin_biases):
            raise ValueError("Underlying circuit doesn't have enough coins.")
    # --- Read-only views, mostly delegating to the wrapped FiniteFunc. ---
    @property
    def has_coins(self): return len(self.coin_biases) > 0
    @property
    def inputs(self): return self.circ.inputs - {self.coins_id}  # non-coin inputs
    @property
    def outputs(self): return self.circ.outputs
    @property
    def latches(self): return self.circ.latches
    @property
    def latch2init(self): return self.circ.latch2init
    @property
    def aig(self): return self.circ.aig
    @property
    def aigbv(self): return self.circ.aigbv
    @property
    def num_coins(self): return len(self.coin_biases)
    @property
    def imap(self):
        # Input map without the reserved coins input.
        return self.circ.imap.omit([self.coins_id])
    @property
    def omap(self):
        # Output map without the reserved coins name.
        return self.circ.omap.omit([self.coins_id])
    def assume(self, aigbv_like) -> PCirc:
        """Return Probabilistic Circuit with new assumption over the inputs."""
        return attr.evolve(self, circ=self.circ.assume(aigbv_like))
    def with_coins_id(self, name=None):
        # Rename the coin input; a fresh UUID avoids clashes when composing.
        if name is None:
            name = str(uuid.uuid1())
        circ = self.circ
        if self.has_coins:
            circ = self.circ['i', {self.coins_id: name}]
        return attr.evolve(self, circ=circ, coins_id=name)
    def __rshift__(self, other) -> PCirc:
        # Sequential composition; coin inputs of both sides are fused.
        other = canon(other)
        circ = self.circ >> other.circ
        circ, biases = merge_pcirc_coins(circ, self, other, self.coins_id)
        return PCirc(circ, coins_id=self.coins_id, coin_biases=biases)
    def __lshift__(self, other) -> PCirc:
        # Reverse sequential composition.
        return canon(other) >> self
    def __or__(self, other) -> PCirc:
        # Parallel composition; coin inputs of both sides are fused.
        other = canon(other)
        circ = self.circ | other.circ
        circ, biases = merge_pcirc_coins(circ, self, other, self.coins_id)
        return PCirc(circ, coins_id=self.coins_id, coin_biases=biases)
    def __call__(self, inputs, latches=None):
        # Evaluate once; coins are sampled unless the caller supplies them.
        inputs = dict(inputs)
        if self.has_coins:
            inputs.setdefault(self.coins_id, sample_coins(self.coin_biases))
        return self.circ(inputs=inputs, latches=latches)
    def __getitem__(self, others) -> PCirc:
        # Relabel inputs/outputs/latches; the coins input is off limits.
        circ = self.circ[others]
        kind, relabels = others
        if (kind == 'i') and (self.coins_id in relabels):
            raise ValueError("Use with_coins_id to relabel coins.")
        return attr.evolve(self, circ=circ)
    def loopback(self, *wirings) -> PCirc:
        # Feed outputs back to (non-coin) inputs.
        inputs = self.inputs
        assert all(w['input'] in inputs for w in wirings)
        circ = self.circ.loopback(*wirings)
        return attr.evolve(self, circ=circ)
    def unroll(self,
               horizon, *,
               init=True,
               omit_latches=True,
               only_last_outputs=False) -> PCirc:
        """Unroll the sequential circuit ``horizon`` steps into a combinational one."""
        # Unroll underlying circuit.
        circ = self.circ.unroll(
            horizon=horizon, init=init, omit_latches=omit_latches,
            only_last_outputs=only_last_outputs,
        )
        if not self.has_coins:
            return attr.evolve(self, circ=circ)
        # Merge timed coin sequences into a single input.
        coins = BV.uatom(self.num_coins * horizon, self.coins_id)
        relabeler = BV.aig2aigbv(aiger.empty())
        for time in range(horizon):
            name = f"{self.coins_id}##time_{time}"
            assert name in circ.inputs
            start = time * self.num_coins
            end = start + self.num_coins
            relabeler |= coins[start:end].with_output(name).aigbv
        circ <<= relabeler
        biases = self.coin_biases * horizon  # Same biases repeated per step.
        return PCirc(circ, coins_id=self.coins_id, coin_biases=biases)
    def randomize(self, dist_map: Mapping[str, Distribution]) -> PCirc:
        """Apply distributions in dist_map to corresponding inputs."""
        # Build identity gates over the randomized inputs, then precompose
        # the corresponding coin gadgets.
        circ = BV.aig2aigbv(aiger.empty())
        for name in dist_map.keys():
            size = self.circ.circ.imap[name].size  # TODO: propagate imap.
            circ |= BV.identity_gate(size, name)
        func = aiger_discrete.from_aigbv(
            circ=circ,
            input_encodings=self.circ.input_encodings,
            output_encodings=self.circ.output_encodings,
        )
        return pcirc(func, dist_map) >> self
    # Sequential simulation is delegated unchanged to FiniteFunc.
    simulator = aiger_discrete.FiniteFunc.simulator
    simulate = aiger_discrete.FiniteFunc.simulate
def pcirc(func,
          dist_map: Optional[Mapping[str, Distribution]] = None,
          tree_encoding: bool = False) -> PCirc:
    """Lift Discrete Function to a probilistic circuit."""
    finite = to_finite_func(func)
    if dist_map is None:
        # No randomized inputs: wrap as a coin-free probabilistic circuit.
        return PCirc(circ=finite, coin_biases=())
    return coin_gadgets(dist_map, finite, tree_encoding) >> finite
def canon(circ) -> PCirc:
    """Canonicalize to a PCirc with a fresh, collision-free coins input name."""
    wrapped = circ if isinstance(circ, PCirc) else PCirc(circ, coin_biases=())
    return wrapped.with_coins_id()
__all__ = ['PCirc', 'pcirc']
| 33.582418 | 96 | 0.634708 | from __future__ import annotations
import heapq
import random
import uuid
from fractions import Fraction
from functools import reduce
from typing import Any, Mapping, Optional, Sequence, Union
import aiger
import aiger_bv as BV
import aiger_discrete
import attr
import funcy as fn
from aiger_discrete import FiniteFunc
Prob = Union[float, Fraction]
Distribution = Mapping[Any, Prob]
def coin_gadget(name: str,
dist: Distribution,
func: FiniteFunc,
tree_encoding: bool = True) -> PCirc:
size = func.circ.imap[name].size
encode = func.input_encodings \
.get(name, aiger_discrete.Encoding()) \
.encode
dist = ((p, BV.uatom(size, encode(v))) for v, p in dist.items())
cost = len if tree_encoding else (lambda x: -len(x))
queue = [(cost(elem), elem) for elem in dist]
coins = []
while len(queue) > 1:
cost1, (weight1, expr1) = heapq.heappop(queue)
cost2, (weight2, expr2) = heapq.heappop(queue)
coin = BV.uatom(1, None)
bias = weight1 / (weight1 + weight2)
coins.append((fn.first(coin.inputs), bias))
expr12 = BV.ite(coin, expr1, expr2)
cost12 = cost1 + cost2
weight12 = weight1 + weight2
heapq.heappush(queue, (cost12, (weight12, expr12)))
assert len(queue) == 1
_, (weight, expr) = queue[0]
expr = expr.bundle_inputs(order=[c for c, _ in coins]) \
.with_output(name)
assert len(expr.inputs) == 1
coins_id = fn.first(expr.inputs)
biases = tuple(bias for _, bias in coins)
return PCirc(expr.aigbv, coins_id=coins_id, coin_biases=biases)
def coin_gadgets(dist_map: Mapping[str, Distribution],
func: FiniteFunc,
tree_encoding: bool = True) -> PCirc:
gadgets = (
coin_gadget(k, v, func, tree_encoding) for k, v in dist_map.items()
)
return reduce(PCirc.__or__, gadgets)
def to_finite_func(circ) -> FiniteFunc:
if isinstance(circ, FiniteFunc):
return circ
return aiger_discrete.from_aigbv(circ.aigbv)
def merge_pcirc_coins(circ, left: PCirc, right: PCirc, coins_id: str):
if not left.has_coins and not right.has_coins:
biases = ()
elif left.has_coins and not right.has_coins:
circ <<= BV.uatom(left.num_coins, coins_id) \
.with_output(left.coins_id).aigbv
biases = left.coin_biases
elif right.has_coins and not left.has_coins:
circ <<= BV.uatom(right.num_coins, coins_id) \
.with_output(right.coins_id).aigbv
biases = right.coin_biases
else:
coins = BV.uatom(left.num_coins + right.num_coins, coins_id)
relabeler = coins[:left.num_coins].with_output(left.coins_id).aigbv \
| coins[left.num_coins:].with_output(right.coins_id).aigbv
circ <<= relabeler
biases = tuple(left.coin_biases) + tuple(right.coin_biases)
return circ, biases
def sample_coins(coin_biases: Sequence[float]) -> int:
result = 0
for i, bias in enumerate(coin_biases):
result |= int(random.random() < bias) << i
return result
@attr.s(frozen=True, auto_attribs=True)
class PCirc:
circ: FiniteFunc = attr.ib(converter=to_finite_func)
coin_biases: Sequence[Prob]
coins_id: str = "##coins"
def __attrs_post_init__(self):
if not self.has_coins:
return
circ: BV.AIGBV = self.circ.circ
if self.coins_id not in circ.imap:
raise ValueError("Underlying circuit doesn't have coins input.")
if circ.imap[self.coins_id].size != len(self.coin_biases):
raise ValueError("Underlying circuit doesn't have enough coins.")
@property
def has_coins(self): return len(self.coin_biases) > 0
@property
def inputs(self): return self.circ.inputs - {self.coins_id}
@property
def outputs(self): return self.circ.outputs
@property
def latches(self): return self.circ.latches
@property
def latch2init(self): return self.circ.latch2init
@property
def aig(self): return self.circ.aig
@property
def aigbv(self): return self.circ.aigbv
@property
def num_coins(self): return len(self.coin_biases)
@property
def imap(self):
return self.circ.imap.omit([self.coins_id])
@property
def omap(self):
return self.circ.omap.omit([self.coins_id])
def assume(self, aigbv_like) -> PCirc:
return attr.evolve(self, circ=self.circ.assume(aigbv_like))
def with_coins_id(self, name=None):
if name is None:
name = str(uuid.uuid1())
circ = self.circ
if self.has_coins:
circ = self.circ['i', {self.coins_id: name}]
return attr.evolve(self, circ=circ, coins_id=name)
def __rshift__(self, other) -> PCirc:
other = canon(other)
circ = self.circ >> other.circ
circ, biases = merge_pcirc_coins(circ, self, other, self.coins_id)
return PCirc(circ, coins_id=self.coins_id, coin_biases=biases)
def __lshift__(self, other) -> PCirc:
return canon(other) >> self
def __or__(self, other) -> PCirc:
other = canon(other)
circ = self.circ | other.circ
circ, biases = merge_pcirc_coins(circ, self, other, self.coins_id)
return PCirc(circ, coins_id=self.coins_id, coin_biases=biases)
def __call__(self, inputs, latches=None):
inputs = dict(inputs)
if self.has_coins:
inputs.setdefault(self.coins_id, sample_coins(self.coin_biases))
return self.circ(inputs=inputs, latches=latches)
def __getitem__(self, others) -> PCirc:
circ = self.circ[others]
kind, relabels = others
if (kind == 'i') and (self.coins_id in relabels):
raise ValueError("Use with_coins_id to relabel coins.")
return attr.evolve(self, circ=circ)
def loopback(self, *wirings) -> PCirc:
inputs = self.inputs
assert all(w['input'] in inputs for w in wirings)
circ = self.circ.loopback(*wirings)
return attr.evolve(self, circ=circ)
def unroll(self,
horizon, *,
init=True,
omit_latches=True,
only_last_outputs=False) -> PCirc:
circ = self.circ.unroll(
horizon=horizon, init=init, omit_latches=omit_latches,
only_last_outputs=only_last_outputs,
)
if not self.has_coins:
return attr.evolve(self, circ=circ)
coins = BV.uatom(self.num_coins * horizon, self.coins_id)
relabeler = BV.aig2aigbv(aiger.empty())
for time in range(horizon):
name = f"{self.coins_id}##time_{time}"
assert name in circ.inputs
start = time * self.num_coins
end = start + self.num_coins
relabeler |= coins[start:end].with_output(name).aigbv
circ <<= relabeler
biases = self.coin_biases * horizon
return PCirc(circ, coins_id=self.coins_id, coin_biases=biases)
def randomize(self, dist_map: Mapping[str, Distribution]) -> PCirc:
circ = BV.aig2aigbv(aiger.empty())
for name in dist_map.keys():
size = self.circ.circ.imap[name].size
circ |= BV.identity_gate(size, name)
func = aiger_discrete.from_aigbv(
circ=circ,
input_encodings=self.circ.input_encodings,
output_encodings=self.circ.output_encodings,
)
return pcirc(func, dist_map) >> self
simulator = aiger_discrete.FiniteFunc.simulator
simulate = aiger_discrete.FiniteFunc.simulate
def pcirc(func,
dist_map: Optional[Mapping[str, Distribution]] = None,
tree_encoding: bool = False) -> PCirc:
func = to_finite_func(func)
if dist_map is None:
return PCirc(circ=func, coin_biases=())
gadgets = coin_gadgets(dist_map, func, tree_encoding)
return gadgets >> func
def canon(circ) -> PCirc:
if not isinstance(circ, PCirc):
circ = PCirc(circ, coin_biases=())
return circ.with_coins_id()
__all__ = ['PCirc', 'pcirc']
| true | true |
f73788ed4caf943d9fa5b917f5ab8ead5a549021 | 5,333 | py | Python | sdk/python/pulumi_kubernetes/rbac/v1beta1/ClusterRoleBinding.py | MatheusMiranda/pulumi-kubernetes | eecebd55fe96f63365194182a69d99eda625bb96 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_kubernetes/rbac/v1beta1/ClusterRoleBinding.py | MatheusMiranda/pulumi-kubernetes | eecebd55fe96f63365194182a69d99eda625bb96 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_kubernetes/rbac/v1beta1/ClusterRoleBinding.py | MatheusMiranda/pulumi-kubernetes | eecebd55fe96f63365194182a69d99eda625bb96 | [
"Apache-2.0"
] | null | null | null | # *** WARNING: this file was generated by the Pulumi Kubernetes codegen tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
from typing import Optional
import pulumi
import pulumi.runtime
from pulumi import Input, ResourceOptions
from ... import tables, version
class ClusterRoleBinding(pulumi.CustomResource):
    """
    ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole
    in the global namespace, and adds who information via Subject.
    """
    apiVersion: pulumi.Output[str]
    """
    APIVersion defines the versioned schema of this representation of an object. Servers should
    convert recognized schemas to the latest internal value, and may reject unrecognized values.
    More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
    """
    kind: pulumi.Output[str]
    """
    Kind is a string value representing the REST resource this object represents. Servers may infer
    this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More
    info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
    """
    metadata: pulumi.Output[dict]
    """
    Standard object's metadata.
    """
    role_ref: pulumi.Output[dict]
    """
    RoleRef can only reference a ClusterRole in the global namespace. If the RoleRef cannot be
    resolved, the Authorizer must return an error.
    """
    subjects: pulumi.Output[list]
    """
    Subjects holds references to the objects the role applies to.
    """
    def __init__(self, resource_name, opts=None, role_ref=None, metadata=None, subjects=None, __name__=None, __opts__=None):
        """
        Create a ClusterRoleBinding resource with the given unique name, arguments, and options.

        :param str resource_name: The _unique_ name of the resource.
        :param pulumi.ResourceOptions opts: A bag of options that control this resource's behavior.
        :param pulumi.Input[dict] role_ref: RoleRef can only reference a ClusterRole in the global namespace. If the RoleRef
               cannot be resolved, the Authorizer must return an error.
        :param pulumi.Input[dict] metadata: Standard object's metadata.
        :param pulumi.Input[list] subjects: Subjects holds references to the objects the role applies to.
        """
        # Deprecated dunder-style arguments are remapped for backwards compat.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if not resource_name:
            raise TypeError('Missing resource name argument (for URN creation)')
        if not isinstance(resource_name, str):
            raise TypeError('Expected resource name to be a string')
        if opts and not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')

        # apiVersion/kind are fixed by this resource type; role_ref is required.
        __props__ = dict()

        __props__['apiVersion'] = 'rbac.authorization.k8s.io/v1beta1'
        __props__['kind'] = 'ClusterRoleBinding'
        if role_ref is None:
            raise TypeError('Missing required property role_ref')
        __props__['roleRef'] = role_ref
        __props__['metadata'] = metadata
        __props__['subjects'] = subjects
        __props__['status'] = None
        additional_secret_outputs = [
        ]
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(
            version=version.get_version(), additional_secret_outputs=additional_secret_outputs))
        # NOTE(review): `parent` is computed but never used (generated code).
        parent = opts.parent if opts and opts.parent else None
        aliases = [
        ]
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(
            version=version.get_version(), aliases=aliases))
        super(ClusterRoleBinding, self).__init__(
            "kubernetes:rbac.authorization.k8s.io/v1beta1:ClusterRoleBinding",
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name, id, opts=None):
        """
        Get the state of an existing `ClusterRoleBinding` resource, as identified by `id`.
        The ID is of the form `[namespace]/[name]`; if `[namespace]` is omitted,
        then (per Kubernetes convention) the ID becomes `default/[name]`.

        Pulumi will keep track of this resource using `resource_name` as the Pulumi ID.

        :param str resource_name: _Unique_ name used to register this resource with Pulumi.
        :param pulumi.Input[str] id: An ID for the Kubernetes resource to retrieve.
               Takes the form `[namespace]/[name]` or `[name]`.
        :param Optional[pulumi.ResourceOptions] opts: A bag of options that control this
               resource's behavior.
        """
        opts = ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        return ClusterRoleBinding(resource_name, opts)

    def translate_output_property(self, prop: str) -> str:
        # Map engine-side camelCase property names back to Python snake_case.
        return tables._CASING_FORWARD_TABLE.get(prop) or prop

    def translate_input_property(self, prop: str) -> str:
        # Map Python snake_case property names to engine-side camelCase.
        return tables._CASING_BACKWARD_TABLE.get(prop) or prop
| 41.664063 | 124 | 0.68498 |
import warnings
from typing import Optional
import pulumi
import pulumi.runtime
from pulumi import Input, ResourceOptions
from ... import tables, version
class ClusterRoleBinding(pulumi.CustomResource):
apiVersion: pulumi.Output[str]
kind: pulumi.Output[str]
metadata: pulumi.Output[dict]
role_ref: pulumi.Output[dict]
subjects: pulumi.Output[list]
def __init__(self, resource_name, opts=None, role_ref=None, metadata=None, subjects=None, __name__=None, __opts__=None):
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if not resource_name:
raise TypeError('Missing resource name argument (for URN creation)')
if not isinstance(resource_name, str):
raise TypeError('Expected resource name to be a string')
if opts and not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
__props__ = dict()
__props__['apiVersion'] = 'rbac.authorization.k8s.io/v1beta1'
__props__['kind'] = 'ClusterRoleBinding'
if role_ref is None:
raise TypeError('Missing required property role_ref')
__props__['roleRef'] = role_ref
__props__['metadata'] = metadata
__props__['subjects'] = subjects
__props__['status'] = None
additional_secret_outputs = [
]
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(
version=version.get_version(), additional_secret_outputs=additional_secret_outputs))
parent = opts.parent if opts and opts.parent else None
aliases = [
]
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(
version=version.get_version(), aliases=aliases))
super(ClusterRoleBinding, self).__init__(
"kubernetes:rbac.authorization.k8s.io/v1beta1:ClusterRoleBinding",
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None):
opts = ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
return ClusterRoleBinding(resource_name, opts)
def translate_output_property(self, prop: str) -> str:
return tables._CASING_FORWARD_TABLE.get(prop) or prop
def translate_input_property(self, prop: str) -> str:
return tables._CASING_BACKWARD_TABLE.get(prop) or prop
| true | true |
f7378950d246488d9590ce8266a72dc5659ccc9a | 4,465 | py | Python | third_party/maya/lib/usdMaya/testenv/testUsdImportFrameRange.py | YuqiaoZhang/USD | bf3a21e6e049486441440ebf8c0387db2538d096 | [
"BSD-2-Clause"
] | 88 | 2018-07-13T01:22:00.000Z | 2022-01-16T22:15:27.000Z | third_party/maya/lib/usdMaya/testenv/testUsdImportFrameRange.py | YuqiaoZhang/USD | bf3a21e6e049486441440ebf8c0387db2538d096 | [
"BSD-2-Clause"
] | 1 | 2020-07-07T22:39:42.000Z | 2020-07-07T22:39:42.000Z | third_party/maya/lib/usdMaya/testenv/testUsdImportFrameRange.py | YuqiaoZhang/USD | bf3a21e6e049486441440ebf8c0387db2538d096 | [
"BSD-2-Clause"
] | 26 | 2018-06-06T03:39:22.000Z | 2021-08-28T23:02:42.000Z | #!/pxrpythonsubst
#
# Copyright 2016 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
import os
import unittest
from pxr import Usd
from maya import cmds
from maya import standalone
class testUsdImportFrameRange(unittest.TestCase):
    """Tests for usdImport's frameRange option against an animated cube."""

    def _LoadUsdWithRange(self, start=None, end=None):
        """Import MovingCube.usda with animation, optionally clamped to [start, end]."""
        # Import the USD file.
        usdFilePath = os.path.abspath('MovingCube.usda')
        cmds.loadPlugin('pxrUsd')

        if start is not None and end is not None:
            cmds.usdImport(file=usdFilePath, readAnimData=True,
                frameRange=(start, end))
        else:
            cmds.usdImport(file=usdFilePath, readAnimData=True)

        self.stage = Usd.Stage.Open(usdFilePath)
        self.assertTrue(self.stage)

    @classmethod
    def tearDownClass(cls):
        # Shut down the standalone Maya session once all tests finish.
        standalone.uninitialize()

    @classmethod
    def setUpClass(cls):
        # Start a standalone Maya session with the usd plugin environment.
        standalone.initialize('usd')

    def setUp(self):
        # Each test starts from an empty scene.
        cmds.file(new=True, force=True)

    def testUsdImport(self):
        """
        Tests a simple import with frame range specified.
        """
        self._LoadUsdWithRange(1, 15)

        # 14 keys: frames 1-15 minus frame 5, which has no sample in the USD.
        numKeyFrames = cmds.keyframe("pCube1.translateX", query=True,
                keyframeCount=True)
        self.assertEqual(numKeyFrames, 14)

        keyTimes = cmds.keyframe("pCube1.translateX", index=(0,14), query=True)
        self.assertEqual(keyTimes, [float(x) for x in range(1, 16) if x != 5.0])

    def testUsdImportNoRangeSpecified(self):
        """
        Tests an import with animation but no range specified.
        """
        self._LoadUsdWithRange()

        # All 29 authored keys (frames 1-30 minus frame 5) are imported.
        numKeyFrames = cmds.keyframe("pCube1.translateX", query=True,
                keyframeCount=True)
        self.assertEqual(numKeyFrames, 29)

        keyTimes = cmds.keyframe("pCube1.translateX", index=(0,29), query=True)
        self.assertEqual(keyTimes, [float(x) for x in range(1, 31) if x != 5.0])

    def testUsdImportOverlyLargeRange(self):
        """
        Tests an import frame range that is larger than the time range of
        animation available in USD prims.
        """
        self._LoadUsdWithRange(-100, 100)

        # The range is clamped to the authored animation; same 29 keys.
        numKeyFrames = cmds.keyframe("pCube1.translateX", query=True,
                keyframeCount=True)
        self.assertEqual(numKeyFrames, 29)

        keyTimes = cmds.keyframe("pCube1.translateX", index=(0,29), query=True)
        self.assertEqual(keyTimes, [float(x) for x in range(1, 31) if x != 5.0])

    def testUsdImportOutOfRange(self):
        """
        Tests an import frame range that doesn't intersect with the time range
        of animation available in USD prims.
        """
        self._LoadUsdWithRange(-200, -100)

        # No overlap with the animation: no keys are created.
        numKeyFrames = cmds.keyframe("pCube1.translateX", query=True,
                keyframeCount=True)
        self.assertEqual(numKeyFrames, 0)

    def testUsdImportSingle(self):
        """
        Tests an import frame range that is only one frame.
        """
        self._LoadUsdWithRange(29, 29)

        # The single sampled value is applied as a static attribute value.
        xValue = cmds.getAttr("pCube1.translateX")
        self.assertAlmostEqual(xValue, 11.7042500857406)

        numKeyFrames = cmds.keyframe("pCube1.translateX", query=True,
                keyframeCount=True)
        self.assertEqual(numKeyFrames, 0) # Only one frame, so no real animation.

        keyTimes = cmds.keyframe("pCube1.translateX", index=(0,0), query=True)
        self.assertEqual(keyTimes, None) # Only one frame, so no real animation.
if __name__ == '__main__':
    unittest.main(verbosity=2)  # Run this module's tests when executed directly.
| 34.083969 | 81 | 0.670101 |
import os
import unittest
from pxr import Usd
from maya import cmds
from maya import standalone
class testUsdImportFrameRange(unittest.TestCase):
    """Exercises the ``frameRange`` flag of Maya's ``usdImport`` command.

    Every test imports MovingCube.usda and inspects the keys created on
    pCube1.translateX.  The expected values imply the layer is animated
    over frames 1-30 with no sample at frame 5 -- inferred from the
    ``if x != 5.0`` filters below; confirm against the .usda fixture.
    """
    def _LoadUsdWithRange(self, start=None, end=None):
        """Import MovingCube.usda, restricted to [start, end] when both are given."""
        usdFilePath = os.path.abspath('MovingCube.usda')
        cmds.loadPlugin('pxrUsd')
        if start is not None and end is not None:
            cmds.usdImport(file=usdFilePath, readAnimData=True,
                frameRange=(start, end))
        else:
            cmds.usdImport(file=usdFilePath, readAnimData=True)
        # Keep the source stage available so tests can inspect it directly.
        self.stage = Usd.Stage.Open(usdFilePath)
        self.assertTrue(self.stage)
    @classmethod
    def tearDownClass(cls):
        standalone.uninitialize()
    @classmethod
    def setUpClass(cls):
        standalone.initialize('usd')
    def setUp(self):
        # Start each test from a clean Maya scene.
        cmds.file(new=True, force=True)
    def testUsdImport(self):
        """A range inside the animation imports only the covered keys."""
        self._LoadUsdWithRange(1, 15)
        numKeyFrames = cmds.keyframe("pCube1.translateX", query=True,
            keyframeCount=True)
        # 15 frames in [1, 15] minus the missing sample at frame 5.
        self.assertEqual(numKeyFrames, 14)
        keyTimes = cmds.keyframe("pCube1.translateX", index=(0,14), query=True)
        self.assertEqual(keyTimes, [float(x) for x in range(1, 16) if x != 5.0])
    def testUsdImportNoRangeSpecified(self):
        """Omitting frameRange imports every authored key."""
        self._LoadUsdWithRange()
        numKeyFrames = cmds.keyframe("pCube1.translateX", query=True,
            keyframeCount=True)
        self.assertEqual(numKeyFrames, 29)
        keyTimes = cmds.keyframe("pCube1.translateX", index=(0,29), query=True)
        self.assertEqual(keyTimes, [float(x) for x in range(1, 31) if x != 5.0])
    def testUsdImportOverlyLargeRange(self):
        """A range wider than the animation behaves like no range at all."""
        self._LoadUsdWithRange(-100, 100)
        numKeyFrames = cmds.keyframe("pCube1.translateX", query=True,
            keyframeCount=True)
        self.assertEqual(numKeyFrames, 29)
        keyTimes = cmds.keyframe("pCube1.translateX", index=(0,29), query=True)
        self.assertEqual(keyTimes, [float(x) for x in range(1, 31) if x != 5.0])
    def testUsdImportOutOfRange(self):
        """A range that misses the animation entirely creates no keys."""
        self._LoadUsdWithRange(-200, -100)
        numKeyFrames = cmds.keyframe("pCube1.translateX", query=True,
            keyframeCount=True)
        self.assertEqual(numKeyFrames, 0)
    def testUsdImportSingle(self):
        """A single-frame range sets a static value instead of keys."""
        self._LoadUsdWithRange(29, 29)
        xValue = cmds.getAttr("pCube1.translateX")
        self.assertAlmostEqual(xValue, 11.7042500857406)
        numKeyFrames = cmds.keyframe("pCube1.translateX", query=True,
            keyframeCount=True)
        self.assertEqual(numKeyFrames, 0)
        keyTimes = cmds.keyframe("pCube1.translateX", index=(0,0), query=True)
        self.assertEqual(keyTimes, None)  # Only one frame, so no real animation.
if __name__ == '__main__':
unittest.main(verbosity=2)
| true | true |
f737899751ad1f4262188710f83c78d542af1fc8 | 1,016 | py | Python | src/fact/cli.py | nagagopi19/19thAug2020-caswstudy | fe3d5fa75b9d3d3b9ef1a2013866e9f9fff60ec8 | [
"MIT"
] | null | null | null | src/fact/cli.py | nagagopi19/19thAug2020-caswstudy | fe3d5fa75b9d3d3b9ef1a2013866e9f9fff60ec8 | [
"MIT"
] | null | null | null | src/fact/cli.py | nagagopi19/19thAug2020-caswstudy | fe3d5fa75b9d3d3b9ef1a2013866e9f9fff60ec8 | [
"MIT"
] | 1 | 2021-09-10T14:49:59.000Z | 2021-09-10T14:49:59.000Z | #!/usr/bin/env python3
import argparse
import sys
import colorama
from exitstatus import ExitStatus
from fact.lib import factorial
def parse_args() -> argparse.Namespace:
    """Read and validate the command line arguments."""
    cli = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="Compute factorial of a given input.",
    )
    cli.add_argument(
        "-n",
        type=int,
        required=True,
        help="The input n of fact(n).",
    )
    return cli.parse_args()
def main() -> ExitStatus:
    """Accept arguments from the user, compute the factorial, and display the results."""
    colorama.init(autoreset=True, strip=False)
    cli_args = parse_args()
    colored_input = f"{colorama.Fore.CYAN}{cli_args.n}{colorama.Fore.RESET}"
    colored_result = f"{colorama.Fore.GREEN}{factorial(cli_args.n)}{colorama.Fore.RESET}"
    print(f"fact({colored_input}) = {colored_result}")
    return ExitStatus.success


# Allow the script to be run standalone (useful during development in PyCharm).
if __name__ == "__main__":
    sys.exit(main())
import argparse
import sys
import colorama
from exitstatus import ExitStatus
from fact.lib import factorial
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(
description="Compute factorial of a given input.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("-n", type=int, required=True, help="The input n of fact(n).")
return parser.parse_args()
def main() -> ExitStatus:
colorama.init(autoreset=True, strip=False)
args = parse_args()
print(
f"fact({colorama.Fore.CYAN}{args.n}{colorama.Fore.RESET}) = "
f"{colorama.Fore.GREEN}{factorial(args.n)}{colorama.Fore.RESET}"
)
return ExitStatus.success
if __name__ == "__main__":
sys.exit(main())
| true | true |
f7378d80bbdae3bef7c248482c5c931876ca66a6 | 1,062 | py | Python | aula1705_revisao/arquivos_hard.py | fillipesouza/aulasdelogicaprogramacao | 409a9b82433eea9bcef2203c7c48ac0ab698f5db | [
"MIT"
] | 1 | 2021-06-30T11:53:21.000Z | 2021-06-30T11:53:21.000Z | aula1705_revisao/arquivos_hard.py | fillipesouza/aulasdelogicaprogramacao | 409a9b82433eea9bcef2203c7c48ac0ab698f5db | [
"MIT"
] | null | null | null | aula1705_revisao/arquivos_hard.py | fillipesouza/aulasdelogicaprogramacao | 409a9b82433eea9bcef2203c7c48ac0ab698f5db | [
"MIT"
# Goal: write a class report file of the form
#   student -- average
#   ===============================
#   Approved --- total_approved (average >= 6)
alunos = []
medias = []
with open('./alunos.txt', 'r') as arquivo_alunos:
    try:
        for linha in arquivo_alunos.readlines():
            # Each line is "name,grade1,grade2,...".
            campos = linha.split(',')
            alunos.append(campos[0])
            notas = [float(nota) for nota in campos[1:]]
            medias.append(sum(notas) / len(notas))
            print(medias)  # progress/debug output, kept from the original
    # Catch only the failures parsing can actually raise instead of a bare
    # `except:`, which would also swallow KeyboardInterrupt/SystemExit.
    except (ValueError, ZeroDivisionError, IndexError):
        print('deu ruim')
# NOTE: the explicit .close() calls were removed -- the `with` statements
# already close both files on exit.
with open('./boletim_classe.txt', 'w') as boletim:
    linhas_do_boletim = [f'{aluno} \t {media}' for aluno, media in zip(alunos, medias)]
    linhas_do_boletim.append('================')
    aprovados = len([media for media in medias if media >= 6])
    linhas_do_boletim.append(f'Aprovados \t {aprovados}')
    boletim.write('\n'.join(linhas_do_boletim))
alunos = []
medias = []
with open('./alunos.txt', 'r') as arquivo_alunos:
try:
linhas = arquivo_alunos.readlines()
for linha in linhas:
linha_com_split = linha.split(',')
alunos.append(linha_com_split[0])
notas = list(map(lambda nota: float(nota), linha_com_split[1:]))
media = sum(notas)/len(notas)
medias.append(media)
print(medias)
except:
print('deu ruim')
arquivo_alunos.close()
with open('./boletim_classe.txt', 'w') as boletim:
linhas_do_boletim = []
for i in range(len(alunos)):
linhas_do_boletim.append(f'{alunos[i]} \t {medias[i]}')
linhas_do_boletim.append('================')
aprovados = len( list( filter(lambda media: media >= 6, medias) ) )
linhas_do_boletim.append(f'Aprovados \t {aprovados}')
boletim.write(str.join('\n', linhas_do_boletim))
boletim.close() | true | true |
f7378d85ebe9b6cfc3c79ed398382806d5a100e5 | 2,513 | py | Python | test/runtime/frontend_test/chainer_test/functions_test/connection_test/convolution_2d_function_test.py | steerapi/webdnn | 1df51cc094e5a528cfd3452c264905708eadb491 | [
"MIT"
] | 1 | 2021-04-09T15:55:35.000Z | 2021-04-09T15:55:35.000Z | test/runtime/frontend_test/chainer_test/functions_test/connection_test/convolution_2d_function_test.py | steerapi/webdnn | 1df51cc094e5a528cfd3452c264905708eadb491 | [
"MIT"
] | null | null | null | test/runtime/frontend_test/chainer_test/functions_test/connection_test/convolution_2d_function_test.py | steerapi/webdnn | 1df51cc094e5a528cfd3452c264905708eadb491 | [
"MIT"
] | null | null | null | import chainer
import numpy as np
from test.util import generate_kernel_test_case, wrap_template
from webdnn.graph.placeholder import Placeholder
from webdnn.frontend.chainer.converter import ChainerConverter
from webdnn.frontend.chainer.placeholder_variable import PlaceholderVariable
@wrap_template
def template(n=2, c_in=4, h_in=6, w_in=8, c_out=10, ksize=3, stride=1, pad=0, nobias=True, description=""):
    """Build a Convolution2D link, convert it with ChainerConverter, and emit
    a kernel test case comparing WebDNN output against Chainer's."""
    conv = chainer.links.Convolution2D(c_in, c_out, ksize=ksize, stride=stride, pad=pad, nobias=nobias)
    v_in = chainer.Variable(np.random.rand(n, c_in, h_in, w_in).astype(np.float32))
    v_out = conv(v_in)
    graph = ChainerConverter().convert([v_in], [v_out])
    graph_in = graph.inputs[0]
    graph_out = graph.outputs[0]
    generate_kernel_test_case(
        description=f"[chainer] L.Convolution2D {description}",
        graph=graph,
        inputs={graph_in: v_in.data},
        expected={graph_out: v_out.data},
        EPS=1e-2
    )
# Each test below drives the shared template() harness with one specific
# Convolution2D configuration (kernel / stride / padding variations).
def test():
    template()
def test_nobias():
    template(nobias=True)
def test_nopadding():
    template(pad=0)
def test_irregular_kernel_size():
    # Non-square kernel.
    template(ksize=(3, 4))
def test_irregular_stride_size():
    # Different stride per axis.
    template(stride=(2, 3))
def test_irregular_padding_size1():
    template(pad=(1, 2))
def test_irregular_padding_size2():
    template(pad=2)
def test_irregular_padding_size3():
    template(pad=2, ksize=5)
def test_irregular_padding_size4():
    # Padding on one axis only.
    template(pad=(1, 0))
def test_irregular_size():
    # Everything irregular at once.
    template(ksize=(3, 5), stride=(2, 3), pad=(1, 3))
def test_special_size():
    # https://github.com/mil-tokyo/webdnn/issues/525
    # In case that the max position index (=n*c_in*h_in*w_in*ksize*ksize) > 1<<23
    template(n=1, c_in=1 << 6, h_in=1 << 7, w_in=1 << 7, c_out=3, ksize=(1 << 2) + 1, pad=1 << 1)
def test_with_placeholder():
    """Convert with symbolic batch/height/width (Placeholder) dimensions,
    then bind concrete values before generating the kernel test case."""
    link = chainer.links.Convolution2D(None, 16, ksize=3, stride=1, pad=1)
    vx = chainer.Variable(np.random.rand(1, 3, 16, 16).astype(np.float32))
    vy = link(vx)
    N = Placeholder(label="N")
    H = Placeholder(label="H")
    W = Placeholder(label="W")
    px = PlaceholderVariable([N, 3, H, W])
    py = link(px)
    graph = ChainerConverter().convert([px], [py])
    x = graph.inputs[0]
    y = graph.outputs[0]
    # Resolve the placeholders to the shape of the concrete sample above.
    N.value = 1
    H.value = 16
    W.value = 16
    generate_kernel_test_case(
        description=f"[chainer] L.Convolution2D with placeholder",
        graph=graph,
        backend=["webgpu", "webassembly"],
        inputs={x: vx.data},
        expected={y: vy.data},
        EPS=1e-2
    )
| 24.398058 | 107 | 0.657382 | import chainer
import numpy as np
from test.util import generate_kernel_test_case, wrap_template
from webdnn.graph.placeholder import Placeholder
from webdnn.frontend.chainer.converter import ChainerConverter
from webdnn.frontend.chainer.placeholder_variable import PlaceholderVariable
@wrap_template
def template(n=2, c_in=4, h_in=6, w_in=8, c_out=10, ksize=3, stride=1, pad=0, nobias=True, description=""):
link = chainer.links.Convolution2D(c_in, c_out, ksize=ksize, stride=stride, pad=pad, nobias=nobias)
vx = chainer.Variable(np.random.rand(n, c_in, h_in, w_in).astype(np.float32))
vy = link(vx)
graph = ChainerConverter().convert([vx], [vy])
x = graph.inputs[0]
y = graph.outputs[0]
generate_kernel_test_case(
description=f"[chainer] L.Convolution2D {description}",
graph=graph,
inputs={x: vx.data},
expected={y: vy.data},
EPS=1e-2
)
def test():
template()
def test_nobias():
template(nobias=True)
def test_nopadding():
template(pad=0)
def test_irregular_kernel_size():
template(ksize=(3, 4))
def test_irregular_stride_size():
template(stride=(2, 3))
def test_irregular_padding_size1():
template(pad=(1, 2))
def test_irregular_padding_size2():
template(pad=2)
def test_irregular_padding_size3():
template(pad=2, ksize=5)
def test_irregular_padding_size4():
template(pad=(1, 0))
def test_irregular_size():
template(ksize=(3, 5), stride=(2, 3), pad=(1, 3))
def test_special_size():
template(n=1, c_in=1 << 6, h_in=1 << 7, w_in=1 << 7, c_out=3, ksize=(1 << 2) + 1, pad=1 << 1)
def test_with_placeholder():
link = chainer.links.Convolution2D(None, 16, ksize=3, stride=1, pad=1)
vx = chainer.Variable(np.random.rand(1, 3, 16, 16).astype(np.float32))
vy = link(vx)
N = Placeholder(label="N")
H = Placeholder(label="H")
W = Placeholder(label="W")
px = PlaceholderVariable([N, 3, H, W])
py = link(px)
graph = ChainerConverter().convert([px], [py])
x = graph.inputs[0]
y = graph.outputs[0]
N.value = 1
H.value = 16
W.value = 16
generate_kernel_test_case(
description=f"[chainer] L.Convolution2D with placeholder",
graph=graph,
backend=["webgpu", "webassembly"],
inputs={x: vx.data},
expected={y: vy.data},
EPS=1e-2
)
| true | true |
f7378d86a9f0d3e6a2d28726474b637ff6da08e3 | 5,794 | py | Python | minfraud/komand_minfraud/actions/card_lookup/schema.py | xhennessy-r7/insightconnect-plugins | 59268051313d67735b5dd3a30222eccb92aca8e9 | [
"MIT"
] | null | null | null | minfraud/komand_minfraud/actions/card_lookup/schema.py | xhennessy-r7/insightconnect-plugins | 59268051313d67735b5dd3a30222eccb92aca8e9 | [
"MIT"
] | null | null | null | minfraud/komand_minfraud/actions/card_lookup/schema.py | xhennessy-r7/insightconnect-plugins | 59268051313d67735b5dd3a30222eccb92aca8e9 | [
"MIT"
] | null | null | null | # GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Input:
    """Keys accepted by the card_lookup action's input payload (generated)."""
    ADDRESS = "address"
    AVS_RESULT = "avs_result"
    BANK_PHONE_COUNTRY_CODE = "bank_phone_country_code"
    BANK_PHONE_NUMBER = "bank_phone_number"
    CARD_BANK_NAME = "card_bank_name"
    CARD_ISSUER_ID_NUMBER = "card_issuer_id_number"
    CARD_LAST_4_DIGITS = "card_last_4_digits"
    CARD_TOKEN = "card_token"
    CVV_RESULT = "cvv_result"
class Output:
    """Keys present in the card_lookup action's output payload (generated)."""
    CREDIT_CARD_RESULT = "credit_card_result"
    RISK_SCORE = "risk_score"
class CardLookupInput(komand.Input):
    """Validates the input payload for the card_lookup action.

    Auto-generated by the Komand SDK (see file header); regenerate rather
    than editing the embedded JSON schema by hand.
    """
    schema = json.loads("""
   {
  "type": "object",
  "title": "Variables",
  "properties": {
    "address": {
      "type": "string",
      "title": "IP Address",
      "description": "IP address to query",
      "order": 1
    },
    "avs_result": {
      "type": "string",
      "title": "AVS Result",
      "description": "Address Verification System result",
      "order": 8
    },
    "bank_phone_country_code": {
      "type": "string",
      "title": "Bank Phone Country Code",
      "description": "Phone country code for bank",
      "order": 6
    },
    "bank_phone_number": {
      "type": "string",
      "title": "Bank Phone Number",
      "description": "Phone number for bank",
      "order": 7
    },
    "card_bank_name": {
      "type": "string",
      "title": "Issuing Bank",
      "description": "Issuing bank of the credit card",
      "order": 5
    },
    "card_issuer_id_number": {
      "type": "string",
      "title": "Card Issuer ID Number",
      "description": "Issuer ID number for the credit card",
      "order": 2
    },
    "card_last_4_digits": {
      "type": "string",
      "title": "Card Last 4 Digits",
      "description": "Last 4 digits of credit card",
      "order": 3
    },
    "card_token": {
      "type": "string",
      "title": "Credit Card Token",
      "description": "Token representing the credit card",
      "order": 4
    },
    "cvv_result": {
      "type": "string",
      "title": "CVV Result",
      "description": "Card Verification Value code",
      "order": 9
    }
  },
  "required": [
    "address"
  ]
}
  """)

    def __init__(self):
        super(self.__class__, self).__init__(self.schema)
class CardLookupOutput(komand.Output):
    """Validates the output payload for the card_lookup action.

    Auto-generated by the Komand SDK (see file header).  NOTE(review): the
    "issuer" definition appears twice -- once nested under
    definitions.credit_card.definitions and once at the top level of
    "definitions"; both copies are identical (generator artifact).
    """
    schema = json.loads("""
   {
  "type": "object",
  "title": "Variables",
  "properties": {
    "credit_card_result": {
      "$ref": "#/definitions/credit_card",
      "title": "Credit Card Result",
      "description": "Result for credit card",
      "order": 1
    },
    "risk_score": {
      "type": "string",
      "title": "Risk Score",
      "description": "Overall risk score",
      "order": 2
    }
  },
  "definitions": {
    "credit_card": {
      "type": "object",
      "title": "credit_card",
      "properties": {
        "brand": {
          "type": "string",
          "title": "Brand",
          "description": "Card brand",
          "order": 2
        },
        "country": {
          "type": "string",
          "title": "Country",
          "description": "Country of credit card",
          "order": 3
        },
        "is_issued_in_billing_address_country": {
          "type": "boolean",
          "title": "Is Issued In Billing Address Country",
          "description": "Is card issued in billing address country",
          "order": 4
        },
        "is_prepaid": {
          "type": "boolean",
          "title": "Is Prepaid",
          "description": "Is card prepaid",
          "order": 5
        },
        "issuer": {
          "$ref": "#/definitions/issuer",
          "title": "Issuer",
          "description": "Issuer data",
          "order": 1
        },
        "type": {
          "type": "string",
          "title": "Type",
          "description": "Card type",
          "order": 6
        }
      },
      "definitions": {
        "issuer": {
          "type": "object",
          "title": "issuer",
          "properties": {
            "matches_provided_name": {
              "type": "boolean",
              "title": "Matches Provided Name",
              "description": "Issuer name matches name provided",
              "order": 2
            },
            "matches_provided_phone_number": {
              "type": "boolean",
              "title": "Matches Provided Phone Number",
              "description": "Issuer phone number matches number provided",
              "order": 4
            },
            "name": {
              "type": "string",
              "title": "Name",
              "description": "Card issuer name",
              "order": 1
            },
            "phone_number": {
              "type": "string",
              "title": "Phone Number",
              "description": "Issuer phone number",
              "order": 3
            }
          }
        }
      }
    },
    "issuer": {
      "type": "object",
      "title": "issuer",
      "properties": {
        "matches_provided_name": {
          "type": "boolean",
          "title": "Matches Provided Name",
          "description": "Issuer name matches name provided",
          "order": 2
        },
        "matches_provided_phone_number": {
          "type": "boolean",
          "title": "Matches Provided Phone Number",
          "description": "Issuer phone number matches number provided",
          "order": 4
        },
        "name": {
          "type": "string",
          "title": "Name",
          "description": "Card issuer name",
          "order": 1
        },
        "phone_number": {
          "type": "string",
          "title": "Phone Number",
          "description": "Issuer phone number",
          "order": 3
        }
      }
    }
  }
}
  """)

    def __init__(self):
        super(self.__class__, self).__init__(self.schema)
| 25.866071 | 75 | 0.497411 |
import komand
import json
class Input:
ADDRESS = "address"
AVS_RESULT = "avs_result"
BANK_PHONE_COUNTRY_CODE = "bank_phone_country_code"
BANK_PHONE_NUMBER = "bank_phone_number"
CARD_BANK_NAME = "card_bank_name"
CARD_ISSUER_ID_NUMBER = "card_issuer_id_number"
CARD_LAST_4_DIGITS = "card_last_4_digits"
CARD_TOKEN = "card_token"
CVV_RESULT = "cvv_result"
class Output:
CREDIT_CARD_RESULT = "credit_card_result"
RISK_SCORE = "risk_score"
class CardLookupInput(komand.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"address": {
"type": "string",
"title": "IP Address",
"description": "IP address to query",
"order": 1
},
"avs_result": {
"type": "string",
"title": "AVS Result",
"description": "Address Verification System result",
"order": 8
},
"bank_phone_country_code": {
"type": "string",
"title": "Bank Phone Country Code",
"description": "Phone country code for bank",
"order": 6
},
"bank_phone_number": {
"type": "string",
"title": "Bank Phone Number",
"description": "Phone number for bank",
"order": 7
},
"card_bank_name": {
"type": "string",
"title": "Issuing Bank",
"description": "Issuing bank of the credit card",
"order": 5
},
"card_issuer_id_number": {
"type": "string",
"title": "Card Issuer ID Number",
"description": "Issuer ID number for the credit card",
"order": 2
},
"card_last_4_digits": {
"type": "string",
"title": "Card Last 4 Digits",
"description": "Last 4 digits of credit card",
"order": 3
},
"card_token": {
"type": "string",
"title": "Credit Card Token",
"description": "Token representing the credit card",
"order": 4
},
"cvv_result": {
"type": "string",
"title": "CVV Result",
"description": "Card Verification Value code",
"order": 9
}
},
"required": [
"address"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class CardLookupOutput(komand.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"credit_card_result": {
"$ref": "#/definitions/credit_card",
"title": "Credit Card Result",
"description": "Result for credit card",
"order": 1
},
"risk_score": {
"type": "string",
"title": "Risk Score",
"description": "Overall risk score",
"order": 2
}
},
"definitions": {
"credit_card": {
"type": "object",
"title": "credit_card",
"properties": {
"brand": {
"type": "string",
"title": "Brand",
"description": "Card brand",
"order": 2
},
"country": {
"type": "string",
"title": "Country",
"description": "Country of credit card",
"order": 3
},
"is_issued_in_billing_address_country": {
"type": "boolean",
"title": "Is Issued In Billing Address Country",
"description": "Is card issued in billing address country",
"order": 4
},
"is_prepaid": {
"type": "boolean",
"title": "Is Prepaid",
"description": "Is card prepaid",
"order": 5
},
"issuer": {
"$ref": "#/definitions/issuer",
"title": "Issuer",
"description": "Issuer data",
"order": 1
},
"type": {
"type": "string",
"title": "Type",
"description": "Card type",
"order": 6
}
},
"definitions": {
"issuer": {
"type": "object",
"title": "issuer",
"properties": {
"matches_provided_name": {
"type": "boolean",
"title": "Matches Provided Name",
"description": "Issuer name matches name provided",
"order": 2
},
"matches_provided_phone_number": {
"type": "boolean",
"title": "Matches Provided Phone Number",
"description": "Issuer phone number matches number provided",
"order": 4
},
"name": {
"type": "string",
"title": "Name",
"description": "Card issuer name",
"order": 1
},
"phone_number": {
"type": "string",
"title": "Phone Number",
"description": "Issuer phone number",
"order": 3
}
}
}
}
},
"issuer": {
"type": "object",
"title": "issuer",
"properties": {
"matches_provided_name": {
"type": "boolean",
"title": "Matches Provided Name",
"description": "Issuer name matches name provided",
"order": 2
},
"matches_provided_phone_number": {
"type": "boolean",
"title": "Matches Provided Phone Number",
"description": "Issuer phone number matches number provided",
"order": 4
},
"name": {
"type": "string",
"title": "Name",
"description": "Card issuer name",
"order": 1
},
"phone_number": {
"type": "string",
"title": "Phone Number",
"description": "Issuer phone number",
"order": 3
}
}
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
| true | true |
f7378dbd126246fe227c0d1477147fbb6adf240c | 665 | py | Python | Algorithms/4_Sorting/10.py | abphilip-codes/Hackerrank_DSA | bb9e233d9d45c5b14c138830602695ad4113fba4 | [
"MIT"
] | 1 | 2021-11-25T13:39:30.000Z | 2021-11-25T13:39:30.000Z | Algorithms/4_Sorting/10.py | abphilip-codes/Hackerrank_DSA | bb9e233d9d45c5b14c138830602695ad4113fba4 | [
"MIT"
] | null | null | null | Algorithms/4_Sorting/10.py | abphilip-codes/Hackerrank_DSA | bb9e233d9d45c5b14c138830602695ad4113fba4 | [
"MIT"
] | null | null | null | # https://www.hackerrank.com/challenges/countingsort2/problem
#!/bin/python3
import math
import os
import random
import re
import sys
from collections import Counter
#
# Complete the 'countingSort' function below.
#
# The function is expected to return an INTEGER_ARRAY.
# The function accepts INTEGER_ARRAY arr as parameter.
#
def countingSort(arr):
    """Counting sort over values in [0, 100).

    Returns one string per distinct value, e.g. ['1 1 ', '2 '], so the
    caller's ``''.join`` prints the sorted sequence space-separated (the
    exact format the grader expects).  Tallying once with ``Counter``
    replaces the original per-value ``arr.count``/``in arr`` scans,
    turning O(100*n) into O(n + 100) with identical output.
    """
    freq = Counter(arr)
    return [(str(z) + ' ') * freq[z] for z in range(100) if z in freq]
# HackerRank harness: read n and the array from stdin, write the joined
# result to the grader's OUTPUT_PATH file.
if __name__ == '__main__':
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    n = int(input().strip())  # declared length; not otherwise used
    arr = list(map(int, input().rstrip().split()))
    result = countingSort(arr)
    # Each result entry already carries its own trailing space.
    fptr.write(''.join(map(str, result)))
    fptr.write('\n')
fptr.close() | 20.151515 | 70 | 0.67218 |
import math
import os
import random
import re
import sys
def countingSort(arr):
return [(str(z)+' ')*arr.count(z) for z in range(100) if z in arr]
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input().strip())
arr = list(map(int, input().rstrip().split()))
result = countingSort(arr)
fptr.write(''.join(map(str, result)))
fptr.write('\n')
fptr.close() | true | true |
f7378e25272db7d8b1378cf7ffe0bdfc7ea625d5 | 1,416 | py | Python | noxfile.py | iterative/pytest-test-utils | 2077af9971c2774c444ba5cd894658f726f14791 | [
"Apache-2.0"
] | 7 | 2021-11-30T10:28:41.000Z | 2022-01-14T09:13:21.000Z | noxfile.py | iterative/pytest-test-utils | 2077af9971c2774c444ba5cd894658f726f14791 | [
"Apache-2.0"
] | null | null | null | noxfile.py | iterative/pytest-test-utils | 2077af9971c2774c444ba5cd894658f726f14791 | [
"Apache-2.0"
] | 1 | 2021-11-30T06:54:47.000Z | 2021-11-30T06:54:47.000Z | """Automation using nox.
"""
import glob
import nox
# Reuse each session's virtualenv across runs to speed up local iteration.
nox.options.reuse_existing_virtualenvs = True
# Sessions executed by a bare `nox` invocation.
nox.options.sessions = "lint", "tests", "tests-pytest5"
# Targets passed to pylint below.
locations = "pytest_test_utils", "tests.py"
@nox.session(python=["3.7", "3.8", "3.9", "3.10"])
def tests(session: nox.Session) -> None:
    """Run the test suite with coverage on every supported Python."""
    session.install(".[tests]")
    # `pytest --cov` will start coverage after pytest
    # so we need to use `coverage`.
    session.run("coverage", "run", "-m", "pytest")
    session.run("coverage", "report", "--show-missing", "--skip-covered")
@nox.session(python=["3.7"], name="tests-pytest5")
def tests_pytest5(session: nox.Session) -> None:
    """Check compatibility with the oldest supported pytest (5.0.0)."""
    session.install(".[tests]")
    # Pin pytest 5 after installing the extras so the pin wins.
    session.install("pytest==5.0.0")
    session.run("coverage", "run", "-m", "pytest", "tests.py")
@nox.session
def lint(session: nox.Session) -> None:
    """Run pre-commit hooks, then mypy and pylint."""
    session.install("pre-commit")
    session.install("-e", ".[dev]")
    if session.posargs:
        # Forward extra CLI args (e.g. a specific hook id) to pre-commit.
        args = session.posargs + ["--all-files"]
    else:
        args = ["--all-files", "--show-diff-on-failure"]
    session.run("pre-commit", "run", *args)
    session.run("python", "-m", "mypy")
    session.run("python", "-m", "pylint", *locations)
@nox.session
def build(session: nox.Session) -> None:
    """Build the sdist/wheel into dist/ and sanity-check them with twine."""
    session.install("build", "setuptools", "twine")
    session.run("python", "-m", "build")
    session.run("twine", "check", *glob.glob("dist/*"), silent=True)
import nox
nox.options.reuse_existing_virtualenvs = True
nox.options.sessions = "lint", "tests", "tests-pytest5"
locations = "pytest_test_utils", "tests.py"
@nox.session(python=["3.7", "3.8", "3.9", "3.10"])
def tests(session: nox.Session) -> None:
session.install(".[tests]")
session.run("coverage", "run", "-m", "pytest")
session.run("coverage", "report", "--show-missing", "--skip-covered")
@nox.session(python=["3.7"], name="tests-pytest5")
def tests_pytest5(session: nox.Session) -> None:
session.install(".[tests]")
session.install("pytest==5.0.0")
session.run("coverage", "run", "-m", "pytest", "tests.py")
@nox.session
def lint(session: nox.Session) -> None:
session.install("pre-commit")
session.install("-e", ".[dev]")
if session.posargs:
args = session.posargs + ["--all-files"]
else:
args = ["--all-files", "--show-diff-on-failure"]
session.run("pre-commit", "run", *args)
session.run("python", "-m", "mypy")
session.run("python", "-m", "pylint", *locations)
@nox.session
def build(session: nox.Session) -> None:
session.install("build", "setuptools", "twine")
session.run("python", "-m", "build")
dists = glob.glob("dist/*")
session.run("twine", "check", *dists, silent=True)
| true | true |
f7378e54963fc94f39d6eecdddf8bf6494ebcae4 | 122,150 | py | Python | python/ccxt/base/exchange.py | MitchellTesla/ccxt | 613d5e8a0c9156c24ef2e57f8b0e5c84527558fd | [
"MIT"
] | 1 | 2022-02-18T03:54:29.000Z | 2022-02-18T03:54:29.000Z | python/ccxt/base/exchange.py | MitchellTesla/ccxt | 613d5e8a0c9156c24ef2e57f8b0e5c84527558fd | [
"MIT"
] | null | null | null | python/ccxt/base/exchange.py | MitchellTesla/ccxt | 613d5e8a0c9156c24ef2e57f8b0e5c84527558fd | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Base exchange class"""
# -----------------------------------------------------------------------------
__version__ = '1.81.74'
# -----------------------------------------------------------------------------
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import NetworkError
from ccxt.base.errors import NotSupported
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import RequestTimeout
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import BadRequest
from ccxt.base.errors import RateLimitExceeded
# -----------------------------------------------------------------------------
from ccxt.base.decimal_to_precision import decimal_to_precision
from ccxt.base.decimal_to_precision import DECIMAL_PLACES, NO_PADDING, TRUNCATE, ROUND, ROUND_UP, ROUND_DOWN
from ccxt.base.decimal_to_precision import number_to_string
from ccxt.base.precise import Precise
# -----------------------------------------------------------------------------
# rsa jwt signing
from cryptography.hazmat import backends
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives.serialization import load_pem_private_key
# -----------------------------------------------------------------------------
# ecdsa signing
from ccxt.static_dependencies import ecdsa
from ccxt.static_dependencies import keccak
# eddsa signing
try:
import axolotl_curve25519 as eddsa
except ImportError:
eddsa = None
# -----------------------------------------------------------------------------
__all__ = [
'Exchange',
]
# -----------------------------------------------------------------------------
import types
import logging
import base64
import calendar
import collections
import datetime
from email.utils import parsedate
import functools
import gzip
import hashlib
import hmac
import io
import json
import math
import random
from numbers import Number
import re
from requests import Session
from requests.utils import default_user_agent
from requests.exceptions import HTTPError, Timeout, TooManyRedirects, RequestException, ConnectionError as requestsConnectionError
# import socket
from ssl import SSLError
# import sys
import time
import uuid
import zlib
from decimal import Decimal
from time import mktime
from wsgiref.handlers import format_date_time
import urllib.parse as _urlencode
# -----------------------------------------------------------------------------
class Exchange(object):
"""Base exchange class"""
id = None
name = None
version = None
certified = False # if certified by the CCXT dev team
pro = False # if it is integrated with CCXT Pro for WebSocket support
alias = False # whether this exchange is an alias to another exchange
# rate limiter settings
enableRateLimit = True
rateLimit = 2000 # milliseconds = seconds * 1000
timeout = 10000 # milliseconds = seconds * 1000
asyncio_loop = None
aiohttp_proxy = None
aiohttp_trust_env = False
session = None # Session () by default
verify = True # SSL verification
validateServerSsl = True
validateClientSsl = False
logger = None # logging.getLogger(__name__) by default
userAgent = None
userAgents = {
'chrome': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36',
'chrome39': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36',
'chrome100': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.75 Safari/537.36',
}
verbose = False
markets = None
symbols = None
codes = None
timeframes = None
fees = {
'trading': {
'percentage': True, # subclasses should rarely have to redefine this
},
'funding': {
'withdraw': {},
'deposit': {},
},
}
loaded_fees = {
'trading': {
'percentage': True,
},
'funding': {
'withdraw': {},
'deposit': {},
},
}
ids = None
urls = None
api = None
parseJsonResponse = True
proxy = ''
origin = '*' # CORS origin
proxies = None
hostname = None # in case of inaccessibility of the "main" domain
apiKey = ''
secret = ''
password = ''
uid = ''
privateKey = '' # a "0x"-prefixed hexstring private key for a wallet
walletAddress = '' # the wallet address "0x"-prefixed hexstring
token = '' # reserved for HTTP auth in some cases
twofa = None
markets_by_id = None
currencies_by_id = None
precision = None
exceptions = None
limits = {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': None,
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': None,
'max': None,
},
}
httpExceptions = {
'422': ExchangeError,
'418': DDoSProtection,
'429': RateLimitExceeded,
'404': ExchangeNotAvailable,
'409': ExchangeNotAvailable,
'410': ExchangeNotAvailable,
'500': ExchangeNotAvailable,
'501': ExchangeNotAvailable,
'502': ExchangeNotAvailable,
'520': ExchangeNotAvailable,
'521': ExchangeNotAvailable,
'522': ExchangeNotAvailable,
'525': ExchangeNotAvailable,
'526': ExchangeNotAvailable,
'400': ExchangeNotAvailable,
'403': ExchangeNotAvailable,
'405': ExchangeNotAvailable,
'503': ExchangeNotAvailable,
'530': ExchangeNotAvailable,
'408': RequestTimeout,
'504': RequestTimeout,
'401': AuthenticationError,
'511': AuthenticationError,
}
headers = None
balance = None
orderbooks = None
orders = None
myTrades = None
trades = None
transactions = None
ohlcvs = None
tickers = None
base_currencies = None
quote_currencies = None
currencies = None
options = None # Python does not allow to define properties in run-time with setattr
accounts = None
positions = None
status = {
'status': 'ok',
'updated': None,
'eta': None,
'url': None,
}
requiredCredentials = {
'apiKey': True,
'secret': True,
'uid': False,
'login': False,
'password': False,
'twofa': False, # 2-factor authentication (one-time password key)
'privateKey': False, # a "0x"-prefixed hexstring private key for a wallet
'walletAddress': False, # the wallet address "0x"-prefixed hexstring
'token': False, # reserved for HTTP auth in some cases
}
# API method metainfo
has = {
'publicAPI': True,
'privateAPI': True,
'CORS': None,
'spot': None,
'margin': None,
'swap': None,
'future': None,
'option': None,
'addMargin': None,
'cancelAllOrders': None,
'cancelOrder': True,
'cancelOrders': None,
'createDepositAddress': None,
'createLimitOrder': True,
'createMarketOrder': True,
'createOrder': True,
'createPostOnlyOrder': None,
'createStopOrder': None,
'createStopLimitOrder': None,
'createStopMarketOrder': None,
'editOrder': 'emulated',
'fetchAccounts': None,
'fetchBalance': True,
'fetchBidsAsks': None,
'fetchBorrowInterest': None,
'fetchBorrowRate': None,
'fetchBorrowRateHistory': None,
'fetchBorrowRatesPerSymbol': None,
'fetchBorrowRates': None,
'fetchCanceledOrders': None,
'fetchClosedOrder': None,
'fetchClosedOrders': None,
'fetchCurrencies': 'emulated',
'fetchDeposit': None,
'fetchDepositAddress': None,
'fetchDepositAddresses': None,
'fetchDepositAddressesByNetwork': None,
'fetchDeposits': None,
'fetchFundingFee': None,
'fetchFundingFees': None,
'fetchFundingHistory': None,
'fetchFundingRate': None,
'fetchFundingRateHistory': None,
'fetchFundingRates': None,
'fetchIndexOHLCV': None,
'fetchL2OrderBook': True,
'fetchLedger': None,
'fetchLedgerEntry': None,
'fetchLeverageTiers': None,
'fetchMarketLeverageTiers': None,
'fetchMarkets': True,
'fetchMarkOHLCV': None,
'fetchMyTrades': None,
'fetchOHLCV': 'emulated',
'fetchOpenOrder': None,
'fetchOpenOrders': None,
'fetchOrder': None,
'fetchOrderBook': True,
'fetchOrderBooks': None,
'fetchOrders': None,
'fetchOrderTrades': None,
'fetchPermissions': None,
'fetchPosition': None,
'fetchPositions': None,
'fetchPositionsRisk': None,
'fetchPremiumIndexOHLCV': None,
'fetchStatus': 'emulated',
'fetchTicker': True,
'fetchTickers': None,
'fetchTime': None,
'fetchTrades': True,
'fetchTradingFee': None,
'fetchTradingFees': None,
'fetchTradingLimits': None,
'fetchTransactions': None,
'fetchTransfers': None,
'fetchWithdrawal': None,
'fetchWithdrawals': None,
'loadMarkets': True,
'reduceMargin': None,
'setLeverage': None,
'setMarginMode': None,
'setPositionMode': None,
'signIn': None,
'transfer': None,
'withdraw': None,
}
precisionMode = DECIMAL_PLACES
paddingMode = NO_PADDING
minFundingAddressLength = 1 # used in check_address
substituteCommonCurrencyCodes = True
quoteJsonNumbers = True
number = float # or str (a pointer to a class)
handleContentTypeApplicationZip = False
# whether fees should be summed by currency code
reduceFees = True
lastRestRequestTimestamp = 0
lastRestPollTimestamp = 0
restRequestQueue = None
restPollerLoopIsRunning = False
rateLimitTokens = 16
rateLimitMaxTokens = 16
rateLimitUpdateTime = 0
enableLastHttpResponse = True
enableLastJsonResponse = True
enableLastResponseHeaders = True
last_http_response = None
last_json_response = None
last_response_headers = None
requiresEddsa = False
base58_encoder = None
base58_decoder = None
# no lower case l or upper case I, O
base58_alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
commonCurrencies = {
'XBT': 'BTC',
'BCC': 'BCH',
'DRK': 'DASH',
'BCHABC': 'BCH',
'BCHSV': 'BSV',
}
synchronous = True
    def __init__(self, config={}):
        """Build the exchange instance: normalize container attributes, merge
        describe() output with the user config, generate the REST endpoint
        methods, register markets, and create camelCase aliases for every
        underscore-named attribute and method.

        NOTE(review): the mutable default ``config={}`` is shared across calls;
        it is only read (never mutated) below, so this is safe but fragile.
        """
        # class-level attributes may be None; replace with per-instance containers
        self.precision = dict() if self.precision is None else self.precision
        self.limits = dict() if self.limits is None else self.limits
        self.exceptions = dict() if self.exceptions is None else self.exceptions
        self.headers = dict() if self.headers is None else self.headers
        self.balance = dict() if self.balance is None else self.balance
        self.orderbooks = dict() if self.orderbooks is None else self.orderbooks
        self.tickers = dict() if self.tickers is None else self.tickers
        self.trades = dict() if self.trades is None else self.trades
        self.transactions = dict() if self.transactions is None else self.transactions
        self.positions = dict() if self.positions is None else self.positions
        self.ohlcvs = dict() if self.ohlcvs is None else self.ohlcvs
        self.currencies = dict() if self.currencies is None else self.currencies
        self.options = dict() if self.options is None else self.options # Python does not allow to define properties in run-time with setattr
        self.decimal_to_precision = decimal_to_precision
        self.number_to_string = number_to_string
        # version = '.'.join(map(str, sys.version_info[:3]))
        # self.userAgent = {
        #     'User-Agent': 'ccxt/' + __version__ + ' (+https://github.com/ccxt/ccxt) Python/' + version
        # }
        self.origin = self.uuid()
        self.userAgent = default_user_agent()
        # user config wins over the exchange's describe() defaults
        settings = self.deep_extend(self.describe(), config)
        for key in settings:
            if hasattr(self, key) and isinstance(getattr(self, key), dict):
                setattr(self, key, self.deep_extend(getattr(self, key), settings[key]))
            else:
                setattr(self, key, settings[key])
        if self.api:
            self.define_rest_api(self.api, 'request')
        if self.markets:
            self.set_markets(self.markets)
        # convert all properties from underscore notation foo_bar to camelcase notation fooBar
        cls = type(self)
        for name in dir(self):
            if name[0] != '_' and name[-1] != '_' and '_' in name:
                parts = name.split('_')
                # fetch_ohlcv → fetchOHLCV (not fetchOhlcv!)
                exceptions = {'ohlcv': 'OHLCV', 'le': 'LE', 'be': 'BE'}
                camelcase = parts[0] + ''.join(exceptions.get(i, self.capitalize(i)) for i in parts[1:])
                attr = getattr(self, name)
                if isinstance(attr, types.MethodType):
                    # methods alias on the class so both spellings stay bound
                    setattr(cls, camelcase, getattr(cls, name))
                else:
                    setattr(self, camelcase, attr)
        # token-bucket config; refillRate is requests per ms derived from rateLimit
        self.tokenBucket = self.extend({
            'refillRate': 1.0 / self.rateLimit if self.rateLimit > 0 else float('inf'),
            'delay': 0.001,
            'capacity': 1.0,
            'defaultCost': 1.0,
        }, getattr(self, 'tokenBucket', {}))
        # only synchronous exchanges get a requests Session
        self.session = self.session if self.session or not self.synchronous else Session()
        self.logger = self.logger if self.logger else logging.getLogger(__name__)
    def __del__(self):
        # best-effort cleanup of the HTTP session; errors during interpreter
        # shutdown are deliberately swallowed
        if self.session:
            try:
                self.session.close()
            except Exception as e:
                pass
    def __repr__(self):
        # e.g. "ccxt.binance()", or "ccxt.async_support.binance()" when an asyncio loop is attached
        return 'ccxt.' + ('async_support.' if self.asyncio_loop else '') + self.id + '()'
    def __str__(self):
        # human-readable exchange name, e.g. "Binance"
        return self.name
def describe(self):
return {}
def set_sandbox_mode(self, enabled):
if enabled:
if 'test' in self.urls:
self.urls['apiBackup'] = self.urls['api']
self.urls['api'] = self.urls['test']
else:
raise NotSupported(self.id + ' does not have a sandbox URL')
elif 'apiBackup' in self.urls:
self.urls['api'] = self.urls['apiBackup']
del self.urls['apiBackup']
def define_rest_api_endpoint(self, method_name, uppercase_method, lowercase_method, camelcase_method, path, paths, config={}):
cls = type(self)
entry = getattr(cls, method_name) # returns a function (instead of a bound method)
delimiters = re.compile('[^a-zA-Z0-9]')
split_path = delimiters.split(path)
lowercase_path = [x.strip().lower() for x in split_path]
camelcase_suffix = ''.join([Exchange.capitalize(x) for x in split_path])
underscore_suffix = '_'.join([x for x in lowercase_path if len(x)])
camelcase_prefix = ''
underscore_prefix = ''
if len(paths):
camelcase_prefix = paths[0]
underscore_prefix = paths[0]
if len(paths) > 1:
camelcase_prefix += ''.join([Exchange.capitalize(x) for x in paths[1:]])
underscore_prefix += '_' + '_'.join([x.strip() for p in paths[1:] for x in delimiters.split(p)])
api_argument = paths
else:
api_argument = paths[0]
camelcase = camelcase_prefix + camelcase_method + Exchange.capitalize(camelcase_suffix)
underscore = underscore_prefix + '_' + lowercase_method + '_' + underscore_suffix.lower()
def partialer():
outer_kwargs = {'path': path, 'api': api_argument, 'method': uppercase_method, 'config': config}
@functools.wraps(entry)
def inner(_self, params=None, context=None):
"""
Inner is called when a generated method (publicGetX) is called.
_self is a reference to self created by function.__get__(exchange, type(exchange))
https://en.wikipedia.org/wiki/Closure_(computer_programming) equivalent to functools.partial
"""
inner_kwargs = dict(outer_kwargs) # avoid mutation
if params is not None:
inner_kwargs['params'] = params
if context is not None:
inner_kwargs['context'] = params
return entry(_self, **inner_kwargs)
return inner
to_bind = partialer()
setattr(cls, camelcase, to_bind)
setattr(cls, underscore, to_bind)
    def define_rest_api(self, api, method_name, paths=[]):
        """Recursively walk the `api` description tree and generate an endpoint
        method for every leaf (a list of paths, or a dict of path -> config)."""
        for key, value in api.items():
            uppercase_method = key.upper()
            lowercase_method = key.lower()
            camelcase_method = lowercase_method.capitalize()
            if isinstance(value, list):
                # plain list of endpoint paths, no per-endpoint config
                for path in value:
                    self.define_rest_api_endpoint(method_name, uppercase_method, lowercase_method, camelcase_method, path, paths)
            # the options HTTP method conflicts with the 'options' API url path
            # elif re.search(r'^(?:get|post|put|delete|options|head|patch)$', key, re.IGNORECASE) is not None:
            elif re.search(r'^(?:get|post|put|delete|head|patch)$', key, re.IGNORECASE) is not None:
                # dict of path -> config (dict) or path -> rate-limit cost (number)
                for [endpoint, config] in value.items():
                    path = endpoint.strip()
                    if isinstance(config, dict):
                        self.define_rest_api_endpoint(method_name, uppercase_method, lowercase_method, camelcase_method, path, paths, config)
                    elif isinstance(config, Number):
                        self.define_rest_api_endpoint(method_name, uppercase_method, lowercase_method, camelcase_method, path, paths, {'cost': config})
                    else:
                        raise NotSupported(self.id + ' define_rest_api() API format not supported, API leafs must strings, objects or numbers')
            else:
                # not an HTTP verb: descend one level (e.g. 'public' / 'private')
                self.define_rest_api(value, method_name, paths + [key])
def throttle(self, cost=None):
now = float(self.milliseconds())
elapsed = now - self.lastRestRequestTimestamp
cost = 1 if cost is None else cost
sleep_time = self.rateLimit * cost
if elapsed < sleep_time:
delay = sleep_time - elapsed
time.sleep(delay / 1000.0)
def calculate_rate_limiter_cost(self, api, method, path, params, config={}, context={}):
return self.safe_value(config, 'cost', 1)
    def fetch2(self, path, api='public', method='GET', params={}, headers=None, body=None, config={}, context={}):
        """A better wrapper over request for deferred signing"""
        # throttle first, then timestamp, then sign: sign() may consume a nonce,
        # so it must run as close to the actual network call as possible
        if self.enableRateLimit:
            cost = self.calculate_rate_limiter_cost(api, method, path, params, config, context)
            self.throttle(cost)
        self.lastRestRequestTimestamp = self.milliseconds()
        request = self.sign(path, api, method, params, headers, body)
        return self.fetch(request['url'], request['method'], request['headers'], request['body'])
    def request(self, path, api='public', method='GET', params={}, headers=None, body=None, config={}, context={}):
        """Exchange.request is the entry point for all generated methods"""
        return self.fetch2(path, api, method, params, headers, body, config, context)
@staticmethod
def gzip_deflate(response, text):
encoding = response.info().get('Content-Encoding')
if encoding in ('gzip', 'x-gzip', 'deflate'):
if encoding == 'deflate':
return zlib.decompress(text, -zlib.MAX_WBITS)
else:
return gzip.GzipFile('', 'rb', 9, io.BytesIO(text)).read()
return text
    def throw_exactly_matched_exception(self, exact, string, message):
        """Raise exact[string] (an exception class) when `string` is a key of
        the `exact` mapping; otherwise fall through silently."""
        if string in exact:
            raise exact[string](message)
    def throw_broadly_matched_exception(self, broad, string, message):
        """Raise broad[key] for the first key that occurs as a substring of
        `string` (see find_broadly_matched_key); otherwise fall through."""
        broad_key = self.find_broadly_matched_key(broad, string)
        if broad_key is not None:
            raise broad[broad_key](message)
def find_broadly_matched_key(self, broad, string):
"""A helper method for matching error strings exactly vs broadly"""
keys = list(broad.keys())
for i in range(0, len(keys)):
key = keys[i]
if string.find(key) >= 0:
return key
return None
def prepare_request_headers(self, headers=None):
headers = headers or {}
if self.session:
headers.update(self.session.headers)
headers.update(self.headers)
if self.userAgent:
if type(self.userAgent) is str:
headers.update({'User-Agent': self.userAgent})
elif (type(self.userAgent) is dict) and ('User-Agent' in self.userAgent):
headers.update(self.userAgent)
if self.proxy:
headers.update({'Origin': self.origin})
headers.update({'Accept-Encoding': 'gzip, deflate'})
return self.set_headers(headers)
    def log(self, *args):
        # verbose-mode output hook; users/subclasses may override
        print(*args)
    def set_headers(self, headers):
        # final hook before sending; subclasses may override to inject headers
        return headers
    def handle_errors(self, code, reason, url, method, headers, body, response, request_headers, request_body):
        # subclass hook mapping exchange error payloads onto ccxt exceptions;
        # a truthy return value skips the default HTTP status-code handling in fetch()
        pass
    def on_rest_response(self, code, reason, url, method, response_headers, response_body, request_headers, request_body):
        # hook over the raw body before JSON parsing; default just trims whitespace
        return response_body.strip()
def on_json_response(self, response_body):
if self.quoteJsonNumbers:
return json.loads(response_body, parse_float=str, parse_int=str)
else:
return json.loads(response_body)
    def fetch(self, url, method='GET', headers=None, body=None):
        """Perform a HTTP request and return decoded JSON data"""
        request_headers = self.prepare_request_headers(headers)
        url = self.proxy + url
        if self.verbose:
            self.log("\nfetch Request:", self.id, method, url, "RequestHeaders:", request_headers, "RequestBody:", body)
        self.logger.debug("%s %s, Request: %s %s", method, url, request_headers, body)
        request_body = body
        if body:
            body = body.encode()
        # each request starts with a clean cookie jar
        self.session.cookies.clear()
        http_response = None
        http_status_code = None
        http_status_text = None
        json_response = None
        try:
            response = self.session.request(
                method,
                url,
                data=body,
                headers=request_headers,
                timeout=int(self.timeout / 1000),
                proxies=self.proxies,
                verify=self.verify and self.validateServerSsl
            )
            # does not try to detect encoding
            response.encoding = 'utf-8'
            headers = response.headers
            http_status_code = response.status_code
            http_status_text = response.reason
            http_response = self.on_rest_response(http_status_code, http_status_text, url, method, headers, response.text, request_headers, request_body)
            # json_response stays None when the body is not JSON
            json_response = self.parse_json(http_response)
            # FIXME remove last_x_responses from subclasses
            if self.enableLastHttpResponse:
                self.last_http_response = http_response
            if self.enableLastJsonResponse:
                self.last_json_response = json_response
            if self.enableLastResponseHeaders:
                self.last_response_headers = headers
            if self.verbose:
                self.log("\nfetch Response:", self.id, method, url, http_status_code, "ResponseHeaders:", headers, "ResponseBody:", http_response)
            self.logger.debug("%s %s, Response: %s %s %s", method, url, http_status_code, headers, http_response)
            # raises HTTPError for 4xx/5xx, handled below
            response.raise_for_status()
        except Timeout as e:
            details = ' '.join([self.id, method, url])
            raise RequestTimeout(details) from e
        except TooManyRedirects as e:
            details = ' '.join([self.id, method, url])
            raise ExchangeError(details) from e
        except SSLError as e:
            details = ' '.join([self.id, method, url])
            raise ExchangeError(details) from e
        except HTTPError as e:
            # give the subclass a chance to map the error payload first
            details = ' '.join([self.id, method, url])
            skip_further_error_handling = self.handle_errors(http_status_code, http_status_text, url, method, headers, http_response, json_response, request_headers, request_body)
            if not skip_further_error_handling:
                self.handle_http_status_code(http_status_code, http_status_text, url, method, http_response)
            raise ExchangeError(details) from e
        except requestsConnectionError as e:
            error_string = str(e)
            details = ' '.join([self.id, method, url])
            if 'Read timed out' in error_string:
                raise RequestTimeout(details) from e
            else:
                raise NetworkError(details) from e
        except ConnectionResetError as e:
            error_string = str(e)
            details = ' '.join([self.id, method, url])
            raise NetworkError(details) from e
        except RequestException as e:  # base exception class
            error_string = str(e)
            details = ' '.join([self.id, method, url])
            if any(x in error_string for x in ['ECONNRESET', 'Connection aborted.', 'Connection broken:']):
                raise NetworkError(details) from e
            else:
                raise ExchangeError(details) from e
        # successful (2xx/3xx) responses still go through the error hook
        self.handle_errors(http_status_code, http_status_text, url, method, headers, http_response, json_response, request_headers, request_body)
        if json_response is not None:
            return json_response
        elif self.is_text_response(headers):
            return http_response
        else:
            # binary (e.g. zip) payload
            return response.content
def handle_http_status_code(self, http_status_code, http_status_text, url, method, body):
string_code = str(http_status_code)
if string_code in self.httpExceptions:
Exception = self.httpExceptions[string_code]
raise Exception(' '.join([self.id, method, url, string_code, http_status_text, body]))
    def parse_json(self, http_response):
        """Decode the body when it looks like a JSON object/array; implicitly
        returns None for non-JSON bodies or on decode failure."""
        try:
            if Exchange.is_json_encoded_object(http_response):
                return self.on_json_response(http_response)
        except ValueError:  # superclass of JsonDecodeError (python2)
            pass
def is_text_response(self, headers):
# https://github.com/ccxt/ccxt/issues/5302
content_type = headers.get('Content-Type', '')
return content_type.startswith('application/json') or content_type.startswith('text/')
@staticmethod
def key_exists(dictionary, key):
if dictionary is None or key is None:
return False
if isinstance(dictionary, list):
if isinstance(key, int) and 0 <= key and key < len(dictionary):
return dictionary[key] is not None
else:
return False
if key in dictionary:
return dictionary[key] is not None
return False
@staticmethod
def safe_float(dictionary, key, default_value=None):
value = default_value
try:
if Exchange.key_exists(dictionary, key):
value = float(dictionary[key])
except ValueError as e:
value = default_value
return value
    @staticmethod
    def safe_string(dictionary, key, default_value=None):
        """str(dictionary[key]) or default_value when missing/None."""
        return str(dictionary[key]) if Exchange.key_exists(dictionary, key) else default_value
    @staticmethod
    def safe_string_lower(dictionary, key, default_value=None):
        """Lowercased safe_string; the default is returned untouched."""
        return str(dictionary[key]).lower() if Exchange.key_exists(dictionary, key) else default_value
    @staticmethod
    def safe_string_upper(dictionary, key, default_value=None):
        """Uppercased safe_string; the default is returned untouched."""
        return str(dictionary[key]).upper() if Exchange.key_exists(dictionary, key) else default_value
    @staticmethod
    def safe_integer(dictionary, key, default_value=None):
        """int(dictionary[key]) via float, or default_value on any failure."""
        if not Exchange.key_exists(dictionary, key):
            return default_value
        value = dictionary[key]
        try:
            # needed to avoid breaking on "100.0"
            # https://stackoverflow.com/questions/1094717/convert-a-string-to-integer-with-decimal-in-python#1094721
            return int(float(value))
        except ValueError:
            return default_value
        except TypeError:
            return default_value
    @staticmethod
    def safe_integer_product(dictionary, key, factor, default_value=None):
        """int(value * factor) for numeric or numeric-string values, else default."""
        if not Exchange.key_exists(dictionary, key):
            return default_value
        value = dictionary[key]
        if isinstance(value, Number):
            return int(value * factor)
        elif isinstance(value, str):
            try:
                return int(float(value) * factor)
            except ValueError:
                pass
        return default_value
    @staticmethod
    def safe_timestamp(dictionary, key, default_value=None):
        """Read a seconds value and scale it to milliseconds."""
        return Exchange.safe_integer_product(dictionary, key, 1000, default_value)
    @staticmethod
    def safe_value(dictionary, key, default_value=None):
        """dictionary[key] (any type) or default_value when missing/None."""
        return dictionary[key] if Exchange.key_exists(dictionary, key) else default_value
    # we're not using safe_floats with a list argument as we're trying to save some cycles here
    # we're not using safe_float_3 either because those cases are too rare to deserve their own optimization
    @staticmethod
    def safe_float_2(dictionary, key1, key2, default_value=None):
        """safe_float of key1, falling back to key2, then to default_value."""
        return Exchange.safe_either(Exchange.safe_float, dictionary, key1, key2, default_value)
    @staticmethod
    def safe_string_2(dictionary, key1, key2, default_value=None):
        """safe_string of key1, falling back to key2, then to default_value."""
        return Exchange.safe_either(Exchange.safe_string, dictionary, key1, key2, default_value)
    @staticmethod
    def safe_string_lower_2(dictionary, key1, key2, default_value=None):
        """safe_string_lower of key1, falling back to key2, then to default_value."""
        return Exchange.safe_either(Exchange.safe_string_lower, dictionary, key1, key2, default_value)
    @staticmethod
    def safe_string_upper_2(dictionary, key1, key2, default_value=None):
        """safe_string_upper of key1, falling back to key2, then to default_value."""
        return Exchange.safe_either(Exchange.safe_string_upper, dictionary, key1, key2, default_value)
    @staticmethod
    def safe_integer_2(dictionary, key1, key2, default_value=None):
        """safe_integer of key1, falling back to key2, then to default_value."""
        return Exchange.safe_either(Exchange.safe_integer, dictionary, key1, key2, default_value)
    @staticmethod
    def safe_integer_product_2(dictionary, key1, key2, factor, default_value=None):
        """safe_integer_product of key1, falling back to key2, then to default_value."""
        value = Exchange.safe_integer_product(dictionary, key1, factor)
        return value if value is not None else Exchange.safe_integer_product(dictionary, key2, factor, default_value)
    @staticmethod
    def safe_timestamp_2(dictionary, key1, key2, default_value=None):
        """Seconds -> milliseconds with a two-key fallback."""
        return Exchange.safe_integer_product_2(dictionary, key1, key2, 1000, default_value)
    @staticmethod
    def safe_value_2(dictionary, key1, key2, default_value=None):
        """safe_value of key1, falling back to key2, then to default_value."""
        return Exchange.safe_either(Exchange.safe_value, dictionary, key1, key2, default_value)
    @staticmethod
    def truncate(num, precision=0):
        """Deprecated, use decimal_to_precision instead"""
        if precision > 0:
            # scale, chop, unscale (subject to binary float artifacts)
            decimal_precision = math.pow(10, precision)
            return math.trunc(num * decimal_precision) / decimal_precision
        return int(Exchange.truncate_to_string(num, precision))
    @staticmethod
    def truncate_to_string(num, precision=0):
        """Deprecated, todo: remove references from subclasses"""
        if precision > 0:
            # format via Decimal, then strip trailing zeros but keep at least one digit
            parts = ('{0:.%df}' % precision).format(Decimal(num)).split('.')
            decimal_digits = parts[1][:precision].rstrip('0')
            decimal_digits = decimal_digits if len(decimal_digits) else '0'
            return parts[0] + '.' + decimal_digits
        return ('%d' % num)
    @staticmethod
    def uuid22(length=22):
        # NOTE(review): format(..., 'x') does not zero-pad, so the result can be
        # shorter than `length` hex digits when the random value has leading zeros
        return format(random.getrandbits(length * 4), 'x')
    @staticmethod
    def uuid16(length=16):
        # same caveat as uuid22: no zero-padding, length is an upper bound
        return format(random.getrandbits(length * 4), 'x')
    @staticmethod
    def uuid():
        # canonical RFC 4122 random UUID, dashes included
        return str(uuid.uuid4())
    @staticmethod
    def uuidv1():
        # time-based UUID with the dashes stripped (32 hex chars)
        return str(uuid.uuid1()).replace('-', '')
@staticmethod
def capitalize(string): # first character only, rest characters unchanged
# the native pythonic .capitalize() method lowercases all other characters
# which is an unwanted behaviour, therefore we use this custom implementation
# check it yourself: print('foobar'.capitalize(), 'fooBar'.capitalize())
if len(string) > 1:
return "%s%s" % (string[0].upper(), string[1:])
return string.upper()
    @staticmethod
    def strip(string):
        """Whitespace-trim helper (mirrors the transpiled JS API)."""
        return string.strip()
    @staticmethod
    def keysort(dictionary):
        """Return an OrderedDict of the items sorted by key."""
        return collections.OrderedDict(sorted(dictionary.items(), key=lambda t: t[0]))
@staticmethod
def extend(*args):
if args is not None:
result = None
if type(args[0]) is collections.OrderedDict:
result = collections.OrderedDict()
else:
result = {}
for arg in args:
result.update(arg)
return result
return {}
@staticmethod
def merge(*args):
if args is not None:
result = None
if type(args[0]) is collections.OrderedDict:
result = collections.OrderedDict()
else:
result = {}
for arg in args:
# -- diff --
for key in arg:
if result.get(key) is None:
result[key] = arg[key]
# -- enddiff --
return result
return {}
    @staticmethod
    def deep_extend(*args):
        """Recursively merge dicts left to right; a non-dict value replaces the
        accumulated result wholesale, so the last non-dict argument wins."""
        result = None
        for arg in args:
            if isinstance(arg, dict):
                if not isinstance(result, dict):
                    result = {}
                for key in arg:
                    result[key] = Exchange.deep_extend(result[key] if key in result else None, arg[key])
            else:
                result = arg
        return result
    @staticmethod
    def filter_by(array, key, value=None):
        """Keep the rows whose row[key] equals value (dicts are converted to
        a list of their values first)."""
        array = Exchange.to_array(array)
        return list(filter(lambda x: x[key] == value, array))
    @staticmethod
    def filterBy(array, key, value=None):
        """camelCase alias of filter_by."""
        return Exchange.filter_by(array, key, value)
    @staticmethod
    def group_by(array, key):
        """Bucket rows by row[key]; rows missing the key or holding None are dropped."""
        result = {}
        array = Exchange.to_array(array)
        array = [entry for entry in array if (key in entry) and (entry[key] is not None)]
        for entry in array:
            if entry[key] not in result:
                result[entry[key]] = []
            result[entry[key]].append(entry)
        return result
    @staticmethod
    def groupBy(array, key):
        """camelCase alias of group_by."""
        return Exchange.group_by(array, key)
    @staticmethod
    def index_by(array, key):
        """Map element[key] -> element; later elements overwrite earlier ones.
        Integer keys index into list-like elements instead of dicts."""
        result = {}
        if type(array) is dict:
            # deterministic ordering before re-indexing
            array = Exchange.keysort(array).values()
        is_int_key = isinstance(key, int)
        for element in array:
            if ((is_int_key and (key < len(element))) or (key in element)) and (element[key] is not None):
                k = element[key]
                result[k] = element
        return result
    @staticmethod
    def sort_by(array, key, descending=False):
        """Sort dict-rows by one key; None sorts as the empty string."""
        return sorted(array, key=lambda k: k[key] if k[key] is not None else "", reverse=descending)
    @staticmethod
    def sort_by_2(array, key1, key2, descending=False):
        """Sort dict-rows by key1 then key2; None sorts as the empty string."""
        return sorted(array, key=lambda k: (k[key1] if k[key1] is not None else "", k[key2] if k[key2] is not None else ""), reverse=descending)
    @staticmethod
    def array_concat(a, b):
        """Concatenate two sequences with + (new object, inputs untouched)."""
        return a + b
    @staticmethod
    def in_array(needle, haystack):
        """Membership test, mirroring the transpiled JS API."""
        return needle in haystack
    @staticmethod
    def is_empty(object):
        """Truthiness-based emptiness test ([]/''/0/None are all empty)."""
        return not object
    @staticmethod
    def extract_params(string):
        """List the {placeholder} names found in a path template."""
        return re.findall(r'{([\w-]+)}', string)
@staticmethod
def implode_params(string, params):
if isinstance(params, dict):
for key in params:
if not isinstance(params[key], list):
string = string.replace('{' + key + '}', str(params[key]))
return string
    def implode_hostname(self, url):
        """Substitute the {hostname} placeholder in a URL template."""
        return Exchange.implode_params(url, {'hostname': self.hostname})
    def resolve_path(self, path, params):
        """Return [interpolated_path, params_without_the_consumed_keys]."""
        return [
            self.implode_params(path, params),
            self.omit(params, self.extract_params(path))
        ]
@staticmethod
def urlencode(params={}, doseq=False):
for key, value in params.items():
if isinstance(value, bool):
params[key] = 'true' if value else 'false'
return _urlencode.urlencode(params, doseq)
    @staticmethod
    def urlencode_with_array_repeat(params={}):
        """urlencode with doseq, then strip any percent-encoded [index]
        brackets from the keys so arrays encode as repeated names."""
        return re.sub(r'%5B\d*%5D', '', Exchange.urlencode(params, True))
@staticmethod
def urlencode_nested(params):
result = {}
def _encode_params(params, p_key=None):
encode_params = {}
if isinstance(params, dict):
for key in params:
encode_key = '{}[{}]'.format(p_key, key)
encode_params[encode_key] = params[key]
elif isinstance(params, (list, tuple)):
for offset, value in enumerate(params):
encode_key = '{}[{}]'.format(p_key, offset)
encode_params[encode_key] = value
else:
result[p_key] = params
for key in encode_params:
value = encode_params[key]
_encode_params(value, key)
if isinstance(params, dict):
for key in params:
_encode_params(params[key], key)
return _urlencode.urlencode(result)
    @staticmethod
    def rawencode(params={}):
        """urlencode, then undo the percent-escaping (keeps k=v&k=v shape)."""
        return _urlencode.unquote(Exchange.urlencode(params))
    @staticmethod
    def encode_uri_component(uri, safe="~()*!.'"):
        """Percent-encode like JS encodeURIComponent (same default safe set)."""
        return _urlencode.quote(uri, safe=safe)
@staticmethod
def omit(d, *args):
if isinstance(d, dict):
result = d.copy()
for arg in args:
if type(arg) is list:
for key in arg:
if key in result:
del result[key]
else:
if arg in result:
del result[arg]
return result
return d
    @staticmethod
    def unique(array):
        # NOTE: set-based, so element order is NOT preserved and elements must be hashable
        return list(set(array))
@staticmethod
def pluck(array, key):
return [
element[key]
for element in array
if (key in element) and (element[key] is not None)
]
@staticmethod
def sum(*args):
return sum([arg for arg in args if isinstance(arg, (float, int))])
    @staticmethod
    def ordered(array):
        """Wrap key/value pairs (or a dict) in an insertion-ordered mapping."""
        return collections.OrderedDict(array)
@staticmethod
def aggregate(bidasks):
ordered = Exchange.ordered({})
for [price, volume, *_] in bidasks:
if volume > 0:
ordered[price] = (ordered[price] if price in ordered else 0) + volume
result = []
items = list(ordered.items())
for price, volume in items:
result.append([price, volume])
return result
    @staticmethod
    def sec():
        """Alias of seconds()."""
        return Exchange.seconds()
    @staticmethod
    def msec():
        """Alias of milliseconds()."""
        return Exchange.milliseconds()
    @staticmethod
    def usec():
        """Alias of microseconds()."""
        return Exchange.microseconds()
    @staticmethod
    def seconds():
        """Current UNIX time, whole seconds."""
        return int(time.time())
    @staticmethod
    def milliseconds():
        """Current UNIX time in milliseconds."""
        return int(time.time() * 1000)
    @staticmethod
    def microseconds():
        """Current UNIX time in microseconds."""
        return int(time.time() * 1000000)
@staticmethod
def iso8601(timestamp=None):
if timestamp is None:
return timestamp
if not isinstance(timestamp, int):
return None
if int(timestamp) < 0:
return None
try:
utc = datetime.datetime.utcfromtimestamp(timestamp // 1000)
return utc.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-6] + "{:03d}".format(int(timestamp) % 1000) + 'Z'
except (TypeError, OverflowError, OSError):
return None
@staticmethod
def rfc2616(self, timestamp=None):
if timestamp is None:
ts = datetime.datetime.now()
else:
ts = timestamp
stamp = mktime(ts.timetuple())
return format_date_time(stamp)
    @staticmethod
    def dmy(timestamp, infix='-'):
        """Millisecond timestamp -> 'MM-DD-YYYY' (UTC), custom separator."""
        utc_datetime = datetime.datetime.utcfromtimestamp(int(round(timestamp / 1000)))
        return utc_datetime.strftime('%m' + infix + '%d' + infix + '%Y')
    @staticmethod
    def ymd(timestamp, infix='-', fullYear=True):
        """Millisecond timestamp -> 'YYYY-MM-DD' (or 'YY-MM-DD'), UTC."""
        year_format = '%Y' if fullYear else '%y'
        utc_datetime = datetime.datetime.utcfromtimestamp(int(round(timestamp / 1000)))
        return utc_datetime.strftime(year_format + infix + '%m' + infix + '%d')
    @staticmethod
    def yymmdd(timestamp, infix=''):
        """Millisecond timestamp -> 'YYMMDD' by default."""
        return Exchange.ymd(timestamp, infix, False)
    @staticmethod
    def yyyymmdd(timestamp, infix='-'):
        """Millisecond timestamp -> 'YYYY-MM-DD' by default."""
        return Exchange.ymd(timestamp, infix, True)
@staticmethod
def ymdhms(timestamp, infix=' '):
utc_datetime = datetime.datetime.utcfromtimestamp(int(round(timestamp / 1000)))
return utc_datetime.strftime('%Y-%m-%d' + infix + '%H:%M:%S')
@staticmethod
def parse_date(timestamp=None):
if timestamp is None:
return timestamp
if not isinstance(timestamp, str):
return None
if 'GMT' in timestamp:
try:
string = ''.join([str(value) for value in parsedate(timestamp)[:6]]) + '.000Z'
dt = datetime.datetime.strptime(string, "%Y%m%d%H%M%S.%fZ")
return calendar.timegm(dt.utctimetuple()) * 1000
except (TypeError, OverflowError, OSError):
return None
else:
return Exchange.parse8601(timestamp)
    @staticmethod
    def parse8601(timestamp=None):
        """Parse an ISO 8601 string ('2020-01-01T00:00:00.123+02:00') into a
        millisecond UTC timestamp; returns None when it does not match."""
        if timestamp is None:
            return timestamp
        # separators (-, :, T) are optional in the pattern
        yyyy = '([0-9]{4})-?'
        mm = '([0-9]{2})-?'
        dd = '([0-9]{2})(?:T|[\\s])?'
        h = '([0-9]{2}):?'
        m = '([0-9]{2}):?'
        s = '([0-9]{2})'
        ms = '(\\.[0-9]{1,3})?'
        tz = '(?:(\\+|\\-)([0-9]{2})\\:?([0-9]{2})|Z)?'
        regex = r'' + yyyy + mm + dd + h + m + s + ms + tz
        try:
            match = re.search(regex, timestamp, re.IGNORECASE)
            if match is None:
                return None
            yyyy, mm, dd, h, m, s, ms, sign, hours, minutes = match.groups()
            # normalize milliseconds to exactly 3 digits (keeps the leading dot)
            ms = ms or '.000'
            ms = (ms + '00')[0:4]
            msint = int(ms[1:])
            # the sign is inverted: a '+02:00' zone means subtract 2h to reach UTC
            sign = sign or ''
            sign = int(sign + '1') * -1
            hours = int(hours or 0) * sign
            minutes = int(minutes or 0) * sign
            offset = datetime.timedelta(hours=hours, minutes=minutes)
            string = yyyy + mm + dd + h + m + s + ms + 'Z'
            dt = datetime.datetime.strptime(string, "%Y%m%d%H%M%S.%fZ")
            dt = dt + offset
            return calendar.timegm(dt.utctimetuple()) * 1000 + msint
        except (TypeError, OverflowError, OSError, ValueError):
            return None
    @staticmethod
    def hash(request, algorithm='md5', digest='hex'):
        """Hash `request` (bytes) with a hashlib algorithm or keccak; digest
        selects 'hex', 'base64' or raw bytes output."""
        if algorithm == 'keccak':
            # third-party keccak implementation (not available in hashlib)
            binary = bytes(keccak.SHA3(request))
        else:
            h = hashlib.new(algorithm, request)
            binary = h.digest()
        if digest == 'base64':
            return Exchange.binary_to_base64(binary)
        elif digest == 'hex':
            return Exchange.binary_to_base16(binary)
        return binary
    @staticmethod
    def hmac(request, secret, algorithm=hashlib.sha256, digest='hex'):
        """HMAC of `request` with `secret` (both bytes); digest selects 'hex',
        'base64' or raw bytes output."""
        h = hmac.new(secret, request, algorithm)
        binary = h.digest()
        if digest == 'hex':
            return Exchange.binary_to_base16(binary)
        elif digest == 'base64':
            return Exchange.binary_to_base64(binary)
        return binary
    @staticmethod
    def binary_concat(*args):
        """Concatenate any number of byte strings."""
        result = bytes()
        for arg in args:
            result = result + arg
        return result
    @staticmethod
    def binary_concat_array(array):
        """Concatenate a list of byte strings."""
        result = bytes()
        for element in array:
            result = result + element
        return result
    @staticmethod
    def base64urlencode(s):
        """URL-safe base64 without '=' padding (as used in JWTs)."""
        return Exchange.decode(base64.urlsafe_b64encode(s)).replace('=', '')
    @staticmethod
    def binary_to_base64(s):
        """bytes -> standard base64 string."""
        return Exchange.decode(base64.standard_b64encode(s))
    @staticmethod
    def base64_to_binary(s):
        """standard base64 string -> bytes."""
        return base64.standard_b64decode(s)
    @staticmethod
    def string_to_base64(s):
        # will return string in the future
        binary = Exchange.encode(s) if isinstance(s, str) else s
        return Exchange.encode(Exchange.binary_to_base64(binary))
    @staticmethod
    def base64_to_string(s):
        """base64 string -> decoded UTF-8 text."""
        return base64.b64decode(s).decode('utf-8')
    @staticmethod
    def jwt(request, secret, alg='HS256'):
        """Build a JSON Web Token over `request` with HMAC (HS*) or RSA (RS*)
        signatures; `secret` is bytes (HMAC key or PEM private key)."""
        algos = {
            'HS256': hashlib.sha256,
            'HS384': hashlib.sha384,
            'HS512': hashlib.sha512,
        }
        header = Exchange.encode(Exchange.json({
            'alg': alg,
            'typ': 'JWT',
        }))
        encoded_header = Exchange.base64urlencode(header)
        encoded_data = Exchange.base64urlencode(Exchange.encode(Exchange.json(request)))
        token = encoded_header + '.' + encoded_data
        if alg[:2] == 'RS':
            signature = Exchange.rsa(token, secret, alg)
        else:
            algorithm = algos[alg]
            signature = Exchange.hmac(Exchange.encode(token), secret, algorithm, 'binary')
        return token + '.' + Exchange.base64urlencode(signature)
    @staticmethod
    def rsa(request, secret, alg='RS256'):
        """PKCS#1 v1.5 RSA signature of `request` using a PEM private key."""
        algorithms = {
            "RS256": hashes.SHA256(),
            "RS384": hashes.SHA384(),
            "RS512": hashes.SHA512(),
        }
        algorithm = algorithms[alg]
        priv_key = load_pem_private_key(secret, None, backends.default_backend())
        return priv_key.sign(Exchange.encode(request), padding.PKCS1v15(), algorithm)
    @staticmethod
    def ecdsa(request, secret, algorithm='p256', hash=None, fixed_length=False):
        """Deterministic ECDSA signature of `request` with a hex-encoded key.

        Returns {'r', 's', 'v'} with r/s as lowercase hex strings. When
        `hash` is given, the request is hashed first; otherwise it is treated
        as an already-hashed hex digest. With fixed_length=True, re-signs with
        extra entropy until r fills the full 32 bytes (needed by some venues).
        """
        # your welcome - frosty00
        algorithms = {
            'p192': [ecdsa.NIST192p, 'sha256'],
            'p224': [ecdsa.NIST224p, 'sha256'],
            'p256': [ecdsa.NIST256p, 'sha256'],
            'p384': [ecdsa.NIST384p, 'sha384'],
            'p521': [ecdsa.NIST521p, 'sha512'],
            'secp256k1': [ecdsa.SECP256k1, 'sha256'],
        }
        if algorithm not in algorithms:
            raise ArgumentsRequired(algorithm + ' is not a supported algorithm')
        curve_info = algorithms[algorithm]
        hash_function = getattr(hashlib, curve_info[1])
        encoded_request = Exchange.encode(request)
        if hash is not None:
            digest = Exchange.hash(encoded_request, hash, 'binary')
        else:
            digest = base64.b16decode(encoded_request, casefold=True)
        key = ecdsa.SigningKey.from_string(base64.b16decode(Exchange.encode(secret),
                                                            casefold=True), curve=curve_info[0])
        r_binary, s_binary, v = key.sign_digest_deterministic(digest, hashfunc=hash_function,
                                                              sigencode=ecdsa.util.sigencode_strings_canonize)
        r_int, s_int = ecdsa.util.sigdecode_strings((r_binary, s_binary), key.privkey.order)
        counter = 0
        # thresholds for a full-length, low-s signature
        minimum_size = (1 << (8 * 31)) - 1
        half_order = key.privkey.order / 2
        while fixed_length and (r_int > half_order or r_int <= minimum_size or s_int <= minimum_size):
            r_binary, s_binary, v = key.sign_digest_deterministic(digest, hashfunc=hash_function,
                                                                  sigencode=ecdsa.util.sigencode_strings_canonize,
                                                                  extra_entropy=Exchange.number_to_le(counter, 32))
            r_int, s_int = ecdsa.util.sigdecode_strings((r_binary, s_binary), key.privkey.order)
            counter += 1
        r, s = Exchange.decode(base64.b16encode(r_binary)).lower(), Exchange.decode(base64.b16encode(s_binary)).lower()
        return {
            'r': r,
            's': s,
            'v': v,
        }
    @staticmethod
    def eddsa(request, secret, curve='ed25519'):
        """Ed25519 signature of a hex-encoded request with a hex-encoded key,
        returned base58-encoded."""
        random = b'\x00' * 64
        request = base64.b16decode(request, casefold=True)
        secret = base64.b16decode(secret, casefold=True)
        signature = eddsa.calculateSignature(random, secret, request)
        return Exchange.binary_to_base58(signature)
    @staticmethod
    def json(data, params=None):
        # compact JSON (no spaces); `params` is kept for cross-language API parity but ignored
        return json.dumps(data, separators=(',', ':'))
@staticmethod
def is_json_encoded_object(input):
return (isinstance(input, str) and
(len(input) >= 2) and
((input[0] == '{') or (input[0] == '[')))
    @staticmethod
    def encode(string):
        # latin-1 maps code points 0-255 one-to-one onto bytes, matching the
        # byte-string semantics the signing helpers rely on
        return string.encode('latin-1')
    @staticmethod
    def decode(string):
        # inverse of encode(): bytes -> str via latin-1
        return string.decode('latin-1')
@staticmethod
def to_array(value):
return list(value.values()) if type(value) is dict else value
    def nonce(self):
        # default nonce is the UNIX timestamp in seconds; exchanges override as needed
        return Exchange.seconds()
@staticmethod
def check_required_version(required_version, error=True):
result = True
[major1, minor1, patch1] = required_version.split('.')
[major2, minor2, patch2] = __version__.split('.')
int_major1 = int(major1)
int_minor1 = int(minor1)
int_patch1 = int(patch1)
int_major2 = int(major2)
int_minor2 = int(minor2)
int_patch2 = int(patch2)
if int_major1 > int_major2:
result = False
if int_major1 == int_major2:
if int_minor1 > int_minor2:
result = False
elif int_minor1 == int_minor2 and int_patch1 > int_patch2:
result = False
if not result:
if error:
raise NotSupported('Your current version of CCXT is ' + __version__ + ', a newer version ' + required_version + ' is required, please, upgrade your version of CCXT')
else:
return error
return result
def check_required_credentials(self, error=True):
keys = list(self.requiredCredentials.keys())
for key in keys:
if self.requiredCredentials[key] and not getattr(self, key):
if error:
raise AuthenticationError(self.id + ' requires `' + key + '`')
else:
return error
return True
def check_address(self, address):
"""Checks an address is not the same character repeated or an empty sequence"""
if address is None:
raise InvalidAddress(self.id + ' address is None')
if all(letter == address[0] for letter in address) or len(address) < self.minFundingAddressLength or ' ' in address:
raise InvalidAddress(self.id + ' address is invalid or has less than ' + str(self.minFundingAddressLength) + ' characters: "' + str(address) + '"')
return address
def account(self):
return {
'free': None,
'used': None,
'total': None,
}
def common_currency_code(self, currency):
if not self.substituteCommonCurrencyCodes:
return currency
return self.safe_string(self.commonCurrencies, currency, currency)
def precision_from_string(self, string):
parts = re.sub(r'0+$', '', string).split('.')
return len(parts[1]) if len(parts) > 1 else 0
    def cost_to_precision(self, symbol, cost):
        """Format an order cost to the market's price precision (truncating)."""
        market = self.market(symbol)
        return self.decimal_to_precision(cost, TRUNCATE, market['precision']['price'], self.precisionMode, self.paddingMode)
    def price_to_precision(self, symbol, price):
        """Format a price to the market's price precision (rounding)."""
        market = self.market(symbol)
        return self.decimal_to_precision(price, ROUND, market['precision']['price'], self.precisionMode, self.paddingMode)
    def amount_to_precision(self, symbol, amount):
        """Format an order amount to the market's amount precision (truncating)."""
        market = self.market(symbol)
        return self.decimal_to_precision(amount, TRUNCATE, market['precision']['amount'], self.precisionMode, self.paddingMode)
    def fee_to_precision(self, symbol, fee):
        """Format a fee to the market's price precision (rounding)."""
        market = self.market(symbol)
        return self.decimal_to_precision(fee, ROUND, market['precision']['price'], self.precisionMode, self.paddingMode)
    def currency_to_precision(self, code, fee):
        """Format an amount to the given currency's own precision (rounding)."""
        return self.decimal_to_precision(fee, ROUND, self.currencies[code]['precision'], self.precisionMode, self.paddingMode)
    def set_markets(self, markets, currencies=None):
        """Index *markets* (list or dict) into self.markets / markets_by_id /
        symbols / ids, and derive self.currencies from the markets when an
        explicit *currencies* dict is not given.

        Each market is normalized against a full default template overlaid
        with the exchange's trading fees and then the raw market itself.
        Returns self.markets.
        """
        values = list(markets.values()) if type(markets) is dict else markets
        for i in range(0, len(values)):
            # fill every unified field with a default, then overlay fees and the raw market
            values[i] = self.extend(
                {
                    'id': None,
                    'symbol': None,
                    'base': None,
                    'quote': None,
                    'baseId': None,
                    'quoteId': None,
                    'active': None,
                    'type': None,
                    'linear': None,
                    'inverse': None,
                    'spot': False,
                    'swap': False,
                    'future': False,
                    'option': False,
                    'margin': False,
                    'contract': False,
                    'contractSize': None,
                    'expiry': None,
                    'expiryDatetime': None,
                    'optionType': None,
                    'strike': None,
                    'settle': None,
                    'settleId': None,
                    # NOTE(review): self.precision/self.limits are shared references,
                    # not copies — confirm no market mutates them in place
                    'precision': self.precision,
                    'limits': self.limits,
                    'info': None,
                },
                self.fees['trading'],
                values[i]
            )
        self.markets = self.index_by(values, 'symbol')
        self.markets_by_id = self.index_by(values, 'id')
        self.symbols = sorted(self.markets.keys())
        self.ids = sorted(self.markets_by_id.keys())
        if currencies:
            self.currencies = self.deep_extend(self.currencies, currencies)
        else:
            # derive base/quote currency stubs from the markets themselves;
            # precision falls back to 8 when the market carries none
            base_currencies = [{
                'id': market['baseId'] if (('baseId' in market) and (market['baseId'] is not None)) else market['base'],
                'numericId': market['baseNumericId'] if 'baseNumericId' in market else None,
                'code': market['base'],
                'precision': (
                    market['precision']['base'] if 'base' in market['precision'] else (
                        market['precision']['amount'] if 'amount' in market['precision'] else None
                    )
                ) if 'precision' in market else 8,
            } for market in values if 'base' in market]
            quote_currencies = [{
                'id': market['quoteId'] if (('quoteId' in market) and (market['quoteId'] is not None)) else market['quote'],
                'numericId': market['quoteNumericId'] if 'quoteNumericId' in market else None,
                'code': market['quote'],
                'precision': (
                    market['precision']['quote'] if 'quote' in market['precision'] else (
                        market['precision']['price'] if 'price' in market['precision'] else None
                    )
                ) if 'precision' in market else 8,
            } for market in values if 'quote' in market]
            base_currencies = self.sort_by(base_currencies, 'code')
            quote_currencies = self.sort_by(quote_currencies, 'code')
            self.base_currencies = self.index_by(base_currencies, 'code')
            self.quote_currencies = self.index_by(quote_currencies, 'code')
            currencies = self.sort_by(base_currencies + quote_currencies, 'code')
            self.currencies = self.deep_extend(self.currencies, self.index_by(currencies, 'code'))
            self.currencies_by_id = self.index_by(list(self.currencies.values()), 'id')
            self.codes = sorted(self.currencies.keys())
        return self.markets
    def fetch_permissions(self, params={}):
        """Stub: fetch API-key permissions; must be overridden by supporting exchanges."""
        raise NotSupported(self.id + ' fetch_permissions() is not supported yet')
    def load_markets(self, reload=False, params={}):
        """Load (and cache) the exchange's markets; return the markets dict.

        When *reload* is False a cached self.markets is returned as-is,
        except that a missing markets_by_id index triggers re-indexing via
        set_markets.  Currencies are fetched alongside markets only when the
        exchange declares fetchCurrencies support.
        """
        if not reload:
            if self.markets:
                if not self.markets_by_id:
                    return self.set_markets(self.markets)
                return self.markets
        currencies = None
        if self.has['fetchCurrencies'] is True:
            currencies = self.fetch_currencies()
        markets = self.fetch_markets(params)
        return self.set_markets(markets, currencies)
def load_accounts(self, reload=False, params={}):
if reload:
self.accounts = self.fetch_accounts(params)
else:
if self.accounts:
return self.accounts
else:
self.accounts = self.fetch_accounts(params)
self.accountsById = self.index_by(self.accounts, 'id')
return self.accounts
    def load_fees(self, reload=False):
        """Load (and cache) trading/funding fees; return the cached structure.

        NOTE(review): the cache test compares the instance value against the
        class-level Exchange.loaded_fees default — confirm derived classes
        rely on this equality check rather than a sentinel.
        """
        if not reload:
            if self.loaded_fees != Exchange.loaded_fees:
                return self.loaded_fees
        self.loaded_fees = self.deep_extend(self.loaded_fees, self.fetch_fees())
        return self.loaded_fees
    def fetch_markets(self, params={}):
        """Base-class fallback: return the statically configured markets as a list."""
        # markets are returned as a list
        # currencies are returned as a dict
        # this is for historical reasons
        # and may be changed for consistency later
        return self.to_array(self.markets)
    def fetch_currencies(self, params={}):
        """Base-class fallback: return the statically configured currencies dict."""
        # markets are returned as a list
        # currencies are returned as a dict
        # this is for historical reasons
        # and may be changed for consistency later
        return self.currencies
def fetch_fees(self):
trading = {}
funding = {}
if self.has['fetchTradingFees']:
trading = self.fetch_trading_fees()
if self.has['fetchFundingFees']:
funding = self.fetch_funding_fees()
return {
'trading': trading,
'funding': funding,
}
    def fetch_balance(self, params={}):
        """Stub: fetch account balances; must be overridden by the derived exchange."""
        raise NotSupported(self.id + ' fetch_balance() is not supported yet')
    def create_order(self, symbol, type, side, amount, price=None, params={}):
        """Stub: place an order; must be overridden by the derived exchange."""
        raise NotSupported(self.id + ' create_order() is not supported yet')
    def cancel_order(self, id, symbol=None, params={}):
        """Stub: cancel an order by id; must be overridden by the derived exchange."""
        raise NotSupported(self.id + ' cancel_order() is not supported yet')
    def cancel_unified_order(self, order, params={}):
        """Cancel an order given a unified order structure (uses its 'id' and 'symbol')."""
        return self.cancel_order(self.safe_value(order, 'id'), self.safe_value(order, 'symbol'), params)
    def fetch_bids_asks(self, symbols=None, params={}) -> dict:
        """Stub: fetch best bid/ask for many symbols in one call; not supported in the base class."""
        raise NotSupported(self.id + ' API does not allow to fetch all prices at once with a single call to fetch_bids_asks() for now')
def fetch_ticker(self, symbol, params={}):
if self.has['fetchTickers']:
tickers = self.fetch_tickers([symbol], params)
ticker = self.safe_value(tickers, symbol)
if ticker is None:
raise BadSymbol(self.id + ' fetchTickers could not find a ticker for ' + symbol)
else:
return ticker
else:
raise NotSupported(self.id + ' fetchTicker not supported yet')
    def fetch_tickers(self, symbols=None, params={}):
        """Stub: fetch all tickers in one call; not supported in the base class."""
        raise NotSupported(self.id + ' API does not allow to fetch all tickers at once with a single call to fetch_tickers() for now')
def fetch_order_status(self, id, symbol=None, params={}):
order = self.fetch_order(id, symbol, params)
return order['status']
    def fetch_order(self, id, symbol=None, params={}):
        """Stub: fetch one order by id; must be overridden by the derived exchange."""
        raise NotSupported(self.id + ' fetch_order() is not supported yet')
    def fetch_unified_order(self, order, params={}):
        """Fetch an order given a unified order structure (uses its 'id' and 'symbol')."""
        return self.fetch_order(self.safe_value(order, 'id'), self.safe_value(order, 'symbol'), params)
    def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
        """Stub: fetch orders; must be overridden by the derived exchange."""
        raise NotSupported(self.id + ' fetch_orders() is not supported yet')
    def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
        """Stub: fetch open orders; must be overridden by the derived exchange."""
        raise NotSupported(self.id + ' fetch_open_orders() is not supported yet')
    def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
        """Stub: fetch closed orders; must be overridden by the derived exchange."""
        raise NotSupported(self.id + ' fetch_closed_orders() is not supported yet')
    def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
        """Stub: fetch the account's own trades; must be overridden by the derived exchange."""
        raise NotSupported(self.id + ' fetch_my_trades() is not supported yet')
    def fetch_order_trades(self, id, symbol=None, params={}):
        """Stub: fetch the trades belonging to one order; must be overridden by the derived exchange."""
        raise NotSupported(self.id + ' fetch_order_trades() is not supported yet')
    def fetch_transactions(self, code=None, since=None, limit=None, params={}):
        """Stub: fetch deposits and withdrawals; must be overridden by the derived exchange."""
        raise NotSupported(self.id + ' fetch_transactions() is not supported yet')
    def fetch_deposits(self, code=None, since=None, limit=None, params={}):
        """Stub: fetch deposits; must be overridden by the derived exchange."""
        raise NotSupported(self.id + ' fetch_deposits() is not supported yet')
    def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
        """Stub: fetch withdrawals; must be overridden by the derived exchange."""
        raise NotSupported(self.id + ' fetch_withdrawals() is not supported yet')
# def fetch_deposit_addresses(self, codes=None, params={}):
# raise NotSupported(self.id + ' fetch_deposit_addresses() is not supported yet')
def fetch_deposit_address(self, code, params={}):
if self.has['fetchDepositAddresses']:
deposit_addresses = self.fetch_deposit_addresses([code], params)
deposit_address = self.safe_value(deposit_addresses, code)
if deposit_address is None:
raise NotSupported(self.id + ' fetch_deposit_address could not find a deposit address for ' + code + ', make sure you have created a corresponding deposit address in your wallet on the exchange website')
else:
return deposit_address
else:
raise NotSupported(self.id + ' fetchDepositAddress not supported yet')
    def parse_funding_rate(self, contract, market=None):
        """Stub: parse one raw funding-rate entry; must be overridden by the derived exchange."""
        raise NotSupported(self.id + ' parse_funding_rate() is not supported yet')
def parse_funding_rates(self, response, market=None):
result = {}
for entry in response:
parsed = self.parse_funding_rate(entry, market)
result[parsed['symbol']] = parsed
return result
def parse_ohlcv(self, ohlcv, market=None):
if isinstance(ohlcv, list):
return [
self.safe_integer(ohlcv, 0),
self.safe_float(ohlcv, 1),
self.safe_float(ohlcv, 2),
self.safe_float(ohlcv, 3),
self.safe_float(ohlcv, 4),
self.safe_float(ohlcv, 5),
]
else:
return ohlcv
def parse_ohlcvs(self, ohlcvs, market=None, timeframe='1m', since=None, limit=None):
parsed = [self.parse_ohlcv(ohlcv, market) for ohlcv in ohlcvs]
sorted = self.sort_by(parsed, 0)
tail = since is None
return self.filter_by_since_limit(sorted, since, limit, 0, tail)
def parse_bid_ask(self, bidask, price_key=0, amount_key=0):
return [self.safe_number(bidask, price_key), self.safe_number(bidask, amount_key)]
    def parse_bids_asks(self, bidasks, price_key=0, amount_key=1):
        """Parse one side of a raw order book into [price, amount] rows.

        Rows may be lists or dicts (the first row decides); rows with a
        falsy (zero or missing) price or amount are silently dropped.
        Raises ExchangeError when the first row has an unrecognized type.
        """
        result = []
        if len(bidasks):
            if type(bidasks[0]) is list:
                for bidask in bidasks:
                    if bidask[price_key] and bidask[amount_key]:
                        result.append(self.parse_bid_ask(bidask, price_key, amount_key))
            elif type(bidasks[0]) is dict:
                for bidask in bidasks:
                    if (price_key in bidask) and (amount_key in bidask) and (bidask[price_key] and bidask[amount_key]):
                        result.append(self.parse_bid_ask(bidask, price_key, amount_key))
            else:
                raise ExchangeError(self.id + ' unrecognized bidask format: ' + str(bidasks[0]))
        return result
def fetch_l2_order_book(self, symbol, limit=None, params={}):
orderbook = self.fetch_order_book(symbol, limit, params)
return self.extend(orderbook, {
'bids': self.sort_by(self.aggregate(orderbook['bids']), 0, True),
'asks': self.sort_by(self.aggregate(orderbook['asks']), 0),
})
def parse_order_book(self, orderbook, symbol, timestamp=None, bids_key='bids', asks_key='asks', price_key=0, amount_key=1):
return {
'symbol': symbol,
'bids': self.sort_by(self.parse_bids_asks(orderbook[bids_key], price_key, amount_key) if (bids_key in orderbook) and isinstance(orderbook[bids_key], list) else [], 0, True),
'asks': self.sort_by(self.parse_bids_asks(orderbook[asks_key], price_key, amount_key) if (asks_key in orderbook) and isinstance(orderbook[asks_key], list) else [], 0),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp) if timestamp is not None else None,
'nonce': None,
}
    def safe_balance(self, balance):
        """Complete a unified balance structure in place and return it.

        For each currency key, derive whichever of free/used/total is missing
        from the other two (string arithmetic via Precise), convert the three
        amounts to numbers, and build the top-level 'free'/'used'/'total'
        index dicts.  Amounts are expected as precision strings on input.
        """
        # every key except these metadata keys is treated as a currency code
        currencies = self.omit(balance, ['info', 'timestamp', 'datetime', 'free', 'used', 'total']).keys()
        balance['free'] = {}
        balance['used'] = {}
        balance['total'] = {}
        for currency in currencies:
            # total = free + used, free = total - used, used = total - free
            if balance[currency].get('total') is None:
                if balance[currency].get('free') is not None and balance[currency].get('used') is not None:
                    balance[currency]['total'] = Precise.string_add(balance[currency]['free'], balance[currency]['used'])
            if balance[currency].get('free') is None:
                if balance[currency].get('total') is not None and balance[currency].get('used') is not None:
                    balance[currency]['free'] = Precise.string_sub(balance[currency]['total'], balance[currency]['used'])
            if balance[currency].get('used') is None:
                if balance[currency].get('total') is not None and balance[currency].get('free') is not None:
                    balance[currency]['used'] = Precise.string_sub(balance[currency]['total'], balance[currency]['free'])
            balance[currency]['free'] = self.parse_number(balance[currency]['free'])
            balance[currency]['used'] = self.parse_number(balance[currency]['used'])
            balance[currency]['total'] = self.parse_number(balance[currency]['total'])
            balance['free'][currency] = balance[currency]['free']
            balance['used'][currency] = balance[currency]['used']
            balance['total'][currency] = balance[currency]['total']
        return balance
def fetch_partial_balance(self, part, params={}):
balance = self.fetch_balance(params)
return balance[part]
    def fetch_free_balance(self, params={}):
        """Fetch only the 'free' portion of the balance."""
        return self.fetch_partial_balance('free', params)
    def fetch_used_balance(self, params={}):
        """Fetch only the 'used' portion of the balance."""
        return self.fetch_partial_balance('used', params)
    def fetch_total_balance(self, params={}):
        """Fetch only the 'total' portion of the balance."""
        return self.fetch_partial_balance('total', params)
def fetch_trading_fees(self, symbol, params={}):
raise NotSupported(self.id + ' fetch_trading_fees() is not supported yet')
    def fetch_trading_fee(self, symbol, params={}):
        """Fetch the trading fee for one symbol via fetch_trading_fees.

        NOTE(review): *params* is forwarded as the first positional argument
        of fetch_trading_fees and *symbol* is dropped entirely — confirm this
        matches the signatures of the overriding implementations.
        """
        if not self.has['fetchTradingFees']:
            raise NotSupported(self.id + ' fetch_trading_fee() is not supported yet')
        return self.fetch_trading_fees(params)
    def fetch_funding_fees(self, params={}):
        """Stub: fetch funding (deposit/withdraw) fees; must be overridden by supporting exchanges."""
        raise NotSupported(self.id + ' fetch_funding_fees() is not supported yet')
    def fetch_funding_fee(self, code, params={}):
        """Fetch funding fees via fetch_funding_fees.

        NOTE(review): *code* is unused here — the full funding-fee structure
        is returned rather than the entry for one currency; confirm intended.
        """
        if not self.has['fetchFundingFees']:
            raise NotSupported(self.id + ' fetch_funding_fee() is not supported yet')
        return self.fetch_funding_fees(params)
    def load_trading_limits(self, symbols=None, reload=False, params={}):
        """Merge per-symbol trading limits into self.markets and return it.

        Fetches at most once unless *reload* is set; the fetch time is cached
        under self.options['limitsLoaded'].  No-op for exchanges without
        fetchTradingLimits support.
        """
        if self.has['fetchTradingLimits']:
            if reload or not('limitsLoaded' in list(self.options.keys())):
                response = self.fetch_trading_limits(symbols)
                for i in range(0, len(symbols)):
                    symbol = symbols[i]
                    self.markets[symbol] = self.deep_extend(self.markets[symbol], response[symbol])
                self.options['limitsLoaded'] = self.milliseconds()
        return self.markets
    def fetch_ohlcvc(self, symbol, timeframe='1m', since=None, limit=None, params={}):
        """Emulate OHLCV candles (plus a trade-count column) by aggregating
        public trades; requires fetchTrades support (see build_ohlcvc)."""
        if not self.has['fetchTrades']:
            raise NotSupported(self.id + ' fetch_ohlcv() is not supported yet')
        self.load_markets()
        trades = self.fetch_trades(symbol, since, limit, params)
        return self.build_ohlcvc(trades, timeframe, since, limit)
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
ohlcvs = self.fetch_ohlcvc(symbol, timeframe, since, limit, params)
return [ohlcv[0:-1] for ohlcv in ohlcvs]
    def fetch_status(self, params={}):
        """Return the cached exchange status, refreshing its 'updated' field
        via fetch_time when the exchange supports it."""
        if self.has['fetchTime']:
            updated = self.fetch_time(params)
            self.status['updated'] = updated
        return self.status
    def fetchOHLCV(self, symbol, timeframe='1m', since=None, limit=None, params={}):
        """camelCase alias of fetch_ohlcv."""
        return self.fetch_ohlcv(symbol, timeframe, since, limit, params)
    def parse_trading_view_ohlcv(self, ohlcvs, market=None, timeframe='1m', since=None, limit=None):
        """Convert TradingView column-oriented history to rows, then parse like regular OHLCV."""
        result = self.convert_trading_view_to_ohlcv(ohlcvs)
        return self.parse_ohlcvs(result, market, timeframe, since, limit)
def convert_trading_view_to_ohlcv(self, ohlcvs, t='t', o='o', h='h', l='l', c='c', v='v', ms=False): # noqa E741
result = []
for i in range(0, len(ohlcvs[t])):
result.append([
ohlcvs[t][i] if ms else (int(ohlcvs[t][i]) * 1000),
ohlcvs[o][i],
ohlcvs[h][i],
ohlcvs[l][i],
ohlcvs[c][i],
ohlcvs[v][i],
])
return result
def convert_ohlcv_to_trading_view(self, ohlcvs, t='t', o='o', h='h', l='l', c='c', v='v', ms=False): # noqa E741
result = {}
result[t] = []
result[o] = []
result[h] = []
result[l] = []
result[c] = []
result[v] = []
for i in range(0, len(ohlcvs)):
result[t].append(ohlcvs[i][0] if ms else int(ohlcvs[i][0] / 1000))
result[o].append(ohlcvs[i][1])
result[h].append(ohlcvs[i][2])
result[l].append(ohlcvs[i][3])
result[c].append(ohlcvs[i][4])
result[v].append(ohlcvs[i][5])
return result
    def build_ohlcvc(self, trades, timeframe='1m', since=None, limit=None):
        """Aggregate a time-sorted list of trades into candles of the form
        [timestamp, open, high, low, close, volume, count].

        NOTE(review): the loop runs over range(0, oldest) with
        oldest = num_trades - 1, so the final trade is never consumed and a
        single-trade input yields no candles — looks like an off-by-one
        carried over from upstream; confirm before changing.
        """
        ms = self.parse_timeframe(timeframe) * 1000
        ohlcvs = []
        # symbolic column indices for the candle lists below
        (timestamp, open, high, low, close, volume, count) = (0, 1, 2, 3, 4, 5, 6)
        num_trades = len(trades)
        oldest = (num_trades - 1) if limit is None else min(num_trades - 1, limit)
        for i in range(0, oldest):
            trade = trades[i]
            if (since is not None) and (trade['timestamp'] < since):
                continue
            opening_time = None
            if trade['timestamp']:
                opening_time = int(math.floor(trade['timestamp'] / ms) * ms)  # Shift the edge of the m/h/d (but not M)
            j = len(ohlcvs)
            candle = j - 1
            if (j == 0) or (opening_time and opening_time >= ohlcvs[candle][timestamp] + ms):
                # moved to a new timeframe -> create a new candle from opening trade
                ohlcvs.append([
                    opening_time,
                    trade['price'],
                    trade['price'],
                    trade['price'],
                    trade['price'],
                    trade['amount'],
                    1,  # count
                ])
            else:
                # still processing the same timeframe -> update opening trade
                ohlcvs[candle][high] = max(ohlcvs[candle][high], trade['price'])
                ohlcvs[candle][low] = min(ohlcvs[candle][low], trade['price'])
                ohlcvs[candle][close] = trade['price']
                ohlcvs[candle][volume] += trade['amount']
                ohlcvs[candle][count] += 1
        return ohlcvs
@staticmethod
def parse_timeframe(timeframe):
amount = int(timeframe[0:-1])
unit = timeframe[-1]
if 'y' == unit:
scale = 60 * 60 * 24 * 365
elif 'M' == unit:
scale = 60 * 60 * 24 * 30
elif 'w' == unit:
scale = 60 * 60 * 24 * 7
elif 'd' == unit:
scale = 60 * 60 * 24
elif 'h' == unit:
scale = 60 * 60
elif 'm' == unit:
scale = 60
elif 's' == unit:
scale = 1
else:
raise NotSupported('timeframe unit {} is not supported'.format(unit))
return amount * scale
    @staticmethod
    def round_timeframe(timeframe, timestamp, direction=ROUND_DOWN):
        """Round a millisecond *timestamp* to the nearest *timeframe* boundary,
        down by default or up when *direction* is ROUND_UP."""
        ms = Exchange.parse_timeframe(timeframe) * 1000
        # Get offset based on timeframe in milliseconds
        offset = timestamp % ms
        return timestamp - offset + (ms if direction == ROUND_UP else 0)
def safe_ticker(self, ticker, market=None, legacy=True):
if legacy:
symbol = self.safe_value(ticker, 'symbol')
if symbol is None:
symbol = self.safe_symbol(None, market)
timestamp = self.safe_integer(ticker, 'timestamp')
baseVolume = self.safe_value(ticker, 'baseVolume')
quoteVolume = self.safe_value(ticker, 'quoteVolume')
vwap = self.safe_value(ticker, 'vwap')
if vwap is None:
vwap = self.vwap(baseVolume, quoteVolume)
open = self.safe_value(ticker, 'open')
close = self.safe_value(ticker, 'close')
last = self.safe_value(ticker, 'last')
change = self.safe_value(ticker, 'change')
percentage = self.safe_value(ticker, 'percentage')
average = self.safe_value(ticker, 'average')
if (last is not None) and (close is None):
close = last
elif (last is None) and (close is not None):
last = close
if (last is not None) and (open is not None):
if change is None:
change = last - open
if average is None:
average = self.sum(last, open) / 2
if (percentage is None) and (change is not None) and (open is not None) and (open > 0):
percentage = change / open * 100
if (change is None) and (percentage is not None) and (last is not None):
change = percentage / 100 * last
if (open is None) and (last is not None) and (change is not None):
open = last - change
if (vwap is not None) and (baseVolume is not None) and (quoteVolume is None):
quoteVolume = vwap / baseVolume
if (vwap is not None) and (quoteVolume is not None) and (baseVolume is None):
baseVolume = quoteVolume / vwap
ticker['symbol'] = symbol
ticker['timestamp'] = timestamp
ticker['datetime'] = self.iso8601(timestamp)
ticker['open'] = open
ticker['close'] = close
ticker['last'] = last
ticker['vwap'] = vwap
ticker['change'] = change
ticker['percentage'] = percentage
ticker['average'] = average
return ticker
else:
open = self.safe_value(ticker, 'open')
close = self.safe_value(ticker, 'close')
last = self.safe_value(ticker, 'last')
change = self.safe_value(ticker, 'change')
percentage = self.safe_value(ticker, 'percentage')
average = self.safe_value(ticker, 'average')
vwap = self.safe_value(ticker, 'vwap')
baseVolume = self.safe_value(ticker, 'baseVolume')
quoteVolume = self.safe_value(ticker, 'quoteVolume')
if vwap is None:
vwap = Precise.string_div(quoteVolume, baseVolume)
if (last is not None) and (close is None):
close = last
elif (last is None) and (close is not None):
last = close
if (last is not None) and (open is not None):
if change is None:
change = Precise.string_sub(last, open)
if average is None:
average = Precise.string_div(Precise.string_add(last, open), '2')
if (percentage is None) and (change is not None) and (open is not None) and (Precise.string_gt(open, '0')):
percentage = Precise.string_mul(Precise.string_div(change, open), '100')
if (change is None) and (percentage is not None) and (last is not None):
change = Precise.string_div(Precise.string_mul(percentage, last), '100')
if (open is None) and (last is not None) and (change is not None):
open = Precise.string_sub(last, change)
# timestamp and symbol operations don't belong in safeTicker
# they should be done in the derived classes
return self.extend(ticker, {
'bid': self.safe_number(ticker, 'bid'),
'bidVolume': self.safe_number(ticker, 'bidVolume'),
'ask': self.safe_number(ticker, 'ask'),
'askVolume': self.safe_number(ticker, 'askVolume'),
'high': self.safe_number(ticker, 'high'),
'low': self.safe_number(ticker, 'low'),
'open': self.parse_number(open),
'close': self.parse_number(close),
'last': self.parse_number(last),
'change': self.parse_number(change),
'percentage': self.parse_number(percentage),
'average': self.parse_number(average),
'vwap': self.parse_number(vwap),
'baseVolume': self.parse_number(baseVolume),
'quoteVolume': self.parse_number(quoteVolume),
})
def parse_tickers(self, tickers, symbols=None, params={}):
result = []
values = self.to_array(tickers)
for i in range(0, len(values)):
result.append(self.extend(self.parse_ticker(values[i]), params))
return self.filter_by_array(result, 'symbol', symbols)
def parse_deposit_addresses(self, addresses, codes=None, indexed=True, params={}):
result = []
for i in range(0, len(addresses)):
address = self.extend(self.parse_deposit_address(addresses[i]), params)
result.append(address)
if codes:
result = self.filter_by_array(result, 'currency', codes, False)
return self.index_by(result, 'currency') if indexed else result
    def parse_trades(self, trades, market=None, since=None, limit=None, params={}):
        """Parse raw trades, sort by (timestamp, id), and window by symbol/since/limit."""
        array = self.to_array(trades)
        array = [self.merge(self.parse_trade(trade, market), params) for trade in array]
        array = self.sort_by_2(array, 'timestamp', 'id')
        symbol = market['symbol'] if market else None
        # when no 'since' was requested, keep the newest entries (tail)
        tail = since is None
        return self.filter_by_symbol_since_limit(array, symbol, since, limit, tail)
def parse_transactions(self, transactions, currency=None, since=None, limit=None, params={}):
array = self.to_array(transactions)
array = [self.extend(self.parse_transaction(transaction, currency), params) for transaction in array]
array = self.sort_by(array, 'timestamp')
code = currency['code'] if currency else None
tail = since is None
return self.filter_by_currency_since_limit(array, code, since, limit, tail)
def parse_transfers(self, transfers, currency=None, since=None, limit=None, params={}):
array = self.to_array(transfers)
array = [self.extend(self.parse_transfer(transfer, currency), params) for transfer in array]
array = self.sort_by(array, 'timestamp')
code = currency['code'] if currency else None
tail = since is None
return self.filter_by_currency_since_limit(array, code, since, limit, tail)
    def parse_ledger(self, data, currency=None, since=None, limit=None, params={}):
        """Parse raw ledger rows (a row parser may return one entry or a list),
        sort by timestamp, and window by currency/since/limit."""
        array = self.to_array(data)
        result = []
        for item in array:
            entry = self.parse_ledger_entry(item, currency)
            if isinstance(entry, list):
                # one raw row expanded into several unified entries
                result += [self.extend(i, params) for i in entry]
            else:
                result.append(self.extend(entry, params))
        result = self.sort_by(result, 'timestamp')
        code = currency['code'] if currency else None
        tail = since is None
        return self.filter_by_currency_since_limit(result, code, since, limit, tail)
    def safe_ledger_entry(self, entry, currency=None):
        """Complete a unified ledger entry with derivable fields.

        Derives (via Precise string arithmetic) whichever of before/after is
        missing from amount+fee, infers 'direction' from the before/after
        comparison, and back-fills a missing amount or fee from the
        before/after delta.  Defaults are merged in for any field *entry*
        does not provide.
        """
        currency = self.safe_currency(None, currency)
        direction = self.safe_string(entry, 'direction')
        before = self.safe_string(entry, 'before')
        after = self.safe_string(entry, 'after')
        amount = self.safe_string(entry, 'amount')
        fee = self.safe_string(entry, 'fee')
        if amount is not None and fee is not None:
            # reconstruct the missing balance endpoint from amount + fee
            if before is None and after is not None:
                amountAndFee = Precise.string_add(amount, fee)
                before = Precise.string_sub(after, amountAndFee)
            elif before is not None and after is None:
                amountAndFee = Precise.string_add(amount, fee)
                after = Precise.string_add(before, amountAndFee)
        if before is not None and after is not None:
            if direction is None:
                if Precise.string_gt(before, after):
                    direction = 'out'
                if Precise.string_gt(after, before):
                    direction = 'in'
            if amount is None and fee is not None:
                betweenAfterBefore = Precise.string_sub(after, before)
                amount = Precise.string_sub(betweenAfterBefore, fee)
            if amount is not None and fee is None:
                betweenAfterBefore = Precise.string_sub(after, before)
                fee = Precise.string_sub(betweenAfterBefore, amount)
        return self.extend({
            'id': None,
            'timestamp': None,
            'datetime': None,
            'direction': None,
            'account': None,
            'referenceId': None,
            'referenceAccount': None,
            'type': None,
            'currency': currency['code'],
            'amount': amount,
            'before': before,
            'after': after,
            'status': None,
            'fee': fee,
            'info': None,
        }, entry)
    def parse_orders(self, orders, market=None, since=None, limit=None, params={}):
        """Parse raw orders (a list, or a dict keyed by order id), sort by
        timestamp, and window by symbol/since/limit."""
        if isinstance(orders, list):
            array = [self.extend(self.parse_order(order, market), params) for order in orders]
        else:
            # dict form: inject each key into the raw order as its 'id' before parsing
            array = [self.extend(self.parse_order(self.extend({'id': id}, order), market), params) for id, order in orders.items()]
        array = self.sort_by(array, 'timestamp')
        symbol = market['symbol'] if market else None
        tail = since is None
        return self.filter_by_symbol_since_limit(array, symbol, since, limit, tail)
    def safe_market(self, marketId, market=None, delimiter=None):
        """Resolve a market structure for a raw exchange market id.

        Resolution order: the markets_by_id index; then, when *delimiter* is
        given and the id splits into exactly two parts, a synthesized
        base/quote market; then the provided *market* fallback; finally a
        minimal stub whose 'symbol' is the raw id itself.
        """
        if marketId is not None:
            if self.markets_by_id is not None and marketId in self.markets_by_id:
                market = self.markets_by_id[marketId]
            elif delimiter is not None:
                parts = marketId.split(delimiter)
                if len(parts) == 2:
                    baseId = self.safe_string(parts, 0)
                    quoteId = self.safe_string(parts, 1)
                    base = self.safe_currency_code(baseId)
                    quote = self.safe_currency_code(quoteId)
                    symbol = base + '/' + quote
                    return {
                        'id': marketId,
                        'symbol': symbol,
                        'base': base,
                        'quote': quote,
                        'baseId': baseId,
                        'quoteId': quoteId,
                    }
                else:
                    # delimiter did not split into base/quote: fall back to a stub
                    return {
                        'id': marketId,
                        'symbol': marketId,
                        'base': None,
                        'quote': None,
                        'baseId': None,
                        'quoteId': None,
                    }
        if market is not None:
            return market
        return {
            'id': marketId,
            'symbol': marketId,
            'base': None,
            'quote': None,
            'baseId': None,
            'quoteId': None,
        }
def safe_symbol(self, marketId, market=None, delimiter=None):
market = self.safe_market(marketId, market, delimiter)
return market['symbol']
    def safe_currency(self, currency_id, currency=None):
        """Resolve a currency structure for a raw currency id.

        Falls back to the provided *currency*, then the currencies_by_id
        index, then a minimal {'id', 'code'} stub with a unified code.
        """
        if currency_id is None and currency is not None:
            return currency
        if (self.currencies_by_id is not None) and (currency_id in self.currencies_by_id):
            return self.currencies_by_id[currency_id]
        return {
            'id': currency_id,
            'code': self.common_currency_code(currency_id.upper()) if currency_id is not None else currency_id
        }
def safe_currency_code(self, currency_id, currency=None):
currency = self.safe_currency(currency_id, currency)
return currency['code']
def filter_by_value_since_limit(self, array, field, value=None, since=None, limit=None, key='timestamp', tail=False):
array = self.to_array(array)
if value is not None:
array = [entry for entry in array if entry[field] == value]
if since is not None:
array = [entry for entry in array if entry[key] >= since]
if limit is not None:
array = array[-limit:] if tail else array[:limit]
return array
    def filter_by_symbol_since_limit(self, array, symbol=None, since=None, limit=None, tail=False):
        """Filter entries by 'symbol' and timestamp window (see filter_by_value_since_limit)."""
        return self.filter_by_value_since_limit(array, 'symbol', symbol, since, limit, 'timestamp', tail)
    def filter_by_currency_since_limit(self, array, code=None, since=None, limit=None, tail=False):
        """Filter entries by 'currency' and timestamp window (see filter_by_value_since_limit)."""
        return self.filter_by_value_since_limit(array, 'currency', code, since, limit, 'timestamp', tail)
def filter_by_since_limit(self, array, since=None, limit=None, key='timestamp', tail=False):
array = self.to_array(array)
if since is not None:
array = [entry for entry in array if entry[key] >= since]
if limit is not None:
array = array[-limit:] if tail else array[:limit]
return array
def filter_by_symbol(self, array, symbol=None):
array = self.to_array(array)
if symbol:
return [entry for entry in array if entry['symbol'] == symbol]
return array
def filter_by_array(self, objects, key, values=None, indexed=True):
objects = self.to_array(objects)
# return all of them if no values were passed in
if values is None:
return self.index_by(objects, key) if indexed else objects
result = []
for i in range(0, len(objects)):
value = objects[i][key] if key in objects[i] else None
if value in values:
result.append(objects[i])
return self.index_by(result, key) if indexed else result
    def currency(self, code):
        """Return the currency structure for a unified code (or raw id).

        Raises ExchangeError when currencies are not loaded or the code is
        unknown.
        """
        if not self.currencies:
            raise ExchangeError(self.id + ' currencies not loaded')
        if isinstance(code, str):
            if code in self.currencies:
                return self.currencies[code]
            elif code in self.currencies_by_id:
                return self.currencies_by_id[code]
        raise ExchangeError(self.id + ' does not have currency code ' + str(code))
    def market(self, symbol):
        """Return the market structure for a unified symbol (or raw market id).

        Raises ExchangeError when markets are not loaded and BadSymbol when
        the symbol is unknown.
        """
        if not self.markets:
            raise ExchangeError(self.id + ' markets not loaded')
        if not self.markets_by_id:
            raise ExchangeError(self.id + ' markets not loaded')
        if isinstance(symbol, str):
            if symbol in self.markets:
                return self.markets[symbol]
            elif symbol in self.markets_by_id:
                return self.markets_by_id[symbol]
        raise BadSymbol(self.id + ' does not have market symbol ' + symbol)
    def market_ids(self, symbols):
        """Map a list of unified symbols to their exchange-specific market ids."""
        return [self.market_id(symbol) for symbol in symbols]
    def market_id(self, symbol):
        """Return the exchange-specific id for a unified symbol (the symbol
        itself when the resolved market is not a dict)."""
        market = self.market(symbol)
        return market['id'] if type(market) is dict else symbol
    def calculate_fee(self, symbol, type, side, amount, price, takerOrMaker='taker', params={}):
        """Compute the trading fee for a hypothetical order.

        The market's 'feeSide' selects the fee currency/base cost:
        'quote' (default), 'base', 'get' (the currency received), or 'give'
        (the currency spent).  Returns {'type', 'currency', 'rate', 'cost'}.
        """
        market = self.markets[symbol]
        feeSide = self.safe_string(market, 'feeSide', 'quote')
        key = 'quote'
        cost = None
        if feeSide == 'quote':
            # the fee is always in quote currency
            cost = amount * price
        elif feeSide == 'base':
            # the fee is always in base currency
            cost = amount
        elif feeSide == 'get':
            # the fee is always in the currency you get
            cost = amount
            if side == 'sell':
                cost *= price
            else:
                key = 'base'
        elif feeSide == 'give':
            # the fee is always in the currency you give
            cost = amount
            if side == 'buy':
                cost *= price
            else:
                key = 'base'
        rate = market[takerOrMaker]
        if cost is not None:
            cost *= rate
        return {
            'type': takerOrMaker,
            'currency': market[key],
            'rate': rate,
            'cost': cost,
        }
    def edit_limit_buy_order(self, id, symbol, *args):
        """Edit a limit buy order (convenience wrapper around edit_limit_order)."""
        return self.edit_limit_order(id, symbol, 'buy', *args)
    def edit_limit_sell_order(self, id, symbol, *args):
        """Edit a limit sell order (convenience wrapper around edit_limit_order)."""
        return self.edit_limit_order(id, symbol, 'sell', *args)
    def edit_limit_order(self, id, symbol, *args):
        """Edit a limit order (convenience wrapper around edit_order)."""
        return self.edit_order(id, symbol, 'limit', *args)
    def edit_order(self, id, symbol, *args):
        """Emulate order editing as cancel-then-create.

        Not atomic: the cancel may succeed while the create fails.  Requires
        enableRateLimit so the two sequential requests are throttled.
        """
        if not self.enableRateLimit:
            raise ExchangeError(self.id + ' edit_order() requires enableRateLimit = true')
        self.cancel_order(id, symbol)
        return self.create_order(symbol, *args)
    def create_limit_order(self, symbol, side, amount, price, params={}) -> dict:
        """Place a limit order (convenience wrapper around create_order)."""
        return self.create_order(symbol, 'limit', side, amount, price, params)
def create_market_order(self, symbol, side, amount, price=None, params={}) -> dict:
return self.create_order(symbol, 'market', side, amount, price, params)
def create_limit_buy_order(self, symbol, amount, price, params={}) -> dict:
return self.create_order(symbol, 'limit', 'buy', amount, price, params)
def create_limit_sell_order(self, symbol, amount, price, params={}) -> dict:
return self.create_order(symbol, 'limit', 'sell', amount, price, params)
def create_market_buy_order(self, symbol, amount, params={}) -> dict:
return self.create_order(symbol, 'market', 'buy', amount, None, params)
def create_market_sell_order(self, symbol, amount, params={}) -> dict:
return self.create_order(symbol, 'market', 'sell', amount, None, params)
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the final request (url, headers, body) for an endpoint.

        Abstract: every concrete exchange subclass must override this.
        :raises NotSupported: always, in this base-class stub
        """
        raise NotSupported(self.id + ' sign() pure method must be redefined in derived classes')
def vwap(self, baseVolume, quoteVolume):
return (quoteVolume / baseVolume) if (quoteVolume is not None) and (baseVolume is not None) and (baseVolume > 0) else None
# -------------------------------------------------------------------------
    def check_required_dependencies(self):
        """Raise NotSupported when an optional dependency required by this exchange is missing."""
        # `eddsa` is the optional python-axolotl-curve25519 module; it is None when its import failed
        if self.requiresEddsa and eddsa is None:
            raise NotSupported(self.id + ' Eddsa functionality requires python-axolotl-curve25519, install with `pip install python-axolotl-curve25519==0.4.1.post2`: https://github.com/tgalal/python-axolotl-curve25519')
    def privateKeyToAddress(self, privateKey):
        """Derive the Ethereum-style address (0x-prefixed lowercase hex) from a hex-encoded secp256k1 private key."""
        # hex string -> raw private key bytes (second argument True = casefold hex digits)
        private_key_bytes = base64.b16decode(Exchange.encode(privateKey), True)
        # uncompressed secp256k1 public key bytes (without a leading prefix byte)
        public_key_bytes = ecdsa.SigningKey.from_string(private_key_bytes, curve=ecdsa.SECP256k1).verifying_key.to_string()
        public_key_hash = keccak.SHA3(public_key_bytes)
        # the address is the last 20 bytes (40 hex characters) of the keccak hash
        return '0x' + Exchange.decode(base64.b16encode(public_key_hash))[-40:].lower()
@staticmethod
def remove0x_prefix(value):
if value[:2] == '0x':
return value[2:]
return value
    def hashMessage(self, message):
        """Keccak-hash a hex-encoded message with the 'Ethereum Signed Message' prefix (EIP-191 personal_sign); returns 0x-prefixed hex."""
        message_bytes = base64.b16decode(Exchange.encode(Exchange.remove0x_prefix(message)), True)
        # prefix = "\x19Ethereum Signed Message:\n" + decimal byte length of the message
        hash_bytes = keccak.SHA3(b"\x19Ethereum Signed Message:\n" + Exchange.encode(str(len(message_bytes))) + message_bytes)
        return '0x' + Exchange.decode(base64.b16encode(hash_bytes)).lower()
    @staticmethod
    def signHash(hash, privateKey):
        """ECDSA-sign a 32-byte hash (hex string) with a secp256k1 private key.

        :returns dict: {'r': '0x…', 's': '0x…', 'v': 27 or 28}
        """
        signature = Exchange.ecdsa(hash[-64:], privateKey, 'secp256k1', None)
        return {
            'r': '0x' + signature['r'],
            's': '0x' + signature['s'],
            'v': 27 + signature['v'],  # Ethereum recovery-id convention offsets v by 27
        }
    def sign_message_string(self, message, privateKey):
        """Return the signature of `message` as one concatenated hex string: r + s + v (one byte)."""
        signature = self.signMessage(message, privateKey)
        return signature['r'] + Exchange.remove0x_prefix(signature['s']) + Exchange.binary_to_base16(Exchange.number_to_be(signature['v'], 1))
    def signMessage(self, message, privateKey):
        """Sign a hex message with an Ethereum 'personal sign' prefix and return {'r', 's', 'v'}.

        hashMessage() applies the "\\x19Ethereum Signed Message:\\n<len>" prefix
        before keccak hashing — this matches MetaMask's ETH_SIGN prefixType
        (z.ecSignOrderHashAsync with prefixType 'ETH_SIGN'), which yields
        v = 27/28, as opposed to prefixType 'NONE' which hashes the raw message.
        Only the last 64 hex chars (32 bytes) of the hash and of the private key
        are used, so 0x-prefixed inputs are accepted.
        """
        message_hash = self.hashMessage(message)
        signature = self.signHash(message_hash[-64:], privateKey[-64:])
        return signature
def get_network(self, network, code):
network = network.upper()
aliases = {
'ETHEREUM': 'ETH',
'ETHER': 'ETH',
'ERC20': 'ETH',
'ETH': 'ETH',
'TRC20': 'TRX',
'TRON': 'TRX',
'TRX': 'TRX',
'BEP20': 'BSC',
'BSC': 'BSC',
'HRC20': 'HT',
'HECO': 'HT',
'SPL': 'SOL',
'SOL': 'SOL',
'TERRA': 'LUNA',
'LUNA': 'LUNA',
'POLYGON': 'MATIC',
'MATIC': 'MATIC',
'EOS': 'EOS',
'WAVES': 'WAVES',
'AVALANCHE': 'AVAX',
'AVAX': 'AVAX',
'QTUM': 'QTUM',
'CHZ': 'CHZ',
'NEO': 'NEO',
'ONT': 'ONT',
'RON': 'RON',
}
if network == code:
return network
elif network in aliases:
return aliases[network]
else:
raise NotSupported(self.id + ' network ' + network + ' is not yet supported')
def oath(self):
if self.twofa is not None:
return self.totp(self.twofa)
else:
raise ExchangeError(self.id + ' set .twofa to use this feature')
    @staticmethod
    def totp(key):
        """Compute a 6-digit time-based one-time password (RFC 6238, 30-second period, HMAC-SHA1) from a base32 secret."""
        def hex_to_dec(n):
            # parse a hex string (or single hex digit) into an int
            return int(n, base=16)
        def base32_to_bytes(n):
            # re-pad the secret with '=' to a multiple of 8 chars, as b32decode requires
            missing_padding = len(n) % 8
            padding = 8 - missing_padding if missing_padding > 0 else 0
            padded = n.upper() + ('=' * padding)
            return base64.b32decode(padded)  # throws an error if the key is invalid
        # 30-second time step counter, encoded as an 8-byte big-endian value
        epoch = int(time.time()) // 30
        hmac_res = Exchange.hmac(epoch.to_bytes(8, 'big'), base32_to_bytes(key.replace(' ', '')), hashlib.sha1, 'hex')
        # RFC 4226 dynamic truncation: low nibble of the last byte selects the offset;
        # doubled because we index into a hex string, not raw bytes
        offset = hex_to_dec(hmac_res[-1]) * 2
        otp = str(hex_to_dec(hmac_res[offset: offset + 8]) & 0x7fffffff)
        return otp[-6:]
@staticmethod
def number_to_le(n, size):
return int(n).to_bytes(size, 'little')
@staticmethod
def number_to_be(n, size):
return int(n).to_bytes(size, 'big')
@staticmethod
def base16_to_binary(s):
return base64.b16decode(s, True)
@staticmethod
def binary_to_base16(s):
return Exchange.decode(base64.b16encode(s)).lower()
def sleep(self, milliseconds):
return time.sleep(milliseconds / 1000)
    @staticmethod
    def base58_to_binary(s):
        """Decode a base58 string into its big-endian byte representation."""
        # build the shared decode/encode lookup tables lazily, once per process
        if Exchange.base58_decoder is None:
            Exchange.base58_decoder = {}
            Exchange.base58_encoder = {}
            for i, c in enumerate(Exchange.base58_alphabet):
                Exchange.base58_decoder[c] = i
                Exchange.base58_encoder[i] = c
        # accumulate the digits into one big integer, base-58 positional style
        result = 0
        for i in range(len(s)):
            result *= 58
            result += Exchange.base58_decoder[s[i]]
        return result.to_bytes((result.bit_length() + 7) // 8, 'big')
    @staticmethod
    def binary_to_base58(b):
        """Encode bytes as a base58 string (inverse of base58_to_binary).

        NOTE(review): leading zero bytes are not preserved as leading '1'
        digits, unlike canonical Base58Check — confirm callers do not rely on that.
        """
        # build the shared encode/decode lookup tables lazily, once per process
        if Exchange.base58_encoder is None:
            Exchange.base58_decoder = {}
            Exchange.base58_encoder = {}
            for i, c in enumerate(Exchange.base58_alphabet):
                Exchange.base58_decoder[c] = i
                Exchange.base58_encoder[i] = c
        result = 0
        # undo decimal_to_bytes
        for byte in b:
            result *= 0x100
            result += byte
        # peel off base-58 digits least-significant first, then reverse
        string = []
        while result > 0:
            result, next_character = divmod(result, 58)
            string.append(Exchange.base58_encoder[next_character])
        string.reverse()
        return ''.join(string)
    def reduce_fees_by_currency(self, fees, string=False):
        """Aggregate a list of fee structures, summing costs per (currency, rate) pair.

        Input entries look like {'currency': 'BTC', 'cost': 0.2, 'rate': 0.00123}
        ('rate' is optional; costs/rates are decimal strings when string=True,
        numbers otherwise). Entries sharing both currency and rate are merged
        into a single entry whose cost is the sum; entries with distinct rates
        stay separate, e.g.:

            [{'currency': 'BTC', 'cost': '0.1'},
             {'currency': 'BTC', 'cost': '0.2'},
             {'currency': 'BTC', 'cost': '0.4', 'rate': '0.00123'},
             {'currency': 'BTC', 'cost': '0.5', 'rate': '0.00456'}]
        ->
            [{'currency': 'BTC', 'cost': '0.3'},
             {'currency': 'BTC', 'cost': '0.4', 'rate': '0.00123'},
             {'currency': 'BTC', 'cost': '0.5', 'rate': '0.00456'}]

        :param list fees: fee structures to aggregate
        :param bool string: True to sum with Precise string arithmetic,
            False to sum numerically with self.sum
        :returns list: the reduced fee structures
        """
        reduced = {}
        for i in range(0, len(fees)):
            fee = fees[i]
            feeCurrencyCode = self.safe_string(fee, 'currency')
            if feeCurrencyCode is not None:
                rate = self.safe_string(fee, 'rate')
                cost = self.safe_value(fee, 'cost')
                if not (feeCurrencyCode in reduced):
                    reduced[feeCurrencyCode] = {}
                # entries without a rate are grouped under the '' key
                rateKey = '' if (rate is None) else rate
                if rateKey in reduced[feeCurrencyCode]:
                    if string:
                        reduced[feeCurrencyCode][rateKey]['cost'] = Precise.string_add(reduced[feeCurrencyCode][rateKey]['cost'], cost)
                    else:
                        reduced[feeCurrencyCode][rateKey]['cost'] = self.sum(reduced[feeCurrencyCode][rateKey]['cost'], cost)
                else:
                    reduced[feeCurrencyCode][rateKey] = {
                        'currency': feeCurrencyCode,
                        'cost': cost if string else self.parse_number(cost),
                    }
                    if rate is not None:
                        reduced[feeCurrencyCode][rateKey]['rate'] = rate if string else self.parse_number(rate)
        # flatten the nested {currency: {rate: fee}} mapping back into a list
        result = []
        feeValues = list(reduced.values())
        for i in range(0, len(feeValues)):
            reducedFeeValues = list(feeValues[i].values())
            result = self.array_concat(result, reducedFeeValues)
        return result
    def safe_trade(self, trade, market=None):
        """Normalize a parsed trade in place: derive a missing 'cost' from price,
        amount and contract size, merge/deduplicate 'fee'/'fees' entries, and
        convert the string-valued numeric fields to numbers.

        :param dict trade: a trade structure with string-valued numeric fields
        :param dict market: the market the trade belongs to (may be None)
        :returns dict: the same trade object, mutated
        """
        amount = self.safe_string(trade, 'amount')
        price = self.safe_string(trade, 'price')
        cost = self.safe_string(trade, 'cost')
        if cost is None:
            # contract trading: cost = price * contractSize * amount,
            # with price inverted for inverse contracts
            contractSize = self.safe_string(market, 'contractSize')
            multiplyPrice = price
            if contractSize is not None:
                inverse = self.safe_value(market, 'inverse', False)
                if inverse:
                    multiplyPrice = Precise.string_div('1', price)
                multiplyPrice = Precise.string_mul(multiplyPrice, contractSize)
            cost = Precise.string_mul(multiplyPrice, amount)
        parseFee = self.safe_value(trade, 'fee') is None
        parseFees = self.safe_value(trade, 'fees') is None
        shouldParseFees = parseFee or parseFees
        fees = self.safe_value(trade, 'fees', [])
        if shouldParseFees:
            # collect copies of whatever fee information is present ('fees' list wins)
            tradeFees = self.safe_value(trade, 'fees')
            if tradeFees is not None:
                for j in range(0, len(tradeFees)):
                    tradeFee = tradeFees[j]
                    fees.append(self.extend({}, tradeFee))
            else:
                tradeFee = self.safe_value(trade, 'fee')
                if tradeFee is not None:
                    fees.append(self.extend({}, tradeFee))
        fee = self.safe_value(trade, 'fee')
        if shouldParseFees:
            reducedFees = self.reduce_fees_by_currency(fees, True) if self.reduceFees else fees
            reducedLength = len(reducedFees)
            for i in range(0, reducedLength):
                reducedFees[i]['cost'] = self.safe_number(reducedFees[i], 'cost')
                if 'rate' in reducedFees[i]:
                    reducedFees[i]['rate'] = self.safe_number(reducedFees[i], 'rate')
            if not parseFee and (reducedLength == 0):
                fee['cost'] = self.safe_number(fee, 'cost')
                if 'rate' in fee:
                    fee['rate'] = self.safe_number(fee, 'rate')
                reducedFees.append(fee)
            if parseFees:
                trade['fees'] = reducedFees
            if parseFee and (reducedLength == 1):
                trade['fee'] = reducedFees[0]
            tradeFee = self.safe_value(trade, 'fee')
            if tradeFee is not None:
                tradeFee['cost'] = self.safe_number(tradeFee, 'cost')
                if 'rate' in tradeFee:
                    tradeFee['rate'] = self.safe_number(tradeFee, 'rate')
                trade['fee'] = tradeFee
        # convert the string-precision fields into the configured number type
        trade['amount'] = self.parse_number(amount)
        trade['price'] = self.parse_number(price)
        trade['cost'] = self.parse_number(cost)
        return trade
    def safe_order(self, order, market=None):
        """Normalize a parsed order: derive filled/cost/average/remaining from its
        trades when missing, aggregate fees, fix up timeInForce, and convert the
        string-valued numeric fields into numbers.

        :param dict order: a parsed order whose numeric fields are strings;
            it is important to pass the trades as unparsed rawTrades
        :param dict market: the market the order belongs to (may be None)
        :returns dict: the order, extended with the derived fields
        """
        # parses numbers as strings
        # it is important pass the trades as unparsed rawTrades
        amount = self.omit_zero(self.safe_string(order, 'amount'))
        remaining = self.safe_string(order, 'remaining')
        filled = self.safe_string(order, 'filled')
        cost = self.safe_string(order, 'cost')
        average = self.omit_zero(self.safe_string(order, 'average'))
        price = self.omit_zero(self.safe_string(order, 'price'))
        lastTradeTimeTimestamp = self.safe_integer(order, 'lastTradeTimestamp')
        parseFilled = (filled is None)
        parseCost = (cost is None)
        parseLastTradeTimeTimestamp = (lastTradeTimeTimestamp is None)
        fee = self.safe_value(order, 'fee')
        parseFee = (fee is None)
        parseFees = self.safe_value(order, 'fees') is None
        shouldParseFees = parseFee or parseFees
        fees = self.safe_value(order, 'fees', [])
        trades = []
        if parseFilled or parseCost or shouldParseFees:
            rawTrades = self.safe_value(order, 'trades', trades)
            oldNumber = self.number
            # we parse trades as strings here!
            self.number = str
            trades = self.parse_trades(rawTrades, market, None, None, {
                'symbol': order['symbol'],
                'side': order['side'],
                'type': order['type'],
                'order': order['id'],
            })
            self.number = oldNumber
            if isinstance(trades, list) and len(trades):
                # move properties that are defined in trades up into the order
                if order['symbol'] is None:
                    order['symbol'] = trades[0]['symbol']
                if order['side'] is None:
                    order['side'] = trades[0]['side']
                if order['type'] is None:
                    order['type'] = trades[0]['type']
                if order['id'] is None:
                    order['id'] = trades[0]['order']
                if parseFilled:
                    filled = '0'
                if parseCost:
                    cost = '0'
                # accumulate filled/cost/fees and the latest trade timestamp over all trades
                for i in range(0, len(trades)):
                    trade = trades[i]
                    tradeAmount = self.safe_string(trade, 'amount')
                    if parseFilled and (tradeAmount is not None):
                        filled = Precise.string_add(filled, tradeAmount)
                    tradeCost = self.safe_string(trade, 'cost')
                    if parseCost and (tradeCost is not None):
                        cost = Precise.string_add(cost, tradeCost)
                    tradeTimestamp = self.safe_value(trade, 'timestamp')
                    if parseLastTradeTimeTimestamp and (tradeTimestamp is not None):
                        if lastTradeTimeTimestamp is None:
                            lastTradeTimeTimestamp = tradeTimestamp
                        else:
                            lastTradeTimeTimestamp = max(lastTradeTimeTimestamp, tradeTimestamp)
                    if shouldParseFees:
                        tradeFees = self.safe_value(trade, 'fees')
                        if tradeFees is not None:
                            for j in range(0, len(tradeFees)):
                                tradeFee = tradeFees[j]
                                fees.append(self.extend({}, tradeFee))
                        else:
                            tradeFee = self.safe_value(trade, 'fee')
                            if tradeFee is not None:
                                fees.append(self.extend({}, tradeFee))
        if shouldParseFees:
            reducedFees = self.reduce_fees_by_currency(fees, True) if self.reduceFees else fees
            reducedLength = len(reducedFees)
            for i in range(0, reducedLength):
                reducedFees[i]['cost'] = self.parse_number(reducedFees[i]['cost'])
                if 'rate' in reducedFees[i]:
                    reducedFees[i]['rate'] = self.parse_number(reducedFees[i]['rate'])
            if not parseFee and (reducedLength == 0):
                fee['cost'] = self.safe_number(fee, 'cost')
                if 'rate' in fee:
                    fee['rate'] = self.parse_number(fee['rate'])
                reducedFees.append(fee)
            if parseFees:
                order['fees'] = reducedFees
            if parseFee and (reducedLength == 1):
                order['fee'] = reducedFees[0]
        if amount is None:
            # ensure amount = filled + remaining
            if filled is not None and remaining is not None:
                amount = Precise.string_add(filled, remaining)
            elif self.safe_string(order, 'status') == 'closed':
                amount = filled
        if filled is None:
            if amount is not None and remaining is not None:
                filled = Precise.string_sub(amount, remaining)
        if remaining is None:
            if amount is not None and filled is not None:
                remaining = Precise.string_sub(amount, filled)
        # ensure that the average field is calculated correctly
        if average is None:
            if (filled is not None) and (cost is not None) and Precise.string_gt(filled, '0'):
                average = Precise.string_div(cost, filled)
        # also ensure the cost field is calculated correctly
        costPriceExists = (average is not None) or (price is not None)
        if parseCost and (filled is not None) and costPriceExists:
            multiplyPrice = None
            if average is None:
                multiplyPrice = price
            else:
                multiplyPrice = average
            # contract trading
            contractSize = self.safe_string(market, 'contractSize')
            if contractSize is not None:
                inverse = self.safe_value(market, 'inverse', False)
                if inverse:
                    multiplyPrice = Precise.string_div('1', multiplyPrice)
                multiplyPrice = Precise.string_mul(multiplyPrice, contractSize)
            cost = Precise.string_mul(multiplyPrice, filled)
        # support for market orders
        orderType = self.safe_value(order, 'type')
        emptyPrice = (price is None) or Precise.string_equals(price, '0')
        if emptyPrice and (orderType == 'market'):
            price = average
        # we have trades with string values at self point so we will mutate them
        for i in range(0, len(trades)):
            entry = trades[i]
            entry['amount'] = self.safe_number(entry, 'amount')
            entry['price'] = self.safe_number(entry, 'price')
            entry['cost'] = self.safe_number(entry, 'cost')
            fee = self.safe_value(entry, 'fee', {})
            fee['cost'] = self.safe_number(fee, 'cost')
            if 'rate' in fee:
                fee['rate'] = self.safe_number(fee, 'rate')
            entry['fee'] = fee
        # timeInForceHandling
        timeInForce = self.safe_string(order, 'timeInForce')
        if self.safe_value(order, 'postOnly', False):
            timeInForce = 'PO'
        elif self.safe_string(order, 'type') == 'market':
            timeInForce = 'IOC'
        return self.extend(order, {
            'lastTradeTimestamp': lastTradeTimeTimestamp,
            'price': self.parse_number(price),
            'amount': self.parse_number(amount),
            'cost': self.parse_number(cost),
            'average': self.parse_number(average),
            'filled': self.parse_number(filled),
            'remaining': self.parse_number(remaining),
            'trades': trades,
            'timeInForce': timeInForce,
        })
def parse_number(self, value, default=None):
if value is None:
return default
else:
try:
return self.number(value)
except Exception:
return default
def safe_number(self, dictionary, key, default=None):
value = self.safe_string(dictionary, key)
return self.parse_number(value, default)
def safe_number_2(self, dictionary, key1, key2, default=None):
value = self.safe_string_2(dictionary, key1, key2)
return self.parse_number(value, default)
def parse_precision(self, precision):
if precision is None:
return None
return '1e' + Precise.string_neg(precision)
def omit_zero(self, string_number):
if string_number is None or string_number == '':
return None
if float(string_number) == 0:
return None
return string_number
def handle_withdraw_tag_and_params(self, tag, params):
if isinstance(tag, dict):
params = self.extend(tag, params)
tag = None
if tag is None:
tag = self.safe_string(params, 'tag')
if tag is not None:
params = self.omit(params, 'tag')
return [tag, params]
def get_supported_mapping(self, key, mapping={}):
# Takes a key and a dictionary, and returns the dictionary's value for that key
# :throws:
# NotSupported if the dictionary does not contain the key
if (key in mapping):
return mapping[key]
else:
raise NotSupported(self.id + ' ' + key + ' does not have a value in mapping')
def fetch_borrow_rate(self, code, params={}):
self.load_markets()
if not self.has['fetchBorrowRates']:
raise NotSupported(self.id + 'fetchBorrowRate() is not supported yet')
borrow_rates = self.fetch_borrow_rates(params)
rate = self.safe_value(borrow_rates, code)
if rate is None:
raise ExchangeError(self.id + 'fetchBorrowRate() could not find the borrow rate for currency code ' + code)
return rate
def handle_market_type_and_params(self, method_name, market=None, params={}):
default_type = self.safe_string_2(self.options, 'defaultType', 'type', 'spot')
method_options = self.safe_value(self.options, method_name)
method_type = default_type
if method_options is not None:
if isinstance(method_options, str):
method_type = method_options
else:
method_type = self.safe_string_2(method_options, 'defaultType', 'type', method_type)
market_type = method_type if market is None else market['type']
type = self.safe_string_2(params, 'defaultType', 'type', market_type)
params = self.omit(params, ['defaultType', 'type'])
return [type, params]
def load_time_difference(self, params={}):
server_time = self.fetch_time(params)
after = self.milliseconds()
self.options['timeDifference'] = after - server_time
return self.options['timeDifference']
def parse_leverage_tiers(self, response, symbols, market_id_key):
tiers = {}
for item in response:
id = self.safe_string(item, market_id_key)
market = self.safe_market(id)
symbol = market['symbol']
symbols_length = 0
if (symbols is not None):
symbols_length = len(symbols)
contract = self.safe_value(market, 'contract', False)
if (contract and (symbols_length == 0 or symbol in symbols)):
tiers[symbol] = self.parse_market_leverage_tiers(item, market)
return tiers
def fetch_market_leverage_tiers(self, symbol, params={}):
if self.has['fetchLeverageTiers']:
market = self.market(symbol)
if (not market['contract']):
raise BadRequest(self.id + ' fetch_leverage_tiers() supports contract markets only')
tiers = self.fetch_leverage_tiers([symbol])
return self.safe_value(tiers, symbol)
else:
raise NotSupported(self.id + 'fetch_market_leverage_tiers() is not supported yet')
def is_post_only(self, type, time_in_force, exchange_specific_option, params={}):
post_only = self.safe_value_2(params, 'postOnly', 'post_only', False)
params = self.omit(params, ['post_only', 'postOnly'])
time_in_force_upper = time_in_force.upper()
type_lower = type.lower()
ioc = time_in_force_upper == 'IOC'
time_in_force_post_only = time_in_force_upper == 'PO'
is_market = type_lower == 'market'
post_only = post_only or type_lower == 'postonly' or time_in_force_post_only or exchange_specific_option
if (post_only):
if (ioc):
raise InvalidOrder(self.id + ' postOnly orders cannot have timeInForce equal to ' + time_in_force)
elif (is_market):
raise InvalidOrder(self.id + ' postOnly orders cannot have type ' + type)
else:
time_in_force = None if time_in_force_post_only else time_in_force
return ['limit', True, time_in_force, params]
else:
return [type, False, time_in_force, params]
def create_post_only_order(self, symbol, type, side, amount, price, params={}):
if not self.has['createPostOnlyOrder']:
raise NotSupported(self.id + ' create_post_only_order() is not supported yet')
query = self.extend(params, {'postOnly': True})
return self.create_order(symbol, type, side, amount, price, query)
def create_stop_order(self, symbol, type, side, amount, price=None, stopPrice=None, params={}):
if not self.has['createStopOrder']:
raise NotSupported(self.id + 'create_stop_order() is not supported yet')
if stopPrice is None:
raise ArgumentsRequired(self.id + ' create_stop_order() requires a stopPrice argument')
query = self.extend(params, {'stopPrice': stopPrice})
return self.create_order(symbol, type, side, amount, price, query)
def create_stop_limit_order(self, symbol, side, amount, price, stopPrice, params={}):
if not self.has['createStopLimitOrder']:
raise NotSupported(self.id + ' create_stop_limit_order() is not supported yet')
query = self.extend(params, {'stopPrice': stopPrice})
return self.create_order(symbol, 'limit', side, amount, price, query)
def create_stop_market_order(self, symbol, side, amount, stopPrice, params={}):
if not self.has['createStopMarketOrder']:
raise NotSupported(self.id + ' create_stop_market_order() is not supported yet')
query = self.extend(params, {'stopPrice': stopPrice})
return self.create_order(symbol, 'market', side, amount, None, query)
def parse_borrow_interests(self, response, market=None):
interest = []
for i in range(len(response)):
row = response[i]
interest.append(self.parse_borrow_interest(row, market))
return interest
| 41.505267 | 219 | 0.583045 |
__version__ = '1.81.74'  # ccxt release version for this module
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import NetworkError
from ccxt.base.errors import NotSupported
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import RequestTimeout
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import BadRequest
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.decimal_to_precision import decimal_to_precision
from ccxt.base.decimal_to_precision import DECIMAL_PLACES, NO_PADDING, TRUNCATE, ROUND, ROUND_UP, ROUND_DOWN
from ccxt.base.decimal_to_precision import number_to_string
from ccxt.base.precise import Precise
from cryptography.hazmat import backends
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives.serialization import load_pem_private_key
from ccxt.static_dependencies import ecdsa
from ccxt.static_dependencies import keccak
try:
import axolotl_curve25519 as eddsa
except ImportError:
eddsa = None
__all__ = [
'Exchange',
]
import types
import logging
import base64
import calendar
import collections
import datetime
from email.utils import parsedate
import functools
import gzip
import hashlib
import hmac
import io
import json
import math
import random
from numbers import Number
import re
from requests import Session
from requests.utils import default_user_agent
from requests.exceptions import HTTPError, Timeout, TooManyRedirects, RequestException, ConnectionError as requestsConnectionError
from ssl import SSLError
import time
import uuid
import zlib
from decimal import Decimal
from time import mktime
from wsgiref.handlers import format_date_time
import urllib.parse as _urlencode
class Exchange(object):
id = None
name = None
version = None
certified = False
pro = False
alias = False
enableRateLimit = True
rateLimit = 2000
timeout = 10000
asyncio_loop = None
aiohttp_proxy = None
aiohttp_trust_env = False
session = None
verify = True
validateServerSsl = True
validateClientSsl = False
logger = None
userAgent = None
userAgents = {
'chrome': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36',
'chrome39': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36',
'chrome100': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.75 Safari/537.36',
}
verbose = False
markets = None
symbols = None
codes = None
timeframes = None
fees = {
'trading': {
'percentage': True,
},
'funding': {
'withdraw': {},
'deposit': {},
},
}
loaded_fees = {
'trading': {
'percentage': True,
},
'funding': {
'withdraw': {},
'deposit': {},
},
}
ids = None
urls = None
api = None
parseJsonResponse = True
proxy = ''
origin = '*'
proxies = None
hostname = None
apiKey = ''
secret = ''
password = ''
uid = ''
privateKey = ''
walletAddress = ''
token = ''
twofa = None
markets_by_id = None
currencies_by_id = None
precision = None
exceptions = None
limits = {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': None,
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': None,
'max': None,
},
}
httpExceptions = {
'422': ExchangeError,
'418': DDoSProtection,
'429': RateLimitExceeded,
'404': ExchangeNotAvailable,
'409': ExchangeNotAvailable,
'410': ExchangeNotAvailable,
'500': ExchangeNotAvailable,
'501': ExchangeNotAvailable,
'502': ExchangeNotAvailable,
'520': ExchangeNotAvailable,
'521': ExchangeNotAvailable,
'522': ExchangeNotAvailable,
'525': ExchangeNotAvailable,
'526': ExchangeNotAvailable,
'400': ExchangeNotAvailable,
'403': ExchangeNotAvailable,
'405': ExchangeNotAvailable,
'503': ExchangeNotAvailable,
'530': ExchangeNotAvailable,
'408': RequestTimeout,
'504': RequestTimeout,
'401': AuthenticationError,
'511': AuthenticationError,
}
headers = None
balance = None
orderbooks = None
orders = None
myTrades = None
trades = None
transactions = None
ohlcvs = None
tickers = None
base_currencies = None
quote_currencies = None
currencies = None
options = None
accounts = None
positions = None
status = {
'status': 'ok',
'updated': None,
'eta': None,
'url': None,
}
requiredCredentials = {
'apiKey': True,
'secret': True,
'uid': False,
'login': False,
'password': False,
'twofa': False,
'privateKey': False,
'walletAddress': False,
'token': False,
}
has = {
'publicAPI': True,
'privateAPI': True,
'CORS': None,
'spot': None,
'margin': None,
'swap': None,
'future': None,
'option': None,
'addMargin': None,
'cancelAllOrders': None,
'cancelOrder': True,
'cancelOrders': None,
'createDepositAddress': None,
'createLimitOrder': True,
'createMarketOrder': True,
'createOrder': True,
'createPostOnlyOrder': None,
'createStopOrder': None,
'createStopLimitOrder': None,
'createStopMarketOrder': None,
'editOrder': 'emulated',
'fetchAccounts': None,
'fetchBalance': True,
'fetchBidsAsks': None,
'fetchBorrowInterest': None,
'fetchBorrowRate': None,
'fetchBorrowRateHistory': None,
'fetchBorrowRatesPerSymbol': None,
'fetchBorrowRates': None,
'fetchCanceledOrders': None,
'fetchClosedOrder': None,
'fetchClosedOrders': None,
'fetchCurrencies': 'emulated',
'fetchDeposit': None,
'fetchDepositAddress': None,
'fetchDepositAddresses': None,
'fetchDepositAddressesByNetwork': None,
'fetchDeposits': None,
'fetchFundingFee': None,
'fetchFundingFees': None,
'fetchFundingHistory': None,
'fetchFundingRate': None,
'fetchFundingRateHistory': None,
'fetchFundingRates': None,
'fetchIndexOHLCV': None,
'fetchL2OrderBook': True,
'fetchLedger': None,
'fetchLedgerEntry': None,
'fetchLeverageTiers': None,
'fetchMarketLeverageTiers': None,
'fetchMarkets': True,
'fetchMarkOHLCV': None,
'fetchMyTrades': None,
'fetchOHLCV': 'emulated',
'fetchOpenOrder': None,
'fetchOpenOrders': None,
'fetchOrder': None,
'fetchOrderBook': True,
'fetchOrderBooks': None,
'fetchOrders': None,
'fetchOrderTrades': None,
'fetchPermissions': None,
'fetchPosition': None,
'fetchPositions': None,
'fetchPositionsRisk': None,
'fetchPremiumIndexOHLCV': None,
'fetchStatus': 'emulated',
'fetchTicker': True,
'fetchTickers': None,
'fetchTime': None,
'fetchTrades': True,
'fetchTradingFee': None,
'fetchTradingFees': None,
'fetchTradingLimits': None,
'fetchTransactions': None,
'fetchTransfers': None,
'fetchWithdrawal': None,
'fetchWithdrawals': None,
'loadMarkets': True,
'reduceMargin': None,
'setLeverage': None,
'setMarginMode': None,
'setPositionMode': None,
'signIn': None,
'transfer': None,
'withdraw': None,
}
precisionMode = DECIMAL_PLACES
paddingMode = NO_PADDING
minFundingAddressLength = 1
substituteCommonCurrencyCodes = True
quoteJsonNumbers = True
number = float
handleContentTypeApplicationZip = False
reduceFees = True
lastRestRequestTimestamp = 0
lastRestPollTimestamp = 0
restRequestQueue = None
restPollerLoopIsRunning = False
rateLimitTokens = 16
rateLimitMaxTokens = 16
rateLimitUpdateTime = 0
enableLastHttpResponse = True
enableLastJsonResponse = True
enableLastResponseHeaders = True
last_http_response = None
last_json_response = None
last_response_headers = None
requiresEddsa = False
base58_encoder = None
base58_decoder = None
base58_alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
commonCurrencies = {
'XBT': 'BTC',
'BCC': 'BCH',
'DRK': 'DASH',
'BCHABC': 'BCH',
'BCHSV': 'BSV',
}
synchronous = True
    def __init__(self, config={}):
        """Instantiate the exchange, deep-merging describe() defaults with the user config,
        defining the REST endpoint methods, camelCase aliases, and the rate limiter."""
        # replace None class-level containers with per-instance dicts so that
        # instances do not share mutable state through the class attributes
        self.precision = dict() if self.precision is None else self.precision
        self.limits = dict() if self.limits is None else self.limits
        self.exceptions = dict() if self.exceptions is None else self.exceptions
        self.headers = dict() if self.headers is None else self.headers
        self.balance = dict() if self.balance is None else self.balance
        self.orderbooks = dict() if self.orderbooks is None else self.orderbooks
        self.tickers = dict() if self.tickers is None else self.tickers
        self.trades = dict() if self.trades is None else self.trades
        self.transactions = dict() if self.transactions is None else self.transactions
        self.positions = dict() if self.positions is None else self.positions
        self.ohlcvs = dict() if self.ohlcvs is None else self.ohlcvs
        self.currencies = dict() if self.currencies is None else self.currencies
        self.options = dict() if self.options is None else self.options
        self.decimal_to_precision = decimal_to_precision
        self.number_to_string = number_to_string
        self.origin = self.uuid()
        self.userAgent = default_user_agent()
        # user config wins over the subclass describe() defaults
        settings = self.deep_extend(self.describe(), config)
        for key in settings:
            if hasattr(self, key) and isinstance(getattr(self, key), dict):
                setattr(self, key, self.deep_extend(getattr(self, key), settings[key]))
            else:
                setattr(self, key, settings[key])
        if self.api:
            self.define_rest_api(self.api, 'request')
        if self.markets:
            self.set_markets(self.markets)
        # expose every snake_case attribute under a camelCase alias as well
        cls = type(self)
        for name in dir(self):
            if name[0] != '_' and name[-1] != '_' and '_' in name:
                parts = name.split('_')
                # acronym segments that must not be title-cased
                exceptions = {'ohlcv': 'OHLCV', 'le': 'LE', 'be': 'BE'}
                camelcase = parts[0] + ''.join(exceptions.get(i, self.capitalize(i)) for i in parts[1:])
                attr = getattr(self, name)
                if isinstance(attr, types.MethodType):
                    setattr(cls, camelcase, getattr(cls, name))
                else:
                    setattr(self, camelcase, attr)
        # token-bucket rate limiter configuration (refill rate derived from rateLimit)
        self.tokenBucket = self.extend({
            'refillRate': 1.0 / self.rateLimit if self.rateLimit > 0 else float('inf'),
            'delay': 0.001,
            'capacity': 1.0,
            'defaultCost': 1.0,
        }, getattr(self, 'tokenBucket', {}))
        self.session = self.session if self.session or not self.synchronous else Session()
        self.logger = self.logger if self.logger else logging.getLogger(__name__)
def __del__(self):
    # Best-effort cleanup of the underlying HTTP session on garbage collection.
    if self.session:
        try:
            self.session.close()
        except Exception as e:
            # never raise from a destructor
            pass

def __repr__(self):
    # e.g. "ccxt.binance()" or "ccxt.async_support.binance()" for async builds
    return 'ccxt.' + ('async_support.' if self.asyncio_loop else '') + self.id + '()'

def __str__(self):
    # human-readable exchange name
    return self.name
def describe(self):
    # Subclasses override this with their exchange metadata (id, urls, api, fees, ...).
    return {}

def set_sandbox_mode(self, enabled):
    """Switch the exchange between its live and testnet API URLs."""
    if enabled:
        if 'test' in self.urls:
            self.urls['apiBackup'] = self.urls['api']  # remember the live URL
            self.urls['api'] = self.urls['test']
        else:
            raise NotSupported(self.id + ' does not have a sandbox URL')
    elif 'apiBackup' in self.urls:
        # restore the live URL saved when sandbox mode was enabled
        self.urls['api'] = self.urls['apiBackup']
        del self.urls['apiBackup']
def define_rest_api_endpoint(self, method_name, uppercase_method, lowercase_method, camelcase_method, path, paths, config={}):
    """Register one REST endpoint as two bound class-level aliases.

    For an endpoint like ``GET market/ticker`` under ``paths=['public']`` this
    binds both ``publicGetMarketTicker`` (camelCase) and
    ``public_get_market_ticker`` (snake_case) on the class; both forward to the
    generic dispatcher named by ``method_name`` (normally ``request``).
    """
    cls = type(self)
    entry = getattr(cls, method_name)  # the generic request dispatcher
    delimiters = re.compile('[^a-zA-Z0-9]')
    split_path = delimiters.split(path)
    lowercase_path = [x.strip().lower() for x in split_path]
    camelcase_suffix = ''.join([Exchange.capitalize(x) for x in split_path])
    underscore_suffix = '_'.join([x for x in lowercase_path if len(x)])
    camelcase_prefix = ''
    underscore_prefix = ''
    if len(paths):
        camelcase_prefix = paths[0]
        underscore_prefix = paths[0]
        if len(paths) > 1:
            camelcase_prefix += ''.join([Exchange.capitalize(x) for x in paths[1:]])
            underscore_prefix += '_' + '_'.join([x.strip() for p in paths[1:] for x in delimiters.split(p)])
            api_argument = paths
        else:
            api_argument = paths[0]
    camelcase = camelcase_prefix + camelcase_method + Exchange.capitalize(camelcase_suffix)
    underscore = underscore_prefix + '_' + lowercase_method + '_' + underscore_suffix.lower()

    def partialer():
        # freeze the endpoint description in a closure
        outer_kwargs = {'path': path, 'api': api_argument, 'method': uppercase_method, 'config': config}

        @functools.wraps(entry)
        def inner(_self, params=None, context=None):
            inner_kwargs = dict(outer_kwargs)  # copy so the shared closure dict is never mutated
            if params is not None:
                inner_kwargs['params'] = params
            if context is not None:
                # BUG FIX: this used to assign ``params`` here, silently
                # discarding the caller's context; forward the real context.
                inner_kwargs['context'] = context
            return entry(_self, **inner_kwargs)
        return inner
    to_bind = partialer()
    setattr(cls, camelcase, to_bind)
    setattr(cls, underscore, to_bind)
def define_rest_api(self, api, method_name, paths=[]):
    """Recursively walk the nested ``api`` description and register every endpoint.

    Keys that are HTTP verbs map to endpoint lists or dicts; any other key is a
    grouping level (e.g. 'public'/'private') and recurses with the key appended
    to ``paths``.
    """
    for key, value in api.items():
        uppercase_method = key.upper()
        lowercase_method = key.lower()
        camelcase_method = lowercase_method.capitalize()
        if isinstance(value, list):
            # plain list of endpoint path strings
            for path in value:
                self.define_rest_api_endpoint(method_name, uppercase_method, lowercase_method, camelcase_method, path, paths)
        elif re.search(r'^(?:get|post|put|delete|head|patch)$', key, re.IGNORECASE) is not None:
            # dict form: endpoint -> config dict, or endpoint -> rate-limit cost number
            for [endpoint, config] in value.items():
                path = endpoint.strip()
                if isinstance(config, dict):
                    self.define_rest_api_endpoint(method_name, uppercase_method, lowercase_method, camelcase_method, path, paths, config)
                elif isinstance(config, Number):
                    # a bare number is shorthand for {'cost': number}
                    self.define_rest_api_endpoint(method_name, uppercase_method, lowercase_method, camelcase_method, path, paths, {'cost': config})
                else:
                    raise NotSupported(self.id + ' define_rest_api() API format not supported, API leafs must strings, objects or numbers')
        else:
            # grouping key -- descend one level
            self.define_rest_api(value, method_name, paths + [key])
def throttle(self, cost=None):
    # Client-side rate limiter: sleep so at least rateLimit * cost milliseconds
    # elapse since the last REST request.
    now = float(self.milliseconds())
    elapsed = now - self.lastRestRequestTimestamp
    cost = 1 if cost is None else cost
    sleep_time = self.rateLimit * cost
    if elapsed < sleep_time:
        delay = sleep_time - elapsed
        time.sleep(delay / 1000.0)  # delay is in milliseconds

def calculate_rate_limiter_cost(self, api, method, path, params, config={}, context={}):
    # Cost of one call in rate-limiter units; endpoints may override via config['cost'].
    return self.safe_value(config, 'cost', 1)
def fetch2(self, path, api='public', method='GET', params={}, headers=None, body=None, config={}, context={}):
    """Throttle, sign, and perform a single REST request."""
    if self.enableRateLimit:
        cost = self.calculate_rate_limiter_cost(api, method, path, params, config, context)
        self.throttle(cost)
    self.lastRestRequestTimestamp = self.milliseconds()
    # sign() is exchange-specific and yields the final url/method/headers/body
    request = self.sign(path, api, method, params, headers, body)
    return self.fetch(request['url'], request['method'], request['headers'], request['body'])

def request(self, path, api='public', method='GET', params={}, headers=None, body=None, config={}, context={}):
    # Entry point invoked by the generated endpoint methods (define_rest_api_endpoint).
    return self.fetch2(path, api, method, params, headers, body, config, context)
@staticmethod
def gzip_deflate(response, text):
encoding = response.info().get('Content-Encoding')
if encoding in ('gzip', 'x-gzip', 'deflate'):
if encoding == 'deflate':
return zlib.decompress(text, -zlib.MAX_WBITS)
else:
return gzip.GzipFile('', 'rb', 9, io.BytesIO(text)).read()
return text
def throw_exactly_matched_exception(self, exact, string, message):
    # Raise the exception class mapped to an exact error-code/string, if present.
    if string in exact:
        raise exact[string](message)

def throw_broadly_matched_exception(self, broad, string, message):
    # Raise the exception class whose key appears as a substring of ``string``, if any.
    broad_key = self.find_broadly_matched_key(broad, string)
    if broad_key is not None:
        raise broad[broad_key](message)

def find_broadly_matched_key(self, broad, string):
    """Return the first key of ``broad`` that occurs as a substring of ``string``, else None."""
    keys = list(broad.keys())
    for i in range(0, len(keys)):
        key = keys[i]
        if string.find(key) >= 0:
            return key
    return None
def prepare_request_headers(self, headers=None):
    """Merge session, instance, user-agent and proxy headers for one request."""
    headers = headers or {}
    if self.session:
        headers.update(self.session.headers)
    headers.update(self.headers)
    if self.userAgent:
        # userAgent may be a plain string or a ready-made header dict
        if type(self.userAgent) is str:
            headers.update({'User-Agent': self.userAgent})
        elif (type(self.userAgent) is dict) and ('User-Agent' in self.userAgent):
            headers.update(self.userAgent)
    if self.proxy:
        # CORS proxies typically require an Origin header
        headers.update({'Origin': self.origin})
    headers.update({'Accept-Encoding': 'gzip, deflate'})
    # let subclasses post-process the final header set
    return self.set_headers(headers)
def log(self, *args):
    # Verbose-mode output hook; overridable by users.
    print(*args)

def set_headers(self, headers):
    # Hook for subclasses to adjust outgoing headers; identity by default.
    return headers

def handle_errors(self, code, reason, url, method, headers, body, response, request_headers, request_body):
    # Exchange-specific error-detection hook; the base class does nothing.
    pass

def on_rest_response(self, code, reason, url, method, response_headers, response_body, request_headers, request_body):
    # Raw response-body preprocessing hook; strips surrounding whitespace by default.
    return response_body.strip()

def on_json_response(self, response_body):
    # Parse JSON; when quoteJsonNumbers is set, keep numbers as strings to
    # avoid float precision loss.
    if self.quoteJsonNumbers:
        return json.loads(response_body, parse_float=str, parse_int=str)
    else:
        return json.loads(response_body)
def fetch(self, url, method='GET', headers=None, body=None):
    """Perform the HTTP request and return the parsed response.

    Maps the ``requests`` library's exceptions onto the ccxt exception
    hierarchy and runs the exchange-specific handle_errors() hook on both
    error and success paths.
    """
    request_headers = self.prepare_request_headers(headers)
    url = self.proxy + url
    if self.verbose:
        self.log("\nfetch Request:", self.id, method, url, "RequestHeaders:", request_headers, "RequestBody:", body)
    self.logger.debug("%s %s, Request: %s %s", method, url, request_headers, body)
    request_body = body
    if body:
        body = body.encode()
    self.session.cookies.clear()
    http_response = None
    http_status_code = None
    http_status_text = None
    json_response = None
    try:
        response = self.session.request(
            method,
            url,
            data=body,
            headers=request_headers,
            timeout=int(self.timeout / 1000),  # self.timeout is in milliseconds
            proxies=self.proxies,
            verify=self.verify and self.validateServerSsl
        )
        response.encoding = 'utf-8'
        headers = response.headers
        http_status_code = response.status_code
        http_status_text = response.reason
        http_response = self.on_rest_response(http_status_code, http_status_text, url, method, headers, response.text, request_headers, request_body)
        json_response = self.parse_json(http_response)
        # optionally retain the last response for debugging
        if self.enableLastHttpResponse:
            self.last_http_response = http_response
        if self.enableLastJsonResponse:
            self.last_json_response = json_response
        if self.enableLastResponseHeaders:
            self.last_response_headers = headers
        if self.verbose:
            self.log("\nfetch Response:", self.id, method, url, http_status_code, "ResponseHeaders:", headers, "ResponseBody:", http_response)
        self.logger.debug("%s %s, Response: %s %s %s", method, url, http_status_code, headers, http_response)
        response.raise_for_status()
    except Timeout as e:
        details = ' '.join([self.id, method, url])
        raise RequestTimeout(details) from e
    except TooManyRedirects as e:
        details = ' '.join([self.id, method, url])
        raise ExchangeError(details) from e
    except SSLError as e:
        details = ' '.join([self.id, method, url])
        raise ExchangeError(details) from e
    except HTTPError as e:
        # non-2xx: give the exchange-specific handler first crack at the body
        details = ' '.join([self.id, method, url])
        skip_further_error_handling = self.handle_errors(http_status_code, http_status_text, url, method, headers, http_response, json_response, request_headers, request_body)
        if not skip_further_error_handling:
            self.handle_http_status_code(http_status_code, http_status_text, url, method, http_response)
        raise ExchangeError(details) from e
    except requestsConnectionError as e:
        error_string = str(e)
        details = ' '.join([self.id, method, url])
        if 'Read timed out' in error_string:
            raise RequestTimeout(details) from e
        else:
            raise NetworkError(details) from e
    except ConnectionResetError as e:
        error_string = str(e)
        details = ' '.join([self.id, method, url])
        raise NetworkError(details) from e
    except RequestException as e:  # broadest requests exception, checked last
        error_string = str(e)
        details = ' '.join([self.id, method, url])
        if any(x in error_string for x in ['ECONNRESET', 'Connection aborted.', 'Connection broken:']):
            raise NetworkError(details) from e
        else:
            raise ExchangeError(details) from e
    # success path: still let the exchange-specific handler inspect the payload
    self.handle_errors(http_status_code, http_status_text, url, method, headers, http_response, json_response, request_headers, request_body)
    if json_response is not None:
        return json_response
    elif self.is_text_response(headers):
        return http_response
    else:
        return response.content  # binary payload
def handle_http_status_code(self, http_status_code, http_status_text, url, method, body):
string_code = str(http_status_code)
if string_code in self.httpExceptions:
Exception = self.httpExceptions[string_code]
raise Exception(' '.join([self.id, method, url, string_code, http_status_text, body]))
def parse_json(self, http_response):
    # Parse only when the body looks like JSON; implicitly returns None
    # otherwise or when decoding fails.
    try:
        if Exchange.is_json_encoded_object(http_response):
            return self.on_json_response(http_response)
    except ValueError:
        pass

def is_text_response(self, headers):
    # True when the Content-Type header indicates a textual (JSON or text/*) payload.
    content_type = headers.get('Content-Type', '')
    return content_type.startswith('application/json') or content_type.startswith('text/')
@staticmethod
def key_exists(dictionary, key):
if dictionary is None or key is None:
return False
if isinstance(dictionary, list):
if isinstance(key, int) and 0 <= key and key < len(dictionary):
return dictionary[key] is not None
else:
return False
if key in dictionary:
return dictionary[key] is not None
return False
@staticmethod
def safe_float(dictionary, key, default_value=None):
    """Return ``dictionary[key]`` coerced to float, or ``default_value``.

    Falls back to the default when the key is missing/None or the value is
    not numeric. Now also catches TypeError (e.g. a list value), matching
    safe_integer's behavior instead of propagating the exception.
    """
    value = default_value
    try:
        if Exchange.key_exists(dictionary, key):
            value = float(dictionary[key])
    except (ValueError, TypeError):
        value = default_value
    return value
@staticmethod
def safe_string(dictionary, key, default_value=None):
    # Value coerced to str, or the default when missing/None.
    return str(dictionary[key]) if Exchange.key_exists(dictionary, key) else default_value

@staticmethod
def safe_string_lower(dictionary, key, default_value=None):
    # Lowercased string value; the default is returned as-is (not lowercased).
    return str(dictionary[key]).lower() if Exchange.key_exists(dictionary, key) else default_value

@staticmethod
def safe_string_upper(dictionary, key, default_value=None):
    # Uppercased string value; the default is returned as-is (not uppercased).
    return str(dictionary[key]).upper() if Exchange.key_exists(dictionary, key) else default_value

@staticmethod
def safe_integer(dictionary, key, default_value=None):
    # Value coerced to int (via float, so '1.0' works), or the default.
    if not Exchange.key_exists(dictionary, key):
        return default_value
    value = dictionary[key]
    try:
        return int(float(value))
    except ValueError:
        return default_value
    except TypeError:
        return default_value

@staticmethod
def safe_integer_product(dictionary, key, factor, default_value=None):
    # int(value * factor) for numeric or numeric-string values, else the default.
    if not Exchange.key_exists(dictionary, key):
        return default_value
    value = dictionary[key]
    if isinstance(value, Number):
        return int(value * factor)
    elif isinstance(value, str):
        try:
            return int(float(value) * factor)
        except ValueError:
            pass
    return default_value

@staticmethod
def safe_timestamp(dictionary, key, default_value=None):
    # Seconds -> milliseconds convenience wrapper.
    return Exchange.safe_integer_product(dictionary, key, 1000, default_value)

@staticmethod
def safe_value(dictionary, key, default_value=None):
    # Raw value without coercion, or the default when missing/None.
    return dictionary[key] if Exchange.key_exists(dictionary, key) else default_value

@staticmethod
def safe_float_2(dictionary, key1, key2, default_value=None):
    # Like safe_float, trying key1 first, then key2.
    return Exchange.safe_either(Exchange.safe_float, dictionary, key1, key2, default_value)

@staticmethod
def safe_string_2(dictionary, key1, key2, default_value=None):
    return Exchange.safe_either(Exchange.safe_string, dictionary, key1, key2, default_value)

@staticmethod
def safe_string_lower_2(dictionary, key1, key2, default_value=None):
    return Exchange.safe_either(Exchange.safe_string_lower, dictionary, key1, key2, default_value)

@staticmethod
def safe_string_upper_2(dictionary, key1, key2, default_value=None):
    return Exchange.safe_either(Exchange.safe_string_upper, dictionary, key1, key2, default_value)

@staticmethod
def safe_integer_2(dictionary, key1, key2, default_value=None):
    return Exchange.safe_either(Exchange.safe_integer, dictionary, key1, key2, default_value)

@staticmethod
def safe_integer_product_2(dictionary, key1, key2, factor, default_value=None):
    value = Exchange.safe_integer_product(dictionary, key1, factor)
    return value if value is not None else Exchange.safe_integer_product(dictionary, key2, factor, default_value)

@staticmethod
def safe_timestamp_2(dictionary, key1, key2, default_value=None):
    return Exchange.safe_integer_product_2(dictionary, key1, key2, 1000, default_value)

@staticmethod
def safe_value_2(dictionary, key1, key2, default_value=None):
    return Exchange.safe_either(Exchange.safe_value, dictionary, key1, key2, default_value)

@staticmethod
def safe_either(method, dictionary, key1, key2, default_value=None):
    """Apply ``method`` with key1; fall back to key2 (with the default) when the result is None."""
    value = method(dictionary, key1)
    return value if value is not None else method(dictionary, key2, default_value)
@staticmethod
def truncate(num, precision=0):
    """Truncate (round toward zero) ``num`` to ``precision`` decimal places."""
    if precision > 0:
        decimal_precision = math.pow(10, precision)
        return math.trunc(num * decimal_precision) / decimal_precision
    # precision <= 0: integer truncation via the string formatter
    return int(Exchange.truncate_to_string(num, precision))
@staticmethod
def truncate_to_string(num, precision=0):
if precision > 0:
parts = ('{0:.%df}' % precision).format(Decimal(num)).split('.')
decimal_digits = parts[1][:precision].rstrip('0')
decimal_digits = decimal_digits if len(decimal_digits) else '0'
return parts[0] + '.' + decimal_digits
return ('%d' % num)
@staticmethod
def uuid22(length=22):
return format(random.getrandbits(length * 4), 'x')
@staticmethod
def uuid16(length=16):
return format(random.getrandbits(length * 4), 'x')
@staticmethod
def uuid():
return str(uuid.uuid4())
@staticmethod
def uuidv1():
return str(uuid.uuid1()).replace('-', '')
@staticmethod
def capitalize(string): # first character only, rest characters unchanged
# the native pythonic .capitalize() method lowercases all other characters
# which is an unwanted behaviour, therefore we use this custom implementation
# check it yourself: print('foobar'.capitalize(), 'fooBar'.capitalize())
if len(string) > 1:
return "%s%s" % (string[0].upper(), string[1:])
return string.upper()
@staticmethod
def strip(string):
return string.strip()
@staticmethod
def keysort(dictionary):
return collections.OrderedDict(sorted(dictionary.items(), key=lambda t: t[0]))
@staticmethod
def extend(*args):
if args is not None:
result = None
if type(args[0]) is collections.OrderedDict:
result = collections.OrderedDict()
else:
result = {}
for arg in args:
result.update(arg)
return result
return {}
@staticmethod
def merge(*args):
if args is not None:
result = None
if type(args[0]) is collections.OrderedDict:
result = collections.OrderedDict()
else:
result = {}
for arg in args:
# -- diff --
for key in arg:
if result.get(key) is None:
result[key] = arg[key]
# -- enddiff --
return result
return {}
@staticmethod
def deep_extend(*args):
result = None
for arg in args:
if isinstance(arg, dict):
if not isinstance(result, dict):
result = {}
for key in arg:
result[key] = Exchange.deep_extend(result[key] if key in result else None, arg[key])
else:
result = arg
return result
@staticmethod
def filter_by(array, key, value=None):
    """Select elements whose ``element[key]`` equals ``value``; dicts are treated as value-lists."""
    array = Exchange.to_array(array)
    return list(filter(lambda x: x[key] == value, array))

@staticmethod
def filterBy(array, key, value=None):
    # camelCase alias
    return Exchange.filter_by(array, key, value)

@staticmethod
def group_by(array, key):
    """Group elements into a dict of lists keyed by ``element[key]``.

    Elements missing the key (or with a None value) are dropped.
    """
    result = {}
    array = Exchange.to_array(array)
    array = [entry for entry in array if (key in entry) and (entry[key] is not None)]
    for entry in array:
        if entry[key] not in result:
            result[entry[key]] = []
        result[entry[key]].append(entry)
    return result

@staticmethod
def groupBy(array, key):
    # camelCase alias
    return Exchange.group_by(array, key)

@staticmethod
def index_by(array, key):
    """Build a dict mapping ``element[key]`` -> element (last one wins on duplicates).

    Dict input is iterated over its values in key-sorted order; an int ``key``
    indexes into sequence elements instead of mapping lookups.
    """
    result = {}
    if type(array) is dict:
        array = Exchange.keysort(array).values()
    is_int_key = isinstance(key, int)
    for element in array:
        if ((is_int_key and (key < len(element))) or (key in element)) and (element[key] is not None):
            k = element[key]
            result[k] = element
    return result
@staticmethod
def sort_by(array, key, descending=False):
return sorted(array, key=lambda k: k[key] if k[key] is not None else "", reverse=descending)
@staticmethod
def sort_by_2(array, key1, key2, descending=False):
return sorted(array, key=lambda k: (k[key1] if k[key1] is not None else "", k[key2] if k[key2] is not None else ""), reverse=descending)
@staticmethod
def array_concat(a, b):
return a + b
@staticmethod
def in_array(needle, haystack):
return needle in haystack
@staticmethod
def is_empty(object):
return not object
@staticmethod
def extract_params(string):
return re.findall(r'{([\w-]+)}', string)
@staticmethod
def implode_params(string, params):
if isinstance(params, dict):
for key in params:
if not isinstance(params[key], list):
string = string.replace('{' + key + '}', str(params[key]))
return string
def implode_hostname(self, url):
    # Substitute the exchange's configured hostname into a {hostname} URL template.
    return Exchange.implode_params(url, {'hostname': self.hostname})

def resolve_path(self, path, params):
    # Fill {placeholders} in ``path`` and return [resolved_path, leftover_params].
    return [
        self.implode_params(path, params),
        self.omit(params, self.extract_params(path))
    ]
@staticmethod
def urlencode(params={}, doseq=False):
for key, value in params.items():
if isinstance(value, bool):
params[key] = 'true' if value else 'false'
return _urlencode.urlencode(params, doseq)
@staticmethod
def urlencode_with_array_repeat(params={}):
    # Encode list values as repeated keys ({'a': [1, 2]} -> 'a=1&a=2') by
    # stripping the percent-encoded [index] brackets after doseq encoding.
    return re.sub(r'%5B\d*%5D', '', Exchange.urlencode(params, True))

@staticmethod
def urlencode_nested(params):
    """Encode nested dicts/lists using bracketed keys, e.g. a[b][0]=1."""
    result = {}

    def _encode_params(params, p_key=None):
        encode_params = {}
        if isinstance(params, dict):
            for key in params:
                encode_key = '{}[{}]'.format(p_key, key)
                encode_params[encode_key] = params[key]
        elif isinstance(params, (list, tuple)):
            for offset, value in enumerate(params):
                encode_key = '{}[{}]'.format(p_key, offset)
                encode_params[encode_key] = value
        else:
            # scalar leaf: record under the fully-bracketed key
            result[p_key] = params
        for key in encode_params:
            value = encode_params[key]
            _encode_params(value, key)

    if isinstance(params, dict):
        for key in params:
            _encode_params(params[key], key)
    return _urlencode.urlencode(result)

@staticmethod
def rawencode(params={}):
    # urlencode() with the percent-escapes decoded back out (keys joined with & and =).
    return _urlencode.unquote(Exchange.urlencode(params))
@staticmethod
def encode_uri_component(uri, safe="~()*!.'"):
return _urlencode.quote(uri, safe=safe)
@staticmethod
def omit(d, *args):
if isinstance(d, dict):
result = d.copy()
for arg in args:
if type(arg) is list:
for key in arg:
if key in result:
del result[key]
else:
if arg in result:
del result[arg]
return result
return d
@staticmethod
def unique(array):
return list(set(array))
@staticmethod
def pluck(array, key):
return [
element[key]
for element in array
if (key in element) and (element[key] is not None)
]
@staticmethod
def sum(*args):
return sum([arg for arg in args if isinstance(arg, (float, int))])
@staticmethod
def ordered(array):
return collections.OrderedDict(array)
@staticmethod
def aggregate(bidasks):
    """Sum volumes of orderbook entries sharing the same price level.

    Entries with zero/negative volume are dropped; first-seen price order is kept.
    """
    ordered = Exchange.ordered({})
    for [price, volume, *_] in bidasks:
        if volume > 0:
            ordered[price] = (ordered[price] if price in ordered else 0) + volume
    result = []
    items = list(ordered.items())
    for price, volume in items:
        result.append([price, volume])
    return result
@staticmethod
def sec():
    # Alias of seconds().
    return Exchange.seconds()

@staticmethod
def msec():
    # Alias of milliseconds().
    return Exchange.milliseconds()

@staticmethod
def usec():
    # Alias of microseconds().
    return Exchange.microseconds()

@staticmethod
def seconds():
    # Current Unix time in whole seconds.
    return int(time.time())

@staticmethod
def milliseconds():
    # Current Unix time in whole milliseconds.
    return int(time.time() * 1000)

@staticmethod
def microseconds():
    # Current Unix time in whole microseconds.
    return int(time.time() * 1000000)
@staticmethod
def iso8601(timestamp=None):
if timestamp is None:
return timestamp
if not isinstance(timestamp, int):
return None
if int(timestamp) < 0:
return None
try:
utc = datetime.datetime.utcfromtimestamp(timestamp // 1000)
return utc.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-6] + "{:03d}".format(int(timestamp) % 1000) + 'Z'
except (TypeError, OverflowError, OSError):
return None
@staticmethod
def rfc2616(self, timestamp=None):
if timestamp is None:
ts = datetime.datetime.now()
else:
ts = timestamp
stamp = mktime(ts.timetuple())
return format_date_time(stamp)
@staticmethod
def dmy(timestamp, infix='-'):
    # Millisecond timestamp -> 'MM-DD-YYYY' in UTC.
    # NOTE(review): despite the name, the format is month-day-year -- confirm intent.
    utc_datetime = datetime.datetime.utcfromtimestamp(int(round(timestamp / 1000)))
    return utc_datetime.strftime('%m' + infix + '%d' + infix + '%Y')

@staticmethod
def ymd(timestamp, infix='-', fullYear=True):
    # Millisecond timestamp -> 'YYYY-MM-DD' (or 'YY-MM-DD') in UTC.
    year_format = '%Y' if fullYear else '%y'
    utc_datetime = datetime.datetime.utcfromtimestamp(int(round(timestamp / 1000)))
    return utc_datetime.strftime(year_format + infix + '%m' + infix + '%d')

@staticmethod
def yymmdd(timestamp, infix=''):
    # Two-digit-year variant, no separator by default.
    return Exchange.ymd(timestamp, infix, False)

@staticmethod
def yyyymmdd(timestamp, infix='-'):
    # Four-digit-year variant with dashes by default.
    return Exchange.ymd(timestamp, infix, True)

@staticmethod
def ymdhms(timestamp, infix=' '):
    # Millisecond timestamp -> 'YYYY-MM-DD HH:MM:SS' in UTC; ``infix`` separates date and time.
    utc_datetime = datetime.datetime.utcfromtimestamp(int(round(timestamp / 1000)))
    return utc_datetime.strftime('%Y-%m-%d' + infix + '%H:%M:%S')
@staticmethod
def parse_date(timestamp=None):
    """Parse a datetime string to a millisecond epoch int.

    Strings containing 'GMT' are treated as RFC 2822 dates; everything else
    is delegated to parse8601(). Returns None on failure.
    """
    if timestamp is None:
        return timestamp
    if not isinstance(timestamp, str):
        return None
    if 'GMT' in timestamp:
        try:
            # NOTE(review): single-digit fields from parsedate() are joined
            # without zero padding before strptime('%Y%m%d...') -- verify
            # this handles e.g. months < 10 correctly.
            string = ''.join([str(value) for value in parsedate(timestamp)[:6]]) + '.000Z'
            dt = datetime.datetime.strptime(string, "%Y%m%d%H%M%S.%fZ")
            return calendar.timegm(dt.utctimetuple()) * 1000
        except (TypeError, OverflowError, OSError):
            return None
    else:
        return Exchange.parse8601(timestamp)
@staticmethod
def parse8601(timestamp=None):
if timestamp is None:
return timestamp
yyyy = '([0-9]{4})-?'
mm = '([0-9]{2})-?'
dd = '([0-9]{2})(?:T|[\\s])?'
h = '([0-9]{2}):?'
m = '([0-9]{2}):?'
s = '([0-9]{2})'
ms = '(\\.[0-9]{1,3})?'
tz = '(?:(\\+|\\-)([0-9]{2})\\:?([0-9]{2})|Z)?'
regex = r'' + yyyy + mm + dd + h + m + s + ms + tz
try:
match = re.search(regex, timestamp, re.IGNORECASE)
if match is None:
return None
yyyy, mm, dd, h, m, s, ms, sign, hours, minutes = match.groups()
ms = ms or '.000'
ms = (ms + '00')[0:4]
msint = int(ms[1:])
sign = sign or ''
sign = int(sign + '1') * -1
hours = int(hours or 0) * sign
minutes = int(minutes or 0) * sign
offset = datetime.timedelta(hours=hours, minutes=minutes)
string = yyyy + mm + dd + h + m + s + ms + 'Z'
dt = datetime.datetime.strptime(string, "%Y%m%d%H%M%S.%fZ")
dt = dt + offset
return calendar.timegm(dt.utctimetuple()) * 1000 + msint
except (TypeError, OverflowError, OSError, ValueError):
return None
@staticmethod
def hash(request, algorithm='md5', digest='hex'):
    """Hash ``request`` (bytes); ``digest`` selects 'hex', 'base64', or raw bytes."""
    if algorithm == 'keccak':
        # keccak comes from a non-hashlib implementation imported at file level
        binary = bytes(keccak.SHA3(request))
    else:
        h = hashlib.new(algorithm, request)
        binary = h.digest()
    if digest == 'base64':
        return Exchange.binary_to_base64(binary)
    elif digest == 'hex':
        return Exchange.binary_to_base16(binary)
    return binary

@staticmethod
def hmac(request, secret, algorithm=hashlib.sha256, digest='hex'):
    """HMAC of ``request`` (bytes) with ``secret`` (bytes); digest selection as in hash()."""
    h = hmac.new(secret, request, algorithm)
    binary = h.digest()
    if digest == 'hex':
        return Exchange.binary_to_base16(binary)
    elif digest == 'base64':
        return Exchange.binary_to_base64(binary)
    return binary
@staticmethod
def binary_concat(*args):
    # Concatenate byte strings passed as varargs.
    result = bytes()
    for arg in args:
        result = result + arg
    return result

@staticmethod
def binary_concat_array(array):
    # Concatenate a list of byte strings.
    result = bytes()
    for element in array:
        result = result + element
    return result

@staticmethod
def base64urlencode(s):
    # URL-safe base64 without '=' padding (JWT-style).
    return Exchange.decode(base64.urlsafe_b64encode(s)).replace('=', '')

@staticmethod
def binary_to_base64(s):
    # bytes -> standard base64 str
    return Exchange.decode(base64.standard_b64encode(s))

@staticmethod
def base64_to_binary(s):
    # standard base64 str -> bytes
    return base64.standard_b64decode(s)

@staticmethod
def string_to_base64(s):
    # str (or bytes) -> base64, returned as bytes
    binary = Exchange.encode(s) if isinstance(s, str) else s
    return Exchange.encode(Exchange.binary_to_base64(binary))

@staticmethod
def base64_to_string(s):
    # base64 -> utf-8 decoded str
    return base64.b64decode(s).decode('utf-8')
@staticmethod
def jwt(request, secret, alg='HS256'):
    """Build a signed JSON Web Token from the ``request`` payload.

    HS* algorithms use HMAC with ``secret`` as the key; RS* algorithms
    delegate to rsa() with a PEM private key.
    """
    algos = {
        'HS256': hashlib.sha256,
        'HS384': hashlib.sha384,
        'HS512': hashlib.sha512,
    }
    header = Exchange.encode(Exchange.json({
        'alg': alg,
        'typ': 'JWT',
    }))
    encoded_header = Exchange.base64urlencode(header)
    encoded_data = Exchange.base64urlencode(Exchange.encode(Exchange.json(request)))
    token = encoded_header + '.' + encoded_data
    if alg[:2] == 'RS':
        signature = Exchange.rsa(token, secret, alg)
    else:
        algorithm = algos[alg]
        signature = Exchange.hmac(Exchange.encode(token), secret, algorithm, 'binary')
    return token + '.' + Exchange.base64urlencode(signature)

@staticmethod
def rsa(request, secret, alg='RS256'):
    # PKCS#1 v1.5 signature over ``request`` with a PEM private key
    # (uses the `cryptography` package imported at file level).
    algorithms = {
        "RS256": hashes.SHA256(),
        "RS384": hashes.SHA384(),
        "RS512": hashes.SHA512(),
    }
    algorithm = algorithms[alg]
    priv_key = load_pem_private_key(secret, None, backends.default_backend())
    return priv_key.sign(Exchange.encode(request), padding.PKCS1v15(), algorithm)
@staticmethod
def ecdsa(request, secret, algorithm='p256', hash=None, fixed_length=False):
    """Deterministic ECDSA signature of ``request`` using the `ecdsa` package.

    ``secret`` is a hex-encoded private key; when ``hash`` is given the
    request is hashed first, otherwise it is treated as a hex digest.
    Returns {'r', 's'} as lowercase hex plus the recovery id 'v'.
    """
    algorithms = {
        'p192': [ecdsa.NIST192p, 'sha256'],
        'p224': [ecdsa.NIST224p, 'sha256'],
        'p256': [ecdsa.NIST256p, 'sha256'],
        'p384': [ecdsa.NIST384p, 'sha384'],
        'p521': [ecdsa.NIST521p, 'sha512'],
        'secp256k1': [ecdsa.SECP256k1, 'sha256'],
    }
    if algorithm not in algorithms:
        raise ArgumentsRequired(algorithm + ' is not a supported algorithm')
    curve_info = algorithms[algorithm]
    hash_function = getattr(hashlib, curve_info[1])
    encoded_request = Exchange.encode(request)
    if hash is not None:
        digest = Exchange.hash(encoded_request, hash, 'binary')
    else:
        digest = base64.b16decode(encoded_request, casefold=True)
    key = ecdsa.SigningKey.from_string(base64.b16decode(Exchange.encode(secret),
                                                        casefold=True), curve=curve_info[0])
    r_binary, s_binary, v = key.sign_digest_deterministic(digest, hashfunc=hash_function,
                                                          sigencode=ecdsa.util.sigencode_strings_canonize)
    r_int, s_int = ecdsa.util.sigdecode_strings((r_binary, s_binary), key.privkey.order)
    counter = 0
    minimum_size = (1 << (8 * 31)) - 1
    half_order = key.privkey.order / 2
    # when fixed_length is requested, re-sign with extra entropy until both
    # r and s are full-width 32-byte values
    while fixed_length and (r_int > half_order or r_int <= minimum_size or s_int <= minimum_size):
        r_binary, s_binary, v = key.sign_digest_deterministic(digest, hashfunc=hash_function,
                                                              sigencode=ecdsa.util.sigencode_strings_canonize,
                                                              extra_entropy=Exchange.number_to_le(counter, 32))
        r_int, s_int = ecdsa.util.sigdecode_strings((r_binary, s_binary), key.privkey.order)
        counter += 1
    r, s = Exchange.decode(base64.b16encode(r_binary)).lower(), Exchange.decode(base64.b16encode(s_binary)).lower()
    return {
        'r': r,
        's': s,
        'v': v,
    }

@staticmethod
def eddsa(request, secret, curve='ed25519'):
    # Ed25519 signature (via the bundled eddsa module); request/secret are hex-encoded.
    random = b'\x00' * 64
    request = base64.b16decode(request, casefold=True)
    secret = base64.b16decode(secret, casefold=True)
    signature = eddsa.calculateSignature(random, secret, request)
    return Exchange.binary_to_base58(signature)
@staticmethod
def json(data, params=None):
    # Compact JSON encoding without whitespace; ``params`` is accepted for
    # call-compatibility but unused.
    return json.dumps(data, separators=(',', ':'))
@staticmethod
def is_json_encoded_object(input):
return (isinstance(input, str) and
(len(input) >= 2) and
((input[0] == '{') or (input[0] == '[')))
@staticmethod
def encode(string):
return string.encode('latin-1')
@staticmethod
def decode(string):
return string.decode('latin-1')
@staticmethod
def to_array(value):
return list(value.values()) if type(value) is dict else value
def nonce(self):
    # Default request nonce: Unix time in seconds; exchanges may override.
    return Exchange.seconds()
@staticmethod
def check_required_version(required_version, error=True):
    """Compare 'x.y.z' ``required_version`` against the installed ccxt __version__.

    Returns True when satisfied; otherwise raises NotSupported, or returns
    ``error`` when ``error`` is falsy.
    """
    result = True
    [major1, minor1, patch1] = required_version.split('.')
    [major2, minor2, patch2] = __version__.split('.')
    int_major1 = int(major1)
    int_minor1 = int(minor1)
    int_patch1 = int(patch1)
    int_major2 = int(major2)
    int_minor2 = int(minor2)
    int_patch2 = int(patch2)
    # lexicographic comparison on (major, minor, patch)
    if int_major1 > int_major2:
        result = False
    if int_major1 == int_major2:
        if int_minor1 > int_minor2:
            result = False
        elif int_minor1 == int_minor2 and int_patch1 > int_patch2:
            result = False
    if not result:
        if error:
            raise NotSupported('Your current version of CCXT is ' + __version__ + ', a newer version ' + required_version + ' is required, please, upgrade your version of CCXT')
        else:
            return error
    return result
def check_required_credentials(self, error=True):
    """Verify that every credential flagged required is set on the instance.

    Raises AuthenticationError (or returns ``error`` when it is falsy).
    """
    keys = list(self.requiredCredentials.keys())
    for key in keys:
        if self.requiredCredentials[key] and not getattr(self, key):
            if error:
                raise AuthenticationError(self.id + ' requires `' + key + '`')
            else:
                return error
    return True

def check_address(self, address):
    """Sanity-check a funding address; raises InvalidAddress when it looks bogus."""
    if address is None:
        raise InvalidAddress(self.id + ' address is None')
    # reject all-same-character strings, too-short strings, and embedded spaces
    if all(letter == address[0] for letter in address) or len(address) < self.minFundingAddressLength or ' ' in address:
        raise InvalidAddress(self.id + ' address is invalid or has less than ' + str(self.minFundingAddressLength) + ' characters: "' + str(address) + '"')
    return address
def account(self):
    """Return an empty balance-entry template."""
    template = {
        'free': None,
        'used': None,
        'total': None,
    }
    return template
def common_currency_code(self, currency):
    # Map an exchange-specific currency code to its common ccxt code, when
    # substitution is enabled and a mapping exists; otherwise pass through.
    if not self.substituteCommonCurrencyCodes:
        return currency
    return self.safe_string(self.commonCurrencies, currency, currency)
def precision_from_string(self, string):
    """Count decimal places in a numeric string, ignoring trailing zeros."""
    trimmed = re.sub(r'0+$', '', string)
    if '.' not in trimmed:
        return 0
    return len(trimmed.split('.')[1])
def cost_to_precision(self, symbol, cost):
    # Truncate a cost to the market's price precision.
    market = self.market(symbol)
    return self.decimal_to_precision(cost, TRUNCATE, market['precision']['price'], self.precisionMode, self.paddingMode)

def price_to_precision(self, symbol, price):
    # Round a price to the market's price precision.
    market = self.market(symbol)
    return self.decimal_to_precision(price, ROUND, market['precision']['price'], self.precisionMode, self.paddingMode)

def amount_to_precision(self, symbol, amount):
    # Truncate an amount to the market's amount precision.
    market = self.market(symbol)
    return self.decimal_to_precision(amount, TRUNCATE, market['precision']['amount'], self.precisionMode, self.paddingMode)

def fee_to_precision(self, symbol, fee):
    # Round a fee to the market's price precision.
    market = self.market(symbol)
    return self.decimal_to_precision(fee, ROUND, market['precision']['price'], self.precisionMode, self.paddingMode)

def currency_to_precision(self, code, fee):
    # Round an amount to the given currency's precision.
    return self.decimal_to_precision(fee, ROUND, self.currencies[code]['precision'], self.precisionMode, self.paddingMode)
def set_markets(self, markets, currencies=None):
    """Normalize and index the markets list; derive currencies when not given.

    Populates self.markets, self.markets_by_id, self.symbols, self.ids,
    self.currencies, self.currencies_by_id and self.codes.
    """
    values = list(markets.values()) if type(markets) is dict else markets
    for i in range(0, len(values)):
        # fill every market with the default structure, then the exchange's
        # trading fees, then the market's own fields (later values win)
        values[i] = self.extend(
            {
                'id': None,
                'symbol': None,
                'base': None,
                'quote': None,
                'baseId': None,
                'quoteId': None,
                'active': None,
                'type': None,
                'linear': None,
                'inverse': None,
                'spot': False,
                'swap': False,
                'future': False,
                'option': False,
                'margin': False,
                'contract': False,
                'contractSize': None,
                'expiry': None,
                'expiryDatetime': None,
                'optionType': None,
                'strike': None,
                'settle': None,
                'settleId': None,
                'precision': self.precision,
                'limits': self.limits,
                'info': None,
            },
            self.fees['trading'],
            values[i]
        )
    self.markets = self.index_by(values, 'symbol')
    self.markets_by_id = self.index_by(values, 'id')
    self.symbols = sorted(self.markets.keys())
    self.ids = sorted(self.markets_by_id.keys())
    if currencies:
        # explicit currencies provided by the caller take precedence
        self.currencies = self.deep_extend(self.currencies, currencies)
    else:
        # otherwise derive base/quote currencies from the markets themselves
        base_currencies = [{
            'id': market['baseId'] if (('baseId' in market) and (market['baseId'] is not None)) else market['base'],
            'numericId': market['baseNumericId'] if 'baseNumericId' in market else None,
            'code': market['base'],
            'precision': (
                market['precision']['base'] if 'base' in market['precision'] else (
                    market['precision']['amount'] if 'amount' in market['precision'] else None
                )
            ) if 'precision' in market else 8,
        } for market in values if 'base' in market]
        quote_currencies = [{
            'id': market['quoteId'] if (('quoteId' in market) and (market['quoteId'] is not None)) else market['quote'],
            'numericId': market['quoteNumericId'] if 'quoteNumericId' in market else None,
            'code': market['quote'],
            'precision': (
                market['precision']['quote'] if 'quote' in market['precision'] else (
                    market['precision']['price'] if 'price' in market['precision'] else None
                )
            ) if 'precision' in market else 8,
        } for market in values if 'quote' in market]
        base_currencies = self.sort_by(base_currencies, 'code')
        quote_currencies = self.sort_by(quote_currencies, 'code')
        self.base_currencies = self.index_by(base_currencies, 'code')
        self.quote_currencies = self.index_by(quote_currencies, 'code')
        currencies = self.sort_by(base_currencies + quote_currencies, 'code')
        self.currencies = self.deep_extend(self.currencies, self.index_by(currencies, 'code'))
    self.currencies_by_id = self.index_by(list(self.currencies.values()), 'id')
    self.codes = sorted(self.currencies.keys())
    return self.markets
    def fetch_permissions(self, params={}):
        """Fetch API key permissions; must be overridden by exchanges that support it."""
        raise NotSupported(self.id + ' fetch_permissions() is not supported yet')
    def load_markets(self, reload=False, params={}):
        """Load (and cache) the markets dict, optionally reloading; also fetches currencies when supported."""
        if not reload:
            if self.markets:
                if not self.markets_by_id:
                    # markets were assigned without indexing; rebuild the by-id index
                    return self.set_markets(self.markets)
                return self.markets
        currencies = None
        if self.has['fetchCurrencies'] is True:
            currencies = self.fetch_currencies()
        markets = self.fetch_markets(params)
        return self.set_markets(markets, currencies)
    def load_accounts(self, reload=False, params={}):
        """Load (and cache) the account list, optionally reloading, and index it by account id."""
        if reload:
            self.accounts = self.fetch_accounts(params)
        else:
            if self.accounts:
                return self.accounts
            else:
                self.accounts = self.fetch_accounts(params)
        self.accountsById = self.index_by(self.accounts, 'id')
        return self.accounts
    def load_fees(self, reload=False):
        """Load (and cache) trading + funding fees, merging them over the class-level defaults."""
        if not reload:
            if self.loaded_fees != Exchange.loaded_fees:
                # already diverged from the class defaults, i.e. previously loaded
                return self.loaded_fees
        self.loaded_fees = self.deep_extend(self.loaded_fees, self.fetch_fees())
        return self.loaded_fees
    def fetch_markets(self, params={}):
        """Return the markets as a list; the base class echoes self.markets (exchanges override this)."""
        return self.to_array(self.markets)
    def fetch_currencies(self, params={}):
        """Return the currencies dict; the base class echoes self.currencies (exchanges override this)."""
        return self.currencies
    def fetch_fees(self):
        """Fetch trading and funding fees where supported, returned in one structure."""
        trading = {}
        funding = {}
        if self.has['fetchTradingFees']:
            trading = self.fetch_trading_fees()
        if self.has['fetchFundingFees']:
            funding = self.fetch_funding_fees()
        return {
            'trading': trading,
            'funding': funding,
        }
    def fetch_balance(self, params={}):
        """Fetch the account balance; must be overridden by each exchange."""
        raise NotSupported(self.id + ' fetch_balance() is not supported yet')
    def create_order(self, symbol, type, side, amount, price=None, params={}):
        """Place an order; must be overridden by each exchange."""
        raise NotSupported(self.id + ' create_order() is not supported yet')
    def cancel_order(self, id, symbol=None, params={}):
        """Cancel an order by id; must be overridden by each exchange."""
        raise NotSupported(self.id + ' cancel_order() is not supported yet')
    def cancel_unified_order(self, order, params={}):
        """Cancel an order given its unified order structure (uses its 'id' and 'symbol')."""
        return self.cancel_order(self.safe_value(order, 'id'), self.safe_value(order, 'symbol'), params)
    def fetch_bids_asks(self, symbols=None, params={}) -> dict:
        """Fetch best bid/ask for many symbols at once; must be overridden where supported."""
        raise NotSupported(self.id + ' API does not allow to fetch all prices at once with a single call to fetch_bids_asks() for now')
    def fetch_ticker(self, symbol, params={}):
        """Fetch a single ticker by emulating it through fetchTickers filtered to one symbol."""
        if self.has['fetchTickers']:
            tickers = self.fetch_tickers([symbol], params)
            ticker = self.safe_value(tickers, symbol)
            if ticker is None:
                raise BadSymbol(self.id + ' fetchTickers could not find a ticker for ' + symbol)
            else:
                return ticker
        else:
            raise NotSupported(self.id + ' fetchTicker not supported yet')
    def fetch_tickers(self, symbols=None, params={}):
        """Fetch tickers for many symbols at once; must be overridden where supported."""
        raise NotSupported(self.id + ' API does not allow to fetch all tickers at once with a single call to fetch_tickers() for now')
    def fetch_order_status(self, id, symbol=None, params={}):
        """Fetch only the status string of an order."""
        order = self.fetch_order(id, symbol, params)
        return order['status']
    def fetch_order(self, id, symbol=None, params={}):
        """Fetch a single order by id; must be overridden by each exchange."""
        raise NotSupported(self.id + ' fetch_order() is not supported yet')
    def fetch_unified_order(self, order, params={}):
        """Fetch an order given its unified order structure (uses its 'id' and 'symbol')."""
        return self.fetch_order(self.safe_value(order, 'id'), self.safe_value(order, 'symbol'), params)
    def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
        """Fetch orders; must be overridden where supported."""
        raise NotSupported(self.id + ' fetch_orders() is not supported yet')
    def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
        """Fetch open orders; must be overridden where supported."""
        raise NotSupported(self.id + ' fetch_open_orders() is not supported yet')
    def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
        """Fetch closed orders; must be overridden where supported."""
        raise NotSupported(self.id + ' fetch_closed_orders() is not supported yet')
    def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
        """Fetch the user's own trades; must be overridden where supported."""
        raise NotSupported(self.id + ' fetch_my_trades() is not supported yet')
    def fetch_order_trades(self, id, symbol=None, params={}):
        """Fetch the trades belonging to one order; must be overridden where supported."""
        raise NotSupported(self.id + ' fetch_order_trades() is not supported yet')
    def fetch_transactions(self, code=None, since=None, limit=None, params={}):
        """Fetch deposits and withdrawals; must be overridden where supported."""
        raise NotSupported(self.id + ' fetch_transactions() is not supported yet')
    def fetch_deposits(self, code=None, since=None, limit=None, params={}):
        """Fetch deposits; must be overridden where supported."""
        raise NotSupported(self.id + ' fetch_deposits() is not supported yet')
    def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
        """Fetch withdrawals; must be overridden where supported."""
        raise NotSupported(self.id + ' fetch_withdrawals() is not supported yet')
    def fetch_deposit_address(self, code, params={}):
        """Fetch one currency's deposit address by emulating it through fetchDepositAddresses."""
        if self.has['fetchDepositAddresses']:
            deposit_addresses = self.fetch_deposit_addresses([code], params)
            deposit_address = self.safe_value(deposit_addresses, code)
            if deposit_address is None:
                raise NotSupported(self.id + ' fetch_deposit_address could not find a deposit address for ' + code + ', make sure you have created a corresponding deposit address in your wallet on the exchange website')
            else:
                return deposit_address
        else:
            raise NotSupported(self.id + ' fetchDepositAddress not supported yet')
    def parse_funding_rate(self, contract, market=None):
        """Parse one raw funding-rate entry; must be overridden where supported."""
        raise NotSupported(self.id + ' parse_funding_rate() is not supported yet')
    def parse_funding_rates(self, response, market=None):
        """Parse a list of raw funding-rate entries into a dict keyed by symbol."""
        result = {}
        for entry in response:
            parsed = self.parse_funding_rate(entry, market)
            result[parsed['symbol']] = parsed
        return result
    def parse_ohlcv(self, ohlcv, market=None):
        """Normalize one raw candle: list input becomes [timestamp, open, high, low, close, volume];
        non-list input is passed through unchanged (exchanges override for dict formats)."""
        if isinstance(ohlcv, list):
            return [
                self.safe_integer(ohlcv, 0),  # timestamp
                self.safe_float(ohlcv, 1),  # open
                self.safe_float(ohlcv, 2),  # high
                self.safe_float(ohlcv, 3),  # low
                self.safe_float(ohlcv, 4),  # close
                self.safe_float(ohlcv, 5),  # volume
            ]
        else:
            return ohlcv
    def parse_ohlcvs(self, ohlcvs, market=None, timeframe='1m', since=None, limit=None):
        """Parse raw candles, sort them by timestamp, then window by since/limit."""
        parsed = [self.parse_ohlcv(ohlcv, market) for ohlcv in ohlcvs]
        sorted = self.sort_by(parsed, 0)  # NOTE: intentionally shadows the builtin sorted() here
        tail = since is None
        # with no since-filter the limit keeps the most recent candles, not the oldest
        return self.filter_by_since_limit(sorted, since, limit, 0, tail)
def parse_bid_ask(self, bidask, price_key=0, amount_key=0):
return [self.safe_number(bidask, price_key), self.safe_number(bidask, amount_key)]
    def parse_bids_asks(self, bidasks, price_key=0, amount_key=1):
        """Parse a raw list of bid/ask levels (lists or dicts), skipping levels with a falsy price or amount."""
        result = []
        if len(bidasks):
            if type(bidasks[0]) is list:
                for bidask in bidasks:
                    if bidask[price_key] and bidask[amount_key]:
                        result.append(self.parse_bid_ask(bidask, price_key, amount_key))
            elif type(bidasks[0]) is dict:
                for bidask in bidasks:
                    if (price_key in bidask) and (amount_key in bidask) and (bidask[price_key] and bidask[amount_key]):
                        result.append(self.parse_bid_ask(bidask, price_key, amount_key))
            else:
                raise ExchangeError(self.id + ' unrecognized bidask format: ' + str(bidasks[0]))
        return result
    def fetch_l2_order_book(self, symbol, limit=None, params={}):
        """Fetch an order book with volumes aggregated per price level (level 2)."""
        orderbook = self.fetch_order_book(symbol, limit, params)
        return self.extend(orderbook, {
            'bids': self.sort_by(self.aggregate(orderbook['bids']), 0, True),  # descending by price
            'asks': self.sort_by(self.aggregate(orderbook['asks']), 0),  # ascending by price
        })
    def parse_order_book(self, orderbook, symbol, timestamp=None, bids_key='bids', asks_key='asks', price_key=0, amount_key=1):
        """Build a unified order book structure: bids sorted descending, asks ascending by price."""
        return {
            'symbol': symbol,
            'bids': self.sort_by(self.parse_bids_asks(orderbook[bids_key], price_key, amount_key) if (bids_key in orderbook) and isinstance(orderbook[bids_key], list) else [], 0, True),
            'asks': self.sort_by(self.parse_bids_asks(orderbook[asks_key], price_key, amount_key) if (asks_key in orderbook) and isinstance(orderbook[asks_key], list) else [], 0),
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp) if timestamp is not None else None,
            'nonce': None,
        }
    def safe_balance(self, balance):
        """Complete a balance structure in place: per currency, derive whichever of
        free/used/total is missing from the other two (Precise string math), then
        parse the strings to numbers and mirror them into the indexed
        'free'/'used'/'total' sub-dicts."""
        currencies = self.omit(balance, ['info', 'timestamp', 'datetime', 'free', 'used', 'total']).keys()
        balance['free'] = {}
        balance['used'] = {}
        balance['total'] = {}
        for currency in currencies:
            # total = free + used
            if balance[currency].get('total') is None:
                if balance[currency].get('free') is not None and balance[currency].get('used') is not None:
                    balance[currency]['total'] = Precise.string_add(balance[currency]['free'], balance[currency]['used'])
            # free = total - used
            if balance[currency].get('free') is None:
                if balance[currency].get('total') is not None and balance[currency].get('used') is not None:
                    balance[currency]['free'] = Precise.string_sub(balance[currency]['total'], balance[currency]['used'])
            # used = total - free
            if balance[currency].get('used') is None:
                if balance[currency].get('total') is not None and balance[currency].get('free') is not None:
                    balance[currency]['used'] = Precise.string_sub(balance[currency]['total'], balance[currency]['free'])
            balance[currency]['free'] = self.parse_number(balance[currency]['free'])
            balance[currency]['used'] = self.parse_number(balance[currency]['used'])
            balance[currency]['total'] = self.parse_number(balance[currency]['total'])
            balance['free'][currency] = balance[currency]['free']
            balance['used'][currency] = balance[currency]['used']
            balance['total'][currency] = balance[currency]['total']
        return balance
    def fetch_partial_balance(self, part, params={}):
        """Fetch the balance and return only one section ('free', 'used' or 'total')."""
        balance = self.fetch_balance(params)
        return balance[part]
    def fetch_free_balance(self, params={}):
        """Fetch only the free (available) balances."""
        return self.fetch_partial_balance('free', params)
    def fetch_used_balance(self, params={}):
        """Fetch only the used (on-hold) balances."""
        return self.fetch_partial_balance('used', params)
    def fetch_total_balance(self, params={}):
        """Fetch only the total balances."""
        return self.fetch_partial_balance('total', params)
def fetch_trading_fees(self, symbol, params={}):
raise NotSupported(self.id + ' fetch_trading_fees() is not supported yet')
def fetch_trading_fee(self, symbol, params={}):
if not self.has['fetchTradingFees']:
raise NotSupported(self.id + ' fetch_trading_fee() is not supported yet')
return self.fetch_trading_fees(params)
    def fetch_funding_fees(self, params={}):
        """Fetch deposit/withdrawal (funding) fees; must be overridden where supported."""
        raise NotSupported(self.id + ' fetch_funding_fees() is not supported yet')
    def fetch_funding_fee(self, code, params={}):
        """Fetch funding fees for a single currency via fetch_funding_fees()."""
        if not self.has['fetchFundingFees']:
            raise NotSupported(self.id + ' fetch_funding_fee() is not supported yet')
        return self.fetch_funding_fees(params)
    def load_trading_limits(self, symbols=None, reload=False, params={}):
        """Fetch per-symbol trading limits once (or on reload) and merge them into self.markets."""
        if self.has['fetchTradingLimits']:
            if reload or not('limitsLoaded' in list(self.options.keys())):
                response = self.fetch_trading_limits(symbols)
                for i in range(0, len(symbols)):
                    symbol = symbols[i]
                    self.markets[symbol] = self.deep_extend(self.markets[symbol], response[symbol])
                self.options['limitsLoaded'] = self.milliseconds()  # remember when limits were last refreshed
        return self.markets
    def fetch_ohlcvc(self, symbol, timeframe='1m', since=None, limit=None, params={}):
        """Fetch OHLCVC candles (OHLCV plus trade count) by aggregating public trades."""
        if not self.has['fetchTrades']:
            raise NotSupported(self.id + ' fetch_ohlcv() is not supported yet')
        self.load_markets()
        trades = self.fetch_trades(symbol, since, limit, params)
        return self.build_ohlcvc(trades, timeframe, since, limit)
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
ohlcvs = self.fetch_ohlcvc(symbol, timeframe, since, limit, params)
return [ohlcv[0:-1] for ohlcv in ohlcvs]
    def fetch_status(self, params={}):
        """Return the cached exchange status, refreshing its 'updated' timestamp when fetchTime is supported."""
        if self.has['fetchTime']:
            updated = self.fetch_time(params)
            self.status['updated'] = updated
        return self.status
    def fetchOHLCV(self, symbol, timeframe='1m', since=None, limit=None, params={}):
        """camelCase alias of fetch_ohlcv()."""
        return self.fetch_ohlcv(symbol, timeframe, since, limit, params)
    def parse_trading_view_ohlcv(self, ohlcvs, market=None, timeframe='1m', since=None, limit=None):
        """Convert column-oriented TradingView data to rows, then parse them as regular OHLCV candles."""
        result = self.convert_trading_view_to_ohlcv(ohlcvs)
        return self.parse_ohlcvs(result, market, timeframe, since, limit)
def convert_trading_view_to_ohlcv(self, ohlcvs, t='t', o='o', h='h', l='l', c='c', v='v', ms=False):
result = []
for i in range(0, len(ohlcvs[t])):
result.append([
ohlcvs[t][i] if ms else (int(ohlcvs[t][i]) * 1000),
ohlcvs[o][i],
ohlcvs[h][i],
ohlcvs[l][i],
ohlcvs[c][i],
ohlcvs[v][i],
])
return result
def convert_ohlcv_to_trading_view(self, ohlcvs, t='t', o='o', h='h', l='l', c='c', v='v', ms=False):
result = {}
result[t] = []
result[o] = []
result[h] = []
result[l] = []
result[c] = []
result[v] = []
for i in range(0, len(ohlcvs)):
result[t].append(ohlcvs[i][0] if ms else int(ohlcvs[i][0] / 1000))
result[o].append(ohlcvs[i][1])
result[h].append(ohlcvs[i][2])
result[l].append(ohlcvs[i][3])
result[c].append(ohlcvs[i][4])
result[v].append(ohlcvs[i][5])
return result
def build_ohlcvc(self, trades, timeframe='1m', since=None, limit=None):
ms = self.parse_timeframe(timeframe) * 1000
ohlcvs = []
(timestamp, open, high, low, close, volume, count) = (0, 1, 2, 3, 4, 5, 6)
num_trades = len(trades)
oldest = (num_trades - 1) if limit is None else min(num_trades - 1, limit)
for i in range(0, oldest):
trade = trades[i]
if (since is not None) and (trade['timestamp'] < since):
continue
opening_time = None
if trade['timestamp']:
opening_time = int(math.floor(trade['timestamp'] / ms) * ms)
j = len(ohlcvs)
candle = j - 1
if (j == 0) or (opening_time and opening_time >= ohlcvs[candle][timestamp] + ms):
ohlcvs.append([
opening_time,
trade['price'],
trade['price'],
trade['price'],
trade['price'],
trade['amount'],
1,
])
else:
ohlcvs[candle][high] = max(ohlcvs[candle][high], trade['price'])
ohlcvs[candle][low] = min(ohlcvs[candle][low], trade['price'])
ohlcvs[candle][close] = trade['price']
ohlcvs[candle][volume] += trade['amount']
ohlcvs[candle][count] += 1
return ohlcvs
@staticmethod
def parse_timeframe(timeframe):
amount = int(timeframe[0:-1])
unit = timeframe[-1]
if 'y' == unit:
scale = 60 * 60 * 24 * 365
elif 'M' == unit:
scale = 60 * 60 * 24 * 30
elif 'w' == unit:
scale = 60 * 60 * 24 * 7
elif 'd' == unit:
scale = 60 * 60 * 24
elif 'h' == unit:
scale = 60 * 60
elif 'm' == unit:
scale = 60
elif 's' == unit:
scale = 1
else:
raise NotSupported('timeframe unit {} is not supported'.format(unit))
return amount * scale
    @staticmethod
    def round_timeframe(timeframe, timestamp, direction=ROUND_DOWN):
        """Round a millisecond timestamp down (default) or up to the nearest timeframe boundary."""
        ms = Exchange.parse_timeframe(timeframe) * 1000
        offset = timestamp % ms
        # subtracting the offset floors to the boundary; adding ms back rounds up
        return timestamp - offset + (ms if direction == ROUND_UP else 0)
def safe_ticker(self, ticker, market=None, legacy=True):
if legacy:
symbol = self.safe_value(ticker, 'symbol')
if symbol is None:
symbol = self.safe_symbol(None, market)
timestamp = self.safe_integer(ticker, 'timestamp')
baseVolume = self.safe_value(ticker, 'baseVolume')
quoteVolume = self.safe_value(ticker, 'quoteVolume')
vwap = self.safe_value(ticker, 'vwap')
if vwap is None:
vwap = self.vwap(baseVolume, quoteVolume)
open = self.safe_value(ticker, 'open')
close = self.safe_value(ticker, 'close')
last = self.safe_value(ticker, 'last')
change = self.safe_value(ticker, 'change')
percentage = self.safe_value(ticker, 'percentage')
average = self.safe_value(ticker, 'average')
if (last is not None) and (close is None):
close = last
elif (last is None) and (close is not None):
last = close
if (last is not None) and (open is not None):
if change is None:
change = last - open
if average is None:
average = self.sum(last, open) / 2
if (percentage is None) and (change is not None) and (open is not None) and (open > 0):
percentage = change / open * 100
if (change is None) and (percentage is not None) and (last is not None):
change = percentage / 100 * last
if (open is None) and (last is not None) and (change is not None):
open = last - change
if (vwap is not None) and (baseVolume is not None) and (quoteVolume is None):
quoteVolume = vwap / baseVolume
if (vwap is not None) and (quoteVolume is not None) and (baseVolume is None):
baseVolume = quoteVolume / vwap
ticker['symbol'] = symbol
ticker['timestamp'] = timestamp
ticker['datetime'] = self.iso8601(timestamp)
ticker['open'] = open
ticker['close'] = close
ticker['last'] = last
ticker['vwap'] = vwap
ticker['change'] = change
ticker['percentage'] = percentage
ticker['average'] = average
return ticker
else:
open = self.safe_value(ticker, 'open')
close = self.safe_value(ticker, 'close')
last = self.safe_value(ticker, 'last')
change = self.safe_value(ticker, 'change')
percentage = self.safe_value(ticker, 'percentage')
average = self.safe_value(ticker, 'average')
vwap = self.safe_value(ticker, 'vwap')
baseVolume = self.safe_value(ticker, 'baseVolume')
quoteVolume = self.safe_value(ticker, 'quoteVolume')
if vwap is None:
vwap = Precise.string_div(quoteVolume, baseVolume)
if (last is not None) and (close is None):
close = last
elif (last is None) and (close is not None):
last = close
if (last is not None) and (open is not None):
if change is None:
change = Precise.string_sub(last, open)
if average is None:
average = Precise.string_div(Precise.string_add(last, open), '2')
if (percentage is None) and (change is not None) and (open is not None) and (Precise.string_gt(open, '0')):
percentage = Precise.string_mul(Precise.string_div(change, open), '100')
if (change is None) and (percentage is not None) and (last is not None):
change = Precise.string_div(Precise.string_mul(percentage, last), '100')
if (open is None) and (last is not None) and (change is not None):
open = Precise.string_sub(last, change)
# they should be done in the derived classes
return self.extend(ticker, {
'bid': self.safe_number(ticker, 'bid'),
'bidVolume': self.safe_number(ticker, 'bidVolume'),
'ask': self.safe_number(ticker, 'ask'),
'askVolume': self.safe_number(ticker, 'askVolume'),
'high': self.safe_number(ticker, 'high'),
'low': self.safe_number(ticker, 'low'),
'open': self.parse_number(open),
'close': self.parse_number(close),
'last': self.parse_number(last),
'change': self.parse_number(change),
'percentage': self.parse_number(percentage),
'average': self.parse_number(average),
'vwap': self.parse_number(vwap),
'baseVolume': self.parse_number(baseVolume),
'quoteVolume': self.parse_number(quoteVolume),
})
    def parse_tickers(self, tickers, symbols=None, params={}):
        """Parse raw tickers and filter/index the result by the requested symbols."""
        result = []
        values = self.to_array(tickers)
        for i in range(0, len(values)):
            result.append(self.extend(self.parse_ticker(values[i]), params))
        return self.filter_by_array(result, 'symbol', symbols)
    def parse_deposit_addresses(self, addresses, codes=None, indexed=True, params={}):
        """Parse raw deposit addresses, optionally filtering by currency codes and indexing by currency."""
        result = []
        for i in range(0, len(addresses)):
            address = self.extend(self.parse_deposit_address(addresses[i]), params)
            result.append(address)
        if codes:
            result = self.filter_by_array(result, 'currency', codes, False)
        return self.index_by(result, 'currency') if indexed else result
    def parse_trades(self, trades, market=None, since=None, limit=None, params={}):
        """Parse raw trades, sort by (timestamp, id), then window by symbol/since/limit."""
        array = self.to_array(trades)
        array = [self.merge(self.parse_trade(trade, market), params) for trade in array]
        array = self.sort_by_2(array, 'timestamp', 'id')
        symbol = market['symbol'] if market else None
        tail = since is None
        return self.filter_by_symbol_since_limit(array, symbol, since, limit, tail)
    def parse_transactions(self, transactions, currency=None, since=None, limit=None, params={}):
        """Parse raw deposit/withdrawal records, sort by timestamp, then window by currency/since/limit."""
        array = self.to_array(transactions)
        array = [self.extend(self.parse_transaction(transaction, currency), params) for transaction in array]
        array = self.sort_by(array, 'timestamp')
        code = currency['code'] if currency else None
        tail = since is None
        return self.filter_by_currency_since_limit(array, code, since, limit, tail)
    def parse_transfers(self, transfers, currency=None, since=None, limit=None, params={}):
        """Parse raw internal transfers, sort by timestamp, then window by currency/since/limit."""
        array = self.to_array(transfers)
        array = [self.extend(self.parse_transfer(transfer, currency), params) for transfer in array]
        array = self.sort_by(array, 'timestamp')
        code = currency['code'] if currency else None
        tail = since is None
        return self.filter_by_currency_since_limit(array, code, since, limit, tail)
    def parse_ledger(self, data, currency=None, since=None, limit=None, params={}):
        """Parse raw ledger rows (one row may expand into several entries), sort and window them."""
        array = self.to_array(data)
        result = []
        for item in array:
            entry = self.parse_ledger_entry(item, currency)
            if isinstance(entry, list):
                # a single raw row may map to multiple unified ledger entries
                result += [self.extend(i, params) for i in entry]
            else:
                result.append(self.extend(entry, params))
        result = self.sort_by(result, 'timestamp')
        code = currency['code'] if currency else None
        tail = since is None
        return self.filter_by_currency_since_limit(result, code, since, limit, tail)
    def safe_ledger_entry(self, entry, currency=None):
        """Fill in derivable ledger-entry fields (before/after balances, amount, fee,
        direction) using Precise string math, then overlay the exchange-provided values."""
        currency = self.safe_currency(None, currency)
        direction = self.safe_string(entry, 'direction')
        before = self.safe_string(entry, 'before')
        after = self.safe_string(entry, 'after')
        amount = self.safe_string(entry, 'amount')
        fee = self.safe_string(entry, 'fee')
        if amount is not None and fee is not None:
            # reconstruct the missing side of the balance from amount + fee
            if before is None and after is not None:
                amountAndFee = Precise.string_add(amount, fee)
                before = Precise.string_sub(after, amountAndFee)
            elif before is not None and after is None:
                amountAndFee = Precise.string_add(amount, fee)
                after = Precise.string_add(before, amountAndFee)
        if before is not None and after is not None:
            # infer the flow direction from the balance delta
            if direction is None:
                if Precise.string_gt(before, after):
                    direction = 'out'
                if Precise.string_gt(after, before):
                    direction = 'in'
        if amount is None and fee is not None:
            betweenAfterBefore = Precise.string_sub(after, before)
            amount = Precise.string_sub(betweenAfterBefore, fee)
        if amount is not None and fee is None:
            betweenAfterBefore = Precise.string_sub(after, before)
            fee = Precise.string_sub(betweenAfterBefore, amount)
        return self.extend({
            'id': None,
            'timestamp': None,
            'datetime': None,
            'direction': None,
            'account': None,
            'referenceId': None,
            'referenceAccount': None,
            'type': None,
            'currency': currency['code'],
            'amount': amount,
            'before': before,
            'after': after,
            'status': None,
            'fee': fee,
            'info': None,
        }, entry)
    def parse_orders(self, orders, market=None, since=None, limit=None, params={}):
        """Parse raw orders (a list, or a dict keyed by order id), sort by timestamp, then window them."""
        if isinstance(orders, list):
            array = [self.extend(self.parse_order(order, market), params) for order in orders]
        else:
            # dict form: the key is the order id, injected into each raw order before parsing
            array = [self.extend(self.parse_order(self.extend({'id': id}, order), market), params) for id, order in orders.items()]
        array = self.sort_by(array, 'timestamp')
        symbol = market['symbol'] if market else None
        tail = since is None
        return self.filter_by_symbol_since_limit(array, symbol, since, limit, tail)
def safe_market(self, marketId, market=None, delimiter=None):
if marketId is not None:
if self.markets_by_id is not None and marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
elif delimiter is not None:
parts = marketId.split(delimiter)
if len(parts) == 2:
baseId = self.safe_string(parts, 0)
quoteId = self.safe_string(parts, 1)
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
return {
'id': marketId,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
}
else:
return {
'id': marketId,
'symbol': marketId,
'base': None,
'quote': None,
'baseId': None,
'quoteId': None,
}
if market is not None:
return market
return {
'id': marketId,
'symbol': marketId,
'base': None,
'quote': None,
'baseId': None,
'quoteId': None,
}
    def safe_symbol(self, marketId, market=None, delimiter=None):
        """Resolve a unified symbol from an exchange market id (see safe_market)."""
        market = self.safe_market(marketId, market, delimiter)
        return market['symbol']
    def safe_currency(self, currency_id, currency=None):
        """Resolve a unified currency structure from an exchange currency id, falling back to a stub."""
        if currency_id is None and currency is not None:
            return currency
        if (self.currencies_by_id is not None) and (currency_id in self.currencies_by_id):
            return self.currencies_by_id[currency_id]
        return {
            'id': currency_id,
            'code': self.common_currency_code(currency_id.upper()) if currency_id is not None else currency_id
        }
    def safe_currency_code(self, currency_id, currency=None):
        """Resolve a unified currency code from an exchange currency id."""
        currency = self.safe_currency(currency_id, currency)
        return currency['code']
    def filter_by_value_since_limit(self, array, field, value=None, since=None, limit=None, key='timestamp', tail=False):
        """Filter entries by entry[field] == value and entry[key] >= since, then
        apply the limit (keeping the tail of the list when tail is True)."""
        array = self.to_array(array)
        if value is not None:
            array = [entry for entry in array if entry[field] == value]
        if since is not None:
            array = [entry for entry in array if entry[key] >= since]
        if limit is not None:
            array = array[-limit:] if tail else array[:limit]
        return array
    def filter_by_symbol_since_limit(self, array, symbol=None, since=None, limit=None, tail=False):
        """Filter entries by symbol, since-timestamp and limit."""
        return self.filter_by_value_since_limit(array, 'symbol', symbol, since, limit, 'timestamp', tail)
    def filter_by_currency_since_limit(self, array, code=None, since=None, limit=None, tail=False):
        """Filter entries by currency code, since-timestamp and limit."""
        return self.filter_by_value_since_limit(array, 'currency', code, since, limit, 'timestamp', tail)
    def filter_by_since_limit(self, array, since=None, limit=None, key='timestamp', tail=False):
        """Filter entries by entry[key] >= since, then apply the limit (from the tail when tail=True)."""
        array = self.to_array(array)
        if since is not None:
            array = [entry for entry in array if entry[key] >= since]
        if limit is not None:
            array = array[-limit:] if tail else array[:limit]
        return array
    def filter_by_symbol(self, array, symbol=None):
        """Return only the entries matching the given symbol (all entries when symbol is falsy)."""
        array = self.to_array(array)
        if symbol:
            return [entry for entry in array if entry['symbol'] == symbol]
        return array
    def filter_by_array(self, objects, key, values=None, indexed=True):
        """Keep only objects whose `key` field is in `values`, optionally indexing the result by that key."""
        objects = self.to_array(objects)
        # return all of them if no values were passed in
        if values is None:
            return self.index_by(objects, key) if indexed else objects
        result = []
        for i in range(0, len(objects)):
            value = objects[i][key] if key in objects[i] else None
            if value in values:
                result.append(objects[i])
        return self.index_by(result, key) if indexed else result
    def currency(self, code):
        """Return the unified currency structure for a code (or exchange currency id); raise if unknown."""
        if not self.currencies:
            raise ExchangeError(self.id + ' currencies not loaded')
        if isinstance(code, str):
            if code in self.currencies:
                return self.currencies[code]
            elif code in self.currencies_by_id:
                return self.currencies_by_id[code]
        raise ExchangeError(self.id + ' does not have currency code ' + str(code))
    def market(self, symbol):
        """Return the unified market structure for a symbol (or exchange market id); raise if unknown."""
        if not self.markets:
            raise ExchangeError(self.id + ' markets not loaded')
        if not self.markets_by_id:
            raise ExchangeError(self.id + ' markets not loaded')
        if isinstance(symbol, str):
            if symbol in self.markets:
                return self.markets[symbol]
            elif symbol in self.markets_by_id:
                return self.markets_by_id[symbol]
        raise BadSymbol(self.id + ' does not have market symbol ' + symbol)
    def market_ids(self, symbols):
        """Map a list of unified symbols to exchange market ids."""
        return [self.market_id(symbol) for symbol in symbols]
    def market_id(self, symbol):
        """Map a unified symbol to its exchange market id (echo the symbol when no market dict is found)."""
        market = self.market(symbol)
        return market['id'] if type(market) is dict else symbol
    def calculate_fee(self, symbol, type, side, amount, price, takerOrMaker='taker', params={}):
        """Compute the trading fee for an order from the market's feeSide and taker/maker rate.

        :returns dict: {'type': taker|maker, 'currency': fee currency code, 'rate': rate, 'cost': rate * cost}
        """
        market = self.markets[symbol]
        feeSide = self.safe_string(market, 'feeSide', 'quote')
        key = 'quote'
        cost = None
        if feeSide == 'quote':
            # the fee is always in quote currency
            cost = amount * price
        elif feeSide == 'base':
            # the fee is always in base currency
            cost = amount
        elif feeSide == 'get':
            # the fee is always in the currency you get
            cost = amount
            if side == 'sell':
                cost *= price
            else:
                key = 'base'
        elif feeSide == 'give':
            # the fee is always in the currency you give
            cost = amount
            if side == 'buy':
                cost *= price
            else:
                key = 'base'
        rate = market[takerOrMaker]
        if cost is not None:
            cost *= rate
        return {
            'type': takerOrMaker,
            'currency': market[key],
            'rate': rate,
            'cost': cost,
        }
    def edit_limit_buy_order(self, id, symbol, *args):
        """Edit an existing limit buy order (emulated as cancel + re-create)."""
        return self.edit_limit_order(id, symbol, 'buy', *args)
    def edit_limit_sell_order(self, id, symbol, *args):
        """Edit an existing limit sell order (emulated as cancel + re-create)."""
        return self.edit_limit_order(id, symbol, 'sell', *args)
    def edit_limit_order(self, id, symbol, *args):
        """Edit an existing limit order (emulated as cancel + re-create)."""
        return self.edit_order(id, symbol, 'limit', *args)
    def edit_order(self, id, symbol, *args):
        """Emulate order editing by cancelling and re-creating; requires rate limiting to be enabled."""
        if not self.enableRateLimit:
            raise ExchangeError(self.id + ' edit_order() requires enableRateLimit = true')
        self.cancel_order(id, symbol)
        return self.create_order(symbol, *args)
    def create_limit_order(self, symbol, side, amount, price, params={}) -> dict:
        """Place a limit order."""
        return self.create_order(symbol, 'limit', side, amount, price, params)
    def create_market_order(self, symbol, side, amount, price=None, params={}) -> dict:
        """Place a market order."""
        return self.create_order(symbol, 'market', side, amount, price, params)
    def create_limit_buy_order(self, symbol, amount, price, params={}) -> dict:
        """Place a limit buy order."""
        return self.create_order(symbol, 'limit', 'buy', amount, price, params)
    def create_limit_sell_order(self, symbol, amount, price, params={}) -> dict:
        """Place a limit sell order."""
        return self.create_order(symbol, 'limit', 'sell', amount, price, params)
    def create_market_buy_order(self, symbol, amount, params={}) -> dict:
        """Place a market buy order (no price)."""
        return self.create_order(symbol, 'market', 'buy', amount, None, params)
    def create_market_sell_order(self, symbol, amount, params={}) -> dict:
        """Place a market sell order (no price)."""
        return self.create_order(symbol, 'market', 'sell', amount, None, params)
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the signed request (url/method/headers/body); must be implemented by each exchange."""
        raise NotSupported(self.id + ' sign() pure method must be redefined in derived classes')
    def vwap(self, baseVolume, quoteVolume):
        """Volume-weighted average price = quoteVolume / baseVolume (None when not derivable)."""
        return (quoteVolume / baseVolume) if (quoteVolume is not None) and (baseVolume is not None) and (baseVolume > 0) else None
# -------------------------------------------------------------------------
    def check_required_dependencies(self):
        """Raise if an optional crypto dependency required by this exchange is not installed."""
        if self.requiresEddsa and eddsa is None:
            raise NotSupported(self.id + ' Eddsa functionality requires python-axolotl-curve25519, install with `pip install python-axolotl-curve25519==0.4.1.post2`: https://github.com/tgalal/python-axolotl-curve25519')
    def privateKeyToAddress(self, privateKey):
        """Derive an Ethereum address ('0x' + last 20 keccak bytes of the public key) from a hex private key."""
        private_key_bytes = base64.b16decode(Exchange.encode(privateKey), True)
        public_key_bytes = ecdsa.SigningKey.from_string(private_key_bytes, curve=ecdsa.SECP256k1).verifying_key.to_string()
        public_key_hash = keccak.SHA3(public_key_bytes)
        return '0x' + Exchange.decode(base64.b16encode(public_key_hash))[-40:].lower()
@staticmethod
def remove0x_prefix(value):
if value[:2] == '0x':
return value[2:]
return value
    def hashMessage(self, message):
        """Keccak-hash a hex message with the Ethereum signed-message prefix; returns 0x-prefixed hex."""
        message_bytes = base64.b16decode(Exchange.encode(Exchange.remove0x_prefix(message)), True)
        hash_bytes = keccak.SHA3(b"\x19Ethereum Signed Message:\n" + Exchange.encode(str(len(message_bytes))) + message_bytes)
        return '0x' + Exchange.decode(base64.b16encode(hash_bytes)).lower()
    @staticmethod
    def signHash(hash, privateKey):
        """secp256k1-sign a 32-byte hex hash, returning Ethereum-style {r, s, v} components."""
        signature = Exchange.ecdsa(hash[-64:], privateKey, 'secp256k1', None)
        return {
            'r': '0x' + signature['r'],
            's': '0x' + signature['s'],
            'v': 27 + signature['v'],  # Ethereum recovery-id offset
        }
    def sign_message_string(self, message, privateKey):
        """Sign a message and return the signature as a single concatenated r + s + v hex string."""
        signature = self.signMessage(message, privateKey)
        return signature['r'] + Exchange.remove0x_prefix(signature['s']) + Exchange.binary_to_base16(Exchange.number_to_be(signature['v'], 1))
    def signMessage(self, message, privateKey):
        """Sign a hex message with the Ethereum signed-message prefix applied
        (hashMessage) and return the {r, s, v} signature components."""
        #
        # The following comment is related to MetaMask, we use the upper type of signature prefix:
        #
        # z.ecSignOrderHashAsync ('0xcfdb0a485324ff37699b4c8557f6858f25916fc6fce5993b32fe018aea510b9f',
        #                         '0x731fc101bbe102221c91c31ed0489f1ddfc439a3', {
        #                             prefixType: 'ETH_SIGN',
        #                             shouldAddPrefixBeforeCallingEthSign: true
        #                         }).then ((e, r) => console.log (e,r))
        #
        #     {                            ↓
        #         v: 28,
        #         r: "0xea7a68268b47c48d5d7a4c900e6f9af0015bf70951b3db2f1d835c5d544aaec2",
        #         s: "0x5d1db2a060c955c1fde4c967237b995c2361097405407b33c6046c8aeb3ccbdf"
        #     }
        #
        # --------------------------------------------------------------------
        #
        # z.ecSignOrderHashAsync ('0xcfdb0a485324ff37699b4c8557f6858f25916fc6fce5993b32fe018aea510b9f',
        #                         '0x731fc101bbe102221c91c31ed0489f1ddfc439a3', {
        #                             prefixType: 'NONE',
        #                             shouldAddPrefixBeforeCallingEthSign: true
        #                         }).then ((e, r) => console.log (e,r))
        #
        #     {                            ↓
        #         v: 27,
        #         r: "0xc8c710022c57de4f529d448e9b40517dd9bfb49ff1eb245f5856664b865d14a6",
        #         s: "0x0740bb21f4f094fbbdbafa903bb8f057f82e0c6e4fe65d19a1daed4ed97cd394"
        #     }
        #
        message_hash = self.hashMessage(message)
        signature = self.signHash(message_hash[-64:], privateKey[-64:])
        return signature
def get_network(self, network, code):
network = network.upper()
aliases = {
'ETHEREUM': 'ETH',
'ETHER': 'ETH',
'ERC20': 'ETH',
'ETH': 'ETH',
'TRC20': 'TRX',
'TRON': 'TRX',
'TRX': 'TRX',
'BEP20': 'BSC',
'BSC': 'BSC',
'HRC20': 'HT',
'HECO': 'HT',
'SPL': 'SOL',
'SOL': 'SOL',
'TERRA': 'LUNA',
'LUNA': 'LUNA',
'POLYGON': 'MATIC',
'MATIC': 'MATIC',
'EOS': 'EOS',
'WAVES': 'WAVES',
'AVALANCHE': 'AVAX',
'AVAX': 'AVAX',
'QTUM': 'QTUM',
'CHZ': 'CHZ',
'NEO': 'NEO',
'ONT': 'ONT',
'RON': 'RON',
}
if network == code:
return network
elif network in aliases:
return aliases[network]
else:
raise NotSupported(self.id + ' network ' + network + ' is not yet supported')
    def oath(self):
        """Return the current TOTP code for self.twofa, for exchanges that require 2FA codes."""
        if self.twofa is not None:
            return self.totp(self.twofa)
        else:
            raise ExchangeError(self.id + ' set .twofa to use this feature')
    @staticmethod
    def totp(key):
        """Compute a 6-digit RFC 6238 TOTP code (30-second step, HMAC-SHA1) from a base32 secret."""
        def hex_to_dec(n):
            return int(n, base=16)
        def base32_to_bytes(n):
            # re-pad the secret to a multiple of 8 characters as base32 requires
            missing_padding = len(n) % 8
            padding = 8 - missing_padding if missing_padding > 0 else 0
            padded = n.upper() + ('=' * padding)
            return base64.b32decode(padded)  # throws an error if the key is invalid
        epoch = int(time.time()) // 30
        hmac_res = Exchange.hmac(epoch.to_bytes(8, 'big'), base32_to_bytes(key.replace(' ', '')), hashlib.sha1, 'hex')
        offset = hex_to_dec(hmac_res[-1]) * 2  # dynamic truncation offset, in hex digits
        otp = str(hex_to_dec(hmac_res[offset: offset + 8]) & 0x7fffffff)
        return otp[-6:]
    @staticmethod
    def number_to_le(n, size):
        """Encode integer n as `size` little-endian bytes."""
        return int(n).to_bytes(size, 'little')
    @staticmethod
    def number_to_be(n, size):
        """Encode integer n as `size` big-endian bytes."""
        return int(n).to_bytes(size, 'big')
    @staticmethod
    def base16_to_binary(s):
        """Decode a (case-insensitive) hex string to bytes."""
        return base64.b16decode(s, True)
    @staticmethod
    def binary_to_base16(s):
        """Encode bytes as a lowercase hex string."""
        return Exchange.decode(base64.b16encode(s)).lower()
    def sleep(self, milliseconds):
        """Block the current thread for the given number of milliseconds."""
        return time.sleep(milliseconds / 1000)
    @staticmethod
    def base58_to_binary(s):
        """Decode a base58 string to bytes, lazily building the shared class-level codec tables."""
        if Exchange.base58_decoder is None:
            Exchange.base58_decoder = {}
            Exchange.base58_encoder = {}
            for i, c in enumerate(Exchange.base58_alphabet):
                Exchange.base58_decoder[c] = i
                Exchange.base58_encoder[i] = c
        result = 0
        for i in range(len(s)):
            result *= 58
            result += Exchange.base58_decoder[s[i]]
        return result.to_bytes((result.bit_length() + 7) // 8, 'big')
    @staticmethod
    def binary_to_base58(b):
        """Encode bytes as a base58 string, lazily building the shared class-level codec tables."""
        if Exchange.base58_encoder is None:
            Exchange.base58_decoder = {}
            Exchange.base58_encoder = {}
            for i, c in enumerate(Exchange.base58_alphabet):
                Exchange.base58_decoder[c] = i
                Exchange.base58_encoder[i] = c
        result = 0
        # undo decimal_to_bytes
        for byte in b:
            result *= 0x100
            result += byte
        string = []
        while result > 0:
            result, next_character = divmod(result, 58)
            string.append(Exchange.base58_encoder[next_character])
        string.reverse()
        return ''.join(string)
    def reduce_fees_by_currency(self, fees, string=False):
        #
        # Sums a list of fee structures per unique (currency, rate) pair.
        # Entries without a 'currency' key are dropped.
        #
        # Input (string=True; with string=False the costs/rates are numbers):
        #
        #     [
        #         {'currency': 'BTC', 'cost': '0.1'},
        #         {'currency': 'BTC', 'cost': '0.2'},
        #         {'currency': 'BTC', 'cost': '0.2', 'rate': '0.00123'},
        #         {'currency': 'BTC', 'cost': '0.4', 'rate': '0.00123'},
        #         {'currency': 'USDT', 'cost': '12.3456'},
        #     ]
        #
        # Output — one entry per (currency, rate):
        #
        #     [
        #         {'currency': 'BTC', 'cost': '0.3'},
        #         {'currency': 'BTC', 'cost': '0.6', 'rate': '0.00123'},
        #         {'currency': 'USDT', 'cost': '12.3456'},
        #     ]
        #
        # With string=True costs are summed with Precise (exact decimal string
        # arithmetic); with string=False they are summed as floats via self.sum.
        #
        # reduced maps currency code -> {rateKey -> accumulated fee dict},
        # where rateKey is the rate string or '' for rate-less entries.
        reduced = {}
        for i in range(0, len(fees)):
            fee = fees[i]
            feeCurrencyCode = self.safe_string(fee, 'currency')
            if feeCurrencyCode is not None:
                rate = self.safe_string(fee, 'rate')
                cost = self.safe_value(fee, 'cost')
                if not (feeCurrencyCode in reduced):
                    reduced[feeCurrencyCode] = {}
                rateKey = '' if (rate is None) else rate
                if rateKey in reduced[feeCurrencyCode]:
                    # existing bucket: accumulate the cost
                    if string:
                        reduced[feeCurrencyCode][rateKey]['cost'] = Precise.string_add(reduced[feeCurrencyCode][rateKey]['cost'], cost)
                    else:
                        reduced[feeCurrencyCode][rateKey]['cost'] = self.sum(reduced[feeCurrencyCode][rateKey]['cost'], cost)
                else:
                    # first entry for this (currency, rate): start a new bucket
                    reduced[feeCurrencyCode][rateKey] = {
                        'currency': feeCurrencyCode,
                        'cost': cost if string else self.parse_number(cost),
                    }
                    if rate is not None:
                        reduced[feeCurrencyCode][rateKey]['rate'] = rate if string else self.parse_number(rate)
        # flatten the nested buckets back into a single list
        result = []
        feeValues = list(reduced.values())
        for i in range(0, len(feeValues)):
            reducedFeeValues = list(feeValues[i].values())
            result = self.array_concat(result, reducedFeeValues)
        return result
def safe_trade(self, trade, market=None):
amount = self.safe_string(trade, 'amount')
price = self.safe_string(trade, 'price')
cost = self.safe_string(trade, 'cost')
if cost is None:
# contract trading
contractSize = self.safe_string(market, 'contractSize')
multiplyPrice = price
if contractSize is not None:
inverse = self.safe_value(market, 'inverse', False)
if inverse:
multiplyPrice = Precise.string_div('1', price)
multiplyPrice = Precise.string_mul(multiplyPrice, contractSize)
cost = Precise.string_mul(multiplyPrice, amount)
parseFee = self.safe_value(trade, 'fee') is None
parseFees = self.safe_value(trade, 'fees') is None
shouldParseFees = parseFee or parseFees
fees = self.safe_value(trade, 'fees', [])
if shouldParseFees:
tradeFees = self.safe_value(trade, 'fees')
if tradeFees is not None:
for j in range(0, len(tradeFees)):
tradeFee = tradeFees[j]
fees.append(self.extend({}, tradeFee))
else:
tradeFee = self.safe_value(trade, 'fee')
if tradeFee is not None:
fees.append(self.extend({}, tradeFee))
fee = self.safe_value(trade, 'fee')
if shouldParseFees:
reducedFees = self.reduce_fees_by_currency(fees, True) if self.reduceFees else fees
reducedLength = len(reducedFees)
for i in range(0, reducedLength):
reducedFees[i]['cost'] = self.safe_number(reducedFees[i], 'cost')
if 'rate' in reducedFees[i]:
reducedFees[i]['rate'] = self.safe_number(reducedFees[i], 'rate')
if not parseFee and (reducedLength == 0):
fee['cost'] = self.safe_number(fee, 'cost')
if 'rate' in fee:
fee['rate'] = self.safe_number(fee, 'rate')
reducedFees.append(fee)
if parseFees:
trade['fees'] = reducedFees
if parseFee and (reducedLength == 1):
trade['fee'] = reducedFees[0]
tradeFee = self.safe_value(trade, 'fee')
if tradeFee is not None:
tradeFee['cost'] = self.safe_number(tradeFee, 'cost')
if 'rate' in tradeFee:
tradeFee['rate'] = self.safe_number(tradeFee, 'rate')
trade['fee'] = tradeFee
trade['amount'] = self.parse_number(amount)
trade['price'] = self.parse_number(price)
trade['cost'] = self.parse_number(cost)
return trade
def safe_order(self, order, market=None):
# parses numbers as strings
# it is important pass the trades as unparsed rawTrades
amount = self.omit_zero(self.safe_string(order, 'amount'))
remaining = self.safe_string(order, 'remaining')
filled = self.safe_string(order, 'filled')
cost = self.safe_string(order, 'cost')
average = self.omit_zero(self.safe_string(order, 'average'))
price = self.omit_zero(self.safe_string(order, 'price'))
lastTradeTimeTimestamp = self.safe_integer(order, 'lastTradeTimestamp')
parseFilled = (filled is None)
parseCost = (cost is None)
parseLastTradeTimeTimestamp = (lastTradeTimeTimestamp is None)
fee = self.safe_value(order, 'fee')
parseFee = (fee is None)
parseFees = self.safe_value(order, 'fees') is None
shouldParseFees = parseFee or parseFees
fees = self.safe_value(order, 'fees', [])
trades = []
if parseFilled or parseCost or shouldParseFees:
rawTrades = self.safe_value(order, 'trades', trades)
oldNumber = self.number
# we parse trades as strings here!
self.number = str
trades = self.parse_trades(rawTrades, market, None, None, {
'symbol': order['symbol'],
'side': order['side'],
'type': order['type'],
'order': order['id'],
})
self.number = oldNumber
if isinstance(trades, list) and len(trades):
# move properties that are defined in trades up into the order
if order['symbol'] is None:
order['symbol'] = trades[0]['symbol']
if order['side'] is None:
order['side'] = trades[0]['side']
if order['type'] is None:
order['type'] = trades[0]['type']
if order['id'] is None:
order['id'] = trades[0]['order']
if parseFilled:
filled = '0'
if parseCost:
cost = '0'
for i in range(0, len(trades)):
trade = trades[i]
tradeAmount = self.safe_string(trade, 'amount')
if parseFilled and (tradeAmount is not None):
filled = Precise.string_add(filled, tradeAmount)
tradeCost = self.safe_string(trade, 'cost')
if parseCost and (tradeCost is not None):
cost = Precise.string_add(cost, tradeCost)
tradeTimestamp = self.safe_value(trade, 'timestamp')
if parseLastTradeTimeTimestamp and (tradeTimestamp is not None):
if lastTradeTimeTimestamp is None:
lastTradeTimeTimestamp = tradeTimestamp
else:
lastTradeTimeTimestamp = max(lastTradeTimeTimestamp, tradeTimestamp)
if shouldParseFees:
tradeFees = self.safe_value(trade, 'fees')
if tradeFees is not None:
for j in range(0, len(tradeFees)):
tradeFee = tradeFees[j]
fees.append(self.extend({}, tradeFee))
else:
tradeFee = self.safe_value(trade, 'fee')
if tradeFee is not None:
fees.append(self.extend({}, tradeFee))
if shouldParseFees:
reducedFees = self.reduce_fees_by_currency(fees, True) if self.reduceFees else fees
reducedLength = len(reducedFees)
for i in range(0, reducedLength):
reducedFees[i]['cost'] = self.parse_number(reducedFees[i]['cost'])
if 'rate' in reducedFees[i]:
reducedFees[i]['rate'] = self.parse_number(reducedFees[i]['rate'])
if not parseFee and (reducedLength == 0):
fee['cost'] = self.safe_number(fee, 'cost')
if 'rate' in fee:
fee['rate'] = self.parse_number(fee['rate'])
reducedFees.append(fee)
if parseFees:
order['fees'] = reducedFees
if parseFee and (reducedLength == 1):
order['fee'] = reducedFees[0]
if amount is None:
# ensure amount = filled + remaining
if filled is not None and remaining is not None:
amount = Precise.string_add(filled, remaining)
elif self.safe_string(order, 'status') == 'closed':
amount = filled
if filled is None:
if amount is not None and remaining is not None:
filled = Precise.string_sub(amount, remaining)
if remaining is None:
if amount is not None and filled is not None:
remaining = Precise.string_sub(amount, filled)
# ensure that the average field is calculated correctly
if average is None:
if (filled is not None) and (cost is not None) and Precise.string_gt(filled, '0'):
average = Precise.string_div(cost, filled)
# also ensure the cost field is calculated correctly
costPriceExists = (average is not None) or (price is not None)
if parseCost and (filled is not None) and costPriceExists:
multiplyPrice = None
if average is None:
multiplyPrice = price
else:
multiplyPrice = average
# contract trading
contractSize = self.safe_string(market, 'contractSize')
if contractSize is not None:
inverse = self.safe_value(market, 'inverse', False)
if inverse:
multiplyPrice = Precise.string_div('1', multiplyPrice)
multiplyPrice = Precise.string_mul(multiplyPrice, contractSize)
cost = Precise.string_mul(multiplyPrice, filled)
# support for market orders
orderType = self.safe_value(order, 'type')
emptyPrice = (price is None) or Precise.string_equals(price, '0')
if emptyPrice and (orderType == 'market'):
price = average
# we have trades with string values at self point so we will mutate them
for i in range(0, len(trades)):
entry = trades[i]
entry['amount'] = self.safe_number(entry, 'amount')
entry['price'] = self.safe_number(entry, 'price')
entry['cost'] = self.safe_number(entry, 'cost')
fee = self.safe_value(entry, 'fee', {})
fee['cost'] = self.safe_number(fee, 'cost')
if 'rate' in fee:
fee['rate'] = self.safe_number(fee, 'rate')
entry['fee'] = fee
# timeInForceHandling
timeInForce = self.safe_string(order, 'timeInForce')
if self.safe_value(order, 'postOnly', False):
timeInForce = 'PO'
elif self.safe_string(order, 'type') == 'market':
timeInForce = 'IOC'
return self.extend(order, {
'lastTradeTimestamp': lastTradeTimeTimestamp,
'price': self.parse_number(price),
'amount': self.parse_number(amount),
'cost': self.parse_number(cost),
'average': self.parse_number(average),
'filled': self.parse_number(filled),
'remaining': self.parse_number(remaining),
'trades': trades,
'timeInForce': timeInForce,
})
def parse_number(self, value, default=None):
if value is None:
return default
else:
try:
return self.number(value)
except Exception:
return default
def safe_number(self, dictionary, key, default=None):
value = self.safe_string(dictionary, key)
return self.parse_number(value, default)
def safe_number_2(self, dictionary, key1, key2, default=None):
value = self.safe_string_2(dictionary, key1, key2)
return self.parse_number(value, default)
def parse_precision(self, precision):
if precision is None:
return None
return '1e' + Precise.string_neg(precision)
def omit_zero(self, string_number):
if string_number is None or string_number == '':
return None
if float(string_number) == 0:
return None
return string_number
def handle_withdraw_tag_and_params(self, tag, params):
if isinstance(tag, dict):
params = self.extend(tag, params)
tag = None
if tag is None:
tag = self.safe_string(params, 'tag')
if tag is not None:
params = self.omit(params, 'tag')
return [tag, params]
def get_supported_mapping(self, key, mapping={}):
# Takes a key and a dictionary, and returns the dictionary's value for that key
if (key in mapping):
return mapping[key]
else:
raise NotSupported(self.id + ' ' + key + ' does not have a value in mapping')
def fetch_borrow_rate(self, code, params={}):
self.load_markets()
if not self.has['fetchBorrowRates']:
raise NotSupported(self.id + 'fetchBorrowRate() is not supported yet')
borrow_rates = self.fetch_borrow_rates(params)
rate = self.safe_value(borrow_rates, code)
if rate is None:
raise ExchangeError(self.id + 'fetchBorrowRate() could not find the borrow rate for currency code ' + code)
return rate
def handle_market_type_and_params(self, method_name, market=None, params={}):
default_type = self.safe_string_2(self.options, 'defaultType', 'type', 'spot')
method_options = self.safe_value(self.options, method_name)
method_type = default_type
if method_options is not None:
if isinstance(method_options, str):
method_type = method_options
else:
method_type = self.safe_string_2(method_options, 'defaultType', 'type', method_type)
market_type = method_type if market is None else market['type']
type = self.safe_string_2(params, 'defaultType', 'type', market_type)
params = self.omit(params, ['defaultType', 'type'])
return [type, params]
def load_time_difference(self, params={}):
server_time = self.fetch_time(params)
after = self.milliseconds()
self.options['timeDifference'] = after - server_time
return self.options['timeDifference']
def parse_leverage_tiers(self, response, symbols, market_id_key):
tiers = {}
for item in response:
id = self.safe_string(item, market_id_key)
market = self.safe_market(id)
symbol = market['symbol']
symbols_length = 0
if (symbols is not None):
symbols_length = len(symbols)
contract = self.safe_value(market, 'contract', False)
if (contract and (symbols_length == 0 or symbol in symbols)):
tiers[symbol] = self.parse_market_leverage_tiers(item, market)
return tiers
def fetch_market_leverage_tiers(self, symbol, params={}):
if self.has['fetchLeverageTiers']:
market = self.market(symbol)
if (not market['contract']):
raise BadRequest(self.id + ' fetch_leverage_tiers() supports contract markets only')
tiers = self.fetch_leverage_tiers([symbol])
return self.safe_value(tiers, symbol)
else:
raise NotSupported(self.id + 'fetch_market_leverage_tiers() is not supported yet')
def is_post_only(self, type, time_in_force, exchange_specific_option, params={}):
post_only = self.safe_value_2(params, 'postOnly', 'post_only', False)
params = self.omit(params, ['post_only', 'postOnly'])
time_in_force_upper = time_in_force.upper()
type_lower = type.lower()
ioc = time_in_force_upper == 'IOC'
time_in_force_post_only = time_in_force_upper == 'PO'
is_market = type_lower == 'market'
post_only = post_only or type_lower == 'postonly' or time_in_force_post_only or exchange_specific_option
if (post_only):
if (ioc):
raise InvalidOrder(self.id + ' postOnly orders cannot have timeInForce equal to ' + time_in_force)
elif (is_market):
raise InvalidOrder(self.id + ' postOnly orders cannot have type ' + type)
else:
time_in_force = None if time_in_force_post_only else time_in_force
return ['limit', True, time_in_force, params]
else:
return [type, False, time_in_force, params]
def create_post_only_order(self, symbol, type, side, amount, price, params={}):
if not self.has['createPostOnlyOrder']:
raise NotSupported(self.id + ' create_post_only_order() is not supported yet')
query = self.extend(params, {'postOnly': True})
return self.create_order(symbol, type, side, amount, price, query)
def create_stop_order(self, symbol, type, side, amount, price=None, stopPrice=None, params={}):
if not self.has['createStopOrder']:
raise NotSupported(self.id + 'create_stop_order() is not supported yet')
if stopPrice is None:
raise ArgumentsRequired(self.id + ' create_stop_order() requires a stopPrice argument')
query = self.extend(params, {'stopPrice': stopPrice})
return self.create_order(symbol, type, side, amount, price, query)
def create_stop_limit_order(self, symbol, side, amount, price, stopPrice, params={}):
if not self.has['createStopLimitOrder']:
raise NotSupported(self.id + ' create_stop_limit_order() is not supported yet')
query = self.extend(params, {'stopPrice': stopPrice})
return self.create_order(symbol, 'limit', side, amount, price, query)
def create_stop_market_order(self, symbol, side, amount, stopPrice, params={}):
if not self.has['createStopMarketOrder']:
raise NotSupported(self.id + ' create_stop_market_order() is not supported yet')
query = self.extend(params, {'stopPrice': stopPrice})
return self.create_order(symbol, 'market', side, amount, None, query)
def parse_borrow_interests(self, response, market=None):
interest = []
for i in range(len(response)):
row = response[i]
interest.append(self.parse_borrow_interest(row, market))
return interest
| true | true |
f7378eed54e6814a8713abc80bfd6d91829808fb | 8,117 | py | Python | docs/conf.py | hyzhak/zalando-research-fashionmnist-analyze | 5dfff74f80982769c7ffae746abc58fc7113113b | [
"MIT"
] | 1 | 2020-05-29T22:04:52.000Z | 2020-05-29T22:04:52.000Z | docs/conf.py | hyzhak/zalando-research-fashionmnist-analyze | 5dfff74f80982769c7ffae746abc58fc7113113b | [
"MIT"
] | null | null | null | docs/conf.py | hyzhak/zalando-research-fashionmnist-analyze | 5dfff74f80982769c7ffae746abc58fc7113113b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Sphinx build configuration for the
# zalando-research-fashionmnist-experiments documentation.
#
# Only values that differ from the Sphinx defaults are set here; see
# https://www.sphinx-doc.org/en/master/usage/configuration.html for the
# full list of available options.

import os
import sys

# -- General configuration -----------------------------------------------------

# No Sphinx extensions are enabled.
extensions = []

# Paths (relative to this directory) that contain templates.
templates_path = ['_templates']

# The suffix of source filenames and the master toctree document.
source_suffix = '.rst'
master_doc = 'index'

# Project metadata.
project = u'zalando-research-fashionmnist-experiments'
version = '0.1'    # the short X.Y version
release = '0.1'    # the full version, including alpha/beta/rc tags

# Patterns (relative to the source directory) to ignore when looking for sources.
exclude_patterns = ['_build']

# The Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# -- Options for HTML output ---------------------------------------------------

html_theme = 'default'

# Custom static files (style sheets, etc.), copied after the builtin ones.
html_static_path = ['_static']

# Output file base name for the HTML help builder.
htmlhelp_basename = 'zalando-research-fashionmnist-experimentsdoc'

# -- Options for LaTeX output --------------------------------------------------

latex_elements = {}

# One tuple per document:
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index',
     'zalando-research-fashionmnist-experiments.tex',
     u'zalando-research-fashionmnist-experiments Documentation',
     u"Eugene Krevenets", 'manual'),
]

# -- Options for manual page output --------------------------------------------

# One tuple per manual page:
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'zalando-research-fashionmnist-experiments', u'zalando-research-fashionmnist-experiments Documentation',
     [u"Eugene Krevenets"], 1)
]

# -- Options for Texinfo output ------------------------------------------------

# One tuple per document:
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'zalando-research-fashionmnist-experiments', u'zalando-research-fashionmnist-experiments Documentation',
     u"Eugene Krevenets", 'zalando-research-fashionmnist-experiments',
     'my research of Zalando Research Fashion MNIST', 'Miscellaneous'),
]
| 33.130612 | 118 | 0.715412 |
import os
import sys
extensions = []
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'zalando-research-fashionmnist-experiments'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'zalando-research-fashionmnist-experimentsdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'zalando-research-fashionmnist-experiments.tex',
u'zalando-research-fashionmnist-experiments Documentation',
u"Eugene Krevenets", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'zalando-research-fashionmnist-experiments', u'zalando-research-fashionmnist-experiments Documentation',
[u"Eugene Krevenets"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'zalando-research-fashionmnist-experiments', u'zalando-research-fashionmnist-experiments Documentation',
u"Eugene Krevenets", 'zalando-research-fashionmnist-experiments',
'my research of Zalando Research Fashion MNIST', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| true | true |
f7379164fc6db9d093eeaf98153dca4a6cfd22ae | 1,499 | py | Python | components/jobRunner/QuestionJobRunnerComponent.py | AmeyKamat/ProjectJarvis | 25bef53bda7ffe09b3aa5c47a910c5b35b8cf551 | [
"MIT"
] | 2 | 2020-07-18T18:54:28.000Z | 2020-07-19T18:37:14.000Z | components/jobRunner/QuestionJobRunnerComponent.py | arpit006/ProjectJarvis | 25bef53bda7ffe09b3aa5c47a910c5b35b8cf551 | [
"MIT"
] | null | null | null | components/jobRunner/QuestionJobRunnerComponent.py | arpit006/ProjectJarvis | 25bef53bda7ffe09b3aa5c47a910c5b35b8cf551 | [
"MIT"
] | 1 | 2020-07-18T18:54:31.000Z | 2020-07-18T18:54:31.000Z | import requests
import datetime
import configparser
import json
import copy
from circuits import Component, handler
from events.JobCompleteEvent import JobCompleteEvent
from events.EntityPreprocessedEvent import EntityPreprocessedEvent
class QuestionJobRunnerComponent(Component):
    """circuits component that answers 'question' intents via the
    DuckDuckGo Instant Answer API.

    If DuckDuckGo returns no direct answer, definition, or abstract, the
    event is re-fired with intent 'search-general' so a generic web-search
    component can take over instead.
    """

    # API endpoint configuration, loaded once at class-definition time.
    # (The original also created an unused configparser.ConfigParser() here
    # that was immediately overwritten by this json.load; it is removed.)
    with open('./components/jobRunner/api_config.json', 'r') as apiConfig:
        config = json.load(apiConfig)

    @handler("EntityPreprocessedEvent")
    def handleEntityPreprocessedEvent(self, context):
        """Entry point: only 'question' intents are handled by this component."""
        if context.intent == 'question':
            self.handleQuestionRequest(context)

    def handleQuestionRequest(self, context):
        """Query DuckDuckGo for an instant answer and populate context.result.

        Answer fields are tried in priority order: direct Answer, then
        Definition, then AbstractText. Missing keys in the API payload are
        treated as empty (the original raised KeyError on a missing field).
        """
        response = requests.get(self.config["DUCKDUCKGO_API"]["URL"]["QUESTION"], params={
            'q': context.message,
            'format': 'json'
        })
        formattedResponse = response.json()

        # (answer field, source label) pairs, highest priority first.
        candidates = [
            ("Answer", "DuckDuckGo"),
            ("Definition", formattedResponse.get("DefinitionSource", "")),
            ("AbstractText", formattedResponse.get("AbstractSource", "")),
        ]
        for answerKey, source in candidates:
            answer = formattedResponse.get(answerKey, "")
            if answer != "":
                context.result["answer"] = answer
                context.result["source"] = source
                break
        else:
            # No usable instant answer: hand the request off to the
            # general-search pipeline.
            context.intent = 'search-general'
            self.fire(EntityPreprocessedEvent(context))

        # Only signal completion when the question was answered here; the
        # fallback path leaves completion to the search-general handler.
        if context.intent == 'question':
            self.fire(JobCompleteEvent(copy.deepcopy(context)))
| 31.229167 | 85 | 0.749166 | import requests
import datetime
import configparser
import json
import copy
from circuits import Component, handler
from events.JobCompleteEvent import JobCompleteEvent
from events.EntityPreprocessedEvent import EntityPreprocessedEvent
class QuestionJobRunnerComponent(Component):
    """circuits component that answers 'question' intents via the
    DuckDuckGo Instant Answer API, falling back to a general search."""

    # NOTE(review): this ConfigParser instance is dead code — it is
    # immediately overwritten by the json.load below.
    config = configparser.ConfigParser()
    with open('./components/jobRunner/api_config.json', 'r') as apiConfig:
        config = json.load(apiConfig)

    @handler("EntityPreprocessedEvent")
    def handleEntityPreprocessedEvent(self, context):
        # Only 'question' intents are handled by this component.
        if context.intent == 'question':
            self.handleQuestionRequest(context)

    def handleQuestionRequest(self, context):
        """Query DuckDuckGo and fill context.result, or re-dispatch the
        event as a general search when no instant answer is available."""
        response = requests.get(self.config["DUCKDUCKGO_API"]["URL"]["QUESTION"], params= {
            'q': context.message,
            'format': 'json'
        })
        formattedResponse = response.json()
        # Prefer a direct answer, then a definition, then the abstract text.
        if formattedResponse["Answer"] != "":
            context.result["answer"] = formattedResponse["Answer"]
            context.result["source"] = "DuckDuckGo"
        elif formattedResponse["Definition"] != "":
            context.result["answer"] = formattedResponse["Definition"]
            context.result["source"] = formattedResponse["DefinitionSource"]
        elif formattedResponse["AbstractText"] != "":
            context.result["answer"] = formattedResponse["AbstractText"]
            context.result["source"] = formattedResponse["AbstractSource"]
        else:
            # No instant answer: re-fire with the general-search intent.
            context.intent = 'search-general'
            self.fire(EntityPreprocessedEvent(context))
        # Completion fires only when the intent is still 'question',
        # i.e. an answer was found above.
        if context.intent == 'question':
            self.fire(JobCompleteEvent(copy.deepcopy(context)))
| true | true |
f737918efd2a2d94f0a2683c179a02a529cb42ed | 250 | py | Python | 02-swiss-army-knife/03-sympy/02_limit.py | jameszhan/notes-ml | c633d04e5443eab71bc3b27fff89d57b89d1786c | [
"Apache-2.0"
] | null | null | null | 02-swiss-army-knife/03-sympy/02_limit.py | jameszhan/notes-ml | c633d04e5443eab71bc3b27fff89d57b89d1786c | [
"Apache-2.0"
] | null | null | null | 02-swiss-army-knife/03-sympy/02_limit.py | jameszhan/notes-ml | c633d04e5443eab71bc3b27fff89d57b89d1786c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import sympy as sp

# Symbolic variable used in every limit below.
x = sp.Symbol('x')

# Elementary limits, checked symbolically against their known values.
assert sp.limit(sp.exp(x), x, 0) == 1
assert sp.limit(sp.sin(x) / x, x, 0) == 1
assert sp.limit(1.0 / x, x, 0) == sp.oo
assert sp.limit((1 + 1.0 / x) ** x, x, sp.oo) == sp.E.evalf()
| 19.230769 | 61 | 0.536 |
import sympy as sp

x = sp.symbols('x')
# lim_{x->0} e^x = 1
assert 1 == sp.limit(sp.exp(x), x, 0)
# Classic limit: lim_{x->0} sin(x)/x = 1
assert 1 == sp.limit(sp.sin(x) / x, x, 0)
# 1/x diverges to +oo approaching 0 (presumably the one-sided limit from
# the right, sympy's default direction — confirm against sympy docs).
assert sp.oo == sp.limit(1.0 / x, x, 0)
# Definition of e: lim_{x->oo} (1 + 1/x)^x = e, compared numerically via evalf().
assert sp.E.evalf() == sp.limit((1 + 1.0 / x) ** x, x, sp.oo)
| true | true |
f73792cd475518e27d7d608ce2f44b249c7c457d | 1,383 | py | Python | test/PR_test/unit_test/dataset/test_dir_dataset.py | DwijayDS/fastestimator | 9b288cb2bd870f971ec4cee09d0b3205e1316a94 | [
"Apache-2.0"
] | 57 | 2019-05-21T21:29:26.000Z | 2022-02-23T05:55:21.000Z | test/PR_test/unit_test/dataset/test_dir_dataset.py | vbvg2008/fastestimator | 6061a4fbbeb62a2194ef82ba8017f651710d0c65 | [
"Apache-2.0"
] | 93 | 2019-05-23T18:36:07.000Z | 2022-03-23T17:15:55.000Z | test/PR_test/unit_test/dataset/test_dir_dataset.py | vbvg2008/fastestimator | 6061a4fbbeb62a2194ef82ba8017f651710d0c65 | [
"Apache-2.0"
] | 47 | 2019-05-09T15:41:37.000Z | 2022-03-26T17:00:08.000Z | # Copyright 2020 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import tempfile
import unittest
import fastestimator as fe
class TestDirDataset(unittest.TestCase):
    """Sanity check that DirDataset indexes every file under a directory tree."""

    def test_dataset(self):
        # Layout: root/<sub-a>/{fa1,fa2}.txt and root/<sub-b>/{fb1,fb2}.txt.
        tmpdirname = tempfile.mkdtemp()
        a_tmpdirname = tempfile.TemporaryDirectory(dir=tmpdirname)
        b_tmpdirname = tempfile.TemporaryDirectory(dir=tmpdirname)
        # Create the four fixture files and close them immediately.  The
        # original kept all four handles open for the test's lifetime
        # (a resource leak that triggers ResourceWarning).
        for dirname, stem in ((a_tmpdirname.name, "fa"), (b_tmpdirname.name, "fb")):
            for idx in (1, 2):
                with open(os.path.join(dirname, "{}{}.txt".format(stem, idx)), "x"):
                    pass
        dataset = fe.dataset.DirDataset(root_dir=tmpdirname)
        # Two files per subdirectory => four entries total.
        self.assertEqual(len(dataset), 4)
| 36.394737 | 80 | 0.676067 |
import os
import tempfile
import unittest
import fastestimator as fe
class TestDirDataset(unittest.TestCase):
    """Checks that DirDataset enumerates files from nested directories."""

    def test_dataset(self):
        # Build root/{A,B}/ with two .txt fixture files in each subdirectory.
        root = tempfile.mkdtemp()
        sub_a = tempfile.TemporaryDirectory(dir=root)
        sub_b = tempfile.TemporaryDirectory(dir=root)
        # Handles are intentionally kept open for the test's duration,
        # matching the original fixture setup.
        handles = [
            open(os.path.join(sub_a.name, "fa1.txt"), "x"),
            open(os.path.join(sub_a.name, "fa2.txt"), "x"),
            open(os.path.join(sub_b.name, "fb1.txt"), "x"),
            open(os.path.join(sub_b.name, "fb2.txt"), "x"),
        ]
        dataset = fe.dataset.DirDataset(root_dir=root)
        # Four files total, regardless of which subdirectory they live in.
        self.assertEqual(len(dataset), 4)
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.