"""
WSGI config for whatsnew project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "whatsnew.settings")
application = get_wsgi_application()
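# A typical WSGI server can load this module directly; the command below is an
# illustration of the standard layout, not part of this project's own docs:
#   gunicorn whatsnew.wsgi:application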
import copy
import json
import math
import os
import random
import re
import socket
import string
import time
import traceback
import sys
from functools import cmp_to_key
from http.client import IncompleteRead
from multiprocessing import Process, Manager, Semaphore
from threading import Thread
import crc32
import logger
import testconstants
from cb_tools.cbstats import Cbstats
from remote.remote_util import RemoteMachineShellConnection
from collection.collections_rest_client import CollectionsRest
from collection.collections_stats import CollectionsStats
from couchbase_helper.document import DesignDocument
from couchbase_helper.documentgenerator import BatchedDocumentGenerator
from couchbase_helper.stats_tools import StatsCommon
from deepdiff import DeepDiff
from mc_bin_client import MemcachedError
from membase.api.exception import BucketCreationException
from membase.api.exception import N1QLQueryException, DropIndexException, CreateIndexException, \
DesignDocCreationException, QueryViewException, ReadDocumentException, RebalanceFailedException, \
GetBucketInfoFailed, CompactViewFailed, SetViewInfoNotFound, FailoverFailedException, \
ServerUnavailableException, BucketFlushFailed, CBRecoveryFailedException, BucketCompactionException, \
AutoFailoverException, NodesFailureException, ServerAlreadyJoinedException
from membase.api.rest_client import RestConnection, Bucket, RestHelper
from membase.helper.bucket_helper import BucketOperationHelper
from memcacheConstants import ERR_NOT_FOUND, NotFoundError
from memcached.helper.data_helper import MemcachedClientHelper
from memcached.helper.kvstore import KVStore
from remote.remote_util import RemoteMachineShellConnection, RemoteUtilHelper
from tasks.future import Future
from testconstants import MIN_KV_QUOTA, INDEX_QUOTA, FTS_QUOTA, COUCHBASE_FROM_4DOT6, \
THROUGHPUT_CONCURRENCY, ALLOW_HTP, CBAS_QUOTA, CLUSTER_QUOTA_RATIO
from TestInput import TestInputServer, TestInputSingleton
try:
CHECK_FLAG = False
if (TestInputSingleton.input.param("testrunner_client", None) == testconstants.PYTHON_SDK) or \
((testconstants.TESTRUNNER_CLIENT in list(os.environ.keys())) and os.environ[testconstants.TESTRUNNER_CLIENT] == testconstants.PYTHON_SDK):
try:
from sdk_client import SDKSmartClient as VBucketAwareMemcached
from sdk_client import SDKBasedKVStoreAwareSmartClient as KVStoreAwareSmartClient
except:
from sdk_client3 import SDKSmartClient as VBucketAwareMemcached
from sdk_client3 import SDKBasedKVStoreAwareSmartClient as KVStoreAwareSmartClient
if (TestInputSingleton.input.param("enable_sdk_logging", False)):
import logging
import couchbase
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
couchbase.enable_logging()
else:
CHECK_FLAG = True
from memcached.helper.data_helper import VBucketAwareMemcached, KVStoreAwareSmartClient
except Exception as e:
CHECK_FLAG = False
try:
from sdk_client import SDKSmartClient as VBucketAwareMemcached
from sdk_client import SDKBasedKVStoreAwareSmartClient as KVStoreAwareSmartClient
except:
from sdk_client3 import SDKSmartClient as VBucketAwareMemcached
from sdk_client3 import SDKBasedKVStoreAwareSmartClient as KVStoreAwareSmartClient
# TODO: Setup stacktracer
# TODO: Needs "easy_install pygments"
# import stacktracer
# stacktracer.trace_start("trace.html",interval=30,auto=True) # Set auto flag to always update file!
CONCURRENCY_LOCK = Semaphore(THROUGHPUT_CONCURRENCY)
PENDING = 'PENDING'
EXECUTING = 'EXECUTING'
CHECKING = 'CHECKING'
FINISHED = 'FINISHED'
class Task(Future):
def __init__(self, name):
Future.__init__(self)
self.log = logger.Logger.get_logger()
self.state = PENDING
self.name = name
self.cancelled = False
self.retries = 0
self.res = None
def step(self, task_manager):
if not self.done():
if self.state == PENDING:
self.state = EXECUTING
task_manager.schedule(self)
elif self.state == EXECUTING:
self.execute(task_manager)
elif self.state == CHECKING:
self.check(task_manager)
elif self.state != FINISHED:
raise Exception("Bad State in {0}: {1}".format(self.name, self.state))
def execute(self, task_manager):
raise NotImplementedError
def check(self, task_manager):
raise NotImplementedError
def set_unexpected_exception(self, e, suffix=""):
self.log.error("Unexpected exception [{0}] caught".format(e) + suffix)
self.log.error(''.join(traceback.format_stack()))
self.set_exception(e)
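# Lifecycle sketch (illustrative; the task-manager calls shown are assumptions based on
# how step() above is written): a task is driven PENDING -> EXECUTING -> CHECKING -> FINISHED
# by repeated step() calls, e.g.
#   task = SomeConcreteTask(...)        # state == PENDING
#   task_manager.schedule(task)         # step() runs execute(), then check()
#   result = task.result()              # Future result, set once state == FINISHED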
class NodeInitializeTask(Task):
def __init__(self, server, disabled_consistent_view=None,
rebalanceIndexWaitingDisabled=None,
rebalanceIndexPausingDisabled=None,
maxParallelIndexers=None,
maxParallelReplicaIndexers=None,
port=None, quota_percent=None,
index_quota_percent=None,
services=None, gsi_type='forestdb'):
Task.__init__(self, "node_init_task")
self.server = server
self.port = port or server.port
self.quota = 0
self.index_quota = 0
self.index_quota_percent = index_quota_percent
self.quota_percent = quota_percent
self.disable_consistent_view = disabled_consistent_view
self.rebalanceIndexWaitingDisabled = rebalanceIndexWaitingDisabled
self.rebalanceIndexPausingDisabled = rebalanceIndexPausingDisabled
self.maxParallelIndexers = maxParallelIndexers
self.maxParallelReplicaIndexers = maxParallelReplicaIndexers
self.services = services
self.gsi_type = gsi_type
def execute(self, task_manager):
try:
rest = RestConnection(self.server)
except Exception as error:
self.state = FINISHED
print("debuging hanging issue task 127" + str(error))
self.set_exception(error)
return
self.log.info("server: %s, nodes/self ", self.server)
info = Future.wait_until(lambda: rest.get_nodes_self(),
lambda x: x.memoryTotal > 0 or x.storageTotalRam > 0,
timeout_secs=60, interval_time=0.1,
exponential_backoff=False)
self.log.info(" %s", info.__dict__)
username = self.server.rest_username
password = self.server.rest_password
if int(info.port) in range(9091, 9991):
self.state = FINISHED
self.set_result(True)
return
self.quota = int(info.mcdMemoryReserved * CLUSTER_QUOTA_RATIO)
if self.index_quota_percent:
self.index_quota = int((info.mcdMemoryReserved * CLUSTER_QUOTA_RATIO) * \
self.index_quota_percent // 100)
rest.set_service_memoryQuota(service='indexMemoryQuota', username=username, \
password=password, memoryQuota=self.index_quota)
if self.quota_percent:
self.quota = int(info.mcdMemoryReserved * self.quota_percent / 100)
""" Adjust KV RAM to correct value when there is INDEX
and FTS services added to node from Watson """
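# Worked example (numbers are illustrative; CLUSTER_QUOTA_RATIO, INDEX_QUOTA, FTS_QUOTA
# and MIN_KV_QUOTA come from testconstants):
#   mcdMemoryReserved = 2048 MB with CLUSTER_QUOTA_RATIO = 0.8  ->  kv_quota = 1638 MB
#   services = ["kv", "index", "fts"]  ->  kv_quota -= index_quota, then kv_quota -= fts_quota
#   the remaining kv_quota must stay above MIN_KV_QUOTA, otherwise an exception is raised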
index_quota = INDEX_QUOTA
cluster_setting = rest.cluster_status()
fts_quota = FTS_QUOTA
if cluster_setting:
if cluster_setting["ftsMemoryQuota"] and \
int(cluster_setting["ftsMemoryQuota"]) >= 256:
fts_quota = int(cluster_setting["ftsMemoryQuota"])
kv_quota = int(info.mcdMemoryReserved * CLUSTER_QUOTA_RATIO)
if self.index_quota_percent:
index_quota = self.index_quota
if not self.quota_percent:
set_services = copy.deepcopy(self.services)
if set_services is None:
set_services = ["kv"]
# info = rest.get_nodes_self()
# cb_version = info.version[:5]
# if cb_version in COUCHBASE_FROM_VERSION_4:
if "index" in set_services:
self.log.info("quota for index service will be %s MB" % (index_quota))
kv_quota -= index_quota
self.log.info("set index quota to node %s " % self.server.ip)
rest.set_service_memoryQuota(service='indexMemoryQuota', memoryQuota=index_quota)
if "fts" in set_services:
self.log.info("quota for fts service will be %s MB" % (fts_quota))
kv_quota -= fts_quota
self.log.info("set both index and fts quota at node %s " % self.server.ip)
rest.set_service_memoryQuota(service='ftsMemoryQuota', memoryQuota=fts_quota)
if "cbas" in set_services:
self.log.info("quota for cbas service will be %s MB" % (CBAS_QUOTA))
kv_quota -= CBAS_QUOTA
rest.set_service_memoryQuota(service="cbasMemoryQuota", memoryQuota=CBAS_QUOTA)
if kv_quota < MIN_KV_QUOTA:
raise Exception("KV RAM needs to be more than %s MB"
" at node %s" % (MIN_KV_QUOTA, self.server.ip))
if kv_quota < int(self.quota):
self.quota = kv_quota
rest.init_cluster_memoryQuota(username, password, self.quota)
if self.services:
status = rest.init_node_services(username=username, password=password, \
port=self.port, hostname=self.server.ip, \
services=self.services)
if not status:
self.state = FINISHED
self.set_exception(Exception('unable to set services for server %s' \
% (self.server.ip)))
return
if self.disable_consistent_view is not None:
rest.set_reb_cons_view(self.disable_consistent_view)
if self.rebalanceIndexWaitingDisabled is not None:
rest.set_reb_index_waiting(self.rebalanceIndexWaitingDisabled)
if self.rebalanceIndexPausingDisabled is not None:
rest.set_rebalance_index_pausing(self.rebalanceIndexPausingDisabled)
if self.maxParallelIndexers is not None:
rest.set_max_parallel_indexers(self.maxParallelIndexers)
if self.maxParallelReplicaIndexers is not None:
rest.set_max_parallel_replica_indexers(self.maxParallelReplicaIndexers)
if self.server.internal_ip:
rest.set_alternate_address(self.server.ip)
rest.init_cluster(username, password, self.port)
remote_shell = RemoteMachineShellConnection(self.server)
remote_shell.enable_diag_eval_on_non_local_hosts()
remote_shell.disconnect()
if rest.is_cluster_compat_mode_greater_than(4.0):
if self.gsi_type == "plasma":
if (not rest.is_cluster_compat_mode_greater_than(5.0)) or (not rest.is_enterprise_edition()):
rest.set_indexer_storage_mode(username, password, "forestdb")
else:
rest.set_indexer_storage_mode(username, password, self.gsi_type)
else:
rest.set_indexer_storage_mode(username, password, self.gsi_type)
self.server.port = self.port
try:
rest = RestConnection(self.server)
except Exception as error:
self.state = FINISHED
print("debuging hanging issue task 230" + str(error))
self.set_exception(error)
return
info = rest.get_nodes_self()
if info is None:
self.state = FINISHED
self.set_exception(
Exception('unable to get information on server %s, is it available?' % (self.server.ip)))
return
self.state = CHECKING
task_manager.schedule(self)
def check(self, task_manager):
self.state = FINISHED
self.set_result(self.quota)
class BucketCreateTask(Task):
def __init__(self, bucket_params):
Task.__init__(self, "bucket_create_task")
self.server = bucket_params['server']
self.bucket = bucket_params['bucket_name']
self.alt_addr = TestInputSingleton.input.param("alt_addr", False)
self.replicas = bucket_params['replicas']
self.port = bucket_params['port']
self.size = bucket_params['size']
self.password = bucket_params['password']
self.bucket_type = bucket_params['bucket_type']
self.enable_replica_index = bucket_params['enable_replica_index']
self.eviction_policy = bucket_params['eviction_policy']
self.lww = bucket_params['lww']
self.storageBackend = bucket_params['bucket_storage']
if 'maxTTL' in bucket_params:
self.maxttl = bucket_params['maxTTL']
else:
self.maxttl = 0
if 'compressionMode' in bucket_params:
self.compressionMode = bucket_params['compressionMode']
else:
self.compressionMode = 'passive'
self.flush_enabled = bucket_params['flush_enabled']
if bucket_params['bucket_priority'] is None or bucket_params['bucket_priority'].lower() == 'low':
self.bucket_priority = 3
else:
self.bucket_priority = 8
def execute(self, task_manager):
try:
rest = RestConnection(self.server)
except Exception as error:
self.state = FINISHED
print("debuging hanging issue task 279" + str(error))
self.set_exception(error)
return
info = rest.get_nodes_self()
if self.size is None or int(self.size) <= 0:
self.size = info.memoryQuota * 2 // 3
if int(info.port) in range(9091, 9991):
try:
self.port = info.port
rest.create_bucket(bucket=self.bucket)
self.state = CHECKING
task_manager.schedule(self)
except Exception as e:
self.state = FINISHED
self.set_exception(e)
return
version = rest.get_nodes_self().version
try:
if float(version[:2]) >= 3.0 and self.bucket_priority is not None:
rest.create_bucket(bucket=self.bucket,
ramQuotaMB=self.size,
replicaNumber=self.replicas,
proxyPort=self.port,
bucketType=self.bucket_type,
replica_index=self.enable_replica_index,
flushEnabled=self.flush_enabled,
evictionPolicy=self.eviction_policy,
threadsNumber=self.bucket_priority,
lww=self.lww,
maxTTL=self.maxttl,
compressionMode=self.compressionMode,
storageBackend=self.storageBackend)
else:
rest.create_bucket(bucket=self.bucket,
ramQuotaMB=self.size,
replicaNumber=self.replicas,
proxyPort=self.port,
bucketType=self.bucket_type,
replica_index=self.enable_replica_index,
flushEnabled=self.flush_enabled,
evictionPolicy=self.eviction_policy,
lww=self.lww,
maxTTL=self.maxttl,
compressionMode=self.compressionMode)
self.state = CHECKING
task_manager.schedule(self)
except BucketCreationException as e:
self.state = FINISHED
self.set_exception(e)
# catch and set all unexpected exceptions
except Exception as e:
self.state = FINISHED
self.set_unexpected_exception(e)
def check(self, task_manager):
try:
if self.bucket_type == 'memcached' or int(self.port) in range(9091, 9991):
self.set_result(True)
self.state = FINISHED
return
if BucketOperationHelper.wait_for_memcached(self.server, self.bucket):
self.log.info("bucket '{0}' was created with per node RAM quota: {1}".format(self.bucket, self.size))
self.set_result(True)
self.state = FINISHED
return
else:
self.log.warning("vbucket map not ready after try {0}".format(self.retries))
if self.retries >= 5:
self.set_result(False)
self.state = FINISHED
return
except Exception as e:
self.log.error("Unexpected error: %s" % str(e))
self.log.warning("vbucket map not ready after try {0}".format(self.retries))
if self.retries >= 5:
self.state = FINISHED
self.set_exception(e)
self.retries = self.retries + 1
task_manager.schedule(self)
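# bucket_params sketch (keys taken from BucketCreateTask.__init__ above; the values and
# the task_manager call are illustrative only):
#   bucket_params = {'server': master, 'bucket_name': 'default', 'replicas': 1,
#                    'port': 11211, 'size': 256, 'password': None,
#                    'bucket_type': 'membase', 'enable_replica_index': 1,
#                    'eviction_policy': 'valueOnly', 'lww': False,
#                    'bucket_storage': 'couchstore', 'flush_enabled': 1,
#                    'bucket_priority': None}
#   task_manager.schedule(BucketCreateTask(bucket_params))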
class BucketDeleteTask(Task):
def __init__(self, server, bucket="default"):
Task.__init__(self, "bucket_delete_task")
self.server = server
self.bucket = bucket
def execute(self, task_manager):
try:
rest = RestConnection(self.server)
if rest.delete_bucket(self.bucket):
self.state = CHECKING
task_manager.schedule(self)
else:
self.log.info(StatsCommon.get_stats([self.server], self.bucket, "timings"))
self.state = FINISHED
self.set_result(False)
# catch and set all unexpected exceptions
except Exception as e:
self.state = FINISHED
self.log.info(StatsCommon.get_stats([self.server], self.bucket, "timings"))
self.set_unexpected_exception(e)
def check(self, task_manager):
try:
rest = RestConnection(self.server)
if BucketOperationHelper.wait_for_bucket_deletion(self.bucket, rest, 200):
self.set_result(True)
else:
self.set_result(False)
self.state = FINISHED
# catch and set all unexpected exceptions
except Exception as e:
self.state = FINISHED
self.log.info(StatsCommon.get_stats([self.server], self.bucket, "timings"))
self.set_unexpected_exception(e)
class CollectionCreateTask(Task):
def __init__(self, server, bucket, scope, collection, params=None):
Task.__init__(self, "collection_create_task")
self.server = server
self.bucket_name = bucket
self.scope_name = scope
self.collection_name = collection
self.collection_params = params
def execute(self, task_manager):
try:
rest = RestConnection(self.server)
except ServerUnavailableException as error:
self.state = FINISHED
self.set_exception(error)
return
try:
rest.create_collection(bucket=self.bucket_name, scope=self.scope_name,
collection=self.collection_name,
params=self.collection_params)
self.state = CHECKING
task_manager.schedule(self)
# catch and set all unexpected exceptions
except Exception as e:
self.state = FINISHED
self.set_unexpected_exception(e)
def check(self, task_manager):
self.set_result(True)
self.state = FINISHED
task_manager.schedule(self)
class ConcurrentIndexCreateTask(Task):
def __init__(self, server, bucket, scope, collection, query_definitions=None, IndexTrackingObject=None,
n1ql_helper=None, num_indexes=1, defer_build="", itr=0, expected_failure=[],
query_def_group="plasma_test"):
Task.__init__(self, "collection_create_task")
self.server = server
self.bucket_name = bucket
self.scope_name = scope
self.collection_name = collection
self.query_definitions = query_definitions
self.test_fail = False
self.index_tracking_obj = IndexTrackingObject
self.n1ql_helper=n1ql_helper
self.num_indexes = num_indexes
self.defer_build = defer_build
self.itr = itr
self.expected_failure = expected_failure
self.query_def_group = query_def_group
def execute(self, task_manager):
try:
RestConnection(self.server)
except ServerUnavailableException as error:
self.state = FINISHED
self.set_exception(error)
return
try:
itr = self.itr
while itr < (self.num_indexes + self.itr) and not self.index_tracking_obj.get_stop_create_index():
for query_def in self.query_definitions:
if itr >= (self.num_indexes + self.itr):
break
if self.query_def_group in query_def.groups:
query_def_copy = copy.deepcopy(query_def)
index_name = query_def_copy.get_index_name()
index_name = index_name + str(itr)
query_def_copy.update_index_name(index_name)
if self.defer_build == "":
defer_build = random.choice([True, False])
else:
defer_build = self.defer_build
index_meta = {"name": query_def_copy.get_index_name(), "query_def": query_def_copy,
"defer_build": defer_build}
if "primary" in query_def.groups:
query = query_def_copy.generate_primary_index_create_query(defer_build=defer_build)
else:
query = query_def_copy.generate_index_create_query(use_gsi_for_secondary=True, gsi_type="plasma",
defer_build=defer_build)
try:
# create index
self.n1ql_helper.run_cbq_query(query=query, server=self.server)
self.index_tracking_obj.all_indexes_metadata(index_meta=index_meta, operation="create",
defer_build=defer_build)
except Exception as err:
if not any(error in str(err) for error in self.expected_failure) \
and "Build Already In Progress" not in str(err) \
and "Timeout 1ms exceeded" not in str(err):
error_map = {"query": query, "error": str(err)}
self.index_tracking_obj.update_errors(error_map)
elif not any(error in str(err) for error in self.expected_failure):
self.index_tracking_obj.all_indexes_metadata(index_meta=index_meta, operation="create",
defer_build=defer_build)
itr += 1
self.state = CHECKING
task_manager.schedule(self)
# catch and set all unexpected exceptions
except Exception as e:
self.state = FINISHED
self.set_unexpected_exception(e)
task_manager.schedule(self)
def check(self, task_manager):
self.set_result(True)
self.state = FINISHED
task_manager.schedule(self)
class CollectionDeleteTask(Task):
def __init__(self, server, bucket, scope, collection):
Task.__init__(self, "collection_delete_task")
self.server = server
self.bucket_name = bucket
self.scope_name = scope
self.collection_name = collection
def execute(self, task_manager):
try:
RestConnection(self.server)
except ServerUnavailableException as error:
self.state = FINISHED
self.set_exception(error)
return
try:
CollectionsRest(self.server).delete_collection(bucket=self.bucket_name, scope=self.scope_name,
collection=self.collection_name)
self.state = CHECKING
task_manager.schedule(self)
# catch and set all unexpected exceptions
except Exception as e:
self.state = FINISHED
self.set_unexpected_exception(e)
def check(self, task_manager):
self.set_result(True)
self.state = FINISHED
task_manager.schedule(self)
class ScopeCollectionCreateTask(Task):
def __init__(self, server, bucket, scope, collection, params=None):
Task.__init__(self, "collection_create_task")
self.server = server
self.bucket_name = bucket
self.scope_name = scope
self.collection_name = collection
self.collection_params = params
def execute(self, task_manager):
try:
RestConnection(self.server)
except ServerUnavailableException as error:
self.state = FINISHED
self.set_exception(error)
return
try:
CollectionsRest(self.server).create_scope_collection(bucket=self.bucket_name, scope=self.scope_name,
collection=self.collection_name,
params=self.collection_params)
self.state = CHECKING
task_manager.schedule(self)
# catch and set all unexpected exceptions
except Exception as e:
self.state = FINISHED
self.set_unexpected_exception(e)
def check(self, task_manager):
self.set_result(True)
self.state = FINISHED
task_manager.schedule(self)
class ScopeCollectionDeleteTask(Task):
def __init__(self, server, bucket, scope, collection):
Task.__init__(self, "collection_delete_task")
self.server = server
self.bucket_name = bucket
self.scope_name = scope
self.collection_name = collection
def execute(self, task_manager):
try:
RestConnection(self.server)
except ServerUnavailableException as error:
self.state = FINISHED
self.set_exception(error)
return
try:
CollectionsRest(self.server).delete_scope_collection(bucket=self.bucket_name, scope=self.scope_name,
collection=self.collection_name)
self.state = CHECKING
task_manager.schedule(self)
# catch and set all unexpected exceptions
except Exception as e:
self.state = FINISHED
self.set_unexpected_exception(e)
def check(self, task_manager):
self.set_result(True)
self.state = FINISHED
task_manager.schedule(self)
class ScopeCreateTask(Task):
def __init__(self, server, bucket, scope, params=None):
Task.__init__(self, "scope_create_task")
self.server = server
self.bucket_name = bucket
self.scope_name = scope
self.scope_params = params
def execute(self, task_manager):
try:
RestConnection(self.server)
except ServerUnavailableException as error:
self.state = FINISHED
self.set_exception(error)
return
try:
CollectionsRest(self.server).create_scope(bucket=self.bucket_name, scope=self.scope_name,
params=self.scope_params)
self.state = CHECKING
task_manager.schedule(self)
# catch and set all unexpected exceptions
except Exception as e:
self.state = FINISHED
self.set_unexpected_exception(e)
def check(self, task_manager):
self.set_result(True)
self.state = FINISHED
task_manager.schedule(self)
class ScopeDeleteTask(Task):
def __init__(self, server, bucket, scope):
Task.__init__(self, "scope_delete_task")
self.server = server
self.bucket_name = bucket
self.scope_name = scope
def execute(self, task_manager):
try:
RestConnection(self.server)
except ServerUnavailableException as error:
self.state = FINISHED
self.set_exception(error)
return
try:
CollectionsRest(self.server).delete_scope(bucket=self.bucket_name, scope=self.scope_name)
self.state = CHECKING
task_manager.schedule(self)
# catch and set all unexpected exceptions
except Exception as e:
self.state = FINISHED
self.set_unexpected_exception(e)
def check(self, task_manager):
self.set_result(True)
self.state = FINISHED
task_manager.schedule(self)
class RebalanceTask(Task):
def __init__(self, servers, to_add=[], to_remove=[],
do_stop=False, progress=30,
use_hostnames=False, services=None,
sleep_before_rebalance=None):
Task.__init__(self, "rebalance_task")
self.servers = servers
self.to_add = to_add
self.to_remove = to_remove
self.start_time = None
if services is not None and not services:
services = ["kv"]
self.services = services
self.monitor_vbuckets_shuffling = False
self.sleep_before_rebalance = sleep_before_rebalance
try:
self.rest = RestConnection(self.servers[0])
except ServerUnavailableException as e:
self.log.error(e)
self.state = FINISHED
self.set_exception(e)
self.retry_get_progress = 0
self.use_hostnames = use_hostnames
self.previous_progress = 0
self.old_vbuckets = {}
def execute(self, task_manager):
try:
if len(self.to_add) and len(self.to_add) == len(self.to_remove):
node_version_check = self.rest.check_node_versions()
non_swap_servers = (node for node in self.servers if node not in self.to_add and node not in self.to_remove)
self.old_vbuckets = RestHelper(self.rest)._get_vbuckets(non_swap_servers, None)
if self.old_vbuckets:
self.monitor_vbuckets_shuffling = True
if self.monitor_vbuckets_shuffling and node_version_check and self.services:
for service_group in self.services:
if "kv" not in service_group:
self.monitor_vbuckets_shuffling = False
if self.monitor_vbuckets_shuffling and node_version_check:
services_map = self.rest.get_nodes_services()
for remove_node in self.to_remove:
key = "{0}:{1}".format(remove_node.ip, remove_node.port)
services = services_map[key]
if "kv" not in services:
self.monitor_vbuckets_shuffling = False
if self.monitor_vbuckets_shuffling:
self.log.info("This is swap rebalance and we will monitor vbuckets shuffling")
self.add_nodes(task_manager)
if self.sleep_before_rebalance:
self.log.info("Sleep {0}secs before rebalance_start"
.format(self.sleep_before_rebalance))
time.sleep(self.sleep_before_rebalance)
self.start_rebalance(task_manager)
self.state = CHECKING
task_manager.schedule(self)
except Exception as e:
self.state = FINISHED
traceback.print_exc()
self.set_exception(e)
def add_nodes(self, task_manager):
master = self.servers[0]
services_for_node = None
node_index = 0
for node in self.to_add:
self.log.info("adding node {0}:{1} to cluster".format(node.ip, node.port))
if self.services is not None:
services_for_node = [self.services[node_index]]
node_index += 1
if self.use_hostnames:
remote_ip = node.hostname
else:
remote_ip = node.cluster_ip
try:
self.rest.add_node(master.rest_username, master.rest_password,
remote_ip, node.port, services=services_for_node)
except ServerAlreadyJoinedException:
pass
def start_rebalance(self, task_manager):
nodes = self.rest.node_statuses()
# Determine whether it's a cluster_run or not
cluster_run = True
firstIp = self.servers[0].ip
if len(self.servers) == 1 and self.servers[0].port == '8091':
cluster_run = False
else:
for node in self.servers:
if node.ip != firstIp:
cluster_run = False
break
ejectedNodes = []
for server in self.to_remove:
for node in nodes:
if cluster_run:
if int(server.port) == int(node.port):
ejectedNodes.append(node.id)
else:
if self.use_hostnames:
if server.hostname == node.ip and int(server.port) == int(node.port):
ejectedNodes.append(node.id)
elif server.ip == node.ip and int(server.port) == int(node.port):
ejectedNodes.append(node.id)
if self.rest.is_cluster_mixed():
# workaround MB-8094
self.log.warning("cluster is mixed. sleep for 15 seconds before rebalance")
time.sleep(15)
self.rest.rebalance(otpNodes=[node.id for node in nodes], ejectedNodes=ejectedNodes)
self.start_time = time.time()
def check(self, task_manager):
status = None
progress = -100
try:
if self.monitor_vbuckets_shuffling:
self.log.info("This is swap rebalance and we will monitor vbuckets shuffling")
non_swap_servers = set(self.servers) - set(self.to_remove) - set(self.to_add)
new_vbuckets = RestHelper(self.rest)._get_vbuckets(non_swap_servers, None)
for vb_type in ["active_vb", "replica_vb"]:
for srv in non_swap_servers:
if not (len(self.old_vbuckets[srv][vb_type]) + 1 >= len(new_vbuckets[srv][vb_type]) and \
len(self.old_vbuckets[srv][vb_type]) - 1 <= len(new_vbuckets[srv][vb_type])):
msg = "Vbuckets were suffled! Expected %s for %s" % (vb_type, srv.ip) + \
" are %s. And now are %s" % (
len(self.old_vbuckets[srv][vb_type]),
len(new_vbuckets[srv][vb_type]))
self.log.error(msg)
self.log.error("Old vbuckets: %s, new vbuckets %s" % (self.old_vbuckets, new_vbuckets))
raise Exception(msg)
time.sleep(10)
(status, progress) = self.rest._rebalance_status_and_progress()
self.log.info("Rebalance - status: {}, progress: {:.02f}%".format(status, progress))
# a progress of -100 means the status call failed (e.g. ServerUnavailableException)
if progress == -100:
self.retry_get_progress += 1
if self.previous_progress != progress:
self.previous_progress = progress
self.retry_get_progress = 0
else:
self.retry_get_progress += 1
except RebalanceFailedException as ex:
self.state = FINISHED
self.set_exception(ex)
self.retry_get_progress += 1
# catch and set all unexpected exceptions
except Exception as e:
self.state = FINISHED
self.set_unexpected_exception(e, " in {0} sec".format(time.time() - self.start_time))
retry_get_process_num = 300
if self.rest.is_cluster_mixed(timeout=300): # See MB-40670
""" for mix cluster, rebalance takes longer """
self.log.info("rebalance in mix cluster")
retry_get_process_num = 40
# we need to wait for status to be 'none' (i.e. rebalance actually finished and
# not just 'running' and at 100%) before we declare ourselves done
if progress != -1 and status != 'none':
if self.retry_get_progress < retry_get_process_num:
task_manager.schedule(self, 10)
else:
self.state = FINISHED
# self.set_result(False)
self.rest.print_UI_logs()
self.set_exception(RebalanceFailedException( \
"seems like rebalance hangs. please check logs!"))
else:
success_cleaned = []
for removed in self.to_remove:
try:
rest = RestConnection(removed)
except ServerUnavailableException as e:
self.log.error(e)
continue
start = time.time()
while time.time() - start < 30:
try:
if 'pools' in rest.get_pools_info() and \
(len(rest.get_pools_info()["pools"]) == 0):
success_cleaned.append(removed)
break
else:
time.sleep(0.1)
except (ServerUnavailableException, IncompleteRead) as e:
self.log.error(e)
result = True
for node in set(self.to_remove) - set(success_cleaned):
self.log.error("node {0}:{1} was not cleaned after removing from cluster" \
.format(node.ip, node.port))
result = False
self.log.info("rebalancing was completed with progress: {0}% in {1} sec".
format(progress, time.time() - self.start_time))
for added in self.to_add:
if added.internal_ip:
self.log.info("Adding alternate address {} after rebalance in using internal ip {}".format(added.ip, added.internal_ip))
rest = RestConnection(added)
rest.set_alternate_address(added.ip)
self.state = FINISHED
self.set_result(result)
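# Usage sketch (the server objects and task-manager call are illustrative): passing equal
# numbers of nodes in to_add and to_remove makes this a swap rebalance, which enables the
# vbucket-shuffling checks in check() above.
#   task = RebalanceTask(servers=cluster_nodes, to_add=[new_node], to_remove=[old_node],
#                        services=["kv"])
#   task_manager.schedule(task)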
class StatsWaitTask(Task):
EQUAL = '=='
NOT_EQUAL = '!='
LESS_THAN = '<'
LESS_THAN_EQ = '<='
GREATER_THAN = '>'
GREATER_THAN_EQ = '>='
def __init__(self, servers, bucket, param, stat, comparison, value, scope=None, collection=None):
Task.__init__(self, "stats_wait_task")
self.servers = servers
self.bucket = bucket
if isinstance(bucket, Bucket):
self.bucket = bucket.name
self.param = param
self.stat = stat
self.comparison = comparison
self.value = value
self.conns = {}
self.scope = scope
self.collection = collection
def execute(self, task_manager):
self.state = CHECKING
task_manager.schedule(self)
def check(self, task_manager):
stat_result = 0
for server in self.servers:
try:
shell = RemoteMachineShellConnection(server)
cbstat = Cbstats(shell)
stats = cbstat.all_stats(self.bucket, stat_name=self.param)
if self.stat not in stats:
self.state = FINISHED
self.set_exception(Exception("Stat {0} not found".format(self.stat)))
shell.disconnect()
return
if stats[self.stat].isdigit():
stat_result += int(stats[self.stat])
else:
stat_result = stats[self.stat]
shell.disconnect()
except EOFError as ex:
self.state = FINISHED
self.set_exception(ex)
shell.disconnect()
return
if not self._compare(self.comparison, str(stat_result), self.value):
self.log.warning("Not Ready: %s %s %s %s expected on %s, %s bucket" % (self.stat, stat_result,
self.comparison, self.value,
self._stringify_servers(),
self.bucket))
task_manager.schedule(self, 5)
return
self.log.info("Saw %s %s %s %s expected on %s,%s bucket" % (self.stat, stat_result,
self.comparison, self.value,
self._stringify_servers(), self.bucket))
for server, conn in list(self.conns.items()):
conn.close()
self.state = FINISHED
self.set_result(True)
def _stringify_servers(self):
return ''.join([repr(server.ip + ":" + str(server.port)) for server in self.servers])
def _get_connection(self, server, admin_user='cbadminbucket', admin_pass='password'):
if server not in self.conns:
for i in range(3):
try:
self.conns[server] = MemcachedClientHelper.direct_client(server, self.bucket, admin_user=admin_user,
admin_pass=admin_pass)
return self.conns[server]
except (EOFError, socket.error):
self.log.error("failed to create direct client, retry in 1 sec")
time.sleep(1)
self.conns[server] = MemcachedClientHelper.direct_client(server, self.bucket, admin_user=admin_user,
admin_pass=admin_pass)
return self.conns[server]
def _compare(self, cmp_type, a, b):
if isinstance(b, int) and a.isdigit():
a = int(a)
elif isinstance(b, int) and not a.isdigit():
return False
if (cmp_type == StatsWaitTask.EQUAL and a == b) or \
(cmp_type == StatsWaitTask.NOT_EQUAL and a != b) or \
(cmp_type == StatsWaitTask.LESS_THAN_EQ and a <= b) or \
(cmp_type == StatsWaitTask.GREATER_THAN_EQ and a >= b) or \
(cmp_type == StatsWaitTask.LESS_THAN and a < b) or \
(cmp_type == StatsWaitTask.GREATER_THAN and a > b):
return True
return False
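# Example (illustrative; stat names depend on what cbstats exposes): wait until the total
# curr_items across all servers reaches at least 10000 for the "default" bucket.
#   task = StatsWaitTask(servers, "default", "all", "curr_items",
#                        StatsWaitTask.GREATER_THAN_EQ, 10000)
#   task_manager.schedule(task)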
class XdcrStatsWaitTask(StatsWaitTask):
def __init__(self, servers, bucket, param, stat, comparison, value, scope=None, collection=None):
StatsWaitTask.__init__(self, servers, bucket, param, stat, comparison, value, scope, collection)
def check(self, task_manager):
stat_result = 0
for server in self.servers:
try:
rest = RestConnection(server)
stat = 'replications/' + rest.get_replication_for_buckets(self.bucket, self.bucket)[
'id'] + '/' + self.stat
# just get the required value; don't fetch the whole stats structure
stats_value = rest.fetch_bucket_xdcr_stats(self.bucket)['op']['samples'][stat][-1]
stat_result += int(stats_value)
except (EOFError, Exception) as ex:
self.state = FINISHED
self.set_exception(ex)
return
if not self._compare(self.comparison, str(stat_result), self.value):
self.log.warning("Not Ready: %s %s %s %s expected on %s, %s bucket" % (self.stat, stat_result,
self.comparison, self.value,
self._stringify_servers(),
self.bucket))
task_manager.schedule(self, 5)
return
self.log.info("Saw %s %s %s %s expected on %s,%s bucket" % (self.stat, stat_result,
self.comparison, self.value,
self._stringify_servers(), self.bucket))
for server, conn in list(self.conns.items()):
conn.close()
self.state = FINISHED
self.set_result(True)
class GenericLoadingTask(Thread, Task):
def __init__(self, server, bucket, kv_store, batch_size=1, pause_secs=1, timeout_secs=60, compression=True,
scope=None, collection=None):
Thread.__init__(self)
Task.__init__(self, "load_gen_task")
self.kv_store = kv_store
self.batch_size = batch_size
self.pause = pause_secs
self.timeout = timeout_secs
self.server = server
self.bucket = bucket
self.collection = collection
self.scope = scope
if CHECK_FLAG:
self.client = VBucketAwareMemcached(RestConnection(server), bucket)
else:
self.client = VBucketAwareMemcached(RestConnection(server), bucket, compression=compression)
self.process_concurrency = THROUGHPUT_CONCURRENCY
# task queue's for synchronization
process_manager = Manager()
self.wait_queue = process_manager.Queue()
self.shared_kvstore_queue = process_manager.Queue()
def execute(self, task_manager):
self.start()
self.state = EXECUTING
def check(self, task_manager):
pass
def run(self):
while self.has_next() and not self.done():
next(self)
self.state = FINISHED
self.set_result(True)
def has_next(self):
raise NotImplementedError
def __next__(self):
raise NotImplementedError
def _unlocked_create(self, partition, key, value, is_base64_value=False):
try:
value_json = json.loads(value)
if isinstance(value_json, dict):
value_json['mutated'] = 0
value = json.dumps(value_json)
except ValueError:
index = random.choice(list(range(len(value))))
if not is_base64_value:
value = value[0:index] + random.choice(string.ascii_uppercase) + value[index + 1:]
except TypeError:
value = json.dumps(value)
try:
self.client.set(key, self.exp, self.flag, value, scope=self.scope, collection=self.collection)
if self.only_store_hash:
value = str(crc32.crc32_hash(value))
partition.set(key, value, self.exp, self.flag)
except Exception as error:
self.state = FINISHED
self.set_exception(error)
def _unlocked_read(self, partition, key):
try:
o, c, d = self.client.get(key, scope=self.scope, collection=self.collection)
except MemcachedError as error:
if error.status == ERR_NOT_FOUND and partition.get_valid(key) is None:
pass
else:
self.state = FINISHED
self.set_exception(error)
def _unlocked_replica_read(self, partition, key):
try:
o, c, d = self.client.getr(key, scope=self.scope, collection=self.collection)
except Exception as error:
self.state = FINISHED
self.set_exception(error)
def _unlocked_update(self, partition, key):
value = None
try:
o, c, value = self.client.get(key, scope=self.scope, collection=self.collection)
if value is None:
return
value_json = json.loads(value)
value_json['mutated'] += 1
value = json.dumps(value_json)
except MemcachedError as error:
if error.status == ERR_NOT_FOUND and partition.get_valid(key) is None:
# there is no such item, we do not know what value to set
return
else:
self.state = FINISHED
self.log.error("%s, key: %s update operation." % (error, key))
self.set_exception(error)
return
except (ValueError, json.JSONDecodeError) as e:
if value is None:
return
index = random.choice(list(range(len(value))))
value = value[0:index] + random.choice(string.ascii_uppercase).encode() + value[index + 1:]
except BaseException as error:
self.state = FINISHED
self.set_exception(error)
try:
self.client.set(key, self.exp, self.flag, value, scope=self.scope, collection=self.collection)
if self.only_store_hash:
if value != None:
value = str(crc32.crc32_hash(value))
partition.set(key, value, self.exp, self.flag)
except BaseException as error:
self.state = FINISHED
self.set_exception(error)
def _unlocked_delete(self, partition, key):
try:
self.client.delete(key, scope=self.scope, collection=self.collection)
partition.delete(key)
except MemcachedError as error:
if error.status == ERR_NOT_FOUND and partition.get_valid(key) is None:
pass
else:
self.state = FINISHED
self.log.error("%s, key: %s delete operation." % (error, key))
self.set_exception(error)
except BaseException as error:
self.state = FINISHED
self.set_exception(error)
def _unlocked_append(self, partition, key, value):
try:
o, c, old_value = self.client.get(key, scope=self.scope, collection=self.collection)
if value is None:
return
value_json = json.loads(value)
old_value_json = json.loads(old_value)
old_value_json.update(value_json)
old_value = json.dumps(old_value_json)
value = json.dumps(value_json)
except MemcachedError as error:
if error.status == ERR_NOT_FOUND and partition.get_valid(key) is None:
# there is no such item, we do not know what value to set
return
else:
self.state = FINISHED
self.set_exception(error)
return
except ValueError:
o, c, old_value = self.client.get(key, scope=self.scope, collection=self.collection)
index = random.choice(list(range(len(value))))
value = value[0:index] + random.choice(string.ascii_uppercase) + value[index + 1:]
old_value += value
except BaseException as error:
self.state = FINISHED
self.set_exception(error)
try:
self.client.append(key, value, scope=self.scope, collection=self.collection)
if self.only_store_hash:
old_value = str(crc32.crc32_hash(old_value))
partition.set(key, old_value)
except BaseException as error:
self.state = FINISHED
self.set_exception(error)
# start of batch methods
def _create_batch_client(self, key_val, shared_client=None):
"""
standalone method for creating key/values in batch (sans kvstore)
arguments:
key_val -- dict of keys/values to load; size = self.batch_size
shared_client -- optional client to use for data loading
"""
try:
self._process_values_for_create(key_val)
client = shared_client or self.client
client.setMulti(self.exp, self.flag, key_val, self.pause, self.timeout, parallel=False,
scope=self.scope, collection=self.collection)
except (
MemcachedError, ServerUnavailableException, socket.error, EOFError, AttributeError,
RuntimeError) as error:
self.state = FINISHED
self.set_exception(error)
def _create_batch(self, partition_keys_dic, key_val):
self._create_batch_client(key_val)
self._populate_kvstore(partition_keys_dic, key_val)
def _update_batch(self, partition_keys_dic, key_val):
try:
self._process_values_for_update(partition_keys_dic, key_val)
self.client.setMulti(self.exp, self.flag, key_val, self.pause, self.timeout, parallel=False,
scope=self.scope, collection=self.collection)
self._populate_kvstore(partition_keys_dic, key_val)
except (
MemcachedError, ServerUnavailableException, socket.error, EOFError, AttributeError,
RuntimeError) as error:
self.state = FINISHED
self.set_exception(error)
def _delete_batch(self, partition_keys_dic, key_val):
for partition, keys in list(partition_keys_dic.items()):
for key in keys:
try:
self.client.delete(key, scope=self.scope, collection=self.collection)
partition.delete(key)
except MemcachedError as error:
if error.status == ERR_NOT_FOUND and partition.get_valid(key) is None:
pass
else:
self.state = FINISHED
self.set_exception(error)
return
except (ServerUnavailableException, socket.error, EOFError, AttributeError) as error:
self.state = FINISHED
self.set_exception(error)
def _read_batch(self, partition_keys_dic, key_val):
try:
self.client.getMulti(list(key_val.keys()), self.pause, self.timeout, scope=self.scope,
collection=self.collection)
# print "the key is {} from collection {}".format(c, collection)
except MemcachedError as error:
self.state = FINISHED
self.set_exception(error)
def _process_values_for_create(self, key_val):
for key, value in list(key_val.items()):
try:
value_json = json.loads(value)
value_json['mutated'] = 0
value = json.dumps(value_json)
except ValueError:
index = random.choice(list(range(len(value))))
value = value[0:index] + random.choice(string.ascii_uppercase) + value[index + 1:]
except TypeError:
value = json.dumps(value)
finally:
key_val[key] = value
def _process_values_for_update(self, partition_keys_dic, key_val):
for partition, keys in list(partition_keys_dic.items()):
for key in keys:
value = partition.get_valid(key)
if value is None:
del key_val[key]
continue
try:
value = key_val[
key]  # new updated value; note this is not present in the original "LoadDocumentsTask" code
value_json = json.loads(value)
value_json['mutated'] += 1
value = json.dumps(value_json)
except ValueError:
index = random.choice(list(range(len(value))))
value = value[0:index] + random.choice(string.ascii_uppercase) + value[index + 1:]
finally:
key_val[key] = value
def _populate_kvstore(self, partition_keys_dic, key_val):
for partition, keys in list(partition_keys_dic.items()):
self._populate_kvstore_partition(partition, keys, key_val)
def _release_locks_on_kvstore(self):
for part in self._partitions_keyvals_dic.keys():
self.kv_store.release_lock(part)
def _populate_kvstore_partition(self, partition, keys, key_val):
for key in keys:
if self.only_store_hash:
key_val[key] = str(crc32.crc32_hash(key_val[key]))
partition.set(key, key_val[key], self.exp, self.flag)
class LoadDocumentsTask(GenericLoadingTask):
def __init__(self, server, bucket, generator, kv_store, op_type, exp, flag=0,
only_store_hash=True, proxy_client=None, batch_size=1, pause_secs=1, timeout_secs=30,
compression=True, scope=None, collection=None):
GenericLoadingTask.__init__(self, server, bucket, kv_store, batch_size=batch_size, pause_secs=pause_secs,
timeout_secs=timeout_secs, compression=compression, scope=scope,
collection=collection)
self.generator = generator
self.op_type = op_type
self.exp = exp
self.flag = flag
self.only_store_hash = only_store_hash
self.scope = scope
self.collection = collection
if proxy_client:
self.log.info("Changing client to proxy %s:%s..." % (proxy_client.host,
proxy_client.port))
self.client = proxy_client
def has_next(self):
return self.generator.has_next()
def next(self, override_generator=None):
if self.batch_size == 1:
key, value = next(self.generator)
partition = self.kv_store.acquire_partition(key, self.bucket, self.scope, self.collection)
if self.op_type == 'create':
is_base64_value = (self.generator.__class__.__name__ == 'Base64Generator')
self._unlocked_create(partition, key, value, is_base64_value=is_base64_value)
elif self.op_type == 'read':
self._unlocked_read(partition, key)
elif self.op_type == 'read_replica':
self._unlocked_replica_read(partition, key)
elif self.op_type == 'update':
self._unlocked_update(partition, key)
elif self.op_type == 'delete':
self._unlocked_delete(partition, key)
elif self.op_type == 'append':
self._unlocked_append(partition, key, value)
else:
self.state = FINISHED
self.set_exception(Exception("Bad operation type: %s" % self.op_type))
self.kv_store.release_partition(key, self.bucket, self.scope, self.collection)
else:
doc_gen = override_generator or self.generator
key_value = doc_gen.next_batch()
partition_keys_dic = self.kv_store.acquire_partitions(list(key_value.keys()), self.bucket,
self.scope, self.collection)
if self.op_type == 'create':
self._create_batch(partition_keys_dic, key_value)
elif self.op_type == 'update':
self._update_batch(partition_keys_dic, key_value)
elif self.op_type == 'delete':
self._delete_batch(partition_keys_dic, key_value)
elif self.op_type == 'read':
self._read_batch(partition_keys_dic, key_value)
else:
self.state = FINISHED
self.set_exception(Exception("Bad operation type: %s" % self.op_type))
self.kv_store.release_partitions(list(partition_keys_dic.keys()))
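# Usage sketch (the generator and task-manager objects are assumptions for illustration;
# doc generators come from couchbase_helper.documentgenerator):
#   task = LoadDocumentsTask(server, bucket, doc_gen, kv_store, 'create', exp=0,
#                            batch_size=100, scope='scope1', collection='collection1')
#   task_manager.schedule(task)  # op_type may be create/read/read_replica/update/delete/append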
class LoadDocumentsGeneratorsTask(LoadDocumentsTask):
def __init__(self, server, bucket, generators, kv_store, op_type, exp, flag=0, only_store_hash=True,
batch_size=1, pause_secs=1, timeout_secs=60, compression=True, scope=None, collection=None):
LoadDocumentsTask.__init__(self, server, bucket, generators[0], kv_store, op_type, exp, flag=flag,
only_store_hash=only_store_hash, batch_size=batch_size, pause_secs=pause_secs,
timeout_secs=timeout_secs, compression=compression, scope=scope,
collection=collection)
if batch_size == 1:
self.generators = generators
else:
self.generators = []
for i in generators:
if i.isGenerator():
self.generators.append(BatchedDocumentGenerator(i, batch_size))
else:
self.generators.append(i)
# only run high throughput for batch-create workloads
# also check number of input generators isn't greater than
# process_concurrency as too many generators become inefficient
self.is_high_throughput_mode = False
if ALLOW_HTP and not TestInputSingleton.input.param("disable_HTP", False):
self.is_high_throughput_mode = self.op_type == "create" and \
self.batch_size > 1 and \
len(self.generators) < self.process_concurrency
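# Illustration of the mode switch above (THROUGHPUT_CONCURRENCY comes from testconstants;
# the numbers are assumptions): with op_type == "create", batch_size == 100 and, say, 4
# input generators against a process_concurrency of 8, the task takes the high-throughput
# path and run_high_throughput_mode() splits every generator range across worker processes.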
self.input_generators = generators
self.bucket = bucket
self.op_types = None
self.buckets = None
if isinstance(op_type, list):
self.op_types = op_type
if isinstance(bucket, list):
self.buckets = bucket
self.compression = compression
self.scope = scope
self.collection = collection
def run(self):
if self.op_types:
if len(self.op_types) != len(self.generators):
self.state = FINISHED
self.set_exception(Exception("not all generators have op_type!"))
if self.buckets:
if len(self.op_types) != len(self.buckets):
self.state = FINISHED
self.set_exception(Exception("not all generators have bucket specified!"))
# check if running in high throughput mode or normal
if self.is_high_throughput_mode:
self.run_high_throughput_mode()
else:
self.run_normal_throughput_mode()
self.state = FINISHED
self.set_result(True)
def run_normal_throughput_mode(self):
iterator = 0
for generator in self.generators:
self.generator = generator
if self.op_types:
self.op_type = self.op_types[iterator]
if self.buckets:
self.bucket = self.buckets[iterator]
while self.has_next() and not self.done():
self.next()
iterator += 1
def run_high_throughput_mode(self):
# high throughput mode requires partitioning the doc generators
self.generators = []
for gen in self.input_generators:
gen_start = int(gen.start)
gen_end = max(int(gen.end), 1)
gen_range = max(int(gen.end / self.process_concurrency), 1)
for pos in range(gen_start, gen_end, gen_range):
try:
partition_gen = copy.deepcopy(gen)
partition_gen.start = pos
partition_gen.itr = pos
partition_gen.end = pos + gen_range
if partition_gen.end > gen.end:
partition_gen.end = gen.end
batch_gen = BatchedDocumentGenerator(
partition_gen,
self.batch_size)
self.generators.append(batch_gen)
except Exception as e:
traceback.print_exc()
iterator = 0
all_processes = []
for generator in self.generators:
# only start processing when there are resources available
CONCURRENCY_LOCK.acquire()
# add child process to wait queue
self.wait_queue.put(iterator + 1)
generator_process = Process(
target=self.run_generator,
args=(generator, iterator))
generator_process.start()
iterator += 1
all_processes.append(generator_process)
# wait for all child processes to finish
self.wait_queue.join()
# merge kvstore partitions
while self.shared_kvstore_queue.empty() is False:
# get partitions created by child process
rv = self.shared_kvstore_queue.get()
if rv["err"] is not None:
self.state = FINISHED
self.set_exception(rv["err"])
return
# merge child partitions with parent
generator_partitions = rv["partitions"]
self.kv_store.merge_partitions(generator_partitions)
# terminate child process
iterator -= 1
all_processes[iterator].terminate()
def run_generator(self, generator, iterator):
tmp_kv_store = KVStore()
rv = {"err": None, "partitions": None}
try:
if CHECK_FLAG:
client = VBucketAwareMemcached(
RestConnection(self.server),
self.bucket)
else:
client = VBucketAwareMemcached(
RestConnection(self.server),
self.bucket, compression=self.compression)
try:
if self.op_types:
self.op_type = self.op_types[iterator]
if self.buckets:
self.bucket = self.buckets[iterator]
while generator.has_next() and not self.done():
# generate
key_value = generator.next_batch()
# create
self._create_batch_client(key_value, client)
# cache
self.cache_items(tmp_kv_store, key_value)
except Exception as e:
traceback.print_exc()
except Exception as ex:
rv["err"] = ex
else:
rv["partitions"] = tmp_kv_store.get_partitions()
finally:
# share the kvstore from this generator
self.shared_kvstore_queue.put(rv)
self.wait_queue.task_done()
# release concurrency lock
CONCURRENCY_LOCK.release()
def cache_items(self, store, key_value):
"""
unpacks keys/values and adds them to the provided store
"""
for key, value in key_value.items():
if self.only_store_hash:
value = str(crc32.crc32_hash(value))
partition = store.partition(key, self.scope, self.collection, self.bucket)
partition["partition"].set(
key,
value,
self.exp,
self.flag)
class ESLoadGeneratorTask(Task):
"""
Class to load/update/delete documents into/from Elastic Search
"""
def __init__(self, es_instance, index_name, generator, op_type="create", scope=None, collection=None):
Task.__init__(self, "ES_loader_task")
self.es_instance = es_instance
self.index_name = index_name
self.generator = generator
self.iterator = 0
self.scope = scope
self.collection = collection
self.log.info("Starting to load data into Elastic Search ...")
def check(self, task_manager):
self.state = FINISHED
self.set_result(True)
def execute(self, task_manager):
for key, doc in self.generator:
doc = json.loads(doc)
self.es_instance.load_data(self.index_name,
json.dumps(doc),
doc['type'],
key, self.scope, self.collection)
self.iterator += 1
if math.fmod(self.iterator, 500) == 0.0:
self.log.info("{0} documents loaded into ES".
format(self.iterator))
self.state = FINISHED
self.set_result(True)
class ESBulkLoadGeneratorTask(Task):
"""
Class to load/update/delete documents into/from Elastic Search
"""
def __init__(self, es_instance, index_name, generator, op_type="create",
batch=1000, scope=None, collection=None):
Task.__init__(self, "ES_loader_task")
self.es_instance = es_instance
self.index_name = index_name
self.generator = generator
self.iterator = 0
self.op_type = op_type
self.batch_size = batch
self.scope = scope
self.collection = collection
self.log.info("Starting operation '%s' on Elastic Search ..." % op_type)
def check(self, task_manager):
self.state = FINISHED
self.set_result(True)
def execute(self, task_manager):
es_filename = "/tmp/es_bulk.txt"
es_bulk_docs = []
loaded = 0
batched = 0
for key, doc in self.generator:
doc = json.loads(doc)
es_doc = {
self.op_type: {
"_index": self.index_name,
"_type": doc['type'],
"_id": key,
}
}
es_bulk_docs.append(json.dumps(es_doc))
if self.op_type == "create":
es_bulk_docs.append(json.dumps(doc))
elif self.op_type == "update":
doc['mutated'] += 1
es_bulk_docs.append(json.dumps({"doc": doc}))
batched += 1
if batched == self.batch_size or not self.generator.has_next():
es_file = open(es_filename, "wb")
for line in es_bulk_docs:
es_file.write("{}\n".format(line).encode())
es_file.close()
self.es_instance.load_bulk_data(es_filename)
loaded += batched
self.log.info("{0} documents bulk loaded into ES".format(loaded))
self.es_instance.update_index(self.index_name)
batched = 0
indexed = self.es_instance.get_index_count(self.index_name)
self.log.info("ES index count for '{0}': {1}".
format(self.index_name, indexed))
self.state = FINISHED
self.set_result(True)
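# The bulk file written above follows the Elasticsearch bulk format: an action line
# followed by a document line (values are illustrative):
#   {"create": {"_index": "es_index", "_type": "emp", "_id": "doc_0"}}
#   {"name": "a", "mutated": 0, "type": "emp"}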
class ESRunQueryCompare(Task):
def __init__(self, fts_index, es_instance, query_index, es_index_name=None, n1ql_executor=None,
use_collections=False):
Task.__init__(self, "Query_runner_task")
self.fts_index = fts_index
self.fts_query = fts_index.fts_queries[query_index]
self.es = es_instance
if self.es:
self.es_query = es_instance.es_queries[query_index]
self.max_verify = None
self.show_results = False
self.query_index = query_index
self.passed = True
self.es_index_name = es_index_name or "es_index"
self.n1ql_executor = n1ql_executor
self.score = TestInputSingleton.input.param("score",'')
self.use_collections = use_collections
def check(self, task_manager):
self.state = FINISHED
self.set_result(self.result)
def execute(self, task_manager):
self.es_compare = True
should_verify_n1ql = True
try:
self.log.info("---------------------------------------"
"-------------- Query # %s -------------"
"---------------------------------------"
% str(self.query_index + 1))
try:
fts_hits, fts_doc_ids, fts_time, fts_status = \
self.run_fts_query(self.fts_query, self.score)
self.log.info("Status: %s" % fts_status)
if fts_status == 'fail':
error = fts_doc_ids
if "err: TooManyClauses over field" in str(error):
self.log.info("FTS chose not to run this big query"
"...skipping ES validation")
self.passed = True
self.es_compare = False
should_verify_n1ql = False
elif fts_hits < 0:
self.passed = False
elif 'errors' in list(fts_status.keys()) and fts_status['errors']:
if fts_status['successful'] == 0 and \
(list(set(fts_status['errors'].values())) ==
['context deadline exceeded'] or
"TooManyClauses" in str(list(set(fts_status['errors'].values())))):
# too many clauses in the query for fts to process
self.log.info("FTS chose not to run this big query"
"...skipping ES validation")
self.passed = True
self.es_compare = False
should_verify_n1ql = False
elif 0 < fts_status['successful'] < \
self.fts_index.num_pindexes:
# partial results
self.log.info("FTS returned partial results..."
"skipping ES validation")
self.passed = True
self.es_compare = False
self.log.info("FTS hits for query: %s is %s (took %sms)" % \
(json.dumps(self.fts_query, ensure_ascii=False),
fts_hits,
float(fts_time) / 1000000))
except ServerUnavailableException:
self.log.error("ERROR: FTS Query timed out (client timeout=70s)!")
self.passed = False
es_hits = 0
if self.es and self.es_query:
es_hits, es_doc_ids, es_time = self.run_es_query(self.es_query)
self.log.info("ES hits for query: %s on %s is %s (took %sms)" % \
(json.dumps(self.es_query, ensure_ascii=False),
self.es_index_name,
es_hits,
es_time))
if self.passed and self.es_compare:
if int(es_hits) != int(fts_hits):
msg = "FAIL: FTS hits: %s, while ES hits: %s" \
% (fts_hits, es_hits)
self.log.error(msg)
es_but_not_fts = list(set(es_doc_ids) - set(fts_doc_ids))
fts_but_not_es = list(set(fts_doc_ids) - set(es_doc_ids))
if not (es_but_not_fts or fts_but_not_es):
self.log.info("SUCCESS: Docs returned by FTS = docs"
" returned by ES, doc_ids verified")
else:
if fts_but_not_es:
msg = "FAIL: Following %s doc(s) were not returned" \
" by ES,but FTS, printing 50: %s" \
% (len(fts_but_not_es), fts_but_not_es[:50])
else:
msg = "FAIL: Following %s docs were not returned" \
" by FTS, but ES, printing 50: %s" \
% (len(es_but_not_fts), es_but_not_fts[:50])
self.log.error(msg)
self.passed = False
if fts_hits <= 0 and es_hits == 0:
should_verify_n1ql = False
if self.n1ql_executor and should_verify_n1ql:
if self.fts_index.dataset == 'all':
query_type = 'emp'
if int(TestInputSingleton.input.param("doc_maps", 1)) > 1:
query_type = 'wiki'
wiki_fields = ["revision.text", "title"]
if any(field in str(json.dumps(self.fts_query)) for field in wiki_fields):
query_type = 'wiki'
else:
query_type = self.fts_index.dataset
geo_strings = ['"field": "geo"']
if any(geo_str in str(json.dumps(self.fts_query)) for geo_str in geo_strings):
query_type = 'earthquake'
if self.use_collections:
kv_container = "default:default.scope1.collection1"
else:
kv_container = "default"
n1ql_queries = [f"select meta().id from {kv_container} where type='" + str(
query_type) + "' and search(default, " + str(
json.dumps(self.fts_query, ensure_ascii=False)) + ")", f"select meta().id from {kv_container} where type='" + str(
query_type) + "' and search(default, " + str(
json.dumps(self.fts_query, ensure_ascii=False)) + ",{\"index\": \"" + self.fts_index.name + "\"})", f"select meta().id,* from {kv_container} where type='" + str(
query_type) + "' and search(default, " + str(
json.dumps(self.fts_query, ensure_ascii=False)) + ",{\"index\": \"" + self.fts_index.name + "\"})"]
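# Example of a generated query (values are illustrative):
#   select meta().id from default where type='emp' and
#     search(default, {"match": "...", "field": "name"}, {"index": "<fts_index_name>"})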
for n1ql_query in n1ql_queries:
if ("disjuncts" not in n1ql_query and "-" not in n1ql_query) or "\"index\"" in n1ql_query:
self.log.info("Running N1QL query: " + str(n1ql_query))
n1ql_result = self.n1ql_executor.run_n1ql_query(query=n1ql_query)
if n1ql_result['status'] == 'success':
n1ql_hits = n1ql_result['metrics']['resultCount']
n1ql_doc_ids = []
for res in n1ql_result['results']:
n1ql_doc_ids.append(res['id'])
n1ql_time = n1ql_result['metrics']['elapsedTime']
self.log.info("N1QL hits for query: %s is %s (took %s)" % \
(json.dumps(n1ql_query, ensure_ascii=False),
n1ql_hits,
n1ql_time))
if self.passed:
if int(n1ql_hits) != int(fts_hits):
msg = "FAIL: FTS hits: %s, while N1QL hits: %s" \
% (fts_hits, n1ql_hits)
self.log.error(msg)
n1ql_but_not_fts = list(set(n1ql_doc_ids) - set(fts_doc_ids))
fts_but_not_n1ql = list(set(fts_doc_ids) - set(n1ql_doc_ids))
if not (n1ql_but_not_fts or fts_but_not_n1ql):
self.log.info("SUCCESS: Docs returned by FTS = docs"
" returned by N1QL, doc_ids verified")
else:
if fts_but_not_n1ql:
msg = "FAIL: Following %s doc(s) were not returned" \
" by N1QL,but FTS, printing 50: %s" \
% (len(fts_but_not_n1ql), fts_but_not_n1ql[:50])
else:
msg = "FAIL: Following %s docs were not returned" \
" by FTS, but N1QL, printing 50: %s" \
% (len(n1ql_but_not_fts), n1ql_but_not_fts[:50])
self.log.error(msg)
self.passed = False
else:
self.passed = False
self.log.info("N1QL query execution is failed.")
self.log.error(n1ql_result["errors"][0]['msg'])
self.state = CHECKING
task_manager.schedule(self)
if not should_verify_n1ql and self.n1ql_executor:
self.log.info("Skipping N1QL result validation since FTS results are - " + str(
fts_hits) + " and es results are - " + str(es_hits) + ".")
except Exception as e:
self.log.error(e)
self.set_exception(e)
self.state = FINISHED
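# Thin wrappers around the FTS index and ES client query calls used by execute() above.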
def run_fts_query(self, query, score=''):
return self.fts_index.execute_query(query, score=score)
def run_es_query(self, query):
return self.es.search(index_name=self.es_index_name, query=query)
# This will become obsolete once batch operations are implemented in LoadDocumentsTask
class BatchedLoadDocumentsTask(GenericLoadingTask):
def __init__(self, server, bucket, generator, kv_store, op_type, exp, flag=0, only_store_hash=True,
batch_size=100, pause_secs=1, timeout_secs=60, compression=True, scope=None, collection=None):
GenericLoadingTask.__init__(self, server, bucket, kv_store, compression=compression, scope=scope,
collection=collection)
self.batch_generator = BatchedDocumentGenerator(generator, batch_size)
self.op_type = op_type
self.exp = exp
self.flag = flag
self.only_store_hash = only_store_hash
self.batch_size = batch_size
self.pause = pause_secs
self.timeout = timeout_secs
self.bucket = bucket
self.server = server
self.scope=scope
self.collection = collection
def has_next(self):
has = self.batch_generator.has_next()
if math.fmod(self.batch_generator._doc_gen.itr, 50000) == 0.0 or not has:
self.log.info("Batch {0} documents queued #: {1} with exp:{2} @ {3}, bucket {4}". \
format(self.op_type,
(self.batch_generator._doc_gen.itr - self.batch_generator._doc_gen.start),
self.exp,
self.server.ip,
self.bucket))
return has
def __next__(self):
key_value = self.batch_generator.next_batch()
partition_keys_dic = self.kv_store.acquire_partitions(list(key_value.keys()), self.bucket, self.scope,
self.collection)
if self.op_type == 'create':
self._create_batch(partition_keys_dic, key_value)
elif self.op_type == 'update':
self._update_batch(partition_keys_dic, key_value)
elif self.op_type == 'delete':
self._delete_batch(partition_keys_dic, key_value)
elif self.op_type == 'read':
self._read_batch(partition_keys_dic, key_value)
else:
self.state = FINISHED
self.set_exception(Exception("Bad operation type: %s" % self.op_type))
self.kv_store.release_partitions(list(partition_keys_dic.keys()), self.scope, self.collection)
def _create_batch(self, partition_keys_dic, key_val):
try:
self._process_values_for_create(key_val)
self.client.setMulti(self.exp, self.flag, key_val, self.pause, self.timeout, parallel=False,
scope=self.scope, collection=self.collection)
self._populate_kvstore(partition_keys_dic, key_val)
except (
MemcachedError, ServerUnavailableException, socket.error, EOFError, AttributeError,
RuntimeError) as error:
self.state = FINISHED
self.set_exception(error)
def _update_batch(self, partition_keys_dic, key_val):
try:
self._process_values_for_update(partition_keys_dic, key_val)
self.client.setMulti(self.exp, self.flag, key_val, self.pause, self.timeout, parallel=False,
scope=self.scope, collection=self.collection)
self._populate_kvstore(partition_keys_dic, key_val)
except (
MemcachedError, ServerUnavailableException, socket.error, EOFError, AttributeError,
RuntimeError) as error:
self.state = FINISHED
self.set_exception(error)
def _delete_batch(self, partition_keys_dic, key_val):
for partition, keys in list(partition_keys_dic.items()):
for key in keys:
try:
self.client.delete(key, scope=self.scope, collection=self.collection)
partition.delete(key)
except MemcachedError as error:
if error.status == ERR_NOT_FOUND and partition.get_valid(key) is None:
pass
else:
self.state = FINISHED
self.set_exception(error)
return
except (ServerUnavailableException, socket.error, EOFError, AttributeError) as error:
self.state = FINISHED
self.set_exception(error)
def _read_batch(self, partition_keys_dic, key_val):
try:
self.client.getMulti(list(key_val.keys()), self.pause, self.timeout, scope=self.scope,
collection=self.collection)
except MemcachedError as error:
self.state = FINISHED
self.set_exception(error)
def _process_values_for_create(self, key_val):
for key, value in list(key_val.items()):
try:
value_json = json.loads(value)
value_json['mutated'] = 0
value = json.dumps(value_json)
except ValueError:
index = random.choice(list(range(len(value))))
value = value[0:index] + random.choice(string.ascii_uppercase) + value[index + 1:]
finally:
key_val[key] = value
def _process_values_for_update(self, partition_keys_dic, key_val):
for partition, keys in list(partition_keys_dic.items()):
for key in keys:
value = partition.get_valid(key)
if value is None:
del key_val[key]
continue
try:
value = key_val[
key]  # new updated value; note this is not present in the original "LoadDocumentsTask" code
value_json = json.loads(value)
value_json['mutated'] += 1
value = json.dumps(value_json)
except ValueError:
index = random.choice(list(range(len(value))))
value = value[0:index] + random.choice(string.ascii_uppercase) + value[index + 1:]
finally:
key_val[key] = value
def _populate_kvstore(self, partition_keys_dic, key_val):
for partition, keys in list(partition_keys_dic.items()):
self._populate_kvstore_partition(partition, keys, key_val)
def _release_locks_on_kvstore(self):
for part in self._partitions_keyvals_dic.keys():
self.kv_store.release_lock(part)
def _populate_kvstore_partition(self, partition, keys, key_val):
for key in keys:
if self.only_store_hash:
key_val[key] = str(crc32.crc32_hash(key_val[key]))
partition.set(key, key_val[key], self.exp, self.flag)
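# Mixed CRUD workload: each iteration picks a create/read/update/delete operation at
# random according to the weights passed in, operating on keys tracked in the KVStore.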
class WorkloadTask(GenericLoadingTask):
def __init__(self, server, bucket, kv_store, num_ops, create, read, update, delete, exp, compression=True,
scope=None, collection=None):
GenericLoadingTask.__init__(self, server, bucket, kv_store, compression=compression,
scope=scope, collection=collection)
self.itr = 0
self.num_ops = num_ops
self.create = create
self.read = create + read
self.update = create + read + update
self.delete = create + read + update + delete
self.exp = exp
self.scope = scope
self.collection = collection
self.bucket = bucket
def has_next(self):
if self.num_ops == 0 or self.itr < self.num_ops:
return True
return False
def __next__(self):
self.itr += 1
rand = random.randint(1, self.delete)
if 0 < rand <= self.create:
self._create_random_key()
elif self.create < rand <= self.read:
self._get_random_key()
elif self.read < rand <= self.update:
self._update_random_key()
elif self.update < rand <= self.delete:
self._delete_random_key()
def _get_random_key(self):
partition, part_num = self.kv_store.acquire_random_partition()
if partition is None:
return
key = partition.get_random_valid_key()
if key is None:
self.kv_store.release_partitions(part_num)
return
self._unlocked_read(partition, key)
self.kv_store.release_partitions(part_num)
def _create_random_key(self):
partition, part_num = self.kv_store.acquire_random_partition(False)
if partition is None:
return
key = partition.get_random_deleted_key()
if key is None:
self.kv_store.release_partitions(part_num)
return
value = partition.get_deleted(key)
if value is None:
self.kv_store.release_partitions(part_num)
return
self._unlocked_create(partition, key, value)
self.kv_store.release_partitions(part_num)
def _update_random_key(self):
partition, part_num = self.kv_store.acquire_random_partition()
if partition is None:
return
key = partition.get_random_valid_key()
if key is None:
self.kv_store.release_partitions(part_num)
return
self._unlocked_update(partition, key)
self.kv_store.release_partitions(part_num)
def _delete_random_key(self):
partition, part_num = self.kv_store.acquire_random_partition()
if partition is None:
return
key = partition.get_random_valid_key()
if key is None:
self.kv_store.release_partitions(part_num)
return
self._unlocked_delete(partition, key)
self.kv_store.release_partitions(part_num)
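# Verifies bucket contents key-by-key against the values/flags recorded in the KVStore,
# optionally reading from a replica and comparing stored hashes only.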
class ValidateDataTask(GenericLoadingTask):
def __init__(self, server, bucket, kv_store, max_verify=None, only_store_hash=True, replica_to_read=None,
compression=True, scope=None, collection=None):
GenericLoadingTask.__init__(self, server, bucket, kv_store, compression=compression,
scope=scope, collection=collection)
self.collection = collection
self.scope = scope
self.bucket = bucket
self.valid_keys, self.deleted_keys = kv_store.key_set(bucket=self.bucket, scope=self.scope,
collection=self.collection)
self.num_valid_keys = len(self.valid_keys)
self.num_deleted_keys = len(self.deleted_keys)
self.itr = 0
self.max_verify = self.num_valid_keys + self.num_deleted_keys
self.only_store_hash = only_store_hash
self.replica_to_read = replica_to_read
self.bucket = bucket
self.server = server
if max_verify is not None:
self.max_verify = min(max_verify, self.max_verify)
self.log.info(
"%s items will be verified on %s bucket on scope %s on collection %s" % (self.max_verify, bucket,
self.scope, self.collection))
self.start_time = time.time()
def has_next(self):
if self.itr < (self.num_valid_keys + self.num_deleted_keys) and \
self.itr < self.max_verify:
if not self.itr % 50000:
self.log.info("{0} items were verified".format(self.itr))
return True
self.log.info("{0} items were verified in {1} sec.the average number of ops\
- {2} per second ".format(self.itr, time.time() - self.start_time,
self.itr // (time.time() - self.start_time)).rstrip())
return False
def __next__(self):
if self.itr < self.num_valid_keys:
self._check_valid_key(self.valid_keys[self.itr], self.bucket, scope=self.scope, collection=self.collection)
else:
self._check_deleted_key(self.deleted_keys[self.itr - self.num_valid_keys], self.bucket,
scope=self.scope, collection=self.collection)
self.itr += 1
def _check_valid_key(self, key, bucket="default", scope=None, collection=None):
partition = self.kv_store.acquire_partition(key, bucket, scope=scope, collection=collection)
value = partition.get_valid(key)
flag = partition.get_flag(key)
if value is None or flag is None:
self.kv_store.release_partition(key, bucket, scope=scope, collection=collection)
return
try:
if self.replica_to_read is None:
o, c, d = self.client.get(key, scope=scope, collection=collection)
else:
o, c, d = self.client.getr(key, replica_index=self.replica_to_read, scope=scope, collection=collection)
try:
d = d.decode()
except AttributeError:
pass
if self.only_store_hash:
if crc32.crc32_hash(d) != int(value):
self.state = FINISHED
self.set_exception(Exception(
'Key: %s, Bad hash result: %d != %d for key %s' % (key, crc32.crc32_hash(d), int(value), key)))
else:
value = json.dumps(value)
if d != json.loads(value):
self.log.info(f"the scope {scope} collection is {collection} for which the value is failing")
self.state = FINISHED
self.set_exception(
Exception('Key: %s, Bad result: %s != %s for key %s' % (key, json.dumps(d), value, key)))
if CHECK_FLAG and o != flag:
self.state = FINISHED
self.set_exception(
Exception('Key: %s, Bad result for flag value: %s != the value we set: %s' % (key, o, flag)))
except MemcachedError as error:
if error.status == ERR_NOT_FOUND and partition.get_valid(key) is None:
pass
else:
self.state = FINISHED
self.set_exception(error)
except Exception as error:
self.log.error("Unexpected error: %s" % str(error))
self.state = FINISHED
self.set_exception(error)
self.kv_store.release_partition(key, bucket, scope=scope, collection=collection)
def _check_deleted_key(self, key, bucket="default", scope=None, collection=None):
partition = self.kv_store.acquire_partition(key, bucket, scope=scope, collection=collection)
try:
self.client.delete(key, scope=scope, collection=collection)
if partition.get_valid(key) is not None:
self.state = FINISHED
self.set_exception(Exception('Not deleted: %s' % key))
except MemcachedError as error:
if error.status == ERR_NOT_FOUND:
pass
else:
self.state = FINISHED
self.set_exception(error)
except Exception as error:
if error.rc != NotFoundError:
self.state = FINISHED
self.set_exception(error)
self.kv_store.release_partition(key, bucket, scope=scope, collection=collection)
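# Checks that flags, CAS and value of each key match between the active copy and the
# first replica, and that deleted keys are really gone.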
class ValidateDataWithActiveAndReplicaTask(GenericLoadingTask):
def __init__(self, server, bucket, kv_store, max_verify=None, compression=True, scope=None, collection=None):
GenericLoadingTask.__init__(self, server, bucket, kv_store, compression=compression,
scope=scope, collection=collection)
self.collection = collection
self.scope = scope
self.bucket = bucket
self.valid_keys, self.deleted_keys = kv_store.key_set(bucket=self.bucket, scope=self.scope,
collection=self.collection)
self.num_valid_keys = len(self.valid_keys)
self.num_deleted_keys = len(self.deleted_keys)
self.itr = 0
self.max_verify = self.num_valid_keys + self.num_deleted_keys
if max_verify is not None:
self.max_verify = min(max_verify, self.max_verify)
self.log.info("%s items will be verified on %s bucket" % (self.max_verify, bucket))
self.start_time = time.time()
def has_next(self):
if self.itr < (self.num_valid_keys + self.num_deleted_keys) and \
self.itr < self.max_verify:
if not self.itr % 50000:
self.log.info("{0} items were verified".format(self.itr))
return True
self.log.info("{0} items were verified in {1} sec.the average number of ops\
- {2} per second ".format(self.itr, time.time() - self.start_time,
self.itr // (time.time() - self.start_time)).rstrip())
return False
def __next__(self):
if self.itr < self.num_valid_keys:
self._check_valid_key(self.valid_keys[self.itr], self.bucket, self.scope, self.collection)
else:
self._check_deleted_key(self.deleted_keys[self.itr - self.num_valid_keys], self.bucket,
self.scope, self.collection)
self.itr += 1
def _check_valid_key(self, key, bucket, scope=None, collection=None):
partition = self.kv_store.acquire_partition(key, bucket, scope=scope, collection=collection)
try:
o, c, d = self.client.get(key, scope=scope, collection=collection)
o_r, c_r, d_r = self.client.getr(key, replica_index=0, scope=scope, collection=collection)
if o != o_r:
self.state = FINISHED
self.set_exception(Exception(
'ACTIVE AND REPLICA FLAG CHECK FAILED :: Key: %s, Bad result for flag value: REPLICA FLAG %s != ACTIVE FLAG %s' % (
key, o_r, o)))
if c != c_r:
self.state = FINISHED
self.set_exception(Exception(
'ACTIVE AND REPLICA CAS CHECK FAILED :: Key: %s, Bad result for CAS value: REPLICA CAS %s != ACTIVE CAS %s' % (
key, c_r, c)))
if d != d_r:
self.state = FINISHED
self.set_exception(Exception(
'ACTIVE AND REPLICA VALUE CHECK FAILED :: Key: %s, Bad result for value: REPLICA VALUE %s != ACTIVE VALUE %s' % (
key, d_r, d)))
except MemcachedError as error:
if error.status == ERR_NOT_FOUND and partition.get_valid(key) is None:
pass
else:
self.state = FINISHED
self.set_exception(error)
except Exception as error:
self.log.error("Unexpected error: %s" % str(error))
self.state = FINISHED
self.set_exception(error)
def _check_deleted_key(self, key, bucket, scope=None, collection=None):
partition = self.kv_store.acquire_partition(key, bucket, scope=scope, collection=collection)
try:
self.client.delete(key, scope=scope, collection=collection)
if partition.get_valid(key) is not None:
self.state = FINISHED
self.set_exception(Exception('ACTIVE CHECK :: Not deleted: %s' % key))
except MemcachedError as error:
if error.status == ERR_NOT_FOUND:
pass
else:
self.state = FINISHED
self.set_exception(error)
except Exception as error:
if error.rc != NotFoundError:
self.state = FINISHED
self.set_exception(error)
self.kv_store.release_partition(key, bucket, scope=scope, collection=collection)
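# Batched variant of ValidateDataTask: fetches documents with getMulti and compares
# them against the hashes/values recorded in the KVStore.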
class BatchedValidateDataTask(GenericLoadingTask):
def __init__(self, server, bucket, kv_store, max_verify=None, only_store_hash=True, batch_size=100,
timeout_sec=30, compression=True, scope=None, collection=None):
GenericLoadingTask.__init__(self, server, bucket, kv_store, compression=compression, scope=scope,
collection=collection)
self.collection = collection
self.scope = scope
self.bucket = bucket
self.valid_keys, self.deleted_keys = kv_store.key_set(bucket=self.bucket, scope=self.scope,
collection=self.collection)
self.num_valid_keys = len(self.valid_keys)
self.num_deleted_keys = len(self.deleted_keys)
self.itr = 0
self.max_verify = self.num_valid_keys + self.num_deleted_keys
self.timeout_sec = timeout_sec
self.only_store_hash = only_store_hash
if max_verify is not None:
self.max_verify = min(max_verify, self.max_verify)
self.log.info("%s items will be verified on %s bucket" % (self.max_verify, bucket))
self.batch_size = batch_size
self.start_time = time.time()
def has_next(self):
has = False
if self.itr < (self.num_valid_keys + self.num_deleted_keys) and self.itr < self.max_verify:
has = True
if math.fmod(self.itr, 10000) == 0.0:
self.log.info("{0} items were verified".format(self.itr))
if not has:
self.log.info("{0} items were verified in {1} sec.the average number of ops\
- {2} per second".format(self.itr, time.time() - self.start_time,
self.itr // (time.time() - self.start_time)).rstrip())
return has
def __next__(self):
if self.itr < self.num_valid_keys:
keys_batch = self.valid_keys[self.itr:self.itr + self.batch_size]
self.itr += len(keys_batch)
self._check_valid_keys(keys_batch, self.bucket, self.scope, self.collection)
else:
self._check_deleted_key(self.deleted_keys[self.itr - self.num_valid_keys], self.bucket, self.scope,
self.collection)
self.itr += 1
def _check_valid_keys(self, keys, bucket, scope=None, collection=None):
partition_keys_dic = self.kv_store.acquire_partitions(keys, bucket, scope=scope, collection=collection)
try:
key_vals = self.client.getMulti(keys, parallel=True, timeout_sec=self.timeout_sec, scope=scope,
collection=collection)
except ValueError as error:
self.log.error("Read failed via memcached client. Error: %s" % str(error))
self.state = FINISHED
self.kv_store.release_partitions(list(partition_keys_dic.keys()))
self.set_exception(error)
return
except BaseException as error:
# handle all other exception, for instance concurrent.futures._base.TimeoutError
self.log.error("Read failed via memcached client. Error: %s" % str(error))
self.state = FINISHED
self.kv_store.release_partitions(list(partition_keys_dic.keys()))
self.set_exception(error)
return
for partition, keys in list(partition_keys_dic.items()):
self._check_validity(partition, keys, key_vals)
self.kv_store.release_partitions(list(partition_keys_dic.keys()))
def _check_validity(self, partition, keys, key_vals):
for key in keys:
value = partition.get_valid(key)
flag = partition.get_flag(key)
if value is None:
continue
try:
o, c, d = key_vals[key]
if self.only_store_hash:
if crc32.crc32_hash(d) != int(value):
self.state = FINISHED
self.set_exception(
Exception('Key: %s Bad hash result: %d != %d' % (key, crc32.crc32_hash(d), int(value))))
else:
# value = json.dumps(value)
if json.loads(d) != json.loads(value):
self.state = FINISHED
self.set_exception(Exception('Key: %s Bad result: %s != %s' % (key, json.dumps(d), value)))
if CHECK_FLAG and o != flag:
self.state = FINISHED
self.set_exception(
Exception('Key: %s Bad result for flag value: %s != the value we set: %s' % (key, o, flag)))
except KeyError as error:
self.state = FINISHED
self.set_exception(error)
def _check_deleted_key(self, key, bucket, scope=None, collection=None):
partition = self.kv_store.acquire_partition(key, bucket, scope=scope, collection=collection)
try:
self.client.delete(key, scope=scope, collection=collection)
if partition.get_valid(key) is not None:
self.state = FINISHED
self.set_exception(Exception('Not deleted: %s' % key))
except MemcachedError as error:
if error.status == ERR_NOT_FOUND:
pass
else:
self.state = FINISHED
self.kv_store.release_partition(key, bucket, scope=scope, collection=collection)
self.set_exception(error)
except Exception as error:
if error.rc != NotFoundError:
self.state = FINISHED
self.kv_store.release_partition(key, bucket, scope=scope, collection=collection)
self.set_exception(error)
self.kv_store.release_partition(key, bucket, scope=scope, collection=collection)
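# Compares document metadata (deleted, flags, expiration, seqno, cas) between a source
# and a destination cluster for every key in the source KVStore; typically used for
# XDCR revid verification.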
class VerifyRevIdTask(GenericLoadingTask):
def __init__(self, src_server, dest_server, bucket, src_kv_store, dest_kv_store, max_err_count=200000,
max_verify=None, compression=True, scope=None, collection=None):
GenericLoadingTask.__init__(self, src_server, bucket, src_kv_store, compression=compression,
scope=scope, collection=collection)
from memcached.helper.data_helper import VBucketAwareMemcached as SmartClient
self.collection = collection
self.scope = scope
self.client_src = SmartClient(RestConnection(src_server), bucket)
self.client_dest = SmartClient(RestConnection(dest_server), bucket)
self.src_valid_keys, self.src_deleted_keys = src_kv_store.key_set(bucket=self.bucket, scope=self.scope,
collection=self.collection)
self.dest_valid_keys, self.dest_del_keys = dest_kv_store.key_set(bucket=self.bucket, scope=self.scope,
collection=self.collection)
self.num_valid_keys = len(self.src_valid_keys)
self.num_deleted_keys = len(self.src_deleted_keys)
self.keys_not_found = {self.client.rest.ip: [], self.client_dest.rest.ip: []}
if max_verify:
self.max_verify = max_verify
else:
self.max_verify = self.num_valid_keys + self.num_deleted_keys
self.itr = 0
self.not_matching_filter_keys = 0
self.err_count = 0
self.max_err_count = max_err_count
self.src_server = src_server
self.bucket = bucket
self.log.info(f"RevID verification: in progress for {self.bucket.name} in scope:{scope}"
f" in collection: {collection}")
def has_next(self):
if self.itr < (self.num_valid_keys + self.num_deleted_keys) and \
self.err_count < self.max_err_count and \
self.itr < self.max_verify:
return True
self.log.info("RevId Verification : {0} existing items have been verified"
.format(self.itr if self.itr < self.num_valid_keys else self.num_valid_keys))
self.log.info("RevId Verification : {0} deleted items have been verified"
.format(self.itr - self.num_valid_keys if self.itr > self.num_valid_keys else 0))
self.log.info("RevId Verification : {0} keys were apparently filtered "
"out and not found in target bucket"
.format(self.not_matching_filter_keys))
# if there are missing keys, we would have printed them by now
# check if excess keys are present on server, if yes, set an exception
# TODO : print excess keys
server = RestConnection(self.src_server)
server_count = server.fetch_bucket_stats(bucket=self.bucket.name)["op"]["samples"]["curr_items"][-1]
if server_count > self.num_valid_keys:
self.set_exception(Exception("ERROR: {0} keys present on bucket {1} "
"on {2} while kvstore expects only {3}"
.format(server_count, self.bucket.name,
self.src_server.ip, self.num_valid_keys)))
return False
def __next__(self):
if self.itr < self.num_valid_keys:
self._check_key_revId(self.src_valid_keys[self.itr], collection=self.collection)
elif self.itr < (self.num_valid_keys + self.num_deleted_keys):
# verify deleted/expired keys
self._check_key_revId(self.src_deleted_keys[self.itr - self.num_valid_keys],
ignore_meta_data=['expiration', 'cas'], collection=self.collection)
self.itr += 1
# show progress of verification for every 50k items
if math.fmod(self.itr, 50000) == 0.0:
self.log.info("{0} items have been verified".format(self.itr))
def __get_meta_data(self, client, key, scope=None, collection=None):
try:
mc = client.memcached(key)
meta_data = eval("{'deleted': %s, 'flags': %s, 'expiration': %s, 'seqno': %s, 'cas': %s}" % (
mc.getMeta(key, scope=scope, collection=collection)))
return meta_data
except MemcachedError as error:
if error.status == ERR_NOT_FOUND:
# if a filter was specified, the key will not be found in
# target kv store if key did not match filter expression
if key not in self.src_deleted_keys and key in (self.dest_valid_keys + self.dest_del_keys):
self.err_count += 1
self.keys_not_found[client.rest.ip].append(
("key: %s" % key, "vbucket: %s" % client._get_vBucket_id(key, scope=scope,
collection=collection)))
else:
self.not_matching_filter_keys += 1
else:
self.state = FINISHED
self.set_exception(error)
# catch and set all unexpected exceptions
except Exception as e:
self.state = FINISHED
self.set_unexpected_exception(e)
def _check_key_revId(self, key, ignore_meta_data=None, scope=None, collection=None):
if ignore_meta_data is None:
ignore_meta_data = []
src_meta_data = self.__get_meta_data(self.client_src, key, scope=scope, collection=collection)
dest_meta_data = self.__get_meta_data(self.client_dest, key, scope=scope, collection=collection)
if not src_meta_data or not dest_meta_data:
return
prev_error_count = self.err_count
err_msg = []
# seqno number should never be zero
if src_meta_data['seqno'] == 0:
self.err_count += 1
err_msg.append(
"seqno on Source should not be 0, Error Count:{0}".format(self.err_count))
if dest_meta_data['seqno'] == 0:
self.err_count += 1
err_msg.append(
"seqno on Destination should not be 0, Error Count:{0}".format(self.err_count))
# verify all metadata
for meta_key in list(src_meta_data.keys()):
check = True
if meta_key == 'flags' and not CHECK_FLAG:
check = False
if check and src_meta_data[meta_key] != dest_meta_data[meta_key] and meta_key not in ignore_meta_data:
self.err_count += 1
err_msg.append("{0} mismatch: Source {0}:{1}, Destination {0}:{2}, Error Count:{3}"
.format(meta_key, src_meta_data[meta_key],
dest_meta_data[meta_key], self.err_count))
if self.err_count - prev_error_count > 0 and self.err_count < 200:
self.log.error("===== Verifying rev_ids failed for key: {0}, bucket:{1} =====".format(key, self.bucket))
[self.log.error(err) for err in err_msg]
self.log.error("Source meta data: %s" % src_meta_data)
self.log.error("Dest meta data: %s" % dest_meta_data)
self.state = FINISHED
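# Compares per-scope/per-collection item counts between a source and a destination
# cluster for the given scope/collection mapping.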
class VerifyCollectionDocCountTask(Task):
def __init__(self, src, dest, bucket, mapping):
Task.__init__(self, "verify_collection_doc_count_task")
self.src = src
self.dest = dest
self.bucket = bucket
self.mapping = mapping
self.src_conn = CollectionsStats(src.get_master_node())
self.dest_conn = CollectionsStats(dest.get_master_node())
self.src_stats = self.src_conn.get_collection_stats(self.bucket)[0]
self.dest_stats = self.dest_conn.get_collection_stats(self.bucket)[0]
def execute(self, task_manager):
try:
for map_exp in self.mapping.items():
if ':' in map_exp[0]:
src_scope = map_exp[0].split(':')[0]
src_collection = map_exp[0].split(':')[1]
src_count = self.src_conn.get_collection_item_count(self.bucket,
src_scope, src_collection,
self.src.get_nodes(),
self.src_stats)
else:
src_scope = map_exp[0]
src_collection = "all"
src_count = self.src_conn.get_scope_item_count(self.bucket, src_scope,
self.src.get_nodes(), self.src_stats)
if map_exp[1]:
if map_exp[1].lower() == "null":
self.log.info("{} mapped to null, skipping doc count verification"
.format())
dest_collection_specified = False
if ':' in map_exp[1]:
dest_collection_specified = True
dest_scope = map_exp[1].split(':')[0]
dest_collection = map_exp[1].split(':')[1]
elif "colon" in map_exp[1]:
dest_collection_specified = True
dest_scope = map_exp[1].split("colon")[0]
dest_collection = map_exp[1].split("colon")[1]
if dest_collection_specified:
dest_count = self.dest_conn.get_collection_item_count(self.bucket,
dest_scope, dest_collection,
self.dest.get_nodes(),
self.dest_stats)
else:
dest_scope = map_exp[1]
dest_collection = "all"
dest_count = self.dest_conn.get_scope_item_count(self.bucket, dest_scope,
self.dest.get_nodes(), self.dest_stats)
self.log.info('-' * 100)
if src_count == dest_count:
self.log.info("Item count on src:{} {} = {} on dest:{} for "
"bucket {} \nsrc : scope {}-> collection {},"
"dest: scope {}-> collection {}"
.format(self.src.get_master_node().ip, src_count,
dest_count, self.dest.get_master_node().ip,
self.bucket, src_scope, src_collection,
dest_scope, dest_collection))
else:
self.set_exception(Exception("ERROR: Item count on src:{} {} != {} on dest:{} for "
"bucket {} \nsrc : scope {}-> collection {},"
"dest: scope {}-> collection {}"
.format(self.src.get_master_node().ip, src_count,
dest_count, self.dest.get_master_node().ip,
self.bucket, src_scope, src_collection,
dest_scope, dest_collection)))
self.log.info('-' * 100)
except Exception as e:
self.state = FINISHED
self.set_unexpected_exception(e)
self.check(task_manager)
def check(self, task_manager):
self.set_result(True)
self.state = FINISHED
task_manager.schedule(self)
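# Compares metadata of documents on the destination server against a previously
# captured meta_data_store (see GetMetaDataTask below).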
class VerifyMetaDataTask(GenericLoadingTask):
def __init__(self, dest_server, bucket, kv_store, meta_data_store, max_err_count=100, compression=True,
scope=None, collection=None):
GenericLoadingTask.__init__(self, dest_server, bucket, kv_store, compression=compression, scope=scope,
collection=collection)
from memcached.helper.data_helper import VBucketAwareMemcached as SmartClient
self.collections = collection
self.scope = scope
self.client = SmartClient(RestConnection(dest_server), bucket)
self.valid_keys, self.deleted_keys = kv_store.key_set(bucket=self.bucket,
scope=self.scope, collection=self.collection)
self.num_valid_keys = len(self.valid_keys)
self.num_deleted_keys = len(self.deleted_keys)
self.keys_not_found = {self.client.rest.ip: []}
self.itr = 0
self.err_count = 0
self.max_err_count = max_err_count
self.meta_data_store = meta_data_store
def has_next(self):
if self.itr < (self.num_valid_keys + self.num_deleted_keys) and self.err_count < self.max_err_count:
return True
self.log.info("Meta Data Verification : {0} existing items have been verified"
.format(self.itr if self.itr < self.num_valid_keys else self.num_valid_keys))
self.log.info("Meta Data Verification : {0} deleted items have been verified"
.format(self.itr - self.num_valid_keys if self.itr > self.num_valid_keys else 0))
return False
def __next__(self):
if self.itr < self.num_valid_keys:
self._check_key_meta_data(self.valid_keys[self.itr], scope=self.scope, collection=self.collections)
elif self.itr < (self.num_valid_keys + self.num_deleted_keys):
# verify deleted/expired keys
self._check_key_meta_data(self.deleted_keys[self.itr - self.num_valid_keys],
ignore_meta_data=['expiration'], scope=self.scope, collection=self.collections)
self.itr += 1
# show progress of verification for every 50k items
if math.fmod(self.itr, 50000) == 0.0:
self.log.info("{0} items have been verified".format(self.itr))
def __get_meta_data(self, client, key, scope=None, collection=None):
try:
mc = client.memcached(key)
meta_data = eval("{'deleted': %s, 'flags': %s, 'expiration': %s, 'seqno': %s, 'cas': %s}" % (
mc.getMeta(key, scope=scope, collection=collection)))
return meta_data
except MemcachedError as error:
if error.status == ERR_NOT_FOUND:
if key not in self.deleted_keys:
self.err_count += 1
self.keys_not_found[client.rest.ip].append(
("key: %s" % key, "vbucket: %s" % client._get_vBucket_id(key)))
else:
self.state = FINISHED
self.set_exception(error)
def _check_key_meta_data(self, key, ignore_meta_data=[], scope=None, collection=None):
src_meta_data = self.meta_data_store[key]
dest_meta_data = self.__get_meta_data(self.client, key, scope=scope, collection=collection)
if not src_meta_data or not dest_meta_data:
return
prev_error_count = self.err_count
err_msg = []
# seqno number should never be zero
if dest_meta_data['seqno'] == 0:
self.err_count += 1
err_msg.append(
"seqno on Destination should not be 0, Error Count:{0}".format(self.err_count))
# verify all metadata
for meta_key in list(src_meta_data.keys()):
if src_meta_data[meta_key] != dest_meta_data[meta_key] and meta_key not in ignore_meta_data:
self.err_count += 1
err_msg.append("{0} mismatch: Source {0}:{1}, Destination {0}:{2}, Error Count:{3}"
.format(meta_key, src_meta_data[meta_key],
dest_meta_data[meta_key], self.err_count))
if self.err_count - prev_error_count > 0:
self.log.error("===== Verifying meta data failed for key: {0} =====".format(key))
[self.log.error(err) for err in err_msg]
self.log.error("Source meta data: %s" % src_meta_data)
self.log.error("Dest meta data: %s" % dest_meta_data)
self.state = FINISHED
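# Collects metadata (deleted, flags, expiration, seqno, cas) for every key in the
# KVStore; the resulting dict can be fed to VerifyMetaDataTask.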
class GetMetaDataTask(GenericLoadingTask):
def __init__(self, dest_server, bucket, kv_store, compression=True, scope=None, collection=None):
GenericLoadingTask.__init__(self, dest_server, bucket, kv_store, compression=compression,
scope=scope, collection=collection)
from memcached.helper.data_helper import VBucketAwareMemcached as SmartClient
self.collection = collection
self.scope = scope
self.client = SmartClient(RestConnection(dest_server), bucket)
self.valid_keys, self.deleted_keys = kv_store.key_set(bucket=self.bucket, scope=self.scope,
collection=self.collection)
self.num_valid_keys = len(self.valid_keys)
self.num_deleted_keys = len(self.deleted_keys)
self.keys_not_found = {self.client.rest.ip: []}
self.itr = 0
self.err_count = 0
self.max_err_count = 100
self.meta_data_store = {}
def has_next(self):
if self.itr < (self.num_valid_keys + self.num_deleted_keys) and self.err_count < self.max_err_count:
return True
self.log.info("Get Meta Data : {0} existing items have been gathered"
.format(self.itr if self.itr < self.num_valid_keys else self.num_valid_keys))
self.log.info("Get Meta Data : {0} deleted items have been gathered"
.format(self.itr - self.num_valid_keys if self.itr > self.num_valid_keys else 0))
return False
def __next__(self):
if self.itr < self.num_valid_keys:
self.meta_data_store[self.valid_keys[self.itr]] = self.__get_meta_data(self.client,
self.valid_keys[self.itr],
self.scope, self.collection)
elif self.itr < (self.num_valid_keys + self.num_deleted_keys):
self.meta_data_store[self.deleted_keys[self.itr - self.num_valid_keys]] = self.__get_meta_data(self.client,
self.deleted_keys[
self.itr - self.num_valid_keys],
scope=self.scope,
collection=self.collection)
self.itr += 1
def __get_meta_data(self, client, key, scope=None, collection=None):
try:
mc = client.memcached(key)
meta_data = eval("{'deleted': %s, 'flags': %s, 'expiration': %s, 'seqno': %s, 'cas': %s}" % (
mc.getMeta(key, scope=scope, collection=collection)))
return meta_data
except MemcachedError as error:
if error.status == ERR_NOT_FOUND:
if key not in self.deleted_keys:
self.err_count += 1
self.keys_not_found[client.rest.ip].append(
("key: %s" % key, "vbucket: %s" % client._get_vBucket_id(key)))
else:
self.state = FINISHED
self.set_exception(error)
def get_meta_data_store(self):
return self.meta_data_store
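# Creates a design document (or appends a view to an existing one) and polls in check()
# until the view is queryable and the ddoc revision has been updated/replicated.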
class ViewCreateTask(Task):
def __init__(self, server, design_doc_name, view, bucket="default", with_query=True,
check_replication=False, ddoc_options=None):
Task.__init__(self, "create_view_task")
self.server = server
self.bucket = bucket
self.view = view
prefix = ""
if self.view:
prefix = ("", "dev_")[self.view.dev_view]
if design_doc_name.find('/') != -1:
design_doc_name = design_doc_name.replace('/', '%2f')
self.design_doc_name = prefix + design_doc_name
self.ddoc_rev_no = 0
self.with_query = with_query
self.check_replication = check_replication
self.ddoc_options = ddoc_options
self.rest = RestConnection(self.server)
def execute(self, task_manager):
try:
# appending view to existing design doc
content, meta = self.rest.get_ddoc(self.bucket, self.design_doc_name)
ddoc = DesignDocument._init_from_json(self.design_doc_name, content)
# if view is to be updated
if self.view:
if self.view.is_spatial:
ddoc.add_spatial_view(self.view)
else:
ddoc.add_view(self.view)
self.ddoc_rev_no = self._parse_revision(meta['rev'])
except ReadDocumentException:
# creating first view in design doc
if self.view:
if self.view.is_spatial:
ddoc = DesignDocument(self.design_doc_name, [], spatial_views=[self.view])
else:
ddoc = DesignDocument(self.design_doc_name, [self.view])
# create an empty design doc
else:
ddoc = DesignDocument(self.design_doc_name, [])
if self.ddoc_options:
ddoc.options = self.ddoc_options
# catch and set all unexpected exceptions
except Exception as e:
self.state = FINISHED
self.set_unexpected_exception(e)
try:
self.rest.create_design_document(self.bucket, ddoc)
self.state = CHECKING
task_manager.schedule(self)
except DesignDocCreationException as e:
self.state = FINISHED
self.set_exception(e)
# catch and set all unexpected exceptions
except Exception as e:
self.state = FINISHED
self.set_unexpected_exception(e)
def check(self, task_manager):
try:
# only query if the DDoc has a view
if self.view:
if self.with_query:
query = {"stale": "ok"}
if self.view.is_spatial:
content = \
self.rest.query_view(self.design_doc_name, self.view.name,
self.bucket, query, type="spatial")
else:
content = \
self.rest.query_view(self.design_doc_name, self.view.name,
self.bucket, query)
else:
_, json_parsed, _ = self.rest._get_design_doc(self.bucket, self.design_doc_name)
if self.view.is_spatial:
if self.view.name not in list(json_parsed["spatial"].keys()):
self.set_exception(
Exception("design doc {O} doesn't contain spatial view {1}".format(
self.design_doc_name, self.view.name)))
else:
if self.view.name not in list(json_parsed["views"].keys()):
self.set_exception(Exception("design doc {O} doesn't contain view {1}".format(
self.design_doc_name, self.view.name)))
self.log.info(
"view : {0} was created successfully in ddoc: {1}".format(self.view.name, self.design_doc_name))
else:
# if we have reached here, it means design doc was successfully updated
self.log.info("Design Document : {0} was updated successfully".format(self.design_doc_name))
self.state = FINISHED
if self._check_ddoc_revision():
self.set_result(self.ddoc_rev_no)
else:
self.set_exception(Exception("failed to update design document"))
if self.check_replication:
self._check_ddoc_replication_on_nodes()
except QueryViewException as e:
if str(e).find('not_found') > -1 or str(e).find('view_undefined') > -1:
task_manager.schedule(self, 2)
else:
self.state = FINISHED
self.set_unexpected_exception(e)
# catch and set all unexpected exceptions
except Exception as e:
self.state = FINISHED
self.set_unexpected_exception(e)
def _check_ddoc_revision(self):
valid = False
try:
content, meta = self.rest.get_ddoc(self.bucket, self.design_doc_name)
new_rev_id = self._parse_revision(meta['rev'])
if new_rev_id != self.ddoc_rev_no:
self.ddoc_rev_no = new_rev_id
valid = True
except ReadDocumentException:
pass
# catch and set all unexpected exceptions
except Exception as e:
self.state = FINISHED
self.set_unexpected_exception(e)
return valid
def _parse_revision(self, rev_string):
return int(rev_string.split('-')[0])
def _check_ddoc_replication_on_nodes(self):
nodes = self.rest.node_statuses()
retry_count = 3
# nothing to check if there is only 1 node
if len(nodes) <= 1:
return
for node in nodes:
server_info = {"ip": node.ip,
"port": node.port,
"username": self.rest.username,
"password": self.rest.password}
for count in range(retry_count):
try:
rest_node = RestConnection(server_info)
content, meta = rest_node.get_ddoc(self.bucket, self.design_doc_name)
new_rev_id = self._parse_revision(meta['rev'])
if new_rev_id == self.ddoc_rev_no:
break
else:
self.log.info("Design Doc {0} version is not updated on node {1}:{2}. Retrying.".format(
self.design_doc_name, node.ip, node.port))
time.sleep(2)
except ReadDocumentException as e:
if (count < retry_count):
self.log.info(
"Design Doc {0} not yet available on node {1}:{2}. Retrying.".format(self.design_doc_name,
node.ip, node.port))
time.sleep(2)
else:
self.log.error(
"Design Doc {0} failed to replicate on node {1}:{2}".format(self.design_doc_name, node.ip,
node.port))
self.set_exception(e)
self.state = FINISHED
break
except Exception as e:
if (count < retry_count):
self.log.info("Unexpected Exception Caught. Retrying.")
time.sleep(2)
else:
self.set_unexpected_exception(e)
self.state = FINISHED
break
else:
self.set_exception(Exception(
"Design Doc {0} version mismatch on node {1}:{2}".format(self.design_doc_name, node.ip, node.port)))
class ViewDeleteTask(Task):
def __init__(self, server, design_doc_name, view, bucket="default"):
Task.__init__(self, "delete_view_task")
self.server = server
self.bucket = bucket
self.view = view
prefix = ""
if self.view:
prefix = ("", "dev_")[self.view.dev_view]
self.design_doc_name = prefix + design_doc_name
def execute(self, task_manager):
try:
rest = RestConnection(self.server)
if self.view:
# remove view from existing design doc
content, header = rest.get_ddoc(self.bucket, self.design_doc_name)
ddoc = DesignDocument._init_from_json(self.design_doc_name, content)
if self.view.is_spatial:
status = ddoc.delete_spatial(self.view)
else:
status = ddoc.delete_view(self.view)
if not status:
self.state = FINISHED
self.set_exception(Exception('View does not exist! %s' % (self.view.name)))
# update design doc
rest.create_design_document(self.bucket, ddoc)
self.state = CHECKING
task_manager.schedule(self, 2)
else:
# delete the design doc
rest.delete_view(self.bucket, self.design_doc_name)
self.log.info("Design Doc : {0} was successfully deleted".format(self.design_doc_name))
self.state = FINISHED
self.set_result(True)
except (ValueError, ReadDocumentException, DesignDocCreationException) as e:
self.state = FINISHED
self.set_exception(e)
# catch and set all unexpected exceptions
except Exception as e:
self.state = FINISHED
self.set_unexpected_exception(e)
def check(self, task_manager):
try:
rest = RestConnection(self.server)
# make sure view was deleted
query = {"stale": "ok"}
content = \
rest.query_view(self.design_doc_name, self.view.name, self.bucket, query)
self.state = FINISHED
self.set_result(False)
except QueryViewException as e:
self.log.info(
"view : {0} was successfully deleted in ddoc: {1}".format(self.view.name, self.design_doc_name))
self.state = FINISHED
self.set_result(True)
# catch and set all unexpected exceptions
except Exception as e:
self.state = FINISHED
self.set_unexpected_exception(e)
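# Runs a view query; when expected_rows is given, retries until that many rows are
# returned or the query fails.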
class ViewQueryTask(Task):
def __init__(self, server, design_doc_name, view_name,
query, expected_rows=None,
bucket="default", retry_time=2):
Task.__init__(self, "query_view_task")
self.server = server
self.bucket = bucket
self.view_name = view_name
self.design_doc_name = design_doc_name
self.query = query
self.expected_rows = expected_rows
self.retry_time = retry_time
self.timeout = 900
def execute(self, task_manager):
try:
rest = RestConnection(self.server)
# make sure view can be queried
content = \
rest.query_view(self.design_doc_name, self.view_name, self.bucket, self.query, self.timeout)
if self.expected_rows is None:
# no verification
self.state = FINISHED
self.set_result(content)
else:
self.state = CHECKING
task_manager.schedule(self)
except QueryViewException as e:
# initial query failed, try again
task_manager.schedule(self, self.retry_time)
# catch and set all unexpected exceptions
except Exception as e:
self.state = FINISHED
self.set_unexpected_exception(e)
def check(self, task_manager):
try:
rest = RestConnection(self.server)
# query and verify expected num of rows returned
content = \
rest.query_view(self.design_doc_name, self.view_name, self.bucket, self.query, self.timeout)
self.log.info("Server: %s, Design Doc: %s, View: %s, (%d rows) expected, (%d rows) returned" % \
(self.server.ip, self.design_doc_name, self.view_name, self.expected_rows,
len(content['rows'])))
raised_error = content.get('error', '') or ''.join([str(item) for item in content.get('errors', [])])
if raised_error:
raise QueryViewException(self.view_name, raised_error)
if len(content['rows']) == self.expected_rows:
self.log.info("expected number of rows: '{0}' was found for view query".format(self.
expected_rows))
self.state = FINISHED
self.set_result(True)
else:
if len(content['rows']) > self.expected_rows:
raise QueryViewException(self.view_name,
"Server: {0}, Design Doc: {1}, actual returned rows: '{2}' are greater than expected {3}"
.format(self.server.ip, self.design_doc_name, len(content['rows']),
self.expected_rows, ))
if "stale" in self.query:
if self.query["stale"].lower() == "false":
self.state = FINISHED
self.set_result(False)
# retry until expected results or task times out
task_manager.schedule(self, self.retry_time)
except QueryViewException as e:
# subsequent query failed! exit
self.state = FINISHED
self.set_exception(e)
# catch and set all unexpected exceptions
except Exception as e:
self.state = FINISHED
self.set_unexpected_exception(e)
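# Executes an N1QL query through n1ql_helper and optionally verifies the result set,
# or, for EXPLAIN queries, that the expected index is used.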
class N1QLQueryTask(Task):
def __init__(self,
server, bucket,
query, n1ql_helper=None,
expected_result=None,
verify_results=True,
is_explain_query=False,
index_name=None,
retry_time=2,
scan_consistency=None,
scan_vector=None):
Task.__init__(self, "query_n1ql_task")
self.server = server
self.bucket = bucket
self.query = query
self.expected_result = expected_result
self.n1ql_helper = n1ql_helper
self.timeout = 900
self.verify_results = verify_results
self.is_explain_query = is_explain_query
self.index_name = index_name
self.retry_time = retry_time
self.scan_consistency = scan_consistency
self.scan_vector = scan_vector
def execute(self, task_manager):
try:
# Query and get results
self.log.info(" <<<<< START Executing Query {0} >>>>>>".format(self.query))
if not self.is_explain_query:
self.msg, self.isSuccess = self.n1ql_helper.run_query_and_verify_result(
query=self.query, server=self.server, expected_result=self.expected_result,
scan_consistency=self.scan_consistency, scan_vector=self.scan_vector,
verify_results=self.verify_results)
else:
self.actual_result = self.n1ql_helper.run_cbq_query(query=self.query, server=self.server,
scan_consistency=self.scan_consistency,
scan_vector=self.scan_vector)
self.log.info(self.actual_result)
self.log.info(" <<<<< Done Executing Query {0} >>>>>>".format(self.query))
self.state = CHECKING
task_manager.schedule(self)
except N1QLQueryException as e:
self.state = FINISHED
# initial query failed, try again
task_manager.schedule(self, self.retry_time)
# catch and set all unexpected exceptions
except Exception as e:
self.state = FINISHED
self.set_unexpected_exception(e)
def check(self, task_manager):
try:
# Verify correctness of result set
if self.verify_results:
if not self.is_explain_query:
if not self.isSuccess:
self.log.info(" Query {0} results leads to INCORRECT RESULT ".format(self.query))
raise N1QLQueryException(self.msg)
else:
check = self.n1ql_helper.verify_index_with_explain(self.actual_result, self.index_name)
if not check:
actual_result = self.n1ql_helper.run_cbq_query(query="select * from system:indexes",
server=self.server)
self.log.info(actual_result)
raise Exception(
" INDEX usage in Query {0} :: NOT FOUND {1} :: as observed in result {2}".format(
self.query, self.index_name, self.actual_result))
self.log.info(" <<<<< Done VERIFYING Query {0} >>>>>>".format(self.query))
self.set_result(True)
self.state = FINISHED
except N1QLQueryException as e:
# subsequent query failed! exit
self.state = FINISHED
self.set_exception(e)
# catch and set all unexpected exceptions
except Exception as e:
self.state = FINISHED
self.set_unexpected_exception(e)
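# Runs a CREATE INDEX statement and, unless defer_build is set, waits for the index to
# come online and appear in the index list.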
class CreateIndexTask(Task):
def __init__(self,
server, bucket, index_name,
query, n1ql_helper=None,
retry_time=2, defer_build=False,
timeout=240):
Task.__init__(self, "create_index_task")
self.server = server
self.bucket = bucket
self.defer_build = defer_build
self.query = query
self.index_name = index_name
self.n1ql_helper = n1ql_helper
self.retry_time = retry_time
self.timeout = timeout
def execute(self, task_manager):
try:
# Query and get results
self.n1ql_helper.run_cbq_query(query=self.query, server=self.server)
self.state = CHECKING
task_manager.schedule(self)
except CreateIndexException as e:
# initial query failed, try again
self.state = FINISHED
task_manager.schedule(self, self.retry_time)
# catch and set all unexpected exceptions
except Exception as e:
self.state = FINISHED
self.log.error(e)
self.set_exception(e)
#if not "will retry building in the background for reason" in e:
# self.log.error(e)
# self.set_exception(e)
def check(self, task_manager):
try:
# Verify correctness of result set
check = True
if not self.defer_build:
check = self.n1ql_helper.is_index_online_and_in_list(self.bucket, self.index_name, server=self.server,
timeout=self.timeout)
if not check:
raise CreateIndexException("Index {0} not created as expected ".format(self.index_name))
self.set_result(True)
self.state = FINISHED
except CreateIndexException as e:
# subsequent query failed! exit
self.state = FINISHED
self.log.error(e)
self.set_exception(e)
# catch and set all unexpected exceptions
except Exception as e:
self.state = FINISHED
self.log.error(e)
self.set_exception(e)
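# Runs a BUILD INDEX statement; its check phase only reports success, index readiness
# is verified separately (e.g. via MonitorIndexTask below).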
class BuildIndexTask(Task):
def __init__(self,
server, bucket,
query, n1ql_helper=None,
retry_time=2):
Task.__init__(self, "build_index_task")
self.server = server
self.bucket = bucket
self.query = query
self.n1ql_helper = n1ql_helper
self.retry_time = retry_time
def execute(self, task_manager):
try:
# Query and get results
self.n1ql_helper.run_cbq_query(query=self.query, server=self.server)
self.state = CHECKING
task_manager.schedule(self)
except CreateIndexException as e:
# initial query failed, try again
self.state = FINISHED
task_manager.schedule(self, self.retry_time)
# catch and set all unexpected exceptions
except Exception as e:
self.state = FINISHED
self.set_unexpected_exception(e)
def check(self, task_manager):
try:
# Verify correctness of result set
self.set_result(True)
self.state = FINISHED
except CreateIndexException as e:
# subsequent query failed! exit
self.state = FINISHED
self.set_exception(e)
# catch and set all unexpected exceptions
except Exception as e:
self.state = FINISHED
self.set_unexpected_exception(e)
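# Waits until the given index is online and listed, failing the task if it is not
# within the timeout.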
class MonitorIndexTask(Task):
def __init__(self,
server, bucket, index_name,
n1ql_helper=None,
retry_time=2,
timeout=240):
Task.__init__(self, "build_index_task")
self.server = server
self.bucket = bucket
self.index_name = index_name
self.n1ql_helper = n1ql_helper
self.retry_time = retry_time
self.timeout = timeout
def execute(self, task_manager):
try:
check = self.n1ql_helper.is_index_online_and_in_list(self.bucket, self.index_name,
server=self.server, timeout=self.timeout)
if not check:
self.state = FINISHED
raise CreateIndexException("Index {0} not created as expected ".format(self.index_name))
self.state = CHECKING
task_manager.schedule(self)
except CreateIndexException as e:
# initial query failed, try again
self.state = FINISHED
self.set_exception(e)
# catch and set all unexpected exceptions
except Exception as e:
self.state = FINISHED
self.set_unexpected_exception(e)
def check(self, task_manager):
try:
self.set_result(True)
self.state = FINISHED
except CreateIndexException as e:
# subsequent query failed! exit
self.state = FINISHED
self.set_exception(e)
# catch and set all unexpected exceptions
except Exception as e:
self.state = FINISHED
self.set_unexpected_exception(e)
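# Drops an index after confirming it exists, then verifies it is no longer listed.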
class DropIndexTask(Task):
def __init__(self,
server, bucket, index_name,
query, n1ql_helper=None,
retry_time=2):
Task.__init__(self, "drop_index_task")
self.server = server
self.bucket = bucket
self.query = query
self.index_name = index_name
self.n1ql_helper = n1ql_helper
self.timeout = 900
self.retry_time = retry_time
def execute(self, task_manager):
try:
# Query and get results
check = self.n1ql_helper._is_index_in_list(self.bucket, self.index_name, server=self.server)
if not check:
raise DropIndexException("index {0} does not exist will not drop".format(self.index_name))
self.n1ql_helper.run_cbq_query(query=self.query, server=self.server)
self.state = CHECKING
task_manager.schedule(self)
except N1QLQueryException as e:
# initial query failed, try again
self.state = FINISHED
task_manager.schedule(self, self.retry_time)
# catch and set all unexpected exceptions
except DropIndexException as e:
self.state = FINISHED
self.set_unexpected_exception(e)
def check(self, task_manager):
try:
# Verify correctness of result set
check = self.n1ql_helper._is_index_in_list(self.bucket, self.index_name, server=self.server)
if check:
raise Exception("Index {0} not dropped as expected ".format(self.index_name))
self.set_result(True)
self.state = FINISHED
except DropIndexException as e:
# subsequent query failed! exit
self.state = FINISHED
self.set_exception(e)
# catch and set all unexpected exceptions
except Exception as e:
self.state = FINISHED
self.set_unexpected_exception(e)
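# Repeatedly queries a view until the results match expected_docs (row count, keys and
# optionally ids/values), retrying on indexer/not-ready errors up to `retries` times.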
class MonitorViewQueryResultsTask(Task):
def __init__(self, servers, design_doc_name, view,
query, expected_docs=None, bucket="default",
retries=100, error=None, verify_rows=False,
server_to_query=0):
Task.__init__(self, "query_view_task")
self.servers = servers
self.bucket = bucket
self.view_name = view.name
self.view = view
self.design_doc_name = design_doc_name
self.query = query
self.retries = retries
self.current_retry = 0
self.timeout = 900
self.error = error
self.expected_docs = expected_docs
self.verify_rows = verify_rows
self.rest = RestConnection(self.servers[server_to_query])
self.results = None
self.connection_timeout = 60000
self.query["connection_timeout"] = self.connection_timeout
if self.design_doc_name.find("dev_") == 0:
self.query["full_set"] = "true"
def execute(self, task_manager):
try:
self.current_retry += 1
self.results = self.rest.query_view(
self.design_doc_name, self.view_name, self.bucket, self.query,
self.timeout)
raised_error = self.results.get('error', '') or ''.join(
[str(item) for item in self.results.get('errors', [])])
if raised_error:
raise QueryViewException(self.view_name, raised_error)
else:
self.log.info("view %s, query %s: expected- %s, actual -%s" % (
self.design_doc_name, self.query,
len(self.expected_docs),
len(self.results.get('rows', []))))
self.state = CHECKING
task_manager.schedule(self)
except QueryViewException as ex:
self.log.error("During query run (ddoc=%s, query=%s, server=%s) error is: %s" % (
self.design_doc_name, self.query, self.servers[0].ip, str(ex)))
if self.error and str(ex).find(self.error) != -1:
self.state = FINISHED
self.set_result({"passed": True,
"errors": str(ex)})
elif self.current_retry == self.retries:
self.state = FINISHED
self.set_result({"passed": False,
"errors": str(ex)})
elif str(ex).find('view_undefined') != -1 or \
str(ex).find('not_found') != -1 or \
str(ex).find('unable to reach') != -1 or \
str(ex).find('socket error') != -1 or \
str(ex).find('econnrefused') != -1 or \
str(ex).find("doesn't exist") != -1 or \
str(ex).find('missing') != -1 or \
str(ex).find("Undefined set view") != -1:
self.log.error(
"view_results not ready yet ddoc=%s , try again in 10 seconds..." %
self.design_doc_name)
task_manager.schedule(self, 10)
elif str(ex).find('timeout') != -1:
self.connection_timeout = self.connection_timeout * 2
self.log.error("view_results not ready yet ddoc=%s ," % self.design_doc_name + \
" try again in 10 seconds... and double timeout")
task_manager.schedule(self, 10)
else:
self.state = FINISHED
res = {"passed": False,
"errors": str(ex)}
if self.results and self.results.get('rows', []):
res['results'] = self.results
self.set_result(res)
except Exception as ex:
if self.current_retry == self.retries:
self.state = CHECKING
self.log.error("view %s, query %s: verifying results" % (
self.design_doc_name, self.query))
task_manager.schedule(self)
else:
self.log.error(
"view_results not ready yet ddoc=%s , try again in 10 seconds..." %
self.design_doc_name)
task_manager.schedule(self, 10)
def check(self, task_manager):
try:
if self.view.red_func and (('reduce' in self.query and \
self.query['reduce'] == "true") or (not 'reduce' in self.query)):
if len(self.expected_docs) != len(self.results.get('rows', [])):
if self.current_retry == self.retries:
self.state = FINISHED
msg = "ddoc=%s, query=%s, server=%s" % (
self.design_doc_name, self.query, self.servers[0].ip)
msg += "Number of groups expected:%s, actual:%s" % (
len(self.expected_docs), len(self.results.get('rows', [])))
self.set_result({"passed": False,
"errors": msg})
else:
RestHelper(self.rest)._wait_for_indexer_ddoc(self.servers, self.design_doc_name)
self.state = EXECUTING
task_manager.schedule(self, 10)
else:
for row in self.expected_docs:
key_expected = row['key']
if not (key_expected in [key['key'] for key in self.results.get('rows', [])]):
if self.current_retry == self.retries:
self.state = FINISHED
msg = "ddoc=%s, query=%s, server=%s" % (
self.design_doc_name, self.query, self.servers[0].ip)
msg += "Key expected but not present :%s" % (key_expected)
self.set_result({"passed": False,
"errors": msg})
else:
RestHelper(self.rest)._wait_for_indexer_ddoc(self.servers, self.design_doc_name)
self.state = EXECUTING
task_manager.schedule(self, 10)
else:
for res in self.results.get('rows', []):
if key_expected == res['key']:
value = res['value']
break
msg = "ddoc=%s, query=%s, server=%s\n" % (
self.design_doc_name, self.query, self.servers[0].ip)
msg += "Key %s: expected value %s, actual: %s" % (
key_expected, row['value'], value)
self.log.info(msg)
if row['value'] == value:
self.state = FINISHED
self.log.info(msg)
self.set_result({"passed": True,
"errors": []})
else:
if self.current_retry == self.retries:
self.state = FINISHED
self.log.error(msg)
self.set_result({"passed": True,
"errors": msg})
else:
RestHelper(self.rest)._wait_for_indexer_ddoc(self.servers, self.design_doc_name)
self.state = EXECUTING
task_manager.schedule(self, 10)
return
if len(self.expected_docs) > len(self.results.get('rows', [])):
if self.current_retry == self.retries:
self.state = FINISHED
self.set_result({"passed": False,
"errors": [],
"results": self.results})
else:
RestHelper(self.rest)._wait_for_indexer_ddoc(self.servers, self.design_doc_name)
if self.current_retry == 70:
self.query["stale"] = 'false'
self.log.info(
"View result is still not expected (ddoc=%s, query=%s, server=%s). retry in 10 sec" % (
self.design_doc_name, self.query, self.servers[0].ip))
self.state = EXECUTING
task_manager.schedule(self, 10)
elif len(self.expected_docs) < len(self.results.get('rows', [])):
self.state = FINISHED
self.set_result({"passed": False,
"errors": [],
"results": self.results})
elif len(self.expected_docs) == len(self.results.get('rows', [])):
if self.verify_rows:
expected_ids = [row['id'] for row in self.expected_docs]
rows_ids = [str(row['id']) for row in self.results['rows']]
if expected_ids == rows_ids:
self.state = FINISHED
self.set_result({"passed": True,
"errors": []})
else:
if self.current_retry == self.retries:
self.state = FINISHED
self.set_result({"passed": False,
"errors": [],
"results": self.results})
else:
self.state = EXECUTING
task_manager.schedule(self, 10)
else:
self.state = FINISHED
self.set_result({"passed": True,
"errors": []})
# catch and set all unexpected exceptions
except Exception as e:
self.state = FINISHED
self.log.error("Exception caught %s" % str(e))
self.set_exception(e)
self.set_result({"passed": False,
"errors": str(e)})
class ModifyFragmentationConfigTask(Task):
"""
Given a config dictionary, attempt to configure the auto-compaction /
fragmentation settings. Any keys supplied in <config> override the
default settings for the given <bucket>.
"""
def __init__(self, server, config=None, bucket="default"):
Task.__init__(self, "modify_frag_config_task")
self.server = server
self.config = {"parallelDBAndVC": "false",
"dbFragmentThreshold": None,
"viewFragmntThreshold": None,
"dbFragmentThresholdPercentage": 100,
"viewFragmntThresholdPercentage": 100,
"allowedTimePeriodFromHour": None,
"allowedTimePeriodFromMin": None,
"allowedTimePeriodToHour": None,
"allowedTimePeriodToMin": None,
"allowedTimePeriodAbort": None,
"autoCompactionDefined": "true"}
self.bucket = bucket
for key in config:
self.config[key] = config[key]
def execute(self, task_manager):
try:
rest = RestConnection(self.server)
rest.set_auto_compaction(parallelDBAndVC=self.config["parallelDBAndVC"],
dbFragmentThreshold=self.config["dbFragmentThreshold"],
viewFragmntThreshold=self.config["viewFragmntThreshold"],
dbFragmentThresholdPercentage=self.config["dbFragmentThresholdPercentage"],
viewFragmntThresholdPercentage=self.config["viewFragmntThresholdPercentage"],
allowedTimePeriodFromHour=self.config["allowedTimePeriodFromHour"],
allowedTimePeriodFromMin=self.config["allowedTimePeriodFromMin"],
allowedTimePeriodToHour=self.config["allowedTimePeriodToHour"],
allowedTimePeriodToMin=self.config["allowedTimePeriodToMin"],
allowedTimePeriodAbort=self.config["allowedTimePeriodAbort"],
bucket=self.bucket)
self.state = CHECKING
task_manager.schedule(self, 10)
except Exception as e:
self.state = FINISHED
self.set_exception(e)
def check(self, task_manager):
try:
rest = RestConnection(self.server)
# verify server accepted settings
content = rest.get_bucket_json(self.bucket)
if content["autoCompactionSettings"] == False:
self.set_exception(Exception("Failed to set auto compaction settings"))
else:
# retrieved compaction settings
self.set_result(True)
self.state = FINISHED
except GetBucketInfoFailed as e:
# subsequent query failed! exit
self.state = FINISHED
self.set_exception(e)
# catch and set all unexpected exceptions
except Exception as e:
self.state = FINISHED
self.set_unexpected_exception(e)
class MonitorActiveTask(Task):
"""
Monitor a task that appears in the _active_tasks API (e.g. indexer progress,
bucket compaction, view compaction).
The execute step inspects _active_tasks and identifies the task to monitor by
its type ('indexer', 'bucket_compaction' or 'view_compaction') and its target
value (for example "_design/ddoc" for indexing, bucket "default" for bucket
compaction, or "_design/dev_view" for view compaction).
wait_task=True means the task must be found on the first attempt; otherwise
the task is assumed to have already completed (reached 100%).
The check step monitors the task identified in execute and compares the new
progress value with the previous one.
The task fails if:
    progress does not change for num_iterations iterations, or
    the new progress value is lower than the previous one.
The task passes and completes if:
    progress reaches the wait_progress value, or
    the task can no longer be found (assumed to be finished).
"""
def __init__(self, server, type, target_value, wait_progress=100, num_iterations=100, wait_task=True):
Task.__init__(self, "monitor_active_task")
self.server = server
self.type = type # indexer or bucket_compaction
self.target_key = ""
if self.type == 'indexer':
pass # no special actions
elif self.type == "bucket_compaction":
self.target_key = "original_target"
elif self.type == "view_compaction":
self.target_key = "designDocument"
else:
raise Exception("type %s is not defined!" % self.type)
self.target_value = target_value
self.wait_progress = wait_progress
self.num_iterations = num_iterations
self.wait_task = wait_task
self.rest = RestConnection(self.server)
self.current_progress = None
self.current_iter = 0
self.task = None
def execute(self, task_manager):
tasks = self.rest.active_tasks()
for task in tasks:
if task["type"] == self.type and ((
self.target_key == "designDocument" and task[
self.target_key] == self.target_value) or (
self.target_key == "original_target" and task[self.target_key][
"type"] == self.target_value) or (
self.type == 'indexer')):
self.current_progress = task["progress"]
self.task = task
self.log.info("monitoring active task was found:" + str(task))
self.log.info("progress %s:%s - %s %%" % (self.type, self.target_value, task["progress"]))
if self.current_progress >= self.wait_progress:
self.log.info("expected progress was gotten: %s" % self.current_progress)
self.state = FINISHED
self.set_result(True)
else:
self.state = CHECKING
task_manager.schedule(self, 5)
return
if self.wait_task:
# task is not performed
self.state = FINISHED
self.log.error("expected active task %s:%s was not found" % (self.type, self.target_value))
self.set_result(False)
else:
# task was completed
self.state = FINISHED
self.log.info("task for monitoring %s:%s completed" % (self.type, self.target_value))
self.set_result(True)
def check(self, task_manager):
tasks = self.rest.active_tasks()
for task in tasks:
# if task still exists
if task == self.task:
self.log.info("progress %s:%s - %s %%" % (self.type, self.target_value, task["progress"]))
# reached expected progress
if task["progress"] >= self.wait_progress:
self.state = FINISHED
self.log.error("progress was reached %s" % self.wait_progress)
self.set_result(True)
# progress value was changed
if task["progress"] > self.current_progress:
self.current_progress = task["progress"]
self.current_iter = 0
task_manager.schedule(self, 2)
# progress value was not changed
elif task["progress"] == self.current_progress:
if self.current_iter < self.num_iterations:
time.sleep(2)
self.current_iter += 1
task_manager.schedule(self, 2)
# num iteration with the same progress = num_iterations
else:
self.state = FINISHED
self.log.error(
"progress for active task was not changed during %s sec" % (2 * self.num_iterations))
self.set_result(False)
else:
self.state = FINISHED
self.log.error("progress for task %s:%s changed direction!" % (self.type, self.target_value))
self.set_result(False)
# the task was found and handled above; do not fall through to the
# "task was completed" branch below
return
# task was completed
self.state = FINISHED
self.log.info("task %s:%s was completed" % (self.type, self.target_value))
self.set_result(True)
class MonitorViewFragmentationTask(Task):
"""
Monitor the fragmentation accumulating for a given design_doc.
The execute stage only performs preliminary sanity checking of values and
environment. The check stage reads the index files across all nodes and
calculates the total fragmentation caused by the views within the design_doc.
Note: if auto-compaction is enabled and the requested fragmentation_value is
higher than the level at which auto-compaction kicks in, a warning is logged;
a lower value should be used, since the target may otherwise never be reached
and monitoring would continue indefinitely.
"""
def __init__(self, server, design_doc_name, fragmentation_value=10, bucket="default"):
Task.__init__(self, "monitor_frag_task")
self.server = server
self.bucket = bucket
self.fragmentation_value = fragmentation_value
self.design_doc_name = design_doc_name
def execute(self, task_manager):
# sanity check of fragmentation value
if self.fragmentation_value < 0 or self.fragmentation_value > 100:
err_msg = \
"Invalid value for fragmentation %d" % self.fragmentation_value
self.state = FINISHED
self.set_exception(Exception(err_msg))
# warning if autocompaction is less than <fragmentation_value>
try:
auto_compact_percentage = self._get_current_auto_compaction_percentage()
if auto_compact_percentage != "undefined" and auto_compact_percentage < self.fragmentation_value:
self.log.warning("Auto compaction is set to %s. Therefore fragmentation_value %s may not be reached" % (
auto_compact_percentage, self.fragmentation_value))
self.state = CHECKING
task_manager.schedule(self, 5)
except GetBucketInfoFailed as e:
self.state = FINISHED
self.set_exception(e)
# catch and set all unexpected exceptions
except Exception as e:
self.state = FINISHED
self.set_unexpected_exception(e)
def _get_current_auto_compaction_percentage(self):
""" check at bucket level and cluster level for compaction percentage """
auto_compact_percentage = None
rest = RestConnection(self.server)
content = rest.get_bucket_json(self.bucket)
if content["autoCompactionSettings"] == False:
# try to read cluster level compaction settings
content = rest.cluster_status()
auto_compact_percentage = \
content["autoCompactionSettings"]["viewFragmentationThreshold"]["percentage"]
return auto_compact_percentage
def check(self, task_manager):
rest = RestConnection(self.server)
new_frag_value = MonitorViewFragmentationTask. \
calc_ddoc_fragmentation(rest, self.design_doc_name, bucket=self.bucket)
self.log.info("%s: current amount of fragmentation = %d" % (self.design_doc_name,
new_frag_value))
if new_frag_value > self.fragmentation_value:
self.state = FINISHED
self.set_result(True)
else:
# try again
task_manager.schedule(self, 2)
@staticmethod
def aggregate_ddoc_info(rest, design_doc_name, bucket="default", with_rebalance=False):
nodes = rest.node_statuses()
info = []
for node in nodes:
server_info = {"ip": node.ip,
"port": node.port,
"username": rest.username,
"password": rest.password}
rest = RestConnection(server_info)
status = False
try:
status, content = rest.set_view_info(bucket, design_doc_name)
except Exception as e:
print((str(e)))
if "Error occured reading set_view _info" in str(e) and with_rebalance:
print(("node {0} {1} is not ready yet?: {2}".format(
node.id, node.port, str(e))))
else:
raise e
if status:
info.append(content)
return info
@staticmethod
def calc_ddoc_fragmentation(rest, design_doc_name, bucket="default", with_rebalance=False):
total_disk_size = 0
total_data_size = 0
total_fragmentation = 0
nodes_ddoc_info = \
MonitorViewFragmentationTask.aggregate_ddoc_info(rest,
design_doc_name,
bucket, with_rebalance)
total_disk_size = sum([content['disk_size'] for content in nodes_ddoc_info])
total_data_size = sum([content['data_size'] for content in nodes_ddoc_info])
if total_disk_size > 0 and total_data_size > 0:
total_fragmentation = \
(total_disk_size - total_data_size) / float(total_disk_size) * 100
return total_fragmentation
class ViewCompactionTask(Task):
"""
Executes view compaction for a given design doc. This is technically view
compaction as represented by the API, and also because the fragmentation is
generated by the keys emitted by the map/reduce functions within the views.
The task checks that the compaction history for the design doc is incremented
and whether any work was actually done.
"""
def __init__(self, server, design_doc_name, bucket="default", with_rebalance=False):
Task.__init__(self, "view_compaction_task")
self.server = server
self.bucket = bucket
self.design_doc_name = design_doc_name
self.ddoc_id = "_design%2f" + design_doc_name
self.compaction_revision = 0
self.precompacted_fragmentation = 0
self.with_rebalance = with_rebalance
self.rest = RestConnection(self.server)
def execute(self, task_manager):
try:
self.compaction_revision, self.precompacted_fragmentation = \
self._get_compaction_details()
self.log.info("{0}: stats compaction before triggering it: ({1},{2})".
format(self.design_doc_name,
self.compaction_revision, self.precompacted_fragmentation))
if self.precompacted_fragmentation == 0:
self.log.info("%s: There is nothing to compact, fragmentation is 0" %
self.design_doc_name)
self.set_result(False)
self.state = FINISHED
return
self.rest.ddoc_compaction(self.ddoc_id, self.bucket)
self.state = CHECKING
task_manager.schedule(self, 2)
except (CompactViewFailed, SetViewInfoNotFound) as ex:
self.state = FINISHED
self.set_exception(ex)
# catch and set all unexpected exceptions
except Exception as e:
self.state = FINISHED
self.set_unexpected_exception(e)
# verify compaction history incremented and some defraging occurred
def check(self, task_manager):
try:
_compaction_running = self._is_compacting()
new_compaction_revision, fragmentation = self._get_compaction_details()
self.log.info("{0}: stats compaction:revision and fragmentation: ({1},{2})".
format(self.design_doc_name,
new_compaction_revision, fragmentation))
if new_compaction_revision == self.compaction_revision and _compaction_running:
# compaction ran successfully but compaction was not changed
# perhaps we are still compacting
self.log.info("design doc {0} is compacting".format(self.design_doc_name))
task_manager.schedule(self, 3)
elif new_compaction_revision > self.compaction_revision or \
self.precompacted_fragmentation > fragmentation:
self.log.info(
"{1}: compactor was run, compaction revision was changed on {0}".format(new_compaction_revision,
self.design_doc_name))
frag_val_diff = fragmentation - self.precompacted_fragmentation
self.log.info("%s: fragmentation went from %d to %d" % \
(self.design_doc_name,
self.precompacted_fragmentation, fragmentation))
if frag_val_diff > 0:
# compaction ran successfully but datasize still same
# perhaps we are still compacting
if self._is_compacting():
task_manager.schedule(self, 2)
self.log.info(
"compaction was completed, but fragmentation value {0} is more than before compaction {1}".
format(fragmentation, self.precompacted_fragmentation))
# probably we already compacted, but no work needed to be done
self.set_result(self.with_rebalance)
else:
self.set_result(True)
self.state = FINISHED
else:
# Sometimes the compacting is not started immediately
for i in range(17):
time.sleep(3)
if self._is_compacting():
task_manager.schedule(self, 2)
return
else:
new_compaction_revision, fragmentation = self._get_compaction_details()
self.log.info("{2}: stats compaction: ({0},{1})".
format(new_compaction_revision, fragmentation,
self.design_doc_name))
# case of rebalance when with concurrent updates it's possible that
# compaction value has not changed significantly
if new_compaction_revision > self.compaction_revision and self.with_rebalance:
self.log.warning("the compaction revision was increased,\
but the actual fragmentation value has not changed significantly")
self.set_result(True)
self.state = FINISHED
return
else:
continue
# print details in case of failure
self.log.info("design doc {0} is compacting:{1}".format(self.design_doc_name, self._is_compacting()))
new_compaction_revision, fragmentation = self._get_compaction_details()
self.log.error("stats compaction still: ({0},{1})".
format(new_compaction_revision, fragmentation))
status, content = self.rest.set_view_info(self.bucket, self.design_doc_name)
stats = content["stats"]
self.log.warning("general compaction stats:{0}".format(stats))
self.set_exception(Exception("Check system logs, looks like compaction failed to start"))
except (SetViewInfoNotFound) as ex:
self.state = FINISHED
self.set_exception(ex)
# catch and set all unexpected exceptions
except Exception as e:
self.state = FINISHED
self.set_unexpected_exception(e)
def _get_compaction_details(self):
status, content = self.rest.set_view_info(self.bucket, self.design_doc_name)
curr_no_of_compactions = content["stats"]["compactions"]
curr_ddoc_fragemtation = \
MonitorViewFragmentationTask.calc_ddoc_fragmentation(self.rest, self.design_doc_name, self.bucket,
self.with_rebalance)
return (curr_no_of_compactions, curr_ddoc_fragemtation)
def _is_compacting(self):
status, content = self.rest.set_view_info(self.bucket, self.design_doc_name)
return content["compact_running"] == True
'''Task class for failover. This task only fails over nodes; it does not
rebalance, since there is already a separate task for that.'''
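# Illustrative usage sketch (placeholder names): fail over one node gracefully
# and leave the rebalance to a separate rebalance task.
#
#   failover = FailoverTask(servers=cluster_nodes, to_failover=[node_to_fail],
#                           graceful=True)
#   task_manager.schedule(failover)
#   failover.result()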
class FailoverTask(Task):
def __init__(self, servers, to_failover=[], wait_for_pending=0, graceful=False, use_hostnames=False):
Task.__init__(self, "failover_task")
self.servers = servers
self.to_failover = to_failover
self.graceful = graceful
self.wait_for_pending = wait_for_pending
self.use_hostnames = use_hostnames
def execute(self, task_manager):
try:
self._failover_nodes(task_manager)
self.log.info("{0} seconds sleep after failover, for nodes to go pending....".format(self.wait_for_pending))
time.sleep(self.wait_for_pending)
self.state = FINISHED
self.set_result(True)
except FailoverFailedException as e:
self.state = FINISHED
self.set_exception(e)
except Exception as e:
self.state = FINISHED
self.set_unexpected_exception(e)
def _failover_nodes(self, task_manager):
rest = RestConnection(self.servers[0])
# call REST fail_over for the nodes to be failed over
for server in self.to_failover:
for node in rest.node_statuses():
if (server.hostname if self.use_hostnames else server.ip) == node.ip and int(server.port) == int(
node.port):
self.log.info("Failing over {0}:{1} with graceful={2}".format(node.ip, node.port, self.graceful))
rest.fail_over(node.id, self.graceful)
class GenerateExpectedViewResultsTask(Task):
"""
Task that produces the set of keys expected to be returned by querying the
provided <view>. The results can later be passed to ViewQueryVerificationTask
and compared with the actual results from the server.
Currently only views with map functions that emit a single string or integer
key are supported.
NOTE: this task is meant to be used with doc generators that produce JSON,
such as documentgenerator.DocumentGenerator.
"""
def __init__(self, doc_generators, view, query):
Task.__init__(self, "generate_view_query_results_task")
self.doc_generators = doc_generators
self.view = view
self.query = query
self.emitted_rows = []
self.is_reduced = self.view.red_func is not None and (('reduce' in query and query['reduce'] == "true") or \
(not 'reduce' in query))
self.custom_red_fn = self.is_reduced and not self.view.red_func in ['_count', '_sum', '_stats']
self.type_filter = None
def execute(self, task_manager):
try:
self.generate_emitted_rows()
self.filter_emitted_rows()
self.log.info("Finished generating expected query results")
self.state = CHECKING
task_manager.schedule(self)
except Exception as ex:
self.state = FINISHED
self.set_unexpected_exception(ex)
traceback.print_exc()
def check(self, task_manager):
self.state = FINISHED
self.set_result(self.emitted_rows)
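# The regex-based parser below only understands simple map functions whose keys
# come directly from top-level document fields, e.g. (illustrative examples):
#   function (doc) { emit(doc.join_yr, doc.name); }
#   function (doc) { emit([doc.join_yr, doc.join_mo], null); }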
def generate_emitted_rows(self):
emit_key = re.sub(r',.*', '', re.sub(r'.*emit\([ +]?doc\.', '', self.view.map_func))
emit_value = None
if re.match(r'.*emit\([ +]?\[doc\.*', self.view.map_func):
emit_key = re.sub(r'],.*', '', re.sub(r'.*emit\([ +]?\[doc\.', '', self.view.map_func))
emit_key = emit_key.split(", doc.")
if re.match(r'.*new RegExp\("\^.*', self.view.map_func):
filter_what = re.sub(r'.*new RegExp\(.*\)*doc\.', '',
re.sub(r'\.match\(.*', '', self.view.map_func))
self.type_filter = {"filter_what": filter_what,
"filter_expr": re.sub(r'[ +]?"\);.*', '',
re.sub(r'.*.new RegExp\("\^', '', self.view.map_func))}
if self.is_reduced and self.view.red_func != "_count":
emit_value = re.sub(r'\);.*', '', re.sub(r'.*emit\([ +]?\[*],[ +]?doc\.', '', self.view.map_func))
if self.view.map_func.count("[") <= 1:
emit_value = re.sub(r'\);.*', '', re.sub(r'.*emit\([ +]?.*,[ +]?doc\.', '', self.view.map_func))
for doc_gen in self.doc_generators:
query_doc_gen = copy.deepcopy(doc_gen)
while query_doc_gen.has_next():
_id, val = next(query_doc_gen)
val = json.loads(val)
if isinstance(emit_key, list):
val_emit_key = []
for ek in emit_key:
val_emit_key.append(val[ek])
else:
val_emit_key = val[emit_key]
if self.type_filter:
filter_expr = r'\A{0}.*'.format(self.type_filter["filter_expr"])
if re.match(filter_expr, val[self.type_filter["filter_what"]]) is None:
continue
if isinstance(val_emit_key, str):
val_emit_key = val_emit_key.encode('utf-8')
if not self.is_reduced or self.view.red_func == "_count" or self.custom_red_fn:
self.emitted_rows.append({'id': _id, 'key': val_emit_key})
else:
val_emit_value = val[emit_value]
self.emitted_rows.append({'value': val_emit_value, 'key': val_emit_key, 'id': _id, })
def filter_emitted_rows(self):
query = self.query
# parse query flags
descending_set = 'descending' in query and query['descending'] == "true"
startkey_set, endkey_set = 'startkey' in query, 'endkey' in query
startkey_docid_set, endkey_docid_set = 'startkey_docid' in query, 'endkey_docid' in query
inclusive_end_false = 'inclusive_end' in query and query['inclusive_end'] == "false"
key_set = 'key' in query
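# Example of a query dict these filters understand (values are strings exactly
# as they would be sent to the REST API; illustrative only):
#   {"startkey": '"a"', "endkey": '"m"', "inclusive_end": "false",
#    "descending": "true", "skip": "0", "limit": "10"}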
# sort expected results to match view results
expected_rows = sorted(self.emitted_rows,
key=cmp_to_key(lambda a, b: GenerateExpectedViewResultsTask.cmp_result_rows(a, b)),
reverse=descending_set)
# filter rows according to query flags
if startkey_set:
start_key = query['startkey']
if isinstance(start_key, str) and start_key.find('"') == 0:
start_key = start_key[1:-1]
if isinstance(start_key, str) and start_key.find('[') == 0:
start_key = start_key[1:-1].split(',')
start_key = [int(x) if x != 'null' else 0 for x in start_key]
else:
start_key = expected_rows[0]['key']
if isinstance(start_key, str) and start_key.find('"') == 0:
start_key = start_key[1:-1]
if endkey_set:
end_key = query['endkey']
if isinstance(end_key, str) and end_key.find('"') == 0:
end_key = end_key[1:-1]
if isinstance(end_key, str) and end_key.find('[') == 0:
end_key = end_key[1:-1].split(',')
end_key = [int(x) if x != 'null' else None for x in end_key]
else:
end_key = expected_rows[-1]['key']
if isinstance(end_key, str) and end_key.find('"') == 0:
end_key = end_key[1:-1]
if descending_set:
start_key, end_key = end_key, start_key
if startkey_set or endkey_set:
if isinstance(start_key, str):
start_key = start_key.strip("\"")
if isinstance(end_key, str):
end_key = end_key.strip("\"")
expected_rows = [row for row in expected_rows if row['key'] >= start_key and row['key'] <= end_key]
if key_set:
key_ = query['key']
if isinstance(key_, str) and key_.find('[') == 0:
key_ = key_[1:-1].split(',')
key_ = [int(x) if x != 'null' else None for x in key_]
start_key, end_key = key_, key_
expected_rows = [row for row in expected_rows if row['key'] == key_]
if descending_set:
startkey_docid_set, endkey_docid_set = endkey_docid_set, startkey_docid_set
if startkey_docid_set:
if not startkey_set:
self.log.warning("Ignoring startkey_docid filter when startkey is not set")
else:
do_filter = False
if descending_set:
if endkey_docid_set:
startkey_docid = query['endkey_docid']
do_filter = True
else:
startkey_docid = query['startkey_docid']
do_filter = True
if do_filter:
expected_rows = \
[row for row in expected_rows if row['id'] >= startkey_docid or row['key'] > start_key]
if endkey_docid_set:
if not endkey_set:
self.log.warning("Ignoring endkey_docid filter when endkey is not set")
else:
do_filter = False
if descending_set:
if endkey_docid_set:
endkey_docid = query['startkey_docid']
do_filter = True
else:
endkey_docid = query['endkey_docid']
do_filter = True
if do_filter:
expected_rows = \
[row for row in expected_rows if row['id'] <= endkey_docid or row['key'] < end_key]
if inclusive_end_false:
if endkey_set and endkey_docid_set:
# remove all keys that match endkey
expected_rows = [row for row in expected_rows if
row['id'] < query['endkey_docid'] or row['key'] < end_key]
elif endkey_set:
expected_rows = [row for row in expected_rows if row['key'] != end_key]
if self.is_reduced:
groups = {}
gr_level = None
if not 'group' in query and \
not 'group_level' in query:
if len(expected_rows) == 0:
expected_rows = []
self.emitted_rows = expected_rows
return
if self.view.red_func == '_count':
groups[None] = len(expected_rows)
elif self.view.red_func == '_sum':
groups[None] = 0
groups[None] = math.fsum([row['value']
for row in expected_rows])
elif self.view.red_func == '_stats':
groups[None] = {}
values = [row['value'] for row in expected_rows]
groups[None]['count'] = len(expected_rows)
groups[None]['sum'] = math.fsum(values)
groups[None]['max'] = max(values)
groups[None]['min'] = min(values)
groups[None]['sumsqr'] = math.fsum([x * x for x in values])
elif self.custom_red_fn:
custom_action = re.sub(r'.*return[ +]', '', re.sub(r'.*return[ +]', '', self.view.red_func))
if custom_action.find('String') != -1:
groups[None] = str(len(expected_rows))
elif custom_action.find('-') != -1:
groups[None] = -len(expected_rows)
elif 'group' in query and query['group'] == 'true':
if not 'group_level' in query:
gr_level = len(expected_rows) - 1
elif 'group_level' in query:
gr_level = int(query['group_level'])
if gr_level is not None:
for row in expected_rows:
key = str(row['key'][:gr_level])
if not key in groups:
if self.view.red_func == '_count':
groups[key] = 1
elif self.view.red_func == '_sum':
groups[key] = row['value']
elif self.view.red_func == '_stats':
groups[key] = {}
groups[key]['count'] = 1
groups[key]['sum'] = row['value']
groups[key]['max'] = row['value']
groups[key]['min'] = row['value']
groups[key]['sumsqr'] = row['value'] ** 2
else:
if self.view.red_func == '_count':
groups[key] += 1
elif self.view.red_func == '_sum':
groups[key] += row['value']
elif self.view.red_func == '_stats':
groups[key]['count'] += 1
groups[key]['sum'] += row['value']
groups[key]['max'] = max(row['value'], groups[key]['max'])
groups[key]['min'] = min(row['value'], groups[key]['min'])
groups[key]['sumsqr'] += row['value'] ** 2
expected_rows = []
for group, value in groups.items():
if isinstance(group, str) and group.find("[") == 0:
group = group[1:-1].split(",")
group = [int(k) for k in group]
expected_rows.append({"key": group, "value": value})
expected_rows = sorted(expected_rows,
key=cmp_to_key(lambda a, b: GenerateExpectedViewResultsTask.cmp_result_rows(a, b)),
reverse=descending_set)
if 'skip' in query:
expected_rows = expected_rows[(int(query['skip'])):]
if 'limit' in query:
expected_rows = expected_rows[:(int(query['limit']))]
self.emitted_rows = expected_rows
@staticmethod
def cmp_result_rows(x, y):
rc = len(DeepDiff(x['key'], y['key'], ignore_order=True))
if rc == 0:
# sort by id is tie breaker
rc = len(DeepDiff(x['id'], y['id'], ignore_order=True))
return rc
class ViewQueryVerificationTask(Task):
"""
* query with stale=false
* check for duplicates
* check for missing docs
* check memcached
* check couch
"""
def __init__(self, design_doc_name, view_name, query, expected_rows, server=None,
num_verified_docs=20, bucket="default", query_timeout=120, results=None,
config=None):
Task.__init__(self, "view_query_verification_task")
self.server = server
self.design_doc_name = design_doc_name
self.view_name = view_name
self.query = query
self.expected_rows = expected_rows
self.num_verified_docs = num_verified_docs
self.bucket = bucket
self.query_timeout = query_timeout
self.results = results
self.config = {}
if config:
for key in config:
self.config[key] = config[key]
def execute(self, task_manager):
if not self.results:
rest = RestConnection(self.server)
try:
# query for full view results
self.query["stale"] = "false"
self.query["reduce"] = "false"
self.query["include_docs"] = "true"
self.results = rest.query_view(self.design_doc_name, self.view_name,
self.bucket, self.query, timeout=self.query_timeout)
except QueryViewException as e:
self.set_exception(e)
self.state = FINISHED
msg = "Checking view query results: (%d keys expected) vs (%d keys returned)" % \
(len(self.expected_rows), len(self.results['rows']))
self.log.info(msg)
self.state = CHECKING
task_manager.schedule(self)
def check(self, task_manager):
err_infos = []
rc_status = {"passed": False,
"errors": err_infos} # array of dicts with keys 'msg' and 'details'
try:
# create verification id lists
expected_ids = [row['id'] for row in self.expected_rows]
couch_ids = [str(row['id']) for row in self.results['rows']]
# check results
self.check_for_duplicate_ids(expected_ids, couch_ids, err_infos)
self.check_for_missing_ids(expected_ids, couch_ids, err_infos)
self.check_for_extra_ids(expected_ids, couch_ids, err_infos)
self.check_for_value_corruption(err_infos)
# check for errors
if len(rc_status["errors"]) == 0:
rc_status["passed"] = True
self.state = FINISHED
self.set_result(rc_status)
except Exception as ex:
self.state = FINISHED
try:
max_example_result = min(100, len(self.results['rows']) - 1)
self.log.info("FIRST %s RESULTS for view %s : %s" % (max_example_result, self.view_name,
self.results['rows'][:max_example_result]))
except Exception as inner_ex:
self.log.error(inner_ex)
self.set_result({"passed": False,
"errors": "ERROR: %s" % ex})
def check_for_duplicate_ids(self, expected_ids, couch_ids, err_infos):
extra_id_set = set(couch_ids) - set(expected_ids)
seen = set()
for id in couch_ids:
if id in seen and id not in extra_id_set:
extra_id_set.add(id)
else:
seen.add(id)
if len(extra_id_set) > 0:
# extra/duplicate id verification
dupe_rows = [row for row in self.results['rows'] if row['id'] in extra_id_set]
err = {"msg": "duplicate rows found in query results",
"details": dupe_rows}
err_infos.append(err)
def check_for_missing_ids(self, expected_ids, couch_ids, err_infos):
missing_id_set = set(expected_ids) - set(couch_ids)
if len(missing_id_set) > 0:
missing_id_errors = self.debug_missing_items(missing_id_set)
if len(missing_id_errors) > 0:
err = {"msg": "missing ids from memcached",
"details": missing_id_errors}
err_infos.append(err)
def check_for_extra_ids(self, expected_ids, couch_ids, err_infos):
extra_id_set = set(couch_ids) - set(expected_ids)
if len(extra_id_set) > 0:
err = {"msg": "extra ids from memcached",
"details": extra_id_set}
err_infos.append(err)
def check_for_value_corruption(self, err_infos):
if self.num_verified_docs > 0:
doc_integrity_errors = self.include_doc_integrity()
if len(doc_integrity_errors) > 0:
err = {"msg": "missmatch in document values",
"details": doc_integrity_errors}
err_infos.append(err)
def debug_missing_items(self, missing_id_set):
rest = RestConnection(self.server)
client = KVStoreAwareSmartClient(rest, self.bucket)
missing_id_errors = []
# debug missing documents
for doc_id in list(missing_id_set)[:self.num_verified_docs]:
# attempt to retrieve doc from memcached
mc_item = client.mc_get_full(doc_id)
if mc_item == None:
missing_id_errors.append("document %s missing from memcached" % (doc_id))
# attempt to retrieve doc from disk
else:
num_vbuckets = len(rest.get_vbuckets(self.bucket))
doc_meta = client.get_doc_metadata(num_vbuckets, doc_id)
if (doc_meta != None):
if (doc_meta['key_valid'] != 'valid'):
msg = "Error expected in results for key with invalid state %s" % doc_meta
missing_id_errors.append(msg)
else:
msg = "query doc_id: %s doesn't exist in bucket: %s" % \
(doc_id, self.bucket)
missing_id_errors.append(msg)
if (len(missing_id_errors) == 0):
msg = "view engine failed to index doc [%s] in query: %s" % (doc_id, self.query)
missing_id_errors.append(msg)
return missing_id_errors
def include_doc_integrity(self):
rest = RestConnection(self.server)
client = KVStoreAwareSmartClient(rest, self.bucket)
doc_integrity_errors = []
if 'doc' not in self.results['rows'][0]:
return doc_integrity_errors
exp_verify_set = [row['doc'] for row in \
self.results['rows'][:self.num_verified_docs]]
for view_doc in exp_verify_set:
doc_id = str(view_doc['_id'])
mc_item = client.mc_get_full(doc_id)
if mc_item is not None:
mc_doc = json.loads(mc_item["value"])
# compare doc content
for key in list(mc_doc.keys()):
if (mc_doc[key] != view_doc[key]):
err_msg = \
"error verifying document id %s: retrieved value %s expected %s \n" % \
(doc_id, mc_doc[key], view_doc[key])
doc_integrity_errors.append(err_msg)
else:
doc_integrity_errors.append("doc_id %s could not be retrieved for verification \n" % doc_id)
return doc_integrity_errors
class BucketFlushTask(Task):
def __init__(self, server, bucket="default"):
Task.__init__(self, "bucket_flush_task")
self.server = server
self.bucket = bucket
if isinstance(bucket, Bucket):
self.bucket = bucket.name
def execute(self, task_manager):
try:
rest = RestConnection(self.server)
if rest.flush_bucket(self.bucket):
self.state = CHECKING
task_manager.schedule(self)
else:
self.state = FINISHED
self.set_result(False)
except BucketFlushFailed as e:
self.state = FINISHED
self.set_exception(e)
except Exception as e:
self.state = FINISHED
self.set_unexpected_exception(e)
def check(self, task_manager):
try:
# check if after flush the vbuckets are ready
if BucketOperationHelper.wait_for_vbuckets_ready_state(self.server, self.bucket):
self.set_result(True)
else:
self.log.error("Unable to reach bucket {0} on server {1} after flush".format(self.bucket, self.server))
self.set_result(False)
self.state = FINISHED
except Exception as e:
self.state = FINISHED
self.set_unexpected_exception(e)
class MonitorDBFragmentationTask(Task):
"""
Monitor the fragmentation accumulating for a given bucket.
Note: if auto-compaction is enabled and the requested fragmentation_value is
higher than the level at which auto-compaction kicks in, a warning is logged;
a lower value should be used, since the target may otherwise never be reached
and monitoring would continue indefinitely.
"""
def __init__(self, server, fragmentation_value=10, bucket="default", get_view_frag=False):
Task.__init__(self, "monitor_frag_db_task")
self.server = server
self.bucket = bucket
self.fragmentation_value = fragmentation_value
self.get_view_frag = get_view_frag
def execute(self, task_manager):
# sanity check of fragmentation value
if self.fragmentation_value < 0 or self.fragmentation_value > 100:
err_msg = \
"Invalid value for fragmentation %d" % self.fragmentation_value
self.state = FINISHED
self.set_exception(Exception(err_msg))
self.state = CHECKING
task_manager.schedule(self, 5)
def check(self, task_manager):
try:
rest = RestConnection(self.server)
stats = rest.fetch_bucket_stats(bucket=self.bucket)
if self.get_view_frag:
new_frag_value = stats["op"]["samples"]["couch_views_fragmentation"][-1]
self.log.info("Current amount of views fragmentation = %d" % new_frag_value)
else:
new_frag_value = stats["op"]["samples"]["couch_docs_fragmentation"][-1]
self.log.info("current amount of docs fragmentation = %d" % new_frag_value)
if new_frag_value >= self.fragmentation_value:
self.state = FINISHED
self.set_result(True)
else:
# try again
task_manager.schedule(self, 2)
except Exception as ex:
self.state = FINISHED
self.set_result(False)
self.set_exception(ex)
class CBRecoveryTask(Task):
def __init__(self, src_server, dest_server, bucket_src='', bucket_dest='', username='', password='',
username_dest='', password_dest='', verbose=False, wait_completed=True):
Task.__init__(self, "cbrecovery_task")
self.src_server = src_server
self.dest_server = dest_server
self.bucket_src = bucket_src
self.bucket_dest = bucket_dest
if isinstance(bucket_src, Bucket):
self.bucket_src = bucket_src.name
if isinstance(bucket_dest, Bucket):
self.bucket_dest = bucket_dest.name
self.username = username
self.password = password
self.username_dest = username_dest
self.password_dest = password_dest
self.verbose = verbose
self.wait_completed = wait_completed
try:
self.shell = RemoteMachineShellConnection(src_server)
self.info = self.shell.extract_remote_info()
self.rest = RestConnection(dest_server)
except Exception as e:
self.log.error(e)
self.state = FINISHED
self.set_exception(e)
self.progress = {}
self.started = False
self.retries = 0
def execute(self, task_manager):
try:
if self.info.type.lower() == "linux":
command = "/opt/couchbase/bin/cbrecovery "
elif self.info.type.lower() == "windows":
command = "C:/Program\ Files/Couchbase/Server/bin/cbrecovery.exe "
src_url = "http://{0}:{1}".format(self.src_server.ip, self.src_server.port)
dest_url = "http://{0}:{1}".format(self.dest_server.ip, self.dest_server.port)
command += "{0} {1} ".format(src_url, dest_url)
if self.bucket_src:
command += "-b {0} ".format(self.bucket_src)
if self.bucket_dest:
command += "-B {0} ".format(self.bucket_dest)
if self.username:
command += "-u {0} ".format(self.username)
if self.password:
command += "-p {0} ".format(self.password)
if self.username_dest:
command += "-U {0} ".format(self.username_dest)
if self.password_dest:
command += "-P {0} ".format(self.password_dest)
if self.verbose:
command += " -v "
transport = self.shell._ssh_client.get_transport()
transport.set_keepalive(1)
self.chan = transport.open_session()
self.chan.settimeout(10 * 60.0)
self.chan.exec_command(command)
self.log.info("command was executed: '{0}'".format(command))
self.state = CHECKING
task_manager.schedule(self, 20)
except Exception as e:
self.state = FINISHED
self.set_exception(e)
# keep the SSH channel alive while cbrecovery runs and drain any pending output
def checkChannel(self):
try:
if self.chan.exit_status_ready():
if self.chan.recv_ready():
output = self.chan.recv(1048576)
if self.chan.recv_stderr_ready():
error = self.chan.recv_stderr(1048576)
except socket.timeout:
print("SSH channel timeout exceeded.")
except Exception:
traceback.print_exc()
def check(self, task_manager):
self.checkChannel()
self.recovery_task = self.rest.get_recovery_task()
if self.recovery_task is not None:
if not self.started:
self.started = True
if not self.wait_completed:
progress = self.rest.get_recovery_progress(self.recovery_task["recoveryStatusURI"])
self.log.info("cbrecovery strarted with progress: {0}".format(progress))
self.log.info("will not wait for the end of the cbrecovery")
self.state = FINISHED
self.set_result(True)
progress = self.rest.get_recovery_progress(self.recovery_task["recoveryStatusURI"])
if progress == self.progress:
self.log.warning("cbrecovery progress was not changed")
if self.retries > 20:
self.shell.disconnect()
self.rest.print_UI_logs()
self.state = FINISHED
self.log.warning("ns_server_tasks: {0}".format(self.rest.ns_server_tasks()))
self.log.warning("cbrecovery progress: {0}".format(
self.rest.get_recovery_progress(self.recovery_task["recoveryStatusURI"])))
self.set_exception(CBRecoveryFailedException("cbrecovery hangs"))
return
self.retries += 1
task_manager.schedule(self, 20)
else:
self.progress = progress
self.log.info("cbrecovery progress: {0}".format(self.progress))
self.retries = 0
task_manager.schedule(self, 20)
else:
if self.started:
self.shell.disconnect()
self.log.info("cbrecovery completed succesfully")
self.state = FINISHED
self.set_result(True)
if self.retries > 5:
self.shell.disconnect()
self.rest.print_UI_logs()
self.state = FINISHED
self.log.warning("ns_server_tasks: {0}".format(self.rest.ns_server_tasks()))
self.set_exception(CBRecoveryFailedException("cbrecovery was not started"))
return
else:
self.retries += 1
task_manager.schedule(self, 20)
class CompactBucketTask(Task):
def __init__(self, server, bucket="default"):
Task.__init__(self, "bucket_compaction_task")
self.server = server
self.bucket = bucket
self.rest = RestConnection(server)
self.retries = 20
self.statuses = {}
# get the current count of compactions
nodes = self.rest.get_nodes()
self.compaction_count = {}
for node in nodes:
self.compaction_count[node.ip] = 0
def execute(self, task_manager):
try:
status = self.rest.compact_bucket(self.bucket)
self.state = CHECKING
except BucketCompactionException as e:
self.log.error("Bucket compaction failed for unknown reason")
self.set_exception(e)
self.state = FINISHED
self.set_result(False)
task_manager.schedule(self)
def check(self, task_manager):
# check bucket compaction status across all nodes
nodes = self.rest.get_nodes()
current_compaction_count = {}
for node in nodes:
current_compaction_count[node.ip] = 0
s = TestInputServer()
s.ip = node.ip
s.ssh_username = self.server.ssh_username
s.ssh_password = self.server.ssh_password
shell = RemoteMachineShellConnection(s)
res = shell.execute_cbstats("", "raw", keyname="kvtimings", vbid="")
for i in res[0]:
# check for lines that look like
# rw_0:compact_131072,262144: 8
if 'compact' in i:
current_compaction_count[node.ip] += int(i.split(':')[2])
if len(DeepDiff(current_compaction_count, self.compaction_count)) == 1:
# compaction count has increased
self.set_result(True)
self.state = FINISHED
else:
if self.retries > 0:
# retry
self.retries = self.retries - 1
task_manager.schedule(self, 10)
else:
# never detected a compaction task running
self.set_result(False)
self.state = FINISHED
def _get_disk_size(self):
stats = self.rest.fetch_bucket_stats(bucket=self.bucket)
total_disk_size = stats["op"]["samples"]["couch_total_disk_size"][-1]
self.log.info("Disk size is = %d" % total_disk_size)
return total_disk_size
class MonitorViewCompactionTask(ViewCompactionTask):
def __init__(self, server, design_doc_name, bucket="default", with_rebalance=False, frag_value=0):
ViewCompactionTask.__init__(self, server, design_doc_name, bucket, with_rebalance)
self.ddoc_id = "_design%2f" + design_doc_name
self.compaction_revision = 0
self.precompacted_fragmentation = 0
self.fragmentation_value = frag_value
self.rest = RestConnection(self.server)
def execute(self, task_manager):
try:
self.compaction_revision, self.precompacted_fragmentation = self._get_compaction_details()
self.log.info("{0}: stats compaction before triggering it: ({1},{2})".
format(self.design_doc_name, self.compaction_revision, self.precompacted_fragmentation))
self.disk_size = self._get_disk_size()
self.log.info("Disk Size Before Compaction {0}".format(self.disk_size))
if self.precompacted_fragmentation == 0:
self.log.warning("%s: There is nothing to compact, fragmentation is 0" % self.design_doc_name)
self.set_result(False)
self.state = FINISHED
elif self.precompacted_fragmentation < self.fragmentation_value:
self.log.info(
"{0}: Compaction is already done and there is nothing to compact, current fragmentation {1} is below the threshold {2}".
format(self.design_doc_name, self.precompacted_fragmentation, self.fragmentation_value))
self.compaction_revision, self.precompacted_fragmentation = self._get_compaction_details()
self.log.info("{0}: stats compaction before triggering it: ({1},{2})".
format(self.design_doc_name, self.compaction_revision, self.precompacted_fragmentation))
self.set_result(True)
self.state = FINISHED
return
self.state = CHECKING
task_manager.schedule(self, 2)
except (CompactViewFailed, SetViewInfoNotFound) as ex:
self.state = FINISHED
self.set_exception(ex)
# catch and set all unexpected exceptions
except Exception as e:
self.state = FINISHED
self.set_unexpected_exception(e)
# verify compaction history incremented and some defraging occurred
def check(self, task_manager):
try:
_compaction_running = self._is_compacting()
new_compaction_revision, fragmentation = self._get_compaction_details()
self.log.info("{0}: stats compaction:revision and fragmentation: ({1},{2})".
format(self.design_doc_name, new_compaction_revision, fragmentation))
curr_disk_size = self._get_disk_size()
self.log.info("Current Disk Size {0}".format(curr_disk_size))
if new_compaction_revision == self.compaction_revision and _compaction_running:
# compaction ran successfully but compaction was not changed, perhaps we are still compacting
self.log.info("design doc {0} is compacting".format(self.design_doc_name))
task_manager.schedule(self, 3)
elif self.precompacted_fragmentation > fragmentation:
self.log.info("%s: Pre Compacted fragmentation is more, before Compaction %d and after Compaction %d" % \
(self.design_doc_name, self.precompacted_fragmentation, fragmentation))
frag_val_diff = fragmentation - self.precompacted_fragmentation
if new_compaction_revision == self.compaction_revision or new_compaction_revision > self.compaction_revision:
self.log.info("{1}: compactor was run, compaction revision was changed on {0}".
format(new_compaction_revision, self.design_doc_name))
self.log.info("%s: fragmentation went from %d to %d" % (
self.design_doc_name, self.precompacted_fragmentation, fragmentation))
if frag_val_diff > 0:
if self._is_compacting():
task_manager.schedule(self, 5)
self.log.info(
"compaction was completed, but fragmentation value {0} is more than before compaction {1}".
format(fragmentation, self.precompacted_fragmentation))
self.log.info("Load is still in progress, Need to be checked")
self.set_result(self.with_rebalance)
else:
self.set_result(True)
self.state = FINISHED
else:
for i in range(10):
time.sleep(3)
if self._is_compacting():
task_manager.schedule(self, 2)
return
else:
new_compaction_revision, fragmentation = self._get_compaction_details()
self.log.info("{2}: stats compaction: ({0},{1})".format(new_compaction_revision, fragmentation,
self.design_doc_name))
curr_disk_size = self._get_disk_size()
self.log.info("Disk Size went from {0} {1}".format(self.disk_size, curr_disk_size))
if new_compaction_revision > self.compaction_revision and self.precompacted_fragmentation > fragmentation:
self.log.warning(
"the compaction revision was increased and the fragmentation value went from {0} to {1}".
format(self.precompacted_fragmentation, fragmentation))
self.set_result(True)
self.state = FINISHED
return
elif new_compaction_revision > self.compaction_revision and self.with_rebalance:
self.log.warning(
"the compaction revision was increased, but the actual fragmentation value has not changed significantly")
self.set_result(True)
self.state = FINISHED
return
else:
continue
self.log.info("design doc {0} is compacting:{1}".format(self.design_doc_name, self._is_compacting()))
new_compaction_revision, fragmentation = self._get_compaction_details()
self.log.error("stats compaction still: ({0},{1})".
format(new_compaction_revision, fragmentation))
status, content = self.rest.set_view_info(self.bucket, self.design_doc_name)
stats = content["stats"]
self.log.warning("general compaction stats:{0}".format(stats))
self.state = FINISHED
self.set_result(False)
self.set_exception(Exception("Check system logs, looks like compaction failed to start"))
except (SetViewInfoNotFound) as ex:
self.state = FINISHED
self.set_exception(ex)
# catch and set all unexpected exceptions
except Exception as e:
self.state = FINISHED
self.set_unexpected_exception(e)
def _get_disk_size(self):
nodes_ddoc_info = MonitorViewFragmentationTask.aggregate_ddoc_info(self.rest, self.design_doc_name,
self.bucket, self.with_rebalance)
disk_size = sum([content['disk_size'] for content in nodes_ddoc_info])
return disk_size
class MonitorDiskSizeFragmentationTask(Task):
def __init__(self, server, fragmentation_value=10, bucket="default", get_view_frag=False):
Task.__init__(self, "monitor_frag_db_task")
self.server = server
self.bucket = bucket
self.fragmentation_value = fragmentation_value
self.get_view_frag = get_view_frag
self.rest = RestConnection(self.server)
self.curr_disk_size = 0
def execute(self, task_manager):
if self.fragmentation_value < 0:
err_msg = \
"Invalid value for fragmentation %d" % self.fragmentation_value
self.state = FINISHED
self.set_exception(Exception(err_msg))
self.state = CHECKING
task_manager.schedule(self, 5)
def check(self, task_manager):
try:
rest = RestConnection(self.server)
stats = rest.fetch_bucket_stats(bucket=self.bucket)
if self.get_view_frag:
new_disk_size = stats["op"]["samples"]["couch_views_actual_disk_size"][-1]
else:
new_disk_size = stats["op"]["samples"]["couch_total_disk_size"][-1]
if self.curr_disk_size > new_disk_size:
self.state = FINISHED
self.set_result(True)
else:
# try again
task_manager.schedule(self, 5)
self.log.info("New and Current Disk size is {0} {1}".format(new_disk_size, self.curr_disk_size))
self.curr_disk_size = new_disk_size
except Exception as ex:
self.state = FINISHED
self.set_result(False)
self.set_exception(ex)
class CancelBucketCompactionTask(Task):
def __init__(self, server, bucket="default"):
Task.__init__(self, "cancel_bucket_compaction_task")
self.server = server
self.bucket = bucket
self.retries = 20
self.statuses = {}
try:
self.rest = RestConnection(server)
except ServerUnavailableException as e:
self.log.error(e)
self.state = FINISHED
self.set_exception(e)
def execute(self, task_manager):
try:
status = self.rest.cancel_bucket_compaction(self.bucket)
self.state = CHECKING
except BucketCompactionException as e:
self.log.error("Cancel Bucket compaction failed for unknown reason")
self.set_exception(e)
self.state = FINISHED
self.set_result(False)
task_manager.schedule(self)
def check(self, task_manager):
# check cancel bucket compaction status across all nodes
nodes = self.rest.get_nodes()
for node in nodes:
last_status = self.statuses.get(node.id)
try:
rest = RestConnection(node)
except ServerUnavailableException as e:
self.log.error(e)
self.state = FINISHED
self.set_exception(e)
running, progress = rest.check_compaction_status(self.bucket)
if progress is None and last_status is False:
# finished if previously detected running but not == 100%
self.statuses[node.id] = True
if running:
self.log.info("Progress is {0}".format(progress))
self.statuses[node.id] = (progress == 100)
done = all(self.statuses.values())
if done:
self.log.info("Bucket Compaction Cancelled successfully")
# task was completed successfully
self.set_result(True)
self.state = FINISHED
else:
if self.retries > 0:
self.retries = self.retries - 1
task_manager.schedule(self, 10)
else:
# never detected a compaction task running
self.log.error("Bucket Compaction Cancellation not started")
self.set_result(False)
self.state = FINISHED
class EnterpriseBackupTask(Task):
def __init__(self, backupset, objstore_provider, resume=False, purge=False, no_progress_bar=False,
cli_command_location='', cb_version=None, num_shards=''):
Task.__init__(self, "enterprise_backup_task")
self.backupset = backupset
self.objstore_provider = objstore_provider
self.resume = resume
self.purge = purge
self.no_progress_bar = no_progress_bar
self.cli_command_location = cli_command_location
self.cb_version = cb_version
self.cluster_flag = "--host"
self.num_shards = num_shards
""" from couchbase version 4.6.x, --host flag is not supported """
if self.cb_version is None:
raise Exception("Need to pass Couchbase version to run correctly bk/rt ")
elif self.cb_version[:5] in COUCHBASE_FROM_4DOT6:
self.cluster_flag = "--cluster"
self.output = []
self.error = []
try:
self.remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
except Exception as e:
self.log.error(e)
self.state = FINISHED
self.set_exception(e)
def execute(self, task_manager):
try:
args = (
f"backup --archive {self.objstore_provider.schema_prefix() + self.backupset.objstore_bucket + '/' if self.objstore_provider else ''}{self.backupset.directory}"
f" --repo {self.backupset.name}"
f" {self.cluster_flag} http://{self.backupset.cluster_host.ip}:{self.backupset.cluster_host.port}"
f" --username {self.backupset.cluster_host.rest_username}"
f" --password {self.backupset.cluster_host.rest_password}"
f" {self.num_shards}"
f"{' --obj-staging-dir ' + self.backupset.objstore_staging_directory if self.objstore_provider else ''}"
f"{' --obj-endpoint ' + self.backupset.objstore_endpoint if self.objstore_provider and self.backupset.objstore_endpoint else ''}"
f"{' --obj-region ' + self.backupset.objstore_region if self.objstore_provider and self.backupset.objstore_region else ''}"
f"{' --obj-access-key-id ' + self.backupset.objstore_access_key_id if self.objstore_provider and self.backupset.objstore_access_key_id else ''}"
f"{' --obj-secret-access-key ' + self.backupset.objstore_secret_access_key if self.objstore_provider and self.backupset.objstore_secret_access_key else ''}"
f"{' --s3-force-path-style' if self.objstore_provider and self.objstore_provider.schema_prefix() == 's3://' else ''}"
)
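# For a plain filesystem archive (no objstore provider) the resulting command
# looks roughly like this (paths, host and credentials are placeholders):
#   /opt/couchbase/bin/cbbackupmgr backup --archive /data/backups --repo my_repo \
#       --cluster http://10.0.0.1:8091 --username Administrator --password password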
if self.resume:
args += " --resume"
if self.purge:
args += " --purge"
if self.no_progress_bar:
args += " --no-progress-bar"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, args)
self.output, self.error = self.remote_client.execute_command(command)
self.state = CHECKING
except Exception as e:
self.log.error("Backup cluster failed for unknown reason")
self.set_exception(e)
self.state = FINISHED
self.set_result(False)
task_manager.schedule(self)
def check(self, task_manager):
if self.output:
self.state = FINISHED
self.set_result(self.output)
self.remote_client.log_command_output(self.output, self.error)
elif self.error:
self.state = FINISHED
self.set_result(self.error)
self.remote_client.log_command_output(self.output, self.error)
else:
task_manager.schedule(self, 10)
class EnterpriseRestoreTask(Task):
def __init__(self, backupset, objstore_provider, no_progress_bar=False, cli_command_location='', cb_version=None, start="start", end="end", backups=[], force_updates=False, no_resume=False):
Task.__init__(self, "enterprise_backup_task")
self.backupset = backupset
self.objstore_provider = objstore_provider
self.no_progress_bar = no_progress_bar
self.cli_command_location = cli_command_location
self.cb_version = cb_version
self.cluster_flag = "--host"
""" from couchbase version 4.6.x, --host flag is not supported """
if self.cb_version is None:
raise Exception("Need to pass Couchbase version to run correctly bk/rt ")
elif self.cb_version[:5] in COUCHBASE_FROM_4DOT6:
self.cluster_flag = "--cluster"
self.output = []
self.error = []
self.backups = backups
self.start = start
self.end = end
self.force_updates = force_updates
self.no_resume = no_resume
try:
self.remote_client = RemoteMachineShellConnection(self.backupset.backup_host)
except Exception as e:
self.log.error(e)
self.state = FINISHED
self.set_exception(e)
def execute(self, task_manager):
try:
if isinstance(self.start, int) and isinstance(self.end, int):
try:
backup_start = self.backups[int(self.start) - 1]
except IndexError:
backup_start = "{0}{1}".format(self.backups[-1], self.start)
try:
backup_end = self.backups[int(self.end) - 1]
except IndexError:
backup_end = "{0}{1}".format(self.backups[-1], self.end)
else:
backup_start = self.start
backup_end = self.end
args = (
f"restore --archive {self.objstore_provider.schema_prefix() + self.backupset.objstore_bucket + '/' if self.objstore_provider else ''}{self.backupset.directory}"
f" --repo {self.backupset.name}"
f" {self.cluster_flag} http://{self.backupset.restore_cluster_host.ip}:{self.backupset.restore_cluster_host.port}"
f" --username {self.backupset.restore_cluster_host.rest_username} "
f" --password {self.backupset.restore_cluster_host.rest_password}"
f" --start {backup_start}"
f" --end {backup_end}"
f"{' --obj-staging-dir ' + self.backupset.objstore_staging_directory if self.objstore_provider else ''}"
f"{' --obj-endpoint ' + self.backupset.objstore_endpoint if self.objstore_provider and self.backupset.objstore_endpoint else ''}"
f"{' --obj-region ' + self.backupset.objstore_region if self.objstore_provider and self.backupset.objstore_region else ''}"
f"{' --obj-access-key-id ' + self.backupset.objstore_access_key_id if self.objstore_provider and self.backupset.objstore_access_key_id else ''}"
f"{' --obj-secret-access-key ' + self.backupset.objstore_secret_access_key if self.objstore_provider and self.backupset.objstore_secret_access_key else ''}"
f"{' --s3-force-path-style' if self.objstore_provider and self.objstore_provider.schema_prefix() == 's3://' else ''}"
f"{' --resume' if self.backupset.resume and not self.no_resume else ''}"
)
if self.no_progress_bar:
args += " --no-progress-bar"
if self.force_updates:
args += " --force-updates"
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, args)
self.output, self.error = self.remote_client.execute_command(command)
self.state = CHECKING
except Exception as e:
self.log.error("Restore failed for unknown reason")
self.set_exception(e)
self.state = FINISHED
self.set_result(False)
task_manager.schedule(self)
def check(self, task_manager):
if self.output:
self.state = FINISHED
self.set_result(self.output)
self.remote_client.log_command_output(self.output, self.error)
elif self.error:
self.state = FINISHED
self.set_result(self.error)
self.remote_client.log_command_output(self.output, self.error)
else:
task_manager.schedule(self, 10)
class EnterpriseMergeTask(Task):
def __init__(self, backup_host, backups=[], start=0, end=0, directory='', name='',
cli_command_location=''):
Task.__init__(self, "enterprise_backup_task")
self.backup_host = backup_host
self.directory = directory
self.name = name
self.cli_command_location = cli_command_location
self.output = []
self.error = []
self.backups = backups
self.start = start
self.end = end
try:
self.remote_client = RemoteMachineShellConnection(self.backup_host)
except Exception as e:
self.log.error(e)
self.state = FINISHED
self.set_exception(e)
def execute(self, task_manager):
try:
try:
backup_start = self.backups[int(self.start) - 1]
except IndexError:
backup_start = "{0}{1}".format(self.backups[-1], self.start)
try:
backup_end = self.backups[int(self.end) - 1]
except IndexError:
backup_end = "{0}{1}".format(self.backups[-1], self.end)
args = "merge --archive {0} --repo {1} --start {2} --end {3}".format(self.directory, self.name,
backup_start, backup_end)
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, args)
self.output, self.error = self.remote_client.execute_command(command)
self.state = CHECKING
except Exception as e:
self.log.error("Merge failed for unknown reason")
self.set_exception(e)
self.state = FINISHED
self.set_result(False)
task_manager.schedule(self)
def check(self, task_manager):
if self.output:
self.state = FINISHED
self.set_result(self.output)
self.remote_client.log_command_output(self.output, self.error)
elif self.error:
self.state = FINISHED
self.set_result(self.error)
self.remote_client.log_command_output(self.output, self.error)
else:
task_manager.schedule(self, 10)
class EnterpriseCompactTask(Task):
def __init__(self, backup_host, backup_to_compact, backups=[], directory='', name='',
cli_command_location=''):
Task.__init__(self, "enterprise_backup_task")
self.backup_host = backup_host
self.backup_to_compact = backup_to_compact
self.directory = directory
self.name = name
self.cli_command_location = cli_command_location
self.output = []
self.error = []
self.backups = backups
try:
self.remote_client = RemoteMachineShellConnection(self.backup_host)
except Exception as e:
self.log.error(e)
self.state = FINISHED
self.set_exception(e)
def execute(self, task_manager):
try:
args = "compact --archive {0} --repo {1} --backup {2}".format(self.directory, self.name,
self.backups[self.backup_to_compact])
command = "{0}/cbbackupmgr {1}".format(self.cli_command_location, args)
self.output, self.error = self.remote_client.execute_command(command)
self.state = CHECKING
except Exception as e:
self.log.error("Compact failed for unknown reason")
self.set_exception(e)
self.state = FINISHED
self.set_result(False)
task_manager.schedule(self)
def check(self, task_manager):
if self.output:
self.state = FINISHED
self.set_result(self.output)
self.remote_client.log_command_output(self.output, self.error)
elif self.error:
self.state = FINISHED
self.set_result(self.error)
self.remote_client.log_command_output(self.output, self.error)
else:
task_manager.schedule(self, 10)
class CBASQueryExecuteTask(Task):
def __init__(self, server, cbas_endpoint, statement, mode=None, pretty=True):
Task.__init__(self, "cbas_query_execute_task")
self.server = server
self.cbas_endpoint = cbas_endpoint
self.statement = statement
self.mode = mode
self.pretty = pretty
self.response = {}
self.passed = True
def execute(self, task_manager):
try:
rest = RestConnection(self.server)
self.response = json.loads(rest.execute_statement_on_cbas(self.statement,
self.mode, self.pretty, 70))
if self.response:
self.state = CHECKING
task_manager.schedule(self)
else:
self.log.info("Some error")
self.state = FINISHED
self.passed = False
self.set_result(False)
# catch and set all unexpected exceptions
except Exception as e:
self.state = FINISHED
self.passed = False
self.set_unexpected_exception(e)
def check(self, task_manager):
try:
if "errors" in self.response:
errors = self.response["errors"]
else:
errors = None
if "results" in self.response:
results = self.response["results"]
else:
results = None
if "handle" in self.response:
handle = self.response["handle"]
else:
handle = None
if self.mode != "async":
if self.response["status"] == "success":
self.set_result(True)
self.passed = True
else:
self.log.info(errors)
self.passed = False
self.set_result(False)
else:
if self.response["status"] == "started":
self.set_result(True)
self.passed = True
else:
self.log.info(errors)
self.passed = False
self.set_result(False)
self.state = FINISHED
# catch and set all unexpected exceptions
except Exception as e:
self.state = FINISHED
self.set_unexpected_exception(e)
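# Illustrative usage sketch (not part of the original framework): like the
# other Task subclasses in this module, CBASQueryExecuteTask is driven by a
# task manager; the calling test code typically looks roughly like:
#
#     task = CBASQueryExecuteTask(server, cbas_endpoint, "SELECT 1;")
#     task_manager.schedule(task)
#     passed = task.result()   # True when status is "success"/"started"
#
# The exact scheduling/result API is assumed from the surrounding Task
# classes and may differ in the actual callers.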
class NodesFailureTask(Task):
def __init__(self, master, servers_to_fail, failure_type, timeout,
pause=0, timeout_buffer=3, disk_timeout=0, disk_location=None, disk_size=5000, failure_timeout=60):
Task.__init__(self, "NodesFailureTask")
self.master = master
self.servers_to_fail = servers_to_fail
self.num_servers_to_fail = self.servers_to_fail.__len__()
self.itr = 0
self.failure_type = failure_type
self.timeout = timeout
self.failure_timeout = failure_timeout
self.pause = pause
self.start_time = 0
self.timeout_buffer = timeout_buffer
self.current_failure_node = self.servers_to_fail[0]
self.max_time_to_wait_for_failover = self.timeout + \
self.timeout_buffer + 60
self.disk_timeout = disk_timeout
self.disk_location = disk_location
self.disk_size = disk_size
self.taskmanager = None
self.rebalance_in_progress = False
def execute(self, task_manager):
self.taskmanager = task_manager
rest = RestConnection(self.master)
if rest._rebalance_progress_status() == "running":
self.rebalance_in_progress = True
while self.has_next() and not self.done():
next(self)
if self.pause > 0 and self.pause > self.timeout:
self.check(task_manager)
if self.pause == 0 or 0 < self.pause < self.timeout:
self.check(task_manager)
self.state = FINISHED
self.set_result(True)
def check(self, task_manager):
rest = RestConnection(self.master)
max_timeout = self.timeout + self.timeout_buffer + self.disk_timeout
if self.start_time == 0:
message = "Did not inject failure in the system."
rest.print_UI_logs(10)
self.log.error(message)
self.state = FINISHED
self.set_result(False)
self.set_exception(NodesFailureException(message))
def has_next(self):
return self.itr < self.num_servers_to_fail
def __next__(self):
if self.pause != 0:
time.sleep(self.pause)
if self.pause > self.timeout and self.itr != 0:
rest = RestConnection(self.master)
status = rest.reset_autofailover()
self._rebalance()
if not status:
self.state = FINISHED
self.set_result(False)
self.set_exception(Exception("Reset of autofailover "
"count failed"))
self.current_failure_node = self.servers_to_fail[self.itr]
self.start_time = time.time()
self.log.info("before failure time: {}".format(time.ctime(time.time())))
if self.failure_type == "limit_file_limits_desc":
self._enable_disable_limit_file_limits_desc(self.current_failure_node, self.failure_timeout)
elif self.failure_type == "induce_limit_file_limits_desc":
self.enable_file_limit_desc(self.current_failure_node)
elif self.failure_type == "disable_limit_file_limits_desc":
self.disable_file_limit_desc(self.current_failure_node)
elif self.failure_type == "limit_file_limits":
self._enable_disable_limit_file_limits(self.current_failure_node, self.failure_timeout)
elif self.failure_type == "induce_limit_file_limits":
self.enable_file_limit(self.current_failure_node)
elif self.failure_type == "disable_limit_file_limits":
self.disable_file_limit(self.current_failure_node)
elif self.failure_type == "extra_files_in_log_dir":
self._extra_files_in_log_dir(self.current_failure_node, self.failure_timeout)
elif self.failure_type == "induce_extra_files_in_log_dir":
self.add_extra_files_in_log_dir(self.current_failure_node)
elif self.failure_type == "disable_extra_files_in_log_dir":
self.remove_extra_files_in_log_dir(self.current_failure_node)
elif self.failure_type == "empty_files_in_log_dir":
self._empty_file_in_log_dir(self.current_failure_node, self.failure_timeout)
elif self.failure_type == "induce_empty_files_in_log_dir":
self.add_empty_file_in_log_dir(self.current_failure_node)
elif self.failure_type == "disable_empty_files_in_log_dir":
self.remove_dummy_file_in_log_dir(self.current_failure_node)
elif self.failure_type == "dummy_file_in_log_dir":
self._dummy_file_in_log_dir(self.current_failure_node, self.failure_timeout)
elif self.failure_type == "induce_dummy_file_in_log_dir":
self.add_dummy_file_in_log_dir(self.current_failure_node)
elif self.failure_type == "disable_dummy_file_in_log_dir":
self.remove_dummy_file_in_log_dir(self.current_failure_node)
elif self.failure_type == "limit_file_size_limit":
self._enable_disable_limit_file_size_limit(self.current_failure_node, self.failure_timeout)
elif self.failure_type == "induce_limit_file_size_limit":
self.enable_file_size_limit(self.current_failure_node)
elif self.failure_type == "disable_limit_file_size_limit":
self.disable_file_size_limit(self.current_failure_node)
elif self.failure_type == "disk_readonly":
self._enable_disable_disk_readonly(self.current_failure_node, self.failure_timeout)
elif self.failure_type == "induce_disk_readonly":
self._enable_disk_readonly(self.current_failure_node)
elif self.failure_type == "disable_disk_readonly":
self._disable_disk_readonly(self.current_failure_node)
elif self.failure_type == "stress_ram":
self._enable_stress_ram(self.current_failure_node, self.failure_timeout)
elif self.failure_type == "stress_cpu":
self._enable_stress_cpu(self.current_failure_node, self.failure_timeout)
elif self.failure_type == "network_delay":
self._enable_disable_network_delay(self.current_failure_node, self.failure_timeout)
elif self.failure_type == "induce_network_delay":
self.enable_network_delay(self.current_failure_node)
elif self.failure_type == "disable_network_delay":
self.delete_network_rule(self.current_failure_node)
elif self.failure_type == "net_packet_loss":
self._enable_disable_packet_loss(self.current_failure_node, self.failure_timeout)
elif self.failure_type == "induce_net_packet_loss":
self.enable_packet_loss(self.current_failure_node)
elif self.failure_type == "disable_net_packet_loss":
self.delete_network_rule(self.current_failure_node)
elif self.failure_type == "enable_firewall":
self._enable_disable_firewall(self.current_failure_node, self.failure_timeout)
if self.failure_type == "induce_enable_firewall":
self._enable_firewall(self.current_failure_node)
elif self.failure_type == "disable_firewall":
self._disable_firewall(self.current_failure_node)
elif self.failure_type == "restart_couchbase":
self._restart_couchbase_server(self.current_failure_node, self.failure_timeout)
elif self.failure_type == "stop_couchbase":
self._stop_couchbase_server(self.current_failure_node)
elif self.failure_type == "start_couchbase":
self._start_couchbase_server(self.current_failure_node)
elif self.failure_type == "restart_network":
self._stop_restart_network(self.current_failure_node,
self.failure_timeout)
elif self.failure_type == "restart_machine":
self._restart_machine(self.current_failure_node)
elif self.failure_type == "stop_memcached":
self._stop_memcached(self.current_failure_node)
elif self.failure_type == "start_memcached":
self._start_memcached(self.current_failure_node)
elif self.failure_type == "kill_goxdcr":
self._kill_goxdcr(self.current_failure_node)
elif self.failure_type == "network_split":
self._block_incoming_network_from_node(self.servers_to_fail[0],
self.servers_to_fail[
self.itr + 1])
self.itr += 1
elif self.failure_type == "disk_failure":
self._fail_recover_disk_failure(self.current_failure_node, self.failure_timeout)
elif self.failure_type == "induce_disk_failure":
self._fail_disk(self.current_failure_node)
elif self.failure_type == "disk_full":
self._disk_full_recover_failure(self.current_failure_node, self.failure_timeout)
elif self.failure_type == "shard_json_corruption":
self.shard_json_corruption(self.current_failure_node)
elif self.failure_type == "induce_disk_full":
self._disk_full_failure(self.current_failure_node)
elif self.failure_type == "recover_disk_failure":
self._recover_disk(self.current_failure_node)
elif self.failure_type == "recover_disk_full_failure":
self._recover_disk_full_failure(self.current_failure_node)
self.log.info("Start time = {}".format(time.ctime(self.start_time)))
self.itr += 1
def _enable_disable_firewall(self, node, recover_time):
self._enable_firewall(node)
time.sleep(recover_time)
self._disable_firewall(node)
def _enable_disable_packet_loss(self, node, recover_time):
self.enable_packet_loss(node)
time.sleep(recover_time)
self.delete_network_rule(node)
def _enable_disable_limit_file_limits(self, node, recover_time):
self.enable_file_limit(node)
time.sleep(recover_time)
self.disable_file_limit(node)
def _enable_disable_limit_file_size_limit(self, node, recover_time):
self.enable_file_size_limit(node)
time.sleep(recover_time)
self.disable_file_size_limit(node)
def enable_file_size_limit(self, node):
shell = RemoteMachineShellConnection(node)
self.log.info("Updating file size limit to 10MB on {}".format(node))
shell.enable_file_size_limit()
shell.disconnect()
self.log.info("Enabled file size limit on {}".format(node))
def disable_file_size_limit(self, node):
shell = RemoteMachineShellConnection(node)
shell.disable_file_size_limit()
shell.disconnect()
self.log.info("disabled file size limit on {}".format(node))
def enable_file_limit(self, node):
shell = RemoteMachineShellConnection(node)
shell.enable_file_limit()
shell.disconnect()
self.log.info("Enabled file limit on {}".format(node))
def disable_file_limit(self, node):
shell = RemoteMachineShellConnection(node)
shell.disable_file_limit()
shell.disconnect()
self.log.info("disabled file limit on {}".format(node))
def _enable_disable_limit_file_limits_desc(self, node, recover_time):
self.enable_file_limit_desc(node)
time.sleep(recover_time)
self.disable_file_limit_desc(node)
def enable_file_limit_desc(self, node):
shell = RemoteMachineShellConnection(node)
shell.enable_file_limit_desc()
shell.disconnect()
self.log.info("Enabled file limit _desc on {}".format(node))
def disable_file_limit_desc(self, node):
shell = RemoteMachineShellConnection(node)
shell.disable_file_limit_desc()
shell.disconnect()
self.log.info("disabled file limit _desc on {}".format(node))
def _enable_disable_network_delay(self, node, recover_time):
self.enable_network_delay(node)
time.sleep(recover_time)
self.delete_network_rule(node)
def enable_network_delay(self, node):
shell = RemoteMachineShellConnection(node)
shell.enable_network_delay()
shell.disconnect()
self.log.info("Enabled network delay on {}".format(node))
def delete_network_rule(self, node):
shell = RemoteMachineShellConnection(node)
shell.delete_network_rule()
shell.disconnect()
self.log.info("Disabled packet loss on {}".format(node))
def enable_packet_loss(self, node):
shell = RemoteMachineShellConnection(node)
shell.enable_packet_loss()
shell.disconnect()
self.log.info("Enabled packet loss on {}".format(node))
def _enable_firewall(self, node):
RemoteUtilHelper.enable_firewall(node)
self.log.info("Enabled firewall on {}".format(node))
def _disable_firewall(self, node):
shell = RemoteMachineShellConnection(node)
shell.disable_firewall()
def _restart_couchbase_server(self, node, failure_timeout):
shell = RemoteMachineShellConnection(node)
shell.restart_couchbase()
shell.disconnect()
self.log.info("Restarted the couchbase server on {}".format(node))
time.sleep(failure_timeout)
def _stop_couchbase_server(self, node):
shell = RemoteMachineShellConnection(node)
shell.stop_couchbase()
shell.disconnect()
self.log.info("Stopped the couchbase server on {}".format(node))
def _start_couchbase_server(self, node):
shell = RemoteMachineShellConnection(node)
shell.start_couchbase()
shell.disconnect()
self.log.info("Started the couchbase server on {}".format(node))
def _enable_stress_cpu(self, node, stop_time):
shell = RemoteMachineShellConnection(node)
shell.cpu_stress(stop_time)
shell.disconnect()
self.log.info("cpu stressed for {0} sec on node {1}".format(stop_time, node))
def _enable_stress_ram(self, node, stop_time):
shell = RemoteMachineShellConnection(node)
shell.ram_stress(stop_time)
shell.disconnect()
self.log.info("ram stressed for {0} sec on node {1}".format(stop_time, node))
def _enable_disk_readonly(self, node):
shell = RemoteMachineShellConnection(node)
shell.enable_disk_readonly(self.disk_location)
shell.disconnect()
self.log.info("Dir {} made readonly on node {}".format(self.disk_location, node))
def _disable_disk_readonly(self, node):
shell = RemoteMachineShellConnection(node)
shell.disable_disk_readonly(self.disk_location)
shell.disconnect()
self.log.info("Dir {} made read/write on node {}".format(self.disk_location, node))
def _stop_restart_network(self, node, stop_time):
shell = RemoteMachineShellConnection(node)
shell.stop_network(stop_time)
shell.disconnect()
self.log.info("Stopped the network for {0} sec and restarted the "
"network on {1}".format(stop_time, node))
def _restart_machine(self, node):
shell = RemoteMachineShellConnection(node)
command = "/sbin/reboot"
shell.execute_command(command=command)
def _stop_memcached(self, node):
time.sleep(1)
shell = RemoteMachineShellConnection(node)
o, r = shell.stop_memcached()
self.log.info("Killed memcached. {0} {1}".format(o, r))
def _start_memcached(self, node):
shell = RemoteMachineShellConnection(node)
o, r = shell.start_memcached()
self.log.info("Started back memcached. {0} {1}".format(o, r))
shell.disconnect()
def _kill_goxdcr(self, node):
shell = RemoteMachineShellConnection(node)
o, r = shell.kill_goxdcr()
self.log.info("Killed goxdcr. {0} {1}".format(o, r))
def _block_incoming_network_from_node(self, node1, node2):
shell = RemoteMachineShellConnection(node1)
self.log.info("Adding {0} into iptables rules on {1}".format(
node1.ip, node2.ip))
command = "iptables -A INPUT -s {0} -j DROP".format(node2.ip)
shell.execute_command(command)
self.start_time = time.time()
def _fail_recover_disk_failure(self, node, recover_time):
self._fail_disk(node)
time.sleep(recover_time)
self._recover_disk(node)
def _fail_disk(self, node):
shell = RemoteMachineShellConnection(node)
output, error = shell.unmount_partition(self.disk_location)
success = True
if output:
for line in output:
if self.disk_location in line:
success = False
if success:
self.log.info("Unmounted disk at location : {0} on {1}".format(self.disk_location, node.ip))
self.start_time = time.time()
else:
self.log.info("Could not fail the disk at {0} on {1}".format(self.disk_location, node.ip))
self.state = FINISHED
self.set_exception(Exception("Could not fail the disk at {0} on {1}".format(self.disk_location, node.ip)))
self.set_result(False)
def _recover_disk(self, node):
shell = RemoteMachineShellConnection(node)
o, r = shell.mount_partition_ext4(self.disk_location)
for line in o:
if self.disk_location in line:
self.log.info("Mounted disk at location : {0} on {1}".format(self.disk_location, node.ip))
return
self.set_exception(Exception("Could not mount disk at location {0} on {1}".format(self.disk_location, node.ip)))
raise Exception()
def _disk_full_recover_failure(self, node, recover_time):
self._disk_full_failure(node)
time.sleep(recover_time)
self._recover_disk_full_failure(node)
def _extra_files_in_log_dir(self, node, recover_time):
self.add_extra_files_in_log_dir(node)
time.sleep(recover_time)
self.remove_extra_files_in_log_dir(node)
def add_extra_files_in_log_dir(self, node):
shell = RemoteMachineShellConnection(node)
output, error = shell.add_extra_files_index_log_dir(self.disk_location)
if error:
self.log.info(error)
def remove_extra_files_in_log_dir(self, node):
shell = RemoteMachineShellConnection(node)
output, error = shell.remove_extra_files_index_log_dir(self.disk_location)
if error:
self.log.info(error)
def _dummy_file_in_log_dir(self, node, recover_time):
self.add_dummy_file_in_log_dir(node)
time.sleep(recover_time)
self.remove_dummy_file_in_log_dir(node)
def add_dummy_file_in_log_dir(self, node):
shell = RemoteMachineShellConnection(node)
output, error = shell.add_dummy_file_index_log_dir(self.disk_location)
if error:
self.log.info(error)
def shard_json_corruption(self, node):
shell = RemoteMachineShellConnection(node)
output, error = shell.shard_json_corruption(self.disk_location)
if error:
self.log.info(error)
def remove_dummy_file_in_log_dir(self, node):
shell = RemoteMachineShellConnection(node)
output, error = shell.remove_dummy_file_index_log_dir(self.disk_location)
if error:
self.log.info(error)
def _empty_file_in_log_dir(self, node, recover_time):
self.add_empty_file_in_log_dir(node)
time.sleep(recover_time)
self.remove_dummy_file_in_log_dir(node)
def add_empty_file_in_log_dir(self, node):
shell = RemoteMachineShellConnection(node)
output, error = shell.add_empty_file_index_log_dir(self.disk_location)
if error:
self.log.info(error)
def _enable_disable_disk_readonly(self, node, recover_time):
self._enable_disk_readonly(node)
time.sleep(recover_time)
self._disable_disk_readonly(node)
def _disk_full_failure(self, node):
shell = RemoteMachineShellConnection(node)
output, error = shell.fill_disk_space(self.disk_location)
success = False
if output:
for line in output:
if self.disk_location in line:
if "0 100% {0}".format(self.disk_location) in line:
success = True
if success:
self.log.info("Filled up disk Space at {0} on {1}".format(self.disk_location, node.ip))
self.start_time = time.time()
else:
self.log.info("Could not fill the disk at {0} on {1}".format(self.disk_location, node.ip))
#self.state = FINISHED
#self.set_exception(Exception("Could not fill the disk at {0} on {1}".format(self.disk_location, node.ip)))
def _recover_disk_full_failure(self, node):
shell = RemoteMachineShellConnection(node)
delete_file = "{0}/disk-quota.ext3".format(self.disk_location)
output, error = shell.execute_command("rm -f {0}".format(delete_file))
self.log.info(output)
if error:
self.log.info(error)
def _check_for_autofailover_initiation(self, failed_over_node):
rest = RestConnection(self.master)
ui_logs = rest.get_logs(10)
ui_logs_text = [t["text"] for t in ui_logs]
ui_logs_time = [t["serverTime"] for t in ui_logs]
expected_log = "Starting failing over ['ns_1@{}']".format(
failed_over_node.ip)
if expected_log in ui_logs_text:
failed_over_time = ui_logs_time[ui_logs_text.index(expected_log)]
return True, failed_over_time
return False, None
def _wait_for_autofailover_initiation(self, timeout):
        autofailover_initiated = False
        while time.time() < timeout + self.start_time:
            autofailover_initiated, failed_over_time = \
                self._check_for_autofailover_initiation(
                    self.current_failure_node)
            if autofailover_initiated:
                end_time = self._get_mktime_from_server_time(failed_over_time)
                time_taken = end_time - self.start_time
                return autofailover_initiated, time_taken
        return autofailover_initiated, -1
def _get_mktime_from_server_time(self, server_time):
time_format = "%Y-%m-%dT%H:%M:%S"
server_time = server_time.split('.')[0]
mk_time = time.mktime(time.strptime(server_time, time_format))
return mk_time
def _rebalance(self):
rest = RestConnection(self.master)
nodes = rest.node_statuses()
rest.rebalance(otpNodes=[node.id for node in nodes])
rebalance_progress = rest.monitorRebalance()
if not rebalance_progress:
self.set_result(False)
self.state = FINISHED
self.set_exception(Exception("Failed to rebalance after failover"))
def _check_if_rebalance_in_progress(self, timeout):
rest = RestConnection(self.master)
end_time = time.time() + timeout
while time.time() < end_time:
try:
rebalance_status, progress = \
rest._rebalance_status_and_progress()
if rebalance_status == "running":
continue
elif rebalance_status is None and progress == 100:
return False, -1
except RebalanceFailedException:
ui_logs = rest.get_logs(10)
ui_logs_text = [t["text"] for t in ui_logs]
ui_logs_time = [t["serverTime"] for t in ui_logs]
rebalace_failure_log = "Rebalance exited with reason"
for ui_log in ui_logs_text:
if rebalace_failure_log in ui_log:
rebalance_failure_time = ui_logs_time[
ui_logs_text.index(ui_log)]
failover_log = "Could not automatically fail over " \
"node ('ns_1@{}'). Rebalance is " \
"running.".format(
self.current_failure_node.ip)
if failover_log in ui_logs_text:
return True, self._get_mktime_from_server_time(
rebalance_failure_time)
else:
return False, -2
return False, -3
class AutoFailoverNodesFailureTask(Task):
def __init__(self, master, servers_to_fail, failure_type, timeout,
pause=0, expect_auto_failover=True, timeout_buffer=3,
check_for_failover=True, failure_timers=None,
disk_timeout=0, disk_location=None, disk_size=200):
Task.__init__(self, "AutoFailoverNodesFailureTask")
self.master = master
self.servers_to_fail = servers_to_fail
self.num_servers_to_fail = self.servers_to_fail.__len__()
self.itr = 0
self.failure_type = failure_type
self.timeout = timeout
self.pause = pause
self.expect_auto_failover = expect_auto_failover
self.check_for_autofailover = check_for_failover
self.start_time = 0
self.timeout_buffer = timeout_buffer
self.current_failure_node = self.servers_to_fail[0]
self.max_time_to_wait_for_failover = self.timeout + \
self.timeout_buffer + 200
self.disk_timeout = disk_timeout
self.disk_location = disk_location
self.disk_size = disk_size
if failure_timers is None:
failure_timers = []
self.failure_timers = failure_timers
self.taskmanager = None
self.rebalance_in_progress = False
def execute(self, task_manager):
self.taskmanager = task_manager
rest = RestConnection(self.master)
if rest._rebalance_progress_status() == "running":
self.rebalance_in_progress = True
while self.has_next() and not self.done():
next(self)
if self.pause > 0 and self.pause > self.timeout:
self.check(task_manager)
if self.pause == 0 or 0 < self.pause < self.timeout:
self.check(task_manager)
self.state = FINISHED
self.set_result(True)
def check(self, task_manager):
if not self.check_for_autofailover:
self.state = EXECUTING
return
rest = RestConnection(self.master)
max_timeout = self.timeout + self.timeout_buffer + self.disk_timeout
if self.start_time == 0:
message = "Did not inject failure in the system."
rest.print_UI_logs(10)
self.log.error(message)
self.state = FINISHED
self.set_result(False)
self.set_exception(AutoFailoverException(message))
if self.rebalance_in_progress:
status, stop_time = self._check_if_rebalance_in_progress(120)
if not status:
if stop_time == -1:
message = "Rebalance already completed before failover " \
"of node"
self.log.error(message)
self.state = FINISHED
self.set_result(False)
self.set_exception(AutoFailoverException(message))
elif stop_time == -2:
message = "Rebalance failed but no failed autofailover " \
"message was printed in logs"
self.log.warning(message)
else:
message = "Rebalance not failed even after 2 minutes " \
"after node failure."
self.log.error(message)
rest.print_UI_logs(10)
self.state = FINISHED
self.set_result(False)
self.set_exception(AutoFailoverException(message))
else:
self.start_time = stop_time
autofailover_initiated, time_taken = \
self._wait_for_autofailover_initiation(
self.max_time_to_wait_for_failover)
if self.expect_auto_failover:
if autofailover_initiated:
if time_taken < max_timeout + 1:
self.log.info("Autofailover of node {0} successfully "
"initiated in {1} sec".format(
self.current_failure_node.ip, time_taken))
rest.print_UI_logs(10)
self.state = EXECUTING
else:
message = "Autofailover of node {0} was initiated after " \
"the timeout period. Expected timeout: {1} " \
"Actual time taken: {2}".format(
self.current_failure_node.ip, self.timeout, time_taken)
self.log.error(message)
rest.print_UI_logs(10)
self.state = FINISHED
self.set_result(False)
self.set_exception(AutoFailoverException(message))
else:
message = "Autofailover of node {0} was not initiated after " \
"the expected timeout period of {1}".format(
self.current_failure_node.ip, self.timeout)
rest.print_UI_logs(10)
self.log.error(message)
self.state = FINISHED
self.set_result(False)
self.set_exception(AutoFailoverException(message))
else:
if autofailover_initiated:
message = "Node {0} was autofailed over but no autofailover " \
"of the node was expected".format(
self.current_failure_node.ip)
rest.print_UI_logs(10)
self.log.error(message)
self.state = FINISHED
self.set_result(False)
self.set_exception(AutoFailoverException(message))
else:
self.log.info("Node not autofailed over as expected")
rest.print_UI_logs(10)
self.state = EXECUTING
def has_next(self):
return self.itr < self.num_servers_to_fail
def __next__(self):
if self.pause != 0:
time.sleep(self.pause)
if self.pause > self.timeout and self.itr != 0:
rest = RestConnection(self.master)
status = rest.reset_autofailover()
self._rebalance()
if not status:
self.state = FINISHED
self.set_result(False)
self.set_exception(Exception("Reset of autofailover "
"count failed"))
self.current_failure_node = self.servers_to_fail[self.itr]
self.log.info("before failure time: {}".format(time.ctime(time.time())))
if self.failure_type == "enable_firewall":
self._enable_firewall(self.current_failure_node)
elif self.failure_type == "disable_firewall":
self._disable_firewall(self.current_failure_node)
elif self.failure_type == "restart_couchbase":
self._restart_couchbase_server(self.current_failure_node)
elif self.failure_type == "stop_couchbase":
self._stop_couchbase_server(self.current_failure_node)
elif self.failure_type == "start_couchbase":
self._start_couchbase_server(self.current_failure_node)
elif self.failure_type == "restart_network":
self._stop_restart_network(self.current_failure_node,
self.timeout + self.timeout_buffer + 30)
elif self.failure_type == "restart_machine":
self._restart_machine(self.current_failure_node)
elif self.failure_type == "stop_indexer":
self._stop_indexer(self.current_failure_node)
elif self.failure_type == "start_indexer":
self._start_indexer(self.current_failure_node)
elif self.failure_type == "block_indexer_port":
self._block_indexer_port(self.current_failure_node)
elif self.failure_type == "resume_indexer_port":
self._resume_indexer_port(self.current_failure_node)
elif self.failure_type == "stop_memcached":
self._stop_memcached(self.current_failure_node)
elif self.failure_type == "start_memcached":
self._start_memcached(self.current_failure_node)
elif self.failure_type == "network_split":
self._block_incoming_network_from_node(self.servers_to_fail[0],
self.servers_to_fail[
self.itr + 1])
self.itr += 1
elif self.failure_type == "disk_failure":
self._fail_disk(self.current_failure_node)
elif self.failure_type == "disk_full":
self._disk_full_failure(self.current_failure_node)
elif self.failure_type == "recover_disk_failure":
self._recover_disk(self.current_failure_node)
elif self.failure_type == "recover_disk_full_failure":
self._recover_disk_full_failure(self.current_failure_node)
self.log.info("Start time = {}".format(time.ctime(self.start_time)))
self.itr += 1
def _enable_firewall(self, node):
node_failure_timer = self.failure_timers[self.itr]
time.sleep(1)
RemoteUtilHelper.enable_firewall(node)
self.log.info("Enabled firewall on {}".format(node))
node_failure_timer.result()
self.start_time = node_failure_timer.start_time
def _disable_firewall(self, node):
shell = RemoteMachineShellConnection(node)
shell.disable_firewall()
def _restart_couchbase_server(self, node):
node_failure_timer = self.failure_timers[self.itr]
time.sleep(1)
shell = RemoteMachineShellConnection(node)
shell.restart_couchbase()
shell.disconnect()
self.log.info("Restarted the couchbase server on {}".format(node))
node_failure_timer.result()
self.start_time = node_failure_timer.start_time
def _stop_couchbase_server(self, node):
node_failure_timer = self.failure_timers[self.itr]
time.sleep(1)
shell = RemoteMachineShellConnection(node)
shell.stop_couchbase()
shell.disconnect()
self.log.info("Stopped the couchbase server on {}".format(node))
node_failure_timer.result()
self.start_time = node_failure_timer.start_time
def _start_couchbase_server(self, node):
shell = RemoteMachineShellConnection(node)
shell.start_couchbase()
shell.disconnect()
self.log.info("Started the couchbase server on {}".format(node))
def _stop_restart_network(self, node, stop_time):
node_failure_timer = self.failure_timers[self.itr]
time.sleep(1)
shell = RemoteMachineShellConnection(node)
shell.stop_network(stop_time)
shell.disconnect()
self.log.info("Stopped the network for {0} sec and restarted the "
"network on {1}".format(stop_time, node))
node_failure_timer.result()
self.start_time = node_failure_timer.start_time
def _restart_machine(self, node):
node_failure_timer = self.failure_timers[self.itr]
time.sleep(1)
shell = RemoteMachineShellConnection(node)
command = "/sbin/reboot"
shell.execute_command(command=command)
node_failure_timer.result()
self.start_time = node_failure_timer.start_time
def _block_indexer_port(self, node):
shell = RemoteMachineShellConnection(node)
self.log.info(f"Blocking port 9103 and 9105 on {node}")
shell.execute_command("iptables -A INPUT -p tcp --destination-port 9103 -j DROP")
shell.execute_command("iptables -A OUTPUT -p tcp --destination-port 9103 -j DROP")
shell.execute_command("iptables -A INPUT -p tcp --destination-port 9105 -j DROP")
shell.execute_command("iptables -A OUTPUT -p tcp --destination-port 9105 -j DROP")
def _resume_indexer_port(self, node):
shell = RemoteMachineShellConnection(node)
self.log.info(f"Resuming port 9103 and 9105 on {node}")
shell.execute_command("iptables -D INPUT -p tcp --destination-port 9103 -j DROP")
shell.execute_command("iptables -D OUTPUT -p tcp --destination-port 9103 -j DROP")
shell.execute_command("iptables -D INPUT -p tcp --destination-port 9105 -j DROP")
shell.execute_command("iptables -D OUTPUT -p tcp --destination-port 9105 -j DROP")
def _stop_indexer(self, node):
node_failure_timer = self.failure_timers[self.itr]
time.sleep(1)
shell = RemoteMachineShellConnection(node)
o, r = shell.stop_indexer()
self.log.info("Killed indexer. {0} {1}".format(o, r))
node_failure_timer.result()
self.start_time = node_failure_timer.start_time
def _start_indexer(self, node):
shell = RemoteMachineShellConnection(node)
o, r = shell.start_indexer()
self.log.info("Started back indexer. {0} {1}".format(o, r))
shell.disconnect()
def _stop_memcached(self, node):
node_failure_timer = self.failure_timers[self.itr]
time.sleep(1)
shell = RemoteMachineShellConnection(node)
o, r = shell.stop_memcached()
self.log.info("Killed memcached. {0} {1}".format(o, r))
node_failure_timer.result()
self.start_time = node_failure_timer.start_time
def _start_memcached(self, node):
shell = RemoteMachineShellConnection(node)
o, r = shell.start_memcached()
self.log.info("Started back memcached. {0} {1}".format(o, r))
shell.disconnect()
def _block_incoming_network_from_node(self, node1, node2):
shell = RemoteMachineShellConnection(node1)
self.log.info("Adding {0} into iptables rules on {1}".format(
node1.ip, node2.ip))
command = "iptables -A INPUT -s {0} -j DROP".format(node2.ip)
shell.execute_command(command)
self.start_time = time.time()
def _fail_disk(self, node):
shell = RemoteMachineShellConnection(node)
output, error = shell.unmount_partition(self.disk_location)
success = True
if output:
for line in output:
if self.disk_location in line:
success = False
if success:
self.log.info("Unmounted disk at location : {0} on {1}".format(self.disk_location, node.ip))
self.start_time = time.time()
else:
self.log.info("Could not fail the disk at {0} on {1}".format(self.disk_location, node.ip))
self.state = FINISHED
self.set_exception(Exception("Could not fail the disk at {0} on {1}".format(self.disk_location, node.ip)))
self.set_result(False)
def _recover_disk(self, node):
shell = RemoteMachineShellConnection(node)
o, r = shell.mount_partition(self.disk_location)
for line in o:
if self.disk_location in line:
self.log.info("Mounted disk at location : {0} on {1}".format(self.disk_location, node.ip))
return
self.set_exception(Exception("Could not mount disk at location {0} on {1}".format(self.disk_location, node.ip)))
raise Exception()
def _disk_full_failure(self, node):
shell = RemoteMachineShellConnection(node)
output, error = shell.fill_disk_space(self.disk_location, self.disk_size)
success = False
if output:
for line in output:
if self.disk_location in line:
if "0 100% {0}".format(self.disk_location) in line:
success = True
if success:
self.log.info("Filled up disk Space at {0} on {1}".format(self.disk_location, node.ip))
self.start_time = time.time()
else:
self.log.info("Could not fill the disk at {0} on {1}".format(self.disk_location, node.ip))
self.state = FINISHED
self.set_exception(Exception("Could not fill the disk at {0} on {1}".format(self.disk_location, node.ip)))
def _recover_disk_full_failure(self, node):
shell = RemoteMachineShellConnection(node)
delete_file = "{0}/disk-quota.ext3".format(self.disk_location)
output, error = shell.execute_command("rm -f {0}".format(delete_file))
self.log.info(output)
if error:
self.log.info(error)
def _check_for_autofailover_initiation(self, failed_over_node):
rest = RestConnection(self.master)
ui_logs = rest.get_logs(10)
ui_logs_text = [t["text"] for t in ui_logs]
ui_logs_time = [t["serverTime"] for t in ui_logs]
expected_log = "Starting failing over ['ns_1@{}']".format(
failed_over_node.ip)
if expected_log in ui_logs_text:
failed_over_time = ui_logs_time[ui_logs_text.index(expected_log)]
return True, failed_over_time
return False, None
def _wait_for_autofailover_initiation(self, timeout):
        autofailover_initiated = False
        while time.time() < timeout + self.start_time:
            autofailover_initiated, failed_over_time = \
                self._check_for_autofailover_initiation(
                    self.current_failure_node)
            if autofailover_initiated:
                end_time = self._get_mktime_from_server_time(failed_over_time)
                time_taken = end_time - self.start_time
                return autofailover_initiated, time_taken
        return autofailover_initiated, -1
def _get_mktime_from_server_time(self, server_time):
time_format = "%Y-%m-%dT%H:%M:%S"
server_time = server_time.split('.')[0]
mk_time = time.mktime(time.strptime(server_time, time_format))
return mk_time
def _rebalance(self):
rest = RestConnection(self.master)
nodes = rest.node_statuses()
rest.rebalance(otpNodes=[node.id for node in nodes])
rebalance_progress = rest.monitorRebalance()
if not rebalance_progress:
self.set_result(False)
self.state = FINISHED
self.set_exception(Exception("Failed to rebalance after failover"))
def _check_if_rebalance_in_progress(self, timeout):
rest = RestConnection(self.master)
end_time = time.time() + timeout
while time.time() < end_time:
try:
rebalance_status, progress = \
rest._rebalance_status_and_progress()
if rebalance_status == "running":
continue
elif rebalance_status is None and progress == 100:
return False, -1
except RebalanceFailedException:
ui_logs = rest.get_logs(10)
ui_logs_text = [t["text"] for t in ui_logs]
ui_logs_time = [t["serverTime"] for t in ui_logs]
                rebalance_failure_log = "Rebalance exited with reason"
                for ui_log in ui_logs_text:
                    if rebalance_failure_log in ui_log:
rebalance_failure_time = ui_logs_time[
ui_logs_text.index(ui_log)]
failover_log = "Could not automatically fail over " \
"node ('ns_1@{}'). Rebalance is " \
"running.".format(
self.current_failure_node.ip)
if failover_log in ui_logs_text:
return True, self._get_mktime_from_server_time(
rebalance_failure_time)
else:
return False, -2
return False, -3
class NodeDownTimerTask(Task):
def __init__(self, node, port=None, timeout=300):
Task.__init__(self, "NodeDownTimerTask")
self.log.info("Initializing NodeDownTimerTask")
self.node = node
self.port = port
self.timeout = timeout
self.start_time = 0
def execute(self, task_manager):
self.log.info("Starting execution of NodeDownTimerTask")
end_task = time.time() + self.timeout
while not self.done() and time.time() < end_task:
if not self.port:
try:
self.start_time = time.time()
response = os.system("ping -c 1 {} > /dev/null".format(
self.node))
if response != 0:
self.log.info("Injected failure in {}. Caught "
"due to ping".format(self.node))
self.state = FINISHED
self.set_result(True)
break
except Exception as e:
self.log.warning("Unexpected exception caught {"
"}".format(e))
self.state = FINISHED
self.set_result(True)
break
try:
self.start_time = time.time()
socket.socket().connect(("{}".format(self.node), 8091))
socket.socket().close()
socket.socket().connect(("{}".format(self.node), 11210))
socket.socket().close()
except socket.error:
self.log.info("Injected failure in {}. Caught due "
"to ports".format(self.node))
self.state = FINISHED
self.set_result(True)
break
else:
try:
self.start_time = time.time()
socket.socket().connect(("{}".format(self.node),
int(self.port)))
socket.socket().close()
socket.socket().connect(("{}".format(self.node), 11210))
socket.socket().close()
except socket.error:
self.log.info("Injected failure in {}".format(self.node))
self.state = FINISHED
self.set_result(True)
break
if time.time() >= end_task:
self.state = FINISHED
self.set_result(False)
self.log.info("Could not inject failure in {}".format(self.node))
def check(self, task_manager):
pass
class NodeMonitorsAnalyserTask(Task):
def __init__(self, node, stop=False):
Task.__init__(self, "NodeMonitorAnalyzerTask")
self.command = "dict:to_list(node_status_analyzer:get_nodes())"
self.master = node
self.rest = RestConnection(self.master)
self.stop = stop
def execute(self, task_manager):
while not self.done() and not self.stop:
self.status, self.content = self.rest.diag_eval(self.command,
print_log=False)
self.state = CHECKING
def check(self, task_manager):
if self.status and self.content:
self.log.info("NodeStatus: {}".format(self.content))
            if self.master.ip not in self.content:
self.set_result(False)
self.state = FINISHED
self.set_exception(Exception("Node status monitors does not "
"contain the node status"))
return
time.sleep(1)
self.state = EXECUTING
else:
raise Exception("Monitors not working correctly")
# Runs the Java SDK client directly on the slave
class SDKLoadDocumentsTask(Task):
def __init__(self, server, bucket, sdk_docloader):
Task.__init__(self, "SDKLoadDocumentsTask")
self.server = server
if isinstance(bucket, Bucket):
self.bucket = bucket.name
else:
self.bucket = bucket
self.sdk_docloader = sdk_docloader
def execute_for_collection(self, collection, start_seq_num_shift=0):
import subprocess
command = f"java -jar java_sdk_client/collections/target/javaclient/javaclient.jar " \
f"-i {self.server.ip} -u {self.sdk_docloader.username} -p {self.sdk_docloader.password} -b {self.bucket} " \
f"-s {self.sdk_docloader.scope} -c {collection} " \
f"-n {self.sdk_docloader.num_ops} -pc {self.sdk_docloader.percent_create} -pu {self.sdk_docloader.percent_update} " \
f"-pd {self.sdk_docloader.percent_delete} -l {self.sdk_docloader.load_pattern} " \
f"-dsn {self.sdk_docloader.start_seq_num + start_seq_num_shift} -dpx {self.sdk_docloader.key_prefix} -dt {self.sdk_docloader.json_template} " \
f"-de {self.sdk_docloader.doc_expiry} -ds {self.sdk_docloader.doc_size} -ac {self.sdk_docloader.all_collections} " \
f"-st {self.sdk_docloader.start+start_seq_num_shift} -en {self.sdk_docloader.end+start_seq_num_shift} -o {self.sdk_docloader.output} -sd {self.sdk_docloader.shuffle_docs} --secure {self.sdk_docloader.secure}"
if self.sdk_docloader.es_compare:
command = command + " -es true -es_host " + str(self.sdk_docloader.es_host) + " -es_port " + str(
self.sdk_docloader.es_port) + \
" -es_login " + str(self.sdk_docloader.es_login) + " -es_password " + str(
self.sdk_docloader.es_password)
if self.sdk_docloader.op_type == "update":
arr_fields_to_update = self.sdk_docloader.fields_to_update if self.sdk_docloader.fields_to_update else ""
if len(arr_fields_to_update) > 0:
command = command + " -fu "
command = command + ",".join(arr_fields_to_update)
self.log.info(command)
        proc = None
        try:
            proc = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
            out = proc.communicate(timeout=self.sdk_docloader.timeout)
            if self.sdk_docloader.get_sdk_logs:
                self.sdk_docloader.set_sdk_results(out)
            self.log.info(out[0].decode("utf-8"))
            if proc.returncode != 0:
                raise Exception("Exception in java sdk client to {}:{}\n{}".format(self.server.ip, self.bucket, out))
        except Exception as e:
            # Guard against Popen itself failing, in which case proc is still None.
            if proc:
                proc.terminate()
            self.state = FINISHED
            self.set_exception(e)
            return
        proc.terminate()
        self.state = FINISHED
        self.set_result(True)
def execute(self, task_manager):
if type(self.sdk_docloader.collection) is list:
start_seq_num_shift = 0
for c in self.sdk_docloader.collection:
self.execute_for_collection(c, start_seq_num_shift)
start_seq_num_shift = start_seq_num_shift + self.sdk_docloader.upd_del_shift
else:
self.execute_for_collection(self.sdk_docloader.collection)
self.check(task_manager)
#TODO additional sleep to let ES finish with docs indexing, should be replaced with something more intelligent.
time.sleep(30)
def check(self, task_manager):
self.set_result(True)
self.state = FINISHED
task_manager.schedule(self)
# Runs the Java SDK client in a Docker container on the slave
class DockerSDKLoadDocumentsTask(Task):
def __init__(self, server, bucket, sdk_docloader, pause_secs, timeout_secs):
Task.__init__(self, "SDKLoadDocumentsTask")
self.server = server
if isinstance(bucket, Bucket):
self.bucket = bucket.name
else:
self.bucket = bucket
self.params = sdk_docloader
self.pause_secs = pause_secs
self.timeout_secs = timeout_secs
from lib.collection.collections_dataloader import JavaSDKClient
self.javasdkclient = JavaSDKClient(self.server, self.bucket, self.params)
def execute(self, task_manager):
try:
self.javasdkclient.do_ops()
self.state = CHECKING
task_manager.schedule(self)
except Exception as e:
self.state = FINISHED
self.set_exception(Exception("Exception while loading data to {}:{}"
.format(self.server.ip, self.bucket)))
finally:
self.javasdkclient.cleanup()
def check(self, task_manager):
self.set_result(True)
self.state = FINISHED
task_manager.schedule(self)
#TODO:
# input params to include,exclude keywords,
# populate dictionary {node:matches} in setUp()
# call LogScanTask in basetestcase tearDown() and diff dict
# add sensitive data patterns
# pretty print matches
class LogScanTask(Task):
def __init__(self, server, file_prefix):
Task.__init__(self, "log_scan_task")
self.server = server
self.log_scan_file_name = f'{self.server.ip}_{file_prefix}'
from lib.log_scanner import LogScanner
exclude_keywords = TestInputSingleton.input.param("exclude_keywords", None)
skip_security_scan = TestInputSingleton.input.param("skip_security_scan", False)
self.log_scanner = LogScanner(server=self.server, exclude_keywords=exclude_keywords,
skip_security_scan=skip_security_scan)
def execute(self, task_manager):
try:
# Scan logs corresponding to node services
matches = self.log_scanner.scan()
            with open(self.log_scan_file_name, 'w+') as target:
                target.write(str(matches))
self.state = CHECKING
task_manager.schedule(self)
# catch and set all unexpected exceptions
except Exception as e:
self.state = FINISHED
self.set_unexpected_exception(e)
def check(self, task_manager):
self.set_result(True)
self.state = FINISHED
task_manager.schedule(self)
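# Illustrative sketch (not part of the original framework) of the flow the
# TODO above describes: scan the node logs once during test setup, scan again
# during teardown, and diff the two result files to spot new matches. The
# file-prefix values and the surrounding test-case wiring are assumptions.
#
#     setup_scan = LogScanTask(server, file_prefix="setup")
#     # ... run the test ...
#     teardown_scan = LogScanTask(server, file_prefix="teardown")
#     # compare the <ip>_setup and <ip>_teardown match dumps written by execute()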
|
""" Defines helper functions for creating kernel entry points and process
launchers.
"""
# Standard library imports.
import atexit
import json
import os
import socket
from subprocess import Popen, PIPE
import sys
import tempfile
# System library imports
# IPython imports
from IPython.utils.localinterfaces import LOCALHOST
from IPython.utils.py3compat import bytes_to_str
# Local imports.
from parentpoller import ParentPollerWindows
def write_connection_file(fname=None, shell_port=0, iopub_port=0, stdin_port=0, hb_port=0,
ip=LOCALHOST, key=b'', transport='tcp'):
"""Generates a JSON config file, including the selection of random ports.
Parameters
----------
fname : unicode
The path to the file to write
shell_port : int, optional
The port to use for ROUTER channel.
iopub_port : int, optional
The port to use for the SUB channel.
stdin_port : int, optional
The port to use for the REQ (raw input) channel.
hb_port : int, optional
        The port to use for the heartbeat REP channel.
ip : str, optional
The ip address the kernel will bind to.
key : str, optional
The Session key used for HMAC authentication.
"""
# default to temporary connector file
if not fname:
fname = tempfile.mktemp('.json')
# Find open ports as necessary.
ports = []
ports_needed = int(shell_port <= 0) + int(iopub_port <= 0) + \
int(stdin_port <= 0) + int(hb_port <= 0)
if transport == 'tcp':
for i in range(ports_needed):
sock = socket.socket()
sock.bind(('', 0))
ports.append(sock)
for i, sock in enumerate(ports):
port = sock.getsockname()[1]
sock.close()
ports[i] = port
else:
N = 1
for i in range(ports_needed):
while os.path.exists("%s-%s" % (ip, str(N))):
N += 1
ports.append(N)
N += 1
if shell_port <= 0:
shell_port = ports.pop(0)
if iopub_port <= 0:
iopub_port = ports.pop(0)
if stdin_port <= 0:
stdin_port = ports.pop(0)
if hb_port <= 0:
hb_port = ports.pop(0)
cfg = dict( shell_port=shell_port,
iopub_port=iopub_port,
stdin_port=stdin_port,
hb_port=hb_port,
)
cfg['ip'] = ip
cfg['key'] = bytes_to_str(key)
cfg['transport'] = transport
with open(fname, 'w') as f:
f.write(json.dumps(cfg, indent=2))
return fname, cfg
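def _example_write_connection_file():
    """Illustrative sketch only, not part of the original module: shows a
    typical call that lets write_connection_file() pick free TCP ports and
    returns both the generated file path and the config dict. The key value
    here is a placeholder."""
    fname, cfg = write_connection_file(key=b'example-key', transport='tcp')
    # cfg now holds the four selected ports plus ip/key/transport.
    return fname, cfg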
def base_launch_kernel(code, fname, stdin=None, stdout=None, stderr=None,
executable=None, independent=False, extra_arguments=[],
cwd=None):
""" Launches a localhost kernel, binding to the specified ports.
Parameters
----------
code : str,
A string of Python code that imports and executes a kernel entry point.
stdin, stdout, stderr : optional (default None)
Standards streams, as defined in subprocess.Popen.
fname : unicode, optional
The JSON connector file, containing ip/port/hmac key information.
key : str, optional
The Session key used for HMAC authentication.
executable : str, optional (default sys.executable)
The Python executable to use for the kernel process.
independent : bool, optional (default False)
If set, the kernel process is guaranteed to survive if this process
dies. If not set, an effort is made to ensure that the kernel is killed
when this process dies. Note that in this case it is still good practice
to kill kernels manually before exiting.
extra_arguments : list, optional
A list of extra arguments to pass when executing the launch code.
cwd : path, optional
The working dir of the kernel process (default: cwd of this process).
Returns
-------
A tuple of form:
(kernel_process, shell_port, iopub_port, stdin_port, hb_port)
where kernel_process is a Popen object and the ports are integers.
"""
# Build the kernel launch command.
if executable is None:
executable = sys.executable
arguments = [ executable, '-c', code, '-f', fname ]
arguments.extend(extra_arguments)
# Popen will fail (sometimes with a deadlock) if stdin, stdout, and stderr
# are invalid. Unfortunately, there is in general no way to detect whether
# they are valid. The following two blocks redirect them to (temporary)
# pipes in certain important cases.
# If this process has been backgrounded, our stdin is invalid. Since there
# is no compelling reason for the kernel to inherit our stdin anyway, we'll
    # play it safe and always redirect.
redirect_in = True
_stdin = PIPE if stdin is None else stdin
    # If this process is running on pythonw, we know that stdin, stdout, and
# stderr are all invalid.
redirect_out = sys.executable.endswith('pythonw.exe')
if redirect_out:
_stdout = PIPE if stdout is None else stdout
_stderr = PIPE if stderr is None else stderr
else:
_stdout, _stderr = stdout, stderr
# Spawn a kernel.
if sys.platform == 'win32':
# Create a Win32 event for interrupting the kernel.
interrupt_event = ParentPollerWindows.create_interrupt_event()
arguments += [ '--interrupt=%i'%interrupt_event ]
        # If the kernel is running on pythonw and stdout/stderr have not been
        # redirected, it will crash when more than 4KB of data is written to
# stdout or stderr. This is a bug that has been with Python for a very
# long time; see http://bugs.python.org/issue706263.
# A cleaner solution to this problem would be to pass os.devnull to
# Popen directly. Unfortunately, that does not work.
if executable.endswith('pythonw.exe'):
if stdout is None:
arguments.append('--no-stdout')
if stderr is None:
arguments.append('--no-stderr')
# Launch the kernel process.
if independent:
proc = Popen(arguments,
creationflags=512, # CREATE_NEW_PROCESS_GROUP
stdin=_stdin, stdout=_stdout, stderr=_stderr)
else:
try:
from _winapi import DuplicateHandle, GetCurrentProcess, \
DUPLICATE_SAME_ACCESS
except:
from _subprocess import DuplicateHandle, GetCurrentProcess, \
DUPLICATE_SAME_ACCESS
pid = GetCurrentProcess()
handle = DuplicateHandle(pid, pid, pid, 0,
True, # Inheritable by new processes.
DUPLICATE_SAME_ACCESS)
proc = Popen(arguments + ['--parent=%i'%int(handle)],
stdin=_stdin, stdout=_stdout, stderr=_stderr)
            # Attach the interrupt event to the Popen object so it can be used later.
proc.win32_interrupt_event = interrupt_event
else:
if independent:
proc = Popen(arguments, preexec_fn=lambda: os.setsid(),
stdin=_stdin, stdout=_stdout, stderr=_stderr, cwd=cwd)
else:
proc = Popen(arguments + ['--parent=1'],
stdin=_stdin, stdout=_stdout, stderr=_stderr, cwd=cwd)
# Clean up pipes created to work around Popen bug.
if redirect_in:
if stdin is None:
proc.stdin.close()
if redirect_out:
if stdout is None:
proc.stdout.close()
if stderr is None:
proc.stderr.close()
return proc
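def _example_base_launch_kernel():
    """Illustrative sketch only, not part of the original module: launches a
    throwaway child process through base_launch_kernel() using a freshly
    written connection file. The code string below is a stand-in for a real
    kernel entry point."""
    fname, _ = write_connection_file()
    placeholder_code = "print('kernel entry point placeholder')"
    proc = base_launch_kernel(placeholder_code, fname, independent=False)
    return proc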
|
from typing import List


class Solution:
def dailyTemperatures(self, T: List[int]) -> List[int]:
l = len(T)
res = [0] * l
s = []
for i, v in enumerate(T):
while s and s[-1][0] < v:
tmp_v = s.pop()[1]
res[tmp_v] = i - tmp_v
s.append((v, i))
return res
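# Minimal usage sketch (added for illustration, not part of the original
# solution): exercises the monotonic-stack logic on the classic example
# input; each entry is the number of days until a warmer temperature.
if __name__ == "__main__":
    temps = [73, 74, 75, 71, 69, 72, 76, 73]
    print(Solution().dailyTemperatures(temps))  # -> [1, 1, 4, 2, 1, 1, 0, 0]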
|
config = {
"interfaces": {
"google.ads.googleads.v4.services.TopicConstantService": {
"retry_codes": {
"idempotent": [
"DEADLINE_EXCEEDED",
"UNAVAILABLE"
],
"non_idempotent": []
},
"retry_params": {
"default": {
"initial_retry_delay_millis": 5000,
"retry_delay_multiplier": 1.3,
"max_retry_delay_millis": 60000,
"initial_rpc_timeout_millis": 3600000,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 3600000,
"total_timeout_millis": 3600000
}
},
"methods": {
"GetTopicConstant": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default"
}
}
}
}
}
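# Hedged sketch (not part of the generated client config): illustrates how a
# client could derive successive retry delays from the "default" retry_params
# above, assuming plain exponential backoff capped at max_retry_delay_millis.
def _sketch_retry_delays(attempts=5):
    params = config["interfaces"][
        "google.ads.googleads.v4.services.TopicConstantService"][
        "retry_params"]["default"]
    delay = params["initial_retry_delay_millis"]
    delays = []
    for _ in range(attempts):
        delays.append(delay)
        delay = min(delay * params["retry_delay_multiplier"],
                    params["max_retry_delay_millis"])
    return delays  # e.g. [5000, 6500.0, 8450.0, 10985.0, 14280.5]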
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: second/protos/voxel_generator.proto
import sys
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name="second/protos/voxel_generator.proto",
package="second.protos",
syntax="proto3",
serialized_options=None,
serialized_pb=_b(
'\n#second/protos/voxel_generator.proto\x12\rsecond.prot' +
'os"\xbc\x01\n\x0eVoxelGenerator\x12\x12\n\nvoxel_size\x18\x01 ' +
'\x03(\x02\x12\x19\n\x11point_cloud_range\x18\x02 \x03(\x02\x12&\n\x1ema' +
'x_number_of_points_per_voxel\x18\x03 \x01(\r\x12\x19\n\x11submanifold_' +
'group\x18\x04 \x01(\x08\x12\x18\n\x10submanifold_size\x18\x05 \x03(\r\x12\x1e\n\x16sub' +
'manifold_max_points\x18\x06 \x01(\rb\x06proto3'
),
)
_VOXELGENERATOR = _descriptor.Descriptor(
name="VoxelGenerator",
full_name="second.protos.VoxelGenerator",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="voxel_size",
full_name="second.protos.VoxelGenerator.voxel_size",
index=0,
number=1,
type=2,
cpp_type=6,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="point_cloud_range",
full_name="second.protos.VoxelGenerator.point_cloud_range",
index=1,
number=2,
type=2,
cpp_type=6,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="max_number_of_points_per_voxel",
full_name="second.protos.VoxelGenerator.max_number_of_points_per_voxel",
index=2,
number=3,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="submanifold_group",
full_name="second.protos.VoxelGenerator.submanifold_group",
index=3,
number=4,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="submanifold_size",
full_name="second.protos.VoxelGenerator.submanifold_size",
index=4,
number=5,
type=13,
cpp_type=3,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="submanifold_max_points",
full_name="second.protos.VoxelGenerator.submanifold_max_points",
index=5,
number=6,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=55,
serialized_end=243,
)
DESCRIPTOR.message_types_by_name["VoxelGenerator"] = _VOXELGENERATOR
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
VoxelGenerator = _reflection.GeneratedProtocolMessageType(
"VoxelGenerator",
(_message.Message,),
dict(
DESCRIPTOR=_VOXELGENERATOR,
__module__="second.protos.voxel_generator_pb2"
# @@protoc_insertion_point(class_scope:second.protos.VoxelGenerator)
),
)
_sym_db.RegisterMessage(VoxelGenerator)
# @@protoc_insertion_point(module_scope)
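# Illustrative construction of the generated message (not emitted by protoc);
# field names come from the descriptor above, values are placeholders:
# >>> cfg = VoxelGenerator(
# ...     voxel_size=[0.05, 0.05, 0.1],
# ...     point_cloud_range=[0.0, -40.0, -3.0, 70.4, 40.0, 1.0],
# ...     max_number_of_points_per_voxel=35)
# >>> VoxelGenerator.FromString(cfg.SerializeToString()) == cfg
# True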
|
# ext/asyncio/__init__.py
# Copyright (C) 2020-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
from .engine import async_engine_from_config
from .engine import AsyncConnection
from .engine import AsyncEngine
from .engine import AsyncTransaction
from .engine import create_async_engine
from .events import AsyncConnectionEvents
from .events import AsyncSessionEvents
from .result import AsyncMappingResult
from .result import AsyncResult
from .result import AsyncScalarResult
from .scoping import async_scoped_session
from .session import async_object_session
from .session import async_session
from .session import AsyncSession
from .session import AsyncSessionTransaction
|
import logging
import subprocess
import os.path
import time
from glob import glob
def CallSaWriter( inputFasta ):
saWriterCmd = ['sawriter', inputFasta]
logging.debug("Calling sawriter with command line '%s'", ' '.join(saWriterCmd))
proc = subprocess.Popen(saWriterCmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
logging.debug("Finished running sawriter")
if proc.returncode != 0:
logging.error("sawriter failed. Stderr was %s", stderr)
raise RuntimeError(" exited with returncode {e}"
.format(e=proc.returncode))
return True
class RefDb(object):
def __init__(self, dbPath, writeSuffixArrays=True):
logging.info("Building reference database from path '{0}'".format(dbPath))
tStart = time.time()
fastas = []
for suffix in ("fa", "fna", "fasta"):
fastas.extend(glob(os.path.join(dbPath, "*.{0}".format(suffix))))
logging.info("Found {0} reference fasta files".format(len(fastas)))
refs = dict()
for fasta in fastas:
bn = os.path.basename(fasta)
suffixArray = fasta + ".sa"
loci, _ = os.path.splitext(bn)
loci = loci.split('.')[0]
if loci.endswith("_gen"): # Catch and clean-up the common IMGT genomic suffix
loci = loci[:-4]
if not os.path.exists(suffixArray):
if writeSuffixArrays:
CallSaWriter( fasta )
else:
logging.warn("missing suffix array for : '{0}'".format(fasta))
suffixArray = None
if loci in refs.keys():
msg = "duplicate references for locus '{0}' found".format(loci)
logging.error(msg)
raise RuntimeError(msg)
refs[loci] = (fasta, suffixArray)
self._refs = refs
logging.debug("Found references for the following loci : {0}".format(", ".join(sorted(self._refs.keys()))))
tEnd = time.time()
logging.info("Finished building reference database in {0}s".format(round(tEnd - tStart, 3)))
def __iter__(self):
for locus in sorted(self._refs.keys()):
yield locus
def iteritems(self):
for locus in self:
yield (locus, self._refs[locus])
def keys(self):
for locus in sorted(self._refs.keys()):
yield locus
def __getitem__(self, name):
return self._refs[name]
if __name__ == "__main__":
import sys
dbPath = sys.argv[1]
logging.basicConfig(level=logging.DEBUG)
db = RefDb( dbPath )
|
"""pytest-allclose version information.
We use semantic versioning (see http://semver.org/)
and conform to PEP440 (see https://www.python.org/dev/peps/pep-0440/).
'.devN' will be added to the version unless the code base represents
a release version. Release versions are git tagged with the version.
"""
name = "pytest_allclose"
version_info = (1, 0, 1) # (major, minor, patch)
dev = 0
version = "{v}{dev}".format(
v=".".join(str(v) for v in version_info),
dev=(".dev%d" % dev) if dev is not None else "",
)
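# With the values above, `version` evaluates to "1.0.1.dev0"; setting `dev = None`
# for a tagged release would yield "1.0.1".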
|
"""Defines the templaters."""
import logging
from bisect import bisect_left
from collections import defaultdict
from typing import Dict, Iterator, List, Tuple, Optional, NamedTuple, Iterable
from cached_property import cached_property
# Instantiate the templater logger
templater_logger = logging.getLogger("sqlfluff.templater")
def iter_indices_of_newlines(raw_str: str) -> Iterator[int]:
"""Find the indices of all newlines in a string."""
init_idx = -1
while True:
nl_pos = raw_str.find("\n", init_idx + 1)
if nl_pos >= 0:
yield nl_pos
init_idx = nl_pos
else:
break # pragma: no cover TODO?
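# A quick example of the generator above:
#     list(iter_indices_of_newlines("a\nbc\n")) == [1, 4]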
class RawFileSlice(NamedTuple):
"""A slice referring to a raw file."""
raw: str
slice_type: str
source_idx: int
slice_subtype: Optional[str] = None
def end_source_idx(self):
"""Return the closing index of this slice."""
return self.source_idx + len(self.raw)
def source_slice(self):
"""Return the a slice object for this slice."""
return slice(self.source_idx, self.end_source_idx())
class TemplatedFileSlice(NamedTuple):
"""A slice referring to a templated file."""
slice_type: str
source_slice: slice
templated_slice: slice
class RawSliceBlockInfo(NamedTuple):
"""Template-related info about the raw slices in a TemplateFile."""
    # Given a raw file slice, return its block ID. Useful for identifying
# regions of a file with respect to template control structures (for, if).
block_ids: Dict[RawFileSlice, int]
# List of block IDs that have the following characteristics:
# - Loop body
# - Containing only literals (no templating)
literal_only_loops: List[int]
class TemplatedFile:
"""A templated SQL file.
This is the response of a templaters .process() method
and contains both references to the original file and also
the capability to split up that file when lexing.
"""
def __init__(
self,
source_str: str,
fname: str,
templated_str: Optional[str] = None,
sliced_file: Optional[List[TemplatedFileSlice]] = None,
raw_sliced: Optional[List[RawFileSlice]] = None,
):
"""Initialise the TemplatedFile.
If no templated_str is provided then we assume that
the file is NOT templated and that the templated view
is the same as the source view.
"""
self.source_str = source_str
# An empty string is still allowed as the templated string.
self.templated_str = source_str if templated_str is None else templated_str
# If no fname, we assume this is from a string or stdin.
self.fname = fname
        # Assume that no sliced_file means the file is not templated.
# TODO: Enable error handling.
if (
not sliced_file
) and self.templated_str != self.source_str: # pragma: no cover
raise ValueError("Cannot instantiate a templated file unsliced!")
# If we get here and we don't have sliced files, then it's raw, so create them.
self.sliced_file: List[TemplatedFileSlice] = sliced_file or [
TemplatedFileSlice(
"literal", slice(0, len(source_str)), slice(0, len(source_str))
)
]
self.raw_sliced: List[RawFileSlice] = raw_sliced or [
RawFileSlice(source_str, "literal", 0)
]
# Precalculate newlines, character positions.
self._source_newlines = list(iter_indices_of_newlines(self.source_str))
self._templated_newlines = list(iter_indices_of_newlines(self.templated_str))
@classmethod
def from_string(cls, raw):
"""Create TemplatedFile from a string."""
return cls(source_str=raw, fname="<string>")
def __bool__(self):
"""Return true if there's a templated file."""
return bool(self.templated_str)
def __repr__(self): # pragma: no cover TODO?
return "<TemplatedFile>"
def __str__(self):
"""Return the templated file if coerced to string."""
return self.templated_str
def get_line_pos_of_char_pos(
self, char_pos: int, source: bool = True
) -> Tuple[int, int]:
"""Get the line number and position of a point in the source file.
Args:
char_pos: The character position in the relevant file.
source: Are we checking the source file (as opposed to the
templated file)
Returns:
line_number, line_position
"""
if source:
ref_str = self._source_newlines
else:
ref_str = self._templated_newlines
nl_idx = bisect_left(ref_str, char_pos)
if nl_idx > 0:
return nl_idx + 1, char_pos - ref_str[nl_idx - 1]
else:
# NB: line_pos is char_pos+1 because character position is 0-indexed,
# but the line position is 1-indexed.
return 1, char_pos + 1
def _find_slice_indices_of_templated_pos(
self,
templated_pos: int,
start_idx: Optional[int] = None,
inclusive: bool = True,
) -> Tuple[int, int]:
"""Find a subset of the sliced file which touch this point.
NB: the last_idx is exclusive, as the intent is to use this as a slice.
"""
start_idx = start_idx or 0
first_idx = None
last_idx = start_idx
for idx, elem in enumerate(self.sliced_file[start_idx:]):
last_idx = idx + start_idx
if elem[2].stop >= templated_pos:
if first_idx is None:
first_idx = idx + start_idx
if elem[2].start > templated_pos:
break
elif not inclusive and elem[2].start >= templated_pos:
break
# If we got to the end add another index
else:
last_idx += 1
if first_idx is None: # pragma: no cover
raise ValueError("Position Not Found")
return first_idx, last_idx
@cached_property
def raw_slice_block_info(self) -> RawSliceBlockInfo:
"""Returns a dict with a unique ID for each template block."""
block_ids: Dict[RawFileSlice, int] = {}
block_content_types = defaultdict(set)
loops = set()
blocks = []
block_id = 0
for idx, raw_slice in enumerate(self.raw_sliced):
if raw_slice.slice_type != "block_end":
block_content_types[block_id].add(raw_slice.slice_type)
if raw_slice.slice_type == "block_start":
blocks.append(raw_slice)
templater_logger.info("%d -> %r", block_id, raw_slice.raw)
block_ids[raw_slice] = block_id
block_id += 1
if raw_slice.slice_subtype == "loop":
loops.add(block_id)
elif raw_slice.slice_type == "block_end":
blocks.pop()
block_id += 1
templater_logger.info("%d -> %r", block_id, raw_slice.raw)
block_ids[raw_slice] = block_id
else:
templater_logger.info("%d -> %r", block_id, raw_slice.raw)
block_ids[raw_slice] = block_id
literal_only_loops = [
block_id
for block_id in set(block_ids.values())
if block_id in loops and block_content_types[block_id] == {"literal"}
]
return RawSliceBlockInfo(block_ids, literal_only_loops)
def raw_slices_spanning_source_slice(self, source_slice: slice):
"""Return a list of the raw slices spanning a set of indices."""
# First find the start index
raw_slice_idx = 0
# Move the raw pointer forward to the start of this patch
while (
raw_slice_idx + 1 < len(self.raw_sliced)
and self.raw_sliced[raw_slice_idx + 1].source_idx <= source_slice.start
):
raw_slice_idx += 1
# Find slice index of the end of this patch.
slice_span = 1
while (
raw_slice_idx + slice_span < len(self.raw_sliced)
and self.raw_sliced[raw_slice_idx + slice_span].source_idx
< source_slice.stop
):
slice_span += 1
# Return the raw slices:
return self.raw_sliced[raw_slice_idx : raw_slice_idx + slice_span]
def templated_slice_to_source_slice(
self,
template_slice: slice,
) -> slice:
"""Convert a template slice to a source slice."""
if not self.sliced_file:
return template_slice # pragma: no cover TODO?
ts_start_sf_start, ts_start_sf_stop = self._find_slice_indices_of_templated_pos(
template_slice.start
)
ts_start_subsliced_file = self.sliced_file[ts_start_sf_start:ts_start_sf_stop]
# Work out the insertion point
insertion_point = -1
for elem in ts_start_subsliced_file:
# Do slice starts and ends:
for slice_elem in ("start", "stop"):
if getattr(elem[2], slice_elem) == template_slice.start:
# Store the lowest.
point = getattr(elem[1], slice_elem)
if insertion_point < 0 or point < insertion_point:
insertion_point = point
# We don't break here, because we might find ANOTHER
# later which is actually earlier.
# Zero length slice.
if template_slice.start == template_slice.stop:
# Is it on a join?
if insertion_point >= 0:
return slice(insertion_point, insertion_point)
# It's within a segment.
else:
if (
ts_start_subsliced_file
and ts_start_subsliced_file[0][0] == "literal"
):
offset = template_slice.start - ts_start_subsliced_file[0][2].start
return slice(
ts_start_subsliced_file[0][1].start + offset,
ts_start_subsliced_file[0][1].start + offset,
)
else:
raise ValueError(
"Attempting a single length slice within a templated section!"
)
# Otherwise it's a slice with length.
# Use a non inclusive match to get the end point.
ts_stop_sf_start, ts_stop_sf_stop = self._find_slice_indices_of_templated_pos(
template_slice.stop, inclusive=False
)
# Update starting position based on insertion point:
if insertion_point >= 0:
for elem in self.sliced_file[ts_start_sf_start:]:
if elem[1].start != insertion_point:
ts_start_sf_start += 1
else:
break
subslices = self.sliced_file[
# Very inclusive slice
min(ts_start_sf_start, ts_stop_sf_start) : max(
ts_start_sf_stop, ts_stop_sf_stop
)
]
if ts_start_sf_start == ts_start_sf_stop:
if ts_start_sf_start > len(self.sliced_file): # pragma: no cover
# We should never get here
raise ValueError("Starting position higher than sliced file position")
if ts_start_sf_start < len(self.sliced_file): # pragma: no cover
return self.sliced_file[1].source_slice
else:
return self.sliced_file[-1].source_slice # pragma: no cover
else:
start_slices = self.sliced_file[ts_start_sf_start:ts_start_sf_stop]
if ts_stop_sf_start == ts_stop_sf_stop: # pragma: no cover TODO?
stop_slices = [self.sliced_file[ts_stop_sf_start]]
else:
stop_slices = self.sliced_file[ts_stop_sf_start:ts_stop_sf_stop]
# if it's a literal segment then we can get the exact position
# otherwise we're greedy.
# Start.
if insertion_point >= 0:
source_start = insertion_point
elif start_slices[0][0] == "literal":
offset = template_slice.start - start_slices[0][2].start
source_start = start_slices[0][1].start + offset
else:
source_start = start_slices[0][1].start
# Stop.
if stop_slices[-1][0] == "literal":
offset = stop_slices[-1][2].stop - template_slice.stop
source_stop = stop_slices[-1][1].stop - offset
else:
source_stop = stop_slices[-1][1].stop
# Does this slice go backward?
if source_start > source_stop:
# If this happens, it's because one was templated and
# the other isn't, or because a loop means that the segments
# are in a different order.
# Take the widest possible span in this case.
source_start = min(elem[1].start for elem in subslices)
source_stop = max(elem[1].stop for elem in subslices)
source_slice = slice(source_start, source_stop)
return source_slice
def is_source_slice_literal(self, source_slice: slice) -> bool:
"""Work out whether a slice of the source file is a literal or not."""
# No sliced file? Everything is literal
if not self.raw_sliced: # pragma: no cover TODO?
return True
# Zero length slice. It's a literal, because it's definitely not templated.
if source_slice.start == source_slice.stop:
return True
is_literal = True
for raw_slice in self.raw_sliced:
# Reset if we find a literal and we're up to the start
# otherwise set false.
if raw_slice.source_idx <= source_slice.start:
is_literal = raw_slice.slice_type == "literal"
elif raw_slice.source_idx >= source_slice.stop:
# We've gone past the end. Break and Return.
break
else:
# We're in the middle. Check type
if raw_slice.slice_type != "literal":
is_literal = False
return is_literal
def source_only_slices(self) -> List[RawFileSlice]:
"""Return a list a slices which reference the parts only in the source.
All of these slices should be expected to have zero-length
in the templated file.
The results are NECESSARILY sorted.
"""
ret_buff = []
for elem in self.raw_sliced:
if elem.slice_type in ("comment", "block_end", "block_start", "block_mid"):
ret_buff.append(elem)
return ret_buff
class RawTemplater:
"""A templater which does nothing.
This also acts as the base templating class.
"""
name = "raw"
templater_selector = "templater"
def __init__(self, **kwargs):
"""Placeholder init function.
Here we should load any initial config found in the root directory. The init
function shouldn't take any arguments at this stage as we assume that it will load
its own config. Maybe at this stage we might allow override parameters to be passed
to the linter at runtime from the cli - that would be the only time we would pass
arguments in here.
"""
def sequence_files(
self, fnames: List[str], config=None, formatter=None
) -> Iterable[str]:
"""Given files to be processed, return a valid processing sequence."""
# Default is to process in the original order.
return fnames
def process(
self, *, in_str: str, fname: str, config=None, formatter=None
) -> Tuple[Optional[TemplatedFile], list]:
"""Process a string and return a TemplatedFile.
Note that the arguments are enforced as keywords
because Templaters can have differences in their
`process` method signature.
A Templater that only supports reading from a file
would need the following signature:
process(*, fname, in_str=None, config=None)
(arguments are swapped)
Args:
in_str (:obj:`str`): The input string.
fname (:obj:`str`, optional): The filename of this string. This is
mostly for loading config files at runtime.
config (:obj:`FluffConfig`): A specific config to use for this
templating operation. Only necessary for some templaters.
formatter (:obj:`CallbackFormatter`): Optional object for output.
"""
return TemplatedFile(in_str, fname=fname), []
def __eq__(self, other):
"""Return true if `other` is of the same class as this one.
NB: This is useful in comparing configs.
"""
return isinstance(other, self.__class__)
def config_pairs(self):
"""Returns info about the given templater for output by the cli."""
return [("templater", self.name)]
|
# coding: utf-8
from rnns import gru, lstm, atr, sru, lrn
def get_cell(cell_name, hidden_size, ln=False, scope=None):
"""Convert the cell_name into cell instance."""
cell_name = cell_name.lower()
if cell_name == "gru":
return gru.gru(hidden_size, ln=ln, scope=scope or "gru")
elif cell_name == "lstm":
return lstm.lstm(hidden_size, ln=ln, scope=scope or "lstm")
elif cell_name == "atr":
return atr.atr(hidden_size, ln=ln, scope=scope or "atr")
elif cell_name == "sru":
return sru.sru(hidden_size, ln=ln, scope=scope or "sru")
elif cell_name == "lrn":
return lrn.lrn(hidden_size, ln=ln, scope=scope or "lrn")
else:
raise NotImplementedError(
"{} is not supported".format(cell_name))
|
from collections import namedtuple
import random
import jssp.types
import jssp.utility
Config = namedtuple('Config', [
'num_scouts',
'num_normal_sites',
'num_elite_sites',
'num_normal_bees',
'num_elite_bees',
'taboo'])
class Optimizer(object):
def __init__(self, config, problem):
self.config = config
self.problem = problem
self.initialize()
def initialize(self):
self.sites = [jssp.utility.generate_random_solution(self.problem) for _ in range(self.config.num_scouts)]
self.sites.sort(key=lambda site: site[1])
def iterate(self):
for es in range(self.config.num_elite_sites):
site_index = es
site_schedule, site_makespan = self.sites[site_index]
best_site_schedule = site_schedule
best_site_makespan = site_makespan
for eb in range(self.config.num_elite_bees):
new_schedule, new_makespan = jssp.utility.taboo_search(
self.problem, site_schedule, site_makespan, self.config.taboo)
if new_makespan < best_site_makespan:
best_site_schedule = new_schedule
best_site_makespan = new_makespan
self.sites[site_index] = (best_site_schedule, best_site_makespan)
for ns in range(self.config.num_normal_sites):
site_index = self.config.num_elite_sites + ns
site_schedule, _ = self.sites[site_index]
site_allocations, site_makespan = jssp.utility.compute_allocations(self.problem, site_schedule)
site_moves = jssp.utility.find_neighborhood_moves(self.problem, site_allocations, site_makespan)
best_site_schedule = site_schedule
best_site_makespan = site_makespan
for nb in range(self.config.num_normal_bees):
new_schedule = jssp.utility.apply_move(self.problem, site_schedule, random.choice(site_moves))
new_makespan = jssp.utility.compute_makespan(self.problem, new_schedule)
if new_makespan < best_site_makespan:
best_site_schedule = new_schedule
best_site_makespan = new_makespan
self.sites[site_index] = (best_site_schedule, best_site_makespan)
for k in range(self.config.num_elite_sites + self.config.num_normal_sites, self.config.num_scouts):
self.sites[k] = jssp.utility.generate_random_solution(self.problem)
self.sites.sort(key=lambda site: site[1])
return jssp.types.Solution(self.sites[0][0], self.sites[0][1])
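# Sketch of a typical driver loop (parameter values are placeholders and
# `problem` is assumed to be a jssp problem instance loaded elsewhere):
#     config = Config(num_scouts=20, num_normal_sites=10, num_elite_sites=4,
#                     num_normal_bees=20, num_elite_bees=40, taboo=50)
#     optimizer = Optimizer(config, problem)
#     for _ in range(200):
#         solution = optimizer.iterate()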
|
# In this file you can register your RNN cells; other files can then look up a
# cell's configuration by name.
# Importing all the cells that we need
from cells.BasicLSTMCell import BasicLSTMCell
from cells.GRUCell import GRUCell
from cells.MogrifierLSTMCell import MogrifierLSTMCell
from cells.RRUCell import RRUCell
# This dictionary holds the configuration for each cell we have implemented (each variable is explained in the function
# below). To add your own cell, import the necessary function / class above and add its configuration (cell function /
# class, model name, whether it has a separate output size variable, and whether the cell's state is a tuple, as it is
# for LSTM-based cells) to the registry below.
cell_registry = {
"RRU": { # RRU ReZero version
"cell_fn": RRUCell,
"model_name": "rru_model",
"has_separate_output_size": True,
"state_is_tuple": False
},
"GRU": {
"cell_fn": GRUCell,
"model_name": "gru_model",
"has_separate_output_size": False,
"state_is_tuple": False
},
"LSTM": {
"cell_fn": BasicLSTMCell,
"model_name": "lstm_model",
"has_separate_output_size": False,
"state_is_tuple": True
},
# A competitor's cell that we test our cells against, for this you have to have dm-sonnet, etc. installed
"MogrifierLSTM": {
"cell_fn": MogrifierLSTMCell,
"model_name": "mogrifier_lstm_model",
"has_separate_output_size": False,
"state_is_tuple": True
}
}
def get_cell_information(cell_name):
"""
This function returns the requested cell's configuration.
Input:
cell_name: string, RNN cell's name.
Output:
cell_fn: class, a function / class that implements the corresponding cell;
model_name: string, name that will be used for the cell when running the tasks and logging the results;
        has_separate_output_size: bool, whether the cell takes a separate output_size argument (we need this to know
            whether to pass output_size to the implemented cell; passing it to a cell without that argument would
            raise an error);
state_is_tuple: bool, is the cell's state a tuple (as is the case for the LSTM based cells).
"""
# If the passed cell name isn't in the cell registry, we raise an error
if cell_name not in cell_registry.keys():
raise ValueError(f"No such cell ('{cell_name}') has been implemented!")
# Access the information of the cell from the registry
cell_information = cell_registry[cell_name]
    # Unpack the configuration into its corresponding variables
cell_fn = cell_information["cell_fn"]
model_name = cell_information["model_name"]
has_separate_output_size = cell_information["has_separate_output_size"]
state_is_tuple = cell_information["state_is_tuple"]
return cell_fn, model_name, has_separate_output_size, state_is_tuple
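# Illustrative lookup (cell names are the keys of cell_registry above):
#     cell_fn, model_name, has_separate_output_size, state_is_tuple = get_cell_information("GRU")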
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class RegionBackendService(pulumi.CustomResource):
affinity_cookie_ttl_sec: pulumi.Output[float]
backends: pulumi.Output[list]
circuit_breakers: pulumi.Output[dict]
connection_draining_timeout_sec: pulumi.Output[float]
consistent_hash: pulumi.Output[dict]
creation_timestamp: pulumi.Output[str]
description: pulumi.Output[str]
failover_policy: pulumi.Output[dict]
fingerprint: pulumi.Output[str]
health_checks: pulumi.Output[str]
load_balancing_scheme: pulumi.Output[str]
locality_lb_policy: pulumi.Output[str]
log_config: pulumi.Output[dict]
name: pulumi.Output[str]
network: pulumi.Output[str]
outlier_detection: pulumi.Output[dict]
project: pulumi.Output[str]
"""
The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
protocol: pulumi.Output[str]
region: pulumi.Output[str]
self_link: pulumi.Output[str]
"""
The URI of the created resource.
"""
session_affinity: pulumi.Output[str]
timeout_sec: pulumi.Output[float]
def __init__(__self__, resource_name, opts=None, affinity_cookie_ttl_sec=None, backends=None, circuit_breakers=None, connection_draining_timeout_sec=None, consistent_hash=None, description=None, failover_policy=None, health_checks=None, load_balancing_scheme=None, locality_lb_policy=None, log_config=None, name=None, network=None, outlier_detection=None, project=None, protocol=None, region=None, session_affinity=None, timeout_sec=None, __props__=None, __name__=None, __opts__=None):
"""
Create a RegionBackendService resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
The **backends** object supports the following:
* `balancingMode` (`pulumi.Input[str]`)
* `capacityScaler` (`pulumi.Input[float]`)
* `description` (`pulumi.Input[str]`)
* `failover` (`pulumi.Input[bool]`)
* `group` (`pulumi.Input[str]`)
* `maxConnections` (`pulumi.Input[float]`)
* `maxConnectionsPerEndpoint` (`pulumi.Input[float]`)
* `maxConnectionsPerInstance` (`pulumi.Input[float]`)
* `maxRate` (`pulumi.Input[float]`)
* `maxRatePerEndpoint` (`pulumi.Input[float]`)
* `maxRatePerInstance` (`pulumi.Input[float]`)
* `maxUtilization` (`pulumi.Input[float]`)
The **circuit_breakers** object supports the following:
* `connectTimeout` (`pulumi.Input[dict]`)
* `nanos` (`pulumi.Input[float]`)
* `seconds` (`pulumi.Input[float]`)
* `maxConnections` (`pulumi.Input[float]`)
* `maxPendingRequests` (`pulumi.Input[float]`)
* `maxRequests` (`pulumi.Input[float]`)
* `maxRequestsPerConnection` (`pulumi.Input[float]`)
* `maxRetries` (`pulumi.Input[float]`)
The **consistent_hash** object supports the following:
* `httpCookie` (`pulumi.Input[dict]`)
* `name` (`pulumi.Input[str]`)
* `path` (`pulumi.Input[str]`)
* `ttl` (`pulumi.Input[dict]`)
* `nanos` (`pulumi.Input[float]`)
* `seconds` (`pulumi.Input[float]`)
* `httpHeaderName` (`pulumi.Input[str]`)
* `minimumRingSize` (`pulumi.Input[float]`)
The **failover_policy** object supports the following:
* `disableConnectionDrainOnFailover` (`pulumi.Input[bool]`)
* `dropTrafficIfUnhealthy` (`pulumi.Input[bool]`)
* `failover_ratio` (`pulumi.Input[float]`)
The **log_config** object supports the following:
* `enable` (`pulumi.Input[bool]`)
* `sampleRate` (`pulumi.Input[float]`)
The **outlier_detection** object supports the following:
* `baseEjectionTime` (`pulumi.Input[dict]`)
* `nanos` (`pulumi.Input[float]`)
* `seconds` (`pulumi.Input[float]`)
* `consecutiveErrors` (`pulumi.Input[float]`)
* `consecutiveGatewayFailure` (`pulumi.Input[float]`)
* `enforcingConsecutiveErrors` (`pulumi.Input[float]`)
* `enforcingConsecutiveGatewayFailure` (`pulumi.Input[float]`)
* `enforcingSuccessRate` (`pulumi.Input[float]`)
* `interval` (`pulumi.Input[dict]`)
* `nanos` (`pulumi.Input[float]`)
* `seconds` (`pulumi.Input[float]`)
* `maxEjectionPercent` (`pulumi.Input[float]`)
* `successRateMinimumHosts` (`pulumi.Input[float]`)
* `successRateRequestVolume` (`pulumi.Input[float]`)
* `successRateStdevFactor` (`pulumi.Input[float]`)
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/compute_region_backend_service.html.markdown.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['affinity_cookie_ttl_sec'] = affinity_cookie_ttl_sec
__props__['backends'] = backends
__props__['circuit_breakers'] = circuit_breakers
__props__['connection_draining_timeout_sec'] = connection_draining_timeout_sec
__props__['consistent_hash'] = consistent_hash
__props__['description'] = description
__props__['failover_policy'] = failover_policy
if health_checks is None:
raise TypeError("Missing required property 'health_checks'")
__props__['health_checks'] = health_checks
__props__['load_balancing_scheme'] = load_balancing_scheme
__props__['locality_lb_policy'] = locality_lb_policy
__props__['log_config'] = log_config
__props__['name'] = name
__props__['network'] = network
__props__['outlier_detection'] = outlier_detection
__props__['project'] = project
__props__['protocol'] = protocol
__props__['region'] = region
__props__['session_affinity'] = session_affinity
__props__['timeout_sec'] = timeout_sec
__props__['creation_timestamp'] = None
__props__['fingerprint'] = None
__props__['self_link'] = None
super(RegionBackendService, __self__).__init__(
'gcp:compute/regionBackendService:RegionBackendService',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, affinity_cookie_ttl_sec=None, backends=None, circuit_breakers=None, connection_draining_timeout_sec=None, consistent_hash=None, creation_timestamp=None, description=None, failover_policy=None, fingerprint=None, health_checks=None, load_balancing_scheme=None, locality_lb_policy=None, log_config=None, name=None, network=None, outlier_detection=None, project=None, protocol=None, region=None, self_link=None, session_affinity=None, timeout_sec=None):
"""
Get an existing RegionBackendService resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input[str] self_link: The URI of the created resource.
The **backends** object supports the following:
* `balancingMode` (`pulumi.Input[str]`)
* `capacityScaler` (`pulumi.Input[float]`)
* `description` (`pulumi.Input[str]`)
* `failover` (`pulumi.Input[bool]`)
* `group` (`pulumi.Input[str]`)
* `maxConnections` (`pulumi.Input[float]`)
* `maxConnectionsPerEndpoint` (`pulumi.Input[float]`)
* `maxConnectionsPerInstance` (`pulumi.Input[float]`)
* `maxRate` (`pulumi.Input[float]`)
* `maxRatePerEndpoint` (`pulumi.Input[float]`)
* `maxRatePerInstance` (`pulumi.Input[float]`)
* `maxUtilization` (`pulumi.Input[float]`)
The **circuit_breakers** object supports the following:
* `connectTimeout` (`pulumi.Input[dict]`)
* `nanos` (`pulumi.Input[float]`)
* `seconds` (`pulumi.Input[float]`)
* `maxConnections` (`pulumi.Input[float]`)
* `maxPendingRequests` (`pulumi.Input[float]`)
* `maxRequests` (`pulumi.Input[float]`)
* `maxRequestsPerConnection` (`pulumi.Input[float]`)
* `maxRetries` (`pulumi.Input[float]`)
The **consistent_hash** object supports the following:
* `httpCookie` (`pulumi.Input[dict]`)
* `name` (`pulumi.Input[str]`)
* `path` (`pulumi.Input[str]`)
* `ttl` (`pulumi.Input[dict]`)
* `nanos` (`pulumi.Input[float]`)
* `seconds` (`pulumi.Input[float]`)
* `httpHeaderName` (`pulumi.Input[str]`)
* `minimumRingSize` (`pulumi.Input[float]`)
The **failover_policy** object supports the following:
* `disableConnectionDrainOnFailover` (`pulumi.Input[bool]`)
* `dropTrafficIfUnhealthy` (`pulumi.Input[bool]`)
* `failover_ratio` (`pulumi.Input[float]`)
The **log_config** object supports the following:
* `enable` (`pulumi.Input[bool]`)
* `sampleRate` (`pulumi.Input[float]`)
The **outlier_detection** object supports the following:
* `baseEjectionTime` (`pulumi.Input[dict]`)
* `nanos` (`pulumi.Input[float]`)
* `seconds` (`pulumi.Input[float]`)
* `consecutiveErrors` (`pulumi.Input[float]`)
* `consecutiveGatewayFailure` (`pulumi.Input[float]`)
* `enforcingConsecutiveErrors` (`pulumi.Input[float]`)
* `enforcingConsecutiveGatewayFailure` (`pulumi.Input[float]`)
* `enforcingSuccessRate` (`pulumi.Input[float]`)
* `interval` (`pulumi.Input[dict]`)
* `nanos` (`pulumi.Input[float]`)
* `seconds` (`pulumi.Input[float]`)
* `maxEjectionPercent` (`pulumi.Input[float]`)
* `successRateMinimumHosts` (`pulumi.Input[float]`)
* `successRateRequestVolume` (`pulumi.Input[float]`)
* `successRateStdevFactor` (`pulumi.Input[float]`)
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/compute_region_backend_service.html.markdown.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["affinity_cookie_ttl_sec"] = affinity_cookie_ttl_sec
__props__["backends"] = backends
__props__["circuit_breakers"] = circuit_breakers
__props__["connection_draining_timeout_sec"] = connection_draining_timeout_sec
__props__["consistent_hash"] = consistent_hash
__props__["creation_timestamp"] = creation_timestamp
__props__["description"] = description
__props__["failover_policy"] = failover_policy
__props__["fingerprint"] = fingerprint
__props__["health_checks"] = health_checks
__props__["load_balancing_scheme"] = load_balancing_scheme
__props__["locality_lb_policy"] = locality_lb_policy
__props__["log_config"] = log_config
__props__["name"] = name
__props__["network"] = network
__props__["outlier_detection"] = outlier_detection
__props__["project"] = project
__props__["protocol"] = protocol
__props__["region"] = region
__props__["self_link"] = self_link
__props__["session_affinity"] = session_affinity
__props__["timeout_sec"] = timeout_sec
return RegionBackendService(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
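# Illustrative resource declaration (values are placeholders; `my_health_check`
# is assumed to be an existing health check resource; see the docstrings above
# for the full set of supported arguments):
#     backend = RegionBackendService(
#         "my-backend",
#         region="us-central1",
#         health_checks=my_health_check.self_link,
#         protocol="TCP",
#         connection_draining_timeout_sec=10)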
|
from __future__ import print_function, absolute_import
from io import BytesIO
import struct
s24 = 's24'
u16 = 'u16'
u8 = 'u8'
u30 = 'u30'
s32 = 's32'
u32 = 'u32'
d64 = 'd64'
class uint(int):
__slots__ = ()
def __init__(self, val):
assert self >= 0
class ABCStream(BytesIO):
def read_formatted(self, format):
return getattr(self, 'read_' + format)()
def write_formatted(self, format, value):
return getattr(self, 'write_' + format)(value)
def read_s24(self):
bytes = map(ord, self.read(3))
res = bytes[0] | (bytes[1] << 8) | (bytes[2] << 16)
if res > (1 << 23):
res = -((~res)&((1 << 23)-1))-1
return res
def write_s24(self, val):
assert -(1 << 23) < val < (1 << 23)
if val < 0:
val = (1 << 24) + val
self.write(bytearray([val & 0xFF,
((val >> 8) & 0xFF),
val >> 16]))
def read_u16(self):
bytes = map(ord, self.read(2))
return uint(bytes[0] | (bytes[1] << 8))
def write_u16(self, val):
assert 0 <= val < 1 << 16
self.write(bytearray([
val & 0xFF,
val >> 8]))
def read_u8(self):
return uint(ord(self.read(1)[0]))
def write_u8(self, val):
assert 0 <= val < 256
self.write(bytearray([ val ]))
def read_u30(self):
res = 0
for i in range(5):
b = ord(self.read(1))
res |= (b & 127) << (i*7)
if not (b & 128):
break
assert res < (1 << 30)
return uint(res)
def write_u30(self, val):
assert val < (1 << 30)
while True:
byte = val & 127
val >>= 7
if val:
byte |= 128
self.write(bytearray([byte]))
if not val:
break
def read_s32(self):
res = 0
for i in range(5):
b = ord(self.read(1))
if b & 128:
res |= (b & 127) << (i*7)
else:
res |= (b & 63) << (i*7)
if b & 64:
res = -res
break
assert -(1 << 32) < res < (1 << 32)
return res
def write_s32(self, val):
if val < 0:
sign = 64
val = -val
else:
sign = 0
assert val < (1 << 32)
while True:
byte = val & 127
val >>= 7
if val:
byte |= 128
elif byte >= 64:
byte |= 128
else:
byte |= sign
self.write(bytearray([byte]))
if not (byte & 128):
break
def read_u32(self):
res = 0
for i in range(5):
b = ord(self.read(1))
res |= (b & 127) << (i*7)
if not (b & 128):
break
assert res < (1 << 32)
return uint(res)
def write_u32(self, val):
assert 0 <= val < (1 << 32)
while val:
byte = val & 127
val >>= 7
if val:
byte |= 128
self.write(bytearray([byte]))
def read_d64(self):
return struct.unpack('d', self.read(8))[0]
def write_d64(self, val):
self.write(struct.pack('d', val))
class DummyABCStream(object):
for i in dir(ABCStream):
if not i.startswith('__') and hasattr(getattr(ABCStream, i), '__call__'):
locals()[i] = lambda *args: None
class Bytes(object):
def __init__(self, bytes):
self._bytes = bytes
@property
def int_be(self):
res = 0
for i in self._bytes:
res = (res << 8) | ord(i)
return res
@property
def int_le(self):
res = 0
for i in reversed(self._bytes):
res = (res << 8) | ord(i)
return res
@property
def sint_le(self):
res = 0
if self._bytes[0] > 128:
res = - (self._bytes[0] & 127)
else:
res = self._bytes[0]
for i in reversed(self._bytes[1:]):
res = (res << 8) | ord(i)
return res
@property
def bytes(self):
return self._bytes
class BitStream(object):
def __init__(self, stream):
self._stream = stream
self._buf = b''
self._bitoffset = 0
def readbytes(self, count):
if self._buf:
self._buf = b''
self._bitoffset = 0
return Bytes(self._stream.read(count))
def readstring(self):
c = self._stream.read(1)
res = bytearray()
while c and c != b'\x00':
res.extend(c)
c = self._stream.read(1)
return res.decode('utf-8')
def readbits(self, count):
if self._buf:
if self._bitoffset + count >= 8:
val = ((1 << (8 - self._bitoffset)) - 1) & self._buf[0]
count -= 8 - self._bitoffset
self._buf = b''
self._bitoffset = 0
else:
val = ((1 << count)-1) & (self._buf[0] >> (8
- self._bitoffset - count))
self._bitoffset += count
return val
else:
val = 0
bytes = count >> 3
for i in range(bytes):
byt = map(ord, self._stream.read(1))
val = (val << 8) | byt[0]
tail = count & 7
if tail:
self._buf = map(ord, self._stream.read(1))
val <<= tail
val |= self._buf[0] >> (8 - tail)
self._bitoffset = tail
return val
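# Worked example of the u30 variable-length encoding implemented above: the value
# 300 (0b100101100) is written in 7-bit groups, least-significant group first,
# with the high bit of each byte marking continuation:
#     >>> s = ABCStream()
#     >>> s.write_u30(300)
#     >>> s.getvalue()
#     b'\xac\x02'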
|
"""
Written by DaehwaKim
>> daehwa.github.io
"""
import numpy as np
import cv2
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from load_param import cap, cap2, frame_from, frame_to, rs_offset, opti_offset
plt.ion()
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(111, projection='3d') # Axe3D object
r_start, r_end = -500,500
visShPoints, = ax.plot(range(r_start,r_end), range(r_start,r_end), range(r_start,r_end),alpha=0.6, linestyle="", marker=".", c='g')
visNosePoints, = ax.plot(range(r_start,r_end), range(r_start,r_end), range(r_start,r_end),alpha=0.6, linestyle="", marker=".", c='y')
visTruePoints, = ax.plot(range(r_start,r_end), range(r_start,r_end), range(r_start,r_end),alpha=0.6, linestyle="", marker=".", c='b')
visPredPoints, = ax.plot(range(r_start,r_end), range(r_start,r_end), range(r_start,r_end),alpha=0.6, linestyle="", marker=".", c='r')
xaxis, yaxis, zaxis = np.array([[-400,0,0],[400,0,0]]), np.array([[0,-400,0],[0,400,0]]), np.array([[0,0,-400],[0,0,400]])
visBodyAxisX, = ax.plot(xaxis[:,0], xaxis[:,1], xaxis[:,2],alpha=0.6, c='r')
visBodyAxisY, = ax.plot(yaxis[:,0], yaxis[:,1], yaxis[:,2],alpha=0.6, c='g')
visBodyAxisZ, = ax.plot(zaxis[:,0], zaxis[:,1], zaxis[:,2],alpha=0.6, c='b')
visCamAxisX, = ax.plot(xaxis[:,0], xaxis[:,1], xaxis[:,2],alpha=0.6, c='r')
visCamAxisY, = ax.plot(yaxis[:,0], yaxis[:,1], yaxis[:,2],alpha=0.6, c='g')
visCamAxisZ, = ax.plot(zaxis[:,0], zaxis[:,1], zaxis[:,2],alpha=0.6, c='b')
visTruthAxisX, = ax.plot(xaxis[:,0], xaxis[:,1], xaxis[:,2],alpha=0.6, c='r')
visTruthAxisY, = ax.plot(yaxis[:,0], yaxis[:,1], yaxis[:,2],alpha=0.6, c='g')
visTruthAxisZ, = ax.plot(zaxis[:,0], zaxis[:,1], zaxis[:,2],alpha=0.6, c='b')
ax.set_xlabel('$x$',); ax.set_ylabel('$y$'); ax.set_zlabel('$z$')
def visualize_3d(opti_obj, ec_obj):
t = opti_obj.center
p = ec_obj.center_phone
# distance = t - p
# print("error:",distance," (mm)")
Shpt, Nose = ec_obj.ShoulderPts_inlier, ec_obj.center_phone
xunit, yunit, zunit = ec_obj.xunit_phone, ec_obj.yunit_phone, ec_obj.zunit_phone
xaxis, yaxis, zaxis = np.array([p, p + xunit*100]), np.array([p, p + yunit*100]), np.array([p, p + zunit*100])
xunit_, yunit_, zunit_ = opti_obj.x_axis, opti_obj.y_axis, opti_obj.z_axis
xaxis_, yaxis_, zaxis_ = np.array([t, t + xunit_*100]), np.array([t, t + yunit_*100]), np.array([t, t + zunit_*100])
visShPoints.set_data(Shpt[:,0], Shpt[:,1])
visShPoints.set_3d_properties(Shpt[:,2], zdir="z")
visNosePoints.set_data(Nose[0], Nose[1])
visNosePoints.set_3d_properties(Nose[2], zdir="z")
visTruePoints.set_data(t[0], t[1])
visTruePoints.set_3d_properties(t[2], zdir="z")
visPredPoints.set_data(p[0], p[1])
visPredPoints.set_3d_properties(p[2], zdir="z")
visCamAxisX.set_data(xaxis[:,0], xaxis[:,1])
visCamAxisX.set_3d_properties(xaxis[:,2], zdir="z")
visCamAxisY.set_data(yaxis[:,0], yaxis[:,1])
visCamAxisY.set_3d_properties(yaxis[:,2], zdir="z")
visCamAxisZ.set_data(zaxis[:,0], zaxis[:,1])
visCamAxisZ.set_3d_properties(zaxis[:,2], zdir="z")
visTruthAxisX.set_data(xaxis_[:,0], xaxis_[:,1])
visTruthAxisX.set_3d_properties(xaxis_[:,2], zdir="z")
visTruthAxisY.set_data(yaxis_[:,0], yaxis_[:,1])
visTruthAxisY.set_3d_properties(yaxis_[:,2], zdir="z")
visTruthAxisZ.set_data(zaxis_[:,0], zaxis_[:,1])
visTruthAxisZ.set_3d_properties(zaxis_[:,2], zdir="z")
fig.canvas.draw()
fig.canvas.flush_events()
if not(cap.isOpened()):
print("No video source")
exit()
frames = []
frames2 = []
needupdate = True
for i in range(0,rs_offset):
cap2.read()
ret, frame = cap.read()
ret2, frame2 = cap2.read()
frames.append(frame)
frames2.append(frame2)
i = 0
offset = np.array([0,0,0])
def streaming(opti_obj, ec_obj):
global i, needupdate, frames, frames2, frame, frame2
key = cv2.waitKey(2)
# if(i == offset_sampl_frame):
# global offset
# offset = ec_obj.center_phone - opti_obj.center
if(opti_obj == None or not ec_obj.valid or i < frame_to):
i = i+1
ret, frame = cap.read()
ret2, frame2 = cap2.read()
frames.append(frame)
frames2.append(frame2)
needupdate = True
return
if key == ord('d') or i < frame_to:
i = i+1
if(len(frames) > i):
frame = frames[i]
frame2 = frames2[i]
else:
ret, frame = cap.read()
ret2, frame2 = cap2.read()
frames.append(frame)
frames2.append(frame2)
needupdate = True
print("Current Frame:"+str(i))
# x_angle, _ = angleNaxis(opti_obj.x_axis,ec_obj.phone_xunit)
# y_angle, _ = angleNaxis(opti_obj.y_axis,ec_obj.phone_yunit)
# z_angle, _ = angleNaxis(opti_obj.z_axis,ec_obj.phone_zunit)
# print(x_angle,y_angle,z_angle)
needupdate = True
elif key == ord('a'):
i = i-1
frame = frames[i]
frame2 = frames2[i]
needupdate = True
print("Current Frame:"+str(i))
if(needupdate):
visualize_3d(opti_obj,ec_obj)
if not(len(frames) > i):
frames.append(frame)
frames2.append(frame2)
needupdate = False
def get_frame_num():
return i
def get_images():
return frames[i], frames2[i]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2019/1/14 4:44 PM
# @Author : w8ay
# @File : log.py
import logging
import threading
from config import DEBUG as d1
from thirdpart.ansistrm import ColorizingStreamHandler
DEBUG, INFO, WARN, ERROR, SUCCESS = range(1, 6)
logging.addLevelName(DEBUG, '^')
logging.addLevelName(INFO, '*')
logging.addLevelName(WARN, '!')
logging.addLevelName(ERROR, 'x')
logging.addLevelName(SUCCESS, '+')
logger = logging.getLogger("w12scan")
handle = ColorizingStreamHandler()
handle.level_map[logging.getLevelName('^')] = (None, 'white', False)
handle.level_map[logging.getLevelName('*')] = (None, 'cyan', False)
handle.level_map[logging.getLevelName('+')] = (None, 'green', False)
handle.level_map[logging.getLevelName('x')] = (None, 'red', False)
handle.level_map[logging.getLevelName('!')] = (None, 'yellow', False)
if d1:
logger.setLevel(DEBUG)
else:
logger.setLevel(INFO)
formatter = logging.Formatter(fmt='%(asctime)s [%(levelname)s] %(message)s', datefmt='%Y/%m/%d %H:%M:%S')
handle.setFormatter(formatter)
logger.addHandler(handle)
class LOGGER:
def __init__(self):
self.lock = threading.Lock()
def info(self, msg):
self.lock.acquire()
logger.log(INFO, msg)
self.lock.release()
def warning(self, msg):
self.lock.acquire()
logger.log(WARN, msg)
self.lock.release()
def error(self, msg):
self.lock.acquire()
logger.log(ERROR, msg)
self.lock.release()
def success(self, msg):
self.lock.acquire()
logger.log(SUCCESS, msg)
self.lock.release()
def debug(self, msg):
self.lock.acquire()
logger.log(DEBUG, msg)
self.lock.release()
|
#!/usr/bin/env python
# coding: utf-8
import threading
import time
class Job(threading.Thread):
def __init__(self, *args, **kwargs):
super(Job, self).__init__(*args, **kwargs)
        self.__flag = threading.Event()  # flag used to pause the thread
        self.__flag.set()  # set to True
        self.__running = threading.Event()  # flag used to stop the thread
        self.__running.set()  # set running to True
    def run(self):
        while self.__running.is_set():
            self.__flag.wait()  # returns immediately while True; blocks until the flag is set again while False
            print(time.time())
            time.sleep(1)
    def pause(self):
        self.__flag.clear()  # set to False to block the thread
    def resume(self):
        self.__flag.set()  # set to True so the thread stops blocking
    def stop(self):
        self.__flag.set()  # resume the thread first, in case it is currently paused
        self.__running.clear()  # set to False to end the run loop
if __name__ == "__main__":
a = Job()
a.start()
time.sleep(3)
a.pause()
time.sleep(3)
a.resume()
time.sleep(3)
a.pause()
time.sleep(2)
a.stop()
|
# -*- encoding: utf-8 -*-
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from http import client as http_client
import io
from unittest import mock
from oslo_config import cfg
from oslo_utils import uuidutils
from ironic import api
from ironic.api.controllers.v1 import node as api_node
from ironic.api.controllers.v1 import utils
from ironic.common import exception
from ironic.common import policy
from ironic.common import states
from ironic import objects
from ironic.tests import base
from ironic.tests.unit.api import utils as test_api_utils
from ironic.tests.unit.objects import utils as obj_utils
CONF = cfg.CONF
class TestApiUtils(base.TestCase):
def test_validate_limit(self):
limit = utils.validate_limit(10)
self.assertEqual(10, limit)
# max limit
limit = utils.validate_limit(999999999)
self.assertEqual(CONF.api.max_limit, limit)
# negative
self.assertRaises(exception.ClientSideError, utils.validate_limit, -1)
# zero
self.assertRaises(exception.ClientSideError, utils.validate_limit, 0)
def test_validate_sort_dir(self):
sort_dir = utils.validate_sort_dir('asc')
self.assertEqual('asc', sort_dir)
# invalid sort_dir parameter
self.assertRaises(exception.ClientSideError,
utils.validate_sort_dir,
'fake-sort')
def test_apply_jsonpatch(self):
doc = {"foo": {"bar": "baz"}}
patch = [{"op": "add", "path": "/foo/answer", "value": 42}]
result = utils.apply_jsonpatch(doc, patch)
expected = {"foo": {"bar": "baz", "answer": 42}}
self.assertEqual(expected, result)
def test_apply_jsonpatch_no_add_root_attr(self):
doc = {}
patch = [{"op": "add", "path": "/foo", "value": 42}]
self.assertRaisesRegex(exception.ClientSideError,
"Adding a new attribute",
utils.apply_jsonpatch, doc, patch)
def test_apply_jsonpatch_remove_non_existent(self):
# Raises a KeyError.
doc = {}
patch = [{"op": "remove", "path": "/foo"}]
self.assertRaises(exception.PatchError,
utils.apply_jsonpatch, doc, patch)
def test_apply_jsonpatch_replace_non_existent_list_item(self):
# Raises an IndexError.
doc = []
patch = [{"op": "replace", "path": "/0", "value": 42}]
self.assertRaises(exception.PatchError,
utils.apply_jsonpatch, doc, patch)
def test_get_patch_values_no_path(self):
patch = [{'path': '/name', 'op': 'update', 'value': 'node-0'}]
path = '/invalid'
values = utils.get_patch_values(patch, path)
self.assertEqual([], values)
def test_get_patch_values_remove(self):
patch = [{'path': '/name', 'op': 'remove'}]
path = '/name'
values = utils.get_patch_values(patch, path)
self.assertEqual([], values)
def test_get_patch_values_success(self):
patch = [{'path': '/name', 'op': 'replace', 'value': 'node-x'}]
path = '/name'
values = utils.get_patch_values(patch, path)
self.assertEqual(['node-x'], values)
def test_get_patch_values_multiple_success(self):
patch = [{'path': '/name', 'op': 'replace', 'value': 'node-x'},
{'path': '/name', 'op': 'replace', 'value': 'node-y'}]
path = '/name'
values = utils.get_patch_values(patch, path)
self.assertEqual(['node-x', 'node-y'], values)
def test_is_path_removed_success(self):
patch = [{'path': '/name', 'op': 'remove'}]
path = '/name'
value = utils.is_path_removed(patch, path)
self.assertTrue(value)
def test_is_path_removed_subpath_success(self):
patch = [{'path': '/local_link_connection/switch_id', 'op': 'remove'}]
path = '/local_link_connection'
value = utils.is_path_removed(patch, path)
self.assertTrue(value)
def test_is_path_removed_similar_subpath(self):
patch = [{'path': '/local_link_connection_info/switch_id',
'op': 'remove'}]
path = '/local_link_connection'
value = utils.is_path_removed(patch, path)
self.assertFalse(value)
def test_is_path_removed_replace(self):
patch = [{'path': '/name', 'op': 'replace', 'value': 'node-x'}]
path = '/name'
value = utils.is_path_removed(patch, path)
self.assertFalse(value)
def test_is_path_updated_success(self):
patch = [{'path': '/name', 'op': 'remove'}]
path = '/name'
value = utils.is_path_updated(patch, path)
self.assertTrue(value)
def test_is_path_updated_subpath_success(self):
patch = [{'path': '/properties/switch_id', 'op': 'add', 'value': 'id'}]
path = '/properties'
value = utils.is_path_updated(patch, path)
self.assertTrue(value)
def test_is_path_updated_similar_subpath(self):
patch = [{'path': '/properties2/switch_id',
'op': 'replace', 'value': 'spam'}]
path = '/properties'
value = utils.is_path_updated(patch, path)
self.assertFalse(value)
def test_check_for_invalid_fields(self):
requested = ['field_1', 'field_3']
supported = ['field_1', 'field_2', 'field_3']
utils.check_for_invalid_fields(requested, supported)
def test_check_for_invalid_fields_fail(self):
requested = ['field_1', 'field_4']
supported = ['field_1', 'field_2', 'field_3']
self.assertRaises(exception.InvalidParameterValue,
utils.check_for_invalid_fields,
requested, supported)
def test_patch_update_changed_fields(self):
schema = {
'properties': {
'one': {},
'two': {},
'three': {},
'four': {},
'five_uuid': {},
}
}
fields = [
'one',
'two',
'three',
'four',
'five_id'
]
def rpc_object():
obj = mock.MagicMock()
items = {
'one': 1,
'two': 'ii',
'three': None,
'four': [1, 2, 3, 4],
'five_id': 123
}
obj.__getitem__.side_effect = items.__getitem__
obj.__contains__.side_effect = items.__contains__
return obj
# test no change
o = rpc_object()
utils.patch_update_changed_fields({
'one': 1,
'two': 'ii',
'three': None,
'four': [1, 2, 3, 4],
}, o, fields, schema, id_map={'five_id': 123})
o.__setitem__.assert_not_called()
# test everything changes, and id_map values override from_dict values
o = rpc_object()
utils.patch_update_changed_fields({
'one': 2,
'two': 'iii',
'three': '',
'four': [2, 3],
}, o, fields, schema, id_map={'four': [4], 'five_id': 456})
o.__setitem__.assert_has_calls([
mock.call('one', 2),
mock.call('two', 'iii'),
mock.call('three', ''),
mock.call('four', [4]),
mock.call('five_id', 456)
])
# test None fields from None values and missing keys
# also five_id is untouched with no id_map
o = rpc_object()
utils.patch_update_changed_fields({
'two': None,
}, o, fields, schema)
o.__setitem__.assert_has_calls([
mock.call('two', None),
])
# test fields not in the schema are untouched
fields = [
'six',
'seven',
'eight'
]
o = rpc_object()
utils.patch_update_changed_fields({
'six': 2,
'seven': 'iii',
'eight': '',
}, o, fields, schema)
o.__setitem__.assert_not_called()
def test_patched_validate_with_schema(self):
schema = {
'properties': {
'one': {'type': 'string'},
'two': {'type': 'integer'},
'three': {'type': 'boolean'},
}
}
# test non-schema fields removed
pd = {
'one': 'one',
'two': 2,
'three': True,
'four': 4,
'five': 'five'
}
utils.patched_validate_with_schema(pd, schema)
self.assertEqual({
'one': 'one',
'two': 2,
'three': True,
}, pd)
# test fails schema validation
pd = {
'one': 1,
'two': 2,
'three': False
}
e = self.assertRaises(exception.InvalidParameterValue,
utils.patched_validate_with_schema, pd, schema)
self.assertIn("1 is not of type 'string'", str(e))
# test fails custom validation
def validate(name, value):
raise exception.InvalidParameterValue('big ouch')
pd = {
'one': 'one',
'two': 2,
'three': False
}
e = self.assertRaises(exception.InvalidParameterValue,
utils.patched_validate_with_schema, pd, schema,
validate)
self.assertIn("big ouch", str(e))
def test_patch_validate_allowed_fields(self):
allowed_fields = ['one', 'two', 'three']
# patch all
self.assertEqual(
{'one', 'two', 'three'},
utils.patch_validate_allowed_fields([
{'path': '/one'},
{'path': '/two'},
{'path': '/three/four'},
], allowed_fields))
# patch one
self.assertEqual(
{'one'},
utils.patch_validate_allowed_fields([
{'path': '/one'},
], allowed_fields))
# patch invalid field
e = self.assertRaises(
exception.Invalid,
utils.patch_validate_allowed_fields,
[{'path': '/four'}],
allowed_fields)
self.assertIn("Cannot patch /four. "
"Only the following can be updated: "
"one, two, three", str(e))
@mock.patch.object(api, 'request', autospec=False)
def test_sanitize_dict(self, mock_req):
mock_req.public_url = 'http://192.0.2.1:5050'
node = obj_utils.get_test_node(
self.context,
created_at=datetime.datetime(2000, 1, 1, 0, 0),
updated_at=datetime.datetime(2001, 1, 1, 0, 0),
inspection_started_at=datetime.datetime(2002, 1, 1, 0, 0),
console_enabled=True,
tags=['one', 'two', 'three'])
expected_links = [{
'href': 'http://192.0.2.1:5050/v1/node/'
'1be26c0b-03f2-4d2e-ae87-c02d7f33c123',
'rel': 'self'
}, {
'href': 'http://192.0.2.1:5050/node/'
'1be26c0b-03f2-4d2e-ae87-c02d7f33c123',
'rel': 'bookmark'
}]
# all fields
node_dict = utils.object_to_dict(
node,
link_resource='node',
)
utils.sanitize_dict(node_dict, None)
self.assertEqual({
'created_at': '2000-01-01T00:00:00+00:00',
'links': expected_links,
'updated_at': '2001-01-01T00:00:00+00:00',
'uuid': '1be26c0b-03f2-4d2e-ae87-c02d7f33c123'
}, node_dict)
# some fields
node_dict = utils.object_to_dict(
node,
link_resource='node',
)
utils.sanitize_dict(node_dict, ['uuid', 'created_at'])
self.assertEqual({
'created_at': '2000-01-01T00:00:00+00:00',
'links': expected_links,
'uuid': '1be26c0b-03f2-4d2e-ae87-c02d7f33c123'
}, node_dict)
# no fields
node_dict = utils.object_to_dict(
node,
link_resource='node',
)
utils.sanitize_dict(node_dict, [])
self.assertEqual({
'links': expected_links,
}, node_dict)
@mock.patch.object(api, 'request', spec_set=['version'])
class TestCheckAllowFields(base.TestCase):
def test_check_allow_specify_fields(self, mock_request):
mock_request.version.minor = 8
self.assertIsNone(utils.check_allow_specify_fields(['foo']))
def test_check_allow_specify_fields_fail(self, mock_request):
mock_request.version.minor = 7
self.assertRaises(exception.NotAcceptable,
utils.check_allow_specify_fields, ['foo'])
def test_check_allowed_fields_network_interface(self, mock_request):
mock_request.version.minor = 20
self.assertIsNone(
utils.check_allowed_fields(['network_interface']))
def test_check_allowed_fields_network_interface_fail(self, mock_request):
mock_request.version.minor = 19
self.assertRaises(
exception.NotAcceptable,
utils.check_allowed_fields,
['network_interface'])
def test_check_allowed_fields_resource_class(self, mock_request):
mock_request.version.minor = 21
self.assertIsNone(
utils.check_allowed_fields(['resource_class']))
def test_check_allowed_fields_resource_class_fail(self, mock_request):
mock_request.version.minor = 20
self.assertRaises(
exception.NotAcceptable,
utils.check_allowed_fields,
['resource_class'])
def test_check_allowed_fields_rescue_interface_fail(self, mock_request):
mock_request.version.minor = 31
self.assertRaises(
exception.NotAcceptable,
utils.check_allowed_fields,
['rescue_interface'])
def test_check_allowed_portgroup_fields_mode_properties(self,
mock_request):
mock_request.version.minor = 26
self.assertIsNone(
utils.check_allowed_portgroup_fields(['mode']))
self.assertIsNone(
utils.check_allowed_portgroup_fields(['properties']))
def test_check_allowed_portgroup_fields_mode_properties_fail(self,
mock_request):
mock_request.version.minor = 25
self.assertRaises(
exception.NotAcceptable,
utils.check_allowed_portgroup_fields,
['mode'])
self.assertRaises(
exception.NotAcceptable,
utils.check_allowed_portgroup_fields,
['properties'])
def test_check_allow_specify_driver(self, mock_request):
mock_request.version.minor = 16
self.assertIsNone(utils.check_allow_specify_driver(['fake']))
def test_check_allow_specify_driver_fail(self, mock_request):
mock_request.version.minor = 15
self.assertRaises(exception.NotAcceptable,
utils.check_allow_specify_driver, ['fake'])
def test_check_allow_specify_resource_class(self, mock_request):
mock_request.version.minor = 21
self.assertIsNone(utils.check_allow_specify_resource_class(['foo']))
def test_check_allow_specify_resource_class_fail(self, mock_request):
mock_request.version.minor = 20
self.assertRaises(exception.NotAcceptable,
utils.check_allow_specify_resource_class, ['foo'])
def test_check_allow_filter_driver_type(self, mock_request):
mock_request.version.minor = 30
self.assertIsNone(utils.check_allow_filter_driver_type('classic'))
def test_check_allow_filter_driver_type_none(self, mock_request):
mock_request.version.minor = 29
self.assertIsNone(utils.check_allow_filter_driver_type(None))
def test_check_allow_filter_driver_type_fail(self, mock_request):
mock_request.version.minor = 29
self.assertRaises(exception.NotAcceptable,
utils.check_allow_filter_driver_type, 'classic')
def test_check_allow_filter_by_conductor_group(self, mock_request):
mock_request.version.minor = 46
self.assertIsNone(utils.check_allow_filter_by_conductor_group('foo'))
def test_check_allow_filter_by_conductor_group_none(self, mock_request):
mock_request.version.minor = 46
self.assertIsNone(utils.check_allow_filter_by_conductor_group(None))
def test_check_allow_filter_by_conductor_group_fail(self, mock_request):
mock_request.version.minor = 45
self.assertRaises(exception.NotAcceptable,
utils.check_allow_filter_by_conductor_group, 'foo')
def test_check_allow_driver_detail(self, mock_request):
mock_request.version.minor = 30
self.assertIsNone(utils.check_allow_driver_detail(True))
def test_check_allow_driver_detail_false(self, mock_request):
mock_request.version.minor = 30
self.assertIsNone(utils.check_allow_driver_detail(False))
def test_check_allow_driver_detail_none(self, mock_request):
mock_request.version.minor = 29
self.assertIsNone(utils.check_allow_driver_detail(None))
def test_check_allow_driver_detail_fail(self, mock_request):
mock_request.version.minor = 29
self.assertRaises(exception.NotAcceptable,
utils.check_allow_driver_detail, True)
def test_check_allow_manage_verbs(self, mock_request):
mock_request.version.minor = 4
utils.check_allow_management_verbs('manage')
def test_check_allow_manage_verbs_fail(self, mock_request):
mock_request.version.minor = 3
self.assertRaises(exception.NotAcceptable,
utils.check_allow_management_verbs, 'manage')
def test_check_allow_provide_verbs(self, mock_request):
mock_request.version.minor = 4
utils.check_allow_management_verbs('provide')
def test_check_allow_provide_verbs_fail(self, mock_request):
mock_request.version.minor = 3
self.assertRaises(exception.NotAcceptable,
utils.check_allow_management_verbs, 'provide')
def test_check_allow_inspect_verbs(self, mock_request):
mock_request.version.minor = 6
utils.check_allow_management_verbs('inspect')
def test_check_allow_inspect_verbs_fail(self, mock_request):
mock_request.version.minor = 5
self.assertRaises(exception.NotAcceptable,
utils.check_allow_management_verbs, 'inspect')
def test_check_allow_abort_verbs(self, mock_request):
mock_request.version.minor = 13
utils.check_allow_management_verbs('abort')
def test_check_allow_abort_verbs_fail(self, mock_request):
mock_request.version.minor = 12
self.assertRaises(exception.NotAcceptable,
utils.check_allow_management_verbs, 'abort')
def test_check_allow_clean_verbs(self, mock_request):
mock_request.version.minor = 15
utils.check_allow_management_verbs('clean')
def test_check_allow_clean_verbs_fail(self, mock_request):
mock_request.version.minor = 14
self.assertRaises(exception.NotAcceptable,
utils.check_allow_management_verbs, 'clean')
def test_check_allow_unknown_verbs(self, mock_request):
utils.check_allow_management_verbs('rebuild')
def test_allow_inject_nmi(self, mock_request):
mock_request.version.minor = 29
self.assertTrue(utils.allow_inject_nmi())
mock_request.version.minor = 28
self.assertFalse(utils.allow_inject_nmi())
def test_allow_links_node_states_and_driver_properties(self, mock_request):
mock_request.version.minor = 14
self.assertTrue(utils.allow_links_node_states_and_driver_properties())
mock_request.version.minor = 10
self.assertFalse(utils.allow_links_node_states_and_driver_properties())
def test_check_allow_adopt_verbs_fail(self, mock_request):
mock_request.version.minor = 16
self.assertRaises(exception.NotAcceptable,
utils.check_allow_management_verbs, 'adopt')
def test_check_allow_adopt_verbs(self, mock_request):
mock_request.version.minor = 17
utils.check_allow_management_verbs('adopt')
def test_allow_port_internal_info(self, mock_request):
mock_request.version.minor = 18
self.assertTrue(utils.allow_port_internal_info())
mock_request.version.minor = 17
self.assertFalse(utils.allow_port_internal_info())
def test_allow_port_advanced_net_fields(self, mock_request):
mock_request.version.minor = 19
self.assertTrue(utils.allow_port_advanced_net_fields())
mock_request.version.minor = 18
self.assertFalse(utils.allow_port_advanced_net_fields())
def test_allow_ramdisk_endpoints(self, mock_request):
mock_request.version.minor = 22
self.assertTrue(utils.allow_ramdisk_endpoints())
mock_request.version.minor = 21
self.assertFalse(utils.allow_ramdisk_endpoints())
def test_allow_portgroups(self, mock_request):
mock_request.version.minor = 23
self.assertTrue(utils.allow_portgroups())
mock_request.version.minor = 22
self.assertFalse(utils.allow_portgroups())
def test_allow_portgroups_subcontrollers(self, mock_request):
mock_request.version.minor = 24
self.assertTrue(utils.allow_portgroups_subcontrollers())
mock_request.version.minor = 23
self.assertFalse(utils.allow_portgroups_subcontrollers())
def test_allow_remove_chassis_uuid(self, mock_request):
mock_request.version.minor = 25
self.assertTrue(utils.allow_remove_chassis_uuid())
mock_request.version.minor = 24
self.assertFalse(utils.allow_remove_chassis_uuid())
def test_allow_portgroup_mode_properties(self, mock_request):
mock_request.version.minor = 26
self.assertTrue(utils.allow_portgroup_mode_properties())
mock_request.version.minor = 25
self.assertFalse(utils.allow_portgroup_mode_properties())
def test_allow_dynamic_drivers(self, mock_request):
mock_request.version.minor = 30
self.assertTrue(utils.allow_dynamic_drivers())
mock_request.version.minor = 29
self.assertFalse(utils.allow_dynamic_drivers())
def test_allow_volume(self, mock_request):
mock_request.version.minor = 32
self.assertTrue(utils.allow_volume())
mock_request.version.minor = 31
self.assertFalse(utils.allow_volume())
def test_allow_storage_interface(self, mock_request):
mock_request.version.minor = 33
self.assertTrue(utils.allow_storage_interface())
mock_request.version.minor = 32
self.assertFalse(utils.allow_storage_interface())
def test_allow_traits(self, mock_request):
mock_request.version.minor = 37
self.assertTrue(utils.allow_traits())
mock_request.version.minor = 36
self.assertFalse(utils.allow_traits())
@mock.patch.object(objects.Port, 'supports_physical_network',
autospec=True)
def test_allow_port_physical_network_no_pin(self, mock_spn, mock_request):
mock_spn.return_value = True
mock_request.version.minor = 34
self.assertTrue(utils.allow_port_physical_network())
mock_request.version.minor = 33
self.assertFalse(utils.allow_port_physical_network())
@mock.patch.object(objects.Port, 'supports_physical_network',
autospec=True)
def test_allow_port_physical_network_pin(self, mock_spn, mock_request):
mock_spn.return_value = False
mock_request.version.minor = 34
self.assertFalse(utils.allow_port_physical_network())
mock_request.version.minor = 33
self.assertFalse(utils.allow_port_physical_network())
def test_allow_node_rebuild_with_configdrive(self, mock_request):
mock_request.version.minor = 35
self.assertTrue(utils.allow_node_rebuild_with_configdrive())
mock_request.version.minor = 34
self.assertFalse(utils.allow_node_rebuild_with_configdrive())
def test_allow_configdrive_vendor_data(self, mock_request):
mock_request.version.minor = 59
self.assertTrue(utils.allow_configdrive_vendor_data())
mock_request.version.minor = 58
self.assertFalse(utils.allow_configdrive_vendor_data())
def test_check_allow_configdrive_fails(self, mock_request):
mock_request.version.minor = 35
self.assertRaises(exception.ClientSideError,
utils.check_allow_configdrive, states.DELETED,
"abcd")
self.assertRaises(exception.ClientSideError,
utils.check_allow_configdrive, states.ACTIVE,
{'meta_data': {}})
mock_request.version.minor = 34
self.assertRaises(exception.ClientSideError,
utils.check_allow_configdrive, states.REBUILD,
"abcd")
def test_check_allow_configdrive(self, mock_request):
mock_request.version.minor = 35
utils.check_allow_configdrive(states.ACTIVE, "abcd")
utils.check_allow_configdrive(states.REBUILD, "abcd")
mock_request.version.minor = 34
utils.check_allow_configdrive(states.ACTIVE, "abcd")
def test_check_allow_configdrive_as_dict(self, mock_request):
mock_request.version.minor = 59
utils.check_allow_configdrive(states.ACTIVE, {'meta_data': {}})
utils.check_allow_configdrive(states.ACTIVE, {'meta_data': {},
'network_data': {},
'user_data': {},
'vendor_data': {}})
utils.check_allow_configdrive(states.ACTIVE, {'user_data': 'foo'})
utils.check_allow_configdrive(states.ACTIVE, {'user_data': ['foo']})
def test_check_allow_configdrive_vendor_data_failed(self, mock_request):
mock_request.version.minor = 58
self.assertRaises(exception.ClientSideError,
utils.check_allow_configdrive,
states.ACTIVE,
{'meta_data': {},
'network_data': {},
'user_data': {},
'vendor_data': {}})
def test_check_allow_configdrive_as_dict_invalid(self, mock_request):
mock_request.version.minor = 59
self.assertRaises(exception.ClientSideError,
utils.check_allow_configdrive, states.REBUILD,
{'foo': 'bar'})
for key in ['meta_data', 'network_data']:
self.assertRaises(exception.ClientSideError,
utils.check_allow_configdrive, states.REBUILD,
{key: 'a string'})
for key in ['meta_data', 'network_data', 'user_data']:
self.assertRaises(exception.ClientSideError,
utils.check_allow_configdrive, states.REBUILD,
{key: 42})
def test_allow_rescue_interface(self, mock_request):
mock_request.version.minor = 38
self.assertTrue(utils.allow_rescue_interface())
mock_request.version.minor = 37
self.assertFalse(utils.allow_rescue_interface())
def test_allow_inspect_abort(self, mock_request):
mock_request.version.minor = 41
self.assertTrue(utils.allow_inspect_abort())
mock_request.version.minor = 40
self.assertFalse(utils.allow_inspect_abort())
def test_allow_port_is_smartnic(self, mock_request):
mock_request.version.minor = 53
self.assertTrue(utils.allow_port_is_smartnic())
mock_request.version.minor = 52
self.assertFalse(utils.allow_port_is_smartnic())
def test_allow_deploy_templates(self, mock_request):
mock_request.version.minor = 55
self.assertTrue(utils.allow_deploy_templates())
mock_request.version.minor = 54
self.assertFalse(utils.allow_deploy_templates())
def test_allow_agent_token(self, mock_request):
mock_request.version.minor = 62
self.assertTrue(utils.allow_agent_token())
mock_request.version.minor = 61
self.assertFalse(utils.allow_agent_token())
@mock.patch.object(api, 'request', spec_set=['context', 'version'])
class TestNodeIdent(base.TestCase):
def setUp(self):
super(TestNodeIdent, self).setUp()
self.valid_name = 'my-host'
self.valid_uuid = uuidutils.generate_uuid()
self.invalid_name = 'Mr Plow'
self.node = test_api_utils.post_get_test_node()
def test_allow_node_logical_names_pre_name(self, mock_pecan_req):
mock_pecan_req.version.minor = 1
self.assertFalse(utils.allow_node_logical_names())
def test_allow_node_logical_names_post_name(self, mock_pecan_req):
mock_pecan_req.version.minor = 5
self.assertTrue(utils.allow_node_logical_names())
def test_is_valid_node_name(self, mock_pecan_req):
mock_pecan_req.version.minor = 10
self.assertTrue(utils.is_valid_node_name(self.valid_name))
self.assertFalse(utils.is_valid_node_name(self.invalid_name))
self.assertFalse(utils.is_valid_node_name(self.valid_uuid))
@mock.patch.object(utils, 'allow_node_logical_names', autospec=True)
@mock.patch.object(objects.Node, 'get_by_uuid', autospec=True)
@mock.patch.object(objects.Node, 'get_by_name', autospec=True)
def test_get_rpc_node_expect_uuid(self, mock_gbn, mock_gbu, mock_anln,
mock_pr):
mock_anln.return_value = True
self.node['uuid'] = self.valid_uuid
mock_gbu.return_value = self.node
self.assertEqual(self.node, utils.get_rpc_node(self.valid_uuid))
self.assertEqual(1, mock_gbu.call_count)
self.assertEqual(0, mock_gbn.call_count)
@mock.patch.object(utils, 'allow_node_logical_names', autospec=True)
@mock.patch.object(objects.Node, 'get_by_uuid', autospec=True)
@mock.patch.object(objects.Node, 'get_by_name', autospec=True)
def test_get_rpc_node_expect_name(self, mock_gbn, mock_gbu, mock_anln,
mock_pr):
mock_pr.version.minor = 10
mock_anln.return_value = True
self.node['name'] = self.valid_name
mock_gbn.return_value = self.node
self.assertEqual(self.node, utils.get_rpc_node(self.valid_name))
self.assertEqual(0, mock_gbu.call_count)
self.assertEqual(1, mock_gbn.call_count)
@mock.patch.object(utils, 'allow_node_logical_names', autospec=True)
@mock.patch.object(objects.Node, 'get_by_uuid', autospec=True)
@mock.patch.object(objects.Node, 'get_by_name', autospec=True)
def test_get_rpc_node_invalid_name(self, mock_gbn, mock_gbu,
mock_anln, mock_pr):
mock_pr.version.minor = 10
mock_anln.return_value = True
self.assertRaises(exception.InvalidUuidOrName,
utils.get_rpc_node,
self.invalid_name)
@mock.patch.object(utils, 'allow_node_logical_names', autospec=True)
@mock.patch.object(objects.Node, 'get_by_uuid', autospec=True)
@mock.patch.object(objects.Node, 'get_by_name', autospec=True)
def test_get_rpc_node_by_uuid_no_logical_name(self, mock_gbn, mock_gbu,
mock_anln, mock_pr):
        # allow_node_logical_names() should have no effect
mock_anln.return_value = False
self.node['uuid'] = self.valid_uuid
mock_gbu.return_value = self.node
self.assertEqual(self.node, utils.get_rpc_node(self.valid_uuid))
self.assertEqual(1, mock_gbu.call_count)
self.assertEqual(0, mock_gbn.call_count)
@mock.patch.object(utils, 'allow_node_logical_names', autospec=True)
@mock.patch.object(objects.Node, 'get_by_uuid', autospec=True)
@mock.patch.object(objects.Node, 'get_by_name', autospec=True)
def test_get_rpc_node_by_name_no_logical_name(self, mock_gbn, mock_gbu,
mock_anln, mock_pr):
mock_anln.return_value = False
self.node['name'] = self.valid_name
mock_gbn.return_value = self.node
self.assertRaises(exception.NodeNotFound,
utils.get_rpc_node,
self.valid_name)
@mock.patch.object(objects.Node, 'get_by_id', autospec=True)
def test_populate_node_uuid(self, mock_gbi, mock_pr):
port = obj_utils.get_test_port(self.context)
node = obj_utils.get_test_node(self.context, id=port.node_id)
mock_gbi.return_value = node
# successful lookup
d = {}
utils.populate_node_uuid(port, d)
self.assertEqual({
'node_uuid': '1be26c0b-03f2-4d2e-ae87-c02d7f33c123'
}, d)
# not found, raise exception
mock_gbi.side_effect = exception.NodeNotFound(node=port.node_id)
d = {}
self.assertRaises(exception.NodeNotFound,
utils.populate_node_uuid, port, d)
@mock.patch.object(objects.Node, 'get_by_uuid', autospec=True)
def test_replace_node_uuid_with_id(self, mock_gbu, mock_pr):
node = obj_utils.get_test_node(self.context, id=1)
mock_gbu.return_value = node
to_dict = {'node_uuid': self.valid_uuid}
self.assertEqual(node, utils.replace_node_uuid_with_id(to_dict))
self.assertEqual({'node_id': 1}, to_dict)
@mock.patch.object(objects.Node, 'get_by_uuid', autospec=True)
def test_replace_node_uuid_with_id_not_found(self, mock_gbu, mock_pr):
to_dict = {'node_uuid': self.valid_uuid}
mock_gbu.side_effect = exception.NodeNotFound(node=self.valid_uuid)
e = self.assertRaises(exception.NodeNotFound,
utils.replace_node_uuid_with_id, to_dict)
self.assertEqual(400, e.code)
@mock.patch.object(objects.Node, 'get_by_id', autospec=True)
def test_replace_node_id_with_uuid(self, mock_gbi, mock_pr):
node = obj_utils.get_test_node(self.context, uuid=self.valid_uuid)
mock_gbi.return_value = node
to_dict = {'node_id': 1}
self.assertEqual(node, utils.replace_node_id_with_uuid(to_dict))
self.assertEqual({'node_uuid': self.valid_uuid}, to_dict)
@mock.patch.object(objects.Node, 'get_by_id', autospec=True)
def test_replace_node_id_with_uuid_not_found(self, mock_gbi, mock_pr):
to_dict = {'node_id': 1}
mock_gbi.side_effect = exception.NodeNotFound(node=1)
e = self.assertRaises(exception.NodeNotFound,
utils.replace_node_id_with_uuid, to_dict)
self.assertEqual(400, e.code)
class TestVendorPassthru(base.TestCase):
def test_method_not_specified(self):
self.assertRaises(exception.ClientSideError,
utils.vendor_passthru, 'fake-ident',
None, 'fake-topic', data='fake-data')
@mock.patch.object(api, 'request',
spec_set=['method', 'context', 'rpcapi'])
def _vendor_passthru(self, mock_request, async_call=True,
driver_passthru=False):
return_value = {
'return': 'SpongeBob',
'async': async_call,
'attach': False
}
mock_request.method = 'post'
mock_request.context = 'fake-context'
passthru_mock = None
if driver_passthru:
passthru_mock = mock_request.rpcapi.driver_vendor_passthru
else:
passthru_mock = mock_request.rpcapi.vendor_passthru
passthru_mock.return_value = return_value
response = utils.vendor_passthru('fake-ident', 'squarepants',
'fake-topic', data='fake-data',
driver_passthru=driver_passthru)
passthru_mock.assert_called_once_with(
'fake-context', 'fake-ident', 'squarepants', 'POST',
'fake-data', 'fake-topic')
self.assertIsInstance(response, utils.PassthruResponse)
self.assertEqual('SpongeBob', response.obj)
sc = http_client.ACCEPTED if async_call else http_client.OK
self.assertEqual(sc, response.status_code)
def test_vendor_passthru_async(self):
self._vendor_passthru()
def test_vendor_passthru_sync(self):
self._vendor_passthru(async_call=False)
def test_driver_vendor_passthru_async(self):
self._vendor_passthru(driver_passthru=True)
def test_driver_vendor_passthru_sync(self):
self._vendor_passthru(async_call=False, driver_passthru=True)
@mock.patch.object(api, 'request',
spec_set=['method', 'context', 'rpcapi'])
def _test_vendor_passthru_attach(self, return_value, expct_return_value,
mock_request):
return_ = {'return': return_value, 'async': False, 'attach': True}
mock_request.method = 'get'
mock_request.context = 'fake-context'
mock_request.rpcapi.driver_vendor_passthru.return_value = return_
response = utils.vendor_passthru('fake-ident', 'bar',
'fake-topic', data='fake-data',
driver_passthru=True)
mock_request.rpcapi.driver_vendor_passthru.assert_called_once_with(
'fake-context', 'fake-ident', 'bar', 'GET',
'fake-data', 'fake-topic')
# Assert file was attached to the response object
self.assertIsInstance(response.obj, io.BytesIO)
self.assertEqual(expct_return_value, response.obj.read())
# Assert response message is none
self.assertIsInstance(response, utils.PassthruResponse)
self.assertEqual(http_client.OK, response.status_code)
def test_vendor_passthru_attach(self):
self._test_vendor_passthru_attach('foo', b'foo')
def test_vendor_passthru_attach_unicode_to_byte(self):
self._test_vendor_passthru_attach(u'não', b'n\xc3\xa3o')
def test_vendor_passthru_attach_byte_to_byte(self):
self._test_vendor_passthru_attach(b'\x00\x01', b'\x00\x01')
def test_get_controller_reserved_names(self):
expected = ['maintenance', 'management', 'states',
'vendor_passthru', 'validate', 'detail']
self.assertEqual(sorted(expected),
sorted(utils.get_controller_reserved_names(
api_node.NodesController)))
@mock.patch.object(api, 'request', spec_set=["context"])
@mock.patch.object(policy, 'authorize', spec=True)
def test_check_policy(self, mock_authorize, mock_pr):
utils.check_policy('fake-policy')
cdict = api.request.context.to_policy_values()
mock_authorize.assert_called_once_with('fake-policy', cdict, cdict)
@mock.patch.object(api, 'request', spec_set=["context"])
@mock.patch.object(policy, 'authorize', spec=True)
def test_check_policy_forbidden(self, mock_authorize, mock_pr):
mock_authorize.side_effect = exception.HTTPForbidden(resource='fake')
self.assertRaises(exception.HTTPForbidden,
utils.check_policy, 'fake-policy')
class TestPortgroupIdent(base.TestCase):
def setUp(self):
super(TestPortgroupIdent, self).setUp()
self.valid_name = 'my-portgroup'
self.valid_uuid = uuidutils.generate_uuid()
self.invalid_name = 'My Portgroup'
self.portgroup = test_api_utils.post_get_test_portgroup()
@mock.patch.object(api, 'request', spec_set=["context"])
@mock.patch.object(objects.Portgroup, 'get_by_name', autospec=True)
def test_get_rpc_portgroup_name(self, mock_gbn, mock_pr):
mock_gbn.return_value = self.portgroup
self.assertEqual(self.portgroup, utils.get_rpc_portgroup(
self.valid_name))
mock_gbn.assert_called_once_with(mock_pr.context, self.valid_name)
@mock.patch.object(api, 'request', spec_set=["context"])
@mock.patch.object(objects.Portgroup, 'get_by_uuid', autospec=True)
def test_get_rpc_portgroup_uuid(self, mock_gbu, mock_pr):
self.portgroup['uuid'] = self.valid_uuid
mock_gbu.return_value = self.portgroup
self.assertEqual(self.portgroup, utils.get_rpc_portgroup(
self.valid_uuid))
mock_gbu.assert_called_once_with(mock_pr.context, self.valid_uuid)
def test_get_rpc_portgroup_invalid_name(self):
self.assertRaises(exception.InvalidUuidOrName,
utils.get_rpc_portgroup,
self.invalid_name)
class TestCheckOwnerPolicy(base.TestCase):
def setUp(self):
super(TestCheckOwnerPolicy, self).setUp()
self.valid_node_uuid = uuidutils.generate_uuid()
self.node = test_api_utils.post_get_test_node()
self.node['owner'] = '12345'
self.node['lessee'] = '54321'
@mock.patch.object(api, 'request', spec_set=["context", "version"])
@mock.patch.object(policy, 'authorize', spec=True)
def test_check_owner_policy(
self, mock_authorize, mock_pr
):
mock_pr.version.minor = 50
mock_pr.context.to_policy_values.return_value = {}
utils.check_owner_policy(
'node', 'fake_policy', self.node['owner'], self.node['lessee']
)
mock_authorize.assert_called_once_with(
'fake_policy',
{'node.owner': '12345', 'node.lessee': '54321'}, {})
@mock.patch.object(api, 'request', spec_set=["context", "version"])
@mock.patch.object(policy, 'authorize', spec=True)
def test_check_owner_policy_forbidden(
self, mock_authorize, mock_pr
):
mock_pr.version.minor = 50
mock_pr.context.to_policy_values.return_value = {}
mock_authorize.side_effect = exception.HTTPForbidden(resource='fake')
self.assertRaises(
exception.HTTPForbidden,
utils.check_owner_policy,
'node',
'fake-policy',
self.node
)
class TestCheckNodePolicyAndRetrieve(base.TestCase):
def setUp(self):
super(TestCheckNodePolicyAndRetrieve, self).setUp()
self.valid_node_uuid = uuidutils.generate_uuid()
self.node = test_api_utils.post_get_test_node()
self.node['owner'] = '12345'
self.node['lessee'] = '54321'
@mock.patch.object(api, 'request', spec_set=["context", "version"])
@mock.patch.object(policy, 'authorize', spec=True)
@mock.patch.object(utils, 'get_rpc_node', autospec=True)
@mock.patch.object(utils, 'get_rpc_node_with_suffix', autospec=True)
def test_check_node_policy_and_retrieve(
self, mock_grnws, mock_grn, mock_authorize, mock_pr
):
mock_pr.version.minor = 50
mock_pr.context.to_policy_values.return_value = {}
mock_grn.return_value = self.node
rpc_node = utils.check_node_policy_and_retrieve(
'fake_policy', self.valid_node_uuid
)
mock_grn.assert_called_once_with(self.valid_node_uuid)
mock_grnws.assert_not_called()
mock_authorize.assert_called_once_with(
'fake_policy',
{'node.owner': '12345', 'node.lessee': '54321'}, {})
self.assertEqual(self.node, rpc_node)
@mock.patch.object(api, 'request', spec_set=["context", "version"])
@mock.patch.object(policy, 'authorize', spec=True)
@mock.patch.object(utils, 'get_rpc_node', autospec=True)
@mock.patch.object(utils, 'get_rpc_node_with_suffix', autospec=True)
def test_check_node_policy_and_retrieve_with_suffix(
self, mock_grnws, mock_grn, mock_authorize, mock_pr
):
mock_pr.version.minor = 50
mock_pr.context.to_policy_values.return_value = {}
mock_grnws.return_value = self.node
rpc_node = utils.check_node_policy_and_retrieve(
'fake_policy', self.valid_node_uuid, True
)
mock_grn.assert_not_called()
mock_grnws.assert_called_once_with(self.valid_node_uuid)
mock_authorize.assert_called_once_with(
'fake_policy',
{'node.owner': '12345', 'node.lessee': '54321'}, {})
self.assertEqual(self.node, rpc_node)
@mock.patch.object(api, 'request', spec_set=["context"])
@mock.patch.object(policy, 'authorize', spec=True)
@mock.patch.object(utils, 'get_rpc_node', autospec=True)
def test_check_node_policy_and_retrieve_no_node_policy_forbidden(
self, mock_grn, mock_authorize, mock_pr
):
mock_pr.context.to_policy_values.return_value = {}
mock_authorize.side_effect = exception.HTTPForbidden(resource='fake')
mock_grn.side_effect = exception.NodeNotFound(
node=self.valid_node_uuid)
self.assertRaises(
exception.HTTPForbidden,
utils.check_node_policy_and_retrieve,
'fake-policy',
self.valid_node_uuid
)
@mock.patch.object(api, 'request', spec_set=["context"])
@mock.patch.object(policy, 'authorize', spec=True)
@mock.patch.object(utils, 'get_rpc_node', autospec=True)
def test_check_node_policy_and_retrieve_no_node(
self, mock_grn, mock_authorize, mock_pr
):
mock_pr.context.to_policy_values.return_value = {}
mock_grn.side_effect = exception.NodeNotFound(
node=self.valid_node_uuid)
self.assertRaises(
exception.NodeNotFound,
utils.check_node_policy_and_retrieve,
'fake-policy',
self.valid_node_uuid
)
@mock.patch.object(api, 'request', spec_set=["context", "version"])
@mock.patch.object(policy, 'authorize', spec=True)
@mock.patch.object(utils, 'get_rpc_node', autospec=True)
def test_check_node_policy_and_retrieve_policy_forbidden(
self, mock_grn, mock_authorize, mock_pr
):
mock_pr.version.minor = 50
mock_pr.context.to_policy_values.return_value = {}
mock_authorize.side_effect = exception.HTTPForbidden(resource='fake')
mock_grn.return_value = self.node
self.assertRaises(
exception.HTTPForbidden,
utils.check_node_policy_and_retrieve,
'fake-policy',
self.valid_node_uuid
)
class TestCheckAllocationPolicyAndRetrieve(base.TestCase):
def setUp(self):
super(TestCheckAllocationPolicyAndRetrieve, self).setUp()
self.valid_allocation_uuid = uuidutils.generate_uuid()
self.allocation = test_api_utils.allocation_post_data()
self.allocation['owner'] = '12345'
@mock.patch.object(api, 'request', spec_set=["context", "version"])
@mock.patch.object(policy, 'authorize', spec=True)
@mock.patch.object(utils, 'get_rpc_allocation_with_suffix', autospec=True)
def test_check_node_policy_and_retrieve(
self, mock_graws, mock_authorize, mock_pr
):
mock_pr.version.minor = 60
mock_pr.context.to_policy_values.return_value = {}
mock_graws.return_value = self.allocation
rpc_allocation = utils.check_allocation_policy_and_retrieve(
'fake_policy', self.valid_allocation_uuid
)
mock_graws.assert_called_once_with(self.valid_allocation_uuid)
mock_authorize.assert_called_once_with(
'fake_policy', {'allocation.owner': '12345'}, {})
self.assertEqual(self.allocation, rpc_allocation)
@mock.patch.object(api, 'request', spec_set=["context"])
@mock.patch.object(policy, 'authorize', spec=True)
@mock.patch.object(utils, 'get_rpc_allocation_with_suffix', autospec=True)
def test_check_alloc_policy_and_retrieve_no_alloc_policy_forbidden(
self, mock_graws, mock_authorize, mock_pr
):
mock_pr.context.to_policy_values.return_value = {}
mock_authorize.side_effect = exception.HTTPForbidden(resource='fake')
mock_graws.side_effect = exception.AllocationNotFound(
allocation=self.valid_allocation_uuid)
self.assertRaises(
exception.HTTPForbidden,
utils.check_allocation_policy_and_retrieve,
'fake-policy',
self.valid_allocation_uuid
)
@mock.patch.object(api, 'request', spec_set=["context"])
@mock.patch.object(policy, 'authorize', spec=True)
@mock.patch.object(utils, 'get_rpc_allocation_with_suffix', autospec=True)
def test_check_allocation_policy_and_retrieve_no_allocation(
self, mock_graws, mock_authorize, mock_pr
):
mock_pr.context.to_policy_values.return_value = {}
mock_graws.side_effect = exception.AllocationNotFound(
allocation=self.valid_allocation_uuid)
self.assertRaises(
exception.AllocationNotFound,
utils.check_allocation_policy_and_retrieve,
'fake-policy',
self.valid_allocation_uuid
)
@mock.patch.object(api, 'request', spec_set=["context", "version"])
@mock.patch.object(policy, 'authorize', spec=True)
@mock.patch.object(utils, 'get_rpc_allocation_with_suffix', autospec=True)
def test_check_allocation_policy_and_retrieve_policy_forbidden(
self, mock_graws, mock_authorize, mock_pr
):
mock_pr.version.minor = 50
mock_pr.context.to_policy_values.return_value = {}
mock_authorize.side_effect = exception.HTTPForbidden(resource='fake')
mock_graws.return_value = self.allocation
self.assertRaises(
exception.HTTPForbidden,
utils.check_allocation_policy_and_retrieve,
'fake-policy',
self.valid_allocation_uuid
)
class TestCheckMultipleNodePoliciesAndRetrieve(base.TestCase):
def setUp(self):
super(TestCheckMultipleNodePoliciesAndRetrieve, self).setUp()
self.valid_node_uuid = uuidutils.generate_uuid()
self.node = test_api_utils.post_get_test_node()
self.node['owner'] = '12345'
self.node['lessee'] = '54321'
@mock.patch.object(utils, 'check_node_policy_and_retrieve', autospec=True)
@mock.patch.object(utils, 'check_owner_policy', autospec=True)
def test_check_multiple_node_policies_and_retrieve(
self, mock_cop, mock_cnpar
):
mock_cnpar.return_value = self.node
mock_cop.return_value = True
rpc_node = utils.check_multiple_node_policies_and_retrieve(
['fake_policy_1', 'fake_policy_2'], self.valid_node_uuid
)
mock_cnpar.assert_called_once_with('fake_policy_1',
self.valid_node_uuid, False)
mock_cop.assert_called_once_with(
'node', 'fake_policy_2', '12345', '54321')
self.assertEqual(self.node, rpc_node)
@mock.patch.object(utils, 'check_node_policy_and_retrieve', autospec=True)
@mock.patch.object(utils, 'check_owner_policy', autospec=True)
def test_check_multiple_node_policies_and_retrieve_first_fail(
self, mock_cop, mock_cnpar
):
mock_cnpar.side_effect = exception.HTTPForbidden(resource='fake')
mock_cop.return_value = True
self.assertRaises(
exception.HTTPForbidden,
utils.check_multiple_node_policies_and_retrieve,
['fake_policy_1', 'fake_policy_2'],
self.valid_node_uuid
)
mock_cnpar.assert_called_once_with('fake_policy_1',
self.valid_node_uuid, False)
mock_cop.assert_not_called()
@mock.patch.object(utils, 'check_node_policy_and_retrieve', autospec=True)
@mock.patch.object(utils, 'check_owner_policy', autospec=True)
def test_check_node_policy_and_retrieve_no_node(
self, mock_cop, mock_cnpar
):
mock_cnpar.return_value = self.node
mock_cop.side_effect = exception.HTTPForbidden(resource='fake')
self.assertRaises(
exception.HTTPForbidden,
utils.check_multiple_node_policies_and_retrieve,
['fake_policy_1', 'fake_policy_2'],
self.valid_node_uuid
)
mock_cnpar.assert_called_once_with('fake_policy_1',
self.valid_node_uuid, False)
mock_cop.assert_called_once_with(
'node', 'fake_policy_2', '12345', '54321')
class TestCheckListPolicy(base.TestCase):
@mock.patch.object(api, 'request', spec_set=["context", "version"])
@mock.patch.object(policy, 'authorize', spec=True)
def test_check_list_policy(
self, mock_authorize, mock_pr
):
mock_pr.context.to_policy_values.return_value = {
'project_id': '12345'
}
mock_pr.version.minor = 50
owner = utils.check_list_policy('node')
self.assertIsNone(owner)
@mock.patch.object(api, 'request', spec_set=["context", "version"])
@mock.patch.object(policy, 'authorize', spec=True)
def test_check_list_policy_with_owner(
self, mock_authorize, mock_pr
):
mock_pr.context.to_policy_values.return_value = {
'project_id': '12345'
}
mock_pr.version.minor = 50
owner = utils.check_list_policy('node', '12345')
self.assertEqual(owner, '12345')
@mock.patch.object(api, 'request', spec_set=["context", "version"])
@mock.patch.object(policy, 'authorize', spec=True)
def test_check_list_policy_forbidden(
self, mock_authorize, mock_pr
):
def mock_authorize_function(rule, target, creds):
raise exception.HTTPForbidden(resource='fake')
mock_authorize.side_effect = mock_authorize_function
mock_pr.context.to_policy_values.return_value = {
'project_id': '12345'
}
mock_pr.version.minor = 50
self.assertRaises(
exception.HTTPForbidden,
utils.check_list_policy,
'node'
)
@mock.patch.object(api, 'request', spec_set=["context", "version"])
@mock.patch.object(policy, 'authorize', spec=True)
def test_check_list_policy_forbidden_no_project(
self, mock_authorize, mock_pr
):
def mock_authorize_function(rule, target, creds):
if rule == 'baremetal:node:list_all':
raise exception.HTTPForbidden(resource='fake')
return True
mock_authorize.side_effect = mock_authorize_function
mock_pr.context.to_policy_values.return_value = {}
mock_pr.version.minor = 50
self.assertRaises(
exception.HTTPForbidden,
utils.check_list_policy,
'node'
)
@mock.patch.object(api, 'request', spec_set=["context", "version"])
@mock.patch.object(policy, 'authorize', spec=True)
def test_check_list_policy_non_admin(
self, mock_authorize, mock_pr
):
def mock_authorize_function(rule, target, creds):
if rule == 'baremetal:node:list_all':
raise exception.HTTPForbidden(resource='fake')
return True
mock_authorize.side_effect = mock_authorize_function
mock_pr.context.to_policy_values.return_value = {
'project_id': '12345'
}
mock_pr.version.minor = 50
owner = utils.check_list_policy('node')
self.assertEqual(owner, '12345')
@mock.patch.object(api, 'request', spec_set=["context", "version"])
@mock.patch.object(policy, 'authorize', spec=True)
def test_check_list_policy_non_admin_owner_proj_mismatch(
self, mock_authorize, mock_pr
):
def mock_authorize_function(rule, target, creds):
if rule == 'baremetal:node:list_all':
raise exception.HTTPForbidden(resource='fake')
return True
mock_authorize.side_effect = mock_authorize_function
mock_pr.context.to_policy_values.return_value = {
'project_id': '12345'
}
mock_pr.version.minor = 50
self.assertRaises(
exception.HTTPForbidden,
utils.check_list_policy,
'node',
'54321'
)
class TestCheckPortPolicyAndRetrieve(base.TestCase):
def setUp(self):
super(TestCheckPortPolicyAndRetrieve, self).setUp()
self.valid_port_uuid = uuidutils.generate_uuid()
self.node = test_api_utils.post_get_test_node()
self.node['owner'] = '12345'
self.node['lessee'] = '54321'
self.port = objects.Port(self.context, node_id=42)
@mock.patch.object(api, 'request', spec_set=["context", "version"])
@mock.patch.object(policy, 'authorize', spec=True)
@mock.patch.object(objects.Port, 'get_by_uuid', autospec=True)
@mock.patch.object(objects.Node, 'get_by_id', autospec=True)
def test_check_port_policy_and_retrieve(
self, mock_ngbi, mock_pgbu, mock_authorize, mock_pr
):
mock_pr.version.minor = 50
mock_pr.context.to_policy_values.return_value = {}
mock_pgbu.return_value = self.port
mock_ngbi.return_value = self.node
rpc_port, rpc_node = utils.check_port_policy_and_retrieve(
'fake_policy', self.valid_port_uuid
)
mock_pgbu.assert_called_once_with(mock_pr.context,
self.valid_port_uuid)
mock_ngbi.assert_called_once_with(mock_pr.context, 42)
mock_authorize.assert_called_once_with(
'fake_policy',
{'node.owner': '12345', 'node.lessee': '54321'},
{})
self.assertEqual(self.port, rpc_port)
self.assertEqual(self.node, rpc_node)
@mock.patch.object(api, 'request', spec_set=["context"])
@mock.patch.object(policy, 'authorize', spec=True)
@mock.patch.object(objects.Port, 'get_by_uuid', autospec=True)
def test_check_port_policy_and_retrieve_no_port_policy_forbidden(
self, mock_pgbu, mock_authorize, mock_pr
):
mock_pr.context.to_policy_values.return_value = {}
mock_authorize.side_effect = exception.HTTPForbidden(resource='fake')
mock_pgbu.side_effect = exception.PortNotFound(
port=self.valid_port_uuid)
self.assertRaises(
exception.HTTPForbidden,
utils.check_port_policy_and_retrieve,
'fake-policy',
self.valid_port_uuid
)
@mock.patch.object(api, 'request', spec_set=["context"])
@mock.patch.object(policy, 'authorize', spec=True)
@mock.patch.object(objects.Port, 'get_by_uuid', autospec=True)
def test_check_port_policy_and_retrieve_no_port(
self, mock_pgbu, mock_authorize, mock_pr
):
mock_pr.context.to_policy_values.return_value = {}
mock_pgbu.side_effect = exception.PortNotFound(
port=self.valid_port_uuid)
self.assertRaises(
exception.PortNotFound,
utils.check_port_policy_and_retrieve,
'fake-policy',
self.valid_port_uuid
)
@mock.patch.object(api, 'request', spec_set=["context", "version"])
@mock.patch.object(policy, 'authorize', spec=True)
@mock.patch.object(objects.Port, 'get_by_uuid', autospec=True)
@mock.patch.object(objects.Node, 'get_by_id', autospec=True)
def test_check_port_policy_and_retrieve_policy_forbidden(
self, mock_ngbi, mock_pgbu, mock_authorize, mock_pr
):
mock_pr.version.minor = 50
mock_pr.context.to_policy_values.return_value = {}
mock_authorize.side_effect = exception.HTTPForbidden(resource='fake')
mock_pgbu.return_value = self.port
mock_ngbi.return_value = self.node
self.assertRaises(
exception.HTTPForbidden,
utils.check_port_policy_and_retrieve,
'fake-policy',
self.valid_port_uuid
)
class TestCheckPortListPolicy(base.TestCase):
@mock.patch.object(api, 'request', spec_set=["context", "version"])
@mock.patch.object(policy, 'authorize', spec=True)
def test_check_port_list_policy(
self, mock_authorize, mock_pr
):
mock_pr.context.to_policy_values.return_value = {
'project_id': '12345'
}
mock_pr.version.minor = 50
owner = utils.check_port_list_policy()
self.assertIsNone(owner)
@mock.patch.object(api, 'request', spec_set=["context", "version"])
@mock.patch.object(policy, 'authorize', spec=True)
def test_check_port_list_policy_forbidden(
self, mock_authorize, mock_pr
):
def mock_authorize_function(rule, target, creds):
raise exception.HTTPForbidden(resource='fake')
mock_authorize.side_effect = mock_authorize_function
mock_pr.context.to_policy_values.return_value = {
'project_id': '12345'
}
mock_pr.version.minor = 50
self.assertRaises(
exception.HTTPForbidden,
utils.check_port_list_policy,
)
@mock.patch.object(api, 'request', spec_set=["context", "version"])
@mock.patch.object(policy, 'authorize', spec=True)
def test_check_port_list_policy_forbidden_no_project(
self, mock_authorize, mock_pr
):
def mock_authorize_function(rule, target, creds):
if rule == 'baremetal:port:list_all':
raise exception.HTTPForbidden(resource='fake')
return True
mock_authorize.side_effect = mock_authorize_function
mock_pr.context.to_policy_values.return_value = {}
mock_pr.version.minor = 50
self.assertRaises(
exception.HTTPForbidden,
utils.check_port_list_policy,
)
@mock.patch.object(api, 'request', spec_set=["context", "version"])
@mock.patch.object(policy, 'authorize', spec=True)
def test_check_port_list_policy_non_admin(
self, mock_authorize, mock_pr
):
def mock_authorize_function(rule, target, creds):
if rule == 'baremetal:port:list_all':
raise exception.HTTPForbidden(resource='fake')
return True
mock_authorize.side_effect = mock_authorize_function
mock_pr.context.to_policy_values.return_value = {
'project_id': '12345'
}
mock_pr.version.minor = 50
owner = utils.check_port_list_policy()
self.assertEqual(owner, '12345')
class TestObjectToDict(base.TestCase):
def setUp(self):
super(TestObjectToDict, self).setUp()
self.node = obj_utils.get_test_node(
self.context,
created_at=datetime.datetime(2000, 1, 1, 0, 0),
updated_at=datetime.datetime(2001, 1, 1, 0, 0),
inspection_started_at=datetime.datetime(2002, 1, 1, 0, 0),
console_enabled=True)
p = mock.patch.object(api, 'request', autospec=False)
mock_req = p.start()
mock_req.public_url = 'http://192.0.2.1:5050'
self.addCleanup(p.stop)
def test_no_args(self):
self.assertEqual({
'created_at': '2000-01-01T00:00:00+00:00',
'updated_at': '2001-01-01T00:00:00+00:00',
'uuid': '1be26c0b-03f2-4d2e-ae87-c02d7f33c123'
}, utils.object_to_dict(self.node))
def test_no_base_attributes(self):
self.assertEqual({}, utils.object_to_dict(
self.node,
include_created_at=False,
include_updated_at=False,
include_uuid=False)
)
def test_fields(self):
self.assertEqual({
'conductor_group': '',
'console_enabled': True,
'created_at': '2000-01-01T00:00:00+00:00',
'driver': 'fake-hardware',
'inspection_finished_at': None,
'inspection_started_at': '2002-01-01T00:00:00+00:00',
'maintenance': False,
'updated_at': '2001-01-01T00:00:00+00:00',
'uuid': '1be26c0b-03f2-4d2e-ae87-c02d7f33c123'
}, utils.object_to_dict(
self.node,
fields=[
'conductor_group',
'console_enabled',
'driver',
'inspection_finished_at',
'inspection_started_at',
'maintenance',
])
)
def test_links(self):
self.assertEqual({
'created_at': '2000-01-01T00:00:00+00:00',
'links': [{
'href': 'http://192.0.2.1:5050/v1/node/'
'1be26c0b-03f2-4d2e-ae87-c02d7f33c123',
'rel': 'self'
}, {
'href': 'http://192.0.2.1:5050/node/'
'1be26c0b-03f2-4d2e-ae87-c02d7f33c123',
'rel': 'bookmark'
}],
'updated_at': '2001-01-01T00:00:00+00:00',
'uuid': '1be26c0b-03f2-4d2e-ae87-c02d7f33c123',
}, utils.object_to_dict(self.node, link_resource='node'))
self.assertEqual({
'created_at': '2000-01-01T00:00:00+00:00',
'links': [{
'href': 'http://192.0.2.1:5050/v1/node/foo',
'rel': 'self'
}, {
'href': 'http://192.0.2.1:5050/node/foo',
'rel': 'bookmark'
}],
'updated_at': '2001-01-01T00:00:00+00:00',
'uuid': '1be26c0b-03f2-4d2e-ae87-c02d7f33c123',
}, utils.object_to_dict(
self.node,
link_resource='node',
link_resource_args='foo'))
class TestLocalLinkValidation(base.TestCase):
def test_local_link_connection_type(self):
v = utils.LOCAL_LINK_VALIDATOR
value = {'switch_id': '0a:1b:2c:3d:4e:5f',
'port_id': 'value2',
'switch_info': 'value3'}
self.assertEqual(value, v('l', value))
def test_local_link_connection_type_datapath_id(self):
v = utils.LOCAL_LINK_VALIDATOR
value = {'switch_id': '0000000000000000',
'port_id': 'value2',
'switch_info': 'value3'}
self.assertEqual(value, v('l', value))
def test_local_link_connection_type_not_mac_or_datapath_id(self):
v = utils.LOCAL_LINK_VALIDATOR
value = {'switch_id': 'badid',
'port_id': 'value2',
'switch_info': 'value3'}
self.assertRaises(exception.InvalidSwitchID, v, 'l', value)
def test_local_link_connection_type_invalid_key(self):
v = utils.LOCAL_LINK_VALIDATOR
value = {'switch_id': '0a:1b:2c:3d:4e:5f',
'port_id': 'value2',
'switch_info': 'value3',
'invalid_key': 'value'}
self.assertRaisesRegex(
exception.Invalid,
'Additional properties are not allowed',
v, 'l', value)
def test_local_link_connection_type_missing_local_link_mandatory_key(self):
v = utils.LOCAL_LINK_VALIDATOR
value = {'switch_id': '0a:1b:2c:3d:4e:5f',
'switch_info': 'value3'}
self.assertRaisesRegex(exception.Invalid, 'is a required property',
v, 'l', value)
def test_local_link_connection_type_local_link_keys_mandatory(self):
v = utils.LOCAL_LINK_VALIDATOR
value = {'switch_id': '0a:1b:2c:3d:4e:5f',
'port_id': 'value2'}
self.assertEqual(value, v('l', value))
def test_local_link_connection_type_empty_value(self):
v = utils.LOCAL_LINK_VALIDATOR
value = {}
self.assertEqual(value, v('l', value))
def test_local_link_connection_type_smart_nic_keys_mandatory(self):
v = utils.LOCAL_LINK_VALIDATOR
vs = utils.LOCAL_LINK_SMART_NIC_VALIDATOR
value = {'port_id': 'rep0-0',
'hostname': 'hostname'}
self.assertEqual(value, vs('l', value))
self.assertEqual(value, v('l', value))
def test_local_link_connection_type_smart_nic_keys_with_optional(self):
v = utils.LOCAL_LINK_VALIDATOR
vs = utils.LOCAL_LINK_SMART_NIC_VALIDATOR
value = {'port_id': 'rep0-0',
'hostname': 'hostname',
'switch_id': '0a:1b:2c:3d:4e:5f',
'switch_info': 'sw_info'}
self.assertEqual(value, vs('l', value))
self.assertEqual(value, v('l', value))
def test_local_link_connection_type_smart_nic_keys_hostname_missing(self):
v = utils.LOCAL_LINK_VALIDATOR
vs = utils.LOCAL_LINK_SMART_NIC_VALIDATOR
value = {'port_id': 'rep0-0'}
self.assertRaises(exception.Invalid, vs, 'l', value)
self.assertRaises(exception.Invalid, v, 'l', value)
def test_local_link_connection_type_smart_nic_keys_port_id_missing(self):
v = utils.LOCAL_LINK_VALIDATOR
vs = utils.LOCAL_LINK_SMART_NIC_VALIDATOR
value = {'hostname': 'hostname'}
self.assertRaises(exception.Invalid, vs, 'l', value)
self.assertRaises(exception.Invalid, v, 'l', value)
def test_local_link_connection_net_type_unmanaged(self):
v = utils.LOCAL_LINK_VALIDATOR
value = {'network_type': 'unmanaged'}
self.assertEqual(value, v('l', value))
def test_local_link_connection_net_type_unmanaged_combine_ok(self):
v = utils.LOCAL_LINK_VALIDATOR
value = {'network_type': 'unmanaged',
'switch_id': '0a:1b:2c:3d:4e:5f',
'port_id': 'rep0-0'}
self.assertEqual(value, v('l', value))
def test_local_link_connection_net_type_invalid(self):
v = utils.LOCAL_LINK_VALIDATOR
value = {'network_type': 'invalid'}
self.assertRaises(exception.Invalid, v, 'l', value)
|
from rest_framework import viewsets, mixins, status
from rest_framework.authentication import TokenAuthentication
from rest_framework.decorators import action
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from core.models import Tag, Ingredient, Recipe
from recipe import serializers
class BaseRecipeAttrViewSet(viewsets.GenericViewSet,
mixins.ListModelMixin,
mixins.CreateModelMixin):
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
def get_queryset(self):
"""Return objects for the current authenticated user only"""
assigned_only = bool(
int(self.request.query_params.get('assigned_only', 0))
)
queryset = self.queryset
if assigned_only:
            queryset = queryset.filter(recipe__isnull=False)
return queryset.filter(
user=self.request.user).order_by('-name').distinct()
def perform_create(self, serializer):
"""Create a new object attribute"""
serializer.save(user=self.request.user)
class TagViewSet(BaseRecipeAttrViewSet):
"""Manage tags in the database"""
    # Authentication and permissions are enforced by BaseRecipeAttrViewSet
queryset = Tag.objects.all()
serializer_class = serializers.TagSerializer
class IngredientViewSet(BaseRecipeAttrViewSet):
"""Manage ingredients in the database"""
queryset = Ingredient.objects.all()
serializer_class = serializers.IngredientSerializer
class RecipeViewSet(viewsets.ModelViewSet):
"""Manage recipes in the database"""
serializer_class = serializers.RecipeSerializer
queryset = Recipe.objects.all()
authentication_classes = (TokenAuthentication,)
permission_classes = (IsAuthenticated,)
@staticmethod
def _params_to_ints(qs):
"""Convert a list of string IDs to a list of integers"""
return [int(str_id) for str_id in qs.split(',')]
def get_queryset(self):
"""Retrieve the recipes for the authenticated user"""
tags = self.request.query_params.get('tags')
ingredients = self.request.query_params.get('ingredients')
queryset = self.queryset
if tags:
tags_ids = self._params_to_ints(tags)
queryset = queryset.filter(tags__id__in=tags_ids)
if ingredients:
ingredients_ids = self._params_to_ints(ingredients)
queryset = queryset.filter(ingredients__id__in=ingredients_ids)
return queryset.filter(user=self.request.user)
def get_serializer_class(self):
"""Return appropriate serializer class"""
if self.action == "retrieve":
return serializers.RecipeDetailSerializer
elif self.action == 'upload_image':
return serializers.RecipeImageSerializer
return self.serializer_class
def perform_create(self, serializer):
"""Create a new recipe"""
serializer.save(user=self.request.user)
@action(methods=['POST'], detail=True, url_path="upload-image")
def upload_image(self, request, pk=None):
"""Upload an image to a recipe"""
recipe = self.get_object()
serializer = self.get_serializer(
recipe,
data=request.data
)
if serializer.is_valid():
serializer.save()
return Response(
serializer.data,
status=status.HTTP_200_OK
)
return Response(
serializer.errors,
status=status.HTTP_400_BAD_REQUEST
)
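# --- Illustrative wiring sketch (added for clarity; not part of the original file) ---
# This shows one plausible way the viewsets above are exposed through DRF's
# DefaultRouter. In a real project this block would normally live in the app's
# urls.py; the 'tags'/'ingredients'/'recipes' prefixes and the 'recipe' app_name
# are assumptions chosen purely for illustration.
from django.urls import include, path
from rest_framework.routers import DefaultRouter

router = DefaultRouter()
router.register('tags', TagViewSet)
router.register('ingredients', IngredientViewSet)
router.register('recipes', RecipeViewSet)

app_name = 'recipe'

urlpatterns = [
    path('', include(router.urls)),
]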
|
import sys

import pygame

pygame.init()

size = width, height = 800, 600
speed = [2, 2]
black = 0, 0, 0

screen = pygame.display.set_mode(size)
ball = pygame.image.load("intro_ball.gif")
ballrect = ball.get_rect()

while True:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            sys.exit()

    ballrect = ballrect.move(speed)
    if ballrect.left < 0 or ballrect.right > width:
        speed[0] = -speed[0]
    if ballrect.top < 0 or ballrect.bottom > height:
        speed[1] = -speed[1]

    screen.fill(black)
    screen.blit(ball, ballrect)
    pygame.display.flip()
|
import pygame
import sys
from .background import slow_bg_obj
from models.icon_button import IconButton
from models.controls import audio_cfg, display_cfg
from utils.assets import Assets
from config import config
from constants import Image, Font, Colors, Text
def settings():
settings_title_font = pygame.font.Font(Font.edit_undo_font, 50)
settings_right_font = pygame.font.Font(Font.edit_undo_font, 50)
settings_left_font = pygame.font.Font(Font.edit_undo_font, 46)
go_back_btn = IconButton(Image.GO_BACK_IMAGE)
plus_btn = IconButton(Image.PLUS_IMAGE)
minus_btn = IconButton(Image.MINUS_IMAGE)
run = True
while run:
slow_bg_obj.update()
slow_bg_obj.render()
Assets.text.draw(Text.SETTINGS, settings_title_font, Colors.YELLOW,
(config.center_x, 130), True, False, True)
Assets.image.draw(Image.TOOLS_IMAGE,
(config.center_x - 150, 120), True)
Assets.image.draw(Image.TOOLBOX_IMAGE,
(config.center_x + 150, 129), True)
Assets.text.draw('VOLUME', settings_left_font, Colors.GREEN,
(config.center_x - 160, 240), True)
Assets.text.draw(f'{audio_cfg.volume}', settings_right_font, Colors.WHITE,
(config.center_x + 155, 240), True)
go_back_btn.draw((config.starting_x + 65, 50), True, True)
plus_btn.draw((config.center_x + 235, 260), True, True)
minus_btn.draw((config.center_x + 70, 260), True, True)
# audio_cfg.display_volume()
config.clock.tick(config.FPS)
pygame.display.flip()
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit(0)
if event.type == pygame.VIDEORESIZE:
if not display_cfg.fullscreen:
config.update(event.w, event.h)
if event.type == pygame.KEYUP:
if event.key == pygame.K_f:
config.update(
config.monitor_size[0], config.monitor_size[1])
display_cfg.toggle_full_screen()
if event.key == pygame.K_BACKSPACE:
run = False
# Mouse click events
if event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 1:
if go_back_btn.isOver():
run = False
if plus_btn.isOver():
audio_cfg.inc_volume(5)
if minus_btn.isOver():
audio_cfg.dec_volume(5)
# Mouse hover events
if event.type == pygame.MOUSEMOTION:
if go_back_btn.isOver():
go_back_btn.outline = True
else:
go_back_btn.outline = False
# keys = pygame.key.get_pressed()
# if keys[pygame.K_BACKSPACE]:
# run = False
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import azure.cli.command_modules.taskhelp._help # pylint: disable=unused-import
def load_params(_):
pass
def load_commands():
import azure.cli.command_modules.taskhelp.commands # pylint: disable=redefined-outer-name
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import pickle
import random
import string
from collections import defaultdict
from typing import Dict, List, Optional, Union
import numpy as np
import torch
from torch.nn import functional as F
from torch.utils.data import Dataset
from transformers import PreTrainedTokenizer, GPT2Tokenizer
from datetime import datetime
LOG_CONST_WIDTH = 60
class LogitsList:
"""A list of logits obtained from a finetuned PET model"""
def __init__(self, score: float, logits: List[List[float]]):
"""
Create a new LogitsList.
:param score: the corresponding PET model's score on the training set
:param logits: the list of logits, where ``logits[i][j]`` is the score for label ``j`` at example ``i``
"""
self.score = score
self.logits = logits
def __repr__(self):
return 'LogitsList(score={}, logits[:2]={})'.format(self.score, self.logits[:2])
def save(self, path: str) -> None:
"""Save this list to a file."""
with open(path, 'w') as fh:
fh.write(str(self.score) + '\n')
for example_logits in self.logits:
fh.write(' '.join(str(logit) for logit in example_logits) + '\n')
@staticmethod
def load(path: str, with_score: bool = True) -> 'LogitsList':
"""Load a list from a file"""
score = -1
logits = []
with open(path, 'r') as fh:
for line_idx, line in enumerate(fh.readlines()):
line = line.rstrip('\n')
if line_idx == 0 and with_score:
score = float(line)
else:
logits.append([float(x) for x in line.split()])
return LogitsList(score=score, logits=logits)
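# Illustrative helper (added for clarity; not part of the original module): shows the
# save()/load() round trip. save() writes the score on the first line followed by one
# whitespace-separated row of logits per example, which is exactly what load() parses.
def _demo_logits_list_roundtrip(path: str) -> 'LogitsList':
    """Save a tiny LogitsList to ``path`` and read it back (illustration only)."""
    demo = LogitsList(score=0.75, logits=[[0.1, 0.9], [0.8, 0.2]])
    demo.save(path)
    reloaded = LogitsList.load(path)
    assert reloaded.score == demo.score and reloaded.logits == demo.logits
    return reloaded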
class InputExample(object):
"""A raw input example consisting of one or two segments of text and a label"""
def __init__(self, guid, text_a, text_b=None, label=None, logits=None, meta: Optional[Dict] = None, idx=-1):
"""
Create a new InputExample.
:param guid: a unique textual identifier
:param text_a: the sequence of text
:param text_b: an optional, second sequence of text
:param label: an optional label
:param logits: an optional list of per-class logits
:param meta: an optional dictionary to store arbitrary meta information
:param idx: an optional numeric index
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
self.logits = logits
self.idx = idx
self.meta = meta if meta else {}
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serialize this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serialize this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
@staticmethod
def load_examples(path: str) -> List['InputExample']:
"""Load a set of input examples from a file"""
with open(path, 'rb') as fh:
return pickle.load(fh)
@staticmethod
def save_examples(examples: List['InputExample'], path: str) -> None:
"""Save a set of input examples to a file"""
with open(path, 'wb') as fh:
pickle.dump(examples, fh)
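# Illustrative helper (added for clarity; not part of the original module): InputExample
# objects are plain Python objects, so save_examples()/load_examples() simply pickle and
# unpickle the whole list. The guids and labels below are arbitrary demonstration values.
def _demo_input_example_roundtrip(path: str) -> List['InputExample']:
    """Pickle two toy examples to ``path`` and load them back (illustration only)."""
    examples = [
        InputExample(guid='demo-1', text_a='a first sentence', label='positive'),
        InputExample(guid='demo-2', text_a='another sentence',
                     text_b='an optional second segment', label='negative'),
    ]
    InputExample.save_examples(examples, path)
    return InputExample.load_examples(path)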
class InputFeatures(object):
"""A set of numeric features obtained from an :class:`InputExample`"""
def __init__(self, input_ids, attention_mask, token_type_ids, label, mlm_labels=None, logits=None,
meta: Optional[Dict] = None, idx=-1):
"""
Create new InputFeatures.
:param input_ids: the input ids corresponding to the original text or text sequence
:param attention_mask: an attention mask, with 0 = no attention, 1 = attention
:param token_type_ids: segment ids as used by BERT
:param label: the label
:param mlm_labels: an optional sequence of labels used for auxiliary language modeling
:param logits: an optional sequence of per-class logits
:param meta: an optional dictionary to store arbitrary meta information
:param idx: an optional numeric index
"""
self.input_ids = input_ids
self.attention_mask = attention_mask
self.token_type_ids = token_type_ids
self.label = label
self.mlm_labels = mlm_labels
self.logits = logits
self.idx = idx
self.meta = meta if meta else {}
def __repr__(self):
return str(self.to_json_string())
def pretty_print(self, tokenizer):
return f'input_ids = {tokenizer.convert_ids_to_tokens(self.input_ids)}\n' + \
f'attention_mask = {self.attention_mask}\n' + \
f'token_type_ids = {self.token_type_ids}\n' + \
f'mlm_labels = {self.mlm_labels}\n' + \
f'logits = {self.logits}\n' + \
f'label = {self.label}'
def to_dict(self):
"""Serialize this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serialize this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
class PLMInputFeatures(InputFeatures):
"""A set of numeric input features for a model pretrained with a permuted language modeling objective."""
def __init__(self, *_, perm_mask, target_mapping, **kwargs):
super().__init__(**kwargs)
self.perm_mask = perm_mask
self.target_mapping = target_mapping
def pretty_print(self, tokenizer):
return super().pretty_print(tokenizer) + '\n' + \
f'perm_mask = {self.perm_mask}\n' + \
f'target_mapping = {self.target_mapping}'
class DictDataset(Dataset):
"""A dataset of tensors that uses a dictionary for key-value mappings"""
def __init__(self, **tensors):
assert all(next(iter(tensors.values())).size(0) == tensor.size(0) for tensor in tensors.values())
self.tensors = tensors
def __getitem__(self, index):
return {key: tensor[index] for key, tensor in self.tensors.items()}
def __len__(self):
return next(iter(self.tensors.values())).size(0)
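# A minimal illustrative sketch of how DictDataset is meant to be used: wrap a few
# equally sized tensors and iterate over them with a standard DataLoader. The helper
# below is only an example and is not referenced elsewhere.
def _example_dict_dataset():
    from torch.utils.data import DataLoader
    ds = DictDataset(input_ids=torch.zeros(8, 4, dtype=torch.long), labels=torch.arange(8))
    loader = DataLoader(ds, batch_size=4)
    for batch in loader:
        assert batch['input_ids'].shape == (4, 4)
        assert batch['labels'].shape == (4,)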
class Timer:
def __init__(self, timer_name):
self.start_time = datetime.utcnow()
self.timer_name = timer_name
def elapsed_str(self):
self.total_time = datetime.utcnow() - self.start_time
elapsed_hours = self.total_time.seconds // 3600
        elapsed_minutes = self.total_time.seconds % 3600 // 60
        elapsed_seconds = self.total_time.seconds % 3600 % 60
        elapsed_days = self.total_time.days
        formatted_str = f'Total time for "{self.timer_name}": {elapsed_hours:02d}:{elapsed_minutes:02d}:' \
                        f'{elapsed_seconds:02d} ({elapsed_days} days)'.center(LOG_CONST_WIDTH, '.')
return formatted_str
def set_seed(seed: int):
""" Set RNG seeds for python's `random` module, numpy and torch"""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
def eq_div(N, i):
""" Equally divide N examples among i buckets. For example, `eq_div(12,3) = [4,4,4]`. """
return [] if i <= 0 else [N // i + 1] * (N % i) + [N // i] * (i - N % i)
def chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i:i + n]
def remove_final_punc(s: str):
"""Remove the last character from a string if it is some form of punctuation"""
return s.rstrip(string.punctuation)
def lowercase_first(s: str):
"""Lowercase the first letter of a string"""
return s[0].lower() + s[1:]
def save_logits(path: str, logits: np.ndarray):
"""Save an array of logits to a file"""
with open(path, 'w') as fh:
for example_logits in logits:
fh.write(' '.join(str(logit) for logit in example_logits) + '\n')
def save_predictions(path: str, wrapper, results: Dict):
"""Save a sequence of predictions to a file"""
predictions_with_idx = []
if wrapper.task_helper and wrapper.task_helper.output:
predictions_with_idx = wrapper.task_helper.output
else:
inv_label_map = {idx: label for label, idx in wrapper.preprocessor.label_map.items()}
for idx, prediction_idx in zip(results['indices'], results['predictions']):
prediction = inv_label_map[prediction_idx]
idx = idx.tolist() if isinstance(idx, np.ndarray) else int(idx)
predictions_with_idx.append({'idx': idx, 'label': prediction})
with open(path, 'w', encoding='utf8') as fh:
for line in predictions_with_idx:
fh.write(json.dumps(line) + '\n')
def softmax(x, temperature=1.0, axis=None):
"""Custom softmax implementation"""
y = np.atleast_2d(x)
if axis is None:
axis = next(j[0] for j in enumerate(y.shape) if j[1] > 1)
y = y * float(temperature)
y = y - np.expand_dims(np.max(y, axis=axis), axis)
y = np.exp(y)
ax_sum = np.expand_dims(np.sum(y, axis=axis), axis)
p = y / ax_sum
if len(x.shape) == 1:
p = p.flatten()
return p
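# A small illustrative check of the custom softmax above: with the default temperature,
# the rows of a 2d input sum to one along the chosen axis. The helper below is only an
# example and is not referenced elsewhere.
def _example_softmax():
    scores = np.array([[1.0, 2.0, 3.0], [0.0, 0.0, 0.0]])
    probs = softmax(scores, axis=1)
    assert probs.shape == scores.shape
    assert np.allclose(probs.sum(axis=1), 1.0)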
def get_verbalization_ids(word: str, tokenizer: PreTrainedTokenizer, force_single_token: bool) -> Union[int, List[int]]:
"""
Get the token ids corresponding to a verbalization
:param word: the verbalization
:param tokenizer: the tokenizer to use
:param force_single_token: whether it should be enforced that the verbalization corresponds to a single token.
If set to true, this method returns a single int instead of a list and throws an error if the word
corresponds to multiple tokens.
:return: either the list of token ids or the single token id corresponding to this word
"""
kwargs = {'add_prefix_space': True} if isinstance(tokenizer, GPT2Tokenizer) else {}
ids = tokenizer.encode(word, add_special_tokens=False, verbose=False, **kwargs)
if not force_single_token:
return ids
assert len(ids) == 1, \
f'Verbalization "{word}" does not correspond to a single token, got {tokenizer.convert_ids_to_tokens(ids)}'
verbalization_id = ids[0]
assert verbalization_id not in tokenizer.all_special_ids, \
f'Verbalization {word} is mapped to a special token {tokenizer.convert_ids_to_tokens(verbalization_id)}'
return verbalization_id
def trim_input_ids(input_ids: torch.tensor, pad_token_id, mask_token_id, num_masks: int):
"""
Trim a sequence of input ids by removing all padding tokens and keeping at most a specific number of mask tokens.
:param input_ids: the sequence of input token ids
:param pad_token_id: the id of the pad token
:param mask_token_id: the id of the mask tokens
    :param num_masks: the number of masks to keep
:return: the trimmed sequence of input ids
"""
assert input_ids.shape[0] == 1
input_ids_without_pad = [x for x in input_ids[0] if x != pad_token_id]
trimmed_input_ids = []
mask_count = 0
for input_id in input_ids_without_pad:
if input_id == mask_token_id:
if mask_count >= num_masks:
continue
mask_count += 1
trimmed_input_ids.append(input_id)
return torch.tensor([trimmed_input_ids], dtype=torch.long, device=input_ids.device)
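# A minimal illustrative sketch of trim_input_ids: padding is removed and only the
# requested number of mask tokens is kept. The pad/mask ids 0 and 103 used below are
# hypothetical placeholders, not values taken from any specific tokenizer.
def _example_trim_input_ids():
    ids = torch.tensor([[5, 103, 103, 7, 0, 0]])
    trimmed = trim_input_ids(ids, pad_token_id=0, mask_token_id=103, num_masks=1)
    assert trimmed.tolist() == [[5, 103, 7]]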
def exact_match(predictions: np.ndarray, actuals: np.ndarray, question_ids: np.ndarray):
"""Compute the exact match (EM) for a sequence of predictions and actual labels"""
unique_questions = set(question_ids)
q_actuals = list(zip(question_ids, actuals))
q_predictions = list(zip(question_ids, predictions))
actuals_per_question = defaultdict(list)
predictions_per_question = defaultdict(list)
for qid, val in q_actuals:
actuals_per_question[qid].append(val)
for qid, val in q_predictions:
predictions_per_question[qid].append(val)
em = 0
for qid in unique_questions:
if actuals_per_question[qid] == predictions_per_question[qid]:
em += 1
em /= len(unique_questions)
return em
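# A small worked example of exact_match: with two questions, only the question whose
# predictions all match its labels counts, giving a score of 0.5. The helper below is
# only an example and is not referenced elsewhere.
def _example_exact_match():
    predictions = np.array([1, 0, 1, 1])
    actuals = np.array([1, 0, 0, 1])
    question_ids = np.array([7, 7, 8, 8])
    assert exact_match(predictions, actuals, question_ids) == 0.5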
def distillation_loss(predictions, targets, temperature):
"""Compute the distillation loss (KL divergence between predictions and targets) as described in the PET paper"""
p = F.log_softmax(predictions / temperature, dim=1)
q = F.softmax(targets / temperature, dim=1)
return F.kl_div(p, q, reduction='sum') * (temperature ** 2) / predictions.shape[0]
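# A minimal sanity sketch of distillation_loss: identical student and teacher logits
# give a numerically near-zero KL divergence. The helper below is only an example and
# is not referenced elsewhere.
def _example_distillation_loss():
    logits = torch.randn(4, 3)
    loss = distillation_loss(logits, logits.clone(), temperature=2.0)
    assert abs(loss.item()) < 1e-4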
|
import matplotlib.pyplot as plt
import numpy as np
from keras.models import Sequential
from keras.layers import LSTM, Dense, Activation
from keras.optimizers import Adam
from sklearn.preprocessing import MinMaxScaler
# Set random seed for reproducibility
np.random.seed(1000)
# Download the dataset from: https://datamarket.com/data/set/22ti/zuerich-monthly-sunspot-numbers-1749-1983#!ds=22ti&display=line
dataset_filename = r'<YOUR_PATH>\dataset.csv'
n_samples = 2820
data = np.zeros(shape=(n_samples, ), dtype=np.float32)
sequence_length = 15
if __name__ == '__main__':
# Load the dataset
with open(dataset_filename, 'r') as f:
lines = f.readlines()
for i, line in enumerate(lines):
if i == 0:
continue
if i == n_samples + 1:
break
_, value = line.split(',')
data[i - 1] = float(value)
# Scale the dataset between -1 and 1
mmscaler = MinMaxScaler((-1.0, 1.0))
data = mmscaler.fit_transform(data.reshape(-1, 1))
# Show the dataset
fig, ax = plt.subplots(figsize=(20, 10))
ax.plot(data)
ax.grid()
ax.set_xlabel('Time steps')
ax.set_ylabel('Monthly sunspots numbers')
plt.show()
# Create the train and test sets (rounding to 2800 samples)
X_ts = np.zeros(shape=(n_samples - sequence_length, sequence_length, 1), dtype=np.float32)
Y_ts = np.zeros(shape=(n_samples - sequence_length, 1), dtype=np.float32)
for i in range(0, data.shape[0] - sequence_length):
X_ts[i] = data[i:i + sequence_length]
Y_ts[i] = data[i + sequence_length]
X_ts_train = X_ts[0:2300, :]
Y_ts_train = Y_ts[0:2300]
X_ts_test = X_ts[2300:2800, :]
Y_ts_test = Y_ts[2300:2800]
# Create the model
model = Sequential()
model.add(LSTM(4, stateful=True, batch_input_shape=(20, sequence_length, 1)))
model.add(Dense(1))
model.add(Activation('tanh'))
# Compile the model
model.compile(optimizer=Adam(lr=0.001, decay=0.0001),
loss='mse',
metrics=['mse'])
# Train the model
model.fit(X_ts_train, Y_ts_train,
batch_size=20,
epochs=100,
shuffle=False,
validation_data=(X_ts_test, Y_ts_test))
# Show the result
fig, ax = plt.subplots(figsize=(20, 10))
ax.plot(Y_ts_test, label='True values')
ax.plot(model.predict(X_ts_test, batch_size=20), label='Predicted values')
ax.grid()
ax.set_xlabel('Time steps')
ax.set_ylabel('Monthly sunspots numbers')
ax.legend()
plt.show()
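    # Illustrative check: undo the MinMax scaling and report the mean absolute error of
    # the test predictions in the original sunspot units (uses only the scaler and model
    # already defined above).
    Y_pred = model.predict(X_ts_test, batch_size=20)
    Y_pred_orig = mmscaler.inverse_transform(Y_pred)
    Y_test_orig = mmscaler.inverse_transform(Y_ts_test)
    print('Test MAE (original sunspot units): {:.2f}'.format(
        float(np.mean(np.abs(Y_pred_orig - Y_test_orig)))))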
|
import logging
import warnings
import numpy as np
from pytplot import get_data, store_data, options
# use nanmean from bottleneck if it's installed, otherwise use the numpy one
# bottleneck nanmean is ~2.5x faster
try:
import bottleneck as bn
nanmean = bn.nanmean
except ImportError:
nanmean = np.nanmean
logging.captureWarnings(True)
logging.basicConfig(format='%(asctime)s: %(message)s', datefmt='%d-%b-%y %H:%M:%S', level=logging.INFO)
def mms_feeps_omni(eyes, probe='1', datatype='electron', data_units='intensity', data_rate='srvy', level='l2', suffix=''):
"""
This function will calculate the omni-directional FEEPS spectrograms, and is automatically called from mms_load_feeps
Parameters:
eyes: dict
Hash table containing the active sensor eyes
probe: str
probe #, e.g., '4' for MMS4
datatype: str
'electron' or 'ion'
data_units: str
'intensity'
data_rate: str
instrument data rate, e.g., 'srvy' or 'brst'
level: str
data level
suffix: str
suffix of the loaded data
Returns:
List of tplot variables created.
"""
out_vars = []
units_label = ''
if data_units == 'intensity':
units_label = '1/(cm^2-sr-s-keV)'
elif data_units == 'counts':
units_label = '[counts/s]'
prefix = 'mms'+probe+'_epd_feeps_'
if datatype == 'electron':
energies = np.array([33.2, 51.90, 70.6, 89.4, 107.1, 125.2, 146.5, 171.3,
200.2, 234.0, 273.4, 319.4, 373.2, 436.0, 509.2])
else:
energies = np.array([57.9, 76.8, 95.4, 114.1, 133.0, 153.7, 177.6,
205.1, 236.7, 273.2, 315.4, 363.8, 419.7, 484.2, 558.6])
# set unique energy bins per spacecraft; from DLT on 31 Jan 2017
eEcorr = [14.0, -1.0, -3.0, -3.0]
iEcorr = [0.0, 0.0, 0.0, 0.0]
eGfact = [1.0, 1.0, 1.0, 1.0]
iGfact = [0.84, 1.0, 1.0, 1.0]
if probe == '1' and datatype == 'electron':
energies = energies + eEcorr[0]
if probe == '2' and datatype == 'electron':
energies = energies + eEcorr[1]
if probe == '3' and datatype == 'electron':
energies = energies + eEcorr[2]
if probe == '4' and datatype == 'electron':
energies = energies + eEcorr[3]
if probe == '1' and datatype == 'ion':
energies = energies + iEcorr[0]
if probe == '2' and datatype == 'ion':
energies = energies + iEcorr[1]
if probe == '3' and datatype == 'ion':
energies = energies + iEcorr[2]
if probe == '4' and datatype == 'ion':
energies = energies + iEcorr[3]
# percent error around energy bin center to accept data for averaging;
# anything outside of energies[i] +/- en_chk*energies[i] will be changed
# to NAN and not averaged
en_chk = 0.10
top_sensors = eyes['top']
bot_sensors = eyes['bottom']
tmpdata = get_data(prefix+data_rate+'_'+level+'_'+datatype+'_top_'+data_units+'_sensorid_'+str(top_sensors[0])+'_clean_sun_removed'+suffix)
if tmpdata is not None:
if level != 'sitl':
dalleyes = np.empty((len(tmpdata[0]), len(tmpdata[2]), len(top_sensors)+len(bot_sensors)))
dalleyes[:] = np.nan
for idx, sensor in enumerate(top_sensors):
var_name = prefix+data_rate+'_'+level+'_'+datatype+'_top_'+data_units+'_sensorid_'+str(sensor)+'_clean_sun_removed'+suffix
data = get_data(var_name)
dalleyes[:, :, idx] = data[1]
try:
iE = np.where(np.abs(energies-data[2]) > en_chk*energies)
if iE[0].size != 0:
dalleyes[:, iE[0], idx] = np.nan
except Warning:
logging.warning('NaN in energy table encountered; sensor T' + str(sensor))
for idx, sensor in enumerate(bot_sensors):
var_name = prefix+data_rate+'_'+level+'_'+datatype+'_bottom_'+data_units+'_sensorid_'+str(sensor)+'_clean_sun_removed'+suffix
data = get_data(var_name)
dalleyes[:, :, idx+len(top_sensors)] = data[1]
try:
iE = np.where(np.abs(energies-data[2]) > en_chk*energies)
if iE[0].size != 0:
dalleyes[:, iE[0], idx+len(top_sensors)] = np.nan
except Warning:
logging.warning('NaN in energy table encountered; sensor B' + str(sensor))
else: # sitl data
dalleyes = np.empty((len(tmpdata[0]), len(tmpdata[2]), len(top_sensors)))
dalleyes[:] = np.nan
for idx, sensor in enumerate(top_sensors):
var_name = prefix+data_rate+'_'+level+'_'+datatype+'_top_'+data_units+'_sensorid_'+str(sensor)+'_clean_sun_removed'+suffix
data = get_data(var_name)
dalleyes[:, :, idx] = data[1]
iE = np.where(np.abs(energies-data[2]) > en_chk*energies)
if iE[0].size != 0:
dalleyes[:, iE[0], idx] = np.nan
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
flux_omni = nanmean(dalleyes, axis=2)
if probe == '1' and datatype == 'electron':
flux_omni = flux_omni*eGfact[0]
if probe == '2' and datatype == 'electron':
flux_omni = flux_omni*eGfact[1]
if probe == '3' and datatype == 'electron':
flux_omni = flux_omni*eGfact[2]
if probe == '4' and datatype == 'electron':
flux_omni = flux_omni*eGfact[3]
if probe == '1' and datatype == 'ion':
flux_omni = flux_omni*iGfact[0]
if probe == '2' and datatype == 'ion':
flux_omni = flux_omni*iGfact[1]
if probe == '3' and datatype == 'ion':
flux_omni = flux_omni*iGfact[2]
if probe == '4' and datatype == 'ion':
flux_omni = flux_omni*iGfact[3]
store_data('mms'+probe+'_epd_feeps_'+data_rate+'_'+level+'_'+datatype+'_'+data_units+'_omni'+suffix, data={'x': tmpdata[0], 'y': flux_omni, 'v': energies})
options('mms'+probe+'_epd_feeps_'+data_rate+'_'+level+'_'+datatype+'_'+data_units+'_omni'+suffix, 'spec', True)
options('mms'+probe+'_epd_feeps_'+data_rate+'_'+level+'_'+datatype+'_'+data_units+'_omni'+suffix, 'ylog', True)
options('mms'+probe+'_epd_feeps_'+data_rate+'_'+level+'_'+datatype+'_'+data_units+'_omni'+suffix, 'zlog', True)
options('mms'+probe+'_epd_feeps_'+data_rate+'_'+level+'_'+datatype+'_'+data_units+'_omni'+suffix, 'Colormap', 'spedas')
options('mms'+probe+'_epd_feeps_'+data_rate+'_'+level+'_'+datatype+'_'+data_units+'_omni'+suffix, 'ztitle', units_label)
options('mms'+probe+'_epd_feeps_'+data_rate+'_'+level+'_'+datatype+'_'+data_units+'_omni'+suffix, 'ytitle', 'MMS' + str(probe) + ' ' + datatype + ' (keV)')
out_vars.append('mms'+probe+'_epd_feeps_'+data_rate+'_'+level+'_'+datatype+'_'+data_units+'_omni'+suffix)
return out_vars
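# Illustrative usage sketch. The sensor-eye table below is hypothetical: in practice the
# ``eyes`` dict is produced during FEEPS loading (mms_load_feeps) rather than written by
# hand, and the sensor ids shown here are placeholders only.
#
#   eyes = {'top': [3, 4, 5, 11, 12], 'bottom': [3, 4, 5, 11, 12]}
#   omni_vars = mms_feeps_omni(eyes, probe='1', datatype='electron',
#                              data_units='intensity', data_rate='srvy', level='l2')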
|
# -*- coding: utf-8 -*-
"""
Test the main module
SPDX-FileCopyrightText: 2016-2021 Uwe Krien <krien@uni-bremen.de>
SPDX-License-Identifier: MIT
"""
__copyright__ = "Uwe Krien <krien@uni-bremen.de>"
__license__ = "MIT"
import os
import shutil
from unittest.mock import MagicMock
import pandas as pd
from reegis import config
from scenario_builder import demand
from scenario_builder import feedin
from scenario_builder import powerplants
from deflex import __file__ as dfile
from deflex import config as cfg
from deflex import scenario as st
from deflex import scenario_creator
from deflex.geometries import deflex_power_lines
from deflex.geometries import deflex_regions
class TestScenarioCreationFull:
@classmethod
def setup_class(cls):
path = os.path.join(
os.path.dirname(__file__), "data", "deflex_2014_de21_test_csv"
)
sc = st.DeflexScenario()
sc.read_csv(path)
cls.tables = sc.input_data
tmp_tables = {}
parameter = {
"costs_source": "ewi",
"downtime_bioenergy": 0.1,
"limited_transformer": "bioenergy",
"local_fuels": "district heating",
"map": "de21",
"mobility_other": "petrol",
"round": 1,
"separate_heat_regions": "de22",
"copperplate": False,
"default_transmission_efficiency": 0.9,
"group_transformer": False,
"heat": True,
"use_CO2_costs": False,
"use_downtime_factor": True,
"use_variable_costs": False,
"year": 2014,
}
config.init(paths=[os.path.dirname(dfile)])
for option, value in parameter.items():
cfg.tmp_set("creator", option, str(value))
config.tmp_set("creator", option, str(value))
name = "heat_demand_deflex"
fn = os.path.join(os.path.dirname(__file__), "data", name + ".csv")
tmp_tables[name] = pd.read_csv(fn, index_col=[0], header=[0, 1])
name = "transformer_balance"
fn = os.path.join(os.path.dirname(__file__), "data", name + ".csv")
tmp_tables[name] = pd.read_csv(fn, index_col=[0, 1, 2], header=[0])
powerplants.scenario_powerplants = MagicMock(
return_value={
"volatile plants": cls.tables["volatile plants"],
"power plants": cls.tables["power plants"],
}
)
powerplants.scenario_chp = MagicMock(
return_value={
"chp-heat plants": cls.tables["chp-heat plants"],
"power plants": cls.tables["power plants"],
}
)
feedin.scenario_feedin = MagicMock(
return_value=cls.tables["volatile series"]
)
demand.scenario_demand = MagicMock(
return_value=cls.tables["demand series"]
)
name = "de21"
polygons = deflex_regions(rmap=parameter["map"], rtype="polygons")
lines = deflex_power_lines(parameter["map"]).index
cls.input_data = scenario_creator.create_scenario(
polygons, 2014, name, lines
)
@classmethod
def teardown_class(cls):
pass
def test_volatile_source(self):
pd.testing.assert_frame_equal(
self.tables["volatile plants"],
self.input_data["volatile plants"],
)
def test_storages(self):
a = self.tables["storages"].apply(pd.to_numeric).astype(float)
b = self.input_data["storages"].apply(pd.to_numeric)
for col in a.columns:
pd.testing.assert_series_equal(a[col], b[col])
# pd.testing.assert_frame_equal(a, b)
def test_demand_series(self):
print(list(self.input_data.keys()))
pd.testing.assert_frame_equal(
self.tables["demand series"],
self.input_data["demand series"],
)
def test_transmission(self):
pd.testing.assert_frame_equal(
self.tables["power lines"].apply(pd.to_numeric).astype(float),
self.input_data["power lines"].apply(pd.to_numeric),
rtol=1e-3,
)
def test_transformer(self):
pd.testing.assert_frame_equal(
self.tables["power plants"],
self.input_data["power plants"],
)
def test_general(self):
pd.testing.assert_series_equal(
self.tables["general"].astype(str).sort_index(),
self.input_data["general"]["value"].astype(str).sort_index(),
)
def test_commodity_source(self):
pd.testing.assert_frame_equal(
self.tables["commodity sources"],
self.input_data["commodity sources"],
)
def test_mobility_series(self):
pd.testing.assert_frame_equal(
self.tables["mobility series"],
self.input_data["mobility series"],
)
def test_mobility(self):
self.input_data["mobility"]["efficiency"] = pd.to_numeric(
self.input_data["mobility"]["efficiency"]
)
pd.testing.assert_frame_equal(
self.tables["mobility"],
self.input_data["mobility"],
)
def test_chp_hp(self):
pd.testing.assert_frame_equal(
self.tables["chp-heat plants"],
self.input_data["chp-heat plants"],
)
def test_decentralised_heat(self):
pd.testing.assert_frame_equal(
self.tables["decentralised heat"],
self.input_data["decentralised heat"],
)
def test_volatile_series(self):
pd.testing.assert_frame_equal(
self.tables["volatile series"],
self.input_data["volatile series"],
)
def test_length(self):
assert len(self.tables.keys()) == len(self.input_data.keys())
assert sorted(list(self.tables.keys())) == sorted(
list(self.input_data.keys())
)
class TestScenarioCreationPart:
@classmethod
def setup_class(cls):
path = os.path.join(
os.path.dirname(__file__), "data", "deflex_2014_de21_part_csv"
)
sc = st.DeflexScenario()
sc.read_csv(path)
cls.tables = sc.input_data
tmp_tables = {}
name = "heat_demand_deflex"
fn = os.path.join(os.path.dirname(__file__), "data", name + ".csv")
tmp_tables[name] = pd.read_csv(fn, index_col=[0], header=[0, 1])
name = "transformer_balance"
fn = os.path.join(os.path.dirname(__file__), "data", name + ".csv")
tmp_tables[name] = pd.read_csv(fn, index_col=[0, 1, 2], header=[0])
powerplants.scenario_powerplants = MagicMock(
return_value={
"volatile plants": cls.tables["volatile plants"],
"power plants": cls.tables["power plants"],
}
)
feedin.scenario_feedin = MagicMock(
return_value=cls.tables["volatile series"]
)
demand.scenario_demand = MagicMock(
return_value=cls.tables["demand series"]
)
name = "de21"
my_parameter = {
"copperplate": False,
"group_transformer": True,
"heat": False,
"use_variable_costs": True,
"use_CO2_costs": True,
"map": "de21",
}
my_name = "deflex"
for k, v in my_parameter.items():
my_name += "_" + str(k) + "-" + str(v)
polygons = deflex_regions(rmap=my_parameter["map"], rtype="polygons")
lines = deflex_power_lines(my_parameter["map"]).index
base = os.path.join(os.path.expanduser("~"), ".tmp_x345234dE_deflex")
os.makedirs(base, exist_ok=True)
path = os.path.join(base, "deflex_test{0}")
scenario_creator.create_basic_reegis_scenario(
name=name,
regions=polygons,
lines=lines,
parameter=my_parameter,
excel_path=path.format(".xlsx"),
csv_path=path.format("_csv"),
)
sc_new = st.DeflexScenario()
sc_new.read_csv(path.format("_csv"))
cls.input_data = sc_new.input_data
@classmethod
def teardown_class(cls):
base = os.path.join(os.path.expanduser("~"), ".tmp_x345234dE_deflex")
shutil.rmtree(base)
def test_volatile_source(self):
pd.testing.assert_frame_equal(
self.tables["volatile plants"],
self.input_data["volatile plants"],
)
def test_storages(self):
a = self.tables["storages"].apply(pd.to_numeric).astype(float)
b = self.input_data["storages"].apply(pd.to_numeric)
for col in a.columns:
pd.testing.assert_series_equal(a[col], b[col])
# pd.testing.assert_frame_equal(a, b)
def test_demand_series(self):
pd.testing.assert_frame_equal(
self.tables["demand series"],
self.input_data["demand series"],
)
def test_transmission(self):
pd.testing.assert_frame_equal(
self.tables["power lines"].apply(pd.to_numeric).astype(float),
self.input_data["power lines"].apply(pd.to_numeric),
)
def test_transformer(self):
pd.testing.assert_frame_equal(
self.tables["power plants"],
self.input_data["power plants"],
)
def test_general(self):
pd.testing.assert_series_equal(
self.tables["general"].astype(str).sort_index(),
self.input_data["general"].astype(str).sort_index(),
)
def test_commodity_source(self):
pd.testing.assert_frame_equal(
self.tables["commodity sources"],
self.input_data["commodity sources"],
)
def test_mobility_series(self):
pd.testing.assert_frame_equal(
self.tables["mobility series"],
self.input_data["mobility series"],
)
def test_mobility(self):
self.input_data["mobility"]["efficiency"] = pd.to_numeric(
self.input_data["mobility"]["efficiency"]
)
pd.testing.assert_frame_equal(
self.tables["mobility"],
self.input_data["mobility"],
)
def test_volatile_series(self):
pd.testing.assert_frame_equal(
self.tables["volatile series"],
self.input_data["volatile series"],
)
def test_length(self):
assert len(self.tables.keys()) == len(self.input_data.keys())
assert sorted(list(self.tables.keys())) == sorted(
list(self.input_data.keys())
)
|
from pl_bolts.models.detection.retinanet.backbones import create_retinanet_backbone
from pl_bolts.models.detection.retinanet.retinanet_module import RetinaNet
__all__ = ["create_retinanet_backbone", "RetinaNet"]
|
import warnings
import matplotlib
warnings.filterwarnings('ignore', category=matplotlib.MatplotlibDeprecationWarning)
warnings.filterwarnings('ignore', category=UserWarning)
import os
import acopy
import samepy
import tsplib95
import networkx as nx
import matplotlib.pyplot as plt
import copy
from acopy.plugins import StatsRecorder, DrawGraph, Printout, InitialEdgePheromone, MaxMinPheromoneRestrict
from acopy.utils.plot import Plotter
# K = int(input())
K = 8
# -------------------------------------------------
# Create the initial graph
print("init-graph")
problem_path = os.path.join('tsp_model', 'bays29.tsp')
problem = tsplib95.load_problem(problem_path)
graph = problem.get_graph()
labels = {i: str(i) for i in graph.nodes()}
colony = acopy.Colony()
solver = acopy.Solver(top=1)
recorder = StatsRecorder('init_data')
printer = Printout()
restricter = MaxMinPheromoneRestrict(save_path='init_data')
solver.add_plugins(recorder, printer, restricter)
limit = 1000
init_ans = solver.solve(graph, colony, limit=limit // 3)
print(init_ans)
print("\n\n\n")
def draw_graph(G, path, title, is_save=False, save_path=""):
    _, ax = plt.subplots(dpi=400)
pos = problem.display_data or problem.node_coords
nx.draw_networkx_nodes(G, pos=pos, ax=ax)
nx.draw_networkx_edges(G, pos=pos, edgelist=path, arrows=False)
nx.draw_networkx_labels(G, pos=pos, labels=labels, font_color='w')
ax.tick_params(left=True, bottom=True, labelleft=True, labelbottom=True)
ax.set_title(title)
if is_save:
plt.savefig(save_path)
plt.show()
# -------------------------------------------------
# make average graph
#
# print("make average graph")
# colony = samepy.Colony()
# solver = samepy.Solver()
# ave_graph = copy.deepcopy(graph)
#
# average_solutions = solver.solve(ave_graph, colony, limit=limit, gen_size=K, problem=problem)
# cnt = 0
# for sol in average_solutions:
# print(sol)
# path = sol.path
# title = f"average cost {sol.cost}"
# print(path)
# cnt += 1
# print("\n\n\n")
# -------------------------------------------------
# pheromone update fix graph
print("update pheromone graph")
colony = samepy.Colony()
solver = samepy.Solver()
update_graph = copy.deepcopy(graph)
average_solutions = solver.solve(update_graph, colony, limit=limit, gen_size=K, problem=problem, pheromone_update=True)
cnt = 0
for sol in average_solutions:
print(sol)
path = sol.path
title = f"average cost {sol.cost}"
print(path)
cnt += 1
print("\n\n\n")
# -------------------------------------------------
# Construct the greedy solutions
# print("greedy min k-path")
# greedy_graph = copy.deepcopy(graph)
# greedy_solutions = []
# for k in range(K):
# print("k-greedy-path: ", k)
# colony = acopy.Colony()
# solver = acopy.Solver()
# greedy_ans = solver.exploit(greedy_graph, colony, limit=100)
# print(greedy_ans, "\n")
# for p in greedy_ans:
# x, y = p[0], p[1]
# greedy_graph.edges[x, y]['pheromone'] = 0
# greedy_graph.edges[y, x]['pheromone'] = 0
# greedy_graph.edges[x, y]['weight'] = 1e100
# greedy_graph.edges[y, x]['weight'] = 1e100
# greedy_solutions.append(greedy_ans)
# print("\n\n\n")
#
# # -------------------------------------------------
# # Compute the differing paths
#
# print("異なるエッジの計算")
#
# greedy_st = set()
# same_st = set()
# different_st = set()
# for sol in greedy_solutions:
# for p in sol:
# x, y = min(p[0], p[1]), max(p[0], p[1])
# greedy_st.add((x, y))
# for sol in average_solutions:
# for p in sol:
# x, y = min(p[0], p[1]), max(p[0], p[1])
# if (x, y) not in greedy_st:
# different_st.add((x, y))
# else:
# same_st.add((x, y))
#
# print("共通エッジ", len(same_st), same_st)
# print("異なるエッジ", len(different_st), different_st)
# print("\n\n\n")
#
# # -------------------------------------------------
# # make average graph
#
# print("make average graph with same or different")
#
#
# def draw_graph_color_st(G, same_path, diff_path, title, is_save=False, save_path=""):
# plt.figure(dpi=400)
# _, ax = plt.subplots()
# pos = problem.display_data or problem.node_coords
# nx.draw_networkx_nodes(G, pos=pos, ax=ax)
# nx.draw_networkx_edges(G, pos=pos, edgelist=same_path, arrows=False, edge_color='blue')
# nx.draw_networkx_edges(G, pos=pos, edgelist=diff_path, arrows=False, edge_color='red')
# ax.tick_params(left=True, bottom=True, labelleft=True, labelbottom=True)
# ax.set_title(title)
# if is_save:
# plt.savefig(save_path)
# plt.show()
#
#
# cnt = 0
# for sol in average_solutions:
# print(sol)
# path = sol.path
# same_path = []
# diff_path = []
# for x, y in path:
# x, y = min(x, y), max(x, y)
# if (x, y) in greedy_st:
# same_path.append((x, y))
# else:
# diff_path.append((x, y))
# title = f"average cost {sol.cost}"
# draw_graph_color_st(graph, same_path, diff_path, title, is_save=True, save_path=f"average_sample/color_{cnt}.png")
# cnt += 1
|
import logging
from multiprocessing.context import Process
from airflow_monitor.shared.error_handler import capture_monitor_exception
from airflow_monitor.shared.runners.base_runner import BaseRunner
logger = logging.getLogger(__name__)
class MultiProcessRunner(BaseRunner):
JOIN_TIMEOUT = 60
def __init__(self, target, **kwargs):
super(MultiProcessRunner, self).__init__(target, **kwargs)
self.process = None # type: Process
@capture_monitor_exception
def start(self):
self.process = Process(target=self.target, kwargs=self.kwargs)
self.process.start()
@capture_monitor_exception
def stop(self):
if self.process and self.is_alive():
self.process.terminate()
self.process.join(MultiProcessRunner.JOIN_TIMEOUT)
if self.process.is_alive():
self.process.kill()
@capture_monitor_exception
def heartbeat(self):
# do we want to do something here?
pass
@capture_monitor_exception
def is_alive(self):
return self.process.is_alive()
def __str__(self):
s = super(MultiProcessRunner, self).__str__()
return f"{s}({self.process})"
|
# -*- coding: utf-8 -*-
"""
utils.checks
~~~~~~~~~~~~
Custom, Sphinx-only flake8 plugins.
:copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import re
import sphinx
name_mail_re = r'[\w ]+(<.*?>)?'
copyright_re = re.compile(r'^ :copyright: Copyright 200\d(-20\d\d)? '
r'by %s(, %s)*[,.]$' % (name_mail_re, name_mail_re))
copyright_2_re = re.compile(r'^ %s(, %s)*[,.]$' %
(name_mail_re, name_mail_re))
license_re = re.compile(r' :license: (.*?).\n')
def flake8ext(_func):
"""Decorate flake8_asserts functions"""
_func.name = _func.__name__
_func.version = sphinx.__version__
_func.code = _func.__name__.upper()
return _func
@flake8ext
def sphinx_has_header(physical_line, filename, lines, line_number):
"""Check for correct headers.
Make sure each Python file has a correct file header including
copyright and license information.
X101 invalid header found
"""
# we have a state machine of sorts so we need to start on line 1. Also,
# there's no point checking really short files
if line_number != 1 or len(lines) < 10:
return
# this file uses a funky license but unfortunately it's not possible to
# ignore specific errors on a file-level basis yet [1]. Simply skip it.
#
# [1] https://gitlab.com/pycqa/flake8/issues/347
if os.path.samefile(filename, './sphinx/util/smartypants.py'):
return
# if the top-level package or not inside the package, ignore
mod_name = os.path.splitext(filename)[0].strip('./\\').replace(
'/', '.').replace('.__init__', '')
if mod_name == 'sphinx' or not mod_name.startswith('sphinx.'):
return
# line number correction
offset = 1
if lines[0:1] == ['#!/usr/bin/env python\n']:
lines = lines[1:]
offset = 2
llist = []
doc_open = False
for lno, line in enumerate(lines):
llist.append(line)
if lno == 0:
if line != '# -*- coding: utf-8 -*-\n':
return 0, 'X101 missing coding declaration'
elif lno == 1:
if line != '"""\n' and line != 'r"""\n':
return 0, 'X101 missing docstring begin (""")'
else:
doc_open = True
elif doc_open:
if line == '"""\n':
# end of docstring
if lno <= 4:
return 0, 'X101 missing module name in docstring'
break
if line != '\n' and line[:4] != ' ' and doc_open:
return 0, 'X101 missing correct docstring indentation'
if lno == 2:
mod_name_len = len(line.strip())
if line.strip() != mod_name:
return 4, 'X101 wrong module name in docstring heading'
elif lno == 3:
if line.strip() != mod_name_len * '~':
return (4, 'X101 wrong module name underline, should be '
'~~~...~')
else:
return 0, 'X101 missing end and/or start of docstring...'
# check for copyright and license fields
license = llist[-2:-1]
if not license or not license_re.match(license[0]):
return 0, 'X101 no correct license info'
offset = -3
copyright = llist[offset:offset + 1]
while copyright and copyright_2_re.match(copyright[0]):
offset -= 1
copyright = llist[offset:offset + 1]
if not copyright or not copyright_re.match(copyright[0]):
return 0, 'X101 no correct copyright info'
|
from sys import exit
from . import cmdline
exit(cmdline.main())
|
#!/usr/bin/env python3
# Copyright (c) 2021 CINN Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from op_test import OpTest, OpTestTool
import paddle
import paddle.nn.functional as F
import cinn
from cinn.frontend import *
from cinn.common import *
@OpTestTool.skip_if(not is_compiled_with_cuda(),
"x86 test will be skipped due to timeout.")
class TestElementwiseAddOp(OpTest):
def setUp(self):
self.init_case()
def init_case(self):
self.inputs = {
"x": np.random.random([32, 64]).astype("float32"),
"y": np.random.random([32, 64]).astype("float32"),
"dout": np.random.random((32, 64)).astype("float32")
}
self.axis = -1
def build_paddle_program(self, target):
x = paddle.to_tensor(self.inputs["x"], stop_gradient=False)
y = paddle.to_tensor(self.inputs["y"], stop_gradient=False)
def get_unsqueeze_axis(x_rank, y_rank, axis):
self.assertTrue(
x_rank >= y_rank,
"The rank of x should be greater or equal to that of y.")
axis = axis if axis >= 0 else x_rank - y_rank
unsqueeze_axis = np.arange(0, axis).tolist() + np.arange(
axis + y_rank, x_rank).tolist()
return unsqueeze_axis
unsqueeze_axis = get_unsqueeze_axis(
len(self.inputs["x"].shape), len(self.inputs["y"].shape),
self.axis)
y_t = paddle.unsqueeze(
y, axis=unsqueeze_axis) if len(unsqueeze_axis) > 0 else y
out = paddle.add(x, y_t)
self.paddle_outputs = [out]
self.paddle_grads = self.get_paddle_grads([out], [x, y],
[self.inputs["dout"]])
def build_cinn_program(self, target):
builder = NetBuilder("add")
x = builder.create_input(Float(32), self.inputs["x"].shape, "x")
y = builder.create_input(Float(32), self.inputs["y"].shape, "y")
out = builder.elementwise_add(x, y, axis=self.axis)
dout = builder.create_input(
Float(32), self.inputs["dout"].shape, "dout")
x_grad, y_grad = builder.elementwise_add_grad(
dout, x, y, axis=self.axis)
prog = builder.build()
res = self.get_cinn_output(
prog, target, [x, y, dout],
[self.inputs["x"], self.inputs["y"], self.inputs["dout"]],
[out, x_grad, y_grad])
self.cinn_outputs = [res[0]]
self.cinn_grads = [res[1], res[2]]
def test_check_results(self):
self.check_outputs_and_grads()
class TestAddCase1(TestElementwiseAddOp):
def init_case(self):
self.inputs = {
"x": np.random.random([8, 64, 256, 256]).astype("float32"),
"y": np.random.random([256, 256]).astype("float32"),
"dout": np.random.random((8, 64, 256, 256)).astype("float32")
}
self.axis = -1
class TestAddCase2(TestElementwiseAddOp):
def init_case(self):
self.inputs = {
"x": np.random.random([8, 1, 32, 32]).astype("float32"),
"y": np.random.random([64, 32]).astype("float32"),
"dout": np.random.random((8, 64, 32, 32)).astype("float32")
}
self.axis = 1
class TestAddCase3(TestElementwiseAddOp):
def init_case(self):
self.inputs = {
"x": np.random.random([4, 16, 8, 32]).astype("float32"),
"y": np.random.random([4, 16]).astype("float32"),
"dout": np.random.random((4, 16, 8, 32)).astype("float32")
}
self.axis = 0
class TestAddCase4(TestElementwiseAddOp):
def init_case(self):
self.inputs = {
"x": np.random.random([4, 16, 8, 32]).astype("float32"),
"y": np.random.random([1]).astype("float32"),
"dout": np.random.random((4, 16, 8, 32)).astype("float32")
}
self.axis = -1
if __name__ == "__main__":
unittest.main()
|
import numpy as np
from .ksvm import ksvm_train, kernel
from .checks import _check_size, _check_labels
def one_vs_one_ksvm_inference(X, Xtrain, alpha, b, kfun, kparam):
"""Multiclass kernel SVM prediction of the class labels.
Parameters
----------
X : ndarray, shape (m, n)
input features (one row per feature vector).
Xtrain : ndarray, shape (t, n)
features used during training (one row per feature vector).
alpha : ndarray, shape (t, k * (k - 1) // 2)
matrix of learned coefficients.
b : ndarray, shape (k * (k - 1) // 2,)
vector of biases.
kfun : string
name of the kernel function
kparam : float
parameter of the kernel
Returns
-------
ndarray, shape (m,)
predicted labels (one per feature vector) in the range 0...(k-1).
ndarray, shape (m, k)
classification scores.
"""
_check_size("mn, tn, ts, s", X, Xtrain, alpha, b)
    # 1) recover the number of classes k from s = k * (k - 1) // 2 pairwise classifiers
m = X.shape[0]
s = b.size
k = int(1 + np.sqrt(1 + 8 * s)) // 2
votes = np.zeros((m, k))
K = kernel(X, Xtrain, kfun, kparam)
logits = K @ alpha + b
bin_labels = (logits > 0)
# For each pair of classes...
j = 0
for pos in range(k):
for neg in range(pos + 1, k):
votes[:, pos] += bin_labels[:, j]
votes[:, neg] += (1 - bin_labels[:, j])
j += 1
labels = np.argmax(votes, 1)
return labels, votes
def one_vs_one_ksvm_train(X, Y, kfun, kparam, lambda_, lr=1e-3, steps=1000,
init_alpha=None, init_b=None):
"""Train a multi-class kernel SVM using the one vs. one strategy.
Parameters
----------
X : ndarray, shape (m, n)
training features.
Y : ndarray, shape (m,)
training labels.
kfun : string
name of the kernel function
kparam : float
parameter of the kernel
lambda_ : float
regularization coefficient.
lr : float
learning rate
steps : int
number of training steps
init_alpha : ndarray, shape (m, k * (k - 1) // 2)
        initial coefficients (None for zero initialization)
init_b : ndarray, shape (k * (k - 1) // 2,)
initial biases (None for zero initialization)
Returns
-------
alpha : ndarray, shape (m, k * (k - 1) // 2)
matrix of learned coefficients.
    b : ndarray, shape (k * (k - 1) // 2,)
learned biases.
"""
_check_size("mn, m", X, Y)
Y = _check_labels(Y)
k = Y.max() + 1
m, n = X.shape
alpha = np.zeros((m, k * (k - 1) // 2))
b = np.zeros(k * (k - 1) // 2)
j = 0
# For each pair of classes...
for pos in range(k):
for neg in range(pos + 1, k):
# Build a training subset
subset = (np.logical_or(Y == pos, Y == neg)).nonzero()[0]
Xbin = X[subset, :]
Ybin = (Y[subset] == pos)
a1 = (None if init_alpha is None else init_alpha[subset, j])
b1 = (0 if init_b is None else init_b[j])
# Train the classifier
abin, bbin = ksvm_train(Xbin, Ybin, kfun, kparam, lambda_, lr=lr,
steps=steps, init_alpha=a1, init_b=b1)
alpha[subset, j] = abin
b[j] = bbin
j += 1
return alpha, b
def one_vs_rest_ksvm_inference(X, Xtrain, alpha, b, kfun, kparam):
"""Multiclass kernel SVM prediction of the class labels.
Parameters
----------
X : ndarray, shape (m, n)
input features (one row per feature vector).
Xtrain : ndarray, shape (t, n)
features used during training (one row per feature vector).
alpha : ndarray, shape (t, k)
matrix of learned coefficients.
b : ndarray, shape (k,)
vector of biases.
kfun : string
name of the kernel function
kparam : float
parameter of the kernel
Returns
-------
ndarray, shape (m,)
predicted labels (one per feature vector) in the range 0...(k-1).
ndarray, shape (m, k)
classification scores.
"""
_check_size("mn, tn, tk, k", X, Xtrain, alpha, b)
K = kernel(X, Xtrain, kfun, kparam)
logits = K @ alpha + b
labels = np.argmax(logits, 1)
return labels, logits
def one_vs_rest_ksvm_train(X, Y, kfun, kparam, lambda_, lr=1e-3, steps=1000,
init_alpha=None, init_b=None):
"""Train a multi-class kernel SVM using the one vs. rest strategy.
Parameters
----------
X : ndarray, shape (m, n)
training features.
Y : ndarray, shape (m,)
training labels.
kfun : string
name of the kernel function
kparam : float
parameter of the kernel
lambda_ : float
regularization coefficient.
lr : float
learning rate
steps : int
number of training steps
    init_alpha : ndarray, shape (m, k)
        initial coefficients (None for zero initialization)
    init_b : ndarray, shape (k,)
        initial biases (None for zero initialization)
    Returns
    -------
    alpha : ndarray, shape (m, k)
        matrix of learned coefficients.
    b : ndarray, shape (k,)
learned biases.
"""
_check_size("mn, m", X, Y)
Y = _check_labels(Y)
k = Y.max() + 1
m, n = X.shape
alpha = np.zeros((m, k))
b = np.zeros(k)
for c in range(k):
Ybin = (Y == c)
a1 = (None if init_alpha is None else init_alpha[:, c])
b1 = (0 if init_b is None else init_b[c])
abin, bbin = ksvm_train(X, Ybin, kfun, kparam, lambda_, lr=lr,
steps=steps, init_alpha=a1, init_b=b1)
alpha[:, c] = abin
b[c] = bbin
return alpha, b
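def _example_one_vs_rest():
    """A minimal illustrative sketch (not referenced elsewhere): train and apply a
    one-vs-rest kernel SVM on a tiny synthetic three-class problem. The kernel name
    'rbf' is assumed to be one of the names accepted by ``kernel``/``ksvm_train``."""
    rng = np.random.default_rng(0)
    centers = np.array([[0.0, 0.0], [3.0, 0.0], [0.0, 3.0]])
    X = rng.normal(scale=0.5, size=(60, 2)) + np.repeat(centers, 20, axis=0)
    Y = np.repeat(np.arange(3), 20)
    alpha, b = one_vs_rest_ksvm_train(X, Y, 'rbf', 1.0, lambda_=1e-3, lr=1e-2, steps=200)
    labels, scores = one_vs_rest_ksvm_inference(X, X, alpha, b, 'rbf', 1.0)
    return (labels == Y).mean()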
|
#
# (c) 2018 Extreme Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
from ansible.plugins.action.network import ActionModule as ActionNetworkModule
PRIVATE_KEYS_RE = re.compile('__.+__')
class ActionModule(ActionNetworkModule):
def run(self, tmp=None, task_vars=None):
del tmp # tmp no longer has any effect
self._config_module = True
return super(ActionModule, self).run(task_vars=task_vars)
|
from typing import Dict
from paragen.criteria import AbstractCriterion, create_criterion, register_criterion
@register_criterion
class MultiTaskCriterion(AbstractCriterion):
"""
    Multi-task criterion that wraps one sub-criterion per task and sums their (optionally weighted) losses.
"""
def __init__(self, criterions):
super().__init__()
self._criterion_configs = criterions
self._names = [name for name in self._criterion_configs]
self._criterions, self._weights = None, None
def _build(self, model, padding_idx=-1):
"""
        Build the multi-task criterion by dispatching args to each sub-criterion
Args:
model: neural model
padding_idx: pad idx to ignore
"""
self._model = model
self._criterions, self._weights = {}, {}
for name in self._names:
criterion_config = self._criterion_configs[name]
self._weights[name] = criterion_config.pop('weight') if 'weight' in criterion_config else 1
self._criterions[name] = create_criterion(self._criterion_configs[name])
self._criterions[name].build(model, padding_idx)
def forward(self, net_input, net_output):
"""
Compute loss from a batch of samples
Args:
net_input: neural network input and is used for compute the logits
net_output (dict): oracle target for a network input
Returns:
- loss for network backward and optimization
- logging information
"""
lprobs_dict = self._model(**net_input)
        assert isinstance(lprobs_dict, Dict), 'A multitask learning model must return a dict of log-probabilities'
return self.compute_loss(lprobs_dict, **net_output)
def compute_loss(self, lprobs_dict, **net_output):
        # accumulate the weighted loss and logging states of every sub-criterion
tot_loss, complete_logging_states = 0, {}
for name in self._names:
lprobs, net_out, criterion = lprobs_dict[name], net_output[name], self._criterions[name]
loss, logging_states = criterion.compute_loss(lprobs, **net_out)
tot_loss += self._weights[name] * loss
logging_states = {f'{name}.{key}': val for key, val in logging_states.items()}
complete_logging_states.update(logging_states)
complete_logging_states['loss'] = tot_loss.data.item()
return tot_loss, complete_logging_states
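# Illustrative configuration sketch. The task names and the keys inside each sub-config
# are hypothetical (they depend on the criterions registered in ParaGen); only the
# optional ``weight`` key is interpreted by this class itself, and the public ``build``
# call is assumed to forward to ``_build`` as usual for criterions:
#
#   criterions = {
#       'translation': {'class': 'CrossEntropy', 'weight': 1.0},
#       'length_prediction': {'class': 'CrossEntropy', 'weight': 0.5},
#   }
#   criterion = MultiTaskCriterion(criterions)
#   criterion.build(model, padding_idx)   # builds one sub-criterion per task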
|
# -*- coding: utf-8 -*-
# Based On:
# https://gist.github.com/chrisbolin/2e90bc492270802d00a6#file-serve-py
# Wrapper around python's SimpleHTTPServer
# If url path does not match a file on disk, redirect
# to index.html so that React can handle the routing.
# Useful for development / testing purposes.
import SimpleHTTPServer
import SocketServer
import urlparse
import os
PORT = 1234
class Handler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def do_GET(self):
# Parse query data to find out what was requested
parsed_params = urlparse.urlparse(self.path)
# See if the file requested exists
if os.access('.' + os.sep + parsed_params.path, os.R_OK):
# File exists, serve it up
SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
else:
# send index.html
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
with open('index.html', 'r') as fin:
self.copyfile(fin, self.wfile)
httpd = SocketServer.TCPServer(("", PORT), Handler)
print "serving at port", PORT
httpd.serve_forever()
|
# coding: utf-8
"""
data.world API
# data.world in a nutshell data.world is a productive, secure platform for modern data teamwork. We bring together your data practitioners, subject matter experts, and other stakeholders by removing costly barriers to data discovery, comprehension, integration, and sharing. Everything your team needs to quickly understand and use data stays with it. Social features and integrations encourage collaborators to ask and answer questions, share discoveries, and coordinate closely while still using their preferred tools. Our focus on interoperability helps you enhance your own data with data from any source, including our vast and growing library of free public datasets. Sophisticated permissions, auditing features, and more make it easy to manage who views your data and what they do with it. # Conventions ## Authentication All data.world API calls require an API token. OAuth2 is the preferred and most secure method for authenticating users of your data.world applications. Visit our [oauth documentation](https://apidocs.data.world/toolkit/oauth) for additional information. Alternatively, you can obtain a token for _personal use or testing_ by navigating to your profile settings, under the Advanced tab ([https://data.world/settings/advanced](https://data.world/settings/advanced)). Authentication must be provided in API requests via the `Authorization` header. For example, for a user whose API token is `my_api_token`, the request header should be `Authorization: Bearer my_api_token` (note the `Bearer` prefix). ## Content type By default, `application/json` is the content type used in request and response bodies. Exceptions are noted in respective endpoint documentation. ## HTTPS only Our APIs can only be accessed via HTTPS. # Interested in building data.world apps? Check out our [developer portal](https://apidocs.data.world) for tips on how to get started, tutorials, and to interact with the API endpoints right within your browser.
OpenAPI spec version: 0.21.0
Contact: help@data.world
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class CreateResponse(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'message': 'str',
'uri': 'str'
}
attribute_map = {
'message': 'message',
'uri': 'uri'
}
def __init__(self, message=None, uri=None):
"""
CreateResponse - a model defined in Swagger
"""
self._message = None
self._uri = None
if message is not None:
self.message = message
self.uri = uri
@property
def message(self):
"""
Gets the message of this CreateResponse.
:return: The message of this CreateResponse.
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""
Sets the message of this CreateResponse.
:param message: The message of this CreateResponse.
:type: str
"""
self._message = message
@property
def uri(self):
"""
Gets the uri of this CreateResponse.
URI of newly created resource.
:return: The uri of this CreateResponse.
:rtype: str
"""
return self._uri
@uri.setter
def uri(self, uri):
"""
Sets the uri of this CreateResponse.
URI of newly created resource.
:param uri: The uri of this CreateResponse.
:type: str
"""
if uri is None:
raise ValueError("Invalid value for `uri`, must not be `None`")
self._uri = uri
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, CreateResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
import flask
def clear_localstack(stack):
"""
Clear given werkzeug LocalStack instance.
:param ctx: local stack instance
:type ctx: werkzeug.local.LocalStack
"""
while stack.pop():
pass
def clear_flask_context():
"""
Clear flask current_app and request globals.
When using :meth:`flask.Flask.test_client`, even as context manager,
the flask's globals :attr:`flask.current_app` and :attr:`flask.request`
are left dirty, so testing code relying on them will probably fail.
    This function cleans said globals and should be called after testing
with :meth:`flask.Flask.test_client`.
"""
clear_localstack(flask._app_ctx_stack)
clear_localstack(flask._request_ctx_stack)
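def _example_clear_flask_context():
    """A minimal illustrative sketch (not referenced elsewhere): exercise a throwaway
    app with the test client, then reset the leaked context globals."""
    app = flask.Flask(__name__)
    @app.route('/')
    def index():
        return 'ok'
    with app.test_client() as client:
        assert client.get('/').status_code == 200
    clear_flask_context()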
|
#!/usr/bin/env python
import os, time, collections, copy, json, multiprocessing
from PhysicsTools.NanoAODTools.postprocessing.framework.postprocessor import *
from PhysicsTools.NanoAODTools.postprocessing.framework.crabhelper import inputFiles,runsAndLumis
from PhysicsTools.NanoAODTools.postprocessing.modules.common.puWeightProducer import *
from PhysicsTools.NanoAODTools.postprocessing.modules.btv.btagSFProducer import *
from PhysicsTools.NanoAODTools.postprocessing.modules.jme.jetmetHelperRun2 import *
from FourTopNAOD.Kai.modules.LeptonSkimmer import *
from FourTopNAOD.Kai.modules.JetMETSkimmer import *
isData = False
isUltraLegacy = False
era = "2017"
subera = None
thePreselection = None
crossSection = 61526.7
equivLumi = 41.53
nEventsPositive = 44607324
nEventsNegative = 19876
sumWeights = 44587448.0
TriggerChannel = None
JESUnc = "Merged" # options: "All", "Merged", "Total"
theFiles = inputFiles()
GoldenJSON = {"2016": {"non-UL": "Cert_271036-284044_13TeV_ReReco_07Aug2017_Collisions16_JSON.txt",
"UL": "Cert_271036-284044_13TeV_Legacy2016_Collisions16_JSON.txt"
},
"2017": {"non-UL": "Cert_294927-306462_13TeV_EOY2017ReReco_Collisions17_JSON_v1.txt",
"UL": "Cert_294927-306462_13TeV_UL2017_Collisions17_GoldenJSON.txt"
},
"2018": {"non-UL": "Cert_314472-325175_13TeV_17SeptEarlyReReco2018ABC_PromptEraD_Collisions18_JSON.txt",
"UL": "Cert_314472-325175_13TeV_Legacy2018_Collisions18_JSON.txt"
}
}
if isData:
theLumis = os.path.join(os.environ["CMSSW_BASE"], "python/FourTopNAOD/Kai/jsons", GoldenJSON.get(era).get("UL" if isUltraLegacy else "non-UL"))
print("Loading Golden Json: {}".format(theLumis))
if not os.path.isfile(theLumis):
theLumis = os.path.join(os.environ["CMSSW_BASE"], "src/FourTopNAOD/Kai/python/jsons", GoldenJSON.get(era).get("UL" if isUltraLegacy else "non-UL"))
if not os.path.isfile(theLumis):
raise RuntimeError("Valid GoldenJSON file not found, if running on CRAB try a new scram build before resubmitting")
else:
theLumis = None
moduleCache = []
if not isData:
if era == "2016":
moduleCache.append(puWeight_2016())
elif era == "2017":
moduleCache.append(puWeight_2017())
elif era == "2018":
moduleCache.append(puWeight_2018())
else:
raise RuntimeError("Unexpected era identifier {}".format(era))
if JESUnc in ["All", "Merged"]: #btag POG provides all JEC unc sources, except for RelativeSample
btagjes_sources = ['jes', 'jesAbsoluteMPFBias', 'jesAbsoluteScale', 'jesAbsoluteStat', 'jesFlavorQCD', 'jesFragmentation', 'jesPileUpDataMC', 'jesPileUpPtBB', 'jesPileUpPtEC1', 'jesPileUpPtEC2', 'jesPileUpPtHF', 'jesPileUpPtRef', 'jesRelativeBal', 'jesRelativeFSR', 'jesRelativeJEREC1', 'jesRelativeJEREC2', 'jesRelativeJERHF', 'jesRelativePtBB', 'jesRelativePtEC1', 'jesRelativePtEC2', 'jesRelativePtHF', 'jesRelativeStatEC', 'jesRelativeStatFSR', 'jesRelativeStatHF', 'jesSinglePionECAL', 'jesSinglePionHCAL', 'jesTimePtEta']
# if JESUnc == "Merged": #no btag shape unc for regrouped JEC available, so use the total one ("jes") and the remaining single ones that are not grouped (see also: https://docs.google.com/spreadsheets/d/1Feuj1n0MdotcPq19Mht7SUIgvkXkA4hiB0BxEuBShLw/edit#gid=1345121349)
# btagjes_sources = ['jes', 'jesFlavorQCD','jesPileUpPtEC2', 'jesRelativeBal']
else:
btagjes_sources = ['jes']
moduleCache.append(btagSFProducer(era,
algo="deepjet",
selectedWPs=['M', 'shape_corr'],
sfFileName=None, #Automatically deduced
verbose=0,
jesSystsForShape=btagjes_sources
)
)
moduleCache.append(btagSFProducer(era,
algo="deepcsv",
selectedWPs=['M', 'shape_corr'],
sfFileName=None, #Automatically deduced
verbose=0,
jesSystsForShape=btagjes_sources
)
)
#Need to make it into a function, so extra () pair...
jmeModule = createJMECorrector(isMC=(not isData),
dataYear=int(era),
runPeriod=subera if isData else None,
jesUncert=JESUnc,
jetType="AK4PFchs",
noGroom=False,
metBranchName="METFixEE2017" if era == "2017" else "MET",
applySmearing=True,
isFastSim=False,
applyHEMfix=True if era == "2018" and isUltraLegacy else False,
splitJER=False,
saveMETUncs=['T1', 'T1Smear']
)
moduleCache.append(jmeModule())
moduleCache.append(TriggerAndLeptonSkimmer('baseline',
era=era,
subera=subera,
isData=isData,
TriggerChannel=TriggerChannel,
fillHists=False,
mode="Flag",
)
)
moduleCache.append(JetMETSkimmer(jetMinPt=20.0,
jetMaxEta=2.4 if era == "2016" else 2.5,
jetMinID=0b010,
jetMinCount=4,
minPseudoHT=350,
fillHists=False
)
)
p=PostProcessor(".",
theFiles,
modules=moduleCache,
cut=thePreselection,
provenance=True,
fwkJobReport=True,
jsonInput=theLumis,
histFileName="hist.root",
histDirName="plots",
branchsel=None,
outputbranchsel=None,
compression="LZMA:9",
friend=False,
postfix=None,
noOut=False,
justcount=False,
haddFileName="tree.root",
maxEntries=None,
firstEntry=0,
prefetch=True,
longTermCache=False
)
p.run()
|
from qtpy import QtCore
from qtpy import QtGui
from qtpy import QtWidgets
from labelme import QT5
from labelme.shape import Shape
import labelme.utils
# TODO(unknown):
# - [maybe] Find optimal epsilon value.
CURSOR_DEFAULT = QtCore.Qt.ArrowCursor
CURSOR_POINT = QtCore.Qt.PointingHandCursor
CURSOR_DRAW = QtCore.Qt.CrossCursor
CURSOR_MOVE = QtCore.Qt.ClosedHandCursor
CURSOR_GRAB = QtCore.Qt.OpenHandCursor
class Canvas(QtWidgets.QWidget):
zoomRequest = QtCore.Signal(int, QtCore.QPoint)
scrollRequest = QtCore.Signal(int, int)
newShape = QtCore.Signal()
selectionChanged = QtCore.Signal(list)
shapeMoved = QtCore.Signal()
drawingPolygon = QtCore.Signal(bool)
edgeSelected = QtCore.Signal(bool, object)
vertexSelected = QtCore.Signal(bool)
CREATE, EDIT = 0, 1
# polygon, rectangle, line, or point
_createMode = "polygon"
_fill_drawing = False
def __init__(self, *args, **kwargs):
self.epsilon = kwargs.pop("epsilon", 10.0)
self.double_click = kwargs.pop("double_click", "close")
if self.double_click not in [None, "close"]:
raise ValueError(
"Unexpected value for double_click event: {}".format(
self.double_click
)
)
super(Canvas, self).__init__(*args, **kwargs)
# Initialise local state.
self.mode = self.EDIT
self.shapes = []
self.shapesBackups = []
self.current = None
self.selectedShapes = [] # save the selected shapes here
self.selectedShapesCopy = []
# self.line represents:
# - createMode == 'polygon': edge from last point to current
# - createMode == 'rectangle': diagonal line of the rectangle
# - createMode == 'line': the line
# - createMode == 'point': the point
self.line = Shape()
self.prevPoint = QtCore.QPoint()
self.prevMovePoint = QtCore.QPoint()
self.offsets = QtCore.QPoint(), QtCore.QPoint()
self.scale = 1.0
self.pixmap = QtGui.QPixmap()
self.visible = {}
self._hideBackround = False
self.hideBackround = False
self.hShape = None
self.prevhShape = None
self.hVertex = None
self.prevhVertex = None
self.hEdge = None
self.prevhEdge = None
self.movingShape = False
self._painter = QtGui.QPainter()
self._cursor = CURSOR_DEFAULT
# Menus:
# 0: right-click without selection and dragging of shapes
# 1: right-click with selection and dragging of shapes
self.menus = (QtWidgets.QMenu(), QtWidgets.QMenu())
# Set widget options.
self.setMouseTracking(True)
self.setFocusPolicy(QtCore.Qt.WheelFocus)
def fillDrawing(self):
return self._fill_drawing
def setFillDrawing(self, value):
self._fill_drawing = value
@property
def createMode(self):
return self._createMode
@createMode.setter
def createMode(self, value):
if value not in [
"polygon",
"rectangle",
"circle",
"line",
"point",
"linestrip",
]:
raise ValueError("Unsupported createMode: %s" % value)
self._createMode = value
def storeShapes(self):
shapesBackup = []
for shape in self.shapes:
shapesBackup.append(shape.copy())
if len(self.shapesBackups) >= 10:
self.shapesBackups = self.shapesBackups[-9:]
self.shapesBackups.append(shapesBackup)
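        # e.g. three successive edits push three snapshots onto shapesBackups;
        # restoreShape() below pops the latest one and reinstates the previous
        # snapshot, giving a one-step-at-a-time undo capped at 10 entries.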
@property
def isShapeRestorable(self):
if len(self.shapesBackups) < 2:
return False
return True
def restoreShape(self):
if not self.isShapeRestorable:
return
self.shapesBackups.pop() # latest
shapesBackup = self.shapesBackups.pop()
self.shapes = shapesBackup
self.selectedShapes = []
for shape in self.shapes:
shape.selected = False
self.repaint()
def enterEvent(self, ev):
self.overrideCursor(self._cursor)
def leaveEvent(self, ev):
self.unHighlight()
self.restoreCursor()
def focusOutEvent(self, ev):
self.restoreCursor()
def isVisible(self, shape):
return self.visible.get(shape, True)
def drawing(self):
return self.mode == self.CREATE
def editing(self):
return self.mode == self.EDIT
def setEditing(self, value=True):
self.mode = self.EDIT if value else self.CREATE
if not value: # Create
self.unHighlight()
self.deSelectShape()
def unHighlight(self):
if self.hShape:
self.hShape.highlightClear()
self.update()
self.prevhShape = self.hShape
self.prevhVertex = self.hVertex
self.prevhEdge = self.hEdge
self.hShape = self.hVertex = self.hEdge = None
def selectedVertex(self):
return self.hVertex is not None
def mouseMoveEvent(self, ev):
"""Update line with last point and current coordinates."""
try:
if QT5:
pos = self.transformPos(ev.localPos())
else:
pos = self.transformPos(ev.posF())
except AttributeError:
return
self.prevMovePoint = pos
self.restoreCursor()
# Polygon drawing.
if self.drawing():
self.line.shape_type = self.createMode
self.overrideCursor(CURSOR_DRAW)
if not self.current:
return
if self.outOfPixmap(pos):
# Don't allow the user to draw outside the pixmap.
# Project the point to the pixmap's edges.
pos = self.intersectionPoint(self.current[-1], pos)
elif (
len(self.current) > 1
and self.createMode == "polygon"
and self.closeEnough(pos, self.current[0])
):
# Attract line to starting point and
# colorise to alert the user.
pos = self.current[0]
self.overrideCursor(CURSOR_POINT)
self.current.highlightVertex(0, Shape.NEAR_VERTEX)
if self.createMode in ["polygon", "linestrip"]:
self.line[0] = self.current[-1]
self.line[1] = pos
elif self.createMode == "rectangle":
self.line.points = [self.current[0], pos]
self.line.close()
elif self.createMode == "circle":
self.line.points = [self.current[0], pos]
self.line.shape_type = "circle"
elif self.createMode == "line":
self.line.points = [self.current[0], pos]
self.line.close()
elif self.createMode == "point":
self.line.points = [self.current[0]]
self.line.close()
self.repaint()
self.current.highlightClear()
return
# Polygon copy moving.
if QtCore.Qt.RightButton & ev.buttons():
if self.selectedShapesCopy and self.prevPoint:
self.overrideCursor(CURSOR_MOVE)
self.boundedMoveShapes(self.selectedShapesCopy, pos)
self.repaint()
elif self.selectedShapes:
self.selectedShapesCopy = [
s.copy() for s in self.selectedShapes
]
self.repaint()
return
# Polygon/Vertex moving.
if QtCore.Qt.LeftButton & ev.buttons():
if self.selectedVertex():
self.boundedMoveVertex(pos)
self.repaint()
self.movingShape = True
elif self.selectedShapes and self.prevPoint:
self.overrideCursor(CURSOR_MOVE)
self.boundedMoveShapes(self.selectedShapes, pos)
self.repaint()
self.movingShape = True
return
# Just hovering over the canvas, 2 possibilities:
# - Highlight shapes
# - Highlight vertex
# Update shape/vertex fill and tooltip value accordingly.
self.setToolTip(self.tr("Image"))
for shape in reversed([s for s in self.shapes if self.isVisible(s)]):
# Look for a nearby vertex to highlight. If that fails,
# check if we happen to be inside a shape.
index = shape.nearestVertex(pos, self.epsilon / self.scale)
index_edge = shape.nearestEdge(pos, self.epsilon / self.scale)
if index is not None:
if self.selectedVertex():
self.hShape.highlightClear()
self.prevhVertex = self.hVertex = index
self.prevhShape = self.hShape = shape
self.prevhEdge = self.hEdge = index_edge
shape.highlightVertex(index, shape.MOVE_VERTEX)
self.overrideCursor(CURSOR_POINT)
self.setToolTip(self.tr("Click & drag to move point"))
self.setStatusTip(self.toolTip())
self.update()
break
elif shape.containsPoint(pos):
if self.selectedVertex():
self.hShape.highlightClear()
self.prevhVertex = self.hVertex
self.hVertex = None
self.prevhShape = self.hShape = shape
self.prevhEdge = self.hEdge = index_edge
self.setToolTip(
self.tr("Click & drag to move shape '%s'") % shape.label
)
self.setStatusTip(self.toolTip())
self.overrideCursor(CURSOR_GRAB)
self.update()
break
else: # Nothing found, clear highlights, reset state.
self.unHighlight()
self.edgeSelected.emit(self.hEdge is not None, self.hShape)
self.vertexSelected.emit(self.hVertex is not None)
def addPointToEdge(self):
shape = self.prevhShape
index = self.prevhEdge
point = self.prevMovePoint
if shape is None or index is None or point is None:
return
shape.insertPoint(index, point)
shape.highlightVertex(index, shape.MOVE_VERTEX)
self.hShape = shape
self.hVertex = index
self.hEdge = None
self.movingShape = True
def removeSelectedPoint(self):
shape = self.prevhShape
point = self.prevMovePoint
if shape is None or point is None:
return
index = shape.nearestVertex(point, self.epsilon)
shape.removePoint(index)
# shape.highlightVertex(index, shape.MOVE_VERTEX)
self.hShape = shape
self.hVertex = None
self.hEdge = None
self.movingShape = True # Save changes
def mousePressEvent(self, ev):
if QT5:
pos = self.transformPos(ev.localPos())
else:
pos = self.transformPos(ev.posF())
if ev.button() == QtCore.Qt.LeftButton:
if self.drawing():
if self.current:
# Add point to existing shape.
if self.createMode == "polygon":
self.current.addPoint(self.line[1])
self.line[0] = self.current[-1]
if self.current.isClosed():
self.finalise()
elif self.createMode in ["rectangle", "circle", "line"]:
assert len(self.current.points) == 1
self.current.points = self.line.points
self.finalise()
elif self.createMode == "linestrip":
self.current.addPoint(self.line[1])
self.line[0] = self.current[-1]
if int(ev.modifiers()) == QtCore.Qt.ControlModifier:
self.finalise()
elif not self.outOfPixmap(pos):
# Create new shape.
self.current = Shape(shape_type=self.createMode)
self.current.addPoint(pos)
if self.createMode == "point":
self.finalise()
else:
if self.createMode == "circle":
self.current.shape_type = "circle"
self.line.points = [pos, pos]
self.setHiding()
self.drawingPolygon.emit(True)
self.update()
else:
group_mode = int(ev.modifiers()) == QtCore.Qt.ControlModifier
self.selectShapePoint(pos, multiple_selection_mode=group_mode)
self.prevPoint = pos
self.repaint()
elif ev.button() == QtCore.Qt.RightButton and self.editing():
group_mode = int(ev.modifiers()) == QtCore.Qt.ControlModifier
self.selectShapePoint(pos, multiple_selection_mode=group_mode)
self.prevPoint = pos
self.repaint()
def mouseReleaseEvent(self, ev):
if ev.button() == QtCore.Qt.RightButton:
menu = self.menus[len(self.selectedShapesCopy) > 0]
self.restoreCursor()
if (
not menu.exec_(self.mapToGlobal(ev.pos()))
and self.selectedShapesCopy
):
# Cancel the move by deleting the shadow copy.
self.selectedShapesCopy = []
self.repaint()
elif ev.button() == QtCore.Qt.LeftButton and self.selectedShapes:
self.overrideCursor(CURSOR_GRAB)
if (
self.editing()
and int(ev.modifiers()) == QtCore.Qt.ShiftModifier
):
# Add point to line if: left-click + SHIFT on a line segment
self.addPointToEdge()
elif ev.button() == QtCore.Qt.LeftButton and self.selectedVertex():
if (
self.editing()
and int(ev.modifiers()) == QtCore.Qt.ShiftModifier
):
# Delete point if: left-click + SHIFT on a point
self.removeSelectedPoint()
if self.movingShape and self.hShape:
index = self.shapes.index(self.hShape)
if (
self.shapesBackups[-1][index].points
!= self.shapes[index].points
):
self.storeShapes()
self.shapeMoved.emit()
self.movingShape = False
def endMove(self, copy):
assert self.selectedShapes and self.selectedShapesCopy
assert len(self.selectedShapesCopy) == len(self.selectedShapes)
if copy:
for i, shape in enumerate(self.selectedShapesCopy):
self.shapes.append(shape)
self.selectedShapes[i].selected = False
self.selectedShapes[i] = shape
else:
for i, shape in enumerate(self.selectedShapesCopy):
self.selectedShapes[i].points = shape.points
self.selectedShapesCopy = []
self.repaint()
self.storeShapes()
return True
def hideBackroundShapes(self, value):
self.hideBackround = value
if self.selectedShapes:
# Only hide other shapes if there is a current selection.
# Otherwise the user will not be able to select a shape.
self.setHiding(True)
self.repaint()
def setHiding(self, enable=True):
self._hideBackround = self.hideBackround if enable else False
def canCloseShape(self):
return self.drawing() and self.current and len(self.current) > 2
def mouseDoubleClickEvent(self, ev):
# We need at least 4 points here, since the mousePress handler
# adds an extra one before this handler is called.
if (
self.double_click == "close"
and self.canCloseShape()
and len(self.current) > 3
):
self.current.popPoint()
self.finalise()
def selectShapes(self, shapes):
self.setHiding()
self.selectionChanged.emit(shapes)
self.update()
def selectShapePoint(self, point, multiple_selection_mode):
"""Select the first shape created which contains this point."""
if self.selectedVertex(): # A vertex is marked for selection.
index, shape = self.hVertex, self.hShape
shape.highlightVertex(index, shape.MOVE_VERTEX)
else:
for shape in reversed(self.shapes):
if self.isVisible(shape) and shape.containsPoint(point):
self.calculateOffsets(shape, point)
self.setHiding()
if multiple_selection_mode:
if shape not in self.selectedShapes:
self.selectionChanged.emit(
self.selectedShapes + [shape]
)
else:
self.selectionChanged.emit([shape])
return
self.deSelectShape()
def calculateOffsets(self, shape, point):
rect = shape.boundingRect()
x1 = rect.x() - point.x()
y1 = rect.y() - point.y()
x2 = (rect.x() + rect.width() - 1) - point.x()
y2 = (rect.y() + rect.height() - 1) - point.y()
self.offsets = QtCore.QPoint(x1, y1), QtCore.QPoint(x2, y2)
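        # Worked example (hypothetical numbers): for a bounding rect (x=10, y=10, w=50, h=40)
        # grabbed at point (30, 20), the offsets become (-20, -10) and (29, 29), i.e. the slack
        # towards the rect's top-left and bottom-right corners used by boundedMoveShapes().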
def boundedMoveVertex(self, pos):
index, shape = self.hVertex, self.hShape
point = shape[index]
if self.outOfPixmap(pos):
pos = self.intersectionPoint(point, pos)
shape.moveVertexBy(index, pos - point)
def boundedMoveShapes(self, shapes, pos):
if self.outOfPixmap(pos):
return False # No need to move
o1 = pos + self.offsets[0]
if self.outOfPixmap(o1):
pos -= QtCore.QPoint(min(0, o1.x()), min(0, o1.y()))
o2 = pos + self.offsets[1]
if self.outOfPixmap(o2):
pos += QtCore.QPoint(
min(0, self.pixmap.width() - o2.x()),
min(0, self.pixmap.height() - o2.y()),
)
# XXX: The next line tracks the new position of the cursor
# relative to the shape, but also results in making it
# a bit "shaky" when nearing the border and allows it to
# go outside of the shape's area for some reason.
# self.calculateOffsets(self.selectedShapes, pos)
dp = pos - self.prevPoint
if dp:
for shape in shapes:
shape.moveBy(dp)
self.prevPoint = pos
return True
return False
def deSelectShape(self):
if self.selectedShapes:
self.setHiding(False)
self.selectionChanged.emit([])
self.update()
def deleteSelected(self):
deleted_shapes = []
if self.selectedShapes:
for shape in self.selectedShapes:
self.shapes.remove(shape)
deleted_shapes.append(shape)
self.storeShapes()
self.selectedShapes = []
self.update()
return deleted_shapes
def copySelectedShapes(self):
if self.selectedShapes:
self.selectedShapesCopy = [s.copy() for s in self.selectedShapes]
self.boundedShiftShapes(self.selectedShapesCopy)
self.endMove(copy=True)
return self.selectedShapes
def boundedShiftShapes(self, shapes):
# Try to move in one direction, and if it fails in another.
# Give up if both fail.
point = shapes[0][0]
offset = QtCore.QPoint(2.0, 2.0)
self.offsets = QtCore.QPoint(), QtCore.QPoint()
self.prevPoint = point
if not self.boundedMoveShapes(shapes, point - offset):
self.boundedMoveShapes(shapes, point + offset)
def paintEvent(self, event):
if not self.pixmap:
return super(Canvas, self).paintEvent(event)
p = self._painter
p.begin(self)
p.setRenderHint(QtGui.QPainter.Antialiasing)
p.setRenderHint(QtGui.QPainter.HighQualityAntialiasing)
p.setRenderHint(QtGui.QPainter.SmoothPixmapTransform)
p.scale(self.scale, self.scale)
p.translate(self.offsetToCenter())
p.drawPixmap(0, 0, self.pixmap)
Shape.scale = self.scale
for shape in self.shapes:
if (shape.selected or not self._hideBackround) and self.isVisible(
shape
):
shape.fill = shape.selected or shape == self.hShape
shape.paint(p)
if self.current:
self.current.paint(p)
self.line.paint(p)
if self.selectedShapesCopy:
for s in self.selectedShapesCopy:
s.paint(p)
if (
self.fillDrawing()
and self.createMode == "polygon"
and self.current is not None
and len(self.current.points) >= 2
):
drawing_shape = self.current.copy()
drawing_shape.addPoint(self.line[1])
drawing_shape.fill = True
drawing_shape.paint(p)
p.end()
def transformPos(self, point):
"""Convert from widget-logical coordinates to painter-logical ones."""
return point / self.scale - self.offsetToCenter()
def offsetToCenter(self):
s = self.scale
area = super(Canvas, self).size()
w, h = self.pixmap.width() * s, self.pixmap.height() * s
aw, ah = area.width(), area.height()
x = (aw - w) / (2 * s) if aw > w else 0
y = (ah - h) / (2 * s) if ah > h else 0
return QtCore.QPoint(x, y)
def outOfPixmap(self, p):
w, h = self.pixmap.width(), self.pixmap.height()
return not (0 <= p.x() <= w - 1 and 0 <= p.y() <= h - 1)
def finalise(self):
assert self.current
self.current.close()
self.shapes.append(self.current)
self.storeShapes()
self.current = None
self.setHiding(False)
self.newShape.emit()
self.update()
def closeEnough(self, p1, p2):
# d = distance(p1 - p2)
# m = (p1-p2).manhattanLength()
# print "d %.2f, m %d, %.2f" % (d, m, d - m)
# divide by scale to allow more precision when zoomed in
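        # e.g. with epsilon=10.0 and scale=2.0 (zoomed in 2x), two points count as
        # "close enough" when their distance is below 5 pixels in image coordinates.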
return labelme.utils.distance(p1 - p2) < (self.epsilon / self.scale)
def intersectionPoint(self, p1, p2):
# Cycle through each image edge in clockwise fashion,
# and find the one intersecting the current line segment.
# http://paulbourke.net/geometry/lineline2d/
size = self.pixmap.size()
points = [
(0, 0),
(size.width() - 1, 0),
(size.width() - 1, size.height() - 1),
(0, size.height() - 1),
]
# x1, y1 should be in the pixmap, x2, y2 should be out of the pixmap
x1 = min(max(p1.x(), 0), size.width() - 1)
y1 = min(max(p1.y(), 0), size.height() - 1)
x2, y2 = p2.x(), p2.y()
d, i, (x, y) = min(self.intersectingEdges((x1, y1), (x2, y2), points))
x3, y3 = points[i]
x4, y4 = points[(i + 1) % 4]
if (x, y) == (x1, y1):
# Handle cases where previous point is on one of the edges.
if x3 == x4:
return QtCore.QPoint(x3, min(max(0, y2), max(y3, y4)))
else: # y3 == y4
return QtCore.QPoint(min(max(0, x2), max(x3, x4)), y3)
return QtCore.QPoint(x, y)
def intersectingEdges(self, point1, point2, points):
"""Find intersecting edges.
For each edge formed by `points', yield the intersection
with the line segment `(x1,y1) - (x2,y2)`, if it exists.
Also return the distance of `(x2,y2)' to the middle of the
edge along with its index, so that the one closest can be chosen.
"""
(x1, y1) = point1
(x2, y2) = point2
for i in range(4):
x3, y3 = points[i]
x4, y4 = points[(i + 1) % 4]
denom = (y4 - y3) * (x2 - x1) - (x4 - x3) * (y2 - y1)
nua = (x4 - x3) * (y1 - y3) - (y4 - y3) * (x1 - x3)
nub = (x2 - x1) * (y1 - y3) - (y2 - y1) * (x1 - x3)
if denom == 0:
# This covers two cases:
# nua == nub == 0: Coincident
# otherwise: Parallel
continue
ua, ub = nua / denom, nub / denom
if 0 <= ua <= 1 and 0 <= ub <= 1:
x = x1 + ua * (x2 - x1)
y = y1 + ua * (y2 - y1)
m = QtCore.QPoint((x3 + x4) / 2, (y3 + y4) / 2)
d = labelme.utils.distance(m - QtCore.QPoint(x2, y2))
yield d, i, (x, y)
# These two, along with a call to adjustSize are required for the
# scroll area.
def sizeHint(self):
return self.minimumSizeHint()
def minimumSizeHint(self):
if self.pixmap:
return self.scale * self.pixmap.size()
return super(Canvas, self).minimumSizeHint()
def wheelEvent(self, ev):
if QT5:
mods = ev.modifiers()
delta = ev.angleDelta()
if QtCore.Qt.ControlModifier == int(mods):
# with Ctrl/Command key
# zoom
self.zoomRequest.emit(delta.y(), ev.pos())
else:
# scroll
self.scrollRequest.emit(delta.x(), QtCore.Qt.Horizontal)
self.scrollRequest.emit(delta.y(), QtCore.Qt.Vertical)
else:
if ev.orientation() == QtCore.Qt.Vertical:
mods = ev.modifiers()
if QtCore.Qt.ControlModifier == int(mods):
# with Ctrl/Command key
self.zoomRequest.emit(ev.delta(), ev.pos())
else:
self.scrollRequest.emit(
ev.delta(),
QtCore.Qt.Horizontal
if (QtCore.Qt.ShiftModifier == int(mods))
else QtCore.Qt.Vertical,
)
else:
self.scrollRequest.emit(ev.delta(), QtCore.Qt.Horizontal)
ev.accept()
def keyPressEvent(self, ev):
key = ev.key()
if key == QtCore.Qt.Key_Escape and self.current:
self.current = None
self.drawingPolygon.emit(False)
self.update()
elif key == QtCore.Qt.Key_Return and self.canCloseShape():
self.finalise()
def setLastLabel(self, text, flags):
assert text
self.shapes[-1].label = text
self.shapes[-1].flags = flags
self.shapesBackups.pop()
self.storeShapes()
return self.shapes[-1]
def undoLastLine(self):
assert self.shapes
self.current = self.shapes.pop()
self.current.setOpen()
if self.createMode in ["polygon", "linestrip"]:
self.line.points = [self.current[-1], self.current[0]]
elif self.createMode in ["rectangle", "line", "circle"]:
self.current.points = self.current.points[0:1]
elif self.createMode == "point":
self.current = None
self.drawingPolygon.emit(True)
def undoLastPoint(self):
if not self.current or self.current.isClosed():
return
self.current.popPoint()
if len(self.current) > 0:
self.line[0] = self.current[-1]
else:
self.current = None
self.drawingPolygon.emit(False)
self.repaint()
def loadPixmap(self, pixmap, clear_shapes=True):
self.pixmap = pixmap
if clear_shapes:
self.shapes = []
self.repaint()
def loadShapes(self, shapes, replace=True):
if replace:
self.shapes = list(shapes)
else:
self.shapes.extend(shapes)
self.storeShapes()
self.current = None
self.hShape = None
self.hVertex = None
self.hEdge = None
self.repaint()
def setShapeVisible(self, shape, value):
self.visible[shape] = value
self.repaint()
def overrideCursor(self, cursor):
self.restoreCursor()
self._cursor = cursor
QtWidgets.QApplication.setOverrideCursor(cursor)
def restoreCursor(self):
QtWidgets.QApplication.restoreOverrideCursor()
def resetState(self):
self.restoreCursor()
self.pixmap = None
self.shapesBackups = []
self.update()
|
import pdb
import mnist_loader
from network import load
from plotter import plot_mnist_digit
training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
#for data in training_data:
# plot_mnist_digit(data[0])
# #pdb.set_trace()
training_data_2 = [data for data in training_data if data[1][4] == 1]
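# Keep only examples whose one-hot label vector has a 1 at index 4, i.e. images of the digit "4".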
pdb.set_trace()
print('data loaded')
net = load('network.json')
net.generated_data = training_data[0][0]
net.SGD(training_data_2, 30, 10, 0.5, evaluation_data=test_data, monitor_evaluation_accuracy=True)
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['SecretBackendRoleArgs', 'SecretBackendRole']
@pulumi.input_type
class SecretBackendRoleArgs:
def __init__(__self__, *,
policies: pulumi.Input[Sequence[pulumi.Input[str]]],
backend: Optional[pulumi.Input[str]] = None,
local: Optional[pulumi.Input[bool]] = None,
max_ttl: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
token_type: Optional[pulumi.Input[str]] = None,
ttl: Optional[pulumi.Input[int]] = None):
"""
The set of arguments for constructing a SecretBackendRole resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] policies: The list of Consul ACL policies to associate with these roles.
:param pulumi.Input[str] backend: The unique name of an existing Consul secrets backend mount. Must not begin or end with a `/`. One of `path` or `backend` is required.
:param pulumi.Input[bool] local: Indicates that the token should not be replicated globally and instead be local to the current datacenter.
:param pulumi.Input[int] max_ttl: Maximum TTL for leases associated with this role, in seconds.
:param pulumi.Input[str] name: The name of the Consul secrets engine role to create.
:param pulumi.Input[str] token_type: Specifies the type of token to create when using this role. Valid values are "client" or "management".
:param pulumi.Input[int] ttl: Specifies the TTL for this role.
"""
pulumi.set(__self__, "policies", policies)
if backend is not None:
pulumi.set(__self__, "backend", backend)
if local is not None:
pulumi.set(__self__, "local", local)
if max_ttl is not None:
pulumi.set(__self__, "max_ttl", max_ttl)
if name is not None:
pulumi.set(__self__, "name", name)
if token_type is not None:
pulumi.set(__self__, "token_type", token_type)
if ttl is not None:
pulumi.set(__self__, "ttl", ttl)
@property
@pulumi.getter
def policies(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
The list of Consul ACL policies to associate with these roles.
"""
return pulumi.get(self, "policies")
@policies.setter
def policies(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "policies", value)
@property
@pulumi.getter
def backend(self) -> Optional[pulumi.Input[str]]:
"""
The unique name of an existing Consul secrets backend mount. Must not begin or end with a `/`. One of `path` or `backend` is required.
"""
return pulumi.get(self, "backend")
@backend.setter
def backend(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "backend", value)
@property
@pulumi.getter
def local(self) -> Optional[pulumi.Input[bool]]:
"""
Indicates that the token should not be replicated globally and instead be local to the current datacenter.
"""
return pulumi.get(self, "local")
@local.setter
def local(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "local", value)
@property
@pulumi.getter(name="maxTtl")
def max_ttl(self) -> Optional[pulumi.Input[int]]:
"""
Maximum TTL for leases associated with this role, in seconds.
"""
return pulumi.get(self, "max_ttl")
@max_ttl.setter
def max_ttl(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max_ttl", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Consul secrets engine role to create.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="tokenType")
def token_type(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the type of token to create when using this role. Valid values are "client" or "management".
"""
return pulumi.get(self, "token_type")
@token_type.setter
def token_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "token_type", value)
@property
@pulumi.getter
def ttl(self) -> Optional[pulumi.Input[int]]:
"""
Specifies the TTL for this role.
"""
return pulumi.get(self, "ttl")
@ttl.setter
def ttl(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "ttl", value)
@pulumi.input_type
class _SecretBackendRoleState:
def __init__(__self__, *,
backend: Optional[pulumi.Input[str]] = None,
local: Optional[pulumi.Input[bool]] = None,
max_ttl: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
policies: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
token_type: Optional[pulumi.Input[str]] = None,
ttl: Optional[pulumi.Input[int]] = None):
"""
Input properties used for looking up and filtering SecretBackendRole resources.
:param pulumi.Input[str] backend: The unique name of an existing Consul secrets backend mount. Must not begin or end with a `/`. One of `path` or `backend` is required.
:param pulumi.Input[bool] local: Indicates that the token should not be replicated globally and instead be local to the current datacenter.
:param pulumi.Input[int] max_ttl: Maximum TTL for leases associated with this role, in seconds.
:param pulumi.Input[str] name: The name of the Consul secrets engine role to create.
:param pulumi.Input[Sequence[pulumi.Input[str]]] policies: The list of Consul ACL policies to associate with these roles.
:param pulumi.Input[str] token_type: Specifies the type of token to create when using this role. Valid values are "client" or "management".
:param pulumi.Input[int] ttl: Specifies the TTL for this role.
"""
if backend is not None:
pulumi.set(__self__, "backend", backend)
if local is not None:
pulumi.set(__self__, "local", local)
if max_ttl is not None:
pulumi.set(__self__, "max_ttl", max_ttl)
if name is not None:
pulumi.set(__self__, "name", name)
if policies is not None:
pulumi.set(__self__, "policies", policies)
if token_type is not None:
pulumi.set(__self__, "token_type", token_type)
if ttl is not None:
pulumi.set(__self__, "ttl", ttl)
@property
@pulumi.getter
def backend(self) -> Optional[pulumi.Input[str]]:
"""
The unique name of an existing Consul secrets backend mount. Must not begin or end with a `/`. One of `path` or `backend` is required.
"""
return pulumi.get(self, "backend")
@backend.setter
def backend(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "backend", value)
@property
@pulumi.getter
def local(self) -> Optional[pulumi.Input[bool]]:
"""
Indicates that the token should not be replicated globally and instead be local to the current datacenter.
"""
return pulumi.get(self, "local")
@local.setter
def local(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "local", value)
@property
@pulumi.getter(name="maxTtl")
def max_ttl(self) -> Optional[pulumi.Input[int]]:
"""
Maximum TTL for leases associated with this role, in seconds.
"""
return pulumi.get(self, "max_ttl")
@max_ttl.setter
def max_ttl(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max_ttl", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Consul secrets engine role to create.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def policies(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The list of Consul ACL policies to associate with these roles.
"""
return pulumi.get(self, "policies")
@policies.setter
def policies(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "policies", value)
@property
@pulumi.getter(name="tokenType")
def token_type(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the type of token to create when using this role. Valid values are "client" or "management".
"""
return pulumi.get(self, "token_type")
@token_type.setter
def token_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "token_type", value)
@property
@pulumi.getter
def ttl(self) -> Optional[pulumi.Input[int]]:
"""
Specifies the TTL for this role.
"""
return pulumi.get(self, "ttl")
@ttl.setter
def ttl(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "ttl", value)
class SecretBackendRole(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
backend: Optional[pulumi.Input[str]] = None,
local: Optional[pulumi.Input[bool]] = None,
max_ttl: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
policies: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
token_type: Optional[pulumi.Input[str]] = None,
ttl: Optional[pulumi.Input[int]] = None,
__props__=None):
"""
Manages a Consul secrets role for a Consul secrets engine in Vault. Consul secret backends can then issue Consul tokens.
## Example Usage
```python
import pulumi
import pulumi_vault as vault
test = vault.consul.SecretBackend("test",
path="consul",
description="Manages the Consul backend",
address="127.0.0.1:8500",
token="4240861b-ce3d-8530-115a-521ff070dd29")
example = vault.consul.SecretBackendRole("example",
backend=test.path,
policies=["example-policy"])
```
## Import
Consul secret backend roles can be imported using the `backend`, `/roles/`, and the `name` e.g.
```sh
$ pulumi import vault:consul/secretBackendRole:SecretBackendRole example consul/roles/my-role
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] backend: The unique name of an existing Consul secrets backend mount. Must not begin or end with a `/`. One of `path` or `backend` is required.
:param pulumi.Input[bool] local: Indicates that the token should not be replicated globally and instead be local to the current datacenter.
:param pulumi.Input[int] max_ttl: Maximum TTL for leases associated with this role, in seconds.
:param pulumi.Input[str] name: The name of the Consul secrets engine role to create.
:param pulumi.Input[Sequence[pulumi.Input[str]]] policies: The list of Consul ACL policies to associate with these roles.
:param pulumi.Input[str] token_type: Specifies the type of token to create when using this role. Valid values are "client" or "management".
:param pulumi.Input[int] ttl: Specifies the TTL for this role.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: SecretBackendRoleArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages a Consul secrets role for a Consul secrets engine in Vault. Consul secret backends can then issue Consul tokens.
## Example Usage
```python
import pulumi
import pulumi_vault as vault
test = vault.consul.SecretBackend("test",
path="consul",
description="Manages the Consul backend",
address="127.0.0.1:8500",
token="4240861b-ce3d-8530-115a-521ff070dd29")
example = vault.consul.SecretBackendRole("example",
backend=test.path,
policies=["example-policy"])
```
## Import
Consul secret backend roles can be imported using the `backend`, `/roles/`, and the `name` e.g.
```sh
$ pulumi import vault:consul/secretBackendRole:SecretBackendRole example consul/roles/my-role
```
:param str resource_name: The name of the resource.
:param SecretBackendRoleArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(SecretBackendRoleArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
backend: Optional[pulumi.Input[str]] = None,
local: Optional[pulumi.Input[bool]] = None,
max_ttl: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
policies: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
token_type: Optional[pulumi.Input[str]] = None,
ttl: Optional[pulumi.Input[int]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = SecretBackendRoleArgs.__new__(SecretBackendRoleArgs)
__props__.__dict__["backend"] = backend
__props__.__dict__["local"] = local
__props__.__dict__["max_ttl"] = max_ttl
__props__.__dict__["name"] = name
if policies is None and not opts.urn:
raise TypeError("Missing required property 'policies'")
__props__.__dict__["policies"] = policies
__props__.__dict__["token_type"] = token_type
__props__.__dict__["ttl"] = ttl
super(SecretBackendRole, __self__).__init__(
'vault:consul/secretBackendRole:SecretBackendRole',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
backend: Optional[pulumi.Input[str]] = None,
local: Optional[pulumi.Input[bool]] = None,
max_ttl: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
policies: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
token_type: Optional[pulumi.Input[str]] = None,
ttl: Optional[pulumi.Input[int]] = None) -> 'SecretBackendRole':
"""
Get an existing SecretBackendRole resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] backend: The unique name of an existing Consul secrets backend mount. Must not begin or end with a `/`. One of `path` or `backend` is required.
:param pulumi.Input[bool] local: Indicates that the token should not be replicated globally and instead be local to the current datacenter.
:param pulumi.Input[int] max_ttl: Maximum TTL for leases associated with this role, in seconds.
:param pulumi.Input[str] name: The name of the Consul secrets engine role to create.
:param pulumi.Input[Sequence[pulumi.Input[str]]] policies: The list of Consul ACL policies to associate with these roles.
:param pulumi.Input[str] token_type: Specifies the type of token to create when using this role. Valid values are "client" or "management".
:param pulumi.Input[int] ttl: Specifies the TTL for this role.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _SecretBackendRoleState.__new__(_SecretBackendRoleState)
__props__.__dict__["backend"] = backend
__props__.__dict__["local"] = local
__props__.__dict__["max_ttl"] = max_ttl
__props__.__dict__["name"] = name
__props__.__dict__["policies"] = policies
__props__.__dict__["token_type"] = token_type
__props__.__dict__["ttl"] = ttl
return SecretBackendRole(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def backend(self) -> pulumi.Output[Optional[str]]:
"""
The unique name of an existing Consul secrets backend mount. Must not begin or end with a `/`. One of `path` or `backend` is required.
"""
return pulumi.get(self, "backend")
@property
@pulumi.getter
def local(self) -> pulumi.Output[Optional[bool]]:
"""
Indicates that the token should not be replicated globally and instead be local to the current datacenter.
"""
return pulumi.get(self, "local")
@property
@pulumi.getter(name="maxTtl")
def max_ttl(self) -> pulumi.Output[Optional[int]]:
"""
Maximum TTL for leases associated with this role, in seconds.
"""
return pulumi.get(self, "max_ttl")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the Consul secrets engine role to create.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def policies(self) -> pulumi.Output[Sequence[str]]:
"""
The list of Consul ACL policies to associate with these roles.
"""
return pulumi.get(self, "policies")
@property
@pulumi.getter(name="tokenType")
def token_type(self) -> pulumi.Output[Optional[str]]:
"""
Specifies the type of token to create when using this role. Valid values are "client" or "management".
"""
return pulumi.get(self, "token_type")
@property
@pulumi.getter
def ttl(self) -> pulumi.Output[Optional[int]]:
"""
Specifies the TTL for this role.
"""
return pulumi.get(self, "ttl")
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Service running an App engine app on a Compute Engine VM."""
from __future__ import with_statement
import logging
import SocketServer
import sys
import traceback
from wsgiref import simple_server
from google.appengine.api import appinfo_includes
from google.appengine.ext.vmruntime import meta_app
from google.appengine.ext.vmruntime import middlewares
from google.appengine.ext.vmruntime import vmconfig
from google.appengine.ext.vmruntime import vmstub
try:
import googleclouddebugger
except ImportError:
pass
LISTENING_HOST = '0.0.0.0'
HTTP_PORT = 8080
class VmRuntimeServer(object):
"""Server for an AppEngine VMRuntime app."""
def __init__(self, host, port, app, appinfo_external):
"""Constructor.
Args:
host: The (string) host to bind to, e.g. 'localhost' or '0.0.0.0'.
port: The (integer) port to bind to.
app: The WSGI app to serve.
appinfo_external: The AppInfoExternal for the user app we are running.
"""
self._host, self._port = host, port
self._app = app
self._appinfo_external = appinfo_external
self._server = self.CreateServer()
logging.info('Creating server on %s:%d', self._host, self._port)
def RunForever(self):
"""Serves this Server's application forever."""
raise NotImplementedError()
def CreateServer(self):
"""Returns a WSGIServer for self._app."""
raise NotImplementedError()
class VmRuntimeWSGIRefServer(VmRuntimeServer):
def CreateServer(self):
return simple_server.make_server(
self._host, self._port, self._app,
server_class=self._ThreadingWSGIServer)
def RunForever(self):
try:
self._server.serve_forever()
except:
logging.error('Could not start server on %s:%s.', self._host, self._port)
raise
class _ThreadingWSGIServer(SocketServer.ThreadingMixIn,
simple_server.WSGIServer):
daemon_threads = True
class VmRuntimeCherryPyServer(VmRuntimeServer):
def CreateServer(self):
from cherrypy.wsgiserver import wsgiserver2
wsgiserver2.socket_error_eintr.append(512)
return wsgiserver2.CherryPyWSGIServer(
(self._host, self._port), self._app,
numthreads=middlewares.MAX_CONCURRENT_REQUESTS,
request_queue_size=middlewares.MAX_CONCURRENT_REQUESTS)
def RunForever(self):
try:
self._server.start()
except:
logging.error('Could not start server on %s:%s.', self._host, self._port)
raise
class VmService(object):
"""Class to create and run the service."""
server_class = VmRuntimeWSGIRefServer
server_class = VmRuntimeCherryPyServer
def __init__(self, filename, host, port):
self.filename = filename
self.host = host
self.port = port
self.server = None
def CreateServer(self):
with open(self.filename) as stream:
appinfo_external = appinfo_includes.Parse(stream)
appengine_config = vmconfig.BuildVmAppengineEnvConfig()
vmstub.Register(vmstub.VMStub(appengine_config.default_ticket))
if 'googleclouddebugger' in sys.modules:
try:
googleclouddebugger.AttachDebugger()
except Exception as e:
logging.warn('Exception while initializing Cloud Debugger: %s',
traceback.format_exc(e))
try:
import appengine_config as user_appengine_config
except ImportError:
pass
app = meta_app.FullyWrappedApp(appinfo_external, appengine_config)
self.server = self.server_class(self.host, self.port, app,
appinfo_external)
logging.info('Configured server on %s:%s', self.host, self.port)
def StartServer(self):
assert self.server
self.server.RunForever()
def CreateAndRunService(config_filename):
"""Helper called from vmboot.main() to create and run the service."""
service = VmService(config_filename, LISTENING_HOST, HTTP_PORT)
service.CreateServer()
service.StartServer()
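# Usage sketch (assumption; vmboot.main() is expected to call this with the app's config path):
#   CreateAndRunService('/path/to/app.yaml')
# which parses the app info, wraps the user app, and serves it on 0.0.0.0:8080 forever.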
|
import os
import math
import numpy as np
import matplotlib as matplot
import matplotlib.pyplot as plt
from netCDF4 import Dataset
import csv
from wrf import (to_np, getvar, smooth2d, get_cartopy, cartopy_xlim,
cartopy_ylim, latlon_coords)
# List the colors that will be used for tracing the track.
colors = ['blue', 'orange', 'red', 'purple', 'brown', 'pink', 'gray', 'olive', 'cyan', 'black', 'green', 'gold', 'lightcoral', 'turquoise']
c = 0
mainpath = '/project/momen/meng/COAWST/results/WRF_VS_WRFSWAN_2/'
Hurricaneall = ['Dorian','Maria','Irma','Katrina','Lorenzo']
Real_Hurricane_Data = ['Dorian_Real_Track_Time_NOAA.csv',
'Maria_Real_Track_Time_NOAA.csv',
'Irma_Real_Track_Time_NOAA.csv',
'Katrina_Real_Track_Time_NOAA.csv',
'Lorenzo_Real_Track_Time_NOAA.csv']
# Hurricaneall = ['Dorian']
# Real_Hurricane_Data = ['Dorian_Real_Track_Time_NOAA.csv']
gridsize = ['8km','16km']
swansize = ['swgr8p0', 'swgr16p0']
prefix = 'WRFSWAN_NoTurb_swdt10_cpdt7200_'
Dirall = ['_swh8_swt14_A0p12B4p5C0P11',
'_swh8_swt14_A12B4p5C0P11',
'_swh8_swt14_A1200B4p5C0P11',
'_swh8_swt14_A120000B4p5C0P11']
outputpath = '/project/momen/meng/COAWST/results/WRF_VS_WRFSWAN_2/postprocessing_WRFONLY/0_Paper_figures/section2_change_pars_for_strong_winds/source_code_outputs_change_A/'
# This function returns a list of all wrf files in the directory.
def list_files(Dir, ncfiles):
for f in os.listdir(Dir):
if f.startswith('wrfout'):
ncfiles.append(f)
return (ncfiles)
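# Example (hypothetical directory contents): list_files('/some/run/dir', []) returns every
# filename in that directory starting with 'wrfout', e.g. ['wrfout_d01_2019-08-29_00:00:00', ...].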
for gk in range(len(gridsize)):
count1=0
for Hurricane in Hurricaneall:
rows=[]
#Initiate the lists that will contain the real data variables
Real_Times = []
Real_Wnd_Ints = []
Real_Long =[]
#Open the file that contains the real data and extract the necessary variables
print('Real track: '+outputpath+Real_Hurricane_Data[count1])
with open(outputpath+Real_Hurricane_Data[count1]) as f:
reader = csv.reader(f)
            next(reader)
row_header = next(reader)
#print (row_header)
for row in reader:
                YYYY = row[row_header.index('Time - year')]
                MM = row[row_header.index('Time - month')].zfill(2)
                DD = row[row_header.index('Time - day')].zfill(2)
                HR = row[row_header.index('Time - hour')].zfill(2)
                MN = row[row_header.index('Time - min')].zfill(2)
                Time = YYYY + '-' + MM + '-' + DD + '_' + HR + '_' + MN
Real_Wnd_Ints.append(float(row[row_header.index('Wind Speed(kt)')]))
Real_Times.append(Time)
print (Real_Wnd_Ints)
print (Real_Times)
rows.append(Real_Wnd_Ints)
count1=count1+1
for Dir in Dirall:
print('Current folder is: ')
Dir_local = mainpath+Hurricane+ '/' +gridsize[gk]+ '/' +prefix+swansize[gk]+Dir
print(Dir_local)
#row.append(Hurricane+Dir)
# Set the working space>
os.chdir(Dir_local)
# initiate the list that will contain all wrf files in Dir directory.
ncfiles = []
# Use the list_files function to list all the wrf files in the directory.
ncfiles = list_files(Dir_local, ncfiles)
ncfiles = sorted(ncfiles)
print (ncfiles)
# initiate the list that will contain the hurricane-track data.
row = []
# Identify the time step
Time_Step = 6
k = 0
# initiate the list that will contain the times.
Times = []
for tt in range(1):
for ncfile in ncfiles:
ncfile = Dataset(ncfile)
ttt = np.array(getvar(ncfile, "times", tt))
print('!!!!!!',ttt)
# Get U and V components of wind intensity at 10m of altitude.
U10_2D = np.array(getvar(ncfile, "U10", tt))
#print (U10_2D.shape)
V10_2D = np.array(getvar(ncfile, "V10", tt))
#print (V10_2D.shape)
slp_2D = np.array(getvar(ncfile, "slp", tt))
slp_2D = slp_2D.flatten()
# Reshape the U and V into a 1D array.
U10_1D = U10_2D.flatten()
#print (U10_1D.shape)
V10_1D = V10_2D.flatten()
#print (V10_1D.shape)
                    # Calculate the wind intensity at each point of the map
                    # (vectorised so that every element, including the last one, is covered).
                    WND_SPD_10 = np.sqrt(U10_1D**2 + V10_1D**2)
# Search for the maximum wind intensity at aspecific time step.
WND_SPD_10_max = np.amax(WND_SPD_10)
slp_min = np.amin(slp_2D)
# List the maximum wind intensity for all time steps.
row.append(WND_SPD_10_max)
# list all the time steps
Times.append(Time_Step*k)
k = k+1
print (row)
print (Times)
rows.append(row)
fields = [time for time in Times]
print (fields)
print (rows)
with open(outputpath+Hurricane+'_wind_intensity_'+gridsize[gk]+'.csv', 'w') as csvfile:
csvwriter = csv.writer(csvfile)
csvwriter.writerow(fields)
csvwriter.writerows(rows)
|
import os
import subprocess
class BaseDatabaseClient:
"""Encapsulate backend-specific methods for opening a client shell."""
# This should be a string representing the name of the executable
# (e.g., "psql"). Subclasses must override this.
executable_name = None
def __init__(self, connection):
# connection is an instance of BaseDatabaseWrapper.
self.connection = connection
@classmethod
def settings_to_cmd_args_env(cls, settings_dict, parameters):
raise NotImplementedError(
"subclasses of BaseDatabaseClient must provide a "
"settings_to_cmd_args_env() method or override a runshell()."
)
def runshell(self, parameters):
args, env = self.settings_to_cmd_args_env(
self.connection.settings_dict, parameters
)
env = {**os.environ, **env} if env else None
subprocess.run(args, env=env, check=True)
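# A minimal subclass sketch (illustrative only; not part of this module):
#
#     class SQLiteClientSketch(BaseDatabaseClient):
#         executable_name = "sqlite3"
#
#         @classmethod
#         def settings_to_cmd_args_env(cls, settings_dict, parameters):
#             args = [cls.executable_name, str(settings_dict["NAME"]), *parameters]
#             return args, None
#
# runshell() then merges any returned env into os.environ and runs the args via subprocess.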
|
import random
import shutil
import os
import re
import signal
from pathlib import Path
from antlr4 import *
from src.modules.Solver import Solver, SolverQueryResult, SolverResult
from src.modules.Statistic import Statistic
from config.config import crash_list, duplicate_list, ignore_list
from src.utils import random_string, plain, escape, in_list
from src.parsing.parse import *
from src.generators.TypeAwareOpMutation import TypeAwareOpMutation
from src.generators.SemanticFusion.SemanticFusion import SemanticFusion
class Fuzzer:
def __init__(self, args):
self.args = args
self.currentseeds = ""
self.runforever = True
self.statistic = Statistic()
self.generator = None
random.seed(a=args.seed)
if not self.args.quiet:
print("Yin-Yang is running:")
def admissible_seed_size(self, seed):
"""
Checks if seed size is below file_size_limit.
:returns: True if that is the case and False otherwise.
"""
seed_size_in_bytes = Path(seed).stat().st_size
if seed_size_in_bytes >= self.args.file_size_limit:
return False
return True
def run(self):
if (self.args.strategy == "opfuzz"):
seeds = self.args.PATH_TO_SEEDS
elif (self.args.strategy == "fusion"):
if len(self.args.PATH_TO_SEEDS) > 2:
seeds = [(a, b) for a in self.args.PATH_TO_SEEDS for b in self.args.PATH_TO_SEEDS]
elif len(self.args.PATH_TO_SEEDS) == 2:
seeds = [(self.args.PATH_TO_SEEDS[0],self.args.PATH_TO_SEEDS[1])]
else: assert(False)
else: assert(False)
while len(seeds) != 0:
if (self.args.strategy == "opfuzz"):
seed = seeds.pop(random.randrange(len(seeds)))
self.statistic.seeds += 1
if not self.admissible_seed_size(seed):
self.statistic.ignored += 1
continue
self.currentseeds = Path(seed).stem
script = parse_file(seed,silent=True)
                if not script:  # i.e. parsing was unsuccessful
self.statistic.ignored += 1
continue
self.generator = TypeAwareOpMutation(script, self.args)
elif (self.args.strategy == "fusion"):
seed = seeds.pop(random.randrange(len(seeds)))
seed1 = seed[0]
seed2 = seed[1]
self.statistic.seeds += 2
                if not self.admissible_seed_size(seed1) or not self.admissible_seed_size(seed2):
self.statistic.ignored +=2
continue
self.currentseeds = Path(seed1).stem + "-" + Path(seed2).stem
script1 = parse_file(seed1,silent=True)
script2 = parse_file(seed2,silent=True)
                if not script1 or not script2:  # i.e. parsing was unsuccessful
self.statistic.ignored +=2
continue
self.generator = SemanticFusion(script1, script2, self.args)
else: assert(False)
for _ in range(self.args.iterations):
if not self.args.quiet:
self.statistic.printbar()
formula, success, skip_seed = self.generator.generate()
if not success: continue
if not self.test(formula): break
self.statistic.mutants += 1
if skip_seed: break
def create_testbook(self, formula):
testbook = []
if self.args.compare_mode:
timeout_index = random.randint(0,1)
if not self.args.keep_mutants:
testcase = "%s/%s.smt2" % (self.args.scratchfolder, self.args.name)
else:
testcase = "%s/%s-%s-%s.smt2" % (self.args.scratchfolder,
escape(self.currentseeds),
self.args.name,random_string())
with open(testcase, 'w') as testcase_writer:
testcase_writer.write(formula.__str__())
for cli in self.args.SOLVER_CLIS:
if self.args.optfuzz != None:
if not self.args.keep_mutants:
testcase = "%s/%s-%s" % (self.args.scratchfolder,
plain(cli),
self.args.name)
else:
testcase = "%s/%s-%s-%s-%s.smt2" % (self.args.scratchfolder,
plain(cli),
escape(self.currentseeds),
self.args.name,random_string())
with open(testcase, 'w') as testcase_writer:
testcase_writer.write(self.args.optfuzz.generate(cli) + formula.__str__())
if self.args.compare_mode:
testbook.append((cli, testcase, self.args.compare_mode_timeouts[timeout_index % 2]))
timeout_index += 1
else:
testbook.append((cli,testcase))
return testbook
def grep_result(self, stdout):
"""
Grep the result from the stdout of a solver.
"""
result = SolverResult()
for line in stdout.splitlines():
if re.search("^unsat$", line, flags=re.MULTILINE):
result.append(SolverQueryResult.UNSAT)
elif re.search("^sat$", line, flags=re.MULTILINE):
result.append(SolverQueryResult.SAT)
elif re.search("^unknown$", line, flags=re.MULTILINE):
result.append(SolverQueryResult.UNKNOWN)
return result
def init_oracle(self):
"""
Initialize the oracle. For SemanticFusion the oracle is either sat or
unsat. For TypeAwareOpMutation the oracle is unknown
"""
if (self.args.oracle == "unknown"):
return SolverResult(SolverQueryResult.UNKNOWN)
elif (self.args.oracle == "sat"):
return SolverResult(SolverQueryResult.SAT)
elif (self.args.oracle == "unsat"):
return SolverResult(SolverQueryResult.UNSAT)
assert(False)
def test(self, formula):
"""
Tests the solvers with the formula returning "False" if the testing on
formula should be stopped and "True" otherwise.
"""
oracle = self.init_oracle()
testbook = self.create_testbook(formula)
reference = None
solver_timeouts = []
timed_out = []
for testitem in testbook:
timeout = self.args.timeout
if self.args.compare_mode:
timeout = testitem[2]
solver_cli, scratchfile = testitem[0], testitem[1]
solver_timeouts.append((solver_cli, timeout))
solver = Solver(solver_cli)
stdout, stderr, exitcode = solver.solve(scratchfile, timeout, debug=self.args.diagnose)
# (1) Detect crashes from a solver run including invalid models.
if self.in_crash_list(stdout, stderr):
# (2) Match against the duplicate list to avoid reporting duplicate bugs.
if not self.in_duplicate_list(stdout, stderr):
self.statistic.crashes += 1
self.report(scratchfile, "crash", solver_cli, stdout, stderr, random_string())
else:
self.statistic.duplicates += 1
return False # stop testing
else:
# (3a) Check whether the solver run produces errors, by checking
# the ignore list.
if self.in_ignore_list(stdout, stderr):
self.statistic.ignored += 1
continue # continue with next solver (4)
# (3b) Check whether the exit code is nonzero.
if exitcode != 0:
if exitcode == -signal.SIGSEGV or exitcode == 245: #segfault
self.statistic.crashes += 1
self.report(scratchfile, "segfault", solver_cli, stdout, stderr, random_string())
return False # stop testing
elif exitcode == 137: #timeout
self.statistic.timeout += 1
timed_out.append((solver_cli, timeout))
continue # continue with next solver (4)
elif exitcode == 127: #command not found
print("\nPlease check your solver command-line interfaces.")
continue # continue with next solver (4)
self.statistic.ignored+=1
# (3c) if there is no '^sat$' or '^unsat$' in the output
elif not re.search("^unsat$", stdout, flags=re.MULTILINE) and \
not re.search("^sat$", stdout, flags=re.MULTILINE) and \
not re.search("^unknown$", stdout, flags=re.MULTILINE):
self.statistic.ignored += 1
else:
# (5) grep for '^sat$', '^unsat$', and '^unknown$' to produce
# the output (including '^unknown$' to also deal with incremental
# benchmarks) for comparing with the oracle (semantic fusion) or
# with other non-erroneous solver runs (opfuzz) for soundness bugs
result = self.grep_result(stdout)
if oracle.equals(SolverQueryResult.UNKNOWN):
oracle = result
reference = (solver_cli, scratchfile, stdout, stderr)
# Comparing with the oracle (semantic fusion) or with other
# non-erroneous solver runs (opfuzz) for soundness bugs.
if not oracle.equals(result):
self.statistic.soundness += 1
self.report(scratchfile, "incorrect", solver_cli, stdout, stderr, random_string())
if reference:
# Produce a diff bug report for soundness bugs in
# the opfuzz case
                            ref_cli = reference[0]
                            ref_stdout = reference[2]
                            ref_stderr = reference[3]
self.report_diff(scratchfile, "incorrect",
ref_cli, ref_stdout, ref_stderr,
solver_cli, stdout, stderr,
random_string())
return False # stop testing
if self.args.compare_mode:
# if smt solver with longer timeout timed out and smt solver with shorter timeout did not, output formula with solver and timeout
shorter_to = min(solver_timeouts, key=lambda x: x[1])
longer_to = max(solver_timeouts, key=lambda x: x[1])
if longer_to in timed_out and not shorter_to in timed_out:
self.output_mutant(formula, shorter_to, longer_to)
self.statistic.performance_mutants += 1
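                # e.g. if the solver given the longer timeout (say 600s) timed out while the
                # solver given the shorter timeout (say 60s) did not, the mutant is written
                # out as a candidate performance regression.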
return True
def output_mutant(self, formula, shorter_to, longer_to):
testcase = "%s/%s-T_%i-VS-%s-T_%i-%s_%d.smt2" % (self.args.outputfolder,
plain(longer_to[0]),
longer_to[1],
plain(shorter_to[0]),
shorter_to[1],
self.args.name,
self.statistic.performance_mutants)
with open(testcase, 'w') as testcase_writer:
testcase_writer.write(formula.__str__())
def in_crash_list(self, stdout, stderr):
return in_list(stdout,stderr,crash_list)
def in_duplicate_list(self, stdout, stderr):
return in_list(stdout,stderr,duplicate_list)
def in_ignore_list(self,stdout, stderr):
return in_list(stdout,stderr,ignore_list)
def report(self, scratchfile, bugtype, cli, stdout, stderr, report_id):
plain_cli = plain(cli)
#format: <solver><{crash,wrong,invalid_model}><seed-name>.<random-string>.smt2
report = "%s/%s-%s-%s-%s.smt2" %(self.args.bugsfolder, bugtype, plain_cli, escape(self.currentseeds), report_id)
try: shutil.copy(scratchfile, report)
except Exception as e:
print(e)
exit(0)
logpath = "%s/%s-%s-%s-%s.output" %(self.args.bugsfolder, bugtype, plain_cli, escape(self.currentseeds), report_id)
with open(logpath, 'w') as log:
log.write("command: "+ cli+"\n")
log.write("stderr:\n")
log.write(stderr)
log.write("stdout:\n")
log.write(stdout)
return report_id
def report_diff(self, scratchfile, bugtype,
ref_cli, ref_stdout, ref_stderr,
sol_cli, sol_stdout, sol_stderr,
report_id):
plain_cli = plain(sol_cli)
#format: <solver><{crash,wrong,invalid_model}><seed-name>.<random-string>.smt2
report = "%s/%s-%s-%s-%s.smt2" %(self.args.bugsfolder, bugtype, plain_cli, escape(self.currentseeds), report_id)
try: shutil.copy(scratchfile, report)
except Exception as e:
print(e)
exit(0)
logpath = "%s/%s-%s-%s-%s.output" %(self.args.bugsfolder, bugtype, plain_cli, escape(self.currentseeds), report_id)
with open(logpath, 'w') as log:
log.write("*** REFERENCE \n")
log.write("command: "+ ref_cli+"\n")
log.write("stderr:\n")
log.write(ref_stderr)
log.write("stdout:\n")
log.write(ref_stdout)
log.write("\n\n*** INCORRECT \n")
log.write("command: "+ sol_cli+"\n")
log.write("stderr:\n")
log.write(sol_stderr)
log.write("stdout:\n")
log.write(sol_stdout)
return report_id
def __del__(self):
if not self.args.keep_mutants:
for file in os.listdir(self.args.scratchfolder):
if self.args.name in file:
os.remove(os.path.join(self.args.scratchfolder, file))
if not self.args.quiet:
self.statistic.printsum()
|
"""
This is a very first draft idea of a module system.
The general idea is to NOT use Djangos ``django.setup()`` which inherently uses the ENV Variable to find the path
to a settings.py and loads it.
Instead we use the ``settings.configure()`` method INSTEAD of ``django.setup()`` where you can pass in arbitrary settings.
From my understanding ``django.setup()`` BASICALLY does nothing else than to load the settings.py (from the ENV variable)
and then calls configure with all (ALL CAPS) Variables from the settings.py file.
"""
import importlib
import inspect
import logging
import os
import sys
from typing import Dict, Optional, List, Set, Union, Iterable
logger = logging.getLogger("modules")
MODULE_LOCATIONS = ["omap.modules.omap_module_registry"]
_modules = []
def modules():
return _modules
class ModuleConfig:
pass
class OmapModule(object):
"""
Very simple implementation of all properties of a "Module"
"""
def __init__(
self,
module_name,
module_version,
django_apps,
module_dependencies: Optional[List[str]] = None,
settings_entries: Optional[Dict] = None,
constance_config: Optional[Dict] = None,
pip_dependencies=None,
) -> None:
self.module_name = module_name
self.module_version = module_version
self.django_apps = django_apps
self.module_dependencies = module_dependencies
self.settings_entries = settings_entries
self.constance_config = constance_config
self.pip_dependencies = pip_dependencies
# BASE_DIR = Path(__file__).resolve().parent.parent
def collect_registry(module_locations: List[str]):
module_configs = {}
for module in module_locations:
logger.debug(f"Checking module {module}")
m = importlib.import_module(module)
module_definitions = []
for a, b in inspect.getmembers(m):
if inspect.isclass(b) and inspect.getmodule(b) == m:
if issubclass(b, ModuleConfig):
logger.debug(f"Adding class {b} to module definitions")
module_definitions.append(b)
if len(module_definitions) == 0:
logger.warning(f"No module definition found in module {module}")
continue
for definition in module_definitions:
logger.debug(
f"We found definition {definition.__name__} in module {module}"
)
# Get all necessary attributes
attributes = {
"module_name": None,
"module_version": None,
"django_apps": None,
"pip_dependencies": [],
"module_dependencies": [],
"settings_entries": {},
"constance_config": {},
}
module_dict = {}
for attr_name, default in attributes.items():
if not hasattr(definition, attr_name):
if default is not None:
module_dict[attr_name] = default
continue
else:
raise RuntimeError(f"Missing required attribute {attr_name}")
module_dict[attr_name] = getattr(definition, attr_name)
logger.debug(f"Module Dict: {module_dict}")
if module_dict["module_name"] in module_configs:
raise RuntimeError(
f"Duplicate Module Name found: {module_dict['name']}"
)
# Create Portal Module class from it
module_configs[module_dict["module_name"]] = OmapModule(**module_dict)
return module_configs
# This is a draft for a registry.
# In a real-world scenario this would be loaded from some manifest or cfg files or something?!
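# A hypothetical example of a definition collect_registry() would pick up from one
# of the MODULE_LOCATIONS modules (all names below are made up):
#
#   class ShopModule(ModuleConfig):
#       module_name = "shop"
#       module_version = "1.0.0"
#       django_apps = ["shop.apps.ShopConfig"]
#       module_dependencies = ["payments"]
#       settings_entries = {"SHOP_CURRENCY": "EUR"}
#       pip_dependencies = ["requests"]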
modules_registry = collect_registry(MODULE_LOCATIONS)
def resolve(modules: Union[str, List[str]]) -> Iterable[OmapModule]:
"""
This method takes one or more module names and looks up the modules in the registry above.
Then it checks if the modules have dependencies on other modules and, if so, adds them to the "context" as well.
It finally returns a complete list of all modules that have to be loaded
OR
ends with an exception
"""
unresolved = [modules] if isinstance(modules, str) else modules
if len(unresolved) == 0:
raise AssertionError("No module given to load, terminating!")
resolved = []
# resolve until all are resolved
while len(unresolved) > 0:
module_name = unresolved.pop()
logging.info(f"Checking resolution for {module_name}")
if any(m.module_name == module_name for m in resolved):
# already resolved via another dependency path
continue
logging.info(f"Resolving {module_name}")
module: OmapModule = modules_registry.get(module_name)
if not module:
raise RuntimeError(f"Unresolvable Module {module_name}")
resolved.append(module)
if module.module_dependencies:
logging.info(f"Found dependencies: {module.module_dependencies}")
unresolved.extend(module.module_dependencies)
logging.info(
f"Modules to load: {[m.module_name + ':' + m.module_version for m in resolved]}"
)
return resolved
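# Continuing the hypothetical example from above: with a registry containing
# "shop" (which depends on "payments") and "payments", resolve("shop") returns
# both OmapModule instances, while resolve("unknown") raises a RuntimeError.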
def install_modules(modules: Iterable[OmapModule]):
def install(package):
import subprocess
subprocess.check_call([sys.executable, "-m", "pip", "install", package])
logging.info("Check if there are module dependencies to install...")
for module in modules:
module: OmapModule
logging.info(f"Checking Module {module.module_name}:{module.module_version}")
if module.pip_dependencies:
logging.info(
f"Module {module.module_name}:{module.module_version} has dependencies"
)
for package in module.pip_dependencies:
logging.info(f"Start Installation of package {package}")
try:
install(package)
logging.info(f"Successfully installed {package}")
except Exception:
raise ModuleNotFoundError(
f"Unable to install package {package} for Module {module.module_name}:{module.module_version}"
)
logging.info(
f"Successfully installed all dependencies for {module.module_name}:{module.module_version}"
)
def configure_modules():
"""
This is the central hook.
It loads the name(s) of the Modules to load from the env variable OMAP_MODULES,
resolves them and then configures django accordingly.
So this is a somewhat "improved" version of the ``django.setup()`` function but serves the same purpose
"""
# Add them to the "installed" modules
logging.basicConfig(level="INFO")
from django.conf import settings
if settings.configured:
logging.info("Settings already configured, skipping...")
return
modules = get_resolved_modules()
global _modules
_modules = modules.copy()
apps: Set = set()
additional_settings = {}
constance_config = {}
# Install modules
install_modules(modules)
for module in modules:
module: OmapModule
apps.update(module.django_apps)
if module.settings_entries:
additional_settings.update(module.settings_entries)
if module.constance_config:
constance_config.update(module.constance_config)
from django.conf import settings
# merge constance configs, if there are multiple ones
if "CONSTANCE_CONFIG" in additional_settings:
constance_config.update(additional_settings["CONSTANCE_CONFIG"])
del additional_settings["CONSTANCE_CONFIG"]
base_settings = {}
if os.getenv("DJANGO_SETTINGS_MODULE"):
# We can use a base settings file
settings_module = os.getenv("DJANGO_SETTINGS_MODULE")
logging.info(f"Using '{settings_module}' as base settings")
mod = importlib.import_module(settings_module)
for setting in dir(mod):
if setting.isupper():
setting_value = getattr(mod, setting)
base_settings[setting] = setting_value
# We have to be careful with the merge especially with INSTALLED_APPS
merged_apps = (
base_settings.get("INSTALLED_APPS", [])
+ additional_settings.get("INSTALLED_APPS", [])
+ list(apps)
)
# Same goes with CONSTANCE Config
merged_constance = {**base_settings.get("CONSTANCE_CONFIG", {}), **constance_config}
merged = base_settings.copy()
merged.update(additional_settings)
# Handle special cases
merged.update({"INSTALLED_APPS": merged_apps})
merged.update({"CONSTANCE_CONFIG": merged_constance})
# TEST for dynamic urls
# merged.update({"ROOT_URLCONF": "omap.modules.module_urls"})
settings.configure(**merged)
# Now modify the INSTALLED_APPS for all apps that contain a urls.py file
# TODO do this after the apps are ready
# from django.apps import apps as django_apps
#
# for app in django_apps.get_app_configs():
# if not hasattr(app, "url_prefix"):
# urls_path = app.module.__name__ + ".urls"
# try:
# mod = importlib.import_module(urls_path)
# except ModuleNotFoundError:
# logging.debug(f"No url module found under {urls_path}", exc_info=True)
# continue
# # We can/should add it
# setattr(app, "url_prefix", app.name)
def get_resolved_modules(module_names=None):
if module_names:
modules = module_names
else:
# Read from ENV Variable
modules = os.getenv("OMAP_MODULES")
if not modules:
# TODO do we need this?
# raise RuntimeError(
# "No Modules given, please set the module to env varibale OMAP_MODULES"
# )
return []
module_list = modules.split(",")
modules = resolve(module_list)
return modules
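# Sketched entry point (an assumption: the import path of this module and the
# module names are made up, adjust them to wherever this file actually lives):
#
#   import os
#   os.environ.setdefault("OMAP_MODULES", "shop,payments")
#   from omap.modules.module_system import configure_modules
#   configure_modules()
#   # ... only import code that touches Django models/apps after this point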
|
class PongConfig:
def __init__(self):
self.graphics = PongGraphicsConfig()
self.fps_interval = 0.5
self.debug = DebugOnConfig()
self.win_screen_duration = 2
self.win_screen_times = (0.8, 1.6)
self.final_screen_duration = 2
self.final_screen_times = (1, 6)
self.cam = 0
self.goals_to_win = 3
self.ready_state_duration = 3
self.timeout_ready_state_duration = 1
self.face_missing_timeout = 1.0
self.max_ball_speed = 320
class PongGraphicsConfig:
def __init__(self):
self.monitor_size = (1280, 720)
self.face_border_thickness = 10
self.ball_border_thickness = 3
self.middle_field_brightness = 0.3
self.win_screen_brightness = 0.4
self.middle_field_blur = 10
self.target_cam_fps = 25
self.fullscreen = True
self.camera_insets = (0, 0)
self.goal_font_size = 50
self.final_screen_font_size = 100
self.color_face_left_border = (0.0, 0.0, 0.0)
self.color_face_right_border = (0.0, 0.0, 0.0)
self.color_ball_border = (0.0, 0.0, 0.0)
self.color_face_left = (0.5, 1.0, 0.5)
self.color_face_right = (0.5, 0.5, 1.0)
self.color_ball = (1.0, 0.5, 0.5)
self.face_blur = (7, 7)
self.ball_blur = (5, 5)
class DebugOnConfig:
def __init__(self):
self.face_detector_debug = False
CONFIG = PongConfig()
|
# -*- coding: utf-8 -*-
"""
This module provides the utilities used by the requester.
"""
def make_host(headers, dst_ip):
if "Host" in headers:
return headers["Host"]
elif "host" in headers:
return headers["host"]
else:
return dst_ip
def make_request_url(host, port, uri):
if "http://" in host or "https://" in host:
return "%s%s" % (host, uri)
if port == 443:
return "https://%s%s" % (host, uri)
return "http://%s%s" % (host, uri)
def make_dumy_body(byte):
if byte is None or byte <= 0:
return ""
return "\x00" * byte
def make_ellipsis(text, max_len=1000):
if max_len <= 0 or len(text) < max_len:
return text
return text[:max_len] + "\n(ellipsized...)"
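# Illustrative calls (derived from the functions above, not used anywhere):
#
#   make_host({"Host": "example.com"}, "10.0.0.1")        -> "example.com"
#   make_host({}, "10.0.0.1")                             -> "10.0.0.1"
#   make_request_url("example.com", 443, "/index.html")   -> "https://example.com/index.html"
#   make_request_url("example.com", 8080, "/index.html")  -> "http://example.com/index.html"
#   make_dumy_body(4)                                      -> "\x00\x00\x00\x00"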
|
from pygments import token
from pygments.lexer import RegexLexer, words
KEYWORDS = [
"func",
"struct",
"namespace",
"end",
"call",
"ret",
"jmp",
"if",
"let",
"const",
"import",
"from",
"as",
"abs",
"rel",
"static_assert",
"local",
"tempvar",
"felt",
"return",
"assert",
"member",
"cast",
"else",
"alloc_locals",
"with",
"with_attr",
"nondet",
]
class CairoLexer(RegexLexer):
name = "cairo"
tokens = {
"root": [
(words(KEYWORDS, prefix=r"\b", suffix=r"\b"), token.Keyword),
(words(("SIZEOF_LOCALS", "SIZE"), prefix=r"\b", suffix=r"\b"), token.Literal),
(r"%builtins|%lang", token.Keyword),
(words(("ap", "fp"), prefix=r"\b", suffix=r"\b"), token.Name.Entity),
(r"!=|->", token.Operator),
(r"[+\-*/&]", token.Operator),
(r"[:;,.=\[\]\(\)\{\}]", token.Punctuation),
(r"-?[0-9]+", token.Number),
(r"[a-zA-Z_][a-zA-Z_0-9]*", token.Text),
(r"#.*", token.Comment),
(r"%\{(.|\n)*?%\}", token.Text),
(r"%\[(.|\n)*?%\]", token.Text),
(r"@\w+", token.Keyword),
(r"<[a-zA-Z0-9 _\-]+>", token.Comment),
(r" ", token.Text),
]
}
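# A minimal usage sketch (illustration only; the snippet and the formatter choice
# are arbitrary):
#
#   from pygments import highlight
#   from pygments.formatters import TerminalFormatter
#   src = "func main():\n    let x = 3 + 4\n    ret\nend"
#   print(highlight(src, CairoLexer(), TerminalFormatter()))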
|
"""
DQNAgent based on work by RLCode team - Copyright (c) 2017 RLCode (MIT Licence)
https://github.com/rlcode/reinforcement-learning
Tailored to the TUSP by Evertjan Peer
KBH example
30000_instances: we generated 30000 instances of a specific size for the Binckhorst.
Changes to V2:
- training on instances of different sizes.
"""
import gc
import random
import numpy as np
import tensorflow as tf
from collections import deque
from keras.models import Sequential
from keras.optimizers import RMSprop
from keras.layers import Dense, Flatten
from keras.layers.convolutional import Conv2D
from keras import backend as K
from kbh_yard_b2b_relocation_expensive_reloc import KBH_Env # This is the environment of the shunting yard
# import convert_event_list as convert
import datetime
import pandas as pd
import time
# import instance generator
from data_retrieval_30000_14151617 import INSTANCEProvider
#visualize learning
visualization = False
if visualization:
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
EPISODES = 250000 # 60000# 50000 #used to be 500000 (note that this should be long enough to at least cover the exploration_steps)
class DQNAgent:
def __init__(self, yard, load):
self.load_model = load
# environment settings
self.state_size = yard.get_state().shape # this is specific to the state representation you choose for the problem.
self.action_size = yard.nr_tracks # this as well
# set epsilon
self.epsilon = 1.0
self.epsilon_start, self.epsilon_end = 1.0, 0.1 # start fully random, end with 10% random action selection.
self.exploration_steps = 150000 # 1000000. #this determines how long one explores (nr. steps from 1 to 0.1 epsilon)
self.epsilon_decay_step = (self.epsilon_start - self.epsilon_end) \
/ self.exploration_steps
# train params
self.batch_size = 32 # batch size used to update the neural nets from experience replay
self.train_start = 10000 # first collect some experiences using random move selection
self.update_target_rate = 10000 # after every 10000 actions update the target model
self.discount_factor = 0.99
self.memory = deque(maxlen=125000) # this is the memory the DQN samples experiences from for Experience Replay
# build model
self.model = self.build_model() # two models are maintained: an evaluation model and a target model.
self.target_model = self.build_model()
self.update_target_model()
self.optimizer = self.optimizer() # here a special type of optimizer is used; which optimizer works best in what situation is an open question
# stuff for tensorboard.
self.sess = tf.InteractiveSession()
K.set_session(self.sess)
self.avg_q_max, self.avg_loss = 0, 0
self.summary_placeholders, self.update_ops, self.summary_op = \
self.setup_summary()
time_stamp_dir = int(time.time())
print(time_stamp_dir)
self.summary_dir_name = 'summary/dqn_dummy/' + str(time_stamp_dir)
self.summary_writer = tf.summary.FileWriter(
self.summary_dir_name, self.sess.graph)
self.sess.run(tf.global_variables_initializer())
if self.load_model:
self.model.load_weights("./save/dummy_problem_weights.h5")
print('model loaded')
# This is a custom optimizer.
# Need to test how this one performs w.r.t. a standard MSE loss with Adam.
# If the error is in [-1, 1], the cost is quadratic in the error,
# but outside that interval the cost is linear in the error.
def optimizer(self):
a = K.placeholder(shape=(None,), dtype='int32')
y = K.placeholder(shape=(None,), dtype='float32')
py_x = self.model.output
a_one_hot = K.one_hot(a, self.action_size)
q_value = K.sum(py_x * a_one_hot, axis=1)
error = K.abs(y - q_value)
quadratic_part = K.clip(error, 0.0, 1.0)
linear_part = error - quadratic_part
loss = K.mean(0.5 * K.square(quadratic_part) + linear_part)
optimizer = RMSprop(lr=0.00025, epsilon=0.01) # was lr = 0.00025
updates = optimizer.get_updates(self.model.trainable_weights, [], loss)
train = K.function([self.model.input, a, y], [loss], updates=updates)
return train
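# Worked example of the clipped loss above: an absolute error of 0.4 costs
# 0.5 * 0.4**2 = 0.08 (quadratic), while an error of 2.5 costs
# 0.5 * 1.0**2 + 1.5 = 2.0, i.e. the cost grows only linearly beyond an error of 1.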
# build model to approx the Q-table with a CNN
# in: state, out: Qvalue of each state-action pair.
def build_model(self):
# This is the original DQN network used in the Atari paper.
# I use a simpler one for this model since I have a simpler state space here.
#
# model = Sequential()
# model.add(Conv2D(32, (8, 8), strides=(4, 4), activation='relu',
# input_shape=self.state_size))
# model.add(Conv2D(64, (4, 4), strides=(2, 2), activation='relu'))
# model.add(Conv2D(64, (3, 3), strides=(1, 1), activation='relu'))
# model.add(Flatten())
# model.add(Dense(512, activation='relu'))
# model.add(Dense(self.action_size))
# model.summary()
# return model
model = Sequential()
model.add(Conv2D(32, (4, 4), padding='same', activation='relu',
input_shape=self.state_size))
model.add(Conv2D(64, (2, 2), padding='same', activation='relu'))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dense(self.action_size))
model.summary()
return model
# Update the target model to match the eval. net every some steps.
def update_target_model(self):
print('target_model_updated')
self.target_model.set_weights(self.model.get_weights())
# epsilon-greedy policy used to select next action.
# with probability eps we pick a random action
# with probability 1-eps we pick the best action according to the CNN for that state.
def get_action(self, history):
if np.random.rand() <= self.epsilon:
return random.randrange(self.action_size)
else:
q_value = self.model.predict(history)
return np.argmax(q_value[0])
def get_random_action(self):
return random.randrange(self.action_size)
# We save a <state, action, reward, next state> sample to the replay memory
# this replay memory is later used to random sample experiences from to update the NN.
def replay_memory(self, history, action, reward, next_history, done):
self.memory.append((history, action, reward, next_history, done))
# Pick batch_size samples randomly from the memory
# note that standard DQN samples uniformly at random
# In later versions 'Prioritized Replay' could be implemented here to
# prioritize some experiences over others based on how surprising they are
def train_replay(self):
if len(self.memory) < self.train_start:
return # do nothing when we are still in the 'warm up' period.
if self.epsilon > self.epsilon_end:
self.epsilon -= self.epsilon_decay_step # here is where we decay epsilon.
mini_batch = random.sample(self.memory, self.batch_size) # sample randomly from the memory
history = np.zeros((self.batch_size, self.state_size[0],
self.state_size[1], self.state_size[2]))
next_history = np.zeros((self.batch_size, self.state_size[0],
self.state_size[1], self.state_size[2]))
target = np.zeros((self.batch_size,))
action, reward, done = [], [], []
for i in range(self.batch_size):
history[i] = mini_batch[i][0]
next_history[i] = mini_batch[i][3]
action.append(mini_batch[i][1])
reward.append(mini_batch[i][2])
done.append(mini_batch[i][4])
target_value = self.target_model.predict(next_history)
# this is very much like Q-learning:
# we approximate the state-action value with the direct reward + the discounted max Q value at the next state
# we compute this using the target model, which is frozen for several training steps to keep the targets stable.
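# i.e. target = r                                              if the episode ended here
#      target = r + discount_factor * max_a' Q_target(s', a')  otherwise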
for i in range(self.batch_size):
if done[i]:
target[i] = reward[i]
else:
target[i] = reward[i] + self.discount_factor * \
np.amax(target_value[i])
# train the model based on this new data of states, actions and new targets.
loss = self.optimizer([history, action, target])
self.avg_loss += loss[0]
def save_model(self, name):
self.model.save_weights(name)
# This is for tensorboard.
def setup_summary(self):
episode_total_reward = tf.Variable(0.)
episode_avg_max_q = tf.Variable(0.)
episode_duration = tf.Variable(0.)
episode_avg_loss = tf.Variable(0.)
episode_start_espilon = tf.Variable(0.)
tf.summary.scalar('Total_Reward/Episode', episode_total_reward)
tf.summary.scalar('Average_Max_Q/Episode', episode_avg_max_q)
tf.summary.scalar('Duration/Episode', episode_duration)
tf.summary.scalar('Average_Loss/Episode', episode_avg_loss)
tf.summary.scalar('End_Epsilon/Episode', episode_start_espilon)
summary_vars = [episode_total_reward, episode_avg_max_q,
episode_duration, episode_avg_loss, episode_start_espilon]
summary_placeholders = [tf.placeholder(tf.float32) for _ in
range(len(summary_vars))]
update_ops = [summary_vars[i].assign(summary_placeholders[i]) for i in
range(len(summary_vars))]
summary_op = tf.summary.merge_all()
return summary_placeholders, update_ops, summary_op
# this function returns random colors for visualisation of learning.
def rand_cmap(nlabels, type='soft', first_color_black=True, last_color_black=False):
# Generate soft pastel colors, by limiting the RGB spectrum
if type == 'soft':
low = 0.6
high = 0.95
randRGBcolors = [(np.random.uniform(low=low, high=high),
np.random.uniform(low=low, high=high),
np.random.uniform(low=low, high=high)) for i in range(nlabels)]
if first_color_black:
randRGBcolors[0] = [0, 0, 0]
if last_color_black:
randRGBcolors[-1] = [0, 0, 0]
random_colormap = LinearSegmentedColormap.from_list('new_map', randRGBcolors, N=nlabels)
return random_colormap
# This is a new main loop
if __name__ == "__main__":
instance_prov = INSTANCEProvider()
start_time = datetime.datetime.now()
print('start time: ', start_time)
printcounter = 0
scores, episodes, global_step = [], [], 0
yrd = KBH_Env() # Create yard
agent = DQNAgent(yrd, False) # Create agent
if visualization:
#visualize learning
new_cmap = rand_cmap(200, type='soft', first_color_black=True, last_color_black=False)
for episode in range(EPISODES):
# this event list contains arrival, relocation_opportunity and departure events.
event_list = instance_prov.get_random_instance()
steps, t, total_t, score= len(event_list), 0, 0, 0
printcounter += 1
state = yrd.reset(event_list) # get first observation
history = np.float32(np.reshape(state, (1, yrd.shape[0], yrd.shape[1], yrd.shape[2]))) # reshape state.
done, busy_relocating = False, False
if visualization:
yrd.show_state(history, new_cmap)
while not done:
attempt = 1
backup_yard_layout, backup_arr_lookahead, backup_dep_lookahead, backup_arr_back, backup_dep_back, \
backup_tracks, backup_tracks_used_length = yrd.backup_state_elements()
backup_event_list = event_list.copy()
while attempt <= 3:
if attempt == 1:
action = agent.get_action(history)
else:
yrd.set_state(backup_yard_layout, backup_arr_lookahead, backup_dep_lookahead, backup_arr_back,
backup_dep_back, backup_tracks, backup_tracks_used_length) # roll back state
event_list = backup_event_list.copy() # roll back event list
action = agent.get_random_action()
# NN outputs [0,8], yard takes [1,9]
# based on that action now let environment go to new state
event = event_list.iloc[0]
# check if after this we are done...
done_ = True if len(event_list) == 1 else False # then there is no next event
if done_:
print("Reached the end of a problem!")
if busy_relocating:
# here we do not drop an event from the event list.
coming_arrivals = event_list.loc[event_list['event_type'] == 'arrival'].reset_index(drop=True)
coming_departures = event_list.loc[event_list['event_type'] == 'departure'].reset_index(drop=True)
next_state, reward, done = yrd.reloc_destination_step(event, event_list, action+1, coming_arrivals, coming_departures, done_)
busy_relocating = False
else:
# These operations below are expensive: maybe just use indexing.
event_list.drop(event_list.index[:1], inplace=True)
coming_arrivals = event_list.loc[event_list['event_type'] == 'arrival'].reset_index(drop=True)
coming_departures = event_list.loc[event_list['event_type'] == 'departure'].reset_index(drop=True)
# do step
next_state, reward, done = yrd.step(action+1, coming_arrivals, coming_departures, event, event_list, done_)
busy_relocating = True if reward == -0.99 else False
history_ = np.float32(np.reshape(next_state, (1, yrd.shape[0], yrd.shape[1], yrd.shape[2])))
if visualization:
yrd.show_state(history_, new_cmap)
print('action = ', action+1)
print('reward = ', reward)
agent.avg_q_max += np.amax(agent.model.predict(history)[0]) # log q_max for tensorboard.
score += reward # log direct reward of action
# We save a <state, action, reward, next state> sample to the replay memory
agent.replay_memory(history, action, reward, history_, done)
agent.train_replay() # train model (every step) on random batch.
if global_step % agent.update_target_rate == 0:
agent.update_target_model() # every some steps, we update the target model with model
# if we made a mistake we can do another attempt; if we did the right thing just continue
# at the final step this means we still try 3 times,
# so we collect some unnecessary data there, since being 'done' at the end is good (not bad)
if done:
attempt += 1 #
else:
break
# now max three attempts have been done, go to the next state.
history = history_ # next state now becomes the current state.
t += 1 # next step in this episode
total_t += attempt
global_step += 1
if done: # based on what the environment returns.
if global_step > agent.train_start: # log about this episode.
stats = [score, agent.avg_q_max / float(total_t), t,
agent.avg_loss / float(total_t), agent.epsilon]
for i in range(len(stats)):
agent.sess.run(agent.update_ops[i], feed_dict={
agent.summary_placeholders[i]: float(stats[i])
})
summary_str = agent.sess.run(agent.summary_op)
agent.summary_writer.add_summary(summary_str, episode + 1)
if printcounter == 1000: # print every 1000 episodes
print(datetime.datetime.now() - start_time)
print(datetime.datetime.now())
printcounter = 0
print("episode:", episode, " score:", score, " memory length:",
len(agent.memory), " epsilon:", agent.epsilon,
" global_step:", global_step, " average_q:",
agent.avg_q_max / float(t), " average loss:",
agent.avg_loss / float(t))
agent.model.save_weights(agent.summary_dir_name + "/weights.h5")
agent.avg_q_max, agent.avg_loss = 0, 0 # reset for next episode.
gc.collect()
break # break the while loop to end the episode when t <= len(train_arrivals)
agent.model.save_weights(agent.summary_dir_name + "/weights.h5")
|
# Copyright (c) 2004 Python Software Foundation.
# All rights reserved.
# Written by Eric Price <eprice at tjhsst.edu>
# and Facundo Batista <facundo at taniquetil.com.ar>
# and Raymond Hettinger <python at rcn.com>
# and Aahz <aahz at pobox.com>
# and Tim Peters
# This module is currently Py2.3 compatible and should be kept that way
# unless a major compelling advantage arises. IOW, 2.3 compatibility is
# strongly preferred, but not guaranteed.
# Also, this module should be kept in sync with the latest updates of
# the IBM specification as it evolves. Those updates will be treated
# as bug fixes (deviation from the spec is a compatibility, usability
# bug) and will be backported. At this point the spec is stabilizing
# and the updates are becoming fewer, smaller, and less significant.
"""
This is a Py2.3 implementation of decimal floating point arithmetic based on
the General Decimal Arithmetic Specification:
http://speleotrove.com/decimal/decarith.html
and IEEE standard 854-1987:
www.cs.berkeley.edu/~ejr/projects/754/private/drafts/854-1987/dir.html
Decimal floating point has finite precision with arbitrarily large bounds.
The purpose of this module is to support arithmetic using familiar
"schoolhouse" rules and to avoid some of the tricky representation
issues associated with binary floating point. The package is especially
useful for financial applications or for contexts where users have
expectations that are at odds with binary floating point (for instance,
in binary floating point, 1.00 % 0.1 gives 0.09999999999999995 instead
of the expected Decimal('0.00') returned by decimal floating point).
Here are some examples of using the decimal module:
>>> from decimal import *
>>> setcontext(ExtendedContext)
>>> Decimal(0)
Decimal('0')
>>> Decimal('1')
Decimal('1')
>>> Decimal('-.0123')
Decimal('-0.0123')
>>> Decimal(123456)
Decimal('123456')
>>> Decimal('123.45e12345678901234567890')
Decimal('1.2345E+12345678901234567892')
>>> Decimal('1.33') + Decimal('1.27')
Decimal('2.60')
>>> Decimal('12.34') + Decimal('3.87') - Decimal('18.41')
Decimal('-2.20')
>>> dig = Decimal(1)
>>> print dig / Decimal(3)
0.333333333
>>> getcontext().prec = 18
>>> print dig / Decimal(3)
0.333333333333333333
>>> print dig.sqrt()
1
>>> print Decimal(3).sqrt()
1.73205080756887729
>>> print Decimal(3) ** 123
4.85192780976896427E+58
>>> inf = Decimal(1) / Decimal(0)
>>> print inf
Infinity
>>> neginf = Decimal(-1) / Decimal(0)
>>> print neginf
-Infinity
>>> print neginf + inf
NaN
>>> print neginf * inf
-Infinity
>>> print dig / 0
Infinity
>>> getcontext().traps[DivisionByZero] = 1
>>> print dig / 0
Traceback (most recent call last):
...
...
...
DivisionByZero: x / 0
>>> c = Context()
>>> c.traps[InvalidOperation] = 0
>>> print c.flags[InvalidOperation]
0
>>> c.divide(Decimal(0), Decimal(0))
Decimal('NaN')
>>> c.traps[InvalidOperation] = 1
>>> print c.flags[InvalidOperation]
1
>>> c.flags[InvalidOperation] = 0
>>> print c.flags[InvalidOperation]
0
>>> print c.divide(Decimal(0), Decimal(0))
Traceback (most recent call last):
...
...
...
InvalidOperation: 0 / 0
>>> print c.flags[InvalidOperation]
1
>>> c.flags[InvalidOperation] = 0
>>> c.traps[InvalidOperation] = 0
>>> print c.divide(Decimal(0), Decimal(0))
NaN
>>> print c.flags[InvalidOperation]
1
>>>
"""
__all__ = [
# Two major classes
'Decimal', 'Context',
# Contexts
'DefaultContext', 'BasicContext', 'ExtendedContext',
# Exceptions
'DecimalException', 'Clamped', 'InvalidOperation', 'DivisionByZero',
'Inexact', 'Rounded', 'Subnormal', 'Overflow', 'Underflow',
# Constants for use in setting up contexts
'ROUND_DOWN', 'ROUND_HALF_UP', 'ROUND_HALF_EVEN', 'ROUND_CEILING',
'ROUND_FLOOR', 'ROUND_UP', 'ROUND_HALF_DOWN', 'ROUND_05UP',
# Functions for manipulating contexts
'setcontext', 'getcontext', 'localcontext'
]
__version__ = '1.70' # Highest version of the spec this complies with
import copy as _copy
import math as _math
import numbers as _numbers
try:
from collections import namedtuple as _namedtuple
DecimalTuple = _namedtuple('DecimalTuple', 'sign digits exponent')
except ImportError:
DecimalTuple = lambda *args: args
# Rounding
ROUND_DOWN = 'ROUND_DOWN'
ROUND_HALF_UP = 'ROUND_HALF_UP'
ROUND_HALF_EVEN = 'ROUND_HALF_EVEN'
ROUND_CEILING = 'ROUND_CEILING'
ROUND_FLOOR = 'ROUND_FLOOR'
ROUND_UP = 'ROUND_UP'
ROUND_HALF_DOWN = 'ROUND_HALF_DOWN'
ROUND_05UP = 'ROUND_05UP'
# Errors
class DecimalException(ArithmeticError):
"""Base exception class.
Used exceptions derive from this.
If an exception derives from another exception besides this (such as
Underflow (Inexact, Rounded, Subnormal)), that indicates that it is only
called if the others are present. This isn't actually used for
anything, though.
handle -- Called when context._raise_error is called and the
trap_enabler is not set. First argument is self, second is the
context. More arguments can be given, those being after
the explanation in _raise_error (For example,
context._raise_error(NewError, '(-x)!', self._sign) would
call NewError().handle(context, self._sign).)
To define a new exception, it should be sufficient to have it derive
from DecimalException.
"""
def handle(self, context, *args):
pass
class Clamped(DecimalException):
"""Exponent of a 0 changed to fit bounds.
This occurs and signals clamped if the exponent of a result has been
altered in order to fit the constraints of a specific concrete
representation. This may occur when the exponent of a zero result would
be outside the bounds of a representation, or when a large normal
number would have an encoded exponent that cannot be represented. In
this latter case, the exponent is reduced to fit and the corresponding
number of zero digits are appended to the coefficient ("fold-down").
"""
class InvalidOperation(DecimalException):
"""An invalid operation was performed.
Various bad things cause this:
Something creates a signaling NaN
-INF + INF
0 * (+-)INF
(+-)INF / (+-)INF
x % 0
(+-)INF % x
x._rescale( non-integer )
sqrt(-x) , x > 0
0 ** 0
x ** (non-integer)
x ** (+-)INF
An operand is invalid
The result of the operation after these is a quiet positive NaN,
except when the cause is a signaling NaN, in which case the result is
also a quiet NaN, but with the original sign, and an optional
diagnostic information.
"""
def handle(self, context, *args):
if args:
ans = _dec_from_triple(args[0]._sign, args[0]._int, 'n', True)
return ans._fix_nan(context)
return _NaN
class ConversionSyntax(InvalidOperation):
"""Trying to convert badly formed string.
This occurs and signals invalid-operation if an string is being
converted to a number and it does not conform to the numeric string
syntax. The result is [0,qNaN].
"""
def handle(self, context, *args):
return _NaN
class DivisionByZero(DecimalException, ZeroDivisionError):
"""Division by 0.
This occurs and signals division-by-zero if division of a finite number
by zero was attempted (during a divide-integer or divide operation, or a
power operation with negative right-hand operand), and the dividend was
not zero.
The result of the operation is [sign,inf], where sign is the exclusive
or of the signs of the operands for divide, or is 1 for an odd power of
-0, for power.
"""
def handle(self, context, sign, *args):
return _SignedInfinity[sign]
class DivisionImpossible(InvalidOperation):
"""Cannot perform the division adequately.
This occurs and signals invalid-operation if the integer result of a
divide-integer or remainder operation had too many digits (would be
longer than precision). The result is [0,qNaN].
"""
def handle(self, context, *args):
return _NaN
class DivisionUndefined(InvalidOperation, ZeroDivisionError):
"""Undefined result of division.
This occurs and signals invalid-operation if division by zero was
attempted (during a divide-integer, divide, or remainder operation), and
the dividend is also zero. The result is [0,qNaN].
"""
def handle(self, context, *args):
return _NaN
class Inexact(DecimalException):
"""Had to round, losing information.
This occurs and signals inexact whenever the result of an operation is
not exact (that is, it needed to be rounded and any discarded digits
were non-zero), or if an overflow or underflow condition occurs. The
result in all cases is unchanged.
The inexact signal may be tested (or trapped) to determine if a given
operation (or sequence of operations) was inexact.
"""
class InvalidContext(InvalidOperation):
"""Invalid context. Unknown rounding, for example.
This occurs and signals invalid-operation if an invalid context was
detected during an operation. This can occur if contexts are not checked
on creation and either the precision exceeds the capability of the
underlying concrete representation or an unknown or unsupported rounding
was specified. These aspects of the context need only be checked when
the values are required to be used. The result is [0,qNaN].
"""
def handle(self, context, *args):
return _NaN
class Rounded(DecimalException):
"""Number got rounded (not necessarily changed during rounding).
This occurs and signals rounded whenever the result of an operation is
rounded (that is, some zero or non-zero digits were discarded from the
coefficient), or if an overflow or underflow condition occurs. The
result in all cases is unchanged.
The rounded signal may be tested (or trapped) to determine if a given
operation (or sequence of operations) caused a loss of precision.
"""
class Subnormal(DecimalException):
"""Exponent < Emin before rounding.
This occurs and signals subnormal whenever the result of a conversion or
operation is subnormal (that is, its adjusted exponent is less than
Emin, before any rounding). The result in all cases is unchanged.
The subnormal signal may be tested (or trapped) to determine if a given
or operation (or sequence of operations) yielded a subnormal result.
"""
class Overflow(Inexact, Rounded):
"""Numerical overflow.
This occurs and signals overflow if the adjusted exponent of a result
(from a conversion or from an operation that is not an attempt to divide
by zero), after rounding, would be greater than the largest value that
can be handled by the implementation (the value Emax).
The result depends on the rounding mode:
For round-half-up and round-half-even (and for round-half-down and
round-up, if implemented), the result of the operation is [sign,inf],
where sign is the sign of the intermediate result. For round-down, the
result is the largest finite number that can be represented in the
current precision, with the sign of the intermediate result. For
round-ceiling, the result is the same as for round-down if the sign of
the intermediate result is 1, or is [0,inf] otherwise. For round-floor,
the result is the same as for round-down if the sign of the intermediate
result is 0, or is [1,inf] otherwise. In all cases, Inexact and Rounded
will also be raised.
"""
def handle(self, context, sign, *args):
if context.rounding in (ROUND_HALF_UP, ROUND_HALF_EVEN,
ROUND_HALF_DOWN, ROUND_UP):
return _SignedInfinity[sign]
if sign == 0:
if context.rounding == ROUND_CEILING:
return _SignedInfinity[sign]
return _dec_from_triple(sign, '9'*context.prec,
context.Emax-context.prec+1)
if sign == 1:
if context.rounding == ROUND_FLOOR:
return _SignedInfinity[sign]
return _dec_from_triple(sign, '9'*context.prec,
context.Emax-context.prec+1)
class Underflow(Inexact, Rounded, Subnormal):
"""Numerical underflow with result rounded to 0.
This occurs and signals underflow if a result is inexact and the
adjusted exponent of the result would be smaller (more negative) than
the smallest value that can be handled by the implementation (the value
Emin). That is, the result is both inexact and subnormal.
The result after an underflow will be a subnormal number rounded, if
necessary, so that its exponent is not less than Etiny. This may result
in 0 with the sign of the intermediate result and an exponent of Etiny.
In all cases, Inexact, Rounded, and Subnormal will also be raised.
"""
# List of public traps and flags
_signals = [Clamped, DivisionByZero, Inexact, Overflow, Rounded,
Underflow, InvalidOperation, Subnormal]
# Map conditions (per the spec) to signals
_condition_map = {ConversionSyntax:InvalidOperation,
DivisionImpossible:InvalidOperation,
DivisionUndefined:InvalidOperation,
InvalidContext:InvalidOperation}
##### Context Functions ##################################################
# The getcontext() and setcontext() functions manage access to a thread-local
# current context. Py2.4 offers direct support for thread locals. If that
# is not available, use threading.currentThread() which is slower but will
# work for older Pythons. If threads are not part of the build, create a
# mock threading object with threading.local() returning the module namespace.
try:
import threading
except ImportError:
# Python was compiled without threads; create a mock object instead
import sys
class MockThreading(object):
def local(self, sys=sys):
return sys.modules[__name__]
threading = MockThreading()
del sys, MockThreading
try:
threading.local
except AttributeError:
# To fix reloading, force it to create a new context
# Old contexts have different exceptions in their dicts, making problems.
if hasattr(threading.currentThread(), '__decimal_context__'):
del threading.currentThread().__decimal_context__
def setcontext(context):
"""Set this thread's context to context."""
if context in (DefaultContext, BasicContext, ExtendedContext):
context = context.copy()
context.clear_flags()
threading.currentThread().__decimal_context__ = context
def getcontext():
"""Returns this thread's context.
If this thread does not yet have a context, returns
a new context and sets this thread's context.
New contexts are copies of DefaultContext.
"""
try:
return threading.currentThread().__decimal_context__
except AttributeError:
context = Context()
threading.currentThread().__decimal_context__ = context
return context
else:
local = threading.local()
if hasattr(local, '__decimal_context__'):
del local.__decimal_context__
def getcontext(_local=local):
"""Returns this thread's context.
If this thread does not yet have a context, returns
a new context and sets this thread's context.
New contexts are copies of DefaultContext.
"""
try:
return _local.__decimal_context__
except AttributeError:
context = Context()
_local.__decimal_context__ = context
return context
def setcontext(context, _local=local):
"""Set this thread's context to context."""
if context in (DefaultContext, BasicContext, ExtendedContext):
context = context.copy()
context.clear_flags()
_local.__decimal_context__ = context
del threading, local # Don't contaminate the namespace
def localcontext(ctx=None):
"""Return a context manager for a copy of the supplied context
Uses a copy of the current context if no context is specified
The returned context manager creates a local decimal context
in a with statement:
def sin(x):
with localcontext() as ctx:
ctx.prec += 2
# Rest of sin calculation algorithm
# uses a precision 2 greater than normal
return +s # Convert result to normal precision
def sin(x):
with localcontext(ExtendedContext):
# Rest of sin calculation algorithm
# uses the Extended Context from the
# General Decimal Arithmetic Specification
return +s # Convert result to normal context
>>> setcontext(DefaultContext)
>>> print getcontext().prec
28
>>> with localcontext():
... ctx = getcontext()
... ctx.prec += 2
... print ctx.prec
...
30
>>> with localcontext(ExtendedContext):
... print getcontext().prec
...
9
>>> print getcontext().prec
28
"""
if ctx is None: ctx = getcontext()
return _ContextManager(ctx)
##### Decimal class #######################################################
class Decimal(object):
"""Floating point class for decimal arithmetic."""
__slots__ = ('_exp','_int','_sign', '_is_special')
# Generally, the value of the Decimal instance is given by
# (-1)**_sign * _int * 10**_exp
# Special values are signified by _is_special == True
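# Worked example (matching the constructor doctest below): Decimal('3.14') is
# stored as _sign = 0, _int = '314', _exp = -2, i.e. (-1)**0 * 314 * 10**-2.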
# We're immutable, so use __new__ not __init__
def __new__(cls, value="0", context=None):
"""Create a decimal point instance.
>>> Decimal('3.14') # string input
Decimal('3.14')
>>> Decimal((0, (3, 1, 4), -2)) # tuple (sign, digit_tuple, exponent)
Decimal('3.14')
>>> Decimal(314) # int or long
Decimal('314')
>>> Decimal(Decimal(314)) # another decimal instance
Decimal('314')
>>> Decimal(' 3.14 \\n') # leading and trailing whitespace okay
Decimal('3.14')
"""
# Note that the coefficient, self._int, is actually stored as
# a string rather than as a tuple of digits. This speeds up
# the "digits to integer" and "integer to digits" conversions
# that are used in almost every arithmetic operation on
# Decimals. This is an internal detail: the as_tuple function
# and the Decimal constructor still deal with tuples of
# digits.
self = object.__new__(cls)
# From a string
# REs insist on real strings, so we can too.
if isinstance(value, basestring):
m = _parser(value.strip())
if m is None:
if context is None:
context = getcontext()
return context._raise_error(ConversionSyntax,
"Invalid literal for Decimal: %r" % value)
if m.group('sign') == "-":
self._sign = 1
else:
self._sign = 0
intpart = m.group('int')
if intpart is not None:
# finite number
fracpart = m.group('frac') or ''
exp = int(m.group('exp') or '0')
self._int = str(int(intpart+fracpart))
self._exp = exp - len(fracpart)
self._is_special = False
else:
diag = m.group('diag')
if diag is not None:
# NaN
self._int = str(int(diag or '0')).lstrip('0')
if m.group('signal'):
self._exp = 'N'
else:
self._exp = 'n'
else:
# infinity
self._int = '0'
self._exp = 'F'
self._is_special = True
return self
# From an integer
if isinstance(value, (int,long)):
if value >= 0:
self._sign = 0
else:
self._sign = 1
self._exp = 0
self._int = str(abs(value))
self._is_special = False
return self
# From another decimal
if isinstance(value, Decimal):
self._exp = value._exp
self._sign = value._sign
self._int = value._int
self._is_special = value._is_special
return self
# From an internal working value
if isinstance(value, _WorkRep):
self._sign = value.sign
self._int = str(value.int)
self._exp = int(value.exp)
self._is_special = False
return self
# tuple/list conversion (possibly from as_tuple())
if isinstance(value, (list,tuple)):
if len(value) != 3:
raise ValueError('Invalid tuple size in creation of Decimal '
'from list or tuple. The list or tuple '
'should have exactly three elements.')
# process sign. The isinstance test rejects floats
if not (isinstance(value[0], (int, long)) and value[0] in (0,1)):
raise ValueError("Invalid sign. The first value in the tuple "
"should be an integer; either 0 for a "
"positive number or 1 for a negative number.")
self._sign = value[0]
if value[2] == 'F':
# infinity: value[1] is ignored
self._int = '0'
self._exp = value[2]
self._is_special = True
else:
# process and validate the digits in value[1]
digits = []
for digit in value[1]:
if isinstance(digit, (int, long)) and 0 <= digit <= 9:
# skip leading zeros
if digits or digit != 0:
digits.append(digit)
else:
raise ValueError("The second value in the tuple must "
"be composed of integers in the range "
"0 through 9.")
if value[2] in ('n', 'N'):
# NaN: digits form the diagnostic
self._int = ''.join(map(str, digits))
self._exp = value[2]
self._is_special = True
elif isinstance(value[2], (int, long)):
# finite number: digits give the coefficient
self._int = ''.join(map(str, digits or [0]))
self._exp = value[2]
self._is_special = False
else:
raise ValueError("The third value in the tuple must "
"be an integer, or one of the "
"strings 'F', 'n', 'N'.")
return self
if isinstance(value, float):
value = Decimal.from_float(value)
self._exp = value._exp
self._sign = value._sign
self._int = value._int
self._is_special = value._is_special
return self
raise TypeError("Cannot convert %r to Decimal" % value)
# @classmethod, but @decorator is not valid Python 2.3 syntax, so
# don't use it (see notes on Py2.3 compatibility at top of file)
def from_float(cls, f):
"""Converts a float to a decimal number, exactly.
Note that Decimal.from_float(0.1) is not the same as Decimal('0.1').
Since 0.1 is not exactly representable in binary floating point, the
value is stored as the nearest representable value which is
0x1.999999999999ap-4. The exact equivalent of the value in decimal
is 0.1000000000000000055511151231257827021181583404541015625.
>>> Decimal.from_float(0.1)
Decimal('0.1000000000000000055511151231257827021181583404541015625')
>>> Decimal.from_float(float('nan'))
Decimal('NaN')
>>> Decimal.from_float(float('inf'))
Decimal('Infinity')
>>> Decimal.from_float(-float('inf'))
Decimal('-Infinity')
>>> Decimal.from_float(-0.0)
Decimal('-0')
"""
if isinstance(f, (int, long)): # handle integer inputs
return cls(f)
if _math.isinf(f) or _math.isnan(f): # raises TypeError if not a float
return cls(repr(f))
if _math.copysign(1.0, f) == 1.0:
sign = 0
else:
sign = 1
n, d = abs(f).as_integer_ratio()
k = d.bit_length() - 1
result = _dec_from_triple(sign, str(n*5**k), -k)
if cls is Decimal:
return result
else:
return cls(result)
from_float = classmethod(from_float)
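# Worked example of the conversion above: for f = 0.5, as_integer_ratio() gives
# (1, 2), so k = 1 and the result is _dec_from_triple(0, str(1 * 5**1), -1),
# i.e. Decimal('0.5'); the identity used is n / 2**k == n * 5**k / 10**k.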
def _isnan(self):
"""Returns whether the number is not actually one.
0 if a number
1 if NaN
2 if sNaN
"""
if self._is_special:
exp = self._exp
if exp == 'n':
return 1
elif exp == 'N':
return 2
return 0
def _isinfinity(self):
"""Returns whether the number is infinite
0 if finite or not a number
1 if +INF
-1 if -INF
"""
if self._exp == 'F':
if self._sign:
return -1
return 1
return 0
def _check_nans(self, other=None, context=None):
"""Returns whether the number is not actually one.
if self, other are sNaN, signal
if self, other are NaN return nan
return 0
Done before operations.
"""
self_is_nan = self._isnan()
if other is None:
other_is_nan = False
else:
other_is_nan = other._isnan()
if self_is_nan or other_is_nan:
if context is None:
context = getcontext()
if self_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
self)
if other_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
other)
if self_is_nan:
return self._fix_nan(context)
return other._fix_nan(context)
return 0
def _compare_check_nans(self, other, context):
"""Version of _check_nans used for the signaling comparisons
compare_signal, __le__, __lt__, __ge__, __gt__.
Signal InvalidOperation if either self or other is a (quiet
or signaling) NaN. Signaling NaNs take precedence over quiet
NaNs.
Return 0 if neither operand is a NaN.
"""
if context is None:
context = getcontext()
if self._is_special or other._is_special:
if self.is_snan():
return context._raise_error(InvalidOperation,
'comparison involving sNaN',
self)
elif other.is_snan():
return context._raise_error(InvalidOperation,
'comparison involving sNaN',
other)
elif self.is_qnan():
return context._raise_error(InvalidOperation,
'comparison involving NaN',
self)
elif other.is_qnan():
return context._raise_error(InvalidOperation,
'comparison involving NaN',
other)
return 0
def __nonzero__(self):
"""Return True if self is nonzero; otherwise return False.
NaNs and infinities are considered nonzero.
"""
return self._is_special or self._int != '0'
def _cmp(self, other):
"""Compare the two non-NaN decimal instances self and other.
Returns -1 if self < other, 0 if self == other and 1
if self > other. This routine is for internal use only."""
if self._is_special or other._is_special:
self_inf = self._isinfinity()
other_inf = other._isinfinity()
if self_inf == other_inf:
return 0
elif self_inf < other_inf:
return -1
else:
return 1
# check for zeros; Decimal('0') == Decimal('-0')
if not self:
if not other:
return 0
else:
return -((-1)**other._sign)
if not other:
return (-1)**self._sign
# If different signs, neg one is less
if other._sign < self._sign:
return -1
if self._sign < other._sign:
return 1
self_adjusted = self.adjusted()
other_adjusted = other.adjusted()
if self_adjusted == other_adjusted:
self_padded = self._int + '0'*(self._exp - other._exp)
other_padded = other._int + '0'*(other._exp - self._exp)
if self_padded == other_padded:
return 0
elif self_padded < other_padded:
return -(-1)**self._sign
else:
return (-1)**self._sign
elif self_adjusted > other_adjusted:
return (-1)**self._sign
else: # self_adjusted < other_adjusted
return -((-1)**self._sign)
# Note: The Decimal standard doesn't cover rich comparisons for
# Decimals. In particular, the specification is silent on the
# subject of what should happen for a comparison involving a NaN.
# We take the following approach:
#
# == comparisons involving a quiet NaN always return False
# != comparisons involving a quiet NaN always return True
# == or != comparisons involving a signaling NaN signal
# InvalidOperation, and return False or True as above if the
# InvalidOperation is not trapped.
# <, >, <= and >= comparisons involving a (quiet or signaling)
# NaN signal InvalidOperation, and return False if the
# InvalidOperation is not trapped.
#
# This behavior is designed to conform as closely as possible to
# that specified by IEEE 754.
def __eq__(self, other, context=None):
other = _convert_other(other, allow_float=True)
if other is NotImplemented:
return other
if self._check_nans(other, context):
return False
return self._cmp(other) == 0
def __ne__(self, other, context=None):
other = _convert_other(other, allow_float=True)
if other is NotImplemented:
return other
if self._check_nans(other, context):
return True
return self._cmp(other) != 0
def __lt__(self, other, context=None):
other = _convert_other(other, allow_float=True)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) < 0
def __le__(self, other, context=None):
other = _convert_other(other, allow_float=True)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) <= 0
def __gt__(self, other, context=None):
other = _convert_other(other, allow_float=True)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) > 0
def __ge__(self, other, context=None):
other = _convert_other(other, allow_float=True)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) >= 0
def compare(self, other, context=None):
"""Compares one to another.
-1 => a < b
0 => a = b
1 => a > b
NaN => one is NaN
Like __cmp__, but returns Decimal instances.
"""
other = _convert_other(other, raiseit=True)
# Compare(NaN, NaN) = NaN
if (self._is_special or other and other._is_special):
ans = self._check_nans(other, context)
if ans:
return ans
return Decimal(self._cmp(other))
def __hash__(self):
"""x.__hash__() <==> hash(x)"""
# Decimal integers must hash the same as the ints
#
# The hash of a nonspecial noninteger Decimal must depend only
# on the value of that Decimal, and not on its representation.
# For example: hash(Decimal('100E-1')) == hash(Decimal('10')).
# Equality comparisons involving signaling nans can raise an
# exception; since equality checks are implicitly and
# unpredictably used when checking set and dict membership, we
# prevent signaling nans from being used as set elements or
# dict keys by making __hash__ raise an exception.
if self._is_special:
if self.is_snan():
raise TypeError('Cannot hash a signaling NaN value.')
elif self.is_nan():
# 0 to match hash(float('nan'))
return 0
else:
# values chosen to match hash(float('inf')) and
# hash(float('-inf')).
if self._sign:
return -271828
else:
return 314159
# In Python 2.7, we're allowing comparisons (but not
# arithmetic operations) between floats and Decimals; so if
# a Decimal instance is exactly representable as a float then
# its hash should match that of the float.
self_as_float = float(self)
if Decimal.from_float(self_as_float) == self:
return hash(self_as_float)
if self._isinteger():
op = _WorkRep(self.to_integral_value())
# to make computation feasible for Decimals with large
# exponent, we use the fact that hash(n) == hash(m) for
# any two nonzero integers n and m such that (i) n and m
# have the same sign, and (ii) n is congruent to m modulo
# 2**64-1. So we can replace hash((-1)**s*c*10**e) with
# hash((-1)**s*c*pow(10, e, 2**64-1).
return hash((-1)**op.sign*op.int*pow(10, op.exp, 2**64-1))
# The value of a nonzero nonspecial Decimal instance is
# faithfully represented by the triple consisting of its sign,
# its adjusted exponent, and its coefficient with trailing
# zeros removed.
return hash((self._sign,
self._exp+len(self._int),
self._int.rstrip('0')))
def as_tuple(self):
"""Represents the number as a triple tuple.
To show the internals exactly as they are.
"""
return DecimalTuple(self._sign, tuple(map(int, self._int)), self._exp)
def __repr__(self):
"""Represents the number as an instance of Decimal."""
# Invariant: eval(repr(d)) == d
return "Decimal('%s')" % str(self)
def __str__(self, eng=False, context=None):
"""Return string representation of the number in scientific notation.
Captures all of the information in the underlying representation.
"""
sign = ['', '-'][self._sign]
if self._is_special:
if self._exp == 'F':
return sign + 'Infinity'
elif self._exp == 'n':
return sign + 'NaN' + self._int
else: # self._exp == 'N'
return sign + 'sNaN' + self._int
# number of digits of self._int to left of decimal point
leftdigits = self._exp + len(self._int)
# dotplace is number of digits of self._int to the left of the
# decimal point in the mantissa of the output string (that is,
# after adjusting the exponent)
if self._exp <= 0 and leftdigits > -6:
# no exponent required
dotplace = leftdigits
elif not eng:
# usual scientific notation: 1 digit on left of the point
dotplace = 1
elif self._int == '0':
# engineering notation, zero
dotplace = (leftdigits + 1) % 3 - 1
else:
# engineering notation, nonzero
dotplace = (leftdigits - 1) % 3 + 1
if dotplace <= 0:
intpart = '0'
fracpart = '.' + '0'*(-dotplace) + self._int
elif dotplace >= len(self._int):
intpart = self._int+'0'*(dotplace-len(self._int))
fracpart = ''
else:
intpart = self._int[:dotplace]
fracpart = '.' + self._int[dotplace:]
if leftdigits == dotplace:
exp = ''
else:
if context is None:
context = getcontext()
exp = ['e', 'E'][context.capitals] + "%+d" % (leftdigits-dotplace)
return sign + intpart + fracpart + exp
def to_eng_string(self, context=None):
"""Convert to engineering-type string.
Engineering notation has an exponent which is a multiple of 3, so there
are up to 3 digits left of the decimal place.
Same rules for when in exponential and when as a value as in __str__.
"""
return self.__str__(eng=True, context=context)
def __neg__(self, context=None):
"""Returns a copy with the sign switched.
Rounds, if it has reason.
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if context is None:
context = getcontext()
if not self and context.rounding != ROUND_FLOOR:
# -Decimal('0') is Decimal('0'), not Decimal('-0'), except
# in ROUND_FLOOR rounding mode.
ans = self.copy_abs()
else:
ans = self.copy_negate()
return ans._fix(context)
def __pos__(self, context=None):
"""Returns a copy, unless it is a sNaN.
Rounds the number (if more then precision digits)
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if context is None:
context = getcontext()
if not self and context.rounding != ROUND_FLOOR:
# + (-0) = 0, except in ROUND_FLOOR rounding mode.
ans = self.copy_abs()
else:
ans = Decimal(self)
return ans._fix(context)
def __abs__(self, round=True, context=None):
"""Returns the absolute value of self.
If the keyword argument 'round' is false, do not round. The
expression self.__abs__(round=False) is equivalent to
self.copy_abs().
"""
if not round:
return self.copy_abs()
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if self._sign:
ans = self.__neg__(context=context)
else:
ans = self.__pos__(context=context)
return ans
def __add__(self, other, context=None):
"""Returns self + other.
-INF + INF (or the reverse) cause InvalidOperation errors.
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
if self._is_special or other._is_special:
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
# If both INF, same sign => same as both, opposite => error.
if self._sign != other._sign and other._isinfinity():
return context._raise_error(InvalidOperation, '-INF + INF')
return Decimal(self)
if other._isinfinity():
return Decimal(other) # Can't both be infinity here
exp = min(self._exp, other._exp)
negativezero = 0
if context.rounding == ROUND_FLOOR and self._sign != other._sign:
# If the answer is 0, the sign should be negative, in this case.
negativezero = 1
if not self and not other:
sign = min(self._sign, other._sign)
if negativezero:
sign = 1
ans = _dec_from_triple(sign, '0', exp)
ans = ans._fix(context)
return ans
if not self:
exp = max(exp, other._exp - context.prec-1)
ans = other._rescale(exp, context.rounding)
ans = ans._fix(context)
return ans
if not other:
exp = max(exp, self._exp - context.prec-1)
ans = self._rescale(exp, context.rounding)
ans = ans._fix(context)
return ans
op1 = _WorkRep(self)
op2 = _WorkRep(other)
op1, op2 = _normalize(op1, op2, context.prec)
result = _WorkRep()
if op1.sign != op2.sign:
# Equal and opposite
if op1.int == op2.int:
ans = _dec_from_triple(negativezero, '0', exp)
ans = ans._fix(context)
return ans
if op1.int < op2.int:
op1, op2 = op2, op1
# OK, now abs(op1) > abs(op2)
if op1.sign == 1:
result.sign = 1
op1.sign, op2.sign = op2.sign, op1.sign
else:
result.sign = 0
# So we know the sign, and op1 > 0.
elif op1.sign == 1:
result.sign = 1
op1.sign, op2.sign = (0, 0)
else:
result.sign = 0
# Now, op1 > abs(op2) > 0
if op2.sign == 0:
result.int = op1.int + op2.int
else:
result.int = op1.int - op2.int
result.exp = op1.exp
ans = Decimal(result)
ans = ans._fix(context)
return ans
__radd__ = __add__
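# Illustrative examples (editorial addition): addition is exact decimal
# arithmetic, the result exponent is the smaller operand exponent, and a zero
# result from opposite-sign operands is positive except under ROUND_FLOOR.
# Assuming the default 28-digit context:
#   >>> Decimal('1.1') + Decimal('2.2')
#   Decimal('3.3')
#   >>> Decimal('1.00') + Decimal('2.000')
#   Decimal('3.000')
#   >>> Decimal('1') + Decimal('-1')
#   Decimal('0')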
def __sub__(self, other, context=None):
"""Return self - other"""
other = _convert_other(other)
if other is NotImplemented:
return other
if self._is_special or other._is_special:
ans = self._check_nans(other, context=context)
if ans:
return ans
# self - other is computed as self + other.copy_negate()
return self.__add__(other.copy_negate(), context=context)
def __rsub__(self, other, context=None):
"""Return other - self"""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__sub__(self, context=context)
def __mul__(self, other, context=None):
"""Return self * other.
(+-) INF * 0 (or its reverse) raise InvalidOperation.
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
resultsign = self._sign ^ other._sign
if self._is_special or other._is_special:
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
if not other:
return context._raise_error(InvalidOperation, '(+-)INF * 0')
return _SignedInfinity[resultsign]
if other._isinfinity():
if not self:
return context._raise_error(InvalidOperation, '0 * (+-)INF')
return _SignedInfinity[resultsign]
resultexp = self._exp + other._exp
# Special case for multiplying by zero
if not self or not other:
ans = _dec_from_triple(resultsign, '0', resultexp)
# Fixing in case the exponent is out of bounds
ans = ans._fix(context)
return ans
# Special case for multiplying by power of 10
if self._int == '1':
ans = _dec_from_triple(resultsign, other._int, resultexp)
ans = ans._fix(context)
return ans
if other._int == '1':
ans = _dec_from_triple(resultsign, self._int, resultexp)
ans = ans._fix(context)
return ans
op1 = _WorkRep(self)
op2 = _WorkRep(other)
ans = _dec_from_triple(resultsign, str(op1.int * op2.int), resultexp)
ans = ans._fix(context)
return ans
__rmul__ = __mul__
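# Illustrative examples (editorial addition): the coefficients are multiplied
# as integers and the exponents are added, then _fix applies the context.
# Assuming the default context:
#   >>> Decimal('1.2') * Decimal('0.03')     # 12 * 3 = 36, exponent -1 + -2 = -3
#   Decimal('0.036')
#   >>> Decimal('2.5') * Decimal('4')        # 25 * 4 = 100, exponent -1
#   Decimal('10.0')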
def __truediv__(self, other, context=None):
"""Return self / other."""
other = _convert_other(other)
if other is NotImplemented:
return NotImplemented
if context is None:
context = getcontext()
sign = self._sign ^ other._sign
if self._is_special or other._is_special:
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity() and other._isinfinity():
return context._raise_error(InvalidOperation, '(+-)INF/(+-)INF')
if self._isinfinity():
return _SignedInfinity[sign]
if other._isinfinity():
context._raise_error(Clamped, 'Division by infinity')
return _dec_from_triple(sign, '0', context.Etiny())
# Special cases for zeroes
if not other:
if not self:
return context._raise_error(DivisionUndefined, '0 / 0')
return context._raise_error(DivisionByZero, 'x / 0', sign)
if not self:
exp = self._exp - other._exp
coeff = 0
else:
# OK, so neither = 0, INF or NaN
shift = len(other._int) - len(self._int) + context.prec + 1
exp = self._exp - other._exp - shift
op1 = _WorkRep(self)
op2 = _WorkRep(other)
if shift >= 0:
coeff, remainder = divmod(op1.int * 10**shift, op2.int)
else:
coeff, remainder = divmod(op1.int, op2.int * 10**-shift)
if remainder:
# result is not exact; adjust to ensure correct rounding
if coeff % 5 == 0:
coeff += 1
else:
# result is exact; get as close to ideal exponent as possible
ideal_exp = self._exp - other._exp
while exp < ideal_exp and coeff % 10 == 0:
coeff //= 10
exp += 1
ans = _dec_from_triple(sign, str(coeff), exp)
return ans._fix(context)
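# Illustrative examples (editorial addition): inexact quotients carry
# context.prec digits, while exact quotients are brought as close as possible
# to the ideal exponent self._exp - other._exp.  Assuming the default context:
#   >>> Decimal(1) / Decimal(3)
#   Decimal('0.3333333333333333333333333333')
#   >>> Decimal('3.00') / Decimal('1.5')     # exact; ideal exponent is -2 - (-1) = -1
#   Decimal('2.0')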
def _divide(self, other, context):
"""Return (self // other, self % other), to context.prec precision.
Assumes that neither self nor other is a NaN, that self is not
infinite and that other is nonzero.
"""
sign = self._sign ^ other._sign
if other._isinfinity():
ideal_exp = self._exp
else:
ideal_exp = min(self._exp, other._exp)
expdiff = self.adjusted() - other.adjusted()
if not self or other._isinfinity() or expdiff <= -2:
return (_dec_from_triple(sign, '0', 0),
self._rescale(ideal_exp, context.rounding))
if expdiff <= context.prec:
op1 = _WorkRep(self)
op2 = _WorkRep(other)
if op1.exp >= op2.exp:
op1.int *= 10**(op1.exp - op2.exp)
else:
op2.int *= 10**(op2.exp - op1.exp)
q, r = divmod(op1.int, op2.int)
if q < 10**context.prec:
return (_dec_from_triple(sign, str(q), 0),
_dec_from_triple(self._sign, str(r), ideal_exp))
# Here the quotient is too large to be representable
ans = context._raise_error(DivisionImpossible,
'quotient too large in //, % or divmod')
return ans, ans
def __rtruediv__(self, other, context=None):
"""Swaps self/other and returns __truediv__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__truediv__(self, context=context)
__div__ = __truediv__
__rdiv__ = __rtruediv__
def __divmod__(self, other, context=None):
"""
Return (self // other, self % other)
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return (ans, ans)
sign = self._sign ^ other._sign
if self._isinfinity():
if other._isinfinity():
ans = context._raise_error(InvalidOperation, 'divmod(INF, INF)')
return ans, ans
else:
return (_SignedInfinity[sign],
context._raise_error(InvalidOperation, 'INF % x'))
if not other:
if not self:
ans = context._raise_error(DivisionUndefined, 'divmod(0, 0)')
return ans, ans
else:
return (context._raise_error(DivisionByZero, 'x // 0', sign),
context._raise_error(InvalidOperation, 'x % 0'))
quotient, remainder = self._divide(other, context)
remainder = remainder._fix(context)
return quotient, remainder
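# Illustrative examples (editorial addition): the quotient is truncated toward
# zero and the remainder takes the sign of the dividend, unlike int divmod.
# Assuming the default context:
#   >>> divmod(Decimal(7), Decimal(3))
#   (Decimal('2'), Decimal('1'))
#   >>> divmod(Decimal(-7), Decimal(3))      # int divmod would give (-3, 2)
#   (Decimal('-2'), Decimal('-1'))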
def __rdivmod__(self, other, context=None):
"""Swaps self/other and returns __divmod__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__divmod__(self, context=context)
def __mod__(self, other, context=None):
"""
self % other
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
return context._raise_error(InvalidOperation, 'INF % x')
elif not other:
if self:
return context._raise_error(InvalidOperation, 'x % 0')
else:
return context._raise_error(DivisionUndefined, '0 % 0')
remainder = self._divide(other, context)[1]
remainder = remainder._fix(context)
return remainder
def __rmod__(self, other, context=None):
"""Swaps self/other and returns __mod__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__mod__(self, context=context)
def remainder_near(self, other, context=None):
"""
Remainder nearest to 0: abs(remainder_near) <= abs(other)/2
"""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
ans = self._check_nans(other, context)
if ans:
return ans
# self == +/-infinity -> InvalidOperation
if self._isinfinity():
return context._raise_error(InvalidOperation,
'remainder_near(infinity, x)')
# other == 0 -> either InvalidOperation or DivisionUndefined
if not other:
if self:
return context._raise_error(InvalidOperation,
'remainder_near(x, 0)')
else:
return context._raise_error(DivisionUndefined,
'remainder_near(0, 0)')
# other = +/-infinity -> remainder = self
if other._isinfinity():
ans = Decimal(self)
return ans._fix(context)
# self = 0 -> remainder = self, with ideal exponent
ideal_exponent = min(self._exp, other._exp)
if not self:
ans = _dec_from_triple(self._sign, '0', ideal_exponent)
return ans._fix(context)
# catch most cases of large or small quotient
expdiff = self.adjusted() - other.adjusted()
if expdiff >= context.prec + 1:
# expdiff >= prec+1 => abs(self/other) > 10**prec
return context._raise_error(DivisionImpossible)
if expdiff <= -2:
# expdiff <= -2 => abs(self/other) < 0.1
ans = self._rescale(ideal_exponent, context.rounding)
return ans._fix(context)
# adjust both arguments to have the same exponent, then divide
op1 = _WorkRep(self)
op2 = _WorkRep(other)
if op1.exp >= op2.exp:
op1.int *= 10**(op1.exp - op2.exp)
else:
op2.int *= 10**(op2.exp - op1.exp)
q, r = divmod(op1.int, op2.int)
# remainder is r*10**ideal_exponent; other is +/-op2.int *
# 10**ideal_exponent. Apply correction to ensure that
# abs(remainder) <= abs(other)/2
if 2*r + (q&1) > op2.int:
r -= op2.int
q += 1
if q >= 10**context.prec:
return context._raise_error(DivisionImpossible)
# result has same sign as self unless r is negative
sign = self._sign
if r < 0:
sign = 1-sign
r = -r
ans = _dec_from_triple(sign, str(r), ideal_exponent)
return ans._fix(context)
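# Illustrative examples (editorial addition): the correction step above keeps
# the remainder within half of abs(other), so it may come out negative even
# for positive operands.  Assuming the default context:
#   >>> Decimal(10).remainder_near(Decimal(6))    # 10 = 2*6 - 2
#   Decimal('-2')
#   >>> Decimal(10).remainder_near(Decimal(3))    # 10 = 3*3 + 1
#   Decimal('1')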
def __floordiv__(self, other, context=None):
"""self // other"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
if other._isinfinity():
return context._raise_error(InvalidOperation, 'INF // INF')
else:
return _SignedInfinity[self._sign ^ other._sign]
if not other:
if self:
return context._raise_error(DivisionByZero, 'x // 0',
self._sign ^ other._sign)
else:
return context._raise_error(DivisionUndefined, '0 // 0')
return self._divide(other, context)[0]
def __rfloordiv__(self, other, context=None):
"""Swaps self/other and returns __floordiv__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__floordiv__(self, context=context)
def __float__(self):
"""Float representation."""
if self._isnan():
if self.is_snan():
raise ValueError("Cannot convert signaling NaN to float")
s = "-nan" if self._sign else "nan"
else:
s = str(self)
return float(s)
def __int__(self):
"""Converts self to an int, truncating if necessary."""
if self._is_special:
if self._isnan():
raise ValueError("Cannot convert NaN to integer")
elif self._isinfinity():
raise OverflowError("Cannot convert infinity to integer")
s = (-1)**self._sign
if self._exp >= 0:
return s*int(self._int)*10**self._exp
else:
return s*int(self._int[:self._exp] or '0')
__trunc__ = __int__
def real(self):
return self
real = property(real)
def imag(self):
return Decimal(0)
imag = property(imag)
def conjugate(self):
return self
def __complex__(self):
return complex(float(self))
def __long__(self):
"""Converts to a long.
Equivalent to long(int(self))
"""
return long(self.__int__())
def _fix_nan(self, context):
"""Decapitate the payload of a NaN to fit the context"""
payload = self._int
# maximum length of payload is precision if _clamp=0,
# precision-1 if _clamp=1.
max_payload_len = context.prec - context._clamp
if len(payload) > max_payload_len:
payload = payload[len(payload)-max_payload_len:].lstrip('0')
return _dec_from_triple(self._sign, payload, self._exp, True)
return Decimal(self)
def _fix(self, context):
"""Round if it is necessary to keep self within prec precision.
Rounds and fixes the exponent. Does not raise on a sNaN.
Arguments:
self - Decimal instance
context - context used.
"""
if self._is_special:
if self._isnan():
# decapitate payload if necessary
return self._fix_nan(context)
else:
# self is +/-Infinity; return unaltered
return Decimal(self)
# if self is zero then exponent should be between Etiny and
# Emax if _clamp==0, and between Etiny and Etop if _clamp==1.
Etiny = context.Etiny()
Etop = context.Etop()
if not self:
exp_max = [context.Emax, Etop][context._clamp]
new_exp = min(max(self._exp, Etiny), exp_max)
if new_exp != self._exp:
context._raise_error(Clamped)
return _dec_from_triple(self._sign, '0', new_exp)
else:
return Decimal(self)
# exp_min is the smallest allowable exponent of the result,
# equal to max(self.adjusted()-context.prec+1, Etiny)
exp_min = len(self._int) + self._exp - context.prec
if exp_min > Etop:
# overflow: exp_min > Etop iff self.adjusted() > Emax
ans = context._raise_error(Overflow, 'above Emax', self._sign)
context._raise_error(Inexact)
context._raise_error(Rounded)
return ans
self_is_subnormal = exp_min < Etiny
if self_is_subnormal:
exp_min = Etiny
# round if self has too many digits
if self._exp < exp_min:
digits = len(self._int) + self._exp - exp_min
if digits < 0:
self = _dec_from_triple(self._sign, '1', exp_min-1)
digits = 0
rounding_method = self._pick_rounding_function[context.rounding]
changed = rounding_method(self, digits)
coeff = self._int[:digits] or '0'
if changed > 0:
coeff = str(int(coeff)+1)
if len(coeff) > context.prec:
coeff = coeff[:-1]
exp_min += 1
# check whether the rounding pushed the exponent out of range
if exp_min > Etop:
ans = context._raise_error(Overflow, 'above Emax', self._sign)
else:
ans = _dec_from_triple(self._sign, coeff, exp_min)
# raise the appropriate signals, taking care to respect
# the precedence described in the specification
if changed and self_is_subnormal:
context._raise_error(Underflow)
if self_is_subnormal:
context._raise_error(Subnormal)
if changed:
context._raise_error(Inexact)
context._raise_error(Rounded)
if not ans:
# raise Clamped on underflow to 0
context._raise_error(Clamped)
return ans
if self_is_subnormal:
context._raise_error(Subnormal)
# fold down if _clamp == 1 and self has too few digits
if context._clamp == 1 and self._exp > Etop:
context._raise_error(Clamped)
self_padded = self._int + '0'*(self._exp - Etop)
return _dec_from_triple(self._sign, self_padded, Etop)
# here self was representable to begin with; return unchanged
return Decimal(self)
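# Illustrative example (editorial addition): _fix is what trims a result to
# the context precision; unary plus goes through it, so reducing the precision
# in a temporary context (via the module-level localcontext helper) shows the
# effect:
#   >>> with localcontext() as ctx:
#   ...     ctx.prec = 3
#   ...     +Decimal('1.2345')
#   Decimal('1.23')
# with the Inexact and Rounded flags set on ctx afterwards.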
# for each of the rounding functions below:
# self is a finite, nonzero Decimal
# prec is an integer satisfying 0 <= prec < len(self._int)
#
# each function returns either -1, 0, or 1, as follows:
# 1 indicates that self should be rounded up (away from zero)
# 0 indicates that self should be truncated, and that all the
# digits to be truncated are zeros (so the value is unchanged)
# -1 indicates that there are nonzero digits to be truncated
def _round_down(self, prec):
"""Also known as round-towards-0, truncate."""
if _all_zeros(self._int, prec):
return 0
else:
return -1
def _round_up(self, prec):
"""Rounds away from 0."""
return -self._round_down(prec)
def _round_half_up(self, prec):
"""Rounds 5 up (away from 0)"""
if self._int[prec] in '56789':
return 1
elif _all_zeros(self._int, prec):
return 0
else:
return -1
def _round_half_down(self, prec):
"""Round 5 down"""
if _exact_half(self._int, prec):
return -1
else:
return self._round_half_up(prec)
def _round_half_even(self, prec):
"""Round 5 to even, rest to nearest."""
if _exact_half(self._int, prec) and \
(prec == 0 or self._int[prec-1] in '02468'):
return -1
else:
return self._round_half_up(prec)
def _round_ceiling(self, prec):
"""Rounds up (not away from 0 if negative.)"""
if self._sign:
return self._round_down(prec)
else:
return -self._round_down(prec)
def _round_floor(self, prec):
"""Rounds down (not towards 0 if negative)"""
if not self._sign:
return self._round_down(prec)
else:
return -self._round_down(prec)
def _round_05up(self, prec):
"""Round down unless digit prec-1 is 0 or 5."""
if prec and self._int[prec-1] not in '05':
return self._round_down(prec)
else:
return -self._round_down(prec)
_pick_rounding_function = dict(
ROUND_DOWN = _round_down,
ROUND_UP = _round_up,
ROUND_HALF_UP = _round_half_up,
ROUND_HALF_DOWN = _round_half_down,
ROUND_HALF_EVEN = _round_half_even,
ROUND_CEILING = _round_ceiling,
ROUND_FLOOR = _round_floor,
ROUND_05UP = _round_05up,
)
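# Illustrative examples (editorial addition): the table above is what quantize
# and _fix consult, so the rounding mode decides results at the halfway point.
# Assuming the default context precision:
#   >>> Decimal('2.5').quantize(Decimal('1'), rounding=ROUND_HALF_EVEN)
#   Decimal('2')
#   >>> Decimal('2.5').quantize(Decimal('1'), rounding=ROUND_HALF_UP)
#   Decimal('3')
#   >>> Decimal('-2.5').quantize(Decimal('1'), rounding=ROUND_FLOOR)
#   Decimal('-3')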
def fma(self, other, third, context=None):
"""Fused multiply-add.
Returns self*other+third with no rounding of the intermediate
product self*other.
self and other are multiplied together, with no rounding of
the result. The third operand is then added to the result,
and a single final rounding is performed.
"""
other = _convert_other(other, raiseit=True)
# compute product; raise InvalidOperation if either operand is
# a signaling NaN or if the product is zero times infinity.
if self._is_special or other._is_special:
if context is None:
context = getcontext()
if self._exp == 'N':
return context._raise_error(InvalidOperation, 'sNaN', self)
if other._exp == 'N':
return context._raise_error(InvalidOperation, 'sNaN', other)
if self._exp == 'n':
product = self
elif other._exp == 'n':
product = other
elif self._exp == 'F':
if not other:
return context._raise_error(InvalidOperation,
'INF * 0 in fma')
product = _SignedInfinity[self._sign ^ other._sign]
elif other._exp == 'F':
if not self:
return context._raise_error(InvalidOperation,
'0 * INF in fma')
product = _SignedInfinity[self._sign ^ other._sign]
else:
product = _dec_from_triple(self._sign ^ other._sign,
str(int(self._int) * int(other._int)),
self._exp + other._exp)
third = _convert_other(third, raiseit=True)
return product.__add__(third, context)
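# Illustrative examples (editorial addition): fma rounds only once, after the
# addition, so it can differ from a separate multiply and add.  The second
# example uses a deliberately tiny 2-digit precision for contrast:
#   >>> Decimal(2).fma(3, 5)
#   Decimal('11')
#   >>> with localcontext() as ctx:
#   ...     ctx.prec = 2
#   ...     Decimal('111').fma(1, -110)      # product kept exact, one final rounding
#   ...     Decimal('111') * 1 - 110         # product rounds to 1.1E+2 first
#   Decimal('1')
#   Decimal('0')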
def _power_modulo(self, other, modulo, context=None):
"""Three argument version of __pow__"""
# if can't convert other and modulo to Decimal, raise
# TypeError; there's no point returning NotImplemented (no
# equivalent of __rpow__ for three argument pow)
other = _convert_other(other, raiseit=True)
modulo = _convert_other(modulo, raiseit=True)
if context is None:
context = getcontext()
# deal with NaNs: if there are any sNaNs then first one wins,
# (i.e. behaviour for NaNs is identical to that of fma)
self_is_nan = self._isnan()
other_is_nan = other._isnan()
modulo_is_nan = modulo._isnan()
if self_is_nan or other_is_nan or modulo_is_nan:
if self_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
self)
if other_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
other)
if modulo_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
modulo)
if self_is_nan:
return self._fix_nan(context)
if other_is_nan:
return other._fix_nan(context)
return modulo._fix_nan(context)
# check inputs: we apply same restrictions as Python's pow()
if not (self._isinteger() and
other._isinteger() and
modulo._isinteger()):
return context._raise_error(InvalidOperation,
'pow() 3rd argument not allowed '
'unless all arguments are integers')
if other < 0:
return context._raise_error(InvalidOperation,
'pow() 2nd argument cannot be '
'negative when 3rd argument specified')
if not modulo:
return context._raise_error(InvalidOperation,
'pow() 3rd argument cannot be 0')
# additional restriction for decimal: the modulus must be less
# than 10**prec in absolute value
if modulo.adjusted() >= context.prec:
return context._raise_error(InvalidOperation,
'insufficient precision: pow() 3rd '
'argument must not have more than '
'precision digits')
# define 0**0 == NaN, for consistency with two-argument pow
# (even though it hurts!)
if not other and not self:
return context._raise_error(InvalidOperation,
'at least one of pow() 1st argument '
'and 2nd argument must be nonzero; '
'0**0 is not defined')
# compute sign of result
if other._iseven():
sign = 0
else:
sign = self._sign
# convert modulo to a Python integer, and self and other to
# Decimal integers (i.e. force their exponents to be >= 0)
modulo = abs(int(modulo))
base = _WorkRep(self.to_integral_value())
exponent = _WorkRep(other.to_integral_value())
# compute result using integer pow()
base = (base.int % modulo * pow(10, base.exp, modulo)) % modulo
for i in xrange(exponent.exp):
base = pow(base, 10, modulo)
base = pow(base, exponent.int, modulo)
return _dec_from_triple(sign, str(base), 0)
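# Illustrative example (editorial addition): the three-argument form mirrors
# the built-in pow for integral Decimals, computing (self**other) % modulo
# without building the full power:
#   >>> pow(Decimal(4), Decimal(13), Decimal(497))    # 4**13 = 67108864
#   Decimal('445')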
def _power_exact(self, other, p):
"""Attempt to compute self**other exactly.
Given Decimals self and other and an integer p, attempt to
compute an exact result for the power self**other, with p
digits of precision. Return None if self**other is not
exactly representable in p digits.
Assumes that elimination of special cases has already been
performed: self and other must both be nonspecial; self must
be positive and not numerically equal to 1; other must be
nonzero. For efficiency, other._exp should not be too large,
so that 10**abs(other._exp) is a feasible calculation."""
# In the comments below, we write x for the value of self and y for the
# value of other. Write x = xc*10**xe and abs(y) = yc*10**ye, with xc
# and yc positive integers not divisible by 10.
# The main purpose of this method is to identify the *failure*
# of x**y to be exactly representable with as little effort as
# possible. So we look for cheap and easy tests that
# eliminate the possibility of x**y being exact. Only if all
# these tests are passed do we go on to actually compute x**y.
# Here's the main idea. Express y as a rational number m/n, with m and
# n relatively prime and n>0. Then for x**y to be exactly
# representable (at *any* precision), xc must be the nth power of a
# positive integer and xe must be divisible by n. If y is negative
# then additionally xc must be a power of either 2 or 5, hence a power
# of 2**n or 5**n.
#
# There's a limit to how small |y| can be: if y=m/n as above
# then:
#
# (1) if xc != 1 then for the result to be representable we
# need xc**(1/n) >= 2, and hence also xc**|y| >= 2. So
# if |y| <= 1/nbits(xc) then xc < 2**nbits(xc) <=
# 2**(1/|y|), hence xc**|y| < 2 and the result is not
# representable.
#
# (2) if xe != 0, |xe|*(1/n) >= 1, so |xe|*|y| >= 1. Hence if
# |y| < 1/|xe| then the result is not representable.
#
# Note that since x is not equal to 1, at least one of (1) and
# (2) must apply. Now |y| < 1/nbits(xc) iff |yc|*nbits(xc) <
# 10**-ye iff len(str(|yc|*nbits(xc))) <= -ye.
#
# There's also a limit to how large y can be, at least if it's
# positive: the normalized result will have coefficient xc**y,
# so if it's representable then xc**y < 10**p, and y <
# p/log10(xc). Hence if y*log10(xc) >= p then the result is
# not exactly representable.
# if len(str(abs(yc*xe))) <= -ye then abs(yc*xe) < 10**-ye,
# so |y| < 1/xe and the result is not representable.
# Similarly, len(str(abs(yc)*xc_bits)) <= -ye implies |y|
# < 1/nbits(xc).
x = _WorkRep(self)
xc, xe = x.int, x.exp
while xc % 10 == 0:
xc //= 10
xe += 1
y = _WorkRep(other)
yc, ye = y.int, y.exp
while yc % 10 == 0:
yc //= 10
ye += 1
# case where xc == 1: result is 10**(xe*y), with xe*y
# required to be an integer
if xc == 1:
xe *= yc
# result is now 10**(xe * 10**ye); xe * 10**ye must be integral
while xe % 10 == 0:
xe //= 10
ye += 1
if ye < 0:
return None
exponent = xe * 10**ye
if y.sign == 1:
exponent = -exponent
# if other is a nonnegative integer, use ideal exponent
if other._isinteger() and other._sign == 0:
ideal_exponent = self._exp*int(other)
zeros = min(exponent-ideal_exponent, p-1)
else:
zeros = 0
return _dec_from_triple(0, '1' + '0'*zeros, exponent-zeros)
# case where y is negative: xc must be either a power
# of 2 or a power of 5.
if y.sign == 1:
last_digit = xc % 10
if last_digit in (2,4,6,8):
# quick test for power of 2
if xc & -xc != xc:
return None
# now xc is a power of 2; e is its exponent
e = _nbits(xc)-1
# We now have:
#
# x = 2**e * 10**xe, e > 0, and y < 0.
#
# The exact result is:
#
# x**y = 5**(-e*y) * 10**(e*y + xe*y)
#
# provided that both e*y and xe*y are integers. Note that if
# 5**(-e*y) >= 10**p, then the result can't be expressed
# exactly with p digits of precision.
#
# Using the above, we can guard against large values of ye.
# 93/65 is an upper bound for log(10)/log(5), so if
#
# ye >= len(str(93*p//65))
#
# then
#
# -e*y >= -y >= 10**ye > 93*p/65 > p*log(10)/log(5),
#
# so 5**(-e*y) >= 10**p, and the coefficient of the result
# can't be expressed in p digits.
# emax >= largest e such that 5**e < 10**p.
emax = p*93//65
if ye >= len(str(emax)):
return None
# Find -e*y and -xe*y; both must be integers
e = _decimal_lshift_exact(e * yc, ye)
xe = _decimal_lshift_exact(xe * yc, ye)
if e is None or xe is None:
return None
if e > emax:
return None
xc = 5**e
elif last_digit == 5:
# e >= log_5(xc) if xc is a power of 5; we have
# equality all the way up to xc=5**2658
e = _nbits(xc)*28//65
xc, remainder = divmod(5**e, xc)
if remainder:
return None
while xc % 5 == 0:
xc //= 5
e -= 1
# Guard against large values of ye, using the same logic as in
# the 'xc is a power of 2' branch. 10/3 is an upper bound for
# log(10)/log(2).
emax = p*10//3
if ye >= len(str(emax)):
return None
e = _decimal_lshift_exact(e * yc, ye)
xe = _decimal_lshift_exact(xe * yc, ye)
if e is None or xe is None:
return None
if e > emax:
return None
xc = 2**e
else:
return None
if xc >= 10**p:
return None
xe = -e-xe
return _dec_from_triple(0, str(xc), xe)
# now y is positive; find m and n such that y = m/n
if ye >= 0:
m, n = yc*10**ye, 1
else:
if xe != 0 and len(str(abs(yc*xe))) <= -ye:
return None
xc_bits = _nbits(xc)
if xc != 1 and len(str(abs(yc)*xc_bits)) <= -ye:
return None
m, n = yc, 10**(-ye)
while m % 2 == n % 2 == 0:
m //= 2
n //= 2
while m % 5 == n % 5 == 0:
m //= 5
n //= 5
# compute nth root of xc*10**xe
if n > 1:
# if 1 < xc < 2**n then xc isn't an nth power
if xc != 1 and xc_bits <= n:
return None
xe, rem = divmod(xe, n)
if rem != 0:
return None
# compute nth root of xc using Newton's method
a = 1L << -(-_nbits(xc)//n) # initial estimate
while True:
q, r = divmod(xc, a**(n-1))
if a <= q:
break
else:
a = (a*(n-1) + q)//n
if not (a == q and r == 0):
return None
xc = a
# now xc*10**xe is the nth root of the original xc*10**xe
# compute mth power of xc*10**xe
# if m > p*100//_log10_lb(xc) then m > p/log10(xc), hence xc**m >
# 10**p and the result is not representable.
if xc > 1 and m > p*100//_log10_lb(xc):
return None
xc = xc**m
xe *= m
if xc > 10**p:
return None
# by this point the result *is* exactly representable
# adjust the exponent to get as close as possible to the ideal
# exponent, if necessary
str_xc = str(xc)
if other._isinteger() and other._sign == 0:
ideal_exponent = self._exp*int(other)
zeros = min(xe-ideal_exponent, p-len(str_xc))
else:
zeros = 0
return _dec_from_triple(0, str_xc+'0'*zeros, xe-zeros)
def __pow__(self, other, modulo=None, context=None):
"""Return self ** other [ % modulo].
With two arguments, compute self**other.
With three arguments, compute (self**other) % modulo. For the
three argument form, the following restrictions on the
arguments hold:
- all three arguments must be integral
- other must be nonnegative
- either self or other (or both) must be nonzero
- modulo must be nonzero and must have at most p digits,
where p is the context precision.
If any of these restrictions is violated the InvalidOperation
flag is raised.
The result of pow(self, other, modulo) is identical to the
result that would be obtained by computing (self**other) %
modulo with unbounded precision, but is computed more
efficiently. It is always exact.
"""
if modulo is not None:
return self._power_modulo(other, modulo, context)
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
# either argument is a NaN => result is NaN
ans = self._check_nans(other, context)
if ans:
return ans
# 0**0 = NaN (!), x**0 = 1 for nonzero x (including +/-Infinity)
if not other:
if not self:
return context._raise_error(InvalidOperation, '0 ** 0')
else:
return _One
# result has sign 1 iff self._sign is 1 and other is an odd integer
result_sign = 0
if self._sign == 1:
if other._isinteger():
if not other._iseven():
result_sign = 1
else:
# -ve**noninteger = NaN
# (-0)**noninteger = 0**noninteger
if self:
return context._raise_error(InvalidOperation,
'x ** y with x negative and y not an integer')
# negate self, without doing any unwanted rounding
self = self.copy_negate()
# 0**(+ve or Inf)= 0; 0**(-ve or -Inf) = Infinity
if not self:
if other._sign == 0:
return _dec_from_triple(result_sign, '0', 0)
else:
return _SignedInfinity[result_sign]
# Inf**(+ve or Inf) = Inf; Inf**(-ve or -Inf) = 0
if self._isinfinity():
if other._sign == 0:
return _SignedInfinity[result_sign]
else:
return _dec_from_triple(result_sign, '0', 0)
# 1**other = 1, but the choice of exponent and the flags
# depend on the exponent of self, and on whether other is a
# positive integer, a negative integer, or neither
if self == _One:
if other._isinteger():
# exp = max(self._exp*max(int(other), 0),
# 1-context.prec) but evaluating int(other) directly
# is dangerous until we know other is small (other
# could be 1e999999999)
if other._sign == 1:
multiplier = 0
elif other > context.prec:
multiplier = context.prec
else:
multiplier = int(other)
exp = self._exp * multiplier
if exp < 1-context.prec:
exp = 1-context.prec
context._raise_error(Rounded)
else:
context._raise_error(Inexact)
context._raise_error(Rounded)
exp = 1-context.prec
return _dec_from_triple(result_sign, '1'+'0'*-exp, exp)
# compute adjusted exponent of self
self_adj = self.adjusted()
# self ** infinity is infinity if self > 1, 0 if self < 1
# self ** -infinity is infinity if self < 1, 0 if self > 1
if other._isinfinity():
if (other._sign == 0) == (self_adj < 0):
return _dec_from_triple(result_sign, '0', 0)
else:
return _SignedInfinity[result_sign]
# from here on, the result always goes through the call
# to _fix at the end of this function.
ans = None
exact = False
# crude test to catch cases of extreme overflow/underflow. If
# log10(self)*other >= 10**bound and bound >= len(str(Emax))
# then 10**bound >= 10**len(str(Emax)) >= Emax+1 and hence
# self**other >= 10**(Emax+1), so overflow occurs. The test
# for underflow is similar.
bound = self._log10_exp_bound() + other.adjusted()
if (self_adj >= 0) == (other._sign == 0):
# self > 1 and other +ve, or self < 1 and other -ve
# possibility of overflow
if bound >= len(str(context.Emax)):
ans = _dec_from_triple(result_sign, '1', context.Emax+1)
else:
# self > 1 and other -ve, or self < 1 and other +ve
# possibility of underflow to 0
Etiny = context.Etiny()
if bound >= len(str(-Etiny)):
ans = _dec_from_triple(result_sign, '1', Etiny-1)
# try for an exact result with precision +1
if ans is None:
ans = self._power_exact(other, context.prec + 1)
if ans is not None:
if result_sign == 1:
ans = _dec_from_triple(1, ans._int, ans._exp)
exact = True
# usual case: inexact result, x**y computed directly as exp(y*log(x))
if ans is None:
p = context.prec
x = _WorkRep(self)
xc, xe = x.int, x.exp
y = _WorkRep(other)
yc, ye = y.int, y.exp
if y.sign == 1:
yc = -yc
# compute correctly rounded result: start with precision +3,
# then increase precision until result is unambiguously roundable
extra = 3
while True:
coeff, exp = _dpower(xc, xe, yc, ye, p+extra)
if coeff % (5*10**(len(str(coeff))-p-1)):
break
extra += 3
ans = _dec_from_triple(result_sign, str(coeff), exp)
# unlike exp, ln and log10, the power function respects the
# rounding mode; no need to switch to ROUND_HALF_EVEN here
# There's a difficulty here when 'other' is not an integer and
# the result is exact. In this case, the specification
# requires that the Inexact flag be raised (in spite of
# exactness), but since the result is exact _fix won't do this
# for us. (Correspondingly, the Underflow signal should also
# be raised for subnormal results.) We can't directly raise
# these signals either before or after calling _fix, since
# that would violate the precedence for signals. So we wrap
# the ._fix call in a temporary context, and reraise
# afterwards.
if exact and not other._isinteger():
# pad with zeros up to length context.prec+1 if necessary; this
# ensures that the Rounded signal will be raised.
if len(ans._int) <= context.prec:
expdiff = context.prec + 1 - len(ans._int)
ans = _dec_from_triple(ans._sign, ans._int+'0'*expdiff,
ans._exp-expdiff)
# create a copy of the current context, with cleared flags/traps
newcontext = context.copy()
newcontext.clear_flags()
for exception in _signals:
newcontext.traps[exception] = 0
# round in the new context
ans = ans._fix(newcontext)
# raise Inexact, and if necessary, Underflow
newcontext._raise_error(Inexact)
if newcontext.flags[Subnormal]:
newcontext._raise_error(Underflow)
# propagate signals to the original context; _fix could
# have raised any of Overflow, Underflow, Subnormal,
# Inexact, Rounded, Clamped. Overflow needs the correct
# arguments. Note that the order of the exceptions is
# important here.
if newcontext.flags[Overflow]:
context._raise_error(Overflow, 'above Emax', ans._sign)
for exception in Underflow, Subnormal, Inexact, Rounded, Clamped:
if newcontext.flags[exception]:
context._raise_error(exception)
else:
ans = ans._fix(context)
return ans
def __rpow__(self, other, context=None):
"""Swaps self/other and returns __pow__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__pow__(self, context=context)
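# Illustrative examples (editorial addition): exact powers go through
# _power_exact and keep an ideal exponent, while non-integer powers fall back
# to the correctly rounded exp(y*log(x)) path.  Assuming the default context:
#   >>> Decimal(2) ** 8
#   Decimal('256')
#   >>> Decimal(10) ** Decimal('-2')
#   Decimal('0.01')
#   >>> Decimal(2) ** Decimal('0.5')
#   Decimal('1.414213562373095048801688724')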
def normalize(self, context=None):
Normalize: strip trailing zeros and change anything equal to 0 to 0e0.
if context is None:
context = getcontext()
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
dup = self._fix(context)
if dup._isinfinity():
return dup
if not dup:
return _dec_from_triple(dup._sign, '0', 0)
exp_max = [context.Emax, context.Etop()][context._clamp]
end = len(dup._int)
exp = dup._exp
while dup._int[end-1] == '0' and exp < exp_max:
exp += 1
end -= 1
return _dec_from_triple(dup._sign, dup._int[:end], exp)
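# Illustrative examples (editorial addition): normalize strips trailing zeros
# (within the allowed exponent range) and canonicalises zeros.  Assuming the
# default context:
#   >>> Decimal('120.00').normalize()
#   Decimal('1.2E+2')
#   >>> Decimal('-0.00').normalize()
#   Decimal('-0')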
def quantize(self, exp, rounding=None, context=None, watchexp=True):
"""Quantize self so its exponent is the same as that of exp.
Similar to self._rescale(exp._exp) but with error checking.
"""
exp = _convert_other(exp, raiseit=True)
if context is None:
context = getcontext()
if rounding is None:
rounding = context.rounding
if self._is_special or exp._is_special:
ans = self._check_nans(exp, context)
if ans:
return ans
if exp._isinfinity() or self._isinfinity():
if exp._isinfinity() and self._isinfinity():
return Decimal(self) # if both are inf, it is OK
return context._raise_error(InvalidOperation,
'quantize with one INF')
# if we're not watching exponents, do a simple rescale
if not watchexp:
ans = self._rescale(exp._exp, rounding)
# raise Inexact and Rounded where appropriate
if ans._exp > self._exp:
context._raise_error(Rounded)
if ans != self:
context._raise_error(Inexact)
return ans
# exp._exp should be between Etiny and Emax
if not (context.Etiny() <= exp._exp <= context.Emax):
return context._raise_error(InvalidOperation,
'target exponent out of bounds in quantize')
if not self:
ans = _dec_from_triple(self._sign, '0', exp._exp)
return ans._fix(context)
self_adjusted = self.adjusted()
if self_adjusted > context.Emax:
return context._raise_error(InvalidOperation,
'exponent of quantize result too large for current context')
if self_adjusted - exp._exp + 1 > context.prec:
return context._raise_error(InvalidOperation,
'quantize result has too many digits for current context')
ans = self._rescale(exp._exp, rounding)
if ans.adjusted() > context.Emax:
return context._raise_error(InvalidOperation,
'exponent of quantize result too large for current context')
if len(ans._int) > context.prec:
return context._raise_error(InvalidOperation,
'quantize result has too many digits for current context')
# raise appropriate flags
if ans and ans.adjusted() < context.Emin:
context._raise_error(Subnormal)
if ans._exp > self._exp:
if ans != self:
context._raise_error(Inexact)
context._raise_error(Rounded)
# call to fix takes care of any necessary folddown, and
# signals Clamped if necessary
ans = ans._fix(context)
return ans
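# Illustrative examples (editorial addition): quantize matches the exponent of
# its argument, using the context rounding mode (half-even by default), and
# signals InvalidOperation if the coefficient would need more than context.prec
# digits.
#   >>> Decimal('1.41421356').quantize(Decimal('1.000'))
#   Decimal('1.414')
#   >>> Decimal('2.675').quantize(Decimal('0.01'))    # exact half; 7 is odd, round up
#   Decimal('2.68')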
def same_quantum(self, other):
"""Return True if self and other have the same exponent; otherwise
return False.
If either operand is a special value, the following rules are used:
* return True if both operands are infinities
* return True if both operands are NaNs
* otherwise, return False.
"""
other = _convert_other(other, raiseit=True)
if self._is_special or other._is_special:
return (self.is_nan() and other.is_nan() or
self.is_infinite() and other.is_infinite())
return self._exp == other._exp
def _rescale(self, exp, rounding):
"""Rescale self so that the exponent is exp, either by padding with zeros
or by truncating digits, using the given rounding mode.
Specials are returned without change. This operation is
quiet: it raises no flags, and uses no information from the
context.
exp = exp to scale to (an integer)
rounding = rounding mode
"""
if self._is_special:
return Decimal(self)
if not self:
return _dec_from_triple(self._sign, '0', exp)
if self._exp >= exp:
# pad answer with zeros if necessary
return _dec_from_triple(self._sign,
self._int + '0'*(self._exp - exp), exp)
# too many digits; round and lose data. If self.adjusted() <
# exp-1, replace self by 10**(exp-1) before rounding
digits = len(self._int) + self._exp - exp
if digits < 0:
self = _dec_from_triple(self._sign, '1', exp-1)
digits = 0
this_function = self._pick_rounding_function[rounding]
changed = this_function(self, digits)
coeff = self._int[:digits] or '0'
if changed == 1:
coeff = str(int(coeff)+1)
return _dec_from_triple(self._sign, coeff, exp)
def _round(self, places, rounding):
"""Round a nonzero, nonspecial Decimal to a fixed number of
significant figures, using the given rounding mode.
Infinities, NaNs and zeros are returned unaltered.
This operation is quiet: it raises no flags, and uses no
information from the context.
"""
if places <= 0:
raise ValueError("argument should be at least 1 in _round")
if self._is_special or not self:
return Decimal(self)
ans = self._rescale(self.adjusted()+1-places, rounding)
# it can happen that the rescale alters the adjusted exponent;
# for example when rounding 99.97 to 3 significant figures.
# When this happens we end up with an extra 0 at the end of
# the number; a second rescale fixes this.
if ans.adjusted() != self.adjusted():
ans = ans._rescale(ans.adjusted()+1-places, rounding)
return ans
def to_integral_exact(self, rounding=None, context=None):
"""Rounds to a nearby integer.
If no rounding mode is specified, take the rounding mode from
the context. This method raises the Rounded and Inexact flags
when appropriate.
See also: to_integral_value, which does exactly the same as
this method except that it doesn't raise Inexact or Rounded.
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
return Decimal(self)
if self._exp >= 0:
return Decimal(self)
if not self:
return _dec_from_triple(self._sign, '0', 0)
if context is None:
context = getcontext()
if rounding is None:
rounding = context.rounding
ans = self._rescale(0, rounding)
if ans != self:
context._raise_error(Inexact)
context._raise_error(Rounded)
return ans
def to_integral_value(self, rounding=None, context=None):
Rounds to the nearest integer without raising the Inexact or Rounded flags.
if context is None:
context = getcontext()
if rounding is None:
rounding = context.rounding
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
return Decimal(self)
if self._exp >= 0:
return Decimal(self)
else:
return self._rescale(0, rounding)
# the method name changed, but we provide also the old one, for compatibility
to_integral = to_integral_value
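# Illustrative examples (editorial addition): with the default ROUND_HALF_EVEN
# rounding, ties go to the even neighbour and no flags are raised:
#   >>> Decimal('1.5').to_integral_value()
#   Decimal('2')
#   >>> Decimal('2.5').to_integral_value()
#   Decimal('2')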
def sqrt(self, context=None):
"""Return the square root of self."""
if context is None:
context = getcontext()
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if self._isinfinity() and self._sign == 0:
return Decimal(self)
if not self:
# exponent = self._exp // 2. sqrt(-0) = -0
ans = _dec_from_triple(self._sign, '0', self._exp // 2)
return ans._fix(context)
if self._sign == 1:
return context._raise_error(InvalidOperation, 'sqrt(-x), x > 0')
# At this point self represents a positive number. Let p be
# the desired precision and express self in the form c*100**e
# with c a positive real number and e an integer, c and e
# being chosen so that 100**(p-1) <= c < 100**p. Then the
# (exact) square root of self is sqrt(c)*10**e, and 10**(p-1)
# <= sqrt(c) < 10**p, so the closest representable Decimal at
# precision p is n*10**e where n = round_half_even(sqrt(c)),
# the closest integer to sqrt(c) with the even integer chosen
# in the case of a tie.
#
# To ensure correct rounding in all cases, we use the
# following trick: we compute the square root to an extra
# place (precision p+1 instead of precision p), rounding down.
# Then, if the result is inexact and its last digit is 0 or 5,
# we increase the last digit to 1 or 6 respectively; if it's
# exact we leave the last digit alone. Now the final round to
# p places (or fewer in the case of underflow) will round
# correctly and raise the appropriate flags.
# use an extra digit of precision
prec = context.prec+1
# write argument in the form c*100**e where e = self._exp//2
# is the 'ideal' exponent, to be used if the square root is
# exactly representable. l is the number of 'digits' of c in
# base 100, so that 100**(l-1) <= c < 100**l.
op = _WorkRep(self)
e = op.exp >> 1
if op.exp & 1:
c = op.int * 10
l = (len(self._int) >> 1) + 1
else:
c = op.int
l = len(self._int)+1 >> 1
# rescale so that c has exactly prec base 100 'digits'
shift = prec-l
if shift >= 0:
c *= 100**shift
exact = True
else:
c, remainder = divmod(c, 100**-shift)
exact = not remainder
e -= shift
# find n = floor(sqrt(c)) using Newton's method
n = 10**prec
while True:
q = c//n
if n <= q:
break
else:
n = n + q >> 1
exact = exact and n*n == c
if exact:
# result is exact; rescale to use ideal exponent e
if shift >= 0:
# assert n % 10**shift == 0
n //= 10**shift
else:
n *= 10**-shift
e += shift
else:
# result is not exact; fix last digit as described above
if n % 5 == 0:
n += 1
ans = _dec_from_triple(0, str(n), e)
# round, and fit to current context
context = context._shallow_copy()
rounding = context._set_rounding(ROUND_HALF_EVEN)
ans = ans._fix(context)
context.rounding = rounding
return ans
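# Illustrative examples (editorial addition): exact square roots keep the
# ideal exponent self._exp // 2, inexact ones are correctly rounded to
# context.prec digits.  Assuming the default 28-digit context:
#   >>> Decimal('1.44').sqrt()
#   Decimal('1.2')
#   >>> Decimal(2).sqrt()
#   Decimal('1.414213562373095048801688724')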
def max(self, other, context=None):
"""Returns the larger value.
Like max(self, other) except if one is not a number, returns
NaN (and signals if one is sNaN). Also rounds.
"""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
# If one operand is a quiet NaN and the other is number, then the
# number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans(other, context)
c = self._cmp(other)
if c == 0:
# If both operands are finite and equal in numerical value
# then an ordering is applied:
#
# If the signs differ then max returns the operand with the
# positive sign and min returns the operand with the negative sign
#
# If the signs are the same then the exponent is used to select
# the result. This is exactly the ordering used in compare_total.
c = self.compare_total(other)
if c == -1:
ans = other
else:
ans = self
return ans._fix(context)
def min(self, other, context=None):
"""Returns the smaller value.
Like min(self, other) except if one is not a number, returns
NaN (and signals if one is sNaN). Also rounds.
"""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
# If one operand is a quiet NaN and the other is number, then the
# number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans(other, context)
c = self._cmp(other)
if c == 0:
c = self.compare_total(other)
if c == -1:
ans = self
else:
ans = other
return ans._fix(context)
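# Illustrative examples (editorial addition): a quiet NaN loses to a number,
# and numerically equal operands are ordered by compare_total, so the exponent
# breaks the tie.  Assuming the default context:
#   >>> Decimal('NaN').max(Decimal('2.3'))
#   Decimal('2.3')
#   >>> Decimal('1.0').max(Decimal('1')), Decimal('1.0').min(Decimal('1'))
#   (Decimal('1'), Decimal('1.0'))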
def _isinteger(self):
"""Returns whether self is an integer"""
if self._is_special:
return False
if self._exp >= 0:
return True
rest = self._int[self._exp:]
return rest == '0'*len(rest)
def _iseven(self):
"""Returns True if self is even. Assumes self is an integer."""
if not self or self._exp > 0:
return True
return self._int[-1+self._exp] in '02468'
def adjusted(self):
"""Return the adjusted exponent of self"""
try:
return self._exp + len(self._int) - 1
# If NaN or Infinity, self._exp is string
except TypeError:
return 0
def canonical(self, context=None):
"""Returns the same Decimal object.
As we do not have different encodings for the same number, the
received object is already in its canonical form.
"""
return self
def compare_signal(self, other, context=None):
"""Compares self to the other operand numerically.
It's pretty much like compare(), but all NaNs signal, with signaling
NaNs taking precedence over quiet NaNs.
"""
other = _convert_other(other, raiseit = True)
ans = self._compare_check_nans(other, context)
if ans:
return ans
return self.compare(other, context=context)
def compare_total(self, other):
"""Compares self to other using the abstract representations.
This is not like the standard compare, which compares by numerical
value. Note that a total ordering is defined for all possible abstract
representations.
representations.
"""
other = _convert_other(other, raiseit=True)
# if one is negative and the other is positive, it's easy
if self._sign and not other._sign:
return _NegativeOne
if not self._sign and other._sign:
return _One
sign = self._sign
# let's handle both NaN types
self_nan = self._isnan()
other_nan = other._isnan()
if self_nan or other_nan:
if self_nan == other_nan:
# compare payloads as though they're integers
self_key = len(self._int), self._int
other_key = len(other._int), other._int
if self_key < other_key:
if sign:
return _One
else:
return _NegativeOne
if self_key > other_key:
if sign:
return _NegativeOne
else:
return _One
return _Zero
if sign:
if self_nan == 1:
return _NegativeOne
if other_nan == 1:
return _One
if self_nan == 2:
return _NegativeOne
if other_nan == 2:
return _One
else:
if self_nan == 1:
return _One
if other_nan == 1:
return _NegativeOne
if self_nan == 2:
return _One
if other_nan == 2:
return _NegativeOne
if self < other:
return _NegativeOne
if self > other:
return _One
if self._exp < other._exp:
if sign:
return _One
else:
return _NegativeOne
if self._exp > other._exp:
if sign:
return _NegativeOne
else:
return _One
return _Zero
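# Illustrative example (editorial addition): compare_total orders abstract
# representations, so numerically equal values with different exponents are
# not "equal" under it:
#   >>> Decimal('12.0').compare_total(Decimal('12'))   # smaller exponent sorts first
#   Decimal('-1')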
def compare_total_mag(self, other):
"""Compares self to other using abstract repr., ignoring sign.
Like compare_total, but with operand's sign ignored and assumed to be 0.
"""
other = _convert_other(other, raiseit=True)
s = self.copy_abs()
o = other.copy_abs()
return s.compare_total(o)
def copy_abs(self):
"""Returns a copy with the sign set to 0. """
return _dec_from_triple(0, self._int, self._exp, self._is_special)
def copy_negate(self):
"""Returns a copy with the sign inverted."""
if self._sign:
return _dec_from_triple(0, self._int, self._exp, self._is_special)
else:
return _dec_from_triple(1, self._int, self._exp, self._is_special)
def copy_sign(self, other):
"""Returns self with the sign of other."""
other = _convert_other(other, raiseit=True)
return _dec_from_triple(other._sign, self._int,
self._exp, self._is_special)
def exp(self, context=None):
"""Returns e ** self."""
if context is None:
context = getcontext()
# exp(NaN) = NaN
ans = self._check_nans(context=context)
if ans:
return ans
# exp(-Infinity) = 0
if self._isinfinity() == -1:
return _Zero
# exp(0) = 1
if not self:
return _One
# exp(Infinity) = Infinity
if self._isinfinity() == 1:
return Decimal(self)
# the result is now guaranteed to be inexact (the true
# mathematical result is transcendental). There's no need to
# raise Rounded and Inexact here---they'll always be raised as
# a result of the call to _fix.
p = context.prec
adj = self.adjusted()
# we only need to do any computation for quite a small range
# of adjusted exponents---for example, -29 <= adj <= 10 for
# the default context. For smaller exponent the result is
# indistinguishable from 1 at the given precision, while for
# larger exponent the result either overflows or underflows.
if self._sign == 0 and adj > len(str((context.Emax+1)*3)):
# overflow
ans = _dec_from_triple(0, '1', context.Emax+1)
elif self._sign == 1 and adj > len(str((-context.Etiny()+1)*3)):
# underflow to 0
ans = _dec_from_triple(0, '1', context.Etiny()-1)
elif self._sign == 0 and adj < -p:
# p+1 digits; final round will raise correct flags
ans = _dec_from_triple(0, '1' + '0'*(p-1) + '1', -p)
elif self._sign == 1 and adj < -p-1:
# p+1 digits; final round will raise correct flags
ans = _dec_from_triple(0, '9'*(p+1), -p-1)
# general case
else:
op = _WorkRep(self)
c, e = op.int, op.exp
if op.sign == 1:
c = -c
# compute correctly rounded result: increase precision by
# 3 digits at a time until we get an unambiguously
# roundable result
extra = 3
while True:
coeff, exp = _dexp(c, e, p+extra)
if coeff % (5*10**(len(str(coeff))-p-1)):
break
extra += 3
ans = _dec_from_triple(0, str(coeff), exp)
# at this stage, ans should round correctly with *any*
# rounding mode, not just with ROUND_HALF_EVEN
context = context._shallow_copy()
rounding = context._set_rounding(ROUND_HALF_EVEN)
ans = ans._fix(context)
context.rounding = rounding
return ans
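# Illustrative examples (editorial addition): exp() is correctly rounded to
# context.prec digits.  Assuming the default 28-digit context:
#   >>> Decimal(1).exp()
#   Decimal('2.718281828459045235360287471')
#   >>> Decimal('-Infinity').exp()
#   Decimal('0')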
def is_canonical(self):
"""Return True if self is canonical; otherwise return False.
Currently, the encoding of a Decimal instance is always
canonical, so this method returns True for any Decimal.
"""
return True
def is_finite(self):
"""Return True if self is finite; otherwise return False.
A Decimal instance is considered finite if it is neither
infinite nor a NaN.
"""
return not self._is_special
def is_infinite(self):
"""Return True if self is infinite; otherwise return False."""
return self._exp == 'F'
def is_nan(self):
"""Return True if self is a qNaN or sNaN; otherwise return False."""
return self._exp in ('n', 'N')
def is_normal(self, context=None):
"""Return True if self is a normal number; otherwise return False."""
if self._is_special or not self:
return False
if context is None:
context = getcontext()
return context.Emin <= self.adjusted()
def is_qnan(self):
"""Return True if self is a quiet NaN; otherwise return False."""
return self._exp == 'n'
def is_signed(self):
"""Return True if self is negative; otherwise return False."""
return self._sign == 1
def is_snan(self):
"""Return True if self is a signaling NaN; otherwise return False."""
return self._exp == 'N'
def is_subnormal(self, context=None):
"""Return True if self is subnormal; otherwise return False."""
if self._is_special or not self:
return False
if context is None:
context = getcontext()
return self.adjusted() < context.Emin
def is_zero(self):
"""Return True if self is a zero; otherwise return False."""
return not self._is_special and self._int == '0'
def _ln_exp_bound(self):
"""Compute a lower bound for the adjusted exponent of self.ln().
In other words, compute r such that self.ln() >= 10**r. Assumes
that self is finite and positive and that self != 1.
"""
# for 0.1 <= x <= 10 we use the inequalities 1-1/x <= ln(x) <= x-1
adj = self._exp + len(self._int) - 1
if adj >= 1:
# argument >= 10; we use 23/10 = 2.3 as a lower bound for ln(10)
return len(str(adj*23//10)) - 1
if adj <= -2:
# argument <= 0.1
return len(str((-1-adj)*23//10)) - 1
op = _WorkRep(self)
c, e = op.int, op.exp
if adj == 0:
# 1 < self < 10
num = str(c-10**-e)
den = str(c)
return len(num) - len(den) - (num < den)
# adj == -1, 0.1 <= self < 1
return e + len(str(10**-e - c)) - 1
def ln(self, context=None):
"""Returns the natural (base e) logarithm of self."""
if context is None:
context = getcontext()
# ln(NaN) = NaN
ans = self._check_nans(context=context)
if ans:
return ans
# ln(0.0) == -Infinity
if not self:
return _NegativeInfinity
# ln(Infinity) = Infinity
if self._isinfinity() == 1:
return _Infinity
# ln(1.0) == 0.0
if self == _One:
return _Zero
# ln(negative) raises InvalidOperation
if self._sign == 1:
return context._raise_error(InvalidOperation,
'ln of a negative value')
# result is irrational, so necessarily inexact
op = _WorkRep(self)
c, e = op.int, op.exp
p = context.prec
# correctly rounded result: repeatedly increase precision by 3
# until we get an unambiguously roundable result
places = p - self._ln_exp_bound() + 2 # at least p+3 places
while True:
coeff = _dlog(c, e, places)
# assert len(str(abs(coeff)))-p >= 1
if coeff % (5*10**(len(str(abs(coeff)))-p-1)):
break
places += 3
ans = _dec_from_triple(int(coeff<0), str(abs(coeff)), -places)
context = context._shallow_copy()
rounding = context._set_rounding(ROUND_HALF_EVEN)
ans = ans._fix(context)
context.rounding = rounding
return ans
def _log10_exp_bound(self):
"""Compute a lower bound for the adjusted exponent of self.log10().
In other words, find r such that self.log10() >= 10**r.
Assumes that self is finite and positive and that self != 1.
"""
# For x >= 10 or x < 0.1 we only need a bound on the integer
# part of log10(self), and this comes directly from the
# exponent of x. For 0.1 <= x <= 10 we use the inequalities
# 1-1/x <= log(x) <= x-1. If x > 1 we have |log10(x)| >
# (1-1/x)/2.31 > 0. If x < 1 then |log10(x)| > (1-x)/2.31 > 0
adj = self._exp + len(self._int) - 1
if adj >= 1:
# self >= 10
return len(str(adj))-1
if adj <= -2:
# self < 0.1
return len(str(-1-adj))-1
op = _WorkRep(self)
c, e = op.int, op.exp
if adj == 0:
# 1 < self < 10
num = str(c-10**-e)
den = str(231*c)
return len(num) - len(den) - (num < den) + 2
# adj == -1, 0.1 <= self < 1
num = str(10**-e-c)
return len(num) + e - (num < "231") - 1
def log10(self, context=None):
"""Returns the base 10 logarithm of self."""
if context is None:
context = getcontext()
# log10(NaN) = NaN
ans = self._check_nans(context=context)
if ans:
return ans
# log10(0.0) == -Infinity
if not self:
return _NegativeInfinity
# log10(Infinity) = Infinity
if self._isinfinity() == 1:
return _Infinity
# log10(negative or -Infinity) raises InvalidOperation
if self._sign == 1:
return context._raise_error(InvalidOperation,
'log10 of a negative value')
# log10(10**n) = n
if self._int[0] == '1' and self._int[1:] == '0'*(len(self._int) - 1):
# answer may need rounding
ans = Decimal(self._exp + len(self._int) - 1)
else:
# result is irrational, so necessarily inexact
op = _WorkRep(self)
c, e = op.int, op.exp
p = context.prec
# correctly rounded result: repeatedly increase precision
# until result is unambiguously roundable
places = p-self._log10_exp_bound()+2
while True:
coeff = _dlog10(c, e, places)
# assert len(str(abs(coeff)))-p >= 1
if coeff % (5*10**(len(str(abs(coeff)))-p-1)):
break
places += 3
ans = _dec_from_triple(int(coeff<0), str(abs(coeff)), -places)
context = context._shallow_copy()
rounding = context._set_rounding(ROUND_HALF_EVEN)
ans = ans._fix(context)
context.rounding = rounding
return ans
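# Illustrative examples (editorial addition): exact powers of ten take the
# shortcut in log10 above; everything else is correctly rounded.  Assuming the
# default 28-digit context:
#   >>> Decimal(10).ln()
#   Decimal('2.302585092994045684017991455')
#   >>> Decimal('1000').log10()
#   Decimal('3')
#   >>> Decimal('0.001').log10()
#   Decimal('-3')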
def logb(self, context=None):
""" Returns the exponent of the magnitude of self's MSD.
The result is the integer which is the exponent of the magnitude
of the most significant digit of self (as though it were truncated
to a single digit while maintaining the value of that digit and
without limiting the resulting exponent).
"""
# logb(NaN) = NaN
ans = self._check_nans(context=context)
if ans:
return ans
if context is None:
context = getcontext()
# logb(+/-Inf) = +Inf
if self._isinfinity():
return _Infinity
# logb(0) = -Inf, DivisionByZero
if not self:
return context._raise_error(DivisionByZero, 'logb(0)', 1)
# otherwise, simply return the adjusted exponent of self, as a
# Decimal. Note that no attempt is made to fit the result
# into the current context.
ans = Decimal(self.adjusted())
return ans._fix(context)
def _islogical(self):
"""Return True if self is a logical operand.
To be a logical operand, self must be a finite number with a sign of 0,
an exponent of 0, and a coefficient whose digits are all either 0 or 1.
"""
if self._sign != 0 or self._exp != 0:
return False
for dig in self._int:
if dig not in '01':
return False
return True
def _fill_logical(self, context, opa, opb):
dif = context.prec - len(opa)
if dif > 0:
opa = '0'*dif + opa
elif dif < 0:
opa = opa[-context.prec:]
dif = context.prec - len(opb)
if dif > 0:
opb = '0'*dif + opb
elif dif < 0:
opb = opb[-context.prec:]
return opa, opb
def logical_and(self, other, context=None):
"""Applies an 'and' operation between self and other's digits."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
if not self._islogical() or not other._islogical():
return context._raise_error(InvalidOperation)
# fill to context.prec
(opa, opb) = self._fill_logical(context, self._int, other._int)
# make the operation, and clean starting zeroes
result = "".join([str(int(a)&int(b)) for a,b in zip(opa,opb)])
return _dec_from_triple(0, result.lstrip('0') or '0', 0)
def logical_invert(self, context=None):
"""Invert all its digits."""
if context is None:
context = getcontext()
return self.logical_xor(_dec_from_triple(0,'1'*context.prec,0),
context)
def logical_or(self, other, context=None):
"""Applies an 'or' operation between self and other's digits."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
if not self._islogical() or not other._islogical():
return context._raise_error(InvalidOperation)
# fill to context.prec
(opa, opb) = self._fill_logical(context, self._int, other._int)
# make the operation, and clean starting zeroes
result = "".join([str(int(a)|int(b)) for a,b in zip(opa,opb)])
return _dec_from_triple(0, result.lstrip('0') or '0', 0)
def logical_xor(self, other, context=None):
"""Applies an 'xor' operation between self and other's digits."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
if not self._islogical() or not other._islogical():
return context._raise_error(InvalidOperation)
# fill to context.prec
(opa, opb) = self._fill_logical(context, self._int, other._int)
# make the operation, and clean starting zeroes
result = "".join([str(int(a)^int(b)) for a,b in zip(opa,opb)])
return _dec_from_triple(0, result.lstrip('0') or '0', 0)
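# Digit-by-digit sketch (results are independent of precision here, provided
# prec >= 4): Decimal('1100').logical_and(Decimal('1010')) is Decimal('1000'),
# logical_or gives Decimal('1110') and logical_xor gives Decimal('110');
# leading zeros produced by the operation are stripped.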
def max_mag(self, other, context=None):
"""Compares the values numerically with their sign ignored."""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
# If one operand is a quiet NaN and the other is a number, then the
# number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans(other, context)
c = self.copy_abs()._cmp(other.copy_abs())
if c == 0:
c = self.compare_total(other)
if c == -1:
ans = other
else:
ans = self
return ans._fix(context)
def min_mag(self, other, context=None):
"""Compares the values numerically with their sign ignored."""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
# If one operand is a quiet NaN and the other is a number, then the
# number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans(other, context)
c = self.copy_abs()._cmp(other.copy_abs())
if c == 0:
c = self.compare_total(other)
if c == -1:
ans = self
else:
ans = other
return ans._fix(context)
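# NaN handling in max_mag/min_mag above: when exactly one operand is a quiet
# NaN, the other operand is returned, e.g. Decimal('7').max_mag(Decimal('NaN'))
# is Decimal('7'); two NaNs, or any signaling NaN, fall through to
# _check_nans and follow the usual NaN rules.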
def next_minus(self, context=None):
"""Returns the largest representable number smaller than itself."""
if context is None:
context = getcontext()
ans = self._check_nans(context=context)
if ans:
return ans
if self._isinfinity() == -1:
return _NegativeInfinity
if self._isinfinity() == 1:
return _dec_from_triple(0, '9'*context.prec, context.Etop())
context = context.copy()
context._set_rounding(ROUND_FLOOR)
context._ignore_all_flags()
new_self = self._fix(context)
if new_self != self:
return new_self
return self.__sub__(_dec_from_triple(0, '1', context.Etiny()-1),
context)
def next_plus(self, context=None):
"""Returns the smallest representable number larger than itself."""
if context is None:
context = getcontext()
ans = self._check_nans(context=context)
if ans:
return ans
if self._isinfinity() == 1:
return _Infinity
if self._isinfinity() == -1:
return _dec_from_triple(1, '9'*context.prec, context.Etop())
context = context.copy()
context._set_rounding(ROUND_CEILING)
context._ignore_all_flags()
new_self = self._fix(context)
if new_self != self:
return new_self
return self.__add__(_dec_from_triple(0, '1', context.Etiny()-1),
context)
def next_toward(self, other, context=None):
"""Returns the number closest to self, in the direction towards other.
The result is the closest representable number to self
(excluding self) that is in the direction towards other,
unless both have the same value. If the two operands are
numerically equal, then the result is a copy of self with the
sign set to be the same as the sign of other.
"""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return ans
comparison = self._cmp(other)
if comparison == 0:
return self.copy_sign(other)
if comparison == -1:
ans = self.next_plus(context)
else: # comparison == 1
ans = self.next_minus(context)
# decide which flags to raise using value of ans
if ans._isinfinity():
context._raise_error(Overflow,
'Infinite result from next_toward',
ans._sign)
context._raise_error(Inexact)
context._raise_error(Rounded)
elif ans.adjusted() < context.Emin:
context._raise_error(Underflow)
context._raise_error(Subnormal)
context._raise_error(Inexact)
context._raise_error(Rounded)
# if precision == 1 then we don't raise Clamped for a
# result 0E-Etiny.
if not ans:
context._raise_error(Clamped)
return ans
def number_class(self, context=None):
"""Returns an indication of the class of self.
The class is one of the following strings:
sNaN
NaN
-Infinity
-Normal
-Subnormal
-Zero
+Zero
+Subnormal
+Normal
+Infinity
"""
if self.is_snan():
return "sNaN"
if self.is_qnan():
return "NaN"
inf = self._isinfinity()
if inf == 1:
return "+Infinity"
if inf == -1:
return "-Infinity"
if self.is_zero():
if self._sign:
return "-Zero"
else:
return "+Zero"
if context is None:
context = getcontext()
if self.is_subnormal(context=context):
if self._sign:
return "-Subnormal"
else:
return "+Subnormal"
# just a normal, regular, boring number, :)
if self._sign:
return "-Normal"
else:
return "+Normal"
def radix(self):
"""Just returns 10, as this is Decimal, :)"""
return Decimal(10)
def rotate(self, other, context=None):
"""Returns a rotated copy of self, value-of-other times."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
ans = self._check_nans(other, context)
if ans:
return ans
if other._exp != 0:
return context._raise_error(InvalidOperation)
if not (-context.prec <= int(other) <= context.prec):
return context._raise_error(InvalidOperation)
if self._isinfinity():
return Decimal(self)
# get values, pad if necessary
torot = int(other)
rotdig = self._int
topad = context.prec - len(rotdig)
if topad > 0:
rotdig = '0'*topad + rotdig
elif topad < 0:
rotdig = rotdig[-topad:]
# let's rotate!
rotated = rotdig[torot:] + rotdig[:torot]
return _dec_from_triple(self._sign,
rotated.lstrip('0') or '0', self._exp)
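# Rotation sketch (assuming a 9-digit context such as ExtendedContext,
# defined later in this file): Decimal('123456789').rotate(2) gives
# Decimal('345678912') and Decimal('123456789').rotate(-2) gives
# Decimal('891234567'); the coefficient is padded or truncated to
# context.prec digits before rotating.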
def scaleb(self, other, context=None):
"""Returns self operand after adding the second value to its exp."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
ans = self._check_nans(other, context)
if ans:
return ans
if other._exp != 0:
return context._raise_error(InvalidOperation)
liminf = -2 * (context.Emax + context.prec)
limsup = 2 * (context.Emax + context.prec)
if not (liminf <= int(other) <= limsup):
return context._raise_error(InvalidOperation)
if self._isinfinity():
return Decimal(self)
d = _dec_from_triple(self._sign, self._int, self._exp + int(other))
d = d._fix(context)
return d
def shift(self, other, context=None):
"""Returns a shifted copy of self, value-of-other times."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
ans = self._check_nans(other, context)
if ans:
return ans
if other._exp != 0:
return context._raise_error(InvalidOperation)
if not (-context.prec <= int(other) <= context.prec):
return context._raise_error(InvalidOperation)
if self._isinfinity():
return Decimal(self)
# get values, pad if necessary
torot = int(other)
rotdig = self._int
topad = context.prec - len(rotdig)
if topad > 0:
rotdig = '0'*topad + rotdig
elif topad < 0:
rotdig = rotdig[-topad:]
# let's shift!
if torot < 0:
shifted = rotdig[:torot]
else:
shifted = rotdig + '0'*torot
shifted = shifted[-context.prec:]
return _dec_from_triple(self._sign,
shifted.lstrip('0') or '0', self._exp)
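# Shift sketch (9-digit context): Decimal('123456789').shift(2) gives
# Decimal('345678900') (zeros enter on the right and the result is truncated
# to prec digits), while Decimal('123456789').shift(-2) gives
# Decimal('1234567').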
# Support for pickling, copy, and deepcopy
def __reduce__(self):
return (self.__class__, (str(self),))
def __copy__(self):
if type(self) is Decimal:
return self # I'm immutable; therefore I am my own clone
return self.__class__(str(self))
def __deepcopy__(self, memo):
if type(self) is Decimal:
return self # My components are also immutable
return self.__class__(str(self))
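# Because Decimal instances are immutable, copy.copy() and copy.deepcopy()
# return the instance itself for the base Decimal class; subclass instances
# are rebuilt from their string form instead.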
# PEP 3101 support. The _localeconv keyword argument should be
# considered private: it's provided for ease of testing only.
def __format__(self, specifier, context=None, _localeconv=None):
"""Format a Decimal instance according to the given specifier.
The specifier should be a standard format specifier, with the
form described in PEP 3101. Formatting types 'e', 'E', 'f',
'F', 'g', 'G', 'n' and '%' are supported. If the formatting
type is omitted it defaults to 'g' or 'G', depending on the
value of context.capitals.
"""
# Note: PEP 3101 says that if the type is not present then
# there should be at least one digit after the decimal point.
# We take the liberty of ignoring this requirement for
# Decimal---it's presumably there to make sure that
# format(float, '') behaves similarly to str(float).
if context is None:
context = getcontext()
spec = _parse_format_specifier(specifier, _localeconv=_localeconv)
# special values don't care about the type or precision
if self._is_special:
sign = _format_sign(self._sign, spec)
body = str(self.copy_abs())
return _format_align(sign, body, spec)
# a type of None defaults to 'g' or 'G', depending on context
if spec['type'] is None:
spec['type'] = ['g', 'G'][context.capitals]
# if type is '%', adjust exponent of self accordingly
if spec['type'] == '%':
self = _dec_from_triple(self._sign, self._int, self._exp+2)
# round if necessary, taking rounding mode from the context
rounding = context.rounding
precision = spec['precision']
if precision is not None:
if spec['type'] in 'eE':
self = self._round(precision+1, rounding)
elif spec['type'] in 'fF%':
self = self._rescale(-precision, rounding)
elif spec['type'] in 'gG' and len(self._int) > precision:
self = self._round(precision, rounding)
# special case: zeros with a positive exponent can't be
# represented in fixed point; rescale them to 0e0.
if not self and self._exp > 0 and spec['type'] in 'fF%':
self = self._rescale(0, rounding)
# figure out placement of the decimal point
leftdigits = self._exp + len(self._int)
if spec['type'] in 'eE':
if not self and precision is not None:
dotplace = 1 - precision
else:
dotplace = 1
elif spec['type'] in 'fF%':
dotplace = leftdigits
elif spec['type'] in 'gG':
if self._exp <= 0 and leftdigits > -6:
dotplace = leftdigits
else:
dotplace = 1
# find digits before and after decimal point, and get exponent
if dotplace < 0:
intpart = '0'
fracpart = '0'*(-dotplace) + self._int
elif dotplace > len(self._int):
intpart = self._int + '0'*(dotplace-len(self._int))
fracpart = ''
else:
intpart = self._int[:dotplace] or '0'
fracpart = self._int[dotplace:]
exp = leftdigits-dotplace
# done with the decimal-specific stuff; hand over the rest
# of the formatting to the _format_number function
return _format_number(self._sign, intpart, fracpart, exp, spec)
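# Formatting sketch (assumes the default ROUND_HALF_EVEN rounding):
# format(Decimal('1234.5678'), '.2f') gives '1234.57' and
# format(Decimal('1234.5678'), '.2e') gives '1.23e+3'; with no type code the
# value is formatted as 'g' or 'G' depending on context.capitals.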
def _dec_from_triple(sign, coefficient, exponent, special=False):
"""Create a decimal instance directly, without any validation,
normalization (e.g. removal of leading zeros) or argument
conversion.
This function is for *internal use only*.
"""
self = object.__new__(Decimal)
self._sign = sign
self._int = coefficient
self._exp = exponent
self._is_special = special
return self
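# For example, _dec_from_triple(1, '15', -1) builds the value -1.5 without
# any of the validation done by Decimal.__new__; callers must supply a
# well-formed sign/coefficient/exponent triple.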
# Register Decimal as a kind of Number (an abstract base class).
# However, do not register it as Real (because Decimals are not
# interoperable with floats).
_numbers.Number.register(Decimal)
##### Context class #######################################################
class _ContextManager(object):
"""Context manager class to support localcontext().
Sets a copy of the supplied context in __enter__() and restores
the previous decimal context in __exit__()
"""
def __init__(self, new_context):
self.new_context = new_context.copy()
def __enter__(self):
self.saved_context = getcontext()
setcontext(self.new_context)
return self.new_context
def __exit__(self, t, v, tb):
setcontext(self.saved_context)
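# Typical use is through the module-level localcontext() helper defined
# elsewhere in this file, for example:
#
#     with localcontext(Context(prec=5)) as ctx:
#         ...   # arithmetic here uses the temporary 5-digit context
#     # the previous context is restored on exit, even after an exception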
class Context(object):
"""Contains the context for a Decimal instance.
Contains:
prec - precision (for use in rounding, division, square roots..)
rounding - rounding type (how you round)
traps - If traps[exception] = 1, then the exception is
raised when it is caused. Otherwise, a value is
substituted in.
flags - When an exception is caused, flags[exception] is set.
(Whether or not the trap_enabler is set)
Should be reset by user of Decimal instance.
Emin - Minimum exponent
Emax - Maximum exponent
capitals - If 1, 1*10^1 is printed as 1E+1.
If 0, printed as 1e+1
_clamp - If 1, change exponents if too high (Default 0)
"""
def __init__(self, prec=None, rounding=None,
traps=None, flags=None,
Emin=None, Emax=None,
capitals=None, _clamp=0,
_ignored_flags=None):
# Set defaults; for everything except flags and _ignored_flags,
# inherit from DefaultContext.
try:
dc = DefaultContext
except NameError:
pass
self.prec = prec if prec is not None else dc.prec
self.rounding = rounding if rounding is not None else dc.rounding
self.Emin = Emin if Emin is not None else dc.Emin
self.Emax = Emax if Emax is not None else dc.Emax
self.capitals = capitals if capitals is not None else dc.capitals
self._clamp = _clamp if _clamp is not None else dc._clamp
if _ignored_flags is None:
self._ignored_flags = []
else:
self._ignored_flags = _ignored_flags
if traps is None:
self.traps = dc.traps.copy()
elif not isinstance(traps, dict):
self.traps = dict((s, int(s in traps)) for s in _signals)
else:
self.traps = traps
if flags is None:
self.flags = dict.fromkeys(_signals, 0)
elif not isinstance(flags, dict):
self.flags = dict((s, int(s in flags)) for s in _signals)
else:
self.flags = flags
def __repr__(self):
"""Show the current context."""
s = []
s.append('Context(prec=%(prec)d, rounding=%(rounding)s, '
'Emin=%(Emin)d, Emax=%(Emax)d, capitals=%(capitals)d'
% vars(self))
names = [f.__name__ for f, v in self.flags.items() if v]
s.append('flags=[' + ', '.join(names) + ']')
names = [t.__name__ for t, v in self.traps.items() if v]
s.append('traps=[' + ', '.join(names) + ']')
return ', '.join(s) + ')'
def clear_flags(self):
"""Reset all flags to zero"""
for flag in self.flags:
self.flags[flag] = 0
def _shallow_copy(self):
"""Returns a shallow copy from self."""
nc = Context(self.prec, self.rounding, self.traps,
self.flags, self.Emin, self.Emax,
self.capitals, self._clamp, self._ignored_flags)
return nc
def copy(self):
"""Returns a deep copy from self."""
nc = Context(self.prec, self.rounding, self.traps.copy(),
self.flags.copy(), self.Emin, self.Emax,
self.capitals, self._clamp, self._ignored_flags)
return nc
__copy__ = copy
def _raise_error(self, condition, explanation = None, *args):
"""Handles an error
If the flag is in _ignored_flags, returns the default response.
Otherwise, it sets the flag, then, if the corresponding
trap_enabler is set, it reraises the exception. Otherwise, it returns
the default value after setting the flag.
"""
error = _condition_map.get(condition, condition)
if error in self._ignored_flags:
# Don't touch the flag
return error().handle(self, *args)
self.flags[error] = 1
if not self.traps[error]:
# The errors define how to handle themselves.
return condition().handle(self, *args)
# Errors should only be risked on copies of the context
# self._ignored_flags = []
raise error(explanation)
def _ignore_all_flags(self):
"""Ignore all flags, if they are raised"""
return self._ignore_flags(*_signals)
def _ignore_flags(self, *flags):
"""Ignore the flags, if they are raised"""
# Do not mutate-- This way, copies of a context leave the original
# alone.
self._ignored_flags = (self._ignored_flags + list(flags))
return list(flags)
def _regard_flags(self, *flags):
"""Stop ignoring the flags, if they are raised"""
if flags and isinstance(flags[0], (tuple,list)):
flags = flags[0]
for flag in flags:
self._ignored_flags.remove(flag)
# We inherit object.__hash__, so we must deny this explicitly
__hash__ = None
def Etiny(self):
"""Returns Etiny (= Emin - prec + 1)"""
return int(self.Emin - self.prec + 1)
def Etop(self):
"""Returns maximum exponent (= Emax - prec + 1)"""
return int(self.Emax - self.prec + 1)
def _set_rounding(self, type):
"""Sets the rounding type.
Sets the rounding type, and returns the current (previous)
rounding type. Often used like:
context = context.copy()
# so you don't change the calling context
# if an error occurs in the middle.
rounding = context._set_rounding(ROUND_UP)
val = self.__sub__(other, context=context)
context._set_rounding(rounding)
This will make it round up for that operation.
"""
rounding = self.rounding
self.rounding = type
return rounding
def create_decimal(self, num='0'):
"""Creates a new Decimal instance but using self as context.
This method implements the to-number operation of the
IBM Decimal specification."""
if isinstance(num, basestring) and num != num.strip():
return self._raise_error(ConversionSyntax,
"no trailing or leading whitespace is "
"permitted.")
d = Decimal(num, context=self)
if d._isnan() and len(d._int) > self.prec - self._clamp:
return self._raise_error(ConversionSyntax,
"diagnostic info too long in NaN")
return d._fix(self)
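# Conversion sketch (assumes ROUND_HALF_EVEN, the DefaultContext rounding):
# Context(prec=5).create_decimal('3.14159265') gives Decimal('3.1416'),
# while whitespace-padded input is rejected through the ConversionSyntax
# path above.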
def create_decimal_from_float(self, f):
"""Creates a new Decimal instance from a float but rounding using self
as the context.
>>> context = Context(prec=5, rounding=ROUND_DOWN)
>>> context.create_decimal_from_float(3.1415926535897932)
Decimal('3.1415')
>>> context = Context(prec=5, traps=[Inexact])
>>> context.create_decimal_from_float(3.1415926535897932)
Traceback (most recent call last):
...
Inexact: None
"""
d = Decimal.from_float(f) # An exact conversion
return d._fix(self) # Apply the context rounding
# Methods
def abs(self, a):
"""Returns the absolute value of the operand.
If the operand is negative, the result is the same as using the minus
operation on the operand. Otherwise, the result is the same as using
the plus operation on the operand.
>>> ExtendedContext.abs(Decimal('2.1'))
Decimal('2.1')
>>> ExtendedContext.abs(Decimal('-100'))
Decimal('100')
>>> ExtendedContext.abs(Decimal('101.5'))
Decimal('101.5')
>>> ExtendedContext.abs(Decimal('-101.5'))
Decimal('101.5')
>>> ExtendedContext.abs(-1)
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.__abs__(context=self)
def add(self, a, b):
"""Return the sum of the two operands.
>>> ExtendedContext.add(Decimal('12'), Decimal('7.00'))
Decimal('19.00')
>>> ExtendedContext.add(Decimal('1E+2'), Decimal('1.01E+4'))
Decimal('1.02E+4')
>>> ExtendedContext.add(1, Decimal(2))
Decimal('3')
>>> ExtendedContext.add(Decimal(8), 5)
Decimal('13')
>>> ExtendedContext.add(5, 5)
Decimal('10')
"""
a = _convert_other(a, raiseit=True)
r = a.__add__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def _apply(self, a):
return str(a._fix(self))
def canonical(self, a):
"""Returns the same Decimal object.
As we do not have different encodings for the same number, the
received object already is in its canonical form.
>>> ExtendedContext.canonical(Decimal('2.50'))
Decimal('2.50')
"""
return a.canonical(context=self)
def compare(self, a, b):
"""Compares values numerically.
If the signs of the operands differ, a value representing each operand
('-1' if the operand is less than zero, '0' if the operand is zero or
negative zero, or '1' if the operand is greater than zero) is used in
place of that operand for the comparison instead of the actual
operand.
The comparison is then effected by subtracting the second operand from
the first and then returning a value according to the result of the
subtraction: '-1' if the result is less than zero, '0' if the result is
zero or negative zero, or '1' if the result is greater than zero.
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('3'))
Decimal('-1')
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.1'))
Decimal('0')
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.10'))
Decimal('0')
>>> ExtendedContext.compare(Decimal('3'), Decimal('2.1'))
Decimal('1')
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('-3'))
Decimal('1')
>>> ExtendedContext.compare(Decimal('-3'), Decimal('2.1'))
Decimal('-1')
>>> ExtendedContext.compare(1, 2)
Decimal('-1')
>>> ExtendedContext.compare(Decimal(1), 2)
Decimal('-1')
>>> ExtendedContext.compare(1, Decimal(2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.compare(b, context=self)
def compare_signal(self, a, b):
"""Compares the values of the two operands numerically.
It's pretty much like compare(), but all NaNs signal, with signaling
NaNs taking precedence over quiet NaNs.
>>> c = ExtendedContext
>>> c.compare_signal(Decimal('2.1'), Decimal('3'))
Decimal('-1')
>>> c.compare_signal(Decimal('2.1'), Decimal('2.1'))
Decimal('0')
>>> c.flags[InvalidOperation] = 0
>>> print c.flags[InvalidOperation]
0
>>> c.compare_signal(Decimal('NaN'), Decimal('2.1'))
Decimal('NaN')
>>> print c.flags[InvalidOperation]
1
>>> c.flags[InvalidOperation] = 0
>>> print c.flags[InvalidOperation]
0
>>> c.compare_signal(Decimal('sNaN'), Decimal('2.1'))
Decimal('NaN')
>>> print c.flags[InvalidOperation]
1
>>> c.compare_signal(-1, 2)
Decimal('-1')
>>> c.compare_signal(Decimal(-1), 2)
Decimal('-1')
>>> c.compare_signal(-1, Decimal(2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.compare_signal(b, context=self)
def compare_total(self, a, b):
"""Compares two operands using their abstract representation.
This is not like the standard compare, which uses the operands' numerical
values. Note that a total ordering is defined for all possible abstract
representations.
>>> ExtendedContext.compare_total(Decimal('12.73'), Decimal('127.9'))
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal('-127'), Decimal('12'))
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal('12.30'), Decimal('12.3'))
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal('12.30'), Decimal('12.30'))
Decimal('0')
>>> ExtendedContext.compare_total(Decimal('12.3'), Decimal('12.300'))
Decimal('1')
>>> ExtendedContext.compare_total(Decimal('12.3'), Decimal('NaN'))
Decimal('-1')
>>> ExtendedContext.compare_total(1, 2)
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal(1), 2)
Decimal('-1')
>>> ExtendedContext.compare_total(1, Decimal(2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.compare_total(b)
def compare_total_mag(self, a, b):
"""Compares two operands using their abstract representation ignoring sign.
Like compare_total, but with the operands' signs ignored and assumed to be 0.
"""
a = _convert_other(a, raiseit=True)
return a.compare_total_mag(b)
def copy_abs(self, a):
"""Returns a copy of the operand with the sign set to 0.
>>> ExtendedContext.copy_abs(Decimal('2.1'))
Decimal('2.1')
>>> ExtendedContext.copy_abs(Decimal('-100'))
Decimal('100')
>>> ExtendedContext.copy_abs(-1)
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.copy_abs()
def copy_decimal(self, a):
"""Returns a copy of the decimal object.
>>> ExtendedContext.copy_decimal(Decimal('2.1'))
Decimal('2.1')
>>> ExtendedContext.copy_decimal(Decimal('-1.00'))
Decimal('-1.00')
>>> ExtendedContext.copy_decimal(1)
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return Decimal(a)
def copy_negate(self, a):
"""Returns a copy of the operand with the sign inverted.
>>> ExtendedContext.copy_negate(Decimal('101.5'))
Decimal('-101.5')
>>> ExtendedContext.copy_negate(Decimal('-101.5'))
Decimal('101.5')
>>> ExtendedContext.copy_negate(1)
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.copy_negate()
def copy_sign(self, a, b):
"""Copies the second operand's sign to the first one.
In detail, it returns a copy of the first operand with the sign
equal to the sign of the second operand.
>>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('7.33'))
Decimal('1.50')
>>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('7.33'))
Decimal('1.50')
>>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('-7.33'))
Decimal('-1.50')
>>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('-7.33'))
Decimal('-1.50')
>>> ExtendedContext.copy_sign(1, -2)
Decimal('-1')
>>> ExtendedContext.copy_sign(Decimal(1), -2)
Decimal('-1')
>>> ExtendedContext.copy_sign(1, Decimal(-2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.copy_sign(b)
def divide(self, a, b):
"""Decimal division in a specified context.
>>> ExtendedContext.divide(Decimal('1'), Decimal('3'))
Decimal('0.333333333')
>>> ExtendedContext.divide(Decimal('2'), Decimal('3'))
Decimal('0.666666667')
>>> ExtendedContext.divide(Decimal('5'), Decimal('2'))
Decimal('2.5')
>>> ExtendedContext.divide(Decimal('1'), Decimal('10'))
Decimal('0.1')
>>> ExtendedContext.divide(Decimal('12'), Decimal('12'))
Decimal('1')
>>> ExtendedContext.divide(Decimal('8.00'), Decimal('2'))
Decimal('4.00')
>>> ExtendedContext.divide(Decimal('2.400'), Decimal('2.0'))
Decimal('1.20')
>>> ExtendedContext.divide(Decimal('1000'), Decimal('100'))
Decimal('10')
>>> ExtendedContext.divide(Decimal('1000'), Decimal('1'))
Decimal('1000')
>>> ExtendedContext.divide(Decimal('2.40E+6'), Decimal('2'))
Decimal('1.20E+6')
>>> ExtendedContext.divide(5, 5)
Decimal('1')
>>> ExtendedContext.divide(Decimal(5), 5)
Decimal('1')
>>> ExtendedContext.divide(5, Decimal(5))
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
r = a.__div__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def divide_int(self, a, b):
"""Divides two numbers and returns the integer part of the result.
>>> ExtendedContext.divide_int(Decimal('2'), Decimal('3'))
Decimal('0')
>>> ExtendedContext.divide_int(Decimal('10'), Decimal('3'))
Decimal('3')
>>> ExtendedContext.divide_int(Decimal('1'), Decimal('0.3'))
Decimal('3')
>>> ExtendedContext.divide_int(10, 3)
Decimal('3')
>>> ExtendedContext.divide_int(Decimal(10), 3)
Decimal('3')
>>> ExtendedContext.divide_int(10, Decimal(3))
Decimal('3')
"""
a = _convert_other(a, raiseit=True)
r = a.__floordiv__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def divmod(self, a, b):
"""Return (a // b, a % b).
>>> ExtendedContext.divmod(Decimal(8), Decimal(3))
(Decimal('2'), Decimal('2'))
>>> ExtendedContext.divmod(Decimal(8), Decimal(4))
(Decimal('2'), Decimal('0'))
>>> ExtendedContext.divmod(8, 4)
(Decimal('2'), Decimal('0'))
>>> ExtendedContext.divmod(Decimal(8), 4)
(Decimal('2'), Decimal('0'))
>>> ExtendedContext.divmod(8, Decimal(4))
(Decimal('2'), Decimal('0'))
"""
a = _convert_other(a, raiseit=True)
r = a.__divmod__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def exp(self, a):
"""Returns e ** a.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.exp(Decimal('-Infinity'))
Decimal('0')
>>> c.exp(Decimal('-1'))
Decimal('0.367879441')
>>> c.exp(Decimal('0'))
Decimal('1')
>>> c.exp(Decimal('1'))
Decimal('2.71828183')
>>> c.exp(Decimal('0.693147181'))
Decimal('2.00000000')
>>> c.exp(Decimal('+Infinity'))
Decimal('Infinity')
>>> c.exp(10)
Decimal('22026.4658')
"""
a = _convert_other(a, raiseit=True)
return a.exp(context=self)
def fma(self, a, b, c):
"""Returns a multiplied by b, plus c.
The first two operands are multiplied together, using multiply,
the third operand is then added to the result of that
multiplication, using add, all with only one final rounding.
>>> ExtendedContext.fma(Decimal('3'), Decimal('5'), Decimal('7'))
Decimal('22')
>>> ExtendedContext.fma(Decimal('3'), Decimal('-5'), Decimal('7'))
Decimal('-8')
>>> ExtendedContext.fma(Decimal('888565290'), Decimal('1557.96930'), Decimal('-86087.7578'))
Decimal('1.38435736E+12')
>>> ExtendedContext.fma(1, 3, 4)
Decimal('7')
>>> ExtendedContext.fma(1, Decimal(3), 4)
Decimal('7')
>>> ExtendedContext.fma(1, 3, Decimal(4))
Decimal('7')
"""
a = _convert_other(a, raiseit=True)
return a.fma(b, c, context=self)
def is_canonical(self, a):
"""Return True if the operand is canonical; otherwise return False.
Currently, the encoding of a Decimal instance is always
canonical, so this method returns True for any Decimal.
>>> ExtendedContext.is_canonical(Decimal('2.50'))
True
"""
return a.is_canonical()
def is_finite(self, a):
"""Return True if the operand is finite; otherwise return False.
A Decimal instance is considered finite if it is neither
infinite nor a NaN.
>>> ExtendedContext.is_finite(Decimal('2.50'))
True
>>> ExtendedContext.is_finite(Decimal('-0.3'))
True
>>> ExtendedContext.is_finite(Decimal('0'))
True
>>> ExtendedContext.is_finite(Decimal('Inf'))
False
>>> ExtendedContext.is_finite(Decimal('NaN'))
False
>>> ExtendedContext.is_finite(1)
True
"""
a = _convert_other(a, raiseit=True)
return a.is_finite()
def is_infinite(self, a):
"""Return True if the operand is infinite; otherwise return False.
>>> ExtendedContext.is_infinite(Decimal('2.50'))
False
>>> ExtendedContext.is_infinite(Decimal('-Inf'))
True
>>> ExtendedContext.is_infinite(Decimal('NaN'))
False
>>> ExtendedContext.is_infinite(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_infinite()
def is_nan(self, a):
"""Return True if the operand is a qNaN or sNaN;
otherwise return False.
>>> ExtendedContext.is_nan(Decimal('2.50'))
False
>>> ExtendedContext.is_nan(Decimal('NaN'))
True
>>> ExtendedContext.is_nan(Decimal('-sNaN'))
True
>>> ExtendedContext.is_nan(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_nan()
def is_normal(self, a):
"""Return True if the operand is a normal number;
otherwise return False.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.is_normal(Decimal('2.50'))
True
>>> c.is_normal(Decimal('0.1E-999'))
False
>>> c.is_normal(Decimal('0.00'))
False
>>> c.is_normal(Decimal('-Inf'))
False
>>> c.is_normal(Decimal('NaN'))
False
>>> c.is_normal(1)
True
"""
a = _convert_other(a, raiseit=True)
return a.is_normal(context=self)
def is_qnan(self, a):
"""Return True if the operand is a quiet NaN; otherwise return False.
>>> ExtendedContext.is_qnan(Decimal('2.50'))
False
>>> ExtendedContext.is_qnan(Decimal('NaN'))
True
>>> ExtendedContext.is_qnan(Decimal('sNaN'))
False
>>> ExtendedContext.is_qnan(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_qnan()
def is_signed(self, a):
"""Return True if the operand is negative; otherwise return False.
>>> ExtendedContext.is_signed(Decimal('2.50'))
False
>>> ExtendedContext.is_signed(Decimal('-12'))
True
>>> ExtendedContext.is_signed(Decimal('-0'))
True
>>> ExtendedContext.is_signed(8)
False
>>> ExtendedContext.is_signed(-8)
True
"""
a = _convert_other(a, raiseit=True)
return a.is_signed()
def is_snan(self, a):
"""Return True if the operand is a signaling NaN;
otherwise return False.
>>> ExtendedContext.is_snan(Decimal('2.50'))
False
>>> ExtendedContext.is_snan(Decimal('NaN'))
False
>>> ExtendedContext.is_snan(Decimal('sNaN'))
True
>>> ExtendedContext.is_snan(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_snan()
def is_subnormal(self, a):
"""Return True if the operand is subnormal; otherwise return False.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.is_subnormal(Decimal('2.50'))
False
>>> c.is_subnormal(Decimal('0.1E-999'))
True
>>> c.is_subnormal(Decimal('0.00'))
False
>>> c.is_subnormal(Decimal('-Inf'))
False
>>> c.is_subnormal(Decimal('NaN'))
False
>>> c.is_subnormal(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_subnormal(context=self)
def is_zero(self, a):
"""Return True if the operand is a zero; otherwise return False.
>>> ExtendedContext.is_zero(Decimal('0'))
True
>>> ExtendedContext.is_zero(Decimal('2.50'))
False
>>> ExtendedContext.is_zero(Decimal('-0E+2'))
True
>>> ExtendedContext.is_zero(1)
False
>>> ExtendedContext.is_zero(0)
True
"""
a = _convert_other(a, raiseit=True)
return a.is_zero()
def ln(self, a):
"""Returns the natural (base e) logarithm of the operand.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.ln(Decimal('0'))
Decimal('-Infinity')
>>> c.ln(Decimal('1.000'))
Decimal('0')
>>> c.ln(Decimal('2.71828183'))
Decimal('1.00000000')
>>> c.ln(Decimal('10'))
Decimal('2.30258509')
>>> c.ln(Decimal('+Infinity'))
Decimal('Infinity')
>>> c.ln(1)
Decimal('0')
"""
a = _convert_other(a, raiseit=True)
return a.ln(context=self)
def log10(self, a):
"""Returns the base 10 logarithm of the operand.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.log10(Decimal('0'))
Decimal('-Infinity')
>>> c.log10(Decimal('0.001'))
Decimal('-3')
>>> c.log10(Decimal('1.000'))
Decimal('0')
>>> c.log10(Decimal('2'))
Decimal('0.301029996')
>>> c.log10(Decimal('10'))
Decimal('1')
>>> c.log10(Decimal('70'))
Decimal('1.84509804')
>>> c.log10(Decimal('+Infinity'))
Decimal('Infinity')
>>> c.log10(0)
Decimal('-Infinity')
>>> c.log10(1)
Decimal('0')
"""
a = _convert_other(a, raiseit=True)
return a.log10(context=self)
def logb(self, a):
""" Returns the exponent of the magnitude of the operand's MSD.
The result is the integer which is the exponent of the magnitude
of the most significant digit of the operand (as though the
operand were truncated to a single digit while maintaining the
value of that digit and without limiting the resulting exponent).
>>> ExtendedContext.logb(Decimal('250'))
Decimal('2')
>>> ExtendedContext.logb(Decimal('2.50'))
Decimal('0')
>>> ExtendedContext.logb(Decimal('0.03'))
Decimal('-2')
>>> ExtendedContext.logb(Decimal('0'))
Decimal('-Infinity')
>>> ExtendedContext.logb(1)
Decimal('0')
>>> ExtendedContext.logb(10)
Decimal('1')
>>> ExtendedContext.logb(100)
Decimal('2')
"""
a = _convert_other(a, raiseit=True)
return a.logb(context=self)
def logical_and(self, a, b):
"""Applies the logical operation 'and' between each operand's digits.
Both operands must be logical numbers.
>>> ExtendedContext.logical_and(Decimal('0'), Decimal('0'))
Decimal('0')
>>> ExtendedContext.logical_and(Decimal('0'), Decimal('1'))
Decimal('0')
>>> ExtendedContext.logical_and(Decimal('1'), Decimal('0'))
Decimal('0')
>>> ExtendedContext.logical_and(Decimal('1'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.logical_and(Decimal('1100'), Decimal('1010'))
Decimal('1000')
>>> ExtendedContext.logical_and(Decimal('1111'), Decimal('10'))
Decimal('10')
>>> ExtendedContext.logical_and(110, 1101)
Decimal('100')
>>> ExtendedContext.logical_and(Decimal(110), 1101)
Decimal('100')
>>> ExtendedContext.logical_and(110, Decimal(1101))
Decimal('100')
"""
a = _convert_other(a, raiseit=True)
return a.logical_and(b, context=self)
def logical_invert(self, a):
"""Invert all the digits in the operand.
The operand must be a logical number.
>>> ExtendedContext.logical_invert(Decimal('0'))
Decimal('111111111')
>>> ExtendedContext.logical_invert(Decimal('1'))
Decimal('111111110')
>>> ExtendedContext.logical_invert(Decimal('111111111'))
Decimal('0')
>>> ExtendedContext.logical_invert(Decimal('101010101'))
Decimal('10101010')
>>> ExtendedContext.logical_invert(1101)
Decimal('111110010')
"""
a = _convert_other(a, raiseit=True)
return a.logical_invert(context=self)
def logical_or(self, a, b):
"""Applies the logical operation 'or' between each operand's digits.
Both operands must be logical numbers.
>>> ExtendedContext.logical_or(Decimal('0'), Decimal('0'))
Decimal('0')
>>> ExtendedContext.logical_or(Decimal('0'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.logical_or(Decimal('1'), Decimal('0'))
Decimal('1')
>>> ExtendedContext.logical_or(Decimal('1'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.logical_or(Decimal('1100'), Decimal('1010'))
Decimal('1110')
>>> ExtendedContext.logical_or(Decimal('1110'), Decimal('10'))
Decimal('1110')
>>> ExtendedContext.logical_or(110, 1101)
Decimal('1111')
>>> ExtendedContext.logical_or(Decimal(110), 1101)
Decimal('1111')
>>> ExtendedContext.logical_or(110, Decimal(1101))
Decimal('1111')
"""
a = _convert_other(a, raiseit=True)
return a.logical_or(b, context=self)
def logical_xor(self, a, b):
"""Applies the logical operation 'xor' between each operand's digits.
Both operands must be logical numbers.
>>> ExtendedContext.logical_xor(Decimal('0'), Decimal('0'))
Decimal('0')
>>> ExtendedContext.logical_xor(Decimal('0'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.logical_xor(Decimal('1'), Decimal('0'))
Decimal('1')
>>> ExtendedContext.logical_xor(Decimal('1'), Decimal('1'))
Decimal('0')
>>> ExtendedContext.logical_xor(Decimal('1100'), Decimal('1010'))
Decimal('110')
>>> ExtendedContext.logical_xor(Decimal('1111'), Decimal('10'))
Decimal('1101')
>>> ExtendedContext.logical_xor(110, 1101)
Decimal('1011')
>>> ExtendedContext.logical_xor(Decimal(110), 1101)
Decimal('1011')
>>> ExtendedContext.logical_xor(110, Decimal(1101))
Decimal('1011')
"""
a = _convert_other(a, raiseit=True)
return a.logical_xor(b, context=self)
def max(self, a, b):
"""max compares two values numerically and returns the maximum.
If either operand is a NaN then the general rules apply.
Otherwise, the operands are compared as though by the compare
operation. If they are numerically equal then the result is chosen
according to the compare_total ordering. Otherwise the maximum (closer to positive
infinity) of the two operands is chosen as the result.
>>> ExtendedContext.max(Decimal('3'), Decimal('2'))
Decimal('3')
>>> ExtendedContext.max(Decimal('-10'), Decimal('3'))
Decimal('3')
>>> ExtendedContext.max(Decimal('1.0'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.max(Decimal('7'), Decimal('NaN'))
Decimal('7')
>>> ExtendedContext.max(1, 2)
Decimal('2')
>>> ExtendedContext.max(Decimal(1), 2)
Decimal('2')
>>> ExtendedContext.max(1, Decimal(2))
Decimal('2')
"""
a = _convert_other(a, raiseit=True)
return a.max(b, context=self)
def max_mag(self, a, b):
"""Compares the values numerically with their sign ignored.
>>> ExtendedContext.max_mag(Decimal('7'), Decimal('NaN'))
Decimal('7')
>>> ExtendedContext.max_mag(Decimal('7'), Decimal('-10'))
Decimal('-10')
>>> ExtendedContext.max_mag(1, -2)
Decimal('-2')
>>> ExtendedContext.max_mag(Decimal(1), -2)
Decimal('-2')
>>> ExtendedContext.max_mag(1, Decimal(-2))
Decimal('-2')
"""
a = _convert_other(a, raiseit=True)
return a.max_mag(b, context=self)
def min(self, a, b):
"""min compares two values numerically and returns the minimum.
If either operand is a NaN then the general rules apply.
Otherwise, the operands are compared as though by the compare
operation. If they are numerically equal then the result is chosen
according to the compare_total ordering. Otherwise the minimum (closer to negative
infinity) of the two operands is chosen as the result.
>>> ExtendedContext.min(Decimal('3'), Decimal('2'))
Decimal('2')
>>> ExtendedContext.min(Decimal('-10'), Decimal('3'))
Decimal('-10')
>>> ExtendedContext.min(Decimal('1.0'), Decimal('1'))
Decimal('1.0')
>>> ExtendedContext.min(Decimal('7'), Decimal('NaN'))
Decimal('7')
>>> ExtendedContext.min(1, 2)
Decimal('1')
>>> ExtendedContext.min(Decimal(1), 2)
Decimal('1')
>>> ExtendedContext.min(1, Decimal(29))
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.min(b, context=self)
def min_mag(self, a, b):
"""Compares the values numerically with their sign ignored.
>>> ExtendedContext.min_mag(Decimal('3'), Decimal('-2'))
Decimal('-2')
>>> ExtendedContext.min_mag(Decimal('-3'), Decimal('NaN'))
Decimal('-3')
>>> ExtendedContext.min_mag(1, -2)
Decimal('1')
>>> ExtendedContext.min_mag(Decimal(1), -2)
Decimal('1')
>>> ExtendedContext.min_mag(1, Decimal(-2))
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.min_mag(b, context=self)
def minus(self, a):
"""Minus corresponds to unary prefix minus in Python.
The operation is evaluated using the same rules as subtract; the
operation minus(a) is calculated as subtract('0', a) where the '0'
has the same exponent as the operand.
>>> ExtendedContext.minus(Decimal('1.3'))
Decimal('-1.3')
>>> ExtendedContext.minus(Decimal('-1.3'))
Decimal('1.3')
>>> ExtendedContext.minus(1)
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.__neg__(context=self)
def multiply(self, a, b):
"""multiply multiplies two operands.
If either operand is a special value then the general rules apply.
Otherwise, the operands are multiplied together
('long multiplication'), resulting in a number which may be as long as
the sum of the lengths of the two operands.
>>> ExtendedContext.multiply(Decimal('1.20'), Decimal('3'))
Decimal('3.60')
>>> ExtendedContext.multiply(Decimal('7'), Decimal('3'))
Decimal('21')
>>> ExtendedContext.multiply(Decimal('0.9'), Decimal('0.8'))
Decimal('0.72')
>>> ExtendedContext.multiply(Decimal('0.9'), Decimal('-0'))
Decimal('-0.0')
>>> ExtendedContext.multiply(Decimal('654321'), Decimal('654321'))
Decimal('4.28135971E+11')
>>> ExtendedContext.multiply(7, 7)
Decimal('49')
>>> ExtendedContext.multiply(Decimal(7), 7)
Decimal('49')
>>> ExtendedContext.multiply(7, Decimal(7))
Decimal('49')
"""
a = _convert_other(a, raiseit=True)
r = a.__mul__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def next_minus(self, a):
"""Returns the largest representable number smaller than a.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> ExtendedContext.next_minus(Decimal('1'))
Decimal('0.999999999')
>>> c.next_minus(Decimal('1E-1007'))
Decimal('0E-1007')
>>> ExtendedContext.next_minus(Decimal('-1.00000003'))
Decimal('-1.00000004')
>>> c.next_minus(Decimal('Infinity'))
Decimal('9.99999999E+999')
>>> c.next_minus(1)
Decimal('0.999999999')
"""
a = _convert_other(a, raiseit=True)
return a.next_minus(context=self)
def next_plus(self, a):
"""Returns the smallest representable number larger than a.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> ExtendedContext.next_plus(Decimal('1'))
Decimal('1.00000001')
>>> c.next_plus(Decimal('-1E-1007'))
Decimal('-0E-1007')
>>> ExtendedContext.next_plus(Decimal('-1.00000003'))
Decimal('-1.00000002')
>>> c.next_plus(Decimal('-Infinity'))
Decimal('-9.99999999E+999')
>>> c.next_plus(1)
Decimal('1.00000001')
"""
a = _convert_other(a, raiseit=True)
return a.next_plus(context=self)
def next_toward(self, a, b):
"""Returns the number closest to a, in direction towards b.
The result is the closest representable number from the first
operand (but not the first operand) that is in the direction
towards the second operand, unless the operands have the same
value.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.next_toward(Decimal('1'), Decimal('2'))
Decimal('1.00000001')
>>> c.next_toward(Decimal('-1E-1007'), Decimal('1'))
Decimal('-0E-1007')
>>> c.next_toward(Decimal('-1.00000003'), Decimal('0'))
Decimal('-1.00000002')
>>> c.next_toward(Decimal('1'), Decimal('0'))
Decimal('0.999999999')
>>> c.next_toward(Decimal('1E-1007'), Decimal('-100'))
Decimal('0E-1007')
>>> c.next_toward(Decimal('-1.00000003'), Decimal('-10'))
Decimal('-1.00000004')
>>> c.next_toward(Decimal('0.00'), Decimal('-0.0000'))
Decimal('-0.00')
>>> c.next_toward(0, 1)
Decimal('1E-1007')
>>> c.next_toward(Decimal(0), 1)
Decimal('1E-1007')
>>> c.next_toward(0, Decimal(1))
Decimal('1E-1007')
"""
a = _convert_other(a, raiseit=True)
return a.next_toward(b, context=self)
def normalize(self, a):
"""normalize reduces an operand to its simplest form.
Essentially a plus operation with all trailing zeros removed from the
result.
>>> ExtendedContext.normalize(Decimal('2.1'))
Decimal('2.1')
>>> ExtendedContext.normalize(Decimal('-2.0'))
Decimal('-2')
>>> ExtendedContext.normalize(Decimal('1.200'))
Decimal('1.2')
>>> ExtendedContext.normalize(Decimal('-120'))
Decimal('-1.2E+2')
>>> ExtendedContext.normalize(Decimal('120.00'))
Decimal('1.2E+2')
>>> ExtendedContext.normalize(Decimal('0.00'))
Decimal('0')
>>> ExtendedContext.normalize(6)
Decimal('6')
"""
a = _convert_other(a, raiseit=True)
return a.normalize(context=self)
def number_class(self, a):
"""Returns an indication of the class of the operand.
The class is one of the following strings:
sNaN
NaN
-Infinity
-Normal
-Subnormal
-Zero
+Zero
+Subnormal
+Normal
+Infinity
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.number_class(Decimal('Infinity'))
'+Infinity'
>>> c.number_class(Decimal('1E-10'))
'+Normal'
>>> c.number_class(Decimal('2.50'))
'+Normal'
>>> c.number_class(Decimal('0.1E-999'))
'+Subnormal'
>>> c.number_class(Decimal('0'))
'+Zero'
>>> c.number_class(Decimal('-0'))
'-Zero'
>>> c.number_class(Decimal('-0.1E-999'))
'-Subnormal'
>>> c.number_class(Decimal('-1E-10'))
'-Normal'
>>> c.number_class(Decimal('-2.50'))
'-Normal'
>>> c.number_class(Decimal('-Infinity'))
'-Infinity'
>>> c.number_class(Decimal('NaN'))
'NaN'
>>> c.number_class(Decimal('-NaN'))
'NaN'
>>> c.number_class(Decimal('sNaN'))
'sNaN'
>>> c.number_class(123)
'+Normal'
"""
a = _convert_other(a, raiseit=True)
return a.number_class(context=self)
def plus(self, a):
"""Plus corresponds to unary prefix plus in Python.
The operation is evaluated using the same rules as add; the
operation plus(a) is calculated as add('0', a) where the '0'
has the same exponent as the operand.
>>> ExtendedContext.plus(Decimal('1.3'))
Decimal('1.3')
>>> ExtendedContext.plus(Decimal('-1.3'))
Decimal('-1.3')
>>> ExtendedContext.plus(-1)
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.__pos__(context=self)
def power(self, a, b, modulo=None):
"""Raises a to the power of b, to modulo if given.
With two arguments, compute a**b. If a is negative then b
must be integral. The result will be inexact unless b is
integral and the result is finite and can be expressed exactly
in 'precision' digits.
With three arguments, compute (a**b) % modulo. For the
three argument form, the following restrictions on the
arguments hold:
- all three arguments must be integral
- b must be nonnegative
- at least one of a or b must be nonzero
- modulo must be nonzero and have at most 'precision' digits
The result of pow(a, b, modulo) is identical to the result
that would be obtained by computing (a**b) % modulo with
unbounded precision, but is computed more efficiently. It is
always exact.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.power(Decimal('2'), Decimal('3'))
Decimal('8')
>>> c.power(Decimal('-2'), Decimal('3'))
Decimal('-8')
>>> c.power(Decimal('2'), Decimal('-3'))
Decimal('0.125')
>>> c.power(Decimal('1.7'), Decimal('8'))
Decimal('69.7575744')
>>> c.power(Decimal('10'), Decimal('0.301029996'))
Decimal('2.00000000')
>>> c.power(Decimal('Infinity'), Decimal('-1'))
Decimal('0')
>>> c.power(Decimal('Infinity'), Decimal('0'))
Decimal('1')
>>> c.power(Decimal('Infinity'), Decimal('1'))
Decimal('Infinity')
>>> c.power(Decimal('-Infinity'), Decimal('-1'))
Decimal('-0')
>>> c.power(Decimal('-Infinity'), Decimal('0'))
Decimal('1')
>>> c.power(Decimal('-Infinity'), Decimal('1'))
Decimal('-Infinity')
>>> c.power(Decimal('-Infinity'), Decimal('2'))
Decimal('Infinity')
>>> c.power(Decimal('0'), Decimal('0'))
Decimal('NaN')
>>> c.power(Decimal('3'), Decimal('7'), Decimal('16'))
Decimal('11')
>>> c.power(Decimal('-3'), Decimal('7'), Decimal('16'))
Decimal('-11')
>>> c.power(Decimal('-3'), Decimal('8'), Decimal('16'))
Decimal('1')
>>> c.power(Decimal('3'), Decimal('7'), Decimal('-16'))
Decimal('11')
>>> c.power(Decimal('23E12345'), Decimal('67E189'), Decimal('123456789'))
Decimal('11729830')
>>> c.power(Decimal('-0'), Decimal('17'), Decimal('1729'))
Decimal('-0')
>>> c.power(Decimal('-23'), Decimal('0'), Decimal('65537'))
Decimal('1')
>>> ExtendedContext.power(7, 7)
Decimal('823543')
>>> ExtendedContext.power(Decimal(7), 7)
Decimal('823543')
>>> ExtendedContext.power(7, Decimal(7), 2)
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
r = a.__pow__(b, modulo, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def quantize(self, a, b):
"""Returns a value equal to 'a' (rounded), having the exponent of 'b'.
The coefficient of the result is derived from that of the left-hand
operand. It may be rounded using the current rounding setting (if the
exponent is being increased), multiplied by a positive power of ten (if
the exponent is being decreased), or is unchanged (if the exponent is
already equal to that of the right-hand operand).
Unlike other operations, if the length of the coefficient after the
quantize operation would be greater than precision then an Invalid
operation condition is raised. This guarantees that, unless there is
an error condition, the exponent of the result of a quantize is always
equal to that of the right-hand operand.
Also unlike other operations, quantize will never raise Underflow, even
if the result is subnormal and inexact.
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.001'))
Decimal('2.170')
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.01'))
Decimal('2.17')
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.1'))
Decimal('2.2')
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('1e+0'))
Decimal('2')
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('1e+1'))
Decimal('0E+1')
>>> ExtendedContext.quantize(Decimal('-Inf'), Decimal('Infinity'))
Decimal('-Infinity')
>>> ExtendedContext.quantize(Decimal('2'), Decimal('Infinity'))
Decimal('NaN')
>>> ExtendedContext.quantize(Decimal('-0.1'), Decimal('1'))
Decimal('-0')
>>> ExtendedContext.quantize(Decimal('-0'), Decimal('1e+5'))
Decimal('-0E+5')
>>> ExtendedContext.quantize(Decimal('+35236450.6'), Decimal('1e-2'))
Decimal('NaN')
>>> ExtendedContext.quantize(Decimal('-35236450.6'), Decimal('1e-2'))
Decimal('NaN')
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e-1'))
Decimal('217.0')
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e-0'))
Decimal('217')
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e+1'))
Decimal('2.2E+2')
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e+2'))
Decimal('2E+2')
>>> ExtendedContext.quantize(1, 2)
Decimal('1')
>>> ExtendedContext.quantize(Decimal(1), 2)
Decimal('1')
>>> ExtendedContext.quantize(1, Decimal(2))
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.quantize(b, context=self)
def radix(self):
"""Just returns 10, as this is Decimal, :)
>>> ExtendedContext.radix()
Decimal('10')
"""
return Decimal(10)
def remainder(self, a, b):
"""Returns the remainder from integer division.
The result is the residue of the dividend after the operation of
calculating integer division as described for divide-integer, rounded
to precision digits if necessary. The sign of the result, if
non-zero, is the same as that of the original dividend.
This operation will fail under the same conditions as integer division
(that is, if integer division on the same two operands would fail, the
remainder cannot be calculated).
>>> ExtendedContext.remainder(Decimal('2.1'), Decimal('3'))
Decimal('2.1')
>>> ExtendedContext.remainder(Decimal('10'), Decimal('3'))
Decimal('1')
>>> ExtendedContext.remainder(Decimal('-10'), Decimal('3'))
Decimal('-1')
>>> ExtendedContext.remainder(Decimal('10.2'), Decimal('1'))
Decimal('0.2')
>>> ExtendedContext.remainder(Decimal('10'), Decimal('0.3'))
Decimal('0.1')
>>> ExtendedContext.remainder(Decimal('3.6'), Decimal('1.3'))
Decimal('1.0')
>>> ExtendedContext.remainder(22, 6)
Decimal('4')
>>> ExtendedContext.remainder(Decimal(22), 6)
Decimal('4')
>>> ExtendedContext.remainder(22, Decimal(6))
Decimal('4')
"""
a = _convert_other(a, raiseit=True)
r = a.__mod__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def remainder_near(self, a, b):
"""Returns to be "a - b * n", where n is the integer nearest the exact
value of "x / b" (if two integers are equally near then the even one
is chosen). If the result is equal to 0 then its sign will be the
sign of a.
This operation will fail under the same conditions as integer division
(that is, if integer division on the same two operands would fail, the
remainder cannot be calculated).
>>> ExtendedContext.remainder_near(Decimal('2.1'), Decimal('3'))
Decimal('-0.9')
>>> ExtendedContext.remainder_near(Decimal('10'), Decimal('6'))
Decimal('-2')
>>> ExtendedContext.remainder_near(Decimal('10'), Decimal('3'))
Decimal('1')
>>> ExtendedContext.remainder_near(Decimal('-10'), Decimal('3'))
Decimal('-1')
>>> ExtendedContext.remainder_near(Decimal('10.2'), Decimal('1'))
Decimal('0.2')
>>> ExtendedContext.remainder_near(Decimal('10'), Decimal('0.3'))
Decimal('0.1')
>>> ExtendedContext.remainder_near(Decimal('3.6'), Decimal('1.3'))
Decimal('-0.3')
>>> ExtendedContext.remainder_near(3, 11)
Decimal('3')
>>> ExtendedContext.remainder_near(Decimal(3), 11)
Decimal('3')
>>> ExtendedContext.remainder_near(3, Decimal(11))
Decimal('3')
"""
a = _convert_other(a, raiseit=True)
return a.remainder_near(b, context=self)
def rotate(self, a, b):
"""Returns a rotated copy of a, b times.
The coefficient of the result is a rotated copy of the digits in
the coefficient of the first operand. The number of places of
rotation is taken from the absolute value of the second operand,
with the rotation being to the left if the second operand is
positive or to the right otherwise.
>>> ExtendedContext.rotate(Decimal('34'), Decimal('8'))
Decimal('400000003')
>>> ExtendedContext.rotate(Decimal('12'), Decimal('9'))
Decimal('12')
>>> ExtendedContext.rotate(Decimal('123456789'), Decimal('-2'))
Decimal('891234567')
>>> ExtendedContext.rotate(Decimal('123456789'), Decimal('0'))
Decimal('123456789')
>>> ExtendedContext.rotate(Decimal('123456789'), Decimal('+2'))
Decimal('345678912')
>>> ExtendedContext.rotate(1333333, 1)
Decimal('13333330')
>>> ExtendedContext.rotate(Decimal(1333333), 1)
Decimal('13333330')
>>> ExtendedContext.rotate(1333333, Decimal(1))
Decimal('13333330')
"""
a = _convert_other(a, raiseit=True)
return a.rotate(b, context=self)
def same_quantum(self, a, b):
"""Returns True if the two operands have the same exponent.
The result is never affected by either the sign or the coefficient of
either operand.
>>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('0.001'))
False
>>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('0.01'))
True
>>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('1'))
False
>>> ExtendedContext.same_quantum(Decimal('Inf'), Decimal('-Inf'))
True
>>> ExtendedContext.same_quantum(10000, -1)
True
>>> ExtendedContext.same_quantum(Decimal(10000), -1)
True
>>> ExtendedContext.same_quantum(10000, Decimal(-1))
True
"""
a = _convert_other(a, raiseit=True)
return a.same_quantum(b)
def scaleb(self, a, b):
"""Returns the first operand after adding the second value to its exponent.
>>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('-2'))
Decimal('0.0750')
>>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('0'))
Decimal('7.50')
>>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('3'))
Decimal('7.50E+3')
>>> ExtendedContext.scaleb(1, 4)
Decimal('1E+4')
>>> ExtendedContext.scaleb(Decimal(1), 4)
Decimal('1E+4')
>>> ExtendedContext.scaleb(1, Decimal(4))
Decimal('1E+4')
"""
a = _convert_other(a, raiseit=True)
return a.scaleb(b, context=self)
def shift(self, a, b):
"""Returns a shifted copy of a, b times.
The coefficient of the result is a shifted copy of the digits
in the coefficient of the first operand. The number of places
to shift is taken from the absolute value of the second operand,
with the shift being to the left if the second operand is
positive or to the right otherwise. Digits shifted into the
coefficient are zeros.
>>> ExtendedContext.shift(Decimal('34'), Decimal('8'))
Decimal('400000000')
>>> ExtendedContext.shift(Decimal('12'), Decimal('9'))
Decimal('0')
>>> ExtendedContext.shift(Decimal('123456789'), Decimal('-2'))
Decimal('1234567')
>>> ExtendedContext.shift(Decimal('123456789'), Decimal('0'))
Decimal('123456789')
>>> ExtendedContext.shift(Decimal('123456789'), Decimal('+2'))
Decimal('345678900')
>>> ExtendedContext.shift(88888888, 2)
Decimal('888888800')
>>> ExtendedContext.shift(Decimal(88888888), 2)
Decimal('888888800')
>>> ExtendedContext.shift(88888888, Decimal(2))
Decimal('888888800')
"""
a = _convert_other(a, raiseit=True)
return a.shift(b, context=self)
def sqrt(self, a):
"""Square root of a non-negative number to context precision.
If the result must be inexact, it is rounded using the round-half-even
algorithm.
>>> ExtendedContext.sqrt(Decimal('0'))
Decimal('0')
>>> ExtendedContext.sqrt(Decimal('-0'))
Decimal('-0')
>>> ExtendedContext.sqrt(Decimal('0.39'))
Decimal('0.624499800')
>>> ExtendedContext.sqrt(Decimal('100'))
Decimal('10')
>>> ExtendedContext.sqrt(Decimal('1'))
Decimal('1')
>>> ExtendedContext.sqrt(Decimal('1.0'))
Decimal('1.0')
>>> ExtendedContext.sqrt(Decimal('1.00'))
Decimal('1.0')
>>> ExtendedContext.sqrt(Decimal('7'))
Decimal('2.64575131')
>>> ExtendedContext.sqrt(Decimal('10'))
Decimal('3.16227766')
>>> ExtendedContext.sqrt(2)
Decimal('1.41421356')
>>> ExtendedContext.prec
9
"""
a = _convert_other(a, raiseit=True)
return a.sqrt(context=self)
def subtract(self, a, b):
"""Return the difference between the two operands.
>>> ExtendedContext.subtract(Decimal('1.3'), Decimal('1.07'))
Decimal('0.23')
>>> ExtendedContext.subtract(Decimal('1.3'), Decimal('1.30'))
Decimal('0.00')
>>> ExtendedContext.subtract(Decimal('1.3'), Decimal('2.07'))
Decimal('-0.77')
>>> ExtendedContext.subtract(8, 5)
Decimal('3')
>>> ExtendedContext.subtract(Decimal(8), 5)
Decimal('3')
>>> ExtendedContext.subtract(8, Decimal(5))
Decimal('3')
"""
a = _convert_other(a, raiseit=True)
r = a.__sub__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def to_eng_string(self, a):
"""Converts a number to a string, using scientific notation.
The operation is not affected by the context.
"""
a = _convert_other(a, raiseit=True)
return a.to_eng_string(context=self)
def to_sci_string(self, a):
"""Converts a number to a string, using scientific notation.
The operation is not affected by the context.
"""
a = _convert_other(a, raiseit=True)
return a.__str__(context=self)
def to_integral_exact(self, a):
"""Rounds to an integer.
When the operand has a negative exponent, the result is the same
as using the quantize() operation using the given operand as the
left-hand-operand, 1E+0 as the right-hand-operand, and the precision
of the operand as the precision setting; Inexact and Rounded flags
are allowed in this operation. The rounding mode is taken from the
context.
>>> ExtendedContext.to_integral_exact(Decimal('2.1'))
Decimal('2')
>>> ExtendedContext.to_integral_exact(Decimal('100'))
Decimal('100')
>>> ExtendedContext.to_integral_exact(Decimal('100.0'))
Decimal('100')
>>> ExtendedContext.to_integral_exact(Decimal('101.5'))
Decimal('102')
>>> ExtendedContext.to_integral_exact(Decimal('-101.5'))
Decimal('-102')
>>> ExtendedContext.to_integral_exact(Decimal('10E+5'))
Decimal('1.0E+6')
>>> ExtendedContext.to_integral_exact(Decimal('7.89E+77'))
Decimal('7.89E+77')
>>> ExtendedContext.to_integral_exact(Decimal('-Inf'))
Decimal('-Infinity')
"""
a = _convert_other(a, raiseit=True)
return a.to_integral_exact(context=self)
def to_integral_value(self, a):
"""Rounds to an integer.
When the operand has a negative exponent, the result is the same
as using the quantize() operation using the given operand as the
left-hand-operand, 1E+0 as the right-hand-operand, and the precision
of the operand as the precision setting, except that no flags will
be set. The rounding mode is taken from the context.
>>> ExtendedContext.to_integral_value(Decimal('2.1'))
Decimal('2')
>>> ExtendedContext.to_integral_value(Decimal('100'))
Decimal('100')
>>> ExtendedContext.to_integral_value(Decimal('100.0'))
Decimal('100')
>>> ExtendedContext.to_integral_value(Decimal('101.5'))
Decimal('102')
>>> ExtendedContext.to_integral_value(Decimal('-101.5'))
Decimal('-102')
>>> ExtendedContext.to_integral_value(Decimal('10E+5'))
Decimal('1.0E+6')
>>> ExtendedContext.to_integral_value(Decimal('7.89E+77'))
Decimal('7.89E+77')
>>> ExtendedContext.to_integral_value(Decimal('-Inf'))
Decimal('-Infinity')
"""
a = _convert_other(a, raiseit=True)
return a.to_integral_value(context=self)
    # The method name changed, but we also provide the old one for compatibility.
to_integral = to_integral_value
class _WorkRep(object):
__slots__ = ('sign','int','exp')
# sign: 0 or 1
# int: int or long
# exp: None, int, or string
def __init__(self, value=None):
if value is None:
self.sign = None
self.int = 0
self.exp = None
elif isinstance(value, Decimal):
self.sign = value._sign
self.int = int(value._int)
self.exp = value._exp
else:
# assert isinstance(value, tuple)
self.sign = value[0]
self.int = value[1]
self.exp = value[2]
def __repr__(self):
return "(%r, %r, %r)" % (self.sign, self.int, self.exp)
__str__ = __repr__
def _normalize(op1, op2, prec = 0):
"""Normalizes op1, op2 to have the same exp and length of coefficient.
Done during addition.
"""
if op1.exp < op2.exp:
tmp = op2
other = op1
else:
tmp = op1
other = op2
# Let exp = min(tmp.exp - 1, tmp.adjusted() - precision - 1).
# Then adding 10**exp to tmp has the same effect (after rounding)
# as adding any positive quantity smaller than 10**exp; similarly
# for subtraction. So if other is smaller than 10**exp we replace
# it with 10**exp. This avoids tmp.exp - other.exp getting too large.
tmp_len = len(str(tmp.int))
other_len = len(str(other.int))
exp = tmp.exp + min(-1, tmp_len - prec - 2)
if other_len + other.exp - 1 < exp:
other.int = 1
other.exp = exp
tmp.int *= 10 ** (tmp.exp - other.exp)
tmp.exp = other.exp
return op1, op2
##### Integer arithmetic functions used by ln, log10, exp and __pow__ #####
# This function from Tim Peters was taken from here:
# http://mail.python.org/pipermail/python-list/1999-July/007758.html
# The correction is embedded in the function definition for speed; the
# function deliberately avoids math.log so that no floats are involved.
def _nbits(n, correction = {
'0': 4, '1': 3, '2': 2, '3': 2,
'4': 1, '5': 1, '6': 1, '7': 1,
'8': 0, '9': 0, 'a': 0, 'b': 0,
'c': 0, 'd': 0, 'e': 0, 'f': 0}):
"""Number of bits in binary representation of the positive integer n,
or 0 if n == 0.
"""
if n < 0:
raise ValueError("The argument to _nbits should be nonnegative.")
hex_n = "%x" % n
return 4*len(hex_n) - correction[hex_n[0]]
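# Quick illustration (interactive examples, not executed as part of the module):
#   >>> _nbits(255)
#   8
#   >>> _nbits(16)
#   5
#   >>> _nbits(0)
#   0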
def _decimal_lshift_exact(n, e):
""" Given integers n and e, return n * 10**e if it's an integer, else None.
The computation is designed to avoid computing large powers of 10
unnecessarily.
>>> _decimal_lshift_exact(3, 4)
30000
>>> _decimal_lshift_exact(300, -999999999) # returns None
"""
if n == 0:
return 0
elif e >= 0:
return n * 10**e
else:
# val_n = largest power of 10 dividing n.
str_n = str(abs(n))
val_n = len(str_n) - len(str_n.rstrip('0'))
return None if val_n < -e else n // 10**-e
def _sqrt_nearest(n, a):
"""Closest integer to the square root of the positive integer n. a is
an initial approximation to the square root. Any positive integer
will do for a, but the closer a is to the square root of n the
faster convergence will be.
"""
if n <= 0 or a <= 0:
raise ValueError("Both arguments to _sqrt_nearest should be positive.")
b=0
while a != b:
b, a = a, a--n//a>>1
return a
def _rshift_nearest(x, shift):
"""Given an integer x and a nonnegative integer shift, return closest
integer to x / 2**shift; use round-to-even in case of a tie.
"""
b, q = 1L << shift, x >> shift
return q + (2*(x & (b-1)) + (q&1) > b)
def _div_nearest(a, b):
"""Closest integer to a/b, a and b positive integers; rounds to even
in the case of a tie.
"""
q, r = divmod(a, b)
return q + (2*r + (q&1) > b)
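# Quick illustration of the round-half-to-even behaviour described above
# (interactive examples, not executed as part of the module):
#   >>> _div_nearest(7, 2)     # 3.5 rounds up to the even neighbour 4
#   4
#   >>> _div_nearest(5, 2)     # 2.5 rounds down to the even neighbour 2
#   2
#   >>> _rshift_nearest(5, 1)  # 5 / 2**1 = 2.5 -> 2
#   2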
def _ilog(x, M, L = 8):
"""Integer approximation to M*log(x/M), with absolute error boundable
in terms only of x/M.
Given positive integers x and M, return an integer approximation to
M * log(x/M). For L = 8 and 0.1 <= x/M <= 10 the difference
between the approximation and the exact result is at most 22. For
L = 8 and 1.0 <= x/M <= 10.0 the difference is at most 15. In
both cases these are upper bounds on the error; it will usually be
much smaller."""
# The basic algorithm is the following: let log1p be the function
# log1p(x) = log(1+x). Then log(x/M) = log1p((x-M)/M). We use
# the reduction
#
# log1p(y) = 2*log1p(y/(1+sqrt(1+y)))
#
# repeatedly until the argument to log1p is small (< 2**-L in
# absolute value). For small y we can use the Taylor series
# expansion
#
# log1p(y) ~ y - y**2/2 + y**3/3 - ... - (-y)**T/T
#
# truncating at T such that y**T is small enough. The whole
# computation is carried out in a form of fixed-point arithmetic,
# with a real number z being represented by an integer
# approximation to z*M. To avoid loss of precision, the y below
# is actually an integer approximation to 2**R*y*M, where R is the
# number of reductions performed so far.
y = x-M
# argument reduction; R = number of reductions performed
R = 0
while (R <= L and long(abs(y)) << L-R >= M or
R > L and abs(y) >> R-L >= M):
y = _div_nearest(long(M*y) << 1,
M + _sqrt_nearest(M*(M+_rshift_nearest(y, R)), M))
R += 1
# Taylor series with T terms
T = -int(-10*len(str(M))//(3*L))
yshift = _rshift_nearest(y, R)
w = _div_nearest(M, T)
for k in xrange(T-1, 0, -1):
w = _div_nearest(M, k) - _div_nearest(yshift*w, M)
return _div_nearest(w*y, M)
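# Rough sanity check for _ilog (a sketch, not a doctest; the exact return value
# is not pinned down here, only the documented error bound): _ilog(10*M, M)
# approximates M*log(10), so _ilog(10000, 1000) should lie within 22 of
# 1000*log(10) ~= 2302.585, i.e. very close to 2303.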
def _dlog10(c, e, p):
"""Given integers c, e and p with c > 0, p >= 0, compute an integer
approximation to 10**p * log10(c*10**e), with an absolute error of
at most 1. Assumes that c*10**e is not exactly 1."""
# increase precision by 2; compensate for this by dividing
# final result by 100
p += 2
# write c*10**e as d*10**f with either:
# f >= 0 and 1 <= d <= 10, or
# f <= 0 and 0.1 <= d <= 1.
# Thus for c*10**e close to 1, f = 0
l = len(str(c))
f = e+l - (e+l >= 1)
if p > 0:
M = 10**p
k = e+p-f
if k >= 0:
c *= 10**k
else:
c = _div_nearest(c, 10**-k)
log_d = _ilog(c, M) # error < 5 + 22 = 27
log_10 = _log10_digits(p) # error < 1
log_d = _div_nearest(log_d*M, log_10)
log_tenpower = f*M # exact
else:
log_d = 0 # error < 2.31
log_tenpower = _div_nearest(f, 10**-p) # error < 0.5
return _div_nearest(log_tenpower+log_d, 100)
def _dlog(c, e, p):
"""Given integers c, e and p with c > 0, compute an integer
approximation to 10**p * log(c*10**e), with an absolute error of
at most 1. Assumes that c*10**e is not exactly 1."""
# Increase precision by 2. The precision increase is compensated
# for at the end with a division by 100.
p += 2
# rewrite c*10**e as d*10**f with either f >= 0 and 1 <= d <= 10,
# or f <= 0 and 0.1 <= d <= 1. Then we can compute 10**p * log(c*10**e)
# as 10**p * log(d) + 10**p*f * log(10).
l = len(str(c))
f = e+l - (e+l >= 1)
# compute approximation to 10**p*log(d), with error < 27
if p > 0:
k = e+p-f
if k >= 0:
c *= 10**k
else:
c = _div_nearest(c, 10**-k) # error of <= 0.5 in c
# _ilog magnifies existing error in c by a factor of at most 10
log_d = _ilog(c, 10**p) # error < 5 + 22 = 27
else:
# p <= 0: just approximate the whole thing by 0; error < 2.31
log_d = 0
# compute approximation to f*10**p*log(10), with error < 11.
if f:
extra = len(str(abs(f)))-1
if p + extra >= 0:
# error in f * _log10_digits(p+extra) < |f| * 1 = |f|
# after division, error < |f|/10**extra + 0.5 < 10 + 0.5 < 11
f_log_ten = _div_nearest(f*_log10_digits(p+extra), 10**extra)
else:
f_log_ten = 0
else:
f_log_ten = 0
# error in sum < 11+27 = 38; error after division < 0.38 + 0.5 < 1
return _div_nearest(f_log_ten + log_d, 100)
class _Log10Memoize(object):
"""Class to compute, store, and allow retrieval of, digits of the
constant log(10) = 2.302585.... This constant is needed by
Decimal.ln, Decimal.log10, Decimal.exp and Decimal.__pow__."""
def __init__(self):
self.digits = "23025850929940456840179914546843642076011014886"
def getdigits(self, p):
"""Given an integer p >= 0, return floor(10**p)*log(10).
For example, self.getdigits(3) returns 2302.
"""
# digits are stored as a string, for quick conversion to
# integer in the case that we've already computed enough
# digits; the stored digits should always be correct
# (truncated, not rounded to nearest).
if p < 0:
raise ValueError("p should be nonnegative")
if p >= len(self.digits):
# compute p+3, p+6, p+9, ... digits; continue until at
# least one of the extra digits is nonzero
extra = 3
while True:
# compute p+extra digits, correct to within 1ulp
M = 10**(p+extra+2)
digits = str(_div_nearest(_ilog(10*M, M), 100))
if digits[-extra:] != '0'*extra:
break
extra += 3
# keep all reliable digits so far; remove trailing zeros
# and next nonzero digit
self.digits = digits.rstrip('0')[:-1]
return int(self.digits[:p+1])
_log10_digits = _Log10Memoize().getdigits
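# Quick illustration (matches the getdigits docstring above):
#   >>> _log10_digits(3)
#   2302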
def _iexp(x, M, L=8):
"""Given integers x and M, M > 0, such that x/M is small in absolute
value, compute an integer approximation to M*exp(x/M). For 0 <=
x/M <= 2.4, the absolute error in the result is bounded by 60 (and
is usually much smaller)."""
# Algorithm: to compute exp(z) for a real number z, first divide z
# by a suitable power R of 2 so that |z/2**R| < 2**-L. Then
# compute expm1(z/2**R) = exp(z/2**R) - 1 using the usual Taylor
# series
#
# expm1(x) = x + x**2/2! + x**3/3! + ...
#
# Now use the identity
#
# expm1(2x) = expm1(x)*(expm1(x)+2)
#
# R times to compute the sequence expm1(z/2**R),
# expm1(z/2**(R-1)), ... , exp(z/2), exp(z).
# Find R such that x/2**R/M <= 2**-L
R = _nbits((long(x)<<L)//M)
# Taylor series. (2**L)**T > M
T = -int(-10*len(str(M))//(3*L))
y = _div_nearest(x, T)
Mshift = long(M)<<R
for i in xrange(T-1, 0, -1):
y = _div_nearest(x*(Mshift + y), Mshift * i)
# Expansion
for k in xrange(R-1, -1, -1):
Mshift = long(M)<<(k+2)
y = _div_nearest(y*(y+Mshift), Mshift)
return M+y
def _dexp(c, e, p):
"""Compute an approximation to exp(c*10**e), with p decimal places of
precision.
Returns integers d, f such that:
10**(p-1) <= d <= 10**p, and
(d-1)*10**f < exp(c*10**e) < (d+1)*10**f
In other words, d*10**f is an approximation to exp(c*10**e) with p
digits of precision, and with an error in d of at most 1. This is
almost, but not quite, the same as the error being < 1ulp: when d
= 10**(p-1) the error could be up to 10 ulp."""
# we'll call iexp with M = 10**(p+2), giving p+3 digits of precision
p += 2
# compute log(10) with extra precision = adjusted exponent of c*10**e
extra = max(0, e + len(str(c)) - 1)
q = p + extra
# compute quotient c*10**e/(log(10)) = c*10**(e+q)/(log(10)*10**q),
# rounding down
shift = e+q
if shift >= 0:
cshift = c*10**shift
else:
cshift = c//10**-shift
quot, rem = divmod(cshift, _log10_digits(q))
# reduce remainder back to original precision
rem = _div_nearest(rem, 10**extra)
# error in result of _iexp < 120; error after division < 0.62
return _div_nearest(_iexp(rem, 10**p), 1000), quot - p + 3
def _dpower(xc, xe, yc, ye, p):
"""Given integers xc, xe, yc and ye representing Decimals x = xc*10**xe and
y = yc*10**ye, compute x**y. Returns a pair of integers (c, e) such that:
10**(p-1) <= c <= 10**p, and
(c-1)*10**e < x**y < (c+1)*10**e
in other words, c*10**e is an approximation to x**y with p digits
of precision, and with an error in c of at most 1. (This is
almost, but not quite, the same as the error being < 1ulp: when c
== 10**(p-1) we can only guarantee error < 10ulp.)
We assume that: x is positive and not equal to 1, and y is nonzero.
"""
# Find b such that 10**(b-1) <= |y| <= 10**b
b = len(str(abs(yc))) + ye
# log(x) = lxc*10**(-p-b-1), to p+b+1 places after the decimal point
lxc = _dlog(xc, xe, p+b+1)
# compute product y*log(x) = yc*lxc*10**(-p-b-1+ye) = pc*10**(-p-1)
shift = ye-b
if shift >= 0:
pc = lxc*yc*10**shift
else:
pc = _div_nearest(lxc*yc, 10**-shift)
if pc == 0:
# we prefer a result that isn't exactly 1; this makes it
# easier to compute a correctly rounded result in __pow__
if ((len(str(xc)) + xe >= 1) == (yc > 0)): # if x**y > 1:
coeff, exp = 10**(p-1)+1, 1-p
else:
coeff, exp = 10**p-1, -p
else:
coeff, exp = _dexp(pc, -(p+1), p+1)
coeff = _div_nearest(coeff, 10)
exp += 1
return coeff, exp
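# Worked example (a sketch; per the error bound above the coefficient may be
# off by 1): for x = 3*10**0 and y = 2*10**0 with p = 5, x**y = 9 exactly, so
# _dpower(3, 0, 2, 0, 5) should return approximately (90000, -4), i.e. 9.0000
# carried to 5 digits of precision.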
def _log10_lb(c, correction = {
'1': 100, '2': 70, '3': 53, '4': 40, '5': 31,
'6': 23, '7': 16, '8': 10, '9': 5}):
"""Compute a lower bound for 100*log10(c) for a positive integer c."""
if c <= 0:
raise ValueError("The argument to _log10_lb should be nonnegative.")
str_c = str(c)
return 100*len(str_c) - correction[str_c[0]]
##### Helper Functions ####################################################
def _convert_other(other, raiseit=False, allow_float=False):
"""Convert other to Decimal.
Verifies that it's ok to use in an implicit construction.
If allow_float is true, allow conversion from float; this
is used in the comparison methods (__eq__ and friends).
"""
if isinstance(other, Decimal):
return other
if isinstance(other, (int, long)):
return Decimal(other)
if allow_float and isinstance(other, float):
return Decimal.from_float(other)
if raiseit:
raise TypeError("Unable to convert %s to Decimal" % other)
return NotImplemented
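# Quick illustration (interactive examples, not executed as part of the module):
#   >>> _convert_other(5)
#   Decimal('5')
#   >>> _convert_other('5')                    # not an int or Decimal
#   NotImplemented
#   >>> _convert_other(0.5, allow_float=True)  # float allowed only on request
#   Decimal('0.5')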
##### Setup Specific Contexts ############################################
# The default context prototype used by Context(); it is mutable, so that
# new contexts can have different default values.
DefaultContext = Context(
prec=28, rounding=ROUND_HALF_EVEN,
traps=[DivisionByZero, Overflow, InvalidOperation],
flags=[],
Emax=999999999,
Emin=-999999999,
capitals=1
)
# Pre-made alternate contexts offered by the specification
# Don't change these; the user should be able to select these
# contexts and be able to reproduce results from other implementations
# of the spec.
BasicContext = Context(
prec=9, rounding=ROUND_HALF_UP,
traps=[DivisionByZero, Overflow, InvalidOperation, Clamped, Underflow],
flags=[],
)
ExtendedContext = Context(
prec=9, rounding=ROUND_HALF_EVEN,
traps=[],
flags=[],
)
##### crud for parsing strings #############################################
#
# Regular expression used for parsing numeric strings. Additional
# comments:
#
# 1. Uncomment the two '\s*' lines to allow leading and/or trailing
# whitespace. But note that the specification disallows whitespace in
# a numeric string.
#
# 2. For finite numbers (not infinities and NaNs) the body of the
# number between the optional sign and the optional exponent must have
# at least one decimal digit, possibly after the decimal point. The
# lookahead expression '(?=\d|\.\d)' checks this.
import re
_parser = re.compile(r""" # A numeric string consists of:
# \s*
(?P<sign>[-+])? # an optional sign, followed by either...
(
(?=\d|\.\d) # ...a number (with at least one digit)
(?P<int>\d*) # having a (possibly empty) integer part
(\.(?P<frac>\d*))? # followed by an optional fractional part
(E(?P<exp>[-+]?\d+))? # followed by an optional exponent, or...
|
Inf(inity)? # ...an infinity, or...
|
(?P<signal>s)? # ...an (optionally signaling)
NaN # NaN
(?P<diag>\d*) # with (possibly empty) diagnostic info.
)
# \s*
\Z
""", re.VERBOSE | re.IGNORECASE | re.UNICODE).match
_all_zeros = re.compile('0*$').match
_exact_half = re.compile('50*$').match
##### PEP3101 support functions ##############################################
# The functions in this section have little to do with the Decimal
# class, and could potentially be reused or adapted for other pure
# Python numeric classes that want to implement __format__
#
# A format specifier for Decimal looks like:
#
# [[fill]align][sign][0][minimumwidth][,][.precision][type]
_parse_format_specifier_regex = re.compile(r"""\A
(?:
(?P<fill>.)?
(?P<align>[<>=^])
)?
(?P<sign>[-+ ])?
(?P<zeropad>0)?
(?P<minimumwidth>(?!0)\d+)?
(?P<thousands_sep>,)?
(?:\.(?P<precision>0|(?!0)\d+))?
(?P<type>[eEfFgGn%])?
\Z
""", re.VERBOSE)
del re
# The locale module is only needed for the 'n' format specifier. The
# rest of the PEP 3101 code functions quite happily without it, so we
# don't care too much if locale isn't present.
try:
import locale as _locale
except ImportError:
pass
def _parse_format_specifier(format_spec, _localeconv=None):
"""Parse and validate a format specifier.
Turns a standard numeric format specifier into a dict, with the
following entries:
fill: fill character to pad field to minimum width
align: alignment type, either '<', '>', '=' or '^'
sign: either '+', '-' or ' '
minimumwidth: nonnegative integer giving minimum width
zeropad: boolean, indicating whether to pad with zeros
thousands_sep: string to use as thousands separator, or ''
grouping: grouping for thousands separators, in format
used by localeconv
decimal_point: string to use for decimal point
precision: nonnegative integer giving precision, or None
type: one of the characters 'eEfFgG%', or None
unicode: boolean (always True for Python 3.x)
"""
m = _parse_format_specifier_regex.match(format_spec)
if m is None:
raise ValueError("Invalid format specifier: " + format_spec)
# get the dictionary
format_dict = m.groupdict()
# zeropad; defaults for fill and alignment. If zero padding
# is requested, the fill and align fields should be absent.
fill = format_dict['fill']
align = format_dict['align']
format_dict['zeropad'] = (format_dict['zeropad'] is not None)
if format_dict['zeropad']:
if fill is not None:
raise ValueError("Fill character conflicts with '0'"
" in format specifier: " + format_spec)
if align is not None:
raise ValueError("Alignment conflicts with '0' in "
"format specifier: " + format_spec)
format_dict['fill'] = fill or ' '
# PEP 3101 originally specified that the default alignment should
# be left; it was later agreed that right-aligned makes more sense
# for numeric types. See http://bugs.python.org/issue6857.
format_dict['align'] = align or '>'
# default sign handling: '-' for negative, '' for positive
if format_dict['sign'] is None:
format_dict['sign'] = '-'
# minimumwidth defaults to 0; precision remains None if not given
format_dict['minimumwidth'] = int(format_dict['minimumwidth'] or '0')
if format_dict['precision'] is not None:
format_dict['precision'] = int(format_dict['precision'])
# if format type is 'g' or 'G' then a precision of 0 makes little
# sense; convert it to 1. Same if format type is unspecified.
if format_dict['precision'] == 0:
if format_dict['type'] is None or format_dict['type'] in 'gG':
format_dict['precision'] = 1
# determine thousands separator, grouping, and decimal separator, and
# add appropriate entries to format_dict
if format_dict['type'] == 'n':
# apart from separators, 'n' behaves just like 'g'
format_dict['type'] = 'g'
if _localeconv is None:
_localeconv = _locale.localeconv()
if format_dict['thousands_sep'] is not None:
raise ValueError("Explicit thousands separator conflicts with "
"'n' type in format specifier: " + format_spec)
format_dict['thousands_sep'] = _localeconv['thousands_sep']
format_dict['grouping'] = _localeconv['grouping']
format_dict['decimal_point'] = _localeconv['decimal_point']
else:
if format_dict['thousands_sep'] is None:
format_dict['thousands_sep'] = ''
format_dict['grouping'] = [3, 0]
format_dict['decimal_point'] = '.'
# record whether return type should be str or unicode
format_dict['unicode'] = isinstance(format_spec, unicode)
return format_dict
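# Quick illustration (interactive example, not executed as part of the module;
# only a few of the resulting keys are shown):
#   >>> spec = _parse_format_specifier('>10,.2f')
#   >>> spec['align'], spec['minimumwidth'], spec['thousands_sep'], spec['precision'], spec['type']
#   ('>', 10, ',', 2, 'f')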
def _format_align(sign, body, spec):
"""Given an unpadded, non-aligned numeric string 'body' and sign
string 'sign', add padding and alignment conforming to the given
format specifier dictionary 'spec' (as produced by
parse_format_specifier).
Also converts result to unicode if necessary.
"""
# how much extra space do we have to play with?
minimumwidth = spec['minimumwidth']
fill = spec['fill']
padding = fill*(minimumwidth - len(sign) - len(body))
align = spec['align']
if align == '<':
result = sign + body + padding
elif align == '>':
result = padding + sign + body
elif align == '=':
result = sign + padding + body
elif align == '^':
half = len(padding)//2
result = padding[:half] + sign + body + padding[half:]
else:
raise ValueError('Unrecognised alignment field')
# make sure that result is unicode if necessary
if spec['unicode']:
result = unicode(result)
return result
def _group_lengths(grouping):
"""Convert a localeconv-style grouping into a (possibly infinite)
iterable of integers representing group lengths.
"""
# The result from localeconv()['grouping'], and the input to this
# function, should be a list of integers in one of the
# following three forms:
#
    #   (1) an empty list, or
    #   (2) a nonempty list of positive integers + [0], or
    #   (3) a list of positive integers + [locale.CHAR_MAX].
from itertools import chain, repeat
if not grouping:
return []
elif grouping[-1] == 0 and len(grouping) >= 2:
return chain(grouping[:-1], repeat(grouping[-2]))
elif grouping[-1] == _locale.CHAR_MAX:
return grouping[:-1]
else:
raise ValueError('unrecognised format for grouping')
def _insert_thousands_sep(digits, spec, min_width=1):
"""Insert thousands separators into a digit string.
spec is a dictionary whose keys should include 'thousands_sep' and
'grouping'; typically it's the result of parsing the format
specifier using _parse_format_specifier.
The min_width keyword argument gives the minimum length of the
result, which will be padded on the left with zeros if necessary.
If necessary, the zero padding adds an extra '0' on the left to
avoid a leading thousands separator. For example, inserting
commas every three digits in '123456', with min_width=8, gives
'0,123,456', even though that has length 9.
"""
sep = spec['thousands_sep']
grouping = spec['grouping']
groups = []
for l in _group_lengths(grouping):
if l <= 0:
raise ValueError("group length should be positive")
# max(..., 1) forces at least 1 digit to the left of a separator
l = min(max(len(digits), min_width, 1), l)
groups.append('0'*(l - len(digits)) + digits[-l:])
digits = digits[:-l]
min_width -= l
if not digits and min_width <= 0:
break
min_width -= len(sep)
else:
l = max(len(digits), min_width, 1)
groups.append('0'*(l - len(digits)) + digits[-l:])
return sep.join(reversed(groups))
def _format_sign(is_negative, spec):
"""Determine sign character."""
if is_negative:
return '-'
elif spec['sign'] in ' +':
return spec['sign']
else:
return ''
def _format_number(is_negative, intpart, fracpart, exp, spec):
"""Format a number, given the following data:
is_negative: true if the number is negative, else false
intpart: string of digits that must appear before the decimal point
fracpart: string of digits that must come after the point
exp: exponent, as an integer
spec: dictionary resulting from parsing the format specifier
This function uses the information in spec to:
insert separators (decimal separator and thousands separators)
format the sign
format the exponent
add trailing '%' for the '%' type
zero-pad if necessary
fill and align if necessary
"""
sign = _format_sign(is_negative, spec)
if fracpart:
fracpart = spec['decimal_point'] + fracpart
if exp != 0 or spec['type'] in 'eE':
echar = {'E': 'E', 'e': 'e', 'G': 'E', 'g': 'e'}[spec['type']]
fracpart += "{0}{1:+}".format(echar, exp)
if spec['type'] == '%':
fracpart += '%'
if spec['zeropad']:
min_width = spec['minimumwidth'] - len(fracpart) - len(sign)
else:
min_width = 0
intpart = _insert_thousands_sep(intpart, spec, min_width)
return _format_align(sign, intpart+fracpart, spec)
##### Useful Constants (internal use only) ################################
# Reusable defaults
_Infinity = Decimal('Inf')
_NegativeInfinity = Decimal('-Inf')
_NaN = Decimal('NaN')
_Zero = Decimal(0)
_One = Decimal(1)
_NegativeOne = Decimal(-1)
# _SignedInfinity[sign] is infinity w/ that sign
_SignedInfinity = (_Infinity, _NegativeInfinity)
if __name__ == '__main__':
import doctest, sys
doctest.testmod(sys.modules[__name__])
|
"""
Rings
"""
# ****************************************************************************
# Copyright (C) 2005 William Stein <wstein@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
from sage.misc.lazy_import import lazy_import
# Ring base classes
from .ring import (Ring, Field, CommutativeRing, IntegralDomain,
DedekindDomain, PrincipalIdealDomain, EuclideanDomain)
# Ring element base classes
from sage.structure.element import (CommutativeAlgebraElement,
RingElement, CommutativeRingElement, IntegralDomainElement,
DedekindDomainElement, PrincipalIdealDomainElement,
EuclideanDomainElement, FieldElement)
# Ideals
from .ideal import Ideal
ideal = Ideal
# Quotient
from .quotient_ring import QuotientRing
# Infinities
from .infinity import infinity, Infinity, InfinityRing, unsigned_infinity, UnsignedInfinityRing
# Rational integers.
from .integer_ring import IntegerRing, ZZ, crt_basis
from .integer import Integer
# Rational numbers
from .rational_field import RationalField, QQ
from .rational import Rational
Rationals = RationalField
# Integers modulo n.
from sage.rings.finite_rings.integer_mod_ring import IntegerModRing, Zmod
from sage.rings.finite_rings.integer_mod import IntegerMod, Mod, mod
Integers = IntegerModRing
# Finite fields
from .finite_rings.all import *
# Number field
from .number_field.all import *
# Function field
from .function_field.all import *
# Finite residue fields
from .finite_rings.residue_field import ResidueField
# p-adic field
from .padics.all import *
from .padics.padic_printing import _printer_defaults as padic_printing
# valuations
from .valuation.all import *
# Semirings
from .semirings.all import *
# Real numbers
from .real_mpfr import (RealField, RR,
create_RealNumber as RealNumber) # this is used by the preparser to wrap real literals -- very important.
Reals = RealField
from .real_double import RealDoubleField, RDF, RealDoubleElement
from .real_lazy import RealLazyField, RLF, ComplexLazyField, CLF
from sage.rings.real_arb import RealBallField, RBF
# Polynomial Rings and Polynomial Quotient Rings
from .polynomial.all import *
# Algebraic numbers
from .qqbar import (AlgebraicRealField, AA,
AlgebraicReal,
AlgebraicField, QQbar,
AlgebraicNumber,
number_field_elements_from_algebraics)
from .universal_cyclotomic_field import UniversalCyclotomicField, E
# Intervals
from .real_mpfi import (RealIntervalField,
RIF,
RealInterval)
# Complex numbers
from .complex_mpfr import ComplexField
from .complex_mpfr import create_ComplexNumber as ComplexNumber
Complexes = ComplexField
from .complex_interval_field import ComplexIntervalField
from .complex_interval import (create_ComplexIntervalFieldElement as ComplexIntervalFieldElement)
from .complex_double import ComplexDoubleField, ComplexDoubleElement, CDF
from .complex_mpc import MPComplexField
from sage.rings.complex_arb import ComplexBallField, CBF
# Power series rings
from .power_series_ring import PowerSeriesRing
from .power_series_ring_element import PowerSeries
# Laurent series ring in one variable
from .laurent_series_ring import LaurentSeriesRing
from .laurent_series_ring_element import LaurentSeries
# Lazy Laurent series ring
lazy_import('sage.rings.lazy_laurent_series_ring', 'LazyLaurentSeriesRing')
# Tate algebras
from .tate_algebra import TateAlgebra
# Puiseux series ring
from .puiseux_series_ring import PuiseuxSeriesRing
from .puiseux_series_ring_element import PuiseuxSeries
# Pseudo-ring of PARI objects.
from .pari_ring import PariRing, Pari
# Big-oh notation
from .big_oh import O
# Fraction field
from .fraction_field import FractionField
Frac = FractionField
# Localization
from .localization import Localization
# c-finite sequences
from .cfinite_sequence import CFiniteSequence, CFiniteSequences
from .bernoulli_mod_p import bernoulli_mod_p, bernoulli_mod_p_single
from .monomials import monomials
CC = ComplexField()
CIF = ComplexIntervalField()
# invariant theory
from .invariants.all import *
from .fast_arith import prime_range
# continued fractions
from sage.rings.continued_fraction import (continued_fraction,
continued_fraction_list)
# asymptotic ring
from .asymptotic.all import *
# Register classes in numbers abc
from . import numbers_abc
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Node driver for Aliyun.
"""
try:
import simplejson as json
except ImportError:
import json
import time
from libcloud.common.aliyun import AliyunXmlResponse, SignedAliyunConnection
from libcloud.common.types import LibcloudError
from libcloud.compute.base import Node, NodeDriver, NodeImage, NodeSize, \
StorageVolume, VolumeSnapshot, NodeLocation
from libcloud.compute.types import NodeState, StorageVolumeState, \
VolumeSnapshotState
from libcloud.utils.py3 import _real_unicode as u
from libcloud.utils.xml import findall, findattr, findtext
__all__ = [
'DiskCategory',
'InternetChargeType',
'ECS_API_VERSION',
'ECSDriver',
'ECSSecurityGroup',
'ECSZone'
]
ECS_API_VERSION = '2014-05-26'
ECS_API_ENDPOINT = 'ecs.aliyuncs.com'
DEFAULT_SIGNATURE_VERSION = '1.0'
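# Illustrative usage of the driver defined below (a sketch; the provider
# constant and constructor arguments follow the usual libcloud pattern and are
# not verified against this exact version):
#
#     from libcloud.compute.providers import get_driver
#     from libcloud.compute.types import Provider
#
#     cls = get_driver(Provider.ALIYUN_ECS)
#     driver = cls('<access_key_id>', '<access_key_secret>', region='cn-hangzhou')
#     for node in driver.list_nodes():
#         print(node.name, node.state)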
def _parse_bool(value):
if isinstance(value, bool):
return value
if u(value).lower() == 'true':
return True
return False
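# Quick illustration (interactive examples, not executed at import time):
#   >>> _parse_bool(True)
#   True
#   >>> _parse_bool('true')
#   True
#   >>> _parse_bool('False')
#   False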
"""
Define the extra dictionary for specific resources
"""
RESOURCE_EXTRA_ATTRIBUTES_MAP = {
'node': {
'description': {
'xpath': 'Description',
'transform_func': u
},
'image_id': {
'xpath': 'ImageId',
'transform_func': u
},
'zone_id': {
'xpath': 'ZoneId',
'transform_func': u
},
'instance_type': {
'xpath': 'InstanceType',
'transform_func': u
},
'instance_type_family': {
'xpath': 'InstanceTypeFamily',
'transform_func': u
},
'hostname': {
'xpath': 'HostName',
'transform_func': u
},
'serial_number': {
'xpath': 'SerialNumber',
'transform_func': u
},
'internet_charge_type': {
'xpath': 'InternetChargeType',
'transform_func': u
},
'creation_time': {
'xpath': 'CreationTime',
'transform_func': u
},
'instance_network_type': {
'xpath': 'InstanceNetworkType',
'transform_func': u
},
'instance_charge_type': {
'xpath': 'InstanceChargeType',
'transform_func': u
},
'device_available': {
'xpath': 'DeviceAvailable',
'transform_func': u
},
'io_optimized': {
'xpath': 'IoOptimized',
'transform_func': u
},
'expired_time': {
'xpath': 'ExpiredTime',
'transform_func': u
}
},
'vpc_attributes': {
'vpc_id': {
'xpath': 'VpcId',
'transform_func': u
},
'vswitch_id': {
'xpath': 'VSwitchId',
'transform_func': u
},
'private_ip_address': {
'xpath': 'PrivateIpAddress/IpAddress',
'transform_func': u
},
'nat_ip_address': {
'xpath': 'NatIpAddress',
'transform_func': u
}
},
'eip_address_associate': {
'allocation_id': {
'xpath': 'AllocationId',
'transform_func': u
},
'ip_address': {
'xpath': 'IpAddress',
'transform_func': u
},
'bandwidth': {
'xpath': 'Bandwidth',
'transform_func': int
},
'internet_charge_type': {
'xpath': 'InternetChargeType',
'transform_func': u
}
},
'operation_locks': {
'lock_reason': {
'xpath': 'LockReason',
'transform_func': u
}
},
'volume': {
'region_id': {
'xpath': 'RegionId',
'transform_func': u
},
'zone_id': {
'xpath': 'ZoneId',
'transform_func': u
},
'description': {
'xpath': 'Description',
'transform_func': u
},
'type': {
'xpath': 'Type',
'transform_func': u
},
'category': {
'xpath': 'Category',
'transform_func': u
},
'image_id': {
'xpath': 'ImageId',
'transform_func': u
},
'source_snapshot_id': {
'xpath': 'SourceSnapshotId',
'transform_func': u
},
'product_code': {
'xpath': 'ProductCode',
'transform_func': u
},
'portable': {
'xpath': 'Portable',
'transform_func': _parse_bool
},
'instance_id': {
'xpath': 'InstanceId',
'transform_func': u
},
'device': {
'xpath': 'Device',
'transform_func': u
},
'delete_with_instance': {
'xpath': 'DeleteWithInstance',
'transform_func': _parse_bool
},
'enable_auto_snapshot': {
'xpath': 'EnableAutoSnapshot',
'transform_func': _parse_bool
},
'creation_time': {
'xpath': 'CreationTime',
'transform_func': u
},
'attached_time': {
'xpath': 'AttachedTime',
'transform_func': u
},
'detached_time': {
'xpath': 'DetachedTime',
'transform_func': u
},
'disk_charge_type': {
'xpath': 'DiskChargeType',
'transform_func': u
}
},
'snapshot': {
'snapshot_name': {
'xpath': 'SnapshotName',
'transform_func': u
},
'description': {
'xpath': 'Description',
'transform_func': u
},
'progress': {
'xpath': 'Progress',
'transform_func': u
},
'source_disk_id': {
'xpath': 'SourceDiskId',
'transform_func': u
},
'source_disk_size': {
'xpath': 'SourceDiskSize',
'transform_func': int
},
'source_disk_type': {
'xpath': 'SourceDiskType',
'transform_func': u
},
'product_code': {
'xpath': 'ProductCode',
'transform_func': u
},
'usage': {
'xpath': 'Usage',
'transform_func': u
}
},
'image': {
'image_version': {
'xpath': 'ImageVersion',
'transform_func': u
},
'os_type': {
'xpath': 'OSType',
'transform_func': u
},
'platform': {
'xpath': 'Platform',
'transform_func': u
},
'architecture': {
'xpath': 'Architecture',
'transform_func': u
},
'description': {
'xpath': 'Description',
'transform_func': u
},
'size': {
'xpath': 'Size',
'transform_func': int
},
'image_owner_alias': {
'xpath': 'ImageOwnerAlias',
'transform_func': u
},
'os_name': {
'xpath': 'OSName',
'transform_func': u
},
'product_code': {
'xpath': 'ProductCode',
'transform_func': u
},
'is_subscribed': {
'xpath': 'IsSubscribed',
'transform_func': _parse_bool
},
'progress': {
'xpath': 'Progress',
'transform_func': u
},
'creation_time': {
'xpath': 'CreationTime',
'transform_func': u
},
'usage': {
'xpath': 'Usage',
'transform_func': u
},
'is_copied': {
'xpath': 'IsCopied',
'transform_func': _parse_bool
}
},
'disk_device_mapping': {
'snapshot_id': {
'xpath': 'SnapshotId',
'transform_func': u
},
'size': {
'xpath': 'Size',
'transform_func': int
},
'device': {
'xpath': 'Device',
'transform_func': u
},
'format': {
'xpath': 'Format',
'transform_func': u
},
'import_oss_bucket': {
'xpath': 'ImportOSSBucket',
'transform_func': u
},
'import_oss_object': {
'xpath': 'ImportOSSObject',
'transform_func': u
}
}
}
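# How the map above is typically consumed (a sketch; the real helper lives
# further down in the driver and may differ in detail): each entry's 'xpath' is
# looked up in the resource's XML element and the text is run through
# 'transform_func', e.g.
#
#     extra = {}
#     for name, item in RESOURCE_EXTRA_ATTRIBUTES_MAP['node'].items():
#         value = findtext(element, item['xpath'], namespace=None)
#         if value is not None:
#             extra[name] = item['transform_func'](value)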
class ECSConnection(SignedAliyunConnection):
"""
Represents a single connection to the Aliyun ECS Endpoint.
"""
api_version = ECS_API_VERSION
host = ECS_API_ENDPOINT
responseCls = AliyunXmlResponse
service_name = 'ecs'
class ECSSecurityGroup(object):
"""
Security group used to control nodes internet and intranet accessibility.
"""
def __init__(self, id, name, description=None, driver=None, vpc_id=None,
creation_time=None):
self.id = id
self.name = name
self.description = description
self.driver = driver
self.vpc_id = vpc_id
self.creation_time = creation_time
def __repr__(self):
return ('<ECSSecurityGroup: id=%s, name=%s, driver=%s ...>' %
(self.id, self.name, self.driver.name))
class ECSSecurityGroupAttribute(object):
"""
Security group attribute.
"""
def __init__(self, ip_protocol=None, port_range=None,
source_group_id=None, policy=None, nic_type=None):
self.ip_protocol = ip_protocol
self.port_range = port_range
self.source_group_id = source_group_id
self.policy = policy
self.nic_type = nic_type
def __repr__(self):
return ('<ECSSecurityGroupAttribute: ip_protocol=%s ...>' %
(self.ip_protocol))
class ECSZone(object):
"""
ECSZone used to represent an availability zone in a region.
"""
def __init__(self, id, name, driver=None,
available_resource_types=None,
available_instance_types=None,
available_disk_categories=None):
self.id = id
self.name = name
self.driver = driver
self.available_resource_types = available_resource_types
self.available_instance_types = available_instance_types
self.available_disk_categories = available_disk_categories
def __repr__(self):
return ('<ECSZone: id=%s, name=%s, driver=%s>' %
(self.id, self.name, self.driver))
class InternetChargeType(object):
"""
Internet connection billing types for Aliyun Nodes.
"""
BY_BANDWIDTH = 'PayByBandwidth'
BY_TRAFFIC = 'PayByTraffic'
class DiskCategory(object):
"""
Enum defined disk types supported by Aliyun system and data disks.
"""
CLOUD = 'cloud'
CLOUD_EFFICIENCY = 'cloud_efficiency'
CLOUD_SSD = 'cloud_ssd'
EPHEMERAL_SSD = 'ephemeral_ssd'
class Pagination(object):
"""
Pagination used to describe the multiple pages results.
"""
def __init__(self, total, size, current):
"""
Create a pagination.
:param total: the total count of the results
:param size: the page size of each page
:param current: the current page number, 1-based
"""
self.total = total
self.size = size
self.current = current
def next(self):
"""
Switch to the next page.
:return: the new pagination or None when no more page
:rtype: ``Pagination``
"""
if self.total is None or (self.size * self.current >= self.total):
return None
self.current += 1
return self
def to_dict(self):
return {'PageNumber': self.current,
'PageSize': self.size}
def __repr__(self):
return ('<Pagination total=%d, size=%d, current page=%d>' %
(self.total, self.size, self.current))
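# Illustrative paging loop built on Pagination (a sketch; the driver's
# _request_multiple_pages helper, referenced in the methods below, is what
# actually drives this):
#
#     pagination = Pagination(total=None, size=50, current=1)
#     while pagination:
#         params.update(pagination.to_dict())
#         # ...issue the request, parse one page of results,
#         # and set pagination.total from the response's TotalCount...
#         pagination = pagination.next()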
class ECSDriver(NodeDriver):
"""
Aliyun ECS node driver.
Used for Aliyun ECS service.
TODO:
Get guest OS root password
Adjust internet bandwidth settings
Manage security groups and rules
"""
name = 'Aliyun ECS'
website = 'https://www.aliyun.com/product/ecs'
connectionCls = ECSConnection
features = {'create_node': ['password']}
namespace = None
path = '/'
internet_charge_types = InternetChargeType
disk_categories = DiskCategory
NODE_STATE_MAPPING = {
'Starting': NodeState.PENDING,
'Running': NodeState.RUNNING,
'Stopping': NodeState.PENDING,
'Stopped': NodeState.STOPPED
}
VOLUME_STATE_MAPPING = {
'In_use': StorageVolumeState.INUSE,
'Available': StorageVolumeState.AVAILABLE,
'Attaching': StorageVolumeState.ATTACHING,
'Detaching': StorageVolumeState.INUSE,
'Creating': StorageVolumeState.CREATING,
'ReIniting': StorageVolumeState.CREATING}
SNAPSHOT_STATE_MAPPING = {
'progressing': VolumeSnapshotState.CREATING,
'accomplished': VolumeSnapshotState.AVAILABLE,
'failed': VolumeSnapshotState.ERROR}
def list_nodes(self, ex_node_ids=None, ex_filters=None):
"""
List all nodes.
@inherits: :class:`NodeDriver.create_node`
        :keyword ex_node_ids: a list of node ids used to filter nodes.
                              Only the nodes whose id is in this list
                              will be returned.
        :type ex_node_ids: ``list`` of ``str``
        :keyword ex_filters: node attribute and value pairs to filter nodes.
                             Only the nodes which match all the pairs will
                             be returned.
                             If a filter attribute needs a JSON array value,
                             pass a ``list`` object; the driver will convert it.
        :type ex_filters: ``dict``
"""
params = {'Action': 'DescribeInstances',
'RegionId': self.region}
if ex_node_ids:
if isinstance(ex_node_ids, list):
params['InstanceIds'] = self._list_to_json_array(ex_node_ids)
else:
raise AttributeError('ex_node_ids should be a list of '
'node ids.')
if ex_filters:
if isinstance(ex_filters, dict):
params.update(ex_filters)
else:
raise AttributeError('ex_filters should be a dict of '
'node attributes.')
nodes = self._request_multiple_pages(self.path, params,
self._to_nodes)
return nodes
def list_sizes(self, location=None):
params = {'Action': 'DescribeInstanceTypes'}
resp_body = self.connection.request(self.path, params).object
size_elements = findall(resp_body, 'InstanceTypes/InstanceType',
namespace=self.namespace)
sizes = [self._to_size(each) for each in size_elements]
return sizes
def list_locations(self):
params = {'Action': 'DescribeRegions'}
resp_body = self.connection.request(self.path, params).object
location_elements = findall(resp_body, 'Regions/Region',
namespace=self.namespace)
locations = [self._to_location(each) for each in location_elements]
return locations
def create_node(self, name, size, image, auth=None,
ex_security_group_id=None, ex_description=None,
ex_internet_charge_type=None,
ex_internet_max_bandwidth_out=None,
ex_internet_max_bandwidth_in=None,
ex_hostname=None, ex_io_optimized=None,
ex_system_disk=None, ex_data_disks=None,
ex_vswitch_id=None, ex_private_ip_address=None,
ex_client_token=None, **kwargs):
"""
@inherits: :class:`NodeDriver.create_node`
:param name: The name for this new node (required)
:type name: ``str``
:param image: The image to use when creating this node (required)
:type image: `NodeImage`
:param size: The size of the node to create (required)
:type size: `NodeSize`
:keyword auth: Initial authentication information for the node
(optional)
:type auth: :class:`NodeAuthSSHKey` or :class:`NodeAuthPassword`
:keyword ex_security_group_id: The id of the security group the
                                       newly created node is attached to.
(required)
:type ex_security_group_id: ``str``
:keyword ex_description: A description string for this node (optional)
:type ex_description: ``str``
:keyword ex_internet_charge_type: The internet charge type (optional)
:type ex_internet_charge_type: a ``str`` of 'PayByTraffic'
or 'PayByBandwidth'
:keyword ex_internet_max_bandwidth_out: The max output bandwidth,
in Mbps (optional)
Required for 'PayByTraffic'
internet charge type
        :type ex_internet_max_bandwidth_out: an ``int`` in range [0, 100];
                                             an ``int`` in range [1, 100] for
'PayByTraffic' internet charge
type
:keyword ex_internet_max_bandwidth_in: The max input bandwidth,
in Mbps (optional)
        :type ex_internet_max_bandwidth_in: an ``int`` in range [1, 200];
                                            defaults to 200 on the server side
:keyword ex_hostname: The hostname for the node (optional)
:type ex_hostname: ``str``
:keyword ex_io_optimized: Whether the node is IO optimized (optional)
        :type ex_io_optimized: ``bool``
:keyword ex_system_disk: The system disk for the node (optional)
:type ex_system_disk: ``dict``
:keyword ex_data_disks: The data disks for the node (optional)
:type ex_data_disks: a `list` of `dict`
:keyword ex_vswitch_id: The id of vswitch for a VPC type node
(optional)
:type ex_vswitch_id: ``str``
:keyword ex_private_ip_address: The IP address in private network
(optional)
:type ex_private_ip_address: ``str``
        :keyword ex_client_token: A token generated by the client to keep
                                  requests idempotent (optional)
        :type ex_client_token: ``str``
"""
params = {'Action': 'CreateInstance',
'RegionId': self.region,
'ImageId': image.id,
'InstanceType': size.id,
'InstanceName': name}
if not ex_security_group_id:
raise AttributeError('ex_security_group_id is mandatory')
params['SecurityGroupId'] = ex_security_group_id
if ex_description:
params['Description'] = ex_description
inet_params = self._get_internet_related_params(
ex_internet_charge_type,
ex_internet_max_bandwidth_in,
ex_internet_max_bandwidth_out)
if inet_params:
params.update(inet_params)
if ex_hostname:
params['HostName'] = ex_hostname
if auth:
auth = self._get_and_check_auth(auth)
params['Password'] = auth.password
if ex_io_optimized is not None:
optimized = ex_io_optimized
if isinstance(optimized, bool):
optimized = 'optimized' if optimized else 'none'
params['IoOptimized'] = optimized
if ex_system_disk:
system_disk = self._get_system_disk(ex_system_disk)
if system_disk:
params.update(system_disk)
if ex_data_disks:
data_disks = self._get_data_disks(ex_data_disks)
if data_disks:
params.update(data_disks)
if ex_vswitch_id:
params['VSwitchId'] = ex_vswitch_id
if ex_private_ip_address:
if not ex_vswitch_id:
raise AttributeError('must provide ex_private_ip_address '
'and ex_vswitch_id at the same time')
else:
params['PrivateIpAddress'] = ex_private_ip_address
if ex_client_token:
params['ClientToken'] = ex_client_token
resp = self.connection.request(self.path, params=params)
node_id = findtext(resp.object, xpath='InstanceId',
namespace=self.namespace)
nodes = self.list_nodes(ex_node_ids=[node_id])
if len(nodes) != 1:
            raise LibcloudError('could not find the newly created node '
'with id %s. ' % node_id,
driver=self)
node = nodes[0]
self.ex_start_node(node)
self._wait_until_state(nodes, NodeState.RUNNING)
return node
def reboot_node(self, node, ex_force_stop=False):
"""
Reboot the given node
@inherits :class:`NodeDriver.reboot_node`
        :keyword ex_force_stop: if ``True``, force-stop the node (data may be
                                lost); otherwise, stop the node normally.
                                Defaults to ``False``
:type ex_force_stop: ``bool``
"""
params = {'Action': 'RebootInstance',
'InstanceId': node.id,
'ForceStop': u(ex_force_stop).lower()}
resp = self.connection.request(self.path, params=params)
return resp.success() and \
self._wait_until_state([node], NodeState.RUNNING)
def destroy_node(self, node):
nodes = self.list_nodes(ex_node_ids=[node.id])
        if len(nodes) != 1 or node.id != nodes[0].id:
raise LibcloudError('could not find the node with id %s.'
% node.id)
current = nodes[0]
if current.state == NodeState.RUNNING:
# stop node first
self.ex_stop_node(node)
self._wait_until_state(nodes, NodeState.STOPPED)
params = {'Action': 'DeleteInstance',
'InstanceId': node.id}
resp = self.connection.request(self.path, params)
return resp.success()
def ex_start_node(self, node):
"""
Start node to running state.
:param node: the ``Node`` object to start
:type node: ``Node``
:return: starting operation result.
:rtype: ``bool``
"""
params = {'Action': 'StartInstance',
'InstanceId': node.id}
resp = self.connection.request(self.path, params)
return resp.success() and \
self._wait_until_state([node], NodeState.RUNNING)
def ex_stop_node(self, node, ex_force_stop=False):
"""
Stop a running node.
:param node: The node to stop
:type node: :class:`Node`
        :keyword ex_force_stop: if ``True``, force-stop the node (data may be
                                lost); otherwise, stop the node normally.
                                Defaults to ``False``
:type ex_force_stop: ``bool``
:return: stopping operation result.
:rtype: ``bool``
"""
params = {'Action': 'StopInstance',
'InstanceId': node.id,
'ForceStop': u(ex_force_stop).lower()}
resp = self.connection.request(self.path, params)
return resp.success() and \
self._wait_until_state([node], NodeState.STOPPED)
def ex_create_security_group(self, description=None, client_token=None):
"""
Create a new security group.
:keyword description: security group description
:type description: ``unicode``
:keyword client_token: a token generated by client to identify
each request.
:type client_token: ``str``
"""
params = {'Action': 'CreateSecurityGroup',
'RegionId': self.region}
if description:
params['Description'] = description
if client_token:
params['ClientToken'] = client_token
resp = self.connection.request(self.path, params)
return findtext(resp.object, 'SecurityGroupId',
namespace=self.namespace)
def ex_delete_security_group_by_id(self, group_id=None):
"""
        Delete a security group.
:keyword group_id: security group id
:type group_id: ``str``
"""
params = {'Action': 'DeleteSecurityGroup',
'RegionId': self.region,
'SecurityGroupId': group_id}
resp = self.connection.request(self.path, params)
return resp.success()
def ex_list_security_groups(self, ex_filters=None):
"""
List security groups in the current region.
:keyword ex_filters: security group attributes to filter results.
:type ex_filters: ``dict``
:return: a list of defined security groups
:rtype: ``list`` of ``ECSSecurityGroup``
"""
params = {'Action': 'DescribeSecurityGroups',
'RegionId': self.region}
if ex_filters and isinstance(ex_filters, dict):
ex_filters.update(params)
params = ex_filters
def _parse_response(resp_object):
sg_elements = findall(resp_object, 'SecurityGroups/SecurityGroup',
namespace=self.namespace)
sgs = [self._to_security_group(el) for el in sg_elements]
return sgs
return self._request_multiple_pages(self.path, params,
_parse_response)
def ex_list_security_group_attributes(self, group_id=None,
nic_type='internet'):
"""
List security group attributes in the current region.
:keyword group_id: security group id.
        :type group_id: ``str``
:keyword nic_type: internet|intranet.
:type nic_type: ``str``
:return: a list of defined security group Attributes
:rtype: ``list`` of ``ECSSecurityGroupAttribute``
"""
params = {'Action': 'DescribeSecurityGroupAttribute',
'RegionId': self.region,
'NicType': nic_type}
if group_id is None:
raise AttributeError('group_id is required')
params['SecurityGroupId'] = group_id
resp_object = self.connection.request(self.path, params).object
sga_elements = findall(resp_object, 'Permissions/Permission',
namespace=self.namespace)
return [self._to_security_group_attribute(el) for el in sga_elements]
def ex_list_zones(self, region_id=None):
"""
List availability zones in the given region or the current region.
:keyword region_id: the id of the region to query zones from
:type region_id: ``str``
:return: list of zones
:rtype: ``list`` of ``ECSZone``
"""
params = {'Action': 'DescribeZones'}
if region_id:
params['RegionId'] = region_id
else:
params['RegionId'] = self.region
resp_body = self.connection.request(self.path, params).object
zone_elements = findall(resp_body, 'Zones/Zone',
namespace=self.namespace)
zones = [self._to_zone(el) for el in zone_elements]
return zones
##
# Volume and snapshot management methods
##
def list_volumes(self, ex_volume_ids=None, ex_filters=None):
"""
List all volumes.
@inherits: :class:`NodeDriver.list_volumes`
        :keyword ex_volume_ids: a list of volume ids used to filter volumes.
                                Only the volumes whose id is in this list
                                will be returned.
        :type ex_volume_ids: ``list`` of ``str``
        :keyword ex_filters: volume attribute and value pairs to filter
                             volumes. Only the volumes which match all the
                             pairs will be returned.
                             If a filter attribute needs a JSON array value,
                             pass a ``list`` object; the driver will convert it.
        :type ex_filters: ``dict``
"""
params = {'Action': 'DescribeDisks',
'RegionId': self.region}
if ex_volume_ids:
if isinstance(ex_volume_ids, list):
params['DiskIds'] = self._list_to_json_array(ex_volume_ids)
else:
raise AttributeError('ex_volume_ids should be a list of '
'volume ids.')
if ex_filters:
if not isinstance(ex_filters, dict):
raise AttributeError('ex_filters should be a dict of '
'volume attributes.')
else:
for key in ex_filters.keys():
params[key] = ex_filters[key]
def _parse_response(resp_object):
disk_elements = findall(resp_object, 'Disks/Disk',
namespace=self.namespace)
volumes = [self._to_volume(each) for each in disk_elements]
return volumes
return self._request_multiple_pages(self.path, params,
_parse_response)
def list_volume_snapshots(self, volume, ex_snapshot_ids=[],
ex_filters=None):
"""
List snapshots for a storage volume.
        @inherits :class:`NodeDriver.list_volume_snapshots`
        :keyword ex_snapshot_ids: a list of snapshot ids to filter the
                                  snapshots returned.
        :type ex_snapshot_ids: ``list`` of ``str``
        :keyword ex_filters: snapshot attribute and value pairs to filter
                             snapshots. Only the snapshots which match all
                             the pairs will be returned.
                             If a filter attribute needs a JSON array value,
                             pass a ``list`` object; the driver will convert it.
:type ex_filters: ``dict``
"""
params = {'Action': 'DescribeSnapshots',
'RegionId': self.region}
if volume:
params['DiskId'] = volume.id
if ex_snapshot_ids and isinstance(ex_snapshot_ids, list):
params['SnapshotIds'] = self._list_to_json_array(ex_snapshot_ids)
if ex_filters and isinstance(ex_filters, dict):
for key in ex_filters.keys():
params[key] = ex_filters[key]
def _parse_response(resp_body):
snapshot_elements = findall(resp_body, 'Snapshots/Snapshot',
namespace=self.namespace)
snapshots = [self._to_snapshot(each) for each in snapshot_elements]
return snapshots
return self._request_multiple_pages(self.path, params,
_parse_response)
def create_volume(self, size, name, location=None, snapshot=None,
ex_zone_id=None, ex_description=None,
ex_disk_category=None, ex_client_token=None):
"""
Create a new volume.
        @inherits :class:`NodeDriver.create_volume`
:keyword ex_zone_id: the availability zone id (required)
:type ex_zone_id: ``str``
:keyword ex_description: volume description
:type ex_description: ``unicode``
:keyword ex_disk_category: disk category for data disk
:type ex_disk_category: ``str``
:keyword ex_client_token: a token generated by client to identify
each request.
:type ex_client_token: ``str``
"""
params = {'Action': 'CreateDisk',
'RegionId': self.region,
'DiskName': name,
'Size': size}
if ex_zone_id is None:
raise AttributeError('ex_zone_id is required')
params['ZoneId'] = ex_zone_id
if snapshot is not None and isinstance(snapshot, VolumeSnapshot):
params['SnapshotId'] = snapshot.id
if ex_description:
params['Description'] = ex_description
if ex_disk_category:
params['DiskCategory'] = ex_disk_category
if ex_client_token:
params['ClientToken'] = ex_client_token
resp = self.connection.request(self.path, params).object
volume_id = findtext(resp, 'DiskId', namespace=self.namespace)
volumes = self.list_volumes(ex_volume_ids=[volume_id])
if len(volumes) != 1:
            raise LibcloudError('could not find the newly created volume '
'with id %s.' % volume_id,
driver=self)
return volumes[0]
def create_volume_snapshot(self, volume, name=None, ex_description=None,
ex_client_token=None):
"""
Creates a snapshot of the storage volume.
@inherits :class:`NodeDriver.create_volume_snapshot`
:keyword ex_description: description of the snapshot.
:type ex_description: ``unicode``
:keyword ex_client_token: a token generated by client to identify
each request.
:type ex_client_token: ``str``
"""
params = {'Action': 'CreateSnapshot',
'DiskId': volume.id}
if name:
params['SnapshotName'] = name
if ex_description:
params['Description'] = ex_description
if ex_client_token:
params['ClientToken'] = ex_client_token
snapshot_elements = self.connection.request(self.path, params).object
snapshot_id = findtext(snapshot_elements, 'SnapshotId',
namespace=self.namespace)
snapshots = self.list_volume_snapshots(volume=None,
ex_snapshot_ids=[snapshot_id])
if len(snapshots) != 1:
            raise LibcloudError('could not find the newly created snapshot with '
'id %s.' % snapshot_id, driver=self)
return snapshots[0]
def attach_volume(self, node, volume, device=None,
ex_delete_with_instance=None):
"""
Attaches volume to node.
@inherits :class:`NodeDriver.attach_volume`
        :keyword device: device path allocated for the attached volume,
                         between /dev/xvdb and /dev/xvdz; if empty, it is
                         allocated by the system.
        :type device: ``str``
        :keyword ex_delete_with_instance: whether to delete this volume when
                                          the instance is deleted.
        :type ex_delete_with_instance: ``bool``
"""
params = {'Action': 'AttachDisk',
'InstanceId': node.id,
'DiskId': volume.id}
if device:
params['Device'] = device
if ex_delete_with_instance:
params['DeleteWithInstance'] = \
str(bool(ex_delete_with_instance)).lower()
resp = self.connection.request(self.path, params)
return resp.success()
def detach_volume(self, volume, ex_instance_id=None):
"""
Detaches a volume from a node.
@inherits :class:`NodeDriver.detach_volume`
:keyword ex_instance_id: the id of the instance from which the volume
is detached.
:type ex_instance_id: ``str``
"""
params = {'Action': 'DetachDisk',
'DiskId': volume.id}
if ex_instance_id:
params['InstanceId'] = ex_instance_id
else:
volumes = self.list_volumes(ex_volume_ids=[volume.id])
if len(volumes) != 1:
                raise AttributeError('could not find the instance id '
                                     'that volume %s is attached to; '
                                     'ex_instance_id is required.' %
                                     volume.id)
params['InstanceId'] = volumes[0].extra['instance_id']
resp = self.connection.request(self.path, params)
return resp.success()
def destroy_volume(self, volume):
params = {'Action': 'DeleteDisk',
'DiskId': volume.id}
volumes = self.list_volumes(ex_volume_ids=[volume.id])
if len(volumes) != 1:
raise LibcloudError('could not find the volume with id %s.' %
volume.id,
driver=self)
if volumes[0].state != StorageVolumeState.AVAILABLE:
            raise LibcloudError('only a volume in AVAILABLE state can be '
                                'destroyed.', driver=self)
resp = self.connection.request(self.path, params)
return resp.success()
def destroy_volume_snapshot(self, snapshot):
params = {'Action': 'DeleteSnapshot'}
if snapshot and isinstance(snapshot, VolumeSnapshot):
params['SnapshotId'] = snapshot.id
else:
raise AttributeError('snapshot is required and must be a '
'VolumeSnapshot')
resp = self.connection.request(self.path, params)
return resp.success()
##
# Image management methods
##
def list_images(self, location=None, ex_image_ids=None, ex_filters=None):
"""
List images on a provider.
@inherits :class:`NodeDriver.list_images`
:keyword ex_image_ids: a list of image ids to filter the images to
be returned.
:type ex_image_ids: ``list`` of ``str``
        :keyword ex_filters: image attribute and value pairs to filter
                             images. Only images that match all
                             the pairs will be returned.
                             If a filter attribute requires a JSON array
                             value, pass a ``list`` object and the driver
                             will convert it.
:type ex_filters: ``dict``
"""
if location and isinstance(location, NodeLocation):
region = location.id
else:
region = self.region
params = {'Action': 'DescribeImages',
'RegionId': region}
if ex_image_ids:
if isinstance(ex_image_ids, list):
params['ImageId'] = ','.join(ex_image_ids)
else:
raise AttributeError('ex_image_ids should be a list of '
'image ids')
if ex_filters and isinstance(ex_filters, dict):
for key in ex_filters.keys():
params[key] = ex_filters[key]
def _parse_response(resp_body):
image_elements = findall(resp_body, 'Images/Image',
namespace=self.namespace)
images = [self._to_image(each) for each in image_elements]
return images
return self._request_multiple_pages(self.path, params,
_parse_response)
def create_image(self, node, name, description=None, ex_snapshot_id=None,
ex_image_version=None, ex_client_token=None):
"""
Creates an image from a system disk snapshot.
@inherits :class:`NodeDriver.create_image`
:keyword ex_snapshot_id: the id of the snapshot to create the image.
(required)
:type ex_snapshot_id: ``str``
:keyword ex_image_version: the version number of the image
:type ex_image_version: ``str``
:keyword ex_client_token: a token generated by client to identify
each request.
:type ex_client_token: ``str``
"""
params = {'Action': 'CreateImage',
'RegionId': self.region}
if name:
params['ImageName'] = name
if description:
params['Description'] = description
if ex_snapshot_id:
params['SnapshotId'] = ex_snapshot_id
else:
raise AttributeError('ex_snapshot_id is required')
if ex_image_version:
params['ImageVersion'] = ex_image_version
if ex_client_token:
params['ClientToken'] = ex_client_token
resp = self.connection.request(self.path, params)
image_id = findtext(resp.object, 'ImageId', namespace=self.namespace)
return self.get_image(image_id=image_id)
def delete_image(self, node_image):
params = {'Action': 'DeleteImage',
'RegionId': self.region,
'ImageId': node_image.id}
resp = self.connection.request(self.path, params)
return resp.success()
def get_image(self, image_id, ex_region_id=None):
if ex_region_id:
region = ex_region_id
else:
region = self.region
location = NodeLocation(id=region, name=None, country=None,
driver=self)
images = self.list_images(location, ex_image_ids=[image_id])
if len(images) != 1:
raise LibcloudError('could not find the image with id %s' %
image_id,
driver=self)
return images[0]
def copy_image(self, source_region, node_image, name, description=None,
ex_destination_region_id=None, ex_client_token=None):
"""
Copies an image from a source region to the destination region.
        If no destination region is provided, the current region is used.
@inherits :class:`NodeDriver.copy_image`
:keyword ex_destination_region_id: id of the destination region
:type ex_destination_region_id: ``str``
:keyword ex_client_token: a token generated by client to identify
each request.
:type ex_client_token: ``str``
"""
params = {'Action': 'CopyImage',
'RegionId': source_region,
'ImageId': node_image.id}
if ex_destination_region_id is not None:
params['DestinationRegionId'] = ex_destination_region_id
else:
params['DestinationRegionId'] = self.region
if name:
params['DestinationImageName'] = name
if description:
params['DestinationDescription'] = description
if ex_client_token:
params['ClientToken'] = ex_client_token
resp = self.connection.request(self.path, params)
image_id = findtext(resp.object, 'ImageId', namespace=self.namespace)
return self.get_image(image_id=image_id)
def create_public_ip(self, instance_id):
"""
Create public ip.
:keyword instance_id: instance id for allocating public ip.
:type instance_id: ``str``
        :return: public ip
        :rtype: ``str``
"""
params = {'Action': 'AllocatePublicIpAddress',
'InstanceId': instance_id}
resp = self.connection.request(self.path, params=params)
return findtext(resp.object, 'IpAddress',
namespace=self.namespace)
def _to_nodes(self, object):
"""
Convert response to Node object list
:param object: parsed response object
:return: a list of ``Node``
:rtype: ``list``
"""
node_elements = findall(object, 'Instances/Instance', self.namespace)
return [self._to_node(el) for el in node_elements]
def _to_node(self, instance):
"""
Convert an InstanceAttributesType object to ``Node`` object
:param instance: a xml element represents an instance
:return: a ``Node`` object
:rtype: ``Node``
"""
_id = findtext(element=instance, xpath='InstanceId',
namespace=self.namespace)
name = findtext(element=instance, xpath='InstanceName',
namespace=self.namespace)
instance_status = findtext(element=instance, xpath='Status',
namespace=self.namespace)
state = self.NODE_STATE_MAPPING.get(instance_status, NodeState.UNKNOWN)
def _get_ips(ip_address_els):
return [each.text for each in ip_address_els]
public_ip_els = findall(element=instance,
xpath='PublicIpAddress/IpAddress',
namespace=self.namespace)
public_ips = _get_ips(public_ip_els)
private_ip_els = findall(element=instance,
xpath='InnerIpAddress/IpAddress',
namespace=self.namespace)
private_ips = _get_ips(private_ip_els)
# Extra properties
extra = self._get_extra_dict(instance,
RESOURCE_EXTRA_ATTRIBUTES_MAP['node'])
extra['vpc_attributes'] = self._get_vpc_attributes(instance)
extra['eip_address'] = self._get_eip_address(instance)
extra['operation_locks'] = self._get_operation_locks(instance)
node = Node(id=_id, name=name, state=state,
public_ips=public_ips, private_ips=private_ips,
driver=self.connection.driver, extra=extra)
return node
def _get_extra_dict(self, element, mapping):
"""
Extract attributes from the element based on rules provided in the
mapping dictionary.
:param element: Element to parse the values from.
:type element: xml.etree.ElementTree.Element.
:param mapping: Dictionary with the extra layout
        :type mapping: ``dict``
:rtype: ``dict``
"""
extra = {}
for attribute, values in mapping.items():
transform_func = values['transform_func']
value = findattr(element=element,
xpath=values['xpath'],
namespace=self.namespace)
if value:
try:
extra[attribute] = transform_func(value)
except Exception:
extra[attribute] = None
else:
extra[attribute] = value
return extra
def _get_internet_related_params(self, ex_internet_charge_type,
ex_internet_max_bandwidth_in,
ex_internet_max_bandwidth_out):
params = {}
if ex_internet_charge_type:
params['InternetChargeType'] = ex_internet_charge_type
if ex_internet_charge_type.lower() == 'paybytraffic':
if ex_internet_max_bandwidth_out:
params['InternetMaxBandwidthOut'] = \
ex_internet_max_bandwidth_out
else:
raise AttributeError('ex_internet_max_bandwidth_out is '
'mandatory for PayByTraffic internet'
' charge type.')
elif ex_internet_max_bandwidth_out:
params['InternetMaxBandwidthOut'] = \
ex_internet_max_bandwidth_out
if ex_internet_max_bandwidth_in:
params['InternetMaxBandwidthIn'] = \
ex_internet_max_bandwidth_in
return params
def _get_system_disk(self, ex_system_disk):
if not isinstance(ex_system_disk, dict):
raise AttributeError('ex_system_disk is not a dict')
sys_disk_dict = ex_system_disk
key_base = 'SystemDisk.'
# TODO(samsong8610): Use a type instead of dict
mappings = {'category': 'Category',
'disk_name': 'DiskName',
'description': 'Description'}
params = {}
for attr in mappings.keys():
if attr in sys_disk_dict:
params[key_base + mappings[attr]] = sys_disk_dict[attr]
return params
def _get_data_disks(self, ex_data_disks):
if isinstance(ex_data_disks, dict):
data_disks = [ex_data_disks]
elif isinstance(ex_data_disks, list):
data_disks = ex_data_disks
else:
raise AttributeError('ex_data_disks should be a list of dict')
# TODO(samsong8610): Use a type instead of dict
mappings = {'size': 'Size',
'category': 'Category',
'snapshot_id': 'SnapshotId',
'disk_name': 'DiskName',
'description': 'Description',
'device': 'Device',
'delete_with_instance': 'DeleteWithInstance'}
params = {}
for idx, disk in enumerate(data_disks):
key_base = 'DataDisk.{0}.'.format(idx + 1)
for attr in mappings.keys():
if attr in disk:
if attr == 'delete_with_instance':
# Convert bool value to str
value = str(disk[attr]).lower()
else:
value = disk[attr]
params[key_base + mappings[attr]] = value
return params
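    # Hedged illustration (comments only, not part of the driver): a call
    # such as
    #     self._get_data_disks([{'size': 100, 'category': 'cloud_ssd',
    #                            'delete_with_instance': True}])
    # is flattened by the loop above into request parameters like
    #     {'DataDisk.1.Size': 100, 'DataDisk.1.Category': 'cloud_ssd',
    #      'DataDisk.1.DeleteWithInstance': 'true'}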
def _get_vpc_attributes(self, instance):
vpcs = findall(instance, xpath='VpcAttributes',
namespace=self.namespace)
if len(vpcs) <= 0:
return None
return self._get_extra_dict(
vpcs[0], RESOURCE_EXTRA_ATTRIBUTES_MAP['vpc_attributes'])
def _get_eip_address(self, instance):
eips = findall(instance, xpath='EipAddress',
namespace=self.namespace)
if len(eips) <= 0:
return None
return self._get_extra_dict(
eips[0], RESOURCE_EXTRA_ATTRIBUTES_MAP['eip_address_associate'])
def _get_operation_locks(self, instance):
locks = findall(instance, xpath='OperationLocks',
namespace=self.namespace)
if len(locks) <= 0:
return None
return self._get_extra_dict(
locks[0], RESOURCE_EXTRA_ATTRIBUTES_MAP['operation_locks'])
def _wait_until_state(self, nodes, state, wait_period=3, timeout=600):
"""
Block until the provided nodes are in the desired state.
:param nodes: List of nodes to wait for
:type nodes: ``list`` of :class:`.Node`
:param state: desired state
:type state: ``NodeState``
:param wait_period: How many seconds to wait between each loop
iteration. (default is 3)
:type wait_period: ``int``
:param timeout: How many seconds to wait before giving up.
(default is 600)
:type timeout: ``int``
        :return: ``True`` if all the nodes reached the desired state.
:rtype: ``bool``
"""
start = time.time()
end = start + timeout
node_ids = [node.id for node in nodes]
while(time.time() < end):
matched_nodes = self.list_nodes(ex_node_ids=node_ids)
if len(matched_nodes) > len(node_ids):
found_ids = [node.id for node in matched_nodes]
msg = ('found multiple nodes with same ids, '
'desired ids: %(ids)s, found ids: %(found_ids)s' %
{'ids': node_ids, 'found_ids': found_ids})
raise LibcloudError(value=msg, driver=self)
desired_nodes = [node for node in matched_nodes
if node.state == state]
if len(desired_nodes) == len(node_ids):
return True
else:
time.sleep(wait_period)
continue
raise LibcloudError(value='Timed out after %s seconds' % (timeout),
driver=self)
def _to_volume(self, element):
_id = findtext(element, 'DiskId', namespace=self.namespace)
name = findtext(element, 'DiskName', namespace=self.namespace)
size = int(findtext(element, 'Size', namespace=self.namespace))
status_str = findtext(element, 'Status', namespace=self.namespace)
status = self.VOLUME_STATE_MAPPING.get(status_str,
StorageVolumeState.UNKNOWN)
extra = self._get_extra_dict(element,
RESOURCE_EXTRA_ATTRIBUTES_MAP['volume'])
extra['operation_locks'] = self._get_operation_locks(element)
return StorageVolume(_id, name, size, self, state=status, extra=extra)
def _list_to_json_array(self, value):
try:
return json.dumps(value)
except Exception:
raise AttributeError('could not convert list to json array')
def _to_snapshot(self, element):
_id = findtext(element, 'SnapshotId', namespace=self.namespace)
created = findtext(element, 'CreationTime', namespace=self.namespace)
status_str = findtext(element, 'Status', namespace=self.namespace)
state = self.SNAPSHOT_STATE_MAPPING.get(status_str,
VolumeSnapshotState.UNKNOWN)
extra = self._get_extra_dict(element,
RESOURCE_EXTRA_ATTRIBUTES_MAP['snapshot'])
return VolumeSnapshot(id=_id, driver=self, extra=extra,
created=created, state=state)
def _to_size(self, element):
_id = findtext(element, 'InstanceTypeId', namespace=self.namespace)
ram = float(findtext(element, 'MemorySize', namespace=self.namespace))
extra = {}
extra['cpu_core_count'] = int(findtext(element, 'CpuCoreCount',
namespace=self.namespace))
extra['instance_type_family'] = findtext(element, 'InstanceTypeFamily',
namespace=self.namespace)
return NodeSize(id=_id, name=_id, ram=ram, disk=None, bandwidth=None,
price=None, driver=self, extra=extra)
def _to_location(self, element):
_id = findtext(element, 'RegionId', namespace=self.namespace)
localname = findtext(element, 'LocalName', namespace=self.namespace)
return NodeLocation(id=_id, name=localname, country=None, driver=self)
def _to_image(self, element):
_id = findtext(element, 'ImageId', namespace=self.namespace)
name = findtext(element, 'ImageName', namespace=self.namespace)
extra = self._get_extra_dict(element,
RESOURCE_EXTRA_ATTRIBUTES_MAP['image'])
extra['disk_device_mappings'] = self._get_disk_device_mappings(
element.find('DiskDeviceMappings'))
return NodeImage(id=_id, name=name, driver=self, extra=extra)
def _get_disk_device_mappings(self, element):
if element is None:
return None
mapping_element = element.find('DiskDeviceMapping')
if mapping_element is not None:
return self._get_extra_dict(
mapping_element,
RESOURCE_EXTRA_ATTRIBUTES_MAP['disk_device_mapping'])
return None
def _to_security_group(self, element):
_id = findtext(element, 'SecurityGroupId', namespace=self.namespace)
name = findtext(element, 'SecurityGroupName',
namespace=self.namespace)
description = findtext(element, 'Description',
namespace=self.namespace)
vpc_id = findtext(element, 'VpcId', namespace=self.namespace)
creation_time = findtext(element, 'CreationTime',
namespace=self.namespace)
return ECSSecurityGroup(_id, name, description=description,
driver=self, vpc_id=vpc_id,
creation_time=creation_time)
def _to_security_group_attribute(self, element):
ip_protocol = findtext(element, 'IpProtocol', namespace=self.namespace)
port_range = findtext(element, 'PortRange', namespace=self.namespace)
source_group_id = findtext(element, 'SourceGroupId',
namespace=self.namespace)
policy = findtext(element, 'Policy', namespace=self.namespace)
nic_type = findtext(element, 'NicType', namespace=self.namespace)
return ECSSecurityGroupAttribute(ip_protocol=ip_protocol,
port_range=port_range,
source_group_id=source_group_id,
policy=policy, nic_type=nic_type)
def _to_zone(self, element):
_id = findtext(element, 'ZoneId', namespace=self.namespace)
local_name = findtext(element, 'LocalName', namespace=self.namespace)
resource_types = findall(element,
'AvailableResourceCreation/ResourceTypes',
namespace=self.namespace)
instance_types = findall(element,
'AvailableInstanceTypes/InstanceTypes',
namespace=self.namespace)
disk_categories = findall(element,
'AvailableDiskCategories/DiskCategories',
namespace=self.namespace)
def _text(element):
return element.text
return ECSZone(id=_id, name=local_name, driver=self,
available_resource_types=list(
map(_text, resource_types)),
available_instance_types=list(
map(_text, instance_types)),
available_disk_categories=list(
map(_text, disk_categories)))
def _get_pagination(self, element):
page_number = int(findtext(element, 'PageNumber'))
total_count = int(findtext(element, 'TotalCount'))
page_size = int(findtext(element, 'PageSize'))
return Pagination(total=total_count, size=page_size,
current=page_number)
def _request_multiple_pages(self, path, params, parse_func):
"""
Request all resources by multiple pages.
:param path: the resource path
:type path: ``str``
:param params: the query parameters
:type params: ``dict``
:param parse_func: the function object to parse the response body
        :type parse_func: ``function``
        :return: a list of resource objects; an empty list if none are found
:rtype: ``list``
"""
results = []
while True:
one_page = self.connection.request(path, params).object
resources = parse_func(one_page)
results += resources
pagination = self._get_pagination(one_page)
if pagination.next() is None:
break
params.update(pagination.to_dict())
return results
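# --- Hedged usage sketch (comments only; the provider constant, credentials
# --- and zone id below are assumptions for illustration, exercising the
# --- volume/snapshot methods defined above) ---
#
#     from libcloud.compute.providers import get_driver
#     from libcloud.compute.types import Provider
#
#     cls = get_driver(Provider.ALIYUN_ECS)
#     driver = cls('access_key_id', 'access_key_secret', region='cn-hangzhou')
#     volume = driver.create_volume(size=20, name='data-disk',
#                                   ex_zone_id='cn-hangzhou-b')
#     snapshot = driver.create_volume_snapshot(volume, name='data-snap')
#     print([s.id for s in driver.list_volume_snapshots(volume)])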
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPydispatcher(PythonPackage):
"""Multi-producer-multi-consumer signal dispatching mechanism."""
homepage = "http://pydispatcher.sourceforge.net/"
pypi = "PyDispatcher/PyDispatcher-2.0.5.tar.gz"
version('2.0.5', sha256='5570069e1b1769af1fe481de6dd1d3a388492acddd2cdad7a3bde145615d5caf')
depends_on('py-setuptools', type='build')
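# Hedged usage note (assumption, not part of the recipe): with this file in a
# Spack package repository, the library is typically built with
# ``spack install py-pydispatcher`` and activated with
# ``spack load py-pydispatcher``.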
|
import os
import logging
import contextlib
from kikimr.public.sdk.python import client as ydb
def make_driver_config(endpoint, database, path):
return ydb.DriverConfig(
endpoint, database, credentials=ydb.construct_credentials_from_environ(),
root_certificates=ydb.load_ydb_root_certificate(),
)
@contextlib.contextmanager
def session_pool_context(
driver_config: ydb.DriverConfig,
size=1, workers_threads_count=1
):
with ydb.Driver(driver_config) as driver:
try:
logging.info("connecting to the database")
driver.wait(timeout=15)
except TimeoutError:
logging.critical(f"connection failed\n"
f"last reported errors by discovery: {driver.discovery_debug_details()}")
raise
with ydb.SessionPool(driver, size=size, workers_threads_count=workers_threads_count) as session_pool:
try:
yield session_pool
            except Exception as e:
                logging.critical(f"error while using the session pool: {repr(e)}")
                raise
|
# -*- coding: utf-8 -*-
"""This file contains the interface for analysis plugins."""
import abc
import calendar
import collections
import time
from plaso.analysis import definitions as analysis_definitions
from plaso.analysis import logger
from plaso.containers import events
from plaso.containers import reports
from plaso.lib import definitions
class AnalysisPlugin(object):
"""Class that defines the analysis plugin interface."""
# The name of the plugin. This is the name that is matched against when
# loading plugins, so it is important that this name is short, concise and
# explains the nature of the plugin easily. It also needs to be unique.
NAME = 'analysis_plugin'
# Flag to indicate the analysis is for testing purposes only.
TEST_PLUGIN = False
def __init__(self):
"""Initializes an analysis plugin."""
super(AnalysisPlugin, self).__init__()
self._analysis_counter = collections.Counter()
self.plugin_type = analysis_definitions.PLUGIN_TYPE_REPORT
@property
def plugin_name(self):
"""str: name of the plugin."""
return self.NAME
def _CreateEventTag(self, event, labels):
"""Creates an event tag.
Args:
event (EventObject): event to tag.
labels (list[str]): event tag labels.
Returns:
EventTag: the event tag.
"""
event_identifier = event.GetIdentifier()
event_tag = events.EventTag()
event_tag.SetEventIdentifier(event_identifier)
event_tag.AddLabels(labels)
event_identifier_string = event_identifier.CopyToString()
logger.debug('Tagged event: {0:s} with labels: {1:s}'.format(
event_identifier_string, ', '.join(labels)))
return event_tag
# pylint: disable=unused-argument
def CompileReport(self, mediator):
"""Compiles a report of the analysis.
    After the plugin has received every copy of an event to analyze, this
    function will be called so that the report can be assembled.
Args:
mediator (AnalysisMediator): mediates interactions between
analysis plugins and other components, such as storage and dfvfs.
Returns:
AnalysisReport: report.
"""
analysis_report = reports.AnalysisReport(plugin_name=self.NAME)
time_elements = time.gmtime()
time_compiled = calendar.timegm(time_elements)
analysis_report.time_compiled = (
time_compiled * definitions.MICROSECONDS_PER_SECOND)
analysis_report.analysis_counter = self._analysis_counter
return analysis_report
@abc.abstractmethod
def ExamineEvent(self, mediator, event, event_data, event_data_stream):
"""Analyzes an event.
Args:
mediator (AnalysisMediator): mediates interactions between
analysis plugins and other components, such as storage and dfvfs.
event (EventObject): event.
event_data (EventData): event data.
event_data_stream (EventDataStream): event data stream.
"""
|
# sqlalchemy/pool/events.py
# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .base import Pool
from .. import event
from ..engine.base import Engine
class PoolEvents(event.Events):
"""Available events for :class:`_pool.Pool`.
The methods here define the name of an event as well
as the names of members that are passed to listener
functions.
e.g.::
from sqlalchemy import event
def my_on_checkout(dbapi_conn, connection_rec, connection_proxy):
"handle an on checkout event"
event.listen(Pool, 'checkout', my_on_checkout)
In addition to accepting the :class:`_pool.Pool` class and
:class:`_pool.Pool` instances, :class:`_events.PoolEvents` also accepts
:class:`_engine.Engine` objects and the :class:`_engine.Engine` class as
targets, which will be resolved to the ``.pool`` attribute of the
given engine or the :class:`_pool.Pool` class::
engine = create_engine("postgresql://scott:tiger@localhost/test")
# will associate with engine.pool
event.listen(engine, 'checkout', my_on_checkout)
"""
_target_class_doc = "SomeEngineOrPool"
_dispatch_target = Pool
@classmethod
def _accept_with(cls, target):
if isinstance(target, type):
if issubclass(target, Engine):
return Pool
elif issubclass(target, Pool):
return target
elif isinstance(target, Engine):
return target.pool
else:
return target
@classmethod
def _listen(cls, event_key, **kw):
target = event_key.dispatch_target
kw.setdefault("asyncio", target._is_asyncio)
event_key.base_listen(**kw)
def connect(self, dbapi_connection, connection_record):
"""Called at the moment a particular DBAPI connection is first
created for a given :class:`_pool.Pool`.
This event allows one to capture the point directly after which
the DBAPI module-level ``.connect()`` method has been used in order
to produce a new DBAPI connection.
:param dbapi_connection: a DBAPI connection.
:param connection_record: the :class:`._ConnectionRecord` managing the
DBAPI connection.
"""
def first_connect(self, dbapi_connection, connection_record):
"""Called exactly once for the first time a DBAPI connection is
checked out from a particular :class:`_pool.Pool`.
The rationale for :meth:`_events.PoolEvents.first_connect`
is to determine
information about a particular series of database connections based
on the settings used for all connections. Since a particular
:class:`_pool.Pool`
refers to a single "creator" function (which in terms
of a :class:`_engine.Engine`
refers to the URL and connection options used),
it is typically valid to make observations about a single connection
that can be safely assumed to be valid about all subsequent
connections, such as the database version, the server and client
encoding settings, collation settings, and many others.
:param dbapi_connection: a DBAPI connection.
:param connection_record: the :class:`._ConnectionRecord` managing the
DBAPI connection.
"""
def checkout(self, dbapi_connection, connection_record, connection_proxy):
"""Called when a connection is retrieved from the Pool.
:param dbapi_connection: a DBAPI connection.
:param connection_record: the :class:`._ConnectionRecord` managing the
DBAPI connection.
:param connection_proxy: the :class:`._ConnectionFairy` object which
will proxy the public interface of the DBAPI connection for the
lifespan of the checkout.
If you raise a :class:`~sqlalchemy.exc.DisconnectionError`, the current
connection will be disposed and a fresh connection retrieved.
Processing of all checkout listeners will abort and restart
using the new connection.
.. seealso:: :meth:`_events.ConnectionEvents.engine_connect`
- a similar event
which occurs upon creation of a new :class:`_engine.Connection`.
"""
def checkin(self, dbapi_connection, connection_record):
"""Called when a connection returns to the pool.
Note that the connection may be closed, and may be None if the
connection has been invalidated. ``checkin`` will not be called
for detached connections. (They do not return to the pool.)
:param dbapi_connection: a DBAPI connection.
:param connection_record: the :class:`._ConnectionRecord` managing the
DBAPI connection.
"""
def reset(self, dbapi_connection, connection_record):
"""Called before the "reset" action occurs for a pooled connection.
This event represents
when the ``rollback()`` method is called on the DBAPI connection
before it is returned to the pool. The behavior of "reset" can
        be controlled, including being disabled, using the ``reset_on_return``
pool argument.
The :meth:`_events.PoolEvents.reset` event is usually followed by the
        :meth:`_events.PoolEvents.checkin` event, except in those
cases where the connection is discarded immediately after reset.
:param dbapi_connection: a DBAPI connection.
:param connection_record: the :class:`._ConnectionRecord` managing the
DBAPI connection.
.. seealso::
:meth:`_events.ConnectionEvents.rollback`
:meth:`_events.ConnectionEvents.commit`
"""
def invalidate(self, dbapi_connection, connection_record, exception):
"""Called when a DBAPI connection is to be "invalidated".
This event is called any time the :meth:`._ConnectionRecord.invalidate`
method is invoked, either from API usage or via "auto-invalidation",
without the ``soft`` flag.
The event occurs before a final attempt to call ``.close()`` on the
connection occurs.
:param dbapi_connection: a DBAPI connection.
:param connection_record: the :class:`._ConnectionRecord` managing the
DBAPI connection.
:param exception: the exception object corresponding to the reason
for this invalidation, if any. May be ``None``.
.. versionadded:: 0.9.2 Added support for connection invalidation
listening.
.. seealso::
:ref:`pool_connection_invalidation`
"""
def soft_invalidate(self, dbapi_connection, connection_record, exception):
"""Called when a DBAPI connection is to be "soft invalidated".
This event is called any time the :meth:`._ConnectionRecord.invalidate`
method is invoked with the ``soft`` flag.
Soft invalidation refers to when the connection record that tracks
this connection will force a reconnect after the current connection
is checked in. It does not actively close the dbapi_connection
at the point at which it is called.
.. versionadded:: 1.0.3
"""
def close(self, dbapi_connection, connection_record):
"""Called when a DBAPI connection is closed.
The event is emitted before the close occurs.
The close of a connection can fail; typically this is because
the connection is already closed. If the close operation fails,
the connection is discarded.
The :meth:`.close` event corresponds to a connection that's still
associated with the pool. To intercept close events for detached
connections use :meth:`.close_detached`.
.. versionadded:: 1.1
"""
def detach(self, dbapi_connection, connection_record):
"""Called when a DBAPI connection is "detached" from a pool.
This event is emitted after the detach occurs. The connection
is no longer associated with the given connection record.
.. versionadded:: 1.1
"""
def close_detached(self, dbapi_connection):
"""Called when a detached DBAPI connection is closed.
The event is emitted before the close occurs.
The close of a connection can fail; typically this is because
the connection is already closed. If the close operation fails,
the connection is discarded.
.. versionadded:: 1.1
"""
|
from mdsxray import open_mdsdataset
from gridops import MITgcmDataset
from regridding import regrid_vertical
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import hypothesis.strategies as st
import numpy as np
import numpy.testing as npt
import unittest
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
from caffe2.python import (
layer_model_instantiator,
core,
schema,
workspace,
)
from caffe2.python.layers.layers import (
InstantiationContext,
)
from caffe2.python.layers.tags import Tags
from caffe2.python.layer_test_util import (
LayersTestCase,
OpSpec,
)
from caffe2.python.layers.layers import (
IdList,
set_request_only,
is_request_only_scalar,
get_key,
)
import logging
logger = logging.getLogger(__name__)
class TestLayers(LayersTestCase):
def testAddLoss(self):
input_record_LR = self.new_record(
schema.Struct(
('label', schema.Scalar((np.float64, (1, )))),
('logit', schema.Scalar((np.float32, (2, )))),
('weight', schema.Scalar((np.float64, (1, ))))
)
)
loss_LR = self.model.BatchLRLoss(input_record_LR)
self.model.add_loss(loss_LR)
assert 'unnamed' in self.model.loss
self.assertEqual(
schema.Scalar((np.float32, tuple())), self.model.loss.unnamed
)
self.assertEqual(loss_LR, self.model.loss.unnamed)
self.model.add_loss(loss_LR, 'addLoss')
assert 'addLoss' in self.model.loss
self.assertEqual(
schema.Scalar((np.float32, tuple())), self.model.loss.addLoss
)
self.assertEqual(loss_LR, self.model.loss.addLoss)
self.model.add_loss(
schema.Scalar(
dtype=np.float32, blob=core.BlobReference('loss_blob_1')
), 'addLoss'
)
assert 'addLoss_auto_0' in self.model.loss
self.assertEqual(
schema.Scalar((np.float32, tuple())), self.model.loss.addLoss_auto_0
)
assert core.BlobReference('loss_blob_1') in self.model.loss.field_blobs()
self.model.add_loss(
schema.Struct(
(
'structName', schema.Scalar(
dtype=np.float32,
blob=core.BlobReference('loss_blob_2')
)
)
), 'addLoss'
)
assert 'addLoss_auto_1' in self.model.loss
self.assertEqual(
schema.Struct(('structName', schema.Scalar((np.float32, tuple())))),
self.model.loss.addLoss_auto_1
)
assert core.BlobReference('loss_blob_2') in self.model.loss.field_blobs()
loss_in_tuple_0 = schema.Scalar(
dtype=np.float32, blob=core.BlobReference('loss_blob_in_tuple_0')
)
loss_in_tuple_1 = schema.Scalar(
dtype=np.float32, blob=core.BlobReference('loss_blob_in_tuple_1')
)
loss_tuple = schema.NamedTuple(
'loss_in_tuple', * [loss_in_tuple_0, loss_in_tuple_1]
)
self.model.add_loss(loss_tuple, 'addLoss')
assert 'addLoss_auto_2' in self.model.loss
self.assertEqual(
schema.Struct(
('loss_in_tuple_0', schema.Scalar((np.float32, tuple()))),
('loss_in_tuple_1', schema.Scalar((np.float32, tuple())))
), self.model.loss.addLoss_auto_2
)
assert core.BlobReference('loss_blob_in_tuple_0')\
in self.model.loss.field_blobs()
assert core.BlobReference('loss_blob_in_tuple_1')\
in self.model.loss.field_blobs()
def testAddOutputSchema(self):
# add the first field
self.model.add_output_schema('struct', schema.Struct())
expected_output_schema = schema.Struct(('struct', schema.Struct()))
self.assertEqual(
self.model.output_schema,
expected_output_schema,
)
# add the second field
self.model.add_output_schema('scalar', schema.Scalar(np.float64))
expected_output_schema = schema.Struct(
('struct', schema.Struct()),
('scalar', schema.Scalar(np.float64)),
)
self.assertEqual(
self.model.output_schema,
expected_output_schema,
)
        # overwriting a field should raise
with self.assertRaises(AssertionError):
self.model.add_output_schema('scalar', schema.Struct())
def _test_net(self, net, ops_list):
"""
Helper function to assert the net contains some set of operations and
then to run the net.
Inputs:
net -- the network to test and run
ops_list -- the list of operation specifications to check for
in the net
"""
ops_output = self.assertNetContainOps(net, ops_list)
workspace.RunNetOnce(net)
return ops_output
def testFCWithoutBias(self):
output_dims = 2
fc_without_bias = self.model.FCWithoutBias(
self.model.input_feature_schema.float_features, output_dims)
self.model.output_schema = fc_without_bias
self.assertEqual(
schema.Scalar((np.float32, (output_dims, ))),
fc_without_bias
)
train_init_net, train_net = self.get_training_nets()
init_ops = self.assertNetContainOps(
train_init_net,
[
OpSpec("UniformFill", None, None),
]
)
mat_mul_spec = OpSpec(
"MatMul",
[
self.model.input_feature_schema.float_features(),
init_ops[0].output[0],
],
fc_without_bias.field_blobs()
)
self.assertNetContainOps(train_net, [mat_mul_spec])
predict_net = self.get_predict_net()
self.assertNetContainOps(predict_net, [mat_mul_spec])
def testFCwithAxis2(self):
input_dim = 10
output_dim = 30
max_length = 20
input_record = self.new_record(
schema.Struct(
('history_sequence', schema.Scalar((np.float32, (max_length,
input_dim)))),
)
)
fc_out = self.model.FC(
input_record.history_sequence, output_dim,
axis=2)
self.model.output_schema = fc_out
self.assertEqual(
schema.Scalar((np.float32, (max_length, output_dim))),
fc_out
)
train_init_net, train_net = self.get_training_nets()
def testSparseLookupSumPooling(self):
record = schema.NewRecord(self.model.net, schema.Struct(
('sparse', schema.Struct(
('sparse_feature_0', schema.List(
schema.Scalar(np.int64,
metadata=schema.Metadata(categorical_limit=1000)))),
)),
))
embedding_dim = 64
embedding_after_pooling = self.model.SparseLookup(
record.sparse.sparse_feature_0, [embedding_dim], 'Sum')
self.model.output_schema = schema.Struct()
self.assertEqual(
schema.Scalar((np.float32, (embedding_dim, ))),
embedding_after_pooling
)
train_init_net, train_net = self.get_training_nets()
init_ops = self.assertNetContainOps(
train_init_net,
[
OpSpec("UniformFill", None, None),
OpSpec("ConstantFill", None, None),
]
)
sparse_lookup_op_spec = OpSpec(
'SparseLengthsSum',
[
init_ops[0].output[0],
record.sparse.sparse_feature_0.items(),
record.sparse.sparse_feature_0.lengths(),
],
[embedding_after_pooling()]
)
self.assertNetContainOps(train_net, [sparse_lookup_op_spec])
predict_net = self.get_predict_net()
self.assertNetContainOps(predict_net, [sparse_lookup_op_spec])
@given(
use_hashing=st.booleans(),
modulo=st.integers(min_value=100, max_value=200),
)
def testSparseFeatureHashIdList(self, use_hashing, modulo):
record = schema.NewRecord(
self.model.net,
schema.List(schema.Scalar(
np.int64,
metadata=schema.Metadata(categorical_limit=60000)
))
)
output_schema = self.model.SparseFeatureHash(
record,
modulo=modulo,
use_hashing=use_hashing)
self.model.output_schema = output_schema
self.assertEqual(len(self.model.layers), 1)
self.assertEqual(output_schema._items.metadata.categorical_limit,
modulo)
train_init_net, train_net = self.get_training_nets()
@given(
use_hashing=st.booleans(),
modulo=st.integers(min_value=100, max_value=200),
)
def testSparseFeatureHashIdScoreList(self, use_hashing, modulo):
record = schema.NewRecord(self.model.net,
schema.Map(schema.Scalar(np.int64,
metadata=schema.Metadata(
categorical_limit=60000)),
np.float32))
output_schema = self.model.SparseFeatureHash(
record,
modulo=modulo,
use_hashing=use_hashing)
self.model.output_schema = output_schema
self.assertEqual(len(self.model.layers), 1)
self.assertEqual(output_schema._items.keys.metadata.categorical_limit,
modulo)
train_init_net, train_net = self.get_training_nets()
def testSparseLookupIncorrectPositionWeightedOnIdList(self):
'''
        Currently the implementation of SparseLookup assumes the input is an
        id_score_list when PositionWeighted is used.
'''
record = schema.NewRecord(self.model.net, schema.Struct(
('sparse', schema.Struct(
('sparse_feature_0', schema.List(
schema.Scalar(np.int64,
metadata=schema.Metadata(categorical_limit=1000)))),
)),
))
embedding_dim = 64
with self.assertRaises(AssertionError):
self.model.SparseLookup(
record.sparse.sparse_feature_0, [embedding_dim], 'PositionWeighted')
def testSparseLookupPositionWeightedOnIdList(self):
record = schema.NewRecord(self.model.net, schema.Struct(
('sparse', schema.Struct(
('sparse_feature_0', schema.List(
schema.Scalar(np.int64,
metadata=schema.Metadata(categorical_limit=1000)))),
)),
))
# convert id_list to id_score_list with PositionWeighted layer
sparse_segment = record.sparse.sparse_feature_0
pos_w_layer = self.model.PositionWeighted(sparse_segment)
sparse_segment = schema.Map(
keys=get_key(sparse_segment),
values=pos_w_layer.position_weights,
lengths_blob=sparse_segment.lengths
)
embedding_dim = 64
embedding_after_pooling = self.model.SparseLookup(
sparse_segment, [embedding_dim], 'PositionWeighted')
self.model.output_schema = schema.Struct()
self.assertEqual(
schema.Scalar((np.float32, (embedding_dim, ))),
embedding_after_pooling
)
train_init_net, train_net = self.get_training_nets()
self.assertNetContainOps(
train_init_net,
[
OpSpec("ConstantFill", None, None), # position_weights/pos_w
OpSpec("UniformFill", None, None),
OpSpec("ConstantFill", None, None),
]
)
self.assertNetContainOps(train_net, [
OpSpec("LengthsRangeFill", None, None),
OpSpec("Gather", None, None),
OpSpec("SparseLengthsWeightedSum", None, None),
])
predict_net = self.get_predict_net()
self.assertNetContainOps(predict_net, [
OpSpec("LengthsRangeFill", None, None),
OpSpec("Gather", None, None),
OpSpec("SparseLengthsWeightedSum", None, None),
])
def testSparseLookupPositionWeightedOnIdScoreList(self):
record = schema.NewRecord(self.model.net, schema.Struct(
('sparse', schema.Struct(
('id_score_list_0', schema.Map(
schema.Scalar(
np.int64,
metadata=schema.Metadata(
categorical_limit=1000
),
),
np.float32
)),
)),
))
embedding_dim = 64
embedding_after_pooling = self.model.SparseLookup(
record.sparse.id_score_list_0, [embedding_dim], 'PositionWeighted')
self.model.output_schema = schema.Struct()
self.assertEqual(
schema.Scalar((np.float32, (embedding_dim, ))),
embedding_after_pooling
)
train_init_net, train_net = self.get_training_nets()
init_ops = self.assertNetContainOps(
train_init_net,
[
OpSpec("UniformFill", None, None),
OpSpec("ConstantFill", None, None),
]
)
sparse_lookup_op_spec = OpSpec(
'SparseLengthsWeightedSum',
[
init_ops[0].output[0],
record.sparse.id_score_list_0.values(),
record.sparse.id_score_list_0.keys(),
record.sparse.id_score_list_0.lengths(),
],
[embedding_after_pooling()]
)
self.assertNetContainOps(train_net, [sparse_lookup_op_spec])
predict_net = self.get_predict_net()
self.assertNetContainOps(predict_net, [sparse_lookup_op_spec])
def testSparseLookupIncorrectRecencyWeightedOnIdList(self):
'''
        Currently the implementation of SparseLookup assumes the input is an
        id_score_list when RecencyWeighted is used.
'''
record = schema.NewRecord(self.model.net, schema.Struct(
('sparse', schema.Struct(
('sparse_feature_0', schema.List(
schema.Scalar(np.int64,
metadata=schema.Metadata(categorical_limit=1000)))),
)),
))
embedding_dim = 64
with self.assertRaises(AssertionError):
self.model.SparseLookup(
record.sparse.sparse_feature_0, [embedding_dim], 'RecencyWeighted')
def testSparseLookupRecencyWeightedOnIdScoreList(self):
record = schema.NewRecord(self.model.net, schema.Struct(
('sparse', schema.Struct(
('id_score_list_0', schema.Map(
schema.Scalar(
np.int64,
metadata=schema.Metadata(
categorical_limit=1000
),
),
np.float32
)),
)),
))
embedding_dim = 64
embedding_after_pooling = self.model.SparseLookup(
record.sparse.id_score_list_0, [embedding_dim], 'RecencyWeighted')
self.model.output_schema = schema.Struct()
self.assertEqual(
schema.Scalar((np.float32, (embedding_dim, ))),
embedding_after_pooling
)
train_init_net, train_net = self.get_training_nets()
init_ops = self.assertNetContainOps(
train_init_net,
[
OpSpec("UniformFill", None, None),
OpSpec("ConstantFill", None, None),
]
)
sparse_lookup_op_spec = OpSpec(
'SparseLengthsWeightedSum',
[
init_ops[0].output[0],
record.sparse.id_score_list_0.values(),
record.sparse.id_score_list_0.keys(),
record.sparse.id_score_list_0.lengths(),
],
[embedding_after_pooling()]
)
self.assertNetContainOps(train_net, [sparse_lookup_op_spec])
predict_net = self.get_predict_net()
self.assertNetContainOps(predict_net, [sparse_lookup_op_spec])
def testPairwiseSimilarityWithAllEmbeddings(self):
embedding_dim = 64
N = 5
record = schema.NewRecord(self.model.net, schema.Struct(
('all_embeddings', schema.Scalar(
((np.float32, (N, embedding_dim)))
)),
))
current = self.model.PairwiseSimilarity(
record, N * N)
self.assertEqual(
schema.Scalar((np.float32, (N * N, ))),
current
)
train_init_net, train_net = self.get_training_nets()
self.assertNetContainOps(train_init_net, [])
self.assertNetContainOps(train_net, [
OpSpec("BatchMatMul", None, None),
OpSpec("Flatten", None, None),
])
def testPairwiseSimilarityWithXandYEmbeddings(self):
embedding_dim = 64
record = schema.NewRecord(self.model.net, schema.Struct(
('x_embeddings', schema.Scalar(
((np.float32, (5, embedding_dim)))
)),
('y_embeddings', schema.Scalar(
((np.float32, (6, embedding_dim)))
)),
))
current = self.model.PairwiseSimilarity(
record, 5 * 6)
self.assertEqual(
schema.Scalar((np.float32, (5 * 6, ))),
current
)
train_init_net, train_net = self.get_training_nets()
self.assertNetContainOps(train_init_net, [])
self.assertNetContainOps(train_net, [
OpSpec("BatchMatMul", None, None),
OpSpec("Flatten", None, None),
])
def testPairwiseSimilarityWithXandYEmbeddingsAndGather(self):
embedding_dim = 64
output_idx = [1, 3, 5]
output_idx_blob = self.model.add_global_constant(
str(self.model.net.NextScopedBlob('pairwise_dot_product_gather')),
output_idx,
dtype=np.int32,
)
indices_to_gather = schema.Scalar(
(np.int32, len(output_idx)),
output_idx_blob,
)
record = schema.NewRecord(self.model.net, schema.Struct(
('x_embeddings', schema.Scalar(
((np.float32, (5, embedding_dim)))
)),
('y_embeddings', schema.Scalar(
((np.float32, (6, embedding_dim)))
)),
('indices_to_gather', indices_to_gather),
))
current = self.model.PairwiseSimilarity(
record, len(output_idx))
# This assert is not necessary,
# output size is passed into PairwiseSimilarity
self.assertEqual(
schema.Scalar((np.float32, (len(output_idx), ))),
current
)
train_init_net, train_net = self.get_training_nets()
self.assertNetContainOps(train_init_net, [])
self.assertNetContainOps(train_net, [
OpSpec("BatchMatMul", None, None),
OpSpec("Flatten", None, None),
OpSpec("BatchGather", None, None),
])
def testPairwiseSimilarityIncorrectInput(self):
embedding_dim = 64
record = schema.NewRecord(self.model.net, schema.Struct(
('x_embeddings', schema.Scalar(
((np.float32, (5, embedding_dim)))
)),
))
with self.assertRaises(AssertionError):
self.model.PairwiseSimilarity(
record, 25)
record = schema.NewRecord(self.model.net, schema.Struct(
('all_embeddings', schema.List(np.float32))
))
with self.assertRaises(AssertionError):
self.model.PairwiseSimilarity(
record, 25)
def testConcat(self):
embedding_dim = 64
input_record = self.new_record(schema.Struct(
('input1', schema.Scalar((np.float32, (embedding_dim, )))),
('input2', schema.Scalar((np.float32, (embedding_dim, )))),
('input3', schema.Scalar((np.float32, (embedding_dim, )))),
))
output = self.model.Concat(input_record)
self.assertEqual(
schema.Scalar((np.float32, ((len(input_record.fields) * embedding_dim, )))),
output
)
        # Note that in the Concat layer we assume the first dimension is the
        # batch, so the input is B * embedding_dim.
        # add_axis=1 makes it B * 1 * embedding_dim.
        # Concatenating on axis=1 then makes it B * N * embedding_dim.
output = self.model.Concat(input_record, axis=1, add_axis=1)
self.assertEqual(
schema.Scalar((np.float32, ((len(input_record.fields), embedding_dim)))),
output
)
def testSamplingTrain(self):
output_dims = 1000
indices = self.new_record(schema.Scalar((np.int32, (10,))))
sampling_prob = self.new_record(schema.Scalar((np.float32, (10, ))))
sampled_fc = self.model.SamplingTrain(
schema.Struct(
('input', self.model.input_feature_schema.float_features),
('indices', indices),
('sampling_prob', sampling_prob),
),
"FC",
output_dims,
)
self.model.output_schema = sampled_fc
# Check that we don't add prediction layer into the model
self.assertEqual(1, len(self.model.layers))
self.assertEqual(
schema.Scalar((np.float32, (output_dims, ))),
sampled_fc
)
train_init_net, train_net = self.get_training_nets()
init_ops = self.assertNetContainOps(
train_init_net,
[
OpSpec("UniformFill", None, None),
OpSpec("UniformFill", None, None),
]
)
sampled_fc_layer = self.model.layers[0]
gather_w_spec = OpSpec(
"Gather",
[
init_ops[0].output[0],
indices(),
],
[
sampled_fc_layer._prediction_layer.train_param_blobs[0]
]
)
gather_b_spec = OpSpec(
"Gather",
[
init_ops[1].output[0],
indices(),
],
[
sampled_fc_layer._prediction_layer.train_param_blobs[1]
]
)
train_fc_spec = OpSpec(
"FC",
[
self.model.input_feature_schema.float_features(),
] + sampled_fc_layer._prediction_layer.train_param_blobs,
sampled_fc.field_blobs()
)
log_spec = OpSpec("Log", [sampling_prob()], [None])
sub_spec = OpSpec(
"Sub",
[sampled_fc.field_blobs()[0], None],
sampled_fc.field_blobs()
)
train_ops = self.assertNetContainOps(
train_net,
[gather_w_spec, gather_b_spec, train_fc_spec, log_spec, sub_spec])
self.assertEqual(train_ops[3].output[0], train_ops[4].input[1])
predict_net = self.get_predict_net()
self.assertNetContainOps(
predict_net,
[
OpSpec(
"FC",
[
self.model.input_feature_schema.float_features(),
init_ops[0].output[0],
init_ops[1].output[0],
],
sampled_fc.field_blobs()
)
]
)
def testDistillBatchLRLoss(self):
input_record = self.new_record(schema.Struct(
('label', schema.Scalar((np.float64, (1,)))),
('logit', schema.Scalar((np.float32, (2,)))),
            ('teacher_label', schema.Scalar((np.float32, (1,)))),
('weight', schema.Scalar((np.float64, (1,))))
))
loss = self.model.BatchDistillLRLoss(input_record)
self.assertEqual(schema.Scalar((np.float32, tuple())), loss)
def testDistillBatchLRLossWithTeacherWeightScreen(self):
input_record = self.new_record(schema.Struct(
('label', schema.Scalar((np.float32, (2,)))),
('logit', schema.Scalar((np.float32, (2, 1)))),
            ('teacher_label', schema.Scalar((np.float32, (2,)))),
('weight', schema.Scalar((np.float64, (2,))))
))
label_items = np.array([1.0, 1.0], dtype=np.float32)
logit_items = np.array([[1.0], [1.0]], dtype=np.float32)
teacher_label_items = np.array([0.8, -1.0], dtype=np.float32)
weight_items = np.array([1.0, 1.0], dtype=np.float32)
schema.FeedRecord(
input_record,
[label_items, logit_items, teacher_label_items, weight_items]
)
loss = self.model.BatchDistillLRLoss(
input_record,
teacher_weight=0.5,
filter_invalid_teacher_label=True
)
self.run_train_net_forward_only()
tensor_loss = workspace.FetchBlob(loss.field_blobs()[0])
def cross_entropy(label, logit):
return logit - logit * label + np.log(1 + np.exp(-1.0 * logit))
def cal_cross_entropy(
label_items, logit_items, teacher_label_items, weight_items
):
total_ce = 0
for i in range(label_items.shape[0]):
true_xent = cross_entropy(label_items[i], logit_items[i, 0])
if teacher_label_items[i] > 0:
teacher_xent = cross_entropy(
teacher_label_items[i], logit_items[i, 0]
)
else:
teacher_xent = 0
teacher_weight = 0.5 if teacher_label_items[i] > 0 else 0
total_ce += (true_xent * (1 - teacher_weight) +
teacher_xent * teacher_weight) * weight_items[i]
return total_ce / label_items.shape[0]
correct_ace = cal_cross_entropy(
label_items,
logit_items,
teacher_label_items,
weight_items
)
self.assertAlmostEqual(
tensor_loss,
np.array(correct_ace),
delta=0.0000001,
msg="Wrong cross entropy {}".format(tensor_loss)
)
def testBatchLRLoss(self):
input_record = self.new_record(schema.Struct(
('label', schema.Scalar((np.float64, (1,)))),
('logit', schema.Scalar((np.float32, (2,)))),
('weight', schema.Scalar((np.float64, (1,))))
))
loss = self.model.BatchLRLoss(input_record)
self.assertEqual(schema.Scalar((np.float32, tuple())), loss)
def testBatchLRLossWithUncertainty(self):
input_record = self.new_record(schema.Struct(
('label', schema.Scalar((np.float64, (1,)))),
('logit', schema.Scalar((np.float32, (2,)))),
('weight', schema.Scalar((np.float64, (1,)))),
('log_variance', schema.Scalar((np.float64, (1,)))),
))
loss = self.model.BatchLRLoss(input_record)
self.assertEqual(schema.Scalar((np.float32, tuple())), loss)
def testMarginRankLoss(self):
input_record = self.new_record(schema.Struct(
('pos_prediction', schema.Scalar((np.float32, (1,)))),
('neg_prediction', schema.List(np.float32)),
))
pos_items = np.array([0.1, 0.2, 0.3], dtype=np.float32)
neg_lengths = np.array([1, 2, 3], dtype=np.int32)
neg_items = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6], dtype=np.float32)
schema.FeedRecord(
input_record,
[pos_items, neg_lengths, neg_items]
)
loss = self.model.MarginRankLoss(input_record)
self.run_train_net_forward_only()
self.assertEqual(schema.Scalar((np.float32, tuple())), loss)
def testBatchMSELoss(self):
input_record = self.new_record(schema.Struct(
('label', schema.Scalar((np.float64, (1,)))),
('prediction', schema.Scalar((np.float32, (2,)))),
))
loss = self.model.BatchMSELoss(input_record)
self.assertEqual(schema.Scalar((np.float32, tuple())), loss)
def testBatchSigmoidCrossEntropyLoss(self):
input_record = self.new_record(schema.Struct(
('label', schema.Scalar((np.float32, (32,)))),
('prediction', schema.Scalar((np.float32, (32,))))
))
loss = self.model.BatchSigmoidCrossEntropyLoss(input_record)
self.assertEqual(schema.Scalar((np.float32, tuple())), loss)
def testBatchSoftmaxLoss(self):
input_record = self.new_record(schema.Struct(
('label', schema.Scalar((np.float32, tuple()))),
('prediction', schema.Scalar((np.float32, (32,))))
))
loss = self.model.BatchSoftmaxLoss(input_record)
self.assertEqual(schema.Struct(
('softmax', schema.Scalar((np.float32, (32,)))),
('loss', schema.Scalar(np.float32)),
), loss)
def testBatchSoftmaxLossWeight(self):
input_record = self.new_record(schema.Struct(
('label', schema.Scalar((np.float32, tuple()))),
('prediction', schema.Scalar((np.float32, (32,)))),
('weight', schema.Scalar((np.float64, (1,))))
))
loss = self.model.BatchSoftmaxLoss(input_record)
self.assertEqual(schema.Struct(
('softmax', schema.Scalar((np.float32, (32,)))),
('loss', schema.Scalar(np.float32)),
), loss)
@given(
X=hu.arrays(dims=[2, 5]),
)
def testBatchNormalization(self, X):
input_record = self.new_record(schema.Scalar((np.float32, (5,))))
schema.FeedRecord(input_record, [X])
bn_output = self.model.BatchNormalization(input_record)
self.assertEqual(schema.Scalar((np.float32, (5,))), bn_output)
self.model.output_schema = schema.Struct()
train_init_net, train_net = self.get_training_nets()
init_ops = self.assertNetContainOps(
train_init_net,
[
OpSpec("ConstantFill", None, None),
OpSpec("ConstantFill", None, None),
OpSpec("ConstantFill", None, None),
OpSpec("ConstantFill", None, None),
]
)
input_blob = input_record.field_blobs()[0]
output_blob = bn_output.field_blobs()[0]
expand_dims_spec = OpSpec(
"ExpandDims",
[input_blob],
None,
)
train_bn_spec = OpSpec(
"SpatialBN",
[None, init_ops[0].output[0], init_ops[1].output[0],
init_ops[2].output[0], init_ops[3].output[0]],
[output_blob, init_ops[2].output[0], init_ops[3].output[0], None, None],
{'is_test': 0, 'order': 'NCHW', 'momentum': 0.9},
)
test_bn_spec = OpSpec(
"SpatialBN",
[None, init_ops[0].output[0], init_ops[1].output[0],
init_ops[2].output[0], init_ops[3].output[0]],
[output_blob],
{'is_test': 1, 'order': 'NCHW', 'momentum': 0.9},
)
squeeze_spec = OpSpec(
"Squeeze",
[output_blob],
[output_blob],
)
self.assertNetContainOps(
train_net,
[expand_dims_spec, train_bn_spec, squeeze_spec]
)
eval_net = self.get_eval_net()
self.assertNetContainOps(
eval_net,
[expand_dims_spec, test_bn_spec, squeeze_spec]
)
predict_net = self.get_predict_net()
self.assertNetContainOps(
predict_net,
[expand_dims_spec, test_bn_spec, squeeze_spec]
)
workspace.RunNetOnce(train_init_net)
workspace.RunNetOnce(train_net)
schema.FeedRecord(input_record, [X])
workspace.RunNetOnce(eval_net)
schema.FeedRecord(input_record, [X])
workspace.RunNetOnce(predict_net)
@given(
X=hu.arrays(dims=[2, 5, 6]),
use_layer_norm_op=st.booleans(),
)
def testLayerNormalization(self, X, use_layer_norm_op):
expect = (5, 6,)
if not use_layer_norm_op:
X = X.reshape(10, 6)
expect = (6,)
input_record = self.new_record(schema.Scalar((np.float32, expect)))
schema.FeedRecord(input_record, [X])
ln_output = self.model.LayerNormalization(
input_record, use_layer_norm_op=use_layer_norm_op
)
self.assertEqual(schema.Scalar((np.float32, expect)), ln_output)
self.model.output_schema = schema.Struct()
train_init_net, train_net = self.get_training_nets(add_constants=True)
workspace.RunNetOnce(train_init_net)
workspace.RunNetOnce(train_net)
@given(
X=hu.arrays(dims=[5, 2]),
num_to_collect=st.integers(min_value=1, max_value=10),
)
def testLastNWindowCollector(self, X, num_to_collect):
input_record = self.new_record(schema.Scalar(np.float32))
schema.FeedRecord(input_record, [X])
last_n = self.model.LastNWindowCollector(input_record, num_to_collect)
self.run_train_net_forward_only()
output_record = schema.FetchRecord(last_n.last_n)
start = max(0, 5 - num_to_collect)
npt.assert_array_equal(X[start:], output_record())
num_visited = schema.FetchRecord(last_n.num_visited)
npt.assert_array_equal([5], num_visited())
@given(
X=hu.arrays(dims=[5, 2]),
num_to_collect=st.integers(min_value=3, max_value=3),
)
def testReservoirSamplingWithID(self, X, num_to_collect):
ID = np.array([1, 2, 3, 1, 2], dtype=np.int64)
input_record = self.new_record(
schema.Struct(
('record', schema.Struct(
('dense', schema.Scalar()),
)),
('object_id', schema.Scalar(np.int64)),
)
)
schema.FeedRecord(input_record, [X, ID])
packed_record = self.model.PackRecords(
input_record.record, 1, fields=input_record.record.field_names())
reservoir_input = schema.Struct(
('data', packed_record),
('object_id', input_record.object_id),
)
reservoir = self.model.ReservoirSampling(
reservoir_input, num_to_collect)
self.model.output_schema = schema.Struct()
train_init_net, train_net = \
layer_model_instantiator.generate_training_nets_forward_only(
self.model)
workspace.RunNetOnce(train_init_net)
workspace.CreateNet(train_net)
workspace.RunNet(train_net.Proto().name, num_iter=2)
num_visited = schema.FetchRecord(reservoir.num_visited)
npt.assert_array_equal([3], num_visited())
for param in self.model.params:
serialized = workspace.SerializeBlob(str(param))
workspace.DeserializeBlob(str(param), serialized)
ID = np.array([3, 5, 3, 3, 5], dtype=np.int64)
schema.FeedRecord(input_record.object_id, [ID])
workspace.RunNet(train_net.Proto().name, num_iter=2)
num_visited = schema.FetchRecord(reservoir.num_visited)
npt.assert_array_equal([2], num_visited())
def testUniformSampling(self):
input_record = self.new_record(schema.Scalar(np.int32))
input_array = np.array([3, 10, 11, 15, 20, 99], dtype=np.int32)
schema.FeedRecord(input_record, [input_array])
num_samples = 20
num_elements = 100
uniform_sampling_output = self.model.UniformSampling(
input_record, num_samples, num_elements)
self.model.loss = uniform_sampling_output
self.run_train_net()
samples = workspace.FetchBlob(uniform_sampling_output.samples())
sampling_prob = workspace.FetchBlob(
uniform_sampling_output.sampling_prob())
self.assertEqual(num_samples, len(samples))
np.testing.assert_array_equal(input_array, samples[:len(input_array)])
np.testing.assert_almost_equal(
np.array([float(num_samples) / num_elements] * num_samples,
dtype=np.float32),
sampling_prob
)
def testUniformSamplingWithIncorrectSampleSize(self):
input_record = self.new_record(schema.Scalar(np.int32))
num_samples = 200
num_elements = 100
with self.assertRaises(AssertionError):
self.model.UniformSampling(input_record, num_samples, num_elements)
def testGatherRecord(self):
indices = np.array([1, 3, 4], dtype=np.int32)
dense = np.array(list(range(20)), dtype=np.float32).reshape(10, 2)
lengths = np.array(list(range(10)), dtype=np.int32)
items = np.array(list(range(lengths.sum())), dtype=np.int64)
items_lengths = np.array(list(range(lengths.sum())), dtype=np.int32)
items_items = np.array(list(range(items_lengths.sum())), dtype=np.int64)
record = self.new_record(schema.Struct(
('dense', schema.Scalar(np.float32)),
('sparse', schema.Struct(
('list', schema.List(np.int64)),
('list_of_list', schema.List(schema.List(np.int64))),
)),
('empty_struct', schema.Struct())
))
indices_record = self.new_record(schema.Scalar(np.int32))
input_record = schema.Struct(
('indices', indices_record),
('record', record),
)
schema.FeedRecord(
input_record,
[indices, dense, lengths, items, lengths, items_lengths,
items_items])
gathered_record = self.model.GatherRecord(input_record)
self.assertTrue(schema.equal_schemas(gathered_record, record))
self.run_train_net_forward_only()
gathered_dense = workspace.FetchBlob(gathered_record.dense())
np.testing.assert_array_equal(
np.concatenate([dense[i:i + 1] for i in indices]), gathered_dense)
gathered_lengths = workspace.FetchBlob(
gathered_record.sparse.list.lengths())
np.testing.assert_array_equal(
np.concatenate([lengths[i:i + 1] for i in indices]),
gathered_lengths)
gathered_items = workspace.FetchBlob(
gathered_record.sparse.list.items())
offsets = lengths.cumsum() - lengths
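# offsets[i] is the start position of row i's items: an exclusive prefix sum of lengths.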
np.testing.assert_array_equal(
np.concatenate([
items[offsets[i]: offsets[i] + lengths[i]]
for i in indices
]), gathered_items)
gathered_items_lengths = workspace.FetchBlob(
gathered_record.sparse.list_of_list.items.lengths())
np.testing.assert_array_equal(
np.concatenate([
items_lengths[offsets[i]: offsets[i] + lengths[i]]
for i in indices
]),
gathered_items_lengths
)
nested_offsets = []
nested_lengths = []
nested_offset = 0
j = 0
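# For the doubly-nested list, compute each outer row's offset into items_items
# and the total number of nested items that row owns.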
for l in lengths:
nested_offsets.append(nested_offset)
nested_length = 0
for _i in range(l):
nested_offset += items_lengths[j]
nested_length += items_lengths[j]
j += 1
nested_lengths.append(nested_length)
gathered_items_items = workspace.FetchBlob(
gathered_record.sparse.list_of_list.items.items())
np.testing.assert_array_equal(
np.concatenate([
items_items[nested_offsets[i]:
nested_offsets[i] + nested_lengths[i]]
for i in indices
]),
gathered_items_items
)
def testMapToRange(self):
input_record = self.new_record(schema.Scalar(np.int32))
indices_blob = self.model.MapToRange(input_record,
max_index=100).indices
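# MapToRange assigns dense indices (starting at 1) in order of first appearance while
# training; values not already in the map are sent to 0 in the eval/predict nets,
# as the assertions below exercise.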
self.model.output_schema = schema.Struct()
train_init_net, train_net = self.get_training_nets()
schema.FeedRecord(
input_record,
[np.array([10, 3, 20, 99, 15, 11, 3, 11], dtype=np.int32)]
)
workspace.RunNetOnce(train_init_net)
workspace.RunNetOnce(train_net)
indices = workspace.FetchBlob(indices_blob())
np.testing.assert_array_equal(
np.array([1, 2, 3, 4, 5, 6, 2, 6], dtype=np.int32),
indices
)
schema.FeedRecord(
input_record,
[np.array([10, 3, 23, 35, 60, 15, 10, 15], dtype=np.int32)]
)
workspace.RunNetOnce(train_net)
indices = workspace.FetchBlob(indices_blob())
np.testing.assert_array_equal(
np.array([1, 2, 7, 8, 9, 5, 1, 5], dtype=np.int32),
indices
)
eval_net = self.get_eval_net()
schema.FeedRecord(
input_record,
[np.array([10, 3, 23, 35, 60, 15, 200], dtype=np.int32)]
)
workspace.RunNetOnce(eval_net)
indices = workspace.FetchBlob(indices_blob())
np.testing.assert_array_equal(
np.array([1, 2, 7, 8, 9, 5, 0], dtype=np.int32),
indices
)
schema.FeedRecord(
input_record,
[np.array([10, 3, 23, 15, 101, 115], dtype=np.int32)]
)
workspace.RunNetOnce(eval_net)
indices = workspace.FetchBlob(indices_blob())
np.testing.assert_array_equal(
np.array([1, 2, 7, 5, 0, 0], dtype=np.int32),
indices
)
predict_net = self.get_predict_net()
schema.FeedRecord(
input_record,
[np.array([3, 3, 20, 23, 151, 35, 60, 15, 200], dtype=np.int32)]
)
workspace.RunNetOnce(predict_net)
indices = workspace.FetchBlob(indices_blob())
np.testing.assert_array_equal(
np.array([2, 2, 3, 7, 0, 8, 9, 5, 0], dtype=np.int32),
indices
)
def testSelectRecordByContext(self):
float_features = self.model.input_feature_schema.float_features
float_array = np.array([1.0, 2.0], dtype=np.float32)
schema.FeedRecord(float_features, [float_array])
with Tags(Tags.EXCLUDE_FROM_PREDICTION):
log_float_features = self.model.Log(float_features, 1)
joined = self.model.SelectRecordByContext(
schema.Struct(
(InstantiationContext.PREDICTION, float_features),
(InstantiationContext.TRAINING, log_float_features),
# TODO: TRAIN_ONLY layers are also generated in eval
(InstantiationContext.EVAL, log_float_features),
)
)
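# SelectRecordByContext picks a different input per instantiation context:
# prediction uses the raw features, while training and eval use the log-transformed ones.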
# model.output_schema has to be a struct
self.model.output_schema = schema.Struct((
'joined', joined
))
predict_net = layer_model_instantiator.generate_predict_net(self.model)
workspace.RunNetOnce(predict_net)
predict_output = schema.FetchRecord(predict_net.output_record())
npt.assert_array_equal(float_array,
predict_output['joined']())
eval_net = layer_model_instantiator.generate_eval_net(self.model)
workspace.RunNetOnce(eval_net)
eval_output = schema.FetchRecord(eval_net.output_record())
npt.assert_array_equal(np.log(float_array),
eval_output['joined']())
_, train_net = (
layer_model_instantiator.generate_training_nets_forward_only(
self.model
)
)
workspace.RunNetOnce(train_net)
train_output = schema.FetchRecord(train_net.output_record())
npt.assert_array_equal(np.log(float_array),
train_output['joined']())
def testFunctionalLayer(self):
def normalize(net, in_record, out_record):
mean = net.ReduceFrontMean(in_record(), 1)
net.Sub(
[in_record(), mean],
out_record(),
broadcast=1)
normalized = self.model.Functional(
self.model.input_feature_schema.float_features, 1,
normalize, name="normalizer")
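# Functional wraps an arbitrary net-building function as a layer; `normalize`
# here subtracts the per-feature mean computed over the batch.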
# Attach metadata to one of the outputs and use it in FC
normalized.set_type((np.float32, 32))
self.model.output_schema = self.model.FC(normalized, 2)
predict_net = layer_model_instantiator.generate_predict_net(
self.model)
ops = predict_net.Proto().op
assert len(ops) == 3
assert ops[0].type == "ReduceFrontMean"
assert ops[1].type == "Sub"
assert ops[2].type == "FC"
assert len(ops[0].input) == 1
assert ops[0].input[0] ==\
self.model.input_feature_schema.float_features()
assert len(ops[1].output) == 1
assert ops[1].output[0] in ops[2].input
def testFunctionalLayerHelper(self):
mean = self.model.ReduceFrontMean(
self.model.input_feature_schema.float_features, 1)
normalized = self.model.Sub(
schema.Tuple(
self.model.input_feature_schema.float_features, mean),
1, broadcast=1)
# Attach metadata to one of the outputs and use it in FC
normalized.set_type((np.float32, (32,)))
self.model.output_schema = self.model.FC(normalized, 2)
predict_net = layer_model_instantiator.generate_predict_net(
self.model)
ops = predict_net.Proto().op
assert len(ops) == 3
assert ops[0].type == "ReduceFrontMean"
assert ops[1].type == "Sub"
assert ops[2].type == "FC"
assert len(ops[0].input) == 1
assert ops[0].input[0] ==\
self.model.input_feature_schema.float_features()
assert len(ops[1].output) == 1
assert ops[1].output[0] in ops[2].input
def testFunctionalLayerHelperAutoInference(self):
softsign = self.model.Softsign(
schema.Tuple(self.model.input_feature_schema.float_features),
1)
assert softsign.field_type().base == np.float32
assert softsign.field_type().shape == (32,)
self.model.output_schema = self.model.FC(softsign, 2)
predict_net = layer_model_instantiator.generate_predict_net(
self.model)
ops = predict_net.Proto().op
assert len(ops) == 2
assert ops[0].type == "Softsign"
assert ops[1].type == "FC"
assert len(ops[0].input) == 1
assert ops[0].input[0] ==\
self.model.input_feature_schema.float_features()
assert len(ops[0].output) == 1
assert ops[0].output[0] in ops[1].input
def testHalfToFloatTypeInference(self):
input = self.new_record(schema.Scalar((np.float32, (32,))))
output = self.model.FloatToHalf(input, 1)
assert output.field_type().base == np.float16
assert output.field_type().shape == (32, )
output = self.model.HalfToFloat(output, 1)
assert output.field_type().base == np.float32
assert output.field_type().shape == (32, )
def testFunctionalLayerHelperAutoInferenceScalar(self):
loss = self.model.AveragedLoss(self.model.input_feature_schema, 1)
self.assertEqual(1, len(loss.field_types()))
self.assertEqual(np.float32, loss.field_types()[0].base)
self.assertEqual(tuple(), loss.field_types()[0].shape)
def testFunctionalLayerInputCoercion(self):
one = self.model.global_constants['ONE']
two = self.model.Add([one, one], 1)
self.model.loss = two
self.run_train_net()
data = workspace.FetchBlob(two.field_blobs()[0])
np.testing.assert_array_equal([2.0], data)
def testFunctionalLayerWithOutputNames(self):
k = 3
topk = self.model.TopK(
self.model.input_feature_schema,
output_names_or_num=['values', 'indices'],
k=k,
)
self.assertEqual(2, len(topk.field_types()))
self.assertEqual(np.float32, topk.field_types()[0].base)
self.assertEqual((k,), topk.field_types()[0].shape)
self.assertEqual(np.int32, topk.field_types()[1].base)
self.assertEqual((k,), topk.field_types()[1].shape)
self.assertEqual(['TopK/values', 'TopK/indices'], topk.field_blobs())
def testFunctionalLayerSameOperatorOutputNames(self):
Con1 = self.model.ConstantFill([], 1, value=1)
Con2 = self.model.ConstantFill([], 1, value=2)
self.assertNotEqual(str(Con1), str(Con2))
def testFunctionalLayerWithOutputDtypes(self):
loss = self.model.AveragedLoss(
self.model.input_feature_schema,
1,
output_dtypes=(np.float32, (1,)),
)
self.assertEqual(1, len(loss.field_types()))
self.assertEqual(np.float32, loss.field_types()[0].base)
self.assertEqual((1,), loss.field_types()[0].shape)
def testPropagateRequestOnly(self):
# test case when output is request only
input_record = self.new_record(schema.Struct(
('input1', schema.Scalar((np.float32, (32, )))),
('input2', schema.Scalar((np.float32, (64, )))),
('input3', schema.Scalar((np.float32, (16, )))),
))
set_request_only(input_record)
concat_output = self.model.Concat(input_record)
self.assertEqual(is_request_only_scalar(concat_output), True)
# test case when output is not request only
input_record2 = self.new_record(schema.Struct(
('input4', schema.Scalar((np.float32, (100, ))))
)) + input_record
concat_output2 = self.model.Concat(input_record2)
self.assertEqual(is_request_only_scalar(concat_output2), False)
def testSetRequestOnly(self):
input_record = schema.Scalar(np.int64)
schema.attach_metadata_to_scalars(
input_record,
schema.Metadata(
categorical_limit=100000000,
expected_value=99,
feature_specs=schema.FeatureSpec(
feature_ids=[1, 100, 1001]
)
)
)
set_request_only(input_record)
self.assertEqual(input_record.metadata.categorical_limit, 100000000)
self.assertEqual(input_record.metadata.expected_value, 99)
self.assertEqual(
input_record.metadata.feature_specs.feature_ids,
[1, 100, 1001]
)
@given(
X=hu.arrays(dims=[5, 5]), # Shape of X is irrelevant
dropout_for_eval=st.booleans(),
)
def testDropout(self, X, dropout_for_eval):
input_record = self.new_record(schema.Scalar((np.float32, (1,))))
schema.FeedRecord(input_record, [X])
d_output = self.model.Dropout(
input_record,
dropout_for_eval=dropout_for_eval
)
self.assertEqual(schema.Scalar((np.float32, (1,))), d_output)
self.model.output_schema = schema.Struct()
train_init_net, train_net = self.get_training_nets()
input_blob = input_record.field_blobs()[0]
output_blob = d_output.field_blobs()[0]
with_d_spec = OpSpec(
"Dropout",
[input_blob],
[output_blob, None],
{'is_test': 0, 'ratio': 0.5}
)
without_d_spec = OpSpec(
"Dropout",
[input_blob],
[output_blob, None],
{'is_test': 1, 'ratio': 0.5}
)
self.assertNetContainOps(
train_net,
[with_d_spec]
)
eval_net = self.get_eval_net()
predict_net = self.get_predict_net()
if dropout_for_eval:
self.assertNetContainOps(
eval_net,
[with_d_spec]
)
self.assertNetContainOps(
predict_net,
[with_d_spec]
)
else:
self.assertNetContainOps(
eval_net,
[without_d_spec]
)
self.assertNetContainOps(
predict_net,
[without_d_spec]
)
workspace.RunNetOnce(train_init_net)
workspace.RunNetOnce(train_net)
schema.FeedRecord(input_record, [X])
workspace.RunNetOnce(eval_net)
schema.FeedRecord(input_record, [X])
workspace.RunNetOnce(predict_net)
@given(
num_inputs=st.integers(1, 3),
batch_size=st.integers(5, 10)
)
def testMergeIdListsLayer(self, num_inputs, batch_size):
inputs = []
for _ in range(num_inputs):
lengths = np.random.randint(5, size=batch_size).astype(np.int32)
size = lengths.sum()
values = np.random.randint(1, 10, size=size).astype(np.int64)
inputs.append(lengths)
inputs.append(values)
input_schema = schema.Tuple(
*[schema.List(
schema.Scalar(dtype=np.int64, metadata=schema.Metadata(
categorical_limit=20
))) for _ in range(num_inputs)]
)
input_record = schema.NewRecord(self.model.net, input_schema)
schema.FeedRecord(input_record, inputs)
output_schema = self.model.MergeIdLists(input_record)
assert schema.equal_schemas(
output_schema, IdList,
check_field_names=False)
@given(
batch_size=st.integers(min_value=2, max_value=10),
input_dims=st.integers(min_value=5, max_value=10),
output_dims=st.integers(min_value=5, max_value=10),
bandwidth=st.floats(min_value=0.1, max_value=5),
)
def testRandomFourierFeatures(self, batch_size, input_dims, output_dims, bandwidth):
def _rff_hypothesis_test(rff_output, X, W, b, scale):
"""
Runs hypothesis test for the Random Fourier Features layer.
Inputs:
rff_output -- output of net after running random fourier features layer
X -- input data
W -- weight parameter from train_init_net
b -- bias parameter from train_init_net
scale -- value by which to scale the output vector
"""
output = workspace.FetchBlob(rff_output)
output_ref = scale * np.cos(np.dot(X, np.transpose(W)) + b)
npt.assert_allclose(output, output_ref, rtol=1e-3, atol=1e-3)
X = np.random.random((batch_size, input_dims)).astype(np.float32)
scale = np.sqrt(2.0 / output_dims)
input_record = self.new_record(schema.Scalar((np.float32, (input_dims,))))
schema.FeedRecord(input_record, [X])
input_blob = input_record.field_blobs()[0]
rff_output = self.model.RandomFourierFeatures(input_record,
output_dims,
bandwidth)
self.model.output_schema = schema.Struct()
self.assertEqual(
schema.Scalar((np.float32, (output_dims, ))),
rff_output
)
train_init_net, train_net = self.get_training_nets()
# Init net assertions
init_ops_list = [
OpSpec("GaussianFill", None, None),
OpSpec("UniformFill", None, None),
]
init_ops = self._test_net(train_init_net, init_ops_list)
W = workspace.FetchBlob(self.model.layers[0].w)
b = workspace.FetchBlob(self.model.layers[0].b)
# Operation specifications
fc_spec = OpSpec("FC", [input_blob, init_ops[0].output[0],
init_ops[1].output[0]], None)
cosine_spec = OpSpec("Cos", None, None)
scale_spec = OpSpec("Scale", None, rff_output.field_blobs(),
{'scale': scale})
ops_list = [
fc_spec,
cosine_spec,
scale_spec
]
# Train net assertions
self._test_net(train_net, ops_list)
_rff_hypothesis_test(rff_output(), X, W, b, scale)
# Eval net assertions
eval_net = self.get_eval_net()
self._test_net(eval_net, ops_list)
_rff_hypothesis_test(rff_output(), X, W, b, scale)
# Predict net assertions
predict_net = self.get_predict_net()
self._test_net(predict_net, ops_list)
_rff_hypothesis_test(rff_output(), X, W, b, scale)
@given(
batch_size=st.integers(min_value=2, max_value=10),
input_dims=st.integers(min_value=5, max_value=10),
output_dims=st.integers(min_value=5, max_value=10),
s=st.integers(min_value=0, max_value=3),
scale=st.floats(min_value=0.1, max_value=5),
set_weight_as_global_constant=st.booleans()
)
def testArcCosineFeatureMap(self, batch_size, input_dims, output_dims, s, scale,
set_weight_as_global_constant):
def _arc_cosine_hypothesis_test(ac_output, X, W, b, s):
"""
Runs hypothesis test for Arc Cosine layer.
Inputs:
ac_output -- output of net after running arc cosine layer
X -- input data
W -- weight parameter from train_init_net
b -- bias parameter from train_init_net
s -- degree parameter
"""
# Get output from net
net_output = workspace.FetchBlob(ac_output)
# Computing output directly
x_rand = np.matmul(X, np.transpose(W)) + b
x_pow = np.power(x_rand, s)
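# Reference output: x_rand**s gated by a unit step for s > 0, or by x / (1 + x) on the
# positive part when s == 0 (mirroring the Softsign + Relu path used in the net).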
if s > 0:
h_rand_features = np.piecewise(x_rand,
[x_rand <= 0, x_rand > 0],
[0, 1])
else:
h_rand_features = np.piecewise(x_rand,
[x_rand <= 0, x_rand > 0],
[0, lambda x: x / (1 + x)])
output_ref = np.multiply(x_pow, h_rand_features)
# Comparing net output and computed output
npt.assert_allclose(net_output, output_ref, rtol=1e-3, atol=1e-3)
X = np.random.normal(size=(batch_size, input_dims)).astype(np.float32)
input_record = self.new_record(schema.Scalar((np.float32, (input_dims,))))
schema.FeedRecord(input_record, [X])
input_blob = input_record.field_blobs()[0]
ac_output = self.model.ArcCosineFeatureMap(
input_record,
output_dims,
s=s,
scale=scale,
set_weight_as_global_constant=set_weight_as_global_constant
)
self.model.output_schema = schema.Struct()
self.assertEqual(
schema.Scalar((np.float32, (output_dims, ))),
ac_output
)
train_init_net, train_net = self.get_training_nets()
# Run create_init_net to initialize the global constants, and W and b
workspace.RunNetOnce(train_init_net)
workspace.RunNetOnce(self.model.create_init_net(name='init_net'))
if set_weight_as_global_constant:
W = workspace.FetchBlob(
self.model.global_constants['arc_cosine_feature_map_fixed_rand_W']
)
b = workspace.FetchBlob(
self.model.global_constants['arc_cosine_feature_map_fixed_rand_b']
)
else:
W = workspace.FetchBlob(self.model.layers[0].random_w)
b = workspace.FetchBlob(self.model.layers[0].random_b)
# Operation specifications
fc_spec = OpSpec("FC", [input_blob, None, None], None)
softsign_spec = OpSpec("Softsign", None, None)
relu_spec = OpSpec("Relu", None, None)
relu_spec_output = OpSpec("Relu", None, ac_output.field_blobs())
pow_spec = OpSpec("Pow", None, None, {'exponent': float(s - 1)})
mul_spec = OpSpec("Mul", None, ac_output.field_blobs())
if s == 0:
ops_list = [
fc_spec,
softsign_spec,
relu_spec_output,
]
elif s == 1:
ops_list = [
fc_spec,
relu_spec_output,
]
else:
ops_list = [
fc_spec,
relu_spec,
pow_spec,
mul_spec,
]
# Train net assertions
self._test_net(train_net, ops_list)
_arc_cosine_hypothesis_test(ac_output(), X, W, b, s)
# Eval net assertions
eval_net = self.get_eval_net()
self._test_net(eval_net, ops_list)
_arc_cosine_hypothesis_test(ac_output(), X, W, b, s)
# Predict net assertions
predict_net = self.get_predict_net()
self._test_net(predict_net, ops_list)
_arc_cosine_hypothesis_test(ac_output(), X, W, b, s)
@given(
batch_size=st.integers(min_value=2, max_value=10),
input_dims=st.integers(min_value=5, max_value=10),
output_dims=st.integers(min_value=5, max_value=10),
s=st.integers(min_value=0, max_value=3),
scale=st.floats(min_value=0.1, max_value=5),
set_weight_as_global_constant=st.booleans(),
use_struct_input=st.booleans(),
)
def testSemiRandomFeatures(self, batch_size, input_dims, output_dims, s, scale,
set_weight_as_global_constant, use_struct_input):
def _semi_random_hypothesis_test(srf_output, X_full, X_random, rand_w,
rand_b, s):
"""
Runs hypothesis test for Semi Random Features layer.
Inputs:
srf_output -- output of net after running semi random features layer
X_full -- full input data
X_random -- input data fed to the randomly-initialized branch
rand_w -- random-initialized weight parameter from train_init_net
rand_b -- random-initialized bias parameter from train_init_net
s -- degree parameter
"""
# Get output from net
net_output = workspace.FetchBlob(srf_output)
# Fetch learned parameter blobs
learned_w = workspace.FetchBlob(self.model.layers[0].learned_w)
learned_b = workspace.FetchBlob(self.model.layers[0].learned_b)
# Computing output directly
x_rand = np.matmul(X_random, np.transpose(rand_w)) + rand_b
x_learn = np.matmul(X_full, np.transpose(learned_w)) + learned_b
x_pow = np.power(x_rand, s)
if s > 0:
h_rand_features = np.piecewise(x_rand,
[x_rand <= 0, x_rand > 0],
[0, 1])
else:
h_rand_features = np.piecewise(x_rand,
[x_rand <= 0, x_rand > 0],
[0, lambda x: x / (1 + x)])
output_ref = np.multiply(np.multiply(x_pow, h_rand_features), x_learn)
# Comparing net output and computed output
npt.assert_allclose(net_output, output_ref, rtol=1e-3, atol=1e-3)
X_full = np.random.normal(size=(batch_size, input_dims)).astype(np.float32)
if use_struct_input:
X_random = np.random.normal(size=(batch_size, input_dims)).\
astype(np.float32)
input_data = [X_full, X_random]
input_record = self.new_record(schema.Struct(
('full', schema.Scalar(
(np.float32, (input_dims,))
)),
('random', schema.Scalar(
(np.float32, (input_dims,))
))
))
else:
X_random = X_full
input_data = [X_full]
input_record = self.new_record(schema.Scalar(
(np.float32, (input_dims,))
))
schema.FeedRecord(input_record, input_data)
srf_output = self.model.SemiRandomFeatures(
input_record,
output_dims,
s=s,
scale_random=scale,
scale_learned=scale,
set_weight_as_global_constant=set_weight_as_global_constant
)
self.model.output_schema = schema.Struct()
self.assertEqual(
schema.Struct(
('full', schema.Scalar(
(np.float32, (output_dims,))
)),
('random', schema.Scalar(
(np.float32, (output_dims,))
))
),
srf_output
)
init_ops_list = [
OpSpec("GaussianFill", None, None),
OpSpec("UniformFill", None, None),
OpSpec("GaussianFill", None, None),
OpSpec("UniformFill", None, None),
]
train_init_net, train_net = self.get_training_nets()
# Need to run to initialize the global constants for layer
workspace.RunNetOnce(self.model.create_init_net(name='init_net'))
if set_weight_as_global_constant:
# If weight params are global constants, they won't be in train_init_net
init_ops = self._test_net(train_init_net, init_ops_list[:2])
rand_w = workspace.FetchBlob(
self.model.global_constants['semi_random_features_fixed_rand_W']
)
rand_b = workspace.FetchBlob(
self.model.global_constants['semi_random_features_fixed_rand_b']
)
# Operation specifications
fc_random_spec = OpSpec("FC", [None, None, None], None)
fc_learned_spec = OpSpec("FC", [None, init_ops[0].output[0],
init_ops[1].output[0]], None)
else:
init_ops = self._test_net(train_init_net, init_ops_list)
rand_w = workspace.FetchBlob(self.model.layers[0].random_w)
rand_b = workspace.FetchBlob(self.model.layers[0].random_b)
# Operation specifications
fc_random_spec = OpSpec("FC", [None, init_ops[0].output[0],
init_ops[1].output[0]], None)
fc_learned_spec = OpSpec("FC", [None, init_ops[2].output[0],
init_ops[3].output[0]], None)
softsign_spec = OpSpec("Softsign", None, None)
relu_spec = OpSpec("Relu", None, None)
relu_output_spec = OpSpec("Relu", None, srf_output.random.field_blobs())
pow_spec = OpSpec("Pow", None, None, {'exponent': float(s - 1)})
mul_interim_spec = OpSpec("Mul", None, srf_output.random.field_blobs())
mul_spec = OpSpec("Mul", None, srf_output.full.field_blobs())
if s == 0:
ops_list = [
fc_learned_spec,
fc_random_spec,
softsign_spec,
relu_output_spec,
mul_spec,
]
elif s == 1:
ops_list = [
fc_learned_spec,
fc_random_spec,
relu_output_spec,
mul_spec,
]
else:
ops_list = [
fc_learned_spec,
fc_random_spec,
relu_spec,
pow_spec,
mul_interim_spec,
mul_spec,
]
# Train net assertions
self._test_net(train_net, ops_list)
_semi_random_hypothesis_test(srf_output.full(), X_full, X_random,
rand_w, rand_b, s)
# Eval net assertions
eval_net = self.get_eval_net()
self._test_net(eval_net, ops_list)
_semi_random_hypothesis_test(srf_output.full(), X_full, X_random,
rand_w, rand_b, s)
# Predict net assertions
predict_net = self.get_predict_net()
self._test_net(predict_net, ops_list)
_semi_random_hypothesis_test(srf_output.full(), X_full, X_random,
rand_w, rand_b, s)
def testConv(self):
batch_size = 50
H = 1
W = 10
C = 50
output_dims = 32
kernel_h = 1
kernel_w = 3
stride_h = 1
stride_w = 1
pad_t = 0
pad_b = 0
pad_r = None
pad_l = None
input_record = self.new_record(schema.Scalar((np.float32, (H, W, C))))
X = np.random.random((batch_size, H, W, C)).astype(np.float32)
schema.FeedRecord(input_record, [X])
conv = self.model.Conv(
input_record,
output_dims,
kernel_h=kernel_h,
kernel_w=kernel_w,
stride_h=stride_h,
stride_w=stride_w,
pad_t=pad_t,
pad_b=pad_b,
pad_r=pad_r,
pad_l=pad_l,
order='NHWC'
)
self.assertEqual(
schema.Scalar((np.float32, (output_dims,))),
conv
)
self.run_train_net_forward_only()
output_record = schema.FetchRecord(conv)
# spatial dims (H, W) are preserved in this example, while the channel dim becomes output_dims
assert output_record.field_types()[0].shape == (H, W, output_dims)
assert output_record().shape == (batch_size, H, W, output_dims)
train_init_net, train_net = self.get_training_nets()
# Init net assertions
init_ops = self.assertNetContainOps(
train_init_net,
[
OpSpec("XavierFill", None, None),
OpSpec("ConstantFill", None, None),
]
)
conv_spec = OpSpec(
"Conv",
[
input_record.field_blobs()[0],
init_ops[0].output[0],
init_ops[1].output[0],
],
conv.field_blobs()
)
# Train net assertions
self.assertNetContainOps(train_net, [conv_spec])
# Predict net assertions
predict_net = self.get_predict_net()
self.assertNetContainOps(predict_net, [conv_spec])
# Eval net assertions
eval_net = self.get_eval_net()
self.assertNetContainOps(eval_net, [conv_spec])
@given(
num=st.integers(min_value=10, max_value=100),
feed_weight=st.booleans(),
use_inv_var_parameterization=st.booleans(),
use_log_barrier=st.booleans(),
enable_diagnose=st.booleans(),
**hu.gcs
)
def testAdaptiveWeight(
self, num, feed_weight, use_inv_var_parameterization, use_log_barrier,
enable_diagnose, gc, dc
):
input_record = self.new_record(schema.RawTuple(num))
data = np.random.random(num)
schema.FeedRecord(
input_record, [np.array(x).astype(np.float32) for x in data]
)
weights = np.random.random(num) if feed_weight else None
result = self.model.AdaptiveWeight(
input_record,
weights=weights,
estimation_method=(
'inv_var' if use_inv_var_parameterization else 'log_std'
),
pos_optim_method=(
'log_barrier' if use_log_barrier else 'pos_grad_proj'
),
enable_diagnose=enable_diagnose
)
train_init_net, train_net = self.get_training_nets(True)
workspace.RunNetOnce(train_init_net)
workspace.RunNetOnce(train_net)
result = workspace.FetchBlob(result())
if not feed_weight:
weights = np.array([1. / num for _ in range(num)])
expected = np.sum(weights * data + 0.5 * np.log(1. / 2. / weights))
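# The fetched result should equal sum_i (w_i * x_i + 0.5 * log(1 / (2 * w_i)))
# evaluated with the initial weights.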
npt.assert_allclose(expected, result, atol=1e-4, rtol=1e-4)
if enable_diagnose:
assert len(self.model.ad_hoc_plot_blobs) == num
reconst_weights_from_ad_hoc = np.array(
[workspace.FetchBlob(b) for b in self.model.ad_hoc_plot_blobs]
).flatten()
npt.assert_allclose(
reconst_weights_from_ad_hoc, weights, atol=1e-4, rtol=1e-4
)
else:
assert len(self.model.ad_hoc_plot_blobs) == 0
@given(num=st.integers(min_value=10, max_value=100), **hu.gcs)
def testConstantWeight(self, num, gc, dc):
input_record = self.new_record(schema.RawTuple(num))
data = np.random.random(num)
schema.FeedRecord(
input_record, [np.array(x).astype(np.float32) for x in data]
)
weights = np.random.random(num)
result = self.model.ConstantWeight(input_record, weights=weights)
train_init_net, train_net = self.get_training_nets(True)
workspace.RunNetOnce(train_init_net)
workspace.RunNetOnce(train_net)
result = workspace.FetchBlob(result())
expected = np.sum(weights * data)
npt.assert_allclose(expected, result, atol=1e-4, rtol=1e-4)
@given(**hu.gcs)
def testHomotopyWeight(self, gc, dc):
input_record = self.new_record(schema.RawTuple(2))
data = np.random.random(2)
schema.FeedRecord(
input_record, [np.array(x).astype(np.float32) for x in data]
)
# ensure: quad_life > 2 * half_life
half_life = int(np.random.random() * 1e2 + 1)
quad_life = int(np.random.random() * 1e3 + 2 * half_life + 1)
min_weight = np.random.random()
max_weight = np.random.random() + min_weight + 1e-5
result = self.model.HomotopyWeight(
input_record,
min_weight=min_weight,
max_weight=max_weight,
half_life=half_life,
quad_life=quad_life,
)
train_init_net, train_net = self.get_training_nets(True)
workspace.RunNetOnce(train_init_net)
workspace.CreateNet(train_net)
workspace.RunNet(train_net.Name(), num_iter=half_life)
half_life_result = workspace.FetchBlob(result())
workspace.RunNet(train_net.Name(), num_iter=quad_life - half_life)
quad_life_result = workspace.FetchBlob(result())
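# After half_life iterations the two weights should meet at the midpoint of
# [min_weight, max_weight]; after quad_life they should sit a quarter and
# three quarters of the way across that interval.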
alpha = (min_weight + max_weight) / 2.
beta = (min_weight + max_weight) / 2.
expected_half_life_result = alpha * data[0] + beta * data[1]
alpha = (3 * min_weight + max_weight) / 4.
beta = (min_weight + 3 * max_weight) / 4.
expected_quad_life_result = alpha * data[0] + beta * data[1]
npt.assert_allclose(
expected_half_life_result, half_life_result, atol=1e-2, rtol=1e-2
)
npt.assert_allclose(
expected_quad_life_result, quad_life_result, atol=1e-2, rtol=1e-2
)
def _testLabelSmooth(self, categories, binary_prob_label, bsz):
label = self.new_record(schema.Scalar((np.float32, (1, ))))
label_np = np.random.randint(categories, size=bsz).astype(np.float32)
schema.FeedRecord(label, [label_np])
smooth_matrix_shape = (
2 if binary_prob_label else (categories, categories)
)
smooth_matrix = np.random.random(smooth_matrix_shape)
smoothed_label = self.model.LabelSmooth(label, smooth_matrix)
train_init_net, train_net = self.get_training_nets(True)
workspace.RunNetOnce(train_init_net)
workspace.RunNetOnce(train_net)
smoothed_label_np = workspace.FetchBlob(smoothed_label())
if binary_prob_label:
expected = np.array(
[
smooth_matrix[0] if x == 0.0 else smooth_matrix[1]
for x in label_np
]
)
else:
expected = np.array([smooth_matrix[int(x)] for x in label_np])
npt.assert_allclose(expected, smoothed_label_np, atol=1e-4, rtol=1e-4)
@given(
categories=st.integers(min_value=2, max_value=10),
bsz=st.integers(min_value=10, max_value=100),
**hu.gcs
)
def testLabelSmoothForCategoricalLabel(self, categories, bsz, gc, dc):
self._testLabelSmooth(categories, False, bsz)
@given(
bsz=st.integers(min_value=10, max_value=100),
**hu.gcs
)
def testLabelSmoothForBinaryProbLabel(self, bsz, gc, dc):
self._testLabelSmooth(2, True, bsz)
@given(
num_inputs=st.integers(min_value=2, max_value=10),
batch_size=st.integers(min_value=2, max_value=10),
input_dim=st.integers(min_value=5, max_value=10),
seed=st.integers(1, 10),
)
def testBlobWeightedSum(self, num_inputs, batch_size, input_dim, seed):
def get_blob_weighted_sum():
weights = []
for i in range(num_inputs):
w_blob_name = 'blob_weighted_sum/w_{0}'.format(i)
assert workspace.HasBlob(w_blob_name), (
"cannot fine blob {}".format(w_blob_name)
)
w = workspace.FetchBlob(w_blob_name)
weights.append(w)
result = np.sum([
input_data[idx] * weights[idx] for idx in range(num_inputs)
], axis=0)
return result
np.random.seed(seed)
expected_output_schema = schema.Scalar((np.float32, (input_dim,)))
input_schema = schema.Tuple(
*[expected_output_schema for _ in range(num_inputs)]
)
input_data = [
np.random.random((batch_size, input_dim)).astype(np.float32)
for _ in range(num_inputs)
]
input_record = self.new_record(input_schema)
schema.FeedRecord(input_record, input_data)
# test output schema
ws_output = self.model.BlobWeightedSum(input_record)
self.assertEqual(len(self.model.layers), 1)
assert schema.equal_schemas(ws_output, expected_output_schema)
# test train net
train_init_net, train_net = self.get_training_nets()
workspace.RunNetOnce(train_init_net)
workspace.RunNetOnce(train_net)
output = workspace.FetchBlob(ws_output())
npt.assert_almost_equal(get_blob_weighted_sum(), output, decimal=5)
self.run_train_net_forward_only()
output = workspace.FetchBlob(ws_output())
npt.assert_almost_equal(get_blob_weighted_sum(), output, decimal=5)
# test eval net
eval_net = self.get_eval_net()
workspace.RunNetOnce(eval_net)
output = workspace.FetchBlob(ws_output())
npt.assert_almost_equal(get_blob_weighted_sum(), output, decimal=5)
# test pred net
pred_net = self.get_predict_net()
workspace.RunNetOnce(pred_net)
output = workspace.FetchBlob(ws_output())
npt.assert_almost_equal(get_blob_weighted_sum(), output, decimal=5)
|
# coding=utf-8
# Copyright 2020 The HuggingFace Team Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ..test_modeling_common import floats_tensor, ids_tensor
if is_torch_available():
import torch
from transformers import (
AutoModelForSeq2SeqLM,
AutoTokenizer,
BartForConditionalGeneration,
BartTokenizer,
GPT2LMHeadModel,
GPT2Tokenizer,
ImageGPTForCausalImageModeling,
Speech2TextForConditionalGeneration,
SpeechEncoderDecoderModel,
VisionEncoderDecoderModel,
top_k_top_p_filtering,
)
from transformers.generation_beam_constraints import DisjunctiveConstraint, PhrasalConstraint
from transformers.generation_beam_search import BeamSearchScorer, ConstrainedBeamSearchScorer
from transformers.generation_logits_process import (
ForcedBOSTokenLogitsProcessor,
ForcedEOSTokenLogitsProcessor,
HammingDiversityLogitsProcessor,
InfNanRemoveLogitsProcessor,
LogitsProcessorList,
MinLengthLogitsProcessor,
NoBadWordsLogitsProcessor,
NoRepeatNGramLogitsProcessor,
RepetitionPenaltyLogitsProcessor,
TemperatureLogitsWarper,
TopKLogitsWarper,
TopPLogitsWarper,
)
from transformers.generation_stopping_criteria import MaxLengthCriteria, StoppingCriteria, StoppingCriteriaList
from transformers.generation_utils import (
BeamSampleDecoderOnlyOutput,
BeamSampleEncoderDecoderOutput,
BeamSearchDecoderOnlyOutput,
BeamSearchEncoderDecoderOutput,
GreedySearchDecoderOnlyOutput,
GreedySearchEncoderDecoderOutput,
SampleDecoderOnlyOutput,
SampleEncoderDecoderOutput,
)
class GenerationTesterMixin:
model_tester = None
all_generative_model_classes = ()
input_name = "input_ids"
def _get_input_ids_and_config(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
input_ids = inputs_dict[self.input_name]
attention_mask = torch.ones_like(input_ids, dtype=torch.long)
# cut to half length & take max batch_size 2
max_batch_size = 2
sequence_length = input_ids.shape[-1] // 2
input_ids = input_ids[:max_batch_size, :sequence_length]
attention_mask = attention_mask[:max_batch_size, :sequence_length]
# generate max 3 tokens
max_length = input_ids.shape[-1] + 3
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
config.pad_token_id = config.eos_token_id
return config, input_ids, attention_mask, max_length
@staticmethod
def _get_logits_processor_and_kwargs(
input_length,
eos_token_id,
forced_bos_token_id=None,
forced_eos_token_id=None,
max_length=None,
diversity_penalty=None,
):
process_kwargs = {
"min_length": input_length + 1,
"bad_words_ids": [[1, 0]],
"no_repeat_ngram_size": 2,
"repetition_penalty": 1.2,
}
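# Mirror the logits processors that `generate()` builds from these kwargs so that the
# direct greedy/sample/beam calls in the tests can be compared against `generate()`.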
logits_processor = LogitsProcessorList(
(
[
HammingDiversityLogitsProcessor(diversity_penalty, num_beams=2, num_beam_groups=2),
]
if diversity_penalty is not None
else []
)
+ (
[
MinLengthLogitsProcessor(process_kwargs["min_length"], eos_token_id),
]
if eos_token_id is not None
else []
)
+ (
[
ForcedBOSTokenLogitsProcessor(forced_bos_token_id),
]
if forced_bos_token_id is not None
else []
)
+ (
[ForcedEOSTokenLogitsProcessor(max_length, forced_eos_token_id)]
if forced_eos_token_id is not None
else []
)
+ [
NoBadWordsLogitsProcessor(process_kwargs["bad_words_ids"], eos_token_id),
NoRepeatNGramLogitsProcessor(process_kwargs["no_repeat_ngram_size"]),
RepetitionPenaltyLogitsProcessor(process_kwargs["repetition_penalty"]),
]
)
return process_kwargs, logits_processor
@staticmethod
def _get_warper_and_kwargs(num_beams):
warp_kwargs = {"top_k": 10, "top_p": 0.7, "temperature": 0.7}
logits_warper = LogitsProcessorList(
[
TemperatureLogitsWarper(warp_kwargs["temperature"]),
TopKLogitsWarper(top_k=warp_kwargs["top_k"], min_tokens_to_keep=(2 if num_beams > 1 else 1)),
TopPLogitsWarper(top_p=warp_kwargs["top_p"], min_tokens_to_keep=(2 if num_beams > 1 else 1)),
]
)
return warp_kwargs, logits_warper
@staticmethod
def _get_beam_scorer_and_kwargs(batch_size, max_length, num_return_sequences=1):
beam_kwargs = {
"early_stopping": False,
"length_penalty": 2.0,
"num_beams": 2,
"num_return_sequences": num_return_sequences,
}
beam_scorer = BeamSearchScorer(
batch_size=batch_size,
num_beams=beam_kwargs["num_beams"],
device=torch_device,
length_penalty=beam_kwargs["length_penalty"],
do_early_stopping=beam_kwargs["early_stopping"],
num_beam_hyps_to_keep=num_return_sequences,
)
return beam_kwargs, beam_scorer
@staticmethod
def _get_diverse_beam_scorer_and_kwargs(batch_size, max_length, num_return_sequences=1):
beam_kwargs = {
"early_stopping": False,
"length_penalty": 2.0,
"num_beams": 2,
"num_return_sequences": num_return_sequences,
"num_beam_groups": 2, # one beam per group
"diversity_penalty": 2.0,
}
beam_scorer = BeamSearchScorer(
batch_size=batch_size,
num_beams=beam_kwargs["num_beams"],
device=torch_device,
length_penalty=beam_kwargs["length_penalty"],
do_early_stopping=beam_kwargs["early_stopping"],
num_beam_hyps_to_keep=num_return_sequences,
num_beam_groups=beam_kwargs["num_beam_groups"],
)
return beam_kwargs, beam_scorer
@staticmethod
def _get_constrained_beam_scorer_and_kwargs(batch_size, max_length, constraints, num_return_sequences=1):
beam_kwargs = {
"early_stopping": False,
"length_penalty": 2.0,
"num_beams": num_return_sequences * 4,
"num_return_sequences": num_return_sequences,
}
beam_scorer = ConstrainedBeamSearchScorer(
batch_size=batch_size,
constraints=constraints,
num_beams=beam_kwargs["num_beams"],
device=torch_device,
length_penalty=beam_kwargs["length_penalty"],
do_early_stopping=beam_kwargs["early_stopping"],
num_beam_hyps_to_keep=num_return_sequences,
)
return beam_kwargs, beam_scorer
@staticmethod
def _get_encoder_outputs(
model, input_ids, attention_mask, output_attentions=None, output_hidden_states=None, num_interleave=1
):
encoder = model.get_encoder()
encoder_outputs = encoder(
input_ids,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
encoder_outputs["last_hidden_state"] = encoder_outputs.last_hidden_state.repeat_interleave(
num_interleave, dim=0
)
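# For encoder-decoder models, decoding starts from a single decoder_start_token_id
# per example, and the encoder attention mask is dropped from the decoder call.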
input_ids = torch.zeros_like(input_ids[:, :1]) + model._get_decoder_start_token_id()
attention_mask = None
return encoder_outputs, input_ids, attention_mask
def _greedy_generate(
self,
model,
input_ids,
attention_mask,
max_length,
output_scores=False,
output_attentions=False,
output_hidden_states=False,
return_dict_in_generate=False,
):
if model.config.is_encoder_decoder:
max_length = 4
logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
input_ids.shape[-1],
eos_token_id=model.config.eos_token_id,
forced_bos_token_id=model.config.forced_bos_token_id,
forced_eos_token_id=model.config.forced_eos_token_id,
max_length=max_length,
)
kwargs = {}
output_generate = model.generate(
input_ids,
attention_mask=attention_mask,
do_sample=False,
num_beams=1,
max_length=max_length,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
output_scores=output_scores,
return_dict_in_generate=return_dict_in_generate,
remove_invalid_values=True,
**logits_process_kwargs,
)
if model.config.is_encoder_decoder:
encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs(
model,
input_ids,
attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
kwargs["encoder_outputs"] = encoder_outputs
with torch.no_grad():
output_greedy = model.greedy_search(
input_ids,
max_length=max_length,
attention_mask=attention_mask,
logits_processor=logits_processor,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
output_scores=output_scores,
return_dict_in_generate=return_dict_in_generate,
**kwargs,
)
return output_greedy, output_generate
def _sample_generate(
self,
model,
input_ids,
attention_mask,
max_length,
num_return_sequences,
logits_processor,
logits_warper,
logits_warper_kwargs,
process_kwargs,
output_scores=False,
output_attentions=False,
output_hidden_states=False,
return_dict_in_generate=False,
):
torch.manual_seed(0)
output_generate = model.generate(
input_ids,
do_sample=True,
num_beams=1,
max_length=max_length,
num_return_sequences=num_return_sequences,
attention_mask=attention_mask,
output_scores=output_scores,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict_in_generate=return_dict_in_generate,
remove_invalid_values=True,
**logits_warper_kwargs,
**process_kwargs,
)
torch.manual_seed(0)
kwargs = {}
if model.config.is_encoder_decoder:
encoder_outputs, input_ids_clone, attention_mask_clone = self._get_encoder_outputs(
model,
input_ids,
attention_mask,
num_interleave=num_return_sequences,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
kwargs["encoder_outputs"] = encoder_outputs
input_ids_clone = input_ids_clone.repeat_interleave(num_return_sequences, dim=0)
else:
attention_mask_clone = attention_mask.repeat_interleave(num_return_sequences, dim=0)
input_ids_clone = input_ids.repeat_interleave(num_return_sequences, dim=0)
# prevent flaky generation test failures
logits_processor.append(InfNanRemoveLogitsProcessor())
with torch.no_grad():
output_sample = model.sample(
input_ids_clone,
attention_mask=attention_mask_clone,
max_length=max_length,
logits_processor=logits_processor,
logits_warper=logits_warper,
output_scores=output_scores,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict_in_generate=return_dict_in_generate,
**kwargs,
)
return output_sample, output_generate
def _beam_search_generate(
self,
model,
input_ids,
attention_mask,
max_length,
beam_scorer,
beam_kwargs,
logits_processor,
logits_process_kwargs,
output_scores=False,
output_attentions=False,
output_hidden_states=False,
return_dict_in_generate=False,
):
output_generate = model.generate(
input_ids,
attention_mask=attention_mask,
do_sample=False,
max_length=max_length,
output_scores=output_scores,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict_in_generate=return_dict_in_generate,
remove_invalid_values=True,
**beam_kwargs,
**logits_process_kwargs,
)
# beam_search does not automatically interleave `batch_size` dim for `num_beams`
kwargs = {}
if model.config.is_encoder_decoder:
encoder_outputs, input_ids_clone, attention_mask_clone = self._get_encoder_outputs(
model,
input_ids,
attention_mask,
num_interleave=beam_scorer.num_beams,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
kwargs["encoder_outputs"] = encoder_outputs
input_ids_clone = input_ids_clone.repeat_interleave(beam_scorer.num_beams, dim=0)
else:
attention_mask_clone = attention_mask.repeat_interleave(beam_scorer.num_beams, dim=0)
input_ids_clone = input_ids.repeat_interleave(beam_scorer.num_beams, dim=0)
with torch.no_grad():
output_beam_search = model.beam_search(
input_ids_clone,
beam_scorer,
max_length=max_length,
attention_mask=attention_mask_clone,
logits_processor=logits_processor,
output_scores=output_scores,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict_in_generate=return_dict_in_generate,
**kwargs,
)
return output_generate, output_beam_search
def _beam_sample_generate(
self,
model,
input_ids,
attention_mask,
max_length,
num_return_sequences,
beam_scorer,
beam_kwargs,
logits_warper,
logits_warper_kwargs,
output_scores=False,
output_attentions=False,
output_hidden_states=False,
return_dict_in_generate=False,
):
torch.manual_seed(0)
output_generate = model.generate(
input_ids,
attention_mask=attention_mask,
do_sample=True,
max_length=max_length,
output_scores=output_scores,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict_in_generate=return_dict_in_generate,
remove_invalid_values=True,
**beam_kwargs,
**logits_warper_kwargs,
)
# beam_sample does not automatically interleave `batch_size` dim for `num_beams * num_return_sequences`
kwargs = {}
if model.config.is_encoder_decoder:
encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs(
model,
input_ids,
attention_mask,
num_interleave=beam_scorer.num_beams * num_return_sequences,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
kwargs["encoder_outputs"] = encoder_outputs
else:
attention_mask = attention_mask.repeat_interleave(beam_scorer.num_beams * num_return_sequences, dim=0)
# prevent flaky generation test failures
logits_processor = LogitsProcessorList()
logits_processor.append(InfNanRemoveLogitsProcessor())
torch.manual_seed(0)
with torch.no_grad():
output_beam_sample = model.beam_sample(
input_ids.repeat_interleave(beam_scorer.num_beams * num_return_sequences, dim=0),
beam_scorer,
max_length=max_length,
attention_mask=attention_mask,
logits_warper=logits_warper,
logits_processor=logits_processor,
output_scores=output_scores,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict_in_generate=return_dict_in_generate,
**kwargs,
)
return output_generate, output_beam_sample
def _group_beam_search_generate(
self,
model,
input_ids,
attention_mask,
max_length,
beam_scorer,
beam_kwargs,
logits_processor,
logits_process_kwargs,
output_scores=False,
output_attentions=False,
output_hidden_states=False,
return_dict_in_generate=False,
):
output_generate = model.generate(
input_ids,
attention_mask=attention_mask,
do_sample=False,
max_length=max_length,
output_scores=output_scores,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict_in_generate=return_dict_in_generate,
remove_invalid_values=True,
**beam_kwargs,
**logits_process_kwargs,
)
# group_beam_search does not automatically interleave `batch_size` dim for `num_beams`
kwargs = {}
if model.config.is_encoder_decoder:
encoder_outputs, input_ids_clone, attention_mask_clone = self._get_encoder_outputs(
model,
input_ids,
attention_mask,
num_interleave=beam_scorer.num_beams,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
kwargs["encoder_outputs"] = encoder_outputs
input_ids_clone = input_ids_clone.repeat_interleave(beam_scorer.num_beams, dim=0)
else:
attention_mask_clone = attention_mask.repeat_interleave(beam_scorer.num_beams, dim=0)
input_ids_clone = input_ids.repeat_interleave(beam_scorer.num_beams, dim=0)
with torch.no_grad():
output_group_beam_search = model.group_beam_search(
input_ids_clone,
beam_scorer,
max_length=max_length,
attention_mask=attention_mask_clone,
logits_processor=logits_processor,
output_scores=output_scores,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict_in_generate=return_dict_in_generate,
**kwargs,
)
return output_generate, output_group_beam_search
def _constrained_beam_search_generate(
self,
model,
input_ids,
attention_mask,
max_length,
constrained_beam_scorer,
constraints,
beam_kwargs,
logits_processor,
logits_process_kwargs,
output_scores=False,
output_attentions=False,
output_hidden_states=False,
return_dict_in_generate=False,
):
output_generate = model.generate(
input_ids,
attention_mask=attention_mask,
do_sample=False,
max_length=max_length,
output_scores=output_scores,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict_in_generate=return_dict_in_generate,
remove_invalid_values=True,
constraints=constraints,
**beam_kwargs,
**logits_process_kwargs,
)
# constrained_beam_search does not automatically interleave `batch_size` dim for `num_beams`
kwargs = {}
if model.config.is_encoder_decoder:
encoder_outputs, input_ids_clone, attention_mask_clone = self._get_encoder_outputs(
model,
input_ids,
attention_mask,
num_interleave=constrained_beam_scorer.num_beams,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
kwargs["encoder_outputs"] = encoder_outputs
input_ids_clone = input_ids_clone.repeat_interleave(constrained_beam_scorer.num_beams, dim=0)
else:
attention_mask_clone = attention_mask.repeat_interleave(constrained_beam_scorer.num_beams, dim=0)
input_ids_clone = input_ids.repeat_interleave(constrained_beam_scorer.num_beams, dim=0)
with torch.no_grad():
output_group_beam_search = model.constrained_beam_search(
input_ids_clone,
constrained_beam_scorer,
max_length=max_length,
attention_mask=attention_mask_clone,
logits_processor=logits_processor,
output_scores=output_scores,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict_in_generate=return_dict_in_generate,
**kwargs,
)
return output_generate, output_group_beam_search
def test_greedy_generate(self):
# check `generate()` and `greedy_search()` are equal
for model_class in self.all_generative_model_classes:
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
# test old generation output for backwards compatibility
model = model_class(config).to(torch_device).eval()
output_greedy, output_generate = self._greedy_generate(
model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length
)
self.assertListEqual(output_greedy.tolist(), output_generate.tolist())
def test_greedy_generate_dict_outputs(self):
for model_class in self.all_generative_model_classes:
# disable cache
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
config.use_cache = False
model = model_class(config).to(torch_device).eval()
output_greedy, output_generate = self._greedy_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
output_scores=True,
output_hidden_states=True,
output_attentions=True,
return_dict_in_generate=True,
)
if model.config.is_encoder_decoder:
self.assertIsInstance(output_greedy, GreedySearchEncoderDecoderOutput)
self.assertIsInstance(output_generate, GreedySearchEncoderDecoderOutput)
else:
self.assertIsInstance(output_greedy, GreedySearchDecoderOnlyOutput)
self.assertIsInstance(output_generate, GreedySearchDecoderOnlyOutput)
self.assertListEqual(output_generate.sequences.tolist(), output_greedy.sequences.tolist())
for output in (output_greedy, output_generate):
self._check_outputs(output, input_ids, model.config)
def test_greedy_generate_dict_outputs_use_cache(self):
for model_class in self.all_generative_model_classes:
# enable cache
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
if not hasattr(config, "use_cache"):
# only relevant if model has "use_cache"
return
config.use_cache = True
config.is_decoder = True
model = model_class(config).to(torch_device).eval()
output_greedy, output_generate = self._greedy_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
output_scores=True,
output_hidden_states=True,
output_attentions=True,
return_dict_in_generate=True,
)
self.assertListEqual(output_generate.sequences.tolist(), output_greedy.sequences.tolist())
for output in (output_greedy, output_generate):
self._check_outputs(output, input_ids, model.config, use_cache=True)
def test_sample_generate(self):
for model_class in self.all_generative_model_classes:
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
model = model_class(config).to(torch_device).eval()
if model.config.is_encoder_decoder:
max_length = 4
process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
input_ids.shape[-1],
model.config.eos_token_id,
forced_bos_token_id=model.config.forced_bos_token_id,
forced_eos_token_id=model.config.forced_eos_token_id,
max_length=max_length,
)
logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=1)
# check `generate()` and `sample()` are equal
output_sample, output_generate = self._sample_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
num_return_sequences=1,
logits_processor=logits_processor,
logits_warper=logits_warper,
logits_warper_kwargs=logits_warper_kwargs,
process_kwargs=process_kwargs,
)
self.assertListEqual(output_sample.tolist(), output_generate.tolist())
# check `generate()` and `sample()` yield equal results for `num_return_sequences`
output_sample, output_generate = self._sample_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
num_return_sequences=3,
logits_processor=logits_processor,
logits_warper=logits_warper,
logits_warper_kwargs=logits_warper_kwargs,
process_kwargs=process_kwargs,
)
self.assertListEqual(output_sample.tolist(), output_generate.tolist())
def test_sample_generate_dict_output(self):
for model_class in self.all_generative_model_classes:
# disable cache
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
config.use_cache = False
model = model_class(config).to(torch_device).eval()
if model.config.is_encoder_decoder:
max_length = 4
process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
input_ids.shape[-1],
model.config.eos_token_id,
forced_bos_token_id=model.config.forced_bos_token_id,
forced_eos_token_id=model.config.forced_eos_token_id,
max_length=max_length,
)
logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=1)
output_sample, output_generate = self._sample_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
num_return_sequences=2,
logits_processor=logits_processor,
logits_warper=logits_warper,
logits_warper_kwargs=logits_warper_kwargs,
process_kwargs=process_kwargs,
output_scores=True,
output_hidden_states=True,
output_attentions=True,
return_dict_in_generate=True,
)
if model.config.is_encoder_decoder:
self.assertIsInstance(output_sample, SampleEncoderDecoderOutput)
self.assertIsInstance(output_generate, SampleEncoderDecoderOutput)
else:
self.assertIsInstance(output_sample, SampleDecoderOnlyOutput)
self.assertIsInstance(output_generate, SampleDecoderOnlyOutput)
self.assertListEqual(output_generate.sequences.tolist(), output_sample.sequences.tolist())
for output in (output_sample, output_generate):
self._check_outputs(output, input_ids, model.config, num_return_sequences=2)
def test_beam_search_generate(self):
for model_class in self.all_generative_model_classes:
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
# It is important to set the eos_token_id to None so that no sequences shorter than
# `max_length` can be generated, which could otherwise lead to flaky CircleCI
# failures if the top `num_return_sequences` beams are all shorter than the longest beam
config.eos_token_id = None
config.forced_eos_token_id = None
model = model_class(config).to(torch_device).eval()
if model.config.is_encoder_decoder:
max_length = 4
logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
input_ids.shape[-1],
config.eos_token_id,
config.forced_bos_token_id,
config.forced_eos_token_id,
max_length,
)
beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(input_ids.shape[0], max_length)
# check `generate()` and `beam_search()` are equal
output_generate, output_beam_search = self._beam_search_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
beam_scorer=beam_scorer,
beam_kwargs=beam_kwargs,
logits_process_kwargs=logits_process_kwargs,
logits_processor=logits_processor,
)
self.assertListEqual(output_generate.tolist(), output_beam_search.tolist())
# check `generate()` and `beam_search()` are equal for `num_return_sequences`
num_return_sequences = 2
if model.config.is_encoder_decoder:
max_length = 4
beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(
input_ids.shape[0], max_length, num_return_sequences=num_return_sequences
)
output_generate, output_beam_search = self._beam_search_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
beam_scorer=beam_scorer,
beam_kwargs=beam_kwargs,
logits_process_kwargs=logits_process_kwargs,
logits_processor=logits_processor,
)
self.assertListEqual(output_generate.tolist(), output_beam_search.tolist())
def test_beam_search_generate_dict_output(self):
for model_class in self.all_generative_model_classes:
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
# disable cache
config.use_cache = False
# It is important to set the eos_token_id to None so that no sequences shorter than
# `max_length` can be generated, which could otherwise lead to flaky CircleCI
# failures if the top `num_return_sequences` beams are all shorter than the longest beam
config.eos_token_id = None
config.forced_eos_token_id = None
model = model_class(config).to(torch_device).eval()
if model.config.is_encoder_decoder:
max_length = 4
logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
input_ids.shape[-1],
config.eos_token_id,
config.forced_bos_token_id,
config.forced_eos_token_id,
max_length,
)
beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(input_ids.shape[0], max_length)
output_generate, output_beam_search = self._beam_search_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
beam_scorer=beam_scorer,
beam_kwargs=beam_kwargs,
logits_process_kwargs=logits_process_kwargs,
logits_processor=logits_processor,
output_scores=True,
output_hidden_states=True,
output_attentions=True,
return_dict_in_generate=True,
)
if model.config.is_encoder_decoder:
self.assertIsInstance(output_beam_search, BeamSearchEncoderDecoderOutput)
self.assertIsInstance(output_generate, BeamSearchEncoderDecoderOutput)
else:
self.assertIsInstance(output_beam_search, BeamSearchDecoderOnlyOutput)
self.assertIsInstance(output_generate, BeamSearchDecoderOnlyOutput)
self.assertListEqual(output_generate.sequences.tolist(), output_beam_search.sequences.tolist())
self.assertTrue(
torch.allclose(output_generate["sequences_scores"], output_beam_search["sequences_scores"], atol=1e-3)
)
self.assertTrue(output_generate["sequences_scores"].shape == (output_generate["sequences"].shape[0],))
self.assertTrue((output_generate["sequences_scores"] < 0).all().item())
for output in (output_beam_search, output_generate):
self._check_outputs(output, input_ids, model.config, num_return_sequences=beam_scorer.num_beams)
def test_beam_search_generate_dict_outputs_use_cache(self):
for model_class in self.all_generative_model_classes:
# enable cache
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
# It is important to set the eos_token_id to None to ensure that no sequences
# shorter than `max_length` can be generated, which could lead to flaky CircleCI
# failures if the top `num_return_sequences` beams are all shorter than the longest beam
config.eos_token_id = None
config.forced_eos_token_id = None
if not hasattr(config, "use_cache"):
# only relevant if model has "use_cache"
return
model = model_class(config).to(torch_device).eval()
if model.config.is_encoder_decoder:
max_length = 4
logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
input_ids.shape[-1],
config.eos_token_id,
config.forced_bos_token_id,
config.forced_eos_token_id,
max_length,
)
beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(input_ids.shape[0], max_length)
config.use_cache = True
config.is_decoder = True
model = model_class(config).to(torch_device).eval()
output_beam, output_generate = self._beam_search_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
beam_scorer=beam_scorer,
beam_kwargs=beam_kwargs,
logits_process_kwargs=logits_process_kwargs,
logits_processor=logits_processor,
output_scores=True,
output_hidden_states=True,
output_attentions=True,
return_dict_in_generate=True,
)
self.assertListEqual(output_generate.sequences.tolist(), output_beam.sequences.tolist())
for output in (output_beam, output_generate):
self._check_outputs(
output, input_ids, model.config, use_cache=True, num_return_sequences=beam_scorer.num_beams
)
def test_beam_sample_generate(self):
for model_class in self.all_generative_model_classes:
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
# It is important to set the eos_token_id to None to ensure that no sequences
# shorter than `max_length` can be generated, which could lead to flaky CircleCI
# failures if the top `num_return_sequences` beams are all shorter than the longest beam
config.eos_token_id = None
config.forced_eos_token_id = None
logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=1)
model = model_class(config).to(torch_device).eval()
# check `generate()` and `beam_sample()` are equal
# change `num_return_sequences = 2` but not for `beam_scorer`
num_return_sequences = 2
if model.config.is_encoder_decoder:
max_length = 4
beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(
input_ids.shape[0] * num_return_sequences, max_length
)
beam_kwargs["num_return_sequences"] = num_return_sequences
output_generate, output_beam_sample = self._beam_sample_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
num_return_sequences=num_return_sequences,
beam_scorer=beam_scorer,
beam_kwargs=beam_kwargs,
logits_warper=logits_warper,
logits_warper_kwargs=logits_warper_kwargs,
)
self.assertListEqual(output_generate.tolist(), output_beam_sample.tolist())
def test_beam_sample_generate_dict_output(self):
for model_class in self.all_generative_model_classes:
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
# disable cache
config.use_cache = False
# It is important to set the eos_token_id to None to ensure that no sequences
# shorter than `max_length` can be generated, which could lead to flaky CircleCI
# failures if the top `num_return_sequences` beams are all shorter than the longest beam
config.eos_token_id = None
config.forced_eos_token_id = None
model = model_class(config).to(torch_device).eval()
logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=1)
num_return_sequences = 2
if model.config.is_encoder_decoder:
max_length = 4
beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(
input_ids.shape[0] * num_return_sequences, max_length
)
beam_kwargs["num_return_sequences"] = num_return_sequences
output_beam_sample, output_generate = self._beam_sample_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
num_return_sequences=num_return_sequences,
beam_scorer=beam_scorer,
beam_kwargs=beam_kwargs,
logits_warper=logits_warper,
logits_warper_kwargs=logits_warper_kwargs,
output_scores=True,
output_hidden_states=True,
output_attentions=True,
return_dict_in_generate=True,
)
if model.config.is_encoder_decoder:
self.assertIsInstance(output_beam_sample, BeamSampleEncoderDecoderOutput)
self.assertIsInstance(output_generate, BeamSampleEncoderDecoderOutput)
else:
self.assertIsInstance(output_beam_sample, BeamSampleDecoderOnlyOutput)
self.assertIsInstance(output_generate, BeamSampleDecoderOnlyOutput)
self.assertListEqual(output_generate.sequences.tolist(), output_beam_sample.sequences.tolist())
self.assertTrue(
torch.allclose(output_generate["sequences_scores"], output_beam_sample["sequences_scores"], atol=1e-3)
)
self.assertTrue(output_generate["sequences_scores"].shape == (output_generate["sequences"].shape[0],))
self.assertTrue((output_generate["sequences_scores"] < 0).all().item())
for output in (output_beam_sample, output_generate):
self._check_outputs(
output, input_ids, model.config, num_return_sequences=num_return_sequences * beam_scorer.num_beams
)
def test_generate_without_input_ids(self):
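"""Check that `generate()` works without `input_ids` as long as the config defines a bos token."""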
config, _, _, max_length = self._get_input_ids_and_config()
# if no bos token id => cannot generate from None
if config.bos_token_id is None:
return
for model_class in self.all_generative_model_classes:
model = model_class(config).to(torch_device)
model.eval()
output_ids_generate = model.generate(
do_sample=False,
max_length=max_length,
remove_invalid_values=True,
)
self.assertIsNotNone(output_ids_generate)
def test_group_beam_search_generate(self):
for model_class in self.all_generative_model_classes:
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
# It is important to set the eos_token_id to None to ensure that no sequences
# shorter than `max_length` can be generated, which could lead to flaky CircleCI
# failures if the top `num_return_sequences` beams are all shorter than the longest beam
config.eos_token_id = None
config.forced_eos_token_id = None
model = model_class(config).to(torch_device).eval()
if model.config.is_encoder_decoder:
max_length = 4
logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
input_ids.shape[-1],
config.eos_token_id,
config.forced_bos_token_id,
config.forced_eos_token_id,
max_length,
diversity_penalty=2.0,
)
# check `generate()` and `group_beam_search()` are equal
beam_kwargs, beam_scorer = self._get_diverse_beam_scorer_and_kwargs(input_ids.shape[0], max_length)
output_generate, output_group_beam_search = self._group_beam_search_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
beam_scorer=beam_scorer,
beam_kwargs=beam_kwargs,
logits_processor=logits_processor,
logits_process_kwargs=logits_process_kwargs,
)
self.assertListEqual(output_generate.tolist(), output_group_beam_search.tolist())
# check `generate()` and `group_beam_search()` are equal for `num_return_sequences`
num_return_sequences = 2
if model.config.is_encoder_decoder:
max_length = 4
beam_kwargs, beam_scorer = self._get_diverse_beam_scorer_and_kwargs(
input_ids.shape[0], max_length, num_return_sequences=num_return_sequences
)
output_generate, output_group_beam_search = self._group_beam_search_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
beam_scorer=beam_scorer,
beam_kwargs=beam_kwargs,
logits_processor=logits_processor,
logits_process_kwargs=logits_process_kwargs,
)
self.assertListEqual(output_generate.tolist(), output_group_beam_search.tolist())
def test_group_beam_search_generate_dict_output(self):
for model_class in self.all_generative_model_classes:
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
config.use_cache = False
# It is important to set the eos_token_id to None to ensure that no sequences
# shorter than `max_length` can be generated, which could lead to flaky CircleCI
# failures if the top `num_return_sequences` beams are all shorter than the longest beam
config.eos_token_id = None
config.forced_eos_token_id = None
model = model_class(config).to(torch_device).eval()
if model.config.is_encoder_decoder:
max_length = 4
logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
input_ids.shape[-1],
config.eos_token_id,
config.forced_bos_token_id,
config.forced_eos_token_id,
max_length,
diversity_penalty=2.0,
)
num_return_sequences = 1
beam_kwargs, beam_scorer = self._get_diverse_beam_scorer_and_kwargs(
input_ids.shape[0], max_length, num_return_sequences=num_return_sequences
)
output_generate, output_group_beam_search = self._group_beam_search_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
beam_scorer=beam_scorer,
beam_kwargs=beam_kwargs,
logits_processor=logits_processor,
logits_process_kwargs=logits_process_kwargs,
output_scores=True,
output_hidden_states=True,
output_attentions=True,
return_dict_in_generate=True,
)
if model.config.is_encoder_decoder:
self.assertIsInstance(output_group_beam_search, BeamSearchEncoderDecoderOutput)
self.assertIsInstance(output_generate, BeamSearchEncoderDecoderOutput)
else:
self.assertIsInstance(output_group_beam_search, BeamSearchDecoderOnlyOutput)
self.assertIsInstance(output_generate, BeamSearchDecoderOnlyOutput)
self.assertListEqual(output_generate.sequences.tolist(), output_group_beam_search.sequences.tolist())
self.assertTrue(
torch.allclose(
output_generate["sequences_scores"], output_group_beam_search["sequences_scores"], atol=1e-3
)
)
self.assertTrue(output_generate["sequences_scores"].shape == (output_generate["sequences"].shape[0],))
self.assertTrue((output_generate["sequences_scores"] < 0).all().item())
for output in (output_group_beam_search, output_generate):
self._check_outputs(
output, input_ids, model.config, num_return_sequences=num_return_sequences * beam_scorer.num_beams
)
def test_constrained_beam_search_generate(self):
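"""Check that `generate()` with constraints matches `constrained_beam_search()` and that the forced tokens appear in every output."""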
for model_class in self.all_generative_model_classes:
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
# It is important to set the eos_token_id to None to ensure that no sequences
# shorter than `max_length` can be generated, which could lead to flaky CircleCI
# failures if the top `num_return_sequences` beams are all shorter than the longest beam
config.eos_token_id = None
config.forced_eos_token_id = None
model = model_class(config).to(torch_device).eval()
max_length = 20
logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
input_ids.shape[-1],
config.eos_token_id,
config.forced_bos_token_id,
config.forced_eos_token_id,
max_length,
)
# check `generate()` and `constrained_beam_search()` are equal
# Sample constraints
if not input_ids.dtype == torch.float32:
min_id = torch.min(input_ids) + 3
max_id = torch.max(input_ids)
else:
# otherwise this throws an error for Speech2TextModel since its inputs are floating-point
min_id = 3
max_id = 100
force_tokens = torch.randint(min_id, max_id, (1, 2)).tolist()[0]
constraints = [
PhrasalConstraint(force_tokens),
]
beam_kwargs, beam_scorer = self._get_constrained_beam_scorer_and_kwargs(
input_ids.shape[0], max_length, constraints, num_return_sequences=1
)
output_generate, output_beam_search = self._constrained_beam_search_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
constrained_beam_scorer=beam_scorer,
constraints=constraints,
beam_kwargs=beam_kwargs,
logits_processor=logits_processor,
logits_process_kwargs=logits_process_kwargs,
)
self.assertListEqual(output_generate.tolist(), output_beam_search.tolist())
for generation_output in output_generate:
self._check_sequence_inside_sequence(force_tokens, generation_output)
# check `generate()` and `constrained_beam_search()` are equal for `num_return_sequences`
# Sample constraints
force_tokens = torch.randint(min_id, max_id, (1, 2)).tolist()[0]
constraints = [
PhrasalConstraint(force_tokens),
]
num_return_sequences = 2
max_length = 20
beam_kwargs, beam_scorer = self._get_constrained_beam_scorer_and_kwargs(
input_ids.shape[0], max_length, constraints, num_return_sequences=num_return_sequences
)
output_generate, output_beam_search = self._constrained_beam_search_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
constrained_beam_scorer=beam_scorer,
constraints=constraints,
beam_kwargs=beam_kwargs,
logits_processor=logits_processor,
logits_process_kwargs=logits_process_kwargs,
)
self.assertListEqual(output_generate.tolist(), output_beam_search.tolist())
for generation_output in output_generate:
self._check_sequence_inside_sequence(force_tokens, generation_output)
def test_constrained_beam_search_generate_dict_output(self):
for model_class in self.all_generative_model_classes:
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
# disable cache
config.use_cache = False
# It is important to set the eos_token_id to None to ensure that no sequences
# shorter than `max_length` can be generated, which could lead to flaky CircleCI
# failures if the top `num_return_sequences` beams are all shorter than the longest beam
config.eos_token_id = None
config.forced_eos_token_id = None
model = model_class(config).to(torch_device).eval()
if model.config.is_encoder_decoder:
max_length = 20
logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
input_ids.shape[-1],
config.eos_token_id,
config.forced_bos_token_id,
config.forced_eos_token_id,
max_length,
)
# Sample constraints
if not input_ids.dtype == torch.float32:
min_id = torch.min(input_ids) + 3
max_id = torch.max(input_ids)
else:
# otherwise this throws an error for Speech2TextModel since its inputs are floating-point
min_id = 3
max_id = 100
force_tokens = torch.randint(min_id, max_id, (1, 2)).tolist()[0]
constraints = [
PhrasalConstraint(force_tokens),
]
beam_kwargs, beam_scorer = self._get_constrained_beam_scorer_and_kwargs(
input_ids.shape[0], max_length, constraints, num_return_sequences=1
)
output_generate, output_beam_search = self._constrained_beam_search_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
constrained_beam_scorer=beam_scorer,
constraints=constraints,
beam_kwargs=beam_kwargs,
logits_processor=logits_processor,
logits_process_kwargs=logits_process_kwargs,
output_scores=True,
output_hidden_states=True,
output_attentions=True,
return_dict_in_generate=True,
)
if model.config.is_encoder_decoder:
self.assertIsInstance(output_beam_search, BeamSearchEncoderDecoderOutput)
self.assertIsInstance(output_generate, BeamSearchEncoderDecoderOutput)
else:
self.assertIsInstance(output_beam_search, BeamSearchDecoderOnlyOutput)
self.assertIsInstance(output_generate, BeamSearchDecoderOnlyOutput)
self.assertListEqual(output_generate.sequences.tolist(), output_beam_search.sequences.tolist())
self.assertTrue(
torch.allclose(output_generate["sequences_scores"], output_beam_search["sequences_scores"], atol=1e-3)
)
self.assertTrue(output_generate["sequences_scores"].shape == (output_generate["sequences"].shape[0],))
self.assertTrue((output_generate["sequences_scores"] < 0).all().item())
for output in (output_beam_search, output_generate):
self._check_outputs(output, input_ids, model.config, num_return_sequences=beam_scorer.num_beams)
def test_generate_with_head_masking(self):
"""Test designed for encoder-decoder models to ensure the attention head masking is used."""
attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
for model_class in self.all_generative_model_classes:
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
model = model_class(config).to(torch_device)
# We want to test only encoder-decoder models
if not config.is_encoder_decoder:
continue
head_masking = {
"head_mask": torch.zeros(config.encoder_layers, config.encoder_attention_heads, device=torch_device),
"decoder_head_mask": torch.zeros(
config.decoder_layers, config.decoder_attention_heads, device=torch_device
),
"cross_attn_head_mask": torch.zeros(
config.decoder_layers, config.decoder_attention_heads, device=torch_device
),
}
signature = inspect.signature(model.forward)
# We want to test only models where encoder/decoder head masking is implemented
if not set(head_masking.keys()) < set([*signature.parameters.keys()]):
continue
for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
out = model.generate(
input_ids,
attention_mask=attention_mask,
num_beams=1,
output_attentions=True,
return_dict_in_generate=True,
remove_invalid_values=True,
**{name: mask},
)
# We check the state of decoder_attentions and cross_attentions just from the last step
attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)
def _check_outputs(self, output, input_ids, config, use_cache=False, num_return_sequences=1):
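"""Check that the scores, attentions, and hidden states of a generate output have the expected structure and shapes."""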
batch_size, seq_length = input_ids.shape
num_sequences_in_output = batch_size * num_return_sequences
gen_len = (
output.sequences.shape[-1] - 1 if config.is_encoder_decoder else output.sequences.shape[-1] - seq_length
)
# scores
self._check_scores(num_sequences_in_output, output.scores, length=gen_len, config=config)
# Attentions
if config.is_encoder_decoder:
# encoder
self._check_encoder_attention_for_generate(output.encoder_attentions, batch_size, config, seq_length)
# decoder
self._check_attentions_for_generate(
num_sequences_in_output,
output.decoder_attentions,
min_length=1,
max_length=output.sequences.shape[-1],
config=config,
use_cache=use_cache,
)
else:
# with use_cache, the first step is identical to the no-cache case, so skip it here
attentions = output.attentions if not use_cache else output.attentions[1:]
min_length = seq_length if not use_cache else seq_length + 1
self._check_attentions_for_generate(
num_sequences_in_output,
attentions=attentions,
min_length=min_length,
max_length=output.sequences.shape[-1],
config=config,
use_cache=use_cache,
)
# Hidden States
if config.is_encoder_decoder:
# encoder
self._check_encoder_hidden_states_for_generate(
output.encoder_hidden_states, batch_size, config, seq_length
)
# decoder
self._check_hidden_states_for_generate(
num_sequences_in_output,
output.decoder_hidden_states,
min_length=1,
max_length=output.sequences.shape[-1],
config=config,
use_cache=use_cache,
)
else:
# with use_cache, the first step is identical to the no-cache case, so skip it here
hidden_states = output.hidden_states if not use_cache else output.hidden_states[1:]
min_length = seq_length if not use_cache else seq_length + 1
self._check_hidden_states_for_generate(
num_sequences_in_output,
hidden_states,
min_length=min_length,
max_length=output.sequences.shape[-1],
config=config,
use_cache=use_cache,
)
def _check_scores(self, batch_size, scores, length, config):
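"""Check that `scores` is a tuple of `length` tensors, each of shape (batch_size, vocab_size)."""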
expected_shape = (batch_size, config.vocab_size)
self.assertIsInstance(scores, tuple)
self.assertEqual(len(scores), length)
self.assertListEqual([iter_scores.shape for iter_scores in scores], [expected_shape] * len(scores))
def _check_attentions_for_generate(
self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
):
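"""Check that the attentions returned for each generation step have the expected count and per-layer shapes."""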
self.assertIsInstance(attentions, tuple)
self.assertListEqual(
[isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
)
self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)
for idx, iter_attentions in enumerate(attentions):
tgt_len = min_length + idx if not use_cache else 1
src_len = min_length + idx
expected_shape = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions)
)
def _check_encoder_attention_for_generate(self, attentions, batch_size, config, seq_length):
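"""Check that each encoder attention tensor has shape (batch_size, num_attention_heads, seq_length, seq_length)."""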
encoder_expected_shape = (batch_size, config.num_attention_heads, seq_length, seq_length)
self.assertIsInstance(attentions, tuple)
self.assertListEqual(
[layer_attentions.shape for layer_attentions in attentions],
[encoder_expected_shape] * len(attentions),
)
def _check_hidden_states_for_generate(
self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
):
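"""Check that the hidden states returned for each generation step have the expected count and per-layer shapes."""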
self.assertIsInstance(hidden_states, tuple)
self.assertListEqual(
[isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
[True] * len(hidden_states),
)
self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)
for idx, iter_hidden_states in enumerate(hidden_states):
seq_len = min_length + idx if not use_cache else 1
expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
[expected_shape] * len(iter_hidden_states),
)
def _check_encoder_hidden_states_for_generate(self, hidden_states, batch_size, config, seq_length):
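"""Check that each encoder hidden state tensor has shape (batch_size, seq_length, hidden_size)."""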
encoder_expected_shape = (batch_size, seq_length, config.hidden_size)
self.assertIsInstance(hidden_states, tuple)
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in hidden_states],
[encoder_expected_shape] * len(hidden_states),
)
def _check_sequence_inside_sequence(self, tensor_1, tensor_2):
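"""Assert that the shorter of the two sequences occurs as a contiguous subsequence of the longer one."""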
# check whether tensor_1 is a contiguous subsequence of tensor_2 or vice versa
# convert both to plain Python lists (via CPU) so they can be compared directly
if not isinstance(tensor_1, list):
tensor_1 = tensor_1.cpu().tolist()
if not isinstance(tensor_2, list):
tensor_2 = tensor_2.cpu().tolist()
in_order = len(tensor_1) <= len(tensor_2)
longer = tensor_2 if in_order else tensor_1
shorter = tensor_1 if in_order else tensor_2
flag = False
chunk_size = len(shorter)
for chunk_idx in range(len(longer) - chunk_size + 1):
subseq = longer[chunk_idx : chunk_idx + chunk_size]
if subseq == shorter:
flag = True
break
self.assertTrue(flag)
@require_torch
class UtilsFunctionsTest(unittest.TestCase):
# tests whether the top_k_top_p_filtering function behaves as expected
def test_top_k_top_p_filtering(self):
logits = torch.tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276,
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
], # cumulative prob of 4 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958,
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
], # cumulative prob of 4 highest values <= 0.6
],
dtype=torch.float,
device=torch_device,
)
non_inf_expected_idx = torch.tensor(
[[0, 0], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 20], [1, 27]],
dtype=torch.long,
device=torch_device,
) # expected non-filtered indices as noted above
non_inf_expected_output = torch.tensor(
[
8.2221,
8.4321,
7.4402,
9.3845,
6.2712,
8.8275,
7.3858,
9.6770,
], # expected non-filtered values as noted above
dtype=torch.float,
device=torch_device,
)
output = top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)
non_inf_output = output[output != -float("inf")].to(device=torch_device)
non_inf_idx = (output != -float("inf")).nonzero().to(device=torch_device)
self.assertTrue(torch.allclose(non_inf_expected_output, non_inf_output, atol=1e-12))
self.assertTrue(torch.all(torch.eq(non_inf_expected_idx, non_inf_idx)))
@require_torch
class GenerationIntegrationTests(unittest.TestCase):
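"""Integration tests for `generate()` covering beam search variants, sampling, constrained decoding, stopping criteria, transition scores, and input handling."""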
@slow
def test_diverse_beam_search(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood.
The celebrity couple announced the arrival of their son, Silas Randall Timberlake, in statements to People.
"Silas was the middle name of Timberlake's maternal grandfather Bill Bomar, who died in 2012, while Randall is the musician's own middle name, as well as his father's first," People reports.
The couple announced the pregnancy in January, with an Instagram post. It is the first baby for both."""
bart_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
bart_model = BartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn").to(torch_device)
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
outputs = bart_model.generate(
input_ids,
num_beams=4,
num_return_sequences=2,
num_beam_groups=4,
diversity_penalty=2.0,
remove_invalid_values=True,
)
generated_text = bart_tokenizer.batch_decode(outputs, skip_special_tokens=True)
self.assertListEqual(
generated_text,
[
"The couple announced the birth of their son, Silas Randall Timberlake, in a statement. Silas was the middle name of Timberlake's maternal grandfather Bill Bomar. Randall is the musician's own middle name, as well as his father's first. It is the first baby for both of them.",
"Justin Timberlake and Jessica Biel have a son. The baby is named Silas Randall Timberlake. It is the first child for both. The couple announced the pregnancy in January. The name Silas is the middle name of Timberlake's maternal grandfather. It's also his own middle name.",
],
)
def test_max_length_backward_compat_greedy(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to(
torch_device
)
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
max_length = 20
input_ids = input_ids.expand(2, -1)
model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {})
input_ids = bart_model._prepare_decoder_input_ids_for_generation(
input_ids.shape[0],
decoder_start_token_id=bart_model.config.decoder_start_token_id,
bos_token_id=bart_model.config.bos_token_id,
)
with self.assertWarns(UserWarning):
bart_model.greedy_search(
input_ids,
max_length=max_length,
pad_token_id=bart_model.config.pad_token_id,
eos_token_id=bart_model.config.eos_token_id,
**model_kwargs,
)
def test_max_length_backward_compat_sample(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to(
torch_device
)
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
max_length = 20
input_ids = input_ids.expand(2, -1)
model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {})
input_ids = bart_model._prepare_decoder_input_ids_for_generation(
input_ids.shape[0],
decoder_start_token_id=bart_model.config.decoder_start_token_id,
bos_token_id=bart_model.config.bos_token_id,
)
with torch.no_grad():
with self.assertWarns(UserWarning):
bart_model.sample(
input_ids,
max_length=max_length,
pad_token_id=bart_model.config.pad_token_id,
eos_token_id=bart_model.config.eos_token_id,
**model_kwargs,
)
def test_max_length_backward_compat_beam_search(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to(
torch_device
)
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
batch_size = 1
max_length = 20
num_beams = 2
input_ids = input_ids.expand(2, -1)
model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {})
input_ids = bart_model._prepare_decoder_input_ids_for_generation(
input_ids.shape[0],
decoder_start_token_id=bart_model.config.decoder_start_token_id,
bos_token_id=bart_model.config.bos_token_id,
)
beam_scorer = BeamSearchScorer(
batch_size=batch_size,
num_beams=num_beams,
device=torch_device,
)
with self.assertWarns(UserWarning):
_ = bart_model.beam_search(
input_ids, num_beams=num_beams, max_length=max_length, beam_scorer=beam_scorer, **model_kwargs
)
def test_max_length_backward_compat_group_beam_search(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to(
torch_device
)
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
batch_size = 1
max_length = 20
num_beams = 6
num_beam_groups = 3
num_return_sequences = num_beams * batch_size
input_ids = input_ids.expand(6, -1)
model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {})
input_ids = bart_model._prepare_decoder_input_ids_for_generation(
input_ids.shape[0],
decoder_start_token_id=bart_model.config.decoder_start_token_id,
bos_token_id=bart_model.config.bos_token_id,
)
diverse_beam_scorer = BeamSearchScorer(
batch_size=batch_size,
num_beams=num_beams,
device=torch_device,
num_beam_hyps_to_keep=num_return_sequences,
num_beam_groups=num_beam_groups,
)
with self.assertWarns(UserWarning):
bart_model.group_beam_search(
input_ids, diverse_beam_scorer, num_beams=num_beams, max_length=max_length, **model_kwargs
)
def test_max_length_warning_if_different(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to(
torch_device
)
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
batch_size = 1
max_length = 20
num_beams = 6
num_beam_groups = 3
num_return_sequences = num_beams * batch_size
stopping_criteria_max_length = 18
stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=stopping_criteria_max_length)])
# Greedy
input_ids = input_ids.expand(6, -1)
model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {})
input_ids = bart_model._prepare_decoder_input_ids_for_generation(
input_ids.shape[0],
decoder_start_token_id=bart_model.config.decoder_start_token_id,
bos_token_id=bart_model.config.bos_token_id,
)
with self.assertWarns(UserWarning):
bart_model.greedy_search(
input_ids,
max_length=max_length,
pad_token_id=bart_model.config.pad_token_id,
stopping_criteria=stopping_criteria,
eos_token_id=bart_model.config.eos_token_id,
**model_kwargs,
)
# Sample
with self.assertWarns(UserWarning):
with torch.no_grad():
bart_model.sample(
input_ids,
max_length=max_length,
stopping_criteria=stopping_criteria,
pad_token_id=bart_model.config.pad_token_id,
eos_token_id=bart_model.config.eos_token_id,
**model_kwargs,
)
# Beam
beam_scorer = BeamSearchScorer(
batch_size=batch_size,
num_beams=num_beams,
device=torch_device,
)
with self.assertWarns(UserWarning):
with torch.no_grad():
bart_model.beam_search(
input_ids,
num_beams=num_beams,
stopping_criteria=stopping_criteria,
max_length=max_length,
beam_scorer=beam_scorer,
**model_kwargs,
)
# Grouped beam search
diverse_beam_scorer = BeamSearchScorer(
batch_size=batch_size,
num_beams=num_beams,
device=torch_device,
num_beam_hyps_to_keep=num_return_sequences,
num_beam_groups=num_beam_groups,
)
with self.assertWarns(UserWarning):
bart_model.group_beam_search(
input_ids,
diverse_beam_scorer,
stopping_criteria=stopping_criteria,
num_beams=num_beams,
max_length=max_length,
**model_kwargs,
)
def test_beam_search_warning_if_max_length_is_passed(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to(
torch_device
)
batch_size = 1
num_beams = 3
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
input_ids = input_ids.expand(num_beams, -1)
model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {})
# pretend decoder_input_ids correspond to first encoder input id
decoder_input_ids = input_ids[:, :1]
stopping_criteria_max_length = 18
stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=stopping_criteria_max_length)])
with self.assertWarns(UserWarning):
beam_scorer = BeamSearchScorer(
batch_size=batch_size,
num_beams=num_beams,
device=torch_device,
max_length=10,
)
generated_ids = bart_model.beam_search(
decoder_input_ids,
num_beams=num_beams,
stopping_criteria=stopping_criteria,
beam_scorer=beam_scorer,
**model_kwargs,
)
beam_scorer_no_max_len = BeamSearchScorer(
batch_size=batch_size,
num_beams=num_beams,
device=torch_device,
)
generated_ids_no_max_len = bart_model.beam_search(
decoder_input_ids,
num_beams=num_beams,
stopping_criteria=stopping_criteria,
beam_scorer=beam_scorer_no_max_len,
**model_kwargs,
)
# BeamSearchScorer max_length should not influence "real" max_length
self.assertEqual(generated_ids.tolist(), generated_ids_no_max_len.tolist())
def test_custom_stopping_criteria_overload_error(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
bart_tokenizer = BartTokenizer.from_pretrained("sshleifer/bart-tiny-random")
bart_model = BartForConditionalGeneration.from_pretrained("sshleifer/bart-tiny-random").to(torch_device)
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
stopping_criteria = StoppingCriteriaList()
stopping_criteria.append(MaxLengthCriteria(max_length=42))
with self.assertRaises(ValueError):
bart_model.generate(input_ids, stopping_criteria=stopping_criteria)
with self.assertRaises(ValueError):
bart_model.generate(input_ids, stopping_criteria=stopping_criteria, max_length=32)
def test_custom_stopping_criteria(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
bart_tokenizer = BartTokenizer.from_pretrained("sshleifer/bart-tiny-random")
bart_model = BartForConditionalGeneration.from_pretrained("sshleifer/bart-tiny-random").to(torch_device)
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
class DummyCriteria(StoppingCriteria):
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
return input_ids.shape[-1] >= 20
stopping_criteria = StoppingCriteriaList()
stopping_criteria.append(DummyCriteria())
self.assertEqual(
list(bart_model.generate(input_ids, stopping_criteria=stopping_criteria, max_length=22).shape),
[1, 20],
)
self.assertEqual(
list(bart_model.generate(input_ids, stopping_criteria=stopping_criteria, max_length=18).shape),
[1, 18],
)
def test_custom_logits_processor(self):
bart_tokenizer = BartTokenizer.from_pretrained("sshleifer/bart-tiny-random")
article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
bart_model = BartForConditionalGeneration.from_pretrained("sshleifer/bart-tiny-random", min_length=1).to(
torch_device
)
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
logits_processor = LogitsProcessorList()
logits_processor.append(MinLengthLogitsProcessor(min_length=10, eos_token_id=0))
# it should not be allowed to define `min_length` both via the config and via a `logits_processor` list
with self.assertRaises(ValueError):
bart_model.generate(input_ids, logits_processor=logits_processor)
bart_model.config.min_length = None
bart_model.generate(input_ids, logits_processor=logits_processor)
def test_max_new_tokens_encoder_decoder(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to(
torch_device
)
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
self.assertEqual(list(input_ids.shape), [1, 29])
max_new_tokens = 3
bart_model.config.max_length = 20
bart_model.config.eos_token_id = None
# Encoder decoder call
outputs = bart_model.generate(input_ids, max_new_tokens=max_new_tokens)
# 1 BOS + 3 new tokens
self.assertEqual(list(outputs.shape), [1, 4])
# Decoder only call
outputs = bart_model.generate(decoder_input_ids=input_ids, max_new_tokens=max_new_tokens)
# 29 + 3 new tokens
self.assertEqual(list(outputs.shape), [1, 32])
# Encoder decoder call > 20
outputs = bart_model.generate(max_new_tokens=max_new_tokens + 20)
# 1 BOS + 20 + 3 new tokens
self.assertEqual(list(outputs.shape), [1, 24])
# max_new_tokens and max_length serve the same purpose and should not be used together.
with self.assertWarns(UserWarning):
bart_model.generate(decoder_input_ids=input_ids, max_new_tokens=10, max_length=20)
def test_max_new_tokens_decoder_only(self):
article = """Justin Timberlake."""
gpt2_tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
gpt2_model = GPT2LMHeadModel.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
input_ids = gpt2_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
self.assertEqual(list(input_ids.shape), [1, 9])
max_new_tokens = 3
gpt2_model.config.max_length = 20
# call < 20
outputs = gpt2_model.generate(input_ids, max_new_tokens=max_new_tokens)
# 9 input_ids + 3 new tokens
self.assertEqual(list(outputs.shape), [1, 12])
# call > 20
outputs = gpt2_model.generate(max_new_tokens=max_new_tokens + 20)
# 1 BOS token + 23 new tokens
self.assertEqual(list(outputs.shape), [1, 24])
# max_new_tokens and max_length serve the same purpose and should not be used together.
with self.assertWarns(UserWarning):
gpt2_model.generate(decoder_input_ids=input_ids, max_new_tokens=10, max_length=20)
def test_encoder_decoder_generate_with_inputs_embeds(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart", max_length=5).to(
torch_device
)
model.config.eos_token_id = None
input_ids = tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
inputs_embeds = model.get_input_embeddings()(input_ids)
output_sequences = model.generate(inputs_embeds=inputs_embeds)
# make sure model generated correctly until `max_length`
self.assertEqual(output_sequences.shape, (1, 5))
def test_encoder_decoder_generate_attention_mask(self):
articles = ["Timberlake", "Jessica Biel, welcome to parenthood among other things"]
tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
# need extreme generation values here to force this test
# to fail when `attention_mask` is not correctly treated in generate
model = BartForConditionalGeneration.from_pretrained(
"hf-internal-testing/tiny-random-bart", max_length=50, num_beams=5, num_return_sequences=5
).to(torch_device)
model.config.eos_token_id = None
input_ids = tokenizer(articles[0], return_tensors="pt").input_ids.to(torch_device)
input_ids_batched = tokenizer(articles, padding=True, return_tensors="pt").input_ids.to(torch_device)
output_sequences_batched = model.generate(
input_ids=input_ids_batched, return_dict_in_generate=True, output_scores=True
)
output_sequences = model.generate(input_ids=input_ids, return_dict_in_generate=True, output_scores=True)
batched_out = output_sequences_batched.sequences_scores
out = output_sequences.sequences_scores
diff = (batched_out[:5].sum() - out.sum()).abs()
self.assertTrue(diff < 1e-4)
def test_decoder_generate_with_inputs_embeds(self):
article = """I need input_ids to generate"""
tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = GPT2LMHeadModel.from_pretrained("hf-internal-testing/tiny-random-gpt2", max_length=5).to(torch_device)
input_ids = tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
inputs_embeds = model.get_input_embeddings()(input_ids)
# cannot generate from `inputs_embeds` for decoder only
with self.assertRaises(ValueError):
model.generate(inputs_embeds=inputs_embeds)
def test_generate_input_ids_as_kwarg(self):
article = """I need input_ids to generate"""
tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = GPT2LMHeadModel.from_pretrained("hf-internal-testing/tiny-random-gpt2", max_length=15).to(torch_device)
input_ids = tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
output_sequences_kwargs = model.generate(input_ids=input_ids).cpu()
output_sequences = model.generate(input_ids).cpu()
self.assertListEqual(output_sequences.tolist(), output_sequences_kwargs.tolist())
self.assertEqual(output_sequences.shape, (1, 15))
def test_generate_non_nlp_input_ids_as_kwarg(self):
model = ImageGPTForCausalImageModeling.from_pretrained(
"hf-internal-testing/tiny-random-imagegpt", max_length=10
).to(torch_device)
input_ids = ids_tensor((3, 5), vocab_size=10)
output_sequences_kwargs = model.generate(input_ids=input_ids).cpu()
output_sequences = model.generate(input_ids).cpu()
self.assertListEqual(output_sequences.tolist(), output_sequences_kwargs.tolist())
self.assertEqual(output_sequences.shape, (3, 10))
def test_generate_input_ids_as_encoder_kwarg(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart", max_length=5).to(
torch_device
)
model.config.eos_token_id = None
input_ids = tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
output_sequences_kwargs = model.generate(input_ids=input_ids).cpu()
output_sequences = model.generate(input_ids).cpu()
self.assertListEqual(output_sequences.tolist(), output_sequences_kwargs.tolist())
self.assertEqual(output_sequences.shape, (1, 5))
def test_generate_inputs_and_encoder_kwargs(self):
article = """I need input_ids to generate"""
tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = GPT2LMHeadModel.from_pretrained("hf-internal-testing/tiny-random-gpt2", max_length=10).to(torch_device)
input_ids = tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
with self.assertRaises(ValueError):
model.generate(input_ids, input_ids=input_ids)
def test_generate_too_many_encoder_kwargs(self):
article = """I need input_ids to generate"""
tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = GPT2LMHeadModel.from_pretrained("hf-internal-testing/tiny-random-gpt2", max_length=10).to(torch_device)
input_ids = tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
with self.assertRaises(ValueError):
model.generate(input_ids=input_ids, inputs_embeds=input_ids)
def test_generate_input_values_as_encoder_kwarg(self):
input_values = floats_tensor((2, 250))
model = SpeechEncoderDecoderModel.from_pretrained("hf-internal-testing/tiny-random-speech-encoder-decoder")
model = model.to(torch_device)
output_sequences_kwargs = model.generate(input_values=input_values, max_length=5).cpu()
output_sequences = model.generate(input_values, max_length=5).cpu()
self.assertListEqual(output_sequences.tolist(), output_sequences_kwargs.tolist())
self.assertEqual(output_sequences.shape, (2, 5))
def test_generate_input_features_as_encoder_kwarg(self):
input_features = floats_tensor((3, 20, 24))
model = Speech2TextForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-speech_to_text")
model = model.to(torch_device)
output_sequences_kwargs = model.generate(input_features=input_features, max_length=5).cpu()
output_sequences = model.generate(input_features, max_length=5).cpu()
self.assertListEqual(output_sequences.tolist(), output_sequences_kwargs.tolist())
self.assertEqual(output_sequences.shape, (3, 5))
def test_generate_pixel_values_as_encoder_kwarg(self):
pixel_values = floats_tensor((2, 3, 30, 30))
model = VisionEncoderDecoderModel.from_pretrained("hf-internal-testing/tiny-random-vision-encoder-decoder")
model = model.to(torch_device)
output_sequences_kwargs = model.generate(pixel_values=pixel_values, max_length=5).cpu()
output_sequences = model.generate(pixel_values, max_length=5).cpu()
self.assertListEqual(output_sequences.tolist(), output_sequences_kwargs.tolist())
self.assertEqual(output_sequences.shape, (2, 5))
def test_generate_encoder_outputs_attention_mask(self):
input_values = floats_tensor((2, 250)).to(torch_device)
attention_mask = torch.ones_like(input_values)
model = SpeechEncoderDecoderModel.from_pretrained("hf-internal-testing/tiny-random-speech-encoder-decoder")
model = model.to(torch_device)
encoder = model.get_encoder()
encoder_outputs = encoder(input_values)
output_sequences_no_mask = model.generate(encoder_outputs=encoder_outputs).cpu()
output_sequences_with_mask = model.generate(encoder_outputs=encoder_outputs, attention_mask=attention_mask)
output_sequences_with_mask = output_sequences_with_mask.cpu()
self.assertListEqual(output_sequences_no_mask.tolist(), output_sequences_with_mask.tolist())
def test_transition_scores_beam_search_encoder_decoder(self):
articles = [
"Justin Timberlake and Jessica Biel, welcome to parenthood.",
"Michael Phelps is arguably the most decorated Olympian of all time.",
]
tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
model = BartForConditionalGeneration.from_pretrained(
"hf-internal-testing/tiny-random-bart",
max_length=10,
num_beams=4,
num_return_sequences=2,
eos_token_id=None,
return_dict_in_generate=True,
output_scores=True,
length_penalty=0.0,
)
model = model.to(torch_device)
input_ids = tokenizer(articles, return_tensors="pt", padding=True).input_ids.to(torch_device)
outputs = model.generate(input_ids=input_ids)
transition_scores = model.compute_transition_beam_scores(
outputs.sequences, outputs.scores, outputs.beam_indices
)
transition_scores_sum = transition_scores.sum(-1)
self.assertTrue(torch.allclose(transition_scores_sum, outputs.sequences_scores, atol=1e-3))
def test_transition_scores_beam_search_encoder_decoder_with_eos(self):
articles = [
"Justin Timberlake and Jessica Biel, welcome to parenthood.",
"Michael Phelps is arguably the most decorated Olympian of all time.",
]
tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
model = BartForConditionalGeneration.from_pretrained(
"hf-internal-testing/tiny-random-bart",
max_length=10,
num_beams=4,
num_return_sequences=2,
return_dict_in_generate=True,
output_scores=True,
length_penalty=0.0,
)
model = model.to(torch_device)
input_ids = tokenizer(articles, return_tensors="pt", padding=True).input_ids.to(torch_device)
outputs = model.generate(input_ids=input_ids)
transition_scores = model.compute_transition_beam_scores(
outputs.sequences, outputs.scores, outputs.beam_indices
)
transition_scores_sum = transition_scores.sum(-1)
self.assertTrue(torch.allclose(transition_scores_sum, outputs.sequences_scores, atol=1e-3))
def test_transition_scores_beam_search_decoder_only(self):
articles = [
"Justin Timberlake",
"Michael Phelps",
]
tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
tokenizer.pad_token = tokenizer.eos_token
model = GPT2LMHeadModel.from_pretrained(
"hf-internal-testing/tiny-random-gpt2",
max_length=10,
num_beams=4,
num_return_sequences=2,
pad_token_id=tokenizer.eos_token_id,
eos_token_id=None,
return_dict_in_generate=True,
output_scores=True,
length_penalty=0.0,
)
model = model.to(torch_device)
input_ids = tokenizer(articles, return_tensors="pt", padding=True).input_ids.to(torch_device)
outputs = model.generate(input_ids=input_ids)
transition_scores = model.compute_transition_beam_scores(
outputs.sequences, outputs.scores, outputs.beam_indices
)
transition_scores_sum = transition_scores.sum(-1)
self.assertTrue(torch.allclose(transition_scores_sum, outputs.sequences_scores, atol=1e-3))
def test_transition_scores_beam_sample_encoder_decoder(self):
articles = [
"Justin Timberlake and Jessica Biel, welcome to parenthood.",
"Michael Phelps is arguably the most decorated Olympian of all time.",
]
tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
model = BartForConditionalGeneration.from_pretrained(
"hf-internal-testing/tiny-random-bart",
do_sample=True,
max_length=10,
num_beams=4,
num_return_sequences=2,
eos_token_id=None,
return_dict_in_generate=True,
output_scores=True,
length_penalty=0.0,
)
model = model.to(torch_device)
input_ids = tokenizer(articles, return_tensors="pt", padding=True).input_ids.to(torch_device)
outputs = model.generate(input_ids=input_ids)
transition_scores = model.compute_transition_beam_scores(
outputs.sequences, outputs.scores, outputs.beam_indices
)
transition_scores_sum = transition_scores.sum(-1)
self.assertTrue(torch.allclose(transition_scores_sum, outputs.sequences_scores, atol=1e-3))
def test_transition_scores_group_beam_search_encoder_decoder(self):
articles = [
"Justin Timberlake and Jessica Biel, welcome to parenthood.",
"Michael Phelps is arguably the most decorated Olympian of all time.",
]
tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
model = BartForConditionalGeneration.from_pretrained(
"hf-internal-testing/tiny-random-bart",
max_length=10,
num_beams=2,
num_beam_groups=2,
num_return_sequences=2,
eos_token_id=None,
return_dict_in_generate=True,
output_scores=True,
length_penalty=0.0,
)
model = model.to(torch_device)
input_ids = tokenizer(articles, return_tensors="pt", padding=True).input_ids.to(torch_device)
outputs = model.generate(input_ids=input_ids)
transition_scores = model.compute_transition_beam_scores(
outputs.sequences, outputs.scores, outputs.beam_indices
)
transition_scores_sum = transition_scores.sum(-1)
self.assertTrue(torch.allclose(transition_scores_sum, outputs.sequences_scores, atol=1e-3))
@slow
def test_beam_search_example_integration(self):
# exactly the example provided in the beam search docstring, which previously
# failed when copied verbatim. Refer to PR #15555
tokenizer = AutoTokenizer.from_pretrained("t5-base")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
encoder_input_str = "translate English to German: How old are you?"
encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids
# let's run beam search using 3 beams
num_beams = 3
# define decoder start token ids
input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long)
input_ids = input_ids * model.config.decoder_start_token_id
# add encoder_outputs to model keyword arguments
model_kwargs = {
"encoder_outputs": model.get_encoder()(
encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True
)
}
# instantiate beam scorer
beam_scorer = BeamSearchScorer(
batch_size=1,
num_beams=num_beams,
device=model.device,
)
# instantiate logits processors
logits_processor = LogitsProcessorList(
[
MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id),
]
)
outputs = model.beam_search(input_ids, beam_scorer, logits_processor=logits_processor, **model_kwargs)
outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True)
self.assertListEqual(outputs, ["Wie alt bist du?"])
@slow
def test_constrained_beam_search(self):
model = GPT2LMHeadModel.from_pretrained("../gpt2").to(torch_device)
tokenizer = GPT2Tokenizer.from_pretrained("../gpt2")
force_tokens = tokenizer("scared", add_prefix_space=True, add_special_tokens=False).input_ids
force_tokens_2 = tokenizer("big weapons", add_prefix_space=True, add_special_tokens=False).input_ids
constraints = [
PhrasalConstraint(force_tokens),
PhrasalConstraint(force_tokens_2),
]
starting_text = ["The soldiers were not prepared and"]
input_ids = tokenizer(starting_text, return_tensors="pt").input_ids.to(torch_device)
outputs = model.generate(
input_ids,
constraints=constraints,
num_beams=10,
num_return_sequences=1,
no_repeat_ngram_size=1,
max_length=30,
remove_invalid_values=True,
)
generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True)
self.assertListEqual(
generated_text,
[
"The soldiers were not prepared and didn't know how big the big weapons would be, so they scared them off. They had no idea what to do",
],
)
@slow
def test_constrained_beam_search_mixed(self):
model = GPT2LMHeadModel.from_pretrained("../gpt2").to(torch_device)
tokenizer = GPT2Tokenizer.from_pretrained("../gpt2")
force_phrase = tokenizer("scared", add_prefix_space=True, add_special_tokens=False).input_ids
flexible_phrases = tokenizer(
["scream", "screams", "screaming", "screamed"], add_prefix_space=True, add_special_tokens=False
).input_ids
constraints = [
PhrasalConstraint(force_phrase),
DisjunctiveConstraint(flexible_phrases),
]
starting_text = ["The soldiers", "The child"]
input_ids = tokenizer(starting_text, return_tensors="pt").input_ids.to(torch_device)
outputs = model.generate(
input_ids,
constraints=constraints,
num_beams=10,
num_return_sequences=1,
no_repeat_ngram_size=1,
# max_length=20,
remove_invalid_values=True,
)
generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True)
self.assertListEqual(
generated_text,
[
"The soldiers, who were all scared and screaming at each other as they tried to get out of the",
"The child was taken to a local hospital where she screamed and scared for her life, police said.",
],
)
@slow
def test_constrained_beam_search_mixed_mixin(self):
model = GPT2LMHeadModel.from_pretrained("../gpt2").to(torch_device)
tokenizer = GPT2Tokenizer.from_pretrained("../gpt2")
force_word = "scared"
force_flexible = ["scream", "screams", "screaming", "screamed"]
force_words_ids = [
tokenizer([force_word], add_prefix_space=True, add_special_tokens=False).input_ids,
tokenizer(force_flexible, add_prefix_space=True, add_special_tokens=False).input_ids,
]
starting_text = ["The soldiers", "The child"]
input_ids = tokenizer(starting_text, return_tensors="pt").input_ids.to(torch_device)
outputs = model.generate(
input_ids,
force_words_ids=force_words_ids,
num_beams=10,
num_return_sequences=1,
no_repeat_ngram_size=1,
remove_invalid_values=True,
)
generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True)
self.assertListEqual(
generated_text,
[
"The soldiers, who were all scared and screaming at each other as they tried to get out of the",
"The child was taken to a local hospital where she screamed and scared for her life, police said.",
],
)
@slow
def test_constrained_beam_search_example_translation_mixin(self):
tokenizer = AutoTokenizer.from_pretrained("t5-base")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
encoder_input_str = "translate English to German: How old are you?"
force_words = ["sind"]
input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids
force_words_ids = tokenizer(force_words, add_special_tokens=False).input_ids
outputs = model.generate(
input_ids,
force_words_ids=force_words_ids,
num_beams=10,
num_return_sequences=1,
no_repeat_ngram_size=1,
remove_invalid_values=True,
)
outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True)
self.assertListEqual(outputs, ["Wie alter sind Sie?"])
@slow
def test_constrained_beam_search_example_integration(self):
tokenizer = AutoTokenizer.from_pretrained("t5-base")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
encoder_input_str = "translate English to German: How old are you?"
encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids
# let's run beam search using 5 beams
num_beams = 5
# define decoder start token ids
input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long)
input_ids = input_ids * model.config.decoder_start_token_id
# add encoder_outputs to model keyword arguments
model_kwargs = {
"encoder_outputs": model.get_encoder()(
encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True
)
}
constraint_str = "sind"
constraint_token_ids = tokenizer.encode(constraint_str)[:-1] # remove eos token
constraints = [PhrasalConstraint(token_ids=constraint_token_ids)]
# instantiate beam scorer
beam_scorer = ConstrainedBeamSearchScorer(
batch_size=1, num_beams=num_beams, device=model.device, constraints=constraints
)
# instantiate logits processors
logits_processor = LogitsProcessorList(
[
MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id),
]
)
outputs = model.constrained_beam_search(
input_ids, beam_scorer, constraints=constraints, logits_processor=logits_processor, **model_kwargs
)
outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True)
self.assertListEqual(outputs, ["Wie alter sind Sie?"])
def test_constrained_beam_search_mixin_type_checks(self):
tokenizer = AutoTokenizer.from_pretrained("t5-base")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
encoder_input_str = "translate English to German: How old are you?"
input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids
with self.assertRaises(ValueError):
force_words = ["sind"]
force_words_ids = tokenizer(force_words, return_tensors="pt").input_ids
model.generate(
input_ids,
force_words_ids=force_words_ids,
num_beams=10,
num_return_sequences=1,
no_repeat_ngram_size=1,
remove_invalid_values=True,
)
with self.assertRaises(ValueError):
force_words = ["sind"]
force_words_ids = [tokenizer(force_words, return_tensors="pt").input_ids]
model.generate(
input_ids,
force_words_ids=force_words_ids,
num_beams=10,
num_return_sequences=1,
no_repeat_ngram_size=1,
remove_invalid_values=True,
)
with self.assertRaises(ValueError):
model.generate(input_ids, force_words_ids=[])
with self.assertRaises(ValueError):
model.generate(input_ids, force_words_ids=[[-1]])
with self.assertRaises(ValueError):
model.generate(input_ids, force_words_ids=[[[-1]]])
|
"""datapanel_ctrl.py - controller for the DataPanel element
Chris R. Coughlin (TRI/Austin, Inc.)
"""
__author__ = 'Chris R. Coughlin'
from models.datapanel_model import DataPanelModel
from controllers import pathfinder
import os.path
class DataPanelController(object):
"""Controller for the DataPanel"""
def __init__(self, view):
self.view = view
self.model = DataPanelModel(self)
self.data = None
def populate_tree(self):
"""Populates the view's tree with the contents in the data folder."""
self.clear_tree()
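# Map each data sub-folder to its tree item; the data root maps to the tree root so top-level files attach directly to it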
sub_folders = {pathfinder.data_path(): self.view.data_tree_root}
for file in self.model.find_data():
fldr, fname = os.path.split(file)
if fldr not in sub_folders:
sub_folders[fldr] = self.view.data_tree.AppendItem(self.view.data_tree_root,
os.path.relpath(fldr, pathfinder.data_path()))
data_item = self.view.data_tree.AppendItem(sub_folders.get(fldr, self.view.data_tree_root),
os.path.basename(file))
self.view.data_tree.SetItemData(data_item, file)
def clear_tree(self):
"""Removes the contents of the view's data tree"""
self.view.data_tree.DeleteChildren(self.view.data_tree_root)
# Event Handlers
def on_tree_selection_changed(self, evt):
"""Updates the currently selected data set"""
item = evt.GetItem()
if item:
self.data = self.view.data_tree.GetItemData(item)
evt.Skip()
|
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the listsincelast RPC."""
from test_framework.test_framework import DigidinarTestFramework
from test_framework.util import (
assert_equal,
assert_array_result,
assert_raises_rpc_error,
connect_nodes,
)
class ListSinceBlockTest (DigidinarTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = True
def run_test(self):
# All nodes are in IBD from genesis, so they'll need the miner (node2) to be an outbound connection, or have
# only one connection. (See fPreferredDownload in net_processing)
connect_nodes(self.nodes[1], 2)
self.nodes[2].generate(101)
self.sync_all()
self.test_no_blockhash()
self.test_invalid_blockhash()
self.test_reorg()
self.test_double_spend()
self.test_double_send()
def test_no_blockhash(self):
txid = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 1)
blockhash, = self.nodes[2].generate(1)
self.sync_all()
txs = self.nodes[0].listtransactions()
assert_array_result(txs, {"txid": txid}, {
"category": "receive",
"amount": 1,
"blockhash": blockhash,
"confirmations": 1,
})
assert_equal(
self.nodes[0].listsinceblock(),
{"lastblock": blockhash,
"transactions": txs})
assert_equal(
self.nodes[0].listsinceblock(""),
{"lastblock": blockhash,
"transactions": txs})
def test_invalid_blockhash(self):
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].listsinceblock,
"42759cde25462784395a337460bde75f58e73d3f08bd31fdc3507cbac856a2c4")
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].listsinceblock,
"0000000000000000000000000000000000000000000000000000000000000000")
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].listsinceblock,
"invalid-hex")
def test_reorg(self):
'''
`listsinceblock` did not behave correctly when handed a block that was
no longer in the main chain:
ab0
/ \
aa1 [tx0] bb1
| |
aa2 bb2
| |
aa3 bb3
|
bb4
Consider a client that has only seen block `aa3` above. It asks the node
to `listsinceblock aa3`. But at some point prior the main chain switched
to the bb chain.
Previously: listsinceblock would find height=4 for block aa3 and compare
this to height=5 for the tip of the chain (bb4). It would then return
results restricted to bb3-bb4.
Now: listsinceblock finds the fork at ab0 and returns results in the
range bb1-bb4.
This test only checks that [tx0] is present.
'''
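# Reproduce the scenario by splitting the network, mining competing chains on each side (6 blocks vs 7),
# and rejoining so that the hash passed to listsinceblock belongs to the stale chain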
# Split network into two
self.split_network()
# send to nodes[0] from nodes[2]
senttx = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 1)
# generate on both sides
lastblockhash = self.nodes[1].generate(6)[5]
self.nodes[2].generate(7)
self.log.info('lastblockhash=%s' % (lastblockhash))
self.sync_all([self.nodes[:2], self.nodes[2:]])
self.join_network()
# listsinceblock(lastblockhash) should now include tx, as seen from nodes[0]
lsbres = self.nodes[0].listsinceblock(lastblockhash)
found = False
for tx in lsbres['transactions']:
if tx['txid'] == senttx:
found = True
break
assert found
def test_double_spend(self):
'''
This tests the case where the same UTXO is spent twice on two separate
blocks as part of a reorg.
ab0
/ \
aa1 [tx1] bb1 [tx2]
| |
aa2 bb2
| |
aa3 bb3
|
bb4
Problematic case:
1. User 1 receives BTC in tx1 from utxo1 in block aa1.
2. User 2 receives BTC in tx2 from utxo1 (same) in block bb1
3. User 1 sees 2 confirmations at block aa3.
4. Reorg into bb chain.
5. User 1 asks `listsinceblock aa3` and does not see that tx1 is now
invalidated.
Currently the solution to this is to detect that a reorg'd block is
asked for in listsinceblock, and to iterate back over existing blocks up
until the fork point, and to include all transactions that relate to the
node wallet.
'''
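# Reproduce the conflict by importing the same private key on nodes[1] and nodes[2], spending the shared
# UTXO independently on each side of a network split, then rejoining so the longer chain from nodes[2] wins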
self.sync_all()
# Split network into two
self.split_network()
# share utxo between nodes[1] and nodes[2]
utxos = self.nodes[2].listunspent()
utxo = utxos[0]
privkey = self.nodes[2].dumpprivkey(utxo['address'])
self.nodes[1].importprivkey(privkey)
# send from nodes[1] using utxo to nodes[0]
change = '%.8f' % (float(utxo['amount']) - 1.0003)
recipientDict = {
self.nodes[0].getnewaddress(): 1,
self.nodes[1].getnewaddress(): change,
}
utxoDicts = [{
'txid': utxo['txid'],
'vout': utxo['vout'],
}]
txid1 = self.nodes[1].sendrawtransaction(
self.nodes[1].signrawtransaction(
self.nodes[1].createrawtransaction(utxoDicts, recipientDict))['hex'])
# send from nodes[2] using utxo to nodes[3]
recipientDict2 = {
self.nodes[3].getnewaddress(): 1,
self.nodes[2].getnewaddress(): change,
}
self.nodes[2].sendrawtransaction(
self.nodes[2].signrawtransaction(
self.nodes[2].createrawtransaction(utxoDicts, recipientDict2))['hex'])
# generate on both sides
lastblockhash = self.nodes[1].generate(3)[2]
self.nodes[2].generate(4)
self.join_network()
self.sync_all()
# gettransaction should work for txid1
assert self.nodes[0].gettransaction(txid1)['txid'] == txid1, "gettransaction failed to find txid1"
# listsinceblock(lastblockhash) should now include txid1, as seen from nodes[0]
lsbres = self.nodes[0].listsinceblock(lastblockhash)
assert any(tx['txid'] == txid1 for tx in lsbres['removed'])
# but it should not include 'removed' if include_removed=false
lsbres2 = self.nodes[0].listsinceblock(blockhash=lastblockhash, include_removed=False)
assert 'removed' not in lsbres2
def test_double_send(self):
'''
This tests the case where the same transaction is submitted twice on two
separate blocks as part of a reorg. The former will vanish and the
latter will appear as the true transaction (with confirmations dropping
as a result).
ab0
/ \
aa1 [tx1] bb1
| |
aa2 bb2
| |
aa3 bb3 [tx1]
|
bb4
Asserted:
1. tx1 is listed in listsinceblock.
2. It is included in 'removed' as it was removed, even though it is now
present in a different block.
3. It is listed with a confirmations count of 2 (bb3, bb4), not
3 (aa1, aa2, aa3).
'''
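# The same signed transaction is broadcast on both sides of the split; after rejoining, the longer bb chain
# wins, so confirmations are counted from bb3 (2) rather than from aa1 (3)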
self.sync_all()
# Split network into two
self.split_network()
# create and sign a transaction
utxos = self.nodes[2].listunspent()
utxo = utxos[0]
change = '%.8f' % (float(utxo['amount']) - 1.0003)
recipientDict = {
self.nodes[0].getnewaddress(): 1,
self.nodes[2].getnewaddress(): change,
}
utxoDicts = [{
'txid': utxo['txid'],
'vout': utxo['vout'],
}]
signedtxres = self.nodes[2].signrawtransaction(
self.nodes[2].createrawtransaction(utxoDicts, recipientDict))
assert signedtxres['complete']
signedtx = signedtxres['hex']
# send from nodes[1]; this will end up in aa1
txid1 = self.nodes[1].sendrawtransaction(signedtx)
# generate bb1-bb2 on right side
self.nodes[2].generate(2)
# send from nodes[2]; this will end up in bb3
txid2 = self.nodes[2].sendrawtransaction(signedtx)
assert_equal(txid1, txid2)
# generate on both sides
lastblockhash = self.nodes[1].generate(3)[2]
self.nodes[2].generate(2)
self.join_network()
self.sync_all()
# gettransaction should work for txid1
self.nodes[0].gettransaction(txid1)
# listsinceblock(lastblockhash) should now include txid1 in transactions
# as well as in removed
lsbres = self.nodes[0].listsinceblock(lastblockhash)
assert any(tx['txid'] == txid1 for tx in lsbres['transactions'])
assert any(tx['txid'] == txid1 for tx in lsbres['removed'])
# find transaction and ensure confirmations is valid
for tx in lsbres['transactions']:
if tx['txid'] == txid1:
assert_equal(tx['confirmations'], 2)
# the same check for the removed array; confirmations should STILL be 2
for tx in lsbres['removed']:
if tx['txid'] == txid1:
assert_equal(tx['confirmations'], 2)
if __name__ == '__main__':
ListSinceBlockTest().main()
|
# Copyright (c) 2015-2018 Software AG, Darmstadt, Germany and/or Software AG USA Inc., Reston, VA, USA, and/or its subsidiaries and/or its affiliates and/or their licensors.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pysys.constants import *
from pysys.basetest import BaseTest
from apama.correlator import CorrelatorHelper
class PySysTest(BaseTest):
def execute(self):
correlator = CorrelatorHelper(self, name='correlator')
correlator.start(logfile='correlator.log', config=os.path.join(PROJECT.TEST_SUBJECT_DIR, 'initialization.yaml'))
correlator.flush()
correlator.injectEPL(filenames='testUtils.mon', filedir=PROJECT.UTILS_DIR)
# Start test results receiver
correlator.receive(filename='TestResult.evt', channels=['TestResult'], logChannels=True)
# Inject test
correlator.injectEPL(filenames=['test.mon'])
# wait for all events to be processed
correlator.flush()
# wait for test to complete
self.waitForSignal('TestResult.evt', expr="TestComplete", condition="==1", timeout=5)
def validate(self):
# check the main correlator log for Errors
self.assertGrep('correlator.log', expr=' (ERROR|FATAL) ', contains=False)
# Check that the test didn't fail
self.assertGrep('TestResult.evt', expr='TestFailed', contains=False)
|
import datetime
import importlib
import urllib.parse
from . import git_vcs
from ..error_state import HasErrorState
from ..reporter import ReportObserver, Reporter
from ...lib import utils
from ...lib.gravity import Dependency
__all__ = [
"GithubToken",
"GithubMainVcs"
]
github = None
def get_time():
return datetime.datetime.utcnow().replace(microsecond=0).isoformat() + "Z"
class GithubToken(HasErrorState):
@staticmethod
def define_arguments(argument_parser):
parser = argument_parser.get_or_create_group("GitHub", "GitHub repository settings")
parser.add_argument("--github-app-id", "-gta", dest="integration_id", metavar="GITHUB_APP_ID",
help="GitHub application ID to use for check run report. Only GitHub App "
"can report a check run result! If you don't have an App for reporting purposes, "
"please don't use ``--report-to-review`` with GitHub")
parser.add_argument("--github-private-key", "-gtk", dest="key", metavar="GITHUB_PRIVATE_KEY",
help="GitHub App private key for obtaining installation authentication token. "
"Pass raw key data via environment variable or pass a file path to read the key from "
"by starting the value string with '@'. File path can be either absolute or relative")
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.check_required_option("integration_id", """
The GitHub App ID is not specified.
Only the GitHub Application owner knows this ID. If you are the App owner, please
check your App's general settings. If not, please contact the App owner for this
information.
Please note that 'universum github-handler' DOES NOT pass App ID to CI builds.
It is assumed that one CI configuration (job, build, workflow) serves as one
GitHub App. Because of that, it is required to specify App ID within the CI
configuration.
Please specify the GitHub App ID by using '--github-app-id' ('-gta') command
line parameter or by setting GITHUB_APP_ID environment variable.
""")
self.key = self.read_and_check_multiline_option("key", """
The GitHub App private key is not specified.
Please note that 'universum github-handler' DOES NOT pass private key to CI
builds. It is assumed that one CI configuration (job, build, workflow) serves as
one GitHub App. Because of that, it is required to specify private key within
the CI configuration.
As the private key is a multiline string, it is not convenient to pass it
directly via command line. If you start the parameter with '@', the rest should be
the path to a file containing the key. The path can be absolute or relative to the
project root. You can also store the key in GITHUB_PRIVATE_KEY environment variable.
""")
global github
try:
github = importlib.import_module("github")
except ImportError as e:
text = "Error: using GitHub Handler or VCS type 'github' requires Python package 'pygithub' " \
"to be installed to the system for correct GitHub App token processing. " \
"It also requires Python package 'cryptography' to be installed in addition. " \
"Please refer to 'Prerequisites' chapter of project documentation for detailed instructions"
raise ImportError(text) from e
self.token_issued = None
self._token = None
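# Exchange the App ID and private key for a short-lived installation access token via PyGithub's GithubIntegration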
def _get_token(self, installation_id):
integration = github.GithubIntegration(self.settings.integration_id, self.key)
auth_obj = integration.get_access_token(installation_id)
return auth_obj.token
def get_token(self, installation_id):
if self._token:
token_age = (datetime.datetime.now() - self.token_issued).total_seconds() / 60
if token_age < 55: # GitHub token lasts for one hour
return self._token
self._token = self._get_token(installation_id)
self.token_issued = datetime.datetime.now()
return self._token
class GithubTokenWithInstallation(GithubToken):
@staticmethod
def define_arguments(argument_parser):
parser = argument_parser.get_or_create_group("GitHub", "GitHub repository settings")
parser.add_argument("--github-installation-id", "-gti", dest="installation_id",
metavar="GITHUB_INSTALLATION_ID",
help="GitHub installation ID identifies specific app installation into user account "
"or organization. Can be retrieved from web-hook or obtained via REST API; "
"in standard workflow should be received from 'universum github-handler'")
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.check_required_option("installation_id", """
The GitHub App installation ID is not specified.
An installation refers to any user or organization account that has installed
the app. Even if someone installs the app on more than one repository, it only
counts as one installation because it's within the same account. Installation ID
can be retrieved via REST API or simply parsed from GitHub App web-hook.
If using 'universum github-handler', installation ID is automatically extracted
from the webhook payload and passed via GITHUB_INSTALLATION_ID environment
variable.
""")
def get_token(self, installation_id=None):
return super().get_token(installation_id=self.settings.installation_id)
class GithubMainVcs(ReportObserver, git_vcs.GitMainVcs, GithubTokenWithInstallation):
"""
This class mostly contains functions for the GitHub report observer
"""
reporter_factory = Dependency(Reporter)
@staticmethod
def define_arguments(argument_parser):
parser = argument_parser.get_or_create_group("GitHub", "GitHub repository settings")
parser.add_argument("--github-check-name", "-ghc", dest="check_name", metavar="GITHUB_CHECK_NAME",
default="Universum check", help="The name of Github check run")
parser.add_argument("--github-check-id", "-ghi", dest="check_id", metavar="GITHUB_CHECK_ID",
help="Github check run ID")
parser.add_argument("--github-api-url", "-gha", dest="api_url", metavar="GITHUB_API_URL",
default="https://api.github.com/", help="API URL for integration")
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.reporter = None
self.check_required_option("checkout_id", """
The git checkout id for github is not specified.
For github the git checkout id defines the commit to be checked and reported.
Please specify the checkout id by using '--git-checkout-id' ('-gco') command
line parameter or by setting GIT_CHECKOUT_ID environment variable.
If using 'universum github-handler', the checkout ID is automatically extracted
from the webhook payload and passed via GIT_CHECKOUT_ID environment variable.
""")
self.check_required_option("check_id", """
The GitHub Check Run ID is not specified.
GitHub check runs each have a unique ID that is used to update the check result. To
integrate Universum with GitHub, the check run should already be created before
performing the actual check. Please specify the check run ID by using the
'--github-check-id' ('-ghi') command line parameter or by setting
GITHUB_CHECK_ID environment variable.
If using 'universum github-handler', the check ID is automatically extracted
from the webhook payload and passed via GITHUB_CHECK_ID environment variable.
""")
self.request = dict()
self.request["status"] = "in_progress"
self.request["output"] = {
"title": self.settings.check_name,
"summary": ""
}
def _clone(self, history_depth, destination_directory, clone_url):
parsed_repo = urllib.parse.urlsplit(clone_url)
if parsed_repo.scheme == "https" and not parsed_repo.username:
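# Embed the installation access token into the URL netloc so git can authenticate the HTTPS clone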
new_netloc = "x-access-token:{}@{}".format(self.get_token(), parsed_repo.netloc)
parsed_repo = (parsed_repo.scheme, new_netloc, parsed_repo.path, parsed_repo.query, parsed_repo.fragment)
clone_url = urllib.parse.urlunsplit(parsed_repo)
super()._clone(history_depth, destination_directory, clone_url)
def code_review(self):
self.reporter = self.reporter_factory()
self.reporter.subscribe(self)
return self
def update_review_version(self):
self.out.log("GitHub has no review versions")
def get_review_link(self):
return self.settings.repo.rsplit(".git", 1)[0] + "/runs/" + self.settings.check_id
def is_latest_version(self): # pylint: disable=no-self-use
return True
def _report(self):
repo_path = str(urllib.parse.urlsplit(self.settings.repo).path).rsplit(".git", 1)[0]
check_url = self.settings.api_url + "repos" + repo_path + "/check-runs/" + self.settings.check_id
headers = {
"Accept": "application/vnd.github.antiope-preview+json",
"Authorization": "token " + self.get_token()
}
utils.make_request(check_url, request_method="PATCH", json=self.request, headers=headers)
def code_report_to_review(self, report):
# git show returns a string with one changed file name per line;
# the first line contains the commit id and commit message, so it is skipped
commit_files = self.repo.git.show("--name-only", "--oneline", self.settings.checkout_id).split('\n')[1:]
comments = []
for path, issues in report.items():
if path in commit_files:
for issue in issues:
comments.append(dict(path=path,
message=issue['message'],
start_line=issue['line'],
end_line=issue['line'],
annotation_level="warning"))
self.request["output"]["annotations"] = comments
self._report()
def report_start(self, report_text):
self.request["started_at"] = get_time()
self.request["output"]["summary"] = report_text
self._report()
def report_result(self, result, report_text=None, no_vote=False):
if result:
conclusion = "success"
else:
conclusion = "failure"
if not report_text:
report_text = ""
self.request["status"] = "completed"
self.request["completed_at"] = get_time()
self.request["conclusion"] = conclusion
self.request["output"]["summary"] = report_text
self._report()
|
from .ideal_user import IdealUser
from .logit_user import LogitUser
from .null_user import NullUser
from .ransam_multiple_prior_user import RanSamMultiplePriorUser
from .ransam_prior_user import RanSamPriorUser
from .ransam_smooth_user import RanSamSmoothUser
from .ransam_universal_user import RanSamUniversalUser
from .ransam_user import RanSamUser
|
# Copyright (c) 2020 Graphcore Ltd. All rights reserved.
import tempfile
from tensorflow.python.ipu.config import IPUConfig
import numpy as np
from functools import partial
import tensorflow.compat.v1 as tf
from tensorflow.python import ipu
from ipu_sparse_ops import sparse, optimizers
import os
import logging
os.sys.path.append("../../") # dynamic_sparsity
from ipu_sparse_ops.model_baseclass import SparseModelOptions # noqa: E402
from ipu_sparse_ops.transformer.transformer_baseclass import TransformerOptions # noqa: E402
from ipu_sparse_ops.transformer.transformer_dense import DenseTransformer # noqa: E402
from ipu_sparse_ops.transformer.transformer_dynsparse import DynsparseTransformer # noqa: E402
# disable TF 2.0
tf.disable_eager_execution()
tf.disable_v2_behavior()
def get_program_arguments():
transformer_parser = TransformerOptions()
SparseModelOptions.add_all_arguments(transformer_parser)
transformer_parser.add_argument("--profile", action="store_true",
help="Enable profiling for mem profile")
default_settings = dict(
dtype=tf.float32,
source_sequence_length=12,
hidden_length=16,
ff_length=64,
attention_heads=1,
qkv_length=16,
sparsity=0.9,
batch_size=1,
random_seed=11,
pooling_type='NONE',
dropout_keep_prob=1
)
transformer_parser.set_defaults(**default_settings)
return transformer_parser.parse_args()
def stream_dense_grads_from_device(transformer, loss, ops=None):
# This will create tensorflow ops which have to be
# run in a session to retrieve the result
ops = {} if ops is None else ops
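# For each sparse layer, fetch the gradient w.r.t. the equivalent dense weight so it can later be compared against the reference dense model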
for name, sparse_layer in transformer.sparse_layers.items():
with tf.variable_scope(name, reuse=True):
dense_grad_w = sparse_layer.get_dense_grad_w(loss)
ops[name + '_grad_w'] = tf.convert_to_tensor(dense_grad_w)
return ops
def sparse_transformer_fwd_and_grad(transformer, input_activation):
transformer.compute_dense_grad = True
output_activation = transformer.encoder_layer(input_activation, mask=None, compute_dense_grad=True, debug_name="layer_0")
loss = tf.reduce_sum(output_activation)
# Wrap the optimizer (this would help manage the slot variables)
optimizer = optimizers.SparseOptimizer(tf.train.AdamOptimizer)
optimizer = optimizer(learning_rate=1e-3, sparse_layers=transformer.sparse_layers.values())
grads = optimizer.compute_gradients(loss)
input_grad = tf.gradients(loss, input_activation)[0]
with tf.control_dependencies([input_grad]):
train_op = optimizer.apply_gradients(grads)
with tf.control_dependencies([train_op]):
streamOps = {"output_activation": output_activation}
streamOps["input_grad"] = input_grad
# Sparse grads
for grad, var in grads:
streamOps[var.op.name + "_grad"] = grad
# Dense grads
stream_dense_grads_from_device(transformer, loss, streamOps)
return streamOps
def dense_transformer_fwd_and_grad(transformer, input_activation):
output_activation = transformer.encoder_layer(input_activation, mask=None, debug_name="layer_0")
loss = tf.reduce_sum(output_activation)
optimizer = tf.train.AdamOptimizer(learning_rate=1e-3)
grads = optimizer.compute_gradients(loss)
input_grad = tf.gradients(loss, input_activation)[0]
with tf.control_dependencies([input_grad]):
train_op = optimizer.apply_gradients(grads)
with tf.control_dependencies([train_op]):
streamOps = {"output_activation": output_activation}
streamOps["input_grad"] = input_grad
for grad, var in grads:
streamOps[var.op.name + "_grad"] = grad
return streamOps
def main(args):
tf.logging.set_verbosity(tf.logging.ERROR)
np.set_printoptions(linewidth=200)
random_seed = args.random_seed
checkpoint_path = os.path.join(tempfile.mkdtemp(), "model.ckpt")
# Input activations for the attention layer
random_gen = np.random.default_rng(seed=random_seed)
activations_np = random_gen.uniform(-0.1, 0.1, size=(args.batch_size, args.source_sequence_length, args.hidden_length))
# Configure the IPU
cfg = IPUConfig()
cfg.auto_select_ipus = 1
cfg.configure_ipu_system()
# Build IPU graphs
sparse_decoder_graph = tf.Graph()
sparse_transformer = DynsparseTransformer(args)
with sparse_decoder_graph.as_default():
with tf.device("cpu"):
# placeholder for activations
# weight placeholders are created inside sparse_transformer
inputs_ph = tf.placeholder(args.dtype, activations_np.shape)
with ipu.scopes.ipu_scope("/device:IPU:0"):
sparse_decoder = partial(sparse_transformer_fwd_and_grad, sparse_transformer)
sparse_decoder_fetches = ipu.ipu_compiler.compile(sparse_decoder, [inputs_ph])
ipu.utils.move_variable_initialization_to_cpu()
# sparse-decoder
with tf.Session(graph=sparse_decoder_graph) as sess:
# initialize weights
sess.run(tf.global_variables_initializer())
# Save the sparse weights to checkpoint as dense
sparse_transformer.checkpointAsDense(checkpoint_path)
# run sparse decoder
sparse_result = sess.run(sparse_decoder_fetches, feed_dict={inputs_ph: activations_np})
# Create a dense transformer and initialize the weights to the values that
# the sparse model was initialized with originally
dense_decoder_graph = tf.Graph()
dense_transformer = DenseTransformer(args)
with dense_decoder_graph.as_default():
with tf.device("cpu"):
# placeholder for activations
# weights will get streamed from checkpoint
inputs_ph = tf.placeholder(args.dtype, activations_np.shape)
with ipu.scopes.ipu_scope("/device:IPU:0"):
dense_decoder_fetches = partial(dense_transformer_fwd_and_grad, dense_transformer)
dense_graph = ipu.ipu_compiler.compile(dense_decoder_fetches, [inputs_ph])
ipu.utils.move_variable_initialization_to_cpu()
with tf.device("cpu"):
# We will only load the trainable variables, not momentum etc.
loader = tf.train.Saver(tf.trainable_variables())
# dense-decoder
with tf.Session(graph=dense_decoder_graph) as sess:
# Initialize momentums, which are not part of the checkpoint
sess.run(tf.global_variables_initializer())
# Restore saved trainable variables
loader.restore(sess, checkpoint_path)
dense_result = sess.run(dense_graph, feed_dict={inputs_ph: activations_np})
# TEST
rtol = 1e-05
atol = 1e-05
if args.dtype == tf.float16:
rtol = 1e-04
atol = 1e-02
# Compare model output activations (actual vs. desired) -> (sparse vs. dense)
np.testing.assert_allclose(sparse_result["output_activation"], dense_result["output_activation"],
atol=atol, rtol=rtol, err_msg="Output activations do not match.")
# Compare gradient of output wrt. input
np.testing.assert_allclose(sparse_result["input_grad"], dense_result["input_grad"],
atol=atol, rtol=rtol, err_msg="Grads wrt. inputs do not match")
# Compare the dense_w and sparse grads of every sparse layer
for name, sparse_layer in sparse_transformer.sparse_layers.items():
# Compare the dense grads
dense_grad = dense_result[name + "/weight" + "_grad"]
sparse_grad_w = sparse_result[name + "_grad_w"]
np.testing.assert_allclose(sparse_grad_w, dense_grad, atol=atol, rtol=rtol,
err_msg=f"Dense grads for layer {name} do not match")
# Compare the sparse grads
sparse_grad_padded = sparse_result[name + "/sparse_layer/nz_values_grad"]
sparse_grad_data = sparse.SparseRepresentation(sparse_layer.weights.get_metainfo(), sparse_grad_padded)
i, j, sparse_grad = sparse.triplets_from_representation(sparse_layer.weights.spec, sparse_grad_data, sparse_layer.weights.matmul_options)
# Convert dense grads to blocks
block_size, _ = sparse_layer.get_nonzero_blocks_shape()
nx, ny = dense_grad.shape[0] // block_size, dense_grad.shape[1] // block_size
strides = np.array(dense_grad.strides) # strides are in bytes
strides = tuple(strides * block_size) + tuple(strides)
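# as_strided builds a (nx, ny, block_size, block_size) view that tiles the dense gradient into non-overlapping blocks without copying data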
blocked_dense_grad = np.lib.stride_tricks.as_strided(dense_grad, (nx, ny, block_size, block_size), strides)
if block_size == 1:
blocked_dense_grad = np.squeeze(np.copy(blocked_dense_grad), axis=(-2, -1))
np.testing.assert_allclose(sparse_grad, blocked_dense_grad[i, j], atol=atol, rtol=rtol,
err_msg=f"Sparse grads for layer {name} do not match")
print("All results match.")
return sparse_result, dense_result
if __name__ == "__main__":
logging.basicConfig(
level=logging.getLevelName("DEBUG"),
format='%(asctime)s %(name)s %(levelname)s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
args = get_program_arguments()
a, b = main(args)
|
import os
import random
import anndata
import numpy as np
import pandas as pd
import pytest
import scipy.sparse as sparse
from scipy.sparse.csr import csr_matrix
import scvi
from scvi import _CONSTANTS
from scvi.data import (
register_tensor_from_anndata,
setup_anndata,
synthetic_iid,
transfer_anndata_setup,
view_anndata_setup,
)
from scvi.data._anndata import get_from_registry
from scvi.dataloaders import AnnTorchDataset
def test_transfer_anndata_setup():
# test transfer_anndata function
adata1 = synthetic_iid(run_setup_anndata=False)
adata2 = synthetic_iid(run_setup_anndata=False)
adata2.X = adata1.X
setup_anndata(adata1)
transfer_anndata_setup(adata1, adata2)
np.testing.assert_array_equal(
adata1.obs["_scvi_local_l_mean"], adata2.obs["_scvi_local_l_mean"]
)
# test if layer was used initially, again used in transfer setup
adata1 = synthetic_iid(run_setup_anndata=False)
adata2 = synthetic_iid(run_setup_anndata=False)
raw_counts = adata1.X.copy()
adata1.layers["raw"] = raw_counts
adata2.layers["raw"] = raw_counts
zeros = np.zeros_like(adata1.X)
ones = np.ones_like(adata1.X)
adata1.X = zeros
adata2.X = ones
setup_anndata(adata1, layer="raw")
transfer_anndata_setup(adata1, adata2)
np.testing.assert_array_equal(
adata1.obs["_scvi_local_l_mean"], adata2.obs["_scvi_local_l_mean"]
)
# test that an unknown batch throws an error
adata1 = synthetic_iid()
adata2 = synthetic_iid(run_setup_anndata=False)
adata2.obs["batch"] = [2] * adata2.n_obs
with pytest.raises(ValueError):
transfer_anndata_setup(adata1, adata2)
# TODO: test that a batch with wrong dtype throws an error
# adata1 = synthetic_iid()
# adata2 = synthetic_iid(run_setup_anndata=False)
# adata2.obs["batch"] = ["0"] * adata2.n_obs
# with pytest.raises(ValueError):
# transfer_anndata_setup(adata1, adata2)
# test that an unknown label throws an error
adata1 = synthetic_iid()
adata2 = synthetic_iid(run_setup_anndata=False)
adata2.obs["labels"] = ["label_123"] * adata2.n_obs
with pytest.raises(ValueError):
transfer_anndata_setup(adata1, adata2)
# test that correct mapping was applied
adata1 = synthetic_iid()
adata2 = synthetic_iid(run_setup_anndata=False)
adata2.obs["labels"] = ["label_1"] * adata2.n_obs
transfer_anndata_setup(adata1, adata2)
labels_mapping = adata1.uns["_scvi"]["categorical_mappings"]["_scvi_labels"][
"mapping"
]
correct_label = np.where(labels_mapping == "label_1")[0][0]
adata2.obs["_scvi_labels"][0] == correct_label
# test that transfer_anndata_setup correctly looks for adata.obs['batch']
adata1 = synthetic_iid()
adata2 = synthetic_iid(run_setup_anndata=False)
del adata2.obs["batch"]
with pytest.raises(KeyError):
transfer_anndata_setup(adata1, adata2)
# test that transfer_anndata_setup assigns same batch and label to cells
# if the original anndata was also same batch and label
adata1 = synthetic_iid(run_setup_anndata=False)
setup_anndata(adata1)
adata2 = synthetic_iid(run_setup_anndata=False)
del adata2.obs["batch"]
transfer_anndata_setup(adata1, adata2)
assert adata2.obs["_scvi_batch"][0] == 0
assert adata2.obs["_scvi_labels"][0] == 0
# test that if a category mapping is a subset, transfer anndata is called
a1 = scvi.data.synthetic_iid()
a2 = scvi.data.synthetic_iid(run_setup_anndata=False)
a2.obs["batch"] = "batch_1"
scvi.data.setup_anndata(a2, batch_key="batch")
m = scvi.model.SCVI(a1)
m.train(1)
m.get_latent_representation(a2)
assert a2.obs["_scvi_batch"].all() == 1
def test_data_format():
# if data was a dense np array, check that after setup_anndata the data is C_CONTIGUOUS
adata = synthetic_iid(run_setup_anndata=False)
old_x = adata.X
old_pro = adata.obsm["protein_expression"]
old_obs = adata.obs
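# Force Fortran (column-major) layout so that setup_anndata has to convert the data back to C-contiguous order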
adata.X = np.asfortranarray(old_x)
adata.obsm["protein_expression"] = np.asfortranarray(old_pro)
assert adata.X.flags["C_CONTIGUOUS"] is False
assert adata.obsm["protein_expression"].flags["C_CONTIGUOUS"] is False
setup_anndata(adata, protein_expression_obsm_key="protein_expression")
assert adata.X.flags["C_CONTIGUOUS"] is True
assert adata.obsm["protein_expression"].flags["C_CONTIGUOUS"] is True
assert np.array_equal(old_x, adata.X)
assert np.array_equal(old_pro, adata.obsm["protein_expression"])
assert np.array_equal(old_obs, adata.obs)
assert np.array_equal(adata.X, get_from_registry(adata, _CONSTANTS.X_KEY))
assert np.array_equal(
adata.obsm["protein_expression"],
get_from_registry(adata, _CONSTANTS.PROTEIN_EXP_KEY),
)
# if obsm is a dataframe, make it C_CONTIGUOUS if it isn't
adata = synthetic_iid()
pe = np.asfortranarray(adata.obsm["protein_expression"])
adata.obsm["protein_expression"] = pd.DataFrame(pe, index=adata.obs_names)
assert adata.obsm["protein_expression"].to_numpy().flags["C_CONTIGUOUS"] is False
setup_anndata(adata, protein_expression_obsm_key="protein_expression")
new_pe = get_from_registry(adata, "protein_expression")
assert new_pe.to_numpy().flags["C_CONTIGUOUS"] is True
assert np.array_equal(pe, new_pe)
assert np.array_equal(adata.X, get_from_registry(adata, _CONSTANTS.X_KEY))
assert np.array_equal(
adata.obsm["protein_expression"],
get_from_registry(adata, _CONSTANTS.PROTEIN_EXP_KEY),
)
def test_setup_anndata():
# test regular setup
adata = synthetic_iid(run_setup_anndata=False)
setup_anndata(
adata,
batch_key="batch",
labels_key="labels",
protein_expression_obsm_key="protein_expression",
protein_names_uns_key="protein_names",
)
np.testing.assert_array_equal(
get_from_registry(adata, "batch_indices"),
np.array(adata.obs["_scvi_batch"]).reshape((-1, 1)),
)
np.testing.assert_array_equal(
get_from_registry(adata, "labels"),
np.array(adata.obs["labels"].cat.codes).reshape((-1, 1)),
)
np.testing.assert_array_equal(get_from_registry(adata, "X"), adata.X)
np.testing.assert_array_equal(
get_from_registry(adata, "protein_expression"),
adata.obsm["protein_expression"],
)
np.testing.assert_array_equal(
adata.uns["_scvi"]["protein_names"], adata.uns["protein_names"]
)
# test that error is thrown if its a view:
adata = synthetic_iid()
with pytest.raises(ValueError):
setup_anndata(adata[1])
# If obsm is a df and protein_names_uns_key is None, protein names should be grabbed from the columns of the df
adata = synthetic_iid()
new_protein_names = np.array(random.sample(range(100), 100)).astype("str")
df = pd.DataFrame(
adata.obsm["protein_expression"],
index=adata.obs_names,
columns=new_protein_names,
)
adata.obsm["protein_expression"] = df
setup_anndata(adata, protein_expression_obsm_key="protein_expression")
np.testing.assert_array_equal(
adata.uns["_scvi"]["protein_names"], new_protein_names
)
# test that layer is working properly
adata = synthetic_iid()
true_x = adata.X
adata.layers["X"] = true_x
adata.X = np.ones_like(adata.X)
setup_anndata(adata, layer="X")
np.testing.assert_array_equal(get_from_registry(adata, "X"), true_x)
# test that default batch and labels are created if no keys are passed
adata = synthetic_iid()
setup_anndata(
adata,
protein_expression_obsm_key="protein_expression",
protein_names_uns_key="protein_names",
)
np.testing.assert_array_equal(
get_from_registry(adata, "batch_indices"), np.zeros((adata.shape[0], 1))
)
np.testing.assert_array_equal(
get_from_registry(adata, "labels"), np.zeros((adata.shape[0], 1))
)
def test_save_setup_anndata(save_path):
adata = synthetic_iid()
adata.write(os.path.join(save_path, "test.h5ad"))
def test_extra_covariates():
adata = synthetic_iid()
adata.obs["cont1"] = np.random.normal(size=(adata.shape[0],))
adata.obs["cont2"] = np.random.normal(size=(adata.shape[0],))
adata.obs["cat1"] = np.random.randint(0, 5, size=(adata.shape[0],))
adata.obs["cat2"] = np.random.randint(0, 5, size=(adata.shape[0],))
setup_anndata(
adata,
batch_key="batch",
labels_key="labels",
protein_expression_obsm_key="protein_expression",
protein_names_uns_key="protein_names",
continuous_covariate_keys=["cont1", "cont2"],
categorical_covariate_keys=["cat1", "cat2"],
)
m = scvi.model.SCVI(adata)
m.train(1)
df1 = adata.obsm["_scvi_extra_continuous"]
df2 = adata.obs[["cont1", "cont2"]]
pd.testing.assert_frame_equal(df1, df2)
def test_extra_covariates_transfer():
adata = synthetic_iid()
adata.obs["cont1"] = np.random.normal(size=(adata.shape[0],))
adata.obs["cont2"] = np.random.normal(size=(adata.shape[0],))
adata.obs["cat1"] = np.random.randint(0, 5, size=(adata.shape[0],))
adata.obs["cat2"] = np.random.randint(0, 5, size=(adata.shape[0],))
setup_anndata(
adata,
batch_key="batch",
labels_key="labels",
protein_expression_obsm_key="protein_expression",
protein_names_uns_key="protein_names",
continuous_covariate_keys=["cont1", "cont2"],
categorical_covariate_keys=["cat1", "cat2"],
)
bdata = synthetic_iid()
bdata.obs["cont1"] = np.random.normal(size=(bdata.shape[0],))
bdata.obs["cont2"] = np.random.normal(size=(bdata.shape[0],))
bdata.obs["cat1"] = 0
bdata.obs["cat2"] = 1
transfer_anndata_setup(adata_source=adata, adata_target=bdata)
# give it a new category
del bdata.uns["_scvi"]
bdata.obs["cat1"] = 6
transfer_anndata_setup(
adata_source=adata, adata_target=bdata, extend_categories=True
)
assert bdata.uns["_scvi"]["extra_categoricals"]["mappings"]["cat1"][-1] == 6
def test_register_tensor_from_anndata():
adata = synthetic_iid()
adata.obs["cont1"] = np.random.normal(size=(adata.shape[0],))
register_tensor_from_anndata(
adata, registry_key="test", adata_attr_name="obs", adata_key_name="cont1"
)
assert "test" in adata.uns["_scvi"]["data_registry"]
assert adata.uns["_scvi"]["data_registry"]["test"] == dict(
attr_name="obs", attr_key="cont1"
)
def test_anntorchdataset_getitem():
adata = synthetic_iid()
setup_anndata(
adata,
batch_key="batch",
labels_key="labels",
protein_expression_obsm_key="protein_expression",
protein_names_uns_key="protein_names",
)
# check that we can successfully pass in a list of tensors to get
tensors_to_get = ["batch_indices", "local_l_var"]
bd = AnnTorchDataset(adata, getitem_tensors=tensors_to_get)
np.testing.assert_array_equal(tensors_to_get, list(bd[1].keys()))
# check that we can successfully pass in a dict of tensors and their associated types
bd = AnnTorchDataset(
adata, getitem_tensors={"X": np.int, "local_l_var": np.float64}
)
assert bd[1]["X"].dtype == np.int64
assert bd[1]["local_l_var"].dtype == np.float64
# check that by default we get all the registered tensors
bd = AnnTorchDataset(adata)
all_registered_tensors = list(adata.uns["_scvi"]["data_registry"].keys())
np.testing.assert_array_equal(all_registered_tensors, list(bd[1].keys()))
assert bd[1]["X"].shape[0] == bd.adata.uns["_scvi"]["summary_stats"]["n_vars"]
# check that AnnTorchDataset returns numpy array
adata1 = synthetic_iid()
bd = AnnTorchDataset(adata1)
for key, value in bd[1].items():
assert type(value) == np.ndarray
# check AnnTorchDataset returns numpy array if counts were sparse
adata = synthetic_iid(run_setup_anndata=False)
adata.X = sparse.csr_matrix(adata.X)
setup_anndata(adata)
bd = AnnTorchDataset(adata)
for key, value in bd[1].items():
assert type(value) == np.ndarray
# check AnnTorchDataset returns numpy array if pro exp was sparse
adata = synthetic_iid(run_setup_anndata=False)
adata.obsm["protein_expression"] = sparse.csr_matrix(
adata.obsm["protein_expression"]
)
setup_anndata(
adata, batch_key="batch", protein_expression_obsm_key="protein_expression"
)
bd = AnnTorchDataset(adata)
for key, value in bd[1].items():
assert type(value) == np.ndarray
# check pro exp is being returned as numpy array even if it's a DF
adata = synthetic_iid(run_setup_anndata=False)
adata.obsm["protein_expression"] = pd.DataFrame(
adata.obsm["protein_expression"], index=adata.obs_names
)
setup_anndata(
adata, batch_key="batch", protein_expression_obsm_key="protein_expression"
)
bd = AnnTorchDataset(adata)
for key, value in bd[1].items():
assert type(value) == np.ndarray
def test_view_anndata_setup(save_path):
adata = synthetic_iid(run_setup_anndata=False)
adata.obs["cont1"] = np.random.uniform(5, adata.n_obs)
adata.obs["cont2"] = np.random.uniform(5, adata.n_obs)
adata.obs["cont1"][0] = 939543895847598301.423432423523512351234123421341234
adata.obs["cont2"][1] = 0.12938471298374691827634
adata.obs["cat1"] = np.random.randint(0, 5, adata.n_obs).astype(str)
adata.obs["cat1"][8] = "asdf"
adata.obs["cat1"][9] = "f34"
adata.obs["cat2"] = np.random.randint(0, 7, adata.n_obs)
setup_anndata(
adata,
protein_expression_obsm_key="protein_expression",
batch_key="batch",
labels_key="labels",
categorical_covariate_keys=["cat1", "cat2"],
continuous_covariate_keys=["cont1", "cont2"],
)
# test it works with adata
view_anndata_setup(adata)
# test it works with scvi setup dict
view_anndata_setup(adata.uns["_scvi"])
adata = scvi.data.synthetic_iid()
m = scvi.model.SCVI(adata)
folder_path = os.path.join(save_path, "tmp")
m.save(folder_path, save_anndata=True)
# test it works with a saved model folder
view_anndata_setup(folder_path)
adata_path = os.path.join(folder_path, "adata.h5ad")
# test it works with the path to an anndata
view_anndata_setup(adata_path)
m = scvi.model.SCVI(adata)
m.save(folder_path, overwrite=True)
# test it works without saving the anndata
view_anndata_setup(folder_path)
# test it throws error if adata was not setup
with pytest.raises(ValueError):
adata = synthetic_iid(run_setup_anndata=False)
view_anndata_setup(adata)
# test it throws error if we dont pass dict, anndata or str in
with pytest.raises(ValueError):
view_anndata_setup(0)
def test_saving(save_path):
save_path = os.path.join(save_path, "tmp_adata.h5ad")
adata = synthetic_iid(run_setup_anndata=False)
adata.obs["cont1"] = np.random.uniform(5, adata.n_obs)
adata.obs["cont2"] = np.random.uniform(5, adata.n_obs)
adata.obs["cat1"] = np.random.randint(0, 3, adata.n_obs).astype(str)
adata.obs["cat1"][1] = "asdf"
adata.obs["cat1"][2] = "f34"
adata.obs["cat2"] = np.random.randint(0, 7, adata.n_obs)
setup_anndata(
adata,
protein_expression_obsm_key="protein_expression",
batch_key="batch",
labels_key="labels",
categorical_covariate_keys=["cat1", "cat2"],
continuous_covariate_keys=["cont1", "cont2"],
)
adata.write(save_path)
anndata.read(save_path)
def test_backed_anndata(save_path):
adata = scvi.data.synthetic_iid()
path = os.path.join(save_path, "test_data.h5ad")
adata.write_h5ad(path)
adata = anndata.read_h5ad(path, backed="r+")
setup_anndata(adata, batch_key="batch")
# test get item
bd = AnnTorchDataset(adata)
bd[np.arange(adata.n_obs)]
# sparse
adata = scvi.data.synthetic_iid()
adata.X = csr_matrix(adata.X)
path = os.path.join(save_path, "test_data2.h5ad")
adata.write_h5ad(path)
adata = anndata.read_h5ad(path, backed="r+")
setup_anndata(adata, batch_key="batch")
# test get item
bd = AnnTorchDataset(adata)
bd[np.arange(adata.n_obs)]
|
# -*- coding: utf-8 -*-
# FLEDGE_BEGIN
# See: http://fledge.readthedocs.io/
# FLEDGE_END
from functools import lru_cache
from aiohttp import web
from fledge.common.service_record import ServiceRecord
from fledge.common.storage_client.payload_builder import PayloadBuilder
from fledge.services.core.service_registry.service_registry import ServiceRegistry
from fledge.services.core.service_registry.exceptions import DoesNotExist
from fledge.services.core import connect
from fledge.common.configuration_manager import ConfigurationManager
from fledge.common.plugin_discovery import PluginDiscovery
__author__ = "Praveen Garg"
__copyright__ = "Copyright (c) 2018 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
_help = """
-------------------------------------------------------------------------------
| GET | /fledge/south |
-------------------------------------------------------------------------------
"""
async def _get_schedule_status(storage_client, svc_name):
payload = PayloadBuilder().SELECT("enabled").WHERE(['schedule_name', '=', svc_name]).payload()
result = await storage_client.query_tbl_with_payload('schedules', payload)
return True if result['rows'][0]['enabled'] == 't' else False
@lru_cache(maxsize=1024)
def _get_installed_plugins():
return PluginDiscovery.get_plugins_installed("south", False)
async def _services_with_assets(storage_client, south_services):
sr_list = list()
try:
try:
services_from_registry = ServiceRegistry.get(s_type="Southbound")
except DoesNotExist:
services_from_registry = []
def is_svc_in_service_registry(name):
return next((svc for svc in services_from_registry if svc._name == name), None)
installed_plugins = _get_installed_plugins()
for s_record in services_from_registry:
plugin, assets = await _get_tracked_plugin_assets_and_readings(storage_client, s_record._name)
plugin_version = ''
for p in installed_plugins:
if p["name"] == plugin:
plugin_version = p["version"]
break
# Services running on another machine have no schedule entry
sched_enable = 'unknown'
try:
sched_enable = await _get_schedule_status(storage_client, s_record._name)
except:
pass
sr_list.append(
{
'name': s_record._name,
'address': s_record._address,
'management_port': s_record._management_port,
'service_port': s_record._port,
'protocol': s_record._protocol,
'status': ServiceRecord.Status(int(s_record._status)).name.lower(),
'assets': assets,
'plugin': {'name': plugin, 'version': plugin_version},
'schedule_enabled': sched_enable
})
for s_name in south_services:
south_svc = is_svc_in_service_registry(s_name)
if not south_svc:
plugin, assets = await _get_tracked_plugin_assets_and_readings(storage_client, s_name)
plugin_version = ''
for p in installed_plugins:
if p["name"] == plugin:
plugin_version = p["version"]
break
# Handle schedule status when there is no schedule entry matching a South child category name
sch_status = 'unknown'
try:
sch_status = await _get_schedule_status(storage_client, s_name)
except:
pass
sr_list.append(
{
'name': s_name,
'address': '',
'management_port': '',
'service_port': '',
'protocol': '',
'status': '',
'assets': assets,
'plugin': {'name': plugin, 'version': plugin_version},
'schedule_enabled': sch_status
})
except:
raise
else:
return sr_list
async def _get_tracked_plugin_assets_and_readings(storage_client, svc_name):
asset_json = []
payload = PayloadBuilder().SELECT(["asset", "plugin"]).WHERE(['service', '=', svc_name]).\
AND_WHERE(['event', '=', 'Ingest']).payload()
try:
result = await storage_client.query_tbl_with_payload('asset_tracker', payload)
asset_records = result['rows']
plugin = ''
if len(result['rows']):
plugin = result['rows'][0]['plugin']
for r in asset_records:
payload = PayloadBuilder().SELECT("value").WHERE(["key", "=", r["asset"].upper()]).payload()
results = await storage_client.query_tbl_with_payload("statistics", payload)
if int(results['count']):
asset_result = results['rows'][0]
asset_json.append({"count": asset_result['value'], "asset": r["asset"]})
except:
raise
else:
return plugin, asset_json
async def get_south_services(request):
"""
Args:
request:
Returns:
list of all south services with tracked assets and readings count
:Example:
curl -X GET http://localhost:8081/fledge/south
"""
if 'cached' in request.query and request.query['cached'].lower() == 'false':
_get_installed_plugins.cache_clear()
storage_client = connect.get_storage_async()
cf_mgr = ConfigurationManager(storage_client)
try:
south_cat = await cf_mgr.get_category_child("South")
south_categories = [nc["key"] for nc in south_cat]
except:
return web.json_response({'services': []})
response = await _services_with_assets(storage_client, south_categories)
return web.json_response({'services': response})
|
{
# Journey Page - map tab
"uidJourneyTabMapPanel": {
W3Const.w3PropType: W3Const.w3TypePanel,
W3Const.w3PropSubUI: [
"uidMapOperationPanel",
"uidMapPanel"
]
},
# Operation
"uidMapOperationPanel": {
W3Const.w3PropType: W3Const.w3TypePanel,
W3Const.w3PropSubUI: [
"uidSelectedJourneyPanel",
"uidMapSearchPanel",
"uidSelectedPlacePanel",
]
},
# Operation - place
"uidSelectedPlacePanel": {
W3Const.w3PropType: W3Const.w3TypePanel,
W3Const.w3PropSubUI: [
"uidSelectedPlaceTable"
],
W3Const.w3PropCSS: {
"display": "none"
}
},
"uidSelectedPlaceTable": {
W3Const.w3PropType: W3Const.w3TypeTable,
W3Const.w3PropSubUI: [
[], # No header
["uidNameLabel", "uidMapPlaceName"],
["uidLatitudeLabel", "uidMapPlaceLatitude"],
["uidLongitudeLabel", "uidMapPlaceLongitude"],
["uidMapPlaceDatetimeLabel", "uidMapPlaceDatetime"],
["uidMapPlaceRemarkLabel", "uidMapPlaceRemark"],
["uidMapPlaceNoteLabel", "uidMapPlaceNote"],
["uidSelectedPOIID", "uidMapPlaceOperationTable"]
]
},
"uidLatitudeLabel": {
W3Const.w3PropType: W3Const.w3TypeLabel,
W3Const.w3PropString: "sidLatitudeLabel",
W3Const.w3PropClass: "cidRightLabel"
},
"uidLongitudeLabel": {
W3Const.w3PropType: W3Const.w3TypeLabel,
W3Const.w3PropString: "sidLongitudeLabel",
W3Const.w3PropClass: "cidRightLabel"
},
"uidMapPlaceDatetimeLabel": {
W3Const.w3PropType: W3Const.w3TypeLabel,
W3Const.w3PropString: "sidDatetimeLabel",
W3Const.w3PropClass: "cidRightLabel",
W3Const.w3PropCSS: {
"display": "none"
}
},
"uidMapPlaceRemarkLabel": {
W3Const.w3PropType: W3Const.w3TypeLabel,
W3Const.w3PropString: "sidRemarkLabel",
W3Const.w3PropClass: "cidRightLabel",
W3Const.w3PropCSS: {
"display": "none"
}
},
"uidMapPlaceNoteLabel": {
W3Const.w3PropType: W3Const.w3TypeLabel,
W3Const.w3PropString: "sidNoteLabel",
W3Const.w3PropClass: "cidRightLabel"
},
"uidMapPlaceName": {
W3Const.w3PropType: W3Const.w3TypeText,
W3Const.w3PropAttr: {
W3Const.w3AttrDisabled: "true"
}
},
"uidMapPlaceLatitude": {
W3Const.w3PropType: W3Const.w3TypeLabel
},
"uidMapPlaceLongitude": {
W3Const.w3PropType: W3Const.w3TypeLabel
},
"uidMapPlaceDatetime": {
W3Const.w3PropType: W3Const.w3TypeDatePicker,
W3Const.w3PropCSS: {
"display": "none"
},
W3Const.w3PropAttr: {
W3Const.w3AttrDisabled: "true"
}
},
"uidMapPlaceRemark": {
W3Const.w3PropType: W3Const.w3TypeText,
W3Const.w3PropCSS: {
"display": "none"
},
W3Const.w3PropAttr: {
W3Const.w3AttrDisabled: "true"
}
},
"uidMapPlaceNote": {
W3Const.w3PropType: W3Const.w3TypeText,
W3Const.w3PropAttr: {
W3Const.w3AttrDisabled: "true"
}
},
"uidSelectedPOIID": {
W3Const.w3PropType: W3Const.w3TypeLabel,
W3Const.w3PropCSS: {
"display": "none",
"width": "0"
}
},
"uidMapPlaceOperationTable": {
W3Const.w3PropType: W3Const.w3TypeTable,
W3Const.w3PropSubUI: [
[],
["uidMapAddPlaceButton", "uidMapSwitchToPlacePanelButton", "uidMapConfirmAddPlaceButton", "uidMapAddPOIButton"]
]
},
"uidMapAddPlaceButton": {
W3Const.w3PropType: W3Const.w3TypeButton,
W3Const.w3PropString: "sidAddJourneyPlace",
W3Const.w3PropTriggerApi: [
{
W3Const.w3ApiID: "aidAddJourneyPlace",
W3Const.w3ApiParams: [
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeUID,
W3Const.w3ApiDataValue: "uidSelectedJourneyID"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeUID,
W3Const.w3ApiDataValue: "uidMapPlaceName"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeUID,
W3Const.w3ApiDataValue: "uidMapPlaceDatetime"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeUID,
W3Const.w3ApiDataValue: "uidMapPlaceLatitude"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeUID,
W3Const.w3ApiDataValue: "uidMapPlaceLongitude"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeUID,
W3Const.w3ApiDataValue: "uidMapPlaceRemark"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeUID,
W3Const.w3ApiDataValue: "uidMapPlaceNote"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeVar,
W3Const.w3ApiDataValue: W3Const.w3Session
}]
}],
W3Const.w3PropEvent: {
W3Const.w3EventClick: [
W3Const.w3PlaceHolder_1,
"EJResetMap()"
]
},
W3Const.w3PropAttr: {
W3Const.w3AttrDisabled: "true"
},
W3Const.w3PropCSS: {
"display": "none"
}
},
"uidMapAddPOIButton": {
W3Const.w3PropType: W3Const.w3TypeButton,
W3Const.w3PropString: "sidAddPOI",
W3Const.w3PropTriggerApi: [
{
W3Const.w3ApiID: "aidAddPOI",
W3Const.w3ApiParams: [
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeUID,
W3Const.w3ApiDataValue: "uidMapPlaceName"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeUID,
W3Const.w3ApiDataValue: "uidMapPlaceLatitude"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeUID,
W3Const.w3ApiDataValue: "uidMapPlaceLongitude"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeUID,
W3Const.w3ApiDataValue: "uidMapPlaceNote"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeVar,
W3Const.w3ApiDataValue: W3Const.w3Session
}]
}],
W3Const.w3PropEvent: {
W3Const.w3EventClick: [
W3Const.w3PlaceHolder_1,
"EJResetMap()"
]
},
W3Const.w3PropAttr: {
W3Const.w3AttrDisabled: "true"
}
},
"uidMapSwitchToPlacePanelButton": {
W3Const.w3PropType: W3Const.w3TypeButton,
W3Const.w3PropString: "sidAddJourneyPlace",
W3Const.w3PropEvent: {
W3Const.w3EventClick: [
"EJDisplayPOIOnPlacePanel()"
]
},
W3Const.w3PropAttr: {
W3Const.w3AttrDisabled: "true"
},
W3Const.w3PropCSS: {
"display": "none"
}
},
"uidMapConfirmAddPlaceButton": {
W3Const.w3PropType: W3Const.w3TypeButton,
W3Const.w3PropString: "sidSubmit",
W3Const.w3PropTriggerApi: [
{
W3Const.w3ApiID: "aidAddPOIToJourney",
W3Const.w3ApiParams: [
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeUID,
W3Const.w3ApiDataValue: "uidSelectedPOIID"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeUID,
W3Const.w3ApiDataValue: "uidSelectedJourneyID"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeUID,
W3Const.w3ApiDataValue: "uidMapPlaceDatetime"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeUID,
W3Const.w3ApiDataValue: "uidMapPlaceRemark"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeUID,
W3Const.w3ApiDataValue: "uidMapPlaceNote"
},
{
W3Const.w3ApiDataType: W3Const.w3ApiDataTypeVar,
W3Const.w3ApiDataValue: W3Const.w3Session
}]
}],
W3Const.w3PropEvent: {
W3Const.w3EventClick: [
W3Const.w3PlaceHolder_1,
"EJResetMap()"
]
},
W3Const.w3PropAttr: {
W3Const.w3AttrDisabled: "true"
},
W3Const.w3PropCSS: {
"display": "none"
}
},
# Operation - journey
"uidSelectedJourneyPanel": {
W3Const.w3PropType: W3Const.w3TypePanel,
W3Const.w3PropSubUI: [
"uidSelectedJourneyTable"
],
W3Const.w3PropCSS: {
"display": "none"
}
},
"uidSelectedJourneyTable": {
W3Const.w3PropType: W3Const.w3TypeTable,
W3Const.w3PropSubUI: [
[],
["uidJourneyLabel", "uidSelectedJourneyName", "uidSelectedJourneyDatetime", "uidSelectedJourneyTraveler", "uidSelectedJourneyID", "uidDisplayJourneyButton"]
]
},
"uidJourneyLabel": {
W3Const.w3PropType: W3Const.w3TypeLabel,
W3Const.w3PropString: "sidJourneyLabel",
W3Const.w3PropClass: "cidRightLabel",
W3Const.w3PropCSS: {
"font-weight": "bold"
}
},
"uidSelectedJourneyName": {
W3Const.w3PropType: W3Const.w3TypeLabel,
W3Const.w3PropClass: "cidLRPadding"
},
"uidSelectedJourneyDatetime": {
W3Const.w3PropType: W3Const.w3TypeLabel,
W3Const.w3PropClass: "cidLRPadding"
},
"uidSelectedJourneyTraveler": {
W3Const.w3PropType: W3Const.w3TypeLabel,
W3Const.w3PropClass: "cidLRPadding"
},
"uidSelectedJourneyID": {
W3Const.w3PropType: W3Const.w3TypeLabel,
W3Const.w3PropCSS: {
"display": "none",
"width": "0"
}
},
"uidDisplayJourneyButton": {
W3Const.w3PropType: W3Const.w3TypeButton,
W3Const.w3PropString: "sidGotoMap",
W3Const.w3PropEvent: {
W3Const.w3EventClick: [
"EJDisplayCurrentJourneyOnMap()"
]
}
},
# Operation - search
"uidMapSearchPanel": {
W3Const.w3PropType: W3Const.w3TypePanel,
W3Const.w3PropSubUI: [
"uidMapSearchTable"
]
},
"uidMapSearchTable": {
W3Const.w3PropType: W3Const.w3TypeTable,
W3Const.w3PropSubUI: [
[],
["uidMapSearchInput", "uidMapSearchButton", "uidMapShowAllPlaceButton", "uidMapShowAllPOIButton", "uidMapClearButton"]
]
},
"uidMapSearchInput": {
W3Const.w3PropType: W3Const.w3TypeText
},
"uidMapSearchButton": {
W3Const.w3PropType: W3Const.w3TypeButton,
W3Const.w3PropString: "sidSearch",
W3Const.w3PropEvent: {
W3Const.w3EventClick: [
"EJMapSearch('uidMapSearchInput')"
]
}
},
"uidMapShowAllPlaceButton": {
W3Const.w3PropType: W3Const.w3TypeButton,
W3Const.w3PropString: "sidShowAllPlace",
W3Const.w3PropEvent: {
W3Const.w3EventClick: [
"EJShowAllPlaces()"
]
}
},
"uidMapShowAllPOIButton": {
W3Const.w3PropType: W3Const.w3TypeButton,
W3Const.w3PropString: "sidShowAllPOI",
W3Const.w3PropEvent: {
W3Const.w3EventClick: [
"EJShowAllPOIs()"
]
}
},
"uidMapClearButton": {
W3Const.w3PropType: W3Const.w3TypeButton,
W3Const.w3PropString: "sidClear",
W3Const.w3PropEvent: {
W3Const.w3EventClick: [
"EJResetMap()"
]
}
},
# Map
"uidMapPanel": {
W3Const.w3PropType: W3Const.w3TypePanel,
W3Const.w3PropSubUI: [
"uidMSMap"
]
},
"uidMSMap": {
W3Const.w3PropType: W3Const.w3TypeMap,
W3Const.w3PropMap: {
W3Const.w3AttrMapHandler: "EJMapHandler(w3PlaceHolder_1)",
W3Const.w3AttrMapLocation: [],
W3Const.w3AttrMapKey: ""
},
W3Const.w3PropCSS: {
"border": "2px solid",
"min-width": "800px",
"min-height": "600px"
}
}
}
|
import json
import aiohttp
import discord
from aiocache.decorators import cached
from utils.context import BlooContext, PromptData
from utils.permissions.permissions import permissions
from utils.views.menu import Menu
class TweakMenu(Menu):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs, timeout_function=self.on_timeout)
self.jump_button = JumpButton(self.ctx.bot, len(self.pages), self)
self.extra_buttons = []
def refresh_button_state(self):
if self.ctx.repo:
extra_buttons = [
discord.ui.Button(label='Add Repo to Sileo', emoji="<:sileo:679466569407004684>",
url=f'https://sharerepo.stkc.win/v2/?pkgman=sileo&repo={self.ctx.repo}', style=discord.ButtonStyle.url, row=1),
discord.ui.Button(label='Add Repo to Zebra', emoji="<:zebra:911433583032422420>",
url=f'https://sharerepo.stkc.win/v2/?pkgman=zebra&repo={self.ctx.repo}', style=discord.ButtonStyle.url, row=1),
discord.ui.Button(label='Other Package Managers', emoji="<:cydiasileosplit:932650041099825232>",
url=f'https://sharerepo.stkc.win/?repo={self.ctx.repo}', style=discord.ButtonStyle.url, row=1)
]
else:
extra_buttons = [
discord.ui.Button(label='Cannot add default repo', emoji="<:sileo:679466569407004684>",
url=f'https://sharerepo.stkc.win/v2/?pkgman=sileo&repo={self.ctx.repo}', disabled=True, style=discord.ButtonStyle.url, row=1),
discord.ui.Button(label='Cannot add default repo', emoji="<:zebra:911433583032422420>",
url=f'https://sharerepo.stkc.win/v2/?pkgman=zebra&repo={self.ctx.repo}', disabled=True, style=discord.ButtonStyle.url, row=1),
discord.ui.Button(label='Cannot add default repo', emoji="<:Add:947354227171262534>",
url=f'https://sharerepo.stkc.win/?repo={self.ctx.repo}', style=discord.ButtonStyle.url, disabled=True, row=1)
]
if self.ctx.depiction:
extra_buttons.insert(0,
discord.ui.Button(label='View Depiction', emoji="<:Depiction:947358756033949786>",
url=self.ctx.depiction, style=discord.ButtonStyle.url, row=1),
)
if len(self.pages) > 1:
extra_buttons.append(self.jump_button)
for button in self.extra_buttons:
self.remove_item(button)
for button in extra_buttons:
self.add_item(button)
self.extra_buttons = extra_buttons
super().refresh_button_state()
async def on_timeout(self):
self.jump_button.disabled = True
self.stopped = True
await self.refresh_response_message()
self.stop()
def on_interaction_check(self, interaction: discord.Interaction):
return interaction.user == self.ctx.author or permissions.has(interaction.guild, interaction.user, 4)
class BypassMenu(Menu):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs, timeout_function=self.on_timeout)
self.extra_buttons = []
def refresh_button_state(self):
app = self.ctx.app
bypass = self.ctx.current_bypass
extra_buttons = []
if bypass.get("guide") is not None:
extra_buttons.append(
discord.ui.Button(label="View Guide", style=discord.ButtonStyle.link, url=bypass.get("guide"))
)
if bypass.get("repository") is not None:
extra_buttons.append(
discord.ui.Button(label="View Repository", style=discord.ButtonStyle.link, url=bypass.get("repository").get("uri"))
)
if app.get("uri") is not None:
extra_buttons.append(
discord.ui.Button(label="View in App Store", emoji="<:appstore:392027597648822281>", style=discord.ButtonStyle.link, url=app.get("uri"))
)
for button in self.extra_buttons:
self.remove_item(button)
for button in extra_buttons:
self.add_item(button)
self.extra_buttons = extra_buttons
super().refresh_button_state()
async def on_timeout(self):
self.stopped = True
await self.refresh_response_message()
self.stop()
class JumpButton(discord.ui.Button):
def __init__(self, bot, max_page: int, tmb):
super().__init__(style=discord.ButtonStyle.primary, emoji="⤴️")
self.max_page = max_page
self.bot = bot
self.tmb = tmb
self.row = 2
async def callback(self, interaction: discord.Interaction):
if interaction.user != self.tmb.ctx.author:
return
ctx = await self.bot.get_application_context(interaction, cls=BlooContext)
await interaction.response.defer(ephemeral=True)
prompt = PromptData(
value_name="page",
description="What page do you want to jump to?",
timeout=10,
convertor=None)
res = await ctx.prompt(prompt)
if res is None:
await ctx.send_warning("Cancelled")
return
try:
res = int(res)
except ValueError:
await ctx.send_warning("Invalid page number!")
return
if res < 0 or res > self.max_page:
await ctx.send_warning("Invalid page number!")
return
self.tmb.current_page = res
await self.tmb.refresh_response_message()
await ctx.send_success(f"Jumped to page {res}!")
@cached(ttl=3600)
async def get_jailbreaks_jba():
"""Gets all apps on Jailbreaks.app
Returns
-------
list
"Apps"
"""
res_apps = []
async with aiohttp.ClientSession() as session:
async with session.get("https://jailbreaks.app/json/apps.json") as resp:
if resp.status == 200:
res_apps = await resp.json()
return res_apps
@cached(ttl=1800)
async def get_signed_status():
"""Gets Jailbreaks.app's signed status"""
signed = []
async with aiohttp.ClientSession() as session:
async with session.get("https://jailbreaks.app/status.php") as resp:
if resp.status == 200:
res = await resp.text()
signed = json.loads(res)
return signed
async def iterate_apps(query) -> dict:
"""Iterates through Jailbreaks.app apps, looking for a matching query
Parameters
----------
query : str
"App to look for"
Returns
-------
dict
"List of apps that match the query"
"""
apps = await get_jailbreaks_jba()
for possibleApp in apps:
if possibleApp.get('name').lower() == query.lower().replace("œ", "oe"):
return possibleApp
class CIJMenu(Menu):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.extra_buttons = []
def refresh_button_state(self):
extra_buttons = []
if self.ctx.jb_info.get("website") is not None:
extra_buttons.append(discord.ui.Button(label='Website', url=self.ctx.jb_info.get(
"website").get("url"), style=discord.ButtonStyle.url, row=1))
if self.ctx.jb_info.get('guide'):
added = False
for guide in self.ctx.jb_info.get('guide')[1:]:
if self.ctx.build in guide.get("firmwares") and self.ctx.device_id in guide.get("devices"):
extra_buttons.append(discord.ui.Button(
label=f'{guide.get("name")} Guide', url=f"https://ios.cfw.guide{guide.get('url')}", style=discord.ButtonStyle.url, row=1))
added = True
break
if not added:
guide = self.ctx.jb_info.get('guide')[0]
extra_buttons.append(discord.ui.Button(
label=f'{guide.get("name")} Guide', url=f"https://ios.cfw.guide{guide.get('url')}", style=discord.ButtonStyle.url, row=1))
if self.ctx.jb_info.get("jailbreaksmeapp") is not None:
if self.ctx.jba is None or self.ctx.signed.get('status') != 'Signed':
extra_buttons.append(discord.ui.Button(label='Install with Jailbreaks.app',
url=f"https://api.jailbreaks.app/", style=discord.ButtonStyle.url, disabled=True, row=1))
else:
extra_buttons.append(discord.ui.Button(label='Install with Jailbreaks.app',
url=f"https://api.jailbreaks.app/install/{self.ctx.jba.get('name').replace(' ', '')}", style=discord.ButtonStyle.url, row=1))
for button in self.extra_buttons:
self.remove_item(button)
for button in extra_buttons:
self.add_item(button)
self.extra_buttons = extra_buttons
super().refresh_button_state()
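# Minimal standalone sketch (not part of the original cog; "Chimera" is just a sample
# query): the cached helpers above can be exercised outside the bot with a plain
# asyncio loop, assuming Jailbreaks.app is reachable.
if __name__ == "__main__":
    import asyncio

    async def _demo():
        app = await iterate_apps("Chimera")   # first app whose name matches, or None
        signed = await get_signed_status()    # cached for 30 minutes via aiocache
        if app is not None:
            print(app.get("name"), signed.get("status"))

    asyncio.run(_demo())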
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Talos information
# Based on Vulners
#
# Software is free software released under the "Modified BSD license"
#
# Copyright (c) 2017 Pieter-Jan Moreels - pieterjan.moreels@gmail.com
# Sources
SOURCE_NAME = 'talos'
SOURCE_FILE = "https://vulners.com/api/v3/archive/collection/?type=talos&api_key={}"
# Imports
import json
from collections import defaultdict
from lib.Config import Configuration as conf
from lib.Source import Source
def add_if(_, entry, item, name=None):
if not name: name=item
if entry.get(item): _[name] = entry[item]
def clean_date(_, item):
if _.get(item): _[item] = _[item].split('T')[0]
class Talos(Source):
def __init__(self):
self.name = SOURCE_NAME
self.cves = defaultdict(list)
source_file = SOURCE_FILE.format(conf.readSetting("vulners", "api_key", ""))
_file, r = conf.getFeedData(SOURCE_NAME, source_file)
data = json.loads(str(_file.read(), 'utf-8'))
for entry in data:
talos = {}
source = entry['_source']
add_if(talos, source, 'published')
add_if(talos, source, 'lastseen', 'last seen')
add_if(talos, source, 'id')
add_if(talos, source, 'title')
add_if(talos, source, 'references')
add_if(talos, source, 'reporter')
add_if(talos, source, 'href', 'source')
for date in ['published', 'last seen']: clean_date(talos, date)
if talos:
for CVE in source['cvelist']: self.cves[CVE].append(talos)
def getSearchables(self):
return ['id', 'reporter']
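# Small worked example of the helpers above (illustrative only; assumes the project's
# lib package is importable): add_if copies a key from a raw Vulners entry into the
# target dict, optionally under a new name, and clean_date trims an ISO timestamp down
# to its date part.
if __name__ == '__main__':
    entry = {'published': '2017-06-01T00:00:00', 'href': 'https://example.org/talos-report'}
    talos = {}
    add_if(talos, entry, 'published')
    add_if(talos, entry, 'href', 'source')
    clean_date(talos, 'published')
    print(talos)  # {'published': '2017-06-01', 'source': 'https://example.org/talos-report'}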
|
from pipe import select, where
import numpy as np
import functools as ft
with open("input4.txt") as f:
lines = f.read()
move = list(map(int,lines.split('\n\n')[0].split(",")))
board = lines.split('\n\n')[1:]
def string_to_matrix(m):
if m[-1]=="\n":
m=m[0:-1]
m=np.asmatrix(m.replace("\n",";"))
return(m)
board=list(map(string_to_matrix,board))
state=[np.zeros((5,5),dtype=int) for i in range(0,len(board))]
def check_bingo (m):
m=np.asarray(m)
l=np.concatenate((m,m.transpose(),[np.diag(m)],[np.diag(np.fliplr(m))]))
# bingo=list(l
# |select(lambda v:ft.reduce(lambda x,y:x*y,v))
# |lambda x,y: x|y
# )
bingo=ft.reduce(lambda x,y: x|y,map(lambda v:ft.reduce(lambda x,y:x*y,v),l))
return(bingo)
def find_matching(m,s,v):
x=np.where(m==v)
if(len(x)>0):
s[x]=1
return(s)
def match_through(key):
return(list(map(ft.partial(find_matching,v=key),board,state)))
for key in move:
bingo0=list(map(check_bingo,state))
state=match_through(key)
bingo=list(map(check_bingo,state))
if sum(bingo)-sum(bingo0)==1 and sum(map(lambda x:1-x,bingo))==0:
break
print(key)
lastwinner=bingo0.index(0)
notcircled=np.where(state[lastwinner]==0)
print(sum(np.asarray(board[lastwinner][notcircled])[0]))
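# Worked example of the row/column/diagonal check used above (standalone sketch with a
# made-up marking matrix): a board with one fully marked line reduces to 1, an
# incomplete board to 0.
import numpy as np
import functools as ft

marks = np.zeros((5, 5), dtype=int)
marks[1, :] = 1                      # second row fully marked
lines_to_check = np.concatenate((marks, marks.transpose(),
                                 [np.diag(marks)], [np.diag(np.fliplr(marks))]))
won = ft.reduce(lambda x, y: x | y,
                map(lambda v: ft.reduce(lambda x, y: x * y, v), lines_to_check))
print(won)                           # 1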
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sparse ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class SparseOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
def testSparseEye(self):
def test_one(n, m, as_tensors):
expected = np.eye(n, m)
if as_tensors:
m = constant_op.constant(m)
n = constant_op.constant(n)
s = sparse_ops.sparse_eye(n, m)
d = sparse_ops.sparse_to_dense(s.indices, s.dense_shape, s.values)
self.assertAllEqual(self.evaluate(d), expected)
for n in range(2, 10, 2):
for m in range(2, 10, 2):
# Test with n and m as both constants and tensors.
test_one(n, m, True)
test_one(n, m, False)
def testSparseExpandDims(self):
for rank in range(1, 4):
# Create a dummy input. When rank=3, shape=[2, 4, 6].
shape = np.arange(1, rank + 1) * 2
before = np.arange(np.prod(shape)).reshape(shape)
# Make entries sparse.
before *= np.random.binomial(1, .2, before.shape)
dense_shape = before.shape
indices = np.array(np.where(before)).T
values = before[before != 0]
# Try every possible valid value of axis.
for axis in range(-rank - 1, rank):
expected_after = np.expand_dims(before, axis)
for axis_as_tensor in [False, True]:
dense_shape_t = constant_op.constant(dense_shape, dtype=dtypes.int64)
indices_t = constant_op.constant(indices)
values_t = constant_op.constant(values)
before_t = sparse_tensor.SparseTensor(
indices=indices_t, values=values_t, dense_shape=dense_shape_t)
if axis_as_tensor:
axis = constant_op.constant(axis)
s = sparse_ops.sparse_expand_dims(before_t, axis)
d = sparse_ops.sparse_to_dense(s.indices, s.dense_shape, s.values)
self.assertAllEqual(self.evaluate(d), expected_after)
@parameterized.parameters([
(math_ops.abs, [1.0, -1.0, 3.0, -4.0], [1.0, 1.0, 3.0, 4.0]),
(math_ops.negative, [1.0, -1.0, 3.0, -4.0], [-1.0, 1.0, -3.0, 4.0]),
(math_ops.sign, [3.0, -2.0, 0.0, -4.0], [1.0, -1.0, 0.0, -1.0]),
(math_ops.square, [1.0, -1.0, 3.0, -4.0], [1.0, 1.0, 9.0, 16.0]),
])
def testUnarySparseDispatch(self, op, values, expected):
st = sparse_tensor.SparseTensor(
indices=[[0, 0], [0, 1], [2, 0], [2, 4]],
values=values,
dense_shape=[3, 6])
result = op(st)
result_value = self.evaluate(result)
self.assertAllEqual(result_value.indices, st.indices)
self.assertAllEqual(result_value.values, expected)
self.assertAllEqual(result_value.dense_shape, st.dense_shape)
if __name__ == '__main__':
googletest.main()
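# Standalone sketch (not part of the test suite) of the two ops exercised above, assuming
# eager mode: sparse_eye mirrors np.eye and sparse_expand_dims mirrors np.expand_dims for
# SparseTensors. The helper is only defined here, never called by the tests.
def _sparse_ops_sketch():
    s = sparse_ops.sparse_eye(3, 3)
    dense_identity = sparse_ops.sparse_to_dense(s.indices, s.dense_shape, s.values)
    expanded = sparse_ops.sparse_expand_dims(s, 0)  # dense_shape becomes [1, 3, 3]
    return dense_identity, expanded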
|
import numpy as np
from scipy.stats import norm
from sklearn.model_selection import train_test_split
from keras.callbacks import EarlyStopping
import warnings
import keras.backend as K
from keras.initializers import glorot_uniform
import tensorflow as tf
from sklearn.model_selection import KFold
from scipy.stats import hmean, gmean
import scipy.optimize
from keras.models import load_model
class DnnT(object):
def __init__(self, inf_cov, model, model_mask, change='mask', alpha=.05, verbose=0, eva_metric='mse'):
self.inf_cov = inf_cov
self.model = model
self.model_mask = model_mask
self.alpha = alpha
self.change = change
self.eva_metric = eva_metric
def metric(self, y_true, y_pred):
if self.eva_metric == 'mse':
metric_tmp = ((y_true - y_pred)**2).flatten()
elif self.eva_metric == 'mae':
metric_tmp = abs(y_true - y_pred).flatten()
elif self.eva_metric == 'zero-one':
label_pred = np.argmax(y_pred, 1)
label_true = np.argmax(y_true, 1)
metric_tmp = 1. - 1.*(label_true == label_pred)
elif self.eva_metric == 'cross-entropy':
label_true = np.argmax(y_true, 1)
metric_tmp = -np.log(y_pred[range(len(y_pred)), label_true])
else:
metric_tmp = self.eva_metric(y_true, y_pred)
return metric_tmp
# def reset_model(self):
# initial_weights = self.model.get_weights()
# backend_name = K.backend()
# if backend_name == 'tensorflow':
# k_eval = lambda placeholder: placeholder.eval(session=K.get_session())
# elif backend_name == 'theano':
# k_eval = lambda placeholder: placeholder.eval()
# else:
# raise ValueError("Unsupported backend")
# new_weights = [k_eval(glorot_uniform()(w.shape)) for w in initial_weights]
# self.model.set_weights(new_weights)
def reset_model(self):
if int(tf.__version__[0]) == 2:
for layer in self.model.layers:
if isinstance(layer, tf.keras.Model): #if you're using a model as a layer
reset_weights(layer) #apply function recursively
continue
#where are the initializers?
if hasattr(layer, 'cell'):
init_container = layer.cell
else:
init_container = layer
for key, initializer in init_container.__dict__.items():
if "initializer" not in key: #is this item an initializer?
continue #if no, skip it
# find the corresponding variable, like the kernel or the bias
if key == 'recurrent_initializer': #special case check
var = getattr(init_container, 'recurrent_kernel')
else:
var = getattr(init_container, key.replace("_initializer", ""))
if var is None:
continue
else:
var.assign(initializer(var.shape, var.dtype))
for layer in self.model_mask.layers:
if isinstance(layer, tf.keras.Model): #if you're using a model as a layer
reset_weights(layer) #apply function recursively
continue
#where are the initializers?
if hasattr(layer, 'cell'):
init_container = layer.cell
else:
init_container = layer
for key, initializer in init_container.__dict__.items():
if "initializer" not in key: #is this item an initializer?
continue #if no, skip it
# find the corresponding variable, like the kernel or the bias
if key == 'recurrent_initializer': #special case check
var = getattr(init_container, 'recurrent_kernel')
else:
var = getattr(init_container, key.replace("_initializer", ""))
if var is None:
continue
else:
var.assign(initializer(var.shape, var.dtype))
if int(tf.__version__[0]) == 1:
session = K.get_session()
for layer in self.model.layers:
if ((hasattr(layer, 'kernel_initializer')) and (layer.kernel != None)):
layer.kernel.initializer.run(session=session)
if ((hasattr(layer, 'bias_initializer')) and (layer.bias != None)):
layer.bias.initializer.run(session=session)
for layer in self.model_mask.layers:
if ((hasattr(layer, 'kernel_initializer')) and (layer.kernel != None)):
layer.kernel.initializer.run(session=session)
if ((hasattr(layer, 'bias_initializer')) and (layer.bias != None)):
layer.bias.initializer.run(session=session)
## can be extent to @abstractmethod
def mask_cov(self, X, k=0):
Z = X.copy()
if type(self.inf_cov[k]) is list:
## for channels_last image data: shape should be (#samples, img_rows, img_cols, channel)
Z[:, self.inf_cov[k][0][:,None], self.inf_cov[k][1], 0] = 0.
else:
Z[:,self.inf_cov[k]]= 0.
return Z
def perm_cov(self, X, k=0):
Z = X.copy()
if type(self.inf_cov[k]) is list:
## for channels_last image data: shape should be (#samples, img_rows, img_cols, channel)
Z[:,self.inf_cov[k][0][:,None], self.inf_cov[k][1], 0]= np.random.permutation(Z[:,self.inf_cov[k][0][:,None], self.inf_cov[k][1], 0])
else:
Z[:,self.inf_cov[k]]= np.random.permutation(Z[:,self.inf_cov[k]])
return Z
def noise_cov(self, X, k=0):
Z = X.copy()
Z[:,self.inf_cov[k]] = np.random.randn(len(X), len(self.inf_cov[k]))
return Z
def adaRatio(self, X, y, k=0, fit_params={}, perturb=None, split='one-split', perturb_grid=[.01, .05, .1, .5, 1.], ratio_grid=[.2, .4, .6, .8],
if_reverse=0, min_inf=0, min_est=0, ratio_method='fuse', num_perm=100, cv_num=1, cp='gmean', verbose=0):
ratio_grid.sort()
if if_reverse == 1:
ratio_grid = list(reversed(ratio_grid))
candidate, Err1_lst, ratio_lst, P_value_lst = [], [], [], []
found = 0
if split == 'two-split':
for ratio_tmp in ratio_grid:
ratio_tmp = ratio_tmp/2
m_tmp = int(len(X)*ratio_tmp)
if m_tmp < min_inf:
continue
n_tmp = len(X) - 2*m_tmp
if n_tmp < min_est:
continue
# split data
P_value = []
for h in range(cv_num):
self.reset_model()
P_value_cv = []
## generate permutated samples
# index_perm = np.random.permutation(range(len(y)))
X_perm = X.copy()
X_perm = self.perm_cov(X_perm, k)
# X_perm[:,self.inf_cov[k]] = X_perm[:,self.inf_cov[k]][index_perm,:]
## split sample
X_train, X_test, y_train, y_test = train_test_split(X_perm, y, train_size=n_tmp, random_state=1)
# training for full model
history = self.model.fit(x=X_train, y=y_train, **fit_params)
# training for mask model
if self.change == 'mask':
Z_train = self.mask_cov(X_train, k)
if self.change == 'perm':
Z_train = self.perm_cov(X_train, k)
history_mask = self.model_mask.fit(x=Z_train, y=y_train, **fit_params)
## save model
# self.model.save_weights('model.h5')
# self.model_mask.save_weights('model_mask.h5')
# self.model = load_model('model.h5', compile=False)
# self.model_mask = load_model('model_mask.h5', compile=False)
if self.change == 'mask':
Z_test = self.mask_cov(X_test, k)
if self.change == 'perm':
Z_test = self.perm_cov(X_test, k)
# evaluation
pred_y_mask = self.model_mask.predict_on_batch(Z_test)
for j in range(num_perm):
# ind_test_perm = np.random.permutation(range(len(y_test)))
X_test_perm = X_test.copy()
X_test_perm = self.perm_cov(X_test_perm, k)
# X_test_perm[:,self.inf_cov[k]] = X_test_perm[:,self.inf_cov[k]][ind_test_perm,:]
pred_y = self.model.predict_on_batch(X_test_perm)
ind_inf, ind_inf_mask = train_test_split(range(len(pred_y)), train_size=m_tmp, random_state=42)
metric_tmp = self.metric(y_test[ind_inf], pred_y[ind_inf])
metric_mask_tmp = self.metric(y_test[ind_inf_mask], pred_y_mask[ind_inf_mask])
diff_tmp = metric_tmp - metric_mask_tmp
Lambda_tmp = np.sqrt(len(diff_tmp)) * ( diff_tmp.std() )**(-1)*( diff_tmp.mean() )
p_value_tmp = norm.cdf(Lambda_tmp)
P_value_cv.append(p_value_tmp)
P_value.append(P_value_cv)
# if verbose == 1:
# print('(AdaRatio) cv: %d; p_value: %.3f, inference sample ratio: %.3f' %(h, p_value_tmp, ratio_tmp))
# print('(AdaRatio) diff: %.3f(%.3f); metric: %.3f(%.3f); metric_mask: %.3f(%.3f)' %(diff_tmp.mean(), diff_tmp.std(), metric_tmp.mean(), metric_tmp.std(), metric_mask_tmp.mean(), metric_mask_tmp.std()))
P_value = np.array(P_value)
# print(P_value)
if cv_num > 1:
P_value = np.array(P_value)
if cp == 'gmean':
P_value_cp = np.e*gmean(P_value, 0)
elif cp == 'median':
P_value_cp = 2*np.median(P_value, 0)
elif cp == 'Q1':
P_value_cp = cv_num/2.*np.partition(P_value, 1)[1]
elif cp == 'min':
P_value_cp = cv_num*np.min(P_value, 0)
elif cp == 'hmean':
P_value_cp = np.e * np.log(cv_num) * hmean(P_value, 0)
elif cp == 'hommel':
const = np.sum(1. / (np.arange(cv_num) + 1.))
P_value_cp = const*np.min(np.sort(P_value)*cv_num/(np.arange(cv_num) + 1.))
elif cp == 'cauchy':
t0 = np.mean(np.tan((.5 - P_value)*np.pi), 0)
P_value_cp = .5 - np.arctan(t0)/np.pi
else:
warnings.warn("cp should be geometric or min.")
else:
P_value_cp = np.mean(P_value, 0)
P_value_cp = np.minimum(P_value_cp, 1.)
## compute the type 1 error
Err1 = len(P_value_cp[P_value_cp < self.alpha]) / len(P_value_cp)
Err1_lst.append(Err1)
# P_value_lst.append(P_value)
ratio_lst.append(ratio_tmp)
if verbose==1:
print('(AdaRatio) Est. Type 1 error: %.3f; p_value_mean: %.3f, inference sample ratio: %.3f' %(Err1, P_value_cp.mean(), ratio_tmp))
# print('(AdaRatio) p_value: %.3f, inference sample ratio: %.3f' %(P_value.mean(), ratio_tmp))
# if P_value > self.alpha:
if Err1 < self.alpha:
found = 1
if ratio_method == 'fuse':
m_opt = m_tmp
n_opt = len(X) - 2*m_opt
break
if found == 1:
if ratio_method == 'close':
# P_value_lst = np.array(P_value_lst)
# ratio_lst = np.array(ratio_lst)
# m_opt = int(ratio_lst[np.argmin(np.abs(P_value_lst - 0.5))] * len(X))
# # m_opt = int(ratio_lst[np.argmax(P_value_lst)] * len(X))
n_opt = len(X) - 2*m_opt
if found==0:
warnings.warn("No ratio can control the Type 1 error, pls increase the sample size, and inference sample ratio is set as the min of ratio_grid.")
Err1_lst, ratio_lst = np.array(Err1_lst), np.array(ratio_lst)
# print('err list for the TS test: %s' %Err1_lst)
m_opt = int(ratio_lst[np.argmin(Err1_lst)] * len(X))
n_opt = len(X) - 2*m_opt
return n_opt, m_opt
if split == 'one-split':
if perturb != None:
perturb_grid = [perturb]
for perturb_tmp in perturb_grid:
## stop if current perturb is enough to control the type 1 error
if found == 1:
break
Err1_lst, ratio_lst, perturb_lst, P_value_lst = [], [], [], []
for ratio_tmp in ratio_grid:
m_tmp = int(len(X)*ratio_tmp)
if m_tmp < min_inf:
continue
n_tmp = len(X) - m_tmp
if n_tmp < min_est:
continue
# split data
P_value = []
for h in range(cv_num):
self.reset_model()
P_value_cv = []
## generate permutated samples
# index_perm = np.random.permutation(range(len(y)))
X_perm = X.copy()
X_perm = self.perm_cov(X_perm, k)
# X_perm[:,self.inf_cov[k]] = X_perm[:,self.inf_cov[k]][index_perm,:]
# split samples
X_train, X_test, y_train, y_test = train_test_split(X_perm, y, train_size=n_tmp, random_state=h)
# training for full model
history = self.model.fit(x=X_train, y=y_train, **fit_params)
# training for mask model
if self.change == 'mask':
Z_train = self.mask_cov(X_train, k)
if self.change == 'perm':
Z_train = self.perm_cov(X_train, k)
history_mask = self.model_mask.fit(x=Z_train, y=y_train, **fit_params)
## save and load models
# self.model.save('model.h5')
# self.model_mask.save('model_mask.h5')
# model_tmp = load_model('model.h5', compile=False)
# model_mask_tmp = load_model('model_mask.h5', compile=False)
# if stopping_metric == 'p-value':
if self.change == 'mask':
Z_test = self.mask_cov(X_test, k)
if self.change == 'perm':
Z_test = self.perm_cov(X_test, k)
# pred_y = self.model.predict_on_batch(X_test)
pred_y_mask = self.model_mask.predict_on_batch(Z_test)
# evaluation
for j in range(num_perm):
# ind_test_perm = np.random.permutation(range(len(y_test)))
X_test_perm = X_test.copy()
X_test_perm = self.perm_cov(X_test_perm, k)
# X_test_perm[:,self.inf_cov[k]] = X_test_perm[:,self.inf_cov[k]][ind_test_perm,:]
pred_y = self.model.predict_on_batch(X_test_perm)
metric_tmp = self.metric(y_test, pred_y)
metric_mask_tmp = self.metric(y_test, pred_y_mask)
diff_tmp = metric_tmp - metric_mask_tmp
Lambda_tmp = np.sqrt(len(diff_tmp)) * ( diff_tmp.std() )**(-1)*( diff_tmp.mean() )
p_value_tmp = norm.cdf(Lambda_tmp)
P_value_cv.append(p_value_tmp)
# if verbose == 1:
# print('(AdaRatio) diff: %.3f(%.3f); metric: %.3f(%.3f); metric_mask: %.3f(%.3f)' %(diff_tmp.mean(), diff_tmp.std(), metric_tmp.mean(), metric_tmp.std(), metric_mask_tmp.mean(), metric_mask_tmp.std()))
# print('(AdaRatio) cv: %d; p_value: %.3f, inference sample ratio: %.3f, perturb: %s' %(h, p_value_tmp, ratio_tmp, perturb_tmp))
P_value.append(P_value_cv)
P_value = np.array(P_value)
# print(P_value)
if cv_num > 1:
if cp == 'gmean':
P_value_cp = np.e*gmean(P_value, 0)
elif cp == 'median':
P_value_cp = 2*np.median(P_value, 0)
elif cp == 'Q1':
P_value_cp = cv_num/2.*np.partition(P_value, 1)[1]
elif cp == 'mean':
P_value_cp = 2*np.mean(P_value, 0)
elif cp == 'min':
P_value_cp = cv_num*np.min(P_value, 0)
elif cp == 'hommel':
const = np.sum(1. / (np.arange(cv_num) + 1.))
P_value_cp = const*np.min(np.sort(P_value)*cv_num/(np.arange(cv_num) + 1.))
elif cp == 'cauchy':
t0 = np.mean(np.tan((.5 - P_value)*np.pi), 0)
P_value_cp = .5 - np.arctan(t0)/np.pi
elif cp == 'hmean':
# def h_const(y): return y**2 - cv_num*( (y+1)*np.log(y+1) - y )
# sol_tmp = scipy.optimize.broyden1(h_const, xin=10., f_tol=1e-5)
# a_h = (sol_tmp + cv_num)**2 / (sol_tmp+1) / cv_num
P_value_cp = np.e * np.log(cv_num) * hmean(P_value, 0)
else:
warnings.warn("Not a well-defined cp method, pls check the document.")
else:
P_value_cp = np.mean(P_value, 0)
# compute the type 1 error
P_value_cp = np.minimum(P_value_cp, 1.)
# print('p_value: %s' %P_value_cp)
Err1 = len(P_value_cp[P_value_cp<=self.alpha])/len(P_value_cp)
Err1_lst.append(Err1)
if verbose==1:
print('(AdaRatio) Est. Type 1 error: %.3f; p_value_mean: %.3f, inference sample ratio: %.3f, perturb: %s' %(Err1, P_value_cp.mean(), ratio_tmp, perturb_tmp))
# print('(AdaRatio) p_value: %.3f, inference sample ratio: %.3f, perturb: %s' %(P_value.mean(), ratio_tmp, perturb_tmp))
P_value_lst.append(P_value_cp)
ratio_lst.append(ratio_tmp)
perturb_lst.append(perturb_tmp)
# if P_value > self.alpha:
if Err1 < self.alpha:
found = 1
if ratio_method == 'fuse':
m_opt = m_tmp
n_opt = len(X) - m_opt
perturb_opt = perturb_tmp
break
if found == 1:
if ratio_method == 'min':
Err1_lst, ratio_lst = np.array(Err1_lst), np.array(ratio_lst)
m_opt = int(ratio_lst[np.argmin(Err1_lst)] * len(X))
n_opt = len(X) - m_opt
perturb_opt = perturb_tmp
if ratio_method == 'close':
P_value_lst = np.array(P_value_lst)
ratio_lst, perturb_lst = np.array(ratio_lst), np.array(perturb_lst)
m_opt = int(ratio_lst[np.argmin(np.abs(P_value_lst - 0.5))] * len(X))
# m_opt = int(ratio_lst[np.argmax(P_value_lst)] * len(X))
n_opt = len(X) - m_opt
perturb_opt = perturb_lst[np.argmax(P_value_lst)]
if found==0:
warnings.warn("No ratio and perturb_level can control the Type 1 error, pls increase the perturb_level and sample size, and inference sample ratio is set as the min of ratio_grid.")
Err1_lst, ratio_lst = np.array(Err1_lst), np.array(ratio_lst)
m_opt = int(ratio_lst[np.argmin(Err1_lst)] * len(X))
n_opt = len(X) - m_opt
perturb_opt = perturb_lst[np.argmin(Err1_lst)]
return n_opt, m_opt, perturb_opt
def testing(self, X, y, fit_params, split_params={}, cv_num=5, cp='gmean', inf_ratio=None):
## update split_params
split_params_default = {'split': 'one-split',
'perturb': None,
'num_perm': 100,
'ratio_grid': [.2, .4, .6, .8],
'if_reverse': 0,
'perturb_grid': [.01, .05, .1, .5, 1.],
'min_inf': 0,
'min_est': 0,
'ratio_method': 'fuse',
'cv_num': 1,
'cp': 'hommel',
'verbose': 1}
split_params_default.update(split_params)
split_params = split_params_default
P_value = []
for k in range(len(self.inf_cov)):
self.reset_model()
if split_params['split'] == 'one-split':
if ((inf_ratio == None) or (split_params['perturb'] == None)):
n, m, perturb_level = self.adaRatio(X, y, k, fit_params=fit_params, **split_params)
print('%d-th inference; Adaptive data splitting: n: %d; m: %d; perturb: %s' %(k, n, m, perturb_level))
else:
m, n = int(inf_ratio * len(X)), len(X) - int(inf_ratio * len(X))
perturb_level = split_params['perturb']
elif split_params['split'] == 'two-split':
if inf_ratio == None:
n, m = self.adaRatio(X, y, k, fit_params=fit_params, **split_params)
print('%d-th inference; Adaptive data splitting: n: %d; m: %d' %(k, n, m))
else:
m, n = int(inf_ratio * len(X)/2)*2, len(X) - int(inf_ratio * len(X)/2)*2
else:
warnings.warn("split method must be 'one-split' or 'two-split'!")
P_value_cv = []
for h in range(cv_num):
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=n, random_state=h)
if split_params['split'] == 'two-split':
X_inf, X_inf_mask, y_inf, y_inf_mask = train_test_split(X_test, y_test, train_size=.5, random_state=42)
if split_params['split'] == 'one-split':
X_inf, X_inf_mask, y_inf, y_inf_mask = X_test.copy(), X_test.copy(), y_test.copy(), y_test.copy()
## prediction and inference in full model
self.reset_model()
history = self.model.fit(X_train, y_train, **fit_params)
pred_y = self.model.predict_on_batch(X_inf)
metric_full = self.metric(y_inf, pred_y)
# prediction and inference in mask model
if self.change == 'mask':
Z_train = self.mask_cov(X_train, k)
if self.change == 'perm':
Z_train = self.perm_cov(X_train, k)
self.reset_model()
history_mask = self.model_mask.fit(Z_train, y_train, **fit_params)
if self.change == 'mask':
Z_inf = self.mask_cov(X_inf_mask, k)
if self.change == 'perm':
Z_inf = self.perm_cov(X_inf_mask, k)
pred_y_mask = self.model_mask.predict_on_batch(Z_inf)
metric_mask = self.metric(y_inf_mask, pred_y_mask)
# if ((np.mean(metric_mask) > 3) or (np.mean(metric_full) > 3)):
# fit_err = 1
## compute p-value
if split_params['split'] == 'one-split':
if perturb_level == 'auto':
diff_tmp = metric_full - metric_mask + metric_full.std() * np.random.randn(len(metric_full))
else:
diff_tmp = metric_full - metric_mask + perturb_level * np.random.randn(len(metric_full))
if split_params['split'] == 'two-split':
diff_tmp = metric_full - metric_mask
Lambda = np.sqrt(len(diff_tmp)) * ( diff_tmp.std() )**(-1)*( diff_tmp.mean() )
p_value_tmp = norm.cdf(Lambda)
print('cv: %d; p_value: %.3f; diff: %.3f(%.3f); metric: %.3f(%.3f); metric_mask: %.3f(%.3f)' %(h, p_value_tmp, diff_tmp.mean(), diff_tmp.std(), metric_full.mean(), metric_full.std(), metric_mask.mean(), metric_mask.std()))
P_value_cv.append(p_value_tmp)
P_value_cv = np.array(P_value_cv)
if cv_num > 1:
if cp == 'gmean':
p_value_mean = np.e*gmean(P_value_cv)
elif cp == 'median':
p_value_mean = 2.*np.median(P_value_cv)
elif cp == 'Q1':
p_value_mean = cv_num/2.*np.partition(P_value_cv, 1)[1]
elif cp == 'min':
p_value_mean = cv_num*np.min(P_value_cv)
elif cp == 'hommel':
const = np.sum(1. / (np.arange(cv_num) + 1.))
p_value_mean = const*np.min(np.sort(P_value_cv)*cv_num/(np.arange(cv_num) + 1.))
elif cp == 'hmean':
# def h_const(y): return y**2 - cv_num*( (y+1)*np.log(y+1) - y )
# sol_tmp = scipy.optimize.broyden1(h_const, xin=10., f_tol=1e-5)
# a_h = (sol_tmp + cv_num)**2 / (sol_tmp+1) / cv_num
p_value_mean = np.e * np.log(cv_num) * hmean(P_value_cv)
# print('cv_p-value is %s; a_h: %.3f' %(P_value_cv, a_h))
elif cp == 'cauchy':
t0 = np.mean(np.tan((.5 - P_value_cv)*np.pi))
p_value_mean = .5 - np.arctan(t0)/np.pi
else:
warnings.warn("pls input correct way to combine p-values")
else:
p_value_mean = np.mean(P_value_cv)
p_value_mean = min(1, p_value_mean)
if p_value_mean < self.alpha:
print('reject H0 with p_value: %.3f' %p_value_mean)
else:
print('accept H0 with p_value: %.3f' %p_value_mean)
P_value.append(p_value_mean)
# return P_value, fit_err, P_value_cv
return P_value
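# Minimal end-to-end sketch (illustrative only; the data, architectures and
# hyperparameters below are made up, not from the original source). The test needs two
# identically configured networks, one fitted on the full features and one on the masked
# features, plus the index set(s) of the features under test.
if __name__ == '__main__':
    from keras.models import Sequential
    from keras.layers import Dense

    def build_net(p):
        net = Sequential([Dense(16, activation='relu', input_shape=(p,)), Dense(1)])
        net.compile(optimizer='adam', loss='mse')
        return net

    X = np.random.randn(1000, 10)
    y = (X[:, 0] + .5 * np.random.randn(1000)).reshape(-1, 1)
    tester = DnnT(inf_cov=[np.array([0]), np.array([5])],  # test features 0 and 5
                  model=build_net(10), model_mask=build_net(10), change='mask')
    # fixing inf_ratio and perturb skips the adaptive-splitting search in adaRatio
    p_values = tester.testing(X, y,
                              fit_params={'epochs': 50, 'batch_size': 64, 'verbose': 0},
                              cv_num=2, cp='hommel', inf_ratio=.3,
                              split_params={'split': 'one-split', 'perturb': .1})
    print(p_values)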
|
import jax
import jax_dataclasses
import numpy as onp
from jax import numpy as jnp
from overrides import overrides
from . import _base, hints
from .utils import get_epsilon, register_lie_group
@register_lie_group(
matrix_dim=3,
parameters_dim=4,
tangent_dim=3,
space_dim=3,
)
@jax_dataclasses.pytree_dataclass
class SO3(_base.SOBase):
"""Special orthogonal group for 3D rotations.
Internal parameterization is `(qw, qx, qy, qz)`.
Tangent parameterization is `(omega_x, omega_y, omega_z)`.
"""
# SO3-specific
wxyz: hints.Vector
"""Internal parameters. `(w, x, y, z)` quaternion."""
@overrides
def __repr__(self) -> str:
wxyz = jnp.round(self.wxyz, 5)
return f"{self.__class__.__name__}(wxyz={wxyz})"
@staticmethod
def from_x_radians(theta: hints.Scalar) -> "SO3":
"""Generates a x-axis rotation.
Args:
angle: X rotation, in radians.
Returns:
Output.
"""
return SO3.exp(jnp.array([theta, 0.0, 0.0]))
@staticmethod
def from_y_radians(theta: hints.Scalar) -> "SO3":
"""Generates a y-axis rotation.
Args:
theta: Y rotation, in radians.
Returns:
Output.
"""
return SO3.exp(jnp.array([0.0, theta, 0.0]))
@staticmethod
def from_z_radians(theta: hints.Scalar) -> "SO3":
"""Generates a z-axis rotation.
Args:
theta: Z rotation, in radians.
Returns:
Output.
"""
return SO3.exp(jnp.array([0.0, 0.0, theta]))
@staticmethod
def from_rpy_radians(
roll: hints.Scalar,
pitch: hints.Scalar,
yaw: hints.Scalar,
) -> "SO3":
"""Generates a transform from a set of Euler angles.
Uses the ZYX mobile robot convention.
Args:
roll: X rotation, in radians. Applied first.
pitch: Y rotation, in radians. Applied second.
yaw: Z rotation, in radians. Applied last.
Returns:
Output.
"""
return (
SO3.from_z_radians(yaw)
@ SO3.from_y_radians(pitch)
@ SO3.from_x_radians(roll)
)
@staticmethod
def from_quaternion_xyzw(xyzw: hints.Vector) -> "SO3":
"""Construct a rotation from an `xyzw` quaternion.
Note that `wxyz` quaternions can be constructed using the default dataclass
constructor.
Args:
xyzw: xyzw quaternion. Shape should be (4,).
Returns:
Output.
"""
assert xyzw.shape == (4,)
return SO3(jnp.roll(xyzw, shift=1))
def as_quaternion_xyzw(self) -> hints.VectorJax:
"""Grab parameters as xyzw quaternion."""
return jnp.roll(self.wxyz, shift=-1)
def as_rpy_radians(self) -> hints.RollPitchYaw:
"""Computes roll, pitch, and yaw angles.
Uses the ZYX mobile robot convention.
Returns:
Named tuple containing Euler angles in radians.
"""
return hints.RollPitchYaw(
roll=self.compute_roll_radians(),
pitch=self.compute_pitch_radians(),
yaw=self.compute_yaw_radians(),
)
def compute_roll_radians(self) -> hints.ScalarJax:
"""Compute roll angle.
Uses the ZYX mobile robot convention.
Returns:
Euler angle in radians.
"""
# https://en.wikipedia.org/wiki/Conversion_between_quaternions_and_Euler_angles#Quaternion_to_Euler_angles_conversion
q0, q1, q2, q3 = self.wxyz
return jnp.arctan2(2 * (q0 * q1 + q2 * q3), 1 - 2 * (q1 ** 2 + q2 ** 2))
def compute_pitch_radians(self) -> hints.ScalarJax:
"""Compute pitch angle.
Uses the ZYX mobile robot convention.
Returns:
Euler angle in radians.
"""
# https://en.wikipedia.org/wiki/Conversion_between_quaternions_and_Euler_angles#Quaternion_to_Euler_angles_conversion
q0, q1, q2, q3 = self.wxyz
return jnp.arcsin(2 * (q0 * q2 - q3 * q1))
def compute_yaw_radians(self) -> hints.ScalarJax:
"""Compute yaw angle.
Uses the ZYX mobile robot convention.
Returns:
Euler angle in radians.
"""
# https://en.wikipedia.org/wiki/Conversion_between_quaternions_and_Euler_angles#Quaternion_to_Euler_angles_conversion
q0, q1, q2, q3 = self.wxyz
return jnp.arctan2(2 * (q0 * q3 + q1 * q2), 1 - 2 * (q2 ** 2 + q3 ** 2))
# Factory
@staticmethod
@overrides
def identity() -> "SO3":
return SO3(wxyz=onp.array([1.0, 0.0, 0.0, 0.0]))
@staticmethod
@overrides
def from_matrix(matrix: hints.Matrix) -> "SO3":
assert matrix.shape == (3, 3)
# Modified from:
# > "Converting a Rotation Matrix to a Quaternion" from Mike Day
# > https://d3cw3dd2w32x2b.cloudfront.net/wp-content/uploads/2015/01/matrix-to-quat.pdf
def case0(m):
t = 1 + m[0, 0] - m[1, 1] - m[2, 2]
q = jnp.array([m[2, 1] - m[1, 2], t, m[1, 0] + m[0, 1], m[0, 2] + m[2, 0]])
return t, q
def case1(m):
t = 1 - m[0, 0] + m[1, 1] - m[2, 2]
q = jnp.array([m[0, 2] - m[2, 0], m[1, 0] + m[0, 1], t, m[2, 1] + m[1, 2]])
return t, q
def case2(m):
t = 1 - m[0, 0] - m[1, 1] + m[2, 2]
q = jnp.array([m[1, 0] - m[0, 1], m[0, 2] + m[2, 0], m[2, 1] + m[1, 2], t])
return t, q
def case3(m):
t = 1 + m[0, 0] + m[1, 1] + m[2, 2]
q = jnp.array([t, m[2, 1] - m[1, 2], m[0, 2] - m[2, 0], m[1, 0] - m[0, 1]])
return t, q
# Compute four cases, then pick the most precise one
# Probably worth revisiting this!
case0_t, case0_q = case0(matrix)
case1_t, case1_q = case1(matrix)
case2_t, case2_q = case2(matrix)
case3_t, case3_q = case3(matrix)
cond0 = matrix[2, 2] < 0
cond1 = matrix[0, 0] > matrix[1, 1]
cond2 = matrix[0, 0] < -matrix[1, 1]
t = jnp.where(
cond0,
jnp.where(cond1, case0_t, case1_t),
jnp.where(cond2, case2_t, case3_t),
)
q = jnp.where(
cond0,
jnp.where(cond1, case0_q, case1_q),
jnp.where(cond2, case2_q, case3_q),
)
# # We can also choose to branch, but this is slower
# t, q = jax.lax.cond(
# matrix[2, 2] < 0,
# true_fun=lambda matrix: jax.lax.cond(
# matrix[0, 0] > matrix[1, 1],
# true_fun=case0,
# false_fun=case1,
# operand=matrix,
# ),
# false_fun=lambda matrix: jax.lax.cond(
# matrix[0, 0] < -matrix[1, 1],
# true_fun=case2,
# false_fun=case3,
# operand=matrix,
# ),
# operand=matrix,
# )
return SO3(wxyz=q * 0.5 / jnp.sqrt(t))
# Accessors
@overrides
def as_matrix(self) -> hints.MatrixJax:
norm = self.wxyz @ self.wxyz
q = self.wxyz * jnp.sqrt(2.0 / norm)
q = jnp.outer(q, q)
return jnp.array(
[
[1.0 - q[2, 2] - q[3, 3], q[1, 2] - q[3, 0], q[1, 3] + q[2, 0]],
[q[1, 2] + q[3, 0], 1.0 - q[1, 1] - q[3, 3], q[2, 3] - q[1, 0]],
[q[1, 3] - q[2, 0], q[2, 3] + q[1, 0], 1.0 - q[1, 1] - q[2, 2]],
]
)
@overrides
def parameters(self) -> hints.Vector:
return self.wxyz
# Operations
@overrides
def apply(self: "SO3", target: hints.Vector) -> hints.VectorJax:
assert target.shape == (3,)
# Compute using quaternion multiplies
padded_target = jnp.zeros(4).at[1:].set(target)
return (self @ SO3(wxyz=padded_target) @ self.inverse()).wxyz[1:]
@overrides
def multiply(self: "SO3", other: "SO3") -> "SO3":
w0, x0, y0, z0 = self.wxyz
w1, x1, y1, z1 = other.wxyz
return SO3(
wxyz=jnp.array(
[
-x0 * x1 - y0 * y1 - z0 * z1 + w0 * w1,
x0 * w1 + y0 * z1 - z0 * y1 + w0 * x1,
-x0 * z1 + y0 * w1 + z0 * x1 + w0 * y1,
x0 * y1 - y0 * x1 + z0 * w1 + w0 * z1,
]
)
)
@staticmethod
@overrides
def exp(tangent: hints.TangentVector) -> "SO3":
# Reference:
# > https://github.com/strasdat/Sophus/blob/a0fe89a323e20c42d3cecb590937eb7a06b8343a/sophus/so3.hpp#L583
assert tangent.shape == (3,)
theta_squared = tangent @ tangent
theta_pow_4 = theta_squared * theta_squared
use_taylor = theta_squared < get_epsilon(tangent.dtype)
# Shim to avoid NaNs in jnp.where branches, which cause failures for
# reverse-mode AD
safe_theta = jnp.sqrt(
jnp.where(
use_taylor,
0.0, # Any constant value should do here
theta_squared,
)
)
safe_half_theta = 0.5 * safe_theta
real_factor = jnp.where(
use_taylor,
1.0 - theta_squared / 8.0 + theta_pow_4 / 384.0,
jnp.cos(safe_half_theta),
)
imaginary_factor = jnp.where(
use_taylor,
0.5 - theta_squared / 48.0 + theta_pow_4 / 3840.0,
jnp.sin(safe_half_theta) / safe_theta,
)
return SO3(
wxyz=jnp.concatenate(
[
real_factor[None],
imaginary_factor * tangent,
]
)
)
@overrides
def log(self: "SO3") -> hints.TangentVectorJax:
# Reference:
# > https://github.com/strasdat/Sophus/blob/a0fe89a323e20c42d3cecb590937eb7a06b8343a/sophus/so3.hpp#L247
w = self.wxyz[..., 0]
norm_sq = self.wxyz[..., 1:] @ self.wxyz[..., 1:]
use_taylor = norm_sq < get_epsilon(norm_sq.dtype)
# Shim to avoid NaNs in jnp.where branches, which cause failures for
# reverse-mode AD
norm_safe = jnp.sqrt(
jnp.where(
use_taylor,
1.0, # Any non-zero value should do here
norm_sq,
)
)
atan_factor = jnp.where(
use_taylor,
2.0 / w - 2.0 / 3.0 * norm_sq / (w ** 3),
jnp.where(
jnp.abs(w) < get_epsilon(w.dtype),
jnp.where(w > 0, 1.0, -1.0) * jnp.pi / norm_safe,
2.0 * jnp.arctan(norm_safe / w) / norm_safe,
),
)
return atan_factor * self.wxyz[1:]
@overrides
def adjoint(self: "SO3") -> hints.MatrixJax:
return self.as_matrix()
@overrides
def inverse(self: "SO3") -> "SO3":
# Negate complex terms
return SO3(wxyz=self.wxyz * onp.array([1, -1, -1, -1]))
@overrides
def normalize(self: "SO3") -> "SO3":
return SO3(wxyz=self.wxyz / jnp.linalg.norm(self.wxyz))
@staticmethod
@overrides
def sample_uniform(key: jnp.ndarray) -> "SO3":
# Uniformly sample over S^3 (unit quaternions)
# > Reference: http://planning.cs.uiuc.edu/node198.html
u1, u2, u3 = jax.random.uniform(
key=key,
shape=(3,),
minval=jnp.zeros(3),
maxval=jnp.array([1.0, 2.0 * jnp.pi, 2.0 * jnp.pi]),
)
a = jnp.sqrt(1.0 - u1)
b = jnp.sqrt(u1)
return SO3(
wxyz=jnp.array(
[
a * jnp.sin(u2),
a * jnp.cos(u2),
b * jnp.sin(u3),
b * jnp.cos(u3),
]
)
)
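# Quick sanity sketch (not part of the library module): round-trips between the Lie
# algebra and the group and applies a rotation to a point, using only methods defined
# above; the angles are arbitrary sample values.
if __name__ == "__main__":
    R = SO3.from_rpy_radians(roll=0.1, pitch=0.2, yaw=0.3)
    print(R.as_matrix())                        # 3x3 rotation matrix
    print(SO3.exp(R.log()).parameters())        # same rotation, wxyz recovered up to sign
    print(R.apply(jnp.array([1.0, 0.0, 0.0])))  # rotated unit vector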
|
import datetime
from applications.models import db, ma
from marshmallow import fields
class Power(db.Model):
__tablename__ = 'admin_power'
id = db.Column(db.Integer, primary_key=True, comment='权限编号')
name = db.Column(db.String(255), comment='权限名称')
type = db.Column(db.String(1), comment='权限类型')
code = db.Column(db.String(30), comment='权限标识')
url = db.Column(db.String(255), comment='权限路径')
open_type = db.Column(db.String(10), comment='打开方式')
parent_id = db.Column(db.Integer, comment='父类编号')
icon = db.Column(db.String(128), comment='图标')
sort = db.Column(db.Integer, comment='排序')
create_time = db.Column(db.DateTime, default=datetime.datetime.now, comment='创建时间')
update_time = db.Column(db.DateTime, default=datetime.datetime.now, onupdate=datetime.datetime.now, comment='更新时间')
enable = db.Column(db.Integer, comment='是否开启')
# Serialization schema for the Power model
class PowerSchema(ma.Schema):
id = fields.Integer()
title = fields.Str(attribute="name")
type = fields.Str()
code = fields.Str()
href = fields.Str(attribute="url")
openType = fields.Str(attribute="open_type")
parent_id = fields.Integer()
icon = fields.Str()
sort = fields.Integer()
create_time = fields.DateTime()
update_time = fields.DateTime()
enable = fields.Integer()
class PowerSchema2(ma.Schema):  # serialization schema (camelCase field names)
powerId = fields.Str(attribute="id")
powerName = fields.Str(attribute="name")
powerType = fields.Str(attribute="type")
powerUrl = fields.Str(attribute="url")
openType = fields.Str(attribute="open_type")
parentId = fields.Str(attribute="parent_id")
icon = fields.Str()
sort = fields.Integer()
create_time = fields.DateTime()
update_time = fields.DateTime()
enable = fields.Integer()
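# Usage sketch (illustrative; requires a Flask app context and a populated admin_power
# table, neither shown here): PowerSchema renames columns for the front end
# (name -> title, url -> href, open_type -> openType), while PowerSchema2 exposes
# camelCase field names instead.
#
# power_schema = PowerSchema(many=True)
# rows = Power.query.order_by(Power.sort).all()
# payload = power_schema.dump(rows)   # list of dicts with keys: id, title, href, openType, ...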
|
# pylint: disable=R0903
"""
False positive case of E1101:
The error is triggered when the attribute set in the base class is
modified with augmented assignment in a derived class.
http://www.logilab.org/ticket/9588
"""
__revision__ = 0
class BaseClass(object):
"The base class"
def __init__(self):
"Set an attribute."
self.e1101 = 1
class FalsePositiveClass(BaseClass):
"The first derived class which triggers the false positive"
def __init__(self):
"Augmented assignment triggers E1101."
BaseClass.__init__(self)
self.e1101 += 1
def countup(self):
"Consequently this also triggers E1101."
self.e1101 += 1
class NegativeClass(BaseClass):
"The second derived class, which does not trigger the error E1101"
def __init__(self):
"Ordinary assignment is OK."
BaseClass.__init__(self)
self.e1101 = self.e1101 + 1
def countup(self):
"No problem."
self.e1101 += 1
|
import dash_bootstrap_components as dbc
def Navbar():
navbar = dbc.NavbarSimple(
children=[
dbc.DropdownMenu(
nav=True,
in_navbar=True,
label="Models",
children=[
dbc.DropdownMenuItem("RF",href="/model"),
dbc.DropdownMenuItem(divider=True),
dbc.DropdownMenuItem("LR",href="/Logistic"),
dbc.DropdownMenuItem(divider=True),
dbc.DropdownMenuItem("GBM",href="/gbm"),
],
),
],
brand="Home",
brand_href="/home",
sticky="top",
)
return navbar
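# Usage sketch (assumes a standard multi-page Dash app; `app` and `body` are placeholder
# names, not part of this module, and the html import path depends on the Dash version):
#
# import dash
# from dash import html
# app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
# app.layout = html.Div([Navbar(), body])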
|
import unittest
from mock import patch
from tornwamp.messages import Code, ErrorMessage, PublishMessage, SubscribeMessage
from tornwamp.processors.pubsub import PublishProcessor, SubscribeProcessor, customize
from tornwamp.session import ClientConnection
class SubscribeProcessorTestCase(unittest.TestCase):
def test_succeed(self):
message = SubscribeMessage(request_id=123, topic="olympic.games")
connection = ClientConnection(None)
processor = SubscribeProcessor(message, connection)
answer = processor.answer_message
self.assertEqual(answer.code, Code.SUBSCRIBED)
self.assertIsInstance(answer.subscription_id, int)
self.assertEqual(answer.request_id, 123)
@patch("tornwamp.processors.pubsub.customize.authorize_subscription", return_value=(False, "Your problem"))
def test_fail(self, mock_authorize):
message = SubscribeMessage(request_id=234, topic="olympic.games")
connection = ClientConnection(None)
processor = SubscribeProcessor(message, connection)
answer = processor.answer_message
self.assertEqual(answer.code, Code.ERROR)
self.assertEqual(answer.request_id, 234)
self.assertEqual(answer.request_code, Code.SUBSCRIBE)
self.assertEqual(answer.uri, "tornwamp.subscribe.unauthorized")
class PublishProcessorTestCase(unittest.TestCase):
def setUp(self):
super(PublishProcessorTestCase, self).setUp()
self.old_publish_messages = customize.get_publish_messages
def tearDown(self):
super(PublishProcessorTestCase, self).tearDown()
customize.get_publish_messages = self.old_publish_messages
def test_succeed_without_acknowledge(self):
message = PublishMessage(request_id=345, topic="world.cup")
connection = ClientConnection(None)
processor = PublishProcessor(message, connection)
answer = processor.answer_message
self.assertEqual(answer, None)
def test_succeed_with_acknowledge(self):
options = {"acknowledge": True}
message = PublishMessage(request_id=345, topic="world.cup", options=options)
connection = ClientConnection(None)
processor = PublishProcessor(message, connection)
answer = processor.answer_message
self.assertEqual(answer.code, Code.PUBLISHED)
self.assertEqual(answer.request_id, 345)
@patch("tornwamp.processors.pubsub.customize.authorize_publication", return_value=(False, "Your problem"))
def test_fail(self, mock_authorize):
message = PublishMessage(request_id=456, topic="world.cup")
connection = ClientConnection(None)
processor = PublishProcessor(message, connection)
answer = processor.answer_message
self.assertEqual(answer.code, Code.ERROR)
self.assertEqual(answer.request_id, 456)
self.assertEqual(answer.request_code, Code.PUBLISH)
self.assertEqual(answer.uri, "tornwamp.publish.unauthorized")
def test_use_customized_message_if_available(self):
options = {"acknowledge": True}
expected_answer = ErrorMessage(
request_id=345,
request_code=16,
uri="something.is.wrong"
)
def error(*args, **kwargs):
return None, expected_answer
customize.get_publish_messages = error
message = PublishMessage(request_id=345, topic="world.cup", options=options)
connection = ClientConnection(None)
processor = PublishProcessor(message, connection)
answer = processor.answer_message
self.assertEqual(answer, expected_answer)
|
#!/usr/bin/python
# This is statement is required by the build system to query build info
if __name__ == '__build__':
raise Exception
'''
lines.c from the Redbook examples.
Converted to Python by Jason L. Petrone 6/00
/*
* lines.c
* This program demonstrates geometric primitives and
* their attributes.
*/
Copyright (c) 1993-1997, Silicon Graphics, Inc.
ALL RIGHTS RESERVED
Permission to use, copy, modify, and distribute this software for
any purpose and without fee is hereby granted, provided that the above
copyright notice appear in all copies and that both the copyright notice
and this permission notice appear in supporting documentation, and that
the name of Silicon Graphics, Inc. not be used in advertising
or publicity pertaining to distribution of the software without specific,
written prior permission.
THE MATERIAL EMBODIED ON THIS SOFTWARE IS PROVIDED TO YOU "AS-IS"
AND WITHOUT WARRANTY OF ANY KIND, EXPRESS, IMPLIED OR OTHERWISE,
INCLUDING WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY OR
FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT SHALL SILICON
GRAPHICS, INC. BE LIABLE TO YOU OR ANYONE ELSE FOR ANY DIRECT,
SPECIAL, INCIDENTAL, INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY
KIND, OR ANY DAMAGES WHATSOEVER, INCLUDING WITHOUT LIMITATION,
LOSS OF PROFIT, LOSS OF USE, SAVINGS OR REVENUE, OR THE CLAIMS OF
THIRD PARTIES, WHETHER OR NOT SILICON GRAPHICS, INC. HAS BEEN
ADVISED OF THE POSSIBILITY OF SUCH LOSS, HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, ARISING OUT OF OR IN CONNECTION WITH THE
POSSESSION, USE OR PERFORMANCE OF THIS SOFTWARE.
US Government Users Restricted Rights
Use, duplication, or disclosure by the Government is subject to
restrictions set forth in FAR 52.227.19(c)(2) or subparagraph
(c)(1)(ii) of the Rights in Technical Data and Computer Software
clause at DFARS 252.227-7013 and/or in similar or successor
clauses in the FAR or the DOD or NASA FAR Supplement.
Unpublished-- rights reserved under the copyright laws of the
United States. Contractor/manufacturer is Silicon Graphics,
Inc., 2011 N. Shoreline Blvd., Mountain View, CA 94039-7311.
OpenGL(R) is a registered trademark of Silicon Graphics, Inc.
'''
import sys
try:
from OpenGL.GLUT import *
from OpenGL.GL import *
from OpenGL.GLU import *
except:
print('''
ERROR: PyOpenGL not installed properly.
''')
sys.exit()
def drawOneLine(x1, y1, x2, y2):
glBegin(GL_LINES)
glVertex2f(x1, y1)
glVertex2f(x2, y2)
glEnd()
def init():
glClearColor(0.0, 0.0, 0.0, 0.0)
glShadeModel(GL_FLAT)
def display():
glClear(GL_COLOR_BUFFER_BIT)
# select white for all lines
glColor3f(1.0, 1.0, 1.0)
# in 1st row, 3 lines, each with a different stipple
glEnable(GL_LINE_STIPPLE)
glLineStipple (1, 0x0101) # dotted
drawOneLine (50.0, 125.0, 150.0, 125.0)
glLineStipple (1, 0x00FF) # dashed
drawOneLine (150.0, 125.0, 250.0, 125.0)
glLineStipple (1, 0x1C47) # dash/dot/dash
drawOneLine (250.0, 125.0, 350.0, 125.0)
# in 2nd row, 3 wide lines, each with different stipple
glLineWidth(5.0)
glLineStipple(1, 0x0101) # dotted
drawOneLine(50.0, 100.0, 150.0, 100.0)
glLineStipple(1, 0x00FF) # dashed
drawOneLine(150.0, 100.0, 250.0, 100.0)
glLineStipple(1, 0x1C47) # dash/dot/dash
drawOneLine(250.0, 100.0, 350.0, 100.0)
glLineWidth(1.0)
# in 3rd row, 6 lines, with dash/dot/dash stipple
# as part of a single connected line strip
glLineStipple (1, 0x1C47) # dash/dot/dash
glBegin (GL_LINE_STRIP)
for i in range(0, 7):
glVertex2f(50.0 + (i * 50.0), 75.0)
glEnd()
# in 4th row, 6 independent lines with same stipple
for i in range(0, 6):
drawOneLine (50.0 + (i * 50.0), 50.0, 50.0 + ((i+1) * 50.0), 50.0)
# in 5th row, 1 line, with dash/dot/dash stipple
# and a stipple repeat factor of 5
glLineStipple (5, 0x1C47) # dash/dot/dash
drawOneLine (50.0, 25.0, 350.0, 25.0)
glDisable (GL_LINE_STIPPLE)
glFlush ()
def reshape(w, h):
glViewport(0, 0, w, h)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluOrtho2D(0.0, w, 0.0, h)
def keyboard(key, x, y):
if key == chr(27):
sys.exit(0)
glutInit(sys.argv)
glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB)
glutInitWindowSize(400, 150)
glutInitWindowPosition(100, 100)
glutCreateWindow('Lines')
init()
glutDisplayFunc(display)
glutReshapeFunc(reshape)
glutKeyboardFunc(keyboard)
glutMainLoop()
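# Side note (illustrative): each stipple mask above is a 16-bit pattern that OpenGL reads
# starting from the least significant bit, which is why 0x0101 lights one pixel out of
# every eight (dotted) and 0x1C47 produces the dash/dot/dash pattern.
# format(0x0101, '016b')  ->  '0000000100000001'
# format(0x1C47, '016b')  ->  '0001110001000111'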
|
import binascii
from tokenservices.jsonrpc.handlers import JsonRPCBase, map_jsonrpc_arguments
from tokenservices.jsonrpc.errors import JsonRPCInvalidParamsError, JsonRPCInternalError, JsonRPCError
from tokenservices.database import DatabaseMixin
from tokenservices.ethereum.mixin import EthereumMixin
from tokenservices.redis import RedisMixin
from tokenservices.ethereum.utils import data_decoder, data_encoder
from functools import partial
from tornado.ioloop import IOLoop
from tokenservices.utils import (
validate_address, parse_int, validate_signature, validate_transaction_hash
)
from tokenservices.ethereum.tx import (
DEFAULT_STARTGAS, DEFAULT_GASPRICE, create_transaction,
encode_transaction, decode_transaction, is_transaction_signed,
signature_from_transaction, add_signature_to_transaction,
transaction_to_json
)
from tokenservices.log import log
from .mixins import BalanceMixin
from .utils import RedisLock
class JsonRPCInsufficientFundsError(JsonRPCError):
def __init__(self, *, request=None, data=None):
super().__init__(request.get('id') if request else None,
-32000, "Insufficient Funds", data,
'id' not in request if request else False)
class TokenEthJsonRPC(JsonRPCBase, BalanceMixin, DatabaseMixin, EthereumMixin, RedisMixin):
def __init__(self, user_token_id, application):
self.user_token_id = user_token_id
self.application = application
async def get_balance(self, address):
if not validate_address(address):
raise JsonRPCInvalidParamsError(data={'id': 'invalid_address', 'message': 'Invalid Address'})
confirmed, unconfirmed = await self.get_balances(address)
return {
"confirmed_balance": hex(confirmed),
"unconfirmed_balance": hex(unconfirmed)
}
@map_jsonrpc_arguments({'from': 'from_address', 'to': 'to_address'})
async def create_transaction_skeleton(self, *, to_address, from_address, value=0, nonce=None, gas=None, gas_price=None, data=None):
if not validate_address(from_address):
raise JsonRPCInvalidParamsError(data={'id': 'invalid_from_address', 'message': 'Invalid From Address'})
if not validate_address(to_address):
raise JsonRPCInvalidParamsError(data={'id': 'invalid_to_address', 'message': 'Invalid To Address'})
if value:
value = parse_int(value)
if value is None:
raise JsonRPCInvalidParamsError(data={'id': 'invalid_value', 'message': 'Invalid Value'})
# check optional arguments
if nonce is None:
# check cache for nonce
nonce = self.redis.get("nonce:{}".format(from_address))
if nonce:
nonce = int(nonce)
# get the network's value too
nw_nonce = await self.eth.eth_getTransactionCount(from_address)
if nonce is None or nw_nonce > nonce:
# if not cached, or the cached value is lower than
# the network value, use the network value!
nonce = nw_nonce
else:
nonce = parse_int(nonce)
if nonce is None:
raise JsonRPCInvalidParamsError(data={'id': 'invalid_nonce', 'message': 'Invalid Nonce'})
if data is not None:
if isinstance(data, int):
data = hex(data)
if isinstance(data, str):
try:
data = data_decoder(data)
except binascii.Error:
pass
if not isinstance(data, bytes):
raise JsonRPCInvalidParamsError(data={'id': 'invalid_data', 'message': 'Invalid Data field'})
else:
data = b''
if gas is None:
# if there is data the default startgas value wont be enough
if data:
gas = await self.eth.eth_estimateGas(from_address, to_address, nonce=nonce, data=data)
else:
gas = DEFAULT_STARTGAS
else:
gas = parse_int(gas)
if gas is None:
raise JsonRPCInvalidParamsError(data={'id': 'invalid_gas', 'message': 'Invalid Gas'})
if gas_price is None:
gas_price = DEFAULT_GASPRICE
else:
gas_price = parse_int(gas_price)
if gas_price is None:
raise JsonRPCInvalidParamsError(data={'id': 'invalid_gas_price', 'message': 'Invalid Gas Price'})
tx = create_transaction(nonce=nonce, gasprice=gas_price, startgas=gas,
to=to_address, value=value, data=data)
transaction = encode_transaction(tx)
return transaction
async def send_transaction(self, *, tx, signature=None):
try:
tx = decode_transaction(tx)
        except Exception:
raise JsonRPCInvalidParamsError(data={'id': 'invalid_transaction', 'message': 'Invalid Transaction'})
if is_transaction_signed(tx):
if signature:
tx_sig = signature_from_transaction(tx)
if tx_sig != signature:
raise JsonRPCInvalidParamsError(data={
'id': 'invalid_signature',
'message': 'Invalid Signature: Signature in payload and signature of transaction do not match'
})
else:
if signature is None:
raise JsonRPCInvalidParamsError(data={'id': 'missing_signature', 'message': 'Missing Signature'})
if not validate_signature(signature):
raise JsonRPCInvalidParamsError(data={
'id': 'invalid_signature',
'message': 'Invalid Signature: {}'.format(
'Invalid length' if len(signature) != 132 else 'Invalid hex value')
})
try:
signature = data_decoder(signature)
except Exception:
log.exception("Unexpected error decoding valid signature: {}".format(signature))
raise JsonRPCInvalidParamsError(data={
'id': 'invalid_signature',
'message': 'Invalid Signature'
})
add_signature_to_transaction(tx, signature)
from_address = data_encoder(tx.sender)
to_address = data_encoder(tx.to)
# prevent spamming of transactions with the same nonce from the same sender
with RedisLock(self.redis, "{}:{}".format(from_address, tx.nonce),
raise_when_locked=partial(JsonRPCInvalidParamsError, data={'id': 'invalid_nonce', 'message': 'Nonce already used'}),
ex=5):
# disallow transaction overwriting for known transactions
async with self.db:
existing = await self.db.fetchrow("SELECT * FROM transactions WHERE "
"from_address = $1 AND nonce = $2 AND last_status != $3",
from_address, tx.nonce, 'error')
if existing:
# debugging checks
existing_tx = await self.eth.eth_getTransactionByHash(existing['transaction_hash'])
raise JsonRPCInvalidParamsError(data={'id': 'invalid_nonce', 'message': 'Nonce already used'})
# make sure the account has enough funds for the transaction
network_balance, balance = await self.get_balances(from_address, ignore_pending_recieved=True)
log.info("Attempting to send transaction\n{} -> {}\nValue: {} + {} (gas) * {} (startgas) = {}\nSender's Balance {} ({} unconfirmed)".format(
from_address, to_address, tx.value, tx.startgas, tx.gasprice, tx.value + (tx.startgas * tx.gasprice), network_balance, balance))
if balance < (tx.value + (tx.startgas * tx.gasprice)):
raise JsonRPCInsufficientFundsError(data={'id': 'insufficient_funds', 'message': 'Insufficient Funds'})
# validate the nonce
c_nonce = self.redis.get("nonce:{}".format(from_address))
if c_nonce:
c_nonce = int(c_nonce)
# get the network's value too
nw_nonce = await self.eth.eth_getTransactionCount(from_address)
if c_nonce is None or nw_nonce > c_nonce:
c_nonce = nw_nonce
if tx.nonce < c_nonce:
raise JsonRPCInvalidParamsError(data={'id': 'invalid_nonce', 'message': 'Provided nonce is too low'})
if tx.nonce > c_nonce:
raise JsonRPCInvalidParamsError(data={'id': 'invalid_nonce', 'message': 'Provided nonce is too high'})
# send the transaction to the network
try:
tx_encoded = encode_transaction(tx)
tx_hash = await self.eth.eth_sendRawTransaction(tx_encoded)
except JsonRPCError as e:
log.error(e.format())
raise JsonRPCInternalError(data={
'id': 'unexpected_error',
                    'message': 'An error occurred while communicating with the ethereum network, try again later'
})
# cache nonce
self.redis.set("nonce:{}".format(from_address), tx.nonce + 1)
# add tx to database
async with self.db:
await self.db.execute(
"INSERT INTO transactions "
"(transaction_hash, from_address, to_address, nonce, value, estimated_gas_cost, sender_token_id) "
"VALUES ($1, $2, $3, $4, $5, $6, $7)",
tx_hash, from_address, to_address, tx.nonce, str(tx.value), str(tx.startgas * tx.gasprice), self.user_token_id)
await self.db.commit()
# if there is a block monitor, force send PNs for this without
# waiting for the node to see it
if hasattr(self.application, 'monitor'):
txjson = transaction_to_json(tx)
assert txjson['hash'] == tx_hash
IOLoop.current().add_callback(self.application.monitor.send_transaction_notifications, txjson)
return tx_hash
async def get_transaction(self, tx_hash):
if not validate_transaction_hash(tx_hash):
raise JsonRPCInvalidParamsError(data={'id': 'invalid_transaction_hash', 'message': 'Invalid Transaction Hash'})
tx = await self.eth.eth_getTransactionByHash(tx_hash)
return tx
async def subscribe(self, *addresses):
insert_args = []
for address in addresses:
if not validate_address(address):
raise JsonRPCInvalidParamsError(data={'id': 'bad_arguments', 'message': 'Bad Arguments'})
insert_args.extend([self.user_token_id, address])
async with self.db:
await self.db.execute(
"INSERT INTO notification_registrations VALUES {} ON CONFLICT DO NOTHING".format(
', '.join('(${}, ${})'.format((i * 2) + 1, (i * 2) + 2) for i, _ in enumerate(addresses))),
*insert_args)
await self.db.commit()
return True
async def unsubscribe(self, *addresses):
for address in addresses:
if not validate_address(address):
raise JsonRPCInvalidParamsError(data={'id': 'bad_arguments', 'message': 'Bad Arguments'})
async with self.db:
await self.db.execute(
"DELETE FROM notification_registrations WHERE token_id = $1 AND ({})".format(
' OR '.join('eth_address = ${}'.format(i + 2) for i, _ in enumerate(addresses))),
self.user_token_id, *addresses)
await self.db.commit()
return True
async def list_subscriptions(self):
async with self.db:
rows = await self.db.fetch(
"SELECT eth_address FROM notification_registrations WHERE token_id = $1",
self.user_token_id)
return [row['eth_address'] for row in rows]
|
import warnings
from sklearn.model_selection import train_test_split
from Logging.logging import Logger
from Training_Clustering.clustering import Cluster
from Training_Preprocessing.preprocessor import Preprocessor
from Training_data_ingestion.data_loading_train import DataGetter
from model_methods.model_methods import modelMethods
from model_tuner.modeltuner import modelTuner
warnings.simplefilter(action='ignore', category=FutureWarning)
class modelTraining:
"""
    Description: This class contains a method used to train a machine learning model for each data cluster
Written By: Shivam Shinde
Version: 1.0
Revision: None
:returns: None
"""
def __init__(self):
self.file_obj = open("TrainingLogs/ModelTraining.txt", "a+")
self.logger = Logger()
def trainingModels(self):
"""
        Description: This method is used to train the machine learning model for every cluster of the data
Written By: Shivam Shinde
Version: 1.0
Revision: None
:return: None
"""
try:
self.logger.log(
self.file_obj, "*************MACHINE LEARNING MODEL TRAINING FOR ALL THE CLUSTERS STARTED**************")
# preprocessing the obtained data
self.logger.log(self.file_obj, "Training_Preprocessing of the data started!!")
p = Preprocessor()
X, y = p.preprocess()
self.logger.log(self.file_obj, "Training_Preprocessing of the data completed!!")
# clustering the training and testing data into the same number of clusters
self.logger.log(self.file_obj, "Training_Clustering of the data started!!")
c = Cluster()
noOfClusters = c.createElbowPlot(X)
X = c.createCluster(X, noOfClusters)
self.logger.log(self.file_obj, "Training_Clustering of the data completed!!")
# Adding one more column to X i.e. dependent feature
X['Price'] = y
# finding the unique numbers in the ClusterNumber column of the X
clusters = X['ClusterNumber'].unique()
for i in clusters:
self.logger.log(self.file_obj, f"*************for the cluster number {i}**************")
clusterData = X[X['ClusterNumber'] == i]
clusterFeatures = clusterData.drop(
columns=['ClusterNumber', 'Price'], axis=1)
clusterLabel = clusterData['Price']
# splitting the cluster data into train and test data
X_train, X_test, y_train, y_test = train_test_split(
clusterFeatures, clusterLabel, test_size=0.2, random_state=348724)
self.logger.log(self.file_obj,f"Finding the best model for the cluster {i}")
# finding the best model for this cluster
mt = modelTuner()
bestModelName, bestModel = mt.bestModelFinder(
X_train, X_test, y_train, y_test)
# saving the best model obtained
mm = modelMethods()
mm.modelSaving(bestModel, bestModelName, i)
self.logger.log(
self.file_obj,
f"Training of the machine learning model for the data cluster {i} successfully completed")
self.logger.log(self.file_obj, "***************MACHINE LEARNING MODEL TRAINING FOR ALL CLUSTERS COMPLETED "
"SUCCESSFULLY*************")
except Exception as e:
self.logger.log(
self.file_obj, f"Exception occurred while training the machine learning model. Exception: {str(e)}")
raise e
|
import os
from PIL import Image
import base45
import zlib
import cbor2
import flynn
from pyzbar.pyzbar import decode
from datetime import date, datetime, time, timedelta, timezone
from cose.messages import CoseMessage
from cryptography import x509
from cose.keys import EC2Key
import cose.headers
import requests
from werkzeug.datastructures import FileStorage
from pdf2image import convert_from_bytes
import json
from twogplus import app
# Source: https://github.com/ehn-dcc-development/ehn-dcc-schema/blob/release/1.3.0/valuesets/disease-agent-targeted.json
COVID_19_ID = "840539006"
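# EU Digital COVID Certificate payloads keep the health certificate under CBOR claim key -260; vaccination entries sit
# under "v" (dt = vaccination date, dn = dose number, sd = doses in series) and test entries under "t".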
def calc_vaccinated_till(data) -> date:
hcert = data[-260][1]["v"][0]
vaccination_date = date.fromisoformat(hcert["dt"])
valid_until = None
    # Check that this is the last dose of the series
if hcert["dn"] != hcert["sd"]:
raise Exception("With this certificate you are not fully immunized.")
    # A single-dose vaccine (Johnson & Johnson) is no longer considered valid on its own
    if hcert["sd"] == 1:
        raise Exception("With this certificate (Johnson & Johnson) you are not fully immunized.")
    # Otherwise validity was reduced from 270 days to 5 months (150 days); after that a booster is required
else:
valid_until = vaccination_date + timedelta(days=150)
if valid_until < date.today():
raise Exception("This certificate is already expired. (older than 5 months)")
return valid_until
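# Fetch a CBOR resource (e.g. the trust list used below) from the Austrian DGC trust endpoint, caching the raw bytes on disk for up to 12 hours.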
def fetch_austria_data(ressource: str):
# Check if the cache is still hot
cache_filename = os.path.join(app.instance_path, f"{ressource}.cache")
try:
with open(cache_filename, "rb") as f:
            cache_time = os.path.getmtime(cache_filename)
if (time.time() - cache_time) / 3600 > 12:
raise Exception()
return cbor2.loads(f.read())
except Exception:
pass
# Not in cache so lets download it
r = requests.get(f"https://dgc-trust.qr.gv.at/{ressource}")
if r.status_code != 200:
raise Exception("Unable to reach austria public key gateway")
content = r.content
# Update the cache
with open(cache_filename, "wb") as f:
f.write(content)
return cbor2.loads(content)
# This code is adapted from the following repository:
# https://github.com/lazka/pygraz-covid-cert
def assert_cert_sign(cose_data: bytes):
cose_msg = CoseMessage.decode(cose_data)
required_kid = cose_msg.get_attr(cose.headers.KID)
trustlist = fetch_austria_data("trustlist")
for entry in trustlist["c"]:
kid = entry["i"]
cert = entry["c"]
if kid == required_kid:
break
else:
raise Exception(
f"Unable validate certificate signature: kid '{required_kid}' not found"
)
found_cert = cert
NOW = datetime.now(timezone.utc)
cert = x509.load_der_x509_certificate(found_cert)
if NOW < cert.not_valid_before.replace(tzinfo=timezone.utc):
raise Exception("cert not valid")
if NOW > cert.not_valid_after.replace(tzinfo=timezone.utc):
raise Exception("cert not valid")
# Convert the CERT to a COSE key and verify the signature
    # WARNING: we assume ES256 here, although other algorithms are allowed by the spec
assert cose_msg.get_attr(cose.headers.Algorithm).fullname == "ES256"
public_key = cert.public_key()
x = public_key.public_numbers().x.to_bytes(32, "big")
y = public_key.public_numbers().y.to_bytes(32, "big")
cose_key = EC2Key(crv="P_256", x=x, y=y)
cose_msg.key = cose_key
if not cose_msg.verify_signature():
raise Exception("Unable to validate certificate signature")
print("Validated certificate :)")
def verify_vaccinated_cert(file: FileStorage) -> str:
# if the file is a pdf convert it to an image
if file.filename.rsplit(".", 1)[1].lower() == "pdf":
img = convert_from_bytes(file.read())[0]
else:
img = Image.open(file)
# decode the qr code
result = decode(img)
if result == []:
raise Exception("No QR Code was detected in the image")
# decode base45
data_zlib = base45.b45decode(result[0].data[4:])
# decompress zlib
cose_data = zlib.decompress(data_zlib)
# TODO: I think cbor2 is a more modern library than flynn
# decode cose
cbor_data = flynn.decoder.loads(cose_data)[1][2]
# decode cbor
data = flynn.decoder.loads(cbor_data)
# Verify that this is a vaccine certificate
if "v" not in data[-260][1]:
message = "The certificate must be for a vaccination."
raise Exception(message)
# Verify the data now
if COVID_19_ID != data[-260][1]["v"][0]["tg"]:
raise Exception("The certificate must be for covid19")
# Verify the certificate signature
assert_cert_sign(cose_data)
# Verify the expiration date is ok for the event
event_date = date.fromisoformat(app.config["EVENT_DATE"])
if calc_vaccinated_till(data) < event_date:
raise Exception(
f"Your vaccine will expire before the event at {event_date}"
)
# Return the name from the certificate
return data[-260][1]["nam"]["gnt"] + " " + data[-260][1]["nam"]["fnt"]
def verify_test_cert(file: FileStorage) -> str:
# if the file is a pdf convert it to an image
if file.filename.rsplit(".", 1)[1].lower() == "pdf":
img = convert_from_bytes(file.read())[0]
else:
img = Image.open(file)
# decode the qr code
result = decode(img)
if result == []:
raise Exception("No QR Code was detected in the image")
# decode base45
data_zlib = base45.b45decode(result[0].data[4:])
# decompress zlib
cose_data = zlib.decompress(data_zlib)
# TODO: I think cbor2 is a more modern library than flynn
# decode cose
cbor_data = flynn.decoder.loads(cose_data)[1][2]
# decode cbor
data = flynn.decoder.loads(cbor_data)
# Verify that this is a test certificate
if "t" not in data[-260][1]:
message = "The certificate must be for a test"
raise Exception(message)
# Verify the data now
if COVID_19_ID != data[-260][1]["t"][0]["tg"]:
raise Exception("The test must be for covid19")
# Verify that test was negative
if "260415000" != data[-260][1]["t"][0]["tr"]:
id = data[-260][1]["t"][0]["tr"]
raise Exception(f"The test was not negative ({id})")
# Verify a pcr test
if "nm" not in data[-260][1]["t"][0]:
raise Exception("We only allow PCR tests.")
# Verify the certificate signature
assert_cert_sign(cose_data)
# Verify the expiration date is ok for the event
event_date = datetime.fromisoformat(app.config["EVENT_DATE"])
event_date += timedelta(hours=(24 + 6))
time_of_test = datetime.fromisoformat(data[-260][1]["t"][0]["sc"][:-1])
if time_of_test + timedelta(hours=48) < event_date:
raise Exception(
f"Your test will expire before the event at {event_date}\n"
f"Time of test: {time_of_test}\nValid until: {time_of_test + timedelta(hours=48)}"
)
# Return the name from the certificate
return data[-260][1]["nam"]["gnt"] + " " + data[-260][1]["nam"]["fnt"]
|
import os
WORKFLOW_MODE = True
if "domoticz_cmdlinemode" in os.environ:
WORKFLOW_MODE = False
BASE_WORKFLOW_FOLDER = os.path.dirname(__file__)
BASE_API_URL = "http://{serverPort}/json.htm".format(serverPort=os.environ['domoticz_address'])
BASE_WEB_URL = "http://{serverPort}/".format(serverPort=os.environ['domoticz_address'])
BASE_PATH_ICNS = "{folder}/resources/icns".format(folder=BASE_WORKFLOW_FOLDER)
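# Query-string templates for the Domoticz JSON API; the remaining {placeholders} are filled in before each request is made.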
class DomoticzAction:
ON = "On"
OFF = "Off"
    General_Status = "?type=command&param=getversion"
    Room_ListDevices = "?type=command&param=getplandevices&idx={idxRoom}"
    Device_Status = "?type=devices&rid={idx}"
    Device_ListDevices = "?type=devices&filter=all&used=true&order=Name"
    Device_Light_Switch = "?type=command&param=switchlight&idx={idxLight}&switchcmd={cmd}"
class DomoticzIcons:
General_Back = "{basepath}/back.png".format(basepath=BASE_PATH_ICNS)
Device_ColorLight = "{basepath}/device/RGB48_{power}.png".format(basepath=BASE_PATH_ICNS,power="{power}")
Device_Current = "{basepath}/device/current.png".format(basepath=BASE_PATH_ICNS)
Device_Gas = "{basepath}/device/gas.png".format(basepath=BASE_PATH_ICNS)
Device_Light = "{basepath}/device/lightbulb-{power}.png".format(basepath=BASE_PATH_ICNS,power="{power}")
Device_Motion = "{basepath}/device/motion-{power}.png".format(basepath=BASE_PATH_ICNS,power="{power}")
Device_TempMin = "{basepath}/device/ice.png".format(basepath=BASE_PATH_ICNS)
Device_Temp_0_5 = "{basepath}/device/temp-0-5.png".format(basepath=BASE_PATH_ICNS)
Device_Temp_5_10 = "{basepath}/device/temp-5-10.png".format(basepath=BASE_PATH_ICNS)
Device_Temp_10_15 = "{basepath}/device/temp-10-15.png".format(basepath=BASE_PATH_ICNS)
Device_Temp_15_20 = "{basepath}/device/temp-15-20.png".format(basepath=BASE_PATH_ICNS)
Device_Temp_20_25 = "{basepath}/device/temp-20-25.png".format(basepath=BASE_PATH_ICNS)
Device_Temp_25_30 = "{basepath}/device/temp-25-30.png".format(basepath=BASE_PATH_ICNS)
Device_TempMax = "{basepath}/device/temp-gt-30.png".format(basepath=BASE_PATH_ICNS)
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example code demonstrating the Python Hanabi interface."""
from __future__ import print_function
import numpy as np
import pyhanabi
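# run_game plays one full game with uniformly random legal moves: chance turns deal cards; regular turns print the state,
# each player's observation and its canonical encoding, then apply a randomly chosen legal move until the game ends.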
def run_game(game_parameters):
"""Play a game, selecting random actions."""
def print_state(state):
"""Print some basic information about the state."""
print("")
print("Current player: {}".format(state.cur_player()))
print(state)
    # Example of further queries that provide more information about this state. For
    # example, bots could use these methods to get information
# about the state in order to act accordingly.
print("### Information about the state retrieved separately ###")
print("### Information tokens: {}".format(state.information_tokens()))
print("### Life tokens: {}".format(state.life_tokens()))
print("### Fireworks: {}".format(state.fireworks()))
print("### Deck size: {}".format(state.deck_size()))
print("### Discard pile: {}".format(str(state.discard_pile())))
print("### Player hands: {}".format(str(state.player_hands())))
print("")
def print_observation(observation):
"""Print some basic information about an agent observation."""
print("--- Observation ---")
print(observation)
print("### Information about the observation retrieved separately ###")
print("### Current player, relative to self: {}".format(
observation.cur_player_offset()))
print("### Observed hands: {}".format(observation.observed_hands()))
print("### Card knowledge: {}".format(observation.card_knowledge()))
print("### Discard pile: {}".format(observation.discard_pile()))
print("### Fireworks: {}".format(observation.fireworks()))
print("### Deck size: {}".format(observation.deck_size()))
move_string = "### Last moves:"
for move_tuple in observation.last_moves():
move_string += " {}".format(move_tuple)
print(move_string)
print("### Information tokens: {}".format(observation.information_tokens()))
print("### Life tokens: {}".format(observation.life_tokens()))
print("### Legal moves: {}".format(observation.legal_moves()))
print("--- EndObservation ---")
def print_encoded_observations(encoder, state, num_players):
print("--- EncodedObservations ---")
print("Observation encoding shape: {}".format(encoder.shape()))
print("Current actual player: {}".format(state.cur_player()))
for i in range(num_players):
print("Encoded observation for player {}: {}".format(
i, encoder.encode(state.observation(i))))
print("--- EndEncodedObservations ---")
game = pyhanabi.HanabiGame(game_parameters)
print(game.parameter_string(), end="")
obs_encoder = pyhanabi.ObservationEncoder(
game, enc_type=pyhanabi.ObservationEncoderType.CANONICAL)
state = game.new_initial_state()
while not state.is_terminal():
if state.cur_player() == pyhanabi.CHANCE_PLAYER_ID:
state.deal_random_card()
continue
print_state(state)
observation = state.observation(state.cur_player())
print_observation(observation)
print_encoded_observations(obs_encoder, state, game.num_players())
legal_moves = state.legal_moves()
print("")
print("Number of legal moves: {}".format(len(legal_moves)))
move = np.random.choice(legal_moves)
print("Chose random legal move: {}".format(move))
state.apply_move(move)
print("")
print("Game done. Terminal state:")
print("")
print(state)
print("")
print("score: {}".format(state.score()))
if __name__ == "__main__":
# Check that the cdef and library were loaded from the standard paths.
assert pyhanabi.cdef_loaded(), "cdef failed to load"
assert pyhanabi.lib_loaded(), "lib failed to load"
run_game({"players": 3, "random_start_player": True})
|
import os,sys
sourcePath = os.path.join("..","..","..","src","build","bin")
sys.path.append(sourcePath)
import numpy as np
import tqdm
## TEST PARAMETERS: ***************************************************
Ntest = 10000
a = 1000
b = 500
data = {'R0': np.zeros((Ntest,3)), 'V0': np.zeros((Ntest,3))}
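# Each sample pairs a position drawn inside an ellipse with semi-axes a and b (centred around x=-2000, y=0, z~1500)
# with a velocity of magnitude ~100 pointing into the +x half-plane and carrying a small downward z-component.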
## RUN TEST: **********************************************************
for i in tqdm.trange(Ntest):
ang = 2.* np.pi * np.random.rand()
r = np.random.rand() * (a*b)/np.sqrt(a**2*np.sin(ang)**2 + b**2*np.cos(ang)**2)
data['R0'][i,:] = np.array([-2000. + r*np.cos(ang),\
0. + r*np.sin(ang),\
1500. + (100.*np.random.rand()-100.)])
th = np.pi * np.random.rand() - np.pi/2.
data['V0'][i,:] = np.array([100. * np.cos(th),\
100. * np.sin(th),\
-75. + (10.*np.random.rand() - 10.)])
## END: **************************************************************
# import pickle
# with open('data/EOL_IC.pickle', 'wb') as handle:
# pickle.dump(data, handle)
# Line to import data in other file
# sol = pickle.load(open('data/EOL_IC.pickle','rb'))
|
import functools
import os
import subprocess
import time
from typing import NamedTuple
import uuid
import anyio
from async_exit_stack import AsyncExitStack
from async_generator import asynccontextmanager
from p2pclient.libp2p_stubs.peer.id import ID
from multiaddr import Multiaddr, protocols
import multihash
import pytest
from p2pclient.exceptions import ControlFailure
from p2pclient.p2pclient import Client
import p2pclient.pb.p2pd_pb2 as p2pd_pb
from p2pclient.utils import get_unused_tcp_port, read_pbmsg_safe
TIMEOUT_DURATION = 30 # seconds
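# Integration fixtures and tests for p2pclient: each test spins up one or more jsp2pd daemons (see Daemon below)
# and talks to them through the Client instances provided by the `p2pcs` fixture.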
@pytest.fixture
def num_p2pds():
return 4
@pytest.fixture(scope="module")
def peer_id_random():
return ID.from_base58("QmcgpsyWgH8Y8ajJz1Cu72KnS5uo2Aa2LpzU7kinSupNK1")
@pytest.fixture
def enable_control():
return True
@pytest.fixture
def enable_connmgr():
return False
@pytest.fixture
def enable_dht():
return False
@pytest.fixture
def enable_pubsub():
return False
@pytest.fixture
def func_make_p2pd_pair():
return make_p2pd_pair_ip4
async def try_until_success(coro_func, timeout=TIMEOUT_DURATION):
"""
Keep running ``coro_func`` until the time is out.
All arguments of ``coro_func`` should be filled, i.e. it should be called without arguments.
"""
t_start = time.monotonic()
while True:
result = await coro_func()
if result:
break
if (time.monotonic() - t_start) >= timeout:
# timeout
assert False, f"{coro_func} still failed after `{timeout}` seconds"
await anyio.sleep(0.01)
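# Wraps a jsp2pd daemon subprocess: builds its command line from the enabled features, redirects stdout/stderr to a
# log file under /tmp, and exposes wait_until_ready()/close() for the fixtures below.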
class Daemon:
control_maddr = None
proc_daemon = None
log_filename = ""
f_log = None
closed = None
def __init__(
self, control_maddr, enable_control, enable_connmgr, enable_dht, enable_pubsub
):
self.control_maddr = control_maddr
self.enable_control = enable_control
self.enable_connmgr = enable_connmgr
self.enable_dht = enable_dht
self.enable_pubsub = enable_pubsub
self.is_closed = False
self._start_logging()
self._run()
def _start_logging(self):
name_control_maddr = str(self.control_maddr).replace("/", "_").replace(".", "_")
self.log_filename = f"/tmp/log_p2pd{name_control_maddr}.txt"
self.f_log = open(self.log_filename, "wb")
def _run(self):
cmd_list = ["jsp2pd", f"--listen={str(self.control_maddr)}"]
if self.enable_connmgr:
cmd_list += [
"--connManager=true",
"--connLo=1",
"--connHi=2",
"--connGrace=0",
]
if self.enable_dht:
cmd_list += ["--dht=true"]
if self.enable_pubsub:
cmd_list += ["--pubsub=true", "--pubsubRouter=gossipsub"]
self.proc_daemon = subprocess.Popen(
cmd_list, stdout=self.f_log, stderr=self.f_log, bufsize=0
)
async def wait_until_ready(self):
lines_head_pattern = (b"daemon has started",)
lines_head_occurred = {line: False for line in lines_head_pattern}
with open(self.log_filename, "rb") as f_log_read:
async def read_from_daemon_and_check():
line = f_log_read.readline()
for head_pattern in lines_head_occurred:
if line.startswith(head_pattern):
lines_head_occurred[head_pattern] = True
return all([value for _, value in lines_head_occurred.items()])
await try_until_success(read_from_daemon_and_check)
            # sleep for a while in case the daemon isn't fully ready right after emitting these lines
await anyio.sleep(0.1)
def close(self):
if self.is_closed:
return
# TODO: find out why terminate + wait locks sometimes.
self.proc_daemon.kill()
self.proc_daemon.wait()
self.f_log.close()
self.is_closed = True
class DaemonTuple(NamedTuple):
daemon: Daemon
client: Client
class ConnectionFailure(Exception):
pass
@asynccontextmanager
async def make_p2pd_pair_unix(
enable_control, enable_connmgr, enable_dht, enable_pubsub
):
name = str(uuid.uuid4())[:8]
control_maddr = Multiaddr(f"/unix/tmp/test_p2pd_control_{name}.sock")
listen_maddr = Multiaddr(f"/unix/tmp/test_p2pd_listen_{name}.sock")
    # Remove the existing unix socket files if they exist
try:
os.unlink(control_maddr.value_for_protocol(protocols.P_UNIX))
except FileNotFoundError:
pass
try:
os.unlink(listen_maddr.value_for_protocol(protocols.P_UNIX))
except FileNotFoundError:
pass
async with _make_p2pd_pair(
control_maddr=control_maddr,
listen_maddr=listen_maddr,
enable_control=enable_control,
enable_connmgr=enable_connmgr,
enable_dht=enable_dht,
enable_pubsub=enable_pubsub,
) as pair:
yield pair
@asynccontextmanager
async def make_p2pd_pair_ip4(enable_control, enable_connmgr, enable_dht, enable_pubsub):
control_maddr = Multiaddr(f"/ip4/127.0.0.1/tcp/{get_unused_tcp_port()}")
listen_maddr = Multiaddr(f"/ip4/127.0.0.1/tcp/{get_unused_tcp_port()}")
async with _make_p2pd_pair(
control_maddr=control_maddr,
listen_maddr=listen_maddr,
enable_control=enable_control,
enable_connmgr=enable_connmgr,
enable_dht=enable_dht,
enable_pubsub=enable_pubsub,
) as pair:
yield pair
@asynccontextmanager
async def _make_p2pd_pair(
control_maddr,
listen_maddr,
enable_control,
enable_connmgr,
enable_dht,
enable_pubsub,
):
p2pd = Daemon(
control_maddr=control_maddr,
enable_control=enable_control,
enable_connmgr=enable_connmgr,
enable_dht=enable_dht,
enable_pubsub=enable_pubsub,
)
# wait for daemon ready
await p2pd.wait_until_ready()
client = Client(control_maddr=control_maddr, listen_maddr=listen_maddr)
try:
async with client.listen():
yield DaemonTuple(daemon=p2pd, client=client)
finally:
if not p2pd.is_closed:
p2pd.close()
@pytest.fixture
async def p2pcs(
num_p2pds,
enable_control,
enable_connmgr,
enable_dht,
enable_pubsub,
func_make_p2pd_pair,
):
# TODO: Change back to gather style
async with AsyncExitStack() as stack:
p2pd_tuples = [
await stack.enter_async_context(
func_make_p2pd_pair(
enable_control=enable_control,
enable_connmgr=enable_connmgr,
enable_dht=enable_dht,
enable_pubsub=enable_pubsub,
)
)
for _ in range(num_p2pds)
]
yield tuple(p2pd_tuple.client for p2pd_tuple in p2pd_tuples)
@pytest.mark.parametrize(
"enable_control, func_make_p2pd_pair", ((True, make_p2pd_pair_unix),)
)
@pytest.mark.anyio
async def test_client_identify_unix_socket(p2pcs):
await p2pcs[0].identify()
@pytest.mark.parametrize("enable_control", (True,))
@pytest.mark.anyio
async def test_client_identify(p2pcs, anyio_backend):
await p2pcs[0].identify()
@pytest.mark.parametrize("enable_control", (True,))
@pytest.mark.anyio
async def test_client_connect_success(p2pcs):
peer_id_0, maddrs_0 = await p2pcs[0].identify()
peer_id_1, maddrs_1 = await p2pcs[1].identify()
await p2pcs[0].connect(peer_id_1, maddrs_1)
# test case: repeated connections
await p2pcs[1].connect(peer_id_0, maddrs_0)
@pytest.mark.parametrize("enable_control", (True,))
@pytest.mark.anyio
async def test_client_connect_failure(peer_id_random, p2pcs):
peer_id_1, maddrs_1 = await p2pcs[1].identify()
await p2pcs[0].identify()
# test case: `peer_id` mismatches
with pytest.raises(ControlFailure):
await p2pcs[0].connect(peer_id_random, maddrs_1)
# test case: empty maddrs
with pytest.raises(ControlFailure):
await p2pcs[0].connect(peer_id_1, [])
# test case: wrong maddrs
with pytest.raises(ControlFailure):
await p2pcs[0].connect(peer_id_1, [Multiaddr("/ip4/127.0.0.1/udp/0")])
async def _check_connection(p2pd_tuple_0, p2pd_tuple_1):
peer_id_0, _ = await p2pd_tuple_0.identify()
peer_id_1, _ = await p2pd_tuple_1.identify()
peers_0 = [pinfo.peer_id for pinfo in await p2pd_tuple_0.list_peers()]
peers_1 = [pinfo.peer_id for pinfo in await p2pd_tuple_1.list_peers()]
return (peer_id_0 in peers_1) and (peer_id_1 in peers_0)
async def connect_safe(p2pd_tuple_0, p2pd_tuple_1):
peer_id_1, maddrs_1 = await p2pd_tuple_1.identify()
await p2pd_tuple_0.connect(peer_id_1, maddrs_1)
await try_until_success(
functools.partial(
_check_connection, p2pd_tuple_0=p2pd_tuple_0, p2pd_tuple_1=p2pd_tuple_1
)
)
@pytest.mark.parametrize("enable_control", (True,))
@pytest.mark.anyio
async def test_connect_safe(p2pcs):
await connect_safe(p2pcs[0], p2pcs[1])
@pytest.mark.parametrize("enable_control", (True,))
@pytest.mark.anyio
async def test_client_list_peers(p2pcs):
# test case: no peers
assert len(await p2pcs[0].list_peers()) == 0
# test case: 1 peer
await connect_safe(p2pcs[0], p2pcs[1])
assert len(await p2pcs[0].list_peers()) == 1
assert len(await p2pcs[1].list_peers()) == 1
# test case: one more peer
await connect_safe(p2pcs[0], p2pcs[2])
assert len(await p2pcs[0].list_peers()) == 2
assert len(await p2pcs[1].list_peers()) == 1
assert len(await p2pcs[2].list_peers()) == 1
@pytest.mark.skip("DISCONNECT request not implemented in jsp2pd 0.10.1.")
@pytest.mark.parametrize("enable_control", (True,))
@pytest.mark.anyio
async def test_client_disconnect(peer_id_random, p2pcs):
# test case: disconnect a peer without connections
await p2pcs[1].disconnect(peer_id_random)
# test case: disconnect
peer_id_0, _ = await p2pcs[0].identify()
await connect_safe(p2pcs[0], p2pcs[1])
assert len(await p2pcs[0].list_peers()) == 1
assert len(await p2pcs[1].list_peers()) == 1
await p2pcs[1].disconnect(peer_id_0)
assert len(await p2pcs[0].list_peers()) == 0
assert len(await p2pcs[1].list_peers()) == 0
# test case: disconnect twice
await p2pcs[1].disconnect(peer_id_0)
assert len(await p2pcs[0].list_peers()) == 0
assert len(await p2pcs[1].list_peers()) == 0
@pytest.mark.skip(
"Fix the comparison of multiaddrs, the current code complains because the multiaddr returned"
+ "by the daemon contains its /p2p/ address."
)
@pytest.mark.parametrize("enable_control", (True,))
@pytest.mark.anyio
async def test_client_stream_open_success(p2pcs):
peer_id_1, maddrs_1 = await p2pcs[1].identify()
await connect_safe(p2pcs[0], p2pcs[1])
proto = "123"
async def handle_proto(stream_info, stream):
with pytest.raises(anyio.exceptions.IncompleteRead):
await stream.receive_exactly(1)
await p2pcs[1].stream_handler(proto, handle_proto)
# test case: normal
stream_info, stream = await p2pcs[0].stream_open(peer_id_1, (proto,))
assert stream_info.peer_id == peer_id_1
assert stream_info.addr in maddrs_1
assert stream_info.proto == "123"
await stream.close()
# test case: open with multiple protocols
stream_info, stream = await p2pcs[0].stream_open(
peer_id_1, (proto, "another_protocol")
)
assert stream_info.peer_id == peer_id_1
assert stream_info.addr in maddrs_1
assert stream_info.proto == "123"
await stream.close()
@pytest.mark.parametrize("enable_control", (True,))
@pytest.mark.anyio
async def test_client_stream_open_failure(p2pcs):
peer_id_1, _ = await p2pcs[1].identify()
await connect_safe(p2pcs[0], p2pcs[1])
proto = "123"
# test case: `stream_open` to a peer who didn't register the protocol
with pytest.raises(ControlFailure):
await p2pcs[0].stream_open(peer_id_1, (proto,))
# test case: `stream_open` to a peer for a non-registered protocol
async def handle_proto(stream_info, stream):
pass
await p2pcs[1].stream_handler(proto, handle_proto)
with pytest.raises(ControlFailure):
await p2pcs[0].stream_open(peer_id_1, ("another_protocol",))
@pytest.mark.parametrize("enable_control", (True,))
@pytest.mark.anyio
async def test_client_stream_handler_success(p2pcs):
peer_id_1, _ = await p2pcs[1].identify()
await connect_safe(p2pcs[0], p2pcs[1])
proto = "protocol123"
bytes_to_send = b"yoyoyoyoyog"
# event for this test function to wait until the handler function receiving the incoming data
event_handler_finished = anyio.create_event()
async def handle_proto(stream_info, stream):
nonlocal event_handler_finished
bytes_received = await stream.receive_exactly(len(bytes_to_send))
assert bytes_received == bytes_to_send
await event_handler_finished.set()
await p2pcs[1].stream_handler(proto, handle_proto)
assert proto in p2pcs[1].control.handlers
assert handle_proto == p2pcs[1].control.handlers[proto]
# test case: test the stream handler `handle_proto`
_, stream = await p2pcs[0].stream_open(peer_id_1, (proto,))
    # the handler is now blocked waiting for the data, because we haven't sent it yet
await stream.send_all(bytes_to_send)
# wait for the handler to finish
await stream.close()
await event_handler_finished.wait()
# test case: two streams to different handlers respectively
another_proto = "another_protocol123"
another_bytes_to_send = b"456"
event_another_proto = anyio.create_event()
async def handle_another_proto(stream_info, stream):
await event_another_proto.set()
bytes_received = await stream.receive_exactly(len(another_bytes_to_send))
assert bytes_received == another_bytes_to_send
await p2pcs[1].stream_handler(another_proto, handle_another_proto)
assert another_proto in p2pcs[1].control.handlers
assert handle_another_proto == p2pcs[1].control.handlers[another_proto]
_, another_stream = await p2pcs[0].stream_open(peer_id_1, (another_proto,))
await event_another_proto.wait()
    # we know that at this moment the handler must still be blocked waiting for the data
await another_stream.send_all(another_bytes_to_send)
await another_stream.close()
# test case: registering twice can override the previous registration
event_third = anyio.create_event()
async def handler_third(stream_info, stream):
await event_third.set()
await p2pcs[1].stream_handler(another_proto, handler_third)
assert another_proto in p2pcs[1].control.handlers
    # ensure the handler has been overridden
assert handler_third == p2pcs[1].control.handlers[another_proto]
await p2pcs[0].stream_open(peer_id_1, (another_proto,))
    # ensure the overriding handler is called when a stream is opened for the protocol
await event_third.wait()
@pytest.mark.parametrize("enable_control", (True,))
@pytest.mark.anyio
async def test_client_stream_handler_failure(p2pcs):
peer_id_1, _ = await p2pcs[1].identify()
await connect_safe(p2pcs[0], p2pcs[1])
proto = "123"
# test case: registered a wrong protocol name
async def handle_proto_correct_params(stream_info, stream):
pass
await p2pcs[1].stream_handler("another_protocol", handle_proto_correct_params)
with pytest.raises(ControlFailure):
await p2pcs[0].stream_open(peer_id_1, (proto,))
@pytest.mark.parametrize("enable_control, enable_dht", ((True, True),))
@pytest.mark.anyio
async def test_client_dht_find_peer_success(p2pcs):
peer_id_2, _ = await p2pcs[2].identify()
await connect_safe(p2pcs[0], p2pcs[1])
await connect_safe(p2pcs[1], p2pcs[2])
pinfo = await p2pcs[0].dht_find_peer(peer_id_2)
assert pinfo.peer_id == peer_id_2
assert len(pinfo.addrs) != 0
@pytest.mark.parametrize("enable_control, enable_dht", ((True, True),))
@pytest.mark.anyio
async def test_client_dht_find_peer_failure(peer_id_random, p2pcs):
peer_id_2, _ = await p2pcs[2].identify()
await connect_safe(p2pcs[0], p2pcs[1])
# test case: `peer_id` not found
with pytest.raises(ControlFailure):
await p2pcs[0].dht_find_peer(peer_id_random)
# test case: no route to the peer with peer_id_2
with pytest.raises(ControlFailure):
await p2pcs[0].dht_find_peer(peer_id_2)
@pytest.mark.skip("DHT FIND_PEERS_CONNECTED_TO_PEER not implemented in jsp2pd 0.10.1.")
@pytest.mark.parametrize("enable_control, enable_dht", ((True, True),))
@pytest.mark.anyio
async def test_client_dht_find_peers_connected_to_peer_success(p2pcs):
peer_id_2, _ = await p2pcs[2].identify()
await connect_safe(p2pcs[0], p2pcs[1])
# test case: 0 <-> 1 <-> 2
await connect_safe(p2pcs[1], p2pcs[2])
pinfos_connecting_to_2 = await p2pcs[0].dht_find_peers_connected_to_peer(peer_id_2)
# TODO: need to confirm this behaviour. Why the result is the PeerInfo of `peer_id_2`?
assert len(pinfos_connecting_to_2) == 1
@pytest.mark.skip("DHT FIND_PEERS_CONNECTED_TO_PEER not implemented in jsp2pd 0.10.1.")
@pytest.mark.parametrize("enable_control, enable_dht", ((True, True),))
@pytest.mark.anyio
async def test_client_dht_find_peers_connected_to_peer_failure(peer_id_random, p2pcs):
peer_id_2, _ = await p2pcs[2].identify()
await connect_safe(p2pcs[0], p2pcs[1])
# test case: request for random peer_id
pinfos = await p2pcs[0].dht_find_peers_connected_to_peer(peer_id_random)
assert not pinfos
# test case: no route to the peer with peer_id_2
pinfos = await p2pcs[0].dht_find_peers_connected_to_peer(peer_id_2)
assert not pinfos
@pytest.mark.skip("Fails randomly: response = type: ERROR error {msg: 'not found'}.")
@pytest.mark.parametrize("enable_control, enable_dht", ((True, True),))
@pytest.mark.anyio
async def test_client_dht_find_providers(p2pcs):
await connect_safe(p2pcs[0], p2pcs[1])
# borrowed from https://github.com/ipfs/go-cid#parsing-string-input-from-users
content_id_bytes = b"\x01r\x12 \xc0F\xc8\xechB\x17\xf0\x1b$\xb9\xecw\x11\xde\x11Cl\x8eF\xd8\x9a\xf1\xaeLa?\xb0\xaf\xe6K\x8b" # noqa: E501
pinfos = await p2pcs[1].dht_find_providers(content_id_bytes, 100)
assert not pinfos
@pytest.mark.skip("To fix: we expect get_closest_peers to return 2 peers, only one is returned.")
@pytest.mark.parametrize("enable_control, enable_dht", ((True, True),))
@pytest.mark.anyio
async def test_client_dht_get_closest_peers(p2pcs):
await connect_safe(p2pcs[0], p2pcs[1])
await connect_safe(p2pcs[1], p2pcs[2])
peer_ids_1 = await p2pcs[1].dht_get_closest_peers(b"123")
assert len(peer_ids_1) == 2
@pytest.mark.skip("To fix: The stream was closed before the read operation could be completed")
@pytest.mark.parametrize("enable_control, enable_dht", ((True, True),))
@pytest.mark.anyio
async def test_client_dht_get_public_key_success(peer_id_random, p2pcs):
peer_id_0, _ = await p2pcs[0].identify()
peer_id_1, _ = await p2pcs[1].identify()
await connect_safe(p2pcs[0], p2pcs[1])
await connect_safe(p2pcs[1], p2pcs[2])
await anyio.sleep(0.2)
pk0 = await p2pcs[0].dht_get_public_key(peer_id_0)
pk1 = await p2pcs[0].dht_get_public_key(peer_id_1)
assert pk0 != pk1
@pytest.mark.skip("To fix: The stream was closed before the read operation could be completed")
@pytest.mark.parametrize("enable_control, enable_dht", ((True, True),))
@pytest.mark.anyio
async def test_client_dht_get_public_key_failure(peer_id_random, p2pcs):
peer_id_2, _ = await p2pcs[2].identify()
await connect_safe(p2pcs[0], p2pcs[1])
await connect_safe(p2pcs[1], p2pcs[2])
# test case: failed to get the pubkey of the peer_id_random
with pytest.raises(ControlFailure):
await p2pcs[0].dht_get_public_key(peer_id_random)
# test case: should get the pubkey of the peer_id_2
await p2pcs[0].dht_get_public_key(peer_id_2)
@pytest.mark.parametrize("enable_control, enable_dht", ((True, True),))
@pytest.mark.anyio
async def test_client_dht_get_value(p2pcs):
key_not_existing = b"/123/456"
# test case: no peer in table
with pytest.raises(ControlFailure):
await p2pcs[0].dht_get_value(key_not_existing)
await connect_safe(p2pcs[0], p2pcs[1])
# test case: routing not found
with pytest.raises(ControlFailure):
await p2pcs[0].dht_get_value(key_not_existing)
@pytest.mark.skip("DHT SEARCH_VALUE not implemented in jsp2pd 0.10.1.")
@pytest.mark.parametrize("enable_control, enable_dht", ((True, True),))
@pytest.mark.anyio
async def test_client_dht_search_value(p2pcs):
key_not_existing = b"/123/456"
# test case: no peer in table
with pytest.raises(ControlFailure):
await p2pcs[0].dht_search_value(key_not_existing)
await connect_safe(p2pcs[0], p2pcs[1])
# test case: non-existing key
pinfos = await p2pcs[0].dht_search_value(key_not_existing)
assert len(pinfos) == 0
# FIXME
@pytest.mark.skip("Temporary skip the test since dht is not used often")
@pytest.mark.parametrize("enable_control, enable_dht", ((True, True),))
@pytest.mark.anyio
async def test_client_dht_put_value(p2pcs):
peer_id_0, _ = await p2pcs[0].identify()
await connect_safe(p2pcs[0], p2pcs[1])
# test case: valid key
pk0 = await p2pcs[0].dht_get_public_key(peer_id_0)
# make the `key` from pk0
algo = multihash.Func.sha2_256
value = pk0.data
mh_digest = multihash.digest(value, algo)
mh_digest_bytes = mh_digest.encode()
key = b"/pk/" + mh_digest_bytes
await p2pcs[0].dht_put_value(key, value)
# test case: get_value
    assert (await p2pcs[1].dht_get_value(key)) == value
# test case: invalid key
key_invalid = b"/123/456"
with pytest.raises(ControlFailure):
await p2pcs[0].dht_put_value(key_invalid, key_invalid)
@pytest.mark.skip("Fails: response = type: ERROR error {msg: 'not found'}.")
@pytest.mark.parametrize("enable_control, enable_dht", ((True, True),))
@pytest.mark.anyio
async def test_client_dht_provide(p2pcs):
peer_id_0, _ = await p2pcs[0].identify()
await connect_safe(p2pcs[0], p2pcs[1])
# test case: no providers
content_id_bytes = b"\x01r\x12 \xc0F\xc8\xechB\x17\xf0\x1b$\xb9\xecw\x11\xde\x11Cl\x8eF\xd8\x9a\xf1\xaeLa?\xb0\xaf\xe6K\x8b" # noqa: E501
pinfos_empty = await p2pcs[1].dht_find_providers(content_id_bytes, 100)
assert not pinfos_empty
# test case: p2pcs[0] provides
await p2pcs[0].dht_provide(content_id_bytes)
pinfos = await p2pcs[1].dht_find_providers(content_id_bytes, 100)
assert len(pinfos) == 1
assert pinfos[0].peer_id == peer_id_0
@pytest.mark.skip("CONNMANAGER functionalities not implemented in jsp2pd 0.10.1.")
@pytest.mark.parametrize("enable_control, enable_connmgr", ((True, True),))
@pytest.mark.anyio
async def test_client_connmgr_tag_peer(peer_id_random, p2pcs):
peer_id_0, _ = await p2pcs[0].identify()
# test case: tag myself
await p2pcs[0].connmgr_tag_peer(peer_id_0, "123", 123)
# test case: tag others
await p2pcs[1].connmgr_tag_peer(peer_id_0, "123", 123)
# test case: tag the same peers multiple times
await p2pcs[1].connmgr_tag_peer(peer_id_0, "456", 456)
# test case: tag multiple peers
await p2pcs[1].connmgr_tag_peer(peer_id_random, "123", 1)
# test case: tag the same peer with the same tag but different weight
await p2pcs[1].connmgr_tag_peer(peer_id_random, "123", 123)
@pytest.mark.skip("CONNMANAGER functionalities not implemented in jsp2pd 0.10.1.")
@pytest.mark.parametrize("enable_control, enable_connmgr", ((True, True),))
@pytest.mark.anyio
async def test_client_connmgr_untag_peer(peer_id_random, p2pcs):
    # test case: untag a nonexistent tag
await p2pcs[0].connmgr_untag_peer(peer_id_random, "123")
# test case: untag a tag
await p2pcs[0].connmgr_tag_peer(peer_id_random, "123", 123)
await p2pcs[0].connmgr_untag_peer(peer_id_random, "123")
# test case: untag a tag twice
await p2pcs[0].connmgr_untag_peer(peer_id_random, "123")
@pytest.mark.skip("Skipped because automatic trim is not stable to test")
@pytest.mark.parametrize("enable_control, enable_connmgr", ((True, True),))
@pytest.mark.anyio
async def test_client_connmgr_trim_automatically_by_connmgr(p2pcs):
# test case: due to `connHi=2` and `connLo=1`, when `p2pcs[1]` connecting to the third peer,
# `p2pcs[3]`, the connmgr of `p2pcs[1]` will try to prune the connections, down to
# `connLo=1`.
peer_id_0, maddrs_0 = await p2pcs[0].identify()
peer_id_2, maddrs_2 = await p2pcs[2].identify()
peer_id_3, maddrs_3 = await p2pcs[3].identify()
await p2pcs[1].connect(peer_id_0, maddrs_0)
await p2pcs[1].connect(peer_id_2, maddrs_2)
await p2pcs[1].connect(peer_id_3, maddrs_3)
# sleep to wait for the goroutine `Connmgr.TrimOpenConns` invoked by `mNotifee.Connected`
await anyio.sleep(1)
assert len(await p2pcs[1].list_peers()) == 1
@pytest.mark.skip("CONNMANAGER functionalities not implemented in jsp2pd 0.10.1.")
@pytest.mark.parametrize("enable_control, enable_connmgr", ((True, True),))
@pytest.mark.anyio
async def test_client_connmgr_trim(p2pcs):
peer_id_0, _ = await p2pcs[0].identify()
peer_id_2, _ = await p2pcs[2].identify()
await connect_safe(p2pcs[1], p2pcs[0])
await connect_safe(p2pcs[1], p2pcs[2])
assert len(await p2pcs[1].list_peers()) == 2
await p2pcs[1].connmgr_tag_peer(peer_id_0, "123", 1)
await p2pcs[1].connmgr_tag_peer(peer_id_2, "123", 2)
# trim the connections, the number of connections should go down to the low watermark
await p2pcs[1].connmgr_trim()
peers_1 = await p2pcs[1].list_peers()
assert len(peers_1) == 1
assert peers_1[0].peer_id == peer_id_2
@pytest.mark.parametrize("enable_control, enable_pubsub", ((True, True),))
@pytest.mark.anyio
async def test_client_pubsub_get_topics(p2pcs):
topics = await p2pcs[0].pubsub_get_topics()
assert len(topics) == 0
@pytest.mark.skip("PUBSUB LIST_PEERS is not supported on jsp2pd 0.10.1.")
@pytest.mark.parametrize("enable_control, enable_pubsub", ((True, True),))
@pytest.mark.anyio
async def test_client_pubsub_list_topic_peers(p2pcs):
peers = await p2pcs[0].pubsub_list_peers("123")
assert len(peers) == 0
@pytest.mark.parametrize("enable_control, enable_pubsub", ((True, True),))
@pytest.mark.anyio
async def test_client_pubsub_publish(p2pcs):
await p2pcs[0].pubsub_publish("123", b"data")
@pytest.mark.skip("PUBSUB LIST_PEERS not implemented in jsp2pd 0.10.1.")
@pytest.mark.parametrize("enable_control, enable_pubsub", ((True, True),))
@pytest.mark.anyio
async def test_client_pubsub_subscribe(p2pcs):
peer_id_0, _ = await p2pcs[0].identify()
peer_id_1, _ = await p2pcs[1].identify()
await connect_safe(p2pcs[0], p2pcs[1])
await connect_safe(p2pcs[1], p2pcs[2])
topic = "topic123"
data = b"data"
stream_0 = await p2pcs[0].pubsub_subscribe(topic)
stream_1 = await p2pcs[1].pubsub_subscribe(topic)
# test case: `get_topics` after subscriptions
assert topic in await p2pcs[0].pubsub_get_topics()
assert topic in await p2pcs[1].pubsub_get_topics()
# wait for mesh built
await anyio.sleep(2)
# test case: `list_topic_peers` after subscriptions
assert peer_id_0 in await p2pcs[1].pubsub_list_peers(topic)
assert peer_id_1 in await p2pcs[0].pubsub_list_peers(topic)
# test case: publish, and both clients receive data
await p2pcs[0].pubsub_publish(topic, data)
pubsub_msg_0 = p2pd_pb.PSMessage()
await read_pbmsg_safe(stream_0, pubsub_msg_0)
assert pubsub_msg_0.data == data
pubsub_msg_1 = p2pd_pb.PSMessage()
await read_pbmsg_safe(stream_1, pubsub_msg_1)
assert pubsub_msg_1.data == data
# test case: publish more data
another_data_0 = b"another_data_0"
another_data_1 = b"another_data_1"
await p2pcs[0].pubsub_publish(topic, another_data_0)
await p2pcs[0].pubsub_publish(topic, another_data_1)
pubsub_msg_1_0 = p2pd_pb.PSMessage()
await read_pbmsg_safe(stream_1, pubsub_msg_1_0)
pubsub_msg_1_1 = p2pd_pb.PSMessage()
await read_pbmsg_safe(stream_1, pubsub_msg_1_1)
assert set([pubsub_msg_1_0.data, pubsub_msg_1_1.data]) == set(
[another_data_0, another_data_1]
)
# test case: subscribe to multiple topics
another_topic = "topic456"
await p2pcs[0].pubsub_subscribe(another_topic)
stream_1_another = await p2pcs[1].pubsub_subscribe(another_topic)
await p2pcs[0].pubsub_publish(another_topic, another_data_0)
pubsub_msg_1_another = p2pd_pb.PSMessage()
await read_pbmsg_safe(stream_1_another, pubsub_msg_1_another)
assert pubsub_msg_1_another.data == another_data_0
# test case: test `from`
assert ID(pubsub_msg_1_1.from_id) == peer_id_0
# test case: test `from_id`, when it is sent through 1 hop(p2pcs[1])
stream_2 = await p2pcs[2].pubsub_subscribe(topic)
another_data_2 = b"another_data_2"
await p2pcs[0].pubsub_publish(topic, another_data_2)
pubsub_msg_2_0 = p2pd_pb.PSMessage()
await read_pbmsg_safe(stream_2, pubsub_msg_2_0)
assert ID(pubsub_msg_2_0.from_id) == peer_id_0
# test case: unsubscribe by closing the stream
await stream_0.close()
await anyio.sleep(0)
assert topic not in await p2pcs[0].pubsub_get_topics()
async def is_peer_removed_from_topic():
return peer_id_0 not in await p2pcs[1].pubsub_list_peers(topic)
await try_until_success(is_peer_removed_from_topic)
|
import time
import json
import copy
import logging
import functools
from typing import List, Tuple, Optional, Union, cast
from cephlib.wally_storage import WallyDB
from cephlib.node import NodeInfo, IRPCNode, get_hw_info, get_sw_info, get_hostname
from cephlib.ssh import parse_ssh_uri
from cephlib.node_impl import setup_rpc, connect
from . import utils
from .config import ConfigBlock
from .stage import Stage, StepOrder
from .sensors import collect_sensors_data
from .suits.all_suits import all_suits
from .test_run_class import TestRun
from .result_classes import SuiteConfig
logger = logging.getLogger("wally")
class ConnectStage(Stage):
"""Connect to nodes stage"""
priority = StepOrder.CONNECT
def run(self, ctx: TestRun) -> None:
with ctx.get_pool() as pool:
logger.info("Connecting to %s nodes", len(ctx.nodes_info))
def connect_ext(node_info: NodeInfo) -> Tuple[bool, Union[IRPCNode, NodeInfo]]:
try:
ssh_node = connect(node_info, conn_timeout=ctx.config.connect_timeout)
return True, setup_rpc(ssh_node,
ctx.rpc_code,
ctx.default_rpc_plugins,
log_level=ctx.config.rpc_log_level,
sudo=True)
except Exception as exc:
logger.exception("During connect to %s: %s", node_info, exc)
return False, node_info
failed_testnodes = [] # type: List[NodeInfo]
failed_nodes = [] # type: List[NodeInfo]
ctx.nodes = []
for ok, node in pool.map(connect_ext, ctx.nodes_info.values()):
if not ok:
node = cast(NodeInfo, node)
if 'testnode' in node.roles:
failed_testnodes.append(node)
else:
failed_nodes.append(node)
else:
ctx.nodes.append(cast(IRPCNode, node))
if failed_nodes:
msg = "Node(s) {} would be excluded - can't connect"
logger.warning(msg.format(", ".join(map(str, failed_nodes))))
if failed_testnodes:
msg = "Can't start RPC on testnode(s) " + ",".join(map(str, failed_testnodes))
logger.error(msg)
raise utils.StopTestError(msg)
if not failed_nodes:
logger.info("All nodes connected successfully")
def get_time(node):
return node.conn.sys.time()
t_start = time.time()
tms = pool.map(get_time, ctx.nodes)
t_end = time.time()
for node, val in zip(ctx.nodes, tms):
delta = 0
if val > t_end:
delta = val - t_end
elif val < t_start:
delta = t_start - val
if delta > ctx.config.max_time_diff_ms:
msg = ("Too large time shift {}ms on node {}. Stopping test." +
" Fix time on cluster nodes and restart test, or change " +
"max_time_diff_ms(={}ms) setting in config").format(delta,
str(node),
ctx.config.max_time_diff_ms)
logger.error(msg)
raise utils.StopTestError(msg)
if delta > 1:
logger.warning("Node %s has time shift at least %s ms", node, int(delta))
def cleanup(self, ctx: TestRun) -> None:
if ctx.config.get("download_rpc_logs", False):
logger.info("Killing all outstanding processes")
for node in ctx.nodes:
node.conn.cli.killall()
if ctx.ceph_master_node:
ctx.ceph_master_node.conn.cli.killall()
logger.info("Downloading RPC servers logs")
for node in ctx.nodes:
if node.rpc_log_file is not None:
nid = node.node_id
path = WallyDB.rpc_logs.format(node_id=nid)
node.conn.server.flush_logs()
log = node.get_file_content(node.rpc_log_file)
if path in ctx.storage:
ctx.storage.append_raw(log, path)
else:
ctx.storage.put_raw(log, path)
logger.debug("RPC log from node {} stored into storage::{}".format(nid, path))
logger.info("Disconnecting")
with ctx.get_pool() as pool:
list(pool.map(lambda node: node.disconnect(stop=True),
ctx.nodes + ([ctx.ceph_master_node] if ctx.ceph_master_node else [])))
class CollectInfoStage(Stage):
"""Collect node info"""
priority = StepOrder.UPDATE_NODES_INFO
config_block = 'collect_info'
def run(self, ctx: TestRun) -> None:
with ctx.get_pool() as pool:
try:
# can't make next RPC request until finish with previous for same node
for node, hw_info in zip(ctx.nodes, pool.map(get_hw_info, ctx.nodes)):
node.info.hw_info = hw_info
for node, sw_info in zip(ctx.nodes, pool.map(get_sw_info, ctx.nodes)):
node.info.sw_info = sw_info
except Exception as exc:
logger.exception("During collecting cluster info")
raise utils.StopTestError() from exc
logger.debug("Collecting hostnames")
hostnames = pool.map(get_hostname, ctx.nodes)
for node, hostname in zip(ctx.nodes, hostnames):
node.info.hostname = hostname
class ExplicitNodesStage(Stage):
"""add explicit nodes"""
priority = StepOrder.DISCOVER
config_block = 'nodes'
def run(self, ctx: TestRun) -> None:
if WallyDB.all_nodes in ctx.storage:
logger.info("Skip explicid nodes filling, as all_nodes all ready in storage")
return
for url, roles in ctx.config.get('nodes', {}).raw().items():
ctx.merge_node(parse_ssh_uri(url), set(role.strip() for role in roles.split(",")))
logger.debug("Add node %s with roles %s", url, roles)
class SleepStage(Stage):
"""Save nodes list to file"""
priority = StepOrder.TEST
config_block = 'sleep'
def run(self, ctx: TestRun) -> None:
logger.debug("Will sleep for %r seconds", ctx.config.sleep)
stime = time.time()
time.sleep(ctx.config.sleep)
ctx.storage.put([int(stime), int(time.time())], 'idle')
class PrepareNodes(Stage):
priority = StepOrder.START_SENSORS - 1
def __init__(self):
Stage.__init__(self)
self.nodeepscrub_updated = False
self.noscrub_updated = False
def run(self, ctx: TestRun) -> None:
ceph_sett = ctx.config.get('ceph_settings', "").split()
if ceph_sett:
for node in ctx.nodes:
if "ceph-mon" in node.info.roles or "ceph-osd" in node.info.roles:
state = json.loads(node.run("ceph health --format json"))["summary"]["summary"]
if 'noscrub' in ceph_sett:
if 'noscrub' in state:
logger.debug("noscrub already set on cluster")
else:
logger.info("Applying noscrub settings to ceph cluster")
node.run("ceph osd set noscrub")
self.noscrub_updated = True
if 'nodeepscrub' in ceph_sett:
if 'nodeepscrub' in state:
logger.debug("noscrub already set on cluster")
else:
logger.info("Applying noscrub settings to ceph cluster")
node.run("ceph osd set noscrub")
self.nodeepscrub_updated = True
break
def cleanup(self, ctx: TestRun) -> None:
if self.nodeepscrub_updated or self.noscrub_updated:
for node in ctx.nodes:
if "ceph-mon" in node.info.roles or "ceph-osd" in node.info.roles :
if self.noscrub_updated:
logger.info("Reverting noscrub setting for ceph cluster")
node.run("ceph osd unset noscrub")
self.noscrub_updated = False
if self.nodeepscrub_updated:
logger.info("Reverting noscrub setting for ceph cluster")
node.run("ceph osd unset nodeepscrub")
self.nodeepscrub_updated = False
class RunTestsStage(Stage):
priority = StepOrder.TEST
config_block = 'tests'
def run(self, ctx: TestRun) -> None:
if ctx.config.no_tests:
logger.info("Skiping tests, as 'no_tests' config settings is True")
return
for suite_idx, test_suite in enumerate(ctx.config.get('tests', [])):
test_nodes = [node for node in ctx.nodes if 'testnode' in node.info.roles]
if not test_nodes:
logger.error("No test nodes found")
raise utils.StopTestError()
if len(test_suite) != 1:
logger.error("Test suite %s contain more than one test. Put each test in separated group", suite_idx)
raise utils.StopTestError()
name, params = list(test_suite.items())[0]
vm_count = params.get('node_limit', None) # type: Optional[int]
# select test nodes
if vm_count is None:
curr_test_nodes = test_nodes
else:
curr_test_nodes = test_nodes[:vm_count]
if not curr_test_nodes:
logger.error("No nodes found for test, skipping it.")
continue
if name not in all_suits:
logger.error("Test suite %r not found. Only suits [%s] available", name, ", ".join(all_suits))
raise utils.StopTestError()
test_cls = all_suits[name]
remote_dir = ctx.config.default_test_local_folder.format(name=name, uuid=ctx.config.run_uuid)
suite = SuiteConfig(test_cls.name,
params=params,
run_uuid=ctx.config.run_uuid,
nodes=test_nodes,
remote_dir=remote_dir,
idx=suite_idx,
keep_raw_files=ctx.config.keep_raw_files)
test_cls(storage=ctx.rstorage,
suite=suite,
on_tests_boundry=functools.partial(collect_sensors_data, ctx)).run()
@classmethod
def validate_config(cls, cfg: ConfigBlock) -> None:
pass
class SaveNodesStage(Stage):
"""Save nodes list to file"""
priority = StepOrder.UPDATE_NODES_INFO + 1
def run(self, ctx: TestRun) -> None:
infos = list(ctx.nodes_info.values())
params = {node.node_id: node.params for node in infos}
ninfos = [copy.copy(node) for node in infos]
for node in ninfos:
node.params = {"in file": WallyDB.nodes_params}
ctx.storage.put_list(ninfos, WallyDB.all_nodes)
ctx.storage.put_raw(json.dumps(params).encode('utf8'), WallyDB.nodes_params)
class LoadStoredNodesStage(Stage):
priority = StepOrder.DISCOVER
def run(self, ctx: TestRun) -> None:
if WallyDB.all_nodes in ctx.storage:
if ctx.nodes_info:
logger.error("Internal error: Some nodes already stored in " +
"nodes_info before LoadStoredNodesStage stage")
raise utils.StopTestError()
ctx.nodes_info = {node.node_id: node for node in ctx.rstorage.load_nodes()}
logger.info("%s nodes loaded from database", len(ctx.nodes_info))
|
"""Base implementation of event loop.
The event loop can be broken up into a multiplexer (the part
responsible for notifying us of I/O events) and the event loop proper,
which wraps a multiplexer with functionality for scheduling callbacks,
immediately or at a given time in the future.
Whenever a public API takes a callback, subsequent positional
arguments will be passed to the callback if/when it is called. This
avoids the proliferation of trivial lambdas implementing closures.
Keyword arguments for the callback are not supported; this is a
conscious design decision, leaving the door open for keyword arguments
to modify the meaning of the API call itself.
"""
import collections
import collections.abc
import concurrent.futures
import functools
import heapq
import itertools
import os
import socket
import stat
import subprocess
import threading
import time
import traceback
import sys
import warnings
import weakref
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
from . import constants
from . import coroutines
from . import events
from . import exceptions
from . import futures
from . import protocols
from . import sslproto
from . import staggered
from . import tasks
from . import transports
from . import trsock
from .log import logger
__all__ = 'BaseEventLoop',
# Minimum number of _scheduled timer handles before cleanup of
# cancelled handles is performed.
_MIN_SCHEDULED_TIMER_HANDLES = 100
# Minimum fraction of _scheduled timer handles that are cancelled
# before cleanup of cancelled handles is performed.
_MIN_CANCELLED_TIMER_HANDLES_FRACTION = 0.5
_HAS_IPv6 = hasattr(socket, 'AF_INET6')
# Maximum timeout passed to select to avoid OS limitations
MAXIMUM_SELECT_TIMEOUT = 24 * 3600
def _format_handle(handle):
cb = handle._callback
if isinstance(getattr(cb, '__self__', None), tasks.Task):
# format the task
return repr(cb.__self__)
else:
return str(handle)
def _format_pipe(fd):
if fd == subprocess.PIPE:
return '<pipe>'
elif fd == subprocess.STDOUT:
return '<stdout>'
else:
return repr(fd)
def _set_reuseport(sock):
if not hasattr(socket, 'SO_REUSEPORT'):
raise ValueError('reuse_port not supported by socket module')
else:
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
except OSError:
raise ValueError('reuse_port not supported by socket module, '
'SO_REUSEPORT defined but not implemented.')
def _ipaddr_info(host, port, family, type, proto, flowinfo=0, scopeid=0):
# Try to skip getaddrinfo if "host" is already an IP. Users might have
# handled name resolution in their own code and pass in resolved IPs.
if not hasattr(socket, 'inet_pton'):
return
if proto not in {0, socket.IPPROTO_TCP, socket.IPPROTO_UDP} or \
host is None:
return None
if type == socket.SOCK_STREAM:
proto = socket.IPPROTO_TCP
elif type == socket.SOCK_DGRAM:
proto = socket.IPPROTO_UDP
else:
return None
if port is None:
port = 0
elif isinstance(port, bytes) and port == b'':
port = 0
elif isinstance(port, str) and port == '':
port = 0
else:
# If port's a service name like "http", don't skip getaddrinfo.
try:
port = int(port)
except (TypeError, ValueError):
return None
if family == socket.AF_UNSPEC:
afs = [socket.AF_INET]
if _HAS_IPv6:
afs.append(socket.AF_INET6)
else:
afs = [family]
if isinstance(host, bytes):
host = host.decode('idna')
if '%' in host:
# Linux's inet_pton doesn't accept an IPv6 zone index after host,
# like '::1%lo0'.
return None
for af in afs:
try:
socket.inet_pton(af, host)
# The host has already been resolved.
if _HAS_IPv6 and af == socket.AF_INET6:
return af, type, proto, '', (host, port, flowinfo, scopeid)
else:
return af, type, proto, '', (host, port)
except OSError:
pass
# "host" is not an IP address.
return None
def _interleave_addrinfos(addrinfos, first_address_family_count=1):
"""Interleave list of addrinfo tuples by family."""
# Group addresses by family
addrinfos_by_family = collections.OrderedDict()
for addr in addrinfos:
family = addr[0]
if family not in addrinfos_by_family:
addrinfos_by_family[family] = []
addrinfos_by_family[family].append(addr)
addrinfos_lists = list(addrinfos_by_family.values())
reordered = []
if first_address_family_count > 1:
reordered.extend(addrinfos_lists[0][:first_address_family_count - 1])
del addrinfos_lists[0][:first_address_family_count - 1]
reordered.extend(
a for a in itertools.chain.from_iterable(
itertools.zip_longest(*addrinfos_lists)
) if a is not None)
return reordered
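# Hedged example of the interleaving above (the addrinfo values are assumptions
# of the example): given addrinfos grouped as [v6a, v6b] and [v4a, v4b], the
# result alternates families so Happy Eyeballs can try one address per family
# in turn:
#
#   _interleave_addrinfos([v6a, v6b, v4a, v4b])     -> [v6a, v4a, v6b, v4b]
#   _interleave_addrinfos([v6a, v6b, v4a, v4b], 2)  -> [v6a, v6b, v4a, v4b]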
def _run_until_complete_cb(fut):
if not fut.cancelled():
exc = fut.exception()
if isinstance(exc, (SystemExit, KeyboardInterrupt)):
# Issue #22429: run_forever() already finished, no need to
# stop it.
return
futures._get_loop(fut).stop()
if hasattr(socket, 'TCP_NODELAY'):
def _set_nodelay(sock):
if (sock.family in {socket.AF_INET, socket.AF_INET6} and
sock.type == socket.SOCK_STREAM and
sock.proto == socket.IPPROTO_TCP):
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
else:
def _set_nodelay(sock):
pass
class _SendfileFallbackProtocol(protocols.Protocol):
def __init__(self, transp):
if not isinstance(transp, transports._FlowControlMixin):
raise TypeError("transport should be _FlowControlMixin instance")
self._transport = transp
self._proto = transp.get_protocol()
self._should_resume_reading = transp.is_reading()
self._should_resume_writing = transp._protocol_paused
transp.pause_reading()
transp.set_protocol(self)
if self._should_resume_writing:
self._write_ready_fut = self._transport._loop.create_future()
else:
self._write_ready_fut = None
async def drain(self):
if self._transport.is_closing():
raise ConnectionError("Connection closed by peer")
fut = self._write_ready_fut
if fut is None:
return
await fut
def connection_made(self, transport):
raise RuntimeError("Invalid state: "
"connection should have been established already.")
def connection_lost(self, exc):
if self._write_ready_fut is not None:
# Never happens if peer disconnects after sending the whole content
# Thus disconnection is always an exception from user perspective
if exc is None:
self._write_ready_fut.set_exception(
ConnectionError("Connection is closed by peer"))
else:
self._write_ready_fut.set_exception(exc)
self._proto.connection_lost(exc)
def pause_writing(self):
if self._write_ready_fut is not None:
return
self._write_ready_fut = self._transport._loop.create_future()
def resume_writing(self):
if self._write_ready_fut is None:
return
self._write_ready_fut.set_result(False)
self._write_ready_fut = None
def data_received(self, data):
raise RuntimeError("Invalid state: reading should be paused")
def eof_received(self):
raise RuntimeError("Invalid state: reading should be paused")
async def restore(self):
self._transport.set_protocol(self._proto)
if self._should_resume_reading:
self._transport.resume_reading()
if self._write_ready_fut is not None:
# Cancel the future.
# Basically it has no effect because protocol is switched back,
# no code should wait for it anymore.
self._write_ready_fut.cancel()
if self._should_resume_writing:
self._proto.resume_writing()
class Server(events.AbstractServer):
def __init__(self, loop, sockets, protocol_factory, ssl_context, backlog,
ssl_handshake_timeout):
self._loop = loop
self._sockets = sockets
self._active_count = 0
self._waiters = []
self._protocol_factory = protocol_factory
self._backlog = backlog
self._ssl_context = ssl_context
self._ssl_handshake_timeout = ssl_handshake_timeout
self._serving = False
self._serving_forever_fut = None
def __repr__(self):
return f'<{self.__class__.__name__} sockets={self.sockets!r}>'
def _attach(self):
assert self._sockets is not None
self._active_count += 1
def _detach(self):
assert self._active_count > 0
self._active_count -= 1
if self._active_count == 0 and self._sockets is None:
self._wakeup()
def _wakeup(self):
waiters = self._waiters
self._waiters = None
for waiter in waiters:
if not waiter.done():
waiter.set_result(waiter)
def _start_serving(self):
if self._serving:
return
self._serving = True
for sock in self._sockets:
sock.listen(self._backlog)
self._loop._start_serving(
self._protocol_factory, sock, self._ssl_context,
self, self._backlog, self._ssl_handshake_timeout)
def get_loop(self):
return self._loop
def is_serving(self):
return self._serving
@property
def sockets(self):
if self._sockets is None:
return ()
return tuple(trsock.TransportSocket(s) for s in self._sockets)
def close(self):
sockets = self._sockets
if sockets is None:
return
self._sockets = None
for sock in sockets:
self._loop._stop_serving(sock)
self._serving = False
if (self._serving_forever_fut is not None and
not self._serving_forever_fut.done()):
self._serving_forever_fut.cancel()
self._serving_forever_fut = None
if self._active_count == 0:
self._wakeup()
async def start_serving(self):
self._start_serving()
# Skip one loop iteration so that all 'loop.add_reader'
# go through.
await tasks.sleep(0, loop=self._loop)
async def serve_forever(self):
if self._serving_forever_fut is not None:
raise RuntimeError(
f'server {self!r} is already being awaited on serve_forever()')
if self._sockets is None:
raise RuntimeError(f'server {self!r} is closed')
self._start_serving()
self._serving_forever_fut = self._loop.create_future()
try:
await self._serving_forever_fut
except exceptions.CancelledError:
try:
self.close()
await self.wait_closed()
finally:
raise
finally:
self._serving_forever_fut = None
async def wait_closed(self):
if self._sockets is None or self._waiters is None:
return
waiter = self._loop.create_future()
self._waiters.append(waiter)
await waiter
class BaseEventLoop(events.AbstractEventLoop):
def __init__(self):
self._timer_cancelled_count = 0
self._closed = False
self._stopping = False
self._ready = collections.deque()
self._scheduled = []
self._default_executor = None
self._internal_fds = 0
# Identifier of the thread running the event loop, or None if the
# event loop is not running
self._thread_id = None
self._clock_resolution = time.get_clock_info('monotonic').resolution
self._exception_handler = None
self.set_debug(coroutines._is_debug_mode())
# In debug mode, if the execution of a callback or a step of a task
# exceed this duration in seconds, the slow callback/task is logged.
self.slow_callback_duration = 0.1
self._current_handle = None
self._task_factory = None
self._coroutine_origin_tracking_enabled = False
self._coroutine_origin_tracking_saved_depth = None
# A weak set of all asynchronous generators that are
# being iterated by the loop.
self._asyncgens = weakref.WeakSet()
# Set to True when `loop.shutdown_asyncgens` is called.
self._asyncgens_shutdown_called = False
# Set to True when `loop.shutdown_default_executor` is called.
self._executor_shutdown_called = False
def __repr__(self):
return (
f'<{self.__class__.__name__} running={self.is_running()} '
f'closed={self.is_closed()} debug={self.get_debug()}>'
)
def create_future(self):
"""Create a Future object attached to the loop."""
return futures.Future(loop=self)
def create_task(self, coro, *, name=None):
"""Schedule a coroutine object.
Return a task object.
"""
self._check_closed()
if self._task_factory is None:
task = tasks.Task(coro, loop=self, name=name)
if task._source_traceback:
del task._source_traceback[-1]
else:
task = self._task_factory(self, coro)
tasks._set_task_name(task, name)
return task
def set_task_factory(self, factory):
"""Set a task factory that will be used by loop.create_task().
If factory is None the default task factory will be set.
If factory is a callable, it should have a signature matching
'(loop, coro)', where 'loop' will be a reference to the active
event loop, 'coro' will be a coroutine object. The callable
must return a Future.
"""
if factory is not None and not callable(factory):
raise TypeError('task factory must be a callable or None')
self._task_factory = factory
def get_task_factory(self):
"""Return a task factory, or None if the default one is in use."""
return self._task_factory
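# Minimal sketch of a custom task factory matching the '(loop, coro)' signature
# described in set_task_factory() above (illustrative only; the factory name is
# an assumption of the example):
#
#   def logging_factory(loop, coro):
#       task = tasks.Task(coro, loop=loop)
#       print("scheduled", task)
#       return task
#
#   loop.set_task_factory(logging_factory)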
def _make_socket_transport(self, sock, protocol, waiter=None, *,
extra=None, server=None):
"""Create socket transport."""
raise NotImplementedError
def _make_ssl_transport(
self, rawsock, protocol, sslcontext, waiter=None,
*, server_side=False, server_hostname=None,
extra=None, server=None,
ssl_handshake_timeout=None,
call_connection_made=True):
"""Create SSL transport."""
raise NotImplementedError
def _make_datagram_transport(self, sock, protocol,
address=None, waiter=None, extra=None):
"""Create datagram transport."""
raise NotImplementedError
def _make_read_pipe_transport(self, pipe, protocol, waiter=None,
extra=None):
"""Create read pipe transport."""
raise NotImplementedError
def _make_write_pipe_transport(self, pipe, protocol, waiter=None,
extra=None):
"""Create write pipe transport."""
raise NotImplementedError
async def _make_subprocess_transport(self, protocol, args, shell,
stdin, stdout, stderr, bufsize,
extra=None, **kwargs):
"""Create subprocess transport."""
raise NotImplementedError
def _write_to_self(self):
"""Write a byte to self-pipe, to wake up the event loop.
This may be called from a different thread.
The subclass is responsible for implementing the self-pipe.
"""
raise NotImplementedError
def _process_events(self, event_list):
"""Process selector events."""
raise NotImplementedError
def _check_closed(self):
if self._closed:
raise RuntimeError('Event loop is closed')
def _check_default_executor(self):
if self._executor_shutdown_called:
raise RuntimeError('Executor shutdown has been called')
def _asyncgen_finalizer_hook(self, agen):
self._asyncgens.discard(agen)
if not self.is_closed():
self.call_soon_threadsafe(self.create_task, agen.aclose())
def _asyncgen_firstiter_hook(self, agen):
if self._asyncgens_shutdown_called:
warnings.warn(
f"asynchronous generator {agen!r} was scheduled after "
f"loop.shutdown_asyncgens() call",
ResourceWarning, source=self)
self._asyncgens.add(agen)
async def shutdown_asyncgens(self):
"""Shutdown all active asynchronous generators."""
self._asyncgens_shutdown_called = True
if not len(self._asyncgens):
# If Python version is <3.6 or we don't have any asynchronous
# generators alive.
return
closing_agens = list(self._asyncgens)
self._asyncgens.clear()
results = await tasks.gather(
*[ag.aclose() for ag in closing_agens],
return_exceptions=True,
loop=self)
for result, agen in zip(results, closing_agens):
if isinstance(result, Exception):
self.call_exception_handler({
'message': f'an error occurred during closing of '
f'asynchronous generator {agen!r}',
'exception': result,
'asyncgen': agen
})
async def shutdown_default_executor(self):
"""Schedule the shutdown of the default executor."""
self._executor_shutdown_called = True
if self._default_executor is None:
return
future = self.create_future()
thread = threading.Thread(target=self._do_shutdown, args=(future,))
thread.start()
try:
await future
finally:
thread.join()
def _do_shutdown(self, future):
try:
self._default_executor.shutdown(wait=True)
self.call_soon_threadsafe(future.set_result, None)
except Exception as ex:
self.call_soon_threadsafe(future.set_exception, ex)
def run_forever(self):
"""Run until stop() is called."""
self._check_closed()
if self.is_running():
raise RuntimeError('This event loop is already running')
if events._get_running_loop() is not None:
raise RuntimeError(
'Cannot run the event loop while another loop is running')
self._set_coroutine_origin_tracking(self._debug)
self._thread_id = threading.get_ident()
old_agen_hooks = sys.get_asyncgen_hooks()
sys.set_asyncgen_hooks(firstiter=self._asyncgen_firstiter_hook,
finalizer=self._asyncgen_finalizer_hook)
try:
events._set_running_loop(self)
while True:
self._run_once()
if self._stopping:
break
finally:
self._stopping = False
self._thread_id = None
events._set_running_loop(None)
self._set_coroutine_origin_tracking(False)
sys.set_asyncgen_hooks(*old_agen_hooks)
def run_until_complete(self, future):
"""Run until the Future is done.
If the argument is a coroutine, it is wrapped in a Task.
WARNING: It would be disastrous to call run_until_complete()
with the same coroutine twice -- it would wrap it in two
different Tasks and that can't be good.
Return the Future's result, or raise its exception.
"""
self._check_closed()
new_task = not futures.isfuture(future)
future = tasks.ensure_future(future, loop=self)
if new_task:
# An exception is raised if the future didn't complete, so there
# is no need to log the "destroy pending task" message
future._log_destroy_pending = False
future.add_done_callback(_run_until_complete_cb)
try:
self.run_forever()
except:
if new_task and future.done() and not future.cancelled():
# The coroutine raised a BaseException. Consume the exception
# to not log a warning, the caller doesn't have access to the
# local task.
future.exception()
raise
finally:
future.remove_done_callback(_run_until_complete_cb)
if not future.done():
raise RuntimeError('Event loop stopped before Future completed.')
return future.result()
def stop(self):
"""Stop running the event loop.
Every callback already scheduled will still run. This simply informs
run_forever to stop looping after a complete iteration.
"""
self._stopping = True
def close(self):
"""Close the event loop.
This clears the queues and shuts down the executor,
but does not wait for the executor to finish.
The event loop must not be running.
"""
if self.is_running():
raise RuntimeError("Cannot close a running event loop")
if self._closed:
return
if self._debug:
logger.debug("Close %r", self)
self._closed = True
self._ready.clear()
self._scheduled.clear()
self._executor_shutdown_called = True
executor = self._default_executor
if executor is not None:
self._default_executor = None
executor.shutdown(wait=False)
def is_closed(self):
"""Returns True if the event loop was closed."""
return self._closed
def __del__(self, _warn=warnings.warn):
if not self.is_closed():
_warn(f"unclosed event loop {self!r}", ResourceWarning, source=self)
if not self.is_running():
self.close()
def is_running(self):
"""Returns True if the event loop is running."""
return (self._thread_id is not None)
def time(self):
"""Return the time according to the event loop's clock.
This is a float expressed in seconds since an epoch, but the
epoch, precision, accuracy and drift are unspecified and may
differ per event loop.
"""
return time.monotonic()
def call_later(self, delay, callback, *args, context=None):
"""Arrange for a callback to be called at a given time.
Return a Handle: an opaque object with a cancel() method that
can be used to cancel the call.
The delay can be an int or float, expressed in seconds. It is
always relative to the current time.
Each callback will be called exactly once. If two callbacks
are scheduled for exactly the same time, it is undefined which
will be called first.
Any positional arguments after the callback will be passed to
the callback when it is called.
"""
timer = self.call_at(self.time() + delay, callback, *args,
context=context)
if timer._source_traceback:
del timer._source_traceback[-1]
return timer
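# Hedged usage sketch for call_later()/call_at(); the callback and its argument
# are assumptions of the example:
#
#   def on_timeout(reason):
#       print("timed out:", reason)
#
#   handle = loop.call_later(5.0, on_timeout, "no response")
#   handle.cancel()                                   # a TimerHandle can be cancelled
#   loop.call_at(loop.time() + 5.0, on_timeout, "no response")   # absolute form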
def call_at(self, when, callback, *args, context=None):
"""Like call_later(), but uses an absolute time.
Absolute time corresponds to the event loop's time() method.
"""
self._check_closed()
if self._debug:
self._check_thread()
self._check_callback(callback, 'call_at')
timer = events.TimerHandle(when, callback, args, self, context)
if timer._source_traceback:
del timer._source_traceback[-1]
heapq.heappush(self._scheduled, timer)
timer._scheduled = True
return timer
def call_soon(self, callback, *args, context=None):
"""Arrange for a callback to be called as soon as possible.
This operates as a FIFO queue: callbacks are called in the
order in which they are registered. Each callback will be
called exactly once.
Any positional arguments after the callback will be passed to
the callback when it is called.
"""
self._check_closed()
if self._debug:
self._check_thread()
self._check_callback(callback, 'call_soon')
handle = self._call_soon(callback, args, context)
if handle._source_traceback:
del handle._source_traceback[-1]
return handle
def _check_callback(self, callback, method):
if (coroutines.iscoroutine(callback) or
coroutines.iscoroutinefunction(callback)):
raise TypeError(
f"coroutines cannot be used with {method}()")
if not callable(callback):
raise TypeError(
f'a callable object was expected by {method}(), '
f'got {callback!r}')
def _call_soon(self, callback, args, context):
handle = events.Handle(callback, args, self, context)
if handle._source_traceback:
del handle._source_traceback[-1]
self._ready.append(handle)
return handle
def _check_thread(self):
"""Check that the current thread is the thread running the event loop.
Non-thread-safe methods of this class make this assumption and will
likely behave incorrectly when the assumption is violated.
Should only be called when (self._debug == True). The caller is
responsible for checking this condition for performance reasons.
"""
if self._thread_id is None:
return
thread_id = threading.get_ident()
if thread_id != self._thread_id:
raise RuntimeError(
"Non-thread-safe operation invoked on an event loop other "
"than the current one")
def call_soon_threadsafe(self, callback, *args, context=None):
"""Like call_soon(), but thread-safe."""
self._check_closed()
if self._debug:
self._check_callback(callback, 'call_soon_threadsafe')
handle = self._call_soon(callback, args, context)
if handle._source_traceback:
del handle._source_traceback[-1]
self._write_to_self()
return handle
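# Illustrative sketch: call_soon_threadsafe() is the only call_* variant safe to
# invoke from another thread; it also wakes the loop via _write_to_self().
# handle_result and the thread layout are assumptions of the example:
#
#   def worker(loop, result):
#       loop.call_soon_threadsafe(handle_result, result)   # from a worker thread
#
#   threading.Thread(target=worker, args=(loop, 42)).start()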
def run_in_executor(self, executor, func, *args):
self._check_closed()
if self._debug:
self._check_callback(func, 'run_in_executor')
if executor is None:
executor = self._default_executor
# Only check when the default executor is being used
self._check_default_executor()
if executor is None:
executor = concurrent.futures.ThreadPoolExecutor()
self._default_executor = executor
return futures.wrap_future(
executor.submit(func, *args), loop=self)
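# Hedged example of offloading a blocking call with run_in_executor(); passing
# None selects the default ThreadPoolExecutor created above. blocking_read and
# cpu_bound are assumptions of the example:
#
#   data = await loop.run_in_executor(None, blocking_read, "/tmp/file")
#   # or with an explicit executor:
#   with concurrent.futures.ProcessPoolExecutor() as pool:
#       result = await loop.run_in_executor(pool, cpu_bound, 10_000)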
def set_default_executor(self, executor):
if not isinstance(executor, concurrent.futures.ThreadPoolExecutor):
warnings.warn(
'Using the default executor that is not an instance of '
'ThreadPoolExecutor is deprecated and will be prohibited '
'in Python 3.9',
DeprecationWarning, 2)
self._default_executor = executor
def _getaddrinfo_debug(self, host, port, family, type, proto, flags):
msg = [f"{host}:{port!r}"]
if family:
msg.append(f'family={family!r}')
if type:
msg.append(f'type={type!r}')
if proto:
msg.append(f'proto={proto!r}')
if flags:
msg.append(f'flags={flags!r}')
msg = ', '.join(msg)
logger.debug('Get address info %s', msg)
t0 = self.time()
addrinfo = socket.getaddrinfo(host, port, family, type, proto, flags)
dt = self.time() - t0
msg = f'Getting address info {msg} took {dt * 1e3:.3f}ms: {addrinfo!r}'
if dt >= self.slow_callback_duration:
logger.info(msg)
else:
logger.debug(msg)
return addrinfo
async def getaddrinfo(self, host, port, *,
family=0, type=0, proto=0, flags=0):
if self._debug:
getaddr_func = self._getaddrinfo_debug
else:
getaddr_func = socket.getaddrinfo
return await self.run_in_executor(
None, getaddr_func, host, port, family, type, proto, flags)
async def getnameinfo(self, sockaddr, flags=0):
return await self.run_in_executor(
None, socket.getnameinfo, sockaddr, flags)
async def sock_sendfile(self, sock, file, offset=0, count=None,
*, fallback=True):
if self._debug and sock.gettimeout() != 0:
raise ValueError("the socket must be non-blocking")
self._check_sendfile_params(sock, file, offset, count)
try:
return await self._sock_sendfile_native(sock, file,
offset, count)
except exceptions.SendfileNotAvailableError as exc:
if not fallback:
raise
return await self._sock_sendfile_fallback(sock, file,
offset, count)
async def _sock_sendfile_native(self, sock, file, offset, count):
# NB: sendfile syscall is not supported for SSL sockets and
# non-mmap files even if sendfile is supported by OS
raise exceptions.SendfileNotAvailableError(
f"syscall sendfile is not available for socket {sock!r} "
"and file {file!r} combination")
async def _sock_sendfile_fallback(self, sock, file, offset, count):
if offset:
file.seek(offset)
blocksize = (
min(count, constants.SENDFILE_FALLBACK_READBUFFER_SIZE)
if count else constants.SENDFILE_FALLBACK_READBUFFER_SIZE
)
buf = bytearray(blocksize)
total_sent = 0
try:
while True:
if count:
blocksize = min(count - total_sent, blocksize)
if blocksize <= 0:
break
view = memoryview(buf)[:blocksize]
read = await self.run_in_executor(None, file.readinto, view)
if not read:
break # EOF
await self.sock_sendall(sock, view[:read])
total_sent += read
return total_sent
finally:
if total_sent > 0 and hasattr(file, 'seek'):
file.seek(offset + total_sent)
def _check_sendfile_params(self, sock, file, offset, count):
if 'b' not in getattr(file, 'mode', 'b'):
raise ValueError("file should be opened in binary mode")
if not sock.type == socket.SOCK_STREAM:
raise ValueError("only SOCK_STREAM type sockets are supported")
if count is not None:
if not isinstance(count, int):
raise TypeError(
"count must be a positive integer (got {!r})".format(count))
if count <= 0:
raise ValueError(
"count must be a positive integer (got {!r})".format(count))
if not isinstance(offset, int):
raise TypeError(
"offset must be a non-negative integer (got {!r})".format(
offset))
if offset < 0:
raise ValueError(
"offset must be a non-negative integer (got {!r})".format(
offset))
async def _connect_sock(self, exceptions, addr_info, local_addr_infos=None):
"""Create, bind and connect one socket."""
my_exceptions = []
exceptions.append(my_exceptions)
family, type_, proto, _, address = addr_info
sock = None
try:
sock = socket.socket(family=family, type=type_, proto=proto)
sock.setblocking(False)
if local_addr_infos is not None:
for _, _, _, _, laddr in local_addr_infos:
try:
sock.bind(laddr)
break
except OSError as exc:
msg = (
f'error while attempting to bind on '
f'address {laddr!r}: '
f'{exc.strerror.lower()}'
)
exc = OSError(exc.errno, msg)
my_exceptions.append(exc)
else: # all bind attempts failed
raise my_exceptions.pop()
await self.sock_connect(sock, address)
return sock
except OSError as exc:
my_exceptions.append(exc)
if sock is not None:
sock.close()
raise
except:
if sock is not None:
sock.close()
raise
async def create_connection(
self, protocol_factory, host=None, port=None,
*, ssl=None, family=0,
proto=0, flags=0, sock=None,
local_addr=None, server_hostname=None,
ssl_handshake_timeout=None,
happy_eyeballs_delay=None, interleave=None):
"""Connect to a TCP server.
Create a streaming transport connection to a given Internet host and
port: socket family AF_INET or socket.AF_INET6 depending on host (or
family if specified), socket type SOCK_STREAM. protocol_factory must be
a callable returning a protocol instance.
This method is a coroutine which will try to establish the connection
in the background. When successful, the coroutine returns a
(transport, protocol) pair.
"""
if server_hostname is not None and not ssl:
raise ValueError('server_hostname is only meaningful with ssl')
if server_hostname is None and ssl:
# Use host as default for server_hostname. It is an error
# if host is empty or not set, e.g. when an
# already-connected socket was passed or when only a port
# is given. To avoid this error, you can pass
# server_hostname='' -- this will bypass the hostname
# check. (This also means that if host is a numeric
# IP/IPv6 address, we will attempt to verify that exact
# address; this will probably fail, but it is possible to
# create a certificate for a specific IP address, so we
# don't judge it here.)
if not host:
raise ValueError('You must set server_hostname '
'when using ssl without a host')
server_hostname = host
if ssl_handshake_timeout is not None and not ssl:
raise ValueError(
'ssl_handshake_timeout is only meaningful with ssl')
if happy_eyeballs_delay is not None and interleave is None:
# If using happy eyeballs, default to interleave addresses by family
interleave = 1
if host is not None or port is not None:
if sock is not None:
raise ValueError(
'host/port and sock can not be specified at the same time')
infos = await self._ensure_resolved(
(host, port), family=family,
type=socket.SOCK_STREAM, proto=proto, flags=flags, loop=self)
if not infos:
raise OSError('getaddrinfo() returned empty list')
if local_addr is not None:
laddr_infos = await self._ensure_resolved(
local_addr, family=family,
type=socket.SOCK_STREAM, proto=proto,
flags=flags, loop=self)
if not laddr_infos:
raise OSError('getaddrinfo() returned empty list')
else:
laddr_infos = None
if interleave:
infos = _interleave_addrinfos(infos, interleave)
exceptions = []
if happy_eyeballs_delay is None:
# not using happy eyeballs
for addrinfo in infos:
try:
sock = await self._connect_sock(
exceptions, addrinfo, laddr_infos)
break
except OSError:
continue
else: # using happy eyeballs
sock, _, _ = await staggered.staggered_race(
(functools.partial(self._connect_sock,
exceptions, addrinfo, laddr_infos)
for addrinfo in infos),
happy_eyeballs_delay, loop=self)
if sock is None:
exceptions = [exc for sub in exceptions for exc in sub]
if len(exceptions) == 1:
raise exceptions[0]
else:
# If they all have the same str(), raise one.
model = str(exceptions[0])
if all(str(exc) == model for exc in exceptions):
raise exceptions[0]
# Raise a combined exception so the user can see all
# the various error messages.
raise OSError('Multiple exceptions: {}'.format(
', '.join(str(exc) for exc in exceptions)))
else:
if sock is None:
raise ValueError(
'host and port were not specified and no sock specified')
if sock.type != socket.SOCK_STREAM:
# We allow AF_INET, AF_INET6, AF_UNIX as long as they
# are SOCK_STREAM.
# We support passing AF_UNIX sockets even though we have
# a dedicated API for that: create_unix_connection.
# Disallowing AF_UNIX in this method, breaks backwards
# compatibility.
raise ValueError(
f'A Stream Socket was expected, got {sock!r}')
transport, protocol = await self._create_connection_transport(
sock, protocol_factory, ssl, server_hostname,
ssl_handshake_timeout=ssl_handshake_timeout)
if self._debug:
# Get the socket from the transport because SSL transport closes
# the old socket and creates a new SSL socket
sock = transport.get_extra_info('socket')
logger.debug("%r connected to %s:%r: (%r, %r)",
sock, host, port, transport, protocol)
return transport, protocol
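# Minimal usage sketch for create_connection(); EchoClientProtocol is an
# assumption of the example and is not defined in this module:
#
#   transport, protocol = await loop.create_connection(
#       EchoClientProtocol, "example.com", 8888)
#   transport.write(b"ping")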
async def _create_connection_transport(
self, sock, protocol_factory, ssl,
server_hostname, server_side=False,
ssl_handshake_timeout=None):
sock.setblocking(False)
protocol = protocol_factory()
waiter = self.create_future()
if ssl:
sslcontext = None if isinstance(ssl, bool) else ssl
transport = self._make_ssl_transport(
sock, protocol, sslcontext, waiter,
server_side=server_side, server_hostname=server_hostname,
ssl_handshake_timeout=ssl_handshake_timeout)
else:
transport = self._make_socket_transport(sock, protocol, waiter)
try:
await waiter
except:
transport.close()
raise
return transport, protocol
async def sendfile(self, transport, file, offset=0, count=None,
*, fallback=True):
"""Send a file to transport.
Return the total number of bytes which were sent.
The method uses high-performance os.sendfile if available.
file must be a regular file object opened in binary mode.
offset tells from where to start reading the file. If specified,
count is the total number of bytes to transmit as opposed to
sending the file until EOF is reached. File position is updated on
return, including in case of error, in which case file.tell()
can be used to figure out the number of bytes
which were sent.
fallback set to True makes asyncio manually read and send
the file when the platform does not support the sendfile syscall
(e.g. Windows or SSL socket on Unix).
Raise SendfileNotAvailableError if the system does not support
sendfile syscall and fallback is False.
"""
if transport.is_closing():
raise RuntimeError("Transport is closing")
mode = getattr(transport, '_sendfile_compatible',
constants._SendfileMode.UNSUPPORTED)
if mode is constants._SendfileMode.UNSUPPORTED:
raise RuntimeError(
f"sendfile is not supported for transport {transport!r}")
if mode is constants._SendfileMode.TRY_NATIVE:
try:
return await self._sendfile_native(transport, file,
offset, count)
except exceptions.SendfileNotAvailableError as exc:
if not fallback:
raise
if not fallback:
raise RuntimeError(
f"fallback is disabled and native sendfile is not "
f"supported for transport {transport!r}")
return await self._sendfile_fallback(transport, file,
offset, count)
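# Hedged example of sendfile() over an existing transport; the file path and
# the transport variable are assumptions of the example:
#
#   with open("payload.bin", "rb") as f:
#       sent = await loop.sendfile(transport, f, offset=0, count=None)
#   # falls back to _sendfile_fallback() when the native syscall is unavailable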
async def _sendfile_native(self, transp, file, offset, count):
raise exceptions.SendfileNotAvailableError(
"sendfile syscall is not supported")
async def _sendfile_fallback(self, transp, file, offset, count):
if offset:
file.seek(offset)
blocksize = min(count, 16384) if count else 16384
buf = bytearray(blocksize)
total_sent = 0
proto = _SendfileFallbackProtocol(transp)
try:
while True:
if count:
blocksize = min(count - total_sent, blocksize)
if blocksize <= 0:
return total_sent
view = memoryview(buf)[:blocksize]
read = await self.run_in_executor(None, file.readinto, view)
if not read:
return total_sent # EOF
await proto.drain()
transp.write(view[:read])
total_sent += read
finally:
if total_sent > 0 and hasattr(file, 'seek'):
file.seek(offset + total_sent)
await proto.restore()
async def start_tls(self, transport, protocol, sslcontext, *,
server_side=False,
server_hostname=None,
ssl_handshake_timeout=None):
"""Upgrade transport to TLS.
Return a new transport that *protocol* should start using
immediately.
"""
if ssl is None:
raise RuntimeError('Python ssl module is not available')
if not isinstance(sslcontext, ssl.SSLContext):
raise TypeError(
f'sslcontext is expected to be an instance of ssl.SSLContext, '
f'got {sslcontext!r}')
if not getattr(transport, '_start_tls_compatible', False):
raise TypeError(
f'transport {transport!r} is not supported by start_tls()')
waiter = self.create_future()
ssl_protocol = sslproto.SSLProtocol(
self, protocol, sslcontext, waiter,
server_side, server_hostname,
ssl_handshake_timeout=ssl_handshake_timeout,
call_connection_made=False)
# Pause early so that "ssl_protocol.data_received()" doesn't
# have a chance to get called before "ssl_protocol.connection_made()".
transport.pause_reading()
transport.set_protocol(ssl_protocol)
conmade_cb = self.call_soon(ssl_protocol.connection_made, transport)
resume_cb = self.call_soon(transport.resume_reading)
try:
await waiter
except BaseException:
transport.close()
conmade_cb.cancel()
resume_cb.cancel()
raise
return ssl_protocol._app_transport
async def create_datagram_endpoint(self, protocol_factory,
local_addr=None, remote_addr=None, *,
family=0, proto=0, flags=0,
reuse_address=None, reuse_port=None,
allow_broadcast=None, sock=None):
"""Create datagram connection."""
if sock is not None:
if sock.type != socket.SOCK_DGRAM:
raise ValueError(
f'A UDP Socket was expected, got {sock!r}')
if (local_addr or remote_addr or
family or proto or flags or
reuse_address or reuse_port or allow_broadcast):
# show the problematic kwargs in exception msg
opts = dict(local_addr=local_addr, remote_addr=remote_addr,
family=family, proto=proto, flags=flags,
reuse_address=reuse_address, reuse_port=reuse_port,
allow_broadcast=allow_broadcast)
problems = ', '.join(f'{k}={v}' for k, v in opts.items() if v)
raise ValueError(
f'socket modifier keyword arguments can not be used '
f'when sock is specified. ({problems})')
sock.setblocking(False)
r_addr = None
else:
if not (local_addr or remote_addr):
if family == 0:
raise ValueError('unexpected address family')
addr_pairs_info = (((family, proto), (None, None)),)
elif hasattr(socket, 'AF_UNIX') and family == socket.AF_UNIX:
for addr in (local_addr, remote_addr):
if addr is not None and not isinstance(addr, str):
raise TypeError('string is expected')
if local_addr and local_addr[0] not in (0, '\x00'):
try:
if stat.S_ISSOCK(os.stat(local_addr).st_mode):
os.remove(local_addr)
except FileNotFoundError:
pass
except OSError as err:
# Directory may have permissions only to create socket.
logger.error('Unable to check or remove stale UNIX '
'socket %r: %r',
local_addr, err)
addr_pairs_info = (((family, proto),
(local_addr, remote_addr)), )
else:
# join address by (family, protocol)
addr_infos = {} # Using order preserving dict
for idx, addr in ((0, local_addr), (1, remote_addr)):
if addr is not None:
assert isinstance(addr, tuple) and len(addr) == 2, (
'2-tuple is expected')
infos = await self._ensure_resolved(
addr, family=family, type=socket.SOCK_DGRAM,
proto=proto, flags=flags, loop=self)
if not infos:
raise OSError('getaddrinfo() returned empty list')
for fam, _, pro, _, address in infos:
key = (fam, pro)
if key not in addr_infos:
addr_infos[key] = [None, None]
addr_infos[key][idx] = address
# each addr has to have info for each (family, proto) pair
addr_pairs_info = [
(key, addr_pair) for key, addr_pair in addr_infos.items()
if not ((local_addr and addr_pair[0] is None) or
(remote_addr and addr_pair[1] is None))]
if not addr_pairs_info:
raise ValueError('can not get address information')
exceptions = []
if reuse_address is None:
reuse_address = os.name == 'posix' and sys.platform != 'cygwin'
for ((family, proto),
(local_address, remote_address)) in addr_pairs_info:
sock = None
r_addr = None
try:
sock = socket.socket(
family=family, type=socket.SOCK_DGRAM, proto=proto)
if reuse_address:
sock.setsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if reuse_port:
_set_reuseport(sock)
if allow_broadcast:
sock.setsockopt(
socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock.setblocking(False)
if local_addr:
sock.bind(local_address)
if remote_addr:
if not allow_broadcast:
await self.sock_connect(sock, remote_address)
r_addr = remote_address
except OSError as exc:
if sock is not None:
sock.close()
exceptions.append(exc)
except:
if sock is not None:
sock.close()
raise
else:
break
else:
raise exceptions[0]
protocol = protocol_factory()
waiter = self.create_future()
transport = self._make_datagram_transport(
sock, protocol, r_addr, waiter)
if self._debug:
if local_addr:
logger.info("Datagram endpoint local_addr=%r remote_addr=%r "
"created: (%r, %r)",
local_addr, remote_addr, transport, protocol)
else:
logger.debug("Datagram endpoint remote_addr=%r created: "
"(%r, %r)",
remote_addr, transport, protocol)
try:
await waiter
except:
transport.close()
raise
return transport, protocol
async def _ensure_resolved(self, address, *,
family=0, type=socket.SOCK_STREAM,
proto=0, flags=0, loop):
host, port = address[:2]
info = _ipaddr_info(host, port, family, type, proto, *address[2:])
if info is not None:
# "host" is already a resolved IP.
return [info]
else:
return await loop.getaddrinfo(host, port, family=family, type=type,
proto=proto, flags=flags)
async def _create_server_getaddrinfo(self, host, port, family, flags):
infos = await self._ensure_resolved((host, port), family=family,
type=socket.SOCK_STREAM,
flags=flags, loop=self)
if not infos:
raise OSError(f'getaddrinfo({host!r}) returned empty list')
return infos
async def create_server(
self, protocol_factory, host=None, port=None,
*,
family=socket.AF_UNSPEC,
flags=socket.AI_PASSIVE,
sock=None,
backlog=100,
ssl=None,
reuse_address=None,
reuse_port=None,
ssl_handshake_timeout=None,
start_serving=True):
"""Create a TCP server.
The host parameter can be a string, in that case the TCP server is
bound to host and port.
The host parameter can also be a sequence of strings and in that case
the TCP server is bound to all hosts of the sequence. If a host
appears multiple times (possibly indirectly e.g. when hostnames
resolve to the same IP address), the server is only bound once to that
host.
Return a Server object which can be used to stop the service.
This method is a coroutine.
"""
if isinstance(ssl, bool):
raise TypeError('ssl argument must be an SSLContext or None')
if ssl_handshake_timeout is not None and ssl is None:
raise ValueError(
'ssl_handshake_timeout is only meaningful with ssl')
if host is not None or port is not None:
if sock is not None:
raise ValueError(
'host/port and sock can not be specified at the same time')
if reuse_address is None:
reuse_address = os.name == 'posix' and sys.platform != 'cygwin'
sockets = []
if host == '':
hosts = [None]
elif (isinstance(host, str) or
not isinstance(host, collections.abc.Iterable)):
hosts = [host]
else:
hosts = host
fs = [self._create_server_getaddrinfo(host, port, family=family,
flags=flags)
for host in hosts]
infos = await tasks.gather(*fs, loop=self)
infos = set(itertools.chain.from_iterable(infos))
completed = False
try:
for res in infos:
af, socktype, proto, canonname, sa = res
try:
sock = socket.socket(af, socktype, proto)
except socket.error:
# Assume it's a bad family/type/protocol combination.
if self._debug:
logger.warning('create_server() failed to create '
'socket.socket(%r, %r, %r)',
af, socktype, proto, exc_info=True)
continue
sockets.append(sock)
if reuse_address:
sock.setsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
if reuse_port:
_set_reuseport(sock)
# Disable IPv4/IPv6 dual stack support (enabled by
# default on Linux) which makes a single socket
# listen on both address families.
if (_HAS_IPv6 and
af == socket.AF_INET6 and
hasattr(socket, 'IPPROTO_IPV6')):
sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_V6ONLY,
True)
try:
sock.bind(sa)
except OSError as err:
raise OSError(err.errno, 'error while attempting '
'to bind on address %r: %s'
% (sa, err.strerror.lower())) from None
completed = True
finally:
if not completed:
for sock in sockets:
sock.close()
else:
if sock is None:
raise ValueError('Neither host/port nor sock were specified')
if sock.type != socket.SOCK_STREAM:
raise ValueError(f'A Stream Socket was expected, got {sock!r}')
sockets = [sock]
for sock in sockets:
sock.setblocking(False)
server = Server(self, sockets, protocol_factory,
ssl, backlog, ssl_handshake_timeout)
if start_serving:
server._start_serving()
# Skip one loop iteration so that all 'loop.add_reader'
# go through.
await tasks.sleep(0, loop=self)
if self._debug:
logger.info("%r is serving", server)
return server
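# Illustrative sketch of create_server() followed by serve_forever(); the
# protocol class is an assumption of the example:
#
#   server = await loop.create_server(EchoServerProtocol, "127.0.0.1", 8888)
#   try:
#       await server.serve_forever()
#   finally:
#       server.close()
#       await server.wait_closed()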
async def connect_accepted_socket(
self, protocol_factory, sock,
*, ssl=None,
ssl_handshake_timeout=None):
"""Handle an accepted connection.
This is used by servers that accept connections outside of
asyncio but that use asyncio to handle connections.
This method is a coroutine. When completed, the coroutine
returns a (transport, protocol) pair.
"""
if sock.type != socket.SOCK_STREAM:
raise ValueError(f'A Stream Socket was expected, got {sock!r}')
if ssl_handshake_timeout is not None and not ssl:
raise ValueError(
'ssl_handshake_timeout is only meaningful with ssl')
transport, protocol = await self._create_connection_transport(
sock, protocol_factory, ssl, '', server_side=True,
ssl_handshake_timeout=ssl_handshake_timeout)
if self._debug:
# Get the socket from the transport because SSL transport closes
# the old socket and creates a new SSL socket
sock = transport.get_extra_info('socket')
logger.debug("%r handled: (%r, %r)", sock, transport, protocol)
return transport, protocol
async def connect_read_pipe(self, protocol_factory, pipe):
protocol = protocol_factory()
waiter = self.create_future()
transport = self._make_read_pipe_transport(pipe, protocol, waiter)
try:
await waiter
except:
transport.close()
raise
if self._debug:
logger.debug('Read pipe %r connected: (%r, %r)',
pipe.fileno(), transport, protocol)
return transport, protocol
async def connect_write_pipe(self, protocol_factory, pipe):
protocol = protocol_factory()
waiter = self.create_future()
transport = self._make_write_pipe_transport(pipe, protocol, waiter)
try:
await waiter
except:
transport.close()
raise
if self._debug:
logger.debug('Write pipe %r connected: (%r, %r)',
pipe.fileno(), transport, protocol)
return transport, protocol
def _log_subprocess(self, msg, stdin, stdout, stderr):
info = [msg]
if stdin is not None:
info.append(f'stdin={_format_pipe(stdin)}')
if stdout is not None and stderr == subprocess.STDOUT:
info.append(f'stdout=stderr={_format_pipe(stdout)}')
else:
if stdout is not None:
info.append(f'stdout={_format_pipe(stdout)}')
if stderr is not None:
info.append(f'stderr={_format_pipe(stderr)}')
logger.debug(' '.join(info))
async def subprocess_shell(self, protocol_factory, cmd, *,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=False,
shell=True, bufsize=0,
encoding=None, errors=None, text=None,
**kwargs):
if not isinstance(cmd, (bytes, str)):
raise ValueError("cmd must be a string")
if universal_newlines:
raise ValueError("universal_newlines must be False")
if not shell:
raise ValueError("shell must be True")
if bufsize != 0:
raise ValueError("bufsize must be 0")
if text:
raise ValueError("text must be False")
if encoding is not None:
raise ValueError("encoding must be None")
if errors is not None:
raise ValueError("errors must be None")
protocol = protocol_factory()
debug_log = None
if self._debug:
# don't log parameters: they may contain sensitive information
# (password) and may be too long
debug_log = 'run shell command %r' % cmd
self._log_subprocess(debug_log, stdin, stdout, stderr)
transport = await self._make_subprocess_transport(
protocol, cmd, True, stdin, stdout, stderr, bufsize, **kwargs)
if self._debug and debug_log is not None:
logger.info('%s: %r', debug_log, transport)
return transport, protocol
async def subprocess_exec(self, protocol_factory, program, *args,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, universal_newlines=False,
shell=False, bufsize=0,
encoding=None, errors=None, text=None,
**kwargs):
if universal_newlines:
raise ValueError("universal_newlines must be False")
if shell:
raise ValueError("shell must be False")
if bufsize != 0:
raise ValueError("bufsize must be 0")
if text:
raise ValueError("text must be False")
if encoding is not None:
raise ValueError("encoding must be None")
if errors is not None:
raise ValueError("errors must be None")
popen_args = (program,) + args
protocol = protocol_factory()
debug_log = None
if self._debug:
# don't log parameters: they may contain sensitive information
# (password) and may be too long
debug_log = f'execute program {program!r}'
self._log_subprocess(debug_log, stdin, stdout, stderr)
transport = await self._make_subprocess_transport(
protocol, popen_args, False, stdin, stdout, stderr,
bufsize, **kwargs)
if self._debug and debug_log is not None:
logger.info('%s: %r', debug_log, transport)
return transport, protocol
def get_exception_handler(self):
"""Return an exception handler, or None if the default one is in use.
"""
return self._exception_handler
def set_exception_handler(self, handler):
"""Set handler as the new event loop exception handler.
If handler is None, the default exception handler will
be set.
If handler is a callable object, it should have a
signature matching '(loop, context)', where 'loop'
will be a reference to the active event loop, 'context'
will be a dict object (see `call_exception_handler()`
documentation for details about context).
"""
if handler is not None and not callable(handler):
raise TypeError(f'A callable object or None is expected, '
f'got {handler!r}')
self._exception_handler = handler
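# Hedged sketch of a custom handler matching the '(loop, context)' signature
# described above (names are assumptions of the example):
#
#   def my_handler(loop, context):
#       print("caught:", context.get("message"), context.get("exception"))
#       loop.default_exception_handler(context)   # optionally defer to default
#
#   loop.set_exception_handler(my_handler)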
def default_exception_handler(self, context):
"""Default exception handler.
This is called when an exception occurs and no exception
handler is set, and can be called by a custom exception
handler that wants to defer to the default behavior.
This default handler logs the error message and other
context-dependent information. In debug mode, a truncated
stack trace is also appended showing where the given object
(e.g. a handle or future or task) was created, if any.
The context parameter has the same meaning as in
`call_exception_handler()`.
"""
message = context.get('message')
if not message:
message = 'Unhandled exception in event loop'
exception = context.get('exception')
if exception is not None:
exc_info = (type(exception), exception, exception.__traceback__)
else:
exc_info = False
if ('source_traceback' not in context and
self._current_handle is not None and
self._current_handle._source_traceback):
context['handle_traceback'] = \
self._current_handle._source_traceback
log_lines = [message]
for key in sorted(context):
if key in {'message', 'exception'}:
continue
value = context[key]
if key == 'source_traceback':
tb = ''.join(traceback.format_list(value))
value = 'Object created at (most recent call last):\n'
value += tb.rstrip()
elif key == 'handle_traceback':
tb = ''.join(traceback.format_list(value))
value = 'Handle created at (most recent call last):\n'
value += tb.rstrip()
else:
value = repr(value)
log_lines.append(f'{key}: {value}')
logger.error('\n'.join(log_lines), exc_info=exc_info)
def call_exception_handler(self, context):
"""Call the current event loop's exception handler.
The context argument is a dict containing the following keys:
- 'message': Error message;
- 'exception' (optional): Exception object;
- 'future' (optional): Future instance;
- 'task' (optional): Task instance;
- 'handle' (optional): Handle instance;
- 'protocol' (optional): Protocol instance;
- 'transport' (optional): Transport instance;
- 'socket' (optional): Socket instance;
- 'asyncgen' (optional): Asynchronous generator that caused
the exception.
New keys may be introduced in the future.
Note: do not overload this method in an event loop subclass.
For custom exception handling, use the
`set_exception_handler()` method.
"""
if self._exception_handler is None:
try:
self.default_exception_handler(context)
except (SystemExit, KeyboardInterrupt):
raise
except BaseException:
# Second protection layer for unexpected errors
# in the default implementation, as well as for subclassed
# event loops with overloaded "default_exception_handler".
logger.error('Exception in default exception handler',
exc_info=True)
else:
try:
self._exception_handler(self, context)
except (SystemExit, KeyboardInterrupt):
raise
except BaseException as exc:
# Exception in the user set custom exception handler.
try:
# Let's try default handler.
self.default_exception_handler({
'message': 'Unhandled error in exception handler',
'exception': exc,
'context': context,
})
except (SystemExit, KeyboardInterrupt):
raise
except BaseException:
# Guard 'default_exception_handler' in case it is
# overloaded.
logger.error('Exception in default exception handler '
'while handling an unexpected error '
'in custom exception handler',
exc_info=True)
def _add_callback(self, handle):
"""Add a Handle to _scheduled (TimerHandle) or _ready."""
assert isinstance(handle, events.Handle), 'A Handle is required here'
if handle._cancelled:
return
assert not isinstance(handle, events.TimerHandle)
self._ready.append(handle)
def _add_callback_signalsafe(self, handle):
"""Like _add_callback() but called from a signal handler."""
self._add_callback(handle)
self._write_to_self()
def _timer_handle_cancelled(self, handle):
"""Notification that a TimerHandle has been cancelled."""
if handle._scheduled:
self._timer_cancelled_count += 1
def _run_once(self):
"""Run one full iteration of the event loop.
This calls all currently ready callbacks, polls for I/O,
schedules the resulting callbacks, and finally schedules
'call_later' callbacks.
"""
sched_count = len(self._scheduled)
if (sched_count > _MIN_SCHEDULED_TIMER_HANDLES and
self._timer_cancelled_count / sched_count >
_MIN_CANCELLED_TIMER_HANDLES_FRACTION):
# Remove delayed calls that were cancelled if their number
# is too high
new_scheduled = []
for handle in self._scheduled:
if handle._cancelled:
handle._scheduled = False
else:
new_scheduled.append(handle)
heapq.heapify(new_scheduled)
self._scheduled = new_scheduled
self._timer_cancelled_count = 0
else:
# Remove delayed calls that were cancelled from head of queue.
while self._scheduled and self._scheduled[0]._cancelled:
self._timer_cancelled_count -= 1
handle = heapq.heappop(self._scheduled)
handle._scheduled = False
timeout = None
if self._ready or self._stopping:
timeout = 0
elif self._scheduled:
# Compute the desired timeout.
when = self._scheduled[0]._when
timeout = min(max(0, when - self.time()), MAXIMUM_SELECT_TIMEOUT)
event_list = self._selector.select(timeout)
self._process_events(event_list)
# Handle 'later' callbacks that are ready.
end_time = self.time() + self._clock_resolution
while self._scheduled:
handle = self._scheduled[0]
if handle._when >= end_time:
break
handle = heapq.heappop(self._scheduled)
handle._scheduled = False
self._ready.append(handle)
# This is the only place where callbacks are actually *called*.
# All other places just add them to ready.
# Note: We run all currently scheduled callbacks, but not any
# callbacks scheduled by callbacks run this time around --
# they will be run the next time (after another I/O poll).
# Use an idiom that is thread-safe without using locks.
ntodo = len(self._ready)
for i in range(ntodo):
handle = self._ready.popleft()
if handle._cancelled:
continue
if self._debug:
try:
self._current_handle = handle
t0 = self.time()
handle._run()
dt = self.time() - t0
if dt >= self.slow_callback_duration:
logger.warning('Executing %s took %.3f seconds',
_format_handle(handle), dt)
finally:
self._current_handle = None
else:
handle._run()
handle = None # Needed to break cycles when an exception occurs.
def _set_coroutine_origin_tracking(self, enabled):
if bool(enabled) == bool(self._coroutine_origin_tracking_enabled):
return
if enabled:
self._coroutine_origin_tracking_saved_depth = (
sys.get_coroutine_origin_tracking_depth())
sys.set_coroutine_origin_tracking_depth(
constants.DEBUG_STACK_DEPTH)
else:
sys.set_coroutine_origin_tracking_depth(
self._coroutine_origin_tracking_saved_depth)
self._coroutine_origin_tracking_enabled = enabled
def get_debug(self):
return self._debug
def set_debug(self, enabled):
self._debug = enabled
if self.is_running():
self.call_soon_threadsafe(self._set_coroutine_origin_tracking, enabled)
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class MicrosoftResourceHealthConfiguration(Configuration):
"""Configuration for MicrosoftResourceHealth.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms part of the URI for every service call.
:type subscription_id: str
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
**kwargs: Any
) -> None:
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
super(MicrosoftResourceHealthConfiguration, self).__init__(**kwargs)
self.credential = credential
self.subscription_id = subscription_id
self.api_version = "2015-01-01"
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-resourcehealth/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs: Any
) -> None:
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
|
#!/usr/bin/env python
# Copyright 2016 IBM All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from setuptools import setup
from setuptools.command.test import test as TestCommand
import os
import sys
__version__ = '3.1.0'
if sys.argv[-1] == 'publish':
# test server
os.system('python setup.py register -r pypitest')
os.system('python setup.py sdist upload -r pypitest')
# production server
os.system('python setup.py register -r pypi')
os.system('python setup.py sdist upload -r pypi')
sys.exit()
# Convert README.md to README.rst for pypi
try:
from pypandoc import convert_file
def read_md(f):
return convert_file(f, 'rst')
# read_md = lambda f: convert(f, 'rst')
except ImportError:
print('warning: pypandoc module not found, '
'could not convert Markdown to RST')
def read_md(f):
return open(f, 'rb').read().decode(encoding='utf-8')
# read_md = lambda f: open(f, 'rb').read().decode(encoding='utf-8')
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = ['--strict', '--verbose', '--tb=long', 'test']
self.test_suite = True
def run_tests(self):
import pytest
errcode = pytest.main(self.test_args)
sys.exit(errcode)
setup(name='ibm-watson',
version=__version__,
description='Client library to use the IBM Watson Services',
license='Apache 2.0',
install_requires=['requests>=2.0, <3.0', 'python_dateutil>=2.5.3', 'websocket-client==0.48.0', 'ibm_cloud_sdk_core>=0.5.1'],
tests_require=['responses', 'pytest', 'python_dotenv', 'pytest-rerunfailures', 'tox'],
cmdclass={'test': PyTest},
author='IBM Watson',
author_email='watdevex@us.ibm.com',
long_description=read_md('README.md'),
url='https://github.com/watson-developer-cloud/python-sdk',
packages=['ibm_watson'],
include_package_data=True,
keywords='language, vision, question and answer' +
' tone_analyzer, natural language classifier,' +
' text to speech, language translation, ' +
'language identification, concept expansion, machine translation, ' +
'personality insights, message resonance, watson developer cloud, ' +
' wdc, watson, ibm, dialog, user modeling,' +
'tone analyzer, speech to text, visual recognition',
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Libraries :: Application '
'Frameworks',
],
zip_safe=True
)
|
#!/usr/bin/env python3
# Copyright (c) 2016-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import re
import fnmatch
import sys
import subprocess
import datetime
import os
################################################################################
# file filtering
################################################################################
EXCLUDE = [
# auto generated:
'src/qt/satcoinstrings.cpp',
'src/chainparamsseeds.h',
# other external copyrights:
'src/reverse_iterator.h',
'src/test/fuzz/FuzzedDataProvider.h',
'src/tinyformat.h',
'test/functional/test_framework/bignum.py',
# python init:
'*__init__.py',
]
EXCLUDE_COMPILED = re.compile('|'.join([fnmatch.translate(m) for m in EXCLUDE]))
EXCLUDE_DIRS = [
# git subtrees
"src/crypto/ctaes/",
"src/leveldb/",
"src/secp256k1/",
"src/univalue/",
"src/crc32c/",
]
INCLUDE = ['*.h', '*.cpp', '*.cc', '*.c', '*.mm', '*.py', '*.sh', '*.bash-completion']
INCLUDE_COMPILED = re.compile('|'.join([fnmatch.translate(m) for m in INCLUDE]))
def applies_to_file(filename):
for excluded_dir in EXCLUDE_DIRS:
if filename.startswith(excluded_dir):
return False
return ((EXCLUDE_COMPILED.match(filename) is None) and
(INCLUDE_COMPILED.match(filename) is not None))
################################################################################
# obtain list of files in repo according to INCLUDE and EXCLUDE
################################################################################
GIT_LS_CMD = 'git ls-files --full-name'.split(' ')
GIT_TOPLEVEL_CMD = 'git rev-parse --show-toplevel'.split(' ')
def call_git_ls(base_directory):
out = subprocess.check_output([*GIT_LS_CMD, base_directory])
return [f for f in out.decode("utf-8").split('\n') if f != '']
def call_git_toplevel():
"Returns the absolute path to the project root"
return subprocess.check_output(GIT_TOPLEVEL_CMD).strip().decode("utf-8")
def get_filenames_to_examine(base_directory):
"Returns an array of absolute paths to any project files in the base_directory that pass the include/exclude filters"
root = call_git_toplevel()
filenames = call_git_ls(base_directory)
return sorted([os.path.join(root, filename) for filename in filenames if
applies_to_file(filename)])
################################################################################
# define and compile regexes for the patterns we are looking for
################################################################################
COPYRIGHT_WITH_C = r'Copyright \(c\)'
COPYRIGHT_WITHOUT_C = 'Copyright'
ANY_COPYRIGHT_STYLE = '(%s|%s)' % (COPYRIGHT_WITH_C, COPYRIGHT_WITHOUT_C)
YEAR = "20[0-9][0-9]"
YEAR_RANGE = '(%s)(-%s)?' % (YEAR, YEAR)
YEAR_LIST = '(%s)(, %s)+' % (YEAR, YEAR)
ANY_YEAR_STYLE = '(%s|%s)' % (YEAR_RANGE, YEAR_LIST)
ANY_COPYRIGHT_STYLE_OR_YEAR_STYLE = ("%s %s" % (ANY_COPYRIGHT_STYLE,
ANY_YEAR_STYLE))
ANY_COPYRIGHT_COMPILED = re.compile(ANY_COPYRIGHT_STYLE_OR_YEAR_STYLE)
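# Examples matched: "Copyright (c) 2015", "Copyright (c) 2014-2019", "Copyright 2009, 2010".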
def compile_copyright_regex(copyright_style, year_style, name):
return re.compile(r'%s %s,? %s( +\*)?\n' % (copyright_style, year_style, name))
EXPECTED_HOLDER_NAMES = [
r"Satoshi Nakamoto",
r"The Bitcoin Core developers",
r"BitPay Inc\.",
r"University of Illinois at Urbana-Champaign\.",
r"Pieter Wuille",
r"Wladimir J\. van der Laan",
r"Jeff Garzik",
r"Jan-Klaas Kollhof",
r"ArtForz -- public domain half-a-node",
r"Intel Corporation ?",
r"The Zcash developers",
r"Jeremy Rubin",
]
DOMINANT_STYLE_COMPILED = {}
YEAR_LIST_STYLE_COMPILED = {}
WITHOUT_C_STYLE_COMPILED = {}
for holder_name in EXPECTED_HOLDER_NAMES:
DOMINANT_STYLE_COMPILED[holder_name] = (
compile_copyright_regex(COPYRIGHT_WITH_C, YEAR_RANGE, holder_name))
YEAR_LIST_STYLE_COMPILED[holder_name] = (
compile_copyright_regex(COPYRIGHT_WITH_C, YEAR_LIST, holder_name))
WITHOUT_C_STYLE_COMPILED[holder_name] = (
compile_copyright_regex(COPYRIGHT_WITHOUT_C, ANY_YEAR_STYLE,
holder_name))
################################################################################
# search file contents for copyright message of particular category
################################################################################
def get_count_of_copyrights_of_any_style_any_holder(contents):
return len(ANY_COPYRIGHT_COMPILED.findall(contents))
def file_has_dominant_style_copyright_for_holder(contents, holder_name):
match = DOMINANT_STYLE_COMPILED[holder_name].search(contents)
return match is not None
def file_has_year_list_style_copyright_for_holder(contents, holder_name):
match = YEAR_LIST_STYLE_COMPILED[holder_name].search(contents)
return match is not None
def file_has_without_c_style_copyright_for_holder(contents, holder_name):
match = WITHOUT_C_STYLE_COMPILED[holder_name].search(contents)
return match is not None
################################################################################
# get file info
################################################################################
def read_file(filename):
return open(filename, 'r', encoding="utf8").read()
def gather_file_info(filename):
info = {}
info['filename'] = filename
c = read_file(filename)
info['contents'] = c
info['all_copyrights'] = get_count_of_copyrights_of_any_style_any_holder(c)
info['classified_copyrights'] = 0
info['dominant_style'] = {}
info['year_list_style'] = {}
info['without_c_style'] = {}
for holder_name in EXPECTED_HOLDER_NAMES:
has_dominant_style = (
file_has_dominant_style_copyright_for_holder(c, holder_name))
has_year_list_style = (
file_has_year_list_style_copyright_for_holder(c, holder_name))
has_without_c_style = (
file_has_without_c_style_copyright_for_holder(c, holder_name))
info['dominant_style'][holder_name] = has_dominant_style
info['year_list_style'][holder_name] = has_year_list_style
info['without_c_style'][holder_name] = has_without_c_style
if has_dominant_style or has_year_list_style or has_without_c_style:
info['classified_copyrights'] = info['classified_copyrights'] + 1
return info
################################################################################
# report execution
################################################################################
SEPARATOR = '-'.join(['' for _ in range(80)])
def print_filenames(filenames, verbose):
if not verbose:
return
for filename in filenames:
print("\t%s" % filename)
def print_report(file_infos, verbose):
print(SEPARATOR)
examined = [i['filename'] for i in file_infos]
print("%d files examined according to INCLUDE and EXCLUDE fnmatch rules" %
len(examined))
print_filenames(examined, verbose)
print(SEPARATOR)
print('')
zero_copyrights = [i['filename'] for i in file_infos if
i['all_copyrights'] == 0]
print("%4d with zero copyrights" % len(zero_copyrights))
print_filenames(zero_copyrights, verbose)
one_copyright = [i['filename'] for i in file_infos if
i['all_copyrights'] == 1]
print("%4d with one copyright" % len(one_copyright))
print_filenames(one_copyright, verbose)
two_copyrights = [i['filename'] for i in file_infos if
i['all_copyrights'] == 2]
print("%4d with two copyrights" % len(two_copyrights))
print_filenames(two_copyrights, verbose)
three_copyrights = [i['filename'] for i in file_infos if
i['all_copyrights'] == 3]
print("%4d with three copyrights" % len(three_copyrights))
print_filenames(three_copyrights, verbose)
four_or_more_copyrights = [i['filename'] for i in file_infos if
i['all_copyrights'] >= 4]
print("%4d with four or more copyrights" % len(four_or_more_copyrights))
print_filenames(four_or_more_copyrights, verbose)
print('')
print(SEPARATOR)
print('Copyrights with dominant style:\ne.g. "Copyright (c)" and '
'"<year>" or "<startYear>-<endYear>":\n')
for holder_name in EXPECTED_HOLDER_NAMES:
dominant_style = [i['filename'] for i in file_infos if
i['dominant_style'][holder_name]]
if len(dominant_style) > 0:
print("%4d with '%s'" % (len(dominant_style),
holder_name.replace('\n', '\\n')))
print_filenames(dominant_style, verbose)
print('')
print(SEPARATOR)
print('Copyrights with year list style:\ne.g. "Copyright (c)" and '
'"<year1>, <year2>, ...":\n')
for holder_name in EXPECTED_HOLDER_NAMES:
year_list_style = [i['filename'] for i in file_infos if
i['year_list_style'][holder_name]]
if len(year_list_style) > 0:
print("%4d with '%s'" % (len(year_list_style),
holder_name.replace('\n', '\\n')))
print_filenames(year_list_style, verbose)
print('')
print(SEPARATOR)
print('Copyrights with no "(c)" style:\ne.g. "Copyright" and "<year>" or '
'"<startYear>-<endYear>":\n')
for holder_name in EXPECTED_HOLDER_NAMES:
without_c_style = [i['filename'] for i in file_infos if
i['without_c_style'][holder_name]]
if len(without_c_style) > 0:
print("%4d with '%s'" % (len(without_c_style),
holder_name.replace('\n', '\\n')))
print_filenames(without_c_style, verbose)
print('')
print(SEPARATOR)
unclassified_copyrights = [i['filename'] for i in file_infos if
i['classified_copyrights'] < i['all_copyrights']]
print("%d with unexpected copyright holder names" %
len(unclassified_copyrights))
print_filenames(unclassified_copyrights, verbose)
print(SEPARATOR)
def exec_report(base_directory, verbose):
filenames = get_filenames_to_examine(base_directory)
file_infos = [gather_file_info(f) for f in filenames]
print_report(file_infos, verbose)
################################################################################
# report cmd
################################################################################
REPORT_USAGE = """
Produces a report of all copyright header notices found inside the source files
of a repository.
Usage:
$ ./copyright_header.py report <base_directory> [verbose]
Arguments:
<base_directory> - The base directory of a satcoin source code repository.
[verbose] - Includes a list of every file of each subcategory in the report.
"""
def report_cmd(argv):
if len(argv) == 2:
sys.exit(REPORT_USAGE)
base_directory = argv[2]
if not os.path.exists(base_directory):
sys.exit("*** bad <base_directory>: %s" % base_directory)
if len(argv) == 3:
verbose = False
elif argv[3] == 'verbose':
verbose = True
else:
sys.exit("*** unknown argument: %s" % argv[2])
exec_report(base_directory, verbose)
################################################################################
# query git for year of last change
################################################################################
GIT_LOG_CMD = "git log --pretty=format:%%ai %s"
def call_git_log(filename):
out = subprocess.check_output((GIT_LOG_CMD % filename).split(' '))
return out.decode("utf-8").split('\n')
def get_git_change_years(filename):
git_log_lines = call_git_log(filename)
if len(git_log_lines) == 0:
return [datetime.date.today().year]
# timestamp is in ISO 8601 format. e.g. "2016-09-05 14:25:32 -0600"
return [line.split(' ')[0].split('-')[0] for line in git_log_lines]
def get_most_recent_git_change_year(filename):
return max(get_git_change_years(filename))
################################################################################
# read and write to file
################################################################################
def read_file_lines(filename):
f = open(filename, 'r', encoding="utf8")
file_lines = f.readlines()
f.close()
return file_lines
def write_file_lines(filename, file_lines):
f = open(filename, 'w', encoding="utf8")
f.write(''.join(file_lines))
f.close()
################################################################################
# update header years execution
################################################################################
COPYRIGHT = r'Copyright \(c\)'
YEAR = "20[0-9][0-9]"
YEAR_RANGE = '(%s)(-%s)?' % (YEAR, YEAR)
HOLDER = 'The Bitcoin Core developers'
UPDATEABLE_LINE_COMPILED = re.compile(' '.join([COPYRIGHT, YEAR_RANGE, HOLDER]))
def get_updatable_copyright_line(file_lines):
index = 0
for line in file_lines:
if UPDATEABLE_LINE_COMPILED.search(line) is not None:
return index, line
index = index + 1
return None, None
def parse_year_range(year_range):
year_split = year_range.split('-')
start_year = year_split[0]
if len(year_split) == 1:
return start_year, start_year
return start_year, year_split[1]
def year_range_to_str(start_year, end_year):
if start_year == end_year:
return start_year
return "%s-%s" % (start_year, end_year)
def create_updated_copyright_line(line, last_git_change_year):
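# e.g. a line containing "Copyright (c) 2011-2015 The Bitcoin Core developers" whose file
# last changed in 2019 becomes "Copyright (c) 2011-2019 The Bitcoin Core developers".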
copyright_splitter = 'Copyright (c) '
copyright_split = line.split(copyright_splitter)
# Preserve characters on line that are ahead of the start of the copyright
# notice - they are part of the comment block and vary from file-to-file.
before_copyright = copyright_split[0]
after_copyright = copyright_split[1]
space_split = after_copyright.split(' ')
year_range = space_split[0]
start_year, end_year = parse_year_range(year_range)
if end_year == last_git_change_year:
return line
return (before_copyright + copyright_splitter +
year_range_to_str(start_year, last_git_change_year) + ' ' +
' '.join(space_split[1:]))
def update_updatable_copyright(filename):
file_lines = read_file_lines(filename)
index, line = get_updatable_copyright_line(file_lines)
if not line:
print_file_action_message(filename, "No updatable copyright.")
return
last_git_change_year = get_most_recent_git_change_year(filename)
new_line = create_updated_copyright_line(line, last_git_change_year)
if line == new_line:
print_file_action_message(filename, "Copyright up-to-date.")
return
file_lines[index] = new_line
write_file_lines(filename, file_lines)
print_file_action_message(filename,
"Copyright updated! -> %s" % last_git_change_year)
def exec_update_header_year(base_directory):
for filename in get_filenames_to_examine(base_directory):
update_updatable_copyright(filename)
################################################################################
# update cmd
################################################################################
UPDATE_USAGE = """
Updates all the copyright headers of "The Bitcoin Core developers" which were
changed in a year more recent than is listed. For example:
// Copyright (c) <firstYear>-<lastYear> The Bitcoin Core developers
will be updated to:
// Copyright (c) <firstYear>-<lastModifiedYear> The Bitcoin Core developers
where <lastModifiedYear> is obtained from the 'git log' history.
This subcommand also handles copyright headers that have only a single year. In those cases:
// Copyright (c) <year> The Bitcoin Core developers
will be updated to:
// Copyright (c) <year>-<lastModifiedYear> The Bitcoin Core developers
where the update is appropriate.
Usage:
$ ./copyright_header.py update <base_directory>
Arguments:
<base_directory> - The base directory of a satcoin source code repository.
"""
def print_file_action_message(filename, action):
print("%-52s %s" % (filename, action))
def update_cmd(argv):
if len(argv) != 3:
sys.exit(UPDATE_USAGE)
base_directory = argv[2]
if not os.path.exists(base_directory):
sys.exit("*** bad base_directory: %s" % base_directory)
exec_update_header_year(base_directory)
################################################################################
# inserted copyright header format
################################################################################
def get_header_lines(header, start_year, end_year):
lines = header.split('\n')[1:-1]
lines[0] = lines[0] % year_range_to_str(start_year, end_year)
return [line + '\n' for line in lines]
CPP_HEADER = '''
// Copyright (c) %s The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
def get_cpp_header_lines_to_insert(start_year, end_year):
return reversed(get_header_lines(CPP_HEADER, start_year, end_year))
SCRIPT_HEADER = '''
# Copyright (c) %s The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
def get_script_header_lines_to_insert(start_year, end_year):
return reversed(get_header_lines(SCRIPT_HEADER, start_year, end_year))
################################################################################
# query git for year of last change
################################################################################
def get_git_change_year_range(filename):
years = get_git_change_years(filename)
return min(years), max(years)
################################################################################
# check for existing core copyright
################################################################################
def file_already_has_core_copyright(file_lines):
index, _ = get_updatable_copyright_line(file_lines)
return index is not None
################################################################################
# insert header execution
################################################################################
def file_has_hashbang(file_lines):
if len(file_lines) < 1:
return False
if len(file_lines[0]) <= 2:
return False
return file_lines[0][:2] == '#!'
def insert_script_header(filename, file_lines, start_year, end_year):
if file_has_hashbang(file_lines):
insert_idx = 1
else:
insert_idx = 0
header_lines = get_script_header_lines_to_insert(start_year, end_year)
for line in header_lines:
file_lines.insert(insert_idx, line)
write_file_lines(filename, file_lines)
def insert_cpp_header(filename, file_lines, start_year, end_year):
file_lines.insert(0, '\n')
header_lines = get_cpp_header_lines_to_insert(start_year, end_year)
for line in header_lines:
file_lines.insert(0, line)
write_file_lines(filename, file_lines)
def exec_insert_header(filename, style):
file_lines = read_file_lines(filename)
if file_already_has_core_copyright(file_lines):
sys.exit('*** %s already has a copyright by The Bitcoin Core developers'
% (filename))
start_year, end_year = get_git_change_year_range(filename)
if style in ['python', 'shell']:
insert_script_header(filename, file_lines, start_year, end_year)
else:
insert_cpp_header(filename, file_lines, start_year, end_year)
################################################################################
# insert cmd
################################################################################
INSERT_USAGE = """
Inserts a copyright header for "The Bitcoin Core developers" at the top of the
file in either Python or C++ style as determined by the file extension. If the
file is a Python file and it has a '#!' starting the first line, the header is
inserted in the line below it.
The copyright dates will be set to be:
"<year_introduced>-<current_year>"
where <year_introduced> is according to the 'git log' history. If
<year_introduced> is equal to <current_year>, the date will be set to be:
"<current_year>"
If the file already has a copyright for "The Bitcoin Core developers", the
script will exit.
Usage:
$ ./copyright_header.py insert <file>
Arguments:
<file> - A source file in the satcoin repository.
"""
def insert_cmd(argv):
if len(argv) != 3:
sys.exit(INSERT_USAGE)
filename = argv[2]
if not os.path.isfile(filename):
sys.exit("*** bad filename: %s" % filename)
_, extension = os.path.splitext(filename)
if extension not in ['.h', '.cpp', '.cc', '.c', '.py', '.sh']:
sys.exit("*** cannot insert for file extension %s" % extension)
if extension == '.py':
style = 'python'
elif extension == '.sh':
style = 'shell'
else:
style = 'cpp'
exec_insert_header(filename, style)
################################################################################
# UI
################################################################################
USAGE = """
copyright_header.py - utilities for managing copyright headers of 'The Satcoin
Core developers' in repository source files.
Usage:
$ ./copyright_header <subcommand>
Subcommands:
report
update
insert
To see subcommand usage, run them without arguments.
"""
SUBCOMMANDS = ['report', 'update', 'insert']
if __name__ == "__main__":
if len(sys.argv) == 1:
sys.exit(USAGE)
subcommand = sys.argv[1]
if subcommand not in SUBCOMMANDS:
sys.exit(USAGE)
if subcommand == 'report':
report_cmd(sys.argv)
elif subcommand == 'update':
update_cmd(sys.argv)
elif subcommand == 'insert':
insert_cmd(sys.argv)
|
import numpy as np
import cupy as cp
import cv2, time
import matplotlib.pyplot as plt
import scipy.stats as st
class Kuramoto:
def __init__(self, size, mean, std, coupling):
"""
mean: float
The mean frequency of oscillators in hertz
"""
self.internal_freq = cp.random.normal(mean, std, (1, size))
self.phase = 2*np.pi*cp.random.rand(1, size)
self.size = size
self.coupling = coupling
# State used by update(): phase history and order-parameter traces must exist before the first call.
self.hist = self.phase.copy()
self.rs = []
self.phis = []
def derivative(self, phase):
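# Kuramoto model with pairwise coupling weights: dθ_i/dt = ω_i + (1/N) Σ_j K_ji·sin(θ_j - θ_i),
# evaluated below via a full NxN matrix of phase differences.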
phase_rep = np.repeat(phase, self.size, axis=0)
phase_diff = np.sum(self.coupling*np.sin(phase_rep.T - phase_rep), axis=0)
deriv = (self.internal_freq + phase_diff/self.size)
return deriv
def euler(self, dt):
return self.phase + self.derivative(self.phase)*dt
def runge_kutta(self, dt):
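# Classic fourth-order Runge-Kutta step: combine four slope estimates to advance the phases by dt.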
k1 = self.derivative(self.phase)*dt
k2 = self.derivative(self.phase + k1/2)*dt
k3 = self.derivative(self.phase + k2/2)*dt
k4 = self.derivative(self.phase + k3)*dt
return self.phase + (k1+2*k2+2*k3+k4)/6
def update(self, dt):
self.phase = np.mod(self.runge_kutta(dt), 2*np.pi)
self.hist = np.vstack((self.hist, self.phase))
r, phi = self.order()
self.rs.append(r)
self.phis.append(phi)
def order(self):
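# Kuramoto order parameter r·e^{iφ} = mean_j exp(i·θ_j); r near 1 means the oscillators are synchronized.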
o = cp.mean(cp.exp(self.phase * 1j))  # np.complex(0, 1) was removed in NumPy 1.24
return abs(o), cp.angle(o)
def getPhaseIm(self, shape):
return cp.asnumpy(cp.reshape(self.phase, shape))
def norm(img):
return cv2.normalize(img, None, 0, 1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)  # dst must be None/ndarray; alpha=0, beta=1 give a [0, 1] min-max range
def gkern(kernlen=21, nsig=3):
"""Returns a 2D Gaussian kernel array."""
interval = (2*nsig+1.)/(kernlen)
x = np.linspace(-nsig-interval/2., nsig+interval/2., kernlen+1)
kern1d = cp.array(np.diff(st.norm.cdf(x)))
kernel_raw = cp.sqrt(cp.outer(kern1d, kern1d))
kernel = kernel_raw/kernel_raw.sum()
return kernel
def shift(a, x, y):
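# Non-circular 2D shift: roll by x columns and y rows, zero-filling the wrapped-around band.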
b = cp.roll(a, x, axis=1)
if x >= 0:
b[:, 0:x] = cp.zeros((a.shape[0], abs(x)))
else:
b[:, x:] = cp.zeros((a.shape[0], abs(x)))
b = cp.roll(b, y, axis=0)
if y >= 0:
b[0:y, :] = cp.zeros((abs(y), a.shape[1]))
else:
b[y:, :] = cp.zeros((abs(y), a.shape[1]))
return b
def ring_coupling(shape, weights):
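# Identity (self term) plus weighted bands coupling each oscillator to neighbours at
# distance 1, 2, ... along the chain; shift() zero-fills, so there is no wrap-around here.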
e = cp.eye(*shape)
c = e.copy()
for i in range(len(weights)-1):
#c += weights[i]*cp.roll(e, i+1, axis=0)
#c += weights[i]*cp.roll(e, i+1, axis=1)
c += weights[i]*shift(e, i+1, 0)
c += weights[i]*shift(e, 0, i+1)
return c
def local_coupling(shape, kernel):
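# Flattened (N*N x N*N) coupling matrix for an N x N grid: row k holds the padded kernel
# shifted to (roughly) centre on cell k, so each oscillator couples mainly to its spatial neighbours.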
axis_pad = ((0,shape[0]-kernel.shape[0]),(0, shape[1]-kernel.shape[1]))
k_pad = cp.pad(kernel, axis_pad, "constant", constant_values=(0))
coupling = cp.zeros((shape[0]**2, shape[1]**2))
for i in range(shape[0]):
for j in range(shape[1]):
k = i*shape[0]+j
coupling[k] = shift(k_pad, i-kernel.shape[1]//2, j-kernel.shape[0]//2).flatten()
return coupling
# Tried params:
# size:40 coupling: 220 hz: 1 hzstd: 0.50 swirls
# size:60 coupling: 300 hz: 1 hzstd: 0.50 swirls
# size:120 coupling: 300 hz: 1 hzstd: 0.50 swirls
if __name__ == "__main__":
size = 120
#coupling = np.ones((size**2, size**2))
#coupling = np.random.normal(1, .5, (size**2, size**2))
#coupling = ring_coupling((size**2, size**2), [1, 1, 1, 0.75, .5, 0.25])
coupling = cp.array(norm(cp.asnumpy(local_coupling((size, size), gkern(5, 2)))))
#coupling = ring_coupling((size**2, size**2), [1, 1])
k = Kuramoto(size**2, 1, 0.50, 1500*coupling)
dt = .050
while True:
k.update(dt)
# view()/store()/play()/save()/view_hist() are not defined on Kuramoto in this snippet,
# so display the phase field directly instead.
cv2.imshow("kuramoto", norm(k.getPhaseIm((size, size))))
#print(k.order())
#k.view_fft()
if chr(cv2.waitKey(1) & 0xFF) == "q":
break
#k.play()
#k.save("kuramoto.avi")
#k.view_hist()
cv2.waitKey(0)
|
# This example shows how to train an PPO agent on atari domain
# For complete experiments, please refer to
# experiments/ppo/run.py
# --- built in ---
import os
import time
import argparse
import functools
# --- 3rd party ---
import gym
# --- my module ---
import unstable_baselines as ub
from unstable_baselines.algo.ppo import PPO
def parse_config(env_id, root_path):
env_id = env_id
root_path = os.path.join(root_path, env_id)
# Create config sections (you can use python dict instead)
Config = ub.utils.StateObject
a = Config()
a.ARGS = Config()
a.ENV = Config()
a.MODEL = Config()
a.LEARN = Config()
a.EVAL = Config()
# Parameters
a.ARGS.logging = f'{root_path}/training.log'
a.ARGS.log_level = 'INFO'
a.ARGS.seed = 1
a.ARGS.eval_seed = 0
a.ARGS.n_envs = 8
# Env/Monitor parameters
a.ENV.env_id = env_id
a.ENV.monitor_dir = f'{root_path}/monitor'
a.ENV.video = True # record video
# Hyper parameters
a.MODEL.learning_rate = 3e-4
a.MODEL.gamma = 0.99
a.MODEL.gae_lambda = 0.95
a.MODEL.policy_clip = 0.1
a.MODEL.value_clip = 4.0
a.MODEL.dual_clip = 2.0
a.MODEL.ent_coef = 0.01
a.MODEL.vf_coef = 0.5
a.MODEL.reg_coef = 1e-6
a.MODEL.clipnorm = 0.5
a.MODEL.target_kl = None
a.MODEL.share_net = True
a.MODEL.force_mlp = False
a.MODEL.mlp_units = [64, 64]
a.MODEL.n_steps = 125
a.MODEL.n_subepochs = 8
a.MODEL.batch_size = 256
# Training parameters
a.LEARN.total_timesteps = a.ARGS.n_envs * a.MODEL.n_steps * 10000 # ~10M
a.LEARN.log_interval = 1 # epoch
a.LEARN.eval_interval = 1000 # epoch
a.LEARN.eval_episodes = 5
a.LEARN.eval_max_steps = 5000
a.LEARN.save_interval = 1000 # epoch
a.LEARN.save_path = f'{root_path}/save'
a.LEARN.tb_logdir = root_path
a.LEARN.verbose = 3
# Performance evaluations
a.EVAL.n_episodes = 10
a.EVAL.max_steps = 10000
a.EVAL.export_path = f'{root_path}/export'
return a
def print_args(LOG, a, group):
'''Pretty print args'''
LOG.subgroup(group)
label = '\n'.join(map('{:15s}'.format, a.keys()))
value = '\n'.join(map(str, a.values()))
LOG.add_rows(fmt='{label} {||} {value}', label=label, value=value)
def make_atari_env(a, eval, **monitor_params):
'''Make atari environment'''
env = ub.envs.make_atari(a.env_id)
env = ub.envs.Monitor(env, root_dir=a.monitor_dir, video=a.video,
**monitor_params)
env = ub.envs.wrap_deepmind(env, episode_life=not eval,
clip_rewards=not eval)
return env
def make_env(a, rank=0, eval=False):
# some params for monitoring
monitor_params = dict(
# filename prefix
prefix = 'eval' if eval else f'{rank}.train',
# record every n episodes, None for cubic schedule
video_kwargs = dict(interval=1 if eval else None)
)
env = make_atari_env(a, eval=eval, **monitor_params)
return env
def evaluate_and_export_final_model(model, eval_env, a):
results = model.eval(eval_env, a.n_episodes, a.max_steps)
metrics = model.get_eval_metrics(results)
model.log_eval(a.n_episodes, results, metrics)
# export PPO agents (only inference mode)
ckpt_metrics = model.get_save_metrics(metrics)
model.agent.save(a.export_path, checkpoint_metrics=ckpt_metrics)
def main(a):
# =============== Reset logger ==============
ub.logger.Config.use(filename=a.ARGS.logging, level=a.ARGS.log_level,
colored=True, reset=False)
LOG = ub.logger.getLogger('PPO')
# ========== Print welcome message ==========
LOG.add_row('')
LOG.add_rows('PPO', fmt='{:@f:ANSI_Shadow}', align='center')
LOG.add_line()
LOG.add_rows(ub.__copyright__)
LOG.flush('INFO')
time.sleep(1)
# ============ Print parameters =============
print_args(LOG, a.ARGS, 'ARGS')
print_args(LOG, a.ENV, 'ENV')
print_args(LOG, a.MODEL, 'MODEL')
print_args(LOG, a.LEARN, 'LEARN')
print_args(LOG, a.EVAL, 'EVAL')
LOG.flush('WARN')
# ================ Make envs ================
env = ub.envs.SubprocVecEnv([
functools.partial(make_env, a.ENV, rank=rank, eval=False)
for rank in range(a.ARGS.n_envs)
])
eval_env = make_env(a.ENV, eval=True)
env.seed(a.ARGS.seed) # seed ~ seed+n_envs
eval_env.seed(a.ARGS.eval_seed)
ub.utils.set_seed(a.ARGS.seed)
# =============== Train model ===============
try:
# --- Setup model & train ---
model = PPO(env, **a.MODEL).learn(eval_env=eval_env, **a.LEARN)
LOG.info('DONE')
# Save model
saved_path = model.save(a.LEARN.save_path)
LOG.info(f'Saving model to {saved_path}')
del model
# --- Load model from the latest checkpoint ---
loaded_model = PPO.load(a.LEARN.save_path)
# Evaluate model
LOG.info('Evaluating the latest model ...')
evaluate_and_export_final_model(loaded_model, eval_env, a.EVAL)
# --- Load model from the best checkpoint ---
loaded_model = PPO.load(a.LEARN.save_path, best=True)
# Evaluate model
LOG.info('Evaluating the best model ...')
evaluate_and_export_final_model(loaded_model, eval_env, a.EVAL)
except Exception:
LOG.exception('Exception occurred')
env.close()
eval_env.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Proximal Policy Optimization')
parser.add_argument('--env_id', type=str, default='BeamRiderNoFrameskip-v4')
parser.add_argument('--root', type=str, default='log/ppo')
args = parser.parse_args()
main(parse_config(args.env_id, args.root))
|
import argparse
import os
import tensorflow.keras as keras
import tensorflow as tf
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
from tensorflow.keras.utils import multi_gpu_model
from config import patience, batch_size, epochs, num_train_samples, num_valid_samples
from data_generator import train_gen, valid_gen
from migrate import migrate_model
from segnet_v4 import build_encoder_decoder, build_refinement
from utils import overall_loss, get_available_cpus, get_available_gpus
log_dir = '/content/drive/Shared drives/DNN/Deep-Image-Matting/logs_4'
checkpoint_models_path = '/content/drive/Shared drives/DNN/Deep-Image-Matting/checkpoints_4/'
if __name__ == '__main__':
# Parse arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--pretrained", help="path to save pretrained model files")
args = vars(ap.parse_args())
pretrained_path = args["pretrained"]
# Callbacks
tensor_board = keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=0, write_graph=True, write_images=True)
model_names = checkpoint_models_path + 'final.{epoch:02d}-{val_loss:.4f}.hdf5'
model_checkpoint = ModelCheckpoint(model_names, monitor='val_loss', verbose=1, save_best_only=True)
early_stop = EarlyStopping('val_loss', patience=patience)
reduce_lr = ReduceLROnPlateau('val_loss', factor=0.1, patience=int(patience / 4), verbose=1)
class MyCbk(keras.callbacks.Callback):
def __init__(self, model):
keras.callbacks.Callback.__init__(self)
self.model_to_save = model
def on_epoch_end(self, epoch, logs=None):
fmt = checkpoint_models_path + 'final.%02d-%.4f.hdf5'
self.model_to_save.save(fmt % (epoch, logs['val_loss']))
# Load our model, added support for Multi-GPUs
num_gpu = len(get_available_gpus())
if num_gpu >= 2:
with tf.device("/cpu:0"):
model = build_encoder_decoder()
model = build_refinement(model)
if pretrained_path is not None:
model.load_weights(pretrained_path)
final = multi_gpu_model(model, gpus=num_gpu)
# rewrite the callback: saving through the original model and not the multi-gpu model.
model_checkpoint = MyCbk(model)
else:
model = build_encoder_decoder()
final = build_refinement(model)
if pretrained_path is not None:
final.load_weights(pretrained_path)
final.compile(optimizer='nadam', loss=overall_loss)
final.summary()  # summary() prints the model itself and returns None
# Final callbacks
callbacks = [tensor_board, model_checkpoint, early_stop, reduce_lr]
# Start Fine-tuning
final.fit_generator(train_gen(),
steps_per_epoch=num_train_samples // batch_size,
validation_data=valid_gen(),
validation_steps=num_valid_samples // batch_size,
epochs=epochs,
verbose=1,
callbacks=callbacks,
use_multiprocessing=True,
workers=2
)
|
import os
import shutil
import unittest
import docker
from .. import helpers
from docker.utils import kwargs_from_env
TEST_IMG = 'alpine:3.10'
TEST_API_VERSION = os.environ.get('DOCKER_TEST_API_VERSION')
class BaseIntegrationTest(unittest.TestCase):
"""
A base class for integration test cases. It cleans up the Docker server
after itself.
"""
def setUp(self):
self.tmp_imgs = []
self.tmp_containers = []
self.tmp_folders = []
self.tmp_volumes = []
self.tmp_networks = []
self.tmp_plugins = []
self.tmp_secrets = []
self.tmp_configs = []
def tearDown(self):
client = docker.from_env(version=TEST_API_VERSION)
try:
for img in self.tmp_imgs:
try:
client.api.remove_image(img)
except docker.errors.APIError:
pass
for container in self.tmp_containers:
try:
client.api.remove_container(container, force=True, v=True)
except docker.errors.APIError:
pass
for network in self.tmp_networks:
try:
client.api.remove_network(network)
except docker.errors.APIError:
pass
for volume in self.tmp_volumes:
try:
client.api.remove_volume(volume)
except docker.errors.APIError:
pass
for secret in self.tmp_secrets:
try:
client.api.remove_secret(secret)
except docker.errors.APIError:
pass
for config in self.tmp_configs:
try:
client.api.remove_config(config)
except docker.errors.APIError:
pass
for folder in self.tmp_folders:
shutil.rmtree(folder)
finally:
client.close()
class BaseAPIIntegrationTest(BaseIntegrationTest):
"""
A test case for `APIClient` integration tests. It sets up an `APIClient`
as `self.client`.
"""
def setUp(self):
super().setUp()
self.client = self.get_client_instance()
def tearDown(self):
super().tearDown()
self.client.close()
@staticmethod
def get_client_instance():
return docker.APIClient(
version=TEST_API_VERSION, timeout=60, **kwargs_from_env()
)
@staticmethod
def _init_swarm(client, **kwargs):
return client.init_swarm(
'127.0.0.1', listen_addr=helpers.swarm_listen_addr(), **kwargs
)
def run_container(self, *args, **kwargs):
container = self.client.create_container(*args, **kwargs)
self.tmp_containers.append(container)
self.client.start(container)
exitcode = self.client.wait(container)['StatusCode']
if exitcode != 0:
output = self.client.logs(container)
raise Exception(
"Container exited with code {}:\n{}"
.format(exitcode, output))
return container
def create_and_start(self, image=TEST_IMG, command='top', **kwargs):
container = self.client.create_container(
image=image, command=command, **kwargs)
self.tmp_containers.append(container)
self.client.start(container)
return container
def execute(self, container, cmd, exit_code=0, **kwargs):
exc = self.client.exec_create(container, cmd, **kwargs)
output = self.client.exec_start(exc)
actual_exit_code = self.client.exec_inspect(exc)['ExitCode']
msg = "Expected `{}` to exit with code {} but returned {}:\n{}".format(
" ".join(cmd), exit_code, actual_exit_code, output)
assert actual_exit_code == exit_code, msg
def init_swarm(self, **kwargs):
return self._init_swarm(self.client, **kwargs)
|
name = input("Enter file:")
if len(name) < 1:
name = "mbox-short.txt"
handle = open(name)
hist=dict()
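# Count messages per sender: the key is the address that follows 'From:'.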
for line in handle:
if line.startswith('From:'):
words=line.split()
if words[1] not in hist:
hist[words[1]]=1
else:
hist[words[1]]=hist[words[1]]+1
#print(hist)
nome=conta=None
for a,b in hist.items():
if conta==None or b>conta:
nome=a
conta=b
print(nome,conta)
# Alternative: same count using dict.get()
name = input("Enter file:")
if len(name) < 1:
name = "mbox-short.txt"
handle = open(name)
hist=dict()
for line in handle:
if line.startswith('From:'):
words=line.split()
hist[words[1]]=hist.get(words[1],0)+1
#print(hist)
nome=conta=None
for a,b in hist.items():
if conta==None or b>conta:
nome=a
conta=b
print(nome,conta)
|
from __future__ import print_function
import torch
import torch.nn as nn
import torch.utils.data
from torch.autograd import Variable
import torch.nn.functional as F
import math
from submodule import *
class hourglass(nn.Module):
def __init__(self, inplanes):
super(hourglass, self).__init__()
self.conv1 = nn.Sequential(convbn_3d(inplanes, inplanes*2, kernel_size=3, stride=2, pad=1),
nn.ReLU(inplace=True))
self.conv2 = convbn_3d(inplanes*2, inplanes*2, kernel_size=3, stride=1, pad=1)
self.conv3 = nn.Sequential(convbn_3d(inplanes*2, inplanes*2, kernel_size=3, stride=2, pad=1),
nn.ReLU(inplace=True))
self.conv4 = nn.Sequential(convbn_3d(inplanes*2, inplanes*2, kernel_size=3, stride=1, pad=1),
nn.ReLU(inplace=True))
self.conv5 = nn.Sequential(nn.ConvTranspose3d(inplanes*2, inplanes*2, kernel_size=3, padding=1, output_padding=1, stride=2,bias=False),
nn.BatchNorm3d(inplanes*2)) #+conv2
self.conv6 = nn.Sequential(nn.ConvTranspose3d(inplanes*2, inplanes, kernel_size=3, padding=1, output_padding=1, stride=2,bias=False),
nn.BatchNorm3d(inplanes)) #+x
def forward(self, x ,presqu, postsqu):
out = self.conv1(x) #in:1/4 out:1/8
pre = self.conv2(out) #in:1/8 out:1/8
if postsqu is not None:
pre = F.relu(pre + postsqu, inplace=True)
else:
pre = F.relu(pre, inplace=True)
out = self.conv3(pre) #in:1/8 out:1/16
out = self.conv4(out) #in:1/16 out:1/16
if presqu is not None:
post = F.relu(self.conv5(out)+presqu, inplace=True) #in:1/16 out:1/8
else:
post = F.relu(self.conv5(out)+pre, inplace=True)
out = self.conv6(post) #in:1/8 out:1/4
return out, pre, post
class PSMNet(nn.Module):
def __init__(self, maxdisp):
super(PSMNet, self).__init__()
self.maxdisp = maxdisp
self.feature_extraction = feature_extraction()
self.dres0 = nn.Sequential(convbn_3d(64, 32, 3, 1, 1),
nn.ReLU(inplace=True),
convbn_3d(32, 32, 3, 1, 1),
nn.ReLU(inplace=True))
self.dres1 = nn.Sequential(convbn_3d(32, 32, 3, 1, 1),
nn.ReLU(inplace=True),
convbn_3d(32, 32, 3, 1, 1))
self.dres2 = hourglass(32)
self.dres3 = hourglass(32)
self.dres4 = hourglass(32)
self.classif1 = nn.Sequential(convbn_3d(32, 32, 3, 1, 1),
nn.ReLU(inplace=True),
nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1,bias=False))
self.classif2 = nn.Sequential(convbn_3d(32, 32, 3, 1, 1),
nn.ReLU(inplace=True),
nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1,bias=False))
self.classif3 = nn.Sequential(convbn_3d(32, 32, 3, 1, 1),
nn.ReLU(inplace=True),
nn.Conv3d(32, 1, kernel_size=3, padding=1, stride=1,bias=False))
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.Conv3d):
n = m.kernel_size[0] * m.kernel_size[1]*m.kernel_size[2] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm3d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, left, right):
refimg_fea = self.feature_extraction(left)
targetimg_fea = self.feature_extraction(right)
#matching
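# Build the 4D cost volume (B, 2C, maxdisp/4, H/4, W/4): for each candidate disparity i,
# pair left features at columns i: with right features shifted left by i columns.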
cost = Variable(torch.FloatTensor(refimg_fea.size()[0], refimg_fea.size()[1]*2, self.maxdisp//4, refimg_fea.size()[2], refimg_fea.size()[3]).zero_()).cuda()
for i in range(self.maxdisp//4):  # integer division: tensor sizes and range() need ints in Python 3
if i > 0 :
cost[:, :refimg_fea.size()[1], i, :,i:] = refimg_fea[:,:,:,i:]
cost[:, refimg_fea.size()[1]:, i, :,i:] = targetimg_fea[:,:,:,:-i]
else:
cost[:, :refimg_fea.size()[1], i, :,:] = refimg_fea
cost[:, refimg_fea.size()[1]:, i, :,:] = targetimg_fea
cost = cost.contiguous()
cost0 = self.dres0(cost)
cost0 = self.dres1(cost0) + cost0
out1, pre1, post1 = self.dres2(cost0, None, None)
out1 = out1+cost0
out2, pre2, post2 = self.dres3(out1, pre1, post1)
out2 = out2+cost0
out3, pre3, post3 = self.dres4(out2, pre1, post2)
out3 = out3+cost0
cost1 = self.classif1(out1)
cost2 = self.classif2(out2) + cost1
cost3 = self.classif3(out3) + cost2
if self.training:
cost1 = F.upsample(cost1, [self.maxdisp,left.size()[2],left.size()[3]], mode='trilinear')
cost2 = F.upsample(cost2, [self.maxdisp,left.size()[2],left.size()[3]], mode='trilinear')
cost1 = torch.squeeze(cost1,1)
pred1 = F.softmax(cost1,dim=1)
pred1 = disparityregression(self.maxdisp)(pred1)
cost2 = torch.squeeze(cost2,1)
pred2 = F.softmax(cost2,dim=1)
pred2 = disparityregression(self.maxdisp)(pred2)
cost3 = F.upsample(cost3, [self.maxdisp,left.size()[2],left.size()[3]], mode='trilinear')
cost3 = torch.squeeze(cost3,1)
cost3 = F.softmax(cost3,dim=1)
# pred3 = ((pred3+1e-10).log() * pred3).sum(1)
# use cost to predict
# print(pred3.size())
# _, pred3 = torch.max(cost3, dim=1)
# pred3 = pred3.float()
pred3 = disparityregression(self.maxdisp)(cost3)
print(pred3.size())
if self.training:
return pred1, pred2, pred3
else:
return pred3, cost3
|